Dataset schema:

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
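To make the schema concrete, the sketch below shows one way rows with these columns could be loaded and inspected. It is a minimal illustration under stated assumptions: the file name `code_sample.parquet`, the use of pandas, and the 0.5 threshold on the duplicate-10-gram signal are all illustrative choices, not part of the dataset itself.

```python
# Minimal sketch (assumptions: rows are stored in "code_sample.parquet";
# the 0.5 threshold below is arbitrary and for illustration only).
import pandas as pd

df = pd.read_parquet("code_sample.parquet")

# Repository metadata columns from the schema above.
print(df[["hexsha", "size", "lang", "max_stars_repo_name", "max_stars_count"]].head())

# Keep rows whose duplicated-10-gram fraction is low, i.e. files that are
# not dominated by repeated boilerplate.
clean = df[df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.5]
print(f"{len(clean)} of {len(df)} rows pass the illustrative filter")

# The raw source text of each file is stored in the "content" column.
print(clean.iloc[0]["content"][:200])
```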

hexsha: 8a3daad118e5fad562fcd9c13b3b8c5d05fafa86 | size: 914 | ext: py | lang: Python
max_stars: path=test/test_widget_response.py, repo=mxenabled/mx-platform-python, head=060dae7ddb02fdcf41fa7f7aebfa4b8a0273afac, licenses=["MIT"], count=null, events=null
max_issues: path=test/test_widget_response.py, repo=mxenabled/mx-platform-python, head=060dae7ddb02fdcf41fa7f7aebfa4b8a0273afac, licenses=["MIT"], count=14, events=2021-11-30T21:56:19.000Z to 2022-02-07T18:47:10.000Z
max_forks: path=test/test_widget_response.py, repo=mxenabled/mx-platform-python, head=060dae7ddb02fdcf41fa7f7aebfa4b8a0273afac, licenses=["MIT"], count=1, events=2022-01-12T14:59:39.000Z to 2022-01-12T14:59:39.000Z
content:
"""
MX Platform API
The MX Platform API is a powerful, fully-featured API designed to make aggregating and enhancing financial data easy and reliable. It can seamlessly connect your app or website to tens of thousands of financial institutions. # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import mx_platform_python
from mx_platform_python.model.widget_response import WidgetResponse
class TestWidgetResponse(unittest.TestCase):
"""WidgetResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testWidgetResponse(self):
"""Test WidgetResponse"""
# FIXME: construct object with mandatory attributes with example values
# model = WidgetResponse() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 25.388889
| 242
| 0.71116
| 113
| 914
| 5.637168
| 0.654867
| 0.062794
| 0.040816
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012605
| 0.218818
| 914
| 35
| 243
| 26.114286
| 0.879552
| 0.551422
| 0
| 0.230769
| 0
| 0
| 0.02139
| 0
| 0
| 0
| 0
| 0.028571
| 0
| 1
| 0.230769
| false
| 0.230769
| 0.307692
| 0
| 0.615385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 5

hexsha: 8a50bc9d16de8dc510ea786663e22d367705e09f | size: 49 | ext: py | lang: Python
max_stars: path=djavError/tests/widgets/test_problem_requests_table.py, repo=dasmith2/djavError, head=6fc1bfcf8b1443be817a9bd8ec2d59e7682521dd, licenses=["MIT"], count=null, events=null
max_issues: path=djavError/tests/widgets/test_problem_requests_table.py, repo=dasmith2/djavError, head=6fc1bfcf8b1443be817a9bd8ec2d59e7682521dd, licenses=["MIT"], count=null, events=null
max_forks: path=djavError/tests/widgets/test_problem_requests_table.py, repo=dasmith2/djavError, head=6fc1bfcf8b1443be817a9bd8ec2d59e7682521dd, licenses=["MIT"], count=null, events=null
content:
# ProblemRequestsTable is an abstract base class
| 24.5
| 48
| 0.836735
| 6
| 49
| 6.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 49
| 1
| 49
| 49
| 0.97619
| 0.938776
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5

hexsha: 8a63b2c4c81b4d095c1585f4f6d11d6ea177c096 | size: 111 | ext: py | lang: Python
max_stars: path=network/models/building_blocks/__init__.py, repo=fnozarian/coiltraine, head=bf418d7924dc47cc84cd21636f17a3cf85e3350f, licenses=["MIT"], count=null, events=null
max_issues: path=network/models/building_blocks/__init__.py, repo=fnozarian/coiltraine, head=bf418d7924dc47cc84cd21636f17a3cf85e3350f, licenses=["MIT"], count=null, events=null
max_forks: path=network/models/building_blocks/__init__.py, repo=fnozarian/coiltraine, head=bf418d7924dc47cc84cd21636f17a3cf85e3350f, licenses=["MIT"], count=null, events=null
content:
from .branching import Branching
from .conv import Conv
from .fc import FC, Gaussian_FC
from .join import Join
| 22.2
| 32
| 0.801802
| 18
| 111
| 4.888889
| 0.388889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153153
| 111
| 4
| 33
| 27.75
| 0.93617
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5

hexsha: 8a6ef9aa355786bb6c6b48ee1dbae27bc0146ffe | size: 10,706 | ext: py | lang: Python
max_stars: path=src/tests/dybm_test.py, repo=rraymondhp/dybm, head=3d618874a2f8838eaeca17ce40649a3789e9f140, licenses=["Apache-2.0"], count=1, events=2017-09-04T13:40:41.000Z to 2017-09-04T13:40:41.000Z
max_issues: path=src/tests/dybm_test.py, repo=rraymondhp/dybm, head=3d618874a2f8838eaeca17ce40649a3789e9f140, licenses=["Apache-2.0"], count=null, events=null
max_forks: path=src/tests/dybm_test.py, repo=rraymondhp/dybm, head=3d618874a2f8838eaeca17ce40649a3789e9f140, licenses=["Apache-2.0"], count=1, events=2019-06-09T01:24:25.000Z to 2019-06-09T01:24:25.000Z
content:
# -*- coding: utf-8 -*-
""" DyBM_test """
__author__ = "Takayuki Osogami"
__copyright__ = "(C) Copyright IBM Corp. 2016"
import unittest
from six.moves import xrange
import numpy as np
import tests.simple
from tests.arraymath import NumpyTestMixin, CupyTestMixin
import pydybm.arraymath as amath
from pydybm.time_series.dybm import LinearDyBM, BinaryDyBM
from pydybm.base.sgd import AdaGrad, ADAM
from pydybm.base.generator import Uniform
class DyBMTestCase(object):
""" unit test for VectorLogisticRegression
"""
"""
Attributes
----------
max_repeat : int
maximum number of training iterations
in_dim : int
dimension of input sequence
out_dim : int
dimension of target sequence
"""
def setUp(self):
self.max_repeat = 100000
self.in_dim = 3 # dimension of input sequence
self.out_dim = 2 # dimension of target sequence
self.rate = 0.01 # learning rate
def tearDown(self):
pass
def test_GenerativeLinearDyBM(self):
""" testing minimal consistency in learning a sequence
"""
print("\nDyBMTestCase.testGenerativeGaussianDyBM")
for delay in [1, 3]:
for SGD in [AdaGrad, ADAM]:
for GENERATOR in [True, False]:
model = LinearDyBM(self.in_dim, self.in_dim, delay,
SGD=SGD())
model.set_learning_rate(self.rate)
i = tests.simple.test_real_model(model, self.max_repeat,
GENERATOR)
self.assertLess(i, self.max_repeat)
def test_DiscriminativeLinearDyBM(self):
""" testing minimal consistency in learning a sequence to an output
"""
print("\nDyBMTestCase.testDiscriminativeGaussianDyBM")
for delay in [1, 3]:
for SGD in [AdaGrad, ADAM]:
for GENERATOR in [True, False]:
model = LinearDyBM(self.in_dim, self.out_dim, delay,
SGD=SGD())
model.set_learning_rate(self.rate)
i = tests.simple.test_real_model(model, self.max_repeat,
GENERATOR)
self.assertLess(i, self.max_repeat)
def test_GenerativeBinaryDyBM(self):
""" testing minimal consistency in learning a sequence
"""
print("\nDyBMTestCase.testGenerativeBinaryDyBM")
for delay in [1, 3]:
for SGD in [AdaGrad]:
for GENERATOR in [True]:
model = BinaryDyBM(self.in_dim, self.in_dim, delay,
SGD=SGD())
model.set_learning_rate(1.)
i = tests.simple.test_binary_model(model, self.max_repeat,
GENERATOR)
self.assertLess(i, self.max_repeat)
def test_DiscriminativeBinaryDyBM(self):
""" testing minimal consistency in learning a sequence to an output
"""
print("\nDyBMTestCase.testDiscriminativeBinaryDyBM")
for delay in [1, 3]:
for SGD in [AdaGrad]:
for GENERATOR in [True]:
model = BinaryDyBM(self.in_dim, self.out_dim, delay,
SGD=SGD())
model.set_learning_rate(1.)
i = tests.simple.test_binary_model(model, self.max_repeat,
GENERATOR)
self.assertLess(i, self.max_repeat)
def test_LearnGenerator(self):
""" testing learning with generator
"""
print("\nDyBMTestCase.testLearnGenerator")
batch = 3
in_mean = 1.0
out_mean = 2.0
d = 0.01
delay = 1
rates = [0.5, 0.8]
L1 = 0.0
L2 = 0.0
random = amath.random.RandomState(0)
in_gen = Uniform(length=batch, low=in_mean - d, high=in_mean + d,
dim=self.in_dim)
in_seq = random.uniform(low=in_mean - d, high=in_mean + d,
size=(batch, self.in_dim))
model = LinearDyBM(self.in_dim, self.in_dim, delay=delay,
decay_rates=rates, L1=L1, L2=L2)
model.set_learning_rate(0.1)
model._learn_sequence(in_seq)
model2 = LinearDyBM(self.in_dim, self.in_dim, delay=delay,
decay_rates=rates, L1=L1, L2=L2)
model2.set_learning_rate(0.1)
model2.learn(in_gen)
random = amath.random.RandomState(0)
in_gen = Uniform(length=batch, low=in_mean - d, high=in_mean + d,
dim=self.in_dim)
in_seq = random.uniform(low=in_mean - d, high=in_mean + d,
size=(batch, self.in_dim))
random = amath.random.RandomState(0)
out_gen = Uniform(length=batch, low=out_mean - d, high=out_mean + d,
dim=self.out_dim)
out_seq = random.uniform(low=out_mean - d, high=out_mean + d,
size=(batch, self.out_dim))
self.assertEqual(model.variables.keys(), model2.variables.keys())
for key in model.variables:
self.assertTrue((model.variables[key] ==
model2.variables[key]).all())
model = LinearDyBM(self.in_dim, self.out_dim, delay=delay,
decay_rates=rates, L1=L1, L2=L2)
model.set_learning_rate(0.1)
model._learn_sequence(in_seq, out_seq)
model2 = LinearDyBM(self.in_dim, self.out_dim, delay=delay,
decay_rates=rates, L1=L1, L2=L2)
model2.set_learning_rate(0.1)
model2.learn(in_gen, out_gen)
self.assertEqual(model.variables.keys(), model2.variables.keys())
for key in model.variables:
self.assertTrue((model.variables[key] ==
model2.variables[key]).all())
def test_UpdateState(self):
""" testing fifo, eligibility trace, and update_state method in
LinearDyBM
"""
print("\n * testing fifo, eligibility trace, and update_state method"
" in LinearDyBM \n")
in_dim = 3
delay = 3
decay_rate = 0.5
len_ts = 10
print("testing wo_delay, single e_trace")
model = LinearDyBM(in_dim, delay=delay, decay_rates=[decay_rate],
insert_to_etrace="wo_delay")
random = np.random.RandomState(0)
in_patterns = np.random.uniform(size=(len_ts, in_dim))
fifo_test = np.zeros((delay - 1, in_dim))
e_trace_test = np.zeros((1, in_dim))
for i in xrange(len_ts):
self.assertTrue(np.allclose(amath.to_numpy(model.fifo.to_array()),
fifo_test))
self.assertTrue(np.allclose(amath.to_numpy(model.e_trace),
e_trace_test))
model.learn_one_step(amath.array(in_patterns[i]))
model._update_state(amath.array(in_patterns[i]))
fifo_test[1:] = fifo_test[:-1]
fifo_test[0] = in_patterns[i]
e_trace_test = e_trace_test * decay_rate + in_patterns[i]
print("testing w_delay, single e_trace")
model = LinearDyBM(in_dim, delay=delay, decay_rates=[decay_rate],
insert_to_etrace="w_delay")
random = np.random.RandomState(0)
in_patterns = np.random.uniform(size=(len_ts, in_dim))
fifo_test = np.zeros((delay - 1, in_dim))
e_trace_test = np.zeros((1, in_dim))
for i in xrange(len_ts):
self.assertTrue(np.allclose(amath.to_numpy(model.fifo.to_array()),
fifo_test))
self.assertTrue(np.allclose(amath.to_numpy(model.e_trace),
e_trace_test))
model.learn_one_step(amath.array(in_patterns[i]))
model._update_state(amath.array(in_patterns[i]))
fifo_test[1:] = fifo_test[:-1]
fifo_test[0] = in_patterns[i]
if i < delay - 1:
pass
else:
e_trace_test = e_trace_test * decay_rate \
+ in_patterns[i - delay + 1]
print("testing w_delay, two e_traces")
model = LinearDyBM(in_dim, delay=delay,
decay_rates=[decay_rate, decay_rate**2],
insert_to_etrace="w_delay")
random = np.random.RandomState(0)
in_patterns = np.random.uniform(size=(len_ts, in_dim))
fifo_test = np.zeros((delay - 1, in_dim))
e_trace_test = np.zeros((2, in_dim))
for i in xrange(len_ts):
self.assertTrue(np.allclose(amath.to_numpy(model.fifo.to_array()),
fifo_test))
self.assertTrue(np.allclose(amath.to_numpy(model.e_trace),
e_trace_test))
model.learn_one_step(amath.array(in_patterns[i]))
model._update_state(amath.array(in_patterns[i]))
fifo_test[1:] = fifo_test[:-1]
fifo_test[0] = in_patterns[i]
if i < delay - 1:
pass
else:
e_trace_test[0] = e_trace_test[0] * decay_rate \
+ in_patterns[i - delay + 1]
e_trace_test[1] = e_trace_test[1] * (decay_rate**2) \
+ in_patterns[i - delay + 1]
print("testing w_delay, single e_trace, delay=1")
delay = 1
model = LinearDyBM(in_dim, delay=delay, decay_rates=[decay_rate],
insert_to_etrace="w_delay")
random = np.random.RandomState(0)
in_patterns = random.uniform(size=(len_ts, in_dim))
e_trace_test = np.zeros((1, in_dim))
for i in xrange(len_ts):
self.assertTrue(np.allclose(amath.to_numpy(model.e_trace),
e_trace_test))
model.learn_one_step(amath.array(in_patterns[i]))
model._update_state(amath.array(in_patterns[i]))
if i < delay - 1:
pass
else:
e_trace_test = e_trace_test * decay_rate \
+ in_patterns[i - delay + 1]
class DyBMTestCaseNumpy(NumpyTestMixin, DyBMTestCase, unittest.TestCase):
pass
class DyBMTestCaseCupy(CupyTestMixin, DyBMTestCase, unittest.TestCase):
pass
if __name__ == "__main__":
unittest.main()
| 39.360294
| 78
| 0.548197
| 1,260
| 10,706
| 4.434921
| 0.126984
| 0.030422
| 0.032212
| 0.018611
| 0.761811
| 0.7466
| 0.7466
| 0.730852
| 0.72262
| 0.711346
| 0
| 0.017723
| 0.351765
| 10,706
| 271
| 79
| 39.505535
| 0.787464
| 0.049131
| 0
| 0.637255
| 0
| 0
| 0.049727
| 0.020315
| 0
| 0
| 0
| 0
| 0.073529
| 1
| 0.039216
| false
| 0.029412
| 0.044118
| 0
| 0.098039
| 0.04902
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5

hexsha: 8aba20b0d71cbd6ba5195fad5a2992c79889e4e2 | size: 10,744 | ext: py | lang: Python
max_stars: path=tests/providers/amazon/aws/sensors/test_emr_job_flow.py, repo=ChaseKnowlden/airflow, head=6b71eac1997a7c0db3b8e3aed6b4e65d01871440, licenses=["Apache-2.0"], count=15,947, events=2019-01-05T13:51:02.000Z to 2022-03-31T23:33:16.000Z
max_issues: path=tests/providers/amazon/aws/sensors/test_emr_job_flow.py, repo=ChaseKnowlden/airflow, head=6b71eac1997a7c0db3b8e3aed6b4e65d01871440, licenses=["Apache-2.0"], count=14,603, events=2019-01-05T09:43:19.000Z to 2022-03-31T23:11:59.000Z
max_forks: path=tests/providers/amazon/aws/sensors/test_emr_job_flow.py, repo=ChaseKnowlden/airflow, head=6b71eac1997a7c0db3b8e3aed6b4e65d01871440, licenses=["Apache-2.0"], count=8,429, events=2019-01-05T19:45:47.000Z to 2022-03-31T22:13:01.000Z
content:
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import unittest
from unittest.mock import MagicMock, patch
import pytest
from dateutil.tz import tzlocal
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.sensors.emr_job_flow import EmrJobFlowSensor
DESCRIBE_CLUSTER_STARTING_RETURN = {
'Cluster': {
'Applications': [{'Name': 'Spark', 'Version': '1.6.1'}],
'AutoTerminate': True,
'Configurations': [],
'Ec2InstanceAttributes': {'IamInstanceProfile': 'EMR_EC2_DefaultRole'},
'Id': 'j-27ZY9GBEEU2GU',
'LogUri': 's3n://some-location/',
'Name': 'PiCalc',
'NormalizedInstanceHours': 0,
'ReleaseLabel': 'emr-4.6.0',
'ServiceRole': 'EMR_DefaultRole',
'Status': {
'State': 'STARTING',
'StateChangeReason': {},
'Timeline': {
'CreationDateTime': datetime.datetime(2016, 6, 27, 21, 5, 2, 348000, tzinfo=tzlocal())
},
},
'Tags': [{'Key': 'app', 'Value': 'analytics'}, {'Key': 'environment', 'Value': 'development'}],
'TerminationProtected': False,
'VisibleToAllUsers': True,
},
'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': 'd5456308-3caa-11e6-9d46-951401f04e0e'},
}
DESCRIBE_CLUSTER_BOOTSTRAPPING_RETURN = {
'Cluster': {
'Applications': [{'Name': 'Spark', 'Version': '1.6.1'}],
'AutoTerminate': True,
'Configurations': [],
'Ec2InstanceAttributes': {'IamInstanceProfile': 'EMR_EC2_DefaultRole'},
'Id': 'j-27ZY9GBEEU2GU',
'LogUri': 's3n://some-location/',
'Name': 'PiCalc',
'NormalizedInstanceHours': 0,
'ReleaseLabel': 'emr-4.6.0',
'ServiceRole': 'EMR_DefaultRole',
'Status': {
'State': 'BOOTSTRAPPING',
'StateChangeReason': {},
'Timeline': {
'CreationDateTime': datetime.datetime(2016, 6, 27, 21, 5, 2, 348000, tzinfo=tzlocal())
},
},
'Tags': [{'Key': 'app', 'Value': 'analytics'}, {'Key': 'environment', 'Value': 'development'}],
'TerminationProtected': False,
'VisibleToAllUsers': True,
},
'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': 'd5456308-3caa-11e6-9d46-951401f04e0e'},
}
DESCRIBE_CLUSTER_RUNNING_RETURN = {
'Cluster': {
'Applications': [{'Name': 'Spark', 'Version': '1.6.1'}],
'AutoTerminate': True,
'Configurations': [],
'Ec2InstanceAttributes': {'IamInstanceProfile': 'EMR_EC2_DefaultRole'},
'Id': 'j-27ZY9GBEEU2GU',
'LogUri': 's3n://some-location/',
'Name': 'PiCalc',
'NormalizedInstanceHours': 0,
'ReleaseLabel': 'emr-4.6.0',
'ServiceRole': 'EMR_DefaultRole',
'Status': {
'State': 'RUNNING',
'StateChangeReason': {},
'Timeline': {
'CreationDateTime': datetime.datetime(2016, 6, 27, 21, 5, 2, 348000, tzinfo=tzlocal())
},
},
'Tags': [{'Key': 'app', 'Value': 'analytics'}, {'Key': 'environment', 'Value': 'development'}],
'TerminationProtected': False,
'VisibleToAllUsers': True,
},
'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': 'd5456308-3caa-11e6-9d46-951401f04e0e'},
}
DESCRIBE_CLUSTER_WAITING_RETURN = {
'Cluster': {
'Applications': [{'Name': 'Spark', 'Version': '1.6.1'}],
'AutoTerminate': True,
'Configurations': [],
'Ec2InstanceAttributes': {'IamInstanceProfile': 'EMR_EC2_DefaultRole'},
'Id': 'j-27ZY9GBEEU2GU',
'LogUri': 's3n://some-location/',
'Name': 'PiCalc',
'NormalizedInstanceHours': 0,
'ReleaseLabel': 'emr-4.6.0',
'ServiceRole': 'EMR_DefaultRole',
'Status': {
'State': 'WAITING',
'StateChangeReason': {},
'Timeline': {
'CreationDateTime': datetime.datetime(2016, 6, 27, 21, 5, 2, 348000, tzinfo=tzlocal())
},
},
'Tags': [{'Key': 'app', 'Value': 'analytics'}, {'Key': 'environment', 'Value': 'development'}],
'TerminationProtected': False,
'VisibleToAllUsers': True,
},
'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': 'd5456308-3caa-11e6-9d46-951401f04e0e'},
}
DESCRIBE_CLUSTER_TERMINATED_RETURN = {
'Cluster': {
'Applications': [{'Name': 'Spark', 'Version': '1.6.1'}],
'AutoTerminate': True,
'Configurations': [],
'Ec2InstanceAttributes': {'IamInstanceProfile': 'EMR_EC2_DefaultRole'},
'Id': 'j-27ZY9GBEEU2GU',
'LogUri': 's3n://some-location/',
'Name': 'PiCalc',
'NormalizedInstanceHours': 0,
'ReleaseLabel': 'emr-4.6.0',
'ServiceRole': 'EMR_DefaultRole',
'Status': {
'State': 'TERMINATED',
'StateChangeReason': {},
'Timeline': {
'CreationDateTime': datetime.datetime(2016, 6, 27, 21, 5, 2, 348000, tzinfo=tzlocal())
},
},
'Tags': [{'Key': 'app', 'Value': 'analytics'}, {'Key': 'environment', 'Value': 'development'}],
'TerminationProtected': False,
'VisibleToAllUsers': True,
},
'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': 'd5456308-3caa-11e6-9d46-951401f04e0e'},
}
DESCRIBE_CLUSTER_TERMINATED_WITH_ERRORS_RETURN = {
'Cluster': {
'Applications': [{'Name': 'Spark', 'Version': '1.6.1'}],
'AutoTerminate': True,
'Configurations': [],
'Ec2InstanceAttributes': {'IamInstanceProfile': 'EMR_EC2_DefaultRole'},
'Id': 'j-27ZY9GBEEU2GU',
'LogUri': 's3n://some-location/',
'Name': 'PiCalc',
'NormalizedInstanceHours': 0,
'ReleaseLabel': 'emr-4.6.0',
'ServiceRole': 'EMR_DefaultRole',
'Status': {
'State': 'TERMINATED_WITH_ERRORS',
'StateChangeReason': {
'Code': 'BOOTSTRAP_FAILURE',
'Message': 'Master instance (i-0663047709b12345c) failed attempting to '
'download bootstrap action 1 file from S3',
},
'Timeline': {
'CreationDateTime': datetime.datetime(2016, 6, 27, 21, 5, 2, 348000, tzinfo=tzlocal())
},
},
'Tags': [{'Key': 'app', 'Value': 'analytics'}, {'Key': 'environment', 'Value': 'development'}],
'TerminationProtected': False,
'VisibleToAllUsers': True,
},
'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': 'd5456308-3caa-11e6-9d46-951401f04e0e'},
}
class TestEmrJobFlowSensor(unittest.TestCase):
def setUp(self):
# Mock out the emr_client (moto has incorrect response)
self.mock_emr_client = MagicMock()
mock_emr_session = MagicMock()
mock_emr_session.client.return_value = self.mock_emr_client
# Mock out the emr_client creator
self.boto3_session_mock = MagicMock(return_value=mock_emr_session)
def test_execute_calls_with_the_job_flow_id_until_it_reaches_a_target_state(self):
self.mock_emr_client.describe_cluster.side_effect = [
DESCRIBE_CLUSTER_STARTING_RETURN,
DESCRIBE_CLUSTER_RUNNING_RETURN,
DESCRIBE_CLUSTER_TERMINATED_RETURN,
]
with patch('boto3.session.Session', self.boto3_session_mock):
operator = EmrJobFlowSensor(
task_id='test_task', poke_interval=0, job_flow_id='j-8989898989', aws_conn_id='aws_default'
)
operator.execute(None)
# make sure we called twice
assert self.mock_emr_client.describe_cluster.call_count == 3
# make sure it was called with the job_flow_id
calls = [unittest.mock.call(ClusterId='j-8989898989')]
self.mock_emr_client.describe_cluster.assert_has_calls(calls)
def test_execute_calls_with_the_job_flow_id_until_it_reaches_failed_state_with_exception(self):
self.mock_emr_client.describe_cluster.side_effect = [
DESCRIBE_CLUSTER_RUNNING_RETURN,
DESCRIBE_CLUSTER_TERMINATED_WITH_ERRORS_RETURN,
]
with patch('boto3.session.Session', self.boto3_session_mock):
operator = EmrJobFlowSensor(
task_id='test_task', poke_interval=0, job_flow_id='j-8989898989', aws_conn_id='aws_default'
)
with pytest.raises(AirflowException):
operator.execute(None)
# make sure we called twice
assert self.mock_emr_client.describe_cluster.call_count == 2
# make sure it was called with the job_flow_id
self.mock_emr_client.describe_cluster.assert_called_once_with(ClusterId='j-8989898989')
def test_different_target_states(self):
self.mock_emr_client.describe_cluster.side_effect = [
DESCRIBE_CLUSTER_STARTING_RETURN, # return False
DESCRIBE_CLUSTER_BOOTSTRAPPING_RETURN, # return False
DESCRIBE_CLUSTER_RUNNING_RETURN, # return True
DESCRIBE_CLUSTER_WAITING_RETURN, # will not be used
DESCRIBE_CLUSTER_TERMINATED_RETURN, # will not be used
DESCRIBE_CLUSTER_TERMINATED_WITH_ERRORS_RETURN, # will not be used
]
with patch('boto3.session.Session', self.boto3_session_mock):
operator = EmrJobFlowSensor(
task_id='test_task',
poke_interval=0,
job_flow_id='j-8989898989',
aws_conn_id='aws_default',
target_states=['RUNNING', 'WAITING'],
)
operator.execute(None)
# make sure we called twice
assert self.mock_emr_client.describe_cluster.call_count == 3
# make sure it was called with the job_flow_id
calls = [unittest.mock.call(ClusterId='j-8989898989')]
self.mock_emr_client.describe_cluster.assert_has_calls(calls)
| 40.390977
| 107
| 0.60927
| 1,046
| 10,744
| 6.064054
| 0.211281
| 0.061485
| 0.019076
| 0.029481
| 0.764465
| 0.74728
| 0.741605
| 0.721268
| 0.707394
| 0.707394
| 0
| 0.054398
| 0.254002
| 10,744
| 265
| 108
| 40.543396
| 0.736993
| 0.106106
| 0
| 0.660465
| 0
| 0
| 0.334204
| 0.06122
| 0
| 0
| 0
| 0
| 0.027907
| 1
| 0.018605
| false
| 0
| 0.032558
| 0
| 0.055814
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5

hexsha: 8ac2586229a740e81c7d46782bf3c8fabc0f30ca | size: 112 | ext: py | lang: Python
max_stars: path=test/test_app.py, repo=gsmcwhirter/owlbear, head=a27deb5974c9192ac9fe2db00fba6c1921c97323, licenses=["MIT"], count=null, events=null
max_issues: path=test/test_app.py, repo=gsmcwhirter/owlbear, head=a27deb5974c9192ac9fe2db00fba6c1921c97323, licenses=["MIT"], count=null, events=null
max_forks: path=test/test_app.py, repo=gsmcwhirter/owlbear, head=a27deb5974c9192ac9fe2db00fba6c1921c97323, licenses=["MIT"], count=null, events=null
content:
# -*- coding: utf-8 -*-
"""Tests for owlbear.app"""
from owlbear import app
import test.helpers as test_helpers
| 22.4
| 35
| 0.705357
| 17
| 112
| 4.588235
| 0.705882
| 0.282051
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010417
| 0.142857
| 112
| 4
| 36
| 28
| 0.802083
| 0.392857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5

hexsha: 8ac9b2c3db2dd6ed2572d45d7e54d5bca22ed6c0 | size: 24,433 | ext: py | lang: Python
max_stars: path=mmu/metrics/metrics.py, repo=RUrlus/ModelMetricUncertainty, head=f401a25dd196d6e4edf4901fcfee4b56ebd7c10b, licenses=["Apache-2.0"], count=null, events=null
max_issues: path=mmu/metrics/metrics.py, repo=RUrlus/ModelMetricUncertainty, head=f401a25dd196d6e4edf4901fcfee4b56ebd7c10b, licenses=["Apache-2.0"], count=11, events=2021-12-08T10:34:17.000Z to 2022-01-20T13:40:05.000Z
max_forks: path=mmu/metrics/metrics.py, repo=RUrlus/ModelMetricUncertainty, head=f401a25dd196d6e4edf4901fcfee4b56ebd7c10b, licenses=["Apache-2.0"], count=null, events=null
content:
import numpy as np
import pandas as pd
import mmu.lib._mmu_core as _core
from mmu.commons import check_array
from mmu.metrics.confmat import confusion_matrix_to_dataframe
from mmu.metrics.confmat import confusion_matrices_to_dataframe
from mmu.commons import _convert_to_ext_types
from mmu.commons import _convert_to_int
from mmu.commons import _convert_to_float
col_index = {
'neg.precision': 0,
'neg.prec': 0,
'npv': 0,
'pos.precision': 1,
'pos.prec': 1,
'ppv': 1,
'neg.recall': 2,
'neg.rec': 2,
'tnr': 2,
'specificity': 2,
'pos.recall': 3,
'pos.rec': 3,
'tpr': 3,
'sensitivity': 3,
'neg.f1': 4,
'neg.f1_score': 4,
'pos.f1': 5,
'pos.f1_score': 5,
'fpr': 6,
'fnr': 7,
'accuracy': 8,
'acc': 8,
'mcc': 9,
}
col_names = [
'neg.precision',
'pos.precision',
'neg.recall',
'pos.recall',
'neg.f1',
'pos.f1',
'fpr',
'fnr',
'acc',
'mcc',
]
def metrics_to_dataframe(metrics, metric_names=None):
"""Return DataFrame with metrics.
Parameters
----------
metrics : np.ndarray
metrics where the rows are the metrics for various runs or
classification thresholds and the columns are the metrics.
metric_names : str, list[str], default=None
if you computed a subset of the metrics you should set the column
names here
Returns
-------
pd.DataFrame
the metrics as a DataFrame
"""
if metric_names is None:
metric_names = col_names
elif isinstance(metric_names, str):
metric_names = [metric_names]
elif isinstance(metric_names, (tuple, list, np.ndarray)):
if not isinstance(metric_names[0], str):
raise TypeError('``metrics_names`` should contain strings.')
else:
raise TypeError('``metrics_names`` has an unsupported type.')
if metrics.ndim == 1:
return pd.DataFrame(metrics[None, :], columns=metric_names)
return pd.DataFrame(metrics, columns=metric_names)
def binary_metrics(y, yhat=None, scores=None, threshold=None, fill=1.0, return_df=False):
r"""Compute binary classification metrics.
`bmetrics` is an alias for this function.
Computes the following metrics where [i] indicates the i'th value in the
array.
* [0] neg.precision aka Negative Predictive Value (NPV)
* [1] pos.precision aka Positive Predictive Value (PPV)
* [2] neg.recall aka True Negative Rate (TNR) aka Specificity
* [3] pos.recall aka True Positive Rate (TPR) aka Sensitivity
* [4] neg.f1 score
* [5] pos.f1 score
* [6] False Positive Rate (FPR)
* [7] False Negative Rate (FNR)
* [8] Accuracy
* [9] MCC
Parameters
----------
y : np.ndarray[bool, int32, int64, float32, float64]
true labels for observations
yhat : np.ndarray[bool, int32, int64, float32, float64], default=None
the predicted labels, the same dtypes are supported as y. Can be `None`
if `scores` is not `None`, if both are provided, `scores` is ignored.
scores : np.ndarray[float32, float64], default=None
the classifier scores to be evaluated against the `threshold`, i.e.
`yhat` = `scores` >= `threshold`. Can be `None` if `yhat` is not `None`,
if both are provided, this parameter is ignored.
threshold : float, default=0.5
the classification threshold to which the classifier scores is evaluated,
is inclusive.
fill : float, default=1.0
value to fill when a metric is not defined, e.g. divide by zero.
return_df : bool, default=False
return confusion matrix as pd.DataFrame
Returns
-------
confusion_matrix : np.ndarray, pd.DataFrame
the confusion_matrix with layout
[0, 0] = TN, [0, 1] = FP, [1, 0] = FN, [1, 1] = TP
metrics : np.ndarray, pd.DataFrame
the computed metrics
"""
if not isinstance(fill, float):
raise TypeError("`fill` must be a float.")
y = check_array(
y,
max_dim=1,
dtype_check=_convert_to_ext_types
)
if scores is not None:
scores = check_array(
scores,
max_dim=1,
dtype_check=_convert_to_float,
)
if not isinstance(threshold, float):
raise TypeError("`threshold` must be a float if scores is not None")
if scores.size != y.size:
raise ValueError('`scores` and `y` must have equal length.')
conf_mat = _core.confusion_matrix_score(y, scores, threshold)
elif yhat is not None:
yhat = check_array(
yhat,
max_dim=1,
dtype_check=_convert_to_ext_types,
)
if yhat.size != y.size:
raise ValueError('`yhat` and `y` must have equal length.')
conf_mat = _core.confusion_matrix(y, yhat)
else:
raise TypeError("`yhat` must not be None if `scores` is None")
metrics = _core.binary_metrics(conf_mat, fill)
if return_df:
return (
confusion_matrix_to_dataframe(conf_mat),
metrics_to_dataframe(metrics)
)
return conf_mat, metrics
def binary_metrics_confusion_matrix(conf_mat, fill=1.0, return_df=False):
"""Compute binary classification metrics.
`bmetrics_conf_mat` is an alias for this function.
Computes the following metrics where [i] indicates the i'th value in the
array.
* [0] neg.precision aka Negative Predictive Value (NPV)
* [1] pos.precision aka Positive Predictive Value (PPV)
* [2] neg.recall aka True Negative Rate (TNR) aka Specificity
* [3] pos.recall aka True Positive Rate (TPR) aka Sensitivity
* [4] neg.f1 score
* [5] pos.f1 score
* [6] False Positive Rate (FPR)
* [7] False Negative Rate (FNR)
* [8] Accuracy
* [9] MCC
Parameters
----------
conf_mat : np.ndarray[int32, int64],
confusion_matrix as returned by mmu.confusion_matrix
fill : float, default=1.0
value to fill when a metric is not defined, e.g. divide by zero.
return_df : bool, default=False
return the metrics confusion matrix and metrics as a DataFrame
Returns
-------
metrics : np.ndarray, pd.DataFrame
the computed metrics
"""
if not isinstance(fill, float):
raise TypeError("`fill` must be a float.")
if conf_mat.shape == (2, 2):
conf_mat = conf_mat.flatten()
conf_mat = check_array(
conf_mat,
max_dim=1,
target_order=0,
dtype_check=_convert_to_int,
)
metrics = _core.binary_metrics(conf_mat, fill)
if return_df:
return metrics_to_dataframe(metrics)
return metrics
def binary_metrics_confusion_matrices(conf_mat, fill=1.0, return_df=False):
"""Compute binary classification metrics.
`bmetrics_conf_mats` is an alias for this function.
Computes the following metrics where [i] indicates the i'th value in the
array.
* [0] neg.precision aka Negative Predictive Value (NPV)
* [1] pos.precision aka Positive Predictive Value (PPV)
* [2] neg.recall aka True Negative Rate (TNR) aka Specificity
* [3] pos.recall aka True Positive Rate (TPR) aka Sensitivity
* [4] neg.f1 score
* [5] pos.f1 score
* [6] False Positive Rate (FPR)
* [7] False Negative Rate (FNR)
* [8] Accuracy
* [9] MCC
Parameters
----------
conf_mat : np.ndarray[int32, int64],
confusion_matrix as returned by mmu.confusion_matrices, should have
shape (N, 4) and be C-Contiguous
fill : float, default=1.0
value to fill when a metric is not defined, e.g. divide by zero.
return_df : bool, default=False
return the metrics confusion matrix and metrics as a DataFrame
Returns
-------
metrics : np.ndarray, pd.DataFrame
the computed metrics
"""
if not isinstance(fill, float):
raise TypeError("`fill` must be a float.")
conf_mat = check_array(
conf_mat,
max_dim=2,
target_order=0,
dtype_check=_convert_to_int,
)
metrics = _core.binary_metrics_2d(conf_mat, fill)
if return_df:
return metrics_to_dataframe(metrics)
return metrics
def binary_metrics_thresholds(
y, scores, thresholds, fill=1.0, return_df=False
):
"""Compute binary classification metrics over multiple thresholds.
`bmetrics_thresh` is an alias for this function.
Computes the following metrics where [i] indicates the i'th column in the
array.
* [0] neg.precision aka Negative Predictive Value (NPV)
* [1] pos.precision aka Positive Predictive Value (PPV)
* [2] neg.recall aka True Negative Rate (TNR) aka Specificity
* [3] pos.recall aka True Positive Rate (TPR) aka Sensitivity
* [4] neg.f1 score
* [5] pos.f1 score
* [6] False Positive Rate (FPR)
* [7] False Negative Rate (FNR)
* [8] Accuracy
* [9] MCC
Parameters
----------
y : np.ndarray[bool, int32, int64, float32, float64]
true labels for observations
yhat : np.ndarray[bool, int32, int64, float32, float64], default=None
the predicted labels, the same dtypes are supported as y. Can be `None`
if `scores` is not `None`, if both are provided, `scores` is ignored.
scores : np.ndarray[float32, float64], default=None
the classifier scores to be evaluated against the `threshold`, i.e.
`yhat` = `scores` >= `threshold`. Can be `None` if `yhat` is not `None`,
if both are provided, this parameter is ignored.
thresholds : np.ndarray[float32, float64]
the classification thresholds for which the classifier scores is evaluated,
is inclusive.
fill : float, default=1.0
value to fill when a metric is not defined, e.g. divide by zero.
return_df : bool, default=False
return the metrics confusion matrix and metrics as a DataFrame
Returns
-------
conf_mat : np.ndarray, pd.DataFrame
the confusion_matrices where the rows contain the counts for a
threshold, [i, 0] = TN, [i, 1] = FP, [i, 2] = FN, [i, 3] = TP
metrics : np.ndarray, pd.DataFrame
the computed metrics where the rows contain the metrics for a single
threshold
"""
if not isinstance(fill, float):
raise TypeError("`fill` must be a float.")
y = check_array(
y,
max_dim=1,
dtype_check=_convert_to_ext_types,
)
scores = check_array(
scores,
max_dim=1,
dtype_check=_convert_to_float,
)
thresholds = check_array(
thresholds,
max_dim=1,
dtype_check=_convert_to_float,
)
if scores.size != y.size:
raise ValueError('`scores` and `y` must have equal length.')
conf_mat = _core.confusion_matrix_thresholds(y, scores, thresholds)
metrics = _core.binary_metrics_2d(conf_mat, fill)
if return_df:
return (
confusion_matrices_to_dataframe(conf_mat),
metrics_to_dataframe(metrics)
)
return conf_mat, metrics
def binary_metrics_runs(
y, yhat=None, scores=None, threshold=None, obs_axis=0, fill=1.0, return_df=False
):
"""Compute binary classification metrics over multiple runs.
`bmetrics_runs` is an alias for this function.
Computes the following metrics where [i] indicates the i'th column in the
array.
* [0] neg.precision aka Negative Predictive Value (NPV)
* [1] pos.precision aka Positive Predictive Value (PPV)
* [2] neg.recall aka True Negative Rate (TNR) aka Specificity
* [3] pos.recall aka True Positive Rate (TPR) aka Sensitivity
* [4] neg.f1 score
* [5] pos.f1 score
* [6] False Positive Rate (FPR)
* [7] False Negative Rate (FNR)
* [8] Accuracy
* [9] MCC
Parameters
----------
y : np.ndarray[bool, int32, int64, float32, float64]
true labels for observations, should have shape (N, K) for `K` runs
each consisting of `N` observations if `obs_axis`
yhat : np.ndarray[bool, int32, int64, float32, float64], default=None
the predicted labels, the same dtypes are supported as y. Can be `None`
if `scores` is not `None`, if both are provided, `scores` is ignored.
`yhat` shape must be compatible with `y`.
scores : np.ndarray[float32, float64], default=None
the classifier scores to be evaluated against the `threshold`, i.e.
`yhat` = `scores` >= `threshold`. Can be `None` if `yhat` is not `None`,
if both are provided, this parameter is ignored.
`scores` shape must be compatible with `y`.
thresholds : np.ndarray[float32, float64]
the classification thresholds for which the classifier scores is evaluated,
is inclusive.
obs_axis : int, default=0
the axis containing the observations for a single run, e.g. 0 when the
labels and scoress are stored as columns
fill : float, default=1.0
value to fill when a metric is not defined, e.g. divide by zero.
return_df : bool, default=False
return the metrics confusion matrix and metrics as a DataFrame
Returns
-------
conf_mat : np.ndarray, pd.DataFrame
the confusion_matrices where the rows contain the counts for a
run, [i, 0] = TN, [i, 1] = FP, [i, 2] = FN, [i, 3] = TP
metrics : np.ndarray, pd.DataFrame
the computed metrics where the rows contain the metrics for a single
run
"""
if not isinstance(fill, float):
raise TypeError("`fill` must be a float.")
if not isinstance(obs_axis, int) or (obs_axis != 0 and obs_axis != 1):
raise TypeError("`obs_axis` must be either 0 or 1.")
y = check_array(
y,
axis=obs_axis,
target_axis=obs_axis,
target_order=1-obs_axis,
max_dim=2,
dtype_check=_convert_to_ext_types,
)
if scores is not None:
scores = check_array(
scores,
axis=obs_axis,
target_axis=obs_axis,
target_order=1-obs_axis,
max_dim=2,
dtype_check=_convert_to_float,
)
if not isinstance(threshold, float):
raise TypeError("`threshold` must be a float if scores is not None")
if scores.size != y.size:
raise ValueError('`scores` and `y` must have equal length.')
conf_mat = _core.confusion_matrix_score_runs(y, scores, threshold, obs_axis)
elif yhat is not None:
yhat = check_array(
yhat,
axis=obs_axis,
target_axis=obs_axis,
target_order=1-obs_axis,
max_dim=2,
dtype_check=_convert_to_ext_types,
)
if yhat.size != y.size:
raise ValueError('`yhat` and `y` must have equal length.')
conf_mat = _core.confusion_matrix_runs(y, yhat, obs_axis)
else:
raise TypeError("`yhat` must not be None if `scores` is None")
metrics = _core.binary_metrics_2d(conf_mat, fill)
if return_df:
return (
confusion_matrices_to_dataframe(conf_mat),
metrics_to_dataframe(metrics)
)
return conf_mat, metrics
def binary_metrics_runs_thresholds(
y, scores, thresholds, n_obs=None, fill=1.0, obs_axis=0):
"""Compute binary classification metrics over runs and thresholds.
`bmetrics_runs_thresh` is an alias for this function.
Computes the following metrics where [i] indicates the i'th column in the
array.
* [0] neg.precision aka Negative Predictive Value (NPV)
* [1] pos.precision aka Positive Predictive Value (PPV)
* [2] neg.recall aka True Negative Rate (TNR) aka Specificity
* [3] pos.recall aka True Positive Rate (TPR) aka Sensitivity
* [4] neg.f1 score
* [5] pos.f1 score
* [6] False Positive Rate (FPR)
* [7] False Negative Rate (FNR)
* [8] Accuracy
* [9] MCC
Parameters
----------
y : np.ndarray[bool, int32, int64, float32, float64]
the ground truth labels, if different runs have different number of
observations the n_obs parameter must be set to avoid computing metrics
of the filled values. If ``y`` is one dimensional and ``scores`` is not
the ``y`` values are assumed to be the same for each run.
scores : np.array[float32, float64]
the classifier scoress, if different runs have different number of
observations the n_obs parameter must be set to avoid computing metrics
of the filled values.
thresholds : np.array[float32, float64]
classification thresholds
n_obs : np.array[int64], default=None
the number of observations per run, if None the same number of
observations are assumed exist for each run.
fill : double, default=1.0
value to fill when a metric is not defined, e.g. divide by zero.
obs_axis : {0, 1}, default=0
0 if the observations for a single run is a column (e.g. from
pd.DataFrame) and 1 otherwhise
Returns
-------
conf_mat : np.ndarray[int64]
3D array where the rows contain the counts for a threshold,
the columns the confusion matrix entries and the slices the counts for
a run
metrics : np.ndarray[float64]
3D array where the first axis is the threshold, the second the metrics
and the third the run
"""
thresholds = check_array(
thresholds,
max_dim=1,
dtype_check=_convert_to_float,
)
scores = check_array(
scores,
axis=obs_axis,
target_axis=obs_axis,
target_order=1-obs_axis,
max_dim=2,
dtype_check=_convert_to_float,
)
n_runs = scores.shape[1 - obs_axis]
max_obs = scores.shape[obs_axis]
if y.ndim == 1:
y = np.tile(y[:, None], n_runs)
elif y.shape[1] == 1 and y.shape[0] >= 2:
y = np.tile(y, n_runs)
y = check_array(
y,
axis=obs_axis,
target_axis=obs_axis,
target_order=1-obs_axis,
max_dim=2,
dtype_check=_convert_to_ext_types,
)
n_thresholds = thresholds.size
if n_obs is None:
n_obs = np.repeat(max_obs, n_runs)
cm = _core.confusion_matrix_runs_thresholds(
y, scores, thresholds, n_obs
)
mtr = _core.binary_metrics_2d(cm, fill)
# cm and mtr are both flat arrays with order conf_mat, thresholds, runs
# as this is fastest to create. However, how the cubes will be sliced
# later doesn't align with this. So we incur a copy such that the cubes
# have the optimal strides for further processing
if n_thresholds == 1:
# create cube from flat array
cm = cm.reshape(n_runs, 4, order='C')
else:
# create cube from flat array
cm = cm.reshape(n_runs, n_thresholds, 4, order='C')
# reorder such that with F-order we get from smallest to largest
# strides: conf_mat, runs, thresholds
cm = np.swapaxes(np.swapaxes(cm, 0, 2), 1, 2)
# make values over the confusion matrix and runs contiguous
cm = np.asarray(cm, order='F')
# change order s.t. we have thresholds, conf_mat, runs
cm = np.swapaxes(cm.T, 1, 2)
# create cube from flat array
# order is runs, thresholds, metrics
if n_thresholds == 1:
# make values over the runs contiguous
mtr = np.asarray(mtr.reshape(n_runs, 10, order='C'), order='F')
else:
mtr = mtr.reshape(n_runs, n_thresholds, 10, order='C')
# make values over the runs contiguous
mtr = np.asarray(mtr, order='F')
# change order s.t. we have thresholds, metrics, runs
mtr = np.swapaxes(mtr.T, 0, 1)
return cm, mtr
def precision_recall(y, yhat=None, scores=None, threshold=None, fill=1.0, return_df=False):
r"""Compute precision and recall.
Parameters
----------
y : np.ndarray[bool, int32, int64, float32, float64]
true labels for observations
yhat : np.ndarray[bool, int32, int64, float32, float64], default=None
the predicted labels, the same dtypes are supported as y. Can be `None`
if `scores` is not `None`, if both are provided, `scores` is ignored.
scores : np.ndarray[float32, float64], default=None
the classifier scores to be evaluated against the `threshold`, i.e.
`yhat` = `scores` >= `threshold`. Can be `None` if `yhat` is not `None`,
if both are provided, this parameter is ignored.
threshold : float, default=0.5
the classification threshold to which the classifier scores is evaluated,
is inclusive.
fill : float, default=1.0
value to fill when a metric is not defined, e.g. divide by zero.
return_df : bool, default=False
return confusion matrix as pd.DataFrame
Returns
-------
confusion_matrix : np.ndarray, pd.DataFrame
the confusion_matrix with layout
[0, 0] = TN, [0, 1] = FP, [1, 0] = FN, [1, 1] = TP
prec_rec : np.ndarray, pd.DataFrame
precision and recall
"""
if not isinstance(fill, float):
raise TypeError("`fill` must be a float.")
y = check_array(
y,
max_dim=1,
dtype_check=_convert_to_ext_types
)
if scores is not None:
scores = check_array(
scores,
max_dim=1,
dtype_check=_convert_to_float,
)
if not isinstance(threshold, float):
raise TypeError("`threshold` must be a float if scores is not None")
if scores.size != y.size:
raise ValueError('`scores` and `y` must have equal length.')
conf_mat = _core.confusion_matrix_score(y, scores, threshold)
elif yhat is not None:
yhat = check_array(
yhat,
max_dim=1,
dtype_check=_convert_to_ext_types,
)
if yhat.size != y.size:
raise ValueError('`yhat` and `y` must have equal length.')
conf_mat = _core.confusion_matrix(y, yhat)
else:
raise TypeError("`yhat` must not be None if `scores` is None")
prec_rec = _core.precision_recall(conf_mat, fill)
if return_df:
return (
confusion_matrix_to_dataframe(conf_mat),
pd.DataFrame(prec_rec, index=['precision', 'recall']).T
)
return conf_mat, prec_rec
def precision_recall_curve(y, scores, thresholds=None, fill=1.0, return_df=False):
"""Compute precision and recall over the thresholds.
`pr_curve` is an alias for this function.
Parameters
----------
y : np.ndarray[bool, int32, int64, float32, float64]
true labels for observations
scores : np.ndarray[float32, float64]
the classifier scores to be evaluated against the `threshold`, i.e.
`yhat` = `scores` >= `threshold`
threshold : np.ndarray[float32, float64]
the classification thresholds to which the classifier scores is evaluated,
is inclusive.
fill : float, default=1.0
value to fill when a metric is not defined, e.g. divide by zero.
return_df : bool, default=False
return confusion matrix as pd.DataFrame
Returns
-------
precision : np.ndarray[float64]
the precision for each threshold
recall : np.ndarray[float64]
the recall for each threshold
"""
if not isinstance(fill, float):
raise TypeError("`fill` must be a float.")
y = check_array(
y,
max_dim=1,
dtype_check=_convert_to_ext_types,
)
scores = check_array(
scores,
max_dim=1,
dtype_check=_convert_to_float,
)
thresholds = check_array(
thresholds,
max_dim=1,
dtype_check=_convert_to_float,
)
if scores.size != y.size:
raise ValueError('`scores` and `y` must have equal length.')
conf_mat = _core.confusion_matrix_thresholds(y, scores, thresholds)
metrics = _core.precision_recall_2d(conf_mat, fill)
if return_df:
df = pd.DataFrame(metrics, columns=['precision', 'recall'])
df['thresholds'] = thresholds
return df
return metrics[:, 0].copy(), metrics[:, 1].copy()
| 33.332879
| 91
| 0.625629
| 3,330
| 24,433
| 4.46967
| 0.07988
| 0.019282
| 0.022843
| 0.025531
| 0.777412
| 0.758667
| 0.739116
| 0.725611
| 0.72158
| 0.711166
| 0
| 0.021277
| 0.27864
| 24,433
| 732
| 92
| 33.378415
| 0.823206
| 0.515328
| 0
| 0.5875
| 0
| 0
| 0.108164
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028125
| false
| 0
| 0.028125
| 0
| 0.109375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5

hexsha: 8acbda7782c2a70bae0ce9d37599a3eecd298823 | size: 113 | ext: py | lang: Python
max_stars: path=neovim/plugins/__init__.py, repo=tarruda/python-client, head=f4fb9f1de0e8d1fc75949b3ca4d0ad73dfd37e5f, licenses=["Apache-2.0"], count=1, events=2022-02-07T19:15:17.000Z to 2022-02-07T19:15:17.000Z
max_issues: path=neovim/plugins/__init__.py, repo=tarruda/python-client, head=f4fb9f1de0e8d1fc75949b3ca4d0ad73dfd37e5f, licenses=["Apache-2.0"], count=null, events=null
max_forks: path=neovim/plugins/__init__.py, repo=tarruda/python-client, head=f4fb9f1de0e8d1fc75949b3ca4d0ad73dfd37e5f, licenses=["Apache-2.0"], count=null, events=null
content:
from .plugin_host import PluginHost
from .script_host import ScriptHost
__all__ = ['PluginHost', 'ScriptHost']
| 18.833333
| 38
| 0.787611
| 13
| 113
| 6.384615
| 0.615385
| 0.240964
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123894
| 113
| 5
| 39
| 22.6
| 0.838384
| 0
| 0
| 0
| 0
| 0
| 0.176991
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5

hexsha: 76e1da16191e6edf455b5fd72f1cc5cdcae550c0 | size: 120 | ext: py | lang: Python
max_stars: path=students/admin.py, repo=estudeplus/perfil, head=58b847aa226b885ca6a7a128035f09de2322519f, licenses=["MIT"], count=null, events=null
max_issues: path=students/admin.py, repo=estudeplus/perfil, head=58b847aa226b885ca6a7a128035f09de2322519f, licenses=["MIT"], count=21, events=2019-05-11T18:01:10.000Z to 2022-02-10T11:22:01.000Z
max_forks: path=students/admin.py, repo=estudeplus/perfil, head=58b847aa226b885ca6a7a128035f09de2322519f, licenses=["MIT"], count=null, events=null
content:
from django.contrib import admin
from .models import Student
admin.site.register(Student)
# Register your models here.
| 20
| 32
| 0.808333
| 17
| 120
| 5.705882
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 120
| 5
| 33
| 24
| 0.92381
| 0.216667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5

hexsha: 76f66190f21a0cca523ee2225b83151da67ffb8a | size: 116 | ext: py | lang: Python
max_stars: path=arcus/__init__.py, repo=cuenca-mx/arcus-python, head=71e35728d75d9413eb62c18bff0abb3a18bf80ac, licenses=["MIT"], count=4, events=2018-10-30T20:23:44.000Z to 2020-06-29T13:56:34.000Z
max_issues: path=arcus/__init__.py, repo=cuenca-mx/arcus-python, head=71e35728d75d9413eb62c18bff0abb3a18bf80ac, licenses=["MIT"], count=131, events=2018-10-29T17:13:49.000Z to 2022-03-30T16:05:21.000Z
max_forks: path=arcus/__init__.py, repo=cuenca-mx/arcus-python, head=71e35728d75d9413eb62c18bff0abb3a18bf80ac, licenses=["MIT"], count=2, events=2019-09-22T03:43:29.000Z to 2021-04-19T09:20:24.000Z
content:
__all__ = ['client', 'exc', 'resources', 'Client']
from . import client, exc, resources
from .client import Client
| 23.2
| 50
| 0.698276
| 14
| 116
| 5.5
| 0.428571
| 0.233766
| 0.467532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146552
| 116
| 4
| 51
| 29
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0.206897
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5

hexsha: 0a061aa8f75252fa3c16a2f6fa959bc35beb3e48 | size: 102 | ext: py | lang: Python
max_stars: path=music/admin.py, repo=bvermeulen/Django, head=e4ef21c2f1fb7d026207c25bd443252c6df354bf, licenses=["MIT"], count=4, events=2018-12-25T13:56:18.000Z to 2019-12-22T16:04:50.000Z
max_issues: path=music/admin.py, repo=bvermeulen/Django, head=e4ef21c2f1fb7d026207c25bd443252c6df354bf, licenses=["MIT"], count=15, events=2019-12-10T06:22:19.000Z to 2022-03-11T23:46:49.000Z
max_forks: path=music/admin.py, repo=bvermeulen/Django, head=e4ef21c2f1fb7d026207c25bd443252c6df354bf, licenses=["MIT"], count=2, events=2021-02-16T18:52:19.000Z to 2021-03-30T16:40:46.000Z
content:
from django.contrib import admin
from music.models import MusicTrack
admin.site.register(MusicTrack)
| 20.4
| 35
| 0.843137
| 14
| 102
| 6.142857
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098039
| 102
| 4
| 36
| 25.5
| 0.934783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5

hexsha: 0a33d97f24bd88a3ed4be258b93db4ea6880328f | size: 15,643 | ext: py | lang: Python
max_stars: path=torchrec/models/tests/test_dlrm.py, repo=xing-liu/torchrec, head=82ffde7a69fdb9c66b79a753d6f03afa5db3f73e, licenses=["BSD-3-Clause"], count=814, events=2022-02-23T17:24:14.000Z to 2022-03-31T16:52:23.000Z
max_issues: path=torchrec/models/tests/test_dlrm.py, repo=xing-liu/torchrec, head=82ffde7a69fdb9c66b79a753d6f03afa5db3f73e, licenses=["BSD-3-Clause"], count=89, events=2022-02-23T17:29:56.000Z to 2022-03-31T23:44:13.000Z
max_forks: path=torchrec/models/tests/test_dlrm.py, repo=xing-liu/torchrec, head=82ffde7a69fdb9c66b79a753d6f03afa5db3f73e, licenses=["BSD-3-Clause"], count=68, events=2022-02-23T17:42:17.000Z to 2022-03-28T06:39:55.000Z
content:
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from torch.testing import FileCheck # @manual
from torchrec.fx import symbolic_trace
from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch
from torchrec.modules.embedding_configs import EmbeddingBagConfig
from torchrec.modules.embedding_modules import EmbeddingBagCollection
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
class SparseArchTest(unittest.TestCase):
def test_basic(self) -> None:
torch.manual_seed(0)
D = 3
eb1_config = EmbeddingBagConfig(
name="t1", embedding_dim=D, num_embeddings=10, feature_names=["f1", "f3"]
)
eb2_config = EmbeddingBagConfig(
name="t2",
embedding_dim=D,
num_embeddings=10,
feature_names=["f2"],
)
ebc = EmbeddingBagCollection(tables=[eb1_config, eb2_config])
sparse_arch = SparseArch(ebc)
keys = ["f1", "f2", "f3", "f4", "f5"]
offsets = torch.tensor([0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 19])
features = KeyedJaggedTensor.from_offsets_sync(
keys=keys,
values=torch.tensor(
[1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3]
),
offsets=offsets,
)
B = (len(offsets) - 1) // len(keys)
sparse_features = sparse_arch(features)
F = len(sparse_arch.sparse_feature_names)
self.assertEqual(sparse_features.shape, (B, F, D))
expected_values = torch.tensor(
[
[
[-0.7499, -1.2665, 1.0143],
[-0.7499, -1.2665, 1.0143],
[3.2276, 2.9643, -0.3816],
],
[
[0.0082, 0.6241, -0.1119],
[0.0082, 0.6241, -0.1119],
[2.0722, -2.2734, -1.6307],
],
]
)
self.assertTrue(
torch.allclose(
sparse_features,
expected_values,
rtol=1e-4,
atol=1e-4,
),
)
def test_fx_and_shape(self) -> None:
D = 3
eb1_config = EmbeddingBagConfig(
name="t1", embedding_dim=D, num_embeddings=10, feature_names=["f1", "f3"]
)
eb2_config = EmbeddingBagConfig(
name="t2",
embedding_dim=D,
num_embeddings=10,
feature_names=["f2"],
)
ebc = EmbeddingBagCollection(tables=[eb1_config, eb2_config])
sparse_arch = SparseArch(ebc)
F = len(sparse_arch.sparse_feature_names)
gm = symbolic_trace(sparse_arch)
FileCheck().check("KeyedJaggedTensor").check("cat").run(gm.code)
keys = ["f1", "f2", "f3", "f4", "f5"]
offsets = torch.tensor([0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 19])
features = KeyedJaggedTensor.from_offsets_sync(
keys=keys,
values=torch.tensor(
[1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3]
),
offsets=offsets,
)
B = (len(offsets) - 1) // len(keys)
sparse_features = gm(features)
self.assertEqual(sparse_features.shape, (B, F, D))
# TODO(T89043538): Auto-generate this test.
def test_fx_script(self) -> None:
D = 3
eb1_config = EmbeddingBagConfig(
name="t1", embedding_dim=D, num_embeddings=10, feature_names=["f1"]
)
eb2_config = EmbeddingBagConfig(
name="t2", embedding_dim=D, num_embeddings=10, feature_names=["f2"]
)
ebc = EmbeddingBagCollection(tables=[eb1_config, eb2_config])
sparse_arch = SparseArch(ebc)
gm = symbolic_trace(sparse_arch)
torch.jit.script(gm)
class DenseArchTest(unittest.TestCase):
def test_basic(self) -> None:
torch.manual_seed(0)
B = 4
D = 3
in_features = 10
dense_arch = DenseArch(in_features=in_features, layer_sizes=[10, D])
dense_embedded = dense_arch(torch.rand((B, in_features)))
self.assertEqual(dense_embedded.size(), (B, D))
expected = torch.tensor(
[
[0.2351, 0.1578, 0.2784],
[0.1579, 0.1012, 0.2660],
[0.2459, 0.2379, 0.2749],
[0.2582, 0.2178, 0.2860],
]
)
self.assertTrue(
torch.allclose(
dense_embedded,
expected,
rtol=1e-4,
atol=1e-4,
)
)
def test_fx_and_shape(self) -> None:
B = 20
D = 3
in_features = 10
dense_arch = DenseArch(in_features=in_features, layer_sizes=[10, D])
gm = symbolic_trace(dense_arch)
dense_embedded = gm(torch.rand((B, in_features)))
self.assertEqual(dense_embedded.size(), (B, D))
# TODO(T89043538): Auto-generate this test.
def test_fx_script(self) -> None:
B = 20
D = 3
in_features = 10
dense_arch = DenseArch(in_features=in_features, layer_sizes=[10, D])
gm = symbolic_trace(dense_arch)
scripted_gm = torch.jit.script(gm)
dense_embedded = scripted_gm(torch.rand((B, in_features)))
self.assertEqual(dense_embedded.size(), (B, D))
class InteractionArchTest(unittest.TestCase):
def test_basic(self) -> None:
D = 3
B = 10
keys = ["f1", "f2"]
F = len(keys)
inter_arch = InteractionArch(num_sparse_features=F)
dense_features = torch.rand((B, D))
sparse_features = torch.rand((B, F, D))
concat_dense = inter_arch(dense_features, sparse_features)
# B X (D + F + F choose 2)
self.assertEqual(concat_dense.size(), (B, D + F + choose(F, 2)))
def test_larger(self) -> None:
D = 8
B = 20
keys = ["f1", "f2", "f3", "f4"]
F = len(keys)
inter_arch = InteractionArch(num_sparse_features=F)
dense_features = torch.rand((B, D))
sparse_features = torch.rand((B, F, D))
concat_dense = inter_arch(dense_features, sparse_features)
# B X (D + F + F choose 2)
self.assertEqual(concat_dense.size(), (B, D + F + choose(F, 2)))
def test_fx_and_shape(self) -> None:
D = 3
B = 10
keys = ["f1", "f2"]
F = len(keys)
inter_arch = InteractionArch(num_sparse_features=F)
gm = symbolic_trace(inter_arch)
dense_features = torch.rand((B, D))
sparse_features = torch.rand((B, F, D))
concat_dense = gm(dense_features, sparse_features)
# B X (D + F + F choose 2)
self.assertEqual(concat_dense.size(), (B, D + F + choose(F, 2)))
# TODO(T89043538): Auto-generate this test.
def test_fx_script(self) -> None:
D = 3
B = 10
keys = ["f1", "f2"]
F = len(keys)
inter_arch = InteractionArch(num_sparse_features=F)
gm = symbolic_trace(inter_arch)
scripted_gm = torch.jit.script(gm)
dense_features = torch.rand((B, D))
sparse_features = torch.rand((B, F, D))
concat_dense = scripted_gm(dense_features, sparse_features)
# B X (D + F + F choose 2)
self.assertEqual(concat_dense.size(), (B, D + F + choose(F, 2)))
def test_correctness(self) -> None:
D = 4
B = 3
keys = [
"f1",
"f2",
"f3",
"f4",
]
F = len(keys)
inter_arch = InteractionArch(num_sparse_features=F)
torch.manual_seed(0)
dense_features = torch.rand((B, D))
sparse_features = torch.rand((B, F, D))
concat_dense = inter_arch(dense_features, sparse_features)
# B X (D + F + F choose 2)
self.assertEqual(concat_dense.size(), (B, D + F + choose(F, 2)))
expected = torch.tensor(
[
[
0.4963,
0.7682,
0.0885,
0.1320,
0.2353,
1.0123,
1.1919,
0.7220,
0.3444,
0.7397,
0.4015,
1.5184,
0.8986,
1.2018,
],
[
0.3074,
0.6341,
0.4901,
0.8964,
1.2787,
0.3275,
1.6734,
0.6325,
0.2089,
1.2982,
0.3977,
0.4200,
0.2475,
0.7834,
],
[
0.4556,
0.6323,
0.3489,
0.4017,
0.8195,
1.1181,
1.0511,
0.4919,
1.6147,
1.0786,
0.4264,
1.3576,
0.5860,
0.6559,
],
]
)
self.assertTrue(
torch.allclose(
concat_dense,
expected,
rtol=1e-4,
atol=1e-4,
)
)
def test_numerical_stability(self) -> None:
D = 3
B = 6
keys = ["f1", "f2"]
F = len(keys)
inter_arch = InteractionArch(num_sparse_features=F)
torch.manual_seed(0)
dense_features = torch.randint(0, 10, (B, D))
sparse_features = torch.randint(0, 10, (B, F, D))
concat_dense = inter_arch(dense_features, sparse_features)
expected = torch.LongTensor(
[
[4, 9, 3, 61, 57, 63],
[0, 3, 9, 84, 27, 45],
[7, 3, 7, 34, 50, 25],
[3, 1, 6, 21, 50, 91],
[6, 9, 8, 125, 109, 74],
[6, 6, 8, 18, 80, 21],
]
)
self.assertTrue(torch.equal(concat_dense, expected))
class DLRMTest(unittest.TestCase):
def test_basic(self) -> None:
torch.manual_seed(0)
B = 2
D = 8
dense_in_features = 100
eb1_config = EmbeddingBagConfig(
name="t1", embedding_dim=D, num_embeddings=100, feature_names=["f1", "f3"]
)
eb2_config = EmbeddingBagConfig(
name="t2",
embedding_dim=D,
num_embeddings=100,
feature_names=["f2"],
)
ebc = EmbeddingBagCollection(tables=[eb1_config, eb2_config])
sparse_nn = DLRM(
embedding_bag_collection=ebc,
dense_in_features=dense_in_features,
dense_arch_layer_sizes=[20, D],
over_arch_layer_sizes=[5, 1],
)
features = torch.rand((B, dense_in_features))
sparse_features = KeyedJaggedTensor.from_offsets_sync(
keys=["f1", "f3", "f2"],
values=torch.tensor([1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3]),
offsets=torch.tensor([0, 2, 4, 6, 8, 10, 11]),
)
logits = sparse_nn(
dense_features=features,
sparse_features=sparse_features,
)
self.assertEqual(logits.size(), (B, 1))
expected_logits = torch.tensor([[0.5805], [0.5909]])
self.assertTrue(
torch.allclose(
logits,
expected_logits,
rtol=1e-4,
atol=1e-4,
)
)
def test_one_sparse(self) -> None:
B = 2
D = 8
dense_in_features = 100
eb1_config = EmbeddingBagConfig(
name="t2",
embedding_dim=D,
num_embeddings=100,
feature_names=["f2"],
)
ebc = EmbeddingBagCollection(tables=[eb1_config])
sparse_nn = DLRM(
embedding_bag_collection=ebc,
dense_in_features=dense_in_features,
dense_arch_layer_sizes=[20, D],
over_arch_layer_sizes=[5, 1],
)
features = torch.rand((B, dense_in_features))
sparse_features = KeyedJaggedTensor.from_offsets_sync(
keys=["f2"],
values=torch.tensor(range(3)),
offsets=torch.tensor([0, 2, 3]),
)
logits = sparse_nn(
dense_features=features,
sparse_features=sparse_features,
)
self.assertEqual(logits.size(), (B, 1))
def test_no_sparse(self) -> None:
ebc = EmbeddingBagCollection(tables=[])
D_unused = 1
with self.assertRaises(AssertionError):
DLRM(
embedding_bag_collection=ebc,
dense_in_features=100,
dense_arch_layer_sizes=[20, D_unused],
over_arch_layer_sizes=[5, 1],
)
def test_fx(self) -> None:
B = 2
D = 8
dense_in_features = 100
eb1_config = EmbeddingBagConfig(
name="t2",
embedding_dim=D,
num_embeddings=100,
feature_names=["f2"],
)
ebc = EmbeddingBagCollection(tables=[eb1_config])
sparse_nn = DLRM(
embedding_bag_collection=ebc,
dense_in_features=dense_in_features,
dense_arch_layer_sizes=[20, D],
over_arch_layer_sizes=[5, 1],
)
gm = symbolic_trace(sparse_nn)
FileCheck().check("KeyedJaggedTensor").check("cat").check("f2").run(gm.code)
features = torch.rand((B, dense_in_features))
sparse_features = KeyedJaggedTensor.from_offsets_sync(
keys=["f2"],
values=torch.tensor(range(3)),
offsets=torch.tensor([0, 2, 3]),
)
logits = gm(
dense_features=features,
sparse_features=sparse_features,
)
self.assertEqual(logits.size(), (B, 1))
# TODO(T89043538): Auto-generate this test.
def test_fx_script(self) -> None:
B = 2
D = 8
dense_in_features = 100
eb1_config = EmbeddingBagConfig(
name="t1", embedding_dim=D, num_embeddings=100, feature_names=["f1", "f3"]
)
eb2_config = EmbeddingBagConfig(
name="t2",
embedding_dim=D,
num_embeddings=100,
feature_names=["f2"],
)
ebc = EmbeddingBagCollection(tables=[eb1_config, eb2_config])
sparse_nn = DLRM(
embedding_bag_collection=ebc,
dense_in_features=dense_in_features,
dense_arch_layer_sizes=[20, D],
over_arch_layer_sizes=[5, 1],
)
features = torch.rand((B, dense_in_features))
sparse_features = KeyedJaggedTensor.from_offsets_sync(
keys=["f1", "f3", "f2"],
values=torch.tensor([1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3]),
offsets=torch.tensor([0, 2, 4, 6, 8, 10, 11]),
)
sparse_nn(
dense_features=features,
sparse_features=sparse_features,
)
gm = symbolic_trace(sparse_nn)
scripted_gm = torch.jit.script(gm)
logits = scripted_gm(features, sparse_features)
self.assertEqual(logits.size(), (B, 1))
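A hedged note on the width arithmetic asserted throughout these tests: the comments read "B X (D + F + F choose 2)", which suggests that choose in torchrec.models.dlrm computes a binomial coefficient. The standalone sketch below only illustrates that reading; it is not the library's implementation.

import math

def choose_sketch(n: int, k: int) -> int:
    # assumed behaviour: plain binomial coefficient, 0 outside 0 <= k <= n
    return math.comb(n, k) if 0 <= k <= n else 0

# test_numerical_stability above uses D=3 dense dims and F=2 sparse features, and each row of
# its expected tensor has 6 entries: D + F + C(F, 2) = 3 + 2 + 1
assert choose_sketch(2, 2) == 1
assert 3 + 2 + choose_sketch(2, 2) == 6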
| 30.315891
| 86
| 0.507639
| 1,763
| 15,643
| 4.313103
| 0.139535
| 0.066281
| 0.054971
| 0.03314
| 0.786823
| 0.761573
| 0.740268
| 0.728169
| 0.702525
| 0.682404
| 0
| 0.084213
| 0.376015
| 15,643
| 515
| 87
| 30.374757
| 0.694806
| 0.033561
| 0
| 0.607059
| 0
| 0
| 0.010992
| 0
| 0
| 0
| 0
| 0.001942
| 0.047059
| 1
| 0.04
| false
| 0
| 0.018824
| 0
| 0.068235
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0a471865d7c0792e57ca9c1de50a4b512ab86c6f
| 1,505
|
py
|
Python
|
pni_report_dashboard/pni_report_dashboard/page/pni_dashboard/pni_dashboard.py
|
DaizyModi/pni_report_dashboard
|
709c2903330531c02af9992fc53c744259c84efc
|
[
"MIT"
] | null | null | null |
pni_report_dashboard/pni_report_dashboard/page/pni_dashboard/pni_dashboard.py
|
DaizyModi/pni_report_dashboard
|
709c2903330531c02af9992fc53c744259c84efc
|
[
"MIT"
] | null | null | null |
pni_report_dashboard/pni_report_dashboard/page/pni_dashboard/pni_dashboard.py
|
DaizyModi/pni_report_dashboard
|
709c2903330531c02af9992fc53c744259c84efc
|
[
"MIT"
] | null | null | null |
import frappe
from frappe.utils import date_diff, add_months, today, getdate, add_days
@frappe.whitelist()
def get_ink(item_group):
_date = add_days(today(), -1)
sql = frappe.db.sql("""
select sum(sed.qty)
from `tabStock Entry Detail` as sed, `tabStock Entry` as se
where sed.docstatus=1 and sed.item_group="{1}" and se.posting_date="{0}" and sed.parent = se.name
and se.stock_entry_type = "Material Consumption for Manufacture"
""".format(_date, item_group))
data = 0
if sql[0] and sql[0][0]:
data = sql[0][0]
return data
@frappe.whitelist()
def get_cup_production(item_group, tw = ""):
_date = add_days(today(), -1)
condition = ""
if tw:
condition += " and sed.t_warehouse = '{0}'".format(tw)
sql = frappe.db.sql("""
select sum(sed.qty)
from `tabStock Entry Detail` as sed, `tabStock Entry` as se
where sed.docstatus=1 and sed.item_group="{1}" and se.posting_date="{0}" and sed.parent = se.name and se.stock_entry_type = "Manufacture" {2}
""".format(_date, item_group, condition))
data = 0
if sql[0] and sql[0][0]:
data = sql[0][0]
return data
@frappe.whitelist()
def get_ldpe():
_date = add_days(today(), -1)
sql = frappe.db.sql("""
select sum(sed.qty)
from `tabStock Entry Detail` as sed, `tabStock Entry` as se
where sed.docstatus=1 and sed.item_code="LDP LA 17"
and se.posting_date="{0}" and sed.parent = se.name and se.stock_entry_type = "Manufacture"
""".format(_date))
data = 0
if sql[0] and sql[0][0]:
data = sql[0][0]
return data
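A dependency-free sketch of the two behaviours the three functions above share; the names are illustrative and assume that add_days(today(), -1) is plain "yesterday" arithmetic and that frappe.db.sql returns a tuple of row tuples. (Interpolating values into SQL with str.format, as above, is usually replaced by the driver's parameter substitution; that change is not shown here.)

from datetime import date, timedelta

def yesterday_sketch() -> str:
    # assumed equivalent of add_days(today(), -1)
    return (date.today() - timedelta(days=1)).isoformat()

def first_value_or_zero(rows):
    # mirrors the `if sql[0] and sql[0][0]: data = sql[0][0]` guard used in each function above
    return rows[0][0] if rows and rows[0] and rows[0][0] else 0

assert first_value_or_zero(((None,),)) == 0
assert first_value_or_zero(((42,),)) == 42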
| 32.717391
| 143
| 0.678405
| 252
| 1,505
| 3.920635
| 0.230159
| 0.036437
| 0.030364
| 0.063765
| 0.717611
| 0.700405
| 0.700405
| 0.700405
| 0.700405
| 0.700405
| 0
| 0.02619
| 0.162791
| 1,505
| 46
| 144
| 32.717391
| 0.757937
| 0
| 0
| 0.627907
| 0
| 0.069767
| 0.5
| 0.041833
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069767
| false
| 0
| 0.046512
| 0
| 0.186047
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0a484f2f9deea137beaf52a39e3b97296019240a
| 134
|
py
|
Python
|
scipy/optimize/moduleTNC_clr.py
|
jasonmccampbell/scipy-refactor
|
52708e04bca51e7043248d56383780b1e51e0d8f
|
[
"BSD-3-Clause"
] | 8
|
2015-10-07T00:37:32.000Z
|
2022-01-21T17:02:33.000Z
|
scipy/optimize/moduleTNC_clr.py
|
enthought/scipy-refactor
|
52708e04bca51e7043248d56383780b1e51e0d8f
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/optimize/moduleTNC_clr.py
|
enthought/scipy-refactor
|
52708e04bca51e7043248d56383780b1e51e0d8f
|
[
"BSD-3-Clause"
] | 8
|
2015-05-09T14:23:57.000Z
|
2018-11-15T05:56:00.000Z
|
import sys
if sys.platform == 'cli':
import clr
clr.AddReference('optimize')
from scipy__optimize__moduleTNC import *
| 13.4
| 44
| 0.69403
| 16
| 134
| 5.5625
| 0.6875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.216418
| 134
| 9
| 45
| 14.888889
| 0.847619
| 0
| 0
| 0
| 0
| 0
| 0.083969
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6a56724ede016ee933cc7cfe1ec80dcb39302feb
| 203
|
py
|
Python
|
02-19-Cuma/inheritance/ChineseChef.py
|
cihatdev/misha-staj
|
d0ee95d5e77a7d7a1f16611d49c87be429a25b31
|
[
"MIT"
] | 9
|
2021-03-16T20:21:54.000Z
|
2022-01-08T09:15:10.000Z
|
02-19-Cuma/inheritance/ChineseChef.py
|
cihatdev/misha-staj
|
d0ee95d5e77a7d7a1f16611d49c87be429a25b31
|
[
"MIT"
] | 1
|
2021-02-28T21:27:17.000Z
|
2021-02-28T21:27:17.000Z
|
02-19-Cuma/inheritance/ChineseChef.py
|
cihatdev/misha-staj
|
d0ee95d5e77a7d7a1f16611d49c87be429a25b31
|
[
"MIT"
] | 1
|
2021-05-24T11:34:48.000Z
|
2021-05-24T11:34:48.000Z
|
from Chef import Chef
class ChineseChef(Chef):
def make_special_dish(self):
print("The chef makes orange chicken")
def make_fried_rice(self):
print("The chef make fried rice")
| 20.3
| 46
| 0.684729
| 29
| 203
| 4.655172
| 0.586207
| 0.103704
| 0.177778
| 0.237037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.231527
| 203
| 9
| 47
| 22.555556
| 0.865385
| 0
| 0
| 0
| 0
| 0
| 0.261084
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.166667
| 0
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
6a923802a14e5b975989c7f8d32756c6b5eda27e
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/numpy/f2py/f2py2e.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/numpy/f2py/f2py2e.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/numpy/f2py/f2py2e.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/c1/ec/cc/7415585ae840f4099f1c814ea1b5e3eeac6d38b24eff4af9490fd33b2c
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.364583
| 0
| 96
| 1
| 96
| 96
| 0.53125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6ab0f3a7bccf47bbd61fa467ba6a76545eb14bed
| 198
|
py
|
Python
|
api/devjobhubapi/jobs/permissions.py
|
Mastersam07/devjobhub
|
b0e80352bd0ff6d6b89c12093cc13b3ceae309ea
|
[
"MIT"
] | null | null | null |
api/devjobhubapi/jobs/permissions.py
|
Mastersam07/devjobhub
|
b0e80352bd0ff6d6b89c12093cc13b3ceae309ea
|
[
"MIT"
] | 4
|
2021-04-08T21:20:19.000Z
|
2021-09-22T19:48:32.000Z
|
jobs/permissions.py
|
icvntechstudio/devjobapi
|
8cc7cc31abe3a96c8fc3b54a579fb663d98bd237
|
[
"PostgreSQL",
"MIT"
] | null | null | null |
from rest_framework.permissions import BasePermission
class IsEmployee(BasePermission):
def has_permission(self, request, view):
return request.user and request.user.role == 'employee'
| 33
| 63
| 0.772727
| 23
| 198
| 6.565217
| 0.826087
| 0.145695
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146465
| 198
| 6
| 63
| 33
| 0.893491
| 0
| 0
| 0
| 0
| 0
| 0.040201
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
6ac4a9244c73feb26c34586ad97e0c9a72bb2283
| 140
|
py
|
Python
|
app/api_public/routes.py
|
benjaminhuanghuang/math_clone
|
8fd5d5ed878559629101c6c33d61f2ead80a652c
|
[
"MIT"
] | null | null | null |
app/api_public/routes.py
|
benjaminhuanghuang/math_clone
|
8fd5d5ed878559629101c6c33d61f2ead80a652c
|
[
"MIT"
] | null | null | null |
app/api_public/routes.py
|
benjaminhuanghuang/math_clone
|
8fd5d5ed878559629101c6c33d61f2ead80a652c
|
[
"MIT"
] | null | null | null |
from flask import jsonify
from . import api_public
@api_public.route('/hello', methods=['GET'])
def hello():
return jsonify("Hello")
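A small runnable variant of the route above, using a bare Flask app and the built-in test client instead of the api_public blueprint (whose definition is not part of this record):

from flask import Flask, jsonify

app = Flask(__name__)

@app.route('/hello', methods=['GET'])
def hello():
    # same body as the blueprint view above
    return jsonify("Hello")

with app.test_client() as client:
    assert client.get('/hello').get_json() == "Hello"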
| 15.555556
| 44
| 0.7
| 19
| 140
| 5.052632
| 0.631579
| 0.1875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 140
| 8
| 45
| 17.5
| 0.806723
| 0
| 0
| 0
| 0
| 0
| 0.100719
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
|
0
| 5
|
6ac99aea71f8786b83647783f596cf18b1d71016
| 3,063
|
py
|
Python
|
tests/openbb_terminal/stocks/backtesting/test_bt_view.py
|
tehcoderer/GamestonkTerminal
|
54a1b6f545a0016c576e9e00eef5c003d229dacf
|
[
"MIT"
] | 255
|
2022-03-29T16:43:51.000Z
|
2022-03-31T23:57:08.000Z
|
tests/openbb_terminal/stocks/backtesting/test_bt_view.py
|
tehcoderer/GamestonkTerminal
|
54a1b6f545a0016c576e9e00eef5c003d229dacf
|
[
"MIT"
] | 14
|
2022-03-29T14:20:33.000Z
|
2022-03-31T23:39:20.000Z
|
tests/openbb_terminal/stocks/backtesting/test_bt_view.py
|
tehcoderer/GamestonkTerminal
|
54a1b6f545a0016c576e9e00eef5c003d229dacf
|
[
"MIT"
] | 24
|
2022-03-29T15:28:56.000Z
|
2022-03-31T23:54:15.000Z
|
# IMPORTATION STANDARD
from datetime import datetime
# IMPORTATION THIRDPARTY
import pytest
# IMPORTATION INTERNAL
from openbb_terminal.stocks import stocks_helper
from openbb_terminal.stocks.backtesting import bt_view
@pytest.fixture(scope="module")
def vcr_config():
return {
"filter_headers": [("User-Agent", None)],
"filter_query_parameters": [
("period1", "1598220000"),
("period2", "1635980400"),
],
}
@pytest.mark.vcr
@pytest.mark.record_stdout
def test_display_simple_ema(mocker):
yf_download = stocks_helper.yf.download
def mock_yf_download(*args, **kwargs):
kwargs["threads"] = False
return yf_download(*args, **kwargs)
mocker.patch("yfinance.download", side_effect=mock_yf_download)
# MOCK VISUALIZE_OUTPUT
mocker.patch(target="openbb_terminal.helper_classes.TerminalStyle.visualize_output")
ticker = "PM"
start = datetime.strptime("2020-12-01", "%Y-%m-%d")
end = datetime.strptime("2020-12-02", "%Y-%m-%d")
df_stock = stocks_helper.load_ticker(ticker=ticker, start_date=start, end_date=end)
bt_view.display_simple_ema(
ticker=ticker,
df_stock=df_stock,
ema_length=2,
spy_bt=True,
no_bench=False,
export=False,
)
@pytest.mark.vcr
@pytest.mark.record_stdout
def test_display_ema_cross(mocker):
yf_download = stocks_helper.yf.download
def mock_yf_download(*args, **kwargs):
kwargs["threads"] = False
return yf_download(*args, **kwargs)
mocker.patch("yfinance.download", side_effect=mock_yf_download)
# MOCK VISUALIZE_OUTPUT
mocker.patch(target="openbb_terminal.helper_classes.TerminalStyle.visualize_output")
ticker = "PM"
start = datetime.strptime("2020-12-01", "%Y-%m-%d")
end = datetime.strptime("2020-12-02", "%Y-%m-%d")
df_stock = stocks_helper.load_ticker(ticker=ticker, start_date=start, end_date=end)
bt_view.display_ema_cross(
ticker=ticker,
df_stock=df_stock,
short_ema=2,
long_ema=2,
spy_bt=True,
no_bench=False,
shortable=True,
export=False,
)
@pytest.mark.vcr
@pytest.mark.record_stdout
def test_display_rsi_strategy(mocker):
yf_download = stocks_helper.yf.download
def mock_yf_download(*args, **kwargs):
kwargs["threads"] = False
return yf_download(*args, **kwargs)
mocker.patch("yfinance.download", side_effect=mock_yf_download)
# MOCK VISUALIZE_OUTPUT
mocker.patch(target="openbb_terminal.helper_classes.TerminalStyle.visualize_output")
ticker = "PM"
start = datetime.strptime("2020-12-01", "%Y-%m-%d")
end = datetime.strptime("2020-12-02", "%Y-%m-%d")
df_stock = stocks_helper.load_ticker(ticker=ticker, start_date=start, end_date=end)
bt_view.display_rsi_strategy(
ticker=ticker,
df_stock=df_stock,
periods=2,
low_rsi=2,
high_rsi=2,
spy_bt=True,
no_bench=False,
shortable=True,
export=False,
)
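All three tests above wrap yfinance.download so that threads=False is forced before delegating to the real function. Below is a dependency-free sketch of that wrapper pattern with a stand-in download function (not the yfinance API); in the tests, mocker.patch("yfinance.download", side_effect=...) is what routes calls to the wrapper.

def fake_download(*args, **kwargs):
    # stand-in for yf_download above; it just echoes the keyword arguments it received
    return kwargs

_original = fake_download

def mock_download(*args, **kwargs):
    kwargs["threads"] = False  # force single-threaded behaviour, as mock_yf_download does
    return _original(*args, **kwargs)

assert mock_download("PM", threads=True)["threads"] is False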
| 27.845455
| 88
| 0.671237
| 392
| 3,063
| 4.997449
| 0.227041
| 0.07657
| 0.042879
| 0.061256
| 0.789689
| 0.789689
| 0.749872
| 0.738642
| 0.738642
| 0.738642
| 0
| 0.03116
| 0.203722
| 3,063
| 109
| 89
| 28.100917
| 0.772038
| 0.042442
| 0
| 0.654321
| 0
| 0
| 0.155844
| 0.070403
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08642
| false
| 0
| 0.049383
| 0.012346
| 0.185185
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6aef9d1088e42cef261bc4d0127c8a4b2c4bb2fa
| 53
|
py
|
Python
|
enthought/pyface/directory_dialog.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/pyface/directory_dialog.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/pyface/directory_dialog.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from pyface.directory_dialog import *
| 17.666667
| 37
| 0.811321
| 7
| 53
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132075
| 53
| 2
| 38
| 26.5
| 0.913043
| 0.226415
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0a7526f9708caa1d2e106bcbeff0e40f452b7a46
| 172
|
py
|
Python
|
lime/database/db.py
|
toastwaffle/LiME
|
7d529d9eaeb9de7a9aebe6aa373e1b3611bfd2ad
|
[
"MIT"
] | null | null | null |
lime/database/db.py
|
toastwaffle/LiME
|
7d529d9eaeb9de7a9aebe6aa373e1b3611bfd2ad
|
[
"MIT"
] | 69
|
2018-02-04T15:08:07.000Z
|
2020-03-19T22:04:16.000Z
|
lime/database/db.py
|
toastwaffle/LiME
|
7d529d9eaeb9de7a9aebe6aa373e1b3611bfd2ad
|
[
"MIT"
] | null | null | null |
"""Create the SQLAlchemy connection object."""
import flask_sqlalchemy
from . import custom_model
DB = flask_sqlalchemy.SQLAlchemy(model_class=custom_model.CustomModel)
| 21.5
| 70
| 0.819767
| 21
| 172
| 6.47619
| 0.619048
| 0.220588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098837
| 172
| 7
| 71
| 24.571429
| 0.877419
| 0.232558
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0a931ee62ba7d473b041d57b9fcbf79d02b13234
| 28
|
py
|
Python
|
Chapter01/single_quote_inside_quotes1.py
|
kaushalkumarshah/Learn-Python-in-7-Days
|
2663656767c8959ace836f0c0e272f3e501bbe6e
|
[
"MIT"
] | 12
|
2018-07-09T16:20:31.000Z
|
2022-03-21T22:52:15.000Z
|
Chapter01/single_quote_inside_quotes1.py
|
kaushalkumarshah/Learn-Python-in-7-Days
|
2663656767c8959ace836f0c0e272f3e501bbe6e
|
[
"MIT"
] | null | null | null |
Chapter01/single_quote_inside_quotes1.py
|
kaushalkumarshah/Learn-Python-in-7-Days
|
2663656767c8959ace836f0c0e272f3e501bbe6e
|
[
"MIT"
] | 19
|
2018-01-09T12:49:06.000Z
|
2021-11-23T08:05:55.000Z
|
print('Hey there it\'s a cow')
| 28
| 28
| 0.714286
| 7
| 28
| 2.857143
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178571
| 28
| 1
| 28
| 28
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0.413793
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
0a9914bd3de67979c0fd54c3b8a69c1f5adbadf4
| 8,424
|
py
|
Python
|
tests/test_inline_functions/test_selection_queries.py
|
JourneyG/pymapdl
|
23fdc008c151c0546504e4ef8257a64f5f169100
|
[
"MIT"
] | 1
|
2021-07-28T00:42:53.000Z
|
2021-07-28T00:42:53.000Z
|
tests/test_inline_functions/test_selection_queries.py
|
JourneyG/pymapdl
|
23fdc008c151c0546504e4ef8257a64f5f169100
|
[
"MIT"
] | null | null | null |
tests/test_inline_functions/test_selection_queries.py
|
JourneyG/pymapdl
|
23fdc008c151c0546504e4ef8257a64f5f169100
|
[
"MIT"
] | null | null | null |
from ansys.mapdl.core.inline_functions import SelectionStatus
import pytest
class TestSelectionStatus:
@pytest.mark.parametrize('value', [1, -1, 0, 1., -1., 0.])
def test_happy(self, value):
select = SelectionStatus(value)
assert select == value
assert select is not value
@pytest.mark.parametrize('value', [1.5, 999, 99., '1'])
def test_unhappy(self, value):
with pytest.raises(ValueError):
SelectionStatus(value)
class TestNSEL:
def test_selected(self, selection_test_geometry):
q = selection_test_geometry
q._mapdl.nsel('S', 'LOC', 'X', 0)
node = q.node(0, 0, 0)
select = q.nsel(node)
assert select == 1
def test_unselected(self, selection_test_geometry):
q = selection_test_geometry
node = q.node(0, 0, 0)
q._mapdl.nsel('NONE')
select = q.nsel(node)
assert select == -1
def test_undefined(self, selection_test_geometry):
q = selection_test_geometry
select = q.nsel(999)
assert select == 0
class TestKSEL:
def test_selected(self, selection_test_geometry):
q = selection_test_geometry
q._mapdl.ksel('S', 'LOC', 'X', 0)
node = q.kp(0, 0, 0)
select = q.ksel(node)
assert select == 1
def test_unselected(self, selection_test_geometry):
q = selection_test_geometry
node = q.kp(0, 0, 0)
q._mapdl.ksel('NONE')
select = q.ksel(node)
assert select == -1
def test_undefined(self, selection_test_geometry):
q = selection_test_geometry
select = q.ksel(999)
assert select == 0
class TestLSEL:
def test_selected(self, selection_test_geometry):
q = selection_test_geometry
q._mapdl.lsel('all')
# there are 6 lines numbered 1-6
for line in range(1, 7, 1):
select = q.lsel(line)
assert select == 1
def test_unselected(self, selection_test_geometry):
q = selection_test_geometry
q._mapdl.lsel('NONE')
for line in range(1, 7, 1):
select = q.lsel(line)
assert select == -1
def test_undefined(self, selection_test_geometry):
q = selection_test_geometry
select = q.lsel(999)
assert select == 0
class TestASEL:
def test_selected(self, selection_test_geometry):
q = selection_test_geometry
q._mapdl.asel('all')
# there are 4 areas numbered 1-4
for area in range(1, 5, 1):
select = q.asel(area)
assert select == 1
def test_unselected(self, selection_test_geometry):
q = selection_test_geometry
q._mapdl.asel('NONE')
for area in range(1, 5, 1):
select = q.asel(area)
assert select == -1
def test_undefined(self, selection_test_geometry):
q = selection_test_geometry
select = q.asel(999)
assert select == 0
class TestESEL:
def test_selected(self, selection_test_geometry):
q = selection_test_geometry
q._mapdl.esel('all')
# there are at least 4 elements numbered 1-4
for element in range(1, 5, 1):
select = q.esel(element)
assert select == 1
def test_unselected(self, selection_test_geometry):
q = selection_test_geometry
q._mapdl.esel('NONE')
for element in range(1, 5, 1):
select = q.esel(element)
assert select == -1
def test_undefined(self, selection_test_geometry):
q = selection_test_geometry
select = q.esel(999)
assert select == 0
class TestVSEL:
def test_selected(self, selection_test_geometry):
q = selection_test_geometry
q._mapdl.vsel('all')
select = q.vsel(1)
assert select == 1
def test_unselected(self, selection_test_geometry):
q = selection_test_geometry
q._mapdl.vsel('NONE')
select = q.vsel(1)
assert select == -1
def test_undefined(self, selection_test_geometry):
q = selection_test_geometry
select = q.vsel(999)
assert select == 0
class TestNDNEXT:
def test_existing_nodes(self, selection_test_geometry,
common_functions_and_classes):
get_details_of_nodes, get_details_of_elements, _, _ = \
common_functions_and_classes
q = selection_test_geometry
nodes = get_details_of_nodes(q._mapdl)
next_ = q.ndnext(1)
assert next_ in nodes
def test_unselected_nodes(self, selection_test_geometry,
common_functions_and_classes):
get_details_of_nodes, get_details_of_elements, _, _ = \
common_functions_and_classes
q = selection_test_geometry
nodes = get_details_of_nodes(q._mapdl)
last_node = len(nodes)
next_ = q.ndnext(last_node)
assert next_ == 0
def test_non_existing_nodes(self, selection_test_geometry):
q = selection_test_geometry
next_ = q.ndnext(999)
assert next_ == 0
class TestELNEXT:
def test_existing_elements(self, selection_test_geometry,
common_functions_and_classes):
get_details_of_nodes, get_details_of_elements, _, _ = \
common_functions_and_classes
q = selection_test_geometry
elements = get_details_of_elements(q._mapdl)
next_ = q.elnext(1)
assert next_ in elements
def test_unselected_elements(self, selection_test_geometry,
common_functions_and_classes):
get_details_of_nodes, get_details_of_elements, _, _ = \
common_functions_and_classes
q = selection_test_geometry
elements = get_details_of_elements(q._mapdl)
last_element = len(elements)
next_ = q.elnext(last_element)
assert next_ == 0
def test_non_existing_elements(self, selection_test_geometry):
q = selection_test_geometry
next_ = q.elnext(999)
assert next_ == 0
class TestKPNEXT:
def test_existing_kps(self, selection_test_geometry):
q = selection_test_geometry
next_ = q.kpnext(1)
assert next_ == 2
def test_unselected_kps(self, selection_test_geometry):
q = selection_test_geometry
next_ = q.kpnext(4)
assert next_ == 0
def test_non_existing_kps(self, selection_test_geometry):
q = selection_test_geometry
next_ = q.kpnext(999)
assert next_ == 0
class TestLSNEXT:
def test_existing_lines(self, selection_test_geometry):
# there are 6 lines in the selection_test_geometry fixture
q = selection_test_geometry
next_ = q.lsnext(1)
assert next_ == 2
def test_unselected_lines(self, selection_test_geometry):
q = selection_test_geometry
next_ = q.lsnext(6)
assert next_ == 0
def test_non_existing_lines(self, selection_test_geometry):
q = selection_test_geometry
next_ = q.lsnext(999)
assert next_ == 0
class TestARNEXT:
def test_existing_areas(self, selection_test_geometry):
# there are 4 areas in the selection_test_geometry fixture
q = selection_test_geometry
next_ = q.arnext(1)
assert next_ == 2
def test_unselected_areas(self, selection_test_geometry):
q = selection_test_geometry
next_ = q.arnext(4)
assert next_ == 0
def test_non_existing_areas(self, selection_test_geometry):
q = selection_test_geometry
next_ = q.arnext(999)
assert next_ == 0
class TestVLNEXT:
@staticmethod
def make_volumes(mapdl):
mapdl.prep7()
point1 = mapdl.k(999, 0, 10, 0)
point2 = mapdl.k(99, 0, 0, 10)
kps = [mapdl.k(i + 1, i, 0, 0) for i in range(10)]
vols = [mapdl.v(i, i + 1, point1, point2) for i in kps[:-1]]
return vols
def test_existing_volumes(self, query):
q = query
_ = self.make_volumes(q._mapdl)
next_ = q.vlnext(1)
assert next_ == 2
def test_unselected_volumes(self, query):
q = query
vols = self.make_volumes(q._mapdl)
next_ = q.vlnext(len(vols))
assert next_ == 0
def test_non_existing_volumes(self, query):
next_ = query.vlnext(999)
assert next_ == 0
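TestSelectionStatus above pins down the accepted values (1, -1, 0 and their float forms), that anything else raises ValueError, and that equality with the raw value holds while identity does not. Below is a hypothetical IntEnum-based stand-in satisfying those expectations; the member names are invented, not pymapdl's.

from enum import IntEnum

class SelectionStatusSketch(IntEnum):
    # invented names; only the numeric values are taken from the tests above
    SELECTED = 1
    UNSELECTED = -1
    UNDEFINED = 0

for value in (1, -1, 0, 1., -1., 0.):
    member = SelectionStatusSketch(value)
    assert member == value and member is not value

for bad in (1.5, 999, 99., '1'):
    try:
        SelectionStatusSketch(bad)
    except ValueError:
        pass
    else:
        raise AssertionError(f"{bad!r} should have been rejected")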
| 30.411552
| 69
| 0.62405
| 1,070
| 8,424
| 4.605607
| 0.106542
| 0.179383
| 0.289773
| 0.165179
| 0.800528
| 0.73336
| 0.699472
| 0.649959
| 0.62642
| 0.612622
| 0
| 0.027262
| 0.290242
| 8,424
| 276
| 70
| 30.521739
| 0.796956
| 0.026591
| 0
| 0.576037
| 0
| 0
| 0.006958
| 0
| 0
| 0
| 0
| 0
| 0.175115
| 1
| 0.179724
| false
| 0
| 0.009217
| 0
| 0.253456
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0aa86ce31d5276d2cd39d1100d7110966948923e
| 34,414
|
py
|
Python
|
src/sage/manifolds/differentiable/diff_form_module.py
|
LaisRast/sage
|
5fb2a6ea44400e469caee82748cf863ca0c5f724
|
[
"BSL-1.0"
] | null | null | null |
src/sage/manifolds/differentiable/diff_form_module.py
|
LaisRast/sage
|
5fb2a6ea44400e469caee82748cf863ca0c5f724
|
[
"BSL-1.0"
] | null | null | null |
src/sage/manifolds/differentiable/diff_form_module.py
|
LaisRast/sage
|
5fb2a6ea44400e469caee82748cf863ca0c5f724
|
[
"BSL-1.0"
] | null | null | null |
r"""
Differential Form Modules
The set `\Omega^p(U, \Phi)` of `p`-forms along a differentiable manifold `U`
with values on a differentiable manifold `M` via a differentiable map
`\Phi:\ U \rightarrow M` (possibly `U = M` and `\Phi = \mathrm{Id}_M`)
is a module over the algebra `C^k(U)` of differentiable scalar fields on `U`.
It is a free module if and only if `M` is parallelizable. Accordingly,
two classes implement `\Omega^p(U, \Phi)`:
- :class:`DiffFormModule` for differential forms with values on a generic
(in practice, not parallelizable) differentiable manifold `M`
- :class:`DiffFormFreeModule` for differential forms with values on a
parallelizable manifold `M`
AUTHORS:
- Eric Gourgoulhon (2015): initial version
- Travis Scrimshaw (2016): review tweaks
REFERENCES:
- [KN1963]_
- [Lee2013]_
"""
# *****************************************************************************
# Copyright (C) 2015 Eric Gourgoulhon <eric.gourgoulhon@obspm.fr>
# Copyright (C) 2016 Travis Scrimshaw <tscrimsh@umn.edu>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# https://www.gnu.org/licenses/
# *****************************************************************************
from sage.misc.cachefunc import cached_method
from sage.structure.unique_representation import UniqueRepresentation
from sage.structure.parent import Parent
from sage.categories.modules import Modules
from sage.tensor.modules.ext_pow_free_module import ExtPowerDualFreeModule
from sage.manifolds.differentiable.diff_form import DiffForm, DiffFormParal
from sage.manifolds.differentiable.tensorfield import TensorField
from sage.manifolds.differentiable.tensorfield_paral import TensorFieldParal
class DiffFormModule(UniqueRepresentation, Parent):
r"""
Module of differential forms of a given degree `p` (`p`-forms) along a
differentiable manifold `U` with values on a differentiable manifold `M`.
Given a differentiable manifold `U` and a differentiable map
`\Phi: U \rightarrow M` to a differentiable manifold `M`, the set
`\Omega^p(U, \Phi)` of `p`-forms along `U` with values on `M` is
a module over `C^k(U)`, the commutative algebra of differentiable
scalar fields on `U` (see
:class:`~sage.manifolds.differentiable.scalarfield_algebra.DiffScalarFieldAlgebra`).
The standard case of `p`-forms *on* a differentiable manifold `M`
corresponds to `U = M` and `\Phi = \mathrm{Id}_M`. Other common cases
are `\Phi` being an immersion and `\Phi` being a curve in `M`
(`U` is then an open interval of `\RR`).
.. NOTE::
This class implements `\Omega^p(U,\Phi)` in the case where `M` is
not assumed to be parallelizable; the module `\Omega^p(U, \Phi)`
is then not necessarily free. If `M` is parallelizable, the class
:class:`DiffFormFreeModule` must be used instead.
INPUT:
- ``vector_field_module`` -- module `\mathfrak{X}(U, \Phi)` of vector
fields along `U` with values on `M` via the map `\Phi: U \rightarrow M`
- ``degree`` -- positive integer; the degree `p` of the differential forms
EXAMPLES:
Module of 2-forms on a non-parallelizable 2-dimensional manifold::
sage: M = Manifold(2, 'M')
sage: U = M.open_subset('U') ; V = M.open_subset('V')
sage: M.declare_union(U,V) # M is the union of U and V
sage: c_xy.<x,y> = U.chart() ; c_uv.<u,v> = V.chart()
sage: transf = c_xy.transition_map(c_uv, (x+y, x-y),
....: intersection_name='W', restrictions1= x>0, restrictions2= u+v>0)
sage: inv = transf.inverse()
sage: W = U.intersection(V)
sage: eU = c_xy.frame() ; eV = c_uv.frame()
sage: XM = M.vector_field_module() ; XM
Module X(M) of vector fields on the 2-dimensional differentiable
manifold M
sage: A = M.diff_form_module(2) ; A
Module Omega^2(M) of 2-forms on the 2-dimensional differentiable
manifold M
sage: latex(A)
\Omega^{2}\left(M\right)
``A`` is nothing but the second exterior power of the dual of ``XM``, i.e.
we have `\Omega^{2}(M) = \Lambda^2(\mathfrak{X}(M)^*)`::
sage: A is XM.dual_exterior_power(2)
True
Modules of differential forms are unique::
sage: A is M.diff_form_module(2)
True
`\Omega^2(M)` is a module over the algebra `C^k(M)` of (differentiable)
scalar fields on `M`::
sage: A.category()
Category of modules over Algebra of differentiable scalar fields on
the 2-dimensional differentiable manifold M
sage: CM = M.scalar_field_algebra() ; CM
Algebra of differentiable scalar fields on the 2-dimensional
differentiable manifold M
sage: A in Modules(CM)
True
sage: A.base_ring() is CM
True
sage: A.base_module()
Module X(M) of vector fields on the 2-dimensional differentiable
manifold M
sage: A.base_module() is XM
True
Elements can be constructed from ``A()``. In particular, ``0`` yields
the zero element of ``A``::
sage: z = A(0) ; z
2-form zero on the 2-dimensional differentiable manifold M
sage: z.display(eU)
zero = 0
sage: z.display(eV)
zero = 0
sage: z is A.zero()
True
while non-zero elements are constructed by providing their components in a
given vector frame::
sage: a = A([[0,3*x],[-3*x,0]], frame=eU, name='a') ; a
2-form a on the 2-dimensional differentiable manifold M
sage: a.add_comp_by_continuation(eV, W, c_uv) # finishes initializ. of a
sage: a.display(eU)
a = 3*x dx∧dy
sage: a.display(eV)
a = (-3/4*u - 3/4*v) du∧dv
An alternative is to construct the 2-form from an empty list of
components and to set the nonzero nonredundant components afterwards::
sage: a = A([], name='a')
sage: a[eU,0,1] = 3*x
sage: a.add_comp_by_continuation(eV, W, c_uv)
sage: a.display(eU)
a = 3*x dx∧dy
sage: a.display(eV)
a = (-3/4*u - 3/4*v) du∧dv
The module `\Omega^1(M)` is nothing but the dual of `\mathfrak{X}(M)`
(the module of vector fields on `M`)::
sage: L1 = M.diff_form_module(1) ; L1
Module Omega^1(M) of 1-forms on the 2-dimensional differentiable
manifold M
sage: L1 is XM.dual()
True
Since any tensor field of type `(0,1)` is a 1-form, there is a coercion
map from the set `T^{(0,1)}(M)` of such tensors to `\Omega^1(M)`::
sage: T01 = M.tensor_field_module((0,1)) ; T01
Module T^(0,1)(M) of type-(0,1) tensors fields on the 2-dimensional
differentiable manifold M
sage: L1.has_coerce_map_from(T01)
True
There is also a coercion map in the reverse direction::
sage: T01.has_coerce_map_from(L1)
True
For a degree `p \geq 2`, the coercion holds only in the direction
`\Omega^p(M)\rightarrow T^{(0,p)}(M)`::
sage: T02 = M.tensor_field_module((0,2)) ; T02
Module T^(0,2)(M) of type-(0,2) tensors fields on the 2-dimensional
differentiable manifold M
sage: T02.has_coerce_map_from(A)
True
sage: A.has_coerce_map_from(T02)
False
The coercion map `T^{(0,1)}(M) \rightarrow \Omega^1(M)` in action::
sage: b = T01([y,x], frame=eU, name='b') ; b
Tensor field b of type (0,1) on the 2-dimensional differentiable
manifold M
sage: b.add_comp_by_continuation(eV, W, c_uv)
sage: b.display(eU)
b = y dx + x dy
sage: b.display(eV)
b = 1/2*u du - 1/2*v dv
sage: lb = L1(b) ; lb
1-form b on the 2-dimensional differentiable manifold M
sage: lb.display(eU)
b = y dx + x dy
sage: lb.display(eV)
b = 1/2*u du - 1/2*v dv
The coercion map `\Omega^1(M) \rightarrow T^{(0,1)}(M)` in action::
sage: tlb = T01(lb) ; tlb
Tensor field b of type (0,1) on the 2-dimensional differentiable
manifold M
sage: tlb.display(eU)
b = y dx + x dy
sage: tlb.display(eV)
b = 1/2*u du - 1/2*v dv
sage: tlb == b
True
The coercion map `\Omega^2(M) \rightarrow T^{(0,2)}(M)` in action::
sage: ta = T02(a) ; ta
Tensor field a of type (0,2) on the 2-dimensional differentiable
manifold M
sage: ta.display(eU)
a = 3*x dx⊗dy - 3*x dy⊗dx
sage: a.display(eU)
a = 3*x dx∧dy
sage: ta.display(eV)
a = (-3/4*u - 3/4*v) du⊗dv + (3/4*u + 3/4*v) dv⊗du
sage: a.display(eV)
a = (-3/4*u - 3/4*v) du∧dv
There is also coercion to subdomains, which is nothing but the restriction
of the differential form to some subset of its domain::
sage: L2U = U.diff_form_module(2) ; L2U
Free module Omega^2(U) of 2-forms on the Open subset U of the
2-dimensional differentiable manifold M
sage: L2U.has_coerce_map_from(A)
True
sage: a_U = L2U(a) ; a_U
2-form a on the Open subset U of the 2-dimensional differentiable
manifold M
sage: a_U.display(eU)
a = 3*x dx∧dy
"""
Element = DiffForm
def __init__(self, vector_field_module, degree):
r"""
Construct a module of differential forms.
TESTS:
Module of 2-forms on a non-parallelizable 2-dimensional manifold::
sage: M = Manifold(2, 'M')
sage: U = M.open_subset('U') ; V = M.open_subset('V')
sage: M.declare_union(U,V) # M is the union of U and V
sage: c_xy.<x,y> = U.chart() ; c_uv.<u,v> = V.chart()
sage: transf = c_xy.transition_map(c_uv, (x+y, x-y),
....: intersection_name='W', restrictions1= x>0,
....: restrictions2= u+v>0)
sage: inv = transf.inverse()
sage: from sage.manifolds.differentiable.diff_form_module import \
....: DiffFormModule
sage: A = DiffFormModule(M.vector_field_module(), 2) ; A
Module Omega^2(M) of 2-forms on the 2-dimensional differentiable
manifold M
sage: TestSuite(A).run(skip='_test_elements')
In the above test suite, ``_test_elements`` is skipped because of the
``_test_pickling`` error of the elements (to be fixed in
:class:`sage.manifolds.differentiable.tensorfield.TensorField`)
"""
domain = vector_field_module._domain
dest_map = vector_field_module._dest_map
name = "Omega^{}(".format(degree) + domain._name
latex_name = r"\Omega^{{{}}}\left({}".format(degree, domain._latex_name)
if dest_map is not domain.identity_map():
dm_name = dest_map._name
dm_latex_name = dest_map._latex_name
if dm_name is None:
dm_name = "unnamed map"
if dm_latex_name is None:
dm_latex_name = r"\mathrm{unnamed\; map}"
name += "," + dm_name
latex_name += "," + dm_latex_name
self._name = name + ")"
self._latex_name = latex_name + r"\right)"
self._vmodule = vector_field_module
self._degree = degree
# the member self._ring is created for efficiency (to avoid calls to
# self.base_ring()):
self._ring = domain.scalar_field_algebra()
Parent.__init__(self, base=self._ring, category=Modules(self._ring))
self._domain = domain
self._dest_map = dest_map
self._ambient_domain = vector_field_module._ambient_domain
# NB: self._zero_element is not constructed here, since no element
# can be constructed here, to avoid some infinite recursion.
#### Parent methods
def _element_constructor_(self, comp=[], frame=None, name=None,
latex_name=None):
r"""
Construct a differential form.
TESTS::
sage: M = Manifold(2, 'M')
sage: U = M.open_subset('U'); V = M.open_subset('V')
sage: c_xy.<x,y> = U.chart(); c_uv.<u,v> = V.chart()
sage: M.declare_union(U,V)
sage: A = M.diff_form_module(2)
sage: a = A([[0, x*y], [-x*y, 0]], name='a'); a
2-form a on the 2-dimensional differentiable manifold M
sage: a.display(c_xy.frame())
a = x*y dx∧dy
sage: A(0) is A.zero()
True
"""
try:
if comp.is_trivial_zero():
return self.zero()
except AttributeError:
if comp == 0:
return self.zero()
if isinstance(comp, (DiffForm, DiffFormParal)):
# coercion by domain restriction
if (self._degree == comp._tensor_type[1]
and self._domain.is_subset(comp._domain)
and self._ambient_domain.is_subset(comp._ambient_domain)):
return comp.restrict(self._domain)
else:
raise TypeError("cannot convert the {} ".format(comp) +
"to an element of {}".format(self))
if isinstance(comp, TensorField):
# coercion of a tensor of type (0,1) to a linear form
tensor = comp # for readability
if (tensor.tensor_type() == (0,1) and self._degree == 1
and tensor._vmodule is self._vmodule):
resu = self.element_class(self._vmodule, 1, name=tensor._name,
latex_name=tensor._latex_name)
for dom, rst in tensor._restrictions.items():
resu._restrictions[dom] = dom.diff_form_module(1)(rst)
return resu
else:
raise TypeError("cannot convert the {} ".format(tensor) +
"to an element of {}".format(self))
if not isinstance(comp, (list, tuple)):
raise TypeError("cannot convert the {} ".format(comp) +
"to an element of {}".format(self))
# standard construction
resu = self.element_class(self._vmodule, self._degree, name=name,
latex_name=latex_name)
if comp:
resu.set_comp(frame)[:] = comp
return resu
def _an_element_(self):
r"""
Construct some (unnamed) differential form.
TESTS::
sage: M = Manifold(2, 'M')
sage: U = M.open_subset('U'); V = M.open_subset('V')
sage: c_xy.<x,y> = U.chart(); c_uv.<u,v> = V.chart()
sage: M.declare_union(U,V)
sage: A = M.diff_form_module(2)
sage: A._an_element_()
2-form on the 2-dimensional differentiable manifold M
"""
resu = self.element_class(self._vmodule, self._degree)
for oc in self._domain.open_covers(trivial=False):
# the first non-trivial open cover is selected
for dom in oc:
vmodule_dom = dom.vector_field_module(
dest_map=self._dest_map.restrict(dom))
dmodule_dom = vmodule_dom.dual_exterior_power(self._degree)
resu.set_restriction(dmodule_dom._an_element_())
return resu
return resu
def _coerce_map_from_(self, other):
r"""
Determine whether coercion to ``self`` exists from other parent.
TESTS::
sage: M = Manifold(3, 'M')
sage: A1 = M.diff_form_module(1)
sage: A1._coerce_map_from_(M.tensor_field_module((0,1)))
True
sage: A2 = M.diff_form_module(2)
sage: A2._coerce_map_from_(M.tensor_field_module((0,2)))
False
sage: U = M.open_subset('U')
sage: A2U = U.diff_form_module(2)
sage: A2U._coerce_map_from_(A2)
True
sage: A2._coerce_map_from_(A2U)
False
"""
if isinstance(other, (DiffFormModule, DiffFormFreeModule)):
# coercion by domain restriction
return (self._degree == other._degree
and self._domain.is_subset(other._domain)
and self._ambient_domain.is_subset(other._ambient_domain))
from sage.manifolds.differentiable.tensorfield_module import TensorFieldModule
if isinstance(other, TensorFieldModule):
# coercion of a type-(0,1) tensor to a linear form
return (self._vmodule is other._vmodule and self._degree == 1
and other.tensor_type() == (0,1))
return False
@cached_method
def zero(self):
"""
Return the zero of ``self``.
EXAMPLES::
sage: M = Manifold(3, 'M')
sage: A2 = M.diff_form_module(2)
sage: A2.zero()
2-form zero on the 3-dimensional differentiable manifold M
"""
zero = self._element_constructor_(name='zero', latex_name='0')
for frame in self._domain._frames:
if self._dest_map.restrict(frame._domain) == frame._dest_map:
zero.add_comp(frame)
# (since new components are initialized to zero)
zero._is_zero = True # This element is certainly zero
zero.set_immutable()
return zero
#### End of Parent methods
def _repr_(self):
r"""
Return a string representation of the object.
TESTS::
sage: M = Manifold(3, 'M')
sage: A2 = M.diff_form_module(2)
sage: A2
Module Omega^2(M) of 2-forms on
the 3-dimensional differentiable manifold M
"""
description = "Module "
if self._name is not None:
description += self._name + " "
description += "of {}-forms ".format(self._degree)
if self._dest_map is self._domain.identity_map():
description += "on the {}".format(self._domain)
else:
description += "along the {} mapped into the {}".format(
self._domain, self._ambient_domain)
return description
def _latex_(self):
r"""
Return a LaTeX representation of the object.
TESTS::
sage: M = Manifold(3, 'M', latex_name=r'\mathcal{M}')
sage: A2 = M.diff_form_module(2)
sage: A2._latex_()
'\\Omega^{2}\\left(\\mathcal{M}\\right)'
sage: latex(A2) # indirect doctest
\Omega^{2}\left(\mathcal{M}\right)
"""
if self._latex_name is None:
return r'\mbox{' + str(self) + r'}'
else:
return self._latex_name
def base_module(self):
r"""
Return the vector field module on which the differential form module
``self`` is constructed.
OUTPUT:
- a
:class:`~sage.manifolds.differentiable.vectorfield_module.VectorFieldModule`
representing the module on which ``self`` is defined
EXAMPLES::
sage: M = Manifold(3, 'M')
sage: A2 = M.diff_form_module(2) ; A2
Module Omega^2(M) of 2-forms on the 3-dimensional differentiable
manifold M
sage: A2.base_module()
Module X(M) of vector fields on the 3-dimensional differentiable
manifold M
sage: A2.base_module() is M.vector_field_module()
True
sage: U = M.open_subset('U')
sage: A2U = U.diff_form_module(2) ; A2U
Module Omega^2(U) of 2-forms on the Open subset U of the
3-dimensional differentiable manifold M
sage: A2U.base_module()
Module X(U) of vector fields on the Open subset U of the
3-dimensional differentiable manifold M
"""
return self._vmodule
def degree(self):
r"""
Return the degree of the differential forms in ``self``.
OUTPUT:
- integer `p` such that ``self`` is a set of `p`-forms
EXAMPLES::
sage: M = Manifold(3, 'M')
sage: M.diff_form_module(1).degree()
1
sage: M.diff_form_module(2).degree()
2
sage: M.diff_form_module(3).degree()
3
"""
return self._degree
# *****************************************************************************
class DiffFormFreeModule(ExtPowerDualFreeModule):
r"""
Free module of differential forms of a given degree `p` (`p`-forms) along
a differentiable manifold `U` with values on a parallelizable manifold `M`.
Given a differentiable manifold `U` and a differentiable map
`\Phi:\; U \rightarrow M` to a parallelizable manifold `M` of dimension
`n`, the set `\Omega^p(U, \Phi)` of `p`-forms along `U` with values on `M`
is a free module of rank `\binom{n}{p}` over `C^k(U)`, the commutative
algebra of differentiable scalar fields on `U` (see
:class:`~sage.manifolds.differentiable.scalarfield_algebra.DiffScalarFieldAlgebra`).
The standard case of `p`-forms *on* a differentiable manifold `M`
corresponds to `U = M` and `\Phi = \mathrm{Id}_M`. Other common cases are
`\Phi` being an immersion and `\Phi` being a curve in `M` (`U` is then an
open interval of `\RR`).
.. NOTE::
This class implements `\Omega^p(U, \Phi)` in the case where `M` is
parallelizable; `\Omega^p(U, \Phi)` is then a *free* module. If `M`
is not parallelizable, the class :class:`DiffFormModule` must be used
instead.
INPUT:
- ``vector_field_module`` -- free module `\mathfrak{X}(U,\Phi)` of vector
fields along `U` associated with the map `\Phi: U \rightarrow V`
- ``degree`` -- positive integer; the degree `p` of the differential forms
EXAMPLES:
Free module of 2-forms on a parallelizable 3-dimensional manifold::
sage: M = Manifold(3, 'M')
sage: X.<x,y,z> = M.chart()
sage: XM = M.vector_field_module() ; XM
Free module X(M) of vector fields on the 3-dimensional differentiable
manifold M
sage: A = M.diff_form_module(2) ; A
Free module Omega^2(M) of 2-forms on the 3-dimensional differentiable
manifold M
sage: latex(A)
\Omega^{2}\left(M\right)
``A`` is nothing but the second exterior power of the dual of ``XM``, i.e.
we have `\Omega^{2}(M) = \Lambda^2(\mathfrak{X}(M)^*)` (see
:class:`~sage.tensor.modules.ext_pow_free_module.ExtPowerDualFreeModule`)::
sage: A is XM.dual_exterior_power(2)
True
`\Omega^{2}(M)` is a module over the algebra `C^k(M)` of (differentiable)
scalar fields on `M`::
sage: A.category()
Category of finite dimensional modules over Algebra of differentiable
scalar fields on the 3-dimensional differentiable manifold M
sage: CM = M.scalar_field_algebra() ; CM
Algebra of differentiable scalar fields on the 3-dimensional
differentiable manifold M
sage: A in Modules(CM)
True
sage: A.base_ring()
Algebra of differentiable scalar fields on
the 3-dimensional differentiable manifold M
sage: A.base_module()
Free module X(M) of vector fields on
the 3-dimensional differentiable manifold M
sage: A.base_module() is XM
True
sage: A.rank()
3
Elements can be constructed from `A`. In particular, ``0`` yields
the zero element of `A`::
sage: A(0)
2-form zero on the 3-dimensional differentiable manifold M
sage: A(0) is A.zero()
True
while non-zero elements are constructed by providing their components
in a given vector frame::
sage: comp = [[0,3*x,-z],[-3*x,0,4],[z,-4,0]]
sage: a = A(comp, frame=X.frame(), name='a') ; a
2-form a on the 3-dimensional differentiable manifold M
sage: a.display()
a = 3*x dx∧dy - z dx∧dz + 4 dy∧dz
An alternative is to construct the 2-form from an empty list of
components and to set the nonzero nonredundant components afterwards::
sage: a = A([], name='a')
sage: a[0,1] = 3*x # component in the manifold's default frame
sage: a[0,2] = -z
sage: a[1,2] = 4
sage: a.display()
a = 3*x dx∧dy - z dx∧dz + 4 dy∧dz
The module `\Omega^1(M)` is nothing but the dual of `\mathfrak{X}(M)`
(the free module of vector fields on `M`)::
sage: L1 = M.diff_form_module(1) ; L1
Free module Omega^1(M) of 1-forms on the 3-dimensional differentiable
manifold M
sage: L1 is XM.dual()
True
Since any tensor field of type `(0,1)` is a 1-form, there is a coercion
map from the set `T^{(0,1)}(M)` of such tensors to `\Omega^1(M)`::
sage: T01 = M.tensor_field_module((0,1)) ; T01
Free module T^(0,1)(M) of type-(0,1) tensors fields on the
3-dimensional differentiable manifold M
sage: L1.has_coerce_map_from(T01)
True
There is also a coercion map in the reverse direction::
sage: T01.has_coerce_map_from(L1)
True
For a degree `p \geq 2`, the coercion holds only in the direction
`\Omega^p(M) \rightarrow T^{(0,p)}(M)`::
sage: T02 = M.tensor_field_module((0,2)); T02
Free module T^(0,2)(M) of type-(0,2) tensors fields on the
3-dimensional differentiable manifold M
sage: T02.has_coerce_map_from(A)
True
sage: A.has_coerce_map_from(T02)
False
The coercion map `T^{(0,1)}(M) \rightarrow \Omega^1(M)` in action::
sage: b = T01([-x,2,3*y], name='b'); b
Tensor field b of type (0,1) on the 3-dimensional differentiable
manifold M
sage: b.display()
b = -x dx + 2 dy + 3*y dz
sage: lb = L1(b) ; lb
1-form b on the 3-dimensional differentiable manifold M
sage: lb.display()
b = -x dx + 2 dy + 3*y dz
The coercion map `\Omega^1(M) \rightarrow T^{(0,1)}(M)` in action::
sage: tlb = T01(lb); tlb
Tensor field b of type (0,1) on
the 3-dimensional differentiable manifold M
sage: tlb == b
True
The coercion map `\Omega^2(M) \rightarrow T^{(0,2)}(M)` in action::
sage: T02 = M.tensor_field_module((0,2)) ; T02
Free module T^(0,2)(M) of type-(0,2) tensors fields on the
3-dimensional differentiable manifold M
sage: ta = T02(a) ; ta
Tensor field a of type (0,2) on the 3-dimensional differentiable
manifold M
sage: ta.display()
a = 3*x dx⊗dy - z dx⊗dz - 3*x dy⊗dx + 4 dy⊗dz + z dz⊗dx - 4 dz⊗dy
sage: a.display()
a = 3*x dx∧dy - z dx∧dz + 4 dy∧dz
sage: ta.symmetries() # the antisymmetry is preserved
no symmetry; antisymmetry: (0, 1)
There is also coercion to subdomains, which is nothing but the
restriction of the differential form to some subset of its domain::
sage: U = M.open_subset('U', coord_def={X: x^2+y^2<1})
sage: B = U.diff_form_module(2) ; B
Free module Omega^2(U) of 2-forms on the Open subset U of the
3-dimensional differentiable manifold M
sage: B.has_coerce_map_from(A)
True
sage: a_U = B(a) ; a_U
2-form a on the Open subset U of the 3-dimensional differentiable
manifold M
sage: a_U.display()
a = 3*x dx∧dy - z dx∧dz + 4 dy∧dz
"""
Element = DiffFormParal
def __init__(self, vector_field_module, degree):
r"""
Construct a free module of differential forms.
TESTS::
sage: M = Manifold(3, 'M')
sage: X.<x,y,z> = M.chart()
sage: from sage.manifolds.differentiable.diff_form_module import DiffFormFreeModule
sage: A = DiffFormFreeModule(M.vector_field_module(), 2) ; A
Free module Omega^2(M) of 2-forms on
the 3-dimensional differentiable manifold M
sage: TestSuite(A).run()
"""
domain = vector_field_module._domain
dest_map = vector_field_module._dest_map
name = "Omega^{}(".format(degree) + domain._name
latex_name = r"\Omega^{{{}}}\left({}".format(degree, domain._latex_name)
if dest_map is not domain.identity_map():
dm_name = dest_map._name
dm_latex_name = dest_map._latex_name
if dm_name is None:
dm_name = "unnamed map"
if dm_latex_name is None:
dm_latex_name = r"\mathrm{unnamed\; map}"
name += "," + dm_name
latex_name += "," + dm_latex_name
name += ")"
latex_name += r"\right)"
ExtPowerDualFreeModule.__init__(self, vector_field_module, degree,
name=name, latex_name=latex_name)
self._domain = domain
self._dest_map = dest_map
self._ambient_domain = vector_field_module._ambient_domain
#### Parent methods
def _element_constructor_(self, comp=[], frame=None, name=None,
latex_name=None):
r"""
Construct a differential form.
TESTS::
sage: M = Manifold(2, 'M')
sage: X.<x,y> = M.chart() # makes M parallelizable
sage: A = M.diff_form_module(2)
sage: a = A([[0, x], [-x, 0]], name='a'); a
2-form a on the 2-dimensional differentiable manifold M
sage: a.display()
a = x dx∧dy
sage: A(0) is A.zero()
True
Check that :trac:`27658` is fixed::
sage: f = M.scalar_field(x)
sage: f in A
False
"""
try:
if comp.is_trivial_zero():
return self.zero()
except AttributeError:
if comp == 0:
return self.zero()
if isinstance(comp, (DiffForm, DiffFormParal)):
# coercion by domain restriction
if (self._degree == comp._tensor_type[1]
and self._domain.is_subset(comp._domain)
and self._ambient_domain.is_subset(comp._ambient_domain)):
return comp.restrict(self._domain)
else:
raise TypeError("cannot convert the {} ".format(comp) +
"to a differential form in {}".format(self))
if isinstance(comp, TensorFieldParal):
# coercion of a tensor of type (0,1) to a linear form
tensor = comp # for readability
if (tensor.tensor_type() == (0,1) and self._degree == 1
and tensor._fmodule is self._fmodule):
resu = self.element_class(self._fmodule, 1, name=tensor._name,
latex_name=tensor._latex_name)
for frame, comp in tensor._components.items():
resu._components[frame] = comp.copy()
return resu
else:
raise TypeError("cannot convert the {} ".format(tensor) +
"to an element of {}".format(self))
if not isinstance(comp, (list, tuple)):
raise TypeError("cannot convert the {} ".format(comp) +
"to an element of {}".format(self))
# standard construction
resu = self.element_class(self._fmodule, self._degree, name=name,
latex_name=latex_name)
if comp:
resu.set_comp(frame)[:] = comp
return resu
# Rem: _an_element_ is declared in the superclass ExtPowerDualFreeModule
def _coerce_map_from_(self, other):
r"""
Determine whether coercion to ``self`` exists from other parent.
TESTS::
sage: M = Manifold(3, 'M')
sage: X.<x,y,z> = M.chart()
sage: A2 = M.diff_form_module(2)
sage: U = M.open_subset('U', coord_def = {X: z<0})
sage: A2U = U.diff_form_module(2)
sage: A2U._coerce_map_from_(A2)
True
sage: A2._coerce_map_from_(A2U)
False
sage: A1 = M.diff_form_module(1)
sage: A2U._coerce_map_from_(A1)
False
sage: A1._coerce_map_from_(M.tensor_field_module((0,1)))
True
sage: A1._coerce_map_from_(M.tensor_field_module((1,0)))
False
"""
if isinstance(other, (DiffFormModule, DiffFormFreeModule)):
# coercion by domain restriction
return (self._degree == other._degree
and self._domain.is_subset(other._domain)
and self._ambient_domain.is_subset(other._ambient_domain))
from sage.manifolds.differentiable.tensorfield_module import TensorFieldFreeModule
if isinstance(other, TensorFieldFreeModule):
# coercion of a type-(0,1) tensor to a linear form
return (self._fmodule is other._fmodule and self._degree == 1
and other.tensor_type() == (0,1))
return False
#### End of Parent methods
def _repr_(self):
r"""
Return a string representation of ``self``.
TESTS::
sage: M = Manifold(3, 'M')
sage: X.<x,y,z> = M.chart()
sage: A = M.diff_form_module(2)
sage: A
Free module Omega^2(M) of 2-forms on
the 3-dimensional differentiable manifold M
"""
description = "Free module "
if self._name is not None:
description += self._name + " "
description += "of {}-forms ".format(self._degree)
if self._dest_map is self._domain.identity_map():
description += "on the {}".format(self._domain)
else:
description += "along the {} mapped into the {}".format(
self._domain, self._ambient_domain)
return description
| 37.900881
| 95
| 0.574301
| 4,720
| 34,414
| 4.072881
| 0.08178
| 0.016906
| 0.062214
| 0.081357
| 0.792239
| 0.767062
| 0.750104
| 0.729921
| 0.700114
| 0.669216
| 0
| 0.020864
| 0.3106
| 34,414
| 907
| 96
| 37.942668
| 0.788114
| 0.625617
| 0
| 0.610329
| 0
| 0
| 0.053554
| 0.004181
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061033
| false
| 0
| 0.046948
| 0
| 0.244131
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0ad343fbd5642cd048dddd939e01ff7bdf5dedbc
| 159
|
py
|
Python
|
wsgi.py
|
shiroyuki/2019-cfp
|
90c20ad01c19ddf17b0bfd1f96b264c715456c01
|
[
"BSD-3-Clause"
] | null | null | null |
wsgi.py
|
shiroyuki/2019-cfp
|
90c20ad01c19ddf17b0bfd1f96b264c715456c01
|
[
"BSD-3-Clause"
] | 6
|
2019-04-27T16:48:33.000Z
|
2019-08-06T20:28:23.000Z
|
wsgi.py
|
shiroyuki/2019-cfp
|
90c20ad01c19ddf17b0bfd1f96b264c715456c01
|
[
"BSD-3-Clause"
] | 2
|
2019-08-06T15:23:57.000Z
|
2019-08-21T23:16:01.000Z
|
from yakbak.core import create_app
from yakbak.settings import load_settings_from_env
settings = load_settings_from_env()
application = create_app(settings)
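# Minimal invocation sketch: the `application` object above is a standard WSGI
# callable, so any WSGI server can serve it; the gunicorn command below is an
# illustrative assumption, not part of the project's documented setup.
#   gunicorn wsgi:application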
| 22.714286
| 50
| 0.849057
| 23
| 159
| 5.521739
| 0.434783
| 0.15748
| 0.251969
| 0.299213
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.100629
| 159
| 6
| 51
| 26.5
| 0.888112
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
0ada11418717237a249c6ece10f69a6b0979adfd
| 19
|
py
|
Python
|
bc/__init__.py
|
fxxing/ljtool
|
9d4e2d6890e586657ea7f5e1e3129eaf2c205907
|
[
"MIT"
] | 1
|
2020-04-15T19:09:45.000Z
|
2020-04-15T19:09:45.000Z
|
bc/__init__.py
|
fxxing/ljtool
|
9d4e2d6890e586657ea7f5e1e3129eaf2c205907
|
[
"MIT"
] | null | null | null |
bc/__init__.py
|
fxxing/ljtool
|
9d4e2d6890e586657ea7f5e1e3129eaf2c205907
|
[
"MIT"
] | null | null | null |
# LuaJit byte code
| 9.5
| 18
| 0.736842
| 3
| 19
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 19
| 1
| 19
| 19
| 0.933333
| 0.842105
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0afa81d31dc7e002a48591fb2cf387ee3abac591
| 87
|
py
|
Python
|
pylibgen/exceptions.py
|
solomond6/pylibgen
|
b515ac107ef61d79b7ce6c348b849009f43f7969
|
[
"MIT"
] | 66
|
2017-03-08T00:21:42.000Z
|
2019-04-09T21:14:29.000Z
|
pylibgen/exceptions.py
|
papis/pylibgen
|
b9a7b8058acedeaac71995ea3f915ce5eb37def5
|
[
"MIT"
] | 7
|
2017-05-25T02:46:17.000Z
|
2019-01-12T18:16:59.000Z
|
pylibgen/exceptions.py
|
papis/pylibgen
|
b9a7b8058acedeaac71995ea3f915ce5eb37def5
|
[
"MIT"
] | 14
|
2019-05-28T17:57:39.000Z
|
2021-08-10T03:06:59.000Z
|
class LibraryException(Exception):
pass
class BookException(Exception):
pass
| 12.428571
| 34
| 0.747126
| 8
| 87
| 8.125
| 0.625
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183908
| 87
| 6
| 35
| 14.5
| 0.915493
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
7c22c03675e0c844a3ddfd44a772a4f4866ebd59
| 30
|
py
|
Python
|
utils/custom_errors.py
|
ChrissisCodeXD/Hikari-TestProject
|
236c8fc9081172d9edff6d629e5d11c5abe64205
|
[
"MIT"
] | null | null | null |
utils/custom_errors.py
|
ChrissisCodeXD/Hikari-TestProject
|
236c8fc9081172d9edff6d629e5d11c5abe64205
|
[
"MIT"
] | null | null | null |
utils/custom_errors.py
|
ChrissisCodeXD/Hikari-TestProject
|
236c8fc9081172d9edff6d629e5d11c5abe64205
|
[
"MIT"
] | null | null | null |
class WrongDuration(Exception):
    # Custom errors must derive from Exception so they can be raised.
    pass
| 10
| 20
| 0.733333
| 3
| 30
| 7.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.233333
| 30
| 2
| 21
| 15
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
7c2376f8050758bdb4097195234463c3cc8cb9da
| 188
|
py
|
Python
|
pkgs/conda-manager-0.3.1-py27_0/lib/python2.7/site-packages/conda_manager/api/__init__.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 25
|
2015-03-24T20:50:12.000Z
|
2021-12-29T02:35:57.000Z
|
pkgs/conda-manager-0.3.1-py27_0/lib/python2.7/site-packages/conda_manager/api/__init__.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 71
|
2015-03-25T22:35:21.000Z
|
2021-03-30T18:19:01.000Z
|
pkgs/conda-manager-0.3.1-py27_0/lib/python2.7/site-packages/conda_manager/api/__init__.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 12
|
2015-05-28T16:38:30.000Z
|
2021-08-11T15:03:16.000Z
|
# -*- coding: utf-8 -*-
"""
This module contains the main Anaconda API plus the external and adapted APIs
used.
"""
# Local imports
from conda_manager.api.manager_api import ManagerAPI
| 18.8
| 77
| 0.734043
| 27
| 188
| 5.037037
| 0.851852
| 0.147059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006329
| 0.159574
| 188
| 9
| 78
| 20.888889
| 0.85443
| 0.643617
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
7c23c406b118404d56a9edbc45234d4d79c8df8b
| 165
|
py
|
Python
|
asados/templatetags/getfield.py
|
TenStrings/organizador-de-asados
|
5f8ede2ca5c8f9d2faf7fec316c4014142113464
|
[
"MIT"
] | null | null | null |
asados/templatetags/getfield.py
|
TenStrings/organizador-de-asados
|
5f8ede2ca5c8f9d2faf7fec316c4014142113464
|
[
"MIT"
] | null | null | null |
asados/templatetags/getfield.py
|
TenStrings/organizador-de-asados
|
5f8ede2ca5c8f9d2faf7fec316c4014142113464
|
[
"MIT"
] | null | null | null |
from django import template
register = template.Library()
@register.simple_tag
def getfield(an_object, an_attribute):
return getattr(an_object, an_attribute)
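# Minimal template usage sketch; the object and attribute names below are
# hypothetical examples, not taken from the project:
#   {% load getfield %}
#   {% getfield some_object "some_attribute" %}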
| 18.333333
| 43
| 0.793939
| 22
| 165
| 5.727273
| 0.681818
| 0.126984
| 0.15873
| 0.301587
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.127273
| 165
| 8
| 44
| 20.625
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0.2
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
7c33f04a92c7ec7d39745a702372a685e0762c61
| 97
|
py
|
Python
|
photonqat/Fockbase/__init__.py
|
ryosukehata/Photonqat
|
d5e320d3cc9ed94f6d63b1721f6871f13a0e6ea7
|
[
"Apache-2.0"
] | 25
|
2018-09-16T22:54:48.000Z
|
2019-02-22T01:21:30.000Z
|
blueqat/photonqat/Fockbase/__init__.py
|
mdrft/blueqat
|
6c5f26b377bc3ce0d02adec8b9132d70870b3d95
|
[
"Apache-2.0"
] | 22
|
2018-09-20T02:47:56.000Z
|
2019-02-08T05:25:30.000Z
|
blueqat/photonqat/Fockbase/__init__.py
|
mdrft/blueqat
|
6c5f26b377bc3ce0d02adec8b9132d70870b3d95
|
[
"Apache-2.0"
] | 5
|
2018-10-23T04:56:04.000Z
|
2019-02-13T14:02:31.000Z
|
from .bosonicLadder import *
from .gates import *
from .states import *
from .WignerFunc import *
| 24.25
| 28
| 0.762887
| 12
| 97
| 6.166667
| 0.5
| 0.405405
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.154639
| 97
| 4
| 29
| 24.25
| 0.902439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
7ca9b89a02349f54607053532e12ff16005d1451
| 486
|
py
|
Python
|
scripts/Logger.py
|
alexander-danilenko/ubuntu-offline-driver-install
|
c1954c6606fd8a137fa7f63acd57910fd682ee40
|
[
"MIT"
] | null | null | null |
scripts/Logger.py
|
alexander-danilenko/ubuntu-offline-driver-install
|
c1954c6606fd8a137fa7f63acd57910fd682ee40
|
[
"MIT"
] | null | null | null |
scripts/Logger.py
|
alexander-danilenko/ubuntu-offline-driver-install
|
c1954c6606fd8a137fa7f63acd57910fd682ee40
|
[
"MIT"
] | null | null | null |
class Logger:
# Logger channel.
channel: str
def __init__(self, channel: str):
self.channel = channel
def log(self, message):
print(f'\033[92m[{self.channel}] {message}\033[0m')
def info(self, message):
print(f'\033[94m[{self.channel}] {message}\033[0m')
def warning(self, message):
print(f'\033[93m[{self.channel}] {message}\033[0m')
def error(self, message):
print(f'\033[91m[{self.channel}] {message}\033[0m')
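# Minimal usage sketch; the channel name and the messages are made-up examples.
if __name__ == '__main__':
    demo = Logger('demo')
    demo.log('task finished')       # green
    demo.info('loading config')     # blue
    demo.warning('low disk space')  # yellow
    demo.error('task failed')       # red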
| 25.578947
| 59
| 0.600823
| 66
| 486
| 4.363636
| 0.30303
| 0.229167
| 0.222222
| 0.236111
| 0.628472
| 0.270833
| 0
| 0
| 0
| 0
| 0
| 0.094987
| 0.220165
| 486
| 18
| 60
| 27
| 0.664908
| 0.030864
| 0
| 0
| 0
| 0
| 0.34968
| 0.204691
| 0
| 0
| 0
| 0
| 0
| 1
| 0.416667
| false
| 0
| 0
| 0
| 0.583333
| 0.333333
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
7cb5f6cf3a0408d680129d61bc13788fefd5880e
| 47
|
py
|
Python
|
test/pipeviz/test_pipeviz.py
|
roryk/bipy
|
9fbe7074b7c559a7c6a4ae256bbac54379d6538e
|
[
"MIT"
] | 1
|
2015-01-24T21:55:02.000Z
|
2015-01-24T21:55:02.000Z
|
test/pipeviz/test_pipeviz.py
|
roryk/bipy
|
9fbe7074b7c559a7c6a4ae256bbac54379d6538e
|
[
"MIT"
] | null | null | null |
test/pipeviz/test_pipeviz.py
|
roryk/bipy
|
9fbe7074b7c559a7c6a4ae256bbac54379d6538e
|
[
"MIT"
] | 9
|
2015-05-29T02:43:43.000Z
|
2020-05-21T18:59:10.000Z
|
import yaml
import unittest
from bipy.pipeline
| 11.75
| 18
| 0.851064
| 7
| 47
| 5.714286
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12766
| 47
| 3
| 19
| 15.666667
| 0.97561
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.666667
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
6b114a330e734c56b81d137ba9910596dc0022f5
| 1,172
|
py
|
Python
|
dev/configuration.py
|
julien-lange/qcover
|
8b4872f7533b75ce265ddfb7eb077e681169b89d
|
[
"Apache-2.0"
] | 8
|
2016-10-06T14:06:45.000Z
|
2021-12-23T06:58:39.000Z
|
dev/configuration.py
|
julien-lange/qcover
|
8b4872f7533b75ce265ddfb7eb077e681169b89d
|
[
"Apache-2.0"
] | null | null | null |
dev/configuration.py
|
julien-lange/qcover
|
8b4872f7533b75ce265ddfb7eb077e681169b89d
|
[
"Apache-2.0"
] | 6
|
2017-08-23T15:02:53.000Z
|
2021-12-29T07:23:19.000Z
|
# Copyright 2017 Michael Blondin, Alain Finkel, Christoph Haase, Serge Haddad
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Configurations must be immutable
class Configuration:
def __init__(self):
pass
def __eq__(self, other):
raise NotImplementedError()
def __ne__(self, other):
raise NotImplementedError()
def __gt__(self, other):
raise NotImplementedError()
def __ge__(self, other):
raise NotImplementedError()
def __lt__(self, other):
raise NotImplementedError()
def __le__(self, other):
raise NotImplementedError()
def __hash__(self):
raise NotImplementedError()
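# Minimal subclass sketch: a hypothetical immutable configuration backed by a
# tuple of counter values; only some of the required comparisons are shown.
class ExampleConfiguration(Configuration):
    def __init__(self, values):
        super().__init__()
        self._values = tuple(values)
    def __eq__(self, other):
        return self._values == other._values
    def __le__(self, other):
        return all(a <= b for a, b in zip(self._values, other._values))
    def __hash__(self):
        return hash(self._values)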
| 29.3
| 77
| 0.713311
| 147
| 1,172
| 5.469388
| 0.605442
| 0.208955
| 0.104478
| 0.246269
| 0.268657
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008705
| 0.21587
| 1,172
| 39
| 78
| 30.051282
| 0.866159
| 0.537543
| 0
| 0.411765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.470588
| false
| 0.058824
| 0
| 0
| 0.529412
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
861c2223c5c281c74f7db9963bd5eb202351e0a6
| 11,423
|
py
|
Python
|
aiida/backends/tests/cmdline/params/types/test_plugin.py
|
borellim/aiida_core
|
eebef392c81e8b130834a92e1d7abf5e2e30b3ce
|
[
"BSD-2-Clause"
] | 1
|
2019-03-15T10:37:53.000Z
|
2019-03-15T10:37:53.000Z
|
aiida/backends/tests/cmdline/params/types/test_plugin.py
|
odarbelaeze/aiida_core
|
934b4ccdc73a993f2a6656caf516500470e3da08
|
[
"BSD-2-Clause"
] | null | null | null |
aiida/backends/tests/cmdline/params/types/test_plugin.py
|
odarbelaeze/aiida_core
|
934b4ccdc73a993f2a6656caf516500470e3da08
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Tests for the `PluginParamType`."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import click
from aiida.backends.testbase import AiidaTestCase
from aiida.cmdline.params.types.plugin import PluginParamType
from aiida.plugins.entry_point import get_entry_point_from_string
class TestPluginParamType(AiidaTestCase):
"""Tests for the `PluginParamType`."""
def test_group_definition(self):
"""
Test the various accepted syntaxes of defining supported entry point groups. Both single
values as well as tuples should be allowed. The `aiida.` prefix should also be optional.
"""
param = PluginParamType(group='calculations')
self.assertIn('aiida.calculations', param.groups)
self.assertTrue(len(param.groups), 1)
param = PluginParamType(group='aiida.calculations')
self.assertIn('aiida.calculations', param.groups)
self.assertTrue(len(param.groups), 1)
param = PluginParamType(group=('calculations',))
self.assertIn('aiida.calculations', param.groups)
self.assertTrue(len(param.groups), 1)
param = PluginParamType(group=('aiida.calculations',))
self.assertIn('aiida.calculations', param.groups)
self.assertTrue(len(param.groups), 1)
param = PluginParamType(group=('aiida.calculations', 'aiida.data'))
self.assertIn('aiida.calculations', param.groups)
self.assertIn('aiida.data', param.groups)
self.assertTrue(len(param.groups), 2)
param = PluginParamType(group=('aiida.calculations', 'data'))
self.assertIn('aiida.calculations', param.groups)
self.assertIn('aiida.data', param.groups)
self.assertTrue(len(param.groups), 2)
def test_get_entry_point_from_string(self):
"""
Test the functionality of the get_entry_point_from_string which will take an entry point string
and try to map it onto a valid entry point that is part of the groups defined for the parameter.
"""
param = PluginParamType(group='transports')
entry_point = get_entry_point_from_string('aiida.transports:ssh')
# Invalid entry point strings
with self.assertRaises(ValueError):
param.get_entry_point_from_string('aiida.transport:ssh')
with self.assertRaises(ValueError):
param.get_entry_point_from_string('aiid.transports:ssh')
with self.assertRaises(ValueError):
param.get_entry_point_from_string('aiida..transports:ssh')
# Unsupported entry points for all formats
with self.assertRaises(ValueError):
param.get_entry_point_from_string('aiida.data:structure')
with self.assertRaises(ValueError):
param.get_entry_point_from_string('data:structure')
with self.assertRaises(ValueError):
param.get_entry_point_from_string('structure')
# Non-existent entry points for all formats
with self.assertRaises(ValueError):
param.get_entry_point_from_string('aiida.transports:not_existent')
with self.assertRaises(ValueError):
param.get_entry_point_from_string('transports:not_existent')
with self.assertRaises(ValueError):
param.get_entry_point_from_string('not_existent')
# Valid entry point strings
self.assertEqual(param.get_entry_point_from_string('aiida.transports:ssh').name, entry_point.name)
self.assertEqual(param.get_entry_point_from_string('transports:ssh').name, entry_point.name)
self.assertEqual(param.get_entry_point_from_string('ssh').name, entry_point.name)
def test_get_entry_point_from_ambiguous(self):
"""
Test the functionality of the get_entry_point_from_string which will take an entry point string
and try to map it onto a valid entry point that is part of the groups defined for the parameter.
"""
param = PluginParamType(group=('aiida.calculations', 'aiida.parsers'))
entry_point = get_entry_point_from_string('aiida.calculations:arithmetic.add')
# Both groups contain entry point `arithmetic.add` so passing only name is ambiguous and should raise
with self.assertRaises(ValueError):
param.get_entry_point_from_string('arithmetic.add')
# Passing PARTIAL or FULL should allow entry point to be returned
self.assertEqual(param.get_entry_point_from_string('aiida.calculations:arithmetic.add').name, entry_point.name)
self.assertEqual(param.get_entry_point_from_string('calculations:arithmetic.add').name, entry_point.name)
def test_convert(self):
"""
Test that the convert method returns the correct entry point
"""
param = PluginParamType(group=('transports', 'data'))
entry_point = param.convert('aiida.transports:ssh', None, None)
self.assertEqual(entry_point.name, 'ssh')
# self.assertTrue(isinstance(entry_point, EntryPoint))
entry_point = param.convert('transports:ssh', None, None)
self.assertEqual(entry_point.name, 'ssh')
# self.assertTrue(isinstance(entry_point, EntryPoint))
entry_point = param.convert('ssh', None, None)
self.assertEqual(entry_point.name, 'ssh')
# self.assertTrue(isinstance(entry_point, EntryPoint))
entry_point = param.convert('aiida.data:structure', None, None)
self.assertEqual(entry_point.name, 'structure')
# self.assertTrue(isinstance(entry_point, EntryPoint))
entry_point = param.convert('data:structure', None, None)
self.assertEqual(entry_point.name, 'structure')
# self.assertTrue(isinstance(entry_point, EntryPoint))
entry_point = param.convert('structure', None, None)
self.assertEqual(entry_point.name, 'structure')
# self.assertTrue(isinstance(entry_point, EntryPoint))
with self.assertRaises(click.BadParameter):
param.convert('not_existent', None, None)
def test_convert_load(self):
"""
Test that the convert method returns the loaded entry point if load=True at construction time of parameter
"""
param = PluginParamType(group=('transports', 'data'), load=True)
entry_point_ssh = get_entry_point_from_string('aiida.transports:ssh')
entry_point_structure = get_entry_point_from_string('aiida.data:structure')
entry_point = param.convert('aiida.transports:ssh', None, None)
self.assertTrue(entry_point, entry_point_ssh)
entry_point = param.convert('transports:ssh', None, None)
self.assertTrue(entry_point, entry_point_ssh)
entry_point = param.convert('ssh', None, None)
self.assertTrue(entry_point, entry_point_ssh)
entry_point = param.convert('aiida.data:structure', None, None)
self.assertTrue(entry_point, entry_point_structure)
entry_point = param.convert('data:structure', None, None)
self.assertTrue(entry_point, entry_point_structure)
entry_point = param.convert('structure', None, None)
self.assertTrue(entry_point, entry_point_structure)
with self.assertRaises(click.BadParameter):
param.convert('not_existent', None, None)
def test_complete_single_group(self):
"""
Test the complete method which is used for auto completion when there is only a single valid group, which
means there should never be ambiguity and specifying a full entry point string is not necessary, however,
when the user decides to use either a FULL or PARTIAL string anyway, the completion should match that syntax
"""
param = PluginParamType(group=('transports'))
entry_point_minimal = 'ssh'
entry_point_partial = 'transports:ssh'
entry_point_full = 'aiida.transports:ssh'
options = [item[0] for item in param.complete(None, 'ss')]
self.assertIn(entry_point_minimal, options)
options = [item[0] for item in param.complete(None, 'ssh')]
self.assertIn(entry_point_minimal, options)
options = [item[0] for item in param.complete(None, 'transports:ss')]
self.assertIn(entry_point_partial, options)
options = [item[0] for item in param.complete(None, 'transports:ssh')]
self.assertIn(entry_point_partial, options)
options = [item[0] for item in param.complete(None, 'aiida.transports:ss')]
self.assertIn(entry_point_full, options)
options = [item[0] for item in param.complete(None, 'aiida.transports:ssh')]
self.assertIn(entry_point_full, options)
def test_complete_amibguity(self):
"""
Test the complete method which is used for auto completion when the supported groups share an entry point
with the same name, which can lead to ambiguity. In this case the autocomplete should always return the
possibilities in the FULL entry point string format. When the user tries to autocomplete
"""
param = PluginParamType(group=('aiida.calculations', 'aiida.parsers'))
entry_point_full_calculations = 'aiida.calculations:arithmetic.add'
entry_point_full_parsers = 'aiida.parsers:arithmetic.add'
options = [item[0] for item in param.complete(None, 'aiida.calculations:arith')]
self.assertIn(entry_point_full_calculations, options)
options = [item[0] for item in param.complete(None, 'aiida.calculations:arithmetic.add')]
self.assertIn(entry_point_full_calculations, options)
options = [item[0] for item in param.complete(None, 'aiida.parsers:arith')]
self.assertIn(entry_point_full_parsers, options)
options = [item[0] for item in param.complete(None, 'aiida.parsers:arithmetic.add')]
self.assertIn(entry_point_full_parsers, options)
# PARTIAL or MINIMAL string formats will not be autocompleted
options = [item[0] for item in param.complete(None, 'parsers:arith')]
self.assertNotIn(entry_point_full_calculations, options)
self.assertNotIn(entry_point_full_parsers, options)
options = [item[0] for item in param.complete(None, 'parsers:arithmetic.add')]
self.assertNotIn(entry_point_full_calculations, options)
self.assertNotIn(entry_point_full_parsers, options)
options = [item[0] for item in param.complete(None, 'arith')]
self.assertNotIn(entry_point_full_calculations, options)
self.assertNotIn(entry_point_full_parsers, options)
options = [item[0] for item in param.complete(None, 'arithmetic.add')]
self.assertNotIn(entry_point_full_calculations, options)
self.assertNotIn(entry_point_full_parsers, options)
| 46.815574
| 119
| 0.682833
| 1,371
| 11,423
| 5.512764
| 0.140044
| 0.141572
| 0.041281
| 0.053983
| 0.76634
| 0.744377
| 0.716327
| 0.700318
| 0.672665
| 0.63059
| 0
| 0.002323
| 0.208439
| 11,423
| 243
| 120
| 47.00823
| 0.833555
| 0.223847
| 0
| 0.533333
| 0
| 0
| 0.149474
| 0.039466
| 0
| 0
| 0
| 0
| 0.451852
| 1
| 0.051852
| false
| 0
| 0.051852
| 0
| 0.111111
| 0.007407
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
862c08d1d4ac40f9d57c1d9c3725b9935fcf5b8f
| 148
|
py
|
Python
|
src/gt4sd/algorithms/generation/polymer_blocks/__init__.py
|
hhhsu0825/gt4sd-core
|
4a1fe9da58d2f33bba2fba64604427e037ad7a46
|
[
"MIT"
] | 1
|
2022-02-22T02:06:10.000Z
|
2022-02-22T02:06:10.000Z
|
src/gt4sd/algorithms/generation/polymer_blocks/__init__.py
|
hhhsu0825/gt4sd-core
|
4a1fe9da58d2f33bba2fba64604427e037ad7a46
|
[
"MIT"
] | 12
|
2022-02-21T12:59:24.000Z
|
2022-02-22T12:25:49.000Z
|
src/gt4sd/algorithms/generation/polymer_blocks/__init__.py
|
hhhsu0825/gt4sd-core
|
4a1fe9da58d2f33bba2fba64604427e037ad7a46
|
[
"MIT"
] | null | null | null |
"""PolymerBlocks initialization."""
from .core import PolymerBlocks, PolymerBlocksGenerator
__all__ = ["PolymerBlocks", "PolymerBlocksGenerator"]
| 24.666667
| 55
| 0.797297
| 10
| 148
| 11.4
| 0.7
| 0.614035
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.087838
| 148
| 5
| 56
| 29.6
| 0.844444
| 0.195946
| 0
| 0
| 0
| 0
| 0.309735
| 0.19469
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
864676ff1e262ec69ad93579bdc6b2491cc345ea
| 1,040
|
py
|
Python
|
pwndbg/color/backtrace.py
|
dev2ero/pwndbg
|
dbaa3806fd462d4840cded98a155275ea58df7f8
|
[
"MIT"
] | 4,461
|
2016-05-30T17:14:06.000Z
|
2022-03-31T11:19:49.000Z
|
pwndbg/color/backtrace.py
|
dev2ero/pwndbg
|
dbaa3806fd462d4840cded98a155275ea58df7f8
|
[
"MIT"
] | 789
|
2016-05-27T21:17:52.000Z
|
2022-03-31T12:37:06.000Z
|
pwndbg/color/backtrace.py
|
dev2ero/pwndbg
|
dbaa3806fd462d4840cded98a155275ea58df7f8
|
[
"MIT"
] | 844
|
2016-06-01T00:37:04.000Z
|
2022-03-31T10:39:30.000Z
|
import pwndbg.color.theme as theme
import pwndbg.config as config
from pwndbg.color import generateColorFunction
config_prefix = theme.Parameter('backtrace-prefix', '►', 'prefix for current backtrace label')
config_prefix_color = theme.ColoredParameter('backtrace-prefix-color', 'none', 'color for prefix of current backtrace label')
config_address_color = theme.ColoredParameter('backtrace-address-color', 'none', 'color for backtrace (address)')
config_symbol_color = theme.ColoredParameter('backtrace-symbol-color', 'none', 'color for backtrace (symbol)')
config_label_color = theme.ColoredParameter('backtrace-frame-label-color', 'none', 'color for backtrace (frame label)')
def prefix(x):
return generateColorFunction(config.backtrace_prefix_color)(x)
def address(x):
return generateColorFunction(config.backtrace_address_color)(x)
def symbol(x):
return generateColorFunction(config.backtrace_symbol_color)(x)
def frame_label(x):
return generateColorFunction(config.backtrace_frame_label_color)(x)
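# Minimal usage sketch; the argument strings are made-up example values, and
# each call simply wraps its input in the configured ANSI color codes.
#   prefix('►')
#   address('0x7ffff7dd7000')
#   symbol('main+24')
#   frame_label('#0')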
| 47.272727
| 127
| 0.785577
| 127
| 1,040
| 6.291339
| 0.188976
| 0.062578
| 0.130163
| 0.175219
| 0.312891
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105769
| 1,040
| 21
| 128
| 49.52381
| 0.858065
| 0
| 0
| 0
| 1
| 0
| 0.282692
| 0.090385
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.1875
| 0.25
| 0.6875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
864abd68598639b08451346f7023f982c1bf4be7
| 1,511
|
py
|
Python
|
project/locations/admin.py
|
mrclro/kbxrec
|
49728303387d0ce25588461c3148d9c36af56101
|
[
"MIT"
] | null | null | null |
project/locations/admin.py
|
mrclro/kbxrec
|
49728303387d0ce25588461c3148d9c36af56101
|
[
"MIT"
] | null | null | null |
project/locations/admin.py
|
mrclro/kbxrec
|
49728303387d0ce25588461c3148d9c36af56101
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import *
class CountryAdmin(admin.ModelAdmin):
prepopulated_fields = {"iso": ("name",)}
list_display = ('name', 'nationality', 'iso',)
ordering = ('name',)
admin.site.register(Country, CountryAdmin)
class RegionAdmin(admin.ModelAdmin):
prepopulated_fields = {"iso": ("name",)}
list_display = ('name', 'iso', 'country',)
ordering = ('iso',)
admin.site.register(Region, RegionAdmin)
class CityAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
list_display = ('name', 'region', 'get_country',)
def get_country(self, obj):
return obj.region.country
get_country.short_description = 'Country'
search_fields = ['name', 'slug',]
ordering = ('slug',)
admin.site.register(City, CityAdmin)
class VenueAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
list_display = ('name', 'city', 'get_country',)
def get_country(self, obj):
return obj.city.region.country
get_country.short_description = 'Country'
search_fields = ['name', 'slug',]
ordering = ('slug',)
admin.site.register(Venue, VenueAdmin)
class TeamAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
list_display = ('name', 'city', 'get_country',)
def get_country(self, obj):
return obj.city.region.country
get_country.short_description = 'Country'
search_fields = ['name', 'slug',]
ordering = ('slug',)
admin.site.register(Team, TeamAdmin)
| 30.22
| 53
| 0.665122
| 167
| 1,511
| 5.868263
| 0.221557
| 0.091837
| 0.137755
| 0.168367
| 0.710204
| 0.710204
| 0.710204
| 0.710204
| 0.710204
| 0.50102
| 0
| 0
| 0.168101
| 1,511
| 49
| 54
| 30.836735
| 0.779634
| 0
| 0
| 0.538462
| 0
| 0
| 0.12773
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.051282
| 0.076923
| 0.794872
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
86661a45f239d37cb8aa9b20df3215409d65c0d4
| 167
|
py
|
Python
|
hashphrase/urls.py
|
Hobsons/django-hashphrase
|
715db8d955b609f30f5e7aeba0563fd13bebb580
|
[
"BSD-2-Clause"
] | null | null | null |
hashphrase/urls.py
|
Hobsons/django-hashphrase
|
715db8d955b609f30f5e7aeba0563fd13bebb580
|
[
"BSD-2-Clause"
] | null | null | null |
hashphrase/urls.py
|
Hobsons/django-hashphrase
|
715db8d955b609f30f5e7aeba0563fd13bebb580
|
[
"BSD-2-Clause"
] | 2
|
2016-12-08T14:23:55.000Z
|
2021-04-27T19:37:26.000Z
|
from django.conf.urls import *
urlpatterns = patterns('',
(r'^test/', 'hashphrase.views.hash_link_test'),
(r'^(?P<key>.*)/$', 'hashphrase.views.hash_link'),
)
| 27.833333
| 54
| 0.634731
| 21
| 167
| 4.904762
| 0.714286
| 0.291262
| 0.368932
| 0.446602
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11976
| 167
| 6
| 55
| 27.833333
| 0.70068
| 0
| 0
| 0
| 0
| 0
| 0.458333
| 0.339286
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
867250c4ce786eabbcb331f49d97d645de3a51f9
| 37
|
py
|
Python
|
spelregels.py
|
robijn112/opseilen
|
f17a95cd5166a0ae4ca0929081f4f0311e6b9f82
|
[
"MIT"
] | 2
|
2017-01-17T12:53:41.000Z
|
2017-02-02T08:59:10.000Z
|
spelregels.py
|
robijn112/opseilen
|
f17a95cd5166a0ae4ca0929081f4f0311e6b9f82
|
[
"MIT"
] | null | null | null |
spelregels.py
|
robijn112/opseilen
|
f17a95cd5166a0ae4ca0929081f4f0311e6b9f82
|
[
"MIT"
] | null | null | null |
input("Regels tijdens het spel")  # prompt text is Dutch for "Rules during the game"
| 37
| 37
| 0.756757
| 6
| 37
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081081
| 37
| 1
| 37
| 37
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0.605263
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
86d600b6b602915d311030320a06bd1893273599
| 87
|
py
|
Python
|
src/prefect/environments/execution/fargate/__init__.py
|
concreted/prefect
|
dd732f5990ee2b0f3d816adb285168fd63b239e4
|
[
"Apache-2.0"
] | 8,633
|
2019-03-23T17:51:03.000Z
|
2022-03-31T22:17:42.000Z
|
src/prefect/environments/execution/fargate/__init__.py
|
concreted/prefect
|
dd732f5990ee2b0f3d816adb285168fd63b239e4
|
[
"Apache-2.0"
] | 3,903
|
2019-03-23T19:11:21.000Z
|
2022-03-31T23:21:23.000Z
|
src/prefect/environments/execution/fargate/__init__.py
|
ngriffiths13/prefect
|
7f5613abcb182494b7dc12159277c3bc5f3c9898
|
[
"Apache-2.0"
] | 937
|
2019-03-23T18:49:44.000Z
|
2022-03-31T21:45:13.000Z
|
from prefect.environments.execution.fargate.fargate_task import FargateTaskEnvironment
| 43.5
| 86
| 0.908046
| 9
| 87
| 8.666667
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045977
| 87
| 1
| 87
| 87
| 0.939759
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
86f00b1ee99a7f3cf182b885f1212dbde7f87bb0
| 179
|
py
|
Python
|
checkov/common/typing.py
|
niradler/checkov
|
2628c6f28a5604efe3877d6eacc3044d2b66b7b1
|
[
"Apache-2.0"
] | 4,013
|
2019-12-09T13:16:54.000Z
|
2022-03-31T14:31:01.000Z
|
checkov/common/typing.py
|
niradler/checkov
|
2628c6f28a5604efe3877d6eacc3044d2b66b7b1
|
[
"Apache-2.0"
] | 1,258
|
2019-12-17T09:55:51.000Z
|
2022-03-31T19:17:17.000Z
|
checkov/common/typing.py
|
niradler/checkov
|
2628c6f28a5604efe3877d6eacc3044d2b66b7b1
|
[
"Apache-2.0"
] | 638
|
2019-12-19T08:57:38.000Z
|
2022-03-30T21:38:37.000Z
|
from typing import Optional
from typing_extensions import TypedDict
class _SkippedCheck(TypedDict, total=False):
bc_id: Optional[str]
id: str
suppress_comment: str
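# Minimal construction sketch: total=False makes every key optional; the
# values below are made-up examples, not real check data.
_example_skipped_check: _SkippedCheck = {"id": "CKV_EXAMPLE_1", "suppress_comment": "accepted risk"}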
| 17.9
| 44
| 0.765363
| 23
| 179
| 5.782609
| 0.652174
| 0.150376
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178771
| 179
| 9
| 45
| 19.888889
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
86fd0eb93872725491b3a5f1981378bdc597ec52
| 22,799
|
py
|
Python
|
cogs/logging.py
|
nickalaskreynolds/yin-bot
|
3f01f239532c1d30ee4f2f6825388cfce3d4a265
|
[
"MIT"
] | 17
|
2018-05-25T04:37:01.000Z
|
2021-09-18T03:38:05.000Z
|
cogs/logging.py
|
nickalaskreynolds/yin-bot
|
3f01f239532c1d30ee4f2f6825388cfce3d4a265
|
[
"MIT"
] | 42
|
2018-07-27T21:53:07.000Z
|
2019-12-10T22:17:25.000Z
|
cogs/logging.py
|
nickalaskreynolds/yin-bot
|
3f01f239532c1d30ee4f2f6825388cfce3d4a265
|
[
"MIT"
] | 20
|
2018-09-15T22:33:12.000Z
|
2021-02-19T05:43:58.000Z
|
"""This cog will handle logging all server actions to a specific channel."""
import discord
from discord.ext import commands
from .utils import checks, embeds
class Logging(commands.Cog):
"""General logging cog for guild."""
def __init__(self, bot):
"""Init method."""
super().__init__()
self.bot = bot
self.logger = bot.logger
@commands.group(hidden=True, aliases=['ldbc', 'get_these_errors_outta_here']) # noqa
@commands.is_owner()
async def log_db_cleaning(self, ctx):
"""Clean a deleted channel from the voice log and server log databases.""" # noqa
embed_title = f'Database Cleaning Tool'
if ctx.subcommand_passed is None:
local_embed = discord.Embed(
title=embed_title,
description="""Check the console for channel
errors and pass them as so:\n
ldbc *channel snowflake id*""",
color=0xCCCCCC
)
await ctx.send(embed=local_embed)
return
remove_id = ctx.subcommand_passed
was_log_removed = False
was_voice_removed = False
for guild in self.bot.guilds:
log_channel = await self.bot.pg_utils.get_logger_channels(guild.id)
voice_channel = await self.bot.pg_utils.get_voice_channels(guild.id) # noqa
for remove_id in log_channel:
await self.bot.pg_utils.rem_logger_channel(
guild.id, remove_id, self.bot.logger
)
was_log_removed = True
for remove_id in voice_channel:
await self.bot.pg_utils.rem_voice_channel(
guild.id, remove_id, self.bot.logger
)
was_voice_removed = True
if was_log_removed or was_voice_removed:
local_embed = discord.Embed(
title=embed_title,
description=f'Channel was removed from database',
color=0x419400
)
await ctx.send(embed=local_embed)
return
local_embed = discord.Embed(
title=embed_title,
description="""Channel not found in database,
or you did not give a valid channel id""",
color=0x651111
)
await ctx.send(embed=local_embed)
@commands.group()
@commands.guild_only()
@checks.is_admin()
async def logging(self, ctx):
"""Enable and disable logging to channel."""
if ctx.invoked_subcommand is None:
desc = ''
modlogs = await self.bot.pg_utils.get_logger_channels(
ctx.guild.id)
for channel in ctx.guild.channels:
if channel.id in modlogs:
desc += f'{channel.name} \n'
local_embed = discord.Embed(
title=f'Current log channel list is: ',
description=desc,
color=0x419400
)
await ctx.send(embed=local_embed)
@logging.command()
async def enable(self, ctx):
"""Add channel to the log channel list."""
added_channels = []
desc = ''
try:
success = await \
self.bot.pg_utils.add_logger_channel(
ctx.guild.id, ctx.message.channel.id, self.bot.logger
)
if success:
added_channels.append(ctx.message.channel.name)
if added_channels:
for channel in added_channels:
desc += f'{channel} \n'
local_embed = discord.Embed(
title=f'Channels added to log channel list:',
description=desc,
color=0x419400
)
self.bot.server_settings[ctx.guild.id]['logging_enabled']\
= True
else:
local_embed = discord.Embed(
title=f'Internal error, please contact @dashwav#7785',
description=' ',
color=0x651111
)
await ctx.send(embed=local_embed)
except Exception as e:
self.bot.logger.info(f'Error adding channels {e}')
local_embed = discord.Embed(
title=f'Internal issue, please contact @dashwav#7785',
description=' ',
color=0x651111
)
await ctx.send(embed=local_embed)
@logging.command(aliases=['rem'])
async def disable(self, ctx):
"""Remove channel from the log channel list."""
removed_channels = []
absent_channels = []
desc = ''
try:
try:
success = False
success = await \
self.bot.pg_utils.rem_logger_channel(
ctx.guild.id, ctx.message.channel.id, self.bot.logger
)
except ValueError:
absent_channels.append(ctx.message.channel.name)
if success:
removed_channels.append(ctx.message.channel.name)
if removed_channels:
for channel in removed_channels:
desc += f'{channel} \n'
local_embed = discord.Embed(
title=f'Channels removed from log channel list:',
description=desc,
color=0x419400
)
logs = await self.bot.pg_utils.get_logger_channels(
ctx.guild.id)
if not logs:
self.bot.server_settings[ctx.guild.id]['logging_enabled']\
= False
if absent_channels:
desc = ''
for channel in absent_channels:
desc += f'{channel}\n'
local_embed.add_field(
name='Channels not in log channel list :',
value=desc
)
elif absent_channels:
desc = ''
for channel in absent_channels:
desc += f'{channel}\n'
local_embed = discord.Embed(
title=f'Channels not in log channel list: ',
description=desc,
color=0x651111
)
else:
local_embed = discord.Embed(
title=f'Internal error, please contact @dashwav#7785',
description=' ',
color=0x651111
)
await ctx.send(embed=local_embed)
except Exception as e:
self.bot.logger.warning(f'Issue: {e}')
local_embed = discord.Embed(
title=f'Internal issue, please contact @dashwav#7785',
description=' ',
color=0x651111
)
await ctx.send(embed=local_embed)
@commands.group(aliases=['vclogs', 'prescence_logging'])
@commands.guild_only()
@checks.is_admin()
async def voice_logging(self, ctx):
"""Enable and disable logging to channel."""
if ctx.invoked_subcommand is None:
desc = ''
voicelogs = await self.bot.pg_utils.get_voice_channels(
ctx.guild.id)
for channel in ctx.guild.channels:
if channel.id in voicelogs:
desc += f'{channel.name} \n'
local_embed = discord.Embed(
title=f'Current voice log channel list is: ',
description=desc,
color=0x419400
)
await ctx.send(embed=local_embed)
@voice_logging.command(name='enable')
async def _enable(self, ctx):
"""Add channel to the voice log channel list."""
added_channels = []
desc = ''
try:
success = await \
self.bot.pg_utils.add_voice_channel(
ctx.guild.id, ctx.message.channel.id, self.bot.logger
)
if success:
added_channels.append(ctx.message.channel.name)
if added_channels:
for channel in added_channels:
desc += f'{channel} \n'
local_embed = discord.Embed(
title=f'Channels added to voice log channel list:',
description=desc,
color=0x419400
)
else:
local_embed = discord.Embed(
title=f'Internal error, please contact @dashwav#7785',
description=' ',
color=0x651111
)
await ctx.send(embed=local_embed)
except Exception as e:
self.bot.logger.info(f'Error adding channels {e}')
local_embed = discord.Embed(
title=f'Internal issue, please contact @dashwav#7785',
description=' ',
color=0x651111
)
await ctx.send(embed=local_embed)
@voice_logging.command(name='disable', aliases=['rem'])
async def _disable(self, ctx):
"""Remove channel from the voice log channel list."""
removed_channels = []
absent_channels = []
desc = ''
try:
try:
success = False
success = await \
self.bot.pg_utils.rem_voice_channel(
ctx.guild.id, ctx.message.channel.id, self.bot.logger
)
except ValueError:
absent_channels.append(ctx.message.channel.name)
if success:
removed_channels.append(ctx.message.channel.name)
if removed_channels:
for channel in removed_channels:
desc += f'{channel} \n'
local_embed = discord.Embed(
title=f'Channels removed from voice log channel list:',
description=desc,
color=0x419400
)
if absent_channels:
desc = ''
for channel in absent_channels:
desc += f'{channel}\n'
local_embed.add_field(
name='Channels not in voice log channel list :',
value=desc
)
elif absent_channels:
desc = ''
for channel in absent_channels:
desc += f'{channel}\n'
local_embed = discord.Embed(
title=f'Channels not in voice log channel list: ',
description=desc,
color=0x651111
)
else:
local_embed = discord.Embed(
title=f'Internal error, please contact @dashwav#7785',
description=' ',
color=0x651111
)
await ctx.send(embed=local_embed)
except Exception as e:
self.bot.logger.warning(f'Issue: {e}')
local_embed = discord.Embed(
title=f'Internal issue, please contact @dashwav#7785',
description=' ',
color=0x651111
)
await ctx.send(embed=local_embed)
@commands.Cog.listener()
async def on_member_ban(self, guild, user):
"""Send a message on user ban."""
if not self.bot.server_settings[guild.id]['logging_enabled']:
return
channels = await self.bot.pg_utils.get_logger_channels(
guild.id)
local_embed = embeds.LogBanEmbed(user)
for channel in channels:
try:
ch = self.bot.get_channel(channel)
await ch.send(embed=local_embed)
except Exception as e:
self.bot.logger.info(
f'Error logging user ban in channel {channel}'
f', error: {e}'
)
@commands.Cog.listener()
async def on_member_join(self, member):
"""Send a message on a user join."""
if not self.bot.server_settings[member.guild.id]['logging_enabled']:
return
channels = await self.bot.pg_utils.get_logger_channels(
member.guild.id)
local_embed = embeds.JoinEmbed(member)
for channel in channels:
try:
ch = self.bot.get_channel(channel)
await ch.send(embed=local_embed)
except Exception as e:
self.bot.logger.info(
f'Error logging user join in channel {channel}'
f', error: {e}'
)
@commands.Cog.listener()
async def on_member_remove(self, member):
"""Send message on a user leaving."""
if not self.bot.server_settings[member.guild.id]['logging_enabled']:
return
channels = await self.bot.pg_utils.get_logger_channels(
member.guild.id)
local_embed = embeds.LeaveEmbed(member)
for channel in channels:
try:
ch = self.bot.get_channel(channel)
await ch.send(embed=local_embed)
except Exception as e:
self.bot.logger.info(
f'Error logging user leave in channel {channel}'
f', error: {e}'
)
@commands.Cog.listener()
async def on_message_edit(self, before, after):
"""Send message on a user editing messages."""
if not self.bot.server_settings[before.guild.id]['logging_enabled']:
return
try:
if not before.content.strip() != after.content.strip():
return
channels = await self.bot.pg_utils.get_logger_channels(
before.guild.id)
try:
local_embed = embeds.MessageEditEmbed(
before.author,
before.channel.name,
before.content,
after.content
)
try:
for channel in channels:
ch = self.bot.get_channel(channel)
await ch.send(embed=local_embed)
except Exception as e:
self.bot.logger.warning(
f'Issue logging message edit in channel {channel}'
f', error: {e}'
)
except Exception as e:
self.bot.logger.warning(
f'Issue making embed for channel {channel}'
f', error: {e}'
)
except AttributeError:
pass
@commands.Cog.listener()
async def on_message_delete(self, message):
"""Send message on a user editing messages."""
if not self.bot.server_settings[message.guild.id]['logging_enabled']:
return
if message.author.bot:
return
channels = await self.bot.pg_utils.get_logger_channels(
message.guild.id)
try:
local_embed = embeds.MessageDeleteEmbed(
message.author,
message.channel.name,
message.content,
)
try:
for channel in channels:
ch = self.bot.get_channel(channel)
await ch.send(embed=local_embed)
except Exception as e:
self.bot.logger.warning(
f'Issue logging message delete in channel {channel}'
f', error: {e}'
)
except Exception as e:
self.bot.logger.warning(
f'Issue making embed for channel {channel}'
f', error: {e}'
)
@commands.Cog.listener()
async def on_member_update(self, before, after):
"""Send message on a user role or name update."""
if not self.bot.server_settings[before.guild.id]['logging_enabled']:
return
if before.roles == after.roles:
return
channels = await self.bot.pg_utils.get_logger_channels(
before.guild.id)
role_diff = set(after.roles) - (set(before.roles))
for role in role_diff:
local_embed = embeds.RoleAddEmbed(
after,
role.name
)
for channel in channels:
try:
ch = self.bot.get_channel(channel)
await ch.send(embed=local_embed)
except Exception as e:
self.bot.logger.info(
f'Error logging role change'
f' in channel {channel}, error: {e}'
)
role_diff = set(before.roles) - (set(after.roles))
for role in role_diff:
local_embed = embeds.RoleRemoveEmbed(
after,
role.name
)
for channel in channels:
try:
ch = self.bot.get_channel(channel)
await ch.send(embed=local_embed)
except Exception as e:
self.bot.logger.info(
f'Error logging role remove in'
f' channel {channel}, error: {e}'
)
@commands.Cog.listener()
async def on_user_update(self, before, after):
"""Send message on a username update."""
if before.name == after.name:
return
user_mutuals = []
for guild in self.bot.guilds:
if before in guild.members:
user_mutuals.append(guild.id)
extended_channels = []
for guild_id in user_mutuals:
extended_channels.extend(
await self.bot.pg_utils.get_logger_channels(
guild_id))
local_embed = embeds.UsernameUpdateEmbed(
after, before.name, after.name)
for channel in extended_channels:
try:
ch = self.bot.get_channel(channel)
await ch.send(embed=local_embed)
except Exception as e:
self.bot.logger.info(
f'Error logging name change'
f' in channel {channel}, error {e}'
)
@commands.Cog.listener()
async def on_voice_state_update(self, member, before, after):
"""Send a message on user vc update."""
vc_logging = await self.bot.pg_utils.get_voice_logging(
member.guild.id)
if not vc_logging:
return
vc_channels = await self.bot.pg_utils.get_voice_channels(
member.guild.id
)
if before.channel is None and after.channel:
local_embed = embeds.VoiceChannelStateEmbed(
member, after.channel, 'joined'
)
for channel in vc_channels:
try:
channel = self.bot.get_channel(channel)
await channel.send(embed=local_embed)
except Exception as e:
self.bot.logger.info(
f'Error logging voice join in'
f' channel {channel}, error: {e}'
)
elif after.channel is None and before.channel:
local_embed = embeds.VoiceChannelStateEmbed(
member, before.channel, 'left'
)
for channel in vc_channels:
try:
channel = self.bot.get_channel(channel)
await channel.send(embed=local_embed)
except Exception as e:
self.bot.logger.info(
f'Error logging voice leave in'
f' channel {channel}, error: {e}'
)
elif before.channel != after.channel:
local_embed = embeds.VoiceChannelMoveEmbed(
member, before.channel, after.channel
)
for channel in vc_channels:
try:
channel = self.bot.get_channel(channel)
await channel.send(embed=local_embed)
except Exception as e:
self.bot.logger.info(
f'Error logging voice move in'
f' channel {channel}, error: {e}'
)
@commands.Cog.listener()
async def on_guild_channel_delete(self, channel):
"""Attempt to remove deleted channel from the logging databases."""
logger = await self.bot.pg_utils.get_logger_channels(channel.guild.id)
vlogger = await self.bot.pg_utils.get_voice_channels(channel.guild.id)
self.bot.logger.info(channel.id)
self.bot.logger.info(logger)
self.bot.logger.info(vlogger)
channel_type = -1
if channel.id in logger and channel.id in vlogger:
channel_type = 0
elif channel.id in logger:
channel_type = 1
elif channel.id in vlogger:
channel_type = 2 # noqa
else:
return
server_del = channel.guild.id
if (channel_type % 2) <= 1:
try:
success = False
success = await \
self.bot.pg_utils.rem_logger_channel(
server_del, channel, self.bot.logger
)
if success:
self.bot.logger.info(
f'Channel deleted from server {server_del}'
f', removed from log db'
)
except Exception as e:
self.bot.logger.info(
f"""Issue removing channel {channel} from log database
after deletion error: {e}"""
)
if (channel_type % 2) == 0:
try:
success = False
success = await \
self.bot.pg_utils.rem_voice_channel(
server_del, channel, self.bot.logger
)
if success:
self.bot.logger.info(
f'Channel deleted from server {server_del}'
f', removed from voice log db'
)
except Exception as e:
self.bot.logger.info(
f"""Issue removing channel {channel} from voice database
after deletion error: {e}"""
)
def setup(bot):
"""General cog loading."""
bot.add_cog(Logging(bot))
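# Minimal loading sketch; the `bot` object and the extension path are
# assumptions about the surrounding project layout:
#   bot.load_extension('cogs.logging')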
| 38.839864
| 90
| 0.504671
| 2,334
| 22,799
| 4.805913
| 0.084404
| 0.0493
| 0.037087
| 0.040653
| 0.801908
| 0.782473
| 0.75448
| 0.721138
| 0.659267
| 0.631274
| 0
| 0.012466
| 0.41243
| 22,799
| 586
| 91
| 38.906143
| 0.824873
| 0.006842
| 0
| 0.616514
| 0
| 0
| 0.112371
| 0.001234
| 0
| 0
| 0.006949
| 0
| 0
| 1
| 0.00367
| false
| 0.007339
| 0.005505
| 0
| 0.036697
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8100f2748d193d1ddd9e53b415e965a0e0ff8767
| 55
|
py
|
Python
|
swagger-4-es development/images/swagger/source/create_operations/__init__.py
|
swarmee/swagger-4-es
|
8ee367267c9a4afd9abba5964570d32c44c7ce34
|
[
"MIT"
] | 3
|
2021-12-28T08:43:00.000Z
|
2022-02-09T14:51:07.000Z
|
swagger-4-es development/images/swagger/source/create_operations/__init__.py
|
swarmee/swagger-4-es
|
8ee367267c9a4afd9abba5964570d32c44c7ce34
|
[
"MIT"
] | null | null | null |
swagger-4-es development/images/swagger/source/create_operations/__init__.py
|
swarmee/swagger-4-es
|
8ee367267c9a4afd9abba5964570d32c44c7ce34
|
[
"MIT"
] | null | null | null |
from .create_operations import api as create_operations
| 55
| 55
| 0.890909
| 8
| 55
| 5.875
| 0.75
| 0.680851
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 55
| 1
| 55
| 55
| 0.94
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
810ea601296ec25031ea552745f7de47adf57472
| 94
|
py
|
Python
|
plugins/tenet/util/qt/__init__.py
|
onesorzer0es/tenet
|
62c0bec073fcbb31c5be52c9ea236b6ea2bcc418
|
[
"MIT"
] | 877
|
2021-04-20T14:49:39.000Z
|
2022-03-26T15:38:14.000Z
|
plugins/tenet/util/qt/__init__.py
|
onesorzer0es/tenet
|
62c0bec073fcbb31c5be52c9ea236b6ea2bcc418
|
[
"MIT"
] | 12
|
2021-04-21T07:32:46.000Z
|
2022-02-21T12:50:14.000Z
|
plugins/tenet/util/qt/__init__.py
|
onesorzer0es/tenet
|
62c0bec073fcbb31c5be52c9ea236b6ea2bcc418
|
[
"MIT"
] | 76
|
2021-04-20T16:35:09.000Z
|
2022-03-25T03:57:10.000Z
|
from .shim import *
if QT_AVAILABLE:
from .util import *
from .waitbox import WaitBox
| 18.8
| 32
| 0.702128
| 13
| 94
| 5
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.234043
| 94
| 5
| 32
| 18.8
| 0.902778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
814219d8274e0512389a3d865347b1ab0c174468
| 655
|
py
|
Python
|
app/api/serializers.py
|
Arkaikus/StarWarsAPI
|
837904fb2dde62297f09c9dc16bb261ba3ae4b12
|
[
"MIT"
] | null | null | null |
app/api/serializers.py
|
Arkaikus/StarWarsAPI
|
837904fb2dde62297f09c9dc16bb261ba3ae4b12
|
[
"MIT"
] | null | null | null |
app/api/serializers.py
|
Arkaikus/StarWarsAPI
|
837904fb2dde62297f09c9dc16bb261ba3ae4b12
|
[
"MIT"
] | null | null | null |
from api.models import Character, Film, Planet
from rest_framework import serializers
from django.contrib.auth.models import User
from django.contrib.auth.hashers import make_password
class CharactersSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Character
fields = ('id', 'name', 'films')
class FilmsSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Film
fields = ('id', 'title', 'prelude', 'characters', 'planets')
class PlanetsSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Planet
fields = ('id', 'name', 'films')
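# Minimal usage sketch; the `character` instance and `request` are assumed to
# be available, and hyperlinked serializers expect the request in their context:
#   serializer = CharactersSerializer(character, context={'request': request})
#   serializer.data  # e.g. {'id': 1, 'name': 'Luke Skywalker', 'films': [...]}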
| 28.478261
| 68
| 0.712977
| 65
| 655
| 7.153846
| 0.492308
| 0.23871
| 0.270968
| 0.296774
| 0.329032
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.18626
| 655
| 22
| 69
| 29.772727
| 0.87242
| 0
| 0
| 0.3125
| 0
| 0
| 0.080916
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.0625
| 0.25
| 0
| 0.625
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
d4a0a7e2d35ddfc8bcd2e12d049866fec1ffe15a
| 82
|
py
|
Python
|
crates/iks-to-fks/tests/helper-python-scripts/print-actions-to-stdout.py
|
chinedufn/landon
|
6df895621e6dd518557b6d60d0920f70e50750bb
|
[
"MIT"
] | 117
|
2018-09-12T12:50:01.000Z
|
2022-03-14T03:24:49.000Z
|
crates/iks-to-fks/tests/helper-python-scripts/print-actions-to-stdout.py
|
chinedufn/blender-exporter
|
00d8daa0a3f2c9261d0e2bdd386194f141f39887
|
[
"MIT"
] | 22
|
2019-03-27T11:36:41.000Z
|
2022-01-03T02:21:11.000Z
|
crates/iks-to-fks/tests/helper-python-scripts/print-actions-to-stdout.py
|
chinedufn/blender-exporter
|
00d8daa0a3f2c9261d0e2bdd386194f141f39887
|
[
"MIT"
] | 10
|
2019-09-05T05:14:22.000Z
|
2021-10-10T16:51:00.000Z
|
import bpy
print("The number of actions is: " + str(len(list(bpy.data.actions))))
| 27.333333
| 70
| 0.707317
| 14
| 82
| 4.142857
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121951
| 82
| 2
| 71
| 41
| 0.805556
| 0
| 0
| 0
| 0
| 0
| 0.317073
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
d4b2bcb1baa8130d164597c9d6dd33006124a958
| 9,047
|
py
|
Python
|
tests/test_annotation.py
|
ECGKit/wfdb-python
|
0d42dfb4b2946625f00cbf500d830d374a201153
|
[
"MIT"
] | null | null | null |
tests/test_annotation.py
|
ECGKit/wfdb-python
|
0d42dfb4b2946625f00cbf500d830d374a201153
|
[
"MIT"
] | null | null | null |
tests/test_annotation.py
|
ECGKit/wfdb-python
|
0d42dfb4b2946625f00cbf500d830d374a201153
|
[
"MIT"
] | null | null | null |
import os
import re
import unittest
import numpy as np
import wfdb
class TestAnnotation(unittest.TestCase):
"""
Testing read and write of WFDB annotations, including Physionet
streaming.
Target files created using the original WFDB Software Package
version 10.5.24
"""
def test_1(self):
"""
Target file created with:
rdann -r sample-data/100 -a atr > ann-1
"""
annotation = wfdb.rdann("sample-data/100", "atr")
# This is not the fault of the script. The annotation file specifies a
# length 3
annotation.aux_note[0] = "(N"
# aux_note field with a null written after '(N' which the script correctly picks up. I am just
# getting rid of the null in this unit test to compare with the regexp output below which has
# no null to detect in the output text file of rdann.
# Target data from WFDB software package
lines = tuple(open("tests/target-output/ann-1", "r"))
nannot = len(lines)
target_time = [None] * nannot
target_sample = np.empty(nannot, dtype="object")
target_symbol = [None] * nannot
target_subtype = np.empty(nannot, dtype="object")
target_chan = np.empty(nannot, dtype="object")
target_num = np.empty(nannot, dtype="object")
target_aux_note = [None] * nannot
RXannot = re.compile(
"[ \t]*(?P<time>[\[\]\w\.:]+) +(?P<sample>\d+) +(?P<symbol>.) +(?P<subtype>\d+) +(?P<chan>\d+) +(?P<num>\d+)\t?(?P<aux_note>.*)"
)
for i in range(0, nannot):
(
target_time[i],
target_sample[i],
target_symbol[i],
target_subtype[i],
target_chan[i],
target_num[i],
target_aux_note[i],
) = RXannot.findall(lines[i])[0]
# Convert objects into integers
target_sample = target_sample.astype("int")
target_num = target_num.astype("int")
target_subtype = target_subtype.astype("int")
target_chan = target_chan.astype("int")
# Compare
comp = [
np.array_equal(annotation.sample, target_sample),
np.array_equal(annotation.symbol, target_symbol),
np.array_equal(annotation.subtype, target_subtype),
np.array_equal(annotation.chan, target_chan),
np.array_equal(annotation.num, target_num),
annotation.aux_note == target_aux_note,
]
# Test file streaming
pn_annotation = wfdb.rdann(
"100",
"atr",
pn_dir="mitdb",
return_label_elements=["label_store", "symbol"],
)
pn_annotation.aux_note[0] = "(N"
pn_annotation.create_label_map()
# Test file writing
annotation.wrann(write_fs=True)
write_annotation = wfdb.rdann(
"100", "atr", return_label_elements=["label_store", "symbol"]
)
write_annotation.create_label_map()
assert comp == [True] * 6
assert annotation.__eq__(pn_annotation)
assert annotation.__eq__(write_annotation)
def test_2(self):
"""
Annotation file with many aux_note strings.
Target file created with:
rdann -r sample-data/100 -a atr > ann-2
"""
annotation = wfdb.rdann("sample-data/12726", "anI")
# Target data from WFDB software package
lines = tuple(open("tests/target-output/ann-2", "r"))
nannot = len(lines)
target_time = [None] * nannot
target_sample = np.empty(nannot, dtype="object")
target_symbol = [None] * nannot
target_subtype = np.empty(nannot, dtype="object")
target_chan = np.empty(nannot, dtype="object")
target_num = np.empty(nannot, dtype="object")
target_aux_note = [None] * nannot
RXannot = re.compile(
"[ \t]*(?P<time>[\[\]\w\.:]+) +(?P<sample>\d+) +(?P<symbol>.) +(?P<subtype>\d+) +(?P<chan>\d+) +(?P<num>\d+)\t?(?P<aux_note>.*)"
)
for i in range(0, nannot):
(
target_time[i],
target_sample[i],
target_symbol[i],
target_subtype[i],
target_chan[i],
target_num[i],
target_aux_note[i],
) = RXannot.findall(lines[i])[0]
# Convert objects into integers
target_sample = target_sample.astype("int")
target_num = target_num.astype("int")
target_subtype = target_subtype.astype("int")
target_chan = target_chan.astype("int")
# Compare
comp = [
np.array_equal(annotation.sample, target_sample),
np.array_equal(annotation.symbol, target_symbol),
np.array_equal(annotation.subtype, target_subtype),
np.array_equal(annotation.chan, target_chan),
np.array_equal(annotation.num, target_num),
annotation.aux_note == target_aux_note,
]
# Test file streaming
pn_annotation = wfdb.rdann(
"12726",
"anI",
pn_dir="prcp",
return_label_elements=["label_store", "symbol"],
)
pn_annotation.create_label_map()
# Test file writing
annotation.wrann(write_fs=True)
write_annotation = wfdb.rdann(
"12726", "anI", return_label_elements=["label_store", "symbol"]
)
write_annotation.create_label_map()
assert comp == [True] * 6
assert annotation.__eq__(pn_annotation)
assert annotation.__eq__(write_annotation)
def test_3(self):
"""
Annotation file with custom annotation types
Target file created with:
rdann -r sample-data/1003 -a atr > ann-3
"""
annotation = wfdb.rdann("sample-data/1003", "atr")
# Target data from WFDB software package
lines = tuple(open("tests/target-output/ann-3", "r"))
nannot = len(lines)
target_time = [None] * nannot
target_sample = np.empty(nannot, dtype="object")
target_symbol = [None] * nannot
target_subtype = np.empty(nannot, dtype="object")
target_chan = np.empty(nannot, dtype="object")
target_num = np.empty(nannot, dtype="object")
target_aux_note = [None] * nannot
RXannot = re.compile(
"[ \t]*(?P<time>[\[\]\w\.:]+) +(?P<sample>\d+) +(?P<symbol>.) +(?P<subtype>\d+) +(?P<chan>\d+) +(?P<num>\d+)\t?(?P<aux_note>.*)"
)
for i in range(0, nannot):
(
target_time[i],
target_sample[i],
target_symbol[i],
target_subtype[i],
target_chan[i],
target_num[i],
target_aux_note[i],
) = RXannot.findall(lines[i])[0]
# Convert objects into integers
target_sample = target_sample.astype("int")
target_num = target_num.astype("int")
target_subtype = target_subtype.astype("int")
target_chan = target_chan.astype("int")
# Compare
comp = [
np.array_equal(annotation.sample, target_sample),
np.array_equal(annotation.symbol, target_symbol),
np.array_equal(annotation.subtype, target_subtype),
np.array_equal(annotation.chan, target_chan),
np.array_equal(annotation.num, target_num),
annotation.aux_note == target_aux_note,
]
# Test file streaming
pn_annotation = wfdb.rdann(
"1003",
"atr",
pn_dir="challenge-2014/set-p2",
return_label_elements=["label_store", "symbol"],
)
pn_annotation.create_label_map()
# Test file writing
annotation.wrann(write_fs=True)
write_annotation = wfdb.rdann(
"1003", "atr", return_label_elements=["label_store", "symbol"]
)
write_annotation.create_label_map()
assert comp == [True] * 6
assert annotation.__eq__(pn_annotation)
assert annotation.__eq__(write_annotation)
def test_4(self):
"""
Read and write annotations with large time skips
Annotation file created by:
echo "xxxxxxxxx 10000000000 N 0 0 0" | wrann -r huge -a qrs
"""
annotation = wfdb.rdann("sample-data/huge", "qrs")
self.assertEqual(annotation.sample[0], 10000000000)
annotation.wrann()
annotation1 = wfdb.rdann("sample-data/huge", "qrs")
annotation2 = wfdb.rdann("huge", "qrs")
self.assertEqual(annotation1, annotation2)
@classmethod
def tearDownClass(cls):
writefiles = [
"100.atr",
"1003.atr",
"12726.anI",
"huge.qrs",
]
for file in writefiles:
if os.path.isfile(file):
os.remove(file)
if __name__ == "__main__":
unittest.main()
| 33.507407
| 140
| 0.566044
| 1,039
| 9,047
| 4.727623
| 0.161694
| 0.027077
| 0.036645
| 0.067182
| 0.791327
| 0.75855
| 0.747964
| 0.747964
| 0.730863
| 0.730863
| 0
| 0.019234
| 0.310379
| 9,047
| 269
| 141
| 33.63197
| 0.768072
| 0.139052
| 0
| 0.657459
| 0
| 0.016575
| 0.116408
| 0.035554
| 0
| 0
| 0
| 0
| 0.060773
| 1
| 0.027624
| false
| 0
| 0.027624
| 0
| 0.060773
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d4bd167d021b3c79aa9a1b18b1f8b08aefd8b9fb
| 6,412
|
py
|
Python
|
streettraffic/predefined/cities.py
|
streettraffic/streettraffic
|
26cbda67b803eb42f3fe70354689648bafd6d718
|
[
"MIT"
] | 23
|
2017-08-09T19:57:23.000Z
|
2021-11-23T12:30:33.000Z
|
streettraffic/predefined/cities.py
|
streettraffic/streettraffic
|
26cbda67b803eb42f3fe70354689648bafd6d718
|
[
"MIT"
] | 1
|
2018-10-04T18:17:11.000Z
|
2018-10-23T00:12:19.000Z
|
streettraffic/predefined/cities.py
|
streettraffic/streettraffic
|
26cbda67b803eb42f3fe70354689648bafd6d718
|
[
"MIT"
] | 4
|
2017-10-12T03:31:34.000Z
|
2021-05-19T15:11:30.000Z
|
San_Francisco_polygon = [[37.837174338616975,-122.48725891113281],[37.83364941345965,-122.48485565185547],[37.83093781796035,-122.4814224243164],[37.82415839321614,-122.48004913330078],[37.8203616433087,-122.47970581054688],[37.81059767530207,-122.47798919677734],[37.806122091729485,-122.47627258300781],[37.79215110146845,-122.48039245605469],[37.78726741375342,-122.48519897460938],[37.78618210598413,-122.49927520751953],[37.78645343442073,-122.50614166259766],[37.779127216982424,-122.51232147216797],[37.772614414082014,-122.51163482666016],[37.76121562849642,-122.51197814941406],[37.75171529845649,-122.51060485839844],[37.74329970164702,-122.50957489013672],[37.735969208590504,-122.50717163085938],[37.73081027834234,-122.50717163085938],[37.72293542866175,-122.50682830810547],[37.715331331027045,-122.50442504882812],[37.714244967649265,-122.49893188476562],[37.71940505182832,-122.50030517578125],[37.724564776604836,-122.5030517578125],[37.729724141962045,-122.50167846679688],[37.7324394530424,-122.49549865722656],[37.72918106779786,-122.49378204345703],[37.729724141962045,-122.48828887939453],[37.72782336496339,-122.4807357788086],[37.73271097867418,-122.37945556640625],[37.74520008134973,-122.37533569335938],[37.74655746554895,-122.39112854003906],[37.75008654795525,-122.3873519897461],[37.754972691904946,-122.38391876220703],[37.76148704857093,-122.38597869873047],[37.769629187677005,-122.3876953125],[37.78265474565738,-122.38872528076172],[37.78781006166096,-122.3880386352539],[37.79594930209237,-122.37911224365234],[37.804358908571395,-122.36984252929688],[37.812767557570204,-122.3605728149414],[37.817649559511125,-122.35130310058594],[37.82009043941308,-122.332763671875],[37.823344820392535,-122.30632781982422],[37.8271414168374,-122.30701446533203],[37.824700770115996,-122.31765747070312],[37.82253123860035,-122.33139038085938],[37.8203616433087,-122.34615325927734],[37.81792077237497,-122.35576629638672],[37.81168262440736,-122.3653793334961],[37.803002585189645,-122.37396240234375],[37.790523241426946,-122.3880386352539],[37.79594930209237,-122.39490509033203],[37.80273131752431,-122.39936828613281],[37.80815648152641,-122.40726470947266],[37.80734273233311,-122.42305755615234],[37.807071480609274,-122.43267059326172],[37.80571520704469,-122.44194030761719],[37.80463017025873,-122.45189666748047],[37.80463017025873,-122.464599609375],[37.807071480609274,-122.47421264648438],[37.815208598896255,-122.47695922851562],[37.82768377181359,-122.47798919677734],[37.835276322922695,-122.48004913330078],[37.837174338616975,-122.48725891113281]]
Boston_polygon = [[42.32453946380133,-71.13029479980469],[42.32758538845383,-71.0489273071289],[42.330631165629846,-71.03588104248047],[42.33316920061984,-71.02180480957031],[42.339513840022754,-71.02455139160156],[42.3397676122846,-71.04412078857422],[42.34611158596906,-71.02558135986328],[42.356514317057886,-71.02317810058594],[42.348648996207956,-71.00669860839844],[42.35829022102701,-71.00360870361328],[42.353469793490646,-70.99090576171875],[42.36057345238455,-70.98918914794922],[42.363110278811256,-71.00223541259766],[42.37883631647602,-70.99983215332031],[42.37756823359386,-71.00738525390625],[42.37224200585402,-71.01631164550781],[42.37680737157286,-71.02008819580078],[42.381879610913195,-71.015625],[42.38999434161929,-71.00704193115234],[42.40444610741266,-71.00395202636719],[42.40444610741266,-71.13029479980469],[42.32453946380133,-71.13029479980469]]
Pittsburgh_polygon = [[40.603526799885884,-80.09445190429688],[40.564937785967224,-80.13427734375],[40.50126945841646,-80.14801025390625],[40.42290582797254,-80.12054443359375],[40.3549167507906,-80.1287841796875],[40.330842639095756,-80.14389038085938],[40.31199603742692,-80.16311645507812],[40.319325896602095,-79.9310302734375],[40.365381076021734,-79.80056762695312],[40.57119697629581,-79.7991943359375],[40.603526799885884,-80.09445190429688]]
Greenville_polygon = [[34.96531080784271,-82.44483947753906],[34.946176590087454,-82.44140625],[34.92422301690582,-82.45994567871094],[34.89888467073924,-82.46818542480469],[34.871285134570016,-82.47367858886719],[34.837477162415986,-82.452392578125],[34.83015027082022,-82.54096984863281],[34.820004267650454,-82.53822326660156],[34.829022998858306,-82.45101928710938],[34.8047829195724,-82.44071960449219],[34.79068657192738,-82.44552612304688],[34.770383597610255,-82.47573852539062],[34.76192255039478,-82.47024536132812],[34.784483415461345,-82.44415283203125],[34.7483830709853,-82.31849670410156],[34.71847552413778,-82.25944519042969],[34.72073307506407,-82.24845886230469],[34.75740963726007,-82.28141784667969],[34.77263973038464,-82.26699829101562],[34.83184114982865,-82.28965759277344],[34.86058077988933,-82.24845886230469],[34.86790496256872,-82.25601196289062],[34.839167890957015,-82.30133056640625],[34.86903170200862,-82.34458923339844],[34.96531080784271,-82.44483947753906]]
Norfolk_polygon = [[37.00693943418586,-76.32339477539062],[36.98884240936997,-76.31034851074219],[36.982260605282676,-76.30348205566406],[36.96799807635307,-76.30210876464844],[36.95976847846004,-76.27738952636719],[36.953732874654285,-76.2725830078125],[36.94769679250732,-76.28700256347656],[36.95098926024786,-76.29661560058594],[36.95757376878687,-76.30348205566406],[36.95976847846004,-76.31584167480469],[36.9669008480318,-76.32820129394531],[36.90323455156814,-76.32888793945312],[36.91366629380721,-76.31721496582031],[36.90927415514871,-76.28631591796875],[36.89499795802219,-76.28974914550781],[36.901587303978474,-76.30348205566406],[36.90323455156814,-76.31515502929688],[36.874127942666334,-76.32888793945312],[36.8631414329529,-76.30828857421875],[36.849955535919875,-76.32682800292969],[36.856548768788954,-76.34056091308594],[36.82852360193767,-76.33094787597656],[36.820278951308744,-76.34536743164062],[36.81203341240741,-76.34124755859375],[36.83017242546416,-76.28974914550781],[36.79993834872292,-76.2835693359375],[36.80323719192363,-76.19155883789062],[36.84116367417466,-76.19499206542969],[36.82137828938333,-76.1407470703125],[36.82577548376294,-76.04873657226562],[36.84281222525469,-76.04736328125],[36.85160389745116,-76.14280700683594],[36.92739009701458,-76.16065979003906],[36.93726970584893,-76.22039794921875],[36.96799807635307,-76.27670288085938],[36.96854668458301,-76.29592895507812],[36.98390610968992,-76.29936218261719],[37.00693943418586,-76.32339477539062]]
| 1,068.666667
| 2,589
| 0.821429
| 651
| 6,412
| 8.081413
| 0.47619
| 0.009124
| 0.010264
| 0.012925
| 0.013305
| 0
| 0
| 0
| 0
| 0
| 0
| 0.809911
| 0.002339
| 6,412
| 5
| 2,590
| 1,282.4
| 0.012506
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d4c22af82f05a34bb86b9af4e9eeb653b043e9e8
| 189
|
py
|
Python
|
packs/errors/err.py
|
CiceroAraujo/SB
|
637cc4bc63c952f058c316b2b1fbfbb5cd6250c8
|
[
"MIT"
] | 1
|
2020-09-09T17:42:52.000Z
|
2020-09-09T17:42:52.000Z
|
packs/errors/err.py
|
CiceroAraujo/SB
|
637cc4bc63c952f058c316b2b1fbfbb5cd6250c8
|
[
"MIT"
] | null | null | null |
packs/errors/err.py
|
CiceroAraujo/SB
|
637cc4bc63c952f058c316b2b1fbfbb5cd6250c8
|
[
"MIT"
] | null | null | null |
class EmptyQueueError(Exception):
pass
class DualStructureError(Exception):
pass
class ConservativeVolumeError(Exception):
pass
class PmsFluxFacesError(Exception):
pass
| 14.538462
| 41
| 0.767196
| 16
| 189
| 9.0625
| 0.4375
| 0.358621
| 0.372414
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169312
| 189
| 12
| 42
| 15.75
| 0.923567
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
d4e76880f4b0a2fac9f92acc5710c3b678003a77
| 166
|
py
|
Python
|
logcord/__init__.py
|
LaudateCorpus1/logcord
|
6d9866f0ecb62043c3220ae335abd67e649bd1e0
|
[
"MIT"
] | 3
|
2020-12-12T22:24:24.000Z
|
2021-06-14T19:45:34.000Z
|
logcord/__init__.py
|
python-discord/logcord
|
31a23e983cd613e39aa943f48254bd532493ad1c
|
[
"MIT"
] | 3
|
2021-01-17T03:50:58.000Z
|
2021-06-02T03:47:05.000Z
|
logcord/__init__.py
|
LaudateCorpus1/logcord
|
6d9866f0ecb62043c3220ae335abd67e649bd1e0
|
[
"MIT"
] | 3
|
2021-02-27T15:24:47.000Z
|
2022-03-10T13:00:44.000Z
|
import hug
from logcord.routes import api, frontend
@hug.extend_api()
def add_routes() -> None:
"""Adds the routes for this app."""
return [api, frontend]
| 16.6
| 40
| 0.680723
| 24
| 166
| 4.625
| 0.708333
| 0.198198
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.192771
| 166
| 9
| 41
| 18.444444
| 0.828358
| 0.174699
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d4f65a5b1fa71aa8fef3d10bc42543d3b7a2a175
| 110
|
py
|
Python
|
models/__init__.py
|
yo16/roundrobin
|
0118b9f8354fcea2332e38a4553c53d100254a73
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
yo16/roundrobin
|
0118b9f8354fcea2332e38a4553c53d100254a73
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
yo16/roundrobin
|
0118b9f8354fcea2332e38a4553c53d100254a73
|
[
"MIT"
] | null | null | null |
from .RoundRobin import regist_roundrobin, get_roundrobin
from .ShortUri import create_new_uri, is_valid_uri
| 27.5
| 57
| 0.863636
| 16
| 110
| 5.5625
| 0.6875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 110
| 3
| 58
| 36.666667
| 0.89899
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d4fcc6648b55fd8bf8db009864afc9b18c13a1dd
| 14,356
|
py
|
Python
|
tensormonk/layers/inception.py
|
Tensor46/TensorMONK
|
67617d3fdf8fde072ba9cab42de7d67c79b17494
|
[
"MIT"
] | 29
|
2018-07-06T23:57:23.000Z
|
2022-03-08T20:38:57.000Z
|
tensormonk/layers/inception.py
|
Johnson-yue/TensorMONK
|
1785132b82c685c3b3fc05b00dec46b1fccfc948
|
[
"MIT"
] | 3
|
2018-12-14T22:21:26.000Z
|
2020-06-19T02:13:34.000Z
|
tensormonk/layers/inception.py
|
Johnson-yue/TensorMONK
|
1785132b82c685c3b3fc05b00dec46b1fccfc948
|
[
"MIT"
] | 8
|
2018-07-06T23:58:03.000Z
|
2021-04-12T01:35:54.000Z
|
""" TensorMONK :: layers :: inception """
__all__ = ["Stem2", "InceptionA", "InceptionB", "InceptionC",
"ReductionA", "ReductionB"]
import torch
import torch.nn as nn
from .convolution import Convolution
from .utils import update_kwargs, compute_flops
from .carryresidue import CarryModular
class Stem2(nn.Module):
r""" For InceptionV4 - https://arxiv.org/pdf/1602.07261.pdf
All args are similar to Convolution and shift is disabled. Designed for an
input tensor_size of (1, 3, 299, 299) to deliver an output of size
(1, 384, 35, 35)
"""
def __init__(self, tensor_size=(1, 3, 299, 299), activation="relu",
dropout=0., normalization="batch", pre_nm=False, groups=1,
weight_nm=False, equalized=False, shift=False,
bias=False, dropblock=True, **kwargs):
super(Stem2, self).__init__()
kwargs = update_kwargs(kwargs, *([None]*5), activation, dropout,
normalization, pre_nm, groups, weight_nm,
equalized, shift, bias, dropblock)
self.C3_32_2 = Convolution(tensor_size, 3, 32, 2, False, **kwargs)
self.C3_32_1 = Convolution(self.C3_32_2.tensor_size, 3, 32, 1, False,
**kwargs)
self.C3_64_1 = Convolution(self.C3_32_1.tensor_size, 3, 64, 1, True,
**kwargs)
self.C160 = CarryModular(self.C3_64_1.tensor_size, 3, 160, 2, False,
block=Convolution, pool="max", **kwargs)
channel1 = nn.Sequential()
channel1.add_module("C1_64_1", Convolution(self.C160.tensor_size, 1,
64, 1, True, **kwargs))
channel1.add_module("C17_64_1", Convolution(channel1[-1].tensor_size,
(1, 7), 64, 1, True,
**kwargs))
channel1.add_module("C71_64_1", Convolution(channel1[-1].tensor_size,
(7, 1), 64, 1, True,
**kwargs))
channel1.add_module("C3_96_1", Convolution(channel1[-1].tensor_size, 3,
96, 1, False, **kwargs))
channel2 = nn.Sequential()
channel2.add_module("C1_64_1", Convolution(self.C160.tensor_size, 1,
64, 1, True, **kwargs))
channel2.add_module("C3_96_1", Convolution(channel2[-1].tensor_size, 3,
96, 1, False, **kwargs))
self.C192 = CarryModular(self.C160.tensor_size, 3, 192, 2, False,
block=channel1, carry_network=channel2,
**kwargs)
self.C384 = CarryModular(self.C192.tensor_size, 3, 384, 2, False,
block=Convolution, pool="max", **kwargs)
self.tensor_size = self.C384.tensor_size
def forward(self, tensor):
tensor = self.C3_64_1(self.C3_32_1(self.C3_32_2(tensor)))
tensor = self.C160(tensor)
return self.C384(self.C192(tensor))
def flops(self):
return compute_flops(self)
# =========================================================================== #
class InceptionA(nn.Module):
r""" For InceptionV4 - https://arxiv.org/pdf/1602.07261.pdf
All args are similar to Convolution and shift is disabled. Designed for an
input tensor_size of (1, 384, 35, 35) to deliver an output of size
(1, 384, 35, 35)
"""
def __init__(self, tensor_size=(1, 384, 35, 35), activation="relu",
dropout=0., normalization="batch", pre_nm=False, groups=1,
weight_nm=False, equalized=False, shift=False,
bias=False, dropblock=True, **kwargs):
super(InceptionA, self).__init__()
h, w = tensor_size[2:]
kwargs = update_kwargs(kwargs, *([None]*4), True, activation, dropout,
normalization, pre_nm, groups, weight_nm,
equalized, shift, bias, dropblock)
self._flops = tensor_size[1] * (h//2) * (w//2) * (3*3+1)
self.path1 = nn.Sequential(nn.AvgPool2d((3, 3), (1, 1), 1),
Convolution(tensor_size, 1, 96, 1,
**kwargs))
self.path2 = Convolution(tensor_size, 1, 96, 1, **kwargs)
path3 = [Convolution(tensor_size, 1, 64, 1, **kwargs),
Convolution((1, 64, h, w), 3, 96, 1, **kwargs)]
self.path3 = nn.Sequential(*path3)
path4 = [Convolution(tensor_size, 1, 64, 1, **kwargs),
Convolution((1, 64, h, w), 3, 96, 1, **kwargs),
Convolution((1, 96, h, w), 3, 96, 1, **kwargs)]
self.path4 = nn.Sequential(*path4)
self.tensor_size = (1, 96*4, h, w)
def forward(self, tensor):
return torch.cat((self.path1(tensor), self.path2(tensor),
self.path3(tensor), self.path4(tensor)), 1)
def flops(self):
return compute_flops(self) + self._flops
# =========================================================================== #
class ReductionA(nn.Module):
r""" For InceptionV4 - https://arxiv.org/pdf/1602.07261.pdf
All args are similar to Convolution and shift is disabled. Designed for an
input tensor_size of (1, 384, 35, 35) to deliver an output of size
(1, 1024, 17, 17)
"""
def __init__(self, tensor_size=(1, 384, 35, 35), activation="relu",
dropout=0., normalization="batch", pre_nm=False, groups=1,
weight_nm=False, equalized=False, shift=False,
bias=False, dropblock=True, **kwargs):
super(ReductionA, self).__init__()
h, w = tensor_size[2:]
kwargs = update_kwargs(kwargs, *([None]*5), activation, dropout,
normalization, pre_nm, groups, weight_nm,
equalized, shift, bias, dropblock)
self._flops = tensor_size[1] * (h//2) * (w//2) * (3*3+1)
self.path1 = nn.MaxPool2d((3, 3), stride=(2, 2))
self.path2 = Convolution(tensor_size, 3, 384, 2, False, **kwargs)
path3 = [Convolution(tensor_size, 1, 192, 1, True, **kwargs),
Convolution((1, 192, h, w), 3, 224, 1, True, **kwargs),
Convolution((1, 224, h, w), 3, 256, 2, False, **kwargs)]
self.path3 = nn.Sequential(*path3)
self.tensor_size = (1, tensor_size[1]+384+256,
self.path2.tensor_size[2],
self.path2.tensor_size[3])
def forward(self, tensor):
return torch.cat((self.path1(tensor), self.path2(tensor),
self.path3(tensor)), 1)
def flops(self):
return compute_flops(self) + self._flops
# =========================================================================== #
class InceptionB(nn.Module):
r""" For InceptionV4 - https://arxiv.org/pdf/1602.07261.pdf
All args are similar to Convolution and shift is disabled. Designed for an
input tensor_size of (1, 1024, 17, 17) to deliver an output of size
(1, 1024, 17, 17)
"""
def __init__(self, tensor_size=(1, 1024, 17, 17), activation="relu",
dropout=0., normalization="batch", pre_nm=False, groups=1,
weight_nm=False, equalized=False, shift=False,
bias=False, dropblock=True, **kwargs):
super(InceptionB, self).__init__()
h, w = tensor_size[2:]
kwargs = update_kwargs(kwargs, *([None]*4), True, activation, dropout,
normalization, pre_nm, groups, weight_nm,
equalized, shift, bias, dropblock)
self._flops = tensor_size[1] * (h//2) * (w//2) * (3*3+1)
self.path1 = nn.Sequential(nn.AvgPool2d((3, 3), (1, 1), 1),
Convolution(tensor_size, 1, 128, 1,
**kwargs))
self.path2 = Convolution(tensor_size, 1, 384, 1, **kwargs)
path3 = [Convolution(tensor_size, 1, 192, 1, **kwargs),
Convolution((1, 192, h, w), (1, 7), 224, 1, **kwargs),
Convolution((1, 224, h, w), (1, 7), 256, 1, **kwargs)]
self.path3 = nn.Sequential(*path3)
path4 = [Convolution(tensor_size, 1, 192, 1, **kwargs),
Convolution((1, 192, h, w), (1, 7), 192, 1, **kwargs),
Convolution((1, 192, h, w), (7, 1), 224, 1, **kwargs),
Convolution((1, 224, h, w), (1, 7), 224, 1, **kwargs),
Convolution((1, 224, h, w), (7, 1), 256, 1, **kwargs)]
self.path4 = nn.Sequential(*path4)
self.tensor_size = (1, 128+384+256+256, h, w)
def forward(self, tensor):
return torch.cat((self.path1(tensor), self.path2(tensor),
self.path3(tensor), self.path4(tensor)), 1)
def flops(self):
return compute_flops(self) + self._flops
# =========================================================================== #
class ReductionB(nn.Module):
r""" For InceptionV4 - https://arxiv.org/pdf/1602.07261.pdf
All args are similar to Convolution and shift is disabled. Designed for an
input tensor_size of (1, 1024, 17, 17) to deliver an output of size
(1, 1536, 8, 8)
"""
def __init__(self, tensor_size=(1, 1024, 17, 17), activation="relu",
dropout=0., normalization="batch", pre_nm=False, groups=1,
weight_nm=False, equalized=False, shift=False,
bias=False, dropblock=True, **kwargs):
super(ReductionB, self).__init__()
h, w = tensor_size[2:]
kwargs = update_kwargs(kwargs, *([None]*5), activation, dropout,
normalization, pre_nm, groups, weight_nm,
equalized, shift, bias, dropblock)
self._flops = tensor_size[1] * (h//2) * (w//2) * (3*3+1)
self.path1 = nn.MaxPool2d((3, 3), stride=(2, 2))
path2 = [Convolution(tensor_size, 1, 192, 1, True, **kwargs),
Convolution((1, 192, h, w), 3, 192, 2, False, **kwargs)]
self.path2 = nn.Sequential(*path2)
path3 = [Convolution(tensor_size, 1, 256, 1, True, **kwargs),
Convolution((1, 256, h, w), (1, 7), 256, 1, True, **kwargs),
Convolution((1, 256, h, w), (1, 7), 256, 1, True, **kwargs),
Convolution((1, 256, h, w), 3, 320, 2, False, **kwargs)]
self.path3 = nn.Sequential(*path3)
self.tensor_size = (1, tensor_size[1]+192+320,
self.path2[-1].tensor_size[2],
self.path2[-1].tensor_size[3])
def forward(self, tensor):
return torch.cat((self.path1(tensor), self.path2(tensor),
self.path3(tensor)), 1)
def flops(self):
return compute_flops(self) + self._flops
# =========================================================================== #
class InceptionC(nn.Module):
r""" For InceptionV4 - https://arxiv.org/pdf/1602.07261.pdf
All args are similar to Convolution and shift is disabled. Designed for an
input tensor_size of (1, 1536, 8, 8) to deliver an output of size
(1, 1536, 8, 8)
"""
def __init__(self, tensor_size=(1, 1536, 8, 8), activation="relu",
dropout=0., normalization="batch", pre_nm=False, groups=1,
weight_nm=False, equalized=False, shift=False,
bias=False, dropblock=True, **kwargs):
super(InceptionC, self).__init__()
h, w = tensor_size[2:]
kwargs = update_kwargs(kwargs, *([None]*4), True, activation, dropout,
normalization, pre_nm, groups, weight_nm,
equalized, shift, bias, dropblock)
self._flops = tensor_size[1] * (h//2) * (w//2) * (3*3+1)
self.path1 = nn.Sequential(nn.AvgPool2d((3, 3), (1, 1), 1),
Convolution(tensor_size, 1, 256, 1,
**kwargs))
self.path2 = Convolution(tensor_size, 1, 256, 1, **kwargs)
self.path3 = Convolution(tensor_size, 1, 384, 1, **kwargs)
self.path3a = Convolution(self.path3.tensor_size, (1, 3), 256, 1,
**kwargs)
self.path3b = Convolution(self.path3.tensor_size, (3, 1), 256, 1,
**kwargs)
path4 = [Convolution(tensor_size, 1, 384, 1, **kwargs),
Convolution((1, 384, h, w), (1, 3), 448, 1, **kwargs),
Convolution((1, 448, h, w), (3, 1), 512, 1, **kwargs)]
self.path4 = nn.Sequential(*path4)
self.path4a = Convolution(self.path4[-1].tensor_size, (1, 3), 256, 1,
**kwargs)
self.path4b = Convolution(self.path4[-1].tensor_size, (3, 1), 256, 1,
**kwargs)
self.tensor_size = (1, 256+256+512+512, h, w)
def forward(self, tensor):
path3 = self.path3(tensor)
path4 = self.path4(tensor)
return torch.cat((self.path1(tensor), self.path2(tensor),
self.path3a(path3), self.path3b(path3),
self.path4a(path4), self.path4b(path4)), 1)
def flops(self):
return compute_flops(self) + self._flops
# from tensormonk.layers import Convolution, CarryModular
# from tensormonk.layers.utils import update_kwargs, compute_flops
# tensor_size = (3, 3, 299, 299)
# x = torch.rand(*tensor_size)
# test = Stem2(tensor_size, "relu", 0., "batch", False)
# test(x).size()
# test.flops()
# %timeit test(x).size()
# test = InceptionA()
# test(torch.rand(*(1, 384, 35, 35))).size()
# test.flops()
# test = ReductionA()
# test(torch.rand(*(1, 384, 35, 35))).size()
# test.flops()
# test = InceptionB((1, 1024, 17, 17))
# test(torch.rand(*(1, 1024, 17, 17))).size()
# test.flops()
# test = ReductionB((1, 1024, 17, 17))
# test(torch.rand(*(1, 1024, 17, 17))).size()
# test.flops()
# test = InceptionC((1, 1536, 8, 8))
# test(torch.rand(*(1, 1536, 8, 8))).size()
# test.flops()
| 48.5
| 79
| 0.525425
| 1,735
| 14,356
| 4.227089
| 0.074928
| 0.095446
| 0.056995
| 0.044996
| 0.814426
| 0.78552
| 0.749386
| 0.703981
| 0.653668
| 0.653668
| 0
| 0.092663
| 0.30691
| 14,356
| 295
| 80
| 48.664407
| 0.644422
| 0.16864
| 0
| 0.524272
| 0
| 0
| 0.013523
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.087379
| false
| 0
| 0.024272
| 0.048544
| 0.199029
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
be12e59ad579ac928c74eba40a73b47ad6749d98
| 132
|
py
|
Python
|
psyneulink/library/components/mechanisms/adaptive/control/__init__.py
|
bdsinger/PsyNeuLink
|
71d8a0bb1691ff85061d4ad3de866d9930a69a73
|
[
"Apache-2.0"
] | null | null | null |
psyneulink/library/components/mechanisms/adaptive/control/__init__.py
|
bdsinger/PsyNeuLink
|
71d8a0bb1691ff85061d4ad3de866d9930a69a73
|
[
"Apache-2.0"
] | null | null | null |
psyneulink/library/components/mechanisms/adaptive/control/__init__.py
|
bdsinger/PsyNeuLink
|
71d8a0bb1691ff85061d4ad3de866d9930a69a73
|
[
"Apache-2.0"
] | null | null | null |
from . import agt
from . import evc
from .agt import *
from .evc import *
__all__ = list(agt.__all__)
__all__.extend(evc.__all__)
| 14.666667
| 27
| 0.727273
| 20
| 132
| 4
| 0.35
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 132
| 8
| 28
| 16.5
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
be134a3430254a75c2395978fd411b74e47549ff
| 141
|
py
|
Python
|
specifications/views.py
|
CAPSLOCKFURY/django-eshop
|
18d47be47e568800e51c4b6ff868138a7350893b
|
[
"MIT"
] | 2
|
2021-05-28T11:39:36.000Z
|
2021-08-20T04:43:00.000Z
|
specifications/views.py
|
CAPSLOCKFURY/django-eshop
|
18d47be47e568800e51c4b6ff868138a7350893b
|
[
"MIT"
] | null | null | null |
specifications/views.py
|
CAPSLOCKFURY/django-eshop
|
18d47be47e568800e51c4b6ff868138a7350893b
|
[
"MIT"
] | null | null | null |
"""
Author : Vadim Dembitskii (CAPSLOCKFURY)
License : The MIT License
"""
from django.shortcuts import render
# Create your views here.
| 15.666667
| 41
| 0.730496
| 17
| 141
| 6.058824
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.177305
| 141
| 8
| 42
| 17.625
| 0.887931
| 0.64539
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
076fb3fd216a13d0ae21674f7b172c567f197d22
| 63
|
py
|
Python
|
template/main/toplevel.py
|
marton-munz/Skeleton
|
8d6780b06ed83920586b58a0caa3d13dbe3dfc03
|
[
"MIT"
] | null | null | null |
template/main/toplevel.py
|
marton-munz/Skeleton
|
8d6780b06ed83920586b58a0caa3d13dbe3dfc03
|
[
"MIT"
] | null | null | null |
template/main/toplevel.py
|
marton-munz/Skeleton
|
8d6780b06ed83920586b58a0caa3d13dbe3dfc03
|
[
"MIT"
] | null | null | null |
from .version import __version__
def run(options):
pass
| 9
| 32
| 0.714286
| 8
| 63
| 5.125
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 63
| 6
| 33
| 10.5
| 0.836735
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
0771b8ed8f7092af7acabef097fb2832822f6517
| 459
|
py
|
Python
|
ex009.py
|
RafahOliveira/python
|
241db92999125d63d61d272ac75384eede40246c
|
[
"MIT"
] | null | null | null |
ex009.py
|
RafahOliveira/python
|
241db92999125d63d61d272ac75384eede40246c
|
[
"MIT"
] | null | null | null |
ex009.py
|
RafahOliveira/python
|
241db92999125d63d61d272ac75384eede40246c
|
[
"MIT"
] | null | null | null |
n1 = int(input("Digite um número: "))
print("{} x {} = {}" .format(n1,1,n1*1))
print("{} x {} = {}" .format(n1,2,n1*2))
print("{} x {} = {}" .format(n1,3,n1*3))
print("{} x {} = {}" .format(n1,4,n1*4))
print("{} x {} = {}" .format(n1,5,n1*5))
print("{} x {} = {}" .format(n1,6,n1*6))
print("{} x {} = {}" .format(n1,7,n1*7))
print("{} x {} = {}" .format(n1,8,n1*8))
print("{} x {} = {}" .format(n1,9,n1*9))
print("{} x {} = {}" .format(n1,10,n1*10))
| 41.727273
| 42
| 0.448802
| 76
| 459
| 2.710526
| 0.25
| 0.291262
| 0.582524
| 0.679612
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111979
| 0.163399
| 459
| 11
| 42
| 41.727273
| 0.424479
| 0
| 0
| 0
| 0
| 0
| 0.306667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.909091
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
ed0b661c5c341094ee944a1a369a509dc8ebe46f
| 2,603
|
py
|
Python
|
checkov/common/checks_infra/solvers/attribute_solvers/__init__.py
|
peaudecastor/checkov
|
a4804b61c1b1390b7abd44ab53285fcbc3e7e80b
|
[
"Apache-2.0"
] | null | null | null |
checkov/common/checks_infra/solvers/attribute_solvers/__init__.py
|
peaudecastor/checkov
|
a4804b61c1b1390b7abd44ab53285fcbc3e7e80b
|
[
"Apache-2.0"
] | null | null | null |
checkov/common/checks_infra/solvers/attribute_solvers/__init__.py
|
peaudecastor/checkov
|
a4804b61c1b1390b7abd44ab53285fcbc3e7e80b
|
[
"Apache-2.0"
] | null | null | null |
from checkov.common.checks_infra.solvers.attribute_solvers.any_attribute_solver import AnyResourceSolver # noqa
from checkov.common.checks_infra.solvers.attribute_solvers.contains_attribute_solver import ContainsAttributeSolver # noqa
from checkov.common.checks_infra.solvers.attribute_solvers.not_contains_attribute_solver import NotContainsAttributeSolver # noqa
from checkov.common.checks_infra.solvers.attribute_solvers.ending_with_attribute_solver import EndingWithAttributeSolver # noqa
from checkov.common.checks_infra.solvers.attribute_solvers.equals_attribute_solver import EqualsAttributeSolver # noqa
from checkov.common.checks_infra.solvers.attribute_solvers.regex_match_attribute_solver import RegexMatchAttributeSolver # noqa
from checkov.common.checks_infra.solvers.attribute_solvers.exists_attribute_solver import ExistsAttributeSolver # noqa
from checkov.common.checks_infra.solvers.attribute_solvers.not_ending_with_attribute_solver import NotEndingWithAttributeSolver # noqa
from checkov.common.checks_infra.solvers.attribute_solvers.not_equals_attribute_solver import NotEqualsAttributeSolver # noqa
from checkov.common.checks_infra.solvers.attribute_solvers.not_regex_match_attribute_solver import NotRegexMatchAttributeSolver # noqa
from checkov.common.checks_infra.solvers.attribute_solvers.not_exists_attribute_solver import NotExistsAttributeSolver # noqa
from checkov.common.checks_infra.solvers.attribute_solvers.not_starting_with_attribute_solver import NotStartingWithAttributeSolver # noqa
from checkov.common.checks_infra.solvers.attribute_solvers.starting_with_attribute_solver import StartingWithAttributeSolver # noqa
from checkov.common.checks_infra.solvers.attribute_solvers.within_attribute_solver import WithinAttributeSolver # noqa
from checkov.common.checks_infra.solvers.attribute_solvers.greater_than_attribute_solver import GreaterThanAttributeSolver # noqa
from checkov.common.checks_infra.solvers.attribute_solvers.greater_than_or_equal_attribute_solver import GreaterThanOrEqualAttributeSolver # noqa
from checkov.common.checks_infra.solvers.attribute_solvers.less_than_attribute_solver import LessThanAttributeSolver # noqa
from checkov.common.checks_infra.solvers.attribute_solvers.less_than_or_equal_attribute_solver import LessThanOrEqualAttributeSolver # noqa
from checkov.common.checks_infra.solvers.attribute_solvers.jsonpath_equals_attribute_solver import JsonpathEqualsAttributeSolver # noqa
from checkov.common.checks_infra.solvers.attribute_solvers.jsonpath_exists_attribute_solver import JsonpathExistsAttributeSolver # noqa
| 123.952381
| 146
| 0.900115
| 302
| 2,603
| 7.42053
| 0.155629
| 0.09817
| 0.151718
| 0.205266
| 0.630968
| 0.546185
| 0.521196
| 0.521196
| 0.498438
| 0.32664
| 0
| 0
| 0.053784
| 2,603
| 20
| 147
| 130.15
| 0.909866
| 0.038033
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ed3716a8c944c16472afa9f766e74a0e019a4ee6
| 17,916
|
py
|
Python
|
airflow/providers/google/cloud/hooks/workflows.py
|
karakanb/airflow
|
d0cea6d849ccf11e2b1e55d3280fcca59948eb53
|
[
"Apache-2.0"
] | 3
|
2020-12-25T04:09:44.000Z
|
2021-04-02T13:37:42.000Z
|
airflow/providers/google/cloud/hooks/workflows.py
|
karakanb/airflow
|
d0cea6d849ccf11e2b1e55d3280fcca59948eb53
|
[
"Apache-2.0"
] | 5
|
2021-06-16T11:41:36.000Z
|
2022-01-27T17:20:37.000Z
|
airflow/providers/google/cloud/hooks/workflows.py
|
karakanb/airflow
|
d0cea6d849ccf11e2b1e55d3280fcca59948eb53
|
[
"Apache-2.0"
] | 2
|
2021-12-28T22:46:07.000Z
|
2022-01-08T13:29:00.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Dict, Optional, Sequence, Tuple, Union
from google.api_core.operation import Operation
from google.api_core.retry import Retry
from google.cloud.workflows.executions_v1beta import Execution, ExecutionsClient
from google.cloud.workflows.executions_v1beta.services.executions.pagers import ListExecutionsPager
from google.cloud.workflows_v1beta import Workflow, WorkflowsClient
from google.cloud.workflows_v1beta.services.workflows.pagers import ListWorkflowsPager
from google.protobuf.field_mask_pb2 import FieldMask
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class WorkflowsHook(GoogleBaseHook):
"""
Hook for Google GCP APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
"""
def get_workflows_client(self) -> WorkflowsClient:
"""Returns WorkflowsClient."""
return WorkflowsClient(credentials=self._get_credentials(), client_info=self.client_info)
def get_executions_client(self) -> ExecutionsClient:
"""Returns ExecutionsClient."""
return ExecutionsClient(credentials=self._get_credentials(), client_info=self.client_info)
@GoogleBaseHook.fallback_to_default_project_id
def create_workflow(
self,
workflow: Dict,
workflow_id: str,
location: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> Operation:
"""
Creates a new workflow. If a workflow with the specified name
already exists in the specified project and location, the long
running operation will return
[ALREADY_EXISTS][google.rpc.Code.ALREADY_EXISTS] error.
:param workflow: Required. Workflow to be created.
:type workflow: Dict
:param workflow_id: Required. The ID of the workflow to be created.
:type workflow_id: str
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:type project_id: str
:param location: Required. The GCP region in which to handle the request.
:type location: str
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
metadata = metadata or ()
client = self.get_workflows_client()
parent = f"projects/{project_id}/locations/{location}"
return client.create_workflow(
request={"parent": parent, "workflow": workflow, "workflow_id": workflow_id},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def get_workflow(
self,
workflow_id: str,
location: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> Workflow:
"""
Gets details of a single Workflow.
:param workflow_id: Required. The ID of the workflow to be created.
:type workflow_id: str
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:type project_id: str
:param location: Required. The GCP region in which to handle the request.
:type location: str
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
metadata = metadata or ()
client = self.get_workflows_client()
name = f"projects/{project_id}/locations/{location}/workflows/{workflow_id}"
return client.get_workflow(request={"name": name}, retry=retry, timeout=timeout, metadata=metadata)
def update_workflow(
self,
workflow: Union[Dict, Workflow],
update_mask: Optional[FieldMask] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> Operation:
"""
Updates an existing workflow.
Running this method has no impact on already running
executions of the workflow. A new revision of the
workflow may be created as a result of a successful
update operation. In that case, such revision will be
used in new workflow executions.
:param workflow: Required. Workflow to be created.
:type workflow: Dict
:param update_mask: List of fields to be updated. If not present,
the entire workflow will be updated.
:type update_mask: FieldMask
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
metadata = metadata or ()
client = self.get_workflows_client()
return client.update_workflow(
request={"workflow": workflow, "update_mask": update_mask},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def delete_workflow(
self,
workflow_id: str,
location: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> Operation:
"""
Deletes a workflow with the specified name.
This method also cancels and deletes all running
executions of the workflow.
:param workflow_id: Required. The ID of the workflow to be created.
:type workflow_id: str
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:type project_id: str
:param location: Required. The GCP region in which to handle the request.
:type location: str
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
metadata = metadata or ()
client = self.get_workflows_client()
name = f"projects/{project_id}/locations/{location}/workflows/{workflow_id}"
return client.delete_workflow(request={"name": name}, retry=retry, timeout=timeout, metadata=metadata)
@GoogleBaseHook.fallback_to_default_project_id
def list_workflows(
self,
location: str,
project_id: str,
filter_: Optional[str] = None,
order_by: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> ListWorkflowsPager:
"""
Lists Workflows in a given project and location.
The default order is not specified.
:param filter_: Filter to restrict results to specific workflows.
:type filter_: str
:param order_by: Comma-separated list of fields that
specifies the order of the results. Default sorting order for a field is ascending.
To specify descending order for a field, append a "desc" suffix.
If not specified, the results will be returned in an unspecified order.
:type order_by: str
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:type project_id: str
:param location: Required. The GCP region in which to handle the request.
:type location: str
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
metadata = metadata or ()
client = self.get_workflows_client()
parent = f"projects/{project_id}/locations/{location}"
return client.list_workflows(
request={"parent": parent, "filter": filter_, "order_by": order_by},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def create_execution(
self,
workflow_id: str,
location: str,
project_id: str,
execution: Dict,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> Execution:
"""
Creates a new execution using the latest revision of
the given workflow.
:param execution: Required. Input parameters of the execution represented as a dictionary.
:type execution: Dict
:param workflow_id: Required. The ID of the workflow.
:type workflow_id: str
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:type project_id: str
:param location: Required. The GCP region in which to handle the request.
:type location: str
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
metadata = metadata or ()
client = self.get_executions_client()
parent = f"projects/{project_id}/locations/{location}/workflows/{workflow_id}"
return client.create_execution(
request={"parent": parent, "execution": execution},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def get_execution(
self,
workflow_id: str,
execution_id: str,
location: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> Execution:
"""
Returns an execution for the given ``workflow_id`` and ``execution_id``.
:param workflow_id: Required. The ID of the workflow.
:type workflow_id: str
:param execution_id: Required. The ID of the execution.
:type execution_id: str
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:type project_id: str
:param location: Required. The GCP region in which to handle the request.
:type location: str
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
metadata = metadata or ()
client = self.get_executions_client()
name = f"projects/{project_id}/locations/{location}/workflows/{workflow_id}/executions/{execution_id}"
return client.get_execution(request={"name": name}, retry=retry, timeout=timeout, metadata=metadata)
@GoogleBaseHook.fallback_to_default_project_id
def cancel_execution(
self,
workflow_id: str,
execution_id: str,
location: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> Execution:
"""
Cancels an execution using the given ``workflow_id`` and ``execution_id``.
:param workflow_id: Required. The ID of the workflow.
:type workflow_id: str
:param execution_id: Required. The ID of the execution.
:type execution_id: str
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:type project_id: str
:param location: Required. The GCP region in which to handle the request.
:type location: str
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
metadata = metadata or ()
client = self.get_executions_client()
name = f"projects/{project_id}/locations/{location}/workflows/{workflow_id}/executions/{execution_id}"
return client.cancel_execution(
request={"name": name}, retry=retry, timeout=timeout, metadata=metadata
)
@GoogleBaseHook.fallback_to_default_project_id
def list_executions(
self,
workflow_id: str,
location: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> ListExecutionsPager:
"""
Returns a list of executions which belong to the
workflow with the given name. The method returns
executions of all workflow revisions. Returned
executions are ordered by their start time (newest
first).
:param workflow_id: Required. The ID of the workflow to be created.
:type workflow_id: str
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:type project_id: str
:param location: Required. The GCP region in which to handle the request.
:type location: str
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
metadata = metadata or ()
client = self.get_executions_client()
parent = f"projects/{project_id}/locations/{location}/workflows/{workflow_id}"
return client.list_executions(
request={"parent": parent}, retry=retry, timeout=timeout, metadata=metadata
)
| 45.015075
| 110
| 0.657624
| 2,220
| 17,916
| 5.222523
| 0.109009
| 0.031827
| 0.02484
| 0.029498
| 0.736502
| 0.724081
| 0.708729
| 0.708729
| 0.703985
| 0.691392
| 0
| 0.000682
| 0.26334
| 17,916
| 397
| 111
| 45.128463
| 0.877785
| 0.525508
| 0
| 0.680723
| 0
| 0
| 0.090261
| 0.075859
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066265
| false
| 0
| 0.054217
| 0
| 0.192771
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ed39cda0ddf307d8b90e9cfcebe9c61a9ce66abd
| 65,536
|
py
|
Python
|
tests/common_test.py
|
portintegration/releasetool
|
7567bd29b3c13b14f6336cbdb11965c2d2b65a2e
|
[
"Apache-2.0"
] | null | null | null |
tests/common_test.py
|
portintegration/releasetool
|
7567bd29b3c13b14f6336cbdb11965c2d2b65a2e
|
[
"Apache-2.0"
] | null | null | null |
tests/common_test.py
|
portintegration/releasetool
|
7567bd29b3c13b14f6336cbdb11965c2d2b65a2e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pathlib
import pytest
import string
from autorelease import github
from autorelease.common import _determine_language, guess_language
if not os.environ.get("GITHUB_TOKEN"):
pytest.skip(
"skipping tests that require a valid github token", allow_module_level=True
)
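# Hypothetical invocation for running these live tests (the token value is a
# placeholder; without GITHUB_TOKEN the whole module is skipped above):
#   GITHUB_TOKEN=<personal-access-token> pytest tests/common_test.py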
def repo_name_to_test_name(repo_name: str) -> str:
letters = []
for letter in repo_name:
if letter in string.ascii_lowercase:
letters.append(letter)
elif letter in string.ascii_uppercase:
if letters:
letters.append("_")
letters.append(letter.lower())
else:
letters.append("_")
return "test_guess_" + "".join(letters)
def test_determine_language():
# _determine_language() is the old function that depends on sloth's repo.json.
# Use it to generate the test_guess_* functions below so we can confirm that
# guess_language() produces 100% the same output.
repos_json = (pathlib.Path(__file__).parent / "testdata" / "repos.json").read_text()
repos = json.loads(repos_json)["repos"]
python_tools_repo_names = [
"googleapis/releasetool",
"googleapis/synthtool",
"googleapis/docuploader",
]
repo_names = python_tools_repo_names + [repo["repo"] for repo in repos]
languages = set()
for name in repo_names:
language = _determine_language(lambda: repos_json, name)
languages.add((language, name))
for language, name in sorted(languages):
test_name = repo_name_to_test_name(name)
print(
f"""def {test_name}():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert {repr(language)} == guess_language(gh, {repr(name)})
"""
)
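# Hypothetical regeneration step (not part of the original file): run only this
# test with output capture disabled and paste the printed functions below, e.g.
#   pytest -s tests/common_test.py::test_determine_language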
def test_guess_google_cloud_platform_cpp_docs_samples():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "cpp" == guess_language(gh, "GoogleCloudPlatform/cpp-docs-samples")
def test_guess_googleapis_google_cloud_cpp():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "cpp" == guess_language(gh, "googleapis/google-cloud-cpp")
def test_guess_google_cloud_platform_cloud_code_samples():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "dotnet" == guess_language(gh, "GoogleCloudPlatform/cloud-code-samples")
def test_guess_google_cloud_platform_dotnet_docs_samples():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "dotnet" == guess_language(gh, "GoogleCloudPlatform/dotnet-docs-samples")
def test_guess_google_cloud_platform_getting_started_dotnet():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "dotnet" == guess_language(gh, "GoogleCloudPlatform/getting-started-dotnet")
def test_guess_google_cloud_platform_stackdriver_sandbox():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "dotnet" == guess_language(gh, "GoogleCloudPlatform/stackdriver-sandbox")
def test_guess_googleapis_gapic_generator_csharp():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "dotnet" == guess_language(gh, "googleapis/gapic-generator-csharp")
def test_guess_googleapis_gax_dotnet():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "dotnet" == guess_language(gh, "googleapis/gax-dotnet")
def test_guess_googleapis_google_api_dotnet_client():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "dotnet" == guess_language(gh, "googleapis/google-api-dotnet-client")
def test_guess_googleapis_google_cloud_dotnet():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "dotnet" == guess_language(gh, "googleapis/google-cloud-dotnet")
def test_guess_google_cloud_platform_elixir_samples():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "elixir" == guess_language(gh, "GoogleCloudPlatform/elixir-samples")
def test_guess_googleapis_elixir_google_api():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "elixir" == guess_language(gh, "googleapis/elixir-google-api")
def test_guess_google_cloud_platform_golang_samples():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "go" == guess_language(gh, "GoogleCloudPlatform/golang-samples")
def test_guess_googleapis_gapic_generator_go():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "go" == guess_language(gh, "googleapis/gapic-generator-go")
def test_guess_googleapis_gax_go():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "go" == guess_language(gh, "googleapis/gax-go")
def test_guess_googleapis_go_genproto():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "go" == guess_language(gh, "googleapis/go-genproto")
def test_guess_googleapis_google_api_go_client():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "go" == guess_language(gh, "googleapis/google-api-go-client")
def test_guess_googleapis_google_cloud_go():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "go" == guess_language(gh, "googleapis/google-cloud-go")
def test_guess_googleapis_google_cloud_go_testing():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "go" == guess_language(gh, "googleapis/google-cloud-go-testing")
def test_guess_google_cloud_platform_getting_started_java():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "GoogleCloudPlatform/getting-started-java")
def test_guess_google_cloud_platform_java_docs_samples():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "GoogleCloudPlatform/java-docs-samples")
def test_guess_googleapis_api_common_java():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/api-common-java")
def test_guess_googleapis_gapic_generator():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/gapic-generator")
def test_guess_googleapis_gax_java():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/gax-java")
def test_guess_googleapis_google_api_java_client():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/google-api-java-client")
def test_guess_googleapis_google_api_java_client_services():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/google-api-java-client-services")
def test_guess_googleapis_google_auth_library_java():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/google-auth-library-java")
def test_guess_googleapis_google_cloud_java():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/google-cloud-java")
def test_guess_googleapis_google_http_java_client():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/google-http-java-client")
def test_guess_googleapis_google_oauth_java_client():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/google-oauth-java-client")
def test_guess_googleapis_java_accessapproval():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-accessapproval")
def test_guess_googleapis_java_accesscontextmanager():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-accesscontextmanager")
def test_guess_googleapis_java_analytics_admin():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-analytics-admin")
def test_guess_googleapis_java_asset():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-asset")
def test_guess_googleapis_java_automl():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-automl")
def test_guess_googleapis_java_bigquery():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-bigquery")
def test_guess_googleapis_java_bigqueryconnection():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-bigqueryconnection")
def test_guess_googleapis_java_bigquerydatatransfer():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-bigquerydatatransfer")
def test_guess_googleapis_java_bigqueryreservation():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-bigqueryreservation")
def test_guess_googleapis_java_bigquerystorage():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-bigquerystorage")
def test_guess_googleapis_java_bigtable():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-bigtable")
def test_guess_googleapis_java_bigtable_emulator():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-bigtable-emulator")
def test_guess_googleapis_java_bigtable_hbase():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-bigtable-hbase")
def test_guess_googleapis_java_billing():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-billing")
def test_guess_googleapis_java_billingbudgets():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-billingbudgets")
def test_guess_googleapis_java_cloud_bom():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-cloud-bom")
def test_guess_googleapis_java_cloudbuild():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-cloudbuild")
def test_guess_googleapis_java_common_protos():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-common-protos")
def test_guess_googleapis_java_compute():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-compute")
def test_guess_googleapis_java_conformance_tests():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-conformance-tests")
def test_guess_googleapis_java_container():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-container")
def test_guess_googleapis_java_containeranalysis():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-containeranalysis")
def test_guess_googleapis_java_core():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-core")
def test_guess_googleapis_java_datacatalog():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-datacatalog")
def test_guess_googleapis_java_datalabeling():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-datalabeling")
def test_guess_googleapis_java_dataproc():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-dataproc")
def test_guess_googleapis_java_datastore():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-datastore")
def test_guess_googleapis_java_dialogflow():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-dialogflow")
def test_guess_googleapis_java_dlp():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-dlp")
def test_guess_googleapis_java_dns():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-dns")
def test_guess_googleapis_java_document_ai():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-document-ai")
def test_guess_googleapis_java_errorreporting():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-errorreporting")
def test_guess_googleapis_java_firestore():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-firestore")
def test_guess_googleapis_java_functions():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-functions")
def test_guess_googleapis_java_game_servers():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-game-servers")
def test_guess_googleapis_java_gcloud_maven_plugin():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-gcloud-maven-plugin")
def test_guess_googleapis_java_grafeas():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-grafeas")
def test_guess_googleapis_java_iam():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-iam")
def test_guess_googleapis_java_iamcredentials():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-iamcredentials")
def test_guess_googleapis_java_iot():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-iot")
def test_guess_googleapis_java_irm():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-irm")
def test_guess_googleapis_java_kms():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-kms")
def test_guess_googleapis_java_language():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-language")
def test_guess_googleapis_java_logging():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-logging")
def test_guess_googleapis_java_logging_logback():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-logging-logback")
def test_guess_googleapis_java_mediatranslation():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-mediatranslation")
def test_guess_googleapis_java_memcache():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-memcache")
def test_guess_googleapis_java_monitoring():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-monitoring")
def test_guess_googleapis_java_monitoring_dashboards():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-monitoring-dashboards")
def test_guess_googleapis_java_notebooks():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-notebooks")
def test_guess_googleapis_java_notification():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-notification")
def test_guess_googleapis_java_orgpolicy():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-orgpolicy")
def test_guess_googleapis_java_os_config():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-os-config")
def test_guess_googleapis_java_os_login():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-os-login")
def test_guess_googleapis_java_phishingprotection():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-phishingprotection")
def test_guess_googleapis_java_pubsub():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-pubsub")
def test_guess_googleapis_java_pubsublite():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-pubsublite")
def test_guess_googleapis_java_recaptchaenterprise():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-recaptchaenterprise")
def test_guess_googleapis_java_recommendations_ai():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-recommendations-ai")
def test_guess_googleapis_java_recommender():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-recommender")
def test_guess_googleapis_java_redis():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-redis")
def test_guess_googleapis_java_resourcemanager():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-resourcemanager")
def test_guess_googleapis_java_scheduler():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-scheduler")
def test_guess_googleapis_java_secretmanager():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-secretmanager")
def test_guess_googleapis_java_securitycenter():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-securitycenter")
def test_guess_googleapis_java_securitycenter_settings():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-securitycenter-settings")
def test_guess_googleapis_java_servicedirectory():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-servicedirectory")
def test_guess_googleapis_java_shared_config():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-shared-config")
def test_guess_googleapis_java_shared_dependencies():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-shared-dependencies")
def test_guess_googleapis_java_spanner():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-spanner")
def test_guess_googleapis_java_spanner_jdbc():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-spanner-jdbc")
def test_guess_googleapis_java_speech():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-speech")
def test_guess_googleapis_java_storage():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-storage")
def test_guess_googleapis_java_storage_nio():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-storage-nio")
def test_guess_googleapis_java_talent():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-talent")
def test_guess_googleapis_java_tasks():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-tasks")
def test_guess_googleapis_java_texttospeech():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-texttospeech")
def test_guess_googleapis_java_trace():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-trace")
def test_guess_googleapis_java_translate():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-translate")
def test_guess_googleapis_java_video_intelligence():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-video-intelligence")
def test_guess_googleapis_java_vision():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-vision")
def test_guess_googleapis_java_webrisk():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-webrisk")
def test_guess_googleapis_java_websecurityscanner():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "java" == guess_language(gh, "googleapis/java-websecurityscanner")
def test_guess_google_cloud_platform_nodejs_docs_samples():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "GoogleCloudPlatform/nodejs-docs-samples")
def test_guess_google_cloud_platform_nodejs_getting_started():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "GoogleCloudPlatform/nodejs-getting-started")
def test_guess_googleapis_cloud_debug_nodejs():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/cloud-debug-nodejs")
def test_guess_googleapis_cloud_profiler_nodejs():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/cloud-profiler-nodejs")
def test_guess_googleapis_cloud_trace_nodejs():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/cloud-trace-nodejs")
def test_guess_googleapis_code_suggester():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/code-suggester")
def test_guess_googleapis_gapic_generator_typescript():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/gapic-generator-typescript")
def test_guess_googleapis_gax_nodejs():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/gax-nodejs")
def test_guess_googleapis_gaxios():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/gaxios")
def test_guess_googleapis_gcp_metadata():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/gcp-metadata")
def test_guess_googleapis_gcs_resumable_upload():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/gcs-resumable-upload")
def test_guess_googleapis_github_repo_automation():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/github-repo-automation")
def test_guess_googleapis_google_api_nodejs_client():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/google-api-nodejs-client")
def test_guess_googleapis_google_auth_library_nodejs():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/google-auth-library-nodejs")
def test_guess_googleapis_google_cloud_node():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/google-cloud-node")
def test_guess_googleapis_google_p___pem():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/google-p12-pem")
def test_guess_googleapis_jsdoc_fresh():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/jsdoc-fresh")
def test_guess_googleapis_jsdoc_region_tag():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/jsdoc-region-tag")
def test_guess_googleapis_node_gtoken():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/node-gtoken")
def test_guess_googleapis_nodejs_analytics_admin():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-analytics-admin")
def test_guess_googleapis_nodejs_asset():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-asset")
def test_guess_googleapis_nodejs_automl():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-automl")
def test_guess_googleapis_nodejs_bigquery():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-bigquery")
def test_guess_googleapis_nodejs_bigquery_connection():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-bigquery-connection")
def test_guess_googleapis_nodejs_bigquery_data_transfer():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-bigquery-data-transfer")
def test_guess_googleapis_nodejs_bigquery_reservation():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-bigquery-reservation")
def test_guess_googleapis_nodejs_bigquery_storage():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-bigquery-storage")
def test_guess_googleapis_nodejs_bigtable():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-bigtable")
def test_guess_googleapis_nodejs_billing():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-billing")
def test_guess_googleapis_nodejs_billing_budgets():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-billing-budgets")
def test_guess_googleapis_nodejs_cloud_container():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-cloud-container")
def test_guess_googleapis_nodejs_cloudbuild():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-cloudbuild")
def test_guess_googleapis_nodejs_common():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-common")
def test_guess_googleapis_nodejs_compute():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-compute")
def test_guess_googleapis_nodejs_containeranalysis():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-containeranalysis")
def test_guess_googleapis_nodejs_datacatalog():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-datacatalog")
def test_guess_googleapis_nodejs_datalabeling():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-datalabeling")
def test_guess_googleapis_nodejs_dataproc():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-dataproc")
def test_guess_googleapis_nodejs_datastore():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-datastore")
def test_guess_googleapis_nodejs_datastore_kvstore():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-datastore-kvstore")
def test_guess_googleapis_nodejs_datastore_session():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-datastore-session")
def test_guess_googleapis_nodejs_dialogflow():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-dialogflow")
def test_guess_googleapis_nodejs_dlp():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-dlp")
def test_guess_googleapis_nodejs_dns():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-dns")
def test_guess_googleapis_nodejs_document_ai():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-document-ai")
def test_guess_googleapis_nodejs_error_reporting():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-error-reporting")
def test_guess_googleapis_nodejs_firestore():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-firestore")
def test_guess_googleapis_nodejs_firestore_session():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-firestore-session")
def test_guess_googleapis_nodejs_functions():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-functions")
def test_guess_googleapis_nodejs_game_servers():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-game-servers")
def test_guess_googleapis_nodejs_gce_images():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-gce-images")
def test_guess_googleapis_nodejs_googleapis_common():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-googleapis-common")
def test_guess_googleapis_nodejs_grafeas():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-grafeas")
def test_guess_googleapis_nodejs_iot():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-iot")
def test_guess_googleapis_nodejs_irm():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-irm")
def test_guess_googleapis_nodejs_kms():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-kms")
def test_guess_googleapis_nodejs_language():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-language")
def test_guess_googleapis_nodejs_local_auth():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-local-auth")
def test_guess_googleapis_nodejs_logging():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-logging")
def test_guess_googleapis_nodejs_logging_bunyan():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-logging-bunyan")
def test_guess_googleapis_nodejs_logging_winston():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-logging-winston")
def test_guess_googleapis_nodejs_media_translation():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-media-translation")
def test_guess_googleapis_nodejs_memcache():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-memcache")
def test_guess_googleapis_nodejs_monitoring():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-monitoring")
def test_guess_googleapis_nodejs_monitoring_dashboards():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-monitoring-dashboards")
def test_guess_googleapis_nodejs_os_config():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-os-config")
def test_guess_googleapis_nodejs_os_login():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-os-login")
def test_guess_googleapis_nodejs_paginator():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-paginator")
def test_guess_googleapis_nodejs_phishing_protection():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-phishing-protection")
def test_guess_googleapis_nodejs_precise_date():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-precise-date")
def test_guess_googleapis_nodejs_projectify():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-projectify")
def test_guess_googleapis_nodejs_promisify():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-promisify")
def test_guess_googleapis_nodejs_proto_files():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-proto-files")
def test_guess_googleapis_nodejs_pubsub():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-pubsub")
def test_guess_googleapis_nodejs_rcloadenv():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-rcloadenv")
def test_guess_googleapis_nodejs_recaptcha_enterprise():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-recaptcha-enterprise")
def test_guess_googleapis_nodejs_recommender():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-recommender")
def test_guess_googleapis_nodejs_redis():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-redis")
def test_guess_googleapis_nodejs_resource():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-resource")
def test_guess_googleapis_nodejs_scheduler():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-scheduler")
def test_guess_googleapis_nodejs_secret_manager():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-secret-manager")
def test_guess_googleapis_nodejs_security_center():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-security-center")
def test_guess_googleapis_nodejs_service_directory():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-service-directory")
def test_guess_googleapis_nodejs_spanner():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-spanner")
def test_guess_googleapis_nodejs_speech():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-speech")
def test_guess_googleapis_nodejs_storage():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-storage")
def test_guess_googleapis_nodejs_talent():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-talent")
def test_guess_googleapis_nodejs_tasks():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-tasks")
def test_guess_googleapis_nodejs_text_to_speech():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-text-to-speech")
def test_guess_googleapis_nodejs_translate():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-translate")
def test_guess_googleapis_nodejs_video_intelligence():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-video-intelligence")
def test_guess_googleapis_nodejs_vision():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-vision")
def test_guess_googleapis_nodejs_web_risk():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/nodejs-web-risk")
def test_guess_googleapis_release_please():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/release-please")
def test_guess_googleapis_repo_automation_bots():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/repo-automation-bots")
def test_guess_googleapis_sloth():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/sloth")
def test_guess_googleapis_teeny_request():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "nodejs" == guess_language(gh, "googleapis/teeny-request")
def test_guess_google_cloud_platform_getting_started_php():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "GoogleCloudPlatform/getting-started-php")
def test_guess_google_cloud_platform_php_docs_samples():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "GoogleCloudPlatform/php-docs-samples")
def test_guess_googleapis_gax_php():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/gax-php")
def test_guess_googleapis_google_api_php_client():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-api-php-client")
def test_guess_googleapis_google_api_php_client_services():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-api-php-client-services")
def test_guess_googleapis_google_auth_library_php():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-auth-library-php")
def test_guess_googleapis_google_cloud_php():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php")
def test_guess_googleapis_google_cloud_php_asset():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-asset")
def test_guess_googleapis_google_cloud_php_automl():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-automl")
def test_guess_googleapis_google_cloud_php_bigquery():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-bigquery")
def test_guess_googleapis_google_cloud_php_bigquerydatatransfer():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(
gh, "googleapis/google-cloud-php-bigquerydatatransfer"
)
def test_guess_googleapis_google_cloud_php_bigtable():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-bigtable")
def test_guess_googleapis_google_cloud_php_common_protos():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-common-protos")
def test_guess_googleapis_google_cloud_php_container():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-container")
def test_guess_googleapis_google_cloud_php_core():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-core")
def test_guess_googleapis_google_cloud_php_dataproc():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-dataproc")
def test_guess_googleapis_google_cloud_php_datastore():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-datastore")
def test_guess_googleapis_google_cloud_php_debugger():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-debugger")
def test_guess_googleapis_google_cloud_php_dialogflow():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-dialogflow")
def test_guess_googleapis_google_cloud_php_dlp():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-dlp")
def test_guess_googleapis_google_cloud_php_errorreporting():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-errorreporting")
def test_guess_googleapis_google_cloud_php_firestore():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-firestore")
def test_guess_googleapis_google_cloud_php_iot():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-iot")
def test_guess_googleapis_google_cloud_php_irm():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-irm")
def test_guess_googleapis_google_cloud_php_kms():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-kms")
def test_guess_googleapis_google_cloud_php_language():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-language")
def test_guess_googleapis_google_cloud_php_logging():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-logging")
def test_guess_googleapis_google_cloud_php_monitoring():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-monitoring")
def test_guess_googleapis_google_cloud_php_oslogin():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-oslogin")
def test_guess_googleapis_google_cloud_php_pubsub():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-pubsub")
def test_guess_googleapis_google_cloud_php_recommender():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-recommender")
def test_guess_googleapis_google_cloud_php_redis():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-redis")
def test_guess_googleapis_google_cloud_php_scheduler():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-scheduler")
def test_guess_googleapis_google_cloud_php_secret_manager():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-secret-manager")
def test_guess_googleapis_google_cloud_php_security_center():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-security-center")
def test_guess_googleapis_google_cloud_php_spanner():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-spanner")
def test_guess_googleapis_google_cloud_php_speech():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-speech")
def test_guess_googleapis_google_cloud_php_storage():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-storage")
def test_guess_googleapis_google_cloud_php_talent():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-talent")
def test_guess_googleapis_google_cloud_php_tasks():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-tasks")
def test_guess_googleapis_google_cloud_php_text_to_speech():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-text-to-speech")
def test_guess_googleapis_google_cloud_php_trace():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-trace")
def test_guess_googleapis_google_cloud_php_translate():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-translate")
def test_guess_googleapis_google_cloud_php_videointelligence():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-videointelligence")
def test_guess_googleapis_google_cloud_php_vision():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-vision")
def test_guess_googleapis_google_cloud_php_web_risk():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(gh, "googleapis/google-cloud-php-web-risk")
def test_guess_googleapis_google_cloud_php_web_security_scanner():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "php" == guess_language(
gh, "googleapis/google-cloud-php-web-security-scanner"
)
def test_guess_google_cloud_platform_getting_started_python():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "GoogleCloudPlatform/getting-started-python")
def test_guess_google_cloud_platform_python_docs_samples():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "GoogleCloudPlatform/python-docs-samples")
def test_guess_googleapis_dialogflow_python_client_v_():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/dialogflow-python-client-v2")
def test_guess_googleapis_doc_pipeline():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/doc-pipeline")
def test_guess_googleapis_doc_templates():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/doc-templates")
def test_guess_googleapis_gapic_generator_python():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/gapic-generator-python")
def test_guess_googleapis_google_api_python_client():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/google-api-python-client")
def test_guess_googleapis_google_auth_library_python():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/google-auth-library-python")
def test_guess_googleapis_google_auth_library_python_oauthlib():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(
gh, "googleapis/google-auth-library-python-oauthlib"
)
def test_guess_googleapis_google_cloud_python():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/google-cloud-python")
def test_guess_googleapis_google_cloud_python_happybase():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/google-cloud-python-happybase")
def test_guess_googleapis_google_resumable_media_python():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/google-resumable-media-python")
def test_guess_googleapis_proto_plus_python():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/proto-plus-python")
def test_guess_googleapis_python_access_approval():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-access-approval")
def test_guess_googleapis_python_access_context_manager():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-access-context-manager")
def test_guess_googleapis_python_analytics_admin():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-analytics-admin")
def test_guess_googleapis_python_api_common_protos():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-api-common-protos")
def test_guess_googleapis_python_api_core():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-api-core")
def test_guess_googleapis_python_asset():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-asset")
def test_guess_googleapis_python_audit_log():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-audit-log")
def test_guess_googleapis_python_automl():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-automl")
def test_guess_googleapis_python_bigquery():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-bigquery")
def test_guess_googleapis_python_bigquery_connection():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-bigquery-connection")
def test_guess_googleapis_python_bigquery_datatransfer():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-bigquery-datatransfer")
def test_guess_googleapis_python_bigquery_reservation():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-bigquery-reservation")
def test_guess_googleapis_python_bigquery_storage():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-bigquery-storage")
def test_guess_googleapis_python_bigtable():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-bigtable")
def test_guess_googleapis_python_billing():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-billing")
def test_guess_googleapis_python_billingbudgets():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-billingbudgets")
def test_guess_googleapis_python_cloud_core():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-cloud-core")
def test_guess_googleapis_python_cloudbuild():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-cloudbuild")
def test_guess_googleapis_python_container():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-container")
def test_guess_googleapis_python_containeranalysis():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-containeranalysis")
def test_guess_googleapis_python_crc__c():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-crc32c")
def test_guess_googleapis_python_datacatalog():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-datacatalog")
def test_guess_googleapis_python_datalabeling():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-datalabeling")
def test_guess_googleapis_python_dataproc():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-dataproc")
def test_guess_googleapis_python_datastore():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-datastore")
def test_guess_googleapis_python_dlp():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-dlp")
def test_guess_googleapis_python_dns():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-dns")
def test_guess_googleapis_python_documentai():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-documentai")
def test_guess_googleapis_python_error_reporting():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-error-reporting")
def test_guess_googleapis_python_firestore():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-firestore")
def test_guess_googleapis_python_functions():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-functions")
def test_guess_googleapis_python_game_servers():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-game-servers")
def test_guess_googleapis_python_grafeas():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-grafeas")
def test_guess_googleapis_python_iam():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-iam")
def test_guess_googleapis_python_iot():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-iot")
def test_guess_googleapis_python_kms():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-kms")
def test_guess_googleapis_python_language():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-language")
def test_guess_googleapis_python_logging():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-logging")
def test_guess_googleapis_python_media_translation():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-media-translation")
def test_guess_googleapis_python_memcache():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-memcache")
def test_guess_googleapis_python_monitoring():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-monitoring")
def test_guess_googleapis_python_monitoring_dashboards():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-monitoring-dashboards")
def test_guess_googleapis_python_ndb():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-ndb")
def test_guess_googleapis_python_notebooks():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-notebooks")
def test_guess_googleapis_python_org_policy():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-org-policy")
def test_guess_googleapis_python_os_config():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-os-config")
def test_guess_googleapis_python_oslogin():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-oslogin")
def test_guess_googleapis_python_phishingprotection():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-phishingprotection")
def test_guess_googleapis_python_pubsub():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-pubsub")
def test_guess_googleapis_python_recaptcha_enterprise():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-recaptcha-enterprise")
def test_guess_googleapis_python_recommendations_ai():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-recommendations-ai")
def test_guess_googleapis_python_recommender():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-recommender")
def test_guess_googleapis_python_redis():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-redis")
def test_guess_googleapis_python_resource_manager():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-resource-manager")
def test_guess_googleapis_python_runtimeconfig():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-runtimeconfig")
def test_guess_googleapis_python_scheduler():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-scheduler")
def test_guess_googleapis_python_secret_manager():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-secret-manager")
def test_guess_googleapis_python_securitycenter():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-securitycenter")
def test_guess_googleapis_python_service_directory():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-service-directory")
def test_guess_googleapis_python_spanner():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-spanner")
def test_guess_googleapis_python_spanner_django():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-spanner-django")
def test_guess_googleapis_python_speech():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-speech")
def test_guess_googleapis_python_storage():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-storage")
def test_guess_googleapis_python_talent():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-talent")
def test_guess_googleapis_python_tasks():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-tasks")
def test_guess_googleapis_python_test_utils():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-test-utils")
def test_guess_googleapis_python_texttospeech():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-texttospeech")
def test_guess_googleapis_python_trace():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-trace")
def test_guess_googleapis_python_translate():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-translate")
def test_guess_googleapis_python_videointelligence():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-videointelligence")
def test_guess_googleapis_python_vision():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-vision")
def test_guess_googleapis_python_webrisk():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-webrisk")
def test_guess_googleapis_python_websecurityscanner():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/python-websecurityscanner")
def test_guess_googleapis_sample_tester():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/sample-tester")
def test_guess_googleapis_docuploader():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python_tool" == guess_language(gh, "googleapis/docuploader")
def test_guess_googleapis_releasetool():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python" == guess_language(gh, "googleapis/releasetool")
def test_guess_googleapis_synthtool():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "python_tool" == guess_language(gh, "googleapis/synthtool")
def test_guess_google_cloud_platform_getting_started_ruby():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "ruby" == guess_language(gh, "GoogleCloudPlatform/getting-started-ruby")
def test_guess_google_cloud_platform_ruby_docs_samples():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "ruby" == guess_language(gh, "GoogleCloudPlatform/ruby-docs-samples")
def test_guess_googleapis_discovery_artifact_manager():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "ruby" == guess_language(gh, "googleapis/discovery-artifact-manager")
def test_guess_googleapis_gapic_generator_ruby():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "ruby" == guess_language(gh, "googleapis/gapic-generator-ruby")
def test_guess_googleapis_gax_ruby():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "ruby" == guess_language(gh, "googleapis/gax-ruby")
def test_guess_googleapis_google_api_ruby_client():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "ruby" == guess_language(gh, "googleapis/google-api-ruby-client")
def test_guess_googleapis_google_auth_library_ruby():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "ruby" == guess_language(gh, "googleapis/google-auth-library-ruby")
def test_guess_googleapis_google_cloud_ruby():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "ruby" == guess_language(gh, "googleapis/google-cloud-ruby")
def test_guess_googleapis_ruby_spanner_activerecord():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "ruby" == guess_language(gh, "googleapis/ruby-spanner-activerecord")
def test_guess_googleapis_ruby_style():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "ruby" == guess_language(gh, "googleapis/ruby-style")
def test_guess_googleapis_signet():
gh = github.GitHub(os.environ["GITHUB_TOKEN"])
assert "ruby" == guess_language(gh, "googleapis/signet")
| 35.083512
| 88
| 0.749573
| 8,359
| 65,536
| 5.579376
| 0.043067
| 0.077834
| 0.107766
| 0.123161
| 0.919186
| 0.793023
| 0.70063
| 0.663064
| 0.64456
| 0.613448
| 0
| 0.000277
| 0.117706
| 65,536
| 1,867
| 89
| 35.102303
| 0.806302
| 0.011063
| 0
| 0.322954
| 0
| 0
| 0.262478
| 0.161234
| 0
| 0
| 0
| 0
| 0.319395
| 1
| 0.320285
| false
| 0
| 0.006228
| 0
| 0.327402
| 0.00089
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ed3ac0ef62156fcc283dbd0cfd5dcd1737acefad
| 481
|
py
|
Python
|
src/model/loss/meanSquaredErrorLoss.py
|
dhruvtapasvi/implementation
|
964980f431517f4548a87172a05107cdf700fb84
|
[
"MIT"
] | 1
|
2020-05-25T10:24:58.000Z
|
2020-05-25T10:24:58.000Z
|
src/model/loss/meanSquaredErrorLoss.py
|
dhruvtapasvi/implementation
|
964980f431517f4548a87172a05107cdf700fb84
|
[
"MIT"
] | 1
|
2017-12-18T02:16:44.000Z
|
2017-12-18T02:16:44.000Z
|
src/model/loss/meanSquaredErrorLoss.py
|
dhruvtapasvi/implementation
|
964980f431517f4548a87172a05107cdf700fb84
|
[
"MIT"
] | null | null | null |
import numpy as np
from keras.metrics import mean_squared_error
from keras.backend import mean, flatten
def meanSquaredErrorLossConstructor(inputRepresentationDimensions):
totalNumberOfPixels = np.prod(inputRepresentationDimensions)
def meanSquaredErrorLoss(inputRepresentation, decodedInputRepresentation):
return mean(totalNumberOfPixels * mean_squared_error(flatten(inputRepresentation), flatten(decodedInputRepresentation)))
return meanSquaredErrorLoss
| 37
| 128
| 0.841996
| 40
| 481
| 10.025
| 0.525
| 0.044888
| 0.079801
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 481
| 12
| 129
| 40.083333
| 0.934732
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.375
| 0.125
| 0.875
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
ed7854d2edcf05bffebe01376cae37ce5a50ef9a
| 202
|
py
|
Python
|
FbPy/config.py
|
voidabhi/python-scripts
|
a6d06bd3ccf4ec24df521a3cf305d22176f68a18
|
[
"MIT"
] | 2
|
2015-06-01T18:33:38.000Z
|
2018-11-21T19:40:37.000Z
|
FbPy/config.py
|
voidabhi/python-scripts
|
a6d06bd3ccf4ec24df521a3cf305d22176f68a18
|
[
"MIT"
] | 102
|
2015-01-20T17:26:52.000Z
|
2017-12-28T17:32:51.000Z
|
FbPy/config.py
|
voidabhi/python-scripts
|
a6d06bd3ccf4ec24df521a3cf305d22176f68a18
|
[
"MIT"
] | 3
|
2020-03-02T06:54:18.000Z
|
2021-01-07T16:36:35.000Z
|
import ConfigParser
config = ConfigParser.ConfigParser()
config.read("settings.cfg")
username = config.get("auth","username")
password = config.get("auth","password")
print username
print password
| 22.444444
| 41
| 0.757426
| 23
| 202
| 6.652174
| 0.478261
| 0.235294
| 0.169935
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10396
| 202
| 9
| 42
| 22.444444
| 0.845304
| 0
| 0
| 0
| 0
| 0
| 0.178218
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.285714
| 0.142857
| null | null | 0.285714
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
71e848681a87e5d42914c71ec34509fb6c827458
| 2,695
|
py
|
Python
|
runtime/components/Time_length_operations/test_linear_interpolation_datetime_index.py
|
ulise/hetida-designer
|
a6be8eb45abf950d5498e3ca756ea1d2e46b5c00
|
[
"MIT"
] | 41
|
2020-11-18T10:12:29.000Z
|
2022-03-28T21:46:41.000Z
|
runtime/components/Time_length_operations/test_linear_interpolation_datetime_index.py
|
ulise/hetida-designer
|
a6be8eb45abf950d5498e3ca756ea1d2e46b5c00
|
[
"MIT"
] | 4
|
2020-12-08T15:28:15.000Z
|
2022-02-01T11:40:17.000Z
|
runtime/components/Time_length_operations/test_linear_interpolation_datetime_index.py
|
ulise/hetida-designer
|
a6be8eb45abf950d5498e3ca756ea1d2e46b5c00
|
[
"MIT"
] | 14
|
2020-11-18T11:39:17.000Z
|
2022-03-21T15:05:11.000Z
|
import pandas as pd
import numpy as np
import pytest
from .linear_interpolation_datetime_index import main
def test_date():
pd.testing.assert_series_equal(
main(
data=pd.Series(
{
"2019-08-01T15:20:10": 0.0,
"2019-08-01T15:20:11": 1.0,
"2019-08-01T15:20:14": 18.0,
"2019-08-01T15:20:16": 2.0,
}
),
t="s",
)["interpolation"],
pd.Series(
[0.000, 1.000, 6.666, 12.333, 18.000, 10.000, 2.000],
index=pd.to_datetime(
[
"2019-08-01T15:20:10",
"2019-08-01T15:20:11",
"2019-08-01T15:20:12",
"2019-08-01T15:20:13",
"2019-08-01T15:20:14",
"2019-08-01T15:20:15",
"2019-08-01T15:20:16",
]
),
).asfreq("s"),
atol=1e-3,
)
def test_date_unsorted():
pd.testing.assert_series_equal(
main(
data=pd.Series(
{
"2019-08-01T15:20:10": 0.0,
"2019-08-01T15:20:14": 18.0,
"2019-08-01T15:20:16": 2.0,
"2019-08-01T15:20:11": 1.0,
}
),
t="s",
)["interpolation"],
pd.Series(
[0.000, 1.000, 6.666, 12.333, 18.000, 10.000, 2.000],
index=pd.to_datetime(
[
"2019-08-01T15:20:10",
"2019-08-01T15:20:11",
"2019-08-01T15:20:12",
"2019-08-01T15:20:13",
"2019-08-01T15:20:14",
"2019-08-01T15:20:15",
"2019-08-01T15:20:16",
]
),
).asfreq("s"),
atol=1e-3,
)
def test_series_empty():
assert main(data=pd.Series(dtype=float), t="h")["interpolation"].empty
def test_index_string():
with pytest.raises(TypeError, match="indices of data must be datetime"):
assert main(
data=pd.Series(
{"test": 0.3, "hello": 1.7, "2019-08-01T15:20:25.113Z": -0.3}
),
t="d",
)
def test_wrong_t():
with pytest.raises(ValueError, match="t could not be parsed as frequency: hello"):
assert main(
data=pd.Series(
{
"2019-08-01T15:20:00": 0.3,
"2019-08-01T15:20:11": 1.7,
"2019-08-01T15:20:25": -0.3,
}
),
t="hello",
)
| 28.072917
| 86
| 0.407421
| 320
| 2,695
| 3.375
| 0.228125
| 0.144444
| 0.264815
| 0.312963
| 0.688889
| 0.642593
| 0.628704
| 0.597222
| 0.552778
| 0.552778
| 0
| 0.307641
| 0.441558
| 2,695
| 95
| 87
| 28.368421
| 0.409967
| 0
| 0
| 0.619048
| 0
| 0
| 0.234224
| 0.008909
| 0
| 0
| 0
| 0
| 0.059524
| 1
| 0.059524
| true
| 0
| 0.047619
| 0
| 0.107143
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
71f40b9f067cd5d8127d7c80325413958a717a8f
| 188
|
py
|
Python
|
panoptic/panoptic/doctype/city/test_city.py
|
scmmishra/panoptic
|
bbb5b78aed9405afcb2b6e36a32d19b0dc4e8a6a
|
[
"MIT"
] | 1
|
2020-11-25T07:56:43.000Z
|
2020-11-25T07:56:43.000Z
|
panoptic/panoptic/doctype/city/test_city.py
|
scmmishra/panoptic
|
bbb5b78aed9405afcb2b6e36a32d19b0dc4e8a6a
|
[
"MIT"
] | null | null | null |
panoptic/panoptic/doctype/city/test_city.py
|
scmmishra/panoptic
|
bbb5b78aed9405afcb2b6e36a32d19b0dc4e8a6a
|
[
"MIT"
] | 1
|
2020-11-25T07:56:45.000Z
|
2020-11-25T07:56:45.000Z
|
# Copyright (c) 2022, Internet Freedom Foundation and Contributors
# See license.txt
# import frappe
from frappe.tests.utils import FrappeTestCase
class TestCity(FrappeTestCase):
pass
| 18.8
| 66
| 0.797872
| 23
| 188
| 6.521739
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024691
| 0.138298
| 188
| 9
| 67
| 20.888889
| 0.901235
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
9c2bb537299f5358ab3ec4ef9557c7596b13fc75
| 110
|
py
|
Python
|
comet/cmdline/__init__.py
|
dneise/Comet
|
abaa0da65d69f90a5262d81416477b4e71deb2ad
|
[
"BSD-2-Clause"
] | 15
|
2015-11-29T18:53:58.000Z
|
2022-03-09T15:47:30.000Z
|
comet/cmdline/__init__.py
|
dneise/Comet
|
abaa0da65d69f90a5262d81416477b4e71deb2ad
|
[
"BSD-2-Clause"
] | 29
|
2016-01-21T18:10:45.000Z
|
2021-10-01T16:41:12.000Z
|
comet/cmdline/__init__.py
|
dneise/Comet
|
abaa0da65d69f90a5262d81416477b4e71deb2ad
|
[
"BSD-2-Clause"
] | 11
|
2016-01-22T14:05:51.000Z
|
2022-03-09T17:49:56.000Z
|
# Comet VOEvent Broker.
# Utility classes for command line event sender.
from comet.cmdline.options import *
| 22
| 48
| 0.781818
| 15
| 110
| 5.733333
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.154545
| 110
| 4
| 49
| 27.5
| 0.924731
| 0.618182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
9c38abe5561b3b105a7a08aa6725b41c3379f191
| 2,801
|
py
|
Python
|
tests/test_operators.py
|
Kuree/magma
|
be2439aa897768c5810be72e3a55a6f772ac83cf
|
[
"MIT"
] | null | null | null |
tests/test_operators.py
|
Kuree/magma
|
be2439aa897768c5810be72e3a55a6f772ac83cf
|
[
"MIT"
] | null | null | null |
tests/test_operators.py
|
Kuree/magma
|
be2439aa897768c5810be72e3a55a6f772ac83cf
|
[
"MIT"
] | null | null | null |
import magma as m
from magma.operators import MantleImportError
from common import DeclareAnd
import pytest
from magma.testing import check_files_equal
def test_error():
circ = m.DefineCircuit("test", "a", m.In(m.Bits[4]), "b", m.Out(m.Bits[4]))
try:
~circ.a
assert False, \
"Operator should throw an error since mantle is not imported"
except MantleImportError:
pass
@pytest.mark.parametrize("width", [None, 3])
@pytest.mark.parametrize("output", ["verilog", "coreir"])
def test_assign(width, output):
T = m.util.BitOrBits(width)
name = f"test_assign_operator_{width}_{output}"
circ = m.DefineCircuit(name, "a", m.In(T), "b", m.In(T),
"c", m.Out(T))
and2 = DeclareAnd(width)()
and2.I0 <= circ.a
and2.I1 <= circ.b
circ.c <= and2.O
m.EndDefine()
m.compile(f"build/{name}", circ, output)
suffix = "v" if output == "verilog" else "json"
assert check_files_equal(__file__, f"build/{name}.{suffix}",
f"gold/{name}.{suffix}")
@pytest.mark.parametrize("width", [None, 3])
@pytest.mark.parametrize("output", ["verilog", "coreir"])
def test_assign_to_var(width, output):
T = m.util.BitOrBits(width)
name = f"test_assign_operator2_{width}_{output}"
circ = m.DefineCircuit(name, "a", m.In(T), "b", m.In(T),
"c", m.Out(T))
and2 = DeclareAnd(width)()
c, I0, I1 = and2.I0, and2.I1, circ.c
I0 <= circ.a
I1 <= circ.b
c <= and2.O
m.EndDefine()
m.compile(f"build/{name}", circ, output)
suffix = "v" if output == "verilog" else "json"
assert check_files_equal(__file__, f"build/{name}.{suffix}",
f"gold/{name}.{suffix}")
@pytest.mark.parametrize("width", [None, 3])
def test_assign_error_0(width):
T = m.util.BitOrBits(width)
name = f"test_assign_operator_{width}"
circ = m.DefineCircuit(name, "a", m.In(T), "b", m.In(T),
"c", m.Out(T))
and2 = DeclareAnd(width)()
try:
and2.O <= circ.a
assert False, "Should raise type error"
except TypeError as e:
assert str(e) == f"Cannot use <= to assign to output: {and2.O.debug_name} (trying to assign {circ.a.debug_name})"
@pytest.mark.parametrize("width", [None, 3])
def test_assign_error_1(width):
T = m.util.BitOrBits(width)
name = f"test_assign_operator_{width}"
circ = m.DefineCircuit(name, "a", m.In(T), "b", m.In(T),
"c", m.Out(T))
and2 = DeclareAnd(width)()
try:
circ.a <= and2.O
assert False, "Should raise type error"
except TypeError as e:
assert str(e) == f"Cannot use <= to assign to output: {circ.a.debug_name} (trying to assign {and2.O.debug_name})"
| 33.345238
| 121
| 0.594074
| 401
| 2,801
| 4.042394
| 0.204489
| 0.016656
| 0.019741
| 0.064158
| 0.742751
| 0.714374
| 0.714374
| 0.714374
| 0.714374
| 0.714374
| 0
| 0.014595
| 0.241699
| 2,801
| 83
| 122
| 33.746988
| 0.748588
| 0
| 0
| 0.585714
| 0
| 0.028571
| 0.224206
| 0.061764
| 0
| 0
| 0
| 0
| 0.1
| 1
| 0.071429
| false
| 0.014286
| 0.1
| 0
| 0.171429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9c3db3f1ad101e216393b6b6998c1abb3af518cd
| 2,123
|
py
|
Python
|
custom_components/krisinfo/kriscom/kriscom.py
|
isabellaalstrom/Krisinformation_V3
|
3337977bede95867fef58eb7db53f1be2074bbe9
|
[
"MIT"
] | null | null | null |
custom_components/krisinfo/kriscom/kriscom.py
|
isabellaalstrom/Krisinformation_V3
|
3337977bede95867fef58eb7db53f1be2074bbe9
|
[
"MIT"
] | 1
|
2022-03-18T10:56:10.000Z
|
2022-03-18T10:56:10.000Z
|
custom_components/krisinfo/kriscom/kriscom.py
|
isabellaalstrom/Krisinformation_V3
|
3337977bede95867fef58eb7db53f1be2074bbe9
|
[
"MIT"
] | null | null | null |
import json
import httpx
import logging
from .exceptions import *
from .const import VERSION, DOMAIN, USER_AGENT, BASE_URL, NEWS_PARAMETER, VMAS_PARAMETER
logger = logging.getLogger(DOMAIN)
class kriscom(object):
def __init__(self, timeout=None):
self._timeout = timeout
def version(self):
return VERSION
async def requestVmas(self):
try:
async with httpx.AsyncClient() as client:
response = await client.get(
BASE_URL + VMAS_PARAMETER,
headers={"User-agent": USER_AGENT},
# allow_redirects=True,
timeout=self._timeout,
)
except Exception as e:
error = HTTP_Error(997, f"An HTTP error occurred: {str(e)}", str(e))
logger.debug(error)
raise error
try:
response.encoding = "UTF-8"
intermediateResponse = json.dumps(response.json())
jsonResponse = json.loads(intermediateResponse)
except Exception as e:
error = API_Error(995, f"A parsing error occurred: {str(e)}", str(e))
logger.debug(error)
raise error
return jsonResponse
async def requestNews(self):
try:
async with httpx.AsyncClient() as client:
response = await client.get(
BASE_URL + NEWS_PARAMETER,
headers={"User-agent": USER_AGENT},
# allow_redirects=True,
timeout=self._timeout,
)
except Exception as e:
error = HTTP_Error(997, f"An HTTP error occurred: {str(e)}", str(e))
logger.debug(error)
raise error
try:
response.encoding = "UTF-8"
intermediateResponse = json.dumps(response.json())
jsonResponse = json.loads(intermediateResponse)
except Exception as e:
error = API_Error(995, f"A parsing error occurred: {str(e)}", str(e))
logger.debug(error)
raise error
return jsonResponse
| 31.220588
| 88
| 0.556759
| 222
| 2,123
| 5.220721
| 0.288288
| 0.02761
| 0.058671
| 0.062123
| 0.755824
| 0.755824
| 0.755824
| 0.755824
| 0.755824
| 0.755824
| 0
| 0.010226
| 0.355158
| 2,123
| 68
| 89
| 31.220588
| 0.836377
| 0.020254
| 0
| 0.679245
| 0
| 0
| 0.075072
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037736
| false
| 0
| 0.09434
| 0.018868
| 0.207547
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9c47bc909cd237b4811365c91afcadaad57d3177
| 77
|
py
|
Python
|
BanditSim/update_rules/constant_update.py
|
AJB0211/BanditSim
|
5426486b40c35492049b09f9b57eb18ad5d6ce63
|
[
"MIT"
] | null | null | null |
BanditSim/update_rules/constant_update.py
|
AJB0211/BanditSim
|
5426486b40c35492049b09f9b57eb18ad5d6ce63
|
[
"MIT"
] | null | null | null |
BanditSim/update_rules/constant_update.py
|
AJB0211/BanditSim
|
5426486b40c35492049b09f9b57eb18ad5d6ce63
|
[
"MIT"
] | null | null | null |
def constant_update_rule(self, q, r, a):
return q + self.alpha * (r - q)
| 25.666667
| 40
| 0.623377
| 14
| 77
| 3.285714
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.220779
| 77
| 2
| 41
| 38.5
| 0.766667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
9c652e6a9a6156d948a087584aaa849a164c5ce1
| 3,072
|
py
|
Python
|
setup.py
|
IloveKanade/k3redisutil
|
cd564ee8efec7fadd7d19412916dcb9538145fa0
|
[
"MIT"
] | null | null | null |
setup.py
|
IloveKanade/k3redisutil
|
cd564ee8efec7fadd7d19412916dcb9538145fa0
|
[
"MIT"
] | 2
|
2021-11-11T06:21:22.000Z
|
2022-03-23T07:02:22.000Z
|
setup.py
|
IloveKanade/k3redisutil
|
cd564ee8efec7fadd7d19412916dcb9538145fa0
|
[
"MIT"
] | null | null | null |
# DO NOT EDIT!!! built with `python _building/build_setup.py`
import setuptools
setuptools.setup(
name="k3redisutil",
packages=["k3redisutil"],
version="0.1.0",
license='MIT',
description='For using redis more easily.',
long_description='# k3redisutil\n\n[](https://github.com/pykit3/k3redisutil/actions/workflows/python-package.yml)\n[](https://travis-ci.com/pykit3/k3redisutil)\n[](https://k3redisutil.readthedocs.io/en/stable/?badge=stable)\n[](https://pypi.org/project/k3redisutil)\n\nFor using redis more easily.\n\nk3redisutil is a component of [pykit3] project: a python3 toolkit set.\n\n\nFor using redis more easily.\n\n\n\n# Install\n\n```\npip install k3redisutil\n```\n\n# Synopsis\n\n```python\n\nimport k3redisutil\nimport time\n\n# Using redis as a duplex cross process communication channel pool.\n\n# client and server with the same channel name "/foo" is a pair\nc = k3redisutil.RedisChannel(6379, \'/foo\', \'client\')\ns = k3redisutil.RedisChannel(6379, \'/foo\', \'server\')\n\nc.send_msg(\'c2s\')\ns.send_msg(\'s2c\')\n\n# list channels\nprint(c.list_channel(\'/\')) # ["/foo"]\nprint(s.recv_msg()) # c2s\nprint(c.recv_msg()) # s2c\n\ncli = k3redisutil.RedisProxyClient([(\'127.0.0.1\', 2222), (\'192.168.0.100\', 222)])\n\ncli.set(\'k1\', \'v1\', retry=1)\ncli.set(\'k2\', \'v2\', expire=1000) # msec\n\nprint(cli.get(\'k1\', retry=2))\n# out: \'v1\'\n\nprint(cli.get(\'k2\'))\n# out: \'v2\'\n\ntime.sleep(1)\ncli.get(\'k2\')\n# raise a \'redisutil.KeyNotFoundError\' because it is timeout\n\ncli.hset(\'hashname1\', \'k3\', \'v3\')\ncli.hset(\'hashname2\', \'k4\', \'v4\', expire=1000)\n\nprint(cli.hget(\'hashname1\', \'k3\'))\n# out: \'v3\'\n\nprint(cli.hget(\'hashname2\', \'k4\'))\n# out: \'v4\'\n\ntime.sleep(1)\ncli.hget(\'hashname2\', \'k4\')\n# raise a \'redisutil.KeyNotFoundError\' because it is timeout\n\n```\n\n# Author\n\nZhang Yanpo (张炎泼) <drdr.xp@gmail.com>\n\n# Copyright and License\n\nThe MIT License (MIT)\n\nCopyright (c) 2015 Zhang Yanpo (张炎泼) <drdr.xp@gmail.com>\n\n\n[pykit3]: https://github.com/pykit3',
long_description_content_type="text/markdown",
author='Zhang Yanpo',
author_email='drdr.xp@gmail.com',
url='https://github.com/pykit3/k3redisutil',
keywords=['python', 'redis'],
python_requires='>=3.0',
install_requires=['k3ut<0.2,>=0.1.15', 'k3awssign<0.2,>=0.1.0', 'k3thread<0.2,>=0.1.0', 'k3utfjson<0.2,>=0.1.1', 'k3confloader<0.2,>=0.1.1', 'k3utdocker<0.2,>=0.1.0', 'k3http<0.2,>=0.1.0', 'redis>=3.5.0', 'mock>=4.0.0'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
] + ['Programming Language :: Python :: 3'],
)
| 128
| 2,170
| 0.672526
| 471
| 3,072
| 4.356688
| 0.375796
| 0.017544
| 0.010234
| 0.013645
| 0.237329
| 0.192008
| 0.159844
| 0.134503
| 0.111111
| 0.111111
| 0
| 0.056252
| 0.091471
| 3,072
| 23
| 2,171
| 133.565217
| 0.678968
| 0.019206
| 0
| 0
| 1
| 0.142857
| 0.769844
| 0.176021
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.095238
| 0
| 0.095238
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
92f5cb55153b0f9e7ad42c1d07f36f52569f26ba
| 232
|
py
|
Python
|
level00.py
|
Vinay26k/pythonchallenge
|
91ae7b85b8efe551818d6b8a483a849302fc0c85
|
[
"Apache-2.0"
] | 3
|
2019-02-22T12:57:15.000Z
|
2021-08-07T16:27:47.000Z
|
level00.py
|
Vinay26k/pythonchallenge
|
91ae7b85b8efe551818d6b8a483a849302fc0c85
|
[
"Apache-2.0"
] | null | null | null |
level00.py
|
Vinay26k/pythonchallenge
|
91ae7b85b8efe551818d6b8a483a849302fc0c85
|
[
"Apache-2.0"
] | 2
|
2019-04-27T06:02:12.000Z
|
2020-12-16T14:50:41.000Z
|
#! python 2
#https://github.com/Vinay26k/pythonchallenge
'''
Given
http://www.pythonchallenge.com/pc/def/0.html
Hint : Change Url
display : 2^38
'''
print r'http://www.pythonchallenge.com/pc/def/{0}.html'.format(2**38)
| 19.333333
| 70
| 0.676724
| 35
| 232
| 4.485714
| 0.628571
| 0.089172
| 0.280255
| 0.318471
| 0.44586
| 0.44586
| 0.44586
| 0.44586
| 0
| 0
| 0
| 0.054455
| 0.12931
| 232
| 11
| 71
| 21.090909
| 0.722772
| 0.228448
| 0
| 0
| 0
| 0
| 0.613333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 1
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
130733a5a177f8c659de190e69e282cb77c9dc95
| 91
|
py
|
Python
|
haystackbrowser/__init__.py
|
jayvdb/django-haystackbrowser
|
255baf25a4cb97262ec8373c2e4fa4bb0134fda7
|
[
"BSD-2-Clause-FreeBSD"
] | 22
|
2015-01-11T12:44:57.000Z
|
2021-02-11T15:23:24.000Z
|
haystackbrowser/__init__.py
|
jayvdb/django-haystackbrowser
|
255baf25a4cb97262ec8373c2e4fa4bb0134fda7
|
[
"BSD-2-Clause-FreeBSD"
] | 12
|
2015-03-13T19:31:52.000Z
|
2020-06-17T06:10:15.000Z
|
haystackbrowser/__init__.py
|
jayvdb/django-haystackbrowser
|
255baf25a4cb97262ec8373c2e4fa4bb0134fda7
|
[
"BSD-2-Clause-FreeBSD"
] | 11
|
2015-09-24T10:42:09.000Z
|
2020-07-02T04:26:03.000Z
|
# -*- coding: utf-8 -*-
__version_info__ = '0.6.3'
__version__ = '0.6.3'
version = '0.6.3'
| 18.2
| 26
| 0.582418
| 16
| 91
| 2.75
| 0.5
| 0.136364
| 0.204545
| 0.454545
| 0.522727
| 0.522727
| 0.522727
| 0
| 0
| 0
| 0
| 0.12987
| 0.153846
| 91
| 4
| 27
| 22.75
| 0.441558
| 0.230769
| 0
| 0
| 0
| 0
| 0.220588
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
132da3ba1536a54c73f36e5195bc88e34a322477
| 53
|
py
|
Python
|
monitor/__init__.py
|
garagonc/optimization-framework
|
1ca57699d6a3f2f98dcaea96430e75c3f847b49f
|
[
"Apache-2.0"
] | null | null | null |
monitor/__init__.py
|
garagonc/optimization-framework
|
1ca57699d6a3f2f98dcaea96430e75c3f847b49f
|
[
"Apache-2.0"
] | null | null | null |
monitor/__init__.py
|
garagonc/optimization-framework
|
1ca57699d6a3f2f98dcaea96430e75c3f847b49f
|
[
"Apache-2.0"
] | null | null | null |
"""
Created on Jan 28 16:58 2020
@author: nishit
"""
| 10.6
| 28
| 0.641509
| 9
| 53
| 3.777778
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.232558
| 0.188679
| 53
| 5
| 29
| 10.6
| 0.55814
| 0.849057
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
13329ebda51941e6ba4ebd549892f776b853502b
| 25,757
|
py
|
Python
|
store_backend/pipelines/tests/test_views.py
|
EUGINELETHAL/ChRIS_store
|
b842dbfa80f29f86468fe0ebd3514aaac4898717
|
[
"MIT"
] | 11
|
2018-03-23T19:27:10.000Z
|
2021-04-30T16:40:04.000Z
|
store_backend/pipelines/tests/test_views.py
|
EUGINELETHAL/ChRIS_store
|
b842dbfa80f29f86468fe0ebd3514aaac4898717
|
[
"MIT"
] | 46
|
2018-05-21T14:54:43.000Z
|
2022-01-28T01:37:57.000Z
|
store_backend/pipelines/tests/test_views.py
|
EUGINELETHAL/ChRIS_store
|
b842dbfa80f29f86468fe0ebd3514aaac4898717
|
[
"MIT"
] | 11
|
2018-03-28T04:37:25.000Z
|
2021-05-28T06:40:30.000Z
|
import logging
import json
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth.models import User
from rest_framework import status
from plugins.models import PluginMeta, Plugin
from plugins.models import PluginParameter
from plugins.models import DefaultStrParameter, DefaultBoolParameter
from plugins.models import DefaultFloatParameter, DefaultIntParameter
from pipelines.models import Pipeline, PluginPiping
class ViewTests(TestCase):
def setUp(self):
# avoid cluttered console output (for instance logging all the http requests)
logging.disable(logging.WARNING)
self.content_type = 'application/vnd.collection+json'
self.plugin_ds_name = "simpledsapp"
self.plugin_ds_parameters = {'dummyInt': {'type': 'integer', 'optional': True,
'default': 111111}}
self.username = 'foo'
self.password = 'foo-pass'
# create plugin
(meta, tf) = PluginMeta.objects.get_or_create(name=self.plugin_ds_name, type='ds')
(plugin_ds, tf) = Plugin.objects.get_or_create(meta=meta)
# add a parameter with a default
(plg_param_ds, tf)= PluginParameter.objects.get_or_create(
plugin=plugin_ds,
name='dummyInt',
type=self.plugin_ds_parameters['dummyInt']['type'],
optional=self.plugin_ds_parameters['dummyInt']['optional']
)
default = self.plugin_ds_parameters['dummyInt']['default']
DefaultIntParameter.objects.get_or_create(plugin_param=plg_param_ds,
value=default) # set plugin parameter default
# create user
user = User.objects.create_user(username=self.username, password=self.password)
# create a pipeline
self.pipeline_name = 'Pipeline1'
(pipeline, tf) = Pipeline.objects.get_or_create(name=self.pipeline_name,
owner=user, category='test')
# create two plugin pipings
self.pips = []
(pip, tf) = PluginPiping.objects.get_or_create(plugin=plugin_ds,
pipeline=pipeline)
self.pips.append(pip)
(pip, tf) = PluginPiping.objects.get_or_create(plugin=plugin_ds, previous=pip,
pipeline=pipeline)
self.pips.append(pip)
# create another user
self.other_username = 'boo'
self.other_password = 'far'
User.objects.create_user(username=self.other_username,
password=self.other_password)
def tearDown(self):
# re-enable logging
logging.disable(logging.NOTSET)
class PipelineViewTests(ViewTests):
"""
Generic tests for pipeline views.
"""
def setUp(self):
super(PipelineViewTests, self).setUp()
class PipelineListViewTests(PipelineViewTests):
"""
Test the pipeline-list view.
"""
def setUp(self):
super(PipelineListViewTests, self).setUp()
self.create_read_url = reverse("pipeline-list")
def test_pipeline_create_success(self):
(meta, tf) = PluginMeta.objects.get_or_create(name='mri_convert', type='ds')
(plugin_ds1, tf) = Plugin.objects.get_or_create(meta=meta)
(meta, tf) = PluginMeta.objects.get_or_create(name='mri_analyze', type='ds')
(plugin_ds2, tf) = Plugin.objects.get_or_create(meta=meta)
plugin_tree = '[{"plugin_id": ' + str(plugin_ds1.id) + \
', "previous_index": null}, {"plugin_id": ' + \
str(plugin_ds2.id) + ', "previous_index": 0}]'
post = json.dumps(
{"template": {"data": [{"name": "name", "value": "Pipeline2"},
{"name": "plugin_tree", "value": plugin_tree}]}})
# make API request
self.client.login(username=self.username, password=self.password)
response = self.client.post(self.create_read_url, data=post,
content_type=self.content_type)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_pipeline_list_success(self):
self.client.login(username=self.username, password=self.password)
response = self.client.get(self.create_read_url)
self.assertContains(response, "Pipeline1")
class PipelineListQuerySearchViewTests(PipelineViewTests):
"""
Test the pipeline-list-query-search view.
"""
def setUp(self):
super(PipelineListQuerySearchViewTests, self).setUp()
self.list_url = reverse("pipeline-list-query-search") + '?name=Pipeline1'
def test_pipeline_list_query_search_success(self):
owner = User.objects.get(username=self.username)
Pipeline.objects.get_or_create(name='Pipeline2', owner=owner,
category='test')
self.client.login(username=self.username, password=self.password)
response = self.client.get(self.list_url)
self.assertContains(response, "Pipeline1")
self.assertNotContains(response, "Pipeline2")
list_url = reverse("pipeline-list-query-search") + '?category=test'
response = self.client.get(list_url)
self.assertContains(response, "Pipeline1")
self.assertContains(response, "Pipeline2")
class PipelineDetailViewTests(PipelineViewTests):
"""
Test the pipeline-detail view.
"""
def setUp(self):
super(PipelineDetailViewTests, self).setUp()
pipeline = Pipeline.objects.get(name="Pipeline1")
self.read_update_delete_url = reverse("pipeline-detail", kwargs={"pk": pipeline.id})
self.put = json.dumps(
{"template": {"data": [{"name": "name", "value": "Pipeline2"}]}})
def test_pipeline_detail_success(self):
self.client.login(username=self.username, password=self.password)
response = self.client.get(self.read_update_delete_url)
self.assertContains(response, "Pipeline1")
def test_pipeline_detail_failure_unauthenticated(self):
response = self.client.get(self.read_update_delete_url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_pipeline_update_success(self):
self.client.login(username=self.username, password=self.password)
response = self.client.put(self.read_update_delete_url, data=self.put,
content_type=self.content_type)
self.assertContains(response, "Pipeline2")
def test_pipeline_update_failure_unauthenticated(self):
response = self.client.put(self.read_update_delete_url, data=self.put,
content_type=self.content_type)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_pipeline_update_failure_access_denied(self):
self.client.login(username=self.other_username, password=self.other_password)
response = self.client.put(self.read_update_delete_url, data=self.put,
content_type=self.content_type)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_pipeline_delete_success(self):
self.client.login(username=self.username, password=self.password)
response = self.client.delete(self.read_update_delete_url)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(Pipeline.objects.count(), 0)
def test_pipeline_delete_failure_unauthenticated(self):
response = self.client.delete(self.read_update_delete_url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_pipeline_delete_failure_access_denied(self):
self.client.login(username=self.other_username, password=self.other_password)
response = self.client.delete(self.read_update_delete_url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
class PipelinePluginListViewTests(PipelineViewTests):
"""
Test the pipeline-plugin-list view.
"""
def setUp(self):
super(PipelinePluginListViewTests, self).setUp()
self.pipeline = Pipeline.objects.get(name="Pipeline1")
self.list_url = reverse("pipeline-plugin-list", kwargs={"pk": self.pipeline.id})
def test_pipeline_plugin_list_success(self):
# create plugins
(meta, tf) = PluginMeta.objects.get_or_create(name='mri_convert', type='ds')
(plugin_ds1, tf) = Plugin.objects.get_or_create(meta=meta)
(meta, tf) = PluginMeta.objects.get_or_create(name='mri_analyze', type='ds')
Plugin.objects.get_or_create(meta=meta)
# pipe one plugin into pipeline
PluginPiping.objects.get_or_create(pipeline=self.pipeline, plugin=plugin_ds1,
previous=None)
self.client.login(username=self.username, password=self.password)
response = self.client.get(self.list_url)
self.assertContains(response, "mri_convert")
self.assertNotContains(response, "mri_analyze") # plugin list is pipe-specific
def test_pipeline_plugin_list_failure_unauthenticated(self):
response = self.client.get(self.list_url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PipelinePluginPipingListViewTests(PipelineViewTests):
"""
Test the pipeline-pluginpiping-list view.
"""
def setUp(self):
super(PipelinePluginPipingListViewTests, self).setUp()
self.pipeline = Pipeline.objects.get(name="Pipeline1")
self.list_url = reverse("pipeline-pluginpiping-list",
kwargs={"pk": self.pipeline.id})
def test_pipeline_plugin_piping_list_success(self):
# create plugins
(meta, tf) = PluginMeta.objects.get_or_create(name='mri_convert', type='ds')
(plugin_ds, tf) = Plugin.objects.get_or_create(meta=meta)
# pipe one plugin into pipeline
PluginPiping.objects.get_or_create(pipeline=self.pipeline, plugin=plugin_ds,
previous=None)
self.client.login(username=self.username, password=self.password)
response = self.client.get(self.list_url)
self.assertContains(response, "plugin_id")
def test_pipeline_plugin_piping_list_failure_unauthenticated(self):
response = self.client.get(self.list_url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PipelineDefaultParameterListViewTests(PipelineViewTests):
"""
Test the pipeline-defaultparameter-list view.
"""
def setUp(self):
super(PipelineDefaultParameterListViewTests, self).setUp()
self.pipeline = Pipeline.objects.get(name="Pipeline1")
self.list_url = reverse("pipeline-defaultparameter-list",
kwargs={"pk": self.pipeline.id})
def test_pipeline_default_parameter_list_success(self):
plugin_ds = Plugin.objects.get(meta__name=self.plugin_ds_name)
param = plugin_ds.parameters.get(name='dummyInt')
self.client.login(username=self.username, password=self.password)
response = self.client.get(self.list_url)
self.assertContains(response, self.pips[0].id)
self.assertContains(response, self.pips[1].id)
self.assertContains(response, param.name)
self.assertContains(response, plugin_ds.meta.name)
self.assertContains(response, 111111)
def test_pipeline_default_parameter_list_failure_unauthenticated(self):
response = self.client.get(self.list_url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PluginPipingDetailViewTests(PipelineViewTests):
"""
Test the pluginpiping-detail view.
"""
def setUp(self):
super(PluginPipingDetailViewTests, self).setUp()
self.read_url = reverse("pluginpiping-detail", kwargs={"pk": self.pips[0].id})
def test_plugin_piping_detail_success(self):
self.client.login(username=self.username, password=self.password)
response = self.client.get(self.read_url)
self.assertContains(response, "plugin_id")
self.assertContains(response, "pipeline_id")
def test_plugin_piping_detail_failure_unauthenticated(self):
response = self.client.get(self.read_url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class DefaultPipingStrParameterDetailViewTests(ViewTests):
"""
Test the defaultpipingstrparameter-detail view.
"""
def setUp(self):
super(DefaultPipingStrParameterDetailViewTests, self).setUp()
plugin_ds = Plugin.objects.get(meta__name=self.plugin_ds_name)
# add a parameter with a default
(plg_param_ds, tf)= PluginParameter.objects.get_or_create(
plugin=plugin_ds,
name='dummyStr',
type='string',
optional=True
)
DefaultStrParameter.objects.get_or_create(plugin_param=plg_param_ds,
value='test') # set plugin parameter default
pipeline = Pipeline.objects.get(name="Pipeline1")
(pip, tf) = PluginPiping.objects.get_or_create(plugin=plugin_ds,
pipeline=pipeline, previous=self.pips[1])
default_param = pip.string_param.get(plugin_piping=pip)
self.read_update_url = reverse("defaultpipingstrparameter-detail", kwargs={"pk": default_param.id})
self.put = json.dumps({
"template": {"data": [{"name": "value", "value": "updated"}]}})
def test_default_piping_str_parameter_detail_success_owner(self):
self.client.login(username=self.username, password=self.password)
response = self.client.get(self.read_update_url)
self.assertContains(response, "test")
#self.assertTrue(response.data["feed"].endswith(self.corresponding_feed_url))
def test_default_piping_str_parameter_detail_failure_access_denied_pipeline_locked(self):
self.client.login(username=self.other_username, password=self.other_password)
response = self.client.get(self.read_update_url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_default_piping_str_parameter_detail_success_pipeline_unlocked(self):
pipeline = Pipeline.objects.get(name="Pipeline1")
pipeline.locked = False
pipeline.save()
self.client.login(username=self.other_username, password=self.other_password)
response = self.client.get(self.read_update_url)
self.assertContains(response, "test")
def test_default_piping_str_parameter_detail_failure_unauthenticated(self):
response = self.client.get(self.read_update_url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_default_piping_str_parameter_update_success(self):
self.client.login(username=self.username, password=self.password)
response = self.client.put(self.read_update_url, data=self.put,
content_type=self.content_type)
self.assertContains(response, "updated")
def test_default_piping_str_parameter_update_failure_unauthenticated(self):
response = self.client.put(self.read_update_url, data=self.put,
content_type=self.content_type)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_default_piping_str_parameter_update_failure_access_denied(self):
self.client.login(username=self.other_username, password=self.other_password)
response = self.client.put(self.read_update_url, data=self.put,
content_type=self.content_type)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
class DefaultPipingIntParameterDetailViewTests(ViewTests):
"""
Test the defaultpipingintparameter-detail view.
"""
def setUp(self):
super(DefaultPipingIntParameterDetailViewTests, self).setUp()
plugin_ds = Plugin.objects.get(meta__name=self.plugin_ds_name)
pipeline = Pipeline.objects.get(name="Pipeline1")
(pip, tf) = PluginPiping.objects.get_or_create(plugin=plugin_ds,
pipeline=pipeline, previous=self.pips[1])
default_param = pip.integer_param.get(plugin_piping=pip)
self.read_update_url = reverse("defaultpipingintparameter-detail", kwargs={"pk": default_param.id})
self.put = json.dumps({
"template": {"data": [{"name": "value", "value": 222222}]}})
def test_default_piping_int_parameter_detail_success_owner(self):
self.client.login(username=self.username, password=self.password)
response = self.client.get(self.read_update_url)
self.assertContains(response, 111111)
def test_default_piping_int_parameter_detail_failure_access_denied_pipeline_locked(self):
self.client.login(username=self.other_username, password=self.other_password)
response = self.client.get(self.read_update_url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_default_piping_int_parameter_detail_success_pipeline_unlocked(self):
pipeline = Pipeline.objects.get(name="Pipeline1")
pipeline.locked = False
pipeline.save()
self.client.login(username=self.other_username, password=self.other_password)
response = self.client.get(self.read_update_url)
self.assertContains(response, 111111)
def test_default_piping_int_parameter_detail_failure_unauthenticated(self):
response = self.client.get(self.read_update_url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_default_piping_int_parameter_update_success(self):
self.client.login(username=self.username, password=self.password)
response = self.client.put(self.read_update_url, data=self.put,
content_type=self.content_type)
self.assertContains(response, 222222)
def test_default_piping_int_parameter_update_failure_unauthenticated(self):
response = self.client.put(self.read_update_url, data=self.put,
content_type=self.content_type)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_default_piping_int_parameter_update_failure_access_denied(self):
self.client.login(username=self.other_username, password=self.other_password)
response = self.client.put(self.read_update_url, data=self.put,
content_type=self.content_type)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
class DefaultPipingFloatParameterDetailViewTests(ViewTests):
"""
Test the defaultpipingfloatparameter-detail view.
"""
def setUp(self):
super(DefaultPipingFloatParameterDetailViewTests, self).setUp()
plugin_ds = Plugin.objects.get(meta__name=self.plugin_ds_name)
# add a parameter with a default
(plg_param_ds, tf)= PluginParameter.objects.get_or_create(
plugin=plugin_ds,
name='dummyFloat',
type='float',
optional=True
)
DefaultFloatParameter.objects.get_or_create(plugin_param=plg_param_ds,
value=1.11111) # set plugin parameter default
pipeline = Pipeline.objects.get(name="Pipeline1")
(pip, tf) = PluginPiping.objects.get_or_create(plugin=plugin_ds,
pipeline=pipeline, previous=self.pips[1])
default_param = pip.float_param.get(plugin_piping=pip)
self.read_update_url = reverse("defaultpipingfloatparameter-detail", kwargs={"pk": default_param.id})
self.put = json.dumps({
"template": {"data": [{"name": "value", "value": 1.22222}]}})
def test_default_piping_float_parameter_detail_success_owner(self):
self.client.login(username=self.username, password=self.password)
response = self.client.get(self.read_update_url)
self.assertContains(response, 1.11111)
def test_default_piping_float_parameter_detail_failure_access_denied_pipeline_locked(self):
self.client.login(username=self.other_username, password=self.other_password)
response = self.client.get(self.read_update_url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_default_piping_float_parameter_detail_success_pipeline_unlocked(self):
pipeline = Pipeline.objects.get(name="Pipeline1")
pipeline.locked = False
pipeline.save()
self.client.login(username=self.other_username, password=self.other_password)
response = self.client.get(self.read_update_url)
self.assertContains(response, 1.11111)
def test_default_piping_float_parameter_detail_failure_unauthenticated(self):
response = self.client.get(self.read_update_url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_default_piping_float_parameter_update_success(self):
self.client.login(username=self.username, password=self.password)
response = self.client.put(self.read_update_url, data=self.put,
content_type=self.content_type)
self.assertContains(response, 1.22222)
def test_default_piping_float_parameter_update_failure_unauthenticated(self):
response = self.client.put(self.read_update_url, data=self.put,
content_type=self.content_type)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_default_piping_float_parameter_update_failure_access_denied(self):
self.client.login(username=self.other_username, password=self.other_password)
response = self.client.put(self.read_update_url, data=self.put,
content_type=self.content_type)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
class DefaultPipingBoolParameterDetailViewTests(ViewTests):
"""
Test the defaultpipingboolparameter-detail view.
"""
def setUp(self):
super(DefaultPipingBoolParameterDetailViewTests, self).setUp()
plugin_ds = Plugin.objects.get(meta__name=self.plugin_ds_name)
# add a parameter with a default
(plg_param_ds, tf)= PluginParameter.objects.get_or_create(
plugin=plugin_ds,
name='dummyBool',
type='boolean',
optional=True
)
DefaultBoolParameter.objects.get_or_create(plugin_param=plg_param_ds,
value=False) # set plugin parameter default
pipeline = Pipeline.objects.get(name="Pipeline1")
(pip, tf) = PluginPiping.objects.get_or_create(plugin=plugin_ds,
pipeline=pipeline, previous=self.pips[1])
default_param = pip.boolean_param.get(plugin_piping=pip)
self.read_update_url = reverse("defaultpipingboolparameter-detail", kwargs={"pk": default_param.id})
self.put = json.dumps({
"template": {"data": [{"name": "value", "value": "true"}]}})
def test_default_piping_bool_parameter_detail_success_owner(self):
self.client.login(username=self.username, password=self.password)
response = self.client.get(self.read_update_url)
self.assertContains(response, "false")
def test_default_piping_bool_parameter_detail_failure_access_denied_pipeline_locked(self):
self.client.login(username=self.other_username, password=self.other_password)
response = self.client.get(self.read_update_url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_default_piping_bool_parameter_detail_success_pipeline_unlocked(self):
pipeline = Pipeline.objects.get(name="Pipeline1")
pipeline.locked = False
pipeline.save()
self.client.login(username=self.other_username, password=self.other_password)
response = self.client.get(self.read_update_url)
self.assertContains(response, "false")
def test_default_piping_bool_parameter_detail_failure_unauthenticated(self):
response = self.client.get(self.read_update_url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_default_piping_bool_parameter_update_success(self):
self.client.login(username=self.username, password=self.password)
response = self.client.put(self.read_update_url, data=self.put,
content_type=self.content_type)
self.assertContains(response, "true")
def test_default_piping_bool_parameter_update_failure_unauthenticated(self):
response = self.client.put(self.read_update_url, data=self.put,
content_type=self.content_type)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_default_piping_bool_parameter_update_failure_access_denied(self):
self.client.login(username=self.other_username, password=self.other_password)
response = self.client.put(self.read_update_url, data=self.put,
content_type=self.content_type)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
| 46.576854
| 109
| 0.685018
| 2,906
| 25,757
| 5.819683
| 0.067103
| 0.047304
| 0.051088
| 0.043519
| 0.788316
| 0.766911
| 0.724515
| 0.710028
| 0.681232
| 0.678808
| 0
| 0.009209
| 0.215864
| 25,757
| 552
| 110
| 46.661232
| 0.828143
| 0.043716
| 0
| 0.544529
| 0
| 0
| 0.048305
| 0.011053
| 0
| 0
| 0
| 0
| 0.145038
| 1
| 0.155216
| false
| 0.091603
| 0.02799
| 0
| 0.216285
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
13364652ee0b561abe5858d0ac8374debbaafa9d
| 113
|
py
|
Python
|
blocksmith/__init__.py
|
plattthompson/generate-btc-wallet
|
8571447871edb691fcbac294bdbd93d23cec8fbc
|
[
"Apache-2.0"
] | 1
|
2022-01-14T14:31:34.000Z
|
2022-01-14T14:31:34.000Z
|
blocksmith/__init__.py
|
plattthompson/generate-btc-wallet
|
8571447871edb691fcbac294bdbd93d23cec8fbc
|
[
"Apache-2.0"
] | null | null | null |
blocksmith/__init__.py
|
plattthompson/generate-btc-wallet
|
8571447871edb691fcbac294bdbd93d23cec8fbc
|
[
"Apache-2.0"
] | null | null | null |
from .generator import KeyGenerator
from .bitcoin import BitcoinWallet
# from .ethereum import EthereumWallet
| 28.25
| 39
| 0.823009
| 12
| 113
| 7.75
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141593
| 113
| 3
| 40
| 37.666667
| 0.958763
| 0.318584
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1384ccc27707cc2808b118477a9843163bdcdd0b
| 83
|
py
|
Python
|
call-workflow.py
|
humberry/Workflow
|
6c1480c7d7a0949ae090d378ec5518ac13450a4a
|
[
"MIT"
] | 6
|
2015-02-28T21:35:01.000Z
|
2019-04-02T04:48:46.000Z
|
call-workflow.py
|
humberry/Workflow
|
6c1480c7d7a0949ae090d378ec5518ac13450a4a
|
[
"MIT"
] | null | null | null |
call-workflow.py
|
humberry/Workflow
|
6c1480c7d7a0949ae090d378ec5518ac13450a4a
|
[
"MIT"
] | 6
|
2015-05-11T02:13:53.000Z
|
2020-10-03T23:11:11.000Z
|
import webbrowser
webbrowser.open('workflow://run-workflow?name=workflow-script')
| 20.75
| 63
| 0.807229
| 10
| 83
| 6.7
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048193
| 83
| 3
| 64
| 27.666667
| 0.848101
| 0
| 0
| 0
| 0
| 0
| 0.53012
| 0.53012
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
139941f7b01a14259d2d33764e3d23c02a49c1d6
| 909
|
py
|
Python
|
metric/db/errors.py
|
kzulfazriawan/metric
|
a9a9ddc284a12618a93febe238f12a71f95dd9f1
|
[
"MIT"
] | 1
|
2020-12-21T06:52:29.000Z
|
2020-12-21T06:52:29.000Z
|
metric/db/errors.py
|
kzulfazriawan/metric
|
a9a9ddc284a12618a93febe238f12a71f95dd9f1
|
[
"MIT"
] | null | null | null |
metric/db/errors.py
|
kzulfazriawan/metric
|
a9a9ddc284a12618a93febe238f12a71f95dd9f1
|
[
"MIT"
] | null | null | null |
class BaseError(Exception):
pass
class AddQueryInvalid(Exception):
def __init__(self, exception, message="Parameter Add condition is invalid"):
self.message = message
self.exception = exception
super().__init__(self.message)
def __str__(self):
return f'{self.message}: {self.exception}'
class DataValueInvalid(BaseError):
def __init__(self, exception, message="Invalid value data given"):
self.message = message
self.exception = exception
super().__init__(self.message)
def __str__(self):
return f'{self.message}: {self.exception}'
class NoneTypeValue(BaseError):
def __init__(self, exception, message="The value is empty or none"):
self.message = message
self.exception = exception
super().__init__(self.message)
def __str__(self):
return f'{self.message}: {self.exception}'
| 27.545455
| 80
| 0.665567
| 100
| 909
| 5.69
| 0.27
| 0.205624
| 0.210896
| 0.105448
| 0.724077
| 0.676626
| 0.550088
| 0.550088
| 0.550088
| 0.550088
| 0
| 0
| 0.222222
| 909
| 32
| 81
| 28.40625
| 0.804809
| 0
| 0
| 0.652174
| 0
| 0
| 0.19802
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.26087
| false
| 0.043478
| 0
| 0.130435
| 0.565217
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
139f6488af2ad0d39171a352763e4a963d886161
| 79
|
py
|
Python
|
app/widget/__init__.py
|
HansBug/pyqt5-demo
|
df10ba54209bcf993d5dab8d969ab63d7a0acc90
|
[
"Apache-2.0"
] | null | null | null |
app/widget/__init__.py
|
HansBug/pyqt5-demo
|
df10ba54209bcf993d5dab8d969ab63d7a0acc90
|
[
"Apache-2.0"
] | null | null | null |
app/widget/__init__.py
|
HansBug/pyqt5-demo
|
df10ba54209bcf993d5dab8d969ab63d7a0acc90
|
[
"Apache-2.0"
] | null | null | null |
from .dialog_config import DialogConfig
from .main_window import AppMainWindow
| 26.333333
| 39
| 0.873418
| 10
| 79
| 6.7
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101266
| 79
| 2
| 40
| 39.5
| 0.943662
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
13a4b8a7a15255c7da7e019923950de8a4e1ce66
| 101
|
py
|
Python
|
openpype/hosts/blender/blender_addon/startup/init.py
|
Tilix4/OpenPype
|
8909bd890170880aa7ec8b673abaa25a9bdf40f2
|
[
"MIT"
] | 1
|
2022-02-08T15:40:41.000Z
|
2022-02-08T15:40:41.000Z
|
openpype/hosts/blender/blender_addon/startup/init.py
|
zafrs/OpenPype
|
4b8e7e1ed002fc55b31307efdea70b0feaed474f
|
[
"MIT"
] | 2
|
2022-03-18T01:46:03.000Z
|
2022-03-18T01:46:16.000Z
|
openpype/hosts/blender/blender_addon/startup/init.py
|
zafrs/OpenPype
|
4b8e7e1ed002fc55b31307efdea70b0feaed474f
|
[
"MIT"
] | null | null | null |
from openpype.pipeline import install_host
from openpype.hosts.blender import api
install_host(api)
| 20.2
| 42
| 0.851485
| 15
| 101
| 5.6
| 0.6
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09901
| 101
| 4
| 43
| 25.25
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
13a6760973cd06a6796bfac7d38c820f092e16b5
| 11,029
|
py
|
Python
|
template/<project_name>/tests/unit/api/middleware/test_faults.py
|
hahaps/openstack-project-generator
|
646b7b9372082b409e39863bb6604656ade4f44d
|
[
"Apache-2.0"
] | 3
|
2016-08-29T10:25:07.000Z
|
2020-06-03T12:10:32.000Z
|
template/<project_name>/tests/unit/api/middleware/test_faults.py
|
hahaps/openstack-project-generator
|
646b7b9372082b409e39863bb6604656ade4f44d
|
[
"Apache-2.0"
] | null | null | null |
template/<project_name>/tests/unit/api/middleware/test_faults.py
|
hahaps/openstack-project-generator
|
646b7b9372082b409e39863bb6604656ade4f44d
|
[
"Apache-2.0"
] | 3
|
2016-08-29T02:45:40.000Z
|
2019-07-01T01:45:33.000Z
|
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from xml.dom import minidom
import mock
from oslo_i18n import fixture as i18n_fixture
from oslo_serialization import jsonutils
import webob.dec
from <project_name>.api import common
from <project_name>.api.openstack import wsgi
from <project_name>.i18n import _
from <project_name> import test
class TestFaults(test.TestCase):
"""Tests covering `<project_name>.api.openstack.faults:Fault` class."""
def setUp(self):
super(TestFaults, self).setUp()
self.useFixture(i18n_fixture.ToggleLazy(True))
def _prepare_xml(self, xml_string):
"""Remove characters from string which hinder XML equality testing."""
xml_string = xml_string.replace(" ", "")
xml_string = xml_string.replace("\n", "")
xml_string = xml_string.replace("\t", "")
return xml_string
def test_400_fault_json(self):
"""Test fault serialized to JSON via file-extension and/or header."""
requests = [
webob.Request.blank('/.json'),
webob.Request.blank('/', headers={"Accept": "application/json"}),
]
for request in requests:
fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='scram'))
response = request.get_response(fault)
expected = {
"badRequest": {
"message": "scram",
"code": 400,
},
}
actual = jsonutils.loads(response.body)
self.assertEqual("application/json", response.content_type)
self.assertEqual(expected, actual)
def test_413_fault_json(self):
"""Test fault serialized to JSON via file-extension and/or header."""
requests = [
webob.Request.blank('/.json'),
webob.Request.blank('/', headers={"Accept": "application/json"}),
]
for request in requests:
exc = webob.exc.HTTPRequestEntityTooLarge
fault = wsgi.Fault(exc(explanation='sorry',
headers={'Retry-After': 4}))
response = request.get_response(fault)
expected = {
"overLimit": {
"message": "sorry",
"code": 413,
"retryAfter": 4,
},
}
actual = jsonutils.loads(response.body)
self.assertEqual("application/json", response.content_type)
self.assertEqual(expected, actual)
def test_raise(self):
"""Ensure the ability to raise :class:`Fault` in WSGI-ified methods."""
@webob.dec.wsgify
def raiser(req):
raise wsgi.Fault(webob.exc.HTTPNotFound(explanation='whut?'))
req = webob.Request.blank('/.xml')
resp = req.get_response(raiser)
self.assertEqual("application/xml", resp.content_type)
self.assertEqual(404, resp.status_int)
self.assertIn('whut?', resp.body)
def test_raise_403(self):
"""Ensure the ability to raise :class:`Fault` in WSGI-ified methods."""
@webob.dec.wsgify
def raiser(req):
raise wsgi.Fault(webob.exc.HTTPForbidden(explanation='whut?'))
req = webob.Request.blank('/.xml')
resp = req.get_response(raiser)
self.assertEqual("application/xml", resp.content_type)
self.assertEqual(403, resp.status_int)
self.assertNotIn('resizeNotAllowed', resp.body)
self.assertIn('forbidden', resp.body)
@mock.patch('<project_name>.api.openstack.wsgi.i18n.translate')
def test_raise_http_with_localized_explanation(self, mock_translate):
params = ('blah', )
expl = _("String with params: %s") % params
def _mock_translation(msg, locale):
return "Mensaje traducido"
mock_translate.side_effect = _mock_translation
@webob.dec.wsgify
def raiser(req):
raise wsgi.Fault(webob.exc.HTTPNotFound(explanation=expl))
req = webob.Request.blank('/.xml')
resp = req.get_response(raiser)
self.assertEqual("application/xml", resp.content_type)
self.assertEqual(404, resp.status_int)
self.assertIn(("Mensaje traducido"), resp.body)
self.stubs.UnsetAll()
def test_fault_has_status_int(self):
"""Ensure the status_int is set correctly on faults."""
fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='what?'))
self.assertEqual(400, fault.status_int)
def test_xml_serializer(self):
"""Ensure that a v1 request responds with a v1 xmlns."""
request = webob.Request.blank('/v1',
headers={"Accept": "application/xml"})
fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='scram'))
response = request.get_response(fault)
self.assertIn(common.XML_NS_V1, response.body)
self.assertEqual("application/xml", response.content_type)
self.assertEqual(400, response.status_int)
class FaultsXMLSerializationTestV11(test.TestCase):
"""Tests covering `<project_name>.api.openstack.faults:Fault` class."""
def _prepare_xml(self, xml_string):
xml_string = xml_string.replace(" ", "")
xml_string = xml_string.replace("\n", "")
xml_string = xml_string.replace("\t", "")
return xml_string
def test_400_fault(self):
metadata = {'attributes': {"badRequest": 'code'}}
serializer = wsgi.XMLDictSerializer(metadata=metadata,
xmlns=common.XML_NS_V1)
fixture = {
"badRequest": {
"message": "scram",
"code": 400,
},
}
output = serializer.serialize(fixture)
actual = minidom.parseString(self._prepare_xml(output))
expected = minidom.parseString(self._prepare_xml("""
<badRequest code="400" xmlns="%s">
<message>scram</message>
</badRequest>
""") % common.XML_NS_V1)
self.assertEqual(expected.toxml(), actual.toxml())
def test_413_fault(self):
metadata = {'attributes': {"overLimit": 'code'}}
serializer = wsgi.XMLDictSerializer(metadata=metadata,
xmlns=common.XML_NS_V1)
fixture = {
"overLimit": {
"message": "sorry",
"code": 413,
"retryAfter": 4,
},
}
output = serializer.serialize(fixture)
actual = minidom.parseString(self._prepare_xml(output))
expected = minidom.parseString(self._prepare_xml("""
<overLimit code="413" xmlns="%s">
<message>sorry</message>
<retryAfter>4</retryAfter>
</overLimit>
""") % common.XML_NS_V1)
self.assertEqual(expected.toxml(), actual.toxml())
def test_404_fault(self):
metadata = {'attributes': {"itemNotFound": 'code'}}
serializer = wsgi.XMLDictSerializer(metadata=metadata,
xmlns=common.XML_NS_V1)
fixture = {
"itemNotFound": {
"message": "sorry",
"code": 404,
},
}
output = serializer.serialize(fixture)
actual = minidom.parseString(self._prepare_xml(output))
expected = minidom.parseString(self._prepare_xml("""
<itemNotFound code="404" xmlns="%s">
<message>sorry</message>
</itemNotFound>
""") % common.XML_NS_V1)
self.assertEqual(expected.toxml(), actual.toxml())
class FaultsXMLSerializationTestV2(test.TestCase):
"""Tests covering `<project_name>.api.openstack.faults:Fault` class."""
def _prepare_xml(self, xml_string):
xml_string = xml_string.replace(" ", "")
xml_string = xml_string.replace("\n", "")
xml_string = xml_string.replace("\t", "")
return xml_string
def test_400_fault(self):
metadata = {'attributes': {"badRequest": 'code'}}
serializer = wsgi.XMLDictSerializer(metadata=metadata,
xmlns=common.XML_NS_V1)
fixture = {
"badRequest": {
"message": "scram",
"code": 400,
},
}
output = serializer.serialize(fixture)
actual = minidom.parseString(self._prepare_xml(output))
expected = minidom.parseString(self._prepare_xml("""
<badRequest code="400" xmlns="%s">
<message>scram</message>
</badRequest>
""") % common.XML_NS_V1)
self.assertEqual(expected.toxml(), actual.toxml())
def test_413_fault(self):
metadata = {'attributes': {"overLimit": 'code'}}
serializer = wsgi.XMLDictSerializer(metadata=metadata,
xmlns=common.XML_NS_V1)
fixture = {
"overLimit": {
"message": "sorry",
"code": 413,
"retryAfter": 4,
},
}
output = serializer.serialize(fixture)
actual = minidom.parseString(self._prepare_xml(output))
expected = minidom.parseString(self._prepare_xml("""
<overLimit code="413" xmlns="%s">
<message>sorry</message>
<retryAfter>4</retryAfter>
</overLimit>
""") % common.XML_NS_V1)
self.assertEqual(expected.toxml(), actual.toxml())
def test_404_fault(self):
metadata = {'attributes': {"itemNotFound": 'code'}}
serializer = wsgi.XMLDictSerializer(metadata=metadata,
xmlns=common.XML_NS_V1)
fixture = {
"itemNotFound": {
"message": "sorry",
"code": 404,
},
}
output = serializer.serialize(fixture)
actual = minidom.parseString(self._prepare_xml(output))
expected = minidom.parseString(self._prepare_xml("""
<itemNotFound code="404" xmlns="%s">
<message>sorry</message>
</itemNotFound>
""") % common.XML_NS_V1)
self.assertEqual(expected.toxml(), actual.toxml())
| 35.124204
| 79
| 0.574304
| 1,093
| 11,029
| 5.656908
| 0.185727
| 0.034935
| 0.023128
| 0.027333
| 0.729096
| 0.712761
| 0.700954
| 0.687045
| 0.687045
| 0.687045
| 0
| 0.016539
| 0.303745
| 11,029
| 313
| 80
| 35.236422
| 0.788644
| 0.054946
| 0
| 0.711111
| 0
| 0
| 0.174954
| 0.025067
| 0
| 0
| 0
| 0
| 0.106667
| 0
| null | null | 0
| 0.04
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
13a79bb72ff655f488df528ec6c5fc8f99e38727
| 22
|
py
|
Python
|
Dmitry_Shevelev/task1.py
|
Perekalskiyigor/Sirius
|
2dcf792b072fa2f3fe4c2e900a9d4b6d0c2bd9b8
|
[
"MIT"
] | null | null | null |
Dmitry_Shevelev/task1.py
|
Perekalskiyigor/Sirius
|
2dcf792b072fa2f3fe4c2e900a9d4b6d0c2bd9b8
|
[
"MIT"
] | null | null | null |
Dmitry_Shevelev/task1.py
|
Perekalskiyigor/Sirius
|
2dcf792b072fa2f3fe4c2e900a9d4b6d0c2bd9b8
|
[
"MIT"
] | null | null | null |
print("Python work!")
| 11
| 21
| 0.681818
| 3
| 22
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 22
| 1
| 22
| 22
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0.545455
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
13aa30235eea9a030a84fe7d6fa3e9783bd72093
| 58
|
py
|
Python
|
1.0.py
|
Pontianak/Python-For-Informatics
|
0645c5e8114e9b91959a47380dcd7da620eb552e
|
[
"MIT"
] | 1
|
2016-05-16T14:41:07.000Z
|
2016-05-16T14:41:07.000Z
|
1.0.py
|
Pontianak/Python-For-Informatics-Assignments
|
0645c5e8114e9b91959a47380dcd7da620eb552e
|
[
"MIT"
] | null | null | null |
1.0.py
|
Pontianak/Python-For-Informatics-Assignments
|
0645c5e8114e9b91959a47380dcd7da620eb552e
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015 Pontianak
print "First Assignment!"
| 14.5
| 30
| 0.741379
| 7
| 58
| 6.142857
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081633
| 0.155172
| 58
| 3
| 31
| 19.333333
| 0.795918
| 0.482759
| 0
| 0
| 0
| 0
| 0.607143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
13e88f458e6acb72d34013a73650716f2a93d3d4
| 192
|
py
|
Python
|
authlib/client/oauth_client.py
|
jonathanunderwood/authlib
|
3834a2a80876a87cdaab4240d77185179970c3ab
|
[
"BSD-3-Clause"
] | 1
|
2021-12-09T07:11:05.000Z
|
2021-12-09T07:11:05.000Z
|
authlib/client/oauth_client.py
|
jonathanunderwood/authlib
|
3834a2a80876a87cdaab4240d77185179970c3ab
|
[
"BSD-3-Clause"
] | 4
|
2021-03-19T08:17:59.000Z
|
2021-06-10T19:34:36.000Z
|
authlib/client/oauth_client.py
|
jonathanunderwood/authlib
|
3834a2a80876a87cdaab4240d77185179970c3ab
|
[
"BSD-3-Clause"
] | 2
|
2021-05-24T20:34:12.000Z
|
2022-03-26T07:46:17.000Z
|
from authlib.integrations._client import RemoteApp as OAuthClient
from authlib.integrations._client.oauth_registry import OAUTH_CLIENT_PARAMS
__all__ = ['OAUTH_CLIENT_PARAMS', 'OAuthClient']
| 38.4
| 75
| 0.854167
| 23
| 192
| 6.652174
| 0.521739
| 0.143791
| 0.300654
| 0.379085
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078125
| 192
| 4
| 76
| 48
| 0.864407
| 0
| 0
| 0
| 0
| 0
| 0.15625
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
b91620286046f7c22f66ebe72afb7271c4914a12
| 152
|
py
|
Python
|
pycompanies/admin.py
|
Devecoop/pyarweb
|
ee42f0aa871569cc30a6a678dcdc43293d38c0bb
|
[
"Apache-2.0"
] | 1
|
2022-01-14T18:38:25.000Z
|
2022-01-14T18:38:25.000Z
|
pycompanies/admin.py
|
Devecoop/pyarweb
|
ee42f0aa871569cc30a6a678dcdc43293d38c0bb
|
[
"Apache-2.0"
] | 37
|
2022-01-17T14:41:51.000Z
|
2022-02-16T13:50:05.000Z
|
pycompanies/admin.py
|
Devecoop/pyarweb
|
ee42f0aa871569cc30a6a678dcdc43293d38c0bb
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from .models import Company, UserCompanyProfile
admin.site.register(Company)
admin.site.register(UserCompanyProfile)
| 21.714286
| 47
| 0.842105
| 18
| 152
| 7.111111
| 0.555556
| 0.140625
| 0.265625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085526
| 152
| 6
| 48
| 25.333333
| 0.920863
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
b9282242070642559128654e40ae3616af00d16d
| 41
|
py
|
Python
|
app/exceptions.py
|
GinnyGaga/20171202flasky
|
298787c1f54b9ece8048fd359d56044716ffa345
|
[
"MIT"
] | null | null | null |
app/exceptions.py
|
GinnyGaga/20171202flasky
|
298787c1f54b9ece8048fd359d56044716ffa345
|
[
"MIT"
] | 5
|
2020-03-24T15:26:17.000Z
|
2021-02-02T21:42:07.000Z
|
app/exceptions.py
|
GinnyGaga/flaskyblog
|
e0e5d8d5bbc38a2237c0a055f1d15f26adb97f7c
|
[
"MIT"
] | null | null | null |
class ValidationError(ValueError):
    pass
| 13.666667
| 34
| 0.829268
| 4
| 41
| 8.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 41
| 2
| 35
| 20.5
| 0.918919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
b982814722b898ce9050108e67b03d5a61bc431e
| 111
|
py
|
Python
|
dashboard/covid/admin.py
|
guiyshd/new-dashboard
|
78e43b066f153a902514a97a7e66349d2ffc9f36
|
[
"MIT"
] | null | null | null |
dashboard/covid/admin.py
|
guiyshd/new-dashboard
|
78e43b066f153a902514a97a7e66349d2ffc9f36
|
[
"MIT"
] | null | null | null |
dashboard/covid/admin.py
|
guiyshd/new-dashboard
|
78e43b066f153a902514a97a7e66349d2ffc9f36
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import WcotaBaseNacional
admin.site.register(WcotaBaseNacional)
| 22.2
| 38
| 0.855856
| 13
| 111
| 7.307692
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09009
| 111
| 5
| 38
| 22.2
| 0.940594
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b9e47890a1a961b04adf2f16b0fd30ba6c2eb51f
| 75
|
py
|
Python
|
examples/tests/__init__.py
|
wthie/nevow
|
e630de8f640f27df85c38bc37ecdaf4e7b931afc
|
[
"MIT"
] | 49
|
2015-03-18T15:29:16.000Z
|
2021-11-17T12:30:51.000Z
|
examples/tests/__init__.py
|
wthie/nevow
|
e630de8f640f27df85c38bc37ecdaf4e7b931afc
|
[
"MIT"
] | 62
|
2015-01-21T08:48:08.000Z
|
2021-04-02T17:31:29.000Z
|
examples/tests/__init__.py
|
wthie/nevow
|
e630de8f640f27df85c38bc37ecdaf4e7b931afc
|
[
"MIT"
] | 30
|
2015-02-26T09:35:39.000Z
|
2021-07-24T12:45:04.000Z
|
"""Tests for the examples, and some functional tests for nevow itself.
"""
| 37.5
| 71
| 0.733333
| 11
| 75
| 5
| 0.818182
| 0.290909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 75
| 2
| 72
| 37.5
| 0.873016
| 0.893333
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b9e82039f8b3fee50bca83055ac15098d363aa58
| 215
|
py
|
Python
|
asdf/tags/core/tests/setup_package.py
|
eteq/asdf
|
6d9e0e48bbffea166a19b71e29f5f9c211983bfe
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
asdf/tags/core/tests/setup_package.py
|
eteq/asdf
|
6d9e0e48bbffea166a19b71e29f5f9c211983bfe
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
asdf/tags/core/tests/setup_package.py
|
eteq/asdf
|
6d9e0e48bbffea166a19b71e29f5f9c211983bfe
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
def get_package_data(): # pragma: no cover
    return {
        str(_PACKAGE_NAME_ + '.tags.core.tests'): ['data/*.yaml']}
| 23.888889
| 66
| 0.627907
| 30
| 215
| 4.333333
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011696
| 0.204651
| 215
| 8
| 67
| 26.875
| 0.748538
| 0.465116
| 0
| 0
| 0
| 0
| 0.243243
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0
| 0.333333
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
b9ff435155249133500a5ee9e465a249d9442b76
| 74
|
py
|
Python
|
ctcomp/_payday.py
|
bubbleboy14/ctcomp
|
52f495a5ada76841539f98f808a1ae6a8577a6d4
|
[
"MIT"
] | null | null | null |
ctcomp/_payday.py
|
bubbleboy14/ctcomp
|
52f495a5ada76841539f98f808a1ae6a8577a6d4
|
[
"MIT"
] | null | null | null |
ctcomp/_payday.py
|
bubbleboy14/ctcomp
|
52f495a5ada76841539f98f808a1ae6a8577a6d4
|
[
"MIT"
] | null | null | null |
from cantools.web import respond
from model import payDay
respond(payDay)
| 18.5
| 32
| 0.837838
| 11
| 74
| 5.636364
| 0.636364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121622
| 74
| 4
| 33
| 18.5
| 0.953846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6a02cf6969dcd6659ec97109cc4069ee42ca669e
| 102
|
py
|
Python
|
gdax/__init__.py
|
chesswiz16/Trader
|
008308016be6803094b4f5891efa8687af95bacf
|
[
"MIT"
] | 1
|
2018-01-11T14:39:18.000Z
|
2018-01-11T14:39:18.000Z
|
gdax/__init__.py
|
chesswiz16/Trader
|
008308016be6803094b4f5891efa8687af95bacf
|
[
"MIT"
] | null | null | null |
gdax/__init__.py
|
chesswiz16/Trader
|
008308016be6803094b4f5891efa8687af95bacf
|
[
"MIT"
] | null | null | null |
from gdax.authenticated_client import AuthenticatedClient
from gdax.public_client import PublicClient
| 34
| 57
| 0.901961
| 12
| 102
| 7.5
| 0.666667
| 0.177778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078431
| 102
| 2
| 58
| 51
| 0.957447
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6a0ad479cb5e02adc948370bbd47aae6a72a57e2
| 63
|
py
|
Python
|
litex/build/quicklogic/__init__.py
|
osterwood/litex
|
db20cb172dc982c5879aa8080ec7aa18de181cc5
|
[
"ADSL"
] | 1,501
|
2016-04-19T18:16:21.000Z
|
2022-03-31T17:46:31.000Z
|
litex/build/quicklogic/__init__.py
|
osterwood/litex
|
db20cb172dc982c5879aa8080ec7aa18de181cc5
|
[
"ADSL"
] | 1,135
|
2016-04-19T05:49:14.000Z
|
2022-03-31T15:21:19.000Z
|
litex/build/quicklogic/__init__.py
|
osterwood/litex
|
db20cb172dc982c5879aa8080ec7aa18de181cc5
|
[
"ADSL"
] | 357
|
2016-04-19T05:00:24.000Z
|
2022-03-31T11:28:32.000Z
|
from litex.build.quicklogic.platform import QuickLogicPlatform
| 31.5
| 62
| 0.888889
| 7
| 63
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.063492
| 63
| 1
| 63
| 63
| 0.949153
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
dbe4fbd0e45bbdd37b29f22817e2fa20e3687e0a
| 278
|
py
|
Python
|
src/config/__init__.py
|
villanuevab/squeezeDet
|
4c78fa350f9c11ab9a5b620e78286846a7ee66de
|
[
"BSD-2-Clause"
] | 812
|
2016-12-11T02:59:15.000Z
|
2022-02-23T14:35:45.000Z
|
src/config/__init__.py
|
Tony-Hou/squeezeDet
|
e7c0860eb1d141729cf02a2ec9cafc0cfb4a21aa
|
[
"BSD-2-Clause"
] | 128
|
2016-12-15T08:18:38.000Z
|
2022-03-11T23:18:57.000Z
|
src/config/__init__.py
|
Tony-Hou/squeezeDet
|
e7c0860eb1d141729cf02a2ec9cafc0cfb4a21aa
|
[
"BSD-2-Clause"
] | 363
|
2016-12-11T04:21:09.000Z
|
2022-03-05T17:26:45.000Z
|
from kitti_model_config import kitti_model_config
from kitti_vgg16_config import kitti_vgg16_config
from kitti_res50_config import kitti_res50_config
from kitti_squeezeDet_config import kitti_squeezeDet_config
from kitti_squeezeDetPlus_config import kitti_squeezeDetPlus_config
| 46.333333
| 67
| 0.928058
| 40
| 278
| 5.95
| 0.225
| 0.189076
| 0.357143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031008
| 0.071942
| 278
| 5
| 68
| 55.6
| 0.891473
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
dbfe336ea4eb0b5578fd9980acd344064cd766bb
| 62
|
py
|
Python
|
lf3py/aws/firehose/__init__.py
|
rog-works/lf3py
|
e89937f7aa133ed54d85764f06101ab9abf6b960
|
[
"CNRI-Python"
] | null | null | null |
lf3py/aws/firehose/__init__.py
|
rog-works/lf3py
|
e89937f7aa133ed54d85764f06101ab9abf6b960
|
[
"CNRI-Python"
] | 48
|
2020-12-19T13:47:26.000Z
|
2021-01-07T22:27:56.000Z
|
lf3py/aws/firehose/__init__.py
|
rog-works/lf3py
|
e89937f7aa133ed54d85764f06101ab9abf6b960
|
[
"CNRI-Python"
] | null | null | null |
from lf3py.aws.firehose.firehose import FireHose # noqa F401
| 31
| 61
| 0.806452
| 9
| 62
| 5.555556
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 0.129032
| 62
| 1
| 62
| 62
| 0.851852
| 0.145161
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e036e6f2ad79ad20456e86844288febaff304c5a
| 119
|
py
|
Python
|
encrypt.py
|
MaayanLab/scAVI
|
7f3f83657d749520243535581db1080075e48aa5
|
[
"Apache-2.0"
] | 3
|
2020-01-23T08:48:33.000Z
|
2021-07-21T02:42:28.000Z
|
encrypt.py
|
MaayanLab/scAVI
|
7f3f83657d749520243535581db1080075e48aa5
|
[
"Apache-2.0"
] | 21
|
2019-10-25T15:38:37.000Z
|
2022-01-27T16:04:04.000Z
|
encrypt.py
|
MaayanLab/scAVI
|
7f3f83657d749520243535581db1080075e48aa5
|
[
"Apache-2.0"
] | 1
|
2019-10-24T18:15:26.000Z
|
2019-10-24T18:15:26.000Z
|
'''
Encrypt and decrypt a string
'''
def encrypt(s):
    return s.encode('hex')

def decrypt(s):
    return s.decode('hex')
| 11.9
| 28
| 0.655462
| 19
| 119
| 4.105263
| 0.578947
| 0.179487
| 0.205128
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159664
| 119
| 9
| 29
| 13.222222
| 0.78
| 0.235294
| 0
| 0
| 0
| 0
| 0.072289
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
e045c9a3fc2f22dfbd95d24a12c5d8ae09fbc49a
| 884
|
py
|
Python
|
amt_tools/tools/__init__.py
|
cwitkowitz/transcription-models
|
e8697d6969b074926ac55986bc02fa1aad04b471
|
[
"MIT"
] | 4
|
2021-06-15T19:45:26.000Z
|
2022-03-31T20:42:26.000Z
|
amt_tools/tools/__init__.py
|
cwitkowitz/transcription-models
|
e8697d6969b074926ac55986bc02fa1aad04b471
|
[
"MIT"
] | null | null | null |
amt_tools/tools/__init__.py
|
cwitkowitz/transcription-models
|
e8697d6969b074926ac55986bc02fa1aad04b471
|
[
"MIT"
] | 1
|
2021-11-08T02:13:02.000Z
|
2021-11-08T02:13:02.000Z
|
"""
Should be able to use the following import patterns (e.g.):
------------------------------------------------------------
import amt_tools
amt_tools.tools.load_normalize_audio()
------------------------------------------------------------
import amt_tools.tools as tools
tools.load_notes_midi()
------------------------------------------------------------
from amt_tools import tools
tools.array_to_tensor()
------------------------------------------------------------
from amt_tools.tools import *
notes_to_multi_pitch()
------------------------------------------------------------
from amt_tools.tools import GuitarProfile
GuitarProfile()
------------------------------------------------------------
from amt_tools.tools.constants import KEY_AUDIO
KEY_AUDIO
"""
from .constants import *
from .instrument import *
from .io import *
from .utils import *
#from .visualize import *
| 31.571429
| 60
| 0.469457
| 80
| 884
| 4.9625
| 0.3875
| 0.141058
| 0.163728
| 0.128463
| 0.115869
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075792
| 884
| 27
| 61
| 32.740741
| 0.485924
| 0.885747
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e04b6f665304a1078990bdbad8608bdc14b331e2
| 1,061
|
py
|
Python
|
cart_venv/Lib/site-packages/tensorflow_estimator/_api/v1/estimator/tpu/__init__.py
|
juice1000/Synchronous-vs-Asynchronous-Learning-Tensorflow-
|
654be60f7986ac9bb7ce1d080ddee377c3389f93
|
[
"MIT"
] | 4
|
2020-09-02T16:13:51.000Z
|
2021-06-05T08:45:59.000Z
|
cart_venv/Lib/site-packages/tensorflow_estimator/_api/v1/estimator/tpu/__init__.py
|
juice1000/Synchronous-vs-Asynchronous-Learning-Tensorflow-
|
654be60f7986ac9bb7ce1d080ddee377c3389f93
|
[
"MIT"
] | null | null | null |
cart_venv/Lib/site-packages/tensorflow_estimator/_api/v1/estimator/tpu/__init__.py
|
juice1000/Synchronous-vs-Asynchronous-Learning-Tensorflow-
|
654be60f7986ac9bb7ce1d080ddee377c3389f93
|
[
"MIT"
] | 1
|
2021-01-28T01:57:41.000Z
|
2021-01-28T01:57:41.000Z
|
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Public API for tf.estimator.tpu namespace.
"""
from __future__ import print_function as _print_function
import sys as _sys
from tensorflow_estimator._api.v1.estimator.tpu import experimental
from tensorflow_estimator.python.estimator.tpu.tpu_config import InputPipelineConfig
from tensorflow_estimator.python.estimator.tpu.tpu_config import RunConfig
from tensorflow_estimator.python.estimator.tpu.tpu_config import TPUConfig
from tensorflow_estimator.python.estimator.tpu.tpu_estimator import TPUEstimator
from tensorflow_estimator.python.estimator.tpu.tpu_estimator import TPUEstimatorSpec
del _print_function
from tensorflow.python.util import module_wrapper as _module_wrapper
if not isinstance(_sys.modules[__name__], _module_wrapper.TFModuleWrapper):
  _sys.modules[__name__] = _module_wrapper.TFModuleWrapper(
      _sys.modules[__name__], "estimator.tpu", public_apis=None, deprecation=True,
      has_lite=False)
| 42.44
| 84
| 0.840716
| 139
| 1,061
| 6.071942
| 0.388489
| 0.113744
| 0.163507
| 0.171801
| 0.454976
| 0.454976
| 0.454976
| 0.454976
| 0.454976
| 0
| 0
| 0.001034
| 0.088596
| 1,061
| 24
| 85
| 44.208333
| 0.871768
| 0.159284
| 0
| 0
| 1
| 0
| 0.014706
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.642857
| 0
| 0.642857
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e07a226a855a2f8af0608b320e0b5d8cc04e3b8f
| 82
|
py
|
Python
|
app/ultraauth_jupyter_config.py
|
chbrandt/jupyterhub-oauth
|
de32594649be043b7e74844f3c94b1d57ff326e5
|
[
"Apache-2.0"
] | null | null | null |
app/ultraauth_jupyter_config.py
|
chbrandt/jupyterhub-oauth
|
de32594649be043b7e74844f3c94b1d57ff326e5
|
[
"Apache-2.0"
] | null | null | null |
app/ultraauth_jupyter_config.py
|
chbrandt/jupyterhub-oauth
|
de32594649be043b7e74844f3c94b1d57ff326e5
|
[
"Apache-2.0"
] | null | null | null |
c.JupyterHub.authenticator_class = 'oauthenticator.generic.GenericOAuthenticator'
| 41
| 81
| 0.878049
| 7
| 82
| 10.142857
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036585
| 82
| 1
| 82
| 82
| 0.898734
| 0
| 0
| 0
| 0
| 0
| 0.536585
| 0.536585
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0ec7abbd34adb5e3290452bd1f4fcea9d5ba1554
| 132
|
py
|
Python
|
donfig/__init__.py
|
yufeizhu600/donfig
|
4ad0cc483044369b2ef5608ae24bad5ba50bb7f2
|
[
"MIT",
"BSD-3-Clause"
] | 27
|
2018-11-04T16:50:49.000Z
|
2022-02-05T16:33:58.000Z
|
donfig/__init__.py
|
yufeizhu600/donfig
|
4ad0cc483044369b2ef5608ae24bad5ba50bb7f2
|
[
"MIT",
"BSD-3-Clause"
] | 23
|
2018-12-22T15:36:41.000Z
|
2022-03-14T22:10:15.000Z
|
donfig/__init__.py
|
yufeizhu600/donfig
|
4ad0cc483044369b2ef5608ae24bad5ba50bb7f2
|
[
"MIT",
"BSD-3-Clause"
] | 4
|
2019-04-24T18:02:29.000Z
|
2021-04-09T19:33:23.000Z
|
from .version import get_versions
__version__ = get_versions()['version']
del get_versions
from .config_obj import Config # noqa
| 18.857143
| 39
| 0.787879
| 18
| 132
| 5.333333
| 0.5
| 0.34375
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 132
| 6
| 40
| 22
| 0.842105
| 0.030303
| 0
| 0
| 0
| 0
| 0.056
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
0efc401295da4a90b5c6925bd689c2c8334c218e
| 34
|
py
|
Python
|
djephys/__init__.py
|
vathes/canonical-ephys
|
34e867c2e117a25424b25e90abf07791285ff010
|
[
"MIT"
] | null | null | null |
djephys/__init__.py
|
vathes/canonical-ephys
|
34e867c2e117a25424b25e90abf07791285ff010
|
[
"MIT"
] | null | null | null |
djephys/__init__.py
|
vathes/canonical-ephys
|
34e867c2e117a25424b25e90abf07791285ff010
|
[
"MIT"
] | 3
|
2020-04-22T14:16:39.000Z
|
2020-05-21T23:05:50.000Z
|
from .ephys import schema as ephys
| 34
| 34
| 0.823529
| 6
| 34
| 4.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147059
| 34
| 1
| 34
| 34
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
1602dedcd1ef9dc3450ae8cf1b2b73fe6f3a27fd
| 79
|
py
|
Python
|
scripts/hooks/hook-jsonschema.py
|
thombashi/sandbox
|
525bc258dee5f7654c6a2e0967af99d4127e6db1
|
[
"MIT"
] | 1
|
2016-01-18T22:38:40.000Z
|
2016-01-18T22:38:40.000Z
|
scripts/hooks/hook-jsonschema.py
|
thombashi/sandbox
|
525bc258dee5f7654c6a2e0967af99d4127e6db1
|
[
"MIT"
] | 1
|
2016-02-25T11:13:51.000Z
|
2016-02-25T11:14:43.000Z
|
scripts/hooks/hook-jsonschema.py
|
thombashi/sandbox
|
525bc258dee5f7654c6a2e0967af99d4127e6db1
|
[
"MIT"
] | null | null | null |
from PyInstaller.utils.hooks import copy_metadata
copy_metadata("jsonschema")
| 19.75
| 49
| 0.848101
| 10
| 79
| 6.5
| 0.8
| 0.369231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075949
| 79
| 3
| 50
| 26.333333
| 0.890411
| 0
| 0
| 0
| 0
| 0
| 0.126582
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
163ee4b4b619ce6e3342167191813b8223371875
| 89
|
py
|
Python
|
restdbserver/main.py
|
bitwyre/database-server
|
cac4f24ab0c1b34f1aea616f312bdb13a3274e7b
|
[
"BSD-3-Clause"
] | 1
|
2020-04-07T17:00:57.000Z
|
2020-04-07T17:00:57.000Z
|
restdbserver/main.py
|
bitwyre/database-server
|
cac4f24ab0c1b34f1aea616f312bdb13a3274e7b
|
[
"BSD-3-Clause"
] | null | null | null |
restdbserver/main.py
|
bitwyre/database-server
|
cac4f24ab0c1b34f1aea616f312bdb13a3274e7b
|
[
"BSD-3-Clause"
] | 1
|
2020-04-07T17:00:58.000Z
|
2020-04-07T17:00:58.000Z
|
def main():
"""Entry point for the application script"""
print("Server started")
| 22.25
| 48
| 0.651685
| 11
| 89
| 5.272727
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.202247
| 89
| 3
| 49
| 29.666667
| 0.816901
| 0.426966
| 0
| 0
| 0
| 0
| 0.311111
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|