hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6299e0392205d9f5a8f373e2ce6dd853b836ebfd
| 199
|
py
|
Python
|
dmstudio/__init__.py
|
yamtannagata/dmstudio
|
dc61cc8ab3aea79eef5b31dda7057e3ea02da1da
|
[
"MIT"
] | 17
|
2018-08-27T22:42:06.000Z
|
2022-01-28T13:16:01.000Z
|
dmstudio/__init__.py
|
yuminti/dmstudio
|
6aca5e5f6161e1b3e5085aea42a71d1fd194002d
|
[
"MIT"
] | null | null | null |
dmstudio/__init__.py
|
yuminti/dmstudio
|
6aca5e5f6161e1b3e5085aea42a71d1fd194002d
|
[
"MIT"
] | 5
|
2018-08-23T14:49:21.000Z
|
2021-12-12T11:00:35.000Z
|
'''
Initialization file to enable importing of dmdir.py
'''
import dmstudio.dmcommands
import dmstudio.dmfiles
import dmstudio.initialize
import dmstudio.special
import dmstudio.superprocess
| 22.111111
| 52
| 0.80402
| 23
| 199
| 6.956522
| 0.652174
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135678
| 199
| 9
| 53
| 22.111111
| 0.930233
| 0.256281
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
62ba95abcba879345af5a7b443a33b44cdab1602
| 278
|
py
|
Python
|
conan/tools/cmake/__init__.py
|
dvirtz/conan
|
21617e5fec1c0b053e5ccf3749cf641d31c0e3a6
|
[
"MIT"
] | 1
|
2022-01-21T05:31:13.000Z
|
2022-01-21T05:31:13.000Z
|
conan/tools/cmake/__init__.py
|
dvirtz/conan
|
21617e5fec1c0b053e5ccf3749cf641d31c0e3a6
|
[
"MIT"
] | null | null | null |
conan/tools/cmake/__init__.py
|
dvirtz/conan
|
21617e5fec1c0b053e5ccf3749cf641d31c0e3a6
|
[
"MIT"
] | null | null | null |
from conan.tools.cmake.toolchain import CMakeToolchain
from conan.tools.cmake.toolchain import Block as CMakeToolchainBlock
from conan.tools.cmake.cmake import CMake
from conan.tools.cmake.cmakedeps.cmakedeps import CMakeDeps
from conan.tools.cmake.file_api import CMakeFileAPI
| 46.333333
| 68
| 0.863309
| 39
| 278
| 6.128205
| 0.358974
| 0.188285
| 0.292887
| 0.39749
| 0.284519
| 0.284519
| 0
| 0
| 0
| 0
| 0
| 0
| 0.079137
| 278
| 5
| 69
| 55.6
| 0.933594
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1a192454055ee35ca4dc6854cc334596390ce707
| 112
|
py
|
Python
|
test/test_wsgi.py
|
wileykestner/falcon-sqlalchemy-demo
|
a1c8bdf212bafc4b577dbebab57753d724871572
|
[
"MIT"
] | 41
|
2016-10-21T04:08:05.000Z
|
2020-11-27T22:07:18.000Z
|
test/test_wsgi.py
|
wileykestner/falcon-sqlalchemy-demo
|
a1c8bdf212bafc4b577dbebab57753d724871572
|
[
"MIT"
] | null | null | null |
test/test_wsgi.py
|
wileykestner/falcon-sqlalchemy-demo
|
a1c8bdf212bafc4b577dbebab57753d724871572
|
[
"MIT"
] | 8
|
2017-12-19T21:56:49.000Z
|
2022-01-30T12:29:05.000Z
|
from falcon import API
from falcon_web_demo.wsgi import app
def test_wsgi():
assert isinstance(app, API)
| 14
| 36
| 0.758929
| 18
| 112
| 4.555556
| 0.666667
| 0.243902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178571
| 112
| 7
| 37
| 16
| 0.891304
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
a7e0c3f91565c90d67408b3345e3f427ae1a6fc7
| 17,542
|
py
|
Python
|
fuzzy_modeling/tests/models/test_set_model.py
|
arruda/cloudfuzzy
|
5f834814fa28b68213c114f1c1b34d5c0df9475d
|
[
"MIT"
] | 2
|
2016-10-15T15:17:21.000Z
|
2019-04-22T05:52:43.000Z
|
fuzzy_modeling/tests/models/test_set_model.py
|
arruda/cloudfuzzy
|
5f834814fa28b68213c114f1c1b34d5c0df9475d
|
[
"MIT"
] | null | null | null |
fuzzy_modeling/tests/models/test_set_model.py
|
arruda/cloudfuzzy
|
5f834814fa28b68213c114f1c1b34d5c0df9475d
|
[
"MIT"
] | 1
|
2020-06-14T16:08:11.000Z
|
2020-06-14T16:08:11.000Z
|
# -*- coding: utf-8 -*-
import mock
from django.test import TestCase
from fuzzy_modeling.tests.utils import ResetMock
from fuzzy_modeling.models.sets import SetModel
from fuzzy.set.Set import Set
from fuzzy.set.Polygon import Polygon
from fuzzy.set.Triangle import Triangle
from fuzzy.set.Singleton import Singleton
from fuzzy.set.Trapez import Trapez
from fuzzy.set.Function import Function
from fuzzy.set.SFunction import SFunction
from fuzzy.set.ZFunction import ZFunction
from fuzzy.set.PiFunction import PiFunction
class SetModelTest(TestCase, ResetMock):
# def setUp(self):
# pass
def tearDown(self):
self.reset_all_pre_mocks(SetModel)
def _parameters_mock(self, name, value):
"""
mock a parameter
"""
param = mock.Mock()
param.name = name
param.get_value = lambda : value
return param
def _mock_setModel(self, set_choice):
self.set_choice = set_choice
self.set = SetModel(set=set_choice)
return self.set
def test_set_get_pyfuzzy_for_set_type(self):
" shoud return the correct corresponding pyfuzzy object for the Set type "
new_set = self._mock_setModel('fuzzy.set.Set.Set')
new_pyfuzzy_set = new_set.get_pyfuzzy()
# the expected pyfuzzy system
pyfuzzy_set_expected = Set()
# are from the same class
self.assertEquals(type(pyfuzzy_set_expected), type(new_pyfuzzy_set))
def test_set_get_pyfuzzy_for_polygon_type(self):
" shoud return the correct corresponding pyfuzzy object for the Polygon type "
new_set = self._mock_setModel('fuzzy.set.Polygon.Polygon')
points = [(0.,0.),(30.,1.),(60.,0.)]
points_value = str(points)
self.parameters_mock = [
self._parameters_mock(name="points", value=points_value)
]
# mocking parameters (queryset)
parameters_queryset = mock.Mock()
parameters_queryset.all = lambda : self.parameters_mock
self.set_pre_mock(SetModel,'parameters')
SetModel.parameters = parameters_queryset
new_pyfuzzy_set = new_set.get_pyfuzzy()
# the expected pyfuzzy system
pyfuzzy_set_expected = Polygon(points=points)
# are from the same class
self.assertEquals(type(pyfuzzy_set_expected), type(new_pyfuzzy_set))
# have the same points
self.assertEquals(pyfuzzy_set_expected.points, new_pyfuzzy_set.points)
def test_set_get_pyfuzzy_for_triangle_type(self):
" shoud return the correct corresponding pyfuzzy object for the Triangle type "
new_set = self._mock_setModel('fuzzy.set.Triangle.Triangle')
m = 1.2
alpha = 2.3
beta = 3.4
y_max = 4.5
y_min = 5.4
self.parameters_mock = [
self._parameters_mock(name="m", value=m),
self._parameters_mock(name="alpha", value=alpha),
self._parameters_mock(name="beta", value=beta),
self._parameters_mock(name="y_max", value=y_max),
self._parameters_mock(name="y_min", value=y_min)
]
# mocking parameters (queryset)
parameters_queryset = mock.Mock()
parameters_queryset.all = lambda : self.parameters_mock
self.set_pre_mock(SetModel,'parameters')
SetModel.parameters = parameters_queryset
new_pyfuzzy_set = new_set.get_pyfuzzy()
# the expected pyfuzzy system
pyfuzzy_set_expected = Triangle(m = m, alpha = alpha, beta = beta, y_max = y_max, y_min = y_min)
# are from the same class
self.assertEquals(type(pyfuzzy_set_expected), type(new_pyfuzzy_set))
# have the same args
self.assertEquals(pyfuzzy_set_expected.m, new_pyfuzzy_set.m)
self.assertEquals(pyfuzzy_set_expected.alpha, new_pyfuzzy_set.alpha)
self.assertEquals(pyfuzzy_set_expected.beta, new_pyfuzzy_set.beta)
self.assertEquals(pyfuzzy_set_expected.y_max, new_pyfuzzy_set.y_max)
self.assertEquals(pyfuzzy_set_expected.y_min, new_pyfuzzy_set.y_min)
def test_set_get_pyfuzzy_for_singleton_type(self):
" shoud return the correct corresponding pyfuzzy object for the Singleton type "
new_set = self._mock_setModel('fuzzy.set.Singleton.Singleton')
x = 1.2
self.parameters_mock = [
self._parameters_mock(name="x", value=x),
]
# mocking parameters (queryset)
parameters_queryset = mock.Mock()
parameters_queryset.all = lambda : self.parameters_mock
self.set_pre_mock(SetModel,'parameters')
SetModel.parameters = parameters_queryset
new_pyfuzzy_set = new_set.get_pyfuzzy()
# the expected pyfuzzy system
pyfuzzy_set_expected = Singleton(x=x)
# are from the same class
self.assertEquals(type(pyfuzzy_set_expected), type(new_pyfuzzy_set))
# have the same args
self.assertEquals(pyfuzzy_set_expected.x, new_pyfuzzy_set.x)
def test_set_get_pyfuzzy_for_trapez_type(self):
" shoud return the correct corresponding pyfuzzy object for the Trapez type "
new_set = self._mock_setModel('fuzzy.set.Trapez.Trapez')
m1 = 1.2
m2 = 1.3
alpha = 2.3
beta = 3.4
y_max = 4.5
y_min = 5.4
self.parameters_mock = [
self._parameters_mock(name="m1", value=m1),
self._parameters_mock(name="m2", value=m2),
self._parameters_mock(name="alpha", value=alpha),
self._parameters_mock(name="beta", value=beta),
self._parameters_mock(name="y_max", value=y_max),
self._parameters_mock(name="y_min", value=y_min)
]
# mocking parameters (queryset)
parameters_queryset = mock.Mock()
parameters_queryset.all = lambda : self.parameters_mock
self.set_pre_mock(SetModel,'parameters')
SetModel.parameters = parameters_queryset
new_pyfuzzy_set = new_set.get_pyfuzzy()
# the expected pyfuzzy system
pyfuzzy_set_expected = Trapez(m1 = m1, m2 = m2, alpha = alpha, beta = beta, y_max = y_max, y_min = y_min)
# are from the same class
self.assertEquals(type(pyfuzzy_set_expected), type(new_pyfuzzy_set))
# have the same args
self.assertEquals(pyfuzzy_set_expected.m1, new_pyfuzzy_set.m1)
self.assertEquals(pyfuzzy_set_expected.m2, new_pyfuzzy_set.m2)
self.assertEquals(pyfuzzy_set_expected.alpha, new_pyfuzzy_set.alpha)
self.assertEquals(pyfuzzy_set_expected.beta, new_pyfuzzy_set.beta)
self.assertEquals(pyfuzzy_set_expected.y_max, new_pyfuzzy_set.y_max)
self.assertEquals(pyfuzzy_set_expected.y_min, new_pyfuzzy_set.y_min)
def test_set_get_pyfuzzy_for_function_type(self):
" shoud return the correct corresponding pyfuzzy object for the Function type "
new_set = self._mock_setModel('fuzzy.set.Function.Function')
new_pyfuzzy_set = new_set.get_pyfuzzy()
# the expected pyfuzzy system
pyfuzzy_set_expected = Function()
# are from the same class
self.assertEquals(type(pyfuzzy_set_expected), type(new_pyfuzzy_set))
def test_set_get_pyfuzzy_for_sfunction_type(self):
" shoud return the correct corresponding pyfuzzy object for the SFunction type "
new_set = self._mock_setModel('fuzzy.set.SFunction.SFunction')
a = 1.2
delta = 2.3
self.parameters_mock = [
self._parameters_mock(name="a", value=a),
self._parameters_mock(name="delta", value=delta),
]
# mocking parameters (queryset)
parameters_queryset = mock.Mock()
parameters_queryset.all = lambda : self.parameters_mock
self.set_pre_mock(SetModel,'parameters')
SetModel.parameters = parameters_queryset
new_pyfuzzy_set = new_set.get_pyfuzzy()
# the expected pyfuzzy system
pyfuzzy_set_expected = SFunction(a = a, delta = delta)
# are from the same class
self.assertEquals(type(pyfuzzy_set_expected), type(new_pyfuzzy_set))
# have the same args
self.assertEquals(pyfuzzy_set_expected.a, new_pyfuzzy_set.a)
self.assertEquals(pyfuzzy_set_expected.delta, new_pyfuzzy_set.delta)
def test_set_get_pyfuzzy_for_zfunction_type(self):
" shoud return the correct corresponding pyfuzzy object for the ZFunction type "
new_set = self._mock_setModel('fuzzy.set.ZFunction.ZFunction')
a = 1.2
delta = 2.3
self.parameters_mock = [
self._parameters_mock(name="a", value=a),
self._parameters_mock(name="delta", value=delta),
]
# mocking parameters (queryset)
parameters_queryset = mock.Mock()
parameters_queryset.all = lambda : self.parameters_mock
self.set_pre_mock(SetModel,'parameters')
SetModel.parameters = parameters_queryset
new_pyfuzzy_set = new_set.get_pyfuzzy()
# the expected pyfuzzy system
pyfuzzy_set_expected = ZFunction(a = a, delta = delta)
# are from the same class
self.assertEquals(type(pyfuzzy_set_expected), type(new_pyfuzzy_set))
# have the same args
self.assertEquals(pyfuzzy_set_expected.a, new_pyfuzzy_set.a)
self.assertEquals(pyfuzzy_set_expected.delta, new_pyfuzzy_set.delta)
def test_set_get_pyfuzzy_for_pifunction_type(self):
" shoud return the correct corresponding pyfuzzy object for the PiFunction type "
new_set = self._mock_setModel('fuzzy.set.PiFunction.PiFunction')
a = 1.2
delta = 2.3
self.parameters_mock = [
self._parameters_mock(name="a", value=a),
self._parameters_mock(name="delta", value=delta),
]
# mocking parameters (queryset)
parameters_queryset = mock.Mock()
parameters_queryset.all = lambda : self.parameters_mock
self.set_pre_mock(SetModel,'parameters')
SetModel.parameters = parameters_queryset
new_pyfuzzy_set = new_set.get_pyfuzzy()
# the expected pyfuzzy system
pyfuzzy_set_expected = PiFunction(a = a, delta = delta)
# are from the same class
self.assertEquals(type(pyfuzzy_set_expected), type(new_pyfuzzy_set))
# have the same args
self.assertEquals(pyfuzzy_set_expected.a, new_pyfuzzy_set.a)
self.assertEquals(pyfuzzy_set_expected.delta, new_pyfuzzy_set.delta)
def test_set_from_pyfuzzy_for_set_type(self):
" shoud return the correct corresponding SetModel for the Set pyfuzzy object "
pyfuzzy_set = Set()
new_set = SetModel.from_pyfuzzy(pyfuzzy_set)
pyfuzzy_set_full_namespace = pyfuzzy_set.__module__ + "." + pyfuzzy_set.__class__.__name__
# are from the same class
self.assertEquals(pyfuzzy_set_full_namespace, new_set.set)
def test_set_from_pyfuzzy_for_polygon_type(self):
" shoud return the correct corresponding SetModel for the Polygon pyfuzzy object "
points = [(0.,0.),(30.,1.),(60.,0.)]
pyfuzzy_set = Polygon(points=points)
new_set = SetModel.from_pyfuzzy(pyfuzzy_set)
pyfuzzy_set_full_namespace = pyfuzzy_set.__module__ + "." + pyfuzzy_set.__class__.__name__
# are from the same class
self.assertEquals(pyfuzzy_set_full_namespace, new_set.set)
# have the same args
self.assertEquals(1,new_set.parameters.all().count())
points_param = new_set.parameters.all()[0]
self.assertEquals("points",points_param.name)
self.assertEquals(str(points),points_param.get_value())
def test_set_from_pyfuzzy_for_triangle_type(self):
" shoud return the correct corresponding SetModel for the Triangle pyfuzzy object "
m = 1.2
alpha = 2.3
beta = 3.4
y_max = 4.5
y_min = 5.4
pyfuzzy_set = Triangle(m = m, alpha = alpha, beta = beta, y_max = y_max, y_min = y_min)
new_set = SetModel.from_pyfuzzy(pyfuzzy_set)
pyfuzzy_set_full_namespace = pyfuzzy_set.__module__ + "." + pyfuzzy_set.__class__.__name__
# are from the same class
self.assertEquals(pyfuzzy_set_full_namespace, new_set.set)
# have the same args
self.assertEquals(5,new_set.parameters.all().count())
m_param = new_set.parameters.get(name="m")
alpha_param = new_set.parameters.get(name="alpha")
beta_param = new_set.parameters.get(name="beta")
y_max_param = new_set.parameters.get(name="y_max")
y_min_param = new_set.parameters.get(name="y_min")
self.assertEquals(pyfuzzy_set.m, m_param.get_value())
self.assertEquals(pyfuzzy_set.alpha, alpha_param.get_value())
self.assertEquals(pyfuzzy_set.beta, beta_param.get_value())
self.assertEquals(pyfuzzy_set.y_max, y_max_param.get_value())
self.assertEquals(pyfuzzy_set.y_min, y_min_param.get_value())
def test_set_from_pyfuzzy_for_trapez_type(self):
" shoud return the correct corresponding SetModel for the Trapez pyfuzzy object "
m1= 1.2
m2= 1.3
alpha = 2.3
beta = 3.4
y_max = 4.5
y_min = 5.4
pyfuzzy_set = Trapez(m1 = m1, m2 = m2, alpha = alpha, beta = beta, y_max = y_max, y_min = y_min)
new_set = SetModel.from_pyfuzzy(pyfuzzy_set)
pyfuzzy_set_full_namespace = pyfuzzy_set.__module__ + "." + pyfuzzy_set.__class__.__name__
# are from the same class
self.assertEquals(pyfuzzy_set_full_namespace, new_set.set)
# have the same args
self.assertEquals(6,new_set.parameters.all().count())
m1_param = new_set.parameters.get(name="m1")
m2_param = new_set.parameters.get(name="m2")
alpha_param = new_set.parameters.get(name="alpha")
beta_param = new_set.parameters.get(name="beta")
y_max_param = new_set.parameters.get(name="y_max")
y_min_param = new_set.parameters.get(name="y_min")
self.assertEquals(pyfuzzy_set.m1, m1_param.get_value())
self.assertEquals(pyfuzzy_set.m2, m2_param.get_value())
self.assertEquals(pyfuzzy_set.alpha, alpha_param.get_value())
self.assertEquals(pyfuzzy_set.beta, beta_param.get_value())
self.assertEquals(pyfuzzy_set.y_max, y_max_param.get_value())
self.assertEquals(pyfuzzy_set.y_min, y_min_param.get_value())
def test_set_from_pyfuzzy_for_function_type(self):
" shoud return the correct corresponding SetModel for the Function pyfuzzy object "
pyfuzzy_set = Function()
new_set = SetModel.from_pyfuzzy(pyfuzzy_set)
pyfuzzy_set_full_namespace = pyfuzzy_set.__module__ + "." + pyfuzzy_set.__class__.__name__
# are from the same class
self.assertEquals(pyfuzzy_set_full_namespace, new_set.set)
def test_set_from_pyfuzzy_for_sfunction_type(self):
" shoud return the correct corresponding SetModel for the SFunction pyfuzzy object "
a = 1.2
delta = 2.3
pyfuzzy_set = SFunction(a = a, delta = delta)
new_set = SetModel.from_pyfuzzy(pyfuzzy_set)
pyfuzzy_set_full_namespace = pyfuzzy_set.__module__ + "." + pyfuzzy_set.__class__.__name__
# are from the same class
self.assertEquals(pyfuzzy_set_full_namespace, new_set.set)
# have the same args
self.assertEquals(2,new_set.parameters.all().count())
a_param = new_set.parameters.get(name="a")
delta_param = new_set.parameters.get(name="delta")
self.assertEquals(pyfuzzy_set.a, a_param.get_value())
self.assertEquals(pyfuzzy_set.delta, delta_param.get_value())
def test_set_from_pyfuzzy_for_zfunction_type(self):
" shoud return the correct corresponding SetModel for the ZFunction pyfuzzy object "
a = 1.2
delta = 2.3
pyfuzzy_set = ZFunction(a = a, delta = delta)
new_set = SetModel.from_pyfuzzy(pyfuzzy_set)
pyfuzzy_set_full_namespace = pyfuzzy_set.__module__ + "." + pyfuzzy_set.__class__.__name__
# are from the same class
self.assertEquals(pyfuzzy_set_full_namespace, new_set.set)
# have the same args
self.assertEquals(2,new_set.parameters.all().count())
a_param = new_set.parameters.get(name="a")
delta_param = new_set.parameters.get(name="delta")
self.assertEquals(pyfuzzy_set.a, a_param.get_value())
self.assertEquals(pyfuzzy_set.delta, delta_param.get_value())
def test_set_from_pyfuzzy_for_pifunction_type(self):
" shoud return the correct corresponding SetModel for the PiFunction pyfuzzy object "
a = 1.2
delta = 2.3
pyfuzzy_set = PiFunction(a = a, delta = delta)
new_set = SetModel.from_pyfuzzy(pyfuzzy_set)
pyfuzzy_set_full_namespace = pyfuzzy_set.__module__ + "." + pyfuzzy_set.__class__.__name__
# are from the same class
self.assertEquals(pyfuzzy_set_full_namespace, new_set.set)
# have the same args
self.assertEquals(2,new_set.parameters.all().count())
a_param = new_set.parameters.get(name="a")
delta_param = new_set.parameters.get(name="delta")
self.assertEquals(pyfuzzy_set.a, a_param.get_value())
self.assertEquals(pyfuzzy_set.delta, delta_param.get_value())
| 34.667984
| 113
| 0.673526
| 2,284
| 17,542
| 4.844571
| 0.039405
| 0.125621
| 0.09146
| 0.103389
| 0.866787
| 0.849074
| 0.838771
| 0.82404
| 0.796385
| 0.793945
| 0
| 0.009118
| 0.237259
| 17,542
| 505
| 114
| 34.736634
| 0.817862
| 0.143541
| 0
| 0.597222
| 0
| 0
| 0.109542
| 0.013509
| 0
| 0
| 0
| 0
| 0.211806
| 1
| 0.069444
| false
| 0
| 0.045139
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c50ae166fb16754351ccbb44cb22293da55f5878
| 307
|
py
|
Python
|
microcosm_eventsource/models/__init__.py
|
globality-corp/microcosm-eventsource
|
e71665acfa30c74e75668ea309d36cb04824b014
|
[
"Apache-2.0"
] | 4
|
2017-08-24T09:45:24.000Z
|
2019-07-05T13:21:08.000Z
|
microcosm_eventsource/models/__init__.py
|
globality-corp/microcosm-eventsource
|
e71665acfa30c74e75668ea309d36cb04824b014
|
[
"Apache-2.0"
] | 9
|
2017-04-24T18:39:49.000Z
|
2020-04-20T18:26:10.000Z
|
microcosm_eventsource/models/__init__.py
|
globality-corp/microcosm-eventsource
|
e71665acfa30c74e75668ea309d36cb04824b014
|
[
"Apache-2.0"
] | 2
|
2019-03-17T03:44:49.000Z
|
2019-03-18T05:24:48.000Z
|
"""
Event modeling.
"""
from microcosm_eventsource.models.alias import ColumnAlias # noqa: F401
from microcosm_eventsource.models.base import BaseEvent # noqa: F401
from microcosm_eventsource.models.meta import EventMeta # noqa: F401
from microcosm_eventsource.models.rollup import RollUp # noqa: F401
| 34.111111
| 72
| 0.80456
| 38
| 307
| 6.394737
| 0.421053
| 0.213992
| 0.395062
| 0.493827
| 0.469136
| 0.469136
| 0
| 0
| 0
| 0
| 0
| 0.044444
| 0.120521
| 307
| 8
| 73
| 38.375
| 0.855556
| 0.19544
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
3d74c8f02a9357072a436d9e758c3f6f738d9c65
| 4,792
|
py
|
Python
|
tests/lib/cli_util_test.py
|
nikita-bykov/codalab-worksheets
|
d9c5b987a0b139847db6e758c167b7f2ca8936f3
|
[
"Apache-2.0"
] | null | null | null |
tests/lib/cli_util_test.py
|
nikita-bykov/codalab-worksheets
|
d9c5b987a0b139847db6e758c167b7f2ca8936f3
|
[
"Apache-2.0"
] | null | null | null |
tests/lib/cli_util_test.py
|
nikita-bykov/codalab-worksheets
|
d9c5b987a0b139847db6e758c167b7f2ca8936f3
|
[
"Apache-2.0"
] | 1
|
2020-03-13T08:16:17.000Z
|
2020-03-13T08:16:17.000Z
|
import unittest
from codalab.lib import cli_util
from codalab.common import UsageError
class CLIUtilTest(unittest.TestCase):
def test_parse_key_target(self):
cases = [
('a:b', ('a', 'b')),
(':b', ('', 'b')),
('b', (None, 'b')),
(
'dash-key:https://worksheets.codalab.org::some-worksheet//some-bundle-2.dirname/this/is/a/path.txt',
(
'dash-key',
'https://worksheets.codalab.org::some-worksheet//some-bundle-2.dirname/this/is/a/path.txt',
),
),
(
':https://worksheets.codalab.org::some-worksheet//some-bundle-2.dirname/this/is/a/path.txt',
(
'',
'https://worksheets.codalab.org::some-worksheet//some-bundle-2.dirname/this/is/a/path.txt',
),
),
(
'prod::some-worksheet//some-bundle-2.dirname/this/is/a/path.txt',
(None, 'prod::some-worksheet//some-bundle-2.dirname/this/is/a/path.txt'),
),
(
'dash-key:some-worksheet//some-bundle-2.dirname/this/is/a/path.txt',
('dash-key', 'some-worksheet//some-bundle-2.dirname/this/is/a/path.txt'),
),
(
':some-worksheet//some-bundle-2.dirname/this/is/a/path.txt',
('', 'some-worksheet//some-bundle-2.dirname/this/is/a/path.txt'),
),
(
'some-worksheet//some-bundle-2.dirname/this/is/a/path.txt',
(None, 'some-worksheet//some-bundle-2.dirname/this/is/a/path.txt'),
),
(
'dash-key:some-bundle-2.dirname/this/is/a/path.txt',
('dash-key', 'some-bundle-2.dirname/this/is/a/path.txt'),
),
(
':some-bundle-2.dirname/this/is/a/path.txt',
('', 'some-bundle-2.dirname/this/is/a/path.txt'),
),
(
'some-bundle-2.dirname/this/is/a/path.txt',
(None, 'some-bundle-2.dirname/this/is/a/path.txt'),
),
('dash-key:some-bundle-2.dirname', ('dash-key', 'some-bundle-2.dirname')),
(':some-bundle-2.dirname', ('', 'some-bundle-2.dirname')),
('some-bundle-2.dirname', (None, 'some-bundle-2.dirname')),
]
for spec, expected_parse in cases:
self.assertEqual(cli_util.parse_key_target(spec), expected_parse)
def test_parse_target_spec(self):
cases = [
(
'https://worksheets.codalab.org::some-worksheet//some-bundle-2.dirname/this/is/a/path.txt',
(
'https://worksheets.codalab.org',
'some-worksheet',
'some-bundle-2.dirname',
'this/is/a/path.txt',
),
),
(
'some-worksheet//some-bundle-2.dirname/this/is/a/path.txt',
(None, 'some-worksheet', 'some-bundle-2.dirname', 'this/is/a/path.txt'),
),
(
'some-bundle-2.dirname/this/is/a/path.txt',
(None, None, 'some-bundle-2.dirname', 'this/is/a/path.txt'),
),
('some-bundle-2.dirname', (None, None, 'some-bundle-2.dirname', None)),
('prod::bundle', ('prod', None, 'bundle', None)),
('worksheet//bundle', (None, 'worksheet', 'bundle', None)),
('bundle/path', (None, None, 'bundle', 'path')),
]
for spec, expected_parse in cases:
self.assertEqual(cli_util.parse_target_spec(spec), expected_parse)
def test_desugar(self):
self.assertEqual(cli_util.desugar_command([], 'echo hello'), ([], 'echo hello'))
self.assertEqual(
cli_util.desugar_command([':a-bundle'], 'run a-bundle'),
(["a-bundle:a-bundle"], 'run a-bundle'),
)
self.assertEqual(
cli_util.desugar_command(['a:b'], 'echo %b:c%'), (['a:b', 'b:c'], 'echo b')
)
self.assertEqual(
cli_util.desugar_command(['a:b'], 'echo %c%'), (['a:b', 'b2:c'], 'echo b2')
)
self.assertEqual(cli_util.desugar_command(['a:b'], 'echo %:c%'), (['a:b', 'c:c'], 'echo c'))
self.assertEqual(
cli_util.desugar_command(['a:b'], 'echo %a:b% %a:b%'), (['a:b'], 'echo a a')
)
self.assertEqual(
cli_util.desugar_command([], 'echo %a% %a% %a%'), (['b1:a'], 'echo b1 b1 b1')
)
self.assertRaises(UsageError, lambda: cli_util.desugar_command([], 'echo %a:b% %a:c%'))
self.assertRaises(UsageError, lambda: cli_util.desugar_command([':b'], 'echo %b:c%'))
| 43.563636
| 116
| 0.497287
| 562
| 4,792
| 4.176157
| 0.090747
| 0.136344
| 0.149979
| 0.24542
| 0.858117
| 0.792075
| 0.775458
| 0.725607
| 0.680443
| 0.644653
| 0
| 0.011526
| 0.311978
| 4,792
| 109
| 117
| 43.963303
| 0.700334
| 0
| 0
| 0.298077
| 0
| 0.144231
| 0.411311
| 0.220576
| 0
| 0
| 0
| 0
| 0.105769
| 1
| 0.028846
| false
| 0
| 0.028846
| 0
| 0.067308
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3dcd1ff4c86fbbb7df2891c1a14ffd206015223d
| 233
|
py
|
Python
|
drf_pretty_update/serializers.py
|
yezyilomo/drf-pretty-put
|
1bc77f5f8fea58b2c30e4e3d7c0837b55b679d59
|
[
"MIT"
] | 28
|
2019-08-27T14:27:41.000Z
|
2020-02-04T18:54:18.000Z
|
drf_pretty_update/serializers.py
|
yezyilomo/drf-pretty-put
|
1bc77f5f8fea58b2c30e4e3d7c0837b55b679d59
|
[
"MIT"
] | 3
|
2019-09-04T10:06:15.000Z
|
2019-09-06T10:48:42.000Z
|
drf_pretty_update/serializers.py
|
yezyilomo/drf-pretty-update
|
1bc77f5f8fea58b2c30e4e3d7c0837b55b679d59
|
[
"MIT"
] | null | null | null |
from rest_framework.serializers import ModelSerializer
from .mixins import NestedCreateMixin, NestedUpdateMixin
class NestedModelSerializer(
NestedCreateMixin,
NestedUpdateMixin,
ModelSerializer):
pass
| 25.888889
| 56
| 0.76824
| 17
| 233
| 10.470588
| 0.705882
| 0.382022
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.197425
| 233
| 9
| 57
| 25.888889
| 0.951872
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.142857
| 0.285714
| 0
| 0.428571
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
3dce3da535e7569ce8800d1273f2ad3f99b12c16
| 97
|
py
|
Python
|
terrascript/mailgun/r.py
|
GarnerCorp/python-terrascript
|
ec6c2d9114dcd3cb955dd46069f8ba487e320a8c
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/mailgun/r.py
|
GarnerCorp/python-terrascript
|
ec6c2d9114dcd3cb955dd46069f8ba487e320a8c
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/mailgun/r.py
|
GarnerCorp/python-terrascript
|
ec6c2d9114dcd3cb955dd46069f8ba487e320a8c
|
[
"BSD-2-Clause"
] | 1
|
2018-11-15T16:23:05.000Z
|
2018-11-15T16:23:05.000Z
|
from terrascript import _resource
# Terraform "mailgun_domain" resource wrapper; all behaviour comes from _resource.
class mailgun_domain(_resource): pass
# Alias without the provider prefix, so callers can write mailgun.r.domain.
domain = mailgun_domain
| 19.4
| 37
| 0.845361
| 12
| 97
| 6.5
| 0.666667
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113402
| 97
| 4
| 38
| 24.25
| 0.906977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
9a9314e94c1b25165903224b0a7b4715d7517725
| 14,021
|
py
|
Python
|
tests/api/v2_2_1/test_reports.py
|
oboehmer/dnacentersdk
|
25c4e99900640deee91a56aa886874d9cb0ca960
|
[
"MIT"
] | 32
|
2019-09-05T05:16:56.000Z
|
2022-03-22T09:50:38.000Z
|
tests/api/v2_2_1/test_reports.py
|
oboehmer/dnacentersdk
|
25c4e99900640deee91a56aa886874d9cb0ca960
|
[
"MIT"
] | 35
|
2019-09-07T18:58:54.000Z
|
2022-03-24T19:29:36.000Z
|
tests/api/v2_2_1/test_reports.py
|
oboehmer/dnacentersdk
|
25c4e99900640deee91a56aa886874d9cb0ca960
|
[
"MIT"
] | 18
|
2019-09-09T11:07:21.000Z
|
2022-03-25T08:49:59.000Z
|
# -*- coding: utf-8 -*-
"""DNACenterAPI reports API fixtures and tests.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pytest
from fastjsonschema.exceptions import JsonSchemaException
from dnacentersdk.exceptions import MalformedRequest
from tests.environment import DNA_CENTER_VERSION
pytestmark = pytest.mark.skipif(DNA_CENTER_VERSION != '2.2.1', reason='version does not match')
def is_valid_get_views_for_a_given_view_group(json_schema_validate, obj):
    """Validate *obj* against the endpoint's JSON schema; True if it conforms."""
    schema = json_schema_validate('jsd_c5879612ddc05cd0a0de09d29da4907e_v2_2_1')
    schema.validate(obj)
    return True
def get_views_for_a_given_view_group(api):
    """Invoke the getViewsForAGivenViewGroup endpoint with a fixture id."""
    return api.reports.get_views_for_a_given_view_group(view_group_id='string')
@pytest.mark.reports
def test_get_views_for_a_given_view_group(api, validator):
    """Response must validate against the schema; API errors are accepted
    only if they are schema/request failures."""
    try:
        assert is_valid_get_views_for_a_given_view_group(
            validator,
            get_views_for_a_given_view_group(api)
        )
    except Exception as original_e:
        # Re-raise inside pytest.raises: only these failure types are tolerated.
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print(original_e)
            raise original_e
def get_views_for_a_given_view_group_default(api):
    """Default-argument variant of the getViewsForAGivenViewGroup call."""
    return api.reports.get_views_for_a_given_view_group(view_group_id='string')
@pytest.mark.reports
def test_get_views_for_a_given_view_group_default(api, validator):
    """Same as the non-default test, but TypeError is also tolerated
    (default/None arguments may be rejected by the SDK)."""
    try:
        assert is_valid_get_views_for_a_given_view_group(
            validator,
            get_views_for_a_given_view_group_default(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_get_view_details_for_a_given_view_group_and_view(json_schema_validate, obj):
json_schema_validate('jsd_3d1944177c95598ebd1986582dc8069a_v2_2_1').validate(obj)
return True
def get_view_details_for_a_given_view_group_and_view(api):
endpoint_result = api.reports.get_view_details_for_a_given_view_group_and_view(
view_group_id='string',
view_id='string'
)
return endpoint_result
@pytest.mark.reports
def test_get_view_details_for_a_given_view_group_and_view(api, validator):
try:
assert is_valid_get_view_details_for_a_given_view_group_and_view(
validator,
get_view_details_for_a_given_view_group_and_view(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print(original_e)
raise original_e
def get_view_details_for_a_given_view_group_and_view_default(api):
endpoint_result = api.reports.get_view_details_for_a_given_view_group_and_view(
view_group_id='string',
view_id='string'
)
return endpoint_result
@pytest.mark.reports
def test_get_view_details_for_a_given_view_group_and_view_default(api, validator):
try:
assert is_valid_get_view_details_for_a_given_view_group_and_view(
validator,
get_view_details_for_a_given_view_group_and_view_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_get_a_scheduled_report(json_schema_validate, obj):
json_schema_validate('jsd_76f9cb7c424b5502b4ad54ccbb1ca4f4_v2_2_1').validate(obj)
return True
def get_a_scheduled_report(api):
endpoint_result = api.reports.get_a_scheduled_report(
report_id='string'
)
return endpoint_result
@pytest.mark.reports
def test_get_a_scheduled_report(api, validator):
try:
assert is_valid_get_a_scheduled_report(
validator,
get_a_scheduled_report(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print(original_e)
raise original_e
def get_a_scheduled_report_default(api):
endpoint_result = api.reports.get_a_scheduled_report(
report_id='string'
)
return endpoint_result
@pytest.mark.reports
def test_get_a_scheduled_report_default(api, validator):
try:
assert is_valid_get_a_scheduled_report(
validator,
get_a_scheduled_report_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_delete_a_scheduled_report(json_schema_validate, obj):
json_schema_validate('jsd_8a6a151b68d450dfaf1e8a92e0f5cc68_v2_2_1').validate(obj)
return True
def delete_a_scheduled_report(api):
endpoint_result = api.reports.delete_a_scheduled_report(
report_id='string'
)
return endpoint_result
@pytest.mark.reports
def test_delete_a_scheduled_report(api, validator):
try:
assert is_valid_delete_a_scheduled_report(
validator,
delete_a_scheduled_report(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print(original_e)
raise original_e
def delete_a_scheduled_report_default(api):
endpoint_result = api.reports.delete_a_scheduled_report(
report_id='string'
)
return endpoint_result
@pytest.mark.reports
def test_delete_a_scheduled_report_default(api, validator):
try:
assert is_valid_delete_a_scheduled_report(
validator,
delete_a_scheduled_report_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_get_list_of_scheduled_reports(json_schema_validate, obj):
json_schema_validate('jsd_095d89e1c3e150ef9faaff44fa483de5_v2_2_1').validate(obj)
return True
def get_list_of_scheduled_reports(api):
endpoint_result = api.reports.get_list_of_scheduled_reports(
view_group_id='string',
view_id='string'
)
return endpoint_result
@pytest.mark.reports
def test_get_list_of_scheduled_reports(api, validator):
try:
assert is_valid_get_list_of_scheduled_reports(
validator,
get_list_of_scheduled_reports(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print(original_e)
raise original_e
def get_list_of_scheduled_reports_default(api):
endpoint_result = api.reports.get_list_of_scheduled_reports(
view_group_id=None,
view_id=None
)
return endpoint_result
@pytest.mark.reports
def test_get_list_of_scheduled_reports_default(api, validator):
try:
assert is_valid_get_list_of_scheduled_reports(
validator,
get_list_of_scheduled_reports_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_create_or_schedule_a_report(json_schema_validate, obj):
json_schema_validate('jsd_220fa310ab095148bdb00d7d3d5e1676_v2_2_1').validate(obj)
return True
def create_or_schedule_a_report(api):
    """Invoke the createOrScheduleAReport endpoint with a full fixture payload."""
    request_kwargs = {
        'active_validation': True,
        'deliveries': [{}],
        'name': 'string',
        'payload': None,
        'schedule': {},
        'tags': ['string'],
        'view': {'fieldGroups': [{'fieldGroupDisplayName': 'string', 'fieldGroupName': 'string', 'fields': [{'displayName': 'string', 'name': 'string'}]}], 'filters': [{'displayName': 'string', 'name': 'string', 'type': 'string', 'value': {}}], 'format': {'formatType': 'string', 'name': 'string'}, 'name': 'string', 'viewId': 'string'},
        'viewGroupId': 'string',
        'viewGroupVersion': 'string',
    }
    return api.reports.create_or_schedule_a_report(**request_kwargs)
@pytest.mark.reports
def test_create_or_schedule_a_report(api, validator):
try:
assert is_valid_create_or_schedule_a_report(
validator,
create_or_schedule_a_report(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print(original_e)
raise original_e
def create_or_schedule_a_report_default(api):
endpoint_result = api.reports.create_or_schedule_a_report(
active_validation=True,
deliveries=None,
name=None,
payload=None,
schedule=None,
tags=None,
view=None,
viewGroupId=None,
viewGroupVersion=None
)
return endpoint_result
@pytest.mark.reports
def test_create_or_schedule_a_report_default(api, validator):
try:
assert is_valid_create_or_schedule_a_report(
validator,
create_or_schedule_a_report_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_get_all_view_groups(json_schema_validate, obj):
json_schema_validate('jsd_bbff833d5d5756698f4764a9d488cc98_v2_2_1').validate(obj)
return True
def get_all_view_groups(api):
endpoint_result = api.reports.get_all_view_groups(
)
return endpoint_result
@pytest.mark.reports
def test_get_all_view_groups(api, validator):
try:
assert is_valid_get_all_view_groups(
validator,
get_all_view_groups(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print(original_e)
raise original_e
def get_all_view_groups_default(api):
endpoint_result = api.reports.get_all_view_groups(
)
return endpoint_result
@pytest.mark.reports
def test_get_all_view_groups_default(api, validator):
try:
assert is_valid_get_all_view_groups(
validator,
get_all_view_groups_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_get_all_execution_details_for_a_given_report(json_schema_validate, obj):
json_schema_validate('jsd_a4b1ca0320185570bc12da238f0e88bb_v2_2_1').validate(obj)
return True
def get_all_execution_details_for_a_given_report(api):
endpoint_result = api.reports.get_all_execution_details_for_a_given_report(
report_id='string'
)
return endpoint_result
@pytest.mark.reports
def test_get_all_execution_details_for_a_given_report(api, validator):
try:
assert is_valid_get_all_execution_details_for_a_given_report(
validator,
get_all_execution_details_for_a_given_report(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print(original_e)
raise original_e
def get_all_execution_details_for_a_given_report_default(api):
endpoint_result = api.reports.get_all_execution_details_for_a_given_report(
report_id='string'
)
return endpoint_result
@pytest.mark.reports
def test_get_all_execution_details_for_a_given_report_default(api, validator):
try:
assert is_valid_get_all_execution_details_for_a_given_report(
validator,
get_all_execution_details_for_a_given_report_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_download_report_content(json_schema_validate, obj):
    """Validate *obj* against the downloadReportContent schema; True if it conforms."""
    schema = json_schema_validate('jsd_2921b2790cdb5abf98c8e00011de86a4_v2_2_1')
    schema.validate(obj)
    return True
def download_report_content(api):
    """Invoke the downloadReportContent endpoint with fixture arguments."""
    call_kwargs = {
        'dirpath': None,
        'save_file': None,
        'execution_id': 'string',
        'report_id': 'string',
    }
    return api.reports.download_report_content(**call_kwargs)
@pytest.mark.reports
def test_download_report_content(api, validator):
    """Downloaded content must validate against the schema; only
    schema/request failures are tolerated as API errors."""
    try:
        assert is_valid_download_report_content(
            validator,
            download_report_content(api)
        )
    except Exception as original_e:
        # Re-raise inside pytest.raises: only these failure types are tolerated.
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print(original_e)
            raise original_e
def download_report_content_default(api):
endpoint_result = api.reports.download_report_content(
dirpath=None,
save_file=None,
execution_id='string',
report_id='string'
)
return endpoint_result
@pytest.mark.reports
def test_download_report_content_default(api, validator):
try:
assert is_valid_download_report_content(
validator,
download_report_content_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
| 31.019912
| 334
| 0.727552
| 1,764
| 14,021
| 5.365079
| 0.112245
| 0.042794
| 0.031382
| 0.03022
| 0.809489
| 0.80336
| 0.794484
| 0.78339
| 0.744294
| 0.704565
| 0
| 0.019391
| 0.205549
| 14,021
| 451
| 335
| 31.088692
| 0.830236
| 0.080665
| 0
| 0.614925
| 0
| 0
| 0.05744
| 0.03167
| 0
| 0
| 0
| 0
| 0.053731
| 1
| 0.134328
| false
| 0
| 0.01194
| 0
| 0.226866
| 0.026866
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9a95322fa6af74f398ff86642090659531a59d57
| 166
|
py
|
Python
|
pythoninc/extencmodule/hellouse.py
|
cmacro/simple
|
cb29af00a964490f978705f92c54e3f7ebfafe1e
|
[
"MIT"
] | 21
|
2015-11-27T15:10:24.000Z
|
2021-08-03T08:13:25.000Z
|
pythoninc/extencmodule/hellouse.py
|
coolzpl/simple
|
cb29af00a964490f978705f92c54e3f7ebfafe1e
|
[
"MIT"
] | null | null | null |
pythoninc/extencmodule/hellouse.py
|
coolzpl/simple
|
cb29af00a964490f978705f92c54e3f7ebfafe1e
|
[
"MIT"
] | 24
|
2015-11-27T15:10:37.000Z
|
2021-08-30T13:24:49.000Z
|
"""
import and use a C extension library module
www.moguf.com 2016-05-28
"""
import hello
print(hello.message('C'))
print(hello.message('module ' + hello.__file__))
| 18.444444
| 48
| 0.722892
| 26
| 166
| 4.461538
| 0.692308
| 0.172414
| 0.293103
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054795
| 0.120482
| 166
| 9
| 48
| 18.444444
| 0.739726
| 0.409639
| 0
| 0
| 0
| 0
| 0.087912
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.666667
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
9aae6b8e6cbcbb7d4114ee7ba13ff2d4d6e14719
| 1,313
|
py
|
Python
|
tests/atest/transformers/SplitTooLongLine/test_transformer.py
|
josflorap/robotframework-tidy
|
9d4e1ccc6a50c415187468305235830f80f3373b
|
[
"Apache-2.0"
] | null | null | null |
tests/atest/transformers/SplitTooLongLine/test_transformer.py
|
josflorap/robotframework-tidy
|
9d4e1ccc6a50c415187468305235830f80f3373b
|
[
"Apache-2.0"
] | null | null | null |
tests/atest/transformers/SplitTooLongLine/test_transformer.py
|
josflorap/robotframework-tidy
|
9d4e1ccc6a50c415187468305235830f80f3373b
|
[
"Apache-2.0"
] | null | null | null |
from .. import run_tidy_and_compare
class TestSplitTooLongLine:
    """Acceptance tests for the SplitTooLongLine transformer."""

    TRANSFORMER_NAME = "SplitTooLongLine"

    def _run(self, source, expected, split_on_every_arg):
        # One shared driver: every case uses line_length=80 and 4-space separator.
        run_tidy_and_compare(
            self.TRANSFORMER_NAME,
            source=source,
            expected=expected,
            config=f":line_length=80:split_on_every_arg={split_on_every_arg} -s 4",
        )

    def test_split_too_long_lines(self):
        self._run("tests.robot", "feed_until_line_length.robot", False)

    def test_split_too_long_lines_split_on_every_arg(self):
        self._run("tests.robot", "split_on_every_arg.robot", True)

    def test_split_lines_with_multiple_assignments(self):
        self._run(
            "multiple_assignments.robot",
            "multiple_assignments_until_line_length.robot",
            False,
        )

    def test_split_lines_with_multiple_assignments_on_every_arg(self):
        self._run(
            "multiple_assignments.robot",
            "multiple_assignments_on_every_arg.robot",
            True,
        )
| 34.552632
| 70
| 0.664128
| 160
| 1,313
| 4.96875
| 0.2375
| 0.07044
| 0.100629
| 0.113208
| 0.877987
| 0.877987
| 0.832704
| 0.832704
| 0.832704
| 0.74717
| 0
| 0.01222
| 0.252094
| 1,313
| 37
| 71
| 35.486486
| 0.797352
| 0
| 0
| 0.516129
| 0
| 0
| 0.306931
| 0.262757
| 0
| 0
| 0
| 0
| 0
| 1
| 0.129032
| false
| 0
| 0.032258
| 0
| 0.225806
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9ad878db54b23048ef04ac847a9c90ed226b9cff
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/pyls/plugins/pyflakes_lint.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/pyls/plugins/pyflakes_lint.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/pyls/plugins/pyflakes_lint.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/70/5f/86/d95f82fae07c6430b7c5c57505041c9d5471b018a2873ea0b61ce478b3
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.458333
| 0
| 96
| 1
| 96
| 96
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9aeece8101e1326ddb759b421f8c73527729b944
| 206
|
py
|
Python
|
tree/views.py
|
elyamanukyan/django-consul-tree
|
9d5737fd8ea48a905fd6db383ef04d89fc6a7cc3
|
[
"Apache-2.0"
] | 3
|
2020-05-04T05:22:19.000Z
|
2020-07-08T16:41:04.000Z
|
tree/views.py
|
elyamanukyan/django-consul-tree
|
9d5737fd8ea48a905fd6db383ef04d89fc6a7cc3
|
[
"Apache-2.0"
] | 1
|
2021-05-05T17:44:05.000Z
|
2021-05-05T17:44:05.000Z
|
tree/views.py
|
elyamanukyan/django-consul-tree
|
9d5737fd8ea48a905fd6db383ef04d89fc6a7cc3
|
[
"Apache-2.0"
] | 1
|
2021-05-05T17:34:24.000Z
|
2021-05-05T17:34:24.000Z
|
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
@login_required(login_url='/accounts/login/')
def load_home(request):
    """Render the home page; anonymous users are redirected to /accounts/login/."""
    return render(request, 'home.html')
| 25.75
| 57
| 0.796117
| 28
| 206
| 5.714286
| 0.642857
| 0.125
| 0.225
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097087
| 206
| 7
| 58
| 29.428571
| 0.860215
| 0
| 0
| 0
| 0
| 0
| 0.121359
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
9af54668fa844ea2cda01b29b78a981ba9c88c87
| 137
|
py
|
Python
|
pyblog/blueprints/main/__init__.py
|
demetrius-mp/pyblog
|
6e37d7881ed676ab49811fba5025fd3ff625cb0c
|
[
"MIT"
] | 1
|
2022-03-18T21:03:51.000Z
|
2022-03-18T21:03:51.000Z
|
pyblog/blueprints/main/__init__.py
|
demetrius-mp/pyblog
|
6e37d7881ed676ab49811fba5025fd3ff625cb0c
|
[
"MIT"
] | 2
|
2021-09-25T05:26:17.000Z
|
2021-09-27T15:43:46.000Z
|
pyblog/blueprints/main/__init__.py
|
demetrius-mp/pyblog
|
6e37d7881ed676ab49811fba5025fd3ff625cb0c
|
[
"MIT"
] | null | null | null |
from flask import Flask
from pyblog.blueprints.main.routes import main as bp
def init_app(app: Flask) -> None:
    """Register the 'main' blueprint on *app*."""
    app.register_blueprint(bp)
| 17.125
| 52
| 0.773723
| 22
| 137
| 4.727273
| 0.636364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153285
| 137
| 7
| 53
| 19.571429
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0
| 0.75
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
9afb892670674830a918a87155e54eaad3d5a0d1
| 3,602
|
py
|
Python
|
tests/contracts/root_chain/test_challenge_in_flight_exit_input_spent.py
|
pgebal/plasma-contracts
|
2ff791e420b0702afe1e1514a6cd2af82cd6df4d
|
[
"Apache-2.0"
] | null | null | null |
tests/contracts/root_chain/test_challenge_in_flight_exit_input_spent.py
|
pgebal/plasma-contracts
|
2ff791e420b0702afe1e1514a6cd2af82cd6df4d
|
[
"Apache-2.0"
] | null | null | null |
tests/contracts/root_chain/test_challenge_in_flight_exit_input_spent.py
|
pgebal/plasma-contracts
|
2ff791e420b0702afe1e1514a6cd2af82cd6df4d
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from ethereum.tools.tester import TransactionFailed
from plasma_core.constants import NULL_ADDRESS
# should succeed even when phase 2 of in-flight exit is over
@pytest.mark.parametrize("period", [1, 2, 4])
def test_challenge_in_flight_exit_input_spent_should_succeed(testlang, period):
owner_1, owner_2, amount = testlang.accounts[0], testlang.accounts[1], 100
deposit_id = testlang.deposit(owner_1, amount)
spend_id = testlang.spend_utxo([deposit_id], [owner_1.key])
double_spend_id = testlang.spend_utxo([deposit_id], [owner_1.key], [(owner_1.address, NULL_ADDRESS, 100)], force_invalid=True)
testlang.start_in_flight_exit(spend_id)
testlang.piggyback_in_flight_exit_input(spend_id, 0, owner_1.key)
testlang.forward_to_period(period)
testlang.challenge_in_flight_exit_input_spent(spend_id, double_spend_id, owner_2.key)
in_flight_exit = testlang.get_in_flight_exit(spend_id)
assert not in_flight_exit.input_piggybacked(0)
def test_challenge_in_flight_exit_input_spent_not_piggybacked_should_fail(testlang):
    """Challenging an input that was never piggybacked must revert."""
    owner_1, owner_2, amount = testlang.accounts[0], testlang.accounts[1], 100
    deposit_id = testlang.deposit(owner_1, amount)
    spend_id = testlang.spend_utxo([deposit_id], [owner_1.key])
    double_spend_id = testlang.spend_utxo([deposit_id], [owner_1.key], [(owner_1.address, NULL_ADDRESS, 100)], force_invalid=True)
    testlang.start_in_flight_exit(spend_id)
    # Deliberately NO piggyback_in_flight_exit_input call here.
    testlang.forward_to_period(2)
    with pytest.raises(TransactionFailed):
        testlang.challenge_in_flight_exit_input_spent(spend_id, double_spend_id, owner_2.key)
def test_challenge_in_flight_exit_input_spent_same_tx_should_fail(testlang):
    """Using the exiting transaction itself as the competing spend must revert."""
    owner_1, owner_2, amount = testlang.accounts[0], testlang.accounts[1], 100
    deposit_id = testlang.deposit(owner_1, amount)
    spend_id = testlang.spend_utxo([deposit_id], [owner_1.key])
    testlang.start_in_flight_exit(spend_id)
    testlang.piggyback_in_flight_exit_input(spend_id, 0, owner_1.key)
    testlang.forward_to_period(2)
    with pytest.raises(TransactionFailed):
        # spend_id passed as both the exit and the "competing" spend.
        testlang.challenge_in_flight_exit_input_spent(spend_id, spend_id, owner_2.key)
def test_challenge_in_flight_exit_input_spent_unrelated_tx_should_fail(testlang):
    """A spend of a different deposit does not conflict and must revert."""
    owner_1, owner_2, amount = testlang.accounts[0], testlang.accounts[1], 100
    deposit_id_1 = testlang.deposit(owner_1, amount)
    deposit_id_2 = testlang.deposit(owner_1, amount)
    spend_id = testlang.spend_utxo([deposit_id_1], [owner_1.key])
    # Valid spend, but of deposit 2 — shares no input with the exiting tx.
    unrelated_spend_id = testlang.spend_utxo([deposit_id_2], [owner_1.key], [(owner_1.address, NULL_ADDRESS, 100)])
    testlang.start_in_flight_exit(spend_id)
    testlang.piggyback_in_flight_exit_input(spend_id, 0, owner_1.key)
    testlang.forward_to_period(2)
    with pytest.raises(TransactionFailed):
        testlang.challenge_in_flight_exit_input_spent(spend_id, unrelated_spend_id, owner_2.key)
def test_challenge_in_flight_exit_input_spent_invalid_signature_should_fail(testlang):
    """A competing spend signed by the wrong key (owner_2, not the input
    owner) must be rejected."""
    owner_1, owner_2, amount = testlang.accounts[0], testlang.accounts[1], 100
    deposit_id = testlang.deposit(owner_1, amount)
    spend_id = testlang.spend_utxo([deposit_id], [owner_1.key])
    # Signed with owner_2.key although the deposit belongs to owner_1.
    double_spend_id = testlang.spend_utxo([deposit_id], [owner_2.key], [(owner_1.address, NULL_ADDRESS, 100)], force_invalid=True)
    testlang.start_in_flight_exit(spend_id)
    testlang.piggyback_in_flight_exit_input(spend_id, 0, owner_1.key)
    testlang.forward_to_period(2)
    with pytest.raises(TransactionFailed):
        testlang.challenge_in_flight_exit_input_spent(spend_id, double_spend_id, owner_2.key)
| 50.027778
| 130
| 0.788173
| 547
| 3,602
| 4.76234
| 0.117002
| 0.077927
| 0.10595
| 0.097889
| 0.864491
| 0.846833
| 0.846833
| 0.834165
| 0.80499
| 0.791171
| 0
| 0.028822
| 0.113826
| 3,602
| 71
| 131
| 50.732394
| 0.787281
| 0.016102
| 0
| 0.648148
| 0
| 0
| 0.001694
| 0
| 0
| 0
| 0
| 0
| 0.018519
| 1
| 0.092593
| false
| 0
| 0.055556
| 0
| 0.148148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b1118b8798179f9fbc7efe9bca594b12775c9b94
| 3,814
|
py
|
Python
|
gdsclient/tests/unit/test_simple_algo.py
|
FlorentinD/gdsclient
|
04f41d9b60c3de3af308d1d264fadbce0ed54e68
|
[
"Apache-2.0"
] | null | null | null |
gdsclient/tests/unit/test_simple_algo.py
|
FlorentinD/gdsclient
|
04f41d9b60c3de3af308d1d264fadbce0ed54e68
|
[
"Apache-2.0"
] | null | null | null |
gdsclient/tests/unit/test_simple_algo.py
|
FlorentinD/gdsclient
|
04f41d9b60c3de3af308d1d264fadbce0ed54e68
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from gdsclient.graph.graph_object import Graph
from gdsclient.graph_data_science import GraphDataScience
from gdsclient.tests.unit.conftest import CollectingQueryRunner
GRAPH_NAME = "g"
@pytest.fixture(scope="class")
def G(gds: GraphDataScience) -> Graph:
return gds.graph.project(GRAPH_NAME, "Node", "REL")
def test_algoName_mutate(
    runner: CollectingQueryRunner, gds: GraphDataScience, G: Graph
) -> None:
    """mutate() must emit the Cypher CALL and pack all kwargs into $config."""
    gds.algoName.mutate(G, mutateProperty="rank", dampingFactor=0.2, tolerance=0.3)
    assert runner.last_query() == "CALL gds.algoName.mutate($graph_name, $config)"
    assert runner.last_params() == {
        "graph_name": GRAPH_NAME,
        "config": {"mutateProperty": "rank", "dampingFactor": 0.2, "tolerance": 0.3},
    }
def test_algoName_stats(
runner: CollectingQueryRunner, gds: GraphDataScience, G: Graph
) -> None:
gds.algoName.stats(G, dampingFactor=0.2, tolerance=0.3)
assert runner.last_query() == "CALL gds.algoName.stats($graph_name, $config)"
assert runner.last_params() == {
"graph_name": GRAPH_NAME,
"config": {"dampingFactor": 0.2, "tolerance": 0.3},
}
def test_algoName_stream(
runner: CollectingQueryRunner, gds: GraphDataScience, G: Graph
) -> None:
gds.algoName.stream(G, dampingFactor=0.2, tolerance=0.3)
assert runner.last_query() == "CALL gds.algoName.stream($graph_name, $config)"
assert runner.last_params() == {
"graph_name": GRAPH_NAME,
"config": {"dampingFactor": 0.2, "tolerance": 0.3},
}
def test_algoName_write(
runner: CollectingQueryRunner, gds: GraphDataScience, G: Graph
) -> None:
gds.algoName.write(G, writeProperty="rank", dampingFactor=0.2, tolerance=0.3)
assert runner.last_query() == "CALL gds.algoName.write($graph_name, $config)"
assert runner.last_params() == {
"graph_name": GRAPH_NAME,
"config": {"writeProperty": "rank", "dampingFactor": 0.2, "tolerance": 0.3},
}
def test_algoName_mutate_estimate(
    runner: CollectingQueryRunner, gds: GraphDataScience, G: Graph
) -> None:
    """mutate.estimate() must call the .estimate endpoint with the same config."""
    gds.algoName.mutate.estimate(
        G, mutateProperty="rank", dampingFactor=0.2, tolerance=0.3
    )
    assert (
        runner.last_query() == "CALL gds.algoName.mutate.estimate($graph_name, $config)"
    )
    assert runner.last_params() == {
        "graph_name": GRAPH_NAME,
        "config": {"mutateProperty": "rank", "dampingFactor": 0.2, "tolerance": 0.3},
    }
def test_algoName_stats_estimate(
runner: CollectingQueryRunner, gds: GraphDataScience, G: Graph
) -> None:
gds.algoName.stats.estimate(G, dampingFactor=0.2, tolerance=0.3)
assert (
runner.last_query() == "CALL gds.algoName.stats.estimate($graph_name, $config)"
)
assert runner.last_params() == {
"graph_name": GRAPH_NAME,
"config": {"dampingFactor": 0.2, "tolerance": 0.3},
}
def test_algoName_stream_estimate(
runner: CollectingQueryRunner, gds: GraphDataScience, G: Graph
) -> None:
gds.algoName.stream.estimate(G, dampingFactor=0.2, tolerance=0.3)
assert (
runner.last_query() == "CALL gds.algoName.stream.estimate($graph_name, $config)"
)
assert runner.last_params() == {
"graph_name": GRAPH_NAME,
"config": {"dampingFactor": 0.2, "tolerance": 0.3},
}
def test_algoName_write_estimate(
runner: CollectingQueryRunner, gds: GraphDataScience, G: Graph
) -> None:
gds.algoName.write.estimate(
G, writeProperty="rank", dampingFactor=0.2, tolerance=0.3
)
assert (
runner.last_query() == "CALL gds.algoName.write.estimate($graph_name, $config)"
)
assert runner.last_params() == {
"graph_name": GRAPH_NAME,
"config": {"writeProperty": "rank", "dampingFactor": 0.2, "tolerance": 0.3},
}
| 31.520661
| 88
| 0.667803
| 447
| 3,814
| 5.552573
| 0.105145
| 0.094279
| 0.096696
| 0.154714
| 0.878727
| 0.878727
| 0.878727
| 0.878727
| 0.878727
| 0.878727
| 0
| 0.020546
| 0.183272
| 3,814
| 120
| 89
| 31.783333
| 0.776244
| 0
| 0
| 0.478261
| 0
| 0
| 0.21054
| 0.075511
| 0
| 0
| 0
| 0
| 0.173913
| 1
| 0.097826
| false
| 0
| 0.043478
| 0.01087
| 0.152174
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
492f704df7586941e82deb1310518e34a0f372f1
| 206
|
py
|
Python
|
lims/projects/admin.py
|
sqilz/LIMS-Backend
|
b64e1fa512f89e4492803d44c6b8c35e4d4724cc
|
[
"MIT"
] | 12
|
2017-03-01T10:39:36.000Z
|
2022-01-04T06:17:19.000Z
|
lims/projects/admin.py
|
sqilz/LIMS-Backend
|
b64e1fa512f89e4492803d44c6b8c35e4d4724cc
|
[
"MIT"
] | 29
|
2017-04-25T14:05:08.000Z
|
2021-06-21T14:41:53.000Z
|
lims/projects/admin.py
|
sqilz/LIMS-Backend
|
b64e1fa512f89e4492803d44c6b8c35e4d4724cc
|
[
"MIT"
] | 4
|
2017-10-11T16:22:53.000Z
|
2021-02-23T15:45:21.000Z
|
from django.contrib import admin
from .models import Project, Product, Comment, WorkLog
# Expose the project models in the Django admin site.
for _model in (Project, Product, Comment, WorkLog):
    admin.site.register(_model)
| 22.888889
| 54
| 0.815534
| 28
| 206
| 6
| 0.428571
| 0.214286
| 0.404762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082524
| 206
| 8
| 55
| 25.75
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
49328efddd93b1e726cc41d1fef33850c2c3b068
| 17,224
|
py
|
Python
|
tf_rl/common/networks.py
|
Rowing0914/TF_RL
|
68e5e9a23e38ed2d8ac5f97d380567b919a3d2e7
|
[
"MIT"
] | 23
|
2019-04-04T17:34:56.000Z
|
2021-12-14T19:34:10.000Z
|
tf_rl/common/networks.py
|
Rowing0914/TF_RL
|
68e5e9a23e38ed2d8ac5f97d380567b919a3d2e7
|
[
"MIT"
] | null | null | null |
tf_rl/common/networks.py
|
Rowing0914/TF_RL
|
68e5e9a23e38ed2d8ac5f97d380567b919a3d2e7
|
[
"MIT"
] | 3
|
2019-07-17T23:56:36.000Z
|
2022-03-13T03:55:21.000Z
|
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
L2 = tf.keras.regularizers.l2(1e-2)
KERNEL_INIT = tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3)
XAVIER_INIT = tf.contrib.layers.xavier_initializer()
class Nature_DQN(tf.keras.Model):
    """DQN-style conv net: three conv layers, a 512-unit dense layer and a
    linear head emitting one Q-value per action.

    NOTE(review): conv1 uses stride 8 with an 8x8 kernel; the Mnih et al.
    (2015) architecture uses stride 4 there — confirm this is intentional.
    """
    def __init__(self, num_action):
        super(Nature_DQN, self).__init__()
        self.conv1 = tf.keras.layers.Conv2D(32, kernel_size=8, strides=8, activation='relu')
        self.conv2 = tf.keras.layers.Conv2D(64, kernel_size=4, strides=2, activation='relu')
        self.conv3 = tf.keras.layers.Conv2D(64, kernel_size=3, strides=1, activation='relu')
        self.flat = tf.keras.layers.Flatten()
        self.fc1 = tf.keras.layers.Dense(512, activation='relu')
        # Linear output: raw Q-values, one per action.
        self.pred = tf.keras.layers.Dense(num_action, activation='linear')

    # TF1.x eager: defun compiles the forward pass into a graph function.
    @tf.contrib.eager.defun(autograph=False)
    def call(self, inputs):
        x = self.conv1(inputs)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.flat(x)
        x = self.fc1(x)
        return self.pred(x)
class CartPole(tf.keras.Model):
    """Small fully connected Q-network for low-dimensional states
    (e.g. CartPole): three 16-unit ReLU layers and a linear head."""

    def __init__(self, num_action):
        super(CartPole, self).__init__()
        self.dense1 = tf.keras.layers.Dense(16, activation='relu')
        self.dense2 = tf.keras.layers.Dense(16, activation='relu')
        self.dense3 = tf.keras.layers.Dense(16, activation='relu')
        self.pred = tf.keras.layers.Dense(num_action, activation='linear')

    @tf.contrib.eager.defun(autograph=False)
    def call(self, inputs):
        """Return Q-values of shape (batch, num_action)."""
        hidden = inputs
        for layer in (self.dense1, self.dense2, self.dense3):
            hidden = layer(hidden)
        return self.pred(hidden)
class Duelling_atari(tf.keras.Model):
    """Dueling DQN for Atari (Wang et al., 2016): shared conv torso with an
    advantage head (`q_value`) and a state-value head (`v_value`).

    Args:
        num_action: number of discrete actions.
        duelling_type: aggregation mode -- 'avg', 'max' or 'naive'.

    Raises:
        ValueError: on an unknown ``duelling_type``. Validated eagerly here;
            the old code asserted inside the defun-compiled ``call`` after
            assigning a dummy ``output = 0``, and the assert would vanish
            under ``python -O``.
    """

    def __init__(self, num_action, duelling_type="avg"):
        super(Duelling_atari, self).__init__()
        if duelling_type not in ("avg", "max", "naive"):
            raise ValueError("duelling_type must be one of {'avg','max','naive'}")
        self.duelling_type = duelling_type
        self.conv1 = tf.keras.layers.Conv2D(32, kernel_size=8, strides=4, activation='relu', kernel_regularizer=L2,
                                            bias_regularizer=L2)
        self.conv2 = tf.keras.layers.Conv2D(64, kernel_size=4, strides=2, activation='relu', kernel_regularizer=L2,
                                            bias_regularizer=L2)
        self.conv3 = tf.keras.layers.Conv2D(64, kernel_size=3, strides=1, activation='relu', kernel_regularizer=L2,
                                            bias_regularizer=L2)
        self.flat = tf.keras.layers.Flatten()
        self.fc1 = tf.keras.layers.Dense(512, activation='relu', kernel_regularizer=L2, bias_regularizer=L2)
        self.q_value = tf.keras.layers.Dense(num_action, activation='linear', kernel_regularizer=L2,
                                             bias_regularizer=L2)
        self.v_value = tf.keras.layers.Dense(1, activation='linear', kernel_regularizer=L2, bias_regularizer=L2)

    @tf.contrib.eager.defun(autograph=False)
    def call(self, inputs):
        """Return aggregated Q-values of shape (batch, num_action)."""
        x = self.conv1(inputs)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.flat(x)
        x = self.fc1(x)
        q_value = self.q_value(x)
        v_value = self.v_value(x)
        # Fix: reduce the advantage over the action axis per sample
        # (axis=-1, keepdims=True). The previous code reduced over the whole
        # tensor, mixing statistics across the batch.
        if self.duelling_type == "avg":
            # Q(s,a) = V(s) + (A(s,a) - mean_a A(s,a))
            output = v_value + (q_value - tf.reduce_mean(q_value, axis=-1, keepdims=True))
        elif self.duelling_type == "max":
            # Q(s,a) = V(s) + (A(s,a) - max_a A(s,a))
            output = v_value + (q_value - tf.math.reduce_max(q_value, axis=-1, keepdims=True))
        else:
            # "naive": Q(s,a) = V(s) + A(s,a)
            output = v_value + q_value
        return output
class Duelling_cartpole(tf.keras.Model):
    """Dueling DQN for low-dimensional states: three 16-unit ReLU layers with
    an advantage head (`q_value`) and a state-value head (`v_value`).

    Args:
        num_action: number of discrete actions.
        duelling_type: aggregation mode -- 'avg', 'max' or 'naive'.

    Raises:
        ValueError: on an unknown ``duelling_type``. Validated eagerly here;
            the old code asserted inside the defun-compiled ``call`` after
            assigning a dummy ``output = 0``, and the assert would vanish
            under ``python -O``.
    """

    def __init__(self, num_action, duelling_type="avg"):
        super(Duelling_cartpole, self).__init__()
        if duelling_type not in ("avg", "max", "naive"):
            raise ValueError("duelling_type must be one of {'avg','max','naive'}")
        self.duelling_type = duelling_type
        self.dense1 = tf.keras.layers.Dense(16, activation='relu', kernel_regularizer=L2, bias_regularizer=L2, )
        self.dense2 = tf.keras.layers.Dense(16, activation='relu', kernel_regularizer=L2, bias_regularizer=L2, )
        self.dense3 = tf.keras.layers.Dense(16, activation='relu', kernel_regularizer=L2, bias_regularizer=L2, )
        self.q_value = tf.keras.layers.Dense(num_action, activation='linear', kernel_regularizer=L2,
                                             bias_regularizer=L2, )
        self.v_value = tf.keras.layers.Dense(1, activation='linear', kernel_regularizer=L2, bias_regularizer=L2, )

    @tf.contrib.eager.defun(autograph=False)
    def call(self, inputs):
        """Return aggregated Q-values of shape (batch, num_action)."""
        x = self.dense1(inputs)
        x = self.dense2(x)
        x = self.dense3(x)
        q_value = self.q_value(x)
        v_value = self.v_value(x)
        # Fix: reduce the advantage over the action axis per sample
        # (axis=-1, keepdims=True). The previous code reduced over the whole
        # tensor, mixing statistics across the batch.
        if self.duelling_type == "avg":
            # Q(s,a) = V(s) + (A(s,a) - mean_a A(s,a))
            output = v_value + (q_value - tf.reduce_mean(q_value, axis=-1, keepdims=True))
        elif self.duelling_type == "max":
            # Q(s,a) = V(s) + (A(s,a) - max_a A(s,a))
            output = v_value + (q_value - tf.math.reduce_max(q_value, axis=-1, keepdims=True))
        else:
            # "naive": Q(s,a) = V(s) + A(s,a)
            output = v_value + q_value
        return output
class DDPG_Actor(tf.keras.Model):
    """DDPG deterministic policy: two ReLU layers (400, 300) and a tanh head
    bounding actions to [-1, 1]."""

    def __init__(self, num_action=1):
        super(DDPG_Actor, self).__init__()
        self.dense1 = tf.keras.layers.Dense(400, activation='relu', kernel_initializer=KERNEL_INIT)
        self.dense2 = tf.keras.layers.Dense(300, activation='relu', kernel_initializer=KERNEL_INIT)
        self.pred = tf.keras.layers.Dense(num_action, activation='tanh', kernel_initializer=KERNEL_INIT)

    @tf.contrib.eager.defun(autograph=False)
    def call(self, inputs):
        """Return actions of shape (batch, num_action), each in [-1, 1]."""
        hidden = self.dense2(self.dense1(inputs))
        return self.pred(hidden)
class DDPG_Critic(tf.keras.Model):
    """DDPG Q-network. The action is injected after the first hidden layer,
    matching the original DDPG architecture."""

    def __init__(self, output_shape):
        super(DDPG_Critic, self).__init__()
        # All three layers share the same regularization/initialization.
        common = dict(kernel_regularizer=L2, bias_regularizer=L2, kernel_initializer=KERNEL_INIT)
        self.dense1 = tf.keras.layers.Dense(400, activation='relu', **common)
        self.dense2 = tf.keras.layers.Dense(300, activation='relu', **common)
        self.pred = tf.keras.layers.Dense(output_shape, activation='linear', **common)

    @tf.contrib.eager.defun(autograph=False)
    def call(self, obs, act):
        """Return Q(obs, act) of shape (batch, output_shape)."""
        hidden = self.dense1(obs)
        hidden = self.dense2(tf.concat([hidden, act], axis=-1))
        return self.pred(hidden)
class BatchNorm_DDPG_Actor(tf.keras.Model):
    """DDPG actor with batch normalization after each hidden layer and a
    tanh head bounding actions to [-1, 1]."""

    def __init__(self, num_action=1):
        super(BatchNorm_DDPG_Actor, self).__init__()
        self.dense1 = tf.keras.layers.Dense(400, activation='relu', kernel_initializer=KERNEL_INIT)
        self.batch1 = tf.keras.layers.BatchNormalization()
        self.dense2 = tf.keras.layers.Dense(300, activation='relu', kernel_initializer=KERNEL_INIT)
        self.batch2 = tf.keras.layers.BatchNormalization()
        self.pred = tf.keras.layers.Dense(num_action, activation='tanh', kernel_initializer=KERNEL_INIT)

    @tf.contrib.eager.defun(autograph=False)
    def call(self, inputs):
        """Return actions of shape (batch, num_action), each in [-1, 1]."""
        hidden = self.batch1(self.dense1(inputs))
        hidden = self.batch2(self.dense2(hidden))
        return self.pred(hidden)
class BatchNorm_DDPG_Critic(tf.keras.Model):
    """DDPG critic with batch normalization after each hidden layer; the
    action is injected after the first layer."""

    def __init__(self, output_shape):
        super(BatchNorm_DDPG_Critic, self).__init__()
        # All dense layers share the same regularization/initialization.
        common = dict(kernel_regularizer=L2, bias_regularizer=L2, kernel_initializer=KERNEL_INIT)
        self.dense1 = tf.keras.layers.Dense(400, activation='relu', **common)
        self.batch1 = tf.keras.layers.BatchNormalization()
        self.dense2 = tf.keras.layers.Dense(300, activation='relu', **common)
        self.batch2 = tf.keras.layers.BatchNormalization()
        self.pred = tf.keras.layers.Dense(output_shape, activation='linear', **common)

    @tf.contrib.eager.defun(autograph=False)
    def call(self, obs, act):
        """Return Q(obs, act) of shape (batch, output_shape)."""
        hidden = self.batch1(self.dense1(obs))
        hidden = self.batch2(self.dense2(tf.concat([hidden, act], axis=-1)))
        return self.pred(hidden)
class self_rewarding_DDPG_Actor(tf.keras.Model):
    """DDPG-style actor with an auxiliary reward-prediction head.

    ``call`` returns both a tanh-bounded action and a tanh-bounded scalar
    reward estimate computed from the same final hidden features.
    """

    def __init__(self, num_action=1):
        super(self_rewarding_DDPG_Actor, self).__init__()
        self.dense1 = tf.keras.layers.Dense(400, activation='relu', kernel_initializer=KERNEL_INIT)
        # NOTE(review): batch1 is constructed but deliberately bypassed in
        # call() (the line applying it is commented out there).
        self.batch1 = tf.keras.layers.BatchNormalization()
        self.dense2 = tf.keras.layers.Dense(300, activation='relu', kernel_initializer=KERNEL_INIT)
        self.batch2 = tf.keras.layers.BatchNormalization()
        self.pred = tf.keras.layers.Dense(num_action, activation='tanh', kernel_initializer=KERNEL_INIT)
        # Auxiliary head predicting a scalar reward in [-1, 1].
        self.reward = tf.keras.layers.Dense(1, activation='tanh', kernel_initializer=KERNEL_INIT)

    @tf.contrib.eager.defun(autograph=False)
    def call(self, inputs):
        """Return (action, reward_estimate); both heads share the features x."""
        x = self.dense1(inputs)
        # x = self.batch1(x)
        x = self.dense2(x)
        x = self.batch2(x)
        pred = self.pred(x)
        reward = self.reward(x)
        return pred, reward
class HER_Actor(tf.keras.Model):
    """HER actor network.

    The paper describes 64-unit layers, but the OpenAI HER implementation
    uses 256 units per layer, which is what this class follows.
    """

    def __init__(self, num_action=1):
        super(HER_Actor, self).__init__()
        self.dense1 = tf.keras.layers.Dense(256, activation='relu', kernel_initializer=KERNEL_INIT)
        self.dense2 = tf.keras.layers.Dense(256, activation='relu', kernel_initializer=KERNEL_INIT)
        self.dense3 = tf.keras.layers.Dense(256, activation='relu', kernel_initializer=KERNEL_INIT)
        self.pred = tf.keras.layers.Dense(num_action, activation='tanh', kernel_initializer=KERNEL_INIT)

    @tf.contrib.eager.defun(autograph=False)
    def call(self, inputs):
        """Return tanh-bounded actions of shape (batch, num_action)."""
        hidden = inputs
        for layer in (self.dense1, self.dense2, self.dense3):
            hidden = layer(hidden)
        return self.pred(hidden)
class HER_Critic(tf.keras.Model):
    """HER critic network.

    The paper describes 64-unit layers, but the OpenAI HER implementation
    uses 256 units per layer, which is what this class follows.
    """

    def __init__(self, output_shape):
        super(HER_Critic, self).__init__()
        self.dense1 = tf.keras.layers.Dense(256, activation='relu', kernel_initializer=KERNEL_INIT)
        self.dense2 = tf.keras.layers.Dense(256, activation='relu', kernel_initializer=KERNEL_INIT)
        self.dense3 = tf.keras.layers.Dense(256, activation='relu', kernel_initializer=KERNEL_INIT)
        self.pred = tf.keras.layers.Dense(output_shape, activation='linear', kernel_initializer=KERNEL_INIT)

    @tf.contrib.eager.defun(autograph=False)
    def call(self, inputs, act):
        """Return Q-values; `inputs` is the observation already concatenated
        with the goal g, and `act` is injected after the first layer."""
        hidden = self.dense1(inputs)
        hidden = self.dense2(tf.concat([hidden, act], axis=-1))
        hidden = self.dense3(hidden)
        return self.pred(hidden)
class SAC_Actor(tf.keras.Model):
    """SAC Gaussian policy.

    Outputs a mean and a log-std head sized ``num_action``, samples from the
    resulting Normal, and tanh-squashes the sample. Haarnoja's reference
    implementation uses 100-unit hidden layers; 256 is used here.
    """

    def __init__(self, num_action=1):
        super(SAC_Actor, self).__init__()
        # Clipping bounds for the log-std head (numerical stability).
        self.LOG_SIG_MAX = 2
        self.LOG_SIG_MIN = -20
        self.dense1 = tf.keras.layers.Dense(256, activation='relu', kernel_initializer=XAVIER_INIT)
        self.dense2 = tf.keras.layers.Dense(256, activation='relu', kernel_initializer=XAVIER_INIT)
        self.mean = tf.keras.layers.Dense(num_action, activation='linear', kernel_initializer=XAVIER_INIT)
        # NOTE: despite the name, this head outputs log-std, not std.
        self.std = tf.keras.layers.Dense(num_action, activation='linear', kernel_initializer=XAVIER_INIT)

    @tf.contrib.eager.defun(autograph=False)
    def call(self, inputs):
        """Return (sampled_action, log_prob, deterministic_action).

        As discussed in the ablation study (sec 5.2) of the SAC paper, the
        deterministic tanh(mean) action works better at evaluation time than
        a stochastic sample, hence the third return value.
        """
        x = self.dense1(inputs)
        x = self.dense2(x)
        mean = self.mean(x)
        # Clip the log-std for stability, then exponentiate to get std.
        log_std = tf.clip_by_value(self.std(x), self.LOG_SIG_MIN, self.LOG_SIG_MAX)
        std = tf.math.exp(log_std)
        dist = tfd.Normal(loc=mean, scale=std)
        # dist = tfd.MultivariateNormalDiag(loc=mean, scale_diag=std)
        raw_action = dist.sample()
        action = tf.keras.activations.tanh(raw_action)
        # Change-of-variables correction for the tanh squashing.
        log_prob = dist.log_prob(raw_action)
        log_prob -= tf.math.log(1. - tf.math.square(action) + 1e-6)
        # Fix: `keep_dims` is the deprecated spelling; `keepdims` is the
        # supported keyword for tf.math.reduce_sum.
        log_prob = tf.math.reduce_sum(log_prob, 1, keepdims=True)
        return action, log_prob, tf.keras.activations.tanh(mean)
class SAC_Critic(tf.keras.Model):
    """
    It contains two Q-network. And the usage of two Q-functions improves performance by reducing overestimation bias.
    """

    def __init__(self, output_shape):
        super(SAC_Critic, self).__init__()
        # Q1 architecture
        self.dense1 = tf.keras.layers.Dense(256, activation='relu', kernel_initializer=XAVIER_INIT)
        self.dense2 = tf.keras.layers.Dense(256, activation='relu', kernel_initializer=XAVIER_INIT)
        self.Q1 = tf.keras.layers.Dense(output_shape, activation='linear', kernel_initializer=XAVIER_INIT)
        # Q2 architecture
        self.dense3 = tf.keras.layers.Dense(256, activation='relu', kernel_initializer=XAVIER_INIT)
        self.dense4 = tf.keras.layers.Dense(256, activation='relu', kernel_initializer=XAVIER_INIT)
        self.Q2 = tf.keras.layers.Dense(output_shape, activation='linear', kernel_initializer=XAVIER_INIT)

    # Alternative variant kept for reference: it injects the action after
    # the first layer instead of concatenating it with obs up front.
    # @tf.contrib.eager.defun(autograph=False)
    # def call(self, obs, act):
    #     """ My Implementation """
    #     x1 = self.dense1(obs)
    #     x1 = self.dense2(tf.concat([x1, act], axis=-1))
    #     Q1 = self.Q1(x1)
    #
    #     x2 = self.dense3(obs)
    #     x2 = self.dense4(tf.concat([x2, act], axis=-1))
    #     Q2 = self.Q2(x2)
    #     return Q1, Q2

    @tf.contrib.eager.defun(autograph=False)
    def call(self, obs, act):
        """ Original Implementation """
        # Both Q-networks consume the same (obs, act) concatenation.
        _concat = tf.concat([obs, act], axis=-1)
        x1 = self.dense1(_concat)
        x1 = self.dense2(x1)
        Q1 = self.Q1(x1)
        x2 = self.dense3(_concat)
        x2 = self.dense4(x2)
        Q2 = self.Q2(x2)
        return Q1, Q2
class TRPO_Policy(tf.keras.Model):
    """
    TRPO Policy network: outputs the Gaussian policy's mean (state-dependent)
    and a learned, state-independent sigma variable.
    """

    def __init__(self, output_shape):
        super(TRPO_Policy, self).__init__()
        self.dense1 = tf.keras.layers.Dense(128, activation='tanh', kernel_initializer=KERNEL_INIT)
        self.dense2 = tf.keras.layers.Dense(128, activation='tanh', kernel_initializer=KERNEL_INIT)
        self.mean = tf.keras.layers.Dense(output_shape, activation='linear', kernel_initializer=KERNEL_INIT)
        # NOTE(review): TF1-style tf.get_variable inside a Keras model — the
        # sigma is a free variable initialized to 0.6, shared across states.
        # Verify it is picked up by the model's trainable variables.
        self.std = tf.get_variable('sigma', (1, output_shape), tf.float32, tf.constant_initializer(0.6))

    @tf.contrib.eager.defun(autograph=False)
    def call(self, inputs):
        """Return (mean, sigma); sigma does not depend on `inputs`."""
        x = self.dense1(inputs)
        x = self.dense2(x)
        mean = self.mean(x)
        return mean, self.std
class TRPO_Value(tf.keras.Model):
    """TRPO state-value network: two 128-unit tanh layers and a linear head,
    all L2-regularized."""

    def __init__(self, output_shape):
        super(TRPO_Value, self).__init__()
        # Shared regularization/initialization for every layer.
        common = dict(kernel_regularizer=L2, bias_regularizer=L2, kernel_initializer=KERNEL_INIT)
        self.dense1 = tf.keras.layers.Dense(128, activation='tanh', **common)
        self.dense2 = tf.keras.layers.Dense(128, activation='tanh', **common)
        self.pred = tf.keras.layers.Dense(output_shape, activation='linear', **common)

    @tf.contrib.eager.defun(autograph=False)
    def call(self, inputs):
        """Return state-value estimates of shape (batch, output_shape)."""
        hidden = self.dense2(self.dense1(inputs))
        return self.pred(hidden)
| 44.277635
| 125
| 0.639979
| 2,314
| 17,224
| 4.592481
| 0.102852
| 0.056648
| 0.083184
| 0.091465
| 0.831749
| 0.830997
| 0.821398
| 0.806248
| 0.791098
| 0.751764
| 0
| 0.026585
| 0.233453
| 17,224
| 388
| 126
| 44.391753
| 0.778308
| 0.115885
| 0
| 0.669091
| 0
| 0
| 0.02665
| 0.002791
| 0
| 0
| 0
| 0
| 0.007273
| 1
| 0.109091
| false
| 0
| 0.007273
| 0
| 0.225455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4955e79db1ee3af9d5d27394f55cf4f292d6fc25
| 211
|
py
|
Python
|
pytvision/__init__.py
|
CarlosPena00/pytorchvision
|
824b3a5a8940f3ee6b4da5de7a391a88e5aa36a2
|
[
"MIT"
] | null | null | null |
pytvision/__init__.py
|
CarlosPena00/pytorchvision
|
824b3a5a8940f3ee6b4da5de7a391a88e5aa36a2
|
[
"MIT"
] | null | null | null |
pytvision/__init__.py
|
CarlosPena00/pytorchvision
|
824b3a5a8940f3ee6b4da5de7a391a88e5aa36a2
|
[
"MIT"
] | null | null | null |
# Package initializer: expose the pytvision submodules at package level so
# users can do e.g. `from pytvision import transforms`.
from pytvision import datasets
from pytvision import netmodels
from pytvision import transforms
from pytvision import logger
from pytvision import graphic
from pytvision import neuralnet
# Package version string.
__version__ = '0.0.0'
| 21.1
| 32
| 0.838863
| 28
| 211
| 6.178571
| 0.392857
| 0.450867
| 0.65896
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016484
| 0.137441
| 211
| 9
| 33
| 23.444444
| 0.934066
| 0
| 0
| 0
| 0
| 0
| 0.02381
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.857143
| 0
| 0.857143
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
49910be82e8dd9f5e8dabbf52a42e3497fa7143d
| 267
|
py
|
Python
|
src/symbolic.py
|
pvphan/camera-calibration
|
46ecdf49410124841aa830abbc959ab922492747
|
[
"MIT"
] | null | null | null |
src/symbolic.py
|
pvphan/camera-calibration
|
46ecdf49410124841aa830abbc959ab922492747
|
[
"MIT"
] | null | null | null |
src/symbolic.py
|
pvphan/camera-calibration
|
46ecdf49410124841aa830abbc959ab922492747
|
[
"MIT"
] | null | null | null |
import sympy
def getModelPointSymbols():
    """Return the 3D model-point coordinate symbols (X, Y, Z) as a tuple."""
    coords = sympy.symbols("X Y Z")
    return tuple(coords)
def getExtrinsicSymbols():
    """Return the extrinsic symbols: rotation (ρx, ρy, ρz) then translation
    (tx, ty, tz), as a tuple."""
    extrinsics = sympy.symbols("ρx ρy ρz tx ty tz")
    return tuple(extrinsics)
def getHomographySymbols():
    """Return the nine 3x3 homography entry symbols H11..H33 (row-major)."""
    entries = sympy.symbols("H11 H12 H13 H21 H22 H23 H31 H32 H33")
    return tuple(entries)
| 19.071429
| 70
| 0.71161
| 38
| 267
| 5
| 0.710526
| 0.173684
| 0.252632
| 0.363158
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081818
| 0.17603
| 267
| 13
| 71
| 20.538462
| 0.781818
| 0
| 0
| 0
| 0
| 0
| 0.213483
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| true
| 0
| 0.142857
| 0.428571
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
b8cb4c174399d5ef600357998dce6847b5865bd4
| 152
|
py
|
Python
|
file_server/admin.py
|
yamachig/Lawtext-on-Heroku
|
c19ab5871af33153114a5b7b158605a7471e389b
|
[
"MIT"
] | 1
|
2017-12-18T19:25:41.000Z
|
2017-12-18T19:25:41.000Z
|
file_server/admin.py
|
yamachig/Lawtext-on-Heroku
|
c19ab5871af33153114a5b7b158605a7471e389b
|
[
"MIT"
] | null | null | null |
file_server/admin.py
|
yamachig/Lawtext-on-Heroku
|
c19ab5871af33153114a5b7b158605a7471e389b
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from file_server.models import File


class FileAdmin(admin.ModelAdmin):
    """Admin configuration for File; currently uses all ModelAdmin defaults."""
    pass


# Expose File in the Django admin under the FileAdmin configuration.
admin.site.register(File, FileAdmin)
| 19
| 36
| 0.802632
| 21
| 152
| 5.761905
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 152
| 7
| 37
| 21.714286
| 0.909774
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
b8cc882d26082ca250832bafbdd811aebd7015bb
| 129
|
py
|
Python
|
python-client/onesaitplatform/iotbroker/__init__.py
|
javieronsurbe/onesait-cloud-platform-clientlibraries
|
832cb058b3144cbe56b1ac2cb88a040573741d66
|
[
"Apache-2.0"
] | null | null | null |
python-client/onesaitplatform/iotbroker/__init__.py
|
javieronsurbe/onesait-cloud-platform-clientlibraries
|
832cb058b3144cbe56b1ac2cb88a040573741d66
|
[
"Apache-2.0"
] | null | null | null |
python-client/onesaitplatform/iotbroker/__init__.py
|
javieronsurbe/onesait-cloud-platform-clientlibraries
|
832cb058b3144cbe56b1ac2cb88a040573741d66
|
[
"Apache-2.0"
] | null | null | null |
# Re-export the broker client; DigitalClient is an alternative alias for the
# same class.
from .iotbrokerclient import IotBrokerClient
from .iotbrokerclient import IotBrokerClient as DigitalClient
# Package version string.
__version__ = "1.1.2"
| 32.25
| 61
| 0.844961
| 14
| 129
| 7.5
| 0.571429
| 0.361905
| 0.47619
| 0.761905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025862
| 0.100775
| 129
| 3
| 62
| 43
| 0.87931
| 0
| 0
| 0
| 0
| 0
| 0.03876
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
77023035b7d2bd95006df165ec8c964243e151db
| 1,067
|
py
|
Python
|
env/lib/python2.7/abc.py
|
essien1990/Flask-Mysqldb
|
e0917b90c45a0aaf922bfa672ddb479cb450a02d
|
[
"MIT"
] | null | null | null |
env/lib/python2.7/abc.py
|
essien1990/Flask-Mysqldb
|
e0917b90c45a0aaf922bfa672ddb479cb450a02d
|
[
"MIT"
] | 6
|
2020-06-05T22:57:03.000Z
|
2021-06-10T18:48:39.000Z
|
env/lib/python2.7/abc.py
|
essien1990/Flask-Mysqldb
|
e0917b90c45a0aaf922bfa672ddb479cb450a02d
|
[
"MIT"
] | 1
|
2021-12-16T17:09:52.000Z
|
2021-12-16T17:09:52.000Z
|
XSym
0070
3f611f55a75887b2c50c5d4e30c4ad59
/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/abc.py
| 213.4
| 953
| 0.091846
| 15
| 1,067
| 6.533333
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.263636
| 0.896907
| 1,067
| 5
| 953
| 213.4
| 0.627273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7730fcd44970e7d5834530a20c0f450d33e5e289
| 1,611
|
py
|
Python
|
osrefl/loaders/reduction/corrections.py
|
reflectometry/osrefl
|
ddf55d542f2eab2a29fd6ffc862379820a06d5c7
|
[
"BSD-3-Clause"
] | 2
|
2015-05-21T15:16:46.000Z
|
2015-10-23T17:47:36.000Z
|
osrefl/loaders/reduction/corrections.py
|
reflectometry/osrefl
|
ddf55d542f2eab2a29fd6ffc862379820a06d5c7
|
[
"BSD-3-Clause"
] | null | null | null |
osrefl/loaders/reduction/corrections.py
|
reflectometry/osrefl
|
ddf55d542f2eab2a29fd6ffc862379820a06d5c7
|
[
"BSD-3-Clause"
] | null | null | null |
# This program is public domain
"""
Data corrections for reflectometry.
"""
# TODO Autogenerate these entries from the corrections themselves.
# TODO This serves to improve maintainability by only listing the
# TODO objects in one place, and improve documentation by copying
# TODO the complete description of constructor arguments and function
# TODO description.
# TODO find a better way to delay loading of symbols
def normalize(*args, **kw):
    """Normalization correction; should be applied first"""
    # Import lazily so the heavy correction modules load only on use.
    from .normcor import Normalize
    correction = Normalize(*args, **kw)
    return correction
def polarization_efficiency(*args, **kw):
    """Polarization efficiency correction"""
    # Import lazily so the heavy correction modules load only on use.
    from .polcor import PolarizationEfficiency
    correction = PolarizationEfficiency(*args, **kw)
    return correction
def smooth(*args, **kw):
    """Data smoothing using 1-D moving window least squares filter"""
    # Import lazily so the heavy correction modules load only on use.
    from .smoothcor import Smooth
    correction = Smooth(*args, **kw)
    return correction
def water_intensity(*args, **kw):
    """Intensity estimate from water scatter"""
    # Import lazily so the heavy correction modules load only on use.
    from .ratiocor import WaterIntensity
    correction = WaterIntensity(*args, **kw)
    return correction
def ratio_intensity(*args, **kw):
    """Intensity estimate from reflection off a standard sample"""
    # Import lazily so the heavy correction modules load only on use.
    from .ratiocor import RatioIntensity
    correction = RatioIntensity(*args, **kw)
    return correction
def measured_area_correction(*args, **kw):
    """Detector area correction from file"""
    # Alias the lazy import so it does not shadow this wrapper's own name.
    from .areacor import measured_area_correction as _measured_area_correction
    return _measured_area_correction(*args, **kw)
def area_correction(*args, **kw):
    """Detector area correction from file"""
    # Import lazily so the heavy correction modules load only on use.
    from .areacor import AreaCorrection
    correction = AreaCorrection(*args, **kw)
    return correction
| 33.5625
| 69
| 0.734327
| 194
| 1,611
| 6.046392
| 0.458763
| 0.071611
| 0.046036
| 0.051151
| 0.206309
| 0.175618
| 0.114237
| 0.114237
| 0.114237
| 0.114237
| 0
| 0.000745
| 0.166977
| 1,611
| 47
| 70
| 34.276596
| 0.873323
| 0.438237
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021277
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7736ab80a68006d93bf3039a4ecffb1260c8d8f6
| 22,012
|
py
|
Python
|
demisto_sdk/tests/integration_tests/update_release_notes_integration_test.py
|
guiguitodelperuu/demisto-sdk
|
3eb0206593bc955a64c6594d717c04e52e254e1d
|
[
"MIT"
] | 42
|
2019-11-07T13:02:00.000Z
|
2022-03-29T03:39:04.000Z
|
demisto_sdk/tests/integration_tests/update_release_notes_integration_test.py
|
guiguitodelperuu/demisto-sdk
|
3eb0206593bc955a64c6594d717c04e52e254e1d
|
[
"MIT"
] | 1,437
|
2019-11-07T13:02:25.000Z
|
2022-03-31T12:48:11.000Z
|
demisto_sdk/tests/integration_tests/update_release_notes_integration_test.py
|
guiguitodelperuu/demisto-sdk
|
3eb0206593bc955a64c6594d717c04e52e254e1d
|
[
"MIT"
] | 46
|
2019-12-09T21:44:30.000Z
|
2022-03-24T17:36:45.000Z
|
import os
from os.path import join
import pytest
from click.testing import CliRunner
import conftest # noqa: F401
from demisto_sdk.__main__ import main
from demisto_sdk.commands.common.git_util import GitUtil
from demisto_sdk.commands.common.legacy_git_tools import git_path
from demisto_sdk.commands.update_release_notes.update_rn import UpdateRN
from demisto_sdk.commands.update_release_notes.update_rn_manager import \
UpdateReleaseNotesManager
from demisto_sdk.commands.validate.validate_manager import ValidateManager
from TestSuite.test_tools import ChangeCWD
# CLI command under test.
UPDATE_RN_COMMAND = "update-release-notes"
# Path constants used to locate the fixture packs inside the SDK repo.
DEMISTO_SDK_PATH = join(git_path(), "demisto_sdk")
TEST_FILES_PATH = join(git_path(), 'demisto_sdk', 'tests')
AZURE_FEED_PACK_PATH = join(TEST_FILES_PATH, 'test_files', 'content_repo_example', 'Packs', 'FeedAzureValid')
# Release-notes output folders the tests read/clean up.
RN_FOLDER = join(git_path(), 'Packs', 'FeedAzureValid', 'ReleaseNotes')
VMWARE_PACK_PATH = join(TEST_FILES_PATH, 'test_files', 'content_repo_example', 'Packs', 'VMware')
VMWARE_RN_PACK_PATH = join(git_path(), 'Packs', 'VMware', 'ReleaseNotes')
THINKCANARY_RN_FOLDER = join(git_path(), 'Packs', 'ThinkCanary', 'ReleaseNotes')
@pytest.fixture
def demisto_client(mocker):
    """Patch the downloader's demisto_client so tests never reach a server."""
    mocker.patch(
        "demisto_sdk.commands.download.downloader.demisto_client",
        # Fix: was misspelled `return_valure`, which merely set an unused
        # attribute on the Mock instead of configuring its return value.
        return_value="object"
    )
def test_update_release_notes_new_integration(demisto_client, mocker):
    """
    Given
    - Azure feed pack path.
    When
    - Running demisto-sdk update-release-notes command.
    Then
    - Ensure release notes file created with no errors
    - Ensure message is printed when update release notes process finished.
    - Ensure the release notes content is valid and as expected.
    """
    expected_rn = '\n' + '#### Integrations\n' + \
                  '##### New: Azure Feed\n' + \
                  '- Azure.CloudIPs Feed Integration. (Available from Cortex XSOAR 5.5.0).\n'
    # Simulate a newly added integration YAML in the pack.
    added_files = {join(AZURE_FEED_PACK_PATH, 'Integrations', 'FeedAzureValid', 'FeedAzureValid.yml')}
    rn_path = join(RN_FOLDER, '1_0_1.md')
    runner = CliRunner(mix_stderr=True)
    # Stub out git/pack lookups so the command sees one added file on a
    # clean branch with current pack version 1.0.0.
    mocker.patch('demisto_sdk.commands.update_release_notes.update_rn_manager.get_pack_name',
                 return_value='FeedAzureValid')
    mocker.patch('demisto_sdk.commands.common.tools.get_pack_name', return_value='FeedAzureValid')
    mocker.patch.object(UpdateRN, 'is_bump_required', return_value=True)
    mocker.patch.object(ValidateManager, 'setup_git_params', return_value='')
    mocker.patch.object(ValidateManager, 'get_unfiltered_changed_files_from_git',
                        return_value=(set(), added_files, set()))
    mocker.patch.object(GitUtil, 'get_current_working_branch', return_value="branch_name")
    mocker.patch.object(UpdateRN, 'get_pack_metadata', return_value={'currentVersion': '1.0.0'})
    mocker.patch.object(UpdateRN, 'get_master_version', return_value='1.0.0')
    # Remove any leftover RN file so the run is deterministic.
    if os.path.exists(rn_path):
        os.remove(rn_path)
    result = runner.invoke(main, [UPDATE_RN_COMMAND, "-i", join('Packs', 'FeedAzureValid')])
    assert result.exit_code == 0
    assert os.path.isfile(rn_path)
    assert not result.exception
    assert 'Changes were detected. Bumping FeedAzureValid to version: 1.0.1' in result.stdout
    assert 'Finished updating release notes for FeedAzureValid.' in result.stdout
    with open(rn_path, 'r') as f:
        rn = f.read()
    assert expected_rn == rn
def test_update_release_notes_modified_integration(demisto_client, mocker):
    """
    Given
    - Azure feed pack path.
    When
    - Running demisto-sdk update-release-notes command.
    Then
    - Ensure release notes file created with no errors
    - Ensure message is printed when update release notes process finished.
    - Ensure the release notes content is valid and as expected.
    """
    # A modified (not new) integration produces the %%UPDATE_RN%% placeholder.
    expected_rn = '\n' + '#### Integrations\n' + \
                  '##### Azure Feed\n' + \
                  '- %%UPDATE_RN%%\n'
    modified_files = {join(AZURE_FEED_PACK_PATH, 'Integrations', 'FeedAzureValid', 'FeedAzureValid.yml')}
    rn_path = join(RN_FOLDER, '1_0_1.md')
    runner = CliRunner(mix_stderr=False)
    # Stub out git/pack lookups so the command sees one modified file with
    # current pack version 1.0.0.
    mocker.patch('demisto_sdk.commands.common.tools.get_pack_name', return_value='FeedAzureValid')
    mocker.patch.object(UpdateRN, 'is_bump_required', return_value=True)
    mocker.patch.object(ValidateManager, 'setup_git_params', return_value='')
    mocker.patch.object(ValidateManager, 'get_unfiltered_changed_files_from_git', return_value=(modified_files, set(),
                                                                                                set()))
    mocker.patch.object(GitUtil, 'get_current_working_branch', return_value="branch_name")
    mocker.patch.object(UpdateRN, 'get_pack_metadata', return_value={'currentVersion': '1.0.0'})
    mocker.patch.object(UpdateRN, 'get_master_version', return_value='1.0.0')
    # Remove any leftover RN file so the run is deterministic.
    if os.path.exists(rn_path):
        os.remove(rn_path)
    result = runner.invoke(main, [UPDATE_RN_COMMAND, "-i", join('Packs', 'FeedAzureValid')])
    assert result.exit_code == 0
    assert os.path.isfile(rn_path)
    assert not result.exception
    assert 'Changes were detected. Bumping FeedAzureValid to version: 1.0.1' in result.stdout
    assert 'Finished updating release notes for FeedAzureValid.' in result.stdout
    with open(rn_path, 'r') as f:
        rn = f.read()
    assert expected_rn == rn
def test_update_release_notes_incident_field(demisto_client, mocker):
    """
    Given
    - Azure feed pack path.
    When
    - Running demisto-sdk update-release-notes command.
    Then
    - Ensure release notes file created with no errors
    - Ensure message is printed when update release notes process finished.
    - Ensure the release notes content is valid and as expected.
    """
    expected_rn = '\n' + '#### Incident Fields\n' + \
                  '- **City**\n'
    runner = CliRunner(mix_stderr=False)
    # Simulate a modified incident-field JSON in the pack.
    modified_files = {join(AZURE_FEED_PACK_PATH, 'IncidentFields', 'incidentfield-city.json')}
    rn_path = join(RN_FOLDER, '1_0_1.md')
    # Stub out git/pack lookups; current pack version is 1.0.0.
    mocker.patch.object(UpdateRN, 'is_bump_required', return_value=True)
    mocker.patch.object(ValidateManager, 'setup_git_params', return_value='')
    mocker.patch.object(ValidateManager, 'get_unfiltered_changed_files_from_git', return_value=(modified_files, set(),
                                                                                                set()))
    mocker.patch.object(GitUtil, 'get_current_working_branch', return_value="branch_name")
    mocker.patch.object(UpdateRN, 'get_pack_metadata', return_value={'currentVersion': '1.0.0'})
    mocker.patch('demisto_sdk.commands.common.tools.get_pack_name', return_value='FeedAzureValid')
    mocker.patch.object(UpdateRN, 'get_master_version', return_value='1.0.0')
    # Remove any leftover RN file so the run is deterministic.
    if os.path.exists(rn_path):
        os.remove(rn_path)
    result = runner.invoke(main, [UPDATE_RN_COMMAND, "-i", join('Packs', 'FeedAzureValid')])
    assert result.exit_code == 0
    assert os.path.isfile(rn_path)
    assert not result.exception
    assert 'Changes were detected. Bumping FeedAzureValid to version: 1.0.1' in result.stdout
    assert 'Finished updating release notes for FeedAzureValid.' in result.stdout
    with open(rn_path, 'r') as f:
        rn = f.read()
    assert expected_rn == rn
def test_update_release_notes_unified_yml_integration(demisto_client, mocker):
    """
    Given
    - VMware pack path.
    When
    - Running demisto-sdk update-release-notes command.
    Then
    - Ensure release notes file created with no errors
    - Ensure message is printed when update release notes process finished.
    - Ensure the release notes content is valid and as expected.
    """
    expected_rn = '\n' + '#### Integrations\n' + \
                  '##### VMware\n' + \
                  '- %%UPDATE_RN%%\n'
    runner = CliRunner(mix_stderr=False)
    # Unified (single-YAML) integration file, reported by git as "added".
    old_files = {join(VMWARE_PACK_PATH, 'Integrations', 'integration-VMware.yml')}
    rn_path = join(VMWARE_RN_PACK_PATH, '1_0_1.md')
    # Stub out git/pack lookups; current pack version is 1.0.0.
    mocker.patch.object(UpdateRN, 'is_bump_required', return_value=True)
    mocker.patch.object(ValidateManager, 'setup_git_params', return_value='')
    mocker.patch.object(GitUtil, 'get_current_working_branch', return_value="branch_name")
    mocker.patch.object(ValidateManager, 'get_unfiltered_changed_files_from_git', return_value=(set(), old_files,
                                                                                                set()))
    mocker.patch.object(UpdateRN, 'get_pack_metadata', return_value={'currentVersion': '1.0.0'})
    mocker.patch('demisto_sdk.commands.common.tools.get_pack_name', return_value='VMware')
    mocker.patch.object(UpdateRN, 'get_master_version', return_value='1.0.0')
    # Remove any leftover RN file so the run is deterministic.
    if os.path.exists(rn_path):
        os.remove(rn_path)
    result = runner.invoke(main, [UPDATE_RN_COMMAND, "-i", join('Packs', 'VMware')])
    assert result.exit_code == 0
    assert not result.exception
    assert 'Changes were detected. Bumping VMware to version: 1.0.1' in result.stdout
    assert 'Finished updating release notes for VMware.' in result.stdout
    assert os.path.isfile(rn_path)
    with open(rn_path, 'r') as f:
        rn = f.read()
    assert expected_rn == rn
def test_update_release_notes_non_content_path(demisto_client, mocker):
    """
    Given
    - non content pack path.
    When
    - Running demisto-sdk update-release-notes command.
    Then
    - Ensure an error is raised
    """
    runner = CliRunner(mix_stderr=False)
    mocker.patch.object(ValidateManager, 'setup_git_params', return_value='')
    mocker.patch.object(GitUtil, 'get_current_working_branch', return_value="branch_name")
    # Simulate running outside a content repo: the git lookup raises.
    mocker.patch.object(ValidateManager, 'get_unfiltered_changed_files_from_git', side_effect=FileNotFoundError)
    mocker.patch.object(UpdateRN, 'get_pack_metadata', return_value={'currentVersion': '1.0.0'})
    mocker.patch('demisto_sdk.commands.common.tools.get_pack_name', return_value='VMware')
    mocker.patch.object(UpdateRN, 'get_master_version', return_value='1.0.0')
    result = runner.invoke(main, [UPDATE_RN_COMMAND, "-i", join('Users', 'MyPacks', 'VMware')])
    assert result.exit_code == 1
    assert result.exception
    assert "You are not running" in result.stdout  # check error str is in stdout
def test_update_release_notes_existing(demisto_client, mocker):
"""
Given
- Azure feed pack path.
When
- Running demisto-sdk update-release-notes command.
Then
- Ensure release notes file updated with no errors
- Ensure message is printed when update release notes process finished.
- Ensure the release motes content is valid and as expected.
"""
expected_rn = '\n' + '#### Integrations\n' + \
'##### New: Azure Feed\n' + \
'- Azure.CloudIPs Feed Integration.\n' + \
'\n' + '#### Incident Fields\n' + \
'- **City**'
input_rn = '\n' + '#### Integrations\n' + \
'##### New: Azure Feed\n' + \
'- Azure.CloudIPs Feed Integration.\n'
rn_path = join(RN_FOLDER, '1_0_0.md')
modified_files = {join(AZURE_FEED_PACK_PATH, 'IncidentFields', 'incidentfield-city.json')}
with open(rn_path, 'w') as file_:
file_.write(input_rn)
runner = CliRunner(mix_stderr=False)
mocker.patch.object(UpdateRN, 'is_bump_required', return_value=False)
mocker.patch.object(ValidateManager, 'setup_git_params', return_value='')
mocker.patch.object(GitUtil, 'get_current_working_branch', return_value="branch_name")
mocker.patch.object(ValidateManager, 'get_unfiltered_changed_files_from_git', return_value=(modified_files, set(),
set()))
mocker.patch.object(UpdateRN, 'get_pack_metadata', return_value={'currentVersion': '1.0.0'})
mocker.patch.object(UpdateRN, 'get_master_version', return_value='1.0.0')
mocker.patch('demisto_sdk.commands.common.tools.get_pack_name', return_value='FeedAzureValid')
result = runner.invoke(main, [UPDATE_RN_COMMAND, "-i", join('Packs', 'FeedAzureValid')])
assert result.exit_code == 0
assert os.path.exists(rn_path)
assert not result.exception
assert 'Finished updating release notes for FeedAzureValid.' in result.stdout
with open(rn_path, 'r') as f:
rn = f.read()
os.remove(rn_path)
assert expected_rn == rn
def test_update_release_notes_modified_apimodule(demisto_client, repo, mocker):
"""
Given
- ApiModules_script.yml which is part of APIModules pack was changed.
- FeedTAXII pack path exists and uses ApiModules_script
- id_set.json indicates FeedTAXII uses APIModules
When
- Running demisto-sdk update-release-notes command.
Then
- Ensure release notes file created with no errors for APIModule and related pack FeedTAXII:
- Ensure message is printed when update release notes process finished.
"""
repo.setup_one_pack("ApiModules")
api_module_pack = repo.packs[0]
api_module_script_path = join(api_module_pack.path, "Scripts/ApiModules_script/ApiModules_script.yml")
repo.setup_one_pack("FeedTAXII")
taxii_feed_pack = repo.packs[1]
taxii_feed_integration_path = join(taxii_feed_pack.path,
"Integrations/FeedTAXII_integration/FeedTAXII_integration.yml")
repo.id_set.update({
"scripts": [
{
"ApiModules_script": {
"name": "ApiModules_script",
"file_path": api_module_script_path,
"pack": "ApiModules"
}
}
],
"integrations": [
{
"FeedTAXII_integration": {
"name": "FeedTAXII_integration",
"file_path": taxii_feed_integration_path,
"pack": "FeedTAXII",
"api_modules": "ApiModules_script"
}
}
]
})
modified_files = {api_module_script_path}
runner = CliRunner(mix_stderr=False)
mocker.patch.object(UpdateRN, 'is_bump_required', return_value=True)
mocker.patch.object(ValidateManager, 'setup_git_params', return_value='')
mocker.patch.object(ValidateManager, 'get_unfiltered_changed_files_from_git', return_value=(modified_files, set(),
set()))
mocker.patch.object(GitUtil, 'get_current_working_branch', return_value="branch_name")
mocker.patch.object(UpdateRN, 'get_pack_metadata', return_value={'currentVersion': '1.0.0'})
mocker.patch('demisto_sdk.commands.common.tools.get_pack_name', return_value='ApiModules')
mocker.patch.object(UpdateRN, 'get_master_version', return_value='1.0.0')
result = runner.invoke(main, [UPDATE_RN_COMMAND, "-i", join('Packs', 'ApiModules'), "-idp", repo.id_set.path])
assert result.exit_code == 0
assert not result.exception
assert 'Release notes are not required for the ApiModules pack since this pack is not versioned.' in result.stdout
assert 'Changes were detected. Bumping FeedTAXII to version: 1.0.1' in result.stdout
def test_update_release_on_matadata_change(demisto_client, mocker, repo):
"""
Given
- change only in metadata
When
- Running demisto-sdk update-release-notes command.
Then
- Ensure not find changes which would belong in release notes .
"""
pack = repo.create_pack('FeedAzureValid')
pack.pack_metadata.write_json(open('demisto_sdk/tests/test_files/1.pack_metadata.json').read())
validate_manager = ValidateManager(skip_pack_rn_validation=True,
silence_init_prints=True, skip_conf_json=True, check_is_unskipped=False)
validate_manager.git_util = "Not None"
mocker.patch.object(UpdateRN, 'is_bump_required', return_value=True)
mocker.patch.object(ValidateManager, 'get_unfiltered_changed_files_from_git',
return_value=({pack.pack_metadata.path}, set(), set()))
mocker.patch.object(UpdateReleaseNotesManager, 'setup_validate_manager', return_value=validate_manager)
mocker.patch.object(ValidateManager, 'setup_git_params', return_value='')
mocker.patch.object(GitUtil, 'get_current_working_branch', return_value="branch_name")
mocker.patch.object(UpdateRN, 'get_pack_metadata', return_value={'currentVersion': '1.0.0'})
mocker.patch('demisto_sdk.commands.common.tools.get_pack_name', return_value='FeedAzureValid')
mocker.patch('demisto_sdk.commands.common.tools.get_pack_names_from_files', return_value={'FeedAzureValid'})
with ChangeCWD(repo.path):
runner = CliRunner(mix_stderr=False)
result = runner.invoke(main, [UPDATE_RN_COMMAND, "-g"])
assert result.exit_code == 0
assert 'No changes that require release notes were detected. If such changes were made, ' \
'please commit the changes and rerun the command' in result.stdout
def test_update_release_notes_master_ahead_of_current(demisto_client, mocker, repo):
"""
Given
- Azure feed pack path.
When
- Running demisto-sdk update-release-notes command.
Then
- Ensure release notes file created with no errors
- Ensure the new version is taken from master and not from local metadata file.
"""
modified_files = {join(AZURE_FEED_PACK_PATH, 'IncidentFields', 'incidentfield-city.json')}
mocker.patch.object(UpdateRN, 'is_bump_required', return_value=True)
mocker.patch.object(ValidateManager, 'setup_git_params', return_value='')
mocker.patch.object(UpdateReleaseNotesManager, 'get_git_changed_files',
return_value=(modified_files, {'1_1_0.md'}, set()))
mocker.patch.object(UpdateRN, 'get_pack_metadata', return_value={'currentVersion': '1.0.0'})
mocker.patch('demisto_sdk.commands.common.tools.get_pack_name', return_value='FeedAzureValid')
mocker.patch.object(UpdateRN, 'get_master_version', return_value='2.0.0')
with ChangeCWD(repo.path):
runner = CliRunner(mix_stderr=False)
result = runner.invoke(main, [UPDATE_RN_COMMAND, "-i", join('Packs', 'FeedAzureValid')])
assert result.exit_code == 0
assert not result.exception
assert 'Changes were detected. Bumping FeedAzureValid to version: 2.0.1' in result.stdout
assert 'Finished updating release notes for FeedAzureValid.' in result.stdout
def test_update_release_notes_master_unavailable(demisto_client, mocker, repo):
"""
Given
- Azure feed pack path.
When
- Running demisto-sdk update-release-notes command.
Then
- Ensure release notes file created with no errors
- Ensure the new version is taken from local metadata file.
"""
modified_files = {join(AZURE_FEED_PACK_PATH, 'Integrations', 'FeedAzureValid', 'FeedAzureValid.yml')}
mocker.patch.object(UpdateRN, 'is_bump_required', return_value=True)
mocker.patch.object(ValidateManager, 'setup_git_params', return_value='')
mocker.patch.object(UpdateReleaseNotesManager, 'get_git_changed_files',
return_value=(modified_files, {'1_1_0.md'}, set()))
mocker.patch.object(UpdateRN, 'get_pack_metadata', return_value={'currentVersion': '1.1.0'})
mocker.patch('demisto_sdk.commands.common.tools.get_pack_name', return_value='FeedAzureValid')
mocker.patch.object(UpdateRN, 'get_master_version', return_value='0.0.0')
with ChangeCWD(repo.path):
runner = CliRunner(mix_stderr=False)
result = runner.invoke(main, [UPDATE_RN_COMMAND, "-i", join('Packs', 'FeedAzureValid')])
assert result.exit_code == 0
assert not result.exception
assert 'Changes were detected. Bumping FeedAzureValid to version: 1.1.1' in result.stdout
assert 'Finished updating release notes for FeedAzureValid.' in result.stdout
def test_force_update_release_no_pack_given(demisto_client, repo):
"""
Given
- Nothing have changed.
When
- Running demisto-sdk update-release-notes command with --force flag but no specific pack is given.
Then
- Ensure that an error is printed.
"""
runner = CliRunner(mix_stderr=True)
result = runner.invoke(main, [UPDATE_RN_COMMAND, "--force"])
assert 'Please add a specific pack in order to force' in result.stdout
def test_force_update_release(demisto_client, mocker, repo):
"""
Given
- Nothing have changed.
When
- Running demisto-sdk update-release-notes command with --force flag.
Then
- Ensure that RN were updated.
"""
rn_path = join(THINKCANARY_RN_FOLDER, '1_0_1.md')
if os.path.exists(rn_path):
os.remove(rn_path)
mocker.patch.object(UpdateRN, 'is_bump_required', return_value=True)
mocker.patch.object(ValidateManager, 'get_unfiltered_changed_files_from_git',
return_value=(set(), set(), set()))
mocker.patch.object(ValidateManager, 'setup_git_params', return_value='')
mocker.patch.object(GitUtil, 'get_current_working_branch', return_value="branch_name")
mocker.patch.object(UpdateRN, 'get_pack_metadata', return_value={'currentVersion': '1.0.0'})
mocker.patch('demisto_sdk.commands.update_release_notes.update_rn_manager.get_pack_name',
return_value='ThinkCanary')
mocker.patch('demisto_sdk.commands.update_release_notes.update_rn_manager.get_pack_names_from_files',
return_value={'ThinkCanary'})
runner = CliRunner(mix_stderr=True)
result = runner.invoke(main, [UPDATE_RN_COMMAND, "-i", join('Packs', 'ThinkCanary'), "--force"])
assert 'Bumping ThinkCanary to version: 1.0.1' in result.stdout
assert 'Finished updating release notes for ThinkCanary.' in result.stdout
with open(rn_path, 'r') as f:
rn = f.read()
assert '##### ThinkCanary\n- %%UPDATE_RN%%\n' == rn
| 43.588119
| 118
| 0.688715
| 2,767
| 22,012
| 5.241417
| 0.083123
| 0.058402
| 0.072675
| 0.051713
| 0.807005
| 0.783286
| 0.765566
| 0.756947
| 0.749569
| 0.736468
| 0
| 0.007198
| 0.198392
| 22,012
| 504
| 119
| 43.674603
| 0.814735
| 0.130383
| 0
| 0.569024
| 0
| 0
| 0.292176
| 0.093099
| 0.020202
| 0
| 0
| 0
| 0.16835
| 1
| 0.043771
| false
| 0
| 0.040404
| 0
| 0.084175
| 0.003367
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
91f674d7c7aefd0e441b7862f11323cf1cf8c821
| 34
|
py
|
Python
|
djackal/fields/__init__.py
|
jrog612/djackal
|
f46733f69f7a2e796ac611700ac5ffe20b7f0927
|
[
"MIT"
] | null | null | null |
djackal/fields/__init__.py
|
jrog612/djackal
|
f46733f69f7a2e796ac611700ac5ffe20b7f0927
|
[
"MIT"
] | null | null | null |
djackal/fields/__init__.py
|
jrog612/djackal
|
f46733f69f7a2e796ac611700ac5ffe20b7f0927
|
[
"MIT"
] | null | null | null |
from .json_field import JSONField
| 17
| 33
| 0.852941
| 5
| 34
| 5.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6228132a33854df15db8cbca2c6b2078a69bae3d
| 184
|
py
|
Python
|
settings.py
|
theDrinkMD/twibbage
|
c0aba60bd2df50f0a5688db4a01048ea1efd1a45
|
[
"MIT"
] | null | null | null |
settings.py
|
theDrinkMD/twibbage
|
c0aba60bd2df50f0a5688db4a01048ea1efd1a45
|
[
"MIT"
] | null | null | null |
settings.py
|
theDrinkMD/twibbage
|
c0aba60bd2df50f0a5688db4a01048ea1efd1a45
|
[
"MIT"
] | null | null | null |
# settings.py
from os.path import join, dirname
from dotenv import load_dotenv
load_dotenv(find_dotenv())
#TWILIO_AUTH_TOKEN = os.environ.get("TWILIO_AUTH_TOKEN")
#TWILIO_ACCOUNT_SID
| 23
| 56
| 0.815217
| 29
| 184
| 4.862069
| 0.62069
| 0.141844
| 0.212766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092391
| 184
| 7
| 57
| 26.285714
| 0.844311
| 0.456522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
62343bfbdec8a6ee487d85537cbc8e1df026df2f
| 6,047
|
py
|
Python
|
src/GNN_KNN.py
|
waddupitzme/graph-neural-pde
|
004a30c9e838866ac8b78d14b7414224a24014a5
|
[
"Apache-2.0"
] | 125
|
2021-06-16T09:36:18.000Z
|
2022-03-26T00:16:22.000Z
|
src/GNN_KNN.py
|
waddupitzme/graph-neural-pde
|
004a30c9e838866ac8b78d14b7414224a24014a5
|
[
"Apache-2.0"
] | 8
|
2021-06-23T04:49:12.000Z
|
2022-03-28T20:25:47.000Z
|
src/GNN_KNN.py
|
waddupitzme/graph-neural-pde
|
004a30c9e838866ac8b78d14b7414224a24014a5
|
[
"Apache-2.0"
] | 20
|
2021-06-23T06:55:35.000Z
|
2022-03-21T17:04:17.000Z
|
import torch
from torch import nn
import torch.nn.functional as F
from base_classes import BaseGNN
from model_configurations import set_block, set_function
from graph_rewiring import KNN, add_edges, edge_sampling, GDC
from utils import DummyData, get_full_adjacency
# Define the GNN model.
class GNN_KNN(BaseGNN):
def __init__(self, opt, dataset, device=torch.device('cpu')):
super(GNN_KNN, self).__init__(opt, dataset, device)
self.f = set_function(opt)
block = set_block(opt)
time_tensor = torch.tensor([0, self.T]).to(device)
self.odeblock = block(self.f, self.regularization_fns, opt, dataset.data, device, t=time_tensor).to(device)
self.data_edge_index = dataset.data.edge_index.to(device)
self.fa = get_full_adjacency(self.num_nodes).to(device)
def forward(self, x, pos_encoding):
# Encode each node based on its feature.
if self.opt['use_labels']:
y = x[:, -self.num_classes:]
x = x[:, :-self.num_classes]
if self.opt['beltrami']:
x = F.dropout(x, self.opt['input_dropout'], training=self.training)
x = self.mx(x)
if self.opt['dataset'] == 'ogbn-arxiv':
p = pos_encoding
else:
p = F.dropout(pos_encoding, self.opt['input_dropout'], training=self.training)
p = self.mp(p)
x = torch.cat([x, p], dim=1)
else:
x = F.dropout(x, self.opt['input_dropout'], training=self.training)
x = self.m1(x)
if self.opt['use_mlp']:
x = F.dropout(x, self.opt['dropout'], training=self.training)
x = F.dropout(x + self.m11(F.relu(x)), self.opt['dropout'], training=self.training)
x = F.dropout(x + self.m12(F.relu(x)), self.opt['dropout'], training=self.training)
# todo investigate if some input non-linearity solves the problem with smooth deformations identified in the ANODE paper
# if True:
# x = F.relu(x)
if self.opt['use_labels']:
x = torch.cat([x, y], dim=-1)
if self.opt['batch_norm']:
x = self.bn_in(x)
# Solve the initial value problem of the ODE.
if self.opt['augment']:
c_aux = torch.zeros(x.shape).to(self.device)
x = torch.cat([x, c_aux], dim=1)
self.odeblock.set_x0(x)
if self.training and self.odeblock.nreg > 0:
z, self.reg_states = self.odeblock(x)
else:
z = self.odeblock(x)
if self.opt['fa_layer']:
temp_time = self.opt['time']
temp_method = self.opt['method']
temp_step_size = self.opt['step_size']
self.opt['time'] = 1 # self.opt['fa_layer_time'] #1.0
self.opt['method'] = 'rk4' # self.opt['fa_layer_method']#'rk4'
self.opt['step_size'] = 1 # self.opt['fa_layer_step_size']#1.0
self.odeblock.set_x0(z)
self.odeblock.odefunc.edge_index = add_edges(self, self.opt)
if self.opt['edge_sampling_rmv'] != 0:
edge_sampling(self, z, self.opt)
z = self.odeblock(z)
self.odeblock.odefunc.edge_index = self.data_edge_index
self.opt['time'] = temp_time
self.opt['method'] = temp_method
self.opt['step_size'] = temp_step_size
if self.opt['augment']:
z = torch.split(z, x.shape[1] // 2, dim=1)[0]
# if self.opt['batch_norm']:
# z = self.bn_in(z)
# Activation.
z = F.relu(z)
if self.opt['fc_out']:
z = self.fc(z)
z = F.relu(z)
# Dropout.
z = F.dropout(z, self.opt['dropout'], training=self.training)
# Decode each node embedding to get node label.
z = self.m2(z)
return z
def forward_encoder(self, x, pos_encoding):
# Encode each node based on its feature.
if self.opt['use_labels']:
y = x[:, -self.num_classes:]
x = x[:, :-self.num_classes]
if self.opt['beltrami']:
# x = F.dropout(x, self.opt['input_dropout'], training=self.training)
x = self.mx(x)
if self.opt['dataset'] == 'ogbn-arxiv':
p = pos_encoding
else:
# p = F.dropout(pos_encoding, self.opt['input_dropout'], training=self.training)
p = self.mp(pos_encoding)
x = torch.cat([x, p], dim=1)
else:
# x = F.dropout(x, self.opt['input_dropout'], training=self.training)
x = self.m1(x)
if self.opt['use_mlp']:
# x = F.dropout(x, self.opt['dropout'], training=self.training)
# x = F.dropout(x + self.m11(F.relu(x)), self.opt['dropout'], training=self.training)
# x = F.dropout(x + self.m12(F.relu(x)), self.opt['dropout'], training=self.training)
x = x + self.m11(F.relu(x))
x = x + self.m12(F.relu(x))
# todo investigate if some input non-linearity solves the problem with smooth deformations identified in the ANODE paper
# if True:
# x = F.relu(x)
if self.opt['use_labels']:
x = torch.cat([x, y], dim=-1)
if self.opt['batch_norm']:
x = self.bn_in(x)
# Solve the initial value problem of the ODE.
if self.opt['augment']:
c_aux = torch.zeros(x.shape).to(self.device)
x = torch.cat([x, c_aux], dim=1)
return x
def forward_ODE(self, x, pos_encoding):
x = self.forward_encoder(x, pos_encoding)
self.odeblock.set_x0(x)
if self.training and self.odeblock.nreg > 0:
z, self.reg_states = self.odeblock(x)
else:
z = self.odeblock(x)
if self.opt['fa_layer']:
temp_time = self.opt['time']
temp_method = self.opt['method']
temp_step_size = self.opt['step_size']
self.opt['time'] = 1 # self.opt['fa_layer_time'] #1.0
self.opt['method'] = 'rk4' # self.opt['fa_layer_method']#'rk4'
self.opt['step_size'] = 1 # self.opt['fa_layer_step_size']#1.0
self.odeblock.set_x0(z)
self.odeblock.odefunc.edge_index = add_edges(self, self.opt)
if self.opt['edge_sampling_rmv'] != 0:
edge_sampling(self, z, self.opt)
z = self.odeblock(z)
self.odeblock.odefunc.edge_index = self.data_edge_index
self.opt['time'] = temp_time
self.opt['method'] = temp_method
self.opt['step_size'] = temp_step_size
if self.opt['augment']:
z = torch.split(z, x.shape[1] // 2, dim=1)[0]
return z
| 33.043716
| 124
| 0.624442
| 949
| 6,047
| 3.842993
| 0.14647
| 0.122841
| 0.054291
| 0.096243
| 0.778448
| 0.773512
| 0.756512
| 0.756512
| 0.756512
| 0.756512
| 0
| 0.011436
| 0.219117
| 6,047
| 182
| 125
| 33.225275
| 0.760906
| 0.203076
| 0
| 0.731707
| 0
| 0
| 0.083194
| 0
| 0
| 0
| 0
| 0.005495
| 0
| 1
| 0.03252
| false
| 0
| 0.056911
| 0
| 0.121951
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
62358950debb2601a4cf32dc1ab875d6d125c32f
| 106
|
py
|
Python
|
backslash/comment.py
|
oren0e/backslash-python
|
37f0fe37e21c384baa27b4f5b7210e79d02a65dc
|
[
"BSD-3-Clause"
] | null | null | null |
backslash/comment.py
|
oren0e/backslash-python
|
37f0fe37e21c384baa27b4f5b7210e79d02a65dc
|
[
"BSD-3-Clause"
] | null | null | null |
backslash/comment.py
|
oren0e/backslash-python
|
37f0fe37e21c384baa27b4f5b7210e79d02a65dc
|
[
"BSD-3-Clause"
] | null | null | null |
from .api_object import APIObject
class Comment(APIObject): # pylint: disable=abstract-method
pass
| 17.666667
| 60
| 0.764151
| 13
| 106
| 6.153846
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.160377
| 106
| 5
| 61
| 21.2
| 0.898876
| 0.292453
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
6264e48259c484f7d0caeec05df5a7d141e31449
| 142
|
py
|
Python
|
clinicadl/utils/task_manager/__init__.py
|
Raelag0112/clinicadl
|
4b9508ea6bbe5498069b1d76ad2c3636f67e3184
|
[
"MIT"
] | 25
|
2021-08-01T05:52:34.000Z
|
2022-03-22T04:18:01.000Z
|
clinicadl/utils/task_manager/__init__.py
|
Raelag0112/clinicadl
|
4b9508ea6bbe5498069b1d76ad2c3636f67e3184
|
[
"MIT"
] | 82
|
2021-07-12T08:28:36.000Z
|
2022-03-02T16:12:04.000Z
|
clinicadl/utils/task_manager/__init__.py
|
Raelag0112/clinicadl
|
4b9508ea6bbe5498069b1d76ad2c3636f67e3184
|
[
"MIT"
] | 12
|
2021-07-30T08:01:02.000Z
|
2022-03-14T11:45:03.000Z
|
from .classification import ClassificationManager
from .reconstruction import ReconstructionManager
from .regression import RegressionManager
| 35.5
| 49
| 0.894366
| 12
| 142
| 10.583333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084507
| 142
| 3
| 50
| 47.333333
| 0.976923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
62674defdd9350ddc1b59e8cf6065e4767d8616b
| 270
|
py
|
Python
|
nehushtan/httpd/exceptions/NehushtanRequestProcessTargetError.py
|
sinri/nehushtan
|
6fda496e16a8d443a86c617173d35f31c392beb6
|
[
"MIT"
] | null | null | null |
nehushtan/httpd/exceptions/NehushtanRequestProcessTargetError.py
|
sinri/nehushtan
|
6fda496e16a8d443a86c617173d35f31c392beb6
|
[
"MIT"
] | 1
|
2020-11-20T03:10:23.000Z
|
2020-11-20T09:30:34.000Z
|
nehushtan/httpd/exceptions/NehushtanRequestProcessTargetError.py
|
sinri/nehushtan
|
6fda496e16a8d443a86c617173d35f31c392beb6
|
[
"MIT"
] | 1
|
2021-10-13T10:16:58.000Z
|
2021-10-13T10:16:58.000Z
|
from nehushtan.httpd.exceptions.NehushtanHTTPError import NehushtanHTTPError
class NehushtanRequestProcessTargetError(NehushtanHTTPError):
"""
Since 0.4.0
When the process target of the matched Route (filters or controllers) does not work
"""
pass
| 27
| 87
| 0.766667
| 29
| 270
| 7.137931
| 0.862069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013453
| 0.174074
| 270
| 9
| 88
| 30
| 0.914798
| 0.351852
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
657092d15a87ca7fa43a43b585686bddf917d722
| 51
|
py
|
Python
|
tests/unit/timeout.py
|
tholom/pake
|
6777d63255eb3e4e834b77c9a1504b72dd2ed296
|
[
"BSD-3-Clause"
] | 3
|
2019-08-28T21:54:30.000Z
|
2021-10-13T22:00:59.000Z
|
tests/unit/timeout.py
|
tholom/pake
|
6777d63255eb3e4e834b77c9a1504b72dd2ed296
|
[
"BSD-3-Clause"
] | 1
|
2021-01-05T01:37:57.000Z
|
2021-01-05T14:10:17.000Z
|
tests/unit/timeout.py
|
tholom/pake
|
6777d63255eb3e4e834b77c9a1504b72dd2ed296
|
[
"BSD-3-Clause"
] | 1
|
2021-01-16T18:44:36.000Z
|
2021-01-16T18:44:36.000Z
|
import time
# sleep 1000 seconds
time.sleep(1000)
| 10.2
| 20
| 0.764706
| 8
| 51
| 4.875
| 0.625
| 0.461538
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186047
| 0.156863
| 51
| 4
| 21
| 12.75
| 0.72093
| 0.352941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
658d7cda8c55f135a84d6fbbe215113d2dbc97d3
| 17,581
|
py
|
Python
|
stores/apps/store_admin/tests.py
|
diassor/CollectorCity-Market-Place
|
892ad220b8cf1c0fc7433f625213fe61729522b2
|
[
"Apache-2.0"
] | 135
|
2015-03-19T13:28:18.000Z
|
2022-03-27T06:41:42.000Z
|
stores/apps/store_admin/tests.py
|
dfcoding/CollectorCity-Market-Place
|
e59acec3d600c049323397b17cae14fdcaaaec07
|
[
"Apache-2.0"
] | null | null | null |
stores/apps/store_admin/tests.py
|
dfcoding/CollectorCity-Market-Place
|
e59acec3d600c049323397b17cae14fdcaaaec07
|
[
"Apache-2.0"
] | 83
|
2015-01-30T01:00:15.000Z
|
2022-03-08T17:25:10.000Z
|
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
import datetime
import decimal
import logging
import time
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from market.models import MarketCategory
from shops.models import Shop
from sell.models import Cart
from auctions.models import AuctionSession
from lots.models import Lot, BidderIncrementCalculator
from for_sale.models import Item
class StoreAdminTest(TestCase):
fixtures = [
'greatcoins_market.json',
'greatcoins_subscriptions.json',
'greatcoins_auth.json',
'greatcoins_shops.json',
'greatcoins_preferences.json',
'greatcoins_themes.json'
]
def test_urls_access(self):
context = decimal.Context(prec=20, rounding=decimal.ROUND_HALF_DOWN)
decimal.setcontext(context)
shop = Shop.objects.all()[0]
category = MarketCategory.objects.all()[0]
HTTP_HOST = shop.default_dns
now = datetime.datetime.now()
tomorrow = now + datetime.timedelta(days=1)
auction = AuctionSession(shop=shop, title="Auction Session Nr 0", description="-- no desc --", start=now, end=tomorrow)
auction.save()
lot = Lot(shop = shop,
title = "Coin From Egypt 1905 (PCGS 60)",
description = "rare coin",
category = category,
date_time = now,
weight = "5",
session=auction,
starting_bid=decimal.Decimal("10.00"),
reserve=decimal.Decimal("0.00"))
lot.save()
item = Item(shop = shop,
title = "Coin From Rusia 1917 (PCGS 60)",
description = "rare coin",
category = category,
date_time = now,
weight = "5",
qty = "10",
price = decimal.Decimal("150"))
item.save()
user = shop.admin
# response = self.client.get(reverse("bidding_view_lot", args=[lot.id]), HTTP_HOST=HTTP_HOST)
# self.assertEqual(response.status_code, 200, "Failed when trying to view lot")
#
success = self.client.login(username=user.username, password="test")
self.assertEqual(success, True, "Login failed")
############# CUSTOMERS ################
response = self.client.get(reverse("home_admin"), HTTP_HOST=HTTP_HOST)
self.assertEqual(response.status_code, 200, "Failed when trying to reach home_admin")
response = self.client.get(reverse("customers"), HTTP_HOST=HTTP_HOST)
self.assertEqual(response.status_code, 200, "Failed when trying to reach customers")
response = self.client.get(reverse("customers_overview"), HTTP_HOST=HTTP_HOST)
self.assertEqual(response.status_code, 200, "Failed when trying to reach customers_overview")
response = self.client.get(reverse("customers_profiles"), HTTP_HOST=HTTP_HOST)
self.assertEqual(response.status_code, 200, "Failed when trying to reach customers_profiles")
response = self.client.get(reverse("customers_sold_items"), HTTP_HOST=HTTP_HOST)
self.assertEqual(response.status_code, 200, "Failed when trying to reach customers_sold_items")
response = self.client.get(reverse("customers_payments"), HTTP_HOST=HTTP_HOST)
self.assertEqual(response.status_code, 200, "Failed when trying to reach customers_payments")
response = self.client.get(reverse("customers_shipments"), HTTP_HOST=HTTP_HOST)
self.assertEqual(response.status_code, 200, "Failed when trying to reach customers_shipments")
response = self.client.get(reverse("customers_wish_lists"), HTTP_HOST=HTTP_HOST)
self.assertEqual(response.status_code, 200, "Failed when trying to reach customers_wish_list")
# response = self.client.get(reverse("customers_send_notification"), HTTP_HOST=HTTP_HOST)
# self.assertEqual(response.status_code, 200, "Failed when trying to bid a valid amount")
response = self.client.get(reverse("customers_mailing_list"), HTTP_HOST=HTTP_HOST)
self.assertEqual(response.status_code, 200, "Failed when trying to reach customers_mailing_list")
response = self.client.get(reverse("customers_export_mailinglist"), HTTP_HOST=HTTP_HOST)
self.assertEqual(response.status_code, 200, "Failed when trying to reach customers_export_mailinglist")
######### WEBSTORE ############
response = self.client.get(reverse("web_store"), HTTP_HOST=HTTP_HOST)
self.assertEqual(response.status_code, 200, "Failed when trying to reach web_store")
response = self.client.get(reverse("web_store_overview"), HTTP_HOST=HTTP_HOST)
self.assertEqual(response.status_code, 200, "Failed when trying to reach web_store_overview")
response = self.client.get(reverse("web_store_marketing"), HTTP_HOST=HTTP_HOST)
self.assertEqual(response.status_code, 200, "Failed when trying to reach web_store_marketing")
response = self.client.get(reverse("web_store_shows"), HTTP_HOST=HTTP_HOST)
self.assertEqual(response.status_code, 200, "Failed when trying to reach web_store_shows")
# response = self.client.get(reverse("web_store_theme"), HTTP_HOST=HTTP_HOST)
# self.assertEqual(response.status_code, 200, "Failed when trying to reach web_store_theme")
response = self.client.get(reverse("web_store_pages"), HTTP_HOST=HTTP_HOST)
self.assertEqual(response.status_code, 200, "Failed when trying to reach web_store_pages")
response = self.client.get(reverse("web_store_blogs"), HTTP_HOST=HTTP_HOST)
self.assertEqual(response.status_code, 200, "Failed when trying to reach web_store_blogs")
response = self.client.get(reverse("web_store_navigation"), HTTP_HOST=HTTP_HOST)
self.assertEqual(response.status_code, 200, "Failed when trying to reach web_store_navigation")
# response = self.client.get(reverse("web_store_show_go"), HTTP_HOST=HTTP_HOST)
# self.assertEqual(response.status_code, 200, "Failed when trying to bid a valid amount")
#
# response = self.client.get(reverse("web_store_show_not_go"), HTTP_HOST=HTTP_HOST)
# self.assertEqual(response.status_code, 200, "Failed when trying to bid a valid amount")
#
# response = self.client.get(reverse("web_store_theme"), HTTP_HOST=HTTP_HOST)
# self.assertEqual(response.status_code, 200, "Failed when trying to bid a valid amount")
######### INVENTORY ##########
response = self.client.get(reverse("inventory"), HTTP_HOST=HTTP_HOST)
self.assertEqual(response.status_code, 200, "Failed when trying to reach inventory")
response = self.client.get(reverse("inventory_overview"), HTTP_HOST=HTTP_HOST)
self.assertEqual(response.status_code, 200, "Failed when trying to reach inventory_overview")
response = self.client.get(reverse("inventory_items"), HTTP_HOST=HTTP_HOST)
self.assertEqual(response.status_code, 200, "Failed when trying to reach inventory_items")
response = self.client.get(reverse("inventory_items_import"), HTTP_HOST=HTTP_HOST)
self.assertEqual(response.status_code, 200, "Failed when trying to reach inventory_items_import")
response = self.client.get(reverse("inventory_lots"), HTTP_HOST=HTTP_HOST)
self.assertEqual(response.status_code, 200, "Failed when trying to reach inventory_lots")
response = self.client.get(reverse("inventory_auctions"), HTTP_HOST=HTTP_HOST)
self.assertEqual(response.status_code, 200, "Failed when trying to reach inventory_auctions")
response = self.client.get(reverse("inventory_categorize"), HTTP_HOST=HTTP_HOST)
self.assertEqual(response.status_code, 200, "Failed when trying to reach inventory_categorize")
######## ACCOUNT #########
response = self.client.get(reverse("account"), HTTP_HOST=HTTP_HOST)
self.assertEqual(response.status_code, 200, "Failed when trying to reach account")
response = self.client.get(reverse("account_profile"), HTTP_HOST=HTTP_HOST)
self.assertEqual(response.status_code, 200, "Failed when trying to reach account_profile")
response = self.client.get(reverse("account_password"), HTTP_HOST=HTTP_HOST)
self.assertEqual(response.status_code, 200, "Failed when trying to reach account_password")
response = self.client.get(reverse("add_profile_photo"), HTTP_HOST=HTTP_HOST)
self.assertEqual(response.status_code, 302, "Failed when trying to reach add_profile_photo")
response = self.client.get(reverse("preferences"), HTTP_HOST=HTTP_HOST)
self.assertEqual(response.status_code, 200, "Failed when trying to reach preferences")
def test_urls_access_denied(self):
    """Every admin-side view must redirect (302) a non-authenticated client.

    Builds the minimum fixture data (one AuctionSession, one Lot, one Item
    on the first Shop) and then GETs each admin URL, asserting a redirect.

    Fixes: the assertion message for "customers_wish_lists" previously
    reported the wrong URL name ("customers_wish_list"); the ~70 duplicated
    request/assert pairs are now driven by a single list of URL names.
    """
    context = decimal.Context(prec=20, rounding=decimal.ROUND_HALF_DOWN)
    decimal.setcontext(context)
    shop = Shop.objects.all()[0]
    category = MarketCategory.objects.all()[0]
    HTTP_HOST = shop.default_dns
    now = datetime.datetime.now()
    tomorrow = now + datetime.timedelta(days=1)
    auction = AuctionSession(shop=shop, title="Auction Session Nr 0",
                             description="-- no desc --", start=now, end=tomorrow)
    auction.save()
    lot = Lot(shop=shop,
              title="Coin From Egypt 1905 (PCGS 60)",
              description="rare coin",
              category=category,
              date_time=now,
              weight="5",
              session=auction,
              starting_bid=decimal.Decimal("10.00"),
              reserve=decimal.Decimal("0.00"))
    lot.save()
    item = Item(shop=shop,
                title="Coin From Rusia 1917 (PCGS 60)",
                description="rare coin",
                category=category,
                date_time=now,
                weight="5",
                qty="10",
                price=decimal.Decimal("150"))
    item.save()
    # URL names grouped exactly as the original sections were:
    url_names = [
        ############# CUSTOMERS ################
        "home_admin", "customers", "customers_overview", "customers_profiles",
        "customers_sold_items", "customers_payments", "customers_shipments",
        "customers_wish_lists", "customers_mailing_list",
        "customers_export_mailinglist",
        ######### WEBSTORE ############
        "web_store", "web_store_overview", "web_store_marketing",
        "web_store_shows", "web_store_pages", "web_store_blogs",
        "web_store_navigation",
        ######### INVENTORY ##########
        "inventory", "inventory_overview", "inventory_items",
        "inventory_items_import", "inventory_lots", "inventory_auctions",
        "inventory_categorize",
        ######## ACCOUNT #########
        "account", "account_profile", "account_password",
        "add_profile_photo", "preferences",
    ]
    for name in url_names:
        response = self.client.get(reverse(name), HTTP_HOST=HTTP_HOST)
        self.assertEqual(response.status_code, 302,
                         "Failed when trying to reach %s" % name)
| 54.430341
| 127
| 0.668449
| 2,121
| 17,581
| 5.344177
| 0.08628
| 0.098809
| 0.109572
| 0.127834
| 0.918835
| 0.918835
| 0.913983
| 0.913983
| 0.912219
| 0.912219
| 0
| 0.020299
| 0.223821
| 17,581
| 323
| 128
| 54.430341
| 0.810347
| 0.138616
| 0
| 0.582915
| 0
| 0
| 0.263306
| 0.027382
| 0
| 0
| 0
| 0
| 0.296482
| 1
| 0.01005
| false
| 0.025126
| 0.085427
| 0
| 0.105528
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
65de0a1ac1e447374d7fcf51e0f70f959ae49606
| 40,971
|
py
|
Python
|
models/networks.py
|
icon-lab/provoGAN
|
e4abee668ca5a5733a04c0e27e379a0434b0270f
|
[
"BSD-3-Clause"
] | 1
|
2022-03-27T09:16:22.000Z
|
2022-03-27T09:16:22.000Z
|
models/networks.py
|
icon-lab/provoGAN
|
e4abee668ca5a5733a04c0e27e379a0434b0270f
|
[
"BSD-3-Clause"
] | null | null | null |
models/networks.py
|
icon-lab/provoGAN
|
e4abee668ca5a5733a04c0e27e379a0434b0270f
|
[
"BSD-3-Clause"
] | null | null | null |
import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.autograd import Variable
from torch.optim import lr_scheduler
###############################################################################
# Functions
###############################################################################
def weights_init_normal(m):
    """Initialize one module in place with N(0, 0.02) weights.

    Intended for use with ``net.apply``. Conv*/Linear weights get
    N(0.0, 0.02); BatchNorm2d weights get N(1.0, 0.02) and zero bias.

    Fix: the deprecated ``init.normal``/``init.constant`` aliases were
    removed from torch.nn.init; use the in-place ``normal_``/``constant_``.
    """
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:  # matches Conv2d/Conv3d/ConvTranspose*
        init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('Linear') != -1:
        init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm2d') != -1:
        # Scale near 1, shift exactly 0, so normalization starts ~identity.
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def weights_init_xavier(m):
    """Initialize one module in place with Xavier-normal weights (gain=0.02).

    Intended for use with ``net.apply``. BatchNorm2d layers instead get
    N(1.0, 0.02) weights and zero bias.

    Fix: deprecated ``init.xavier_normal``/``init.normal``/``init.constant``
    were removed from torch.nn.init; use the in-place ``*_`` variants.
    """
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        init.xavier_normal_(m.weight.data, gain=0.02)
    elif classname.find('Linear') != -1:
        init.xavier_normal_(m.weight.data, gain=0.02)
    elif classname.find('BatchNorm2d') != -1:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def weights_init_kaiming(m):
    """Initialize one module in place with Kaiming-normal weights (fan_in).

    Intended for use with ``net.apply``. BatchNorm2d layers instead get
    N(1.0, 0.02) weights and zero bias.

    Fix: deprecated ``init.kaiming_normal``/``init.normal``/``init.constant``
    were removed from torch.nn.init; use the in-place ``*_`` variants.
    """
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif classname.find('Linear') != -1:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif classname.find('BatchNorm2d') != -1:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def weights_init_orthogonal(m):
    """Initialize one module in place with orthogonal weights (gain=1).

    Intended for use with ``net.apply``. BatchNorm2d layers instead get
    N(1.0, 0.02) weights and zero bias.

    Fixes: deprecated ``init.orthogonal``/``init.normal``/``init.constant``
    were removed from torch.nn.init (use ``*_``), and the stray debug print
    is commented out to match the other init helpers.
    """
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        init.orthogonal_(m.weight.data, gain=1)
    elif classname.find('Linear') != -1:
        init.orthogonal_(m.weight.data, gain=1)
    elif classname.find('BatchNorm2d') != -1:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def init_weights(net, init_type='normal'):
print('initialization method [%s]' % init_type)
if init_type == 'normal':
net.apply(weights_init_normal)
elif init_type == 'xavier':
net.apply(weights_init_xavier)
elif init_type == 'kaiming':
net.apply(weights_init_kaiming)
elif init_type == 'orthogonal':
net.apply(weights_init_orthogonal)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
def get_norm_layer(norm_type='instance'):
    """Return a constructor (functools.partial) for the requested norm layer.

    'batch'/'batch_3D' are affine; 'instance'/'instance_3D' are not.
    'none' returns None. Raises NotImplementedError for unknown names.
    """
    factories = {
        'batch': functools.partial(nn.BatchNorm2d, affine=True),
        'instance': functools.partial(nn.InstanceNorm2d, affine=False),
        'batch_3D': functools.partial(nn.BatchNorm3d, affine=True),
        'instance_3D': functools.partial(nn.InstanceNorm3d, affine=False),
        'none': None,
    }
    if norm_type not in factories:
        raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
    return factories[norm_type]
def get_scheduler(optimizer, opt):
    """Build a learning-rate scheduler for *optimizer* from opt.lr_policy.

    Policies:
      * 'lambda'  - keep full LR for opt.niter epochs, then decay linearly
                    to zero over opt.niter_decay epochs (offset by
                    opt.epoch_count when resuming).
      * 'step'    - multiply LR by 0.1 every opt.lr_decay_iters epochs.
      * 'plateau' - reduce LR by 0.2 when the monitored metric stalls.

    Raises:
        NotImplementedError: for any other opt.lr_policy.
        (Bug fix: the original code *returned* the exception instance
        instead of raising it, handing callers a non-scheduler object.)
    """
    if opt.lr_policy == 'lambda':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + 1 + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
def define_G(input_nc, output_nc, ngf, which_model_netG, norm='batch', use_dropout=False, init_type='normal', gpu_ids=[], down_samp=1):
    """Factory for generator networks.

    Resolves *norm* via get_norm_layer, builds the architecture named by
    *which_model_netG*, moves it to the first GPU when gpu_ids is non-empty,
    and initializes its weights with *init_type*.

    Raises NotImplementedError for an unrecognized model name.
    """
    norm_layer = get_norm_layer(norm_type=norm)
    if len(gpu_ids) > 0:
        assert(torch.cuda.is_available())
    # Lazy builders so only the requested architecture is constructed.
    builders = {
        'resnet_9blocks': lambda: ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9, gpu_ids=gpu_ids, down_samp=down_samp),
        'resnet_6blocks': lambda: ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6, gpu_ids=gpu_ids, down_samp=down_samp),
        'unet_128': lambda: UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids),
        'unet_256': lambda: UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids),
        'unet_att': lambda: UnetGenerator_withatt(input_nc, output_nc, 5, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids),
        'resnet_9blocks_3D': lambda: ResnetGenerator_3D(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9, gpu_ids=gpu_ids, down_samp=down_samp),
        'unet_att_3D': lambda: UnetGenerator_withatt_3D(input_nc, output_nc, 5, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids),
    }
    if which_model_netG not in builders:
        raise NotImplementedError('Generator model name [%s] is not recognized' % which_model_netG)
    netG = builders[which_model_netG]()
    if len(gpu_ids) > 0:
        netG.cuda(gpu_ids[0])
    init_weights(netG, init_type=init_type)
    return netG
def define_D(input_nc, ndf, which_model_netD,
             n_layers_D=3, norm='batch', use_sigmoid=False, init_type='normal', gpu_ids=[]):
    """Factory for discriminator networks.

    Resolves *norm* via get_norm_layer, builds the architecture named by
    *which_model_netD*, moves it to the first GPU when gpu_ids is non-empty,
    and initializes its weights with *init_type*.

    BUG FIX: the original code started a *second* independent ``if`` chain
    for the 3D variants ('basic_3D'/'basic_att_3D'), so every 2D choice
    ('basic', 'basic_att', 'n_layers', 'pixel') fell into the second
    chain's ``else`` and raised NotImplementedError. The branches are now
    one ``elif`` chain.
    """
    netD = None
    use_gpu = len(gpu_ids) > 0
    norm_layer = get_norm_layer(norm_type=norm)
    if use_gpu:
        assert(torch.cuda.is_available())
    if which_model_netD == 'basic':
        netD = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    elif which_model_netD == 'basic_att':
        netD = NLayerDiscriminator_att(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    elif which_model_netD == 'n_layers':
        netD = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    elif which_model_netD == 'pixel':
        netD = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    elif which_model_netD == 'basic_3D':
        netD = NLayerDiscriminator_3D(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    elif which_model_netD == 'basic_att_3D':
        netD = NLayerDiscriminator_att_3D(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    else:
        raise NotImplementedError('Discriminator model name [%s] is not recognized' %
                                  which_model_netD)
    if use_gpu:
        netD.cuda(gpu_ids[0])
    init_weights(netD, init_type=init_type)
    return netD
def print_network(net):
    """Print *net*'s module structure followed by its total parameter count."""
    total = sum(param.numel() for param in net.parameters())
    print(net)
    print('Total number of parameters: %d' % total)
##############################################################################
# Classes
##############################################################################
# Defines the GAN loss which uses either LSGAN or the regular GAN.
# When LSGAN is used, it is basically same as MSELoss,
# but it abstracts away the need to create the target label tensor
# that has the same size as the input
class GANLoss(nn.Module):
    """GAN objective that hides target-tensor bookkeeping from the caller.

    With use_lsgan=True the criterion is MSELoss (LSGAN); otherwise BCELoss.
    Target tensors filled with target_real_label / target_fake_label are
    created lazily and cached, and rebuilt only when the prediction's
    element count changes.
    """

    def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0,
                 tensor=torch.FloatTensor):
        super(GANLoss, self).__init__()
        self.real_label = target_real_label
        self.fake_label = target_fake_label
        self.real_label_var = None
        self.fake_label_var = None
        self.Tensor = tensor
        self.loss = nn.MSELoss() if use_lsgan else nn.BCELoss()

    def get_target_tensor(self, input, target_is_real):
        """Return a cached constant tensor shaped like *input*."""
        if target_is_real:
            stale = (self.real_label_var is None or
                     self.real_label_var.numel() != input.numel())
            if stale:
                filled = self.Tensor(input.size()).fill_(self.real_label)
                self.real_label_var = Variable(filled, requires_grad=False)
            return self.real_label_var
        stale = (self.fake_label_var is None or
                 self.fake_label_var.numel() != input.numel())
        if stale:
            filled = self.Tensor(input.size()).fill_(self.fake_label)
            self.fake_label_var = Variable(filled, requires_grad=False)
        return self.fake_label_var

    def __call__(self, input, target_is_real):
        return self.loss(input, self.get_target_tensor(input, target_is_real))
# Defines the generator that consists of Resnet blocks between a few
# downsampling/upsampling operations.
# Code and idea originally from Justin Johnson's architecture.
# https://github.com/jcjohnson/fast-neural-style/
class ResnetGenerator(nn.Module):
    """ResNet-style encoder/decoder generator.

    Pipeline: ReflectionPad + 7x7 conv stem -> two "downsampling" convs
    (stride 2 when down_samp==1, stride 1 otherwise) -> n_blocks residual
    blocks at 4*ngf channels -> two mirrored "upsampling" layers
    (transposed convs when down_samp==1, stride-1 convs otherwise) ->
    ReflectionPad + 7x7 conv head -> Tanh.
    """

    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, gpu_ids=[], padding_type='reflect', down_samp=1):
        # input_nc/output_nc: channel counts of the input/output images.
        # ngf: base filter count. down_samp: 1 -> change spatial resolution;
        # any other value -> keep full resolution throughout.
        assert(n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.gpu_ids = gpu_ids
        self.down_samp = down_samp
        # InstanceNorm2d here is used without affine parameters, so the
        # convolutions preceding it keep their own bias.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        model = [nn.ReflectionPad2d(3),
                 nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0,
                           bias=use_bias),
                 norm_layer(ngf),
                 nn.ReLU(True)]
        n_downsampling = 2
        if down_samp == 1:
            # Halve the spatial size twice while doubling channels.
            for i in range(n_downsampling):
                mult = 2**i
                model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
                                    stride=2, padding=1, bias=use_bias),
                          norm_layer(ngf * mult * 2),
                          nn.ReLU(True)]
        else:
            # Same channel growth but stride 1: resolution is preserved.
            for i in range(n_downsampling):
                mult = 2**i
                model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
                                    padding=1, bias=use_bias),
                          norm_layer(ngf * mult * 2),
                          nn.ReLU(True)]
        mult = 2**n_downsampling
        # Residual trunk at ngf * 2**n_downsampling channels.
        for i in range(n_blocks):
            model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
        if down_samp == 1:
            # Mirror the downsampling path with transposed convolutions.
            for i in range(n_downsampling):
                mult = 2**(n_downsampling - i)
                model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
                                             kernel_size=3, stride=2,
                                             padding=1, output_padding=1,
                                             bias=use_bias),
                          norm_layer(int(ngf * mult / 2)),
                          nn.ReLU(True)]
        else:
            # mult = 2**n_downsampling
            # Channel reduction only; stride 1 keeps the resolution.
            for i in range(n_downsampling):
                mult = 2**(n_downsampling - i)
                model += [nn.Conv2d(ngf * mult, int(ngf * mult / 2),
                                    kernel_size=3, stride=1,
                                    padding=1,
                                    bias=use_bias),
                          norm_layer(int(ngf * mult / 2)),
                          nn.ReLU(True)]
        model += [nn.ReflectionPad2d(3)]
        model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
        model += [nn.Tanh()]  # outputs in [-1, 1]
        self.model = nn.Sequential(*model)

    def forward(self, input):
        # Split the batch across GPUs only when the input is a CUDA tensor.
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        else:
            return self.model(input)
# Define a resnet block
class ResnetBlock(nn.Module):
    """Residual block: returns x + F(x), where F is
    conv -> norm -> ReLU -> (optional Dropout) -> conv -> norm,
    with 3x3 convolutions padded per *padding_type*.
    """

    def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        super(ResnetBlock, self).__init__()
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)

    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        """Assemble F(x) as an nn.Sequential."""
        def padded_conv():
            # One (padding, conv) pair honoring padding_type.
            if padding_type == 'reflect':
                return [nn.ReflectionPad2d(1),
                        nn.Conv2d(dim, dim, kernel_size=3, padding=0, bias=use_bias)]
            if padding_type == 'replicate':
                return [nn.ReplicationPad2d(1),
                        nn.Conv2d(dim, dim, kernel_size=3, padding=0, bias=use_bias)]
            if padding_type == 'zero':
                return [nn.Conv2d(dim, dim, kernel_size=3, padding=1, bias=use_bias)]
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)

        layers = padded_conv() + [norm_layer(dim), nn.ReLU(True)]
        if use_dropout:
            layers += [nn.Dropout(0.5)]
        layers += padded_conv() + [norm_layer(dim)]
        return nn.Sequential(*layers)

    def forward(self, x):
        return x + self.conv_block(x)
class ResnetGenerator_3D(nn.Module):
    """Volumetric (Conv3d) variant of ResnetGenerator.

    Same pipeline as the 2D version — stem, two (optionally strided)
    downsampling convs, n_blocks residual blocks, mirrored upsampling,
    head + Tanh — but with 3D convolutions, replication padding, and a
    configurable interior kernel size (kernelsize_chosen) and padding
    amount (padsize).
    """

    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm3d, use_dropout=False, n_blocks=6, gpu_ids=[], padding_type='reflect', down_samp=1, kernelsize_chosen=3, padsize=1):
        assert(n_blocks >= 0)
        super(ResnetGenerator_3D, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.gpu_ids = gpu_ids
        self.down_samp = down_samp
        # InstanceNorm3d is used without affine parameters, so convolutions
        # preceding it keep their own bias.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm3d
        else:
            use_bias = norm_layer == nn.InstanceNorm3d
        # ReplicationPad3d is used for the stem (no 3D reflection padding here).
        model = [nn.ReplicationPad3d(3),
                 nn.Conv3d(input_nc, ngf, kernel_size=7, padding=0,
                           bias=use_bias),
                 norm_layer(ngf),
                 nn.ReLU(True)]
        n_downsampling = 2
        if down_samp == 1:
            # Halve the spatial size twice while doubling channels.
            for i in range(n_downsampling):
                mult = 2**i
                model += [nn.Conv3d(ngf * mult, ngf * mult * 2, kernel_size=kernelsize_chosen,
                                    stride=2, padding=padsize, bias=use_bias),
                          norm_layer(ngf * mult * 2),
                          nn.ReLU(True)]
        else:
            # Same channel growth but stride 1: resolution is preserved.
            for i in range(n_downsampling):
                mult = 2**i
                model += [nn.Conv3d(ngf * mult, ngf * mult * 2, kernel_size=kernelsize_chosen,
                                    padding=padsize, bias=use_bias),
                          norm_layer(ngf * mult * 2),
                          nn.ReLU(True)]
        mult = 2**n_downsampling
        # Residual trunk at ngf * 2**n_downsampling channels.
        for i in range(n_blocks):
            model += [ResnetBlock_3D(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias, kernelsize_chosen=kernelsize_chosen, padsize=padsize)]
        if down_samp == 1:
            # Mirror the downsampling path with transposed convolutions.
            for i in range(n_downsampling):
                mult = 2**(n_downsampling - i)
                model += [nn.ConvTranspose3d(ngf * mult, int(ngf * mult / 2),
                                             kernel_size=kernelsize_chosen, stride=2,
                                             padding=padsize, output_padding=1,
                                             bias=use_bias),
                          norm_layer(int(ngf * mult / 2)),
                          nn.ReLU(True)]
        else:
            # mult = 2**n_downsampling
            # Channel reduction only; stride 1 keeps the resolution.
            for i in range(n_downsampling):
                mult = 2**(n_downsampling - i)
                model += [nn.Conv3d(ngf * mult, int(ngf * mult / 2),
                                    kernel_size=kernelsize_chosen, stride=1,
                                    padding=padsize,
                                    bias=use_bias),
                          norm_layer(int(ngf * mult / 2)),
                          nn.ReLU(True)]
        model += [nn.ReplicationPad3d(3)]
        model += [nn.Conv3d(ngf, output_nc, kernel_size=7, padding=0)]
        model += [nn.Tanh()]  # outputs in [-1, 1]
        self.model = nn.Sequential(*model)

    def forward(self, input):
        # Split the batch across GPUs only when the input is a CUDA tensor.
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        else:
            return self.model(input)
# Define a 3D resnet block
class ResnetBlock_3D(nn.Module):
    """3D residual block: returns x + F(x), where F is
    conv -> norm -> ReLU -> (optional Dropout) -> conv -> norm with Conv3d.

    Note: both 'reflect' and 'replicate' use ReplicationPad3d (matching the
    original behavior — there is no 3D reflection padding here).
    """

    def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias, kernelsize_chosen=3, padsize=1):
        super(ResnetBlock_3D, self).__init__()
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias, kernelsize_chosen, padsize)

    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias, kernelsize_chosen=3, padsize=1):
        """Assemble F(x) as an nn.Sequential."""
        def padded_conv():
            # One (padding, conv) pair honoring padding_type.
            if padding_type in ('reflect', 'replicate'):
                return [nn.ReplicationPad3d(padsize),
                        nn.Conv3d(dim, dim, kernel_size=kernelsize_chosen, padding=0, bias=use_bias)]
            if padding_type == 'zero':
                return [nn.Conv3d(dim, dim, kernel_size=kernelsize_chosen, padding=padsize, bias=use_bias)]
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)

        layers = padded_conv() + [norm_layer(dim), nn.ReLU(True)]
        if use_dropout:
            layers += [nn.Dropout(0.5)]
        layers += padded_conv() + [norm_layer(dim)]
        return nn.Sequential(*layers)

    def forward(self, x):
        return x + self.conv_block(x)
# Defines the Unet generator.
# |num_downs|: number of downsamplings in UNet. For example,
# if |num_downs| == 7, image of size 128x128 will become of size 1x1
# at the bottleneck
class UnetGenerator(nn.Module):
    """U-Net generator built recursively from UnetSkipConnectionBlock.

    |num_downs| is the number of downsamplings; e.g. num_downs == 7 turns a
    128x128 image into 1x1 at the bottleneck.
    """

    def __init__(self, input_nc, output_nc, num_downs, ngf=64,
                 norm_layer=nn.BatchNorm2d, use_dropout=False, gpu_ids=[]):
        super(UnetGenerator, self).__init__()
        self.gpu_ids = gpu_ids
        # Assemble from the innermost level outwards.
        net = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None,
                                      submodule=None, norm_layer=norm_layer,
                                      innermost=True)
        # num_downs - 5 intermediate levels at constant width ngf*8.
        for _ in range(num_downs - 5):
            net = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None,
                                          submodule=net, norm_layer=norm_layer,
                                          use_dropout=use_dropout)
        # Widen out: (ngf*4, ngf*8), (ngf*2, ngf*4), (ngf, ngf*2).
        for width in (4, 2, 1):
            net = UnetSkipConnectionBlock(ngf * width, ngf * width * 2,
                                          input_nc=None, submodule=net,
                                          norm_layer=norm_layer)
        self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc,
                                             submodule=net, outermost=True,
                                             norm_layer=norm_layer)

    def forward(self, input):
        on_gpu = bool(self.gpu_ids) and isinstance(input.data, torch.cuda.FloatTensor)
        if on_gpu:
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        return self.model(input)
class UnetGenerator_withatt(nn.Module):
    """U-Net generator with attention (has_att=True) at the ngf*4 level.

    NOTE(review): unlike UnetGenerator, `num_downs` is accepted but never
    used — the depth is fixed at 5 levels here. Confirm this is intended.
    """

    def __init__(self, input_nc, output_nc, num_downs, ngf=64,
                 norm_layer=nn.BatchNorm2d, use_dropout=False, gpu_ids=[]):
        super(UnetGenerator_withatt, self).__init__()
        self.gpu_ids = gpu_ids
        # construct unet structure, innermost -> outermost; the second level
        # is wrapped with attention (has_att=True).
        unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)
        unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, has_att=True)
        unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
        unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
        unet_block = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer)
        self.model = unet_block

    def forward(self, input):
        # Split the batch across GPUs only when the input is a CUDA tensor.
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        else:
            return self.model(input)
# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
# |-- downsampling -- |submodule| -- upsampling --|
class UnetSkipConnectionBlock(nn.Module):
    """One U-Net level: X -> downsample -> |submodule| -> upsample, with the
    input concatenated to the output (skip connection) except at the
    outermost level.
    """

    def __init__(self, outer_nc, inner_nc, input_nc=None,
                 submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False, has_att=False):
        # outer_nc: channels entering/leaving this level; inner_nc: channels
        # at the next (deeper) level. input_nc defaults to outer_nc.
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        # InstanceNorm2d is used without affine parameters, so convolutions
        # preceding it keep their own bias.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        if input_nc is None:
            input_nc = outer_nc
        downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
                             stride=2, padding=1, bias=use_bias)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(inner_nc)
        uprelu = nn.ReLU(True)
        upnorm = norm_layer(outer_nc)
        if outermost:
            # Outermost level: no norm on either side, Tanh output, no skip.
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1)
            down = [downconv]
            up = [uprelu, upconv, nn.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            # Innermost level has no submodule; upconv input is inner_nc only.
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1, bias=use_bias)
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
        else:
            # Intermediate level: upconv sees inner_nc*2 channels because the
            # submodule concatenates its skip connection onto its output.
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1, bias=use_bias)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]
            if has_att:
                # SBA_Block is defined elsewhere in the project (not visible
                # in this file chunk) — presumably an attention module; verify.
                att1 = SBA_Block(input_nc, 8)
                att2 = SBA_Block(outer_nc, 8)
            # NOTE(review): when use_dropout and has_att are both True, the
            # attention blocks are silently dropped — confirm this is intended.
            if use_dropout:
                model = down + [submodule] + up + [nn.Dropout(0.5)]
            elif has_att:
                model = [att1] + down + [submodule] + up + [att2]
            else:
                model = down + [submodule] + up
        self.model = nn.Sequential(*model)

    def forward(self, x):
        if self.outermost:
            return self.model(x)
        else:
            # Skip connection: concatenate the input with this level's
            # output along the channel dimension.
            return torch.cat([x, self.model(x)], 1)
class UnetGenerator_withatt_3D(nn.Module):
    """3D U-Net generator with attention (has_att=True) at the ngf*4 level.

    The depth is fixed at 5 levels; num_downs is accepted for interface
    parity with the other generators but is not consulted in this body.
    """

    def __init__(self, input_nc, output_nc, num_downs, ngf=64,
                 norm_layer=nn.BatchNorm3d, use_dropout=False, gpu_ids=[]):
        super(UnetGenerator_withatt_3D, self).__init__()
        self.gpu_ids = gpu_ids
        # Assemble from the innermost level outwards.
        net = UnetSkipConnectionBlock_3D(ngf * 8, ngf * 8, input_nc=None,
                                         submodule=None, norm_layer=norm_layer,
                                         innermost=True)
        net = UnetSkipConnectionBlock_3D(ngf * 4, ngf * 8, input_nc=None,
                                         submodule=net, norm_layer=norm_layer,
                                         has_att=True)
        for width in (2, 1):
            net = UnetSkipConnectionBlock_3D(ngf * width, ngf * width * 2,
                                             input_nc=None, submodule=net,
                                             norm_layer=norm_layer)
        net = UnetSkipConnectionBlock_3D(output_nc, ngf, input_nc=input_nc,
                                         submodule=net, outermost=True,
                                         norm_layer=norm_layer)
        self.model = net

    def forward(self, input):
        on_gpu = bool(self.gpu_ids) and isinstance(input.data, torch.cuda.FloatTensor)
        if on_gpu:
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        return self.model(input)
class UnetSkipConnectionBlock_3D(nn.Module):
    """Volumetric (Conv3d) variant of UnetSkipConnectionBlock: one U-Net
    level with a channel-wise skip connection except at the outermost level.
    """

    def __init__(self, outer_nc, inner_nc, input_nc=None,
                 submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm3d, use_dropout=False, has_att=False):
        # outer_nc: channels entering/leaving this level; inner_nc: channels
        # at the next (deeper) level. input_nc defaults to outer_nc.
        super(UnetSkipConnectionBlock_3D, self).__init__()
        self.outermost = outermost
        # InstanceNorm3d is used without affine parameters, so convolutions
        # preceding it keep their own bias.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm3d
        else:
            use_bias = norm_layer == nn.InstanceNorm3d
        if input_nc is None:
            input_nc = outer_nc
        downconv = nn.Conv3d(input_nc, inner_nc, kernel_size=4,
                             stride=2, padding=1, bias=use_bias)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(inner_nc)
        uprelu = nn.ReLU(True)
        upnorm = norm_layer(outer_nc)
        if outermost:
            # Outermost level: no norm on either side, Tanh output, no skip.
            upconv = nn.ConvTranspose3d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1)
            down = [downconv]
            up = [uprelu, upconv, nn.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            # Innermost level has no submodule; upconv input is inner_nc only.
            upconv = nn.ConvTranspose3d(inner_nc, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1, bias=use_bias)
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
        else:
            # Intermediate level: upconv sees inner_nc*2 channels because the
            # submodule concatenates its skip connection onto its output.
            upconv = nn.ConvTranspose3d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1, bias=use_bias)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]
            if has_att:
                # SBA_Block_3D is defined elsewhere in the project (not
                # visible in this file chunk) — presumably attention; verify.
                att1 = SBA_Block_3D(input_nc, 8)
                att2 = SBA_Block_3D(outer_nc, 8)
            # NOTE(review): when use_dropout and has_att are both True, the
            # attention blocks are silently dropped — confirm this is intended.
            if use_dropout:
                model = down + [submodule] + up + [nn.Dropout(0.5)]
            elif has_att:
                model = [att1] + down + [submodule] + up + [att2]
            else:
                model = down + [submodule] + up
        self.model = nn.Sequential(*model)

    def forward(self, x):
        if self.outermost:
            return self.model(x)
        else:
            # Skip connection: concatenate the input with this level's
            # output along the channel dimension.
            return torch.cat([x, self.model(x)], 1)
# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(nn.Module):
    """PatchGAN discriminator: a pyramid of strided 4x4 convolutions ending
    in a 1-channel map of per-patch real/fake scores (optionally squashed
    through a Sigmoid)."""

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, gpu_ids=[]):
        super(NLayerDiscriminator, self).__init__()
        self.gpu_ids = gpu_ids
        # InstanceNorm has no learned bias, so preceding convs keep one.
        if type(norm_layer) == functools.partial:
            needs_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            needs_bias = norm_layer == nn.InstanceNorm2d
        kernel, pad = 4, 1
        layers = [
            nn.Conv2d(input_nc, ndf, kernel_size=kernel, stride=2, padding=pad),
            nn.LeakyReLU(0.2, True),
        ]
        # Strided pyramid: channel multiplier doubles per layer, capped at 8.
        prev_mult = 1
        for depth in range(1, n_layers):
            cur_mult = min(2 ** depth, 8)
            layers += [
                nn.Conv2d(ndf * prev_mult, ndf * cur_mult,
                          kernel_size=kernel, stride=2, padding=pad, bias=needs_bias),
                norm_layer(ndf * cur_mult),
                nn.LeakyReLU(0.2, True),
            ]
            prev_mult = cur_mult
        # One extra stride-1 conv block before the 1-channel patch output.
        cur_mult = min(2 ** n_layers, 8)
        layers += [
            nn.Conv2d(ndf * prev_mult, ndf * cur_mult,
                      kernel_size=kernel, stride=1, padding=pad, bias=needs_bias),
            norm_layer(ndf * cur_mult),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf * cur_mult, 1, kernel_size=kernel, stride=1, padding=pad),
        ]
        if use_sigmoid:
            layers.append(nn.Sigmoid())
        self.model = nn.Sequential(*layers)

    def forward(self, input):
        if len(self.gpu_ids) and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        return self.model(input)
class NLayerDiscriminator_3D(nn.Module):
    """3D PatchGAN discriminator for volumetric inputs (N x C x D x H x W).

    Bug fix: the default ``norm_layer`` was nn.BatchNorm2d, which raises on
    the 5D activations produced by Conv3d; it is now nn.BatchNorm3d. The
    use_bias check already compared against InstanceNorm3d. Callers that
    pass an explicit norm_layer (e.g. via get_norm_layer) are unaffected.
    """

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm3d, use_sigmoid=False, gpu_ids=[]):
        super(NLayerDiscriminator_3D, self).__init__()
        self.gpu_ids = gpu_ids
        # InstanceNorm has no learned bias, so preceding convs keep one.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm3d
        else:
            use_bias = norm_layer == nn.InstanceNorm3d
        kw = (3, 4, 4)  # smaller kernel along depth than along H/W
        padw = 1
        sequence = [
            nn.Conv3d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
            nn.LeakyReLU(0.2, True)
        ]
        nf_mult = 1
        nf_mult_prev = 1
        # Strided pyramid: channel multiplier doubles per layer, capped at 8.
        for n in range(1, n_layers):
            nf_mult_prev = nf_mult
            nf_mult = min(2**n, 8)
            sequence += [
                nn.Conv3d(ndf * nf_mult_prev, ndf * nf_mult,
                          kernel_size=kw, stride=2, padding=padw, bias=use_bias),
                norm_layer(ndf * nf_mult),
                nn.LeakyReLU(0.2, True)
            ]
        nf_mult_prev = nf_mult
        nf_mult = min(2**n_layers, 8)
        # One extra stride-1 conv block before the 1-channel patch output.
        sequence += [
            nn.Conv3d(ndf * nf_mult_prev, ndf * nf_mult,
                      kernel_size=kw, stride=1, padding=padw, bias=use_bias),
            norm_layer(ndf * nf_mult),
            nn.LeakyReLU(0.2, True)
        ]
        sequence += [nn.Conv3d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
        if use_sigmoid:
            sequence += [nn.Sigmoid()]
        self.model = nn.Sequential(*sequence)

    def forward(self, input):
        # Split the batch across GPUs only when the input is a CUDA tensor.
        if len(self.gpu_ids) and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        else:
            return self.model(input)
class PixelDiscriminator(nn.Module):
    """1x1-convolution ("pixel") discriminator.

    Every convolution has a 1x1 kernel, so each output pixel is a
    real/fake decision about the single input pixel at the same location
    (per-pixel receptive field); the spatial size is preserved.
    """

    def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d, use_sigmoid=False, gpu_ids=[]):
        super(PixelDiscriminator, self).__init__()
        self.gpu_ids = gpu_ids
        # InstanceNorm has no learnable shift by default -> keep conv bias.
        if isinstance(norm_layer, functools.partial):
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        layers = [
            nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
            norm_layer(ndf * 2),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias),
        ]
        if use_sigmoid:
            layers.append(nn.Sigmoid())
        self.net = nn.Sequential(*layers)

    def forward(self, input):
        # Multi-GPU split only for CUDA inputs with configured device ids.
        if len(self.gpu_ids) and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.net, input, self.gpu_ids)
        else:
            return self.net(input)
class NLayerDiscriminator_att(nn.Module):
    """PatchGAN discriminator with a spatial self-attention stage.

    Pipeline: `model` (strided conv stack) -> `model_att1` (`SBA_Block`
    attention over the deepest feature map) -> `model2` (stride-1 convs
    plus the final 1-channel prediction map).

    Bug fix: the CPU branch of `forward` referenced the non-existent
    attribute `self.model1` and then reused it in place of `self.model2`
    for the final stage, so CPU inference always raised AttributeError
    (and would have skipped `model2` even if it hadn't). It now mirrors
    the GPU branch: model -> model_att1 -> model2. Dead `H_size`/`W_size`
    bookkeeping locals (computed but never used) were removed.
    """

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, gpu_ids=[]):
        super(NLayerDiscriminator_att, self).__init__()
        self.gpu_ids = gpu_ids
        # InstanceNorm has no learnable shift by default -> keep conv bias.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        kw = 4
        padw = 1
        sequence = [
            nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
            nn.LeakyReLU(0.2, True)
        ]
        nf_mult = 1
        for n in range(1, n_layers):
            nf_mult_prev = nf_mult
            nf_mult = min(2**n, 8)
            sequence += [
                nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
                          kernel_size=kw, stride=2, padding=padw, bias=use_bias),
                norm_layer(ndf * nf_mult),
                nn.LeakyReLU(0.2, True)
            ]

        # Spatial self-attention over the deepest strided feature map.
        model_att1 = [SBA_Block(ndf * nf_mult, 8)]

        nf_mult_prev = nf_mult
        nf_mult = min(2**n_layers, 8)
        sequence2 = [
            nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
                      kernel_size=kw, stride=1, padding=padw, bias=use_bias),
            norm_layer(ndf * nf_mult),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)
        ]
        if use_sigmoid:
            sequence2 += [nn.Sigmoid()]

        self.model = nn.Sequential(*sequence)
        self.model_att1 = nn.Sequential(*model_att1)
        self.model2 = nn.Sequential(*sequence2)

    def forward(self, input):
        if len(self.gpu_ids) and isinstance(input.data, torch.cuda.FloatTensor):
            out1 = nn.parallel.data_parallel(self.model, input, self.gpu_ids)
            out_a1 = nn.parallel.data_parallel(self.model_att1, out1, self.gpu_ids)
            out_2 = nn.parallel.data_parallel(self.model2, out_a1, self.gpu_ids)
            return out_2
        else:
            # Same three-stage pipeline as the data-parallel branch.
            out1 = self.model(input)
            out_a1 = self.model_att1(out1)
            out_2 = self.model2(out_a1)
            return out_2
class NLayerDiscriminator_att_3D(nn.Module):
    """3-D PatchGAN discriminator with a spatial self-attention stage.

    Volumetric counterpart of `NLayerDiscriminator_att`: `model`
    (strided Conv3d stack) -> `model_att1` (`SBA_Block_3D`) -> `model2`
    (stride-1 convs plus the final 1-channel prediction map).

    Bug fixes:
    * `super()` was called with the misspelled class name
      `NLayerDiscriminator_att_3`, a NameError on every instantiation.
    * The attention stage instantiated `SBA_Block_3` instead of the
      `SBA_Block_3D` class defined in this module.
    * The CPU branch of `forward` referenced the non-existent
      `self.model1` and reused it in place of `self.model2`; it now
      mirrors the GPU branch: model -> model_att1 -> model2.
    """

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm3d, use_sigmoid=False, gpu_ids=[]):
        super(NLayerDiscriminator_att_3D, self).__init__()
        self.gpu_ids = gpu_ids
        # InstanceNorm has no learnable shift by default -> keep conv bias.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm3d
        else:
            use_bias = norm_layer == nn.InstanceNorm3d

        kw = (3, 4, 4)  # shallower kernel along depth
        padw = 1
        sequence = [
            nn.Conv3d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
            nn.LeakyReLU(0.2, True)
        ]
        nf_mult = 1
        for n in range(1, n_layers):
            nf_mult_prev = nf_mult
            nf_mult = min(2**n, 8)
            sequence += [
                nn.Conv3d(ndf * nf_mult_prev, ndf * nf_mult,
                          kernel_size=kw, stride=2, padding=padw, bias=use_bias),
                norm_layer(ndf * nf_mult),
                nn.LeakyReLU(0.2, True)
            ]

        # Spatial self-attention over the deepest strided feature map.
        model_att1 = [SBA_Block_3D(ndf * nf_mult, 8)]

        nf_mult_prev = nf_mult
        nf_mult = min(2**n_layers, 8)
        sequence2 = [
            nn.Conv3d(ndf * nf_mult_prev, ndf * nf_mult,
                      kernel_size=kw, stride=1, padding=padw, bias=use_bias),
            norm_layer(ndf * nf_mult),
            nn.LeakyReLU(0.2, True),
            nn.Conv3d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)
        ]
        if use_sigmoid:
            sequence2 += [nn.Sigmoid()]

        self.model = nn.Sequential(*sequence)
        self.model_att1 = nn.Sequential(*model_att1)
        self.model2 = nn.Sequential(*sequence2)

    def forward(self, input):
        if len(self.gpu_ids) and isinstance(input.data, torch.cuda.FloatTensor):
            out1 = nn.parallel.data_parallel(self.model, input, self.gpu_ids)
            out_a1 = nn.parallel.data_parallel(self.model_att1, out1, self.gpu_ids)
            out_2 = nn.parallel.data_parallel(self.model2, out_a1, self.gpu_ids)
            return out_2
        else:
            # Same three-stage pipeline as the data-parallel branch.
            out1 = self.model(input)
            out_a1 = self.model_att1(out1)
            out_2 = self.model2(out_a1)
            return out_2
# Define the attention modules
# Channel-based attention
class CSE_Block(nn.Module):
    """Channel squeeze-and-excitation attention (2-D).

    Global-average-pools a (w, h) feature map, squeezes channels by a
    factor `r`, expands back, and sigmoids to obtain a per-channel gate
    in (0, 1) that rescales the input.
    """

    def __init__(self, in_channel, r, w, h):
        super(CSE_Block, self).__init__()
        self.conv_block = self.build_att_block(in_channel, r, w, h)

    def build_att_block(self, in_channel, r, w, h):
        """Build the gating network: pool -> bottleneck -> expand -> sigmoid."""
        squeezed = int(in_channel / r)
        return nn.Sequential(
            nn.AvgPool2d((w, h)),
            nn.Conv2d(in_channel, squeezed, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(squeezed, in_channel, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        # Per-channel gate, broadcast over the spatial dimensions.
        return self.conv_block(x) * x
# Space-based (spatial self-) attention
class SBA_Block(nn.Module):
    """Spatial self-attention block (2-D), self-attention GAN style.

    Query/key channels are reduced by a factor `r`; attention is taken
    over all N = width*height positions. The learnable residual weight
    `gamma` starts at zero, so the block is the identity at init.
    """

    def __init__(self, in_channel, r):
        super(SBA_Block, self).__init__()
        reduced = int(in_channel / r)
        self.query_conv = nn.Conv2d(in_channels=in_channel, out_channels=reduced, kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels=in_channel, out_channels=reduced, kernel_size=1)
        self.value_conv = nn.Conv2d(in_channels=in_channel, out_channels=in_channel, kernel_size=1)
        self.gamma = nn.Parameter(torch.zeros(1))
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        batch, channels, width, height = x.size()
        n = width * height
        # (B, N, C') queries against (B, C', N) keys -> (B, N, N) affinities.
        queries = self.query_conv(x).view(batch, -1, n).permute(0, 2, 1)
        keys = self.key_conv(x).view(batch, -1, n)
        attention = self.softmax(torch.bmm(queries, keys))
        # Aggregate values with the transposed attention weights.
        values = self.value_conv(x).view(batch, -1, n)
        attended = torch.bmm(values, attention.permute(0, 2, 1))
        attended = attended.view(batch, channels, width, height)
        # Learnable residual blend; gamma == 0 at init -> identity mapping.
        return self.gamma * attended + x
# Space-based (spatial self-) attention, 3-D
class SBA_Block_3D(nn.Module):
    """Spatial self-attention block for volumetric (5-D) features.

    Identical to `SBA_Block` but attends over all
    N = depth*width*height voxel positions. `gamma` starts at zero, so
    the block is the identity at init.
    """

    def __init__(self, in_channel, r):
        super(SBA_Block_3D, self).__init__()
        reduced = int(in_channel / r)
        self.query_conv = nn.Conv3d(in_channels=in_channel, out_channels=reduced, kernel_size=1)
        self.key_conv = nn.Conv3d(in_channels=in_channel, out_channels=reduced, kernel_size=1)
        self.value_conv = nn.Conv3d(in_channels=in_channel, out_channels=in_channel, kernel_size=1)
        self.gamma = nn.Parameter(torch.zeros(1))
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        batch, channels, depth, width, height = x.size()
        n = depth * width * height
        # (B, N, C') queries against (B, C', N) keys -> (B, N, N) affinities.
        queries = self.query_conv(x).view(batch, -1, n).permute(0, 2, 1)
        keys = self.key_conv(x).view(batch, -1, n)
        attention = self.softmax(torch.bmm(queries, keys))
        # Aggregate values with the transposed attention weights.
        values = self.value_conv(x).view(batch, -1, n)
        attended = torch.bmm(values, attention.permute(0, 2, 1))
        attended = attended.view(batch, channels, depth, width, height)
        # Learnable residual blend; gamma == 0 at init -> identity mapping.
        return self.gamma * attended + x
| 41.637195
| 197
| 0.592395
| 5,271
| 40,971
| 4.350598
| 0.064124
| 0.056907
| 0.019667
| 0.028606
| 0.839351
| 0.817678
| 0.795744
| 0.769449
| 0.749477
| 0.738619
| 0
| 0.021845
| 0.291621
| 40,971
| 983
| 198
| 41.679552
| 0.768287
| 0.033878
| 0
| 0.661188
| 0
| 0
| 0.021264
| 0
| 0
| 0
| 0
| 0
| 0.005057
| 1
| 0.064475
| false
| 0
| 0.007585
| 0
| 0.145386
| 0.006321
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
028d78fb967bc9a4bb25abc5ef40208f4724f986
| 121
|
py
|
Python
|
sqrt.py
|
lovefov/Python
|
ba8fc49e6e503927dc1f827f37b77f3e43b5d0c8
|
[
"MIT"
] | null | null | null |
sqrt.py
|
lovefov/Python
|
ba8fc49e6e503927dc1f827f37b77f3e43b5d0c8
|
[
"MIT"
] | null | null | null |
sqrt.py
|
lovefov/Python
|
ba8fc49e6e503927dc1f827f37b77f3e43b5d0c8
|
[
"MIT"
] | 1
|
2021-02-08T08:48:44.000Z
|
2021-02-08T08:48:44.000Z
|
#!/usr/bin/env python3
#-*- coding:utf-8 -*-
#Author:贾江超
import math
def is_square(n=25):
return math.sqrt(n)**2==n
| 15.125
| 29
| 0.636364
| 22
| 121
| 3.454545
| 0.863636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048077
| 0.140496
| 121
| 7
| 30
| 17.285714
| 0.682692
| 0.421488
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
02c0ed6491a231ceedf71840ef1db2c38dedb1cd
| 70
|
py
|
Python
|
func/exit.py
|
EvanMu96/pythonshell
|
d9869c936c54beea514d5be215306cbf00c63430
|
[
"MIT"
] | 7
|
2016-10-01T12:26:54.000Z
|
2016-10-27T10:15:56.000Z
|
func/exit.py
|
EvanMu96/pythonshell
|
d9869c936c54beea514d5be215306cbf00c63430
|
[
"MIT"
] | null | null | null |
func/exit.py
|
EvanMu96/pythonshell
|
d9869c936c54beea514d5be215306cbf00c63430
|
[
"MIT"
] | null | null | null |
from .constants import *
def exit(args):
return SHELL_STATUS_STOP
| 17.5
| 28
| 0.757143
| 10
| 70
| 5.1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171429
| 70
| 4
| 28
| 17.5
| 0.87931
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
f322fe86a68b6ba7f70e9def0ed6c171364b50e6
| 35
|
py
|
Python
|
showroom/api/__init__.py
|
faisaldwinant/showroom
|
1a938afcf90f6cb29a2291882639ec64692015c9
|
[
"MIT"
] | 52
|
2016-06-15T17:21:46.000Z
|
2022-03-09T14:53:01.000Z
|
showroom/api/__init__.py
|
faisaldwinant/showroom
|
1a938afcf90f6cb29a2291882639ec64692015c9
|
[
"MIT"
] | 24
|
2016-10-18T08:45:18.000Z
|
2022-02-18T01:44:56.000Z
|
showroom/api/__init__.py
|
faisaldwinant/showroom
|
1a938afcf90f6cb29a2291882639ec64692015c9
|
[
"MIT"
] | 27
|
2016-10-16T10:51:24.000Z
|
2022-03-09T14:53:03.000Z
|
from .client import ShowroomClient
| 17.5
| 34
| 0.857143
| 4
| 35
| 7.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b881ee47b5a7c36a6cf856587796f2c862dbf90b
| 48
|
py
|
Python
|
m3o_plugin/routing.py
|
JustIceQAQ/play_m3o_in_python
|
140b1f07cb574d1f0a2890503ae9e73ce3907f2b
|
[
"MIT"
] | null | null | null |
m3o_plugin/routing.py
|
JustIceQAQ/play_m3o_in_python
|
140b1f07cb574d1f0a2890503ae9e73ce3907f2b
|
[
"MIT"
] | null | null | null |
m3o_plugin/routing.py
|
JustIceQAQ/play_m3o_in_python
|
140b1f07cb574d1f0a2890503ae9e73ce3907f2b
|
[
"MIT"
] | null | null | null |
# TODO Routing: https://m3o.com/routing/overview
| 48
| 48
| 0.770833
| 7
| 48
| 5.285714
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022222
| 0.0625
| 48
| 1
| 48
| 48
| 0.8
| 0.958333
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 1
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b8aff6fe6bda818d803eedaad278e89a7ab0ac4a
| 2,844
|
py
|
Python
|
games/migrations/0027_auto_20170929_0026.py
|
munisisazade/diplom_isi
|
767531ef3a4b090d1bc0963e687b5215d6f92f53
|
[
"MIT"
] | 1
|
2019-04-07T15:58:00.000Z
|
2019-04-07T15:58:00.000Z
|
games/migrations/0027_auto_20170929_0026.py
|
munisisazade/diplom_isi
|
767531ef3a4b090d1bc0963e687b5215d6f92f53
|
[
"MIT"
] | 12
|
2020-06-05T18:15:45.000Z
|
2022-03-11T23:20:26.000Z
|
games/migrations/0027_auto_20170929_0026.py
|
munisisazade/diplom_isi
|
767531ef3a4b090d1bc0963e687b5215d6f92f53
|
[
"MIT"
] | 1
|
2019-04-07T15:58:08.000Z
|
2019-04-07T15:58:08.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-09-28 20:26
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('games', '0026_monthboard_weekboard'),
]
operations = [
migrations.AddField(
model_name='leaderboard',
name='duration',
field=models.DurationField(default=datetime.timedelta(0)),
),
migrations.AddField(
model_name='leaderboard',
name='player',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='leaderboard',
name='score',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='monthboard',
name='duration',
field=models.DurationField(default=datetime.timedelta(0)),
),
migrations.AddField(
model_name='monthboard',
name='player',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='monthboard',
name='score',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='weekboard',
name='duration',
field=models.DurationField(default=datetime.timedelta(0)),
),
migrations.AddField(
model_name='weekboard',
name='player',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='weekboard',
name='score',
field=models.IntegerField(default=0),
),
migrations.AlterField(
model_name='leaderboard',
name='games',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='games.GameTime'),
),
migrations.AlterField(
model_name='monthboard',
name='games',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='games.GameTime'),
),
migrations.AlterField(
model_name='weekboard',
name='games',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='games.GameTime'),
),
]
| 35.55
| 133
| 0.605134
| 283
| 2,844
| 5.961131
| 0.215548
| 0.064019
| 0.122703
| 0.144043
| 0.785418
| 0.785418
| 0.738589
| 0.716064
| 0.686426
| 0.686426
| 0
| 0.013094
| 0.274965
| 2,844
| 79
| 134
| 36
| 0.805044
| 0.02391
| 0
| 0.833333
| 1
| 0
| 0.095204
| 0.009016
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.069444
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b8c089bf98ddc08e6607ef157b83aceb41f39794
| 53
|
py
|
Python
|
ghlestimator/__init__.py
|
damiandraxler/ghlestimator
|
83f3929e22cba48e61ffd164c380c026ff6dddac
|
[
"MIT"
] | null | null | null |
ghlestimator/__init__.py
|
damiandraxler/ghlestimator
|
83f3929e22cba48e61ffd164c380c026ff6dddac
|
[
"MIT"
] | null | null | null |
ghlestimator/__init__.py
|
damiandraxler/ghlestimator
|
83f3929e22cba48e61ffd164c380c026ff6dddac
|
[
"MIT"
] | 1
|
2020-10-21T08:30:12.000Z
|
2020-10-21T08:30:12.000Z
|
from .ghlestimator import GeneralizedHuberRegressor
| 26.5
| 52
| 0.886792
| 4
| 53
| 11.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09434
| 53
| 1
| 53
| 53
| 0.979167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b2349f7d9476f604e213d17be9381cd299b836ee
| 43
|
py
|
Python
|
sprint-1/python/helloworld.py
|
pradeepwaviz/Aviral
|
08480be4290e7af95488bdb49ac870546c359ac2
|
[
"MIT"
] | null | null | null |
sprint-1/python/helloworld.py
|
pradeepwaviz/Aviral
|
08480be4290e7af95488bdb49ac870546c359ac2
|
[
"MIT"
] | null | null | null |
sprint-1/python/helloworld.py
|
pradeepwaviz/Aviral
|
08480be4290e7af95488bdb49ac870546c359ac2
|
[
"MIT"
] | null | null | null |
print("Hello python you are most Welcome")
| 21.5
| 42
| 0.767442
| 7
| 43
| 4.714286
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139535
| 43
| 1
| 43
| 43
| 0.891892
| 0
| 0
| 0
| 0
| 0
| 0.767442
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
b2729269d13b062da392af6cd9fd549587f5dfe6
| 27
|
py
|
Python
|
src/urlcheck/__init__.py
|
Rhinik/adbot
|
58d0f6532db1934eb1ab7107314c3fd130f4d4c1
|
[
"MIT"
] | null | null | null |
src/urlcheck/__init__.py
|
Rhinik/adbot
|
58d0f6532db1934eb1ab7107314c3fd130f4d4c1
|
[
"MIT"
] | null | null | null |
src/urlcheck/__init__.py
|
Rhinik/adbot
|
58d0f6532db1934eb1ab7107314c3fd130f4d4c1
|
[
"MIT"
] | null | null | null |
from .main import urlcheck
| 13.5
| 26
| 0.814815
| 4
| 27
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a24b24b8afdadc4f172146a779030fe81b3fb3f6
| 203
|
py
|
Python
|
tccli/services/scf/__init__.py
|
hapsyou/tencentcloud-cli-intl-en
|
fa8ba71164484f9a2be4b983080a1de08606c0b0
|
[
"Apache-2.0"
] | null | null | null |
tccli/services/scf/__init__.py
|
hapsyou/tencentcloud-cli-intl-en
|
fa8ba71164484f9a2be4b983080a1de08606c0b0
|
[
"Apache-2.0"
] | null | null | null |
tccli/services/scf/__init__.py
|
hapsyou/tencentcloud-cli-intl-en
|
fa8ba71164484f9a2be4b983080a1de08606c0b0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from tccli.services.scf.scf_client import register_arg
from tccli.services.scf.scf_client import get_actions_info
from tccli.services.scf.scf_client import AVAILABLE_VERSION_LIST
| 40.6
| 64
| 0.827586
| 32
| 203
| 5
| 0.53125
| 0.16875
| 0.31875
| 0.375
| 0.65625
| 0.65625
| 0.65625
| 0
| 0
| 0
| 0
| 0.005376
| 0.083744
| 203
| 4
| 65
| 50.75
| 0.854839
| 0.103448
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a28069de9af9b05f0534299883408dfbcb0c6e82
| 26
|
py
|
Python
|
tests/test_routes.py
|
prcutler/silversaucer
|
aff67757da934c0fe7a8c71c6b239356d737f701
|
[
"MIT"
] | 2
|
2020-06-27T13:55:19.000Z
|
2021-12-10T17:40:39.000Z
|
tests/test_routes.py
|
prcutler/silversaucer
|
aff67757da934c0fe7a8c71c6b239356d737f701
|
[
"MIT"
] | 23
|
2019-06-20T13:45:34.000Z
|
2022-03-10T10:23:21.000Z
|
tests/test_routes.py
|
prcutler/silversaucer
|
aff67757da934c0fe7a8c71c6b239356d737f701
|
[
"MIT"
] | null | null | null |
def test_home():
pass
| 8.666667
| 16
| 0.615385
| 4
| 26
| 3.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.269231
| 26
| 2
| 17
| 13
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
a2dce6ab1c420f2f1f53a25ff82db621d6ba038e
| 17,928
|
py
|
Python
|
GeneVisualization/implementation.py
|
paoloBerizzi/ray_casting_rendering
|
39ae2df04b35ba2391eba5d29e65b49893a901ff
|
[
"MIT"
] | null | null | null |
GeneVisualization/implementation.py
|
paoloBerizzi/ray_casting_rendering
|
39ae2df04b35ba2391eba5d29e65b49893a901ff
|
[
"MIT"
] | null | null | null |
GeneVisualization/implementation.py
|
paoloBerizzi/ray_casting_rendering
|
39ae2df04b35ba2391eba5d29e65b49893a901ff
|
[
"MIT"
] | null | null | null |
import functools
import math
import numpy as np
import matplotlib.pyplot as plt
from genevis.render import RaycastRenderer
from genevis.transfer_function import TFColor
from volume.volume import GradientVolume, Volume
from itertools import permutations
from genevis.transfer_function import TransferFunction
def get_voxelInterpolated(volume: Volume, x: float, y: float, z: float):
"""
Retrieves the value of a voxel for the given coordinates.
:param volume: Volume from which the voxel will be retrieved.
:param x: X coordinate of the voxel
:param y: Y coordinate of the voxel
:param z: Z coordinate of the voxel
:return: Voxel value
"""
if x < 0 or y < 0 or z < 0 or x >= volume.dim_x or y >= volume.dim_y or z >= volume.dim_z:
return 0
x0 = int(np.floor(x))
y0 = int(np.floor(y))
z0 = int(np.floor(z))
x1 = int(np.floor(x) + 1)
y1 = int(np.floor(y) + 1)
z1 = int(np.floor(z) + 1)
alpha = x - x0 / (x1 - x0)
beta = y - y0 / (y1 - y0)
gamma = z - z0 / (z1 - z0)
vo = get_voxel(volume, x0, y0, z0)
v1 = get_voxel(volume, x1, y0, z0)
v2 = get_voxel(volume, x0, y1, z0)
v3 = get_voxel(volume, x1, y1, z0)
v4 = get_voxel(volume, x0, y0, z1)
v5 = get_voxel(volume, x1, y0, z1)
v6 = get_voxel(volume, x0, y1, z1)
v7 = get_voxel(volume, x1, y1, z1)
val = (1 - alpha) * (1 - beta) * (1 - gamma) * vo + \
alpha * (1 - beta) * (1 - gamma) * v1 + \
(1 - alpha) * beta * (1 - gamma) * v2 + \
(alpha) * (beta) * (1 - gamma) * v3 + \
(1 - alpha) * (1 - beta) * gamma * v4 + \
alpha * (1 - beta) * gamma * v5 + \
(1 - alpha) * beta * gamma * v6 + \
(alpha * gamma * beta) * v7
return val
def get_voxel(volume: Volume, x: float, y: float, z: float):
"""
Retrieves the value of a voxel for the given coordinates.
:param volume: Volume from which the voxel will be retrieved.
:param x: X coordinate of the voxel
:param y: Y coordinate of the voxel
:param z: Z coordinate of the voxel
:return: Voxel value
"""
if x < 0 or y < 0 or z < 0 or x >= volume.dim_x or y >= volume.dim_y or z >= volume.dim_z:
return 0
x = int(math.floor(x))
y = int(math.floor(y))
z = int(math.floor(z))
return volume.data[x, y, z]
def compute_gradient(volume: Volume, x: float, y: float, z: float):
gradient = [0, 0, 0]
gradient[0] = (get_voxelInterpolated(volume, x + 1, y, z) - get_voxelInterpolated(volume, x - 1, y, z)) / 2
gradient[1] = (get_voxelInterpolated(volume, x, y + 1, z) - get_voxelInterpolated(volume, x, y - 1, z)) / 2
gradient[2] = (get_voxelInterpolated(volume, x, y, z + 1) - get_voxelInterpolated(volume, x, y, z - 1)) / 2
return gradient
class RaycastRendererImplementation(RaycastRenderer):
def clear_image(self):
"""Clears the image data"""
self.image.fill(0)
def render_slicer(self, view_matrix: np.ndarray, volume: Volume, image_size: int, image: np.ndarray):
# Clear the image
self.clear_image()
# U vector. See documentation in parent's class
u_vector = view_matrix[0:3]
# V vector. See documentation in parent's class
v_vector = view_matrix[4:7]
# View vector. See documentation in parent's class
view_vector = view_matrix[8:11]
# Center of the image. Image is squared
image_center = image_size / 2
# Center of the volume (3-dimensional)
volume_center = [volume.dim_x / 2, volume.dim_y / 2, volume.dim_z / 2]
volume_maximum = volume.get_maximum()
# Define a step size to make the loop faster
step = 2 if self.interactive_mode else 1
for i in range(0, image_size, step):
for j in range(0, image_size, step):
# Compute the new coordinates in a vectorized form
voxel_cords = np.dot(u_vector, i - image_center) + np.dot(v_vector, j - image_center) + volume_center
# Get voxel value
value = get_voxel(volume, voxel_cords[0], voxel_cords[1], voxel_cords[2])
# Normalize value to be between 0 and 1
red = value / volume_maximum
green = red
blue = red
alpha = 1.0 if red > 0 else 0.0
# Compute the color value (0...255)
red = math.floor(red * 255) if red < 255 else 255
green = math.floor(green * 255) if green < 255 else 255
blue = math.floor(blue * 255) if blue < 255 else 255
alpha = math.floor(alpha * 255) if alpha < 255 else 255
# Assign color to the pixel i, j
image[(j * image_size + i) * 4] = red
image[(j * image_size + i) * 4 + 1] = green
image[(j * image_size + i) * 4 + 2] = blue
image[(j * image_size + i) * 4 + 3] = alpha
def render_mip(self, view_matrix: np.ndarray, volume: Volume, image_size: int, image: np.ndarray):
# Clear the image
self.clear_image()
# U vector. See documentation in parent's class
u_vector = view_matrix[0:3].reshape(-1,1)
# V vector. See documentation in parent's class
v_vector = view_matrix[4:7].reshape(-1,1)
# View vector. See documentation in parent's class
view_vector = view_matrix[8:11].reshape(-1,1)
# Center of the image. Image is squared
image_center = image_size / 2
# Center of the volume (3-dimensional)
volume_center = np.asarray([volume.dim_x / 2, volume.dim_y / 2, volume.dim_z / 2]).reshape(-1,1)
volume_maximum = volume.get_maximum()
# Define a step size to make the loop faster
step = 10 if self.interactive_mode else 1
diagonal = (np.sqrt(3) * np.max([volume.dim_x,volume.dim_y,volume.dim_z]))/2
diagonal = int(math.floor(diagonal)+1)
for i in range(0, image_size, step):
for j in range(0, image_size, step):
max_voxel_value = []
for k in range(-diagonal, diagonal, 5):
# Compute the new coordinates in a vectorized form
voxel_cords = np.dot(u_vector, i-image_center) + np.dot(v_vector, j-image_center) \
+ np.dot(view_vector, k) + volume_center
max_voxel_value.append(get_voxelInterpolated(volume, voxel_cords[0], voxel_cords[1], voxel_cords[2]))
value = np.amax(max_voxel_value)
# Normalize value to be between 0 and 1
red = value / volume_maximum
green = red
blue = red
alpha = 1.0 if red > 0 else 0.0
# Compute the color value (0...255)
red = math.floor(red * 255) if red < 255 else 255
green = math.floor(green * 255) if green < 255 else 255
blue = math.floor(blue * 255) if blue < 255 else 255
alpha = math.floor(alpha * 255) if alpha < 255 else 255
# Assign color to the pixel i, j
image[(j * image_size + i) * 4] = red
image[(j * image_size + i) * 4 + 1] = green
image[(j * image_size + i) * 4 + 2] = blue
image[(j * image_size + i) * 4 + 3] = alpha
def render_compositing(self, view_matrix: np.ndarray, volume: Volume, image_size: int, image: np.ndarray,
step=1):
# Clear the image
self.clear_image()
u_vector = view_matrix[0:3].reshape(-1, 1)
v_vector = view_matrix[4:7].reshape(-1, 1)
view_vector = view_matrix[8:11].reshape(-1, 1)
image_center = image_size / 2
volume_center = np.asarray([volume.dim_x / 2, volume.dim_y / 2, volume.dim_z / 2]).reshape(-1, 1)
volume_maximum = volume.get_maximum()
step = 10 if self.interactive_mode else 1
diagonal = np.sqrt(3) * np.max([volume.dim_x, volume.dim_y, volume.dim_z]) / 2
diagonal = int(math.floor(diagonal)) + 1
for i in range(0, int(image_size), step):
for j in range(0, int(image_size), step):
red, green, blue, alpha = [0, 0, 0, 1]
initial_color = TFColor(0, 0, 0, 0)
for k in range(diagonal, -diagonal, -10):
# Compute the new coordinates in a vectorized form
voxel_cords = np.dot(u_vector, i - image_center) \
+ np.dot(v_vector, j - image_center) \
+ np.dot(view_vector, k) + volume_center
voxel = get_voxelInterpolated(volume, voxel_cords[0], voxel_cords[1], voxel_cords[2])
color = self.tfunc.get_color(voxel)
current_color = TFColor(color.a * color.r + (1 - color.a) * initial_color.r,
color.a * color.g + (1 - color.a) * initial_color.g,
color.a * color.b + (1 - color.a) * initial_color.b,
color.a)
initial_color = current_color
red = math.floor(current_color.r * 255) if red < 255 else 255
green = math.floor(current_color.g * 255) if green < 255 else 255
blue = math.floor(current_color.b * 255) if blue < 255 else 255
alpha = math.floor(255) if alpha < 255 else 255
# Assign color to the pixel i, j
image[(j * image_size + i) * 4] = red
image[(j * image_size + i) * 4 + 1] = green
image[(j * image_size + i) * 4 + 2] = blue
image[(j * image_size + i) * 4 + 3] = alpha
def render_energy_compositing(self, view_matrix: np.ndarray, volume: Volume, image_size: int, image: np.ndarray, energy_volumes: dict):
# Clear the image
self.clear_image()
# Define color dictionary to associate a color to each energy
perm = np.asarray(list(permutations([1, 0, 0.5, 1], 3)))
ids = list(energy_volumes.keys())
colorDictionary = {0: [0, 0, 0]}
for i in range(len(ids)):
colorDictionary[ids[i]] = perm[i]
u_vector = view_matrix[0:3].reshape(-1, 1)
v_vector = view_matrix[4:7].reshape(-1, 1)
view_vector = view_matrix[8:11].reshape(-1, 1)
image_center = image_size / 2
volume_center = np.asarray([volume.dim_x / 2, volume.dim_y / 2, volume.dim_z / 2]).reshape(-1, 1)
diagonal = np.sqrt(3) * np.max([volume.dim_x, volume.dim_y, volume.dim_z]) / 2
diagonal = int(math.floor(diagonal)) + 1
step = 20 if self.interactive_mode else 1
for i in range(0, image_size, step):
for j in range(0, image_size, step):
red, green, blue, alpha = [0, 0, 0, 1]
initial_r, initial_g, initial_b = 0, 0, 0
for k in range(diagonal, -diagonal, -1):
# Compute the new coordinates in a vectorized form
voxel_cords = np.dot(u_vector, i - image_center) \
+ np.dot(v_vector, j - image_center) \
+ np.dot(view_vector, k) + volume_center
color_voxel = np.asarray([0, 0, 0])
for key, value in energy_volumes.items():
intensity_max = value.get_maximum()
energy_intensity = get_voxelInterpolated(value, voxel_cords[0], voxel_cords[1], voxel_cords[2])
if energy_intensity/intensity_max > 0.3:
# Show only energy with an intensity above a treshold (30%)
color_voxel = np.add(np.multiply(color_voxel, 1-energy_intensity/intensity_max), np.multiply(colorDictionary[key], energy_intensity/intensity_max))
color_a = np.max(color_voxel)
current_r = color_a * color_voxel[0] + (1 - color_a) * initial_r
current_g = color_a * color_voxel[1] + (1 - color_a) * initial_g
current_b = color_a * color_voxel[2] + (1 - color_a) * initial_b
initial_r, initial_g, initial_b = current_r, current_g, current_b
red = math.floor(current_r * 255) if red < 255 else 255
green = math.floor(current_g * 255) if green < 255 else 255
blue = math.floor(current_b * 255) if blue < 255 else 255
alpha = math.floor(255) if alpha < 255 else 255
# Assign color to the pixel i, j
image[(j * image_size + i) * 4] = red
image[(j * image_size + i) * 4 + 1] = green
image[(j * image_size + i) * 4 + 2] = blue
image[(j * image_size + i) * 4 + 3] = alpha
def render_energy_region_compositing(self, view_matrix: np.ndarray, volume: Volume, image_size: int, image: np.ndarray, energy_volumes: dict, magnitudeVolume: Volume):
    """Back-to-front compositing of multiple energy volumes restricted to a
    region of interest, with a gradient-magnitude shade marking region edges.

    :param view_matrix: flattened view matrix; elements 0-2, 4-6 and 8-10 hold
        the u, v and view direction vectors respectively.
    :param volume: region-of-interest volume; voxels > 0 are treated as inside
        the region (presumably a mask — confirm against caller).
    :param image_size: width == height of the square output image, in pixels.
    :param image: flat RGBA buffer of length image_size * image_size * 4,
        written in place.
    :param energy_volumes: mapping of energy id -> Volume of per-voxel intensity.
    :param magnitudeVolume: gradient-magnitude volume used to shade region edges.
    """
    # Clear the image
    self.clear_image()
    # Define color dictionary to associate a color to each energy
    # NOTE(review): permutations([1, 0, 0.5, 1], 3) contains repeated tuples
    # (the two 1s are indistinguishable), so distinct ids may share a color.
    perm = np.asarray(list(permutations([1, 0, 0.5, 1], 3)))
    ids = list(energy_volumes.keys())
    colorDictionary = {0: [0, 0, 0]}
    for i in range(len(ids)):
        colorDictionary[ids[i]] = perm[i]
    # Camera basis vectors, extracted from the flattened view matrix as column vectors.
    u_vector = view_matrix[0:3].reshape(-1, 1)
    v_vector = view_matrix[4:7].reshape(-1, 1)
    view_vector = view_matrix[8:11].reshape(-1, 1)
    image_center = image_size / 2
    volume_center = np.asarray([volume.dim_x / 2, volume.dim_y / 2, volume.dim_z / 2]).reshape(-1, 1)
    # Half of the volume's space diagonal bounds the ray traversal range.
    diagonal = np.sqrt(3) * np.max([volume.dim_x, volume.dim_y, volume.dim_z]) / 2
    diagonal = int(math.floor(diagonal)) + 1
    # Coarser pixel sampling while the user interacts keeps rendering responsive.
    step = 20 if self.interactive_mode else 1
    for i in range(0, image_size, step):
        for j in range(0, image_size, step):
            red, green, blue, alpha = [0, 0, 0, 1]
            initial_r, initial_g, initial_b = 0, 0, 0
            # Walk the ray back to front so closer samples composite over farther ones.
            for k in range(diagonal, -diagonal, -1):
                # Compute the new coordinates in a vectorized form
                voxel_cords = np.dot(u_vector, i - image_center) \
                    + np.dot(v_vector, j - image_center) \
                    + np.dot(view_vector, k) + volume_center
                g = get_voxelInterpolated(magnitudeVolume, voxel_cords[0], voxel_cords[1], voxel_cords[2])
                if g != 0:
                    # Set a white shade to define the region edges
                    color = self.tfunc.get_color(g)
                    color_voxel = np.asarray([color.r, color.g, color.b])
                else:
                    # Inside the region there is no shade
                    color_voxel = np.asarray([0, 0, 0])
                if get_voxelInterpolated(volume, voxel_cords[0], voxel_cords[1], voxel_cords[2]) > 0:
                    # Compute colors only inside the regions of interest
                    for key, value in energy_volumes.items():
                        intensity_max = value.get_maximum()
                        energy_intensity = get_voxelInterpolated(value, voxel_cords[0], voxel_cords[1], voxel_cords[2])
                        if energy_intensity/intensity_max > 0.3:
                            # Show only energy with an intensity above a threshold (30%)
                            color_voxel = np.add(np.multiply(color_voxel, 1-energy_intensity/intensity_max), np.multiply(colorDictionary[key], energy_intensity/intensity_max))
                # Opacity is approximated by the brightest channel of the sample.
                color_a = np.max(color_voxel)
                current_r = color_a * color_voxel[0] + (1 - color_a) * initial_r
                current_g = color_a * color_voxel[1] + (1 - color_a) * initial_g
                current_b = color_a * color_voxel[2] + (1 - color_a) * initial_b
                initial_r, initial_g, initial_b = current_r, current_g, current_b
                red = math.floor(current_r * 255) if red < 255 else 255
                green = math.floor(current_g * 255) if green < 255 else 255
                blue = math.floor(current_b * 255) if blue < 255 else 255
                alpha = math.floor(255) if alpha < 255 else 255
            # Assign color to the pixel i, j
            image[(j * image_size + i) * 4] = red
            image[(j * image_size + i) * 4 + 1] = green
            image[(j * image_size + i) * 4 + 2] = blue
            image[(j * image_size + i) * 4 + 3] = alpha
def render_mouse_brain(self, view_matrix: np.ndarray, annotation_volume: Volume, energy_volumes: dict,
                       image_size: int, image: np.ndarray, option: int = 1):
    """Render the mouse-brain dataset using one of the compositing modes.

    The visualization mode used to be a hard-coded local constant; it is now a
    backward-compatible keyword parameter (default preserves prior behavior).

    :param view_matrix: flattened view matrix defining the camera orientation.
    :param annotation_volume: annotation volume (currently unused here; the
        gradient data is read from ``self.annotation_gradient_volume`` —
        kept in the signature for caller compatibility).
    :param energy_volumes: mapping of energy id -> Volume, forwarded to the
        energy compositing renderers.
    :param image_size: width == height of the square output image, in pixels.
    :param image: flat RGBA buffer written in place.
    :param option: visualization mode:
        0 -> compositing of the region specified in the volume file,
        1 -> compositing of multiple energies in the whole brain (default),
        2 -> compositing of multiple energies restricted to the region.
    """
    # Transfer function spans the full gradient-magnitude range of the annotation volume.
    self.tfunc.init(0, math.ceil(self.annotation_gradient_volume.get_max_gradient_magnitude()))
    magnitudeVolume = Volume(self.annotation_gradient_volume.magnitudeVolume)
    if option == 0:
        # Compositing rendering of the region specified in volume file
        self.render_compositing(view_matrix, magnitudeVolume, image_size, image)
    elif option == 1:
        # Compositing rendering of multiple energy in the whole brain
        self.render_energy_compositing(view_matrix, self.annotation_gradient_volume.volume, image_size, image, energy_volumes)
    elif option == 2:
        # Compositing rendering of multiple energy in the region specified in volume file
        self.render_energy_region_compositing(view_matrix, self.annotation_gradient_volume.volume, image_size, image, energy_volumes, magnitudeVolume)
| 46.809399
| 179
| 0.562026
| 2,461
| 17,928
| 3.946363
| 0.081674
| 0.040774
| 0.020593
| 0.03089
| 0.835873
| 0.798085
| 0.792525
| 0.760708
| 0.744028
| 0.740012
| 0
| 0.04695
| 0.333501
| 17,928
| 382
| 180
| 46.931937
| 0.765838
| 0.127677
| 0
| 0.587045
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040486
| false
| 0.004049
| 0.036437
| 0
| 0.101215
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a2f4fa1f9478761a250b93101b60a732b863472a
| 74
|
py
|
Python
|
youwol_utils/clients/storage/__init__.py
|
youwol/py-youwol
|
85a8877e302c9da1aea168bf1d964d19036c1134
|
[
"MIT"
] | null | null | null |
youwol_utils/clients/storage/__init__.py
|
youwol/py-youwol
|
85a8877e302c9da1aea168bf1d964d19036c1134
|
[
"MIT"
] | 1
|
2022-03-14T09:40:15.000Z
|
2022-03-14T09:40:15.000Z
|
youwol_utils/clients/storage/__init__.py
|
youwol/py-youwol
|
85a8877e302c9da1aea168bf1d964d19036c1134
|
[
"MIT"
] | null | null | null |
from .local_storage import *
from .models import *
from .storage import *
| 18.5
| 28
| 0.756757
| 10
| 74
| 5.5
| 0.5
| 0.472727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162162
| 74
| 3
| 29
| 24.666667
| 0.887097
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0c02bda3c974f71efb25a4e741dfd4c6fd00a334
| 6,317
|
py
|
Python
|
tests/tests.py
|
abactel/random_username_python
|
f3320f02640a01ad7c8a6b84ca007658f47a4909
|
[
"MIT"
] | 9
|
2018-06-30T19:35:28.000Z
|
2022-02-01T01:50:17.000Z
|
tests/tests.py
|
abactel/random_username_python
|
f3320f02640a01ad7c8a6b84ca007658f47a4909
|
[
"MIT"
] | 5
|
2017-02-16T12:56:41.000Z
|
2017-03-24T18:27:23.000Z
|
tests/tests.py
|
abactel/username_generator_cli
|
f3320f02640a01ad7c8a6b84ca007658f47a4909
|
[
"MIT"
] | 3
|
2019-09-09T15:46:27.000Z
|
2019-12-05T19:35:58.000Z
|
#!/usr/bin/env python3
import username_generator
import unittest
class TestUM(unittest.TestCase):
    """Unit tests for username_generator's main() and check_arguments().

    Fixes over the previous version:
    * ``test_default_formatting_4_spaces_start`` was defined twice; the second
      definition silently shadowed the first, so one test never ran.
    * ``len(max(unames))`` measured the length of the *lexicographically*
      largest name, not the longest one; lengths are now measured explicitly.
    * The nine near-identical argument dicts are built by ``_make_args``.
    """

    @staticmethod
    def _make_args(**overrides):
        """Return the args dict main() expects, with *overrides* applied.

        Defaults match the values shared by all original tests, so each test
        only states the keys it actually cares about.
        """
        args = {'num': 10, 'underscores': False, 'no_print': True, 'fname': '',
                'max_size': 255, 'min_size': 0, 'indentation': 0,
                'no_intro': True, 'return_val': True}
        args.update(overrides)
        return args

    def setUp(self):
        pass

    # number of usernames
    def test_default_6_usernames(self):
        unames = username_generator.main(args=self._make_args(num=6))
        self.assertEqual(len(unames), 6)

    def test_1_usernames(self):
        unames = username_generator.main(args=self._make_args(num=1))
        self.assertEqual(len(unames), 1)

    def test_100_usernames(self):
        unames = username_generator.main(args=self._make_args(num=100))
        self.assertEqual(len(unames), 100)

    # camelcase / underscores
    def test_camelcase_usernames_have_no_underscores(self):
        unames = username_generator.main(args=self._make_args())
        n_underscores = sum(uname.count("_") for uname in unames)
        self.assertEqual(n_underscores, 0)

    def test_camelcase_usernames_have_two_capital_letters(self):
        unames = username_generator.main(args=self._make_args())
        # Two capitals per camelCase name, ten names -> twenty capitals.
        n_caps = sum(sum(1 for char in un if char.isupper()) for un in unames)
        self.assertEqual(n_caps, 20)

    def test_underscore_usernames_have_underscore(self):
        unames = username_generator.main(args=self._make_args(underscores=True))
        n_underscores = sum(uname.count("_") for uname in unames)
        self.assertEqual(n_underscores, 10)

    def test_underscore_usernames_have_no_capital_letters(self):
        unames = username_generator.main(args=self._make_args(underscores=True))
        n_caps = sum(sum(1 for char in un if char.isupper()) for un in unames)
        self.assertEqual(n_caps, 0)

    # size
    def test_words_greater_than_7(self):
        unames = username_generator.main(args=self._make_args(min_size=7))
        # BUG FIX: len(max(unames)) picked the lexicographically largest name.
        longest = max(len(uname) for uname in unames)
        self.assertGreaterEqual(longest, 7)

    def test_words_less_than_14(self):
        unames = username_generator.main(args=self._make_args(max_size=14))
        # BUG FIX: len(max(unames)) picked the lexicographically largest name.
        longest = max(len(uname) for uname in unames)
        self.assertLessEqual(longest, 14)

    # indentation
    def test_default_formatting_4_spaces_start(self):
        unames = username_generator.main(
            args=self._make_args(max_size=14, indentation=4))
        valid_start_spaces = all(uname.startswith(" " * 4) for uname in unames)
        self.assertTrue(valid_start_spaces)

    def test_default_formatting_40_spaces_total(self):
        # BUG FIX: this test previously reused the name
        # test_default_formatting_4_spaces_start and shadowed it; renamed.
        unames = username_generator.main(
            args=self._make_args(max_size=14, indentation=4))
        n_spaces = sum(uname.count(" ") for uname in unames)
        self.assertEqual(n_spaces, 40)

    def test_indentation_no_spaces(self):
        unames = username_generator.main(args=self._make_args(max_size=14))
        n_spaces = sum(uname.count(" ") for uname in unames)
        self.assertEqual(n_spaces, 0)

    # exceptions
    def test_except_number_of_usernames_greater_than_10000(self):
        args = self._make_args(num=10001)
        self.assertRaises(ValueError, username_generator.check_arguments, args)

    def test_except_min_size_greater_than_max(self):
        args = self._make_args(max_size=0, min_size=14)
        self.assertRaises(ValueError, username_generator.check_arguments, args)

    def test_exception_min_size_greater_than_14(self):
        args = self._make_args(min_size=15)
        self.assertRaises(ValueError, username_generator.check_arguments, args)

    def test_exception_max_size_less_than_8(self):
        args = self._make_args(max_size=7)
        self.assertRaises(ValueError, username_generator.check_arguments, args)
# Run the suite when this file is executed directly (e.g. `python tests.py`).
if __name__ == '__main__':
    unittest.main()
| 44.801418
| 80
| 0.601868
| 776
| 6,317
| 4.635309
| 0.114691
| 0.040867
| 0.04893
| 0.07117
| 0.837364
| 0.804559
| 0.795941
| 0.788157
| 0.788157
| 0.788157
| 0
| 0.031111
| 0.252018
| 6,317
| 140
| 81
| 45.121429
| 0.730159
| 0.014722
| 0
| 0.601852
| 0
| 0
| 0.187359
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 1
| 0.157407
| false
| 0.009259
| 0.018519
| 0
| 0.185185
| 0.148148
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0c16038b7320cdf16fa5de626635a53744d96abb
| 44
|
py
|
Python
|
LineAlpha/Api/__init__.py
|
Aldiergokil/Selfbot
|
130248e65db93538842681ea03de2c12ab3b5725
|
[
"MIT"
] | null | null | null |
LineAlpha/Api/__init__.py
|
Aldiergokil/Selfbot
|
130248e65db93538842681ea03de2c12ab3b5725
|
[
"MIT"
] | null | null | null |
LineAlpha/Api/__init__.py
|
Aldiergokil/Selfbot
|
130248e65db93538842681ea03de2c12ab3b5725
|
[
"MIT"
] | null | null | null |
from Talk import Talk
from Poll import Poll
| 14.666667
| 21
| 0.818182
| 8
| 44
| 4.5
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 44
| 2
| 22
| 22
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0c30925d09b6be391cd424eb6fbf5fe115d8d059
| 6,407
|
py
|
Python
|
loldib/getratings/models/NA/na_ivern/na_ivern_mid.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_ivern/na_ivern_mid.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_ivern/na_ivern_mid.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
from getratings.models.ratings import Ratings

# Matchup rating classes for Ivern played Mid on the NA server.
#
# The previous version spelled out 139 identical empty subclasses
# (``class NA_Ivern_Mid_<Champion>(Ratings): pass``). They are now created
# dynamically from one champion tuple, preserving every exported class name
# and its base class exactly. Classes are built through ``type(Ratings)``
# (the base's metaclass) so any custom metaclass behaves as it would for a
# normal ``class`` statement.
_CHAMPIONS = (
    'Aatrox', 'Ahri', 'Akali', 'Alistar', 'Amumu', 'Anivia', 'Annie', 'Ashe',
    'AurelionSol', 'Azir', 'Bard', 'Blitzcrank', 'Brand', 'Braum', 'Caitlyn',
    'Camille', 'Cassiopeia', 'Chogath', 'Corki', 'Darius', 'Diana', 'Draven',
    'DrMundo', 'Ekko', 'Elise', 'Evelynn', 'Ezreal', 'Fiddlesticks', 'Fiora',
    'Fizz', 'Galio', 'Gangplank', 'Garen', 'Gnar', 'Gragas', 'Graves',
    'Hecarim', 'Heimerdinger', 'Illaoi', 'Irelia', 'Ivern', 'Janna',
    'JarvanIV', 'Jax', 'Jayce', 'Jhin', 'Jinx', 'Kalista', 'Karma', 'Karthus',
    'Kassadin', 'Katarina', 'Kayle', 'Kayn', 'Kennen', 'Khazix', 'Kindred',
    'Kled', 'KogMaw', 'Leblanc', 'LeeSin', 'Leona', 'Lissandra', 'Lucian',
    'Lulu', 'Lux', 'Malphite', 'Malzahar', 'Maokai', 'MasterYi',
    'MissFortune', 'MonkeyKing', 'Mordekaiser', 'Morgana', 'Nami', 'Nasus',
    'Nautilus', 'Nidalee', 'Nocturne', 'Nunu', 'Olaf', 'Orianna', 'Ornn',
    'Pantheon', 'Poppy', 'Quinn', 'Rakan', 'Rammus', 'RekSai', 'Renekton',
    'Rengar', 'Riven', 'Rumble', 'Ryze', 'Sejuani', 'Shaco', 'Shen',
    'Shyvana', 'Singed', 'Sion', 'Sivir', 'Skarner', 'Sona', 'Soraka',
    'Swain', 'Syndra', 'TahmKench', 'Taliyah', 'Talon', 'Taric', 'Teemo',
    'Thresh', 'Tristana', 'Trundle', 'Tryndamere', 'TwistedFate', 'Twitch',
    'Udyr', 'Urgot', 'Varus', 'Vayne', 'Veigar', 'Velkoz', 'Vi', 'Viktor',
    'Vladimir', 'Volibear', 'Warwick', 'Xayah', 'Xerath', 'XinZhao', 'Yasuo',
    'Yorick', 'Zac', 'Zed', 'Ziggs', 'Zilean', 'Zyra',
)

for _champion in _CHAMPIONS:
    _class_name = 'NA_Ivern_Mid_' + _champion
    globals()[_class_name] = type(Ratings)(_class_name, (Ratings,), {})
# Keep the module namespace free of loop temporaries.
del _champion, _class_name
| 15.364508
| 46
| 0.761667
| 972
| 6,407
| 4.59465
| 0.151235
| 0.216301
| 0.370802
| 0.463502
| 0.797582
| 0.797582
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173404
| 6,407
| 416
| 47
| 15.401442
| 0.843278
| 0
| 0
| 0.498195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.498195
| 0.00361
| 0
| 0.501805
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
0c43de90049fb8facabe284eda4eed42dc6c6de5
| 28,853
|
py
|
Python
|
pybind/slxos/v16r_1_00b/mpls_state/memory/stats/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/mpls_state/memory/stats/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/mpls_state/memory/stats/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class stats(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-mpls-operational - based on the path /mpls-state/memory/stats. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: 1
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__mem_stats_index','__mem_type','__num_alloc','__total_bytes','__total_allocs','__total_frees','__peak_alloc','__alloc_fails','__free_fails',)
_yang_name = 'stats'
_rest_name = 'stats'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__total_frees = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="total-frees", rest_name="total-frees", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
self.__alloc_fails = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="alloc-fails", rest_name="alloc-fails", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
self.__peak_alloc = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="peak-alloc", rest_name="peak-alloc", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
self.__total_allocs = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="total-allocs", rest_name="total-allocs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
self.__mem_stats_index = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="mem-stats-index", rest_name="mem-stats-index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
self.__num_alloc = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="num-alloc", rest_name="num-alloc", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
self.__free_fails = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="free-fails", rest_name="free-fails", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
self.__total_bytes = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="total-bytes", rest_name="total-bytes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
self.__mem_type = YANGDynClass(base=unicode, is_leaf=True, yang_name="mem-type", rest_name="mem-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'mpls-state', u'memory', u'stats']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'mpls-state', u'memory', u'stats']
def _get_mem_stats_index(self):
"""
Getter method for mem_stats_index, mapped from YANG variable /mpls_state/memory/stats/mem_stats_index (uint32)
YANG Description: Memory stats index
"""
return self.__mem_stats_index
def _set_mem_stats_index(self, v, load=False):
"""
Setter method for mem_stats_index, mapped from YANG variable /mpls_state/memory/stats/mem_stats_index (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_mem_stats_index is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mem_stats_index() directly.
YANG Description: Memory stats index
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="mem-stats-index", rest_name="mem-stats-index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mem_stats_index must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="mem-stats-index", rest_name="mem-stats-index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
})
self.__mem_stats_index = t
if hasattr(self, '_set'):
self._set()
def _unset_mem_stats_index(self):
self.__mem_stats_index = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="mem-stats-index", rest_name="mem-stats-index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
def _get_mem_type(self):
"""
Getter method for mem_type, mapped from YANG variable /mpls_state/memory/stats/mem_type (string)
YANG Description: Memory type
"""
return self.__mem_type
def _set_mem_type(self, v, load=False):
"""
Setter method for mem_type, mapped from YANG variable /mpls_state/memory/stats/mem_type (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_mem_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mem_type() directly.
YANG Description: Memory type
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="mem-type", rest_name="mem-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mem_type must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="mem-type", rest_name="mem-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)""",
})
self.__mem_type = t
if hasattr(self, '_set'):
self._set()
def _unset_mem_type(self):
self.__mem_type = YANGDynClass(base=unicode, is_leaf=True, yang_name="mem-type", rest_name="mem-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
def _get_num_alloc(self):
"""
Getter method for num_alloc, mapped from YANG variable /mpls_state/memory/stats/num_alloc (uint32)
YANG Description: Number of allocations
"""
return self.__num_alloc
def _set_num_alloc(self, v, load=False):
"""
Setter method for num_alloc, mapped from YANG variable /mpls_state/memory/stats/num_alloc (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_num_alloc is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_num_alloc() directly.
YANG Description: Number of allocations
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="num-alloc", rest_name="num-alloc", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """num_alloc must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="num-alloc", rest_name="num-alloc", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
})
self.__num_alloc = t
if hasattr(self, '_set'):
self._set()
def _unset_num_alloc(self):
    # Reset the leaf to a fresh, empty uint32 wrapper, discarding any value.
    self.__num_alloc = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="num-alloc", rest_name="num-alloc", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
def _get_total_bytes(self):
    """Return the current value of the total_bytes leaf.

    Mapped from YANG variable /mpls_state/memory/stats/total_bytes (uint32).
    YANG Description: Total bytes
    """
    value = self.__total_bytes
    return value
def _set_total_bytes(self, v, load=False):
    """
    Setter method for total_bytes, mapped from YANG variable /mpls_state/memory/stats/total_bytes (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_total_bytes is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_total_bytes() directly.

    YANG Description: Total bytes
    """
    # pyangbind unified types carry a _utype coercion hook; apply it first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap so the uint32 range restriction is enforced on assignment.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="total-bytes", rest_name="total-bytes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
        # Incompatible values surface as a structured error dict describing
        # the expected YANG type.
        raise ValueError({
            'error-string': """total_bytes must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="total-bytes", rest_name="total-bytes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
        })

    self.__total_bytes = t
    # NOTE(review): presumably notifies the parent container of the change
    # (pyangbind convention) -- confirm against the generated base class.
    if hasattr(self, '_set'):
        self._set()
def _unset_total_bytes(self):
    # Reset the leaf to a fresh, empty uint32 wrapper, discarding any value.
    self.__total_bytes = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="total-bytes", rest_name="total-bytes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
def _get_total_allocs(self):
    """Return the current value of the total_allocs leaf.

    Mapped from YANG variable /mpls_state/memory/stats/total_allocs (uint32).
    YANG Description: Total allocations
    """
    value = self.__total_allocs
    return value
def _set_total_allocs(self, v, load=False):
    """
    Setter method for total_allocs, mapped from YANG variable /mpls_state/memory/stats/total_allocs (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_total_allocs is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_total_allocs() directly.

    YANG Description: Total allocations
    """
    # pyangbind unified types carry a _utype coercion hook; apply it first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap so the uint32 range restriction is enforced on assignment.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="total-allocs", rest_name="total-allocs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
        # Incompatible values surface as a structured error dict describing
        # the expected YANG type.
        raise ValueError({
            'error-string': """total_allocs must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="total-allocs", rest_name="total-allocs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
        })

    self.__total_allocs = t
    # NOTE(review): presumably notifies the parent container of the change
    # (pyangbind convention) -- confirm against the generated base class.
    if hasattr(self, '_set'):
        self._set()
def _unset_total_allocs(self):
    # Reset the leaf to a fresh, empty uint32 wrapper, discarding any value.
    self.__total_allocs = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="total-allocs", rest_name="total-allocs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
def _get_total_frees(self):
    """Return the current value of the total_frees leaf.

    Mapped from YANG variable /mpls_state/memory/stats/total_frees (uint32).
    YANG Description: Total frees
    """
    value = self.__total_frees
    return value
def _set_total_frees(self, v, load=False):
    """
    Setter method for total_frees, mapped from YANG variable /mpls_state/memory/stats/total_frees (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_total_frees is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_total_frees() directly.

    YANG Description: Total frees
    """
    # pyangbind unified types carry a _utype coercion hook; apply it first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap so the uint32 range restriction is enforced on assignment.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="total-frees", rest_name="total-frees", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
        # Incompatible values surface as a structured error dict describing
        # the expected YANG type.
        raise ValueError({
            'error-string': """total_frees must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="total-frees", rest_name="total-frees", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
        })

    self.__total_frees = t
    # NOTE(review): presumably notifies the parent container of the change
    # (pyangbind convention) -- confirm against the generated base class.
    if hasattr(self, '_set'):
        self._set()
def _unset_total_frees(self):
    # Reset the leaf to a fresh, empty uint32 wrapper, discarding any value.
    self.__total_frees = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="total-frees", rest_name="total-frees", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
def _get_peak_alloc(self):
    """Return the current value of the peak_alloc leaf.

    Mapped from YANG variable /mpls_state/memory/stats/peak_alloc (uint32).
    YANG Description: Peak allocations
    """
    value = self.__peak_alloc
    return value
def _set_peak_alloc(self, v, load=False):
    """
    Setter method for peak_alloc, mapped from YANG variable /mpls_state/memory/stats/peak_alloc (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_peak_alloc is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_peak_alloc() directly.

    YANG Description: Peak allocations
    """
    # pyangbind unified types carry a _utype coercion hook; apply it first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap so the uint32 range restriction is enforced on assignment.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="peak-alloc", rest_name="peak-alloc", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
        # Incompatible values surface as a structured error dict describing
        # the expected YANG type.
        raise ValueError({
            'error-string': """peak_alloc must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="peak-alloc", rest_name="peak-alloc", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
        })

    self.__peak_alloc = t
    # NOTE(review): presumably notifies the parent container of the change
    # (pyangbind convention) -- confirm against the generated base class.
    if hasattr(self, '_set'):
        self._set()
def _unset_peak_alloc(self):
    # Reset the leaf to a fresh, empty uint32 wrapper, discarding any value.
    self.__peak_alloc = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="peak-alloc", rest_name="peak-alloc", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
def _get_alloc_fails(self):
    """Return the current value of the alloc_fails leaf.

    Mapped from YANG variable /mpls_state/memory/stats/alloc_fails (uint32).
    YANG Description: Allocation Fails
    """
    value = self.__alloc_fails
    return value
def _set_alloc_fails(self, v, load=False):
    """
    Setter method for alloc_fails, mapped from YANG variable /mpls_state/memory/stats/alloc_fails (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_alloc_fails is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_alloc_fails() directly.

    YANG Description: Allocation Fails
    """
    # pyangbind unified types carry a _utype coercion hook; apply it first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap so the uint32 range restriction is enforced on assignment.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="alloc-fails", rest_name="alloc-fails", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
        # Incompatible values surface as a structured error dict describing
        # the expected YANG type.
        raise ValueError({
            'error-string': """alloc_fails must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="alloc-fails", rest_name="alloc-fails", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
        })

    self.__alloc_fails = t
    # NOTE(review): presumably notifies the parent container of the change
    # (pyangbind convention) -- confirm against the generated base class.
    if hasattr(self, '_set'):
        self._set()
def _unset_alloc_fails(self):
    # Reset the leaf to a fresh, empty uint32 wrapper, discarding any value.
    self.__alloc_fails = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="alloc-fails", rest_name="alloc-fails", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
def _get_free_fails(self):
    """Return the current value of the free_fails leaf.

    Mapped from YANG variable /mpls_state/memory/stats/free_fails (uint32).
    YANG Description: Free fails
    """
    value = self.__free_fails
    return value
def _set_free_fails(self, v, load=False):
    """
    Setter method for free_fails, mapped from YANG variable /mpls_state/memory/stats/free_fails (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_free_fails is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_free_fails() directly.

    YANG Description: Free fails
    """
    # pyangbind unified types carry a _utype coercion hook; apply it first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap so the uint32 range restriction is enforced on assignment.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="free-fails", rest_name="free-fails", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
        # Incompatible values surface as a structured error dict describing
        # the expected YANG type.
        raise ValueError({
            'error-string': """free_fails must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="free-fails", rest_name="free-fails", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
        })

    self.__free_fails = t
    # NOTE(review): presumably notifies the parent container of the change
    # (pyangbind convention) -- confirm against the generated base class.
    if hasattr(self, '_set'):
        self._set()
def _unset_free_fails(self):
    # Reset the leaf to a fresh, empty uint32 wrapper, discarding any value.
    self.__free_fails = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="free-fails", rest_name="free-fails", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
# Public read-only attribute views over the private getters above; these
# leaves are operational state (config false), so no setters are exposed.
mem_stats_index = __builtin__.property(_get_mem_stats_index)
mem_type = __builtin__.property(_get_mem_type)
num_alloc = __builtin__.property(_get_num_alloc)
total_bytes = __builtin__.property(_get_total_bytes)
total_allocs = __builtin__.property(_get_total_allocs)
total_frees = __builtin__.property(_get_total_frees)
peak_alloc = __builtin__.property(_get_peak_alloc)
alloc_fails = __builtin__.property(_get_alloc_fails)
free_fails = __builtin__.property(_get_free_fails)
# Registry of this container's child elements, keyed by Python-safe leaf name.
_pyangbind_elements = {'mem_stats_index': mem_stats_index, 'mem_type': mem_type, 'num_alloc': num_alloc, 'total_bytes': total_bytes, 'total_allocs': total_allocs, 'total_frees': total_frees, 'peak_alloc': peak_alloc, 'alloc_fails': alloc_fails, 'free_fails': free_fails, }
| 64.692825
| 471
| 0.739923
| 3,910
| 28,853
| 5.186701
| 0.048082
| 0.042406
| 0.052465
| 0.051085
| 0.84571
| 0.815187
| 0.80932
| 0.807988
| 0.798126
| 0.798126
| 0
| 0.021979
| 0.131148
| 28,853
| 445
| 472
| 64.838202
| 0.786988
| 0.167608
| 0
| 0.47619
| 0
| 0.035714
| 0.348988
| 0.18632
| 0
| 0
| 0
| 0
| 0
| 1
| 0.119048
| false
| 0
| 0.031746
| 0
| 0.265873
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a73cf3ee930fdb9aba902abb75b9af4d7c571afb
| 2,896
|
py
|
Python
|
tests/test_schema/test_image_schema.py
|
locriandev/ocp-build-data-validator
|
66c8e7a37fc48af1bdb125c000e842b5c6ed536d
|
[
"Apache-2.0"
] | 1
|
2020-05-20T10:08:10.000Z
|
2020-05-20T10:08:10.000Z
|
tests/test_schema/test_image_schema.py
|
locriandev/ocp-build-data-validator
|
66c8e7a37fc48af1bdb125c000e842b5c6ed536d
|
[
"Apache-2.0"
] | 51
|
2019-10-08T09:55:38.000Z
|
2022-03-28T08:08:15.000Z
|
tests/test_schema/test_image_schema.py
|
locriandev/ocp-build-data-validator
|
66c8e7a37fc48af1bdb125c000e842b5c6ed536d
|
[
"Apache-2.0"
] | 18
|
2019-10-07T11:59:48.000Z
|
2021-12-10T11:00:57.000Z
|
import unittest
from flexmock import flexmock
from validator.schema import image_schema
class TestImageSchema(unittest.TestCase):
    """Tests for image_schema.validate()."""

    def _stub_support(self):
        # Stub out repo-data lookups so validation runs without external data.
        (flexmock(image_schema.support)
            .should_receive('get_valid_streams_for')
            .and_return([]))
        (flexmock(image_schema.support)
            .should_receive('get_valid_member_references_for')
            .and_return([]))

    def _git_content(self, url):
        # Minimal content.source.git stanza pointing at the given URL.
        return {
            'source': {
                'git': {
                    'branch': {
                        'target': 'test',
                    },
                    'url': url
                }
            }
        }

    def test_validate_with_valid_data(self):
        self._stub_support()
        data = {
            'from': {},
            'name': 'my-name',
            'for_payload': True,
        }
        self.assertIsNone(image_schema.validate('filename', data))

    def test_validate_with_invalid_data(self):
        self._stub_support()
        data = {
            'from': {},
            'name': 1234,
        }
        self.assertEqual("Key 'name' error:\n1234 should be instance of 'str'",
                         image_schema.validate('filename', data))

    def test_validate_with_invalid_content_source_git_url(self):
        self._stub_support()
        data = {
            'content': self._git_content('https://github.com/openshift/csi-node-driver-registrar'),
            'name': '1234',
            'from': {},
        }
        self.assertIn("Key 'content' error:\nKey", image_schema.validate('filename', data))

    def test_validate_with_valid_content_source_git_url(self):
        self._stub_support()
        data = {
            'content': self._git_content('git@github.com:openshift/csi-node-driver-registrar.git'),
            'name': '1234',
            'from': {},
            'for_payload': True,
        }
        self.assertIsNone(image_schema.validate('filename', data))
| 31.139785
| 99
| 0.51174
| 257
| 2,896
| 5.420233
| 0.229572
| 0.102656
| 0.109117
| 0.149318
| 0.809763
| 0.788945
| 0.765255
| 0.707825
| 0.707825
| 0.707825
| 0
| 0.008777
| 0.370511
| 2,896
| 92
| 100
| 31.478261
| 0.755348
| 0
| 0
| 0.649351
| 0
| 0
| 0.194406
| 0.09047
| 0
| 0
| 0
| 0
| 0.051948
| 1
| 0.051948
| false
| 0
| 0.038961
| 0
| 0.103896
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a73d124760425ea3d300dffaa5cbc16b05fd13b2
| 31,778
|
py
|
Python
|
LaU-reg/encoding/nn/customize.py
|
HolmesShuan/Location-aware-Upsampling-for-Semantic-Segmentation
|
83822e86570bbff4ca721d80089b5d82f1958852
|
[
"BSD-2-Clause"
] | 51
|
2019-11-14T01:48:24.000Z
|
2021-11-09T02:42:22.000Z
|
LaU-reg/encoding/nn/customize.py
|
HolmesShuan/Location-aware-Upsampling-for-Semantic-Segmentation
|
83822e86570bbff4ca721d80089b5d82f1958852
|
[
"BSD-2-Clause"
] | 4
|
2019-11-15T10:14:10.000Z
|
2020-03-17T12:14:50.000Z
|
LaU-reg/encoding/nn/customize.py
|
HolmesShuan/Location-aware-Upsampling-for-Semantic-Segmentation
|
83822e86570bbff4ca721d80089b5d82f1958852
|
[
"BSD-2-Clause"
] | 9
|
2019-11-14T12:39:03.000Z
|
2020-03-03T08:27:19.000Z
|
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang
## ECE Department, Rutgers University
## Email: zhang.hang@rutgers.edu
## Copyright (c) 2017
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
"""Encoding Custermized NN Module"""
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.nn import Module, Sequential, Conv2d, ReLU, AdaptiveAvgPool2d, BCELoss, CrossEntropyLoss, NLLLoss
from torch.autograd import Variable
# First three characters of the torch version string (e.g. '0.4', '1.1').
torch_ver = torch.__version__[:3]
# Public API of this module.
__all__ = ['SegmentationLosses', 'OffsetLosses', 'PyramidPooling', 'JPU', 'Mean']
class SegmentationLosses(CrossEntropyLoss):
    """2D Cross Entropy Loss with Auxilary Loss.

    Depending on the (se_loss, aux) flags, forward() expects:
      * neither:  (pred, target)            -> plain cross entropy
      * aux only: (pred1, pred2, target)    -> CE(pred1) + aux_weight * CE(pred2)
      * se only:  (pred, se_pred, target)   -> CE + se_weight * BCE on class presence
      * both:     (pred1_diffdup, se_pred, pred1_detup, pred2_detup,
                   pred2_diffup, target)    -> CE terms on the *_diffdup/_diffup
                   predictions plus the SE term
    """
    def __init__(self, se_loss=False, se_weight=0.2, nclass=-1,
                 aux=False, aux_weight=0.4, weight=None,
                 size_average=True, ignore_index=-1):
        # BUG FIX: size_average was hard-coded to True in this super() call,
        # silently ignoring the caller-supplied argument (self.bceloss below
        # already honoured it). Forward the parameter instead.
        super(SegmentationLosses, self).__init__(weight, size_average=size_average,
                                                 ignore_index=ignore_index)
        self.se_loss = se_loss
        self.aux = aux
        self.nclass = nclass
        self.se_weight = se_weight
        self.aux_weight = aux_weight
        self.bceloss = BCELoss(weight, size_average)

    def forward(self, *inputs):
        if not self.se_loss and not self.aux:
            # Plain per-pixel cross entropy.
            return super(SegmentationLosses, self).forward(*inputs)
        elif not self.se_loss:
            # Auxiliary head only.
            pred1, pred2, target = tuple(inputs)
            loss1 = super(SegmentationLosses, self).forward(pred1, target)
            loss2 = super(SegmentationLosses, self).forward(pred2, target)
            return loss1 + self.aux_weight * loss2
        elif not self.aux:
            # SE (per-image class presence) loss only.
            pred, se_pred, target = tuple(inputs)
            se_target = self._get_batch_label_vector(target, nclass=self.nclass).type_as(pred)
            loss1 = super(SegmentationLosses, self).forward(pred, target)
            loss2 = self.bceloss(torch.sigmoid(se_pred), se_target)
            return loss1 + self.se_weight * loss2
        else:
            # Both auxiliary and SE losses.
            pred1_diffdup, se_pred, pred1_detup, pred2_detup, pred2_diffup, target = tuple(inputs)
            se_target = self._get_batch_label_vector(target, nclass=self.nclass).type_as(pred1_diffdup)
            loss1 = super(SegmentationLosses, self).forward(pred1_diffdup, target)
            loss2 = super(SegmentationLosses, self).forward(pred2_diffup, target)
            loss3 = self.bceloss(torch.sigmoid(se_pred), se_target)
            return loss1 + self.aux_weight * loss2 + self.se_weight * loss3

    @staticmethod
    def _get_batch_label_vector(target, nclass):
        # target is a 3D Variable BxHxW, output is 2D BxnClass: 1 where a
        # class appears anywhere in the image, else 0.
        batch = target.size(0)
        tvect = Variable(torch.zeros(batch, nclass))
        for i in range(batch):
            hist = torch.histc(target[i].cpu().data.float(),
                               bins=nclass, min=0,
                               max=nclass - 1)
            vect = hist > 0
            tvect[i] = vect
        return tvect
class OffsetLosses(Module):
"""2D Cross Entropy Loss with Auxilary Loss"""
def __init__(self, se_loss=False, se_weight=0.2, nclass=-1,
aux=False, aux_weight=0.4, offset=True, offset_weight=0.3, location_regression_weight=0.3,
weight=None, size_average=True, ignore_index=-1):
super(OffsetLosses, self).__init__()
self.se_loss = se_loss
self.aux = aux
self.nclass = nclass
self.offset = offset
self.se_weight = se_weight
self.aux_weight = aux_weight
self.offset_weight = offset_weight
self.location_regression_weight = location_regression_weight
self.bceloss = BCELoss(weight, size_average)
self.logsoftmax = nn.LogSoftmax(dim=1)
self.nllloss = nn.NLLLoss(reduction='none', ignore_index=ignore_index)
self.smoothl1 = nn.SmoothL1Loss(reduction='mean')
self.crossentropy = nn.CrossEntropyLoss(weight, size_average=size_average, ignore_index=ignore_index)
def forward(self, *inputs):
if self.se_loss and self.aux:
pred1_diffdup, se_pred, pred1_detup, grid, pred1_lt_detup, pred1_lb_detup, pred1_rt_detup, pred1_rb_detup, pred2, offsets, target = tuple(inputs)
se_target = self._get_batch_label_vector(target, nclass=self.nclass).type_as(pred1_diffdup)
pred1_diffup_logsoftmax = self.logsoftmax(pred1_diffdup)
target_1 = F.interpolate(target.unsqueeze(dim=1).float(), size=(pred1_diffdup.size(2),pred1_diffdup.size(3)), mode='nearest')
pred1_loss1 = self.nllloss(pred1_diffup_logsoftmax, target_1.squeeze().long()).unsqueeze(dim=1)
with torch.no_grad():
pred1_detup_logsoftmax = self.logsoftmax(pred1_detup)
pred1_lt_detup_logsoftmax = self.logsoftmax(pred1_lt_detup)
pred1_lb_detup_logsoftmax = self.logsoftmax(pred1_lb_detup)
pred1_rt_detup_logsoftmax = self.logsoftmax(pred1_rt_detup)
pred1_rb_detup_logsoftmax = self.logsoftmax(pred1_rb_detup)
pred1_loss2 = self.nllloss(pred1_detup_logsoftmax, target_1.squeeze().long()).unsqueeze(dim=1)
pred1_loss3 = self.nllloss(pred1_lt_detup_logsoftmax, target_1.squeeze().long()).unsqueeze(dim=1)
pred1_loss4 = self.nllloss(pred1_lb_detup_logsoftmax, target_1.squeeze().long()).unsqueeze(dim=1)
pred1_loss5 = self.nllloss(pred1_rt_detup_logsoftmax, target_1.squeeze().long()).unsqueeze(dim=1)
pred1_loss6 = self.nllloss(pred1_rb_detup_logsoftmax, target_1.squeeze().long()).unsqueeze(dim=1)
coords_lt = grid.floor().float() - grid.float()
coords_rb = grid.ceil().float() - grid.float()
coords_lb = torch.cat((coords_rb[:,0,:,:].unsqueeze(dim=1), coords_lt[:,1,:,:].unsqueeze(dim=1)), 1) # coords_lt[..., 0] : row | coords_lt[..., 1] : col
coords_rt = torch.cat((coords_lt[:,0,:,:].unsqueeze(dim=1), coords_rb[:,1,:,:].unsqueeze(dim=1)), 1)
gt_offsets = torch.zeros(offsets.shape).to(offsets.device)
gt_offsets = gt_offsets + offsets
min_error = pred1_loss1
error_map = torch.lt(pred1_loss3, min_error).float()
gt_offsets = gt_offsets + error_map*(coords_lt-gt_offsets)
min_error = torch.min(pred1_loss3, min_error)
error_map = torch.lt(pred1_loss4, min_error).float()
gt_offsets = gt_offsets + error_map*(coords_lb-gt_offsets)
min_error = torch.min(pred1_loss4, min_error)
error_map = torch.lt(pred1_loss5, min_error).float()
gt_offsets = gt_offsets + error_map*(coords_rt-gt_offsets)
min_error = torch.min(pred1_loss5, min_error)
error_map = torch.lt(pred1_loss6, min_error).float()
gt_offsets = gt_offsets + error_map*(coords_rb-gt_offsets)
min_error = torch.min(pred1_loss6, min_error)
error_map_loss1 = torch.gt(pred1_loss1, min_error).float()
error_map_loss1 = error_map_loss1.mul(self.offset_weight)
error_map_loss1.add_(1.0)
pred1_loss1.mul_(error_map_loss1.detach())
offset_loss = self.smoothl1(gt_offsets.detach(), offsets)
loss1 = torch.mean(pred1_loss1)
loss2 = self.crossentropy(pred2, target)
loss3 = self.bceloss(torch.sigmoid(se_pred), se_target)
loss4 = offset_loss
return loss1 + self.aux_weight * loss2 + self.se_weight * loss3 + self.location_regression_weight * loss4
elif not self.se_loss:
pred1_diffdup, pred1_detup, grid, pred1_lt_detup, pred1_lb_detup, pred1_rt_detup, pred1_rb_detup, pred2, offsets, target = tuple(inputs)
se_target = self._get_batch_label_vector(target, nclass=self.nclass).type_as(pred1_diffdup)
pred1_diffup_logsoftmax = self.logsoftmax(pred1_diffdup)
target_1 = F.interpolate(target.unsqueeze(dim=1).float(), size=(pred1_diffdup.size(2),pred1_diffdup.size(3)), mode='nearest')
pred1_loss1 = self.nllloss(pred1_diffup_logsoftmax, target_1.squeeze().long()).unsqueeze(dim=1)
with torch.no_grad():
pred1_detup_logsoftmax = self.logsoftmax(pred1_detup)
pred1_lt_detup_logsoftmax = self.logsoftmax(pred1_lt_detup)
pred1_lb_detup_logsoftmax = self.logsoftmax(pred1_lb_detup)
pred1_rt_detup_logsoftmax = self.logsoftmax(pred1_rt_detup)
pred1_rb_detup_logsoftmax = self.logsoftmax(pred1_rb_detup)
pred1_loss2 = self.nllloss(pred1_detup_logsoftmax, target_1.squeeze().long()).unsqueeze(dim=1)
pred1_loss3 = self.nllloss(pred1_lt_detup_logsoftmax, target_1.squeeze().long()).unsqueeze(dim=1)
pred1_loss4 = self.nllloss(pred1_lb_detup_logsoftmax, target_1.squeeze().long()).unsqueeze(dim=1)
pred1_loss5 = self.nllloss(pred1_rt_detup_logsoftmax, target_1.squeeze().long()).unsqueeze(dim=1)
pred1_loss6 = self.nllloss(pred1_rb_detup_logsoftmax, target_1.squeeze().long()).unsqueeze(dim=1)
coords_lt = grid.floor().float() - grid.float()
coords_rb = grid.ceil().float() - grid.float()
coords_lb = torch.cat((coords_rb[:,0,:,:].unsqueeze(dim=1), coords_lt[:,1,:,:].unsqueeze(dim=1)), 1) # coords_lt[..., 0] : row | coords_lt[..., 1] : col
coords_rt = torch.cat((coords_lt[:,0,:,:].unsqueeze(dim=1), coords_rb[:,1,:,:].unsqueeze(dim=1)), 1)
gt_offsets = torch.zeros(offsets.shape).to(offsets.device)
gt_offsets = gt_offsets + offsets
min_error = pred1_loss1
error_map = torch.lt(pred1_loss3, min_error).float()
gt_offsets = gt_offsets + error_map*(coords_lt-gt_offsets)
min_error = torch.min(pred1_loss3, min_error)
error_map = torch.lt(pred1_loss4, min_error).float()
gt_offsets = gt_offsets + error_map*(coords_lb-gt_offsets)
min_error = torch.min(pred1_loss4, min_error)
error_map = torch.lt(pred1_loss5, min_error).float()
gt_offsets = gt_offsets + error_map*(coords_rt-gt_offsets)
min_error = torch.min(pred1_loss5, min_error)
error_map = torch.lt(pred1_loss6, min_error).float()
gt_offsets = gt_offsets + error_map*(coords_rb-gt_offsets)
min_error = torch.min(pred1_loss6, min_error)
error_map_loss1 = torch.gt(pred1_loss1, min_error).float()
error_map_loss1 = error_map_loss1.mul(self.offset_weight)
error_map_loss1.add_(1.0)
pred1_loss1.mul_(error_map_loss1.detach())
offset_loss = self.smoothl1(gt_offsets.detach(), offsets)
loss1 = torch.mean(pred1_loss1)
loss2 = self.crossentropy(pred2, target)
loss4 = offset_loss
return loss1 + self.aux_weight * loss2 + self.location_regression_weight * loss4
elif not self.aux:
pred1_diffdup, se_pred, pred1_detup, grid, pred1_lt_detup, pred1_lb_detup, pred1_rt_detup, pred1_rb_detup, offsets, target = tuple(inputs)
se_target = self._get_batch_label_vector(target, nclass=self.nclass).type_as(pred1_diffdup)
pred1_diffup_logsoftmax = self.logsoftmax(pred1_diffdup)
target_1 = F.interpolate(target.unsqueeze(dim=1).float(), size=(pred1_diffdup.size(2),pred1_diffdup.size(3)), mode='nearest')
pred1_loss1 = self.nllloss(pred1_diffup_logsoftmax, target_1.squeeze().long()).unsqueeze(dim=1)
with torch.no_grad():
pred1_detup_logsoftmax = self.logsoftmax(pred1_detup)
pred1_lt_detup_logsoftmax = self.logsoftmax(pred1_lt_detup)
pred1_lb_detup_logsoftmax = self.logsoftmax(pred1_lb_detup)
pred1_rt_detup_logsoftmax = self.logsoftmax(pred1_rt_detup)
pred1_rb_detup_logsoftmax = self.logsoftmax(pred1_rb_detup)
pred1_loss2 = self.nllloss(pred1_detup_logsoftmax, target_1.squeeze().long()).unsqueeze(dim=1)
pred1_loss3 = self.nllloss(pred1_lt_detup_logsoftmax, target_1.squeeze().long()).unsqueeze(dim=1)
pred1_loss4 = self.nllloss(pred1_lb_detup_logsoftmax, target_1.squeeze().long()).unsqueeze(dim=1)
pred1_loss5 = self.nllloss(pred1_rt_detup_logsoftmax, target_1.squeeze().long()).unsqueeze(dim=1)
pred1_loss6 = self.nllloss(pred1_rb_detup_logsoftmax, target_1.squeeze().long()).unsqueeze(dim=1)
coords_lt = grid.floor().float() - grid.float()
coords_rb = grid.ceil().float() - grid.float()
coords_lb = torch.cat((coords_rb[:,0,:,:].unsqueeze(dim=1), coords_lt[:,1,:,:].unsqueeze(dim=1)), 1) # coords_lt[..., 0] : row | coords_lt[..., 1] : col
coords_rt = torch.cat((coords_lt[:,0,:,:].unsqueeze(dim=1), coords_rb[:,1,:,:].unsqueeze(dim=1)), 1)
gt_offsets = torch.zeros(offsets.shape).to(offsets.device)
gt_offsets = gt_offsets + offsets
min_error = pred1_loss1
error_map = torch.lt(pred1_loss3, min_error).float()
gt_offsets = gt_offsets + error_map*(coords_lt-gt_offsets)
min_error = torch.min(pred1_loss3, min_error)
error_map = torch.lt(pred1_loss4, min_error).float()
gt_offsets = gt_offsets + error_map*(coords_lb-gt_offsets)
min_error = torch.min(pred1_loss4, min_error)
error_map = torch.lt(pred1_loss5, min_error).float()
gt_offsets = gt_offsets + error_map*(coords_rt-gt_offsets)
min_error = torch.min(pred1_loss5, min_error)
error_map = torch.lt(pred1_loss6, min_error).float()
gt_offsets = gt_offsets + error_map*(coords_rb-gt_offsets)
min_error = torch.min(pred1_loss6, min_error)
error_map_loss1 = torch.gt(pred1_loss1, min_error).float()
error_map_loss1 = error_map_loss1.mul(self.offset_weight)
error_map_loss1.add_(1.0)
pred1_loss1.mul_(error_map_loss1.detach())
offset_loss = self.smoothl1(gt_offsets.detach(), offsets)
loss1 = torch.mean(pred1_loss1)
loss3 = self.bceloss(torch.sigmoid(se_pred), se_target)
loss4 = offset_loss
return loss1 + self.se_weight * loss3 + self.location_regression_weight * loss4
else:
pred1_diffdup, pred1_detup, grid, pred1_lt_detup, pred1_lb_detup, pred1_rt_detup, pred1_rb_detup, offsets, target = tuple(inputs)
se_target = self._get_batch_label_vector(target, nclass=self.nclass).type_as(pred1_diffdup)
pred1_diffup_logsoftmax = self.logsoftmax(pred1_diffdup)
target_1 = F.interpolate(target.unsqueeze(dim=1).float(), size=(pred1_diffdup.size(2),pred1_diffdup.size(3)), mode='nearest')
pred1_loss1 = self.nllloss(pred1_diffup_logsoftmax, target_1.squeeze().long()).unsqueeze(dim=1)
with torch.no_grad():
pred1_detup_logsoftmax = self.logsoftmax(pred1_detup)
pred1_lt_detup_logsoftmax = self.logsoftmax(pred1_lt_detup)
pred1_lb_detup_logsoftmax = self.logsoftmax(pred1_lb_detup)
pred1_rt_detup_logsoftmax = self.logsoftmax(pred1_rt_detup)
pred1_rb_detup_logsoftmax = self.logsoftmax(pred1_rb_detup)
pred1_loss2 = self.nllloss(pred1_detup_logsoftmax, target_1.squeeze().long()).unsqueeze(dim=1)
pred1_loss3 = self.nllloss(pred1_lt_detup_logsoftmax, target_1.squeeze().long()).unsqueeze(dim=1)
pred1_loss4 = self.nllloss(pred1_lb_detup_logsoftmax, target_1.squeeze().long()).unsqueeze(dim=1)
pred1_loss5 = self.nllloss(pred1_rt_detup_logsoftmax, target_1.squeeze().long()).unsqueeze(dim=1)
pred1_loss6 = self.nllloss(pred1_rb_detup_logsoftmax, target_1.squeeze().long()).unsqueeze(dim=1)
coords_lt = grid.floor().float() - grid.float()
coords_rb = grid.ceil().float() - grid.float()
coords_lb = torch.cat((coords_rb[:,0,:,:].unsqueeze(dim=1), coords_lt[:,1,:,:].unsqueeze(dim=1)), 1) # coords_lt[..., 0] : row | coords_lt[..., 1] : col
coords_rt = torch.cat((coords_lt[:,0,:,:].unsqueeze(dim=1), coords_rb[:,1,:,:].unsqueeze(dim=1)), 1)
gt_offsets = torch.zeros(offsets.shape).to(offsets.device)
gt_offsets = gt_offsets + offsets
min_error = pred1_loss1
error_map = torch.lt(pred1_loss3, min_error).float()
gt_offsets = gt_offsets + error_map*(coords_lt-gt_offsets)
min_error = torch.min(pred1_loss3, min_error)
error_map = torch.lt(pred1_loss4, min_error).float()
gt_offsets = gt_offsets + error_map*(coords_lb-gt_offsets)
min_error = torch.min(pred1_loss4, min_error)
error_map = torch.lt(pred1_loss5, min_error).float()
gt_offsets = gt_offsets + error_map*(coords_rt-gt_offsets)
min_error = torch.min(pred1_loss5, min_error)
error_map = torch.lt(pred1_loss6, min_error).float()
gt_offsets = gt_offsets + error_map*(coords_rb-gt_offsets)
min_error = torch.min(pred1_loss6, min_error)
error_map_loss1 = torch.gt(pred1_loss1, min_error).float()
error_map_loss1 = error_map_loss1.mul(self.offset_weight)
error_map_loss1.add_(1.0)
pred1_loss1.mul_(error_map_loss1.detach())
offset_loss = self.smoothl1(gt_offsets.detach(), offsets)
loss1 = torch.mean(pred1_loss1)
loss4 = offset_loss
return loss1 + self.location_regression_weight * loss4
@staticmethod
def to_one_hot(labels, C=2):
    """Convert an integer label map to a one-hot encoding.

    Args:
        labels: tensor of shape (B, 1, H, W) holding class indices
            (values in [0, C)); any numeric dtype, cast to long here.
        C (int): number of classes / one-hot channels. Default: 2.

    Returns:
        Float tensor of shape (B, C, H, W) on the same device as
        ``labels``, with a 1.0 at each pixel's class channel.
    """
    # BUG FIX: the original called .cuda() unconditionally before
    # .to(labels.device), which crashed on CPU-only hosts and was
    # redundant anyway — allocating directly on labels.device suffices.
    one_hot = torch.zeros(labels.size(0), C, labels.size(2), labels.size(3),
                          device=labels.device)
    target = one_hot.scatter_(1, labels.long(), 1.0)
    return target
@staticmethod
def _get_batch_label_vector(target, nclass):
# target is a 3D Variable BxHxW, output is 2D BxnClass
batch = target.size(0)
tvect = Variable(torch.zeros(batch, nclass))
for i in range(batch):
hist = torch.histc(target[i].cpu().data.float(),
bins=nclass, min=0,
max=nclass-1)
vect = hist>0
tvect[i] = vect
return tvect
'''
class OffsetLosses(Module):
"""2D Cross Entropy Loss with Auxilary Loss"""
def __init__(self, se_loss=False, se_weight=0.2, nclass=-1,
aux=False, aux_weight=0.4, offset=True, offset_weight=0.5, weight=None,
size_average=True, ignore_index=-1):
super(OffsetLosses, self).__init__()
self.se_loss = se_loss
self.aux = aux
self.nclass = nclass
self.offset = offset
self.se_weight = se_weight
self.aux_weight = aux_weight
self.offset_weight = offset_weight
self.bceloss = BCELoss(weight, size_average)
self.logsoftmax = nn.LogSoftmax(dim=1)
self.nllloss = nn.NLLLoss(reduction='none', ignore_index=ignore_index)
# self.crossentropy = nn.CrossEntropyLoss(reduction='mean', ignore_index=ignore_index)
def forward(self, *inputs):
if self.se_loss and self.aux:
pred1_diffdup, se_pred, pred1_detup, pred2_detup, pred2_diffup, target = tuple(inputs)
se_target = self._get_batch_label_vector(target, nclass=self.nclass).type_as(pred1_diffdup)
pred1_diffup_logsoftmax = self.logsoftmax(pred1_diffdup)
pred1_detup_logsoftmax = self.logsoftmax(pred1_detup)
target_1 = F.interpolate(target.unsqueeze(dim=1).float(), size=(pred1_diffdup.size(2),pred1_diffdup.size(3)), mode='nearest')
# pred1_loss1 = self.nllloss(pred1_diffup_logsoftmax, target)
pred1_loss1 = self.nllloss(pred1_diffup_logsoftmax, target_1.squeeze().long())
pred1_loss2 = self.nllloss(pred1_detup_logsoftmax, target_1.squeeze().long())
error_map1 = torch.gt(pred1_loss1, pred1_loss2).float()
error_map1.mul_(self.offset_weight)
error_map1.add_(1.0)
pred1_loss1.mul_(error_map1.detach())
loss1 = torch.mean(pred1_loss1)
pred2_diffup_logsoftmax = self.logsoftmax(pred2_diffup)
pred2_detup_logsoftmax = self.logsoftmax(pred2_detup)
target_2 = F.interpolate(target.unsqueeze(dim=1).float(), size=(pred2_diffup.size(2),pred2_diffup.size(3)), mode='nearest')
# pred2_loss1 = self.nllloss(pred2_diffup_logsoftmax, target)
pred2_loss1 = self.nllloss(pred2_diffup_logsoftmax, target_2.squeeze().long())
pred2_loss2 = self.nllloss(pred2_detup_logsoftmax, target_2.squeeze().long())
error_map2 = torch.gt(pred2_loss1, pred2_loss2).float()
error_map2.mul_(self.offset_weight)
error_map2.add_(1.0)
pred2_loss1.mul_(error_map2.detach())
loss2 = torch.mean(pred2_loss1)
loss3 = self.bceloss(torch.sigmoid(se_pred), se_target)
return loss1 + self.aux_weight * loss2 + self.se_weight * loss3
elif not self.se_loss:
pred1_diffdup, pred1_detup, pred2_detup, pred2_diffup, target = tuple(inputs)
pred1_diffup_logsoftmax = self.logsoftmax(pred1_diffdup)
pred1_detup_logsoftmax = self.logsoftmax(pred1_detup)
target_1 = F.interpolate(target.unsqueeze(dim=1).float(), size=(pred1_diffdup.size(2),pred1_diffdup.size(3)), mode='nearest')
pred1_loss1 = self.nllloss(pred1_diffup_logsoftmax, target_1.squeeze().long())
pred1_loss2 = self.nllloss(pred1_detup_logsoftmax, target_1.squeeze().long())
error_map1 = torch.gt(pred1_loss1, pred1_loss2).float()
error_map1.mul_(self.offset_weight)
error_map1.add_(1.0)
pred1_loss1.mul_(error_map1.detach())
loss1 = torch.mean(pred1_loss1)
pred2_diffup_logsoftmax = self.logsoftmax(pred2_diffup)
pred2_detup_logsoftmax = self.logsoftmax(pred2_detup)
target_2 = F.interpolate(target.unsqueeze(dim=1).float(), size=(pred2_diffup.size(2),pred2_diffup.size(3)), mode='nearest')
pred2_loss1 = self.nllloss(pred2_diffup_logsoftmax, target_2.squeeze().long())
pred2_loss2 = self.nllloss(pred2_detup_logsoftmax, target_2.squeeze().long())
error_map2 = torch.gt(pred2_loss1, pred2_loss2).float()
error_map2.mul_(self.offset_weight)
error_map2.add_(1.0)
pred2_loss1.mul_(error_map2.detach())
loss2 = torch.mean(pred2_loss1)
return loss1 + self.aux_weight * loss2
elif not self.aux:
pred1_diffdup, se_pred, pred1_detup, target = tuple(inputs)
se_target = self._get_batch_label_vector(target, nclass=self.nclass).type_as(pred1_diffdup)
pred1_diffup_logsoftmax = self.logsoftmax(pred1_diffdup)
pred1_detup_logsoftmax = self.logsoftmax(pred1_detup)
target_1 = F.interpolate(target.unsqueeze(dim=1).float(), size=(pred1_diffdup.size(2),pred1_diffdup.size(3)), mode='nearest')
pred1_loss1 = self.nllloss(pred1_diffup_logsoftmax, target_1.squeeze().long())
pred1_loss2 = self.nllloss(pred1_detup_logsoftmax, target_1.squeeze().long())
error_map1 = torch.gt(pred1_loss1, pred1_loss2).float()
error_map1.mul_(self.offset_weight)
error_map1.add_(1.0)
pred1_loss1.mul_(error_map1.detach())
loss1 = torch.mean(pred1_loss1)
loss3 = self.bceloss(torch.sigmoid(se_pred), se_target)
return loss1 + self.se_weight * loss3
else:
pred1_diffdup, pred1_detup, target = tuple(inputs)
pred1_diffup_logsoftmax = self.logsoftmax(pred1_diffdup)
pred1_detup_logsoftmax = self.logsoftmax(pred1_detup)
target_1 = F.interpolate(target.unsqueeze(dim=1).float(), size=(pred1_diffdup.size(2),pred1_diffdup.size(3)), mode='nearest')
pred1_loss1 = self.nllloss(pred1_diffup_logsoftmax, target_1.squeeze().long())
pred1_loss2 = self.nllloss(pred1_detup_logsoftmax, target_1.squeeze().long())
error_map1 = torch.gt(pred1_loss1, pred1_loss2).float()
error_map1.mul_(self.offset_weight)
error_map1.add_(1.0)
pred1_loss1.mul_(error_map1.detach())
loss1 = torch.mean(pred1_loss1)
return loss1
@staticmethod
def to_one_hot(labels, C=2):
one_hot = torch.zeros(labels.size(0), C, labels.size(2), labels.size(3)).cuda().to(labels.device)
target = one_hot.scatter_(1, labels.long(), 1.0)
return target
@staticmethod
def _get_batch_label_vector(target, nclass):
# target is a 3D Variable BxHxW, output is 2D BxnClass
batch = target.size(0)
tvect = Variable(torch.zeros(batch, nclass))
for i in range(batch):
hist = torch.histc(target[i].cpu().data.float(),
bins=nclass, min=0,
max=nclass-1)
vect = hist>0
tvect[i] = vect
return tvect
'''
class Normalize(Module):
    """Apply :math:`L_p` normalisation along one dimension of the input.

    Each sub-tensor ``v`` taken along ``dim`` is replaced by
    ``v / max(||v||_p, eps)``, so the result has (at most) unit p-norm
    along that dimension. The norm is taken per flattened sub-tensor,
    not as a matrix norm. Defaults give Euclidean normalisation over the
    second dimension.

    Args:
        p (float): exponent of the norm. Default: 2.
        dim (int): dimension along which to normalise. Default: 1.
    """

    def __init__(self, p=2, dim=1):
        super(Normalize, self).__init__()
        self.dim = dim
        self.p = p

    def forward(self, x):
        # Delegate to the functional implementation; eps guards against
        # division by zero for all-zero sub-tensors.
        return F.normalize(x, self.p, self.dim, eps=1e-8)
class PyramidPooling(Module):
    """Pyramid pooling module from PSPNet.

    Pools the input at 1x1, 2x2, 3x3 and 6x6 resolutions, projects each
    pooled map to ``in_channels // 4`` channels with a 1x1 conv + norm +
    ReLU, upsamples all of them back to the input resolution and
    concatenates them with the input, so the output has
    ``2 * in_channels`` channels.

    Reference:
        Zhao, Hengshuang, et al. *"Pyramid scene parsing network."*

    Args:
        in_channels (int): channels of the input feature map.
        norm_layer: normalisation layer class (e.g. ``nn.BatchNorm2d``).
        up_kwargs (dict): keyword arguments forwarded to
            ``F.interpolate`` (e.g. ``{'mode': 'bilinear',
            'align_corners': True}``).
    """

    def __init__(self, in_channels, norm_layer, up_kwargs):
        super(PyramidPooling, self).__init__()
        self.pool1 = AdaptiveAvgPool2d(1)
        self.pool2 = AdaptiveAvgPool2d(2)
        self.pool3 = AdaptiveAvgPool2d(3)
        self.pool4 = AdaptiveAvgPool2d(6)
        out_channels = in_channels // 4  # integer division, same as int(in_channels/4)

        def _branch():
            # 1x1 projection shared in structure by every pyramid level.
            return Sequential(Conv2d(in_channels, out_channels, 1, bias=False),
                              norm_layer(out_channels),
                              ReLU(True))

        self.conv1 = _branch()
        self.conv2 = _branch()
        self.conv3 = _branch()
        self.conv4 = _branch()
        # bilinear upsample options
        self._up_kwargs = up_kwargs

    def forward(self, x):
        _, _, h, w = x.size()
        # F.interpolate replaces the deprecated F.upsample (same semantics).
        feat1 = F.interpolate(self.conv1(self.pool1(x)), (h, w), **self._up_kwargs)
        feat2 = F.interpolate(self.conv2(self.pool2(x)), (h, w), **self._up_kwargs)
        feat3 = F.interpolate(self.conv3(self.pool3(x)), (h, w), **self._up_kwargs)
        feat4 = F.interpolate(self.conv4(self.pool4(x)), (h, w), **self._up_kwargs)
        return torch.cat((x, feat1, feat2, feat3, feat4), 1)
class SeparableConv2d(nn.Module):
    """Depthwise-separable convolution: a per-channel (depthwise) conv,
    a normalisation layer, then a 1x1 pointwise conv that mixes channels.
    """

    def __init__(self, inplanes, planes, kernel_size=3, stride=1, padding=1,
                 dilation=1, bias=False, BatchNorm=nn.BatchNorm2d):
        super(SeparableConv2d, self).__init__()
        # groups=inplanes -> one independent filter per input channel.
        self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride,
                               padding, dilation, groups=inplanes, bias=bias)
        self.bn = BatchNorm(inplanes)
        # 1x1 convolution recombining channels into `planes` outputs.
        self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)

    def forward(self, x):
        # depthwise -> norm -> pointwise
        return self.pointwise(self.bn(self.conv1(x)))
class JPU(nn.Module):
    """Joint Pyramid Upsampling module (FastFCN).

    Projects the last three backbone feature maps to ``width`` channels,
    upsamples the two coarser ones to the resolution of the finest of the
    three, concatenates them, and applies four parallel separable
    convolutions with dilation rates 1/2/4/8 whose outputs are
    concatenated to a ``4 * width``-channel feature map.

    Args:
        in_channels (sequence of int): channel counts of the backbone
            outputs; only the last three entries are used.
        width (int): common channel width of the projected features.
        norm_layer: normalisation layer class (e.g. ``nn.BatchNorm2d``).
        up_kwargs (dict): keyword arguments forwarded to ``F.interpolate``.
    """

    def __init__(self, in_channels, width=512, norm_layer=None, up_kwargs=None):
        super(JPU, self).__init__()
        self.up_kwargs = up_kwargs

        self.conv5 = nn.Sequential(
            nn.Conv2d(in_channels[-1], width, 3, padding=1, bias=False),
            norm_layer(width),
            nn.ReLU(inplace=True))
        self.conv4 = nn.Sequential(
            nn.Conv2d(in_channels[-2], width, 3, padding=1, bias=False),
            norm_layer(width),
            nn.ReLU(inplace=True))
        self.conv3 = nn.Sequential(
            nn.Conv2d(in_channels[-3], width, 3, padding=1, bias=False),
            norm_layer(width),
            nn.ReLU(inplace=True))

        def _dilated(rate):
            # One dilated separable-conv branch; padding == dilation keeps
            # the spatial size unchanged for a 3x3 kernel.
            return nn.Sequential(
                SeparableConv2d(3 * width, width, kernel_size=3, padding=rate,
                                dilation=rate, bias=False),
                norm_layer(width),
                nn.ReLU(inplace=True))

        self.dilation1 = _dilated(1)
        self.dilation2 = _dilated(2)
        self.dilation3 = _dilated(4)
        self.dilation4 = _dilated(8)

    def forward(self, *inputs):
        feats = [self.conv5(inputs[-1]), self.conv4(inputs[-2]), self.conv3(inputs[-3])]
        _, _, h, w = feats[-1].size()
        # F.interpolate replaces the deprecated F.upsample (same semantics).
        feats[-2] = F.interpolate(feats[-2], (h, w), **self.up_kwargs)
        feats[-3] = F.interpolate(feats[-3], (h, w), **self.up_kwargs)
        feat = torch.cat(feats, dim=1)
        feat = torch.cat([self.dilation1(feat), self.dilation2(feat),
                          self.dilation3(feat), self.dilation4(feat)], dim=1)
        # Pass the first three inputs through unchanged for skip connections.
        return inputs[0], inputs[1], inputs[2], feat
class Mean(Module):
    """Module wrapper around ``Tensor.mean`` over a fixed dimension.

    Args:
        dim (int): dimension to reduce.
        keep_dim (bool): whether the reduced dimension is retained with
            size 1. Default: False.
    """

    def __init__(self, dim, keep_dim=False):
        super(Mean, self).__init__()
        self.keep_dim = keep_dim
        self.dim = dim

    def forward(self, input):
        return input.mean(self.dim, self.keep_dim)
| 49.808777
| 168
| 0.621782
| 3,971
| 31,778
| 4.696802
| 0.068748
| 0.030883
| 0.034851
| 0.049756
| 0.85159
| 0.835827
| 0.818133
| 0.810788
| 0.809233
| 0.796472
| 0
| 0.037229
| 0.2604
| 31,778
| 637
| 169
| 49.88697
| 0.756329
| 0.045377
| 0
| 0.684507
| 0
| 0
| 0.003749
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047887
| false
| 0
| 0.014085
| 0.005634
| 0.126761
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a74e9f5d9cabcdbdb8ce7a6a895432bc5f409d23
| 140
|
py
|
Python
|
barry/cosmology/__init__.py
|
AaronGlanville/Barry
|
f181448b2ed10a8c08195e7e34819ceb8abfe532
|
[
"MIT"
] | 13
|
2019-07-29T20:39:20.000Z
|
2021-09-26T09:20:52.000Z
|
barry/cosmology/__init__.py
|
AaronGlanville/Barry
|
f181448b2ed10a8c08195e7e34819ceb8abfe532
|
[
"MIT"
] | 1
|
2021-02-11T10:54:58.000Z
|
2021-02-11T10:54:58.000Z
|
barry/cosmology/__init__.py
|
AaronGlanville/Barry
|
f181448b2ed10a8c08195e7e34819ceb8abfe532
|
[
"MIT"
] | 7
|
2019-08-26T04:54:00.000Z
|
2022-01-20T14:47:47.000Z
|
from barry.cosmology.camb_generator import getCambGenerator
from barry.cosmology.pk2xi import PowerToCorrelationGauss, PowerToCorrelationFT
| 46.666667
| 79
| 0.9
| 14
| 140
| 8.928571
| 0.714286
| 0.144
| 0.288
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007634
| 0.064286
| 140
| 2
| 80
| 70
| 0.946565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a78ad3b2a7b4665f65545d1b67bcc28651246227
| 13,934
|
py
|
Python
|
test/store/mongo/test_mongo_api.py
|
SimplyVC/panic_polkadot
|
2c5517b0e01e27d4c54dc6a6609699471b833746
|
[
"Apache-2.0"
] | 41
|
2020-01-22T14:37:17.000Z
|
2021-12-30T16:12:20.000Z
|
test/store/mongo/test_mongo_api.py
|
SimplyVC/panic_polkadot
|
2c5517b0e01e27d4c54dc6a6609699471b833746
|
[
"Apache-2.0"
] | 33
|
2020-01-31T15:04:03.000Z
|
2022-02-27T11:23:13.000Z
|
test/store/mongo/test_mongo_api.py
|
SimplyVC/panic_polkadot
|
2c5517b0e01e27d4c54dc6a6609699471b833746
|
[
"Apache-2.0"
] | 9
|
2020-04-16T07:59:03.000Z
|
2021-10-09T04:35:35.000Z
|
import logging
import unittest
from datetime import timedelta
from time import sleep
from pymongo.errors import PyMongoError, OperationFailure, \
ServerSelectionTimeoutError
from src.store.mongo.mongo_api import MongoApi
from test import TestUserConf
class TestMongoApiWithMongoOnline(unittest.TestCase):
    """Integration tests for MongoApi against a live Mongo instance.

    These tests require the Mongo configured in TestUserConf to be
    reachable; setUpClass aborts the whole class otherwise. The test
    database is dropped before and after every test, so the suite is
    destructive to that database.
    """

    @classmethod
    def setUpClass(cls) -> None:
        # Same as in setUp(), to avoid running all tests if Mongo is offline
        logger = logging.getLogger('dummy')
        db = TestUserConf.mongo_db_name
        host = TestUserConf.mongo_host
        port = TestUserConf.mongo_port
        user = TestUserConf.mongo_user
        password = TestUserConf.mongo_pass
        mongo = MongoApi(logger, db, host, port, username=user,
                         password=password)

        # Ping Mongo
        try:
            mongo.ping_unsafe()
        except PyMongoError:
            raise Exception('Mongo is not online.')

    def setUp(self) -> None:
        """Build a fresh MongoApi, verify connectivity, and reset fixtures."""
        self.logger = logging.getLogger('dummy')
        self.db = TestUserConf.mongo_db_name
        self.host = TestUserConf.mongo_host
        self.port = TestUserConf.mongo_port
        self.user = TestUserConf.mongo_user
        self.password = TestUserConf.mongo_pass
        self.mongo = MongoApi(self.logger, self.db, self.host, self.port,
                              username=self.user, password=self.password)

        # Ping Mongo
        try:
            self.mongo.ping_unsafe()
        except PyMongoError:
            self.fail('Mongo is not online.')

        # Clear test database
        self.mongo.drop_db()

        # Fixture collection names and sample documents (val4 exercises
        # nested documents and mixed-type arrays).
        self.col1 = 'collection1'
        self.col2 = 'collection2'
        self.val1 = {'a': 'b', 'c': 'd'}
        self.val2 = {'e': 'f', 'g': 'h'}
        self.val3 = {'i': 'j'}
        self.val4 = {'k': 'l', 'm': {'n': ['o', 'p', True, False, 1, 2.1]}}
        self.time = timedelta(seconds=3)
        self.time_with_error_margin = timedelta(seconds=4)
        self.default_str = 'DEFAULT'
        self.default_int = 789
        self.default_bool = False

    def tearDown(self) -> None:
        """Drop the test database so runs do not leak state into each other."""
        self.mongo.drop_db()

    def test_insert_one_inserts_value_into_the_specified_collection(self):
        """insert_one stores exactly one document in the target collection."""
        # Check that col1 is empty
        get_result = list(self.mongo._db[self.col1].find({}))
        self.assertEqual(len(get_result), 0)

        # Insert val1 into col1
        self.mongo.insert_one(self.col1, self.val1)

        # Check that value was added to col1
        get_result = list(self.mongo._db[self.col1].find({}))
        self.assertEqual(len(get_result), 1)
        self.assertEqual(dict(get_result[0]), self.val1)

    def test_insert_one_supports_more_complex_documents(self):
        """insert_one round-trips nested documents with mixed value types."""
        # Check that col1 is empty
        get_result = list(self.mongo._db[self.col1].find({}))
        self.assertEqual(len(get_result), 0)

        # Insert val4 into col1
        self.mongo.insert_one(self.col1, self.val4)

        # Check that value was added to col1
        get_result = list(self.mongo._db[self.col1].find({}))
        self.assertEqual(len(get_result), 1)
        self.assertEqual(dict(get_result[0]), self.val4)

    def test_insert_many_inserts_all_values_into_the_specified_collection(self):
        """insert_many stores every supplied document."""
        # Check that col1 is empty
        get_result = list(self.mongo._db[self.col1].find({}))
        self.assertEqual(len(get_result), 0)

        # Insert val1, val2, and val3 into col1
        self.mongo.insert_many(self.col1, [self.val1, self.val2, self.val3])

        # Check that the values was added to col1
        get_result = list(self.mongo._db[self.col1].find({}))
        self.assertEqual(len(get_result), 3)
        self.assertEqual(dict(get_result[0]), self.val1)
        self.assertEqual(dict(get_result[1]), self.val2)
        self.assertEqual(dict(get_result[2]), self.val3)

    def test_get_all_returns_inserted_values_in_order_of_insert(self):
        """get_all preserves insertion order of documents."""
        # Check that col1 is empty
        get_result = self.mongo.get_all(self.col1)
        self.assertEqual(len(get_result), 0)

        # Insert val1, val2, and val3 into col1
        self.mongo._db[self.col1].insert_many([self.val1, self.val2, self.val3])

        # Check that the values was added to col1
        get_result = self.mongo.get_all(self.col1)
        self.assertEqual(len(get_result), 3)
        self.assertEqual(dict(get_result[0]), self.val1)
        self.assertEqual(dict(get_result[1]), self.val2)
        self.assertEqual(dict(get_result[2]), self.val3)

    def test_drop_collection_deletes_the_specified_collection(self):
        """drop_collection removes only the named collection."""
        # Check that col1 and col2 are empty
        get_result1 = list(self.mongo._db[self.col1].find({}))
        get_result2 = list(self.mongo._db[self.col2].find({}))
        self.assertEqual(len(get_result1), 0)
        self.assertEqual(len(get_result2), 0)

        # Insert val1, val2, and val3 into col1 and val4 into col2
        self.mongo._db[self.col1].insert_many([self.val1, self.val2, self.val3])
        self.mongo._db[self.col2].insert_one(self.val4)

        # Check that col1 and col2 are not empty
        get_result1 = list(self.mongo._db[self.col1].find({}))
        get_result2 = list(self.mongo._db[self.col2].find({}))
        self.assertEqual(len(get_result1), 3)
        self.assertEqual(len(get_result2), 1)

        # Delete col1
        self.mongo.drop_collection(self.col1)

        # Check that col1 is back to being empty but col2 is not empty
        get_result1 = list(self.mongo._db[self.col1].find({}))
        get_result2 = list(self.mongo._db[self.col2].find({}))
        self.assertEqual(len(get_result1), 0)
        self.assertEqual(len(get_result2), 1)

    def test_drop_db_deletes_all_collections(self):
        """drop_db removes every collection in the database."""
        # Check that col1 and col2 are empty
        get_result1 = list(self.mongo._db[self.col1].find({}))
        get_result2 = list(self.mongo._db[self.col2].find({}))
        self.assertEqual(len(get_result1), 0)
        self.assertEqual(len(get_result2), 0)

        # Insert val1, val2, and val3 into col1 and val4 into col2
        self.mongo._db[self.col1].insert_many([self.val1, self.val2, self.val3])
        self.mongo._db[self.col2].insert_one(self.val4)

        # Check that col1 and col2 are not empty
        get_result1 = list(self.mongo._db[self.col1].find({}))
        get_result2 = list(self.mongo._db[self.col2].find({}))
        self.assertEqual(len(get_result1), 3)
        self.assertEqual(len(get_result2), 1)

        # Drop db
        self.mongo.drop_db()

        # Check that col1 and col2 are back to being empty
        get_result1 = list(self.mongo._db[self.col1].find({}))
        get_result2 = list(self.mongo._db[self.col2].find({}))
        self.assertEqual(len(get_result1), 0)
        self.assertEqual(len(get_result2), 0)

    def test_ping_returns_true(self):
        """ping_unsafe returns a truthy value when Mongo is reachable."""
        self.assertTrue(self.mongo.ping_unsafe())

    def test_ping_auth_throws_value_error_for_empty_password(self):
        """ping_auth rejects an empty password with ValueError."""
        try:
            self.mongo.ping_auth(self.user, '')
            self.fail('Expected ValueError exception to be thrown.')
        except ValueError:
            pass

    def test_ping_auth_throws_operation_failure_for_wrong_password(self):
        """ping_auth raises OperationFailure on bad credentials."""
        try:
            self.mongo.ping_auth(self.user, 'incorrect_password')
            self.fail('Expected OperationFailure exception to be thrown.')
        except OperationFailure:
            pass
class TestMongoApiWithMongoOffline(unittest.TestCase):
    """Tests of MongoApi behaviour when the server is unreachable.

    The API is pointed at a nonexistent host ('dummyhost') so every
    operation times out. First-time calls must raise
    ServerSelectionTimeoutError; once the API has been marked as down,
    data operations return None while pings keep raising.
    """

    def setUp(self) -> None:
        """Build a MongoApi aimed at an unreachable host."""
        self.logger = logging.getLogger('dummy')
        self.db = TestUserConf.mongo_db_name
        # Deliberately bogus host so connections always fail.
        self.host = 'dummyhost'
        self.port = TestUserConf.mongo_port
        self.user = TestUserConf.mongo_user
        self.password = TestUserConf.mongo_pass
        # NOTE(review): user/password are captured but not passed to
        # MongoApi here — presumably intentional since no auth can occur
        # against a dead host; verify against MongoApi's defaults.
        self.mongo = MongoApi(self.logger, self.db, self.host, self.port,
                              timeout_ms=1)
        # timeout_ms is set to 1ms to speed up tests. It cannot be 0 :p

        self.col1 = 'collection1'
        self.val1 = {'a': 'b', 'c': 'd'}
        self.val2 = {'e': 'f', 'g': 'h'}
        self.val3 = {'i': 'j'}

    def test_insert_one_throws_exception_first_time_round(self):
        """First insert_one against a dead server must raise."""
        try:
            self.mongo.insert_one(self.col1, self.val1)
            self.fail('Expected ServerSelectionTimeoutError to be thrown.')
        except ServerSelectionTimeoutError:
            pass

    def test_insert_many_throws_exception_first_time_round(self):
        """First insert_many against a dead server must raise."""
        try:
            self.mongo.insert_many(self.col1, [self.val1, self.val2, self.val3])
            self.fail('Expected ServerSelectionTimeoutError to be thrown.')
        except ServerSelectionTimeoutError:
            pass

    def test_get_all_throws_exception_first_time_round(self):
        """First get_all against a dead server must raise."""
        try:
            self.mongo.get_all(self.col1)
            self.fail('Expected ServerSelectionTimeoutError to be thrown.')
        except ServerSelectionTimeoutError:
            pass

    def test_drop_collection_throws_exception_first_time_round(self):
        """First drop_collection against a dead server must raise."""
        try:
            self.mongo.drop_collection(self.col1)
            self.fail('Expected ServerSelectionTimeoutError to be thrown.')
        except ServerSelectionTimeoutError:
            pass

    def test_drop_db_throws_exception_first_time_round(self):
        """First drop_db against a dead server must raise."""
        try:
            self.mongo.drop_db()
            self.fail('Expected ServerSelectionTimeoutError to be thrown.')
        except ServerSelectionTimeoutError:
            pass

    def test_ping_unsafe_throws_exception_first_time_round(self):
        """ping_unsafe propagates the timeout error."""
        try:
            self.mongo.ping_unsafe()
            self.fail('Expected ServerSelectionTimeoutError to be thrown.')
        except ServerSelectionTimeoutError:
            pass

    def test_ping_auth_throws_exception_first_time_round(self):
        """ping_auth propagates the timeout error."""
        try:
            self.mongo.ping_auth(username=self.user, password=self.password)
            self.fail('Expected ServerSelectionTimeoutError to be thrown.')
        except ServerSelectionTimeoutError:
            pass

    def test_insert_one_returns_none_if_mongo_already_down(self):
        """Once marked down, insert_one short-circuits to None."""
        self.mongo._set_as_down()
        self.assertIsNone(self.mongo.insert_one(self.col1, self.val1))

    def test_insert_many_returns_none_if_mongo_already_down(self):
        """Once marked down, insert_many short-circuits to None."""
        self.mongo._set_as_down()
        documents = [self.val1, self.val2, self.val3]
        self.assertIsNone(self.mongo.insert_many(self.col1, documents))

    def test_get_all_returns_none_if_mongo_already_down(self):
        """Once marked down, get_all short-circuits to None."""
        self.mongo._set_as_down()
        self.assertIsNone(self.mongo.get_all(self.col1))

    def test_drop_collection_returns_none_if_mongo_already_down(self):
        """Once marked down, drop_collection short-circuits to None."""
        self.mongo._set_as_down()
        self.assertIsNone(self.mongo.drop_collection(self.col1))

    def test_drop_db_returns_none_if_mongo_already_down(self):
        """Once marked down, drop_db short-circuits to None."""
        self.mongo._set_as_down()
        self.assertIsNone(self.mongo.drop_db())

    def test_ping_unsafe_throws_exception_if_mongo_already_down(self):
        """Pings are never short-circuited: ping_unsafe still raises."""
        self.mongo._set_as_down()
        try:
            self.mongo.ping_unsafe()
            self.fail('Expected ServerSelectionTimeoutError to be thrown.')
        except ServerSelectionTimeoutError:
            pass

    def test_ping_auth_throws_exception_if_mongo_already_down(self):
        """Pings are never short-circuited: ping_auth still raises."""
        self.mongo._set_as_down()
        try:
            self.mongo.ping_auth(username=self.user, password=self.password)
            self.fail('Expected ServerSelectionTimeoutError to be thrown.')
        except ServerSelectionTimeoutError:
            pass
class TestMongoApiLiveAndDownFeaturesWithMongoOffline(unittest.TestCase):
    """Tests for MongoApi's internal liveness bookkeeping.

    These exercise the _is_live flag and the recently-went-down guard;
    no live Mongo server is needed.
    """

    def setUp(self) -> None:
        self.logger = logging.getLogger('dummy')
        self.db = TestUserConf.mongo_db_name
        self.host = TestUserConf.mongo_host
        self.port = TestUserConf.mongo_port
        self.live_check_time_interval = timedelta(seconds=3)
        # Slightly longer than the interval so sleeping this long is
        # guaranteed to land past the guard window.
        self.live_check_time_interval_with_error_margin = timedelta(seconds=3.5)
        self.mongo = MongoApi(self.logger, self.db, self.host, self.port,
                              live_check_time_interval=
                              self.live_check_time_interval)

    def test_is_live_returns_true_by_default(self):
        self.assertTrue(self.mongo.is_live)

    def test_set_as_live_changes_is_live_to_true(self):
        self.mongo._is_live = False
        self.assertFalse(self.mongo.is_live)
        self.mongo._set_as_live()
        self.assertTrue(self.mongo._is_live)

    def test_set_as_live_leaves_is_live_as_true_if_already_true(self):
        self.mongo._is_live = True
        self.assertTrue(self.mongo.is_live)
        self.mongo._set_as_live()
        self.assertTrue(self.mongo._is_live)

    def test_set_as_down_changes_is_live_to_false(self):
        self.mongo._set_as_down()
        self.assertFalse(self.mongo.is_live)

    def test_set_as_down_leaves_is_live_as_false_if_already_false(self):
        self.mongo._is_live = False
        self.assertFalse(self.mongo.is_live)
        self.mongo._set_as_down()
        self.assertFalse(self.mongo.is_live)

    def test_allowed_to_use_by_default(self):
        """The guard must not raise on a freshly constructed API."""
        # noinspection PyBroadException
        try:
            self.mongo._do_not_use_if_recently_went_down()
        except Exception:
            self.fail('Expected to be allowed to use Mongo.')

    def test_not_allowed_to_use_if_set_as_down_and_within_time_interval(self):
        """The guard must raise while still inside the down window.

        BUG FIX: the original placed ``self.fail()`` inside the ``try``
        with a broad ``except Exception: pass``, so the AssertionError
        raised by ``fail()`` was swallowed and this test could never
        fail. Track whether the guard raised with a flag and assert
        outside the try/except instead.
        """
        self.mongo._set_as_down()
        raised = False
        # noinspection PyBroadException
        try:
            self.mongo._do_not_use_if_recently_went_down()
        except Exception:
            raised = True
        if not raised:
            self.fail('Expected to not be allowed to use Mongo.')

    def test_allowed_to_use_if_set_as_down_and_within_time_interval(self):
        """The guard must not raise once the down window has elapsed.

        NOTE(review): the name says "within" but the sleep deliberately
        waits *past* the interval; consider renaming to
        "...after_time_interval".
        """
        self.mongo._set_as_down()
        sleep(self.live_check_time_interval_with_error_margin.seconds)
        # noinspection PyBroadException
        try:
            self.mongo._do_not_use_if_recently_went_down()
        except Exception:
            self.fail('Expected to be allowed to use Mongo.')
| 37.967302
| 80
| 0.66083
| 1,802
| 13,934
| 4.848502
| 0.104329
| 0.082408
| 0.028957
| 0.039487
| 0.802106
| 0.751974
| 0.720728
| 0.713861
| 0.696005
| 0.671512
| 0
| 0.018211
| 0.239414
| 13,934
| 366
| 81
| 38.071038
| 0.80619
| 0.073346
| 0
| 0.662879
| 0
| 0
| 0.06265
| 0.018865
| 0
| 0
| 0
| 0
| 0.159091
| 1
| 0.136364
| false
| 0.083333
| 0.026515
| 0
| 0.174242
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
a79cca50f6cbb366d53dcf0c31ace63d59d6edd9
| 58
|
py
|
Python
|
spikeforest/sorters/spykingcircus/__init__.py
|
flatironinstitute/spikeforest
|
bbb5e38f35f66b09c327a593012d5468f4c46d30
|
[
"Apache-2.0"
] | 22
|
2019-05-07T18:18:06.000Z
|
2021-11-29T12:03:17.000Z
|
spikeforest/sorters/spykingcircus/__init__.py
|
flatironinstitute/spikeforest
|
bbb5e38f35f66b09c327a593012d5468f4c46d30
|
[
"Apache-2.0"
] | 79
|
2019-03-05T13:04:46.000Z
|
2021-11-05T09:27:09.000Z
|
spikeforest/sorters/spykingcircus/__init__.py
|
flatironinstitute/spikeforest
|
bbb5e38f35f66b09c327a593012d5468f4c46d30
|
[
"Apache-2.0"
] | 8
|
2019-06-04T18:05:28.000Z
|
2021-09-23T01:09:34.000Z
|
from .spykingcircus_wrapper1 import spykingcircus_wrapper1
| 58
| 58
| 0.931034
| 6
| 58
| 8.666667
| 0.666667
| 0.807692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036364
| 0.051724
| 58
| 1
| 58
| 58
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a7aa5d920c3e79218f8e2ab9802aca3ffbd6ddf8
| 5,003
|
py
|
Python
|
tests/test_kallisto_quant.py
|
Multiscale-Genomics/mg-process-fastq
|
50c7115c0c1a6af48dc34f275e469d1b9eb02999
|
[
"Apache-2.0"
] | 2
|
2017-07-31T11:45:46.000Z
|
2017-08-09T09:32:35.000Z
|
tests/test_kallisto_quant.py
|
Multiscale-Genomics/mg-process-fastq
|
50c7115c0c1a6af48dc34f275e469d1b9eb02999
|
[
"Apache-2.0"
] | 28
|
2016-11-17T11:12:32.000Z
|
2018-11-02T14:09:13.000Z
|
tests/test_kallisto_quant.py
|
Multiscale-Genomics/mg-process-fastq
|
50c7115c0c1a6af48dc34f275e469d1b9eb02999
|
[
"Apache-2.0"
] | 4
|
2017-02-12T17:47:21.000Z
|
2018-05-29T08:16:27.000Z
|
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import pytest
from basic_modules.metadata import Metadata
from tool.kallisto_quant import kallistoQuantificationTool
@pytest.mark.rnaseq
def test_kallisto_quant_paired():
    """End-to-end check of the Kallisto quantifier on a paired-end run.

    Runs kallistoQuantificationTool over the bundled GRCh38 test data
    and verifies that every expected output file is produced and
    non-empty, cleaning the files up afterwards.
    """
    resource_path = os.path.join(os.path.dirname(__file__), "data/")

    input_files = {
        "cdna": resource_path + "kallisto.Human.GRCh38.fasta",
        "index": resource_path + "kallisto.Human.GRCh38.idx",
        "fastq1": resource_path + "kallisto.Human.ERR030872_1.fastq",
        "fastq2": resource_path + "kallisto.Human.ERR030872_2.fastq",
        "gff": resource_path + "kallisto.Human.GRCh38.gff3"
    }

    output_files = {
        "abundance_h5_file": resource_path + "kallisto.Human.ERR030872.paired.abundance.h5",
        "abundance_tsv_file": resource_path + "kallisto.Human.ERR030872.paired.abundance.tsv",
        "abundance_gff_file": resource_path + "kallisto.Human.ERR030872.paired.abundance.gff",
        "run_info_file": resource_path + "kallisto.Human.ERR030872.paired.run_info.json"
    }

    # Every input shares the same assembly metadata; only the data type
    # and file type differ per key.
    assembly_meta = {"assembly": "GCA_000001405.22", "ensembl": True}
    type_map = {
        "cdna": ("data_cdna", "fasta"),
        "index": ("data_cdna", "fasta"),
        "fastq1": ("data_rnaseq", "fastq"),
        "fastq2": ("data_rnaseq", "fastq"),
        "gff": ("data_seq", "gff"),
    }
    metadata = {
        key: Metadata(data_type, file_type, [], None, dict(assembly_meta))
        for key, (data_type, file_type) in type_map.items()
    }

    kqft = kallistoQuantificationTool({"execution": resource_path})
    rs_files, rs_meta = kqft.run(input_files, metadata, output_files)

    # The tool must report metadata for all four expected outputs.
    assert len(rs_meta) == 4

    # Each returned file must match the requested path, exist on disk and
    # be non-empty; remove it so reruns start clean.
    for result_key in rs_files:
        print("RNA SEQ RESULTS FILE:", result_key)
        assert rs_files[result_key] == output_files[result_key]
        assert os.path.isfile(rs_files[result_key]) is True
        assert os.path.getsize(rs_files[result_key]) > 0
        os.remove(rs_files[result_key])
@pytest.mark.rnaseq
def test_kallisto_quant_single():
    """End-to-end check of the Kallisto quantifier on a single-end run.

    Same as the paired-end test but without a second FASTQ file; verifies
    all expected outputs exist and are non-empty, then removes them.
    """
    resource_path = os.path.join(os.path.dirname(__file__), "data/")

    input_files = {
        "cdna": resource_path + "kallisto.Human.GRCh38.fasta",
        "index": resource_path + "kallisto.Human.GRCh38.idx",
        "fastq1": resource_path + "kallisto.Human.ERR030872_1.fastq",
        "gff": resource_path + "kallisto.Human.GRCh38.gff3"
    }

    output_files = {
        "abundance_h5_file": resource_path + "kallisto.Human.ERR030872.single.abundance.h5",
        "abundance_tsv_file": resource_path + "kallisto.Human.ERR030872.single.abundance.tsv",
        "abundance_gff_file": resource_path + "kallisto.Human.ERR030872.single.abundance.gff",
        "run_info_file": resource_path + "kallisto.Human.ERR030872.single.run_info.json"
    }

    # Shared assembly metadata; data type / file type vary per key.
    assembly_meta = {"assembly": "GCA_000001405.22", "ensembl": True}
    type_map = {
        "cdna": ("data_cdna", "fasta"),
        "index": ("data_cdna", "fasta"),
        "fastq1": ("data_rnaseq", "fastq"),
        "gff": ("data_seq", "gff"),
    }
    metadata = {
        key: Metadata(data_type, file_type, [], None, dict(assembly_meta))
        for key, (data_type, file_type) in type_map.items()
    }

    kqft = kallistoQuantificationTool({"execution": resource_path})
    rs_files, rs_meta = kqft.run(input_files, metadata, output_files)

    # The tool must report metadata for all four expected outputs.
    assert len(rs_meta) == 4

    # Each returned file must match the requested path, exist on disk and
    # be non-empty; remove it so reruns start clean.
    for result_key in rs_files:
        print("RNA SEQ RESULTS FILE:", result_key)
        assert rs_files[result_key] == output_files[result_key]
        assert os.path.isfile(rs_files[result_key]) is True
        assert os.path.getsize(rs_files[result_key]) > 0
        os.remove(rs_files[result_key])
| 37.901515
| 94
| 0.647811
| 612
| 5,003
| 5.098039
| 0.238562
| 0.080769
| 0.108974
| 0.136218
| 0.783654
| 0.772756
| 0.772756
| 0.745833
| 0.734295
| 0.734295
| 0
| 0.051507
| 0.223866
| 5,003
| 131
| 95
| 38.19084
| 0.751996
| 0.186888
| 0
| 0.729412
| 0
| 0
| 0.323069
| 0.151478
| 0
| 0
| 0
| 0
| 0.094118
| 1
| 0.023529
| false
| 0
| 0.058824
| 0
| 0.082353
| 0.035294
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
38f03b5d70925ad0eddcc1f3cf1e0bed6742b736
| 264
|
py
|
Python
|
tests/lcs/agents/test_ImmutableSequence.py
|
GodspeedYouBlackEmperor/pyalcs
|
9811bc5cde935e04e0fd87fb5930bd1b9170e73a
|
[
"MIT"
] | 11
|
2018-02-13T05:37:26.000Z
|
2022-02-02T13:33:18.000Z
|
tests/lcs/agents/test_ImmutableSequence.py
|
GodspeedYouBlackEmperor/pyalcs
|
9811bc5cde935e04e0fd87fb5930bd1b9170e73a
|
[
"MIT"
] | 40
|
2017-09-07T07:15:43.000Z
|
2021-06-09T15:42:27.000Z
|
tests/lcs/agents/test_ImmutableSequence.py
|
GodspeedYouBlackEmperor/pyalcs
|
9811bc5cde935e04e0fd87fb5930bd1b9170e73a
|
[
"MIT"
] | 14
|
2017-10-31T09:01:14.000Z
|
2022-01-02T09:38:29.000Z
|
from lcs.agents import ImmutableSequence
class TestImmutableSequence:
    def test_should_hash(self):
        """Equal sequences must hash equal; differing ones must not collide here."""
        first = ImmutableSequence('111')
        twin = ImmutableSequence('111')
        other = ImmutableSequence('112')
        assert hash(first) == hash(twin)
        assert hash(first) != hash(other)
| 33
| 79
| 0.731061
| 26
| 264
| 7.346154
| 0.576923
| 0.439791
| 0.376963
| 0.314136
| 0.534031
| 0.534031
| 0
| 0
| 0
| 0
| 0
| 0.053097
| 0.143939
| 264
| 7
| 80
| 37.714286
| 0.792035
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 0
| 0
| 0
| 0
| 0
| 0.4
| 1
| 0.2
| false
| 0
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
ac58635d450d9a8f07144a8e8a20f551706592c5
| 91
|
py
|
Python
|
python/uptune/opentuner/__init__.py
|
Hecmay/uptune
|
20a1462c772041b8d1b99f326b372284896faaba
|
[
"BSD-3-Clause"
] | 29
|
2020-06-19T18:07:38.000Z
|
2022-01-03T23:06:53.000Z
|
python/uptune/opentuner/__init__.py
|
Hecmay/uptune
|
20a1462c772041b8d1b99f326b372284896faaba
|
[
"BSD-3-Clause"
] | 4
|
2020-07-14T16:20:23.000Z
|
2021-05-15T13:56:24.000Z
|
python/uptune/opentuner/__init__.py
|
Hecmay/uptune
|
20a1462c772041b8d1b99f326b372284896faaba
|
[
"BSD-3-Clause"
] | 2
|
2020-06-20T00:43:23.000Z
|
2020-12-26T00:38:31.000Z
|
from . import measurement
from . import resultsdb
from . import search
from . import utils
| 18.2
| 25
| 0.78022
| 12
| 91
| 5.916667
| 0.5
| 0.56338
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175824
| 91
| 4
| 26
| 22.75
| 0.946667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3bd46371695c977559b9659fad44fa911f1540d5
| 73
|
py
|
Python
|
iadmin/tests/selenium_tests/__init__.py
|
saxix/django-iadmin
|
675317e8f0b4142eaf351595da27c065637a83ba
|
[
"BSD-1-Clause"
] | 1
|
2015-06-23T09:24:12.000Z
|
2015-06-23T09:24:12.000Z
|
iadmin/tests/selenium_tests/__init__.py
|
saxix/django-iadmin
|
675317e8f0b4142eaf351595da27c065637a83ba
|
[
"BSD-1-Clause"
] | null | null | null |
iadmin/tests/selenium_tests/__init__.py
|
saxix/django-iadmin
|
675317e8f0b4142eaf351595da27c065637a83ba
|
[
"BSD-1-Clause"
] | null | null | null |
#from importer import *
from changelist import *
from templates import *
| 18.25
| 24
| 0.780822
| 9
| 73
| 6.333333
| 0.555556
| 0.350877
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164384
| 73
| 3
| 25
| 24.333333
| 0.934426
| 0.30137
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3bd86fb8fc21c8420ec0cff63339bccad91b8ded
| 83
|
py
|
Python
|
app/run/__init__.py
|
imperial-genomics-facility/sample_tracking_database
|
fef8948e6f7974479385e9cb6d9ad5cadbab7dda
|
[
"Apache-2.0"
] | null | null | null |
app/run/__init__.py
|
imperial-genomics-facility/sample_tracking_database
|
fef8948e6f7974479385e9cb6d9ad5cadbab7dda
|
[
"Apache-2.0"
] | null | null | null |
app/run/__init__.py
|
imperial-genomics-facility/sample_tracking_database
|
fef8948e6f7974479385e9cb6d9ad5cadbab7dda
|
[
"Apache-2.0"
] | null | null | null |
from flask import Blueprint

# Blueprint grouping the "runs" views; the application factory registers it.
runs = Blueprint('runs',__name__)

# NOTE(review): views is imported after the blueprint exists, presumably so
# views.py can import `runs` without a circular import -- confirm in views.py.
from . import views
| 16.6
| 33
| 0.771084
| 11
| 83
| 5.454545
| 0.636364
| 0.433333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144578
| 83
| 5
| 34
| 16.6
| 0.84507
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
3be284938f8d22bd80f7b443176fc370455b4eed
| 247
|
py
|
Python
|
pygmx/errors.py
|
mdevaluate/pygmx
|
a7e02ba47cd17b58141351724a72fe95b17a55e8
|
[
"BSD-3-Clause"
] | null | null | null |
pygmx/errors.py
|
mdevaluate/pygmx
|
a7e02ba47cd17b58141351724a72fe95b17a55e8
|
[
"BSD-3-Clause"
] | null | null | null |
pygmx/errors.py
|
mdevaluate/pygmx
|
a7e02ba47cd17b58141351724a72fe95b17a55e8
|
[
"BSD-3-Clause"
] | null | null | null |
"""Exceptions of the pygmx package."""
class InvalidMagicException(Exception):
    """Signals an unexpected magic number while reading a file."""


class InvalidIndexException(Exception):
    """Signals an invalid index encountered by the reader."""


class UnknownLenError(Exception):
    """Signals that a length could not be determined."""


class FileTypeError(Exception):
    """Signals an unsupported or unrecognized file type."""


class XTCError(Exception):
    """Signals an error specific to XTC trajectory handling."""
| 14.529412
| 44
| 0.789474
| 25
| 247
| 7.8
| 0.52
| 0.333333
| 0.369231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121457
| 247
| 16
| 45
| 15.4375
| 0.898618
| 0.129555
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
3bf6012456ca8c88f636902e849d5c1dc579d890
| 62
|
py
|
Python
|
blues/predictors/__init__.py
|
Kageshimasu/blues
|
a808fb8da86224f2e597916b04bdbd29376af6bb
|
[
"MIT"
] | null | null | null |
blues/predictors/__init__.py
|
Kageshimasu/blues
|
a808fb8da86224f2e597916b04bdbd29376af6bb
|
[
"MIT"
] | null | null | null |
blues/predictors/__init__.py
|
Kageshimasu/blues
|
a808fb8da86224f2e597916b04bdbd29376af6bb
|
[
"MIT"
] | 1
|
2021-02-15T07:54:17.000Z
|
2021-02-15T07:54:17.000Z
|
from .classification_predictor import ClassificationPredictor
| 31
| 61
| 0.919355
| 5
| 62
| 11.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064516
| 62
| 1
| 62
| 62
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ce0402d905ddb1b17dee720171e4c0c59acc609b
| 43,581
|
py
|
Python
|
utils/load_data.py
|
lujunzju/MachineLearningForAirTicketPredicting
|
e64b6c75a00a8b2a74d67d132f6e5b852db9c974
|
[
"MIT"
] | 47
|
2017-06-28T07:45:04.000Z
|
2022-01-31T09:15:13.000Z
|
utils/load_data.py
|
lujunzju/MachineLearningForAirTicketPredicting
|
e64b6c75a00a8b2a74d67d132f6e5b852db9c974
|
[
"MIT"
] | 2
|
2017-08-28T07:59:17.000Z
|
2018-03-02T06:37:08.000Z
|
utils/load_data.py
|
lujunzju/MachineLearningForAirTicketPredicting
|
e64b6c75a00a8b2a74d67d132f6e5b852db9c974
|
[
"MIT"
] | 20
|
2017-09-01T13:46:25.000Z
|
2021-05-05T12:47:16.000Z
|
# system-library
import json
import os
import numpy as np
# user-library
import util as util
"""
# data prepare for the specific data set
"""
routes_specific = ["BCN_BUD", # route 1
"BUD_BCN", # route 2
"CRL_OTP", # route 3
"MLH_SKP", # route 4
"MMX_SKP", # route 5
"OTP_CRL", # route 6
"SKP_MLH", # route 7
"SKP_MMX"] # route 8
# for currency change - change different currency to Euro
currency_specific = [1, # route 1 - Euro
0.0032, # route 2 - Hungarian Forint
1, # route 3 - Euro
1, # route 4 - Euro
0.12, # route 5 - Swedish Krona
0.25, # route 6 - Romanian Leu
0.018, # route 7 - Macedonian Denar
0.018 # route 8 - Macedonian Denar
]
"""
# data prepare for the general data set
"""
routes_general = ["BGY_OTP", # route 1
"BUD_VKO", # route 2
"CRL_OTP", # route 3
"CRL_WAW", # route 4
"LTN_OTP", # route 5
"LTN_PRG", # route 6
"OTP_BGY", # route 7
"OTP_CRL", # route 8
"OTP_LTN", # route 9
"PRG_LTN", # route 10
"VKO_BUD", # route 11
"WAW_CRL"] # route 12
# for currency change - change different currency to Euro
currency_general = [1, # route 1 - Euro
0.0032, # route 2 - Hungarian Forint
1, # route 3 - Euro
1, # route 4 - Euro
1, # route 5 - Euro
1, # route 6 - Euro
0.25, # route 7 - Romanian Leu
0.25, # route 8 - Romanian Leu
0.25, # route 9 - Romanian Leu
0.037, # route 10 - Czech Republic Koruna
1, # route 11 - Euro
0.23 # route 12 - Polish Zloty
]
def is_not_nullprice(data):
    """
    Predicate used with filter() to drop null schedule entries.

    :param data: one decoded schedule entry (dict), or a falsy placeholder
    :return: True if the entry exists and its "MinimumPrice" is set,
             False otherwise
    """
    # `is not None` is the idiomatic identity test; bool(...) normalises the
    # result (the old `data and ...` form could also return the falsy `data`
    # object itself -- only truthiness was ever used by filter()).
    return bool(data) and data["MinimumPrice"] is not None
def check_if_only_one_flightNum(datas):
    """
    Check whether the datas only contain one flight number.

    :param datas: decoded schedule entries, each carrying a "Flights" list
    :return: True if exactly one distinct flight number occurs, False otherwise
    """
    flight_nums = set()
    for data in datas:
        for kind in data["Flights"]:
            flight_nums.add(kind["FlightNumber"])
    # A set replaces util.remove_duplicates(): only the count of distinct
    # flight numbers matters here, and it also returns a bool directly
    # instead of the `if/else: return True/False` form.
    return len(flight_nums) == 1
def load_data_with_prefix_and_dataset(filePrefix="BCN_BUD", dataset="Specific"):
"""
load the data in the 'dataset' with 'filePrefix'
:param filePrefix: choose which route
:param dataset: dataset name('Specific' or 'General')
:return: decoded data
"""
currentDir = os.path.dirname(os.path.realpath(__file__))
observeDatesDirs = os.listdir(currentDir + "/data/" + dataset) # path directory of each observed date in the dataset
filePaths = [] # keep all the file paths start with "filePrefix"
data_decoded = [] # keep all the schedules start with "filePrefix"
for date in observeDatesDirs:
currentPath = currentDir + "/data/" + dataset + "/" + date
try:
files = os.listdir(currentPath) # file names in currect date directory
for file in files:
try:
if filePrefix in file:
filePath = os.path.join(currentPath, file)
filePaths.append(filePath)
fp = open(filePath, 'r')
datas_with_specific_date = json.load(fp)
# add observed data
for data in datas_with_specific_date:
#"Date" is the departure date, "ObservedDate" is the observed date
data["ObservedDate"] = date.replace("-", "")
data["State"] = util.days_between(data["Date"], data["ObservedDate"]) - 1
data_decoded += datas_with_specific_date # do not use append function
except:
print "Not a json file"
except:
print "Not a directory, MAC OS contains .DS_Store file."
# filter the null entries
data_decoded = filter(is_not_nullprice, data_decoded)
return data_decoded
def load_data_with_daysBeforeTakeoff_and_sameFlightNum(days, filePrefix="BCN_BUD", dataset="Specific"):
"""
Load data with same flight number and the same days before takeoff.
i.e. same equivalence class
But in out dataset, one route means one flight number.
:param days: the days before takeoff
:param filePrefix: choose which route
:param dataset: dataset name('Specific' or 'General')
:return: data with same flight number and the same days before takeoff
"""
datas = load_data_with_prefix_and_dataset(filePrefix, dataset)
output = [data for data in datas if util.days_between(data["ObservedDate"], data["Date"]) == days]
return output
def get_departure_len(filePrefix="BCN_BUD", dataset="Specific"):
"""
So far, used in QLearning, return the total departure date length in the chosen dataset.
"""
datas = load_data_with_prefix_and_dataset(filePrefix, dataset)
# get different departure data in the same flight number,
# to compute the Q Values for such (flight number, departure date) pair.
departureDates = []
[departureDates.append(data["Date"]) for data in datas]
departureDates = util.remove_duplicates(departureDates)
return len(departureDates)
def load_data_with_departureIndex(departureIndex, filePrefix="BCN_BUD", dataset="Specific"):
"""
Given the departureIndex, return the dataset with specific departure date in the chosen dataset.
"""
datas = load_data_with_prefix_and_dataset(filePrefix, dataset)
# get different departure data in the same flight number,
# to compute the Q Values for such (flight number, departure date) pair.
departureDates = []
[departureDates.append(data["Date"]) for data in datas]
departureDates = util.remove_duplicates(departureDates)
# choose the departure date by departureIndex
departureDate = departureDates[departureIndex]
print "Evaluating departure date " + departureDate + "..."
"""
# remove duplicate observedDate-departureDate pair
observedDates = []
[observedDates.append(data["ObservedDate"]) for data in datas if data["Date"]==departureDate]
observedDates = util.remove_duplicates(observedDates)
states = len(observedDates)
#print states
"""
specificDatas = []
specificDatas = [data for data in datas if data["Date"]==departureDate]
#states = []
#[states.append(data["State"]) for data in specificDatas]
#print max(states)
return specificDatas
def load_data_with_departureDate(departureDate, filePrefix="BCN_BUD", dataset="Specific"):
"""
Given the departureIndex, return the dataset with specific departure date in the chosen dataset.
"""
datas = load_data_with_prefix_and_dataset(filePrefix, dataset)
print "Evaluating departure date " + departureDate + "..."
"""
# remove duplicate observedDate-departureDate pair
observedDates = []
[observedDates.append(data["ObservedDate"]) for data in datas if data["Date"]==departureDate]
observedDates = util.remove_duplicates(observedDates)
states = len(observedDates)
#print states
"""
specificDatas = []
specificDatas = [data for data in datas if data["Date"]==departureDate]
return specificDatas
def getMinimumPrice(datas):
    """
    Given the dataset, return the minimum price in the dataset.

    :param datas: input dataset (in QLearning and Neural Nets, it should have
        same departure date); must be non-empty
    :return: minimum price in the dataset
    """
    # In our json data files, "MinimumPrice" means the price on that day.
    # min() over a generator replaces the manual loop, the conditional
    # expression, and the no-op `minimumPrice = minimumPrice` line.
    return min(util.getPrice(data["MinimumPrice"]) for data in datas)
def getOptimalState(datas):
    """
    Given the dataset, return the state corresponding to minimum price in the dataset

    :param datas: input dataset(in QLearning and Neural Nets, it should have same departure date)
    :return: minimum price state in the dataset
    """
    optimalState = 0
    # in our json data files, MinimumPrice means the price in that day
    minimumPrice = util.getPrice(datas[0]["MinimumPrice"])
    for data in datas:
        price = util.getPrice(data["MinimumPrice"])
        state = data["State"]
        # Order matters: optimalState must be updated before minimumPrice so
        # both comparisons use the pre-update minimum for this iteration.
        optimalState = state if price<minimumPrice else optimalState
        minimumPrice = price if price<minimumPrice else minimumPrice
    # NOTE(review): if the overall minimum is datas[0] itself, the first
    # comparison is `price < price` (False), so the initial 0 is returned
    # instead of datas[0]["State"] -- confirm this default is intended.
    return optimalState
def getMaximumPrice(datas):
    """
    Given the dataset, return the maximum price in the dataset.

    :param datas: input dataset (in QLearning and Neural Nets, it should have
        same departure date); must be non-empty
    :return: maximum price in the dataset
    """
    # In our json data files, "MinimumPrice" means the price on that day.
    # max() over a generator replaces the manual loop + conditional expression.
    return max(util.getPrice(data["MinimumPrice"]) for data in datas)
def getChosenPrice(state, datas):
    """
    Given the state, i.e. the days before departure, and the dataset, return
    the price observed at that state.

    :param state: the days before departure
    :param datas: input dataset (in QLearning, it should have same departure date)
    :return: the chosen price (implicitly None if no entry matches)
    """
    for entry in datas:
        if entry["State"] != state:
            continue
        return util.getPrice(entry["MinimumPrice"])
def getMinimumPreviousPrice(departureDate, state, datas):
    """
    Get the minimum previous price, corresponding to the departure date and
    the observed date.

    :param departureDate: departure date
    :param state: observed date state (days before takeoff)
    :param datas: datasets
    :return: minimum previous price
    """
    specificDatas = [data for data in datas if data["Date"] == departureDate]
    # Seed with the first entry's price (matches the original behavior even
    # when that entry's State is below `state`).
    minimumPreviousPrice = util.getPrice(specificDatas[0]["MinimumPrice"])
    for data in specificDatas:
        # only entries observed no later than `state` days before takeoff count
        if data["State"] >= state:
            price = util.getPrice(data["MinimumPrice"])  # fetched once, not twice
            if price < minimumPreviousPrice:
                minimumPreviousPrice = price
    return minimumPreviousPrice
def getMaximumPreviousPrice(departureDate, state, datas):
    """
    Get the maximum previous price, corresponding to the departure date and
    the observed date.

    :param departureDate: departure date
    :param state: observed date state (days before takeoff)
    :param datas: datasets
    :return: maximum previous price
    """
    specificDatas = [data for data in datas if data["Date"] == departureDate]
    # Seed with the first entry's price (matches the original behavior even
    # when that entry's State is below `state`).
    maximumPreviousPrice = util.getPrice(specificDatas[0]["MinimumPrice"])
    for data in specificDatas:
        # only entries observed no later than `state` days before takeoff count
        if data["State"] >= state:
            price = util.getPrice(data["MinimumPrice"])  # fetched once, not twice
            if price > maximumPreviousPrice:
                maximumPreviousPrice = price
    return maximumPreviousPrice
"""
# step 1. The main data load function - for classification for specific dataset
"""
def load_for_classification_for_Specific(dataset="Specific", routes=routes_specific):
"""
Load the data for classification
:param dataset: dataset name('Specific' or 'General')
:return: X_train, y_train, X_test, y_test
"""
isOneOptimalState = False
# Construct the input data
dim = routes.__len__() + 4
X_train = np.empty(shape=(0, dim))
y_train = np.empty(shape=(0,1))
y_train_price = np.empty(shape=(0,1))
X_test = np.empty(shape=(0,dim))
y_test = np.empty(shape=(0,1))
y_test_price = np.empty(shape=(0,1))
for filePrefix in routes:
datas = load_data_with_prefix_and_dataset(filePrefix, dataset)
for data in datas:
print "Construct route {}, State {}, departureDate {}...".format(filePrefix, data["State"], data["Date"])
x_i = []
# feature 1: flight number -> dummy variables
for i in range(len(routes)):
"""
!!!need to change!
"""
if i == routes.index(filePrefix):
x_i.append(1)
else:
x_i.append(0)
# feature 2: departure date interval from "20151109", because the first observed date is 20151109
departureDate = data["Date"]
"""
!!!maybe need to change the first observed date
"""
departureDateGap = util.days_between(departureDate, "20151109")
x_i.append(departureDateGap)
# feature 3: observed days before departure date
state = data["State"]
x_i.append(state)
# feature 4: minimum price before the observed date
minimumPreviousPrice = getMinimumPreviousPrice(data["Date"], state, datas)
x_i.append(minimumPreviousPrice)
# feature 5: maximum price before the observed date
maximumPreviousPrice = getMaximumPreviousPrice(data["Date"], state, datas)
x_i.append(maximumPreviousPrice)
# output
y_i = [0]
specificDatas = []
specificDatas = [data2 for data2 in datas if data2["Date"]==departureDate]
# if isOneOptimalState:
# # Method 1: only 1 entry is buy
# optimalState = getOptimalState(specificDatas)
# if data["State"] == optimalState:
# y_i = [1]
# else:
# # Method 2: multiple entries can be buy
# minPrice = getMinimumPrice(specificDatas)
# if util.getPrice(data["MinimumPrice"]) == minPrice:
# y_i = [1]
#Method 2: multiple entries can be buy
minPrice = getMinimumPrice(specificDatas)
if util.getPrice(data["MinimumPrice"]) == minPrice:
y_i = [1]
# keep price info
y_price = [util.getPrice(data["MinimumPrice"])]
if int(departureDate) < 20160229 and int(departureDate) >= 20151129: # choose date between "20151129-20160229(20160115)" as training data
X_train = np.concatenate((X_train, [x_i]), axis=0)
y_train = np.concatenate((y_train, [y_i]), axis=0)
y_train_price = np.concatenate((y_train_price, [y_price]), axis=0)
elif int(departureDate) < 20160508 and int(departureDate) >= 20160229: # choose date before "20160508(20160220)" as test data
X_test = np.concatenate((X_test, [x_i]), axis=0)
y_test = np.concatenate((y_test, [y_i]), axis=0)
y_test_price = np.concatenate((y_test_price, [y_price]), axis=0)
else:
pass
# X_train = np.concatenate((X_train, [x_i]), axis=0)
# y_train = np.concatenate((y_train, [y_i]), axis=0)
# y_train_price = np.concatenate((y_train_price, [y_price]), axis=0)
# end of for datas
# end of for routes
"""
remove duplicate rows for train
"""
tmp_train = np.concatenate((X_train, y_train, y_train_price), axis=1)
new_array = [tuple(row) for row in tmp_train]
tmp_train = np.unique(new_array)
# get the result
X_train = tmp_train[:, 0:12]
y_train = tmp_train[:, 12]
y_train_price = tmp_train[:, 13]
"""
remove duplicate rows for test
"""
tmp_test = np.concatenate((X_test, y_test, y_test_price), axis=1)
new_array = [tuple(row) for row in tmp_test]
tmp_test = np.unique(new_array)
# get the result
X_test = tmp_test[:, 0:12]
y_test = tmp_test[:, 12]
y_test_price = tmp_test[:, 13]
# save the result
np.save('inputSpecificRaw/X_train', X_train)
np.save('inputSpecificRaw/y_train', y_train)
np.save('inputSpecificRaw/y_train_price', y_train_price)
np.save('inputSpecificRaw/X_test', X_test)
np.save('inputSpecificRaw/y_test', y_test)
np.save('inputSpecificRaw/y_test_price', y_test_price)
return X_train, y_train, X_test, y_test
"""
# step 1. The main data load function - for classification for the general dataset
"""
def load_for_classification_for_General(dataset="General", routes=routes_general):
"""
Load the data for classification
:param dataset: dataset name('Specific' or 'General')
:return: X_train, y_train, X_test, y_test
"""
isOneOptimalState = False
# Construct the input data
dim = routes.__len__() + 4
X_train = np.empty(shape=(0, dim))
y_train = np.empty(shape=(0,1))
y_train_price = np.empty(shape=(0,1))
for filePrefix in routes:
print filePrefix
datas = load_data_with_prefix_and_dataset(filePrefix, dataset)
for data in datas:
print "Construct route {}, State {}, departureDate {}...".format(filePrefix, data["State"], data["Date"])
x_i = []
# feature 1: flight number -> dummy variables
for i in range(len(routes)):
"""
!!!need to change!
"""
if i == routes.index(filePrefix):
x_i.append(1)
else:
x_i.append(0)
# feature 2: departure date interval from "20151109", because the first observed date is 20151109
departureDate = data["Date"]
"""
!!!maybe need to change the first observed date
"""
departureDateGap = util.days_between(departureDate, "20151109")
x_i.append(departureDateGap)
# feature 3: observed days before departure date
state = data["State"]
x_i.append(state)
# feature 4: minimum price before the observed date
minimumPreviousPrice = getMinimumPreviousPrice(data["Date"], state, datas)
x_i.append(minimumPreviousPrice)
# feature 5: maximum price before the observed date
maximumPreviousPrice = getMaximumPreviousPrice(data["Date"], state, datas)
x_i.append(maximumPreviousPrice)
# output
y_i = [0]
specificDatas = []
specificDatas = [data2 for data2 in datas if data2["Date"]==departureDate]
minPrice = getMinimumPrice(specificDatas)
if util.getPrice(data["MinimumPrice"]) == minPrice:
y_i = [1]
# keep price info
y_price = [util.getPrice(data["MinimumPrice"])]
X_train = np.concatenate((X_train, [x_i]), axis=0)
y_train = np.concatenate((y_train, [y_i]), axis=0)
y_train_price = np.concatenate((y_train_price, [y_price]), axis=0)
# end of for datas
# end of for routes
"""
remove duplicate rows
"""
tmp = np.concatenate((X_train, y_train, y_train_price), axis=1)
new_array = [tuple(row) for row in tmp]
tmp = np.unique(new_array)
# # get the result
# X_train = tmp[:, 0:16]
# y_train = tmp[:, 16]
# y_train_price = tmp[:, 17]
# save the result
np.save('inputGeneralRaw/X_train', X_train)
np.save('inputGeneralRaw/y_train', y_train)
np.save('inputGeneralRaw/y_train_price', y_train_price)
np.save('inputGeneralRaw/tmp', tmp)
return X_train, y_train, y_train_price
"""
# step 2. price normalize for the classification input - for specific
"""
def priceNormalize_for_Specific(routes=routes_specific, currency=currency_specific):
    """
    Different routes have different units for the price, normalize it as Euro.

    Multiplies the price columns (min price, max price, current price) of each
    route's rows by that route's currency factor, then re-saves the split
    arrays under ../Classification/inputClf_small.

    :return: NA
    example: priceNormalize_for_Specific()
    """
    """
    Get the input specific clf data for the training data set
    """
    # feature 0~7: flight number dummy variables
    # feature 8: departure date; feature 9: observed date state;
    # feature 10: minimum price; feature 11: maximum price
    X_train = np.load('inputSpecificRaw/X_train.npy')
    y_train = np.load('inputSpecificRaw/y_train.npy')
    y_train_price = np.load('inputSpecificRaw/y_train_price.npy')
    # normalize feature 10, feature 11, feature 13
    # feature 0~7: flight number dummy variables
    # feature 8: departure date; feature 9: observed date state;
    # feature 10: minimum price; feature 11: maximum price
    # fearure 12: prediction(buy or wait); feature 13: price
    evalMatrix_train = np.concatenate((X_train, y_train, y_train_price), axis=1)
    matrixTrain = np.empty(shape=(0, evalMatrix_train.shape[1]))
    for i in range(len(routes)):
        # rows of route i are those whose dummy column i equals 1
        evalMatrix = evalMatrix_train[np.where(evalMatrix_train[:, i]==1)[0], :]
        evalMatrix[:, 10] *= currency[i]
        evalMatrix[:, 11] *= currency[i]
        evalMatrix[:, 13] *= currency[i]
        matrixTrain = np.concatenate((matrixTrain, evalMatrix), axis=0)
    X_train = matrixTrain[:, 0:12]
    y_train = matrixTrain[:, 12]
    y_train_price = matrixTrain[:, 13]
    y_train = y_train.reshape((y_train.shape[0], 1))
    y_train_price = y_train_price.reshape((y_train_price.shape[0], 1))
    np.save('../Classification/inputClf_small/X_train', X_train)
    np.save('../Classification/inputClf_small/y_train', y_train)
    np.save('../Classification/inputClf_small/y_train_price', y_train_price)
    """
    Get the input specific clf data for the test data set
    """
    # feature 0~7: flight number dummy variables
    # feature 8: departure date; feature 9: observed date state;
    # feature 10: minimum price; feature 11: maximum price
    X_test = np.load('inputSpecificRaw/X_test.npy')
    y_test = np.load('inputSpecificRaw/y_test.npy')
    y_test_price = np.load('inputSpecificRaw/y_test_price.npy')
    # normalize feature 10, feature 11, feature 13
    # feature 0~7: flight number dummy variables
    # feature 8: departure date; feature 9: observed date state;
    # feature 10: minimum price; feature 11: maximum price
    # fearure 12: prediction(buy or wait); feature 13: price
    evalMatrix_test = np.concatenate((X_test, y_test, y_test_price), axis=1)
    # keep only test rows whose departure-date gap (column 8) is >= 20
    evalMatrix_test = evalMatrix_test[np.where(evalMatrix_test[:,8]>=20)[0], :]
    matrixTest = np.empty(shape=(0, evalMatrix_test.shape[1]))
    for i in range(len(routes)):
        evalMatrix = evalMatrix_test[np.where(evalMatrix_test[:, i]==1)[0], :]
        evalMatrix[:, 10] *= currency[i]
        evalMatrix[:, 11] *= currency[i]
        evalMatrix[:, 13] *= currency[i]
        matrixTest = np.concatenate((matrixTest, evalMatrix), axis=0)
    X_test = matrixTest[:, 0:12]
    y_test = matrixTest[:, 12]
    y_test_price = matrixTest[:, 13]
    y_test = y_test.reshape((y_test.shape[0], 1))
    y_test_price = y_test_price.reshape((y_test_price.shape[0], 1))
    np.save('../Classification/inputClf_small/X_test', X_test)
    np.save('../Classification/inputClf_small/y_test', y_test)
    np.save('../Classification/inputClf_small/y_test_price', y_test_price)
"""
# step 2. price normalize for the classification input - for general
"""
def priceNormalize_for_General(routes=routes_general, currency=currency_general):
    """
    Different routes have different units for the price, normalize it as Euro.

    General-dataset variant: 12 dummy columns, so the price columns sit at
    indices 14, 15 and 17. Only a training set exists here.

    :return: NA
    example: priceNormalize_for_General()
    """
    # feature 0~11: flight number dummy variables
    # feature 12: departure date; feature 13: observed date state;
    # feature 14: minimum price; feature 15: maximum price
    X_train = np.load('inputGeneralRaw/X_train.npy')
    y_train = np.load('inputGeneralRaw/y_train.npy')
    y_train_price = np.load('inputGeneralRaw/y_train_price.npy')
    # normalize feature 14, feature 15, feature 17
    # feature 0~11: flight number dummy variables
    # feature 12: departure date; feature 13: observed date state;
    # feature 14: minimum price; feature 15: maximum price
    # fearure 16: prediction(buy or wait); feature 17: price
    evalMatrix_train = np.concatenate((X_train, y_train, y_train_price), axis=1)
    matrixTrain = np.empty(shape=(0, evalMatrix_train.shape[1]))
    for i in range(len(routes)):
        # rows of route i are those whose dummy column i equals 1
        evalMatrix = evalMatrix_train[np.where(evalMatrix_train[:, i]==1)[0], :]
        evalMatrix[:, 14] *= currency[i]
        evalMatrix[:, 15] *= currency[i]
        evalMatrix[:, 17] *= currency[i]
        matrixTrain = np.concatenate((matrixTrain, evalMatrix), axis=0)
    X_train = matrixTrain[:, 0:16]
    y_train = matrixTrain[:, 16]
    y_train_price = matrixTrain[:, 17]
    y_train = y_train.reshape((y_train.shape[0], 1))
    y_train_price = y_train_price.reshape((y_train_price.shape[0], 1))
    #self.X_train = np.concatenate((self.X_train, self.y_train_price), axis=1)
    #self.X_test = np.concatenate((self.X_test, self.y_test_price), axis=1)
    np.save('../Classification/inputGeneralClf_small/X_train', X_train)
    np.save('../Classification/inputGeneralClf_small/y_train', y_train)
    np.save('../Classification/inputGeneralClf_small/y_train_price', y_train_price)
"""
# step 3. get the regression input and output from classification inputs - for specific
"""
def getRegressionOutput_for_SpecificTrain(routes=routes_specific):
    """
    Get the regression output formula from the classification datasets.

    For every (route, departure date) group, the output column is set to the
    group's minimum observed price; the current price becomes an extra input
    feature.

    :return: Save the regression datasets into inputGeneralReg
    """
    X_train = np.load('../Classification/inputClf_small/X_train.npy')
    y_train = np.load('../Classification/inputClf_small/y_train.npy')
    y_train_price = np.load('../Classification/inputClf_small/y_train_price.npy')
    # concatenate the buy or wait info to get the total datas
    y_train = y_train.reshape((y_train.shape[0],1))
    y_train_price = y_train_price.reshape((y_train_price.shape[0],1))
    # feature 0~7: flight numbers
    # feature 8: departure date; feature 9: observed date state
    # feature 10: minimum price; feature 11: maximum price
    # feature 12: prediction(buy or wait); feature 13: current price
    X_train = np.concatenate((X_train, y_train, y_train_price), axis=1)
    """
    # define the variables needed to be changed
    """
    dim = 14
    idx_departureDate = 8
    idx_minimumPrice = 10
    idx_output = 12
    idx_currentPrice = 13
    # Construct train data
    X_tmp = np.empty(shape=(0, dim))
    for flightNum in range(len(routes)):
        # choose one route datas
        X_flightNum = X_train[np.where(X_train[:, flightNum]==1)[0], :]
        # group by the feature: departure date
        departureDates_train = np.unique(X_flightNum[:, idx_departureDate])
        # get the final datas, the observed data state should be from large to small(i.e. for time series)
        for departureDate in departureDates_train:
            indexs = np.where(X_flightNum[:, idx_departureDate]==departureDate)[0]
            datas = X_flightNum[indexs, :]
            minPrice = min(datas[:, idx_minimumPrice])  # get the minimum price for the output
            # overwrite the label column with the group's minimum price
            datas[:, idx_output] = minPrice
            """
            print departureDate
            print minPrice
            print datas
            """
            X_tmp = np.concatenate((X_tmp, datas), axis=0)
    X_train = X_tmp[:, 0:idx_output]
    y_train = X_tmp[:, idx_output]
    y_train_price = X_tmp[:, idx_currentPrice]
    y_train = y_train.reshape((y_train.shape[0], 1))
    y_train_price = y_train_price.reshape((y_train_price.shape[0], 1))
    # regression has one more feature than classification
    X_train = np.concatenate((X_train, y_train_price), axis=1)
    np.save('../Regression/inputReg_small/X_train', X_train)
    np.save('../Regression/inputReg_small/y_train', y_train)
    np.save('../Regression/inputReg_small/y_train_price', y_train_price)
def getRegressionOutput_for_SpecificTest(routes=routes_specific):
    """
    Get the regression output formula from the classification datasets.

    Test-set counterpart of getRegressionOutput_for_SpecificTrain: per
    (route, departure date) group, the output column becomes the group's
    minimum observed price and the current price is appended as a feature.

    :return: Save the regression datasets into inputGeneralReg
    """
    X_test = np.load('../Classification/inputClf_small/X_test.npy')
    y_test = np.load('../Classification/inputClf_small/y_test.npy')
    y_test_price = np.load('../Classification/inputClf_small/y_test_price.npy')
    # concatenate the buy or wait info to get the total datas
    y_test = y_test.reshape((y_test.shape[0],1))
    y_test_price = y_test_price.reshape((y_test_price.shape[0],1))
    # feature 0~7: flight numbers
    # feature 8: departure date; feature 9: observed date state
    # feature 10: minimum price; feature 11: maximum price
    # feature 12: prediction(buy or wait); feature 13: current price
    X_test = np.concatenate((X_test, y_test, y_test_price), axis=1)
    """
    # define the variables needed to be changed
    """
    dim = 14
    idx_departureDate = 8
    idx_minimumPrice = 10
    idx_output = 12
    idx_currentPrice = 13
    # Construct train data
    X_tmp = np.empty(shape=(0, dim))
    for flightNum in range(len(routes)):
        # choose one route datas
        X_flightNum = X_test[np.where(X_test[:, flightNum]==1)[0], :]
        # group by the feature: departure date
        departureDates_test = np.unique(X_flightNum[:, idx_departureDate])
        # get the final datas, the observed data state should be from large to small(i.e. for time series)
        for departureDate in departureDates_test:
            indexs = np.where(X_flightNum[:, idx_departureDate]==departureDate)[0]
            datas = X_flightNum[indexs, :]
            minPrice = min(datas[:, idx_minimumPrice])  # get the minimum price for the output
            # overwrite the label column with the group's minimum price
            datas[:, idx_output] = minPrice
            """
            print departureDate
            print minPrice
            print datas
            """
            X_tmp = np.concatenate((X_tmp, datas), axis=0)
    X_test = X_tmp[:, 0:idx_output]
    y_test = X_tmp[:, idx_output]
    y_test_price = X_tmp[:, idx_currentPrice]
    y_test = y_test.reshape((y_test.shape[0], 1))
    y_test_price = y_test_price.reshape((y_test_price.shape[0], 1))
    # regression has one more feature than classification
    X_test = np.concatenate((X_test, y_test_price), axis=1)
    np.save('../Regression/inputReg_small/X_test', X_test)
    np.save('../Regression/inputReg_small/y_test', y_test)
    np.save('../Regression/inputReg_small/y_test_price', y_test_price)
"""
# step 3. get the regression input and output from classification inputs
"""
def getRegressionOutput_for_General(routes=routes_general):
    """
    Get the regression output formula from the classification datasets.

    Loads the general-route classification arrays, replaces the prediction
    column with the eventual minimum price per departure date (the regression
    target), and saves the regression-ready arrays.
    :param routes: list of route prefixes; one dummy column per route is
        assumed at the front of X_train
    :return: Save the regression datasets into inputGeneralReg
    """
    X_train = np.load('../Classification/inputGeneralClf_small/X_train.npy')
    y_train = np.load('../Classification/inputGeneralClf_small/y_train.npy')
    y_train_price = np.load('../Classification/inputGeneralClf_small/y_train_price.npy')
    # concatenate the buy or wait info to get the total datas
    y_train = y_train.reshape((y_train.shape[0],1))
    y_train_price = y_train_price.reshape((y_train_price.shape[0],1))
    # feature 0~11: flight numbers
    # feature 12: departure date; feature 13: observed date state
    # feature 14: minimum price; feature 15: maximum price
    # feature 16: prediction(buy or wait); feature 17: current price
    X_train = np.concatenate((X_train, y_train, y_train_price), axis=1)
    """
    # define the variables needed to be changed
    """
    dim = 18
    idx_departureDate = 12
    idx_minimumPrice = 14
    idx_output = 16
    idx_currentPrice = 17
    # Construct train data
    X_tmp = np.empty(shape=(0, dim))
    for flightNum in range(len(routes)):
        # choose one route datas (rows whose dummy column flightNum == 1)
        X_flightNum = X_train[np.where(X_train[:, flightNum]==1)[0], :]
        # group by the feature: departure date
        departureDates_train = np.unique(X_flightNum[:, idx_departureDate])
        # get the final datas, the observed data state should be from large to small(i.e. for time series)
        for departureDate in departureDates_train:
            indexs = np.where(X_flightNum[:, idx_departureDate]==departureDate)[0]
            datas = X_flightNum[indexs, :]
            minPrice = min(datas[:, idx_minimumPrice]) # get the minimum price for the output
            # Overwrite the classification prediction column with the
            # regression target: the eventual minimum price for this date.
            datas[:, idx_output] = minPrice
            """
            print departureDate
            print minPrice
            print datas
            """
            X_tmp = np.concatenate((X_tmp, datas), axis=0)
    # Split the accumulated matrix back into features / target / current price.
    X_train = X_tmp[:, 0:idx_output]
    y_train = X_tmp[:, idx_output]
    y_train_price = X_tmp[:, idx_currentPrice]
    y_train = y_train.reshape((y_train.shape[0], 1))
    y_train_price = y_train_price.reshape((y_train_price.shape[0], 1))
    # regression has one more feature than classification
    X_train = np.concatenate((X_train, y_train_price), axis=1)
    np.save('../Regression/inputGeneralReg_small/X_train', X_train)
    np.save('../Regression/inputGeneralReg_small/y_train', y_train)
    np.save('../Regression/inputGeneralReg_small/y_train_price', y_train_price)
"""
# step 4. visualize for classification - for specific
"""
def visualizeData_for_SpecificClassification(filePrefix, isTrain=True, routes=routes_specific):
    """
    Visualize the train buy entries for every departure date, for each route
    :param filePrefix: route prefix (must be a member of routes)
    :param isTrain: load the train split when true, otherwise the test split
    :param routes: list of route prefixes used to resolve the dummy column
    :return: NA
    example: visualizeData_for_SpecificClassification(routes_specific[1], True, routes_specific)
    """
    # Load the requested split of the specific-route classification data.
    if isTrain:
        X_train = np.load('../Classification/inputClf_small/X_train.npy')
        y_train = np.load('../Classification/inputClf_small/y_train.npy')
        y_train_price = np.load('../Classification/inputClf_small/y_train_price.npy')
    else:
        X_train = np.load('../Classification/inputClf_small/X_test.npy')
        y_train = np.load('../Classification/inputClf_small/y_test.npy')
        y_train_price = np.load('../Classification/inputClf_small/y_test_price.npy')
    # route index
    flightNum = routes.index(filePrefix)
    # concatenate the buy or wait info to get the total datas
    y_train = y_train.reshape((y_train.shape[0],1))
    y_train_price = y_train_price.reshape((y_train_price.shape[0],1))
    # feature 0~7: flight number dummy variables
    # feature 8: departure date; feature 9: observed date state;
    # feature 10: minimum price; feature 11: maximum price
    # feature 12: prediction(buy or wait); feature 13: price
    X_train = np.concatenate((X_train, y_train, y_train_price), axis=1)
    # choose one route datas
    X_train = X_train[np.where(X_train[:, flightNum]==1)[0], :]
    # remove dummy variables
    # feature 0: departure date; feature 1: observed date state
    # feature 2: minimum price; feature 3: maximum price
    # feature 4: prediction(buy or wait); feature 5: price
    X_train = X_train[:, 8:14]
    # group by the feature: departure date
    departureDates_train = np.unique(X_train[:, 0])
    # get the final datas, the observed data state should be from large to small(i.e. for time series)
    length_test = []
    for departureDate in departureDates_train:
        indexs = np.where(X_train[:, 0]==departureDate)[0]
        datas = X_train[indexs, :]
        length_test.append(len(datas))
        print departureDate
        print datas
"""
# step 4. visualize for classification - for general
"""
def visualizeTrainData_for_GeneralClassification(filePrefix, routes):
    """
    Visualize the train buy entries for every departure date, for each route
    :param filePrefix: route prefix (must be a member of routes)
    :param routes: list of route prefixes used to resolve the dummy column
    :return: NA
    example: visualizeTrainData_for_GeneralClassification(routes_general[1], routes_general)
    """
    X_train = np.load('../Classification/inputGeneralClf_small/X_train.npy')
    y_train = np.load('../Classification/inputGeneralClf_small/y_train.npy')
    y_train_price = np.load('../Classification/inputGeneralClf_small/y_train_price.npy')
    # route index
    flightNum = routes.index(filePrefix)
    # concatenate the buy or wait info to get the total datas
    y_train = y_train.reshape((y_train.shape[0],1))
    y_train_price = y_train_price.reshape((y_train_price.shape[0],1))
    # normalize feature 14, feature 15, feature 17
    # feature 0~11: flight number dummy variables
    # feature 12: departure date; feature 13: observed date state;
    # feature 14: minimum price; feature 15: maximum price
    # feature 16: prediction(buy or wait); feature 17: price
    X_train = np.concatenate((X_train, y_train, y_train_price), axis=1)
    # choose one route datas
    X_train = X_train[np.where(X_train[:, flightNum]==1)[0], :]
    # remove dummy variables
    # feature 0: departure date; feature 1: observed date state
    # feature 2: minimum price; feature 3: maximum price
    # feature 4: prediction(buy or wait); feature 5: price
    X_train = X_train[:, 12:18]
    # group by the feature: departure date
    departureDates_train = np.unique(X_train[:, 0])
    # get the final datas, the observed data state should be from large to small(i.e. for time series)
    length_test = []
    for departureDate in departureDates_train:
        indexs = np.where(X_train[:, 0]==departureDate)[0]
        datas = X_train[indexs, :]
        length_test.append(len(datas))
        print departureDate
        print datas
"""
# step 5. visualize for regression - for general
"""
def visualizeTrainData_for_GeneralRegression(filePrefix, routes):
    """
    Visualize the train buy entries for every departure date, for each route
    :param filePrefix: route prefix (must be a member of routes)
    :param routes: list of route prefixes used to resolve the dummy column
    :return: NA
    example: visualizeTrainData_for_GeneralRegression(routes_general[1], routes_general)
    """
    X_train = np.load('../Regression/inputGeneralReg_small/X_train.npy')
    y_train = np.load('../Regression/inputGeneralReg_small/y_train.npy')
    y_train_price = np.load('../Regression/inputGeneralReg_small/y_train_price.npy')
    """
    define the variables to be changed
    """
    dim = 19
    # NOTE(review): idx_departureDate is defined but the slice below uses the
    # literal 12 -- presumably they are meant to agree; confirm before editing.
    idx_departureDate = 12
    # route index
    flightNum = routes.index(filePrefix)
    # concatenate the buy or wait info to get the total datas
    y_train = y_train.reshape((y_train.shape[0],1))
    y_train_price = y_train_price.reshape((y_train_price.shape[0],1))
    # feature 0~11: flight number dummy variables
    # feature 12: departure date; feature 13: observed date state;
    # feature 14: minimum price; feature 15: maximum price
    # feature 16: current price;
    # feature 17: minimum price; feature 18: current price
    X_train = np.concatenate((X_train, y_train, y_train_price), axis=1)
    # choose one route datas
    X_train = X_train[np.where(X_train[:, flightNum]==1)[0], :]
    # remove dummy variables
    # feature 0: departure date; feature 1: observed date state
    # feature 2: minimum price by now; feature 3: maximum price by now
    # feature 4: current price;
    # feature 5: minimum price; feature 6: current price
    X_train = X_train[:, 12:dim]
    # group by the feature: departure date
    departureDates_train = np.unique(X_train[:, 0])
    # get the final datas, the observed data state should be from large to small(i.e. for time series)
    length_test = []
    for departureDate in departureDates_train:
        indexs = np.where(X_train[:, 0]==departureDate)[0]
        datas = X_train[indexs, :]
        length_test.append(len(datas))
        print departureDate
        print datas
"""
# step 5. visualize for regression - for specific
"""
def visualizeTrainData_for_SpecificRegression(filePrefix, routes):
    """
    Visualize the train buy entries for every departure date, for each route

    Unlike the other visualizers, this one concatenates the train AND test
    splits so every departure date of the route is shown.
    :param filePrefix: route prefix (must be a member of routes)
    :param routes: list of route prefixes used to resolve the dummy column
    :return: NA
    example: visualizeTrainData_for_SpecificRegression(routes_specific[1], routes_specific)
    """
    X_train = np.load('../Regression/inputReg_small/X_train.npy')
    y_train = np.load('../Regression/inputReg_small/y_train.npy')
    y_train_price = np.load('../Regression/inputReg_small/y_train_price.npy')
    X_train2 = np.load('../Regression/inputReg_small/X_test.npy')
    y_train2 = np.load('../Regression/inputReg_small/y_test.npy')
    y_train2_price = np.load('../Regression/inputReg_small/y_test_price.npy')
    # Merge the two splits row-wise before filtering by route.
    X_train = np.concatenate((X_train, X_train2), axis=0)
    y_train = np.concatenate((y_train, y_train2), axis=0)
    y_train_price = np.concatenate((y_train_price, y_train2_price), axis=0)
    """
    define the variables to be changed
    """
    dim = 15
    idx_departureDate = 8
    # route index
    flightNum = routes.index(filePrefix)
    # concatenate the buy or wait info to get the total datas
    y_train = y_train.reshape((y_train.shape[0],1))
    y_train_price = y_train_price.reshape((y_train_price.shape[0],1))
    # feature 0~7: flight number dummy variables
    # feature 8: departure date; feature 9: observed date state;
    # feature 10: minimum price; feature 11: maximum price
    # feature 12: current price;
    # feature 13: minimum price; feature 14: current price
    X_train = np.concatenate((X_train, y_train, y_train_price), axis=1)
    # choose one route datas
    X_train = X_train[np.where(X_train[:, flightNum]==1)[0], :]
    # remove dummy variables
    # feature 0: departure date; feature 1: observed date state
    # feature 2: minimum price by now; feature 3: maximum price by now
    # feature 4: current price;
    # feature 5: minimum price; feature 6: current price
    X_train = X_train[:, idx_departureDate:dim]
    # group by the feature: departure date
    departureDates_train = np.unique(X_train[:, 0])
    # get the final datas, the observed data state should be from large to small(i.e. for time series)
    length_test = []
    for departureDate in departureDates_train:
        indexs = np.where(X_train[:, 0]==departureDate)[0]
        datas = X_train[indexs, :]
        length_test.append(len(datas))
        print departureDate
        print datas
if __name__ == "__main__":
# priceNormalize_for_General()
#visualizeTrainData_for_GeneralClassification(routes_general[1], routes_general)
#visualizeTrainData_for_GeneralRegression(routes_general[1], routes_general)
#visualizeTrainData_for_GeneralClassification(routes_general[1], routes_general)
#visualizeTrainData_for_SpecificRegression(routes_specific[1], routes_specific)
"""
STEP 1: load raw data
"""
load_for_classification_for_Specific()
load_for_classification_for_General()
"""
STEP 2: get the data for the classification problem
"""
priceNormalize_for_Specific()
priceNormalize_for_General()
"""
STEP 3: get the data for the regression problem
"""
getRegressionOutput_for_SpecificTrain()
getRegressionOutput_for_SpecificTest()
"""
STEP 4: visualize the data set for classification problem
"""
isTrain = 0
visualizeData_for_SpecificClassification(routes_specific[1], isTrain, routes_specific)
visualizeTrainData_for_GeneralClassification(routes_general[11], routes_general)
"""
STEP 5: visualize the data set, but you can do this step at the classification object
"""
visualizeTrainData_for_SpecificRegression(routes_general[1], routes_general)
| 38.09528
| 149
| 0.657328
| 5,589
| 43,581
| 4.957237
| 0.062981
| 0.038548
| 0.035335
| 0.011694
| 0.803617
| 0.769364
| 0.733379
| 0.709052
| 0.674367
| 0.655887
| 0
| 0.022959
| 0.23843
| 43,581
| 1,143
| 150
| 38.128609
| 0.811811
| 0.20922
| 0
| 0.542969
| 0
| 0
| 0.127224
| 0.093764
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.001953
| 0.007813
| null | null | 0.029297
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ce1b6d34092637695dfe6077f1d4c0c3047f5d34
| 224
|
py
|
Python
|
django_db_log_requestid/sqlite3/base.py
|
beniwohli/django-db-log-requestid
|
fbc0db0ff9924ec75a935eb422e0f6760fa8f790
|
[
"BSD-2-Clause"
] | 1
|
2016-11-01T13:34:20.000Z
|
2016-11-01T13:34:20.000Z
|
django_db_log_requestid/sqlite3/base.py
|
piquadrat/django-query-commenter
|
fbc0db0ff9924ec75a935eb422e0f6760fa8f790
|
[
"BSD-2-Clause"
] | 1
|
2018-11-27T08:37:37.000Z
|
2018-11-27T08:37:37.000Z
|
django_db_log_requestid/sqlite3/base.py
|
piquadrat/django-db-log-requestid
|
fbc0db0ff9924ec75a935eb422e0f6760fa8f790
|
[
"BSD-2-Clause"
] | 1
|
2018-11-26T22:23:02.000Z
|
2018-11-26T22:23:02.000Z
|
from django.db.backends.sqlite3 import base
from django_db_log_requestid.base_backend.base import DBLogRequestIdDatabaseWrapperMixin
class DatabaseWrapper(DBLogRequestIdDatabaseWrapperMixin, base.DatabaseWrapper):
    """SQLite backend wrapper composing the request-id logging mixin with
    Django's stock sqlite3 DatabaseWrapper; all behavior comes from the bases."""
    pass
| 37.333333
| 88
| 0.879464
| 23
| 224
| 8.391304
| 0.608696
| 0.103627
| 0.124352
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004831
| 0.075893
| 224
| 6
| 89
| 37.333333
| 0.927536
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
5a106be4ad9ae70cf79a9d3c717b61a726a76d5a
| 19,859
|
py
|
Python
|
reputation/reputation_scenario_test.py
|
deborahduong/reputation
|
46c191a753fd720c2b11097e9ce7ef54390dbc24
|
[
"MIT"
] | 8
|
2019-02-02T08:51:24.000Z
|
2020-12-08T18:10:47.000Z
|
reputation/reputation_scenario_test.py
|
deborahduong/reputation
|
46c191a753fd720c2b11097e9ce7ef54390dbc24
|
[
"MIT"
] | 82
|
2018-12-14T15:48:54.000Z
|
2020-10-05T12:24:36.000Z
|
reputation/reputation_scenario_test.py
|
deborahduong/reputation
|
46c191a753fd720c2b11097e9ce7ef54390dbc24
|
[
"MIT"
] | 13
|
2018-11-01T01:31:29.000Z
|
2021-07-23T10:30:55.000Z
|
# MIT License
#
# Copyright (c) 2019 Stichting SingularityNET
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Reputation Scenario Test Data Generation
import time
import datetime
from reputation_scenario import reputation_simulate
from reputation_service_api import *
from aigents_reputation_api import AigentsAPIReputationService
def dict_sorted(d):
    """Render *d* as a dict-literal string with keys in sorted order.

    Keys are single-quoted; values appear via str.format (so strings are
    NOT quoted). Gives a deterministic representation for log comparison.
    """
    ordered = sorted(d.items(), key=lambda kv: kv[0])
    parts = ["'{}': {}".format(key, value) for key, value in ordered]
    return "{" + ", ".join(parts) + "}"
#TODO use any other Reputation Service here
rs = None
#rs = AigentsAPIReputationService('http://localtest.com:1288/', 'john@doe.org', 'q', 'a', False, 'test', True)
rs = PythonReputationService()
if rs is not None:
    rs.set_parameters({'fullnorm':True,'weighting':True,'logratings':False,'logranks':True})
verbose = False
# Simulation length in days and the consumer/supplier split of the population.
days = 364
consumers = 0.9
suppliers = 0.1
# Agent id ranges -- presumably good_range holds honest agents and bad_range
# the scammers; confirm against reputation_simulate.
good_range = [1,9500]
bad_range = [9501,10000]
"""
days = 183
consumers = 0.9
suppliers = 0.1
good_range = [1,950]
bad_range = [951,1000]
days = 10
consumers = 0.5
suppliers = 0.5
good_range = [1,8]
bad_range = [9,10]
"""
# Transactions per day for each agent kind (bad agents transact twice as often).
good_transactions = 1
bad_transactions = 2
"""
# Comparing different reputation systems (RS) for different amount ratios (AR)
for ar in [1,2,5,10,20]:
print('Amount Ratio (AR): '+str(ar))
good_agent = {"range": good_range, "values": [100,1000], "transactions": good_transactions, "suppliers": suppliers, "consumers": consumers}
bad_agent = {"range": bad_range, "values": [good_agent['values'][0]/ar,good_agent['values'][1]/ar], "transactions": bad_transactions, "suppliers": suppliers, "consumers": consumers}
print('Good Agent: '+str(good_agent))
print('Bad Agent : '+str(bad_agent))
print('No RS, Regular RS, Weighted Rank RS, Denominated Weighted Rank RS:')
#print('No RS')
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, None, verbose)
#print('Regular RS')
rs.set_parameters({'fullnorm':True,'weighting':False,'logratings':False,'denomination':False})
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, verbose)
#print('Weighted Rank RS')
rs.set_parameters({'fullnorm':True,'weighting':True,'logratings':False,'denomination':False})
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, verbose)
#print('Denominated Weighted Rank RS')
rs.set_parameters({'fullnorm':True,'weighting':True,'logratings':False,'denomination':True})
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, verbose)
"""
# Comparing different reputation systems (RS) for different scam periods (SP)
# Good and bad agents use identical value ranges here; they differ only in id
# range and transactions-per-day, so the RS must tell them apart by behavior.
good_agent = {"range": good_range, "values": [100,1000], "transactions": good_transactions, "suppliers": suppliers, "consumers": consumers}
bad_agent = {"range": bad_range, "values": [100,1000], "transactions": bad_transactions, "suppliers": suppliers, "consumers": consumers}
print('Good Agent:',str(good_agent))
print('Bad Agent :',str(bad_agent))
for sp in [364,182,92,30]:
    #for sp in [182,92,30,10]:
    #for sp in [10,6,4,2]:
    print('Scam period:',str(sp))
    sip = sp/2  # second campaign parameter: half the scam period
    # Baseline: no reputation service at all.
    print('No RS:', end =" ")
    reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, None, campaign = [sp,sip], verbose=verbose)
    print('Regular RS:', end =" ")
    rs.set_parameters({'fullnorm':True,'weighting':False,'logratings':False,'denomination':False,'unrated':False,'default':0.5,'decayed':0.5,'ratings':1.0,'spendings':0.0})
    reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=verbose)
    print('Weighted RS:', end =" ")
    rs.set_parameters({'fullnorm':True,'weighting':True ,'logratings':False,'denomination':True ,'unrated':False,'default':0.5,'decayed':0.5,'ratings':1.0,'spendings':0.0})
    reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=verbose)
    print('TOM-based RS:', end =" ")
    rs.set_parameters({'fullnorm':True,'weighting':True ,'logratings':False,'denomination':True ,'unrated':True ,'default':0.0,'decayed':0.5,'ratings':1.0,'spendings':0.0})
    reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=verbose)
    print('SOM-based RS:', end =" ")
    rs.set_parameters({'fullnorm':True,'weighting':True ,'logratings':False,'denomination':True ,'unrated':False,'default':0.0,'decayed':0.5,'ratings':0.5,'spendings':0.5})
    reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=verbose)
"""
# Exploring different reputation system (RS) parameters ("space exploration")
good_agent = {"range": good_range, "values": [100,1000], "transactions": good_transactions, "suppliers": suppliers, "consumers": consumers}
bad_agent = {"range": bad_range, "values": [100,1000], "transactions": bad_transactions, "suppliers": suppliers, "consumers": consumers}
print('Good Agent:',str(good_agent))
print('Bad Agent :',str(bad_agent))
sp = 4 # 30
sip = 0 # sp/2
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, None, campaign = [sp,sip], verbose=False)
# Study SOM - initial
rs.set_parameters({'fullnorm':True,'weighting':True ,'logratings':False,'downrating':False,'denomination':True ,'unrated':False,'default':0.0,'decayed':0.5,'ratings':0.5,'spendings':0.5,'conservatism':0.5})
print(dict_sorted(rs.get_parameters()), end=" ")
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=False)
rs.set_parameters({'fullnorm':False,'weighting':True ,'logratings':False,'downrating':False,'denomination':True ,'unrated':False,'default':0.0,'decayed':0.5,'ratings':0.5,'spendings':0.5,'conservatism':0.5})
print(dict_sorted(rs.get_parameters()), end=" ")
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=False)
rs.set_parameters({'fullnorm':True,'weighting':True ,'logratings':True,'downrating':False,'denomination':True ,'unrated':False,'default':0.0,'decayed':0.5,'ratings':0.5,'spendings':0.5,'conservatism':0.5})
print(dict_sorted(rs.get_parameters()), end=" ")
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=False)
rs.set_parameters({'fullnorm':True,'weighting':True ,'logratings':False,'downrating':True,'denomination':True ,'unrated':False,'default':0.0,'decayed':0.5,'ratings':0.5,'spendings':0.5,'conservatism':0.5})
print(dict_sorted(rs.get_parameters()), end=" ")
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=False)
rs.set_parameters({'fullnorm':True,'weighting':True ,'logratings':False,'downrating':False,'denomination':True ,'unrated':False,'default':0.0,'decayed':0.5,'ratings':0.5,'spendings':0.5,'conservatism':0.9})
print(dict_sorted(rs.get_parameters()), end=" ")
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=False)
rs.set_parameters({'fullnorm':True,'weighting':True ,'logratings':False,'downrating':False,'denomination':True ,'unrated':False,'default':0.0,'decayed':0.5,'ratings':0.9,'spendings':0.1,'conservatism':0.5})
print(dict_sorted(rs.get_parameters()), end=" ")
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=False)
rs.set_parameters({'fullnorm':True,'weighting':True ,'logratings':False,'downrating':False,'denomination':True ,'unrated':False,'default':0.0,'decayed':0.5,'ratings':0.1,'spendings':0.9,'conservatism':0.5})
print(dict_sorted(rs.get_parameters()), end=" ")
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=False)
# Study SOM - ratings/spendings
rs.set_parameters({'fullnorm':True,'weighting':True ,'logratings':False,'downrating':False,'denomination':True ,'unrated':False,'default':0.0,'decayed':0.5,'ratings':0.1,'spendings':0.9,'conservatism':0.9})
print(dict_sorted(rs.get_parameters()), end=" ")
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=False)
rs.set_parameters({'fullnorm':True,'weighting':True ,'logratings':False,'downrating':False,'denomination':True ,'unrated':False,'default':0.0,'decayed':0.5,'ratings':0.3,'spendings':0.7,'conservatism':0.9})
print(dict_sorted(rs.get_parameters()), end=" ")
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=False)
rs.set_parameters({'fullnorm':True,'weighting':True ,'logratings':False,'downrating':False,'denomination':True ,'unrated':False,'default':0.0,'decayed':0.5,'ratings':0.7,'spendings':0.3,'conservatism':0.9})
print(dict_sorted(rs.get_parameters()), end=" ")
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=False)
rs.set_parameters({'fullnorm':True,'weighting':True ,'logratings':False,'downrating':False,'denomination':True ,'unrated':False,'default':0.0,'decayed':0.5,'ratings':0.9,'spendings':0.1,'conservatism':0.9})
print(dict_sorted(rs.get_parameters()), end=" ")
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=False)
# Study SOM - conservatism
rs.set_parameters({'fullnorm':True,'weighting':True ,'logratings':False,'downrating':False,'denomination':True ,'unrated':False,'default':0.0,'decayed':0.5,'ratings':0.5,'spendings':0.5,'conservatism':0.7})
print(dict_sorted(rs.get_parameters()), end=" ")
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=False)
rs.set_parameters({'fullnorm':True,'weighting':True ,'logratings':False,'downrating':False,'denomination':True ,'unrated':False,'default':0.0,'decayed':0.5,'ratings':0.5,'spendings':0.5,'conservatism':0.99})
print(dict_sorted(rs.get_parameters()), end=" ")
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=False)
# Study SOM - default/decayed
rs.set_parameters({'fullnorm':True,'weighting':True ,'logratings':False,'downrating':False,'denomination':True ,'unrated':False,'default':0.5,'decayed':0.5,'ratings':0.5,'spendings':0.5,'conservatism':0.9})
print(dict_sorted(rs.get_parameters()), end=" ")
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=False)
rs.set_parameters({'fullnorm':True,'weighting':True ,'logratings':False,'downrating':False,'denomination':True ,'unrated':False,'default':0.0,'decayed':1.0,'ratings':0.5,'spendings':0.5,'conservatism':0.9})
print(dict_sorted(rs.get_parameters()), end=" ")
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=False)
rs.set_parameters({'fullnorm':True,'weighting':True ,'logratings':False,'downrating':False,'denomination':True ,'unrated':False,'default':0.5,'decayed':1.0,'ratings':0.5,'spendings':0.5,'conservatism':0.9})
print(dict_sorted(rs.get_parameters()), end=" ")
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=False)
# Study TOM+SOM
rs.set_parameters({'fullnorm':True,'weighting':True ,'logratings':False,'downrating':False,'denomination':True ,'unrated':True,'default':0.0,'decayed':0.5,'ratings':0.5,'spendings':0.5,'conservatism':0.5})
print(dict_sorted(rs.get_parameters()), end=" ")
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=False)
rs.set_parameters({'fullnorm':False,'weighting':True ,'logratings':False,'downrating':False,'denomination':True ,'unrated':True,'default':0.0,'decayed':0.5,'ratings':0.5,'spendings':0.5,'conservatism':0.5})
print(dict_sorted(rs.get_parameters()), end=" ")
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=False)
rs.set_parameters({'fullnorm':True,'weighting':True ,'logratings':True,'downrating':False,'denomination':True ,'unrated':True,'default':0.0,'decayed':0.5,'ratings':0.5,'spendings':0.5,'conservatism':0.5})
print(dict_sorted(rs.get_parameters()), end=" ")
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=False)
rs.set_parameters({'fullnorm':True,'weighting':True ,'logratings':False,'downrating':True,'denomination':True ,'unrated':True,'default':0.0,'decayed':0.5,'ratings':0.5,'spendings':0.5,'conservatism':0.5})
print(dict_sorted(rs.get_parameters()), end=" ")
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=False)
rs.set_parameters({'fullnorm':True,'weighting':True ,'logratings':False,'downrating':False,'denomination':True ,'unrated':True,'default':0.0,'decayed':0.5,'ratings':0.5,'spendings':0.5,'conservatism':0.9})
print(dict_sorted(rs.get_parameters()), end=" ")
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=False)
rs.set_parameters({'fullnorm':True,'weighting':True ,'logratings':False,'downrating':False,'denomination':True ,'unrated':True,'default':0.0,'decayed':0.5,'ratings':0.9,'spendings':0.1,'conservatism':0.5})
print(dict_sorted(rs.get_parameters()), end=" ")
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=False)
rs.set_parameters({'fullnorm':True,'weighting':True ,'logratings':False,'downrating':False,'denomination':True ,'unrated':True,'default':0.0,'decayed':0.5,'ratings':0.1,'spendings':0.9,'conservatism':0.5})
print(dict_sorted(rs.get_parameters()), end=" ")
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=False)
# Study TOM
rs.set_parameters({'fullnorm':True,'weighting':True ,'logratings':False,'downrating':False,'denomination':True ,'unrated':True,'default':0.0,'decayed':0.5,'ratings':1.0,'spendings':0.0,'conservatism':0.5})
print(dict_sorted(rs.get_parameters()), end=" ")
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=False)
rs.set_parameters({'fullnorm':False,'weighting':True ,'logratings':False,'downrating':False,'denomination':True ,'unrated':True,'default':0.0,'decayed':0.5,'ratings':1.0,'spendings':0.0,'conservatism':0.5})
print(dict_sorted(rs.get_parameters()), end=" ")
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=False)
rs.set_parameters({'fullnorm':True,'weighting':True ,'logratings':True,'downrating':False,'denomination':True ,'unrated':True,'default':0.0,'decayed':0.5,'ratings':1.0,'spendings':0.0,'conservatism':0.5})
print(dict_sorted(rs.get_parameters()), end=" ")
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=False)
rs.set_parameters({'fullnorm':True,'weighting':True ,'logratings':False,'downrating':True,'denomination':True ,'unrated':True,'default':0.0,'decayed':0.5,'ratings':1.0,'spendings':0.0,'conservatism':0.5})
print(dict_sorted(rs.get_parameters()), end=" ")
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=False)
rs.set_parameters({'fullnorm':True,'weighting':True ,'logratings':False,'downrating':False,'denomination':True ,'unrated':True,'default':0.0,'decayed':0.5,'ratings':1.0,'spendings':0.0,'conservatism':0.9})
print(dict_sorted(rs.get_parameters()), end=" ")
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=False)
rs.set_parameters({'fullnorm':True,'weighting':True ,'logratings':False,'downrating':False,'denomination':True ,'unrated':True,'default':0.5,'decayed':1.0,'ratings':1.0,'spendings':0.0,'conservatism':0.9})
print(dict_sorted(rs.get_parameters()), end=" ")
reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, campaign = [sp,sip], verbose=False)
"""
#Very-very unhealthy agent environment set
# NOTE(review): each good_agent/bad_agent pair below overwrites the previous
# one, and every simulate call is commented out -- this section currently has
# no effect beyond the final assignment; it is kept as a menu of scenarios.
good_agent = {"range": good_range, "values": [100,1000], "transactions": good_transactions, "suppliers": suppliers, "consumers": consumers}
bad_agent = {"range": bad_range, "values": [100,1000], "transactions": bad_transactions, "suppliers": suppliers, "consumers": consumers}
#reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, verbose)
#reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, False, rs, verbose)
#Very unhealthy agent environment set
good_agent = {"range": good_range, "values": [100,1000], "transactions": good_transactions, "suppliers": suppliers, "consumers": consumers}
bad_agent = {"range": bad_range, "values": [50,500], "transactions": bad_transactions, "suppliers": suppliers, "consumers": consumers}
#reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, verbose)
#reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, False, rs, verbose)
#Unhealthy agent environment set
good_agent = {"range": good_range, "values": [100,1000], "transactions": good_transactions, "suppliers": suppliers, "consumers": consumers}
bad_agent = {"range": bad_range, "values": [10,100], "transactions": bad_transactions, "suppliers": suppliers, "consumers": consumers}
#reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, verbose)
#reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, False, rs, verbose)
#Semi-healthy agent environment set
good_agent = {"range": good_range, "values": [100,1000], "transactions": good_transactions, "suppliers": suppliers, "consumers": consumers}
bad_agent = {"range": bad_range, "values": [5,50], "transactions": bad_transactions, "suppliers": suppliers, "consumers": consumers}
#reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, verbose)
#reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, False, rs, verbose)
#Healthy agent environment set (default)
good_agent = {"range": good_range, "values": [100,1000], "transactions": good_transactions, "suppliers": suppliers, "consumers": consumers}
bad_agent = {"range": bad_range, "values": [1,10], "transactions": bad_transactions, "suppliers": suppliers, "consumers": consumers}
#reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, True, rs, verbose)
#reputation_simulate(good_agent,bad_agent, datetime.date(2018, 1, 1), days, False, rs, verbose)
# Explicitly release the reputation service object.
del rs
| 64.687296
| 207
| 0.731809
| 2,864
| 19,859
| 4.965084
| 0.080307
| 0.011955
| 0.075809
| 0.093038
| 0.857947
| 0.857947
| 0.857103
| 0.848453
| 0.848101
| 0.839733
| 0
| 0.04418
| 0.076791
| 19,859
| 306
| 208
| 64.898693
| 0.731428
| 0.126341
| 0
| 0.206897
| 0
| 0
| 0.233737
| 0
| 0
| 0
| 0
| 0.003268
| 0
| 1
| 0.017241
| false
| 0
| 0.086207
| 0
| 0.12069
| 0.137931
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5a1ff75d5190e280108692246c52d5ff50cc2b2b
| 211
|
py
|
Python
|
micromlgen/__init__.py
|
yangtuo250/micromlgen
|
ca66b9fbb2ac57c13c87cc053ab0621449559ec1
|
[
"MIT"
] | 90
|
2019-12-21T08:28:58.000Z
|
2022-03-29T12:28:23.000Z
|
micromlgen/__init__.py
|
yangtuo250/micromlgen
|
ca66b9fbb2ac57c13c87cc053ab0621449559ec1
|
[
"MIT"
] | 11
|
2020-11-29T09:05:52.000Z
|
2022-01-29T16:46:57.000Z
|
micromlgen/__init__.py
|
eloquentarduino/micromlgen
|
9a4aa80612cb1cc38498bfa36e0eccbe9ca7807c
|
[
"MIT"
] | 15
|
2020-07-27T21:54:50.000Z
|
2022-02-27T02:54:59.000Z
|
import micromlgen.platforms as platforms
from micromlgen.micromlgen import port
from micromlgen.utils import port_testset, port_trainset
from micromlgen.wifiindoorpositioning import port_wifi_indoor_positioning
| 42.2
| 73
| 0.895735
| 26
| 211
| 7.076923
| 0.5
| 0.228261
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.080569
| 211
| 4
| 74
| 52.75
| 0.948454
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5a35d52e06ac79ff2f5df97de95ea3cd1a4b038a
| 67
|
py
|
Python
|
projects/thesis/continuous/custom/data/__init__.py
|
cpark90/rrrcnn
|
ba66cc391265be76fa3896b66459ff7241b47972
|
[
"Apache-2.0"
] | null | null | null |
projects/thesis/continuous/custom/data/__init__.py
|
cpark90/rrrcnn
|
ba66cc391265be76fa3896b66459ff7241b47972
|
[
"Apache-2.0"
] | null | null | null |
projects/thesis/continuous/custom/data/__init__.py
|
cpark90/rrrcnn
|
ba66cc391265be76fa3896b66459ff7241b47972
|
[
"Apache-2.0"
] | null | null | null |
from .build import *
from .mapper import *
from .datasets import *
| 16.75
| 23
| 0.731343
| 9
| 67
| 5.444444
| 0.555556
| 0.408163
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.179104
| 67
| 3
| 24
| 22.333333
| 0.890909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5a42f017a4fc7d18f5c73c21cd7738736376401f
| 43
|
py
|
Python
|
virl/cli/views/generate/nso/__init__.py
|
tombry/virlutils
|
e98136b4e88c456828f2d0496c14f851f2627a46
|
[
"MIT"
] | 133
|
2018-07-01T06:08:49.000Z
|
2022-03-26T15:22:21.000Z
|
virl/cli/views/generate/nso/__init__.py
|
tombry/virlutils
|
e98136b4e88c456828f2d0496c14f851f2627a46
|
[
"MIT"
] | 76
|
2018-06-28T16:41:57.000Z
|
2022-03-26T17:23:06.000Z
|
virl/cli/views/generate/nso/__init__.py
|
tombry/virlutils
|
e98136b4e88c456828f2d0496c14f851f2627a46
|
[
"MIT"
] | 43
|
2018-06-27T20:40:52.000Z
|
2022-02-22T06:16:11.000Z
|
from .sync_result import sync_table # noqa
| 21.5
| 42
| 0.813953
| 7
| 43
| 4.714286
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139535
| 43
| 1
| 43
| 43
| 0.891892
| 0.093023
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5a61b1e497b890d29e4d1c3c8461f256933f9775
| 118
|
py
|
Python
|
imctools/data/__init__.py
|
BodenmillerGroup/imctools
|
5019836df5dc2b682722e39d5f9c62799b658929
|
[
"MIT"
] | 19
|
2018-06-12T15:45:46.000Z
|
2022-02-12T08:33:59.000Z
|
imctools/data/__init__.py
|
BodenmillerGroup/imctools
|
5019836df5dc2b682722e39d5f9c62799b658929
|
[
"MIT"
] | 82
|
2017-09-19T18:38:50.000Z
|
2022-03-31T16:25:19.000Z
|
imctools/data/__init__.py
|
BodenmillerGroup/imctools
|
5019836df5dc2b682722e39d5f9c62799b658929
|
[
"MIT"
] | 12
|
2017-11-23T03:01:41.000Z
|
2022-03-22T14:06:27.000Z
|
from .acquisition import *
from .channel import *
from .panorama import *
from .session import *
from .slide import *
| 19.666667
| 26
| 0.745763
| 15
| 118
| 5.866667
| 0.466667
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169492
| 118
| 5
| 27
| 23.6
| 0.897959
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ce69123ffe862dbd086777d4dc0c5a6fe95a0374
| 81
|
py
|
Python
|
Algorithms_sandbox/Runner.py
|
Gruschwick/ECG_PLATFORM
|
4a1ee568e8593938a3b51c595d4834f861a6db6e
|
[
"MIT"
] | 5
|
2021-01-28T00:04:35.000Z
|
2022-03-05T05:35:10.000Z
|
Algorithms_sandbox/Runner.py
|
Gruschwick/ECG_PLATFORM
|
4a1ee568e8593938a3b51c595d4834f861a6db6e
|
[
"MIT"
] | null | null | null |
Algorithms_sandbox/Runner.py
|
Gruschwick/ECG_PLATFORM
|
4a1ee568e8593938a3b51c595d4834f861a6db6e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 22 16:21:21 2019
@author: x
"""
| 10.125
| 35
| 0.54321
| 14
| 81
| 3.142857
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.206349
| 0.222222
| 81
| 7
| 36
| 11.571429
| 0.492063
| 0.864198
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ce6bf266a72fbe7d1d35a032d7365a943aa9ae35
| 29
|
py
|
Python
|
crnn/__init__.py
|
sticktoFE/rpaserver
|
b77843188d383622c31ff33e60570fcfc882c873
|
[
"Apache-2.0"
] | null | null | null |
crnn/__init__.py
|
sticktoFE/rpaserver
|
b77843188d383622c31ff33e60570fcfc882c873
|
[
"Apache-2.0"
] | null | null | null |
crnn/__init__.py
|
sticktoFE/rpaserver
|
b77843188d383622c31ff33e60570fcfc882c873
|
[
"Apache-2.0"
] | null | null | null |
from .CRNN import CRNNHandle
| 14.5
| 28
| 0.827586
| 4
| 29
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ce6e76b47a0b90703cfe09aa4cb8cd3a815065d1
| 26
|
py
|
Python
|
bugal/base/serializers/__init__.py
|
aquitania99/bugal-app
|
3e0d7253bde847962846b629085477244cb1abf2
|
[
"MIT"
] | 3
|
2019-08-29T10:14:40.000Z
|
2021-03-05T09:50:15.000Z
|
bugal/base/serializers/__init__.py
|
aquitania99/bugal-app
|
3e0d7253bde847962846b629085477244cb1abf2
|
[
"MIT"
] | null | null | null |
bugal/base/serializers/__init__.py
|
aquitania99/bugal-app
|
3e0d7253bde847962846b629085477244cb1abf2
|
[
"MIT"
] | 1
|
2021-03-05T09:50:29.000Z
|
2021-03-05T09:50:29.000Z
|
from .serializers import *
| 26
| 26
| 0.807692
| 3
| 26
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 26
| 1
| 26
| 26
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ceca04117a0a50876a812eeeee51a3e691c1c410
| 190
|
py
|
Python
|
test/lex_module.py
|
pyarnold/ply
|
98bb0e095d72c8aed9de01c15b65fa096c745ce3
|
[
"Unlicense"
] | 1
|
2020-12-18T01:07:42.000Z
|
2020-12-18T01:07:42.000Z
|
test/lex_module.py
|
pyarnold/ply
|
98bb0e095d72c8aed9de01c15b65fa096c745ce3
|
[
"Unlicense"
] | null | null | null |
test/lex_module.py
|
pyarnold/ply
|
98bb0e095d72c8aed9de01c15b65fa096c745ce3
|
[
"Unlicense"
] | null | null | null |
# lex_module.py
#
import sys
if ".." not in sys.path:
sys.path.insert(0, "..")
import ply.lex as lex
import lex_module_import
lex.lex(module=lex_module_import)
lex.runmain(data="3+4")
| 15.833333
| 33
| 0.705263
| 34
| 190
| 3.794118
| 0.5
| 0.27907
| 0.232558
| 0.27907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018293
| 0.136842
| 190
| 11
| 34
| 17.272727
| 0.768293
| 0.068421
| 0
| 0
| 0
| 0
| 0.04023
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.571429
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ced1bbd2f914cb2a6bc16aaaeb33bb28e76c0816
| 29
|
py
|
Python
|
src/cms/views/error_handler/__init__.py
|
S10MC2015/cms-django
|
b08f2be60a9db6c8079ee923de2cd8912f550b12
|
[
"Apache-2.0"
] | 4
|
2019-12-05T16:45:17.000Z
|
2020-05-09T07:26:34.000Z
|
src/cms/views/error_handler/__init__.py
|
S10MC2015/cms-django
|
b08f2be60a9db6c8079ee923de2cd8912f550b12
|
[
"Apache-2.0"
] | 56
|
2019-12-05T12:31:37.000Z
|
2021-01-07T15:47:45.000Z
|
src/cms/views/error_handler/__init__.py
|
S10MC2015/cms-django
|
b08f2be60a9db6c8079ee923de2cd8912f550b12
|
[
"Apache-2.0"
] | 2
|
2019-12-11T09:52:26.000Z
|
2020-05-09T07:26:38.000Z
|
from .error_handler import *
| 14.5
| 28
| 0.793103
| 4
| 29
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.88
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cedb18593708f6c745788f6b3850870274ae6df6
| 29
|
py
|
Python
|
lang/py/cookbook/v2/source/cb2_20_4_exm_3.py
|
ch1huizong/learning
|
632267634a9fd84a5f5116de09ff1e2681a6cc85
|
[
"MIT"
] | null | null | null |
lang/py/cookbook/v2/source/cb2_20_4_exm_3.py
|
ch1huizong/learning
|
632267634a9fd84a5f5116de09ff1e2681a6cc85
|
[
"MIT"
] | null | null | null |
lang/py/cookbook/v2/source/cb2_20_4_exm_3.py
|
ch1huizong/learning
|
632267634a9fd84a5f5116de09ff1e2681a6cc85
|
[
"MIT"
] | null | null | null |
class MyClass(MyClass): pass
| 14.5
| 28
| 0.793103
| 4
| 29
| 5.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 29
| 1
| 29
| 29
| 0.884615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
0c83affd7687a83c116c5b7654500ab578c77b88
| 98
|
py
|
Python
|
libs/__init__.py
|
MichaelWU0726/x2trt
|
75f34a8574315178589502ab14f64289e5c49061
|
[
"Apache-2.0"
] | null | null | null |
libs/__init__.py
|
MichaelWU0726/x2trt
|
75f34a8574315178589502ab14f64289e5c49061
|
[
"Apache-2.0"
] | null | null | null |
libs/__init__.py
|
MichaelWU0726/x2trt
|
75f34a8574315178589502ab14f64289e5c49061
|
[
"Apache-2.0"
] | null | null | null |
from .body_yolov5_dynamic import main_body_dynamic
from .body_yolov5_fixed import main_body_fixed
| 49
| 51
| 0.897959
| 16
| 98
| 5
| 0.4375
| 0.2
| 0.35
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022222
| 0.081633
| 98
| 2
| 52
| 49
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0c8fb44c974bd512a2bb2c7adda845aab5a0f4fb
| 37
|
py
|
Python
|
src/__init__.py
|
benmack/classify-hls
|
ab9cf5c99b62544c8af7a92f7cf7f5a1e69bdcd7
|
[
"MIT"
] | 4
|
2019-04-15T12:15:46.000Z
|
2021-09-17T13:07:42.000Z
|
src/__init__.py
|
benmack/classify-hls
|
ab9cf5c99b62544c8af7a92f7cf7f5a1e69bdcd7
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
benmack/classify-hls
|
ab9cf5c99b62544c8af7a92f7cf7f5a1e69bdcd7
|
[
"MIT"
] | null | null | null |
from .configs import PROJECT_ROOT_DIR
| 37
| 37
| 0.891892
| 6
| 37
| 5.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081081
| 37
| 1
| 37
| 37
| 0.911765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0b214430e29f5f00700af7229bac0a53d4228431
| 35
|
py
|
Python
|
python/ql/test/experimental/dataflow/pep_328/start.py
|
timoles/codeql
|
2d24387e9e300bf03be35694816b1e76ae88a50c
|
[
"MIT"
] | 4,036
|
2020-04-29T00:09:57.000Z
|
2022-03-31T14:16:38.000Z
|
python/ql/test/experimental/dataflow/pep_328/start.py
|
baby636/codeql
|
097b6e5e3364ecc7103586d6feb308861e15538e
|
[
"MIT"
] | 2,970
|
2020-04-28T17:24:18.000Z
|
2022-03-31T22:40:46.000Z
|
python/ql/test/experimental/dataflow/pep_328/start.py
|
ScriptBox99/github-codeql
|
2ecf0d3264db8fb4904b2056964da469372a235c
|
[
"MIT"
] | 794
|
2020-04-29T00:28:25.000Z
|
2022-03-30T08:21:46.000Z
|
import package.subpackage1.moduleX
| 17.5
| 34
| 0.885714
| 4
| 35
| 7.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030303
| 0.057143
| 35
| 1
| 35
| 35
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0b365dd52c414e886bfcfe08f767cab7986eff1f
| 242
|
py
|
Python
|
Zad_Composite/Graphic.py
|
Paarzivall/Wzorce-Projektowe
|
aa4136f140ad02c0fc0de45709b5a01ca42b417f
|
[
"MIT"
] | null | null | null |
Zad_Composite/Graphic.py
|
Paarzivall/Wzorce-Projektowe
|
aa4136f140ad02c0fc0de45709b5a01ca42b417f
|
[
"MIT"
] | null | null | null |
Zad_Composite/Graphic.py
|
Paarzivall/Wzorce-Projektowe
|
aa4136f140ad02c0fc0de45709b5a01ca42b417f
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
class Graphic(ABC):
@abstractmethod
def draw(self):
pass
def add(self, graphic):
pass
def remove(self, graphic):
pass
def GetChild(self, child):
pass
| 15.125
| 35
| 0.590909
| 28
| 242
| 5.107143
| 0.5
| 0.146853
| 0.20979
| 0.251748
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.326446
| 242
| 16
| 36
| 15.125
| 0.877301
| 0
| 0
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.363636
| false
| 0.363636
| 0.090909
| 0
| 0.545455
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
0bb7ff43ca2d8ccb2172a1bfd291284f98c9d4e4
| 27
|
py
|
Python
|
segmentation_models_pytorch/unet_plus/__init__.py
|
maxjeblick/segmentation_models.pytorch
|
166c1cf133814d19fee452553c3ec530b610925a
|
[
"MIT"
] | null | null | null |
segmentation_models_pytorch/unet_plus/__init__.py
|
maxjeblick/segmentation_models.pytorch
|
166c1cf133814d19fee452553c3ec530b610925a
|
[
"MIT"
] | null | null | null |
segmentation_models_pytorch/unet_plus/__init__.py
|
maxjeblick/segmentation_models.pytorch
|
166c1cf133814d19fee452553c3ec530b610925a
|
[
"MIT"
] | null | null | null |
from .model import UnetPlus
| 27
| 27
| 0.851852
| 4
| 27
| 5.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 27
| 1
| 27
| 27
| 0.958333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f022610e380679936b3634b0b3cbcc394e0ae7e5
| 32
|
py
|
Python
|
tempo-api/src/app/constants.py
|
cuappdev/archives
|
061d0f9cccf278363ffaeb27fc655743b1052ae5
|
[
"MIT"
] | null | null | null |
tempo-api/src/app/constants.py
|
cuappdev/archives
|
061d0f9cccf278363ffaeb27fc655743b1052ae5
|
[
"MIT"
] | null | null | null |
tempo-api/src/app/constants.py
|
cuappdev/archives
|
061d0f9cccf278363ffaeb27fc655743b1052ae5
|
[
"MIT"
] | null | null | null |
# TODO - put all constants here
| 16
| 31
| 0.71875
| 5
| 32
| 4.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.21875
| 32
| 1
| 32
| 32
| 0.92
| 0.90625
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 1
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f03f6fc7a112fe67b3cfd644c0ceff0260143871
| 140
|
py
|
Python
|
benders-decomposition/src/standalone_facility_location_model/__init__.py
|
grzegorz-siekaniec/benders-decomposition-gurobi
|
5435e82c7ef4fe14fc53ff07b8eaa1516208b57c
|
[
"MIT"
] | 6
|
2021-05-31T10:23:18.000Z
|
2022-02-15T08:45:30.000Z
|
benders-decomposition/src/standalone_facility_location_model/__init__.py
|
grzegorz-siekaniec/benders-decomposition-gurobi
|
5435e82c7ef4fe14fc53ff07b8eaa1516208b57c
|
[
"MIT"
] | null | null | null |
benders-decomposition/src/standalone_facility_location_model/__init__.py
|
grzegorz-siekaniec/benders-decomposition-gurobi
|
5435e82c7ef4fe14fc53ff07b8eaa1516208b57c
|
[
"MIT"
] | null | null | null |
from .single_model_builder import SingleModelBuilder
from .single_model import SingleModel
from .solver import solve_using_standalone_model
| 35
| 52
| 0.892857
| 18
| 140
| 6.611111
| 0.611111
| 0.168067
| 0.252101
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 140
| 3
| 53
| 46.666667
| 0.929688
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f041ccf60ed14bbaf694f60fbe04f600abaac3e5
| 3,590
|
py
|
Python
|
tests/atest/serarch_order/test_search_order.py
|
bhirsz/robotframework-sherlock
|
53edb5f15517d8fbdf05eb0c84eb34332dcbf308
|
[
"Apache-2.0"
] | 2
|
2022-03-17T07:55:37.000Z
|
2022-03-17T08:18:44.000Z
|
tests/atest/serarch_order/test_search_order.py
|
bhirsz/robotframework-sherlock
|
53edb5f15517d8fbdf05eb0c84eb34332dcbf308
|
[
"Apache-2.0"
] | 16
|
2022-03-09T09:29:34.000Z
|
2022-03-14T20:29:38.000Z
|
tests/atest/serarch_order/test_search_order.py
|
bhirsz/robotframework-sherlock
|
53edb5f15517d8fbdf05eb0c84eb34332dcbf308
|
[
"Apache-2.0"
] | null | null | null |
from pathlib import Path
from .. import Tree, Keyword, AcceptanceTest
class TestSearchOrder1(AcceptanceTest):
ROOT = Path(Path(__file__).parent, "search_order_1")
TEST_PATH = ""
def test(self):
data = self.run_sherlock()
expected = Tree(
name="search_order_1",
children=[
Tree(
name="a.resource",
res_type="Resource",
keywords=[Keyword(name="Duplicated", used=0), Keyword(name="Keyword", used=1)],
),
Tree(name="suite.robot", keywords=[Keyword(name="Duplicated", used=1)]),
],
)
self.should_match_tree(expected, data)
class TestSearchOrder2(AcceptanceTest):
ROOT = Path(Path(__file__).parent, "search_order_2")
TEST_PATH = ""
def test(self):
data = self.run_sherlock()
expected = Tree(
name="search_order_2",
children=[
Tree(name="a.resource", res_type="Resource", keywords=[Keyword(name="Duplicated", used=0)]),
Tree(
name="b.resource",
res_type="Resource",
keywords=[Keyword(name="Duplicated", used=0), Keyword(name="Keyword", used=1)],
),
Tree(name="suite.robot", keywords=[Keyword(name="Duplicated", used=1)]),
],
)
self.should_match_tree(expected, data)
class TestSearchOrder3(AcceptanceTest):
ROOT = Path(Path(__file__).parent, "search_order_3")
TEST_PATH = ""
def test(self):
data = self.run_sherlock()
expected = Tree(
name="search_order_3",
children=[
Tree(name="a.resource", res_type="Resource", keywords=[Keyword(name="Duplicated in resource", used=1)]),
Tree(
name="b.resource",
res_type="Resource",
keywords=[Keyword(name="Duplicated in resource", used=0), Keyword(name="Keyword", used=1)],
),
Tree(name="suite.robot", keywords=[]),
],
)
self.should_match_tree(expected, data)
class TestSearchOrder4(AcceptanceTest):
ROOT = Path(Path(__file__).parent, "search_order_4")
TEST_PATH = ""
def test(self):
data = self.run_sherlock()
expected = Tree(
name="search_order_4",
children=[
Tree(name="a.resource", res_type="Resource", keywords=[Keyword(name="1", used=0)]),
Tree(name="b.resource", res_type="Resource", keywords=[Keyword(name="Keyword", used=1)]),
Tree(name="from_b.resource", res_type="Resource", keywords=[Keyword(name="1", used=1)]),
Tree(name="suite.robot", keywords=[]),
],
)
self.should_match_tree(expected, data)
class TestSearchOrder5(AcceptanceTest):
ROOT = Path(Path(__file__).parent, "search_order_5")
TEST_PATH = ""
def test(self):
data = self.run_sherlock()
expected = Tree(
name="search_order_5",
children=[
Tree(name="a.resource", res_type="Resource", keywords=[Keyword(name="Keyword", used=1)]),
Tree(
name="b.resource",
res_type="Resource",
keywords=[Keyword(name="Something that a.resource needs", used=1)],
),
Tree(name="suite.robot", keywords=[]),
],
)
self.should_match_tree(expected, data)
| 34.190476
| 120
| 0.536212
| 363
| 3,590
| 5.107438
| 0.134986
| 0.0863
| 0.122977
| 0.124056
| 0.901834
| 0.901834
| 0.901834
| 0.901834
| 0.774542
| 0.750809
| 0
| 0.013158
| 0.322563
| 3,590
| 104
| 121
| 34.519231
| 0.749178
| 0
| 0
| 0.647727
| 0
| 0
| 0.150975
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056818
| false
| 0
| 0.022727
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f04eb3805851d0b2af82b7fe3254d01079128486
| 20,264
|
py
|
Python
|
tests/test_api_requester.py
|
dda-dev/ornitho-client-python
|
94d09774026786c021f35cae8cc74b65a28075d9
|
[
"MIT"
] | 3
|
2020-06-17T17:58:54.000Z
|
2022-03-27T17:26:07.000Z
|
tests/test_api_requester.py
|
dda-dev/ornitho-client-python
|
94d09774026786c021f35cae8cc74b65a28075d9
|
[
"MIT"
] | null | null | null |
tests/test_api_requester.py
|
dda-dev/ornitho-client-python
|
94d09774026786c021f35cae8cc74b65a28075d9
|
[
"MIT"
] | 1
|
2021-12-17T13:13:10.000Z
|
2021-12-17T13:13:10.000Z
|
import json
from datetime import datetime
from unittest import TestCase
from unittest.mock import MagicMock, Mock
import pytz
import ornitho
from ornitho import (
APIException,
APIHttpException,
APIRequester,
AuthenticationException,
BadGatewayException,
GatewayTimeoutException,
ServiceUnavailableException,
)
ornitho.consumer_key = "ORNITHO_CONSUMER_KEY"
ornitho.consumer_secret = "ORNITHO_CONSUMER_SECRET"
ornitho.user_email = "ORNITHO_USER_EMAIL"
ornitho.user_pw = "ORNITHO_USER_PW"
ornitho.api_base = "ORNITHO_API_BASE"
class TestAPIRequester(TestCase):
def setUp(self):
self.requester = APIRequester()
def test_missing_config(self):
ornitho.consumer_key = None
self.assertRaises(RuntimeError, lambda: APIRequester())
ornitho.consumer_key = "ORNITHO_CONSUMER_KEY"
ornitho.consumer_secret = None
self.assertRaises(RuntimeError, lambda: APIRequester())
ornitho.consumer_secret = "ORNITHO_CONSUMER_SECRET"
ornitho.user_email = None
self.assertRaises(RuntimeError, lambda: APIRequester())
ornitho.user_email = "ORNITHO_USER_EMAIL"
ornitho.user_pw = None
self.assertRaises(RuntimeError, lambda: APIRequester())
ornitho.user_pw = "ORNITHO_USER_PW"
ornitho.api_base = None
self.assertRaises(RuntimeError, lambda: APIRequester())
ornitho.api_base = "ORNITHO_API_BASE"
def test_enter(self):
requester = self.requester.__enter__()
self.assertEqual(requester, self.requester)
def test_exit(self):
self.requester.close = Mock()
self.requester.__exit__()
self.requester.close.assert_called()
def test_close(self):
self.requester.session = Mock()
self.requester.close()
self.requester.session.close.assert_called()
def test_request(self):
# Case 1: no data key
self.requester.request_raw = MagicMock(
return_value=[[{"id": "1"}, {"id": "2"}], None]
)
response, pk = self.requester.request(method="get", url="test")
self.assertEqual(response, [{"id": "1"}, {"id": "2"}])
self.assertEqual(pk, None)
# Case 2: data is list
self.requester.request_raw = MagicMock(
return_value=[{"data": [{"id": "1"}, {"id": "2"}]}, None]
)
response, pk = self.requester.request(method="get", url="test")
self.assertEqual(response, [{"id": "1"}, {"id": "2"}])
self.assertEqual(pk, None)
# Case 3: data is dict
self.requester.request_raw = MagicMock(
return_value=[
{
"data": {
"sightings": [],
"forms": [
{
"full_form": "1",
"sightings": [
{"id": "1", "date": {"@timestamp": "1584918000"}}
],
}
],
}
},
"pagination_key",
]
)
response, pk = self.requester.request(method="post", url="test")
self.assertEqual(
response,
[
{
"date": {"@timestamp": "1584918000"},
"form": {"day": {"@timestamp": "1584918000"}, "full_form": "1"},
"id": "1",
}
],
)
self.assertEqual(pk, "pagination_key")
# Case 4: request all
self.requester.request_raw = MagicMock(
side_effect=[
[{"data": [{"id": "1"}]}, "pagination_key"],
[{"data": []}, "pagination_key"],
]
)
response, pk = self.requester.request(
method="get", url="test", pagination_key="pagination_key", request_all=True
)
self.assertEqual(response, [{"id": "1"}])
self.assertEqual(pk, "pagination_key")
# Case 5: response is bytes
self.requester.request_raw = MagicMock(return_value=[b"BYTES", None])
response, pk = self.requester.request(method="get", url="test")
self.assertEqual(response, b"BYTES")
self.assertEqual(pk, None)
# Case 6: response is dict and has no data-attribute
self.requester.request_raw = MagicMock(return_value=[{"sites": "1"}, "pk"])
response, pk = self.requester.request(method="get", url="test")
self.assertEqual(response, [{"sites": "1"}])
self.assertEqual(pk, "pk")
# Case 7: first JSON, then byte response – no real world case
self.requester.request_raw = MagicMock(
side_effect=[
[{"data": [{"id": "1"}]}, "pagination_key"],
[b"BYTES", "pagination_key"],
]
)
self.assertRaises(
APIException,
lambda: self.requester.request(method="get", url="test", request_all=True),
)
# Case 8: No Data received
self.requester.request_raw = MagicMock(return_value=[[], None])
response, pk = self.requester.request(method="get", url="test")
self.assertEqual(response, [])
self.assertEqual(pk, None)
def test_handle_error_response(self):
self.assertRaises(
AuthenticationException,
lambda: self.requester.handle_error_response(
response=Mock(status_code=401)
),
)
self.assertRaises(
BadGatewayException,
lambda: self.requester.handle_error_response(
response=Mock(status_code=502)
),
)
self.assertRaises(
GatewayTimeoutException,
lambda: self.requester.handle_error_response(
response=Mock(status_code=504)
),
)
self.assertRaises(
ServiceUnavailableException,
lambda: self.requester.handle_error_response(
response=Mock(status_code=503)
),
)
self.assertRaises(
APIHttpException,
lambda: self.requester.handle_error_response(response=Mock(status_code=0)),
)
def test_request_headers(self):
headers = self.requester.request_headers()
self.assertEqual(
headers, {"User-Agent": f"API Python Client/{ornitho.__version__}"}
)
    def test_request_raw(self):
        """Exercise APIRequester.request_raw against a fully mocked HTTP session.

        Covers content-type dispatch (JSON, PDF, HTML, unknown, missing header),
        HTTP error propagation, pagination-key extraction from response headers,
        and serialisation of date / naive-datetime / aware-datetime / bool
        values passed either as query parameters or in the request body.
        """
        # Case 1: GET Method — JSON body is parsed, pagination key taken from headers
        self.requester.session.request = MagicMock(
            return_value=Mock(
                status_code=200,
                headers={
                    "pagination_key": "new_key",
                    "Content-Type": "application/json; charset=utf-8",
                    "Content-Length": 23,
                },
                text='{"data": [{"id": "1"}]}',
            )
        )
        response, pk = self.requester.request_raw(
            method="get",
            url="test",
            pagination_key="key",
            params={"test": "param"},
            body={"test": "filter"},
        )
        self.assertEqual({"data": [{"id": "1"}]}, response)
        self.assertEqual(pk, "new_key")
        # Case 2: Other Method — no pagination_key header, so pk must be None
        self.requester.session.request = MagicMock(
            return_value=Mock(
                status_code=200,
                headers={
                    "Content-Type": "application/json; charset=utf-8",
                    "Content-Length": 23,
                },
                text='{"data": [{"id": "1"}]}',
            )
        )
        response, pk = self.requester.request_raw(
            method="post", url="test", pagination_key="key", body={"test": "filter"}
        )
        self.assertEqual({"data": [{"id": "1"}]}, response)
        self.assertEqual(pk, None)
        # Case 3: Error — non-2xx status is routed through handle_error_response
        self.requester.session.request = MagicMock(return_value=Mock(status_code=401))
        self.assertRaises(
            AuthenticationException,
            lambda: self.requester.request_raw(method="post", url="test"),
        )
        # Case 4: PDF — binary payloads are returned untouched
        self.requester.session.request = MagicMock(
            return_value=Mock(
                status_code=200,
                headers={"Content-Type": "application/pdf", "Content-Length": 3},
                content=b"PDF",
            )
        )
        response, pk = self.requester.request_raw(
            method="post",
            url="test",
            pagination_key="key",
            body={"test": "filter"},
            short_version=True,
        )
        self.assertEqual(b"PDF", response)
        self.assertEqual(pk, None)
        # Case 5: Unhandled content type
        # NOTE(review): "cotent" below looks like a typo for "content" — harmless
        # here because the unknown content type raises before the body is read,
        # but worth confirming against the implementation.
        self.requester.session.request = MagicMock(
            return_value=Mock(
                status_code=200,
                headers={"Content-Type": "application/foo", "Content-Length": 23},
                cotent=b'{"data": [{"id": "1"}]}',
            )
        )
        self.assertRaises(
            APIHttpException,
            lambda: self.requester.request_raw(method="post", url="test"),
        )
        # Case 6: No content type received — must raise the generic APIException
        self.requester.session.request = MagicMock(
            return_value=Mock(
                status_code=200,
                headers={"Content-Length": 23},
                text=b'{"data": [{"id": "1"}]}',
            )
        )
        self.assertRaises(
            APIException,
            lambda: self.requester.request_raw(method="post", url="test"),
        )
        # Case 7: Date as parameter — serialised as %d.%m.%Y in the query string
        self.requester.session.request = MagicMock(
            return_value=Mock(
                status_code=200,
                headers={
                    "pagination_key": "new_key",
                    "Content-Type": "application/json; charset=utf-8",
                    "Content-Length": 23,
                },
                text='{"data": [{"id": "1"}]}',
            )
        )
        test_date = datetime.now().date()
        response, pk = self.requester.request_raw(
            method="get",
            url="test",
            pagination_key="key",
            params={"test": test_date},
            short_version=True,
        )
        self.assertEqual({"data": [{"id": "1"}]}, response)
        self.assertEqual(pk, "new_key")
        self.requester.session.request.assert_called_with(
            "get",
            f"ORNITHO_API_BASEtest?user_email=ORNITHO_USER_EMAIL&user_pw=ORNITHO_USER_PW&pagination_key=key&short_version=1&test={test_date.strftime('%d.%m.%Y')}",
            data=None,
            headers=APIRequester.request_headers(),
        )
        # Case 8: Unaware datetime as parameter — microseconds stripped before formatting
        self.requester.session.request = MagicMock(
            return_value=Mock(
                status_code=200,
                headers={
                    "pagination_key": "new_key",
                    "Content-Type": "application/json; charset=utf-8",
                    "Content-Length": 23,
                },
                text='{"data": [{"id": "1"}]}',
            )
        )
        test_date = datetime.now()
        response, pk = self.requester.request_raw(
            method="get",
            url="test",
            pagination_key="key",
            params={"test": test_date},
            short_version=True,
        )
        self.assertEqual({"data": [{"id": "1"}]}, response)
        self.assertEqual(pk, "new_key")
        self.requester.session.request.assert_called_with(
            "get",
            f"ORNITHO_API_BASEtest?user_email=ORNITHO_USER_EMAIL&user_pw=ORNITHO_USER_PW&pagination_key=key&short_version=1&test={test_date.replace(microsecond=0).strftime('%d.%m.%Y')}",
            data=None,
            headers=APIRequester.request_headers(),
        )
        # Case 10: Date as body parameter (NOTE(review): numbering skips case 9)
        self.requester.session.request = MagicMock(
            return_value=Mock(
                status_code=200,
                headers={
                    "pagination_key": "new_key",
                    "Content-Type": "application/json; charset=utf-8",
                    "Content-Length": 23,
                },
                text='{"data": [{"id": "1"}]}',
            )
        )
        test_date = datetime.now().date()
        response, pk = self.requester.request_raw(
            method="get",
            url="test",
            pagination_key="key",
            body={"test": test_date},
            short_version=True,
        )
        self.assertEqual({"data": [{"id": "1"}]}, response)
        self.assertEqual(pk, "new_key")
        self.requester.session.request.assert_called_with(
            "get",
            f"ORNITHO_API_BASEtest?user_email=ORNITHO_USER_EMAIL&user_pw=ORNITHO_USER_PW&pagination_key=key&short_version=1",
            data=json.dumps({"test": test_date.strftime("%d.%m.%Y")}),
            headers=APIRequester.request_headers(),
        )
        # Case 11: Aware datetime as parameter — converted to local time, then made naive
        self.requester.session.request = MagicMock(
            return_value=Mock(
                status_code=200,
                headers={
                    "pagination_key": "new_key",
                    "Content-Type": "application/json; charset=utf-8",
                    "Content-Length": 23,
                },
                text='{"data": [{"id": "1"}]}',
            )
        )
        test_date = datetime.now(pytz.timezone("Europe/Berlin"))
        response, pk = self.requester.request_raw(
            method="get",
            url="test",
            pagination_key="key",
            params={"test": test_date},
            short_version=True,
        )
        self.assertEqual({"data": [{"id": "1"}]}, response)
        self.assertEqual(pk, "new_key")
        self.requester.session.request.assert_called_with(
            "get",
            f"ORNITHO_API_BASEtest?user_email=ORNITHO_USER_EMAIL&user_pw=ORNITHO_USER_PW&pagination_key=key&short_version=1&test={test_date.replace(microsecond=0).astimezone(datetime.now().astimezone().tzinfo).replace(tzinfo=None).strftime('%d.%m.%Y')}",
            data=None,
            headers=APIRequester.request_headers(),
        )
        # Case 12: Unaware datetime as body parameter
        self.requester.session.request = MagicMock(
            return_value=Mock(
                status_code=200,
                headers={
                    "pagination_key": "new_key",
                    "Content-Type": "application/json; charset=utf-8",
                    "Content-Length": 23,
                },
                text='{"data": [{"id": "1"}]}',
            )
        )
        test_date = datetime.now()
        response, pk = self.requester.request_raw(
            method="get",
            url="test",
            pagination_key="key",
            body={"test": test_date},
            short_version=True,
        )
        self.assertEqual({"data": [{"id": "1"}]}, response)
        self.assertEqual(pk, "new_key")
        self.requester.session.request.assert_called_with(
            "get",
            f"ORNITHO_API_BASEtest?user_email=ORNITHO_USER_EMAIL&user_pw=ORNITHO_USER_PW&pagination_key=key&short_version=1",
            data=json.dumps(
                {"test": test_date.replace(microsecond=0).strftime("%d.%m.%Y")}
            ),
            headers=APIRequester.request_headers(),
        )
        # Case 13: Aware datetime as body parameter
        self.requester.session.request = MagicMock(
            return_value=Mock(
                status_code=200,
                headers={
                    "pagination_key": "new_key",
                    "Content-Type": "application/json; charset=utf-8",
                    "Content-Length": 23,
                },
                text='{"data": [{"id": "1"}]}',
            )
        )
        test_date = datetime.now(pytz.timezone("Europe/Berlin"))
        response, pk = self.requester.request_raw(
            method="get",
            url="test",
            pagination_key="key",
            body={"test": test_date},
            short_version=True,
        )
        self.assertEqual({"data": [{"id": "1"}]}, response)
        self.assertEqual(pk, "new_key")
        self.requester.session.request.assert_called_with(
            "get",
            f"ORNITHO_API_BASEtest?user_email=ORNITHO_USER_EMAIL&user_pw=ORNITHO_USER_PW&pagination_key=key&short_version=1",
            data=json.dumps(
                {"test": test_date.replace(microsecond=0).strftime("%d.%m.%Y")}
            ),
            headers=APIRequester.request_headers(),
        )
        # Case 14: HTML Content Type — body returned as plain text
        self.requester.session.request = MagicMock(
            return_value=Mock(
                status_code=200,
                headers={
                    "Content-Type": "text/html; charset=UTF-8",
                    "Content-Length": 4,
                },
                text="HTML",
            )
        )
        response, pk = self.requester.request_raw(
            method="get",
            url="test",
            pagination_key="key",
            body={"test": "filter"},
            short_version=True,
        )
        self.assertEqual("HTML", response)
        self.assertEqual(pk, None)
        # Case 15: Boolean as parameter — serialised as 1/0 in the query string
        self.requester.session.request = MagicMock(
            return_value=Mock(
                status_code=200,
                headers={
                    "pagination_key": "new_key",
                    "Content-Type": "application/json; charset=utf-8",
                    "Content-Length": 23,
                },
                text='{"data": [{"id": "1"}]}',
            )
        )
        test_bool = True
        response, pk = self.requester.request_raw(
            method="get",
            url="test",
            pagination_key="key",
            params={"test": test_bool},
            short_version=True,
        )
        self.assertEqual({"data": [{"id": "1"}]}, response)
        self.assertEqual(pk, "new_key")
        self.requester.session.request.assert_called_with(
            "get",
            f"ORNITHO_API_BASEtest?user_email=ORNITHO_USER_EMAIL&user_pw=ORNITHO_USER_PW&pagination_key=key&short_version=1&test={1 if test_bool else 0}",
            data=None,
            headers=APIRequester.request_headers(),
        )
        # Case 16: First Line is not part of the JSON response (success)
        # A known API status banner before the JSON payload is tolerated.
        self.requester.session.request = MagicMock(
            return_value=Mock(
                status_code=200,
                headers={
                    "pagination_key": "new_key",
                    "Content-Type": "application/json; charset=utf-8",
                    "Content-Length": 43,
                },
                text='API message : Ihre Beobachtungsdaten wurden erfolgreich übermittelt, vielen Dank!\n{"data": [{"id": "1"}]}',
            )
        )
        response, pk = self.requester.request_raw(
            method="get",
            url="test",
            pagination_key="key",
            short_version=True,
        )
        self.assertEqual({"data": [{"id": "1"}]}, response)
        self.assertEqual(pk, "new_key")
        self.requester.session.request.assert_called_with(
            "get",
            f"ORNITHO_API_BASEtest?user_email=ORNITHO_USER_EMAIL&user_pw=ORNITHO_USER_PW&pagination_key=key&short_version=1",
            data=None,
            headers=APIRequester.request_headers(),
        )
        # Case 17: First Line is not part of the JSON response (error)
        # An unrecognised leading line must still surface as an APIException.
        self.requester.session.request = MagicMock(
            return_value=Mock(
                status_code=200,
                headers={
                    "pagination_key": "new_key",
                    "Content-Type": "application/json; charset=utf-8",
                    "Content-Length": 43,
                },
                text='A very stupid line!\n{"data": [{"id": "1"}]}',
            )
        )
        self.assertRaises(
            APIException,
            lambda: self.requester.request_raw(
                method="get",
                url="test",
                pagination_key="key",
                short_version=True,
            ),
        )
| 36.05694
| 254
| 0.52275
| 1,946
| 20,264
| 5.272867
| 0.093525
| 0.091219
| 0.064321
| 0.053796
| 0.837833
| 0.821168
| 0.792223
| 0.754702
| 0.712796
| 0.662703
| 0
| 0.01639
| 0.346625
| 20,264
| 561
| 255
| 36.121212
| 0.758535
| 0.038146
| 0
| 0.629032
| 0
| 0.010081
| 0.184476
| 0.060721
| 0
| 0
| 0
| 0
| 0.131048
| 1
| 0.018145
| false
| 0
| 0.014113
| 0
| 0.034274
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b2e9e7ced9ee505d2eabd7149e679f99dcb65ac6
| 454
|
py
|
Python
|
tests/test_gitlab.py
|
fsdaniel/marge-bot
|
065cb45b686258364f96c5f2a571bbd93e9c927d
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_gitlab.py
|
fsdaniel/marge-bot
|
065cb45b686258364f96c5f2a571bbd93e9c927d
|
[
"BSD-3-Clause"
] | 1
|
2019-02-06T21:54:55.000Z
|
2019-02-06T21:56:29.000Z
|
tests/test_gitlab.py
|
fsdaniel/marge-bot
|
065cb45b686258364f96c5f2a571bbd93e9c927d
|
[
"BSD-3-Clause"
] | 3
|
2021-02-19T18:40:12.000Z
|
2021-10-01T22:12:34.000Z
|
import marge.gitlab as gitlab
class TestVersion(object):
    """Tests for gitlab.Version string parsing and edition detection."""

    def test_parse(self):
        parsed = gitlab.Version.parse('9.2.2-ee')
        assert parsed == gitlab.Version(release=(9, 2, 2), edition='ee')

    def test_parse_no_edition(self):
        parsed = gitlab.Version.parse('9.4.0')
        assert parsed == gitlab.Version(release=(9, 4, 0), edition=None)

    def test_is_ee(self):
        # Only versions carrying the "-ee" suffix count as Enterprise Edition.
        assert gitlab.Version.parse('9.4.0-ee').is_ee
        assert not gitlab.Version.parse('9.4.0').is_ee
| 32.428571
| 98
| 0.660793
| 74
| 454
| 3.945946
| 0.324324
| 0.267123
| 0.246575
| 0.260274
| 0.383562
| 0.383562
| 0.212329
| 0.212329
| 0
| 0
| 0
| 0.048
| 0.174009
| 454
| 13
| 99
| 34.923077
| 0.730667
| 0
| 0
| 0
| 0
| 0
| 0.061674
| 0
| 0
| 0
| 0
| 0
| 0.444444
| 1
| 0.333333
| false
| 0
| 0.111111
| 0
| 0.555556
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
65161d19ddcdbb10e382bd524ce5aa1c527b235c
| 109
|
py
|
Python
|
config_parser/config.py
|
loftwah/avatar-generator
|
9987c0a1532fa8cea02d9ed34d09b3ba0d548b41
|
[
"MIT"
] | null | null | null |
config_parser/config.py
|
loftwah/avatar-generator
|
9987c0a1532fa8cea02d9ed34d09b3ba0d548b41
|
[
"MIT"
] | null | null | null |
config_parser/config.py
|
loftwah/avatar-generator
|
9987c0a1532fa8cea02d9ed34d09b3ba0d548b41
|
[
"MIT"
] | null | null | null |
from config_parser.parser import new_config
# Path to the JSON configuration file (resolved relative to the working directory).
_CONFIG_PATH = 'config.json'
# Module-level configuration object, parsed once at import time.
CONFIG = new_config(_CONFIG_PATH)
| 18.166667
| 43
| 0.816514
| 16
| 109
| 5.125
| 0.4375
| 0.219512
| 0.365854
| 0.463415
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.110092
| 109
| 5
| 44
| 21.8
| 0.845361
| 0
| 0
| 0
| 0
| 0
| 0.100917
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
651671c39d72709be70ace4400f9c9f5be1b6590
| 23
|
py
|
Python
|
tmuxp/_vendor/__init__.py
|
enchanter/tmuxp
|
b8adcf94da2ea45dd38c67681fef74054d30a68b
|
[
"BSD-3-Clause"
] | 2
|
2018-02-05T01:27:07.000Z
|
2018-06-10T02:02:25.000Z
|
tmuxp/_vendor/__init__.py
|
wrongwaycn/tmuxp
|
367cca3eb1b3162bb7e4801fe752b520f1f8eefa
|
[
"BSD-3-Clause"
] | null | null | null |
tmuxp/_vendor/__init__.py
|
wrongwaycn/tmuxp
|
367cca3eb1b3162bb7e4801fe752b520f1f8eefa
|
[
"BSD-3-Clause"
] | null | null | null |
from . import colorama
| 11.5
| 22
| 0.782609
| 3
| 23
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6525ab5bb41a2ae7bbbb15995f9463d45f752cf0
| 33
|
py
|
Python
|
tabnine-vim/third_party/ycmd/ycmd/tests/python/testdata/project/settings_none_extra_conf.py
|
MrMonk3y/vimrc
|
950230fb3fd7991d1234c2ab516ec03245945677
|
[
"MIT"
] | 10
|
2020-07-21T21:59:54.000Z
|
2021-07-19T11:01:47.000Z
|
tabnine-vim/third_party/ycmd/ycmd/tests/python/testdata/project/settings_none_extra_conf.py
|
MrMonk3y/vimrc
|
950230fb3fd7991d1234c2ab516ec03245945677
|
[
"MIT"
] | null | null | null |
tabnine-vim/third_party/ycmd/ycmd/tests/python/testdata/project/settings_none_extra_conf.py
|
MrMonk3y/vimrc
|
950230fb3fd7991d1234c2ab516ec03245945677
|
[
"MIT"
] | 1
|
2021-01-30T18:17:01.000Z
|
2021-01-30T18:17:01.000Z
|
def Settings( **kwargs ):
    """ycmd extra-conf hook that deliberately supplies no settings.

    Accepts and ignores any keyword arguments; always returns None so the
    caller falls back to its defaults.
    """
    return None
| 11
| 25
| 0.636364
| 4
| 33
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.212121
| 33
| 2
| 26
| 16.5
| 0.807692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
6549861c959d89feb120ec0f1f0789b75778033e
| 40
|
py
|
Python
|
applications/baseapp/admin/__init__.py
|
vigo/django-project-template
|
a0458c45934356ab8b33969fdcd4bb6f41a19548
|
[
"MIT"
] | 13
|
2017-09-22T11:49:16.000Z
|
2019-12-20T18:53:50.000Z
|
applications/baseapp/admin/__init__.py
|
vigo/django-project-template
|
a0458c45934356ab8b33969fdcd4bb6f41a19548
|
[
"MIT"
] | null | null | null |
applications/baseapp/admin/__init__.py
|
vigo/django-project-template
|
a0458c45934356ab8b33969fdcd4bb6f41a19548
|
[
"MIT"
] | 1
|
2021-12-19T10:57:31.000Z
|
2021-12-19T10:57:31.000Z
|
from .user import *
from .base import *
| 13.333333
| 19
| 0.7
| 6
| 40
| 4.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 40
| 2
| 20
| 20
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
330d368d1b9867b95e0a592fc1539332fb7dfcf6
| 5,189
|
py
|
Python
|
mazegame/mazegame1.py
|
kantel/pygamezero
|
93c202a2bd5bcc827eabe952575b7714b36e4b9d
|
[
"MIT"
] | 1
|
2020-06-29T00:36:07.000Z
|
2020-06-29T00:36:07.000Z
|
mazegame/mazegame1.py
|
kantel/pygamezero
|
93c202a2bd5bcc827eabe952575b7714b36e4b9d
|
[
"MIT"
] | null | null | null |
mazegame/mazegame1.py
|
kantel/pygamezero
|
93c202a2bd5bcc827eabe952575b7714b36e4b9d
|
[
"MIT"
] | null | null | null |
# Simple Maze Game with Pygame Zero (v 1.2) , Python 3
# Stage 1 (initialisation and collision detection)
# Assets: DawnLike tileset (CC BY 4.0) by DawnBringer and DragonDePlatino
# (https://opengameart.org/content/dawnlike-16x16-universal-rogue-like-tileset-v181)
# Jörg Kantel 2022 (MIT licence)
import pgzrun
# WIDTH: 25 tiles of 16 pixels each + a 20 pixel margin on each side
WIDTH = 440
# HEIGHT: 25 tiles of 16 pixels each + a 20 pixel margin on each side
HEIGHT = 440
TITLE = "Mazegame Stage 1"
WALL = 63  # tile index marking an impassable wall
CHEST = 22  # tile index marking a treasure chest
margin_x = 20  # horizontal playfield margin in pixels
margin_y = 20  # vertical playfield margin in pixels
sz = 16 # step / tile size in pixels
# 25x25 tile map: 63 = wall, 22 = chest, -1 = walkable floor
maze_map = [[63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63],
            [63,-1,-1,63,63,63,63,63,63,63,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,63,63,63,63,63],
            [63,-1,-1,63,63,63,63,63,63,63,-1,-1,63,63,63,63,63,63,-1,-1,63,63,63,63,63],
            [63,-1,-1,-1,-1,-1,-1,-1,63,63,-1,-1,63,63,63,63,63,63,-1,-1,63,63,63,63,63],
            [63,-1,-1,-1,-1,-1,-1,-1,63,63,-1,-1,63,63,63,-1,-1,-1,-1,-1,-1,-1,-1,63,63],
            [63,63,63,63,63,63,-1,-1,63,63,-1,-1,63,63,63,-1,-1,-1,-1,-1,-1,-1,-1,63,63],
            [63,63,63,63,63,63,-1,-1,63,63,-1,-1,63,63,63,63,63,63,-1,-1,63,63,63,63,63],
            [63,63,63,63,63,63,-1,-1,63,63,-1,-1,-1,-1,63,63,63,63,-1,-1,63,63,63,63,63],
            [63,-1,-1,63,63,63,-1,-1,-1,-1,-1,-1,-1,-1,63,63,63,63,22,-1,63,63,63,63,63],
            [63,-1,-1,63,63,63,-1,-1,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63],
            [63,-1,-1,-1,-1,-1,-1,-1,-1,-1,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63],
            [63,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,63,63,63,63,63,63,63,63],
            [63,63,63,63,63,63,63,63,63,63,63,63,-1,-1,-1,-1,-1,63,63,63,63,63,-1,22,63],
            [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,-1,-1,63,63,63,63,63,-1,-1,63],
            [63,-1,-1,63,63,63,63,63,63,63,63,63,63,63,63,-1,-1,-1,-1,-1,-1,-1,-1,-1,63],
            [63,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,63],
            [63,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,63,63,63,63,63,63,63,63,63,63,63,63,63],
            [63,63,63,63,63,63,63,63,63,63,-1,-1,63,63,63,63,63,63,63,63,63,63,63,63,63],
            [63,63,63,63,63,63,63,63,63,63,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,63],
            [63,22,-1,-1,63,63,63,63,63,63,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,63],
            [63,-1,-1,-1,63,63,63,63,63,63,63,63,63,63,63,63,63,63,-1,-1,63,63,63,63,63],
            [63,-1,-1,-1,-1,-1,63,63,63,63,63,63,63,63,63,63,63,63,-1,-1,63,63,63,63,63],
            [63,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,63,63,63,63,-1,-1,-1,-1,-1,-1,-1,-1,63],
            [63,63,63,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,63],
            [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]]
# Actor lists plus their top-left pixel positions (the *_pos lists are used
# for collision detection).
walls = []
chests = []
walls_pos = []
chests_pos = []
# Build wall and chest actors from the tile map.
# NOTE: Actor is a name injected by the Pygame Zero runtime (pgzrun).
for y in range(25):
    for x in range(25):
        if maze_map[y][x] == WALL:
            wall = Actor("wall16")
            wall.topleft = margin_x + x*sz, margin_y + y*sz
            walls.append(wall)
            walls_pos.append((margin_x + x*sz, margin_y + y*sz))
        if maze_map[y][x] == CHEST:
            chest = Actor("chest16")
            chest.topleft = margin_x + x*sz, margin_y + y*sz
            chests.append(chest)
            chests_pos.append((margin_x + x*sz, margin_y + y*sz))
# The player starts at tile (1, 1), just inside the outer wall.
rogue = Actor("rogue16")
rogue_x = 1
rogue_y = 1
rogue.topleft = margin_x + rogue_x*sz, margin_y + rogue_y*sz
def update():
    """Pygame Zero frame hook: apply the pending movement request.

    Consumes the direction stored in the module-level `dir` flag (set by
    on_key_down; note it shadows the builtin `dir`), moves the rogue one
    tile in that direction unless a wall occupies the target tile, and
    clears the flag either way.
    """
    global dir, rogue_x, rogue_y
    # Tile deltas per direction request.
    deltas = {
        "left": (-1, 0),
        "right": (1, 0),
        "up": (0, -1),
        "down": (0, 1),
    }
    step = deltas.get(dir)
    if step is None:
        return
    dx, dy = step
    dir = None  # the request is consumed even when the move is blocked
    target = (margin_x + (rogue_x + dx) * sz, margin_y + (rogue_y + dy) * sz)
    if target not in walls_pos:  # collision detection against wall positions
        rogue.topleft = target
        rogue_x += dx
        rogue_y += dy
def draw():
    """Pygame Zero frame hook: paint background, maze tiles, chests, player."""
    screen.fill((90, 90, 90))
    # Walls first, then chests — same order the original lists were built in.
    for actor in walls + chests:
        actor.draw()
    rogue.draw()
def on_key_down(key):
    """Pygame Zero input hook: record a movement request; ESCAPE quits."""
    global dir
    for pressed, direction in (
        (keys.LEFT, "left"),
        (keys.RIGHT, "right"),
        (keys.UP, "up"),
        (keys.DOWN, "down"),
    ):
        if key == pressed:
            dir = direction
            break
    # Deliberately a separate check (not elif), as in the original flow.
    if key == keys.ESCAPE:
        print("Bye, bye, Baby!")
        quit()
pgzrun.go()
| 41.846774
| 89
| 0.521873
| 1,073
| 5,189
| 2.4315
| 0.09972
| 0.496742
| 0.637026
| 0.735914
| 0.675738
| 0.655807
| 0.655807
| 0.655807
| 0.655424
| 0.613262
| 0
| 0.269513
| 0.23704
| 5,189
| 124
| 90
| 41.846774
| 0.389492
| 0.096743
| 0
| 0.152381
| 0
| 0
| 0.01733
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028571
| false
| 0
| 0.009524
| 0
| 0.038095
| 0.009524
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
33256d31644f6f927df3da243a2fdb3855cfd98a
| 153
|
py
|
Python
|
temperature.py
|
mapostig/code-refinery-bq
|
755992a11dd5adf2067055fe89ad0df6a7fa8c5b
|
[
"Apache-2.0"
] | null | null | null |
temperature.py
|
mapostig/code-refinery-bq
|
755992a11dd5adf2067055fe89ad0df6a7fa8c5b
|
[
"Apache-2.0"
] | 1
|
2021-06-07T10:31:31.000Z
|
2021-06-07T10:36:49.000Z
|
temperature.py
|
mapostig/code-refinery-bq
|
755992a11dd5adf2067055fe89ad0df6a7fa8c5b
|
[
"Apache-2.0"
] | 1
|
2021-06-07T09:24:09.000Z
|
2021-06-07T09:24:09.000Z
|
def fahrenheit_to_celsius(temp_f):
    """Convert a temperature from degrees Fahrenheit to degrees Celsius.

    Args:
        temp_f: Temperature in Fahrenheit.

    Returns:
        The equivalent temperature in Celsius.
    """
    # Same expression as the original so float results are bit-identical.
    return (temp_f - 32) * (5.0 / 9.0)
| 25.5
| 52
| 0.647059
| 24
| 153
| 3.875
| 0.625
| 0.258065
| 0.408602
| 0.494624
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050847
| 0.228758
| 153
| 5
| 53
| 30.6
| 0.737288
| 0.287582
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
33295011a9c61c43bae5620aad6146c28c11ce0e
| 131
|
py
|
Python
|
autocluster/utils/__init__.py
|
wywongbd/autocluster
|
f6c938949c7ef41e28820c4406473120a07fffc5
|
[
"BSD-3-Clause"
] | 24
|
2019-09-08T10:09:50.000Z
|
2022-03-23T13:24:20.000Z
|
autocluster/utils/__init__.py
|
wywongbd/autocluster
|
f6c938949c7ef41e28820c4406473120a07fffc5
|
[
"BSD-3-Clause"
] | 3
|
2020-06-04T22:44:05.000Z
|
2021-12-01T07:33:20.000Z
|
autocluster/utils/__init__.py
|
wywongbd/autocluster
|
f6c938949c7ef41e28820c4406473120a07fffc5
|
[
"BSD-3-Clause"
] | 11
|
2020-01-03T10:38:28.000Z
|
2022-02-03T08:18:53.000Z
|
from .clusterutils import *
from .metafeatures import *
from .logutils import *
from .plotting import *
from .stringutils import *
| 21.833333
| 27
| 0.770992
| 15
| 131
| 6.733333
| 0.466667
| 0.39604
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152672
| 131
| 5
| 28
| 26.2
| 0.90991
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3331fdbe1cdaa73ad156f2f24d7420b71b4559fb
| 26
|
py
|
Python
|
easyai/solver/__init__.py
|
lpj0822/image_point_cloud_det
|
7b20e2f42f3f2ff4881485da58ad188a1f0d0e0f
|
[
"MIT"
] | 1
|
2020-09-05T09:18:56.000Z
|
2020-09-05T09:18:56.000Z
|
easyai/solver/__init__.py
|
lpj0822/image_point_cloud_det
|
7b20e2f42f3f2ff4881485da58ad188a1f0d0e0f
|
[
"MIT"
] | 8
|
2020-04-20T02:18:55.000Z
|
2022-03-12T00:24:50.000Z
|
easyai/solver/__init__.py
|
lpj0822/image_point_cloud_det
|
7b20e2f42f3f2ff4881485da58ad188a1f0d0e0f
|
[
"MIT"
] | null | null | null |
from . import lr_scheduler
| 26
| 26
| 0.846154
| 4
| 26
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 26
| 1
| 26
| 26
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
336b4a366394e29fefb86abcf4f9b68b44b6dda4
| 93
|
py
|
Python
|
clickhouse_sqlalchemy/drivers/__init__.py
|
hodgesrm/clickhouse-sqlalchemy
|
ee0a98af063483d8e6b7c7fdc5724ed2b9738d64
|
[
"MIT"
] | 251
|
2017-03-30T08:53:43.000Z
|
2022-03-30T16:54:30.000Z
|
clickhouse_sqlalchemy/drivers/__init__.py
|
hodgesrm/clickhouse-sqlalchemy
|
ee0a98af063483d8e6b7c7fdc5724ed2b9738d64
|
[
"MIT"
] | 162
|
2017-04-28T22:45:35.000Z
|
2022-03-22T06:24:19.000Z
|
clickhouse_sqlalchemy/drivers/__init__.py
|
hodgesrm/clickhouse-sqlalchemy
|
ee0a98af063483d8e6b7c7fdc5724ed2b9738d64
|
[
"MIT"
] | 86
|
2017-04-25T13:17:32.000Z
|
2022-03-22T04:11:18.000Z
|
from . import base
from .http import base as http_driver
# Wire the HTTP driver in as the package-level default dialect at import time.
base.dialect = http_driver.dialect
| 18.6
| 37
| 0.795699
| 15
| 93
| 4.8
| 0.466667
| 0.277778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150538
| 93
| 4
| 38
| 23.25
| 0.911392
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
68267523f6c615023f386e46167d919164a13636
| 114
|
py
|
Python
|
src/footings_idi_model/models/extract_models/__init__.py
|
dustindall/idi-model
|
5d026f4756f03f9cb797de5a8f0c3c6d2b349ccb
|
[
"BSD-3-Clause"
] | 2
|
2020-10-06T15:52:12.000Z
|
2020-11-30T19:07:35.000Z
|
src/footings_idi_model/models/extract_models/__init__.py
|
dustindall/idi-model
|
5d026f4756f03f9cb797de5a8f0c3c6d2b349ccb
|
[
"BSD-3-Clause"
] | 29
|
2020-06-28T12:22:59.000Z
|
2021-04-21T11:03:07.000Z
|
src/footings_idi_model/models/extract_models/__init__.py
|
footings/footings-idi-model
|
5d026f4756f03f9cb797de5a8f0c3c6d2b349ccb
|
[
"BSD-3-Clause"
] | 1
|
2020-06-24T09:54:46.000Z
|
2020-06-24T09:54:46.000Z
|
from .active_lives import ActiveLivesValEMD
from .disabled_lives import DisabledLivesProjEMD, DisabledLivesValEMD
| 38
| 69
| 0.894737
| 11
| 114
| 9.090909
| 0.727273
| 0.22
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078947
| 114
| 2
| 70
| 57
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
68364af04bf991f4770c2a44c5768f6fab473bf5
| 30
|
py
|
Python
|
main.py
|
actions-iplist-test/actions-iplist-test-public
|
0ce88b8fd41a86df46801b7a1e00cfb1d65f2ca9
|
[
"MIT"
] | null | null | null |
main.py
|
actions-iplist-test/actions-iplist-test-public
|
0ce88b8fd41a86df46801b7a1e00cfb1d65f2ca9
|
[
"MIT"
] | null | null | null |
main.py
|
actions-iplist-test/actions-iplist-test-public
|
0ce88b8fd41a86df46801b7a1e00cfb1d65f2ca9
|
[
"MIT"
] | null | null | null |
print("Test program was run")
| 15
| 29
| 0.733333
| 5
| 30
| 4.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 30
| 1
| 30
| 30
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
68b37b9b8b8868b780da3174d62edf7535cd6d11
| 86
|
py
|
Python
|
dglt/train/__init__.py
|
uta-smile/CD-MVGNN
|
b48f4cd14befed298980a83edb417ab6809f0af6
|
[
"MIT"
] | 3
|
2022-02-06T09:13:51.000Z
|
2022-02-19T15:03:35.000Z
|
dglt/train/__init__.py
|
uta-smile/CD-MVGNN
|
b48f4cd14befed298980a83edb417ab6809f0af6
|
[
"MIT"
] | 1
|
2022-02-14T23:16:27.000Z
|
2022-02-14T23:16:27.000Z
|
dglt/train/__init__.py
|
uta-smile/CD-MVGNN
|
b48f4cd14befed298980a83edb417ab6809f0af6
|
[
"MIT"
] | null | null | null |
from dglt.train.prediction.make_predictions import make_predictions, write_prediction
| 43
| 85
| 0.895349
| 11
| 86
| 6.727273
| 0.727273
| 0.405405
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05814
| 86
| 1
| 86
| 86
| 0.91358
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cc17f696883677a22566b1c06fffa1bd81fe7570
| 573
|
py
|
Python
|
KernelGenerator/kernelgenerator/models/__init__.py
|
linusseelinger/ExaHyPE-Tsunami
|
92a6e14926862e1584ef1e935874c91d252e8112
|
[
"BSD-3-Clause"
] | null | null | null |
KernelGenerator/kernelgenerator/models/__init__.py
|
linusseelinger/ExaHyPE-Tsunami
|
92a6e14926862e1584ef1e935874c91d252e8112
|
[
"BSD-3-Clause"
] | null | null | null |
KernelGenerator/kernelgenerator/models/__init__.py
|
linusseelinger/ExaHyPE-Tsunami
|
92a6e14926862e1584ef1e935874c91d252e8112
|
[
"BSD-3-Clause"
] | 1
|
2021-04-08T16:12:18.000Z
|
2021-04-08T16:12:18.000Z
|
__all__ = ["adjustSolutionModel", "amrRoutinesModel", "boundaryConditionsModel", "configurationParametersModel", "converterModel", "deltaDistributionModel", "dgMatrixModel", "faceIntegralModel", "fusedSpaceTimePredictorVolumeIntegralModel", "fvBoundaryLayerExtractionModel", "fvGhostLayerFillingModel", "fvGhostLayerFillingAtBoundaryModel", "fvSolutionUpdateModel", "gemmsCPPModel", "gemmsGeneratorModel", "kernelsHeaderModel", "limiterModel", "matrixUtilsModel", "quadratureModel", "riemannModel", "solutionUpdateModel", "stableTimeStepSizeModel", "surfaceIntegralModel"]
| 286.5
| 572
| 0.832461
| 24
| 573
| 19.708333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04363
| 573
| 1
| 573
| 573
| 0.863139
| 0
| 0
| 0
| 0
| 0
| 0.820244
| 0.431065
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0bc3f87e0fd53760f2e73059b2e680359a4192c5
| 3,556
|
py
|
Python
|
tests/test_output_parser.py
|
DavidWylie/RollWitch
|
9fe16db2117b1cbce02d2206cd529c4bfcc93f55
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_output_parser.py
|
DavidWylie/RollWitch
|
9fe16db2117b1cbce02d2206cd529c4bfcc93f55
|
[
"BSD-3-Clause"
] | 1
|
2020-10-26T17:29:27.000Z
|
2020-10-27T13:43:44.000Z
|
tests/test_output_parser.py
|
DavidWylie/RollWitch
|
9fe16db2117b1cbce02d2206cd529c4bfcc93f55
|
[
"BSD-3-Clause"
] | null | null | null |
from unittest import TestCase
from roll_witch.rolling.output import StandardOutputWriter, TargetedOutputWriter
from roll_witch.rolling.input.spec.operation import RollSpec
from roll_witch.rolling.roller import RollResult
class TestStandardOutputWriter(TestCase):
    """Tests for StandardOutputWriter result-string formatting."""

    def test_build_result_string(self):
        spec = RollSpec(dice_sides=10, dice_count=2)
        result = RollResult(spec=spec)
        for roll in (3, 4):
            result.append_roll(roll)
        actual = StandardOutputWriter().build_result_string(
            roll_result=result, total_string="totalString", user="tester"
        )
        self.assertEqual("tester Roll: totalString Result: 7", actual)
class TestTargetedOutputWriter(TestCase):
    """Tests for TargetedOutputWriter success/failure reporting."""

    def _render(self, met_target):
        # Build a 2d10-vs-target-5 roll totalling 7 and render its result line.
        spec = RollSpec(dice_sides=10, dice_count=2, target_number=5)
        result = RollResult(spec=spec)
        result.append_roll(3)
        result.append_roll(4)
        result.met_target = met_target
        return TargetedOutputWriter().build_result_string(
            roll_result=result, total_string="totalString", user="tester"
        )

    def test_build_result_string_met_target(self):
        self.assertEqual(
            "tester Roll: totalString Total: 7 Target: 5 Result: Success",
            self._render(True),
        )

    def test_build_result_string_missed_target(self):
        self.assertEqual(
            "tester Roll: totalString Total: 7 Target: 5 Result: Failed",
            self._render(False),
        )
class TestBaseOutputWriter(TestCase):
    """Tests for the writer behavior shared via the base class (write_output,
    build_total_string), exercised through StandardOutputWriter."""

    @staticmethod
    def _result_from(rolls, modifier=None):
        # Build a 2d10 RollResult from the given roll values; include a
        # modifier in the spec only when one is supplied.
        kwargs = {"dice_sides": 10, "dice_count": 2}
        if modifier is not None:
            kwargs["modifier"] = modifier
        result = RollResult(spec=RollSpec(**kwargs))
        for value in rolls:
            result.append_roll(value)
        return result

    def test_write_output(self):
        result = self._result_from([3, 4])
        output = StandardOutputWriter().write_output(roll_result=result, user="tester")
        self.assertEqual("tester Roll: [3, 4] = 7 Result: 7", output)

    def test_build_total_string(self):
        result = self._result_from([3, 4])
        output = StandardOutputWriter().build_total_string(roll_result=result)
        self.assertEqual("[3, 4] = 7", output)

    def test_build_total_string_with_modifier(self):
        result = self._result_from([5, 4], modifier=7)
        result.apply_modifier(7)
        output = StandardOutputWriter().build_total_string(roll_result=result)
        self.assertEqual("[5, 4] = 9 + 7", output)
| 41.348837
| 94
| 0.716535
| 436
| 3,556
| 5.5
| 0.12844
| 0.137615
| 0.080067
| 0.100083
| 0.834445
| 0.829441
| 0.805254
| 0.775646
| 0.73186
| 0.706839
| 0
| 0.01763
| 0.202475
| 3,556
| 85
| 95
| 41.835294
| 0.827927
| 0
| 0
| 0.6
| 0
| 0
| 0.074522
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 1
| 0.085714
| false
| 0
| 0.057143
| 0
| 0.185714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0be2ff7a6f3c309aba7a49828d2e9a15115071f8
| 185
|
py
|
Python
|
trequests/__init__.py
|
fossabot/trequests
|
778345a1564a8039e3dd18d5bcf1f2df2bda327a
|
[
"MIT"
] | 2
|
2021-07-12T09:49:25.000Z
|
2021-07-12T15:36:48.000Z
|
trequests/__init__.py
|
fossabot/trequests
|
778345a1564a8039e3dd18d5bcf1f2df2bda327a
|
[
"MIT"
] | 1
|
2022-01-08T09:46:27.000Z
|
2022-01-08T09:46:27.000Z
|
trequests/__init__.py
|
fossabot/trequests
|
778345a1564a8039e3dd18d5bcf1f2df2bda327a
|
[
"MIT"
] | 3
|
2021-07-12T17:49:41.000Z
|
2022-01-08T09:43:53.000Z
|
from .sessions import Session
from .sessions import Session as HttpClient
from .api import get, options, head, post, put, patch, delete
from . import exceptions
from . import structures
| 37
| 61
| 0.794595
| 26
| 185
| 5.653846
| 0.615385
| 0.163265
| 0.244898
| 0.340136
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145946
| 185
| 5
| 62
| 37
| 0.93038
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.