hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c7d7eced2d876cbfd38ccc72b5dcda7eac936c94
| 2,046
|
py
|
Python
|
fabsim/UQP/uqp.py
|
arabnejad/FabSim4
|
c3c0c1af7a625b90c8a6caf01e2b8c0fcb661716
|
[
"BSD-3-Clause"
] | null | null | null |
fabsim/UQP/uqp.py
|
arabnejad/FabSim4
|
c3c0c1af7a625b90c8a6caf01e2b8c0fcb661716
|
[
"BSD-3-Clause"
] | null | null | null |
fabsim/UQP/uqp.py
|
arabnejad/FabSim4
|
c3c0c1af7a625b90c8a6caf01e2b8c0fcb661716
|
[
"BSD-3-Clause"
] | null | null | null |
# This file contains prototype UQP implementations.
#
# These patterns should be purposed for specific settings.
# As such, they do not contain a @task descriptor.
import os
"""
UQP 1-aleatoric: aleatoric acyclic coupled UQ
Runs an initial model, which has an uncertainty caused by
probabilistic variability. Each output of the initial model is
then analysed, and resulting uncertainties quantified.
"""
def uqp1_aleatoric(model_exec, collation_function, **kwargs):
pass
"""
UQP 2-aleatoric: aleatoric acyclic coupled UQ
Runs an initial model, which has an uncertainty caused by
uncertain inputs. Each output of the initial model is
then analysed, and resulting uncertainties quantified.
"""
def uqp1_epistemic(input_space, sampling_function, model_exec,
collation_function, **kwargs):
pass
"""
UQP 2-aleatoric: aleatoric acyclic coupled UQ
Runs an initial model, which has an uncertainty caused by
probabilistic variability. Each output of the initial model is
then ported and serves as an input for a second model.
The translation_function does the porting, and in this case,
it takes results directly.
The output of the second model ensemble is then analysed, and
resulting uncertainties quantified.
"""
def uqp2_aleatoric(model1_exec, translation_function, model2_exec,
collation_function, **kwargs):
pass
"""
UQP 2-epistemic: epistemic acyclic coupled UQ
Runs an initial model, which has an uncertainty caused by
uncertain inputs. Each output of the initial model is
then ported and serves as an input for a second model.
The translation_function does the porting, and in this case,
it could take results directly, or perform a resampling.
The output of the second model ensemble is then analysed, and
resulting uncertainties quantified.
"""
def uqp2_epistemic(input_space, sampling_function, model1_exec,
translation_function, model2_exec,
collation_function, **kwargs):
pass
| 27.648649
| 66
| 0.743891
| 276
| 2,046
| 5.434783
| 0.315217
| 0.064
| 0.044
| 0.053333
| 0.84
| 0.793333
| 0.793333
| 0.790667
| 0.790667
| 0.790667
| 0
| 0.007389
| 0.206256
| 2,046
| 73
| 67
| 28.027397
| 0.916256
| 0.075758
| 0
| 0.538462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.307692
| false
| 0.307692
| 0.076923
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
c7ec7ff50c9e3ded960bd407c9815c22e39fd31a
| 13,768
|
py
|
Python
|
tests/test_stlcontainers.py
|
gidden/cyclopts
|
e346b1721c8d8722af2862823844ab2e7864141b
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_stlcontainers.py
|
gidden/cyclopts
|
e346b1721c8d8722af2862823844ab2e7864141b
|
[
"BSD-3-Clause"
] | 6
|
2015-01-26T18:31:36.000Z
|
2015-02-24T18:28:41.000Z
|
tests/test_stlcontainers.py
|
gidden/cyclopts
|
e346b1721c8d8722af2862823844ab2e7864141b
|
[
"BSD-3-Clause"
] | null | null | null |
"""Tests the part of stlconverters that is accessible from Python."""
###################
### WARNING!!! ###
###################
# This file has been autogenerated
from __future__ import print_function
from unittest import TestCase
import nose
from nose.tools import assert_equal, assert_not_equal, assert_raises, raises, \
assert_almost_equal, assert_true, assert_false, assert_in
from numpy.testing import assert_array_equal, assert_array_almost_equal
import os
import numpy as np
from collections import Container, Mapping
from cyclopts import stlcontainers
# Vector Int
# Vector Double
# MapIntDouble
def test_map_int_double():
m = stlcontainers.MapIntDouble()
uismap = isinstance(-65.5555, Mapping)
m[1] = 18
m[42] = -65.5555
import pprint
pprint.pprint(m)
assert_equal(len(m), 2)
if uismap:
for key, value in m[42].items():
print(key, value, -65.5555[key])
if isinstance(value, np.ndarray):
assert_almost_equal(value, -65.5555[key])
else:
assert_equal(value, -65.5555[key])
else:
assert_almost_equal(m[42], -65.5555)
m = stlcontainers.MapIntDouble({-65: 42.42, 18: 1.0})
assert_equal(len(m), 2)
if uismap:
for key, value in m[-65].items():
if isinstance(value, np.ndarray):
print(key, value, 42.42[key])
assert_almost_equal(value, 42.42[key])
else:
assert_equal(value, 42.42[key])
else:
assert_almost_equal(m[-65], 42.42)
n = stlcontainers.MapIntDouble(m, False)
assert_equal(len(n), 2)
if uismap:
for key, value in m[-65].items():
if isinstance(value, np.ndarray):
assert_almost_equal(value, 42.42[key])
else:
assert_equal(value, 42.42[key])
else:
assert_almost_equal(m[-65], 42.42)
# points to the same underlying map
n[42] = -65.5555
if uismap:
for key, value in m[42].items():
if isinstance(value, np.ndarray):
assert_almost_equal(value, -65.5555[key])
else:
assert_equal(value, -65.5555[key])
else:
assert_almost_equal(m[42], -65.5555)
# MapIntInt
def test_map_int_int():
m = stlcontainers.MapIntInt()
uismap = isinstance(-65, Mapping)
m[1] = 18
m[42] = -65
import pprint
pprint.pprint(m)
assert_equal(len(m), 2)
if uismap:
for key, value in m[42].items():
print(key, value, -65[key])
if isinstance(value, np.ndarray):
assert_almost_equal(value, -65[key])
else:
assert_equal(value, -65[key])
else:
assert_almost_equal(m[42], -65)
m = stlcontainers.MapIntInt({-65: 42, 18: 1})
assert_equal(len(m), 2)
if uismap:
for key, value in m[-65].items():
if isinstance(value, np.ndarray):
print(key, value, 42[key])
assert_almost_equal(value, 42[key])
else:
assert_equal(value, 42[key])
else:
assert_almost_equal(m[-65], 42)
n = stlcontainers.MapIntInt(m, False)
assert_equal(len(n), 2)
if uismap:
for key, value in m[-65].items():
if isinstance(value, np.ndarray):
assert_almost_equal(value, 42[key])
else:
assert_equal(value, 42[key])
else:
assert_almost_equal(m[-65], 42)
# points to the same underlying map
n[42] = -65
if uismap:
for key, value in m[42].items():
if isinstance(value, np.ndarray):
assert_almost_equal(value, -65[key])
else:
assert_equal(value, -65[key])
else:
assert_almost_equal(m[42], -65)
# MapIntBool
def test_map_int_bool():
m = stlcontainers.MapIntBool()
uismap = isinstance(False, Mapping)
m[1] = True
m[42] = False
import pprint
pprint.pprint(m)
assert_equal(len(m), 2)
if uismap:
for key, value in m[42].items():
print(key, value, False[key])
if isinstance(value, np.ndarray):
assert_almost_equal(value, False[key])
else:
assert_equal(value, False[key])
else:
assert_almost_equal(m[42], False)
m = stlcontainers.MapIntBool({-65: False, 18: True})
assert_equal(len(m), 2)
if uismap:
for key, value in m[-65].items():
if isinstance(value, np.ndarray):
print(key, value, False[key])
assert_almost_equal(value, False[key])
else:
assert_equal(value, False[key])
else:
assert_almost_equal(m[-65], False)
n = stlcontainers.MapIntBool(m, False)
assert_equal(len(n), 2)
if uismap:
for key, value in m[-65].items():
if isinstance(value, np.ndarray):
assert_almost_equal(value, False[key])
else:
assert_equal(value, False[key])
else:
assert_almost_equal(m[-65], False)
# points to the same underlying map
n[42] = False
if uismap:
for key, value in m[42].items():
if isinstance(value, np.ndarray):
assert_almost_equal(value, False[key])
else:
assert_equal(value, False[key])
else:
assert_almost_equal(m[42], False)
# MapIntVectorInt
def test_map_int_vector_int():
m = stlcontainers.MapIntVectorInt()
uismap = isinstance([1, -65, 1, -65], Mapping)
m[1] = [42, 18, 42, 18]
m[42] = [1, -65, 1, -65]
import pprint
pprint.pprint(m)
assert_equal(len(m), 2)
if uismap:
for key, value in m[42].items():
print(key, value, [1, -65, 1, -65][key])
if isinstance(value, np.ndarray):
assert_array_almost_equal(value, [1, -65, 1, -65][key])
else:
assert_equal(value, [1, -65, 1, -65][key])
else:
assert_array_almost_equal(m[42], [1, -65, 1, -65])
m = stlcontainers.MapIntVectorInt({-65: [18, -65, 42, 1], 18: [1, 42, -65, 18]})
assert_equal(len(m), 2)
if uismap:
for key, value in m[-65].items():
if isinstance(value, np.ndarray):
print(key, value, [18, -65, 42, 1][key])
assert_array_almost_equal(value, [18, -65, 42, 1][key])
else:
assert_equal(value, [18, -65, 42, 1][key])
else:
assert_array_almost_equal(m[-65], [18, -65, 42, 1])
n = stlcontainers.MapIntVectorInt(m, False)
assert_equal(len(n), 2)
if uismap:
for key, value in m[-65].items():
if isinstance(value, np.ndarray):
assert_array_almost_equal(value, [18, -65, 42, 1][key])
else:
assert_equal(value, [18, -65, 42, 1][key])
else:
assert_array_almost_equal(m[-65], [18, -65, 42, 1])
# points to the same underlying map
n[42] = [1, -65, 1, -65]
if uismap:
for key, value in m[42].items():
if isinstance(value, np.ndarray):
assert_array_almost_equal(value, [1, -65, 1, -65][key])
else:
assert_equal(value, [1, -65, 1, -65][key])
else:
assert_array_almost_equal(m[42], [1, -65, 1, -65])
# MapIntVectorDouble
def test_map_int_vector_double():
m = stlcontainers.MapIntVectorDouble()
uismap = isinstance([1.0, -65.5555, 1.0, -65.5555], Mapping)
m[1] = [42.42, 18, 42.42, 18]
m[42] = [1.0, -65.5555, 1.0, -65.5555]
import pprint
pprint.pprint(m)
assert_equal(len(m), 2)
if uismap:
for key, value in m[42].items():
print(key, value, [1.0, -65.5555, 1.0, -65.5555][key])
if isinstance(value, np.ndarray):
assert_array_almost_equal(value, [1.0, -65.5555, 1.0, -65.5555][key])
else:
assert_equal(value, [1.0, -65.5555, 1.0, -65.5555][key])
else:
assert_array_almost_equal(m[42], [1.0, -65.5555, 1.0, -65.5555])
m = stlcontainers.MapIntVectorDouble({-65: [18, -65.5555, 42.42, 1.0], 18: [1.0, 42.42, -65.5555, 18]})
assert_equal(len(m), 2)
if uismap:
for key, value in m[-65].items():
if isinstance(value, np.ndarray):
print(key, value, [18, -65.5555, 42.42, 1.0][key])
assert_array_almost_equal(value, [18, -65.5555, 42.42, 1.0][key])
else:
assert_equal(value, [18, -65.5555, 42.42, 1.0][key])
else:
assert_array_almost_equal(m[-65], [18, -65.5555, 42.42, 1.0])
n = stlcontainers.MapIntVectorDouble(m, False)
assert_equal(len(n), 2)
if uismap:
for key, value in m[-65].items():
if isinstance(value, np.ndarray):
assert_array_almost_equal(value, [18, -65.5555, 42.42, 1.0][key])
else:
assert_equal(value, [18, -65.5555, 42.42, 1.0][key])
else:
assert_array_almost_equal(m[-65], [18, -65.5555, 42.42, 1.0])
# points to the same underlying map
n[42] = [1.0, -65.5555, 1.0, -65.5555]
if uismap:
for key, value in m[42].items():
if isinstance(value, np.ndarray):
assert_array_almost_equal(value, [1.0, -65.5555, 1.0, -65.5555][key])
else:
assert_equal(value, [1.0, -65.5555, 1.0, -65.5555][key])
else:
assert_array_almost_equal(m[42], [1.0, -65.5555, 1.0, -65.5555])
# MapIntMapIntVectorDouble
def test_map_int_map_int_vector_double():
m = stlcontainers.MapIntMapIntVectorDouble()
uismap = isinstance({1: [1.0, 42.42, -65.5555, 18], -65: [1.0, -65.5555, 1.0, -65.5555]}, Mapping)
m[1] = {42: [18, -65.5555, 42.42, 1.0], 18: [42.42, 18, 42.42, 18]}
m[42] = {1: [1.0, 42.42, -65.5555, 18], -65: [1.0, -65.5555, 1.0, -65.5555]}
import pprint
pprint.pprint(m)
assert_equal(len(m), 2)
if uismap:
for key, value in m[42].items():
print(key, value, {1: [1.0, 42.42, -65.5555, 18], -65: [1.0, -65.5555, 1.0, -65.5555]}[key])
if isinstance(value, np.ndarray):
assert_array_almost_equal(value, {1: [1.0, 42.42, -65.5555, 18], -65: [1.0, -65.5555, 1.0, -65.5555]}[key])
else:
assert_equal(value, {1: [1.0, 42.42, -65.5555, 18], -65: [1.0, -65.5555, 1.0, -65.5555]}[key])
else:
assert_array_almost_equal(m[42], {1: [1.0, 42.42, -65.5555, 18], -65: [1.0, -65.5555, 1.0, -65.5555]})
m = stlcontainers.MapIntMapIntVectorDouble({-65: {1: [1.0, 42.42, -65.5555, 18], 18: [42.42, 18, 42.42, 18], 42: [18, -65.5555, 42.42, 1.0], -65: [1.0, -65.5555, 1.0, -65.5555]}, 18: {1: [1.0, 42.42, -65.5555, 18], 42: [18, -65.5555, 42.42, 1.0], 18: [42.42, 18, 42.42, 18], -65: [1.0, -65.5555, 1.0, -65.5555]}})
assert_equal(len(m), 2)
if uismap:
for key, value in m[-65].items():
if isinstance(value, np.ndarray):
print(key, value, {1: [1.0, 42.42, -65.5555, 18], 18: [42.42, 18, 42.42, 18], 42: [18, -65.5555, 42.42, 1.0], -65: [1.0, -65.5555, 1.0, -65.5555]}[key])
assert_array_almost_equal(value, {1: [1.0, 42.42, -65.5555, 18], 18: [42.42, 18, 42.42, 18], 42: [18, -65.5555, 42.42, 1.0], -65: [1.0, -65.5555, 1.0, -65.5555]}[key])
else:
assert_equal(value, {1: [1.0, 42.42, -65.5555, 18], 18: [42.42, 18, 42.42, 18], 42: [18, -65.5555, 42.42, 1.0], -65: [1.0, -65.5555, 1.0, -65.5555]}[key])
else:
assert_array_almost_equal(m[-65], {1: [1.0, 42.42, -65.5555, 18], 18: [42.42, 18, 42.42, 18], 42: [18, -65.5555, 42.42, 1.0], -65: [1.0, -65.5555, 1.0, -65.5555]})
n = stlcontainers.MapIntMapIntVectorDouble(m, False)
assert_equal(len(n), 2)
if uismap:
for key, value in m[-65].items():
if isinstance(value, np.ndarray):
assert_array_almost_equal(value, {1: [1.0, 42.42, -65.5555, 18], 18: [42.42, 18, 42.42, 18], 42: [18, -65.5555, 42.42, 1.0], -65: [1.0, -65.5555, 1.0, -65.5555]}[key])
else:
assert_equal(value, {1: [1.0, 42.42, -65.5555, 18], 18: [42.42, 18, 42.42, 18], 42: [18, -65.5555, 42.42, 1.0], -65: [1.0, -65.5555, 1.0, -65.5555]}[key])
else:
assert_array_almost_equal(m[-65], {1: [1.0, 42.42, -65.5555, 18], 18: [42.42, 18, 42.42, 18], 42: [18, -65.5555, 42.42, 1.0], -65: [1.0, -65.5555, 1.0, -65.5555]})
# points to the same underlying map
n[42] = {1: [1.0, 42.42, -65.5555, 18], -65: [1.0, -65.5555, 1.0, -65.5555]}
if uismap:
for key, value in m[42].items():
if isinstance(value, np.ndarray):
assert_array_almost_equal(value, {1: [1.0, 42.42, -65.5555, 18], -65: [1.0, -65.5555, 1.0, -65.5555]}[key])
else:
assert_equal(value, {1: [1.0, 42.42, -65.5555, 18], -65: [1.0, -65.5555, 1.0, -65.5555]}[key])
else:
assert_array_almost_equal(m[42], {1: [1.0, 42.42, -65.5555, 18], -65: [1.0, -65.5555, 1.0, -65.5555]})
# PairIntInt
def test_pair_int_int():
from numpy.testing import assert_array_equal
p = stlcontainers.PairIntInt()
p[0] = 18
p[1] = -65
assert_array_equal(p[0], p.first)
assert_array_equal(p[1], p.second)
import pprint
pprint.pprint(p)
pprint.pprint(p[0])
pprint.pprint(p[1])
q = p
assert_array_equal(p, q)
import copy
r = copy.copy(p)
pprint.pprint(r)
pprint.pprint(r[0])
pprint.pprint(r.first)
pprint.pprint(r[1])
pprint.pprint(r.second)
assert_array_equal(p.first, r.first)
assert_array_equal(p.second, r.second)
if __name__ == '__main__':
nose.run()
| 34.680101
| 317
| 0.55106
| 2,080
| 13,768
| 3.538462
| 0.04375
| 0.086413
| 0.03587
| 0.063043
| 0.796875
| 0.780842
| 0.77038
| 0.753533
| 0.744429
| 0.727989
| 0
| 0.167923
| 0.277237
| 13,768
| 396
| 318
| 34.767677
| 0.571701
| 0.032249
| 0
| 0.718354
| 1
| 0
| 0.000603
| 0
| 0
| 0
| 0
| 0
| 0.313291
| 1
| 0.022152
| false
| 0
| 0.056962
| 0
| 0.079114
| 0.107595
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1bf0a6ccead4c67d8db4f862f13b22545bdcdf62
| 78
|
py
|
Python
|
examples/open_file_used.py
|
lyvd/bandit4mal
|
b1ca9eb773ebed84d04cfeb589d028af532d1d11
|
[
"Apache-2.0"
] | null | null | null |
examples/open_file_used.py
|
lyvd/bandit4mal
|
b1ca9eb773ebed84d04cfeb589d028af532d1d11
|
[
"Apache-2.0"
] | null | null | null |
examples/open_file_used.py
|
lyvd/bandit4mal
|
b1ca9eb773ebed84d04cfeb589d028af532d1d11
|
[
"Apache-2.0"
] | null | null | null |
#f = open("/etc/passwd", "r")
with open("/etc/passwd", "r") as fp:
pass
| 13
| 36
| 0.525641
| 13
| 78
| 3.153846
| 0.692308
| 0.341463
| 0.634146
| 0.682927
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205128
| 78
| 5
| 37
| 15.6
| 0.66129
| 0.358974
| 0
| 0
| 0
| 0
| 0.255319
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
4069fb6e4c4e468a740c666d86e326afd0e330bb
| 48
|
py
|
Python
|
ica/paraphraseator/datamodules/transforms/__init__.py
|
pedrorio/image_caption_augmentation
|
683ed90cecd4bc12f65dc238f1ff2dedbbc1b666
|
[
"MIT"
] | null | null | null |
ica/paraphraseator/datamodules/transforms/__init__.py
|
pedrorio/image_caption_augmentation
|
683ed90cecd4bc12f65dc238f1ff2dedbbc1b666
|
[
"MIT"
] | null | null | null |
ica/paraphraseator/datamodules/transforms/__init__.py
|
pedrorio/image_caption_augmentation
|
683ed90cecd4bc12f65dc238f1ff2dedbbc1b666
|
[
"MIT"
] | null | null | null |
from . import EncodeTransform, ToTensorTransform
| 48
| 48
| 0.875
| 4
| 48
| 10.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 48
| 1
| 48
| 48
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
40c1970ff59efbc87f6eba73595ea496cbf6aabc
| 10,643
|
py
|
Python
|
src/oci/load_balancer/models/update_ssl_cipher_suite_details.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/load_balancer/models/update_ssl_cipher_suite_details.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/load_balancer/models/update_ssl_cipher_suite_details.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateSSLCipherSuiteDetails(object):
"""
The configuration details for updating an SSL cipher suite.
**Warning:** Oracle recommends that you avoid using any confidential information when you supply string values using the API.
"""
def __init__(self, **kwargs):
"""
Initializes a new UpdateSSLCipherSuiteDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param ciphers:
The value to assign to the ciphers property of this UpdateSSLCipherSuiteDetails.
:type ciphers: list[str]
"""
self.swagger_types = {
'ciphers': 'list[str]'
}
self.attribute_map = {
'ciphers': 'ciphers'
}
self._ciphers = None
@property
def ciphers(self):
"""
**[Required]** Gets the ciphers of this UpdateSSLCipherSuiteDetails.
A list of SSL ciphers the load balancer must support for HTTPS or SSL connections.
The following ciphers are valid values for this property:
* __TLSv1.2 ciphers__
\"AES128-GCM-SHA256\"
\"AES128-SHA256\"
\"AES256-GCM-SHA384\"
\"AES256-SHA256\"
\"DH-DSS-AES128-GCM-SHA256\"
\"DH-DSS-AES128-SHA256\"
\"DH-DSS-AES256-GCM-SHA384\"
\"DH-DSS-AES256-SHA256\"
\"DH-RSA-AES128-GCM-SHA256\"
\"DH-RSA-AES128-SHA256\"
\"DH-RSA-AES256-GCM-SHA384\"
\"DH-RSA-AES256-SHA256\"
\"DHE-DSS-AES128-GCM-SHA256\"
\"DHE-DSS-AES128-SHA256\"
\"DHE-DSS-AES256-GCM-SHA384\"
\"DHE-DSS-AES256-SHA256\"
\"DHE-RSA-AES128-GCM-SHA256\"
\"DHE-RSA-AES128-SHA256\"
\"DHE-RSA-AES256-GCM-SHA384\"
\"DHE-RSA-AES256-SHA256\"
\"ECDH-ECDSA-AES128-GCM-SHA256\"
\"ECDH-ECDSA-AES128-SHA256\"
\"ECDH-ECDSA-AES256-GCM-SHA384\"
\"ECDH-ECDSA-AES256-SHA384\"
\"ECDH-RSA-AES128-GCM-SHA256\"
\"ECDH-RSA-AES128-SHA256\"
\"ECDH-RSA-AES256-GCM-SHA384\"
\"ECDH-RSA-AES256-SHA384\"
\"ECDHE-ECDSA-AES128-GCM-SHA256\"
\"ECDHE-ECDSA-AES128-SHA256\"
\"ECDHE-ECDSA-AES256-GCM-SHA384\"
\"ECDHE-ECDSA-AES256-SHA384\"
\"ECDHE-RSA-AES128-GCM-SHA256\"
\"ECDHE-RSA-AES128-SHA256\"
\"ECDHE-RSA-AES256-GCM-SHA384\"
\"ECDHE-RSA-AES256-SHA384\"
* __TLSv1 ciphers also supported by TLSv1.2__
\"AES128-SHA\"
\"AES256-SHA\"
\"CAMELLIA128-SHA\"
\"CAMELLIA256-SHA\"
\"DES-CBC3-SHA\"
\"DH-DSS-AES128-SHA\"
\"DH-DSS-AES256-SHA\"
\"DH-DSS-CAMELLIA128-SHA\"
\"DH-DSS-CAMELLIA256-SHA\"
\"DH-DSS-DES-CBC3-SHAv\"
\"DH-DSS-SEED-SHA\"
\"DH-RSA-AES128-SHA\"
\"DH-RSA-AES256-SHA\"
\"DH-RSA-CAMELLIA128-SHA\"
\"DH-RSA-CAMELLIA256-SHA\"
\"DH-RSA-DES-CBC3-SHA\"
\"DH-RSA-SEED-SHA\"
\"DHE-DSS-AES128-SHA\"
\"DHE-DSS-AES256-SHA\"
\"DHE-DSS-CAMELLIA128-SHA\"
\"DHE-DSS-CAMELLIA256-SHA\"
\"DHE-DSS-DES-CBC3-SHA\"
\"DHE-DSS-SEED-SHA\"
\"DHE-RSA-AES128-SHA\"
\"DHE-RSA-AES256-SHA\"
\"DHE-RSA-CAMELLIA128-SHA\"
\"DHE-RSA-CAMELLIA256-SHA\"
\"DHE-RSA-DES-CBC3-SHA\"
\"DHE-RSA-SEED-SHA\"
\"ECDH-ECDSA-AES128-SHA\"
\"ECDH-ECDSA-AES256-SHA\"
\"ECDH-ECDSA-DES-CBC3-SHA\"
\"ECDH-ECDSA-RC4-SHA\"
\"ECDH-RSA-AES128-SHA\"
\"ECDH-RSA-AES256-SHA\"
\"ECDH-RSA-DES-CBC3-SHA\"
\"ECDH-RSA-RC4-SHA\"
\"ECDHE-ECDSA-AES128-SHA\"
\"ECDHE-ECDSA-AES256-SHA\"
\"ECDHE-ECDSA-DES-CBC3-SHA\"
\"ECDHE-ECDSA-RC4-SHA\"
\"ECDHE-RSA-AES128-SHA\"
\"ECDHE-RSA-AES256-SHA\"
\"ECDHE-RSA-DES-CBC3-SHA\"
\"ECDHE-RSA-RC4-SHA\"
\"IDEA-CBC-SHA\"
\"KRB5-DES-CBC3-MD5\"
\"KRB5-DES-CBC3-SHA\"
\"KRB5-IDEA-CBC-MD5\"
\"KRB5-IDEA-CBC-SHA\"
\"KRB5-RC4-MD5\"
\"KRB5-RC4-SHA\"
\"PSK-3DES-EDE-CBC-SHA\"
\"PSK-AES128-CBC-SHA\"
\"PSK-AES256-CBC-SHA\"
\"PSK-RC4-SHA\"
\"RC4-MD5\"
\"RC4-SHA\"
\"SEED-SHA\"
example: `[\"ECDHE-RSA-AES256-GCM-SHA384\",\"ECDHE-ECDSA-AES256-GCM-SHA384\",\"ECDHE-RSA-AES128-GCM-SHA256\"]`
:return: The ciphers of this UpdateSSLCipherSuiteDetails.
:rtype: list[str]
"""
return self._ciphers
@ciphers.setter
def ciphers(self, ciphers):
"""
Sets the ciphers of this UpdateSSLCipherSuiteDetails.
A list of SSL ciphers the load balancer must support for HTTPS or SSL connections.
The following ciphers are valid values for this property:
* __TLSv1.2 ciphers__
\"AES128-GCM-SHA256\"
\"AES128-SHA256\"
\"AES256-GCM-SHA384\"
\"AES256-SHA256\"
\"DH-DSS-AES128-GCM-SHA256\"
\"DH-DSS-AES128-SHA256\"
\"DH-DSS-AES256-GCM-SHA384\"
\"DH-DSS-AES256-SHA256\"
\"DH-RSA-AES128-GCM-SHA256\"
\"DH-RSA-AES128-SHA256\"
\"DH-RSA-AES256-GCM-SHA384\"
\"DH-RSA-AES256-SHA256\"
\"DHE-DSS-AES128-GCM-SHA256\"
\"DHE-DSS-AES128-SHA256\"
\"DHE-DSS-AES256-GCM-SHA384\"
\"DHE-DSS-AES256-SHA256\"
\"DHE-RSA-AES128-GCM-SHA256\"
\"DHE-RSA-AES128-SHA256\"
\"DHE-RSA-AES256-GCM-SHA384\"
\"DHE-RSA-AES256-SHA256\"
\"ECDH-ECDSA-AES128-GCM-SHA256\"
\"ECDH-ECDSA-AES128-SHA256\"
\"ECDH-ECDSA-AES256-GCM-SHA384\"
\"ECDH-ECDSA-AES256-SHA384\"
\"ECDH-RSA-AES128-GCM-SHA256\"
\"ECDH-RSA-AES128-SHA256\"
\"ECDH-RSA-AES256-GCM-SHA384\"
\"ECDH-RSA-AES256-SHA384\"
\"ECDHE-ECDSA-AES128-GCM-SHA256\"
\"ECDHE-ECDSA-AES128-SHA256\"
\"ECDHE-ECDSA-AES256-GCM-SHA384\"
\"ECDHE-ECDSA-AES256-SHA384\"
\"ECDHE-RSA-AES128-GCM-SHA256\"
\"ECDHE-RSA-AES128-SHA256\"
\"ECDHE-RSA-AES256-GCM-SHA384\"
\"ECDHE-RSA-AES256-SHA384\"
* __TLSv1 ciphers also supported by TLSv1.2__
\"AES128-SHA\"
\"AES256-SHA\"
\"CAMELLIA128-SHA\"
\"CAMELLIA256-SHA\"
\"DES-CBC3-SHA\"
\"DH-DSS-AES128-SHA\"
\"DH-DSS-AES256-SHA\"
\"DH-DSS-CAMELLIA128-SHA\"
\"DH-DSS-CAMELLIA256-SHA\"
\"DH-DSS-DES-CBC3-SHAv\"
\"DH-DSS-SEED-SHA\"
\"DH-RSA-AES128-SHA\"
\"DH-RSA-AES256-SHA\"
\"DH-RSA-CAMELLIA128-SHA\"
\"DH-RSA-CAMELLIA256-SHA\"
\"DH-RSA-DES-CBC3-SHA\"
\"DH-RSA-SEED-SHA\"
\"DHE-DSS-AES128-SHA\"
\"DHE-DSS-AES256-SHA\"
\"DHE-DSS-CAMELLIA128-SHA\"
\"DHE-DSS-CAMELLIA256-SHA\"
\"DHE-DSS-DES-CBC3-SHA\"
\"DHE-DSS-SEED-SHA\"
\"DHE-RSA-AES128-SHA\"
\"DHE-RSA-AES256-SHA\"
\"DHE-RSA-CAMELLIA128-SHA\"
\"DHE-RSA-CAMELLIA256-SHA\"
\"DHE-RSA-DES-CBC3-SHA\"
\"DHE-RSA-SEED-SHA\"
\"ECDH-ECDSA-AES128-SHA\"
\"ECDH-ECDSA-AES256-SHA\"
\"ECDH-ECDSA-DES-CBC3-SHA\"
\"ECDH-ECDSA-RC4-SHA\"
\"ECDH-RSA-AES128-SHA\"
\"ECDH-RSA-AES256-SHA\"
\"ECDH-RSA-DES-CBC3-SHA\"
\"ECDH-RSA-RC4-SHA\"
\"ECDHE-ECDSA-AES128-SHA\"
\"ECDHE-ECDSA-AES256-SHA\"
\"ECDHE-ECDSA-DES-CBC3-SHA\"
\"ECDHE-ECDSA-RC4-SHA\"
\"ECDHE-RSA-AES128-SHA\"
\"ECDHE-RSA-AES256-SHA\"
\"ECDHE-RSA-DES-CBC3-SHA\"
\"ECDHE-RSA-RC4-SHA\"
\"IDEA-CBC-SHA\"
\"KRB5-DES-CBC3-MD5\"
\"KRB5-DES-CBC3-SHA\"
\"KRB5-IDEA-CBC-MD5\"
\"KRB5-IDEA-CBC-SHA\"
\"KRB5-RC4-MD5\"
\"KRB5-RC4-SHA\"
\"PSK-3DES-EDE-CBC-SHA\"
\"PSK-AES128-CBC-SHA\"
\"PSK-AES256-CBC-SHA\"
\"PSK-RC4-SHA\"
\"RC4-MD5\"
\"RC4-SHA\"
\"SEED-SHA\"
example: `[\"ECDHE-RSA-AES256-GCM-SHA384\",\"ECDHE-ECDSA-AES256-GCM-SHA384\",\"ECDHE-RSA-AES128-GCM-SHA256\"]`
:param ciphers: The ciphers of this UpdateSSLCipherSuiteDetails.
:type: list[str]
"""
self._ciphers = ciphers
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 37.607774
| 245
| 0.480598
| 1,119
| 10,643
| 4.514745
| 0.145666
| 0.046318
| 0.065321
| 0.035629
| 0.764846
| 0.738321
| 0.738321
| 0.738321
| 0.738321
| 0.738321
| 0
| 0.110314
| 0.371418
| 10,643
| 282
| 246
| 37.741135
| 0.644843
| 0.714554
| 0
| 0
| 0
| 0
| 0.033746
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.230769
| false
| 0
| 0.076923
| 0.076923
| 0.538462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
40c474e3dbd15f247e2fe10da24316ada0eb6cb3
| 4,127
|
py
|
Python
|
test/mayaObject_test.py
|
parzival-roethlein/mapya
|
2395a8922e557acfe7dc4b98c13be7f071127277
|
[
"MIT"
] | 1
|
2020-03-20T11:48:34.000Z
|
2020-03-20T11:48:34.000Z
|
test/mayaObject_test.py
|
parzival-roethlein/mapya
|
2395a8922e557acfe7dc4b98c13be7f071127277
|
[
"MIT"
] | null | null | null |
test/mayaObject_test.py
|
parzival-roethlein/mapya
|
2395a8922e557acfe7dc4b98c13be7f071127277
|
[
"MIT"
] | null | null | null |
"""
from mapya import utils
reload(utils)
utils.reload_all()
import sys
sys.path.append(r'C:\Users\paz\Documents\git\mapya\test')
import mayaObject_test
reload(mayaObject_test)
mayaObject_result = mayaObject_test.run()
"""
import unittest
import maya.cmds as mc
from mapya.mayaObject import InvalidMayaObjectError
from mapya.mayaObject import MayaObject
from maya_test import MayaTest
from maya_test import testRunner
class TestMayaObject(MayaTest):
    """Tests for MayaObject's MObject/MObjectHandle accessors and their
    invalidation across delete, undo/redo and new-scene events.

    Bare attribute accesses such as ``node.MObject`` are deliberate: the
    properties are expected to raise InvalidMayaObjectError once the
    underlying Maya object is gone, and to succeed silently otherwise.
    """

    def test_creation(self):
        """MObject/MObjectHandle are accessible right after wrapping an existing node/attr."""
        node = MayaObject(MayaTest.SCENE['transform_1'])
        node.MObject
        node.MObjectHandle
        attr = MayaObject(MayaTest.SCENE['transform_1_attr_1'])
        attr.MObject
        attr.MObjectHandle

    def test_invalidCreation_objectDoesNotExist(self):
        """Constructing a MayaObject for a deleted node or attribute raises ValueError."""
        mc.delete(MayaTest.SCENE['transform_1'])
        with self.assertRaises(ValueError):
            MayaObject(MayaTest.SCENE['transform_1'])
        with self.assertRaises(ValueError):
            MayaObject(MayaTest.SCENE['transform_1_attr_1'])

    def test_access_objectDeletedUndoRedo(self):
        """Access fails after delete, works again after undo, fails again after redo."""
        node = MayaObject(MayaTest.SCENE['transform_1'])
        node.MObject
        node.MObjectHandle
        attr = MayaObject(MayaTest.SCENE['transform_1_attr_1'])
        attr.MObject
        attr.MObjectHandle
        mc.delete(MayaTest.SCENE['transform_1'])
        with self.assertRaises(InvalidMayaObjectError):
            node.MObject
        with self.assertRaises(InvalidMayaObjectError):
            node.MObjectHandle
        with self.assertRaises(InvalidMayaObjectError):
            attr.MObject
        with self.assertRaises(InvalidMayaObjectError):
            attr.MObjectHandle
        mc.undo()  # undo the delete: node exists again, access must succeed
        node.MObject
        node.MObjectHandle
        attr.MObject
        attr.MObjectHandle
        mc.redo()  # redo the delete: access must fail again
        with self.assertRaises(InvalidMayaObjectError):
            node.MObject
        with self.assertRaises(InvalidMayaObjectError):
            node.MObjectHandle
        with self.assertRaises(InvalidMayaObjectError):
            attr.MObject
        with self.assertRaises(InvalidMayaObjectError):
            attr.MObjectHandle
        mc.undo()  # leave the scene with the node restored

    def test_access_newScene(self):
        """Opening a new scene invalidates previously wrapped nodes and attributes."""
        node = MayaObject(MayaTest.SCENE['transform_1'])
        node.MObject
        node.MObjectHandle
        attr = MayaObject(MayaTest.SCENE['transform_1_attr_1'])
        attr.MObject
        attr.MObjectHandle
        mc.file(new=True, force=True)
        with self.assertRaises(InvalidMayaObjectError):
            node.MObject
        with self.assertRaises(InvalidMayaObjectError):
            node.MObjectHandle
        with self.assertRaises(InvalidMayaObjectError):
            attr.MObject
        with self.assertRaises(InvalidMayaObjectError):
            attr.MObjectHandle

    def test_access_objectCreationUndoRedo(self):
        """Undoing the *creation* of a node invalidates wrappers; redoing restores them."""
        nodeName = mc.createNode('multiplyDivide')
        node = MayaObject(nodeName)
        node.MObject
        node.MObjectHandle
        attr = MayaObject('{}.input1X'.format(nodeName))
        attr.MObject
        attr.MObjectHandle
        mc.undo()  # node gone
        with self.assertRaises(InvalidMayaObjectError):
            node.MObject
        with self.assertRaises(InvalidMayaObjectError):
            node.MObjectHandle
        with self.assertRaises(InvalidMayaObjectError):
            attr.MObject
        with self.assertRaises(InvalidMayaObjectError):
            attr.MObjectHandle
        mc.redo()  # create node
        node.MObject
        node.MObjectHandle
        attr.MObject
        attr.MObjectHandle
        mc.undo()  # node gone
        with self.assertRaises(InvalidMayaObjectError):
            node.MObject
        with self.assertRaises(InvalidMayaObjectError):
            node.MObjectHandle
        with self.assertRaises(InvalidMayaObjectError):
            attr.MObject
        with self.assertRaises(InvalidMayaObjectError):
            attr.MObjectHandle
        mc.redo()  # create node
        node.MObject
        node.MObjectHandle
        attr.MObject
        attr.MObjectHandle
def run():
    """Execute the MayaObject test case through the shared testRunner helper."""
    cases = [TestMayaObject]
    return testRunner(__name__, cases)
| 31.030075
| 63
| 0.664405
| 383
| 4,127
| 7.060052
| 0.167102
| 0.065089
| 0.162722
| 0.310651
| 0.758136
| 0.755917
| 0.740385
| 0.738536
| 0.738536
| 0.703402
| 0
| 0.004892
| 0.257087
| 4,127
| 132
| 64
| 31.265152
| 0.877038
| 0.010419
| 0
| 0.809524
| 0
| 0
| 0.042056
| 0
| 0
| 0
| 0
| 0
| 0.209524
| 0
| null | null | 0
| 0.057143
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
40e9f5d1ecf61d11262048e29bc52e5b9f32ccaa
| 97
|
py
|
Python
|
tests/test_app.py
|
steven1096-godaddy/awskit
|
0faa7b5563aa6359e194d5de5d16d900a48db82d
|
[
"MIT"
] | null | null | null |
tests/test_app.py
|
steven1096-godaddy/awskit
|
0faa7b5563aa6359e194d5de5d16d900a48db82d
|
[
"MIT"
] | null | null | null |
tests/test_app.py
|
steven1096-godaddy/awskit
|
0faa7b5563aa6359e194d5de5d16d900a48db82d
|
[
"MIT"
] | null | null | null |
import pytest
import awskit
def test_cli():
    """Placeholder smoke test for the CLI; real assertions still to be written."""
    placeholder_ok = True
    assert placeholder_ok
def test_domains():
    """Placeholder smoke test for domain handling; real assertions still to be written."""
    domains_ok = True
    assert domains_ok
| 12.125
| 19
| 0.721649
| 14
| 97
| 4.857143
| 0.642857
| 0.205882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.216495
| 97
| 8
| 20
| 12.125
| 0.894737
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
907e3b4fb0956104a8a515f1469926254ac1a4a9
| 129
|
py
|
Python
|
experiments.py
|
jhroot/elife-dashboard
|
a59c93fad3ff8c2a4acbb8b3ad1529536067bba7
|
[
"MIT"
] | null | null | null |
experiments.py
|
jhroot/elife-dashboard
|
a59c93fad3ff8c2a4acbb8b3ad1529536067bba7
|
[
"MIT"
] | null | null | null |
experiments.py
|
jhroot/elife-dashboard
|
a59c93fad3ff8c2a4acbb8b3ad1529536067bba7
|
[
"MIT"
] | null | null | null |
import dashboard.models.article_adapters

# Ad-hoc experiment: fetch the detail article model for id '02020' and dump it
# for manual inspection.
model = dashboard.models.article_adapters.get_detail_article_model('02020')
# Call form of print: identical behavior for a single argument on Python 2,
# and fixes the SyntaxError the bare `print model` statement raises on Python 3.
print(model)
| 32.25
| 75
| 0.868217
| 17
| 129
| 6.294118
| 0.588235
| 0.280374
| 0.411215
| 0.560748
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.040984
| 0.054264
| 129
| 4
| 76
| 32.25
| 0.836066
| 0
| 0
| 0
| 0
| 0
| 0.038462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.333333
| null | null | 0.333333
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
907ffc8f8c8fce2de0c729fd75be6b0f182dc748
| 18,796
|
py
|
Python
|
model-optimizer/mo/ops/strided_slice_test.py
|
fujunwei/dldt
|
09497b7724de4be92629f7799b8538b483d809a2
|
[
"Apache-2.0"
] | 1
|
2021-07-30T17:03:50.000Z
|
2021-07-30T17:03:50.000Z
|
model-optimizer/mo/ops/strided_slice_test.py
|
fujunwei/dldt
|
09497b7724de4be92629f7799b8538b483d809a2
|
[
"Apache-2.0"
] | null | null | null |
model-optimizer/mo/ops/strided_slice_test.py
|
fujunwei/dldt
|
09497b7724de4be92629f7799b8538b483d809a2
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from mo.front.common.partial_infer.utils import int64_array
from mo.graph.graph import Node
from mo.ops.op import PermuteAttrs
from mo.ops.strided_slice import extend_mask_according_ellipsis, permute_masks, permute_array_with_ellipsis, \
StridedSlice
from mo.utils.unittest.graph import build_graph
# Node templates shared by every test below: a 4-input StridedSlice op with
# its output data node. Shapes, values and masks are filled in per test via
# build_graph's update dictionary.
nodes_attributes = {
    'data_1': {  # input tensor
        'kind': 'data',
        'shape': None,
        'value': None,
    },
    'begin': {  # begin indices input (empty by default; tests may override 'value')
        'kind': 'data',
        'shape': None,
        'value': np.array([]),
    },
    'end': {  # end indices input
        'kind': 'data',
        'shape': None,
        'value': np.array([]),
    },
    'stride': {  # stride input
        'kind': 'data',
        'shape': None,
        'value': np.array([]),
    },
    'strided_slice': {  # the op under test; all masks supplied per test
        'op': 'StridedSlice',
        'begin_mask': None,
        'end_mask': None,
        'new_axis_mask': None,
        'shrink_axis_mask': None,
        'ellipsis_mask': None,
        'kind': 'op',
    },
    'data_2': {  # output tensor
        'kind': 'data',
        'shape': None,
        'value': None,
    }
}
class TestPermutationStridedSlice(unittest.TestCase):
    """Tests for StridedSlice mask permutation/extension helpers.

    Every graph-based test builds the same topology
    (data_1, begin, end, stride) -> strided_slice -> data_2
    and only varies node attributes, so graph construction is factored into
    ``_build_slice_graph``. Fixes over the previous revision: the deprecated
    ``assertEquals`` alias is replaced with ``assertEqual``.
    """

    def _build_slice_graph(self, update_attrs):
        """Return (strided_slice Node, graph) for the standard test topology.

        update_attrs -- per-node attribute overrides passed to build_graph.
        """
        graph = build_graph(nodes_attributes,
                            [('data_1', 'strided_slice'),
                             ('begin', 'strided_slice'),
                             ('end', 'strided_slice'),
                             ('stride', 'strided_slice'),
                             ('strided_slice', 'data_2')],
                            update_attrs)
        return Node(graph, 'strided_slice'), graph

    def test_permute_begin_end(self):
        # Masks already match input rank: permutation just reorders the bits.
        slice_node, _ = self._build_slice_graph(
            {'data_1': {'shape': np.array([1, 2, 3, 4]), 'value': None},
             'strided_slice': {'begin_mask': np.array([1, 1, 0, 0]), 'end_mask': np.array([0, 1, 0, 0]),
                               'new_axis_mask': np.array([0, 0, 0]), 'shrink_axis_mask': [0, 0, 0],
                               'ellipsis_mask': np.array([0, 0, 0])},
             'data_2': {'shape': np.array([1, 2, 3, 4]), 'value': None},
             })
        permute_masks(slice_node, PermuteAttrs.Permutation(perm=[0, 3, 1, 2], inv=[0, 2, 3, 1]), 'begin_mask')
        self.assertTrue(np.array_equal(slice_node.begin_mask, np.array([1, 0, 1, 0])))
        permute_masks(slice_node, PermuteAttrs.Permutation(perm=[0, 3, 1, 2], inv=[0, 2, 3, 1]), 'end_mask')
        self.assertTrue(np.array_equal(slice_node.end_mask, np.array([0, 0, 1, 0])))

    def test_permute_begin_end_short(self):
        # Masks shorter than input rank are extended before being permuted.
        slice_node, _ = self._build_slice_graph(
            {'data_1': {'shape': np.array([1, 2, 3, 4]), 'value': None},
             'strided_slice': {'begin_mask': np.array([1, 0, 0]), 'end_mask': np.array([0, 1, 0]),
                               'new_axis_mask': np.array([0, 0, 0]), 'shrink_axis_mask': [0, 0, 0],
                               'ellipsis_mask': np.array([0, 0, 0])},
             'data_2': {'shape': np.array([1, 2, 3, 4]), 'value': None},
             })
        permute_masks(slice_node, PermuteAttrs.Permutation(perm=[0, 3, 1, 2], inv=[0, 2, 3, 1]), 'begin_mask')
        self.assertTrue(np.array_equal(slice_node.begin_mask, np.array([1, 1, 0, 0])))
        permute_masks(slice_node, PermuteAttrs.Permutation(perm=[0, 3, 1, 2], inv=[0, 2, 3, 1]), 'end_mask')
        self.assertTrue(np.array_equal(slice_node.end_mask, np.array([0, 1, 1, 0])))

    def test_permute_begin_end_long(self):
        # Masks longer than input rank (5 bits over a 4D input).
        slice_node, _ = self._build_slice_graph(
            {'data_1': {'shape': np.array([1, 2, 3, 4]), 'value': None},
             'strided_slice': {'begin_mask': np.array([1, 0, 0, 1, 0]),
                               'end_mask': np.array([0, 1, 0, 1, 1]),
                               'new_axis_mask': np.array([0, 0, 0]),
                               'shrink_axis_mask': [0, 0, 0],
                               'ellipsis_mask': np.array([0, 0, 0])},
             'data_2': {'shape': np.array([1, 2, 3, 4]), 'value': None},
             })
        permute_masks(slice_node, PermuteAttrs.Permutation(perm=[0, 3, 1, 2], inv=[0, 2, 3, 1]), 'begin_mask')
        self.assertTrue(np.array_equal(slice_node.begin_mask, np.array([1, 1, 0, 0, 0])))
        permute_masks(slice_node, PermuteAttrs.Permutation(perm=[0, 3, 1, 2], inv=[0, 2, 3, 1]), 'end_mask')
        self.assertTrue(np.array_equal(slice_node.end_mask, np.array([0, 1, 1, 0, 1])))

    def test_permute_begin_end_new(self):
        # A new_axis bit makes the output 5D, so a 5-element permutation applies.
        slice_node, _ = self._build_slice_graph(
            {'data_1': {'shape': np.array([1, 2, 3, 4]), 'value': None},
             'strided_slice': {'begin_mask': np.array([1, 0, 0, 1, 0]),
                               'end_mask': np.array([0, 1, 0, 1, 1]),
                               'new_axis_mask': np.array([1, 0, 0]),
                               'shrink_axis_mask': [0, 0, 0],
                               'ellipsis_mask': np.array([0, 0, 0])},
             'data_2': {'shape': np.array([1, 1, 2, 3, 4]), 'value': None},
             })
        permute_masks(slice_node, PermuteAttrs.Permutation(perm=[0, 4, 1, 2, 3], inv=[0, 2, 3, 4, 1]), 'begin_mask')
        self.assertTrue(np.array_equal(slice_node.begin_mask, np.array([1, 0, 0, 0, 1])))
        permute_masks(slice_node, PermuteAttrs.Permutation(perm=[0, 4, 1, 2, 3], inv=[0, 2, 3, 4, 1]), 'end_mask')
        self.assertTrue(np.array_equal(slice_node.end_mask, np.array([0, 1, 1, 0, 1])))

    def test_permute_begin_end_new_short(self):
        # Short masks combined with a new_axis bit.
        slice_node, _ = self._build_slice_graph(
            {'data_1': {'shape': np.array([1, 2, 3, 4]), 'value': None},
             'strided_slice': {'begin_mask': np.array([1, 0, 0]), 'end_mask': np.array([0, 1, 0]),
                               'new_axis_mask': np.array([1, 0, 0]), 'shrink_axis_mask': [0, 0, 0],
                               'ellipsis_mask': np.array([0, 0, 0])},
             'data_2': {'shape': np.array([1, 1, 2, 3, 4]), 'value': None},
             })
        permute_masks(slice_node, PermuteAttrs.Permutation(perm=[0, 4, 1, 2, 3], inv=[0, 2, 3, 4, 1]), 'begin_mask')
        self.assertTrue(np.array_equal(slice_node.begin_mask, np.array([1, 1, 0, 0, 1])))
        permute_masks(slice_node, PermuteAttrs.Permutation(perm=[0, 4, 1, 2, 3], inv=[0, 2, 3, 4, 1]), 'end_mask')
        self.assertTrue(np.array_equal(slice_node.end_mask, np.array([0, 1, 1, 0, 1])))

    def test_permute_begin_end_shrink(self):
        # A shrink_axis bit removes a dimension from the output.
        slice_node, _ = self._build_slice_graph(
            {'data_1': {'shape': np.array([1, 2, 3, 4]), 'value': None},
             'strided_slice': {'begin_mask': np.array([1, 0, 0, 1]), 'end_mask': np.array([0, 1, 0, 1]),
                               'new_axis_mask': np.array([0, 0, 0]), 'shrink_axis_mask': [1, 0, 0],
                               'ellipsis_mask': np.array([0, 0, 0])},
             'data_2': {'shape': np.array([2, 3, 4]), 'value': None},
             })
        permute_masks(slice_node, PermuteAttrs.Permutation(perm=[0, 3, 1, 2], inv=[0, 2, 3, 1]), 'begin_mask')
        self.assertTrue(np.array_equal(slice_node.begin_mask, np.array([1, 1, 0, 0])))
        permute_masks(slice_node, PermuteAttrs.Permutation(perm=[0, 3, 1, 2], inv=[0, 2, 3, 1]), 'end_mask')
        self.assertTrue(np.array_equal(slice_node.end_mask, np.array([0, 1, 1, 0])))

    def test_permute_begin_end_shrink_short(self):
        # Short masks combined with a shrink_axis bit.
        slice_node, _ = self._build_slice_graph(
            {'data_1': {'shape': np.array([1, 2, 3, 4]), 'value': None},
             'strided_slice': {'begin_mask': np.array([1, 0, 0]), 'end_mask': np.array([0, 1, 0]),
                               'new_axis_mask': np.array([0, 0, 0]), 'shrink_axis_mask': [1, 0, 0],
                               'ellipsis_mask': np.array([0, 0, 0])},
             'data_2': {'shape': np.array([2, 3, 4]), 'value': None},
             })
        permute_masks(slice_node, PermuteAttrs.Permutation(perm=[0, 3, 1, 2], inv=[0, 2, 3, 1]), 'begin_mask')
        self.assertTrue(np.array_equal(slice_node.begin_mask, np.array([1, 1, 0, 0])))
        permute_masks(slice_node, PermuteAttrs.Permutation(perm=[0, 3, 1, 2], inv=[0, 2, 3, 1]), 'end_mask')
        self.assertTrue(np.array_equal(slice_node.end_mask, np.array([0, 1, 1, 0])))

    def test_permute_begin_end_ellipsis(self):
        # An ellipsis bit expands the masks to the full input rank.
        slice_node, _ = self._build_slice_graph(
            {'data_1': {'shape': np.array([1, 2, 3, 4]), 'value': None},
             'strided_slice': {'begin_mask': np.array([0, 0]), 'end_mask': np.array([1, 0]),
                               'new_axis_mask': np.array([0]), 'shrink_axis_mask': [0],
                               'ellipsis_mask': np.array([1, 0])},
             'data_2': {'shape': np.array([1, 2, 3, 4]), 'value': None},
             })
        permute_masks(slice_node, PermuteAttrs.Permutation(perm=[0, 3, 1, 2], inv=[0, 2, 3, 1]), 'begin_mask')
        self.assertTrue(np.array_equal(slice_node.begin_mask, np.array([0, 0, 1, 1])))
        permute_masks(slice_node, PermuteAttrs.Permutation(perm=[0, 3, 1, 2], inv=[0, 2, 3, 1]), 'end_mask')
        self.assertTrue(np.array_equal(slice_node.end_mask, np.array([1, 0, 1, 1])))

    def test_permute_begin_end_ellipsis_infer(self):
        # Full StridedSlice.infer in NHWC layout extends and permutes all masks.
        slice_node, graph = self._build_slice_graph(
            {'data_1': {'shape': np.array([1, 2, 3, 4]), 'value': None},
             'strided_slice': {'begin_mask': np.array([0, 0]), 'end_mask': np.array([1, 0]),
                               'new_axis_mask': np.array([0]), 'shrink_axis_mask': [0],
                               'ellipsis_mask': np.array([1, 0])},
             'data_2': {'shape': np.array([1, 2, 3, 4]), 'value': None},
             })
        graph.graph['layout'] = "NHWC"
        StridedSlice.infer(slice_node)
        self.assertTrue(np.array_equal(slice_node.begin_mask, np.array([0, 1, 1, 0])))
        self.assertTrue(np.array_equal(slice_node.end_mask, np.array([1, 1, 1, 0])))
        self.assertTrue(np.array_equal(slice_node.shrink_axis_mask, np.array([0, 0, 0, 0])))
        self.assertTrue(np.array_equal(slice_node.new_axis_mask, np.array([0, 0, 0, 0])))

    def test_permute_begin_end_ellipsis_new(self):
        # Ellipsis and new_axis together: 5D output permutation.
        slice_node, _ = self._build_slice_graph(
            {'data_1': {'shape': np.array([1, 2, 3, 4]), 'value': None},
             'strided_slice': {'begin_mask': np.array([0, 0, 0]), 'end_mask': np.array([1, 0, 0]),
                               'new_axis_mask': np.array([1, 0, 0]), 'shrink_axis_mask': [0],
                               'ellipsis_mask': np.array([0, 1, 0])},
             'data_2': {'shape': np.array([1, 1, 2, 3, 4]), 'value': None},
             })
        permute_masks(slice_node, PermuteAttrs.Permutation(perm=[0, 4, 1, 2, 3], inv=[0, 2, 3, 4, 1]), 'begin_mask')
        self.assertTrue(np.array_equal(slice_node.begin_mask, np.array([0, 0, 0, 1, 1])))
        permute_masks(slice_node, PermuteAttrs.Permutation(perm=[0, 4, 1, 2, 3], inv=[0, 2, 3, 4, 1]), 'end_mask')
        self.assertTrue(np.array_equal(slice_node.end_mask, np.array([1, 0, 0, 1, 1])))

    def test_permute_begin_end_ellipsis_new_inputs(self):
        # permute_array_with_ellipsis rewrites the begin/end input values themselves.
        slice_node, _ = self._build_slice_graph(
            {'data_1': {'shape': np.array([1, 2, 3, 4]), 'value': None},
             'strided_slice': {'begin_mask': np.array([0, 0, 0]), 'end_mask': np.array([1, 0, 0]),
                               'new_axis_mask': np.array([1, 0, 0]), 'shrink_axis_mask': [0],
                               'ellipsis_mask': np.array([0, 1, 0])},
             'begin': {'value': np.array([0, 1, 2])},
             'end': {'value': np.array([1, 2, 3])},
             'stride': {'value': np.array([1, 1, 1])},
             'data_2': {'shape': np.array([1, 1, 2, 3, 4]), 'value': None},
             })
        slice_node.in_node(1).value = permute_array_with_ellipsis(slice_node,
                                                                  slice_node.in_node(1).value, 0)
        self.assertTrue(np.array_equal(slice_node.in_node(1).value, np.array([0, 2, 1, 0, 0])))
        slice_node.in_node(2).value = permute_array_with_ellipsis(slice_node,
                                                                  slice_node.in_node(2).value, 0)
        self.assertTrue(np.array_equal(slice_node.in_node(2).value, np.array([1, 3, 2, 0, 0])))

    def test_extend_mask(self):
        # A 2-element mask with an ellipsis is padded with ins_value up to rank 4.
        ellipsis_mask = int64_array([1, 0])
        shrink_mask = int64_array([0, 0])
        length_shape = 4
        mask = int64_array([0, 1])
        ins_value = 0
        mask = extend_mask_according_ellipsis(ellipsis_mask, shrink_mask, length_shape, list(mask), ins_value)
        self.assertEqual(mask, [0, 0, 0, 1])

    def test_extend_mask_shrinked(self):
        # A set shrink bit adds one extra inserted position.
        ellipsis_mask = int64_array([1, 0])
        shrink_mask = int64_array([0, 1])
        length_shape = 4
        mask = int64_array([0, 1])
        ins_value = 2
        mask = extend_mask_according_ellipsis(ellipsis_mask, shrink_mask, length_shape, list(mask), ins_value)
        self.assertEqual(mask, [0, 2, 2, 2, 1])

    def test_extend_mask_shrinked_shrink_mask(self):
        # Extending the shrink mask itself through the ellipsis position.
        ellipsis_mask = int64_array([0, 1, 0])
        shrink_mask = int64_array([0, 0, 1])
        length_shape = 4
        ins_value = 2
        shrink_mask = extend_mask_according_ellipsis(ellipsis_mask, shrink_mask, length_shape, list(shrink_mask),
                                                     ins_value)
        self.assertEqual(shrink_mask, [0, 0, 2, 2, 1])
| 54.481159
| 120
| 0.482283
| 2,228
| 18,796
| 3.843806
| 0.059695
| 0.098085
| 0.084773
| 0.054647
| 0.873891
| 0.863965
| 0.833839
| 0.816091
| 0.802312
| 0.794722
| 0
| 0.053451
| 0.360981
| 18,796
| 344
| 121
| 54.639535
| 0.659562
| 0.04602
| 0
| 0.701439
| 0
| 0
| 0.151315
| 0
| 0
| 0
| 0
| 0
| 0.097122
| 1
| 0.05036
| false
| 0
| 0.02518
| 0
| 0.079137
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
90aa101bf238a67b60a73379178d5d7a8e4557be
| 25,028
|
py
|
Python
|
tests/estimators/test_oop_estimators.py
|
TuanNguyen27/kernel_exp_family
|
5a433b8d1702debd2500237784927c427b06f02b
|
[
"BSD-3-Clause"
] | 14
|
2015-10-23T05:19:31.000Z
|
2021-01-15T12:20:22.000Z
|
tests/estimators/test_oop_estimators.py
|
TuanNguyen27/kernel_exp_family
|
5a433b8d1702debd2500237784927c427b06f02b
|
[
"BSD-3-Clause"
] | null | null | null |
tests/estimators/test_oop_estimators.py
|
TuanNguyen27/kernel_exp_family
|
5a433b8d1702debd2500237784927c427b06f02b
|
[
"BSD-3-Clause"
] | 8
|
2016-01-27T19:45:34.000Z
|
2019-08-21T18:11:50.000Z
|
from nose.tools import assert_raises
from numpy.testing.utils import assert_allclose
from kernel_exp_family.estimators.finite.gaussian import KernelExpFiniteGaussian
from kernel_exp_family.estimators.lite.gaussian import KernelExpLiteGaussian
import numpy as np
def get_instace_KernelExpFiniteGaussian(N):
    # Finite-feature estimator with fixed hyperparameters. N is unused here
    # but kept so all factories share one signature. (NOTE(review): "instace"
    # typo is part of the public name; renaming would break callers.)
    sigma = 2.
    lmbda = 2.
    D = 2
    m = 2
    return KernelExpFiniteGaussian(sigma, lmbda, m, D)

def get_instace_KernelExpLiteGaussian(N):
    # Lite estimator; needs the expected sample count N at construction time.
    sigma = 2.
    lmbda = 1.
    D = 2
    return KernelExpLiteGaussian(sigma, lmbda, D, N)

def get_estimator_instances(N):
    # One instance of every estimator implementation under test, all with D=2.
    return [
            get_instace_KernelExpFiniteGaussian(N),
            get_instace_KernelExpLiteGaussian(N)
            ]
def test_get_name_execute():
    """get_name() returns a str for every estimator."""
    N = 100
    estimators = get_estimator_instances(N)
    for est in estimators:
        assert type(est.get_name()) is str

def test_fit_execute():
    """fit() runs on well-shaped (N, D) data without raising."""
    N = 100
    estimators = get_estimator_instances(N)
    for est in estimators:
        X = np.random.randn(N, est.D)
        est.fit(X)

def test_fit_result_none():
    """fit() follows the in-place convention and returns None."""
    N = 100
    estimators = get_estimator_instances(N)
    for est in estimators:
        X = np.random.randn(N, est.D)
        result = est.fit(X)
        assert result is None

def test_fit_wrong_input_type():
    """fit() rejects non-array inputs (None, str, int) with TypeError."""
    Xs = [None, "test", 1]
    N = 1
    estimators = get_estimator_instances(N)
    for X in Xs:
        for est in estimators:
            assert_raises(TypeError, est.fit, X)

def test_fit_wrong_input_shape():
    """fit() rejects arrays of wrong rank (3D instead of 2D) with ValueError."""
    N = 100
    estimators = get_estimator_instances(N)
    for est in estimators:
        X = np.random.randn(N, est.D, 2)
        assert_raises(ValueError, est.fit, X)

def test_fit_wrong_input_dim():
    """fit() rejects data whose feature dimension differs from est.D with ValueError."""
    N = 100
    estimators = get_estimator_instances(N)
    for est in estimators:
        X = np.random.randn(N, est.D + 1)
        assert_raises(ValueError, est.fit, X)
def test_log_pdf_multiple_execute():
    """log_pdf_multiple() runs on fitted estimators without raising."""
    N = 100
    estimators = get_estimator_instances(N)
    for est in estimators:
        X = np.random.randn(N, est.D)
        est.fit(X)
        est.log_pdf_multiple(X)

def test_log_pdf_multiple_result():
    """log_pdf_multiple() returns a 1D ndarray with one value per input row."""
    N = 100
    estimators = get_estimator_instances(N)
    for est in estimators:
        X = np.random.randn(N, est.D)
        est.fit(X)
        result = est.log_pdf_multiple(X)
        assert type(result) is np.ndarray
        assert result.ndim == 1
        assert len(result) == len(X)

def test_log_pdf_multiple_result_before_fit():
    """Before any fit(), log_pdf_multiple() returns all zeros."""
    N = 100
    estimators = get_estimator_instances(N)
    for est in estimators:
        X = np.random.randn(N, est.D)
    for est in estimators:
        result = est.log_pdf_multiple(X)
        assert_allclose(result, np.zeros(N))

def test_log_pdf_multiple_wrong_input_type():
    """log_pdf_multiple() rejects None with TypeError."""
    N = 10
    estimators = get_estimator_instances(N)
    for est in estimators:
        X = np.random.randn(N, est.D)
        est.fit(X)
        assert_raises(TypeError, est.log_pdf_multiple, None)

def test_log_pdf_multiple_wrong_input_shape():
    """log_pdf_multiple() rejects mismatched input with ValueError.

    NOTE(review): identical to the _wrong_input_dim test below; a true
    shape test would use a 3D input like the fit() version does — confirm.
    """
    N = 100
    estimators = get_estimator_instances(N)
    for est in estimators:
        X = np.random.randn(N, est.D)
        Y = np.random.randn(N, est.D + 1)
        est.fit(X)
        assert_raises(ValueError, est.log_pdf_multiple, Y)

def test_log_pdf_multiple_wrong_input_dim():
    """log_pdf_multiple() rejects data with a wrong feature dimension with ValueError."""
    N = 100
    estimators = get_estimator_instances(N)
    for est in estimators:
        X = np.random.randn(N, est.D)
        Y = np.random.randn(N, est.D + 1)
        est.fit(X)
        assert_raises(ValueError, est.log_pdf_multiple, Y)
def test_log_pdf_execute():
    """log_pdf() runs on a single point after fitting."""
    N = 100
    estimators = get_estimator_instances(N)
    for est in estimators:
        X = np.random.randn(N, est.D)
        x = np.random.randn(est.D)
        est.fit(X)
        est.log_pdf(x)

def test_log_pdf_result():
    """log_pdf() returns a scalar np.float64."""
    N = 100
    estimators = get_estimator_instances(N)
    for est in estimators:
        X = np.random.randn(N, est.D)
        x = np.random.randn(est.D)
        est.fit(X)
        result = est.log_pdf(x)
        assert type(result) is np.float64

def test_log_pdf_result_before_fit():
    """Before any fit(), log_pdf() returns 0."""
    N = 10
    estimators = get_estimator_instances(N)
    for est in estimators:
        x = np.random.randn(est.D)
    for est in estimators:
        result = est.log_pdf(x)
        assert_allclose(result, 0)

def test_log_pdf_wrong_input_type():
    """log_pdf() rejects None with TypeError."""
    N = 10
    estimators = get_estimator_instances(N)
    for est in estimators:
        X = np.random.randn(N, est.D)
        est.fit(X)
        assert_raises(TypeError, est.log_pdf, None)

def test_log_pdf_wrong_input_shape():
    """log_pdf() rejects a point of wrong dimension with ValueError.

    NOTE(review): identical to the _wrong_input_dim test below — confirm intent.
    """
    N = 100
    estimators = get_estimator_instances(N)
    for est in estimators:
        X = np.random.randn(N, est.D)
        x = np.random.randn(est.D + 1)
        est.fit(X)
        assert_raises(ValueError, est.log_pdf, x)

def test_log_pdf_wrong_input_dim():
    """log_pdf() rejects a point whose dimension differs from est.D with ValueError."""
    N = 100
    estimators = get_estimator_instances(N)
    for est in estimators:
        X = np.random.randn(N, est.D)
        x = np.random.randn(est.D + 1)
        est.fit(X)
        assert_raises(ValueError, est.log_pdf, x)
def test_grad_execute():
    """grad() runs on a single point after fitting."""
    N = 100
    estimators = get_estimator_instances(N)
    for est in estimators:
        X = np.random.randn(N, est.D)
        x = np.random.randn(est.D)
        est.fit(X)
        est.grad(x)

def test_grad_result():
    """grad() returns a 1D ndarray of length est.D."""
    N = 100
    estimators = get_estimator_instances(N)
    for est in estimators:
        X = np.random.randn(N, est.D)
        x = np.random.randn(est.D)
        est.fit(X)
        result = est.grad(x)
        assert type(result) is np.ndarray
        assert result.ndim == 1
        assert len(result) == est.D

def test_grad_wrong_before_fit():
    """Before any fit(), grad() returns the zero vector."""
    N = 10
    estimators = get_estimator_instances(N)
    for est in estimators:
        x = np.random.randn(est.D)
    for est in estimators:
        result = est.grad(x)
        assert_allclose(result, np.zeros(est.D))

def test_grad_wrong_input_type():
    """grad() rejects None with TypeError."""
    N = 10
    estimators = get_estimator_instances(N)
    for est in estimators:
        X = np.random.randn(N, est.D)
        est.fit(X)
        assert_raises(TypeError, est.grad, None)

def test_grad_wrong_input_shape():
    """grad() rejects a point of wrong dimension with ValueError.

    NOTE(review): identical to the _wrong_input_dim test below — confirm intent.
    """
    N = 100
    estimators = get_estimator_instances(N)
    for est in estimators:
        X = np.random.randn(N, est.D)
        x = np.random.randn(est.D + 1)
        est.fit(X)
        assert_raises(ValueError, est.grad, x)

def test_grad_wrong_input_dim():
    """grad() rejects a point whose dimension differs from est.D with ValueError."""
    N = 100
    estimators = get_estimator_instances(N)
    for est in estimators:
        X = np.random.randn(N, est.D)
        x = np.random.randn(est.D + 1)
        est.fit(X)
        assert_raises(ValueError, est.grad, x)
def test_objective_execute():
    """objective() runs on the training data after fitting."""
    N = 100
    estimators = get_estimator_instances(N)
    for est in estimators:
        X = np.random.randn(N, est.D)
        est.fit(X)
        est.objective(X)

def test_objective_result():
    """objective() returns a scalar np.float64."""
    N = 100
    estimators = get_estimator_instances(N)
    for est in estimators:
        X = np.random.randn(N, est.D)
        est.fit(X)
        result = est.objective(X)
        assert type(result) is np.float64

def test_objective_wrong_input_type():
    """objective() rejects None with TypeError."""
    N = 10
    estimators = get_estimator_instances(N)
    for est in estimators:
        X = np.random.randn(N, est.D)
        est.fit(X)
        assert_raises(TypeError, est.objective, None)

def test_objective_wrong_input_shape():
    """objective() rejects mismatched data with ValueError.

    NOTE(review): identical to the _wrong_input_dim test below — confirm intent.
    """
    N = 100
    estimators = get_estimator_instances(N)
    for est in estimators:
        X = np.random.randn(N, est.D)
        Y = np.random.randn(N, est.D + 1)
        est.fit(X)
        assert_raises(ValueError, est.objective, Y)

def test_objective_wrong_input_dim():
    """objective() rejects data with a wrong feature dimension with ValueError."""
    N = 100
    estimators = get_estimator_instances(N)
    for est in estimators:
        X = np.random.randn(N, est.D)
        Y = np.random.randn(N, est.D + 1)
        est.fit(X)
        assert_raises(ValueError, est.objective, Y)
def test_xvalidate_objective_execute():
    """xvalidate_objective() runs with valid folds/repetitions (no prior fit needed)."""
    N = 100
    estimators = get_estimator_instances(N)
    for est in estimators:
        X = np.random.randn(N, est.D)
        est.xvalidate_objective(X, num_folds=3, num_repetitions=1)

def test_xvalidate_objective_result():
    """xvalidate_objective() returns a (num_repetitions, num_folds) ndarray."""
    N = 100
    estimators = get_estimator_instances(N)
    for est in estimators:
        X = np.random.randn(N, est.D)
        result = est.xvalidate_objective(X, num_folds=3, num_repetitions=2)
        assert type(result) is np.ndarray
        assert result.ndim == 2
        assert result.shape[0] == 2
        assert result.shape[1] == 3

def test_xvalidate_objective_wrong_input_type():
    """Each of X/num_folds/num_repetitions set to None raises TypeError."""
    N = 100
    estimators = get_estimator_instances(N)
    for est in estimators:
        X = np.random.randn(N, est.D)
        assert_raises(TypeError, est.xvalidate_objective, X=None, num_folds=3, num_repetitions=2)
        assert_raises(TypeError, est.xvalidate_objective, X=X, num_folds=None, num_repetitions=2)
        assert_raises(TypeError, est.xvalidate_objective, X=X, num_folds=3, num_repetitions=None)

def test_xvalidate_objective_wrong_input_dim_X():
    """3D X raises ValueError.

    NOTE(review): "dim"/"shape" naming here is swapped relative to the fit()
    tests (there, "shape" means wrong rank) — confirm intended naming.
    """
    N = 100
    estimators = get_estimator_instances(N)
    for est in estimators:
        X = np.random.randn(N, est.D, 1)
        assert_raises(ValueError, est.xvalidate_objective, X=X, num_folds=3, num_repetitions=2)

def test_xvalidate_objective_wrong_input_shape_X():
    """X with a wrong feature dimension raises ValueError."""
    N = 100
    estimators = get_estimator_instances(N)
    for est in estimators:
        X = np.random.randn(N, est.D + 1)
        assert_raises(ValueError, est.xvalidate_objective, X=X, num_folds=3, num_repetitions=2)

def test_xvalidate_objective_wrong_input_negative_int():
    """Zero folds or zero repetitions raise ValueError.

    NOTE(review): X here also has a wrong feature dimension (est.D + 1), so
    the ValueError may come from X validation rather than the counts — confirm.
    """
    N = 100
    estimators = get_estimator_instances(N)
    for est in estimators:
        X = np.random.randn(N, est.D + 1)
        assert_raises(ValueError, est.xvalidate_objective, X=X, num_folds=0, num_repetitions=2)
        assert_raises(ValueError, est.xvalidate_objective, X=X, num_folds=3, num_repetitions=0)
def test_get_parameters_finite():
    """The finite estimator exposes exactly the sigma and lmbda parameter names."""
    N = 10
    names = get_instace_KernelExpFiniteGaussian(N).get_parameter_names()
    assert "sigma" in names
    assert "lmbda" in names
    assert len(names) == 2

def test_get_parameters_lite():
    """The lite estimator exposes exactly the sigma and lmbda parameter names."""
    N = 10
    names = get_instace_KernelExpLiteGaussian(N).get_parameter_names()
    assert "sigma" in names
    assert "lmbda" in names
    assert len(names) == 2

def test_get_parameters():
    """get_parameters() values mirror the matching instance attributes."""
    N = 10
    estimators = get_estimator_instances(N)
    for estimator in estimators:
        param_dict = estimator.get_parameters()
        for name, value in param_dict.items():
            assert getattr(estimator, name) == value

def test_set_parameters_from_dict():
    """set_parameters_from_dict() round-trips: set values are read back unchanged."""
    N = 10
    estimators = get_estimator_instances(N)
    for estimator in estimators:
        param_dict = estimator.get_parameters()
        param_dict_old = param_dict.copy()
        for name in param_dict.keys():
            param_dict[name] += 1
        estimator.set_parameters_from_dict(param_dict)
        param_dict_new = estimator.get_parameters()
        for name in param_dict_new.keys():
            assert param_dict_new[name] == param_dict_old[name] + 1

def test_set_parameters_from_dict_wrong_input_type():
    """Non-dict arguments raise TypeError."""
    N = 10
    estimators = get_estimator_instances(N)
    for estimator in estimators:
        assert_raises(TypeError, estimator.set_parameters_from_dict, None)
        assert_raises(TypeError, estimator.set_parameters_from_dict, 1)
        assert_raises(TypeError, estimator.set_parameters_from_dict, [])

def test_set_parameters_from_dict_wrong_input_parameters():
    """A dict containing an unknown parameter name raises ValueError."""
    N = 10
    estimators = get_estimator_instances(N)
    for estimator in estimators:
        param_dict = estimator.get_parameters()
        param_dict['strange_parameter'] = 0
        assert_raises(ValueError, estimator.set_parameters_from_dict, param_dict)
def test_update_fit_execute():
    """update_fit() runs after an initial fit() on estimators that support it."""
    N = 100
    estimators = get_estimator_instances(N)
    for est in estimators:
        if est.supports_update_fit():
            X = np.random.randn(N, est.D)
            X2 = np.random.randn(N, est.D)
            est.fit(X)
            est.update_fit(X2)

def test_update_fit_increasing_n():
    """update_fit() with N new points grows the tracked sample count by N."""
    N = 100
    estimators = get_estimator_instances(N)
    for est in estimators:
        if est.supports_update_fit():
            X = np.random.randn(N, est.D)
            X2 = np.random.randn(N, est.D)
            est.fit(X)
            old_n = est.n
            est.update_fit(X2)
            assert est.n == old_n + N
def test_update_fit_equals_batch_from_scratch():
    """An estimator built by update_fit() alone matches a batch fit() on the same data.

    Bug fix: the "online" log-pdf and gradient were previously queried from
    est1 (the batch-fitted estimator) instead of est2 (the incrementally
    fitted one), so the test compared est1 against itself and passed
    vacuously. They are now read from est2.
    """
    N = 100
    # make sure both estimator sets are built using the same random seed
    rng_state = np.random.get_state()
    estimators = get_estimator_instances(N)
    np.random.set_state(rng_state)
    estimators2 = get_estimator_instances(N)
    for est1, est2 in zip(estimators, estimators2):
        if est1.supports_update_fit():
            x_test = np.random.randn(est1.D)
            X = np.random.randn(N, est1.D)
            est1.fit(X)
            log_pdf_batch = est1.log_pdf(x_test)
            grad_batch = est1.grad(x_test)
            est2.update_fit(X)
            log_pdf_online = est2.log_pdf(x_test)
            grad_online = est2.grad(x_test)
            assert_allclose(log_pdf_online, log_pdf_batch, err_msg=est1.get_name())
            assert_allclose(grad_online, grad_batch, err_msg=est1.get_name())
def test_update_fit_equals_batch_with_prevous_fit_N_1():
N = 1
estimators = get_estimator_instances(N)
for est in estimators:
if est.supports_update_fit():
x_test = np.random.randn(est.D)
X1 = np.random.randn(N, est.D)
X2 = np.random.randn(N, est.D)
stacked = np.vstack((X1, X2))
est.fit(stacked)
log_pdf_batch = est.log_pdf(x_test)
grad_batch = est.grad(x_test)
est.fit(X1)
est.update_fit(X2)
log_pdf_online = est.log_pdf(x_test)
grad_online = est.grad(x_test)
assert_allclose(log_pdf_online, log_pdf_batch, err_msg=est.get_name())
assert_allclose(grad_online, grad_batch, err_msg=est.get_name())
def test_update_fit_equals_batch_with_prevous_fit_N_2():
N = 2
estimators = get_estimator_instances(N)
for est in estimators:
if est.supports_update_fit():
x_test = np.random.randn(est.D)
X1 = np.random.randn(N, est.D)
X2 = np.random.randn(N, est.D)
stacked = np.vstack((X1, X2))
est.fit(stacked)
log_pdf_batch = est.log_pdf(x_test)
grad_batch = est.grad(x_test)
est.fit(X1)
est.update_fit(X2)
log_pdf_online = est.log_pdf(x_test)
grad_online = est.grad(x_test)
assert_allclose(log_pdf_online, log_pdf_batch, err_msg=est.get_name())
assert_allclose(grad_online, grad_batch, err_msg=est.get_name())
def test_update_fit_equals_batch_with_prevous_fit():
    """update_fit after a previous fit matches a batch fit on stacked data."""
    num = 100
    for density_est in get_estimator_instances(num):
        if not density_est.supports_update_fit():
            continue
        query = np.random.randn(density_est.D)
        first_chunk = np.random.randn(num, density_est.D)
        second_chunk = np.random.randn(num, density_est.D)
        # Reference: one batch fit on both chunks at once.
        density_est.fit(np.vstack((first_chunk, second_chunk)))
        ref_log_pdf = density_est.log_pdf(query)
        ref_grad = density_est.grad(query)
        # Online: fit the first chunk, then stream in the second.
        density_est.fit(first_chunk)
        density_est.update_fit(second_chunk)
        assert_allclose(density_est.log_pdf(query), ref_log_pdf, err_msg=density_est.get_name())
        assert_allclose(density_est.grad(query), ref_grad, err_msg=density_est.get_name())
def test_update_fit_equals_batch_weighted():
    """Weighted online update matches a weighted batch fit on stacked data."""
    num = 100
    for density_est in get_estimator_instances(num):
        if not density_est.supports_update_fit():
            continue
        query = np.random.randn(density_est.D)
        chunk_a = np.random.randn(num, density_est.D)
        chunk_b = np.random.randn(num, density_est.D)
        log_w_a = np.log(np.random.rand(num))
        log_w_b = np.log(np.random.rand(num))
        # Reference: one weighted batch fit on the concatenated data.
        density_est.fit(np.vstack((chunk_a, chunk_b)), np.hstack((log_w_a, log_w_b)))
        ref_log_pdf = density_est.log_pdf(query)
        ref_grad = density_est.grad(query)
        # Online: weighted fit on the first chunk, weighted update with the second.
        density_est.fit(chunk_a, log_w_a)
        density_est.update_fit(chunk_b, log_w_b)
        assert_allclose(density_est.log_pdf(query), ref_log_pdf, err_msg=density_est.get_name())
        assert_allclose(density_est.grad(query), ref_grad, err_msg=density_est.get_name())
def test_update_fit_wrong_input_type():
    """update_fit rejects non-array inputs with TypeError."""
    num = 100
    for est in get_estimator_instances(num):
        est.fit(np.random.randn(num, est.D))
        if est.supports_update_fit():
            for bad_input in (None, 1, [1, 2, 3]):
                assert_raises(TypeError, est.update_fit, bad_input)
def test_update_fit_wrong_input_shape():
    """update_fit rejects arrays that are not 2-dimensional."""
    num = 100
    for est in get_estimator_instances(num):
        est.fit(np.random.randn(num, est.D))
        if est.supports_update_fit():
            # 1-D input
            assert_raises(ValueError, est.update_fit, np.random.randn(num))
            # 3-D input
            assert_raises(ValueError, est.update_fit, np.random.randn(num, est.D - 1, 1))
def test_update_fit_wrong_input_dims():
    """update_fit rejects data whose dimensionality differs from est.D."""
    num = 100
    for est in get_estimator_instances(num):
        est.fit(np.random.randn(num, est.D))
        if est.supports_update_fit():
            # One dimension too many, then one too few.
            assert_raises(ValueError, est.update_fit, np.random.randn(num, est.D + 1))
            assert_raises(ValueError, est.update_fit, np.random.randn(num, est.D - 1))
def test_fit_with_weights_execute():
    """fit accepts a weights vector and completes without error."""
    num = 100
    for est in get_estimator_instances(num):
        data = np.random.randn(num, est.D)
        if est.supports_weights():
            est.fit(data, np.ones(num))
def test_fit_with_weights_wrong_input_shape():
    """fit rejects weight arrays that are not flat vectors."""
    num = 100
    for est in get_estimator_instances(num):
        data = np.random.randn(num, est.D)
        if est.supports_weights():
            # A column vector (N, 1) is not an acceptable weight shape.
            assert_raises(ValueError, est.fit, data, np.ones((num, 1)))
def test_fit_with_weights_wrong_input_dim():
    """fit rejects weight vectors whose length differs from the sample count."""
    num = 100
    for est in get_estimator_instances(num):
        data = np.random.randn(num, est.D)
        if est.supports_weights():
            for wrong_len in (num + 1, num - 1):
                assert_raises(ValueError, est.fit, data, np.ones(wrong_len))
def test_fit_with_weights_wrong_input_type():
    """fit rejects weights that are not arrays (string, scalar)."""
    num = 100
    for est in get_estimator_instances(num):
        data = np.random.randn(num, est.D)
        if est.supports_weights():
            assert_raises(TypeError, est.fit, data, "None")
            assert_raises(TypeError, est.fit, data, 0.)
def test_fit_with_weights_constant_weights_equals_no_weights():
    """Unit weights (log 1 = 0) must reproduce the unweighted fit exactly."""
    num = 200
    for est in get_estimator_instances(num):
        X = np.random.randn(num, est.D)
        if not est.supports_weights():
            continue
        query = np.random.randn(est.D)
        est.fit(X)
        ref_log_pdf = est.log_pdf(query)
        ref_grad = est.grad(query)
        # Refit with all-one weights; the model must not change.
        est.fit(X, np.log(np.ones(num)))
        assert_allclose(ref_log_pdf, est.log_pdf(query))
        assert_allclose(est.grad(query), ref_grad)
def test_fit_with_weights_constant_weights_equals_no_weights_N_1():
    """Single-sample case: unit weights reproduce the unweighted fit."""
    num = 1
    # Build both estimator collections from an identical RNG state.
    saved_state = np.random.get_state()
    plain_set = get_estimator_instances(num)
    np.random.set_state(saved_state)
    weighted_set = get_estimator_instances(num)
    for plain, weighted in zip(plain_set, weighted_set):
        X = np.random.randn(num, plain.D)
        if not plain.supports_weights():
            continue
        query = np.random.randn(plain.D)
        plain.fit(X)
        ref_log_pdf = plain.log_pdf(query)
        ref_grad = plain.grad(query)
        weighted.fit(X, np.log(np.ones(num)))
        assert_allclose(ref_log_pdf, weighted.log_pdf(query))
        assert_allclose(weighted.grad(query), ref_grad)
def test_fit_with_weights_constant_weights_equals_no_weights_N_2():
    """Two-sample case: unit weights reproduce the unweighted fit."""
    num = 2
    # Build both estimator collections from an identical RNG state.
    saved_state = np.random.get_state()
    plain_set = get_estimator_instances(num)
    np.random.set_state(saved_state)
    weighted_set = get_estimator_instances(num)
    for plain, weighted in zip(plain_set, weighted_set):
        X = np.random.randn(num, plain.D)
        if not plain.supports_weights():
            continue
        query = np.random.randn(plain.D)
        plain.fit(X)
        ref_log_pdf = plain.log_pdf(query)
        ref_grad = plain.grad(query)
        weighted.fit(X, np.log(np.ones(num)))
        assert_allclose(ref_log_pdf, weighted.log_pdf(query))
        assert_allclose(weighted.grad(query), ref_grad)
def test_update_fit_with_weights_constant_weights_equals_no_weights():
    """Unit weights passed to update_fit reproduce the unweighted update."""
    num = 200
    # Build both estimator collections from an identical RNG state.
    saved_state = np.random.get_state()
    plain_set = get_estimator_instances(num)
    np.random.set_state(saved_state)
    weighted_set = get_estimator_instances(num)
    for plain, weighted in zip(plain_set, weighted_set):
        X = np.random.randn(num, plain.D)
        if not plain.supports_weights():
            continue
        query = np.random.randn(plain.D)
        unit_log_weights = np.log(np.ones(num))
        plain.update_fit(X)
        weighted.update_fit(X, unit_log_weights)
        assert_allclose(plain.log_pdf(query), weighted.log_pdf(query))
        assert_allclose(weighted.grad(query), plain.grad(query))
def test_update_fit_with_weights_constant_weights_equals_no_weights_N_1():
    """Single-sample case: unit weights on update_fit match the unweighted call."""
    num = 1
    # Build both estimator collections from an identical RNG state.
    saved_state = np.random.get_state()
    plain_set = get_estimator_instances(num)
    np.random.set_state(saved_state)
    weighted_set = get_estimator_instances(num)
    for plain, weighted in zip(plain_set, weighted_set):
        X = np.random.randn(num, plain.D)
        if not plain.supports_weights():
            continue
        query = np.random.randn(plain.D)
        unit_log_weights = np.log(np.ones(num))
        plain.update_fit(X)
        weighted.update_fit(X, unit_log_weights)
        assert_allclose(plain.log_pdf(query), weighted.log_pdf(query))
        assert_allclose(weighted.grad(query), plain.grad(query))
def test_update_fit_with_weights_constant_weights_equals_no_weights_N_2():
    """Two-sample case: unit weights on update_fit match the unweighted call.

    Bug fix: N was 1 (copy-pasted from the N_1 test above), so the N=2 case
    this test is named for was never exercised.
    """
    N = 2
    # make sure both estimator sets are built using the same random seed
    rng_state = np.random.get_state()
    estimators = get_estimator_instances(N)
    np.random.set_state(rng_state)
    estimators2 = get_estimator_instances(N)
    for est1, est2 in zip(estimators, estimators2):
        X = np.random.randn(N, est1.D)
        if est1.supports_weights():
            x_test = np.random.randn(est1.D)
            log_weights = np.log(np.ones(N))
            est1.update_fit(X)
            est2.update_fit(X, log_weights)
            log_pdf = est1.log_pdf(x_test)
            grad = est1.grad(x_test)
            log_pdf_weighted = est2.log_pdf(x_test)
            grad_weighted = est2.grad(x_test)
            assert_allclose(log_pdf, log_pdf_weighted)
            assert_allclose(grad_weighted, grad)
| 30.521951
| 97
| 0.616829
| 3,464
| 25,028
| 4.192841
| 0.038972
| 0.055081
| 0.076976
| 0.099972
| 0.918136
| 0.890457
| 0.854723
| 0.824566
| 0.800262
| 0.780639
| 0
| 0.018557
| 0.289476
| 25,028
| 820
| 98
| 30.521951
| 0.798178
| 0.016022
| 0
| 0.740196
| 0
| 0
| 0.001828
| 0
| 0
| 0
| 0
| 0
| 0.143791
| 1
| 0.104575
| false
| 0
| 0.00817
| 0.001634
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
29262b7821e6feb07c95dc003a4578062cf3c477
| 10,152
|
py
|
Python
|
tests/test_munging.py
|
christabor/flask_extras
|
f57300bc2922aa4105d1aa393351b63c86c26048
|
[
"MIT"
] | 19
|
2016-08-03T07:10:23.000Z
|
2022-03-03T16:37:11.000Z
|
tests/test_munging.py
|
christabor/jinja2_template_pack
|
f57300bc2922aa4105d1aa393351b63c86c26048
|
[
"MIT"
] | 7
|
2016-11-11T21:54:53.000Z
|
2018-11-21T04:33:46.000Z
|
tests/test_munging.py
|
christabor/jinja2_template_pack
|
f57300bc2922aa4105d1aa393351b63c86c26048
|
[
"MIT"
] | 3
|
2016-12-30T10:34:02.000Z
|
2021-04-08T05:40:09.000Z
|
"""Test munging filters."""
from flask_extras.filters import munging
import pytest
class TestFilterVals:
    """Tests covering the filter_vals filter."""

    def test_title_returns_invalid_first(self):
        """An empty mapping with a None filter list stays empty."""
        assert munging.filter_vals({}, None) == {}

    def test_title_returns_invalid_second(self):
        """A None mapping is returned untouched."""
        assert munging.filter_vals(None, []) is None

    def test_title_returns_invalid_both(self):
        """None in, None out when both arguments are None."""
        assert munging.filter_vals(None, None) is None

    def test_title_returns_valid_empty(self):
        """An empty mapping with an empty filter list is empty."""
        assert munging.filter_vals({}, []) == {}

    def test_title_returns_valid_filtered_empty(self):
        """Filtering out the only value leaves nothing behind."""
        assert munging.filter_vals({'foo': 'bar'}, ['bar']) == {}

    def test_title_returns_valid_filtered(self):
        """Only pairs whose value matches the filter are dropped."""
        original = {'foo': 'bar', 'bar': 'foo'}
        assert munging.filter_vals(original, ['bar']) == {'bar': 'foo'}

    def test_title_returns_valid_filtered_invalid_val(self):
        """Values absent from the mapping remove nothing."""
        mapping = {'foo': 'bar', 'bar': 'foo'}
        assert munging.filter_vals(mapping, ['baz']) == mapping
class TestFilterKeys:
    """Tests covering the filter_keys filter."""

    def test_title_returns_invalid_first(self):
        """An empty mapping with a None filter list stays empty."""
        assert munging.filter_keys({}, None) == {}

    def test_title_returns_invalid_second(self):
        """A None mapping is returned untouched."""
        assert munging.filter_keys(None, []) is None

    def test_title_returns_invalid_both(self):
        """None in, None out when both arguments are None."""
        assert munging.filter_keys(None, None) is None

    def test_title_returns_valid_empty(self):
        """An empty mapping with an empty filter list is empty."""
        assert munging.filter_keys({}, []) == {}

    def test_title_returns_valid_filtered_empty(self):
        """Filtering out the only key leaves nothing behind."""
        assert munging.filter_keys({'foo': 'bar'}, ['foo']) == {}

    def test_title_returns_valid_filtered(self):
        """Only pairs whose key matches the filter are dropped."""
        original = {'foo': 'bar', 'bar': 'foo'}
        assert munging.filter_keys(original, ['bar']) == {'foo': 'bar'}

    def test_title_returns_valid_filtered_invalid_val(self):
        """Keys absent from the mapping remove nothing."""
        mapping = {'foo': 'bar', 'bar': 'foo'}
        assert munging.filter_keys(mapping, ['baz']) == mapping
class TestFilterList:
    """Tests covering the filter_list filter."""

    def test_title_returns_invalid_first(self):
        """An empty list with a None filter stays empty."""
        assert munging.filter_list([], None) == []

    def test_title_returns_invalid_second(self):
        """A None sequence is returned untouched."""
        assert munging.filter_list(None, []) is None

    def test_title_returns_invalid_both(self):
        """None in, None out when both arguments are None."""
        assert munging.filter_list(None, None) is None

    def test_title_returns_invalid_dict(self):
        """A dict argument is passed through unchanged."""
        assert munging.filter_list({}, []) == {}

    def test_title_returns_valid_filtered_empty(self):
        """Filtering an empty list yields an empty list."""
        assert munging.filter_list([], ['foo']) == []

    def test_title_returns_valid_filtered(self):
        """Matching items are removed from the list."""
        assert munging.filter_list(['foo', 'bar'], ['bar']) == ['foo']

    def test_title_returns_valid_filtered_invalid_val(self):
        """Items absent from the list remove nothing."""
        items = ['foo', 'bar']
        assert munging.filter_list(items, ['baz']) == items
class TestGroupBy:
    """All tests for group_by function.

    Bug fix: the original assertions compared ``res.keys()`` directly with a
    list. On Python 3 ``dict.keys()`` returns a view object that is never
    equal to a list, so those assertions always failed; they also assumed a
    particular key ordering. Compare ``sorted(res.keys())`` with a sorted
    list instead, which is correct and order-independent on both Python 2
    and 3.
    """

    def _get_obj(self, name):
        """Build a throwaway object whose ``name`` attribute is set only when given."""
        class ObjClass(object):
            def __init__(self, name=None):
                if name is not None:
                    self.name = name
        return ObjClass(name=name)

    def test_returns_no_objs_noname(self):
        """Objects with no grouping attr all land in '__unlabeled'."""
        objs = [None for _ in range(4)]
        res = munging.group_by(objs, attr=None)
        assert sorted(res.keys()) == ['__unlabeled']
        assert len(res['__unlabeled']) == 4

    def test_returns_no_objs_with_name(self):
        """An attr missing on every object groups everything as unlabeled."""
        objs = [None for _ in range(4)]
        res = munging.group_by(objs, attr='invalid-attr')
        assert sorted(res.keys()) == ['__unlabeled']
        assert len(res['__unlabeled']) == 4

    def test_returns_objs_nogroup_noname(self):
        """With no attr, named objects are still unlabeled."""
        objs = [self._get_obj(name) for name in ['foo1']]
        res = munging.group_by(objs, attr=None)
        assert sorted(res.keys()) == ['__unlabeled']
        assert len(res['__unlabeled']) == 1

    def test_returns_objs_nogroup_fallback(self):
        """The fallback label replaces '__unlabeled' when supplied."""
        objs = [self._get_obj(name) for name in ['foo1']]
        res = munging.group_by(objs, attr=None, fallback='somegroup')
        assert sorted(res.keys()) == ['somegroup']
        assert len(res['somegroup']) == 1

    def test_returns_objs_nogroup(self):
        """Objects lacking the attr value fall into '__unlabeled'."""
        objs = [self._get_obj(None)]
        res = munging.group_by(objs, attr='name')
        assert sorted(res.keys()) == ['__unlabeled']
        assert len(res['__unlabeled']) == 1

    def test_returns_objs_group_custom_group(self):
        """Objects matching a custom group end up under its label."""
        objs = [self._get_obj(name) for name in ['foo1', 'foo2']]
        groups = [('group1', ('foo1', 'foo2'))]
        res = munging.group_by(objs, groups=groups, attr='name')
        assert sorted(res.keys()) == sorted(['group1', '__unlabeled'])
        assert len(res['group1']) == 2

    def test_returns_objs_group_custom_group_with_one_unlabeled(self):
        """Objects outside every custom group stay unlabeled."""
        objs = [self._get_obj(name) for name in ['foo1', 'foo2', 'foo3']]
        groups = [('group1', ('foo1', 'foo2'))]
        res = munging.group_by(objs, groups=groups, attr='name')
        assert sorted(res.keys()) == sorted(['group1', '__unlabeled'])
        assert len(res['group1']) == 2
        assert len(res['__unlabeled']) == 1

    def test_returns_objs_group_custom_group_with_one_unlabeled_complex(self):
        """Several groups plus one leftover object partition correctly."""
        names = ['foo{}'.format(i) for i in range(1, 11)]
        objs = [self._get_obj(name) for name in names]
        groups = [
            ('group1', ('foo1', 'foo2', 'foo3')),
            ('group2', ('foo4', 'foo5', 'foo6')),
            ('group3', ('foo7', 'foo8', 'foo9')),
        ]
        res = munging.group_by(objs, groups=groups, attr='name')
        for key in res.keys():
            assert key in ['group1', 'group2', 'group3', '__unlabeled']
        assert len(res.keys()) == 4
        assert len(res['group1']) == 3
        assert len(res['group2']) == 3
        assert len(res['group3']) == 3
        assert len(res['__unlabeled']) == 1

    def test_returns_objs_group_custom_group_with_order_preserved(self):
        """Members appear within each group in the order the group lists them."""
        names = ['foo{}'.format(i) for i in range(1, 10)]
        objs = [self._get_obj(name) for name in names]
        groups = [
            ('group1', ('foo2', 'foo1', 'foo3')),
            ('group2', ('foo5', 'foo4', 'foo6')),
            ('group3', ('foo7', 'foo9', 'foo8')),
        ]
        res = munging.group_by(objs, groups=groups, attr='name')
        for key in res.keys():
            assert key in ['group1', 'group2', 'group3', '__unlabeled']
        for group in groups:
            label, items = group
            for i, item in enumerate(items):
                obj_label = getattr(res[label][i], 'name')
                assert item == obj_label
class TestSortDictKeysFromReflist:
    """Tests covering sort_dict_keys_from_reflist."""

    def test_sort_dict_keys_from_reflist(self):
        """Pairs come back ordered by the reference key list."""
        payload = {'foo': 1, 'bar': 2, 'baz': 3, 'quux': 4}
        order = ['quux', 'baz', 'foo', 'bar']
        want = [('quux', 4), ('baz', 3), ('foo', 1), ('bar', 2)]
        assert munging.sort_dict_keys_from_reflist(payload, order) == want

    def test_sort_dict_keys_from_reflist_nested(self):
        """Nested dict values survive the reordering intact."""
        payload = {'foo': {'inner1': 1, 'inner2': 2}, 'bar': 2, 'baz': 3, 'quux': 4}
        order = ['quux', 'baz', 'foo', 'bar']
        want = [
            ('quux', 4), ('baz', 3),
            ('foo', {'inner1': 1, 'inner2': 2}), ('bar', 2)]
        assert munging.sort_dict_keys_from_reflist(payload, order) == want

    def test_sort_dict_keys_from_reflist_none(self):
        """None values are carried through unchanged."""
        payload = {'foo': None, 'bar': 2, 'baz': 3, 'quux': 4}
        order = ['quux', 'baz', 'foo', 'bar']
        want = [('quux', 4), ('baz', 3), ('foo', None), ('bar', 2)]
        assert munging.sort_dict_keys_from_reflist(payload, order) == want

    def test_sort_dict_keys_from_reflist_missing_val(self):
        """Keys absent from the reference list are dropped from the result."""
        payload = {'foo': 1, 'bar': 2, 'baz': 3, 'quux': 4}
        order = ['quux', 'baz', 'foo']
        want = [('quux', 4), ('baz', 3), ('foo', 1)]
        assert munging.sort_dict_keys_from_reflist(payload, order) == want
class TestSortDictValsFromReflist:
    """Tests covering sort_dict_vals_from_reflist."""

    def test_sort_dict_vals_from_reflist(self):
        """Pairs come back ordered by the reference value list."""
        payload = {'foo': 1, 'bar': 2, 'baz': 3, 'quux': 4}
        order = [4, 3, 1, 2]
        want = [('quux', 4), ('baz', 3), ('foo', 1), ('bar', 2)]
        assert munging.sort_dict_vals_from_reflist(payload, order) == want

    def test_sort_dict_vals_from_reflist_nested(self):
        """Nested dict values can themselves be reference entries."""
        payload = {'foo': {'inner1': 1, 'inner2': 2}, 'bar': 2, 'baz': 3, 'quux': 4}
        order = [4, 3, {'inner1': 1, 'inner2': 2}, 2]
        want = [
            ('quux', 4), ('baz', 3),
            ('foo', {'inner1': 1, 'inner2': 2}), ('bar', 2)]
        assert munging.sort_dict_vals_from_reflist(payload, order) == want

    def test_sort_dict_vals_from_reflist_none(self):
        """None values can be referenced and are preserved."""
        payload = {'foo': None, 'bar': 2, 'baz': 3, 'quux': 4}
        order = [4, 3, None, 2]
        want = [('quux', 4), ('baz', 3), ('foo', None), ('bar', 2)]
        assert munging.sort_dict_vals_from_reflist(payload, order) == want

    def test_sort_dict_vals_from_reflist_missing_val(self):
        """Values absent from the reference list are dropped from the result."""
        payload = {'foo': 1, 'bar': 2, 'baz': 3, 'quux': 4}
        order = [4, 3, 1]
        want = [('quux', 4), ('baz', 3), ('foo', 1)]
        assert munging.sort_dict_vals_from_reflist(payload, order) == want
| 37.051095
| 78
| 0.583136
| 1,268
| 10,152
| 4.417981
| 0.086751
| 0.047483
| 0.108533
| 0.071225
| 0.847554
| 0.829882
| 0.819529
| 0.788468
| 0.774188
| 0.765084
| 0
| 0.020697
| 0.24803
| 10,152
| 273
| 79
| 37.186813
| 0.713125
| 0.083924
| 0
| 0.5
| 0
| 0
| 0.081156
| 0
| 0
| 0
| 0
| 0
| 0.298851
| 1
| 0.229885
| false
| 0
| 0.011494
| 0
| 0.287356
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2956bcf8ec057b18c3e817ea26c294ffc094b841
| 9,859
|
py
|
Python
|
tests/integration/test_role/test.py
|
MaxTheHuman/ClickHouse
|
748b75ab1bf51701cc5bb4e4da5b169c20efc042
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/test_role/test.py
|
MaxTheHuman/ClickHouse
|
748b75ab1bf51701cc5bb4e4da5b169c20efc042
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/test_role/test.py
|
MaxTheHuman/ClickHouse
|
748b75ab1bf51701cc5bb4e4da5b169c20efc042
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV
# Single-node test cluster shared by every test in this module.
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance')
@pytest.fixture(scope="module", autouse=True)
def started_cluster():
    """Start the cluster once per module and create a small sample table."""
    try:
        cluster.start()
        # Two-column table with two rows; tests assert on its exact contents.
        instance.query("CREATE TABLE test_table(x UInt32, y UInt32) ENGINE = MergeTree ORDER BY tuple()")
        instance.query("INSERT INTO test_table VALUES (1,5), (2,10)")
        yield cluster
    finally:
        cluster.shutdown()
@pytest.fixture(autouse=True)
def cleanup_after_test():
    """Drop any users/roles a test created, even if the test failed."""
    try:
        yield
    finally:
        instance.query("DROP USER IF EXISTS A, B")
        instance.query("DROP ROLE IF EXISTS R1, R2")
def test_create_role():
    """A role's SELECT grant takes effect only while the role is granted to the user."""
    # Test has known possible deadlocks
    # TODO Fix as soon as possible
    if instance.is_built_with_thread_sanitizer():
        return
    instance.query("CREATE USER A")
    instance.query('CREATE ROLE R1')
    # No grants yet: A cannot read the table.
    assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A')
    instance.query('GRANT SELECT ON test_table TO R1')
    # The role has the privilege, but A does not hold the role yet.
    assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A')
    instance.query('GRANT R1 TO A')
    # A now reads through R1's privilege.
    assert instance.query("SELECT * FROM test_table", user='A') == "1\t5\n2\t10\n"
    instance.query('REVOKE R1 FROM A')
    # Revoking the role removes the access again.
    assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A')
def test_grant_role_to_role():
    """Privileges flow transitively through a role granted to another role."""
    # Test has known possible deadlocks
    # TODO Fix as soon as possible
    if instance.is_built_with_thread_sanitizer():
        return
    instance.query("CREATE USER A")
    instance.query('CREATE ROLE R1')
    instance.query('CREATE ROLE R2')
    # No grants anywhere yet.
    assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A')
    instance.query('GRANT R1 TO A')
    # R1 itself carries no privilege yet.
    assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A')
    instance.query('GRANT R2 TO R1')
    # R2 is nested under R1 but still empty.
    assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A')
    instance.query('GRANT SELECT ON test_table TO R2')
    # A -> R1 -> R2 -> SELECT: access works through the role chain.
    assert instance.query("SELECT * FROM test_table", user='A') == "1\t5\n2\t10\n"
def test_combine_privileges():
    """Column-level privileges from two different roles combine for one user."""
    # Test has known possible deadlocks
    # TODO Fix as soon as possible
    if instance.is_built_with_thread_sanitizer():
        return
    instance.query("CREATE USER A ")
    instance.query('CREATE ROLE R1')
    instance.query('CREATE ROLE R2')
    assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A')
    instance.query('GRANT R1 TO A')
    instance.query('GRANT SELECT(x) ON test_table TO R1')
    # Only column x is readable, so SELECT * still fails...
    assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A')
    # ...while selecting the granted column succeeds.
    assert instance.query("SELECT x FROM test_table", user='A') == "1\n2\n"
    instance.query('GRANT SELECT(y) ON test_table TO R2')
    instance.query('GRANT R2 TO A')
    # With x from R1 and y from R2, the full row is now visible.
    assert instance.query("SELECT * FROM test_table", user='A') == "1\t5\n2\t10\n"
def test_admin_option():
    """Only WITH ADMIN OPTION lets a role holder grant that role onward."""
    # Test has known possible deadlocks
    # TODO Fix as soon as possible
    if instance.is_built_with_thread_sanitizer():
        return
    instance.query("CREATE USER A")
    instance.query("CREATE USER B")
    instance.query('CREATE ROLE R1')
    instance.query('GRANT SELECT ON test_table TO R1')
    assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='B')
    instance.query('GRANT R1 TO A')
    # A holds R1 but without ADMIN OPTION, so A cannot pass it to B.
    assert "Not enough privileges" in instance.query_and_get_error("GRANT R1 TO B", user='A')
    assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='B')
    instance.query('GRANT R1 TO A WITH ADMIN OPTION')
    # With ADMIN OPTION the re-grant succeeds and B gains the access.
    instance.query("GRANT R1 TO B", user='A')
    assert instance.query("SELECT * FROM test_table", user='B') == "1\t5\n2\t10\n"
def test_revoke_requires_admin_option():
    """Revoking a role from another user requires holding it WITH ADMIN OPTION."""
    # Test has known possible deadlocks
    # TODO Fix as soon as possible
    if instance.is_built_with_thread_sanitizer():
        return
    instance.query("CREATE USER A, B")
    instance.query("CREATE ROLE R1, R2")
    instance.query("GRANT R1 TO B")
    assert instance.query("SHOW GRANTS FOR B") == "GRANT R1 TO B\n"
    # A does not hold R1 at all: revoke is refused.
    expected_error = "necessary to have the role R1 granted"
    assert expected_error in instance.query_and_get_error("REVOKE R1 FROM B", user='A')
    assert instance.query("SHOW GRANTS FOR B") == "GRANT R1 TO B\n"
    instance.query("GRANT R1 TO A")
    # A holds R1 but without ADMIN OPTION: still refused.
    expected_error = "granted, but without ADMIN option"
    assert expected_error in instance.query_and_get_error("REVOKE R1 FROM B", user='A')
    assert instance.query("SHOW GRANTS FOR B") == "GRANT R1 TO B\n"
    instance.query("GRANT R1 TO A WITH ADMIN OPTION")
    # With ADMIN OPTION the revoke succeeds.
    instance.query("REVOKE R1 FROM B", user='A')
    assert instance.query("SHOW GRANTS FOR B") == ""
    instance.query("GRANT R1 TO B")
    assert instance.query("SHOW GRANTS FOR B") == "GRANT R1 TO B\n"
    # REVOKE ALL only strips the roles A administers.
    instance.query("REVOKE ALL FROM B", user='A')
    assert instance.query("SHOW GRANTS FOR B") == ""
    instance.query("GRANT R1, R2 TO B")
    assert instance.query("SHOW GRANTS FOR B") == "GRANT R1, R2 TO B\n"
    # A cannot administer R2 yet, so REVOKE ALL fails outright...
    expected_error = "necessary to have the role R2 granted"
    assert expected_error in instance.query_and_get_error("REVOKE ALL FROM B", user='A')
    assert instance.query("SHOW GRANTS FOR B") == "GRANT R1, R2 TO B\n"
    # ...but REVOKE ALL EXCEPT R2 works, leaving only R2 behind.
    instance.query("REVOKE ALL EXCEPT R2 FROM B", user='A')
    assert instance.query("SHOW GRANTS FOR B") == "GRANT R2 TO B\n"
    instance.query("GRANT R2 TO A WITH ADMIN OPTION")
    instance.query("REVOKE ALL FROM B", user='A')
    assert instance.query("SHOW GRANTS FOR B") == ""
    instance.query("GRANT R1, R2 TO B")
    assert instance.query("SHOW GRANTS FOR B") == "GRANT R1, R2 TO B\n"
    # Now A administers both roles, so REVOKE ALL clears everything.
    instance.query("REVOKE ALL FROM B", user='A')
    assert instance.query("SHOW GRANTS FOR B") == ""
def test_introspection():
    """SHOW and system-table introspection reflect the configured grants."""
    # Test has known possible deadlocks
    # TODO Fix as soon as possible
    if instance.is_built_with_thread_sanitizer():
        return
    # Set up two users and two roles with a mix of grants and one revoke.
    instance.query("CREATE USER A")
    instance.query("CREATE USER B")
    instance.query('CREATE ROLE R1')
    instance.query('CREATE ROLE R2')
    instance.query('GRANT R1 TO A')
    instance.query('GRANT R2 TO B WITH ADMIN OPTION')
    instance.query('GRANT SELECT ON test.table TO A, R2')
    instance.query('GRANT CREATE ON *.* TO B WITH GRANT OPTION')
    instance.query('REVOKE SELECT(x) ON test.table FROM R2')
    # SHOW ROLES / SHOW CREATE ROLE(S) statements.
    assert instance.query("SHOW ROLES") == TSV(["R1", "R2"])
    assert instance.query("SHOW CREATE ROLE R1") == TSV(["CREATE ROLE R1"])
    assert instance.query("SHOW CREATE ROLE R2") == TSV(["CREATE ROLE R2"])
    assert instance.query("SHOW CREATE ROLES R1, R2") == TSV(["CREATE ROLE R1", "CREATE ROLE R2"])
    assert instance.query("SHOW CREATE ROLES") == TSV(["CREATE ROLE R1", "CREATE ROLE R2"])
    # SHOW GRANTS, both for named principals and for the current user.
    assert instance.query("SHOW GRANTS FOR A") == TSV(["GRANT SELECT ON test.table TO A", "GRANT R1 TO A"])
    assert instance.query("SHOW GRANTS FOR B") == TSV(
        ["GRANT CREATE ON *.* TO B WITH GRANT OPTION", "GRANT R2 TO B WITH ADMIN OPTION"])
    assert instance.query("SHOW GRANTS FOR R1") == ""
    assert instance.query("SHOW GRANTS FOR R2") == TSV(
        ["GRANT SELECT ON test.table TO R2", "REVOKE SELECT(x) ON test.table FROM R2"])
    assert instance.query("SHOW GRANTS", user='A') == TSV(["GRANT SELECT ON test.table TO A", "GRANT R1 TO A"])
    assert instance.query("SHOW GRANTS", user='B') == TSV(
        ["GRANT CREATE ON *.* TO B WITH GRANT OPTION", "GRANT R2 TO B WITH ADMIN OPTION"])
    # SHOW CURRENT/ENABLED ROLES (flag columns: with_admin_option, is_default, ...).
    assert instance.query("SHOW CURRENT ROLES", user='A') == TSV([["R1", 0, 1]])
    assert instance.query("SHOW CURRENT ROLES", user='B') == TSV([["R2", 1, 1]])
    assert instance.query("SHOW ENABLED ROLES", user='A') == TSV([["R1", 0, 1, 1]])
    assert instance.query("SHOW ENABLED ROLES", user='B') == TSV([["R2", 1, 1, 1]])
    # SHOW ACCESS includes the role definitions and grants configured above.
    expected_access1 = "CREATE ROLE R1\n" \
                       "CREATE ROLE R2\n"
    expected_access2 = "GRANT R1 TO A\n"
    expected_access3 = "GRANT R2 TO B WITH ADMIN OPTION"
    assert expected_access1 in instance.query("SHOW ACCESS")
    assert expected_access2 in instance.query("SHOW ACCESS")
    assert expected_access3 in instance.query("SHOW ACCESS")
    # The same information is visible through the system tables.
    assert instance.query("SELECT name, storage from system.roles WHERE name IN ('R1', 'R2') ORDER BY name") == \
           TSV([["R1", "local directory"],
                ["R2", "local directory"]])
    assert instance.query(
        "SELECT * from system.grants WHERE user_name IN ('A', 'B') OR role_name IN ('R1', 'R2') ORDER BY user_name, role_name, access_type, grant_option") == \
           TSV([["A", "\\N", "SELECT", "test", "table", "\\N", 0, 0],
                ["B", "\\N", "CREATE", "\\N", "\\N", "\\N", 0, 1],
                ["\\N", "R2", "SELECT", "test", "table", "\\N", 0, 0],
                ["\\N", "R2", "SELECT", "test", "table", "x", 1, 0]])
    assert instance.query(
        "SELECT * from system.role_grants WHERE user_name IN ('A', 'B') OR role_name IN ('R1', 'R2') ORDER BY user_name, role_name, granted_role_name") == \
           TSV([["A", "\\N", "R1", 1, 0],
                ["B", "\\N", "R2", 1, 1]])
    assert instance.query("SELECT * from system.current_roles ORDER BY role_name", user='A') == TSV([["R1", 0, 1]])
    assert instance.query("SELECT * from system.current_roles ORDER BY role_name", user='B') == TSV([["R2", 1, 1]])
    assert instance.query("SELECT * from system.enabled_roles ORDER BY role_name", user='A') == TSV([["R1", 0, 1, 1]])
    assert instance.query("SELECT * from system.enabled_roles ORDER BY role_name", user='B') == TSV([["R2", 1, 1, 1]])
| 43.817778
| 159
| 0.658586
| 1,476
| 9,859
| 4.300813
| 0.079268
| 0.221172
| 0.11673
| 0.097826
| 0.844991
| 0.833648
| 0.769534
| 0.736137
| 0.717234
| 0.676276
| 0
| 0.02164
| 0.198499
| 9,859
| 224
| 160
| 44.013393
| 0.781701
| 0.038239
| 0
| 0.460123
| 0
| 0.01227
| 0.399514
| 0
| 0
| 0
| 0
| 0.004464
| 0.343558
| 1
| 0.04908
| false
| 0
| 0.018405
| 0
| 0.104294
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
46209435c5e4d37ca464a4cde588f201e783a3f0
| 150
|
py
|
Python
|
freecodecamp/arithmetic_operator.py
|
simplymanas/python-learning
|
75bc99c0dce211fd1bce5f6ce1155e0f4c71d7d0
|
[
"Apache-2.0"
] | 4
|
2020-08-18T05:29:38.000Z
|
2021-03-13T19:01:10.000Z
|
freecodecamp/arithmetic_operator.py
|
simplymanas/python-learning
|
75bc99c0dce211fd1bce5f6ce1155e0f4c71d7d0
|
[
"Apache-2.0"
] | null | null | null |
freecodecamp/arithmetic_operator.py
|
simplymanas/python-learning
|
75bc99c0dce211fd1bce5f6ce1155e0f4c71d7d0
|
[
"Apache-2.0"
] | 1
|
2020-08-29T12:57:17.000Z
|
2020-08-29T12:57:17.000Z
|
# Remainder of 10 divided by 3.
print(10 % 3)
# Division — presumably run on Python 3, where / keeps the fraction; confirm.
print(10 / 3)
# Floor division discards the fractional part.
print(10 // 3)
print(10 // 3)
print(10 % 3)
print(10 % 3)
# operator precedence: *, /, //, % bind tighter than + and -
print(10 * 5 / 2 + 5 - 4 % 6 // 3)
| 15
| 34
| 0.526667
| 28
| 150
| 2.821429
| 0.321429
| 0.620253
| 0.607595
| 0.822785
| 0.607595
| 0.607595
| 0.607595
| 0.607595
| 0.607595
| 0.607595
| 0
| 0.238532
| 0.273333
| 150
| 10
| 34
| 15
| 0.486239
| 0.126667
| 0
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 10
|
4679167bfd659e56644135ddb34db510f367717b
| 2,789
|
py
|
Python
|
utils/scripts/OOOlevelGen/src/levels/Bump_The_Chump.py
|
fullscreennl/monkeyswipe
|
c56192e202674dd5ab18023f6cf14cf51e95fbd0
|
[
"MIT"
] | null | null | null |
utils/scripts/OOOlevelGen/src/levels/Bump_The_Chump.py
|
fullscreennl/monkeyswipe
|
c56192e202674dd5ab18023f6cf14cf51e95fbd0
|
[
"MIT"
] | null | null | null |
utils/scripts/OOOlevelGen/src/levels/Bump_The_Chump.py
|
fullscreennl/monkeyswipe
|
c56192e202674dd5ab18023f6cf14cf51e95fbd0
|
[
"MIT"
] | null | null | null |
import LevelBuilder
from sprites import *
def render(name, bg):
    """Build the level and write it out as ``<name>.plist`` over background *bg*."""
    builder = LevelBuilder.LevelBuilder(name + ".plist", background=bg)
    # Four identical vertical beams, differing only in x position.
    for beam_x in (148, 235, 325, 410):
        builder.addObject(
            Beam.BeamSprite(x=beam_x, y=63, width=127, height=30, angle='90',
                            restitution=0.2, static='false', friction=0.5,
                            density=1).setName('Beam'))
    # Friend sprites (74x74), one per position, in the original insertion order.
    for friend_x, friend_y in ((234, 169), (410, 169), (325, 169), (150, 176)):
        builder.addObject(
            Friend.FriendSprite(x=friend_x, y=friend_y, width=74, height=74,
                                angle='0', restitution=0.2, static='false',
                                friction=0.5, density=1).setName('Friend'))
    builder.addObject(Star.StarSprite(x=455, y=22, width=32, height=32))
    builder.addObject(Hero.HeroSprite(x=189, y=15, width=32, height=32))
    # Five small (32x32) enemies, density 20.
    for enemy_x, enemy_y in ((68, 233), (232, 231), (322, 230), (407, 231), (149, 231)):
        builder.addObject(
            Enemy.EnemySprite(x=enemy_x, y=enemy_y, width=32, height=32,
                              angle='0', restitution=0.2, static='false',
                              friction=0.5, density=20))
    builder.addObject(Bomb.BombSprite(x=189, y=134, width=32, height=32,
                                      restitution=0.2, static='false',
                                      friction=0.5, density=20))
    builder.addObject(Bomb.BombSprite(x=94, y=22, width=32, height=32,
                                      restitution=0.2, static='false',
                                      friction=0.5, density=20))
    # One oversized (124x124) enemy.
    builder.addObject(Enemy.EnemySprite(x=68, y=113, width=124, height=124,
                                        angle='0', restitution=0.2,
                                        static='false', friction=0.5,
                                        density=20))
    builder.addObject(Bomb.BombSprite(x=209, y=291, width=32, height=32,
                                      restitution=0.2, static='false',
                                      friction=0.5, density=20))
    builder.addObject(Bomb.BombSprite(x=40, y=294, width=32, height=32,
                                      restitution=0.2, static='false',
                                      friction=0.5, density=20))
    builder.render()
| 111.56
| 153
| 0.730728
| 482
| 2,789
| 4.228216
| 0.153527
| 0.107949
| 0.114818
| 0.167812
| 0.886654
| 0.864573
| 0.841511
| 0.824828
| 0.824828
| 0.824828
| 0
| 0.119102
| 0.057727
| 2,789
| 25
| 154
| 111.56
| 0.656393
| 0
| 0
| 0
| 0
| 0
| 0.055197
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.08
| 0
| 0.12
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
46a069ba4a5ec1382f6649258471f2153d44177a
| 38,513
|
py
|
Python
|
totalgood/pacs/models.py
|
hobson/totalgood
|
5aae617beb08c21cbd262f091d69793abb17c5b0
|
[
"MIT"
] | null | null | null |
totalgood/pacs/models.py
|
hobson/totalgood
|
5aae617beb08c21cbd262f091d69793abb17c5b0
|
[
"MIT"
] | 9
|
2020-03-24T15:56:06.000Z
|
2022-03-11T23:26:02.000Z
|
totalgood/pacs/models.py
|
hobson/totalgood
|
5aae617beb08c21cbd262f091d69793abb17c5b0
|
[
"MIT"
] | 1
|
2016-04-24T15:10:20.000Z
|
2016-04-24T15:10:20.000Z
|
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Make sure each model has one field with primary_key=True
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
#
# Also note: You'll have to insert the output of 'django-admin sqlcustom [app_label]'
# into your database.
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
def representation(model, field_names=None):
    """Unicode representation of a Django model instance (object/record/row).

    Renders up to ``representation.max_fields`` fields as
    ``ClassName(repr1, repr2, ...)``.  When *field_names* is falsy (the
    default), the model's ``IMPORTANT_FIELDS`` attribute is used, falling
    back to ``['pk']``.  Missing/None field values render as ``''``.
    """
    # NOTE: default changed from the mutable `[]` to None (same falsy
    # behavior for callers, but no shared mutable default).
    if not field_names:
        field_names = getattr(model, 'IMPORTANT_FIELDS', ['pk'])
    retval = model.__class__.__name__ + u'('
    # A slice never over-runs, so the previous min(len(...), ...) was redundant.
    retval += ', '.join("%s" % (repr(getattr(model, s, '') or ''))
                        for s in field_names[:representation.max_fields])
    return retval + u')'
# Maximum number of fields shown in a representation.
representation.max_fields = 5
def name_similarity():
    """Compute the similarity (inverse distance) matrix between committee names"""
    # TODO: not implemented yet -- currently a stub that returns None.
    pass
class LongCharField(models.CharField):
    "An unlimited-length CharField to satisfy by Django and postgreSQL varchar."
    description = _("Unlimited-length string")
    def __init__(self, *args, **kwargs):
        # Django requires max_length on CharField; use a huge sentinel so
        # validation passes without imposing a practical limit.  Any
        # max_length the caller passed (e.g. -1 in the models below) is
        # overwritten here.
        kwargs['max_length'] = int(1e9) # Satisfy management validation.
        # Deliberately skips CharField.__init__ (calls the next class in the
        # MRO after CharField) so CharField's max-length validator is never
        # attached.
        super(models.CharField, self).__init__(*args, **kwargs)
        # Don't add max-length validator like CharField does.
    def get_internal_type(self):
        # This has no function, since this value is used as a lookup in
        # db_type(). Put something that isn't known by django so it
        # raises an error if it is ever used.
        return 'LongCharField'
    def db_type(self, connection):
        # *** This is probably only compatible with Postgres.
        # 'varchar' with no max length is equivalent to 'text' in Postgres,
        # but put 'varchar' so we can tell LongCharFields from TextFields
        # when we're looking at the db.
        return 'varchar'
    def formfield(self, **kwargs):
        # Don't pass max_length to form field like CharField does.
        return super(models.CharField, self).formfield(**kwargs)
# Expose the custom field on the models module so the auto-generated classes
# below can refer to it uniformly as models.LongCharField.
models.LongCharField = LongCharField
class AcGrassRootsInState(models.Model):
    """Auto-generated, unmanaged model over the 'ac_grass_roots_in_state' table."""
    filer_id = models.FloatField(blank=True, primary_key=True)
    filer = models.TextField(blank=True, null=True)
    candidate_name = models.TextField(blank=True, null=True)
    total_money = models.FloatField(blank=True, null=True)
    percent_grassroots = models.FloatField(blank=True, null=True)
    percent_instate = models.FloatField(blank=True, null=True)
    total_money_out = models.FloatField(blank=True, null=True)
    def __str__(self):
        return representation(self)
    class Meta:
        managed = False
        db_table = 'ac_grass_roots_in_state'
class AccessLog(models.Model):
    """Auto-generated, unmanaged model over the 'access_log' table."""
    committee_id = models.IntegerField(blank=True, primary_key=True)
    date = models.DateTimeField(blank=True, null=True)
    def __str__(self):
        return representation(self)
    class Meta:
        managed = False
        db_table = 'access_log'
# class AllOregonSum(models.Model):
# in_field = models.FloatField(db_column='in', blank=True, null=True) # Field renamed because it was a Python reserved word.
# out = models.FloatField(blank=True, null=True)
# from_within = models.FloatField(blank=True, null=True)
# to_within = models.FloatField(blank=True, null=True)
# from_outside = models.FloatField(blank=True, null=True)
# to_outside = models.FloatField(blank=True, null=True)
# total_grass_roots = models.FloatField(blank=True, null=True)
# total_from_in_state = models.FloatField(blank=True, null=True)
# class Meta:
# managed = False
# db_table = 'all_oregon_sum'
class CampaignDetail(models.Model):
    """Auto-generated, unmanaged model over the 'campaign_detail' table."""
    candidate_name = models.TextField(blank=True, primary_key=True)
    # max_length=-1 is a placeholder: LongCharField.__init__ overwrites it.
    committee_name = models.LongCharField(max_length=-1, blank=True, null=True)
    race = models.TextField(blank=True, null=True)
    website = models.TextField(blank=True, null=True)
    phone = models.LongCharField(max_length=-1, blank=True, null=True)
    total = models.FloatField(blank=True, null=True)
    total_spent = models.FloatField(blank=True, null=True)
    grassroots = models.FloatField(blank=True, null=True)
    instate = models.FloatField(blank=True, null=True)
    filer_id = models.IntegerField(blank=True, null=True)
    election = models.TextField(blank=True, null=True)
    party = models.TextField(blank=True, null=True)
    num_transactions = models.BigIntegerField(blank=True, null=True)
    committee_type = models.LongCharField(max_length=-1, blank=True, null=True)
    committee_subtype = models.LongCharField(max_length=-1, blank=True, null=True)
    db_update_status = models.TextField(blank=True, null=True)
    def __str__(self):
        return representation(self)
    class Meta:
        managed = False
        db_table = 'campaign_detail'
class CandidateByState(models.Model):
    """Auto-generated, unmanaged model over the 'candidate_by_state' table."""
    candidate_name = models.TextField(blank=True, null=True)
    filer_id = models.IntegerField(blank=True, null=True)
    state = models.LongCharField(max_length=-1, blank=True, null=True)
    direction = models.CharField(max_length=7, blank=True, null=True)
    value = models.FloatField(blank=True, null=True)
    def __str__(self):
        return representation(self)
    class Meta:
        managed = False
        db_table = 'candidate_by_state'
class CandidateSumByDate(models.Model):
    """Auto-generated, unmanaged model over the 'candidate_sum_by_date' table."""
    filer_id = models.IntegerField(blank=True, primary_key=True)
    tran_date = models.DateField(blank=True, null=True)
    total_in = models.FloatField(blank=True, null=True)
    total_out = models.FloatField(blank=True, null=True)
    # Fields shown by representation() in __str__.
    IMPORTANT_FIELDS = ['filer_id', 'total_in']
    def __str__(self):
        return representation(self)
    class Meta:
        managed = False
        db_table = 'candidate_sum_by_date'
class CcGrassRootsInState(models.Model):
    """Auto-generated, unmanaged model over the 'cc_grass_roots_in_state' table."""
    filer_id = models.IntegerField(blank=True, primary_key=True)
    filer = models.LongCharField(max_length=-1, blank=True, null=True)
    num_transactions = models.BigIntegerField(blank=True, null=True)
    in_state = models.FloatField(blank=True, null=True)
    grass_roots = models.FloatField(blank=True, null=True)
    total_contributions = models.FloatField(blank=True, null=True)
    total_money = models.FloatField(blank=True, null=True)
    total_money_out = models.FloatField(blank=True, null=True)
    percent_grass_roots = models.FloatField(blank=True, null=True)
    percent_in_state = models.FloatField(blank=True, null=True)
    # Fields shown by representation() in __str__.
    IMPORTANT_FIELDS = ['filer_id', 'filer', 'total_money', 'percent_grass_roots', 'percent_in_state']
    def __str__(self):
        return representation(self)
    class Meta:
        managed = False
        db_table = 'cc_grass_roots_in_state'
class CcWorkingTransactions(models.Model):
    """Auto-generated, unmanaged model over the 'cc_working_transactions' table."""
    tran_id = models.IntegerField(blank=True, primary_key=True)
    tran_date = models.DateField(blank=True, null=True)
    filer = models.LongCharField(max_length=-1, blank=True, null=True)
    contributor_payee = models.LongCharField(max_length=-1, blank=True, null=True)
    sub_type = models.LongCharField(max_length=-1, blank=True, null=True)
    amount = models.FloatField(blank=True, null=True)
    contributor_payee_committee_id = models.IntegerField(blank=True, null=True)
    filer_id = models.IntegerField(blank=True, null=True)
    purp_desc = models.LongCharField(max_length=-1, blank=True, null=True)
    book_type = models.LongCharField(max_length=-1, blank=True, null=True)
    addr_line1 = models.LongCharField(max_length=-1, blank=True, null=True)
    filed_date = models.DateField(blank=True, null=True)
    addr_line2 = models.LongCharField(max_length=-1, blank=True, null=True)
    city = models.LongCharField(max_length=-1, blank=True, null=True)
    state = models.LongCharField(max_length=-1, blank=True, null=True)
    zip = models.IntegerField(blank=True, null=True)
    purpose_codes = models.LongCharField(max_length=-1, blank=True, null=True)
    direction = models.CharField(max_length=7, blank=True, null=True)
    contributor_payee_class = models.LongCharField(max_length=-1, blank=True, null=True)
    # Fields shown by representation() in __str__.
    IMPORTANT_FIELDS = ['tran_id', 'tran_date', 'filer', 'amount', 'direction', 'purpose_codes']
    def __str__(self):
        return representation(self)
    class Meta:
        managed = False
        db_table = 'cc_working_transactions'
class DirectionCodes(models.Model):
    """Auto-generated, unmanaged model over the 'direction_codes' table."""
    sub_type = models.LongCharField(max_length=-1, blank=True, primary_key=True)
    direction = models.CharField(max_length=7, blank=True, null=True)
    def __str__(self):
        return representation(self)
    class Meta:
        managed = False
        db_table = 'direction_codes'
class Documentation(models.Model):
    """Auto-generated, unmanaged model over the 'documentation' table."""
    title = models.TextField(blank=True, null=True)
    endpoint_name = models.TextField(blank=True, null=True)
    txt = models.TextField(blank=True, null=True)
    def __str__(self):
        return representation(self)
    class Meta:
        managed = False
        db_table = 'documentation'
class HackOregonDbStatus(models.Model):
    """Auto-generated, unmanaged model over the 'hack_oregon_db_status' table.

    One float column per tracked table -- presumably a status/row-count
    snapshot per table; verify against the DB before relying on semantics.
    """
    ac_grass_roots_in_state = models.FloatField(blank=True, null=True)
    campaign_detail = models.FloatField(blank=True, null=True)
    candidate_by_state = models.FloatField(blank=True, null=True)
    candidate_sum_by_date = models.FloatField(blank=True, null=True)
    cc_grass_roots_in_state = models.FloatField(blank=True, null=True)
    cc_working_transactions = models.FloatField(blank=True, null=True)
    direction_codes = models.FloatField(blank=True, null=True)
    raw_candidate_filings = models.FloatField(blank=True, null=True)
    raw_committees = models.FloatField(blank=True, null=True)
    raw_committees_scraped = models.FloatField(blank=True, null=True)
    raw_committee_transactions = models.FloatField(blank=True, null=True)
    raw_committee_transactions_ammended_transactions = models.FloatField(blank=True, null=True)
    state_translation = models.FloatField(blank=True, null=True)
    working_candidate_committees = models.FloatField(blank=True, null=True)
    working_candidate_filings = models.FloatField(blank=True, null=True)
    working_committees = models.FloatField(blank=True, null=True)
    working_transactions = models.FloatField(blank=True, null=True)
    date = models.DateField(blank=True, null=True)
    event_at_log_time = models.TextField(blank=True, null=True)
    hack_oregon_db_status = models.FloatField(blank=True, null=True)
    all_oregon_sum = models.FloatField(blank=True, null=True)
    state_sum_by_date = models.FloatField(blank=True, null=True)
    documentation = models.FloatField(blank=True, null=True)
    oregon_by_contributions = models.FloatField(blank=True, null=True)
    oregon_by_purpose_codes = models.FloatField(blank=True, null=True)
    sub_type_from_contributor_payee = models.FloatField(blank=True, null=True)
    oregon_committee_agg = models.FloatField(blank=True, null=True)
    import_dates = models.FloatField(blank=True, null=True)
    raw_committee_transactions_errors = models.FloatField(blank=True, null=True)
    access_log = models.FloatField(blank=True, null=True)
    search_log = models.FloatField(blank=True, null=True)
    def __str__(self):
        return representation(self)
    class Meta:
        managed = False
        db_table = 'hack_oregon_db_status'
class ImportDates(models.Model):
    """Auto-generated, unmanaged model over the 'import_dates' table."""
    # 65535/65535 is inspectdb's rendering of an unconstrained numeric column.
    id = models.DecimalField(max_digits=65535, decimal_places=65535, blank=True, primary_key=True)
    scrape_date = models.DateField(blank=True, null=True)
    file_name = models.TextField(blank=True, null=True)
    def __str__(self):
        return representation(self)
    class Meta:
        managed = False
        db_table = 'import_dates'
class OregonByContributions(models.Model):
    """Auto-generated, unmanaged model over the 'oregon_by_contributions' table."""
    contribution_type = models.LongCharField(max_length=-1, blank=True, primary_key=True)
    total = models.FloatField(blank=True, null=True)
    def __str__(self):
        return representation(self)
    class Meta:
        managed = False
        db_table = 'oregon_by_contributions'
class OregonByPurposeCodes(models.Model):
    """Auto-generated, unmanaged model over the 'oregon_by_purpose_codes' table."""
    purpose_code = models.TextField(blank=True, primary_key=True)
    total = models.FloatField(blank=True, null=True)
    def __str__(self):
        return representation(self)
    class Meta:
        managed = False
        db_table = 'oregon_by_purpose_codes'
class OregonCommitteeAgg(models.Model):
    """Auto-generated, unmanaged model over the 'oregon_committee_agg' table."""
    contributor_payee = models.LongCharField(max_length=-1, blank=True, null=True)
    contributor_payee_committee_id = models.IntegerField(blank=True, null=True)
    sum = models.FloatField(blank=True, null=True)
    def __str__(self):
        return representation(self)
    class Meta:
        managed = False
        db_table = 'oregon_committee_agg'
class RawCandidateFilings(models.Model):
    """Auto-generated, unmanaged model over the 'raw_candidate_filings' table."""
    election_txt = models.TextField(blank=True, null=True)
    election_year = models.IntegerField(blank=True, null=True)
    office_group = models.TextField(blank=True, null=True)
    id_nbr = models.IntegerField(blank=True, null=True)
    office = models.TextField(blank=True, null=True)
    candidate_office = models.TextField(blank=True, null=True)
    candidate_file_rsn = models.IntegerField(blank=True, primary_key=True)
    file_mthd_ind = models.TextField(blank=True, null=True)
    filetype_descr = models.TextField(blank=True, null=True)
    party_descr = models.TextField(blank=True, null=True)
    major_party_ind = models.TextField(blank=True, null=True)
    cand_ballot_name_txt = models.TextField(blank=True, null=True)
    occptn_txt = models.TextField(blank=True, null=True)
    education_bckgrnd_txt = models.TextField(blank=True, null=True)
    occptn_bkgrnd_txt = models.TextField(blank=True, null=True)
    school_grade_diploma_degree_certificate_course_of_study = models.TextField(blank=True, null=True)
    prev_govt_bkgrnd_txt = models.TextField(blank=True, null=True)
    judge_incbnt_ind = models.TextField(blank=True, null=True)
    qlf_ind = models.TextField(blank=True, null=True)
    filed_date = models.DateField(blank=True, null=True)
    file_fee_rfnd_date = models.DateField(blank=True, null=True)
    witdrw_date = models.DateField(blank=True, null=True)
    withdrw_resn_txt = models.NullBooleanField()
    pttn_file_date = models.DateField(blank=True, null=True)
    pttn_sgnr_rqd_nbr = models.IntegerField(blank=True, null=True)
    pttn_signr_filed_nbr = models.IntegerField(blank=True, null=True)
    pttn_cmplt_date = models.DateField(blank=True, null=True)
    ballot_order_nbr = models.IntegerField(blank=True, null=True)
    prfx_name_cd = models.TextField(blank=True, null=True)
    first_name = models.TextField(blank=True, null=True)
    mdle_name = models.TextField(blank=True, null=True)
    last_name = models.TextField(blank=True, null=True)
    sufx_name = models.TextField(blank=True, null=True)
    title_txt = models.TextField(blank=True, null=True)
    mailing_addr_line_1 = models.TextField(blank=True, null=True)
    mailing_addr_line_2 = models.TextField(blank=True, null=True)
    mailing_city_name = models.TextField(blank=True, null=True)
    mailing_st_cd = models.TextField(blank=True, null=True)
    mailing_zip_code = models.IntegerField(blank=True, null=True)
    mailing_zip_plus_four = models.IntegerField(blank=True, null=True)
    residence_addr_line_1 = models.TextField(blank=True, null=True)
    residence_addr_line_2 = models.TextField(blank=True, null=True)
    residence_city_name = models.TextField(blank=True, null=True)
    residence_st_cd = models.TextField(blank=True, null=True)
    residence_zip_code = models.IntegerField(blank=True, null=True)
    residence_zip_plus_four = models.IntegerField(blank=True, null=True)
    home_phone = models.TextField(blank=True, null=True)
    cell_phone = models.TextField(blank=True, null=True)
    fax_phone = models.TextField(blank=True, null=True)
    email = models.TextField(blank=True, null=True)
    work_phone = models.TextField(blank=True, null=True)
    web_address = models.TextField(blank=True, null=True)
    def __str__(self):
        return representation(self)
    class Meta:
        managed = False
        db_table = 'raw_candidate_filings'
class RawCommitteeTransactions(models.Model):
    """Auto-generated, unmanaged model over the 'raw_committee_transactions' table."""
    tran_id = models.IntegerField(blank=True, primary_key=True)
    original_id = models.IntegerField(blank=True, null=True)
    tran_date = models.DateField(blank=True, null=True)
    tran_status = models.LongCharField(max_length=-1, blank=True, null=True)
    filer = models.LongCharField(max_length=-1, blank=True, null=True)
    contributor_payee = models.LongCharField(max_length=-1, blank=True, null=True)
    sub_type = models.LongCharField(max_length=-1, blank=True, null=True)
    amount = models.FloatField(blank=True, null=True)
    aggregate_amount = models.FloatField(blank=True, null=True)
    contributor_payee_committee_id = models.IntegerField(blank=True, null=True)
    filer_id = models.IntegerField(blank=True, null=True)
    attest_by_name = models.LongCharField(max_length=-1, blank=True, null=True)
    attest_date = models.DateField(blank=True, null=True)
    review_by_name = models.LongCharField(max_length=-1, blank=True, null=True)
    review_date = models.DateField(blank=True, null=True)
    due_date = models.DateField(blank=True, null=True)
    occptn_ltr_date = models.LongCharField(max_length=-1, blank=True, null=True)
    pymt_sched_txt = models.LongCharField(max_length=-1, blank=True, null=True)
    purp_desc = models.LongCharField(max_length=-1, blank=True, null=True)
    intrst_rate = models.LongCharField(max_length=-1, blank=True, null=True)
    check_nbr = models.LongCharField(max_length=-1, blank=True, null=True)
    tran_stsfd_ind = models.NullBooleanField()
    filed_by_name = models.LongCharField(max_length=-1, blank=True, null=True)
    filed_date = models.DateField(blank=True, null=True)
    addr_book_agent_name = models.LongCharField(max_length=-1, blank=True, null=True)
    book_type = models.LongCharField(max_length=-1, blank=True, null=True)
    title_txt = models.LongCharField(max_length=-1, blank=True, null=True)
    occptn_txt = models.LongCharField(max_length=-1, blank=True, null=True)
    emp_name = models.LongCharField(max_length=-1, blank=True, null=True)
    emp_city = models.LongCharField(max_length=-1, blank=True, null=True)
    emp_state = models.LongCharField(max_length=-1, blank=True, null=True)
    employ_ind = models.NullBooleanField()
    self_employ_ind = models.NullBooleanField()
    addr_line1 = models.LongCharField(max_length=-1, blank=True, null=True)
    addr_line2 = models.LongCharField(max_length=-1, blank=True, null=True)
    city = models.LongCharField(max_length=-1, blank=True, null=True)
    state = models.LongCharField(max_length=-1, blank=True, null=True)
    zip = models.IntegerField(blank=True, null=True)
    zip_plus_four = models.IntegerField(blank=True, null=True)
    county = models.LongCharField(max_length=-1, blank=True, null=True)
    purpose_codes = models.LongCharField(max_length=-1, blank=True, null=True)
    exp_date = models.LongCharField(max_length=-1, blank=True, null=True)
    # NOTE(review): 'direction' is listed but no such field exists on this
    # model; representation() silently renders missing fields as '' -- confirm.
    IMPORTANT_FIELDS = ['tran_id', 'tran_date', 'filer', 'contributor_payee', 'amount', 'direction', 'purpose_codes']
    def __str__(self):
        return representation(self)
    class Meta:
        managed = False
        db_table = 'raw_committee_transactions'
class RawCommitteeTransactionsAmmendedTransactions(models.Model):
    """Auto-generated, unmanaged model over the
    'raw_committee_transactions_ammended_transactions' table (same columns as
    RawCommitteeTransactions)."""
    tran_id = models.IntegerField(blank=True, primary_key=True)
    original_id = models.IntegerField(blank=True, null=True)
    tran_date = models.DateField(blank=True, null=True)
    tran_status = models.LongCharField(max_length=-1, blank=True, null=True)
    filer = models.LongCharField(max_length=-1, blank=True, null=True)
    contributor_payee = models.LongCharField(max_length=-1, blank=True, null=True)
    sub_type = models.LongCharField(max_length=-1, blank=True, null=True)
    amount = models.FloatField(blank=True, null=True)
    aggregate_amount = models.FloatField(blank=True, null=True)
    contributor_payee_committee_id = models.IntegerField(blank=True, null=True)
    filer_id = models.IntegerField(blank=True, null=True)
    attest_by_name = models.LongCharField(max_length=-1, blank=True, null=True)
    attest_date = models.DateField(blank=True, null=True)
    review_by_name = models.LongCharField(max_length=-1, blank=True, null=True)
    review_date = models.DateField(blank=True, null=True)
    due_date = models.DateField(blank=True, null=True)
    occptn_ltr_date = models.LongCharField(max_length=-1, blank=True, null=True)
    pymt_sched_txt = models.LongCharField(max_length=-1, blank=True, null=True)
    purp_desc = models.LongCharField(max_length=-1, blank=True, null=True)
    intrst_rate = models.LongCharField(max_length=-1, blank=True, null=True)
    check_nbr = models.LongCharField(max_length=-1, blank=True, null=True)
    tran_stsfd_ind = models.NullBooleanField()
    filed_by_name = models.LongCharField(max_length=-1, blank=True, null=True)
    filed_date = models.DateField(blank=True, null=True)
    addr_book_agent_name = models.LongCharField(max_length=-1, blank=True, null=True)
    book_type = models.LongCharField(max_length=-1, blank=True, null=True)
    title_txt = models.LongCharField(max_length=-1, blank=True, null=True)
    occptn_txt = models.LongCharField(max_length=-1, blank=True, null=True)
    emp_name = models.LongCharField(max_length=-1, blank=True, null=True)
    emp_city = models.LongCharField(max_length=-1, blank=True, null=True)
    emp_state = models.LongCharField(max_length=-1, blank=True, null=True)
    employ_ind = models.NullBooleanField()
    self_employ_ind = models.NullBooleanField()
    addr_line1 = models.LongCharField(max_length=-1, blank=True, null=True)
    addr_line2 = models.LongCharField(max_length=-1, blank=True, null=True)
    city = models.LongCharField(max_length=-1, blank=True, null=True)
    state = models.LongCharField(max_length=-1, blank=True, null=True)
    zip = models.IntegerField(blank=True, null=True)
    zip_plus_four = models.IntegerField(blank=True, null=True)
    county = models.LongCharField(max_length=-1, blank=True, null=True)
    purpose_codes = models.LongCharField(max_length=-1, blank=True, null=True)
    exp_date = models.LongCharField(max_length=-1, blank=True, null=True)
    def __str__(self):
        return representation(self)
    class Meta:
        managed = False
        db_table = 'raw_committee_transactions_ammended_transactions'
class RawCommitteeTransactionsErrors(models.Model):
    """Auto-generated, unmanaged model over the
    'raw_committee_transactions_errors' table (same columns as
    RawCommitteeTransactions)."""
    tran_id = models.IntegerField(blank=True, primary_key=True)
    original_id = models.IntegerField(blank=True, null=True)
    tran_date = models.DateField(blank=True, null=True)
    tran_status = models.LongCharField(max_length=-1, blank=True, null=True)
    filer = models.LongCharField(max_length=-1, blank=True, null=True)
    contributor_payee = models.LongCharField(max_length=-1, blank=True, null=True)
    sub_type = models.LongCharField(max_length=-1, blank=True, null=True)
    amount = models.FloatField(blank=True, null=True)
    aggregate_amount = models.FloatField(blank=True, null=True)
    contributor_payee_committee_id = models.IntegerField(blank=True, null=True)
    filer_id = models.IntegerField(blank=True, null=True)
    attest_by_name = models.LongCharField(max_length=-1, blank=True, null=True)
    attest_date = models.DateField(blank=True, null=True)
    review_by_name = models.LongCharField(max_length=-1, blank=True, null=True)
    review_date = models.DateField(blank=True, null=True)
    due_date = models.DateField(blank=True, null=True)
    occptn_ltr_date = models.LongCharField(max_length=-1, blank=True, null=True)
    pymt_sched_txt = models.LongCharField(max_length=-1, blank=True, null=True)
    purp_desc = models.LongCharField(max_length=-1, blank=True, null=True)
    intrst_rate = models.LongCharField(max_length=-1, blank=True, null=True)
    check_nbr = models.LongCharField(max_length=-1, blank=True, null=True)
    tran_stsfd_ind = models.NullBooleanField()
    filed_by_name = models.LongCharField(max_length=-1, blank=True, null=True)
    filed_date = models.DateField(blank=True, null=True)
    addr_book_agent_name = models.LongCharField(max_length=-1, blank=True, null=True)
    book_type = models.LongCharField(max_length=-1, blank=True, null=True)
    title_txt = models.LongCharField(max_length=-1, blank=True, null=True)
    occptn_txt = models.LongCharField(max_length=-1, blank=True, null=True)
    emp_name = models.LongCharField(max_length=-1, blank=True, null=True)
    emp_city = models.LongCharField(max_length=-1, blank=True, null=True)
    emp_state = models.LongCharField(max_length=-1, blank=True, null=True)
    employ_ind = models.NullBooleanField()
    self_employ_ind = models.NullBooleanField()
    addr_line1 = models.LongCharField(max_length=-1, blank=True, null=True)
    addr_line2 = models.LongCharField(max_length=-1, blank=True, null=True)
    city = models.LongCharField(max_length=-1, blank=True, null=True)
    state = models.LongCharField(max_length=-1, blank=True, null=True)
    zip = models.IntegerField(blank=True, null=True)
    zip_plus_four = models.IntegerField(blank=True, null=True)
    county = models.LongCharField(max_length=-1, blank=True, null=True)
    purpose_codes = models.LongCharField(max_length=-1, blank=True, null=True)
    exp_date = models.LongCharField(max_length=-1, blank=True, null=True)
    def __str__(self):
        return representation(self)
    class Meta:
        managed = False
        db_table = 'raw_committee_transactions_errors'
class RawCommittees(models.Model):
    """Auto-generated, unmanaged model over the 'raw_committees' table."""
    committee_id = models.IntegerField(blank=True, primary_key=True)
    committee_name = models.LongCharField(max_length=-1, blank=True, null=True)
    committee_type = models.LongCharField(max_length=-1, blank=True, null=True)
    committee_subtype = models.LongCharField(max_length=-1, blank=True, null=True)
    candidate_office = models.LongCharField(max_length=-1, blank=True, null=True)
    candidate_office_group = models.LongCharField(max_length=-1, blank=True, null=True)
    filing_date = models.DateField(blank=True, null=True)
    # db_column contains a literal space -- the underlying column really is
    # named 'organization_filing Date'.
    organization_filing_date = models.DateField(db_column='organization_filing Date',
                                                blank=True, null=True)
    treasurer_first_name = models.LongCharField(max_length=-1, blank=True, null=True)
    treasurer_last_name = models.LongCharField(max_length=-1, blank=True, null=True)
    treasurer_mailing_address = models.LongCharField(max_length=-1, blank=True, null=True)
    treasurer_work_phone = models.LongCharField(max_length=-1, blank=True, null=True)
    treasurer_fax = models.LongCharField(max_length=-1, blank=True, null=True)
    candidate_first_name = models.LongCharField(max_length=-1, blank=True, null=True)
    candidate_last_name = models.LongCharField(max_length=-1, blank=True, null=True)
    candidate_maling_address = models.LongCharField(max_length=-1, blank=True, null=True)
    candidate_work_phone = models.LongCharField(max_length=-1, blank=True, null=True)
    candidate_residence_phone = models.LongCharField(max_length=-1, blank=True, null=True)
    candidate_fax = models.LongCharField(max_length=-1, blank=True, null=True)
    candidate_email = models.LongCharField(max_length=-1, blank=True, null=True)
    active_election = models.LongCharField(max_length=-1, blank=True, null=True)
    measure = models.LongCharField(max_length=-1, blank=True, null=True)
    def __str__(self):
        return representation(self)
    class Meta:
        managed = False
        db_table = 'raw_committees'
class RawCommitteesScraped(models.Model):
    """Auto-generated, unmanaged model over the 'raw_committees_scraped' table."""
    id = models.IntegerField(blank=True, primary_key=True)
    name = models.TextField(blank=True, null=True)
    acronym = models.TextField(blank=True, null=True)
    pac_type = models.TextField(blank=True, null=True)
    filing_effective_from = models.TextField(blank=True, null=True)
    filing_type = models.TextField(blank=True, null=True)
    address = models.TextField(blank=True, null=True)
    campaign_phone = models.TextField(blank=True, null=True)
    treasurer_name = models.TextField(blank=True, null=True)
    treasurer_mailing_address = models.TextField(blank=True, null=True)
    treasurer_work_phone_home_phone_fax = models.TextField(blank=True, null=True)
    treasurer_email_address = models.TextField(blank=True, null=True)
    candidate_name = models.TextField(blank=True, null=True)
    candidate_election_office = models.TextField(blank=True, null=True)
    candidate_party_affiliation = models.TextField(blank=True, null=True)
    candidate_candidate_address = models.TextField(blank=True, null=True)
    candidate_work_phone_home_phone_fax = models.TextField(blank=True, null=True)
    candidate_mailing_address = models.TextField(blank=True, null=True)
    candidate_email_address = models.TextField(blank=True, null=True)
    candidate_occupation = models.TextField(blank=True, null=True)
    candidate_employer = models.TextField(blank=True, null=True)
    measure_election = models.TextField(blank=True, null=True)
    measure_support = models.TextField(blank=True, null=True)
    measure_details = models.TextField(blank=True, null=True)
    committee_type = models.TextField(blank=True, null=True)
    def __str__(self):
        return representation(self)
    class Meta:
        managed = False
        db_table = 'raw_committees_scraped'
class SearchLog(models.Model):
    """Auto-generated, unmanaged model over the 'search_log' table."""
    search_term = models.TextField(blank=True, null=True)
    date = models.DateTimeField(blank=True, null=True)
    def __str__(self):
        return representation(self)
    class Meta:
        managed = False
        db_table = 'search_log'
class StateSumByDate(models.Model):
    """Auto-generated, unmanaged model over the 'state_sum_by_date' table."""
    tran_date = models.DateField(blank=True, null=True)
    total_in = models.FloatField(blank=True, null=True)
    total_out = models.FloatField(blank=True, null=True)
    total_from_within = models.FloatField(blank=True, null=True)
    total_to_within = models.FloatField(blank=True, null=True)
    total_from_the_outside = models.FloatField(blank=True, null=True)
    total_to_the_outside = models.FloatField(blank=True, null=True)
    total_grass_roots = models.FloatField(blank=True, null=True)
    total_from_in_state = models.FloatField(blank=True, null=True)
    def __str__(self):
        return representation(self)
    class Meta:
        managed = False
        db_table = 'state_sum_by_date'
class StateTranslation(models.Model):
    """Lookup table mapping full state names to abbreviations (``state_translation``)."""
    # Full state name. NOTE(review): LongCharField is a custom/backend-specific
    # field (not stock Django); max_length=-1 presumably means "unbounded" — confirm.
    statefull = models.LongCharField(max_length=-1, blank=True, null=True)
    # Short code, e.g. "OR"; 3 chars allows for non-standard codes.
    abbreviation = models.CharField(max_length=3, blank=True, null=True)

    def __str__(self):
        return representation(self)

    class Meta:
        managed = False
        db_table = 'state_translation'
class SubTypeFromContributorPayee(models.Model):
    """Distinct contributor/payee values used to derive transaction sub-types
    (table ``sub_type_from_contributor_payee``)."""
    contributor_payee = models.LongCharField(max_length=-1, blank=True, null=True)

    def __str__(self):
        return representation(self)

    class Meta:
        managed = False
        db_table = 'sub_type_from_contributor_payee'
class WorkingCandidateCommittees(models.Model):
    """Scratch/working table linking candidates to their committees
    (table ``working_candidate_committees``)."""
    candidate_name = models.TextField(blank=True, null=True)
    # External committee identifier (matches committee_id in related tables).
    committee_id = models.IntegerField(blank=True, null=True)
    committee_name = models.LongCharField(max_length=-1, blank=True, null=True)
    election_office = models.TextField(blank=True, null=True)
    phone = models.TextField(blank=True, null=True)
    party_affiliation = models.TextField(blank=True, null=True)
    web_address = models.TextField(blank=True, null=True)

    def __str__(self):
        return representation(self)

    class Meta:
        managed = False
        db_table = 'working_candidate_committees'
class WorkingCandidateFilings(models.Model):
    """Scratch/working table of candidate filing records
    (table ``working_candidate_filings``).

    Field names mirror the legacy database column names verbatim — including
    their original misspellings (``witdrw_date`` vs ``withdrw_resn_txt``) —
    and therefore must not be renamed.
    """
    # Election / office being filed for.
    election_txt = models.TextField(blank=True, null=True)
    election_year = models.IntegerField(blank=True, null=True)
    office_group = models.TextField(blank=True, null=True)
    id_nbr = models.IntegerField(blank=True, null=True)
    office = models.TextField(blank=True, null=True)
    candidate_office = models.TextField(blank=True, null=True)
    candidate_file_rsn = models.IntegerField(blank=True, null=True)
    file_mthd_ind = models.TextField(blank=True, null=True)
    filetype_descr = models.TextField(blank=True, null=True)
    party_descr = models.TextField(blank=True, null=True)
    major_party_ind = models.TextField(blank=True, null=True)
    cand_ballot_name_txt = models.TextField(blank=True, null=True)
    # Candidate background details.
    occptn_txt = models.TextField(blank=True, null=True)
    education_bckgrnd_txt = models.TextField(blank=True, null=True)
    occptn_bkgrnd_txt = models.TextField(blank=True, null=True)
    school_grade_diploma_degree_certificate_course_of_study = models.TextField(blank=True, null=True)
    prev_govt_bkgrnd_txt = models.TextField(blank=True, null=True)
    judge_incbnt_ind = models.TextField(blank=True, null=True)
    qlf_ind = models.TextField(blank=True, null=True)
    # Filing lifecycle dates.
    filed_date = models.DateField(blank=True, null=True)
    file_fee_rfnd_date = models.DateField(blank=True, null=True)
    witdrw_date = models.DateField(blank=True, null=True)
    # FIX: NullBooleanField is deprecated since Django 3.1 and removed in 4.0;
    # the documented drop-in equivalent is BooleanField(null=True).
    # NOTE(review): despite the ``_txt`` suffix the column was mapped as a
    # nullable boolean — confirm against the actual DB schema.
    withdrw_resn_txt = models.BooleanField(null=True)
    # Petition details.
    pttn_file_date = models.DateField(blank=True, null=True)
    pttn_sgnr_rqd_nbr = models.IntegerField(blank=True, null=True)
    pttn_signr_filed_nbr = models.IntegerField(blank=True, null=True)
    pttn_cmplt_date = models.DateField(blank=True, null=True)
    ballot_order_nbr = models.IntegerField(blank=True, null=True)
    # Candidate name parts.
    prfx_name_cd = models.TextField(blank=True, null=True)
    first_name = models.TextField(blank=True, null=True)
    mdle_name = models.TextField(blank=True, null=True)
    last_name = models.TextField(blank=True, null=True)
    sufx_name = models.TextField(blank=True, null=True)
    title_txt = models.TextField(blank=True, null=True)
    # Mailing address.
    mailing_addr_line_1 = models.TextField(blank=True, null=True)
    mailing_addr_line_2 = models.TextField(blank=True, null=True)
    mailing_city_name = models.TextField(blank=True, null=True)
    mailing_st_cd = models.TextField(blank=True, null=True)
    mailing_zip_code = models.IntegerField(blank=True, null=True)
    mailing_zip_plus_four = models.IntegerField(blank=True, null=True)
    # Residence address.
    residence_addr_line_1 = models.TextField(blank=True, null=True)
    residence_addr_line_2 = models.TextField(blank=True, null=True)
    residence_city_name = models.TextField(blank=True, null=True)
    residence_st_cd = models.TextField(blank=True, null=True)
    residence_zip_code = models.IntegerField(blank=True, null=True)
    residence_zip_plus_four = models.IntegerField(blank=True, null=True)
    # Contact information.
    home_phone = models.TextField(blank=True, null=True)
    cell_phone = models.TextField(blank=True, null=True)
    fax_phone = models.TextField(blank=True, null=True)
    email = models.TextField(blank=True, null=True)
    work_phone = models.TextField(blank=True, null=True)
    web_address = models.TextField(blank=True, null=True)

    def __str__(self):
        return representation(self)

    class Meta:
        managed = False  # unmanaged: schema owned by the upstream pipeline
        db_table = 'working_candidate_filings'
class WorkingCommittees(models.Model):
    """Scratch/working table of committee records (table ``working_committees``)."""
    # External committee identifier; used as the primary key of this table.
    committee_id = models.IntegerField(blank=True, primary_key=True)
    committee_name = models.LongCharField(max_length=-1, blank=True, null=True)
    committee_type = models.LongCharField(max_length=-1, blank=True, null=True)
    committee_subtype = models.LongCharField(max_length=-1, blank=True, null=True)
    party_affiliation = models.TextField(blank=True, null=True)
    phone = models.LongCharField(max_length=-1, blank=True, null=True)
    election_office = models.TextField(blank=True, null=True)
    # Candidate contact details (candidate committees only — others leave these null).
    candidate_name = models.TextField(blank=True, null=True)
    candidate_email_address = models.LongCharField(max_length=-1, blank=True, null=True)
    candidate_work_phone_home_phone_fax = models.TextField(blank=True, null=True)
    candidate_address = models.LongCharField(max_length=-1, blank=True, null=True)
    # Treasurer contact details.
    treasurer_name = models.TextField(blank=True, null=True)
    treasurer_work_phone_home_phone_fax = models.TextField(blank=True, null=True)
    treasurer_mailing_address = models.LongCharField(max_length=-1, blank=True, null=True)
    web_address = models.TextField(blank=True, null=True)
    # Ballot measure the committee supports/opposes, if any.
    measure = models.LongCharField(max_length=-1, blank=True, null=True)
    simple_election = models.TextField(blank=True, null=True)
    # ETL bookkeeping flag for the row's refresh status.
    db_update_status = models.TextField(blank=True, null=True)

    def __str__(self):
        return representation(self)

    class Meta:
        managed = False
        db_table = 'working_committees'
class WorkingTransactions(models.Model):
    """Scratch/working table of individual money transactions
    (table ``working_transactions``)."""
    # External transaction identifier; used as the primary key of this table.
    tran_id = models.IntegerField(blank=True, primary_key=True)
    tran_date = models.DateField(blank=True, null=True)
    # Filing committee and counterparty, as free text plus numeric IDs below.
    filer = models.LongCharField(max_length=-1, blank=True, null=True)
    contributor_payee = models.LongCharField(max_length=-1, blank=True, null=True)
    sub_type = models.LongCharField(max_length=-1, blank=True, null=True)
    amount = models.FloatField(blank=True, null=True)
    contributor_payee_committee_id = models.IntegerField(blank=True, null=True)
    filer_id = models.IntegerField(blank=True, null=True)
    purp_desc = models.LongCharField(max_length=-1, blank=True, null=True)
    book_type = models.LongCharField(max_length=-1, blank=True, null=True)
    # Counterparty address.
    addr_line1 = models.LongCharField(max_length=-1, blank=True, null=True)
    filed_date = models.DateField(blank=True, null=True)
    addr_line2 = models.LongCharField(max_length=-1, blank=True, null=True)
    city = models.LongCharField(max_length=-1, blank=True, null=True)
    state = models.LongCharField(max_length=-1, blank=True, null=True)
    # NOTE: shadows the builtin `zip` only inside the class body — harmless here,
    # and the name must match the DB column.
    zip = models.IntegerField(blank=True, null=True)
    purpose_codes = models.LongCharField(max_length=-1, blank=True, null=True)
    # 'in'/'out' style flow indicator — TODO confirm the exact domain of values.
    direction = models.CharField(max_length=7, blank=True, null=True)
    contributor_payee_class = models.LongCharField(max_length=-1, blank=True, null=True)

    def __str__(self):
        return representation(self)

    class Meta:
        managed = False
        db_table = 'working_transactions'
| 48.021197
| 139
| 0.738712
| 5,114
| 38,513
| 5.360774
| 0.076261
| 0.144775
| 0.200584
| 0.262302
| 0.879628
| 0.871348
| 0.858691
| 0.811855
| 0.77257
| 0.749736
| 0
| 0.005347
| 0.150261
| 38,513
| 801
| 140
| 48.081149
| 0.832366
| 0.047594
| 0
| 0.691589
| 1
| 0
| 0.026012
| 0.01065
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05296
| false
| 0.001558
| 0.017134
| 0.048287
| 0.9081
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 10
|
46aeb8da7a3c3d9f8383cf1bf0cbf09ae511c2a2
| 38
|
py
|
Python
|
asr/metrics/__init__.py
|
isadrtdinov/quartznet
|
e26eae4aeef195a11e0884cc917758fad033372c
|
[
"MIT"
] | null | null | null |
asr/metrics/__init__.py
|
isadrtdinov/quartznet
|
e26eae4aeef195a11e0884cc917758fad033372c
|
[
"MIT"
] | null | null | null |
asr/metrics/__init__.py
|
isadrtdinov/quartznet
|
e26eae4aeef195a11e0884cc917758fad033372c
|
[
"MIT"
] | 2
|
2020-12-30T03:08:45.000Z
|
2021-01-21T08:53:34.000Z
|
from .asr_metrics import asr_metrics
| 12.666667
| 36
| 0.842105
| 6
| 38
| 5
| 0.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131579
| 38
| 2
| 37
| 19
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d3eb70a2247a36746e29ec383de7e57c88fff534
| 219
|
py
|
Python
|
classicML/benchmarks/__init__.py
|
sun1638650145/classicML
|
7e0c2155bccb6e491a150ee689d3786526b74565
|
[
"Apache-2.0"
] | 12
|
2020-05-10T12:11:06.000Z
|
2021-10-31T13:23:55.000Z
|
classicML/benchmarks/__init__.py
|
sun1638650145/classicML
|
7e0c2155bccb6e491a150ee689d3786526b74565
|
[
"Apache-2.0"
] | null | null | null |
classicML/benchmarks/__init__.py
|
sun1638650145/classicML
|
7e0c2155bccb6e491a150ee689d3786526b74565
|
[
"Apache-2.0"
] | 2
|
2021-01-17T06:22:05.000Z
|
2021-01-18T14:32:51.000Z
|
"""classicML中的benchmarks用于评估和测试模型的性能和开销"""
from classicML.benchmarks.wrapper_utils import average_timer
from classicML.benchmarks.wrapper_utils import memory_monitor
from classicML.benchmarks.wrapper_utils import timer
| 43.8
| 61
| 0.885845
| 24
| 219
| 7.875
| 0.458333
| 0.206349
| 0.365079
| 0.47619
| 0.650794
| 0.650794
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059361
| 219
| 4
| 62
| 54.75
| 0.917476
| 0.164384
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
3105ae03ec65116370333c4fc8fc8167a8e1f482
| 15,715
|
py
|
Python
|
models.py
|
gtesei/squad
|
1858cfa7f102aab7054ff35b44dec3abb5993f5c
|
[
"MIT"
] | null | null | null |
models.py
|
gtesei/squad
|
1858cfa7f102aab7054ff35b44dec3abb5993f5c
|
[
"MIT"
] | null | null | null |
models.py
|
gtesei/squad
|
1858cfa7f102aab7054ff35b44dec3abb5993f5c
|
[
"MIT"
] | null | null | null |
"""Top-level model classes.
Author:
Chris Chute (chute@stanford.edu)
"""
import layers , bert_layers
import torch
import torch.nn as nn
import torch.nn.functional as F
class BiDAF(nn.Module):
    """Baseline BiDAF model for SQuAD.

    Based on the paper:
    "Bidirectional Attention Flow for Machine Comprehension"
    by Minjoon Seo, Aniruddha Kembhavi, Ali Farhadi, Hannaneh Hajishirzi
    (https://arxiv.org/abs/1611.01603).

    Follows a high-level structure commonly found in SQuAD models:
        - Embedding layer: Embed word indices to get word vectors.
        - Encoder layer: Encode the embedded sequence.
        - Attention layer: Apply an attention mechanism to the encoded sequence.
        - Model encoder layer: Encode the sequence again.
        - Output layer: Simple layer (e.g., fc + softmax) to get final outputs.

    Args:
        word_vectors (torch.Tensor): Pre-trained word vectors.
        hidden_size (int): Number of features in the hidden state at each layer.
        drop_prob (float): Dropout probability.
    """
    def __init__(self, word_vectors, hidden_size, drop_prob=0.):
        super(BiDAF, self).__init__()
        self.emb = layers.Embedding(word_vectors=word_vectors,
                                    hidden_size=hidden_size,
                                    drop_prob=drop_prob)
        # Bidirectional encoder: output feature size is 2 * hidden_size.
        self.enc = layers.RNNEncoder(input_size=hidden_size,
                                     hidden_size=hidden_size,
                                     num_layers=1,
                                     drop_prob=drop_prob)
        self.att = layers.BiDAFAttention(hidden_size=2 * hidden_size,
                                         drop_prob=drop_prob)
        # Attention output is 8 * hidden_size wide (see shape comments in forward).
        self.mod = layers.RNNEncoder(input_size=8 * hidden_size,
                                     hidden_size=hidden_size,
                                     num_layers=2,
                                     drop_prob=drop_prob)
        self.out = layers.BiDAFOutput(hidden_size=hidden_size,
                                      drop_prob=drop_prob)

    def forward(self, cw_idxs, qw_idxs):
        """Run context/question word indices through the network.

        Returns the output of ``layers.BiDAFOutput`` — per the shape comments
        below, two tensors each of shape (batch_size, c_len).
        """
        # Mask of real tokens; assumes index 0 is padding — TODO confirm vocab.
        c_mask = torch.zeros_like(cw_idxs) != cw_idxs
        q_mask = torch.zeros_like(qw_idxs) != qw_idxs
        c_len, q_len = c_mask.sum(-1), q_mask.sum(-1)
        c_emb = self.emb(cw_idxs)         # (batch_size, c_len, hidden_size)
        q_emb = self.emb(qw_idxs)         # (batch_size, q_len, hidden_size)
        c_enc = self.enc(c_emb, c_len)    # (batch_size, c_len, 2 * hidden_size)
        q_enc = self.enc(q_emb, q_len)    # (batch_size, q_len, 2 * hidden_size)
        att = self.att(c_enc, q_enc,
                       c_mask, q_mask)    # (batch_size, c_len, 8 * hidden_size)
        mod = self.mod(att, c_len)        # (batch_size, c_len, 2 * hidden_size)
        out = self.out(att, mod, c_mask)  # 2 tensors, each (batch_size, c_len)
        return out
class BiDAF_charCNN(nn.Module):
    """BiDAF model for SQuAD with an additional character-level CNN embedding.

    Same high-level structure as ``BiDAF`` (see that class), except the input
    representation concatenates a word embedding with a character-CNN embedding
    and passes the result through a highway encoder.

    Based on "Bidirectional Attention Flow for Machine Comprehension"
    (https://arxiv.org/abs/1611.01603).

    Args:
        word_vectors (torch.Tensor): Pre-trained word vectors.
        char_vectors (torch.Tensor): Pre-trained character vectors.
        hidden_size (int): Number of features in the hidden state at each layer.
        drop_prob (float): Dropout probability.
    """
    def __init__(self, word_vectors, char_vectors, hidden_size, drop_prob=0.):
        super(BiDAF_charCNN, self).__init__()
        self.emb = layers.Embedding(word_vectors=word_vectors,
                                    hidden_size=hidden_size,
                                    drop_prob=drop_prob)
        self.char_emb = layers.CharEmbedding(char_vectors=char_vectors,
                                             hidden_size=hidden_size,
                                             drop_prob=drop_prob)
        # Highway network over the concatenated (word ++ char) embedding.
        self.hwy = layers.HighwayEncoder(2, 2*hidden_size)
        self.enc = layers.RNNEncoder(input_size=2*hidden_size,
                                     hidden_size=2*hidden_size,
                                     num_layers=1,
                                     drop_prob=drop_prob)
        self.att = layers.BiDAFAttention(hidden_size=2 * 2*hidden_size,
                                         drop_prob=drop_prob)
        self.mod = layers.RNNEncoder(input_size=8 * 2*hidden_size,
                                     hidden_size=hidden_size,
                                     num_layers=2,
                                     drop_prob=drop_prob)
        self.out = layers.BiDAFOutput(hidden_size=hidden_size,
                                      drop_prob=drop_prob)

    def forward(self, cw_idxs, cc_idxs, qw_idxs, qc_idxs):
        """Forward pass over context/question word indices and char indices."""
        # Mask of real tokens; assumes index 0 is padding — TODO confirm vocab.
        c_mask = torch.zeros_like(cw_idxs) != cw_idxs
        q_mask = torch.zeros_like(qw_idxs) != qw_idxs
        c_len, q_len = c_mask.sum(-1), q_mask.sum(-1)
        c_emb_w = self.emb(cw_idxs)        # (batch_size, c_len, hidden_size)
        q_emb_w = self.emb(qw_idxs)        # (batch_size, q_len, hidden_size)
        c_emb_cc = self.char_emb(cc_idxs)  # (batch_size, c_len, hidden_size)
        q_emb_cc = self.char_emb(qc_idxs)  # (batch_size, q_len, hidden_size)
        # Concatenate word and char features, then mix via the highway network.
        c_emb = self.hwy(torch.cat([c_emb_w,c_emb_cc],axis=-1))
        q_emb = self.hwy(torch.cat([q_emb_w,q_emb_cc],axis=-1))
        c_enc = self.enc(c_emb, c_len)    # (batch_size, c_len, 2 * hidden_size)
        q_enc = self.enc(q_emb, q_len)    # (batch_size, q_len, 2 * hidden_size)
        att = self.att(c_enc, q_enc,
                       c_mask, q_mask)    # (batch_size, c_len, 8 * hidden_size)
        mod = self.mod(att, c_len)        # (batch_size, c_len, 2 * hidden_size)
        out = self.out(att, mod, c_mask)  # 2 tensors, each (batch_size, c_len)
        return out
class BiDAF_charCNN_BERTEnc(nn.Module):
    """BiDAF variant with char-CNN embeddings and a Transformer (BERT-style)
    encoder in place of the RNN encoder layer.

    Same high-level structure as ``BiDAF_charCNN``, with two differences:
      - the encoder layer is ``bert_layers.BertEncoder`` (self-attention),
        which does not take sequence lengths;
      - word and char embeddings can optionally be interleaved feature-wise
        ("twisted") instead of concatenated (see ``twist``).

    Args:
        word_vectors (torch.Tensor): Pre-trained word vectors.
        char_vectors (torch.Tensor): Pre-trained character vectors.
        hidden_size (int): Number of features in the hidden state at each layer.
        drop_prob (float): Dropout probability.
        twist_embeddings (bool): If True, interleave word/char features instead
            of concatenating them before the highway encoder.
    """
    def __init__(self, word_vectors, char_vectors, hidden_size, drop_prob=0.,twist_embeddings=True):
        super(BiDAF_charCNN_BERTEnc, self).__init__()
        ###
        self.twist_embeddings = twist_embeddings
        # Precompute the interleaving index [0, H, 1, H+1, ...] used by twist();
        # registered as a buffer so it moves with the module's device.
        idx_list = []
        for i in range(hidden_size):
            idx_list.append(i)
            idx_list.append(hidden_size+i)
        self.register_buffer('idx_twist',torch.tensor(idx_list))
        ###
        self.emb = layers.Embedding(word_vectors=word_vectors,
                                    hidden_size=hidden_size,
                                    drop_prob=drop_prob)
        self.char_emb = layers.CharEmbedding(char_vectors=char_vectors,
                                             hidden_size=hidden_size,
                                             drop_prob=drop_prob)
        self.hwy = layers.HighwayEncoder(2, 2*hidden_size)
        # Transformer encoder; d_ff/dropout are hard-coded experiment settings
        # (the commented alternatives were earlier configurations).
        self.enc = bert_layers.BertEncoder(n_layers=6, #n_layers=3,
                                           d_feature=2*hidden_size,
                                           n_heads=8,
                                           out_size=2*hidden_size,
                                           d_ff=2048,
                                           #d_ff = 2*hidden_size,
                                           dropout_prob=0.1,
                                           #dropout_prob=drop_prob,
                                           ff_activation=F.relu)
        self.att = layers.BiDAFAttention(hidden_size=2 * hidden_size,
                                         drop_prob=drop_prob)
        self.mod = layers.RNNEncoder(input_size=8 * hidden_size,
                                     hidden_size=hidden_size,
                                     num_layers=2,
                                     drop_prob=drop_prob)
        self.out = layers.BiDAFOutput(hidden_size=hidden_size,
                                      drop_prob=drop_prob)

    def twist(self,a,b):
        """Interleave `a` and `b` along the last dimension.

        Given a = [a0, a1, ...] and b = [b0, b1, ...] (same shape), returns
        [a0, b0, a1, b1, ...] using the precomputed ``idx_twist`` buffer.
        """
        assert a.shape == b.shape , 'tensors to be twisted need to have the same size'
        idx = self.idx_twist.repeat(a.shape[0],a.shape[1],1)
        c = torch.cat([a,b],axis=-1)
        return torch.gather(c,-1,idx)

    def forward(self, cw_idxs, cc_idxs, qw_idxs, qc_idxs):
        """Forward pass over context/question word indices and char indices."""
        # Mask of real tokens; assumes index 0 is padding — TODO confirm vocab.
        c_mask = torch.zeros_like(cw_idxs) != cw_idxs
        q_mask = torch.zeros_like(qw_idxs) != qw_idxs
        c_len, q_len = c_mask.sum(-1), q_mask.sum(-1)
        c_emb_w = self.emb(cw_idxs)        # (batch_size, c_len, hidden_size)
        q_emb_w = self.emb(qw_idxs)        # (batch_size, q_len, hidden_size)
        c_emb_cc = self.char_emb(cc_idxs)  # (batch_size, c_len, hidden_size)
        q_emb_cc = self.char_emb(qc_idxs)  # (batch_size, q_len, hidden_size)
        if self.twist_embeddings:
            c_emb = self.hwy(self.twist(c_emb_w,c_emb_cc))
            q_emb = self.hwy(self.twist(q_emb_w,q_emb_cc))
        else:
            c_emb = self.hwy(torch.cat([c_emb_w,c_emb_cc],axis=-1))
            q_emb = self.hwy(torch.cat([q_emb_w,q_emb_cc],axis=-1))
        # BertEncoder takes no length argument (unlike RNNEncoder).
        c_enc = self.enc(c_emb)           # (batch_size, c_len, 2 * hidden_size)
        q_enc = self.enc(q_emb)           # (batch_size, q_len, 2 * hidden_size)
        att = self.att(c_enc, q_enc,c_mask, q_mask)  # (batch_size, c_len, 8 * hidden_size)
        mod = self.mod(att, c_len)        # (batch_size, c_len, 2 * hidden_size)
        out = self.out(att, mod, c_mask)  # 2 tensors, each (batch_size, c_len)
        return out
class BiDAF_charCNN_BERTEnc_BERTMod(nn.Module):
    """BiDAF variant with char-CNN embeddings and Transformer (BERT-style)
    blocks for BOTH the encoder layer and the model layer.

    Same high-level structure as ``BiDAF_charCNN_BERTEnc``, but the model
    layer (``self.mod``) is also a ``bert_layers.BertEncoder`` instead of an
    RNN (the replaced RNN is kept below, commented out, for reference).

    Args:
        word_vectors (torch.Tensor): Pre-trained word vectors.
        char_vectors (torch.Tensor): Pre-trained character vectors.
        hidden_size (int): Number of features in the hidden state at each layer.
        drop_prob (float): Dropout probability.
        twist_embeddings (bool): If True, interleave word/char features instead
            of concatenating them before the highway encoder.
    """
    def __init__(self, word_vectors, char_vectors, hidden_size, drop_prob=0.,twist_embeddings=False):
        super(BiDAF_charCNN_BERTEnc_BERTMod, self).__init__()
        ###
        self.twist_embeddings = twist_embeddings
        # Precompute the interleaving index [0, H, 1, H+1, ...] used by twist();
        # registered as a buffer so it moves with the module's device.
        idx_list = []
        for i in range(hidden_size):
            idx_list.append(i)
            idx_list.append(hidden_size+i)
        self.register_buffer('idx_twist',torch.tensor(idx_list))
        ###
        self.emb = layers.Embedding(word_vectors=word_vectors,
                                    hidden_size=hidden_size,
                                    drop_prob=drop_prob)
        self.char_emb = layers.CharEmbedding(char_vectors=char_vectors,
                                             hidden_size=hidden_size,
                                             drop_prob=drop_prob)
        self.hwy = layers.HighwayEncoder(2, 2*hidden_size)
        # Transformer encoder; settings are hard-coded experiment choices
        # (commented alternatives were earlier configurations).
        self.enc = bert_layers.BertEncoder(n_layers=3, #n_layers=4,
                                           d_feature=2*hidden_size,
                                           n_heads=8,
                                           out_size=2*hidden_size,
                                           #d_ff=2048,
                                           d_ff = 2*hidden_size,
                                           dropout_prob=0.1,
                                           #dropout_prob=drop_prob,
                                           ff_activation=F.relu)
        self.att = layers.BiDAFAttention(hidden_size=2 * hidden_size,
                                         drop_prob=drop_prob)
        # Transformer model layer consuming the 8*H attention output.
        self.mod = bert_layers.BertEncoder(n_layers=3, #n_layers=3,
                                           d_feature=8*hidden_size,
                                           n_heads=8,
                                           out_size=2*hidden_size,
                                           #d_ff=2048,
                                           d_ff = 2*hidden_size,
                                           dropout_prob=0.1,
                                           #dropout_prob=drop_prob,
                                           ff_activation=F.relu)
        # self.mod = layers.RNNEncoder(input_size=8 * hidden_size,
        #                              hidden_size=hidden_size,
        #                              num_layers=2,
        #                              drop_prob=drop_prob)
        self.out = layers.BiDAFOutput(hidden_size=hidden_size,
                                      drop_prob=drop_prob)

    def twist(self,a,b):
        """Interleave `a` and `b` along the last dimension.

        Given a = [a0, a1, ...] and b = [b0, b1, ...] (same shape), returns
        [a0, b0, a1, b1, ...] using the precomputed ``idx_twist`` buffer.
        """
        assert a.shape == b.shape , 'tensors to be twisted need to have the same size'
        idx = self.idx_twist.repeat(a.shape[0],a.shape[1],1)
        c = torch.cat([a,b],axis=-1)
        return torch.gather(c,-1,idx)

    def forward(self, cw_idxs, cc_idxs, qw_idxs, qc_idxs):
        """Forward pass over context/question word indices and char indices."""
        # Mask of real tokens; assumes index 0 is padding — TODO confirm vocab.
        c_mask = torch.zeros_like(cw_idxs) != cw_idxs
        q_mask = torch.zeros_like(qw_idxs) != qw_idxs
        c_len, q_len = c_mask.sum(-1), q_mask.sum(-1)
        c_emb_w = self.emb(cw_idxs)        # (batch_size, c_len, hidden_size)
        q_emb_w = self.emb(qw_idxs)        # (batch_size, q_len, hidden_size)
        c_emb_cc = self.char_emb(cc_idxs)  # (batch_size, c_len, hidden_size)
        q_emb_cc = self.char_emb(qc_idxs)  # (batch_size, q_len, hidden_size)
        if self.twist_embeddings:
            c_emb = self.hwy(self.twist(c_emb_w,c_emb_cc))
            q_emb = self.hwy(self.twist(q_emb_w,q_emb_cc))
        else:
            c_emb = self.hwy(torch.cat([c_emb_w,c_emb_cc],axis=-1))
            q_emb = self.hwy(torch.cat([q_emb_w,q_emb_cc],axis=-1))
        # BertEncoder takes no length argument (unlike RNNEncoder).
        c_enc = self.enc(c_emb)           # (batch_size, c_len, 2 * hidden_size)
        q_enc = self.enc(q_emb)           # (batch_size, q_len, 2 * hidden_size)
        att = self.att(c_enc, q_enc,c_mask, q_mask)  # (batch_size, c_len, 8 * hidden_size)
        mod = self.mod(att)               # (batch_size, c_len, 2 * hidden_size)
        out = self.out(att, mod, c_mask)  # 2 tensors, each (batch_size, c_len)
        return out
| 43.652778
| 101
| 0.553038
| 2,000
| 15,715
| 4.0635
| 0.0885
| 0.125508
| 0.040605
| 0.036791
| 0.972807
| 0.967885
| 0.961363
| 0.961363
| 0.94869
| 0.94869
| 0
| 0.014264
| 0.35762
| 15,715
| 359
| 102
| 43.774373
| 0.790788
| 0.303213
| 0
| 0.84127
| 0
| 0
| 0.010733
| 0
| 0
| 0
| 0
| 0
| 0.010582
| 1
| 0.05291
| false
| 0
| 0.021164
| 0
| 0.126984
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
312b402bd2f99e17ae250054d893f0647a3a147c
| 9,528
|
py
|
Python
|
model/parts/v2_hydra.py
|
patriacaelum/HydraDX-simulations
|
57948099c924309636647ccd0768afc60a28f705
|
[
"Apache-2.0"
] | null | null | null |
model/parts/v2_hydra.py
|
patriacaelum/HydraDX-simulations
|
57948099c924309636647ccd0768afc60a28f705
|
[
"Apache-2.0"
] | 4
|
2021-10-14T15:51:46.000Z
|
2021-10-17T18:44:03.000Z
|
model/parts/v2_hydra.py
|
patriacaelum/HydraDX-simulations
|
57948099c924309636647ccd0768afc60a28f705
|
[
"Apache-2.0"
] | 1
|
2021-10-14T05:45:47.000Z
|
2021-10-14T05:45:47.000Z
|
import numpy as np
import pandas as pd
from .v2_hydra_utils import * # original mechanisms
# from .hydra_agent_utils import *
from .v2_hydra_agent import *
from .v2_hydra_mechs import * # newer mechanisms
from .v2_hydra_coeffs import * # new mechanism 28 June 2021
# Mechanisms
def mechanismHub_oracle_price_i(params, substep, state_history, prev_state, policy_input):
    """Return the state key and updated oracle price for token i.

    The new price is the previous price plus the delta supplied by the policy.
    """
    key = 'oracle_price_i'
    updated_price = prev_state[key] + policy_input[key]
    return key, updated_price
def mechanismHub_oracle_price_j(params, substep, state_history, prev_state, policy_input):
    """Return the state key and updated oracle price for token j.

    The new price is the previous price plus the delta supplied by the policy.
    """
    key = 'oracle_price_j'
    updated_price = prev_state[key] + policy_input[key]
    return key, updated_price
def mechanismHub_oracle_price_hydra(params, substep, state_history, prev_state, policy_input):
    """Return the state key and updated oracle price for Hydra.

    The new price is the previous price plus the delta supplied by the policy.
    """
    key = 'oracle_price_hydra'
    updated_price = prev_state[key] + policy_input[key]
    return key, updated_price
def mechanismHub_fee_percent(params, substep, state_history, prev_state, policy_input):
    """Return the state key and the new fee percent.

    The value is taken directly from the policy input (it replaces, not
    accumulates, the previous fee percent).
    """
    return 'fee_percent', policy_input['fee_percent']
def mechanismHub_dynamic_fee_percent(params, substep, state_history, prev_state, policy_input):
    """Accumulate the revenue generated from a dynamic fee for one asset.

    NOTE(review): this updates the dict held inside ``prev_state`` in place
    (matching the original implementation) and then returns it.
    """
    asset_id = policy_input['asset_id']
    revenue_by_asset = prev_state['dynamic_revenue']
    revenue_by_asset[asset_id] = revenue_by_asset[asset_id] + policy_input['dynamic_fee']
    return 'dynamic_revenue', revenue_by_asset
def mechanismHub_fee_revenue(params, substep, state_history, prev_state, policy_input):
    """Accumulate the fee taken from a trade, keyed by asset.

    Practical implementation of fee hypothesis 1: the asset id is the key and
    the fee revenue gained from it is the value.

    NOTE(review): this updates the dict held inside ``prev_state`` in place
    (matching the original implementation) and then returns it.
    """
    asset_id = policy_input['asset_id']
    revenue_by_asset = prev_state['fee_revenue']
    revenue_by_asset[asset_id] = revenue_by_asset[asset_id] + policy_input['fee']
    return 'fee_revenue', revenue_by_asset
def mechanismHub_pool(params, substep, state_history, prev_state, policy_input):
    """Dispatch the appropriate 'pool' state-update function for the action.

    Selection of alternative mechanisms is conditioned on the 'CHANGE_LOG'
    parameter; at present both branches dispatch to the same function (the
    else branch is a placeholder for an alternative implementation).
    Unknown actions leave the pool unchanged.
    """
    args = (params, substep, state_history, prev_state, policy_input)
    action = policy_input['action_id']
    if action == 'Ri_Purchase':
        return q_to_r_pool(*args)
    if action == 'Q_Purchase':
        if params['CHANGE_LOG'] == '7-13-21':
            return r_to_q_pool(*args)
        return r_to_q_pool(*args)  # placeholder for alternative mechanism
    if action == 'AddLiquidity':
        return addLiquidity_pool(*args)
    if action == 'RemoveLiquidity':
        return removeLiquidity_pool(*args)
    if action == 'R_Swap':
        if params['CHANGE_LOG'] == '7-13-21':
            return r_to_r_pool(*args)
        return r_to_r_pool(*args)  # placeholder for alternative mechanism
    return 'pool', prev_state['pool']
def mechanismHub_Q_Hydra(params, substep, state_history, prev_state, policy_input):
    """Dispatch the appropriate hydra (Q = inside pool) update for the action.

    Selection of alternative mechanisms is conditioned on the 'CHANGE_LOG'
    parameter; at present both branches dispatch to the same function (the
    else branch is a placeholder for an alternative implementation).
    Unknown actions leave Q unchanged.
    """
    args = (params, substep, state_history, prev_state, policy_input)
    action = policy_input['action_id']
    if action == 'Ri_Purchase':
        if params['CHANGE_LOG'] == '7-13-21':
            return q_to_r_Qh(*args)
        return q_to_r_Qh(*args)  # placeholder for alternative mechanism
    if action == 'Q_Purchase':
        if params['CHANGE_LOG'] == '7-13-21':
            return r_to_q_Qh(*args)
        return r_to_q_Qh(*args)  # placeholder for alternative mechanism
    if action == 'AddLiquidity':
        return addLiquidity_Qh(*args)
    if action == 'RemoveLiquidity':
        return removeLiquidity_Qh(*args)
    if action == 'R_Swap':
        if params['CHANGE_LOG'] == '7-13-21':
            return r_to_r_swap_Qh(*args)
        return r_to_r_swap_Qh(*args)  # placeholder for alternative mechanism
    return 'Q', prev_state['Q']
def mechanismHub_Sq(params, substep, state_history, prev_state, policy_input):
    """Dispatch the appropriate share (Sq) update for liquidity actions.

    Only liquidity events change the share total; any other action leaves
    Sq unchanged.
    """
    args = (params, substep, state_history, prev_state, policy_input)
    action = policy_input['action_id']
    if action == 'AddLiquidity':
        return addLiquidity_Sq(*args)
    if action == 'RemoveLiquidity':
        return removeLiquidity_Sq(*args)
    return 'Sq', prev_state['Sq']
def H_agenthub(params, substep, state_history, prev_state, policy_input):
    """Dispatch the appropriate hydra-agents update for the action.

    Selection of alternative mechanisms is conditioned on the 'CHANGE_LOG'
    parameter; at present both branches dispatch to the same function (the
    else branch is a placeholder for an alternative implementation).
    Unknown actions leave the agents state unchanged.
    """
    args = (params, substep, state_history, prev_state, policy_input)
    action = policy_input['action_id']
    if action == 'Ri_Purchase':
        if params['CHANGE_LOG'] == '7-13-21':
            return H_agent_q_to_r(*args)
        return H_agent_q_to_r(*args)  # placeholder for alternative mechanism
    if action == 'Q_Purchase':
        if params['CHANGE_LOG'] == '7-13-21':  # no actual change in H
            return H_agent_r_to_q(*args)
        return H_agent_r_to_q(*args)  # placeholder for alternative mechanism
    if action == 'AddLiquidity':
        return H_agent_add_liq(*args)
    if action == 'RemoveLiquidity':
        return H_agent_remove_liq(*args)
    if action == 'R_Swap':
        if params['CHANGE_LOG'] == '7-13-21':
            return H_agent_r_to_r_swap(*args)
        return H_agent_r_to_r_swap(*args)  # placeholder for alternative mechanism
    return 'hydra_agents', prev_state['hydra_agents']
def mechanismHub_H_Hydra(params, substep, state_history, prev_state, policy_input):
    """Dispatch the appropriate Hydra (H = total supply) update for the action.

    Selection of alternative mechanisms is conditioned on the 'CHANGE_LOG'
    parameter; at present both branches dispatch to the same function (the
    else branch is a placeholder for an alternative implementation).
    Unknown actions leave H unchanged.
    """
    args = (params, substep, state_history, prev_state, policy_input)
    action = policy_input['action_id']
    if action == 'Ri_Purchase':
        if params['CHANGE_LOG'] == '7-13-21':
            return q_to_r_H(*args)
        return q_to_r_H(*args)  # placeholder for alternative mechanism
    if action == 'Q_Purchase':
        if params['CHANGE_LOG'] == '7-13-21':
            return r_to_q_H(*args)
        return r_to_q_H(*args)  # placeholder for alternative mechanism
    if action == 'AddLiquidity':
        return resolve_addLiquidity_H(*args)
    if action == 'RemoveLiquidity':
        return resolve_remove_Liquidity_H(*args)
    if action == 'R_Swap':
        if params['CHANGE_LOG'] == '7-13-21':
            return r_to_r_swap_H(*args)
        return r_to_r_swap_H(*args)  # placeholder for alternative mechanism
    return 'H', prev_state['H']
def mechanismHub_Y(params, substep, state_history, prev_state, policy_input):
    """Dispatch the appropriate Y update for liquidity events.

    Only liquidity events change Y; any other action leaves it unchanged.
    """
    args = (params, substep, state_history, prev_state, policy_input)
    action = policy_input['action_id']
    if action == 'AddLiquidity':
        return addLiquidity_Y(*args)
    if action == 'RemoveLiquidity':
        return removeLiquidity_Y(*args)
    return 'Y', prev_state['Y']
| 52.351648
| 179
| 0.723447
| 1,275
| 9,528
| 5.119216
| 0.094902
| 0.109545
| 0.129615
| 0.180021
| 0.803279
| 0.796231
| 0.794086
| 0.794086
| 0.787192
| 0.765896
| 0
| 0.008518
| 0.186818
| 9,528
| 181
| 180
| 52.640884
| 0.833893
| 0.23835
| 0
| 0.617886
| 0
| 0
| 0.111689
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097561
| false
| 0
| 0.04878
| 0
| 0.479675
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3154be361ae341ce3196361d429ef71d78e96ad6
| 153
|
py
|
Python
|
src/spaceone/config/info/__init__.py
|
whdalsrnt/config
|
51f238811a6df5656cc703780856f748ec7f8a93
|
[
"Apache-2.0"
] | 5
|
2020-06-04T23:01:51.000Z
|
2020-08-07T05:21:23.000Z
|
src/spaceone/config/info/__init__.py
|
whdalsrnt/config
|
51f238811a6df5656cc703780856f748ec7f8a93
|
[
"Apache-2.0"
] | null | null | null |
src/spaceone/config/info/__init__.py
|
whdalsrnt/config
|
51f238811a6df5656cc703780856f748ec7f8a93
|
[
"Apache-2.0"
] | 5
|
2020-06-10T01:51:33.000Z
|
2021-10-21T04:39:13.000Z
|
from spaceone.config.info.common_info import *
from spaceone.config.info.user_config_info import *
from spaceone.config.info.domain_config_info import *
| 38.25
| 53
| 0.843137
| 23
| 153
| 5.391304
| 0.347826
| 0.403226
| 0.435484
| 0.532258
| 0.516129
| 0.516129
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078431
| 153
| 3
| 54
| 51
| 0.879433
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
3165aeb8746d6d09b04b9d318509975460fa8ee3
| 113
|
py
|
Python
|
StaticClass/__init__.py
|
toadicus/PyKStuff
|
b9bb1d5488adb814b4a2845bb940754255ecdc50
|
[
"Unlicense"
] | null | null | null |
StaticClass/__init__.py
|
toadicus/PyKStuff
|
b9bb1d5488adb814b4a2845bb940754255ecdc50
|
[
"Unlicense"
] | null | null | null |
StaticClass/__init__.py
|
toadicus/PyKStuff
|
b9bb1d5488adb814b4a2845bb940754255ecdc50
|
[
"Unlicense"
] | null | null | null |
__author__ = 'toadicus'
from .StaticClass import StaticClass
from .StaticClass import StaticClass as staticclass
| 28.25
| 51
| 0.840708
| 12
| 113
| 7.583333
| 0.5
| 0.32967
| 0.461538
| 0.703297
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115044
| 113
| 4
| 51
| 28.25
| 0.91
| 0
| 0
| 0
| 0
| 0
| 0.070175
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
319d9be87f205ed505391f7ac524ee3ced954cef
| 1,434
|
py
|
Python
|
tests/patterns/test_match_path_prefix.py
|
hile/pathlib-tree
|
73fe3132472548ca2480b3ab3278dfaab252b149
|
[
"PSF-2.0"
] | 2
|
2022-01-11T14:50:24.000Z
|
2022-03-16T21:37:09.000Z
|
tests/patterns/test_match_path_prefix.py
|
hile/pathlib-tree
|
73fe3132472548ca2480b3ab3278dfaab252b149
|
[
"PSF-2.0"
] | null | null | null |
tests/patterns/test_match_path_prefix.py
|
hile/pathlib-tree
|
73fe3132472548ca2480b3ab3278dfaab252b149
|
[
"PSF-2.0"
] | null | null | null |
"""
Unit tests for pathlib_tree.patterns path prefix matching
"""
from pathlib_tree.patterns import match_path_prefix
def test_match_path_prefix_full_path():
    """A plain string prefix that heads the full path matches (True)."""
    full_path = '/test/other directory/filename.txt'
    assert match_path_prefix('/test', full_path)
def test_match_path_prefix_components():
    """A prefix given as a list of components matches the path list (True)."""
    prefix = ['test']
    path_parts = ['test', 'other directory', 'filename.txt']
    assert match_path_prefix(prefix, path_parts)
def test_match_path_prefix_full_path_no_match():
    """A prefix matching only part of a component does not match (False)."""
    matched = match_path_prefix('/test', '/testing/other directory/filename.txt')
    assert not matched
def test_match_path_prefix_patterns():
    """
    Test glob patterns in the prefix match the path (returns True)
    """
    # Wildcard in a middle component.
    assert match_path_prefix(
        '/test/*/filename.txt',
        '/test/other directory/filename.txt'
    )
    # Every component a wildcard, suffix pattern on the filename.
    assert match_path_prefix(
        '/*/*/*.txt',
        '/test/other directory/filename.txt'
    )
    # Trailing wildcard matching the remainder of the path.
    assert match_path_prefix(
        '/test/*',
        '/test/other directory/filename.txt'
    )
def test_match_path_prefix_patterns_no_match():
    """Glob patterns that do not cover the path do not match (False)."""
    target = '/test/other directory/filename.txt'
    assert not match_path_prefix('/test/mydata*/*.txt', target)
    # A wildcard prefix longer than the path itself cannot match.
    assert not match_path_prefix('/test/*', '/test')
| 24.305085
| 82
| 0.661088
| 169
| 1,434
| 5.337278
| 0.177515
| 0.166297
| 0.232816
| 0.14745
| 0.862528
| 0.837029
| 0.812639
| 0.728381
| 0.67184
| 0.67184
| 0
| 0
| 0.219665
| 1,434
| 58
| 83
| 24.724138
| 0.806077
| 0.205718
| 0
| 0.275862
| 0
| 0
| 0.30303
| 0.125
| 0
| 0
| 0
| 0
| 0.275862
| 1
| 0.172414
| true
| 0
| 0.034483
| 0
| 0.206897
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
31b4266d98da95fd017bd66bc8bf6d234e6c4bd8
| 9,070
|
py
|
Python
|
webapp/tests/api-tests.py
|
kimd113/cs257
|
4e17ba7554dec77ba410282efe1228fde9d9b935
|
[
"MIT"
] | null | null | null |
webapp/tests/api-tests.py
|
kimd113/cs257
|
4e17ba7554dec77ba410282efe1228fde9d9b935
|
[
"MIT"
] | null | null | null |
webapp/tests/api-tests.py
|
kimd113/cs257
|
4e17ba7554dec77ba410282efe1228fde9d9b935
|
[
"MIT"
] | null | null | null |
import unittest
import sys
import json
import urllib.request
API_BASE_URL = 'http://localhost:5000'
def get_JSON_string(url):
    """Fetch *url* with a blocking HTTP GET and return the decoded JSON value.

    Raises urllib.error.URLError/HTTPError on network failure and
    json.JSONDecodeError if the response body is not valid JSON.
    """
    # url = API_BASE_URL
    data_from_server = urllib.request.urlopen(url).read()
    string_from_server = data_from_server.decode('utf-8')
    # self.video_dict_list = json.loads(string_from_server)
    return json.loads(string_from_server)
class MainPageTester(unittest.TestCase):
''' tests the main page endpoint /'''
def setUp(self):
'''
Return a list of data of trending videos.
The data of vidoes are represented as dictionaries of the form:
{
'link': 'kgaO45SyaO4',
'title': 'The New SpotMini',
'channel': 'BostonDynamics',
'publish_time': '2017-11-13T20:09:58.000Z',
'views': '75752',
'likes': '9419',
'dislikes': '52',
'comments': '1230',
'thumbnail_link': 'https://i.ytimg.com/vi/kgaO45SyaO4/default.jpg'
}
The full result of setUp would be like below:
[{'link':'kgaO45SyaO4', ... ,'thumbnail_link':'https://i.ytimg.com/vi/kgaO45SyaO4/default.jpg'},
{'link':'PaJCFHXcWmM', ... ,'thumbnail_link':'https://i.ytimg.com/vi/PaJCFHXcWmM/default.jpg'}]
'''
url = API_BASE_URL + '/'
self.video_dict_list = get_JSON_string(url)
def tearDown(self):
pass
def test_is_empty(self):
self.assertFalse(not self.video_dict_list)
def test_keys(self):
video = self.playlists[0][0]
keys = ['link','title','channel','publish_time','views','likes','dislikes','comments','thumbnail_link']
self.assertTrue(video.keys() == keys)
def test_types(self):
video = self.video_dict_list[0]
self.assertIsInstance(video.get('link'), str)
self.assertIsInstance(video.get('title'), str)
self.assertIsInstance(video.get('channel'), str)
self.assertIsInstance(video.get('publish_time'), str)
self.assertIsInstance(video.get('views'), int)
self.assertIsInstance(video.get('likes'), int)
self.assertIsInstance(video.get('dislikes'), int)
self.assertIsInstance(video.get('comments'), int)
self.assertIsInstance(video.get('thumbnail_link'), str)
class SignUpTester(unittest.TestCase):
''' tests the ednpoint /sign-up '''
def setUp(self):
''' returns a success code if the username is not taken, else an error code '''
url = {API_BASE_URL} + '/sign-up/'
self.message = get_JSON_string(url)
def tearDown(self):
pass
def test_is_empty(self):
self.assertFalse(not self.message)
# After implementing server and database, add a test to check whether message already exists in the database.
class LogInTester(unittest.TestCase):
''' tests the endpoint /log-in'''
def setUp(self):
''' Returns a success code and user information if the username exists, else an error code'''
url = {API_BASE_URL} + '/log-in/'
self.message = get_JSON_string(url)
def tearDown(self):
pass
def test_is_empty(self):
self.assertFalse(not self.message)
# After implementing server and database, add a test to check whether message already exists in the database.
class SaveToPlaylistTester(unittest.TestCase):
''' test the endpoint /save-to-playlist '''
def setUp(self):
''' returns a success code if the video is not in the playlist and saved successfully, else an error code '''
url = {API_BASE_URL} + '/save-to-playlist/'
self.message = get_JSON_string(url)
def tearDown(self):
pass
def test_is_empty(self):
self.assertFalse(not self.message)
class SearchTester(unittest.TestCase):
'''
test for the search endpoint
/videos?title_contains={search_text}&category={category}&channel={channel}&publish-time={publish-time}&sort-option={sort-option}
'''
def setUp(self):
''' Return a list of data of videos searched by queries below:
search_text, category, channel, publish_time, sort_option.
The data of vidoes are represented as dictionaries of the form same as the list from MainPageTester:
The full result of setUp would be like below:
[{'link':'kgaO45SyaO4', ... ,'thumbnail_link':'https://i.ytimg.com/vi/kgaO45SyaO4/default.jpg'},
{'link':'PaJCFHXcWmM', ... ,'thumbnail_link':'https://i.ytimg.com/vi/PaJCFHXcWmM/default.jpg'}]
'''
search_text = 'Is'
category = 'music'
channel = 'ChildishGambinoVEVO'
publish_time = '2018_05'
sort_option = 'views'
url = f'{API_BASE_URL}/videos?title-contains={search_text}&category={category}'
+f'&channel={channel}&publish-time={publish_time}&sort-option={sort_option}/'
self.video_dict_list = get_JSON_string(url)
def tearDown(self):
pass
def test_is_empty(self):
self.assertFalse(not self.video_dict_list)
def test_keys(self):
video = self.playlists[0][0]
keys = ['link','title','channel','publish_time','views','likes','dislikes','comments','thumbnail_link']
self.assertTrue(video.keys() == keys)
def test_types(self):
video = self.video_dict_list[0]
self.assertIsInstance(video.get('link'), str)
self.assertIsInstance(video.get('title'), str)
self.assertIsInstance(video.get('channel'), str)
self.assertIsInstance(video.get('publish_time'), str)
self.assertIsInstance(video.get('views'), int)
self.assertIsInstance(video.get('likes'), int)
self.assertIsInstance(video.get('dislikes'), int)
self.assertIsInstance(video.get('comments'), int)
self.assertIsInstance(video.get('thumbnail_link'), str)
class InvalidSearchTester(unittest.TestCase):
'''
test for the search endpoint /videos?title_contains={search_text}
in this case the search string cannot be found and the API should return an empty list
'''
def setUp(self):
''' Returns Null because the search string cannot be found '''
search_text = '---------------------'
url = f'{API_BASE_URL}/videos?title-contains={search_text}/'
self.video_dict_list = get_JSON_string(url)
def tearDown(self):
pass
def test_is_empty(self):
self.assertTrue(not self.video_dict_list)
class MyPageTester(unittest.TestCase):
'''
test for endpoint /my-page?user={username}, where username is a user with existing playlists
'''
def setUp(self):
'''
Returns a JSON array of arrays, each of which represents a playlist.
Each playlist array is a JSON list of dictionaries, each of represents a video.
'''
username = 'user1'
url = f'{API_BASE_URL}/my-page?user={username}/'
self.playlists = get_JSON_string(url)
def tearDown(self):
pass
def test_is_empty(self):
self.assertFalse(not self.playlists)
self.assertFalse(not self.playlists[0])
def test_keys(self):
video = self.playlists[0][0]
keys = ['link','title','channel','publish_time','views','likes','dislikes','comments','thumbnail_link']
self.assertTrue(video.keys() == keys)
def test_types(self):
video = self.playlists[0][0]
self.assertIsInstance(video.get('link'), str)
self.assertIsInstance(video.get('title'), str)
self.assertIsInstance(video.get('channel'), str)
self.assertIsInstance(video.get('publish_time'), str)
self.assertIsInstance(video.get('views'), int)
self.assertIsInstance(video.get('likes'), int)
self.assertIsInstance(video.get('dislikes'), int)
self.assertIsInstance(video.get('comments'), int)
self.assertIsInstance(video.get('thumbnail_link'), str)
class EmptyMyPageTester(unittest.TestCase):
    '''
    test for endpoint /my-page?user={username}, where the user has no existing playlists
    '''
    def setUp(self):
        '''
        Returns a JSON array of arrays, each of which represents a playlist.
        Each playlist array is a JSON list of dictionaries, each of which represents a video.
        '''
        # NOTE(review): 'user1' is the same username MyPageTester uses for a
        # user WITH playlists, so these two suites cannot both pass against
        # the same backend state — confirm the intended fixture user here.
        username = 'user1'
        url = f'{API_BASE_URL}/my-page?user={username}/'
        self.playlists = get_JSON_string(url)
    def tearDown(self):
        pass
    def test_is_empty(self):
        # Expects an empty result (no playlists) for this user.
        self.assertTrue(not self.playlists)
class LogOutTester(unittest.TestCase):
''' tests /log-out/ endpoint '''
def setUp(self):
''' returns a success code if logged out successfully, else an error code '''
url = {API_BASE_URL} + 'log-out/'
self.message = get_JSON_string(url)
def tearDown(self):
pass
def test_is_empty(self):
self.assertFalse(not self.message)
if __name__ == '__main__':
unittest.main()
| 37.020408
| 136
| 0.633407
| 1,110
| 9,070
| 5.053153
| 0.16036
| 0.096274
| 0.120342
| 0.134783
| 0.79212
| 0.773756
| 0.76199
| 0.750758
| 0.739882
| 0.695668
| 0
| 0.010701
| 0.237596
| 9,070
| 244
| 137
| 37.172131
| 0.800434
| 0.031753
| 0
| 0.724638
| 0
| 0
| 0.141852
| 0.050257
| 0
| 0
| 0
| 0
| 0.289855
| 0
| null | null | 0.065217
| 0.028986
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
31d06e39b8ca5cd7fb9fe73bccc1408ff58830af
| 6,683
|
py
|
Python
|
loldib/getratings/models/NA/na_sejuani/na_sejuani_top.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_sejuani/na_sejuani_top.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_sejuani/na_sejuani_top.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
from getratings.models.ratings import Ratings
class NA_Sejuani_Top_Aatrox(Ratings):
pass
class NA_Sejuani_Top_Ahri(Ratings):
pass
class NA_Sejuani_Top_Akali(Ratings):
pass
class NA_Sejuani_Top_Alistar(Ratings):
pass
class NA_Sejuani_Top_Amumu(Ratings):
pass
class NA_Sejuani_Top_Anivia(Ratings):
pass
class NA_Sejuani_Top_Annie(Ratings):
pass
class NA_Sejuani_Top_Ashe(Ratings):
pass
class NA_Sejuani_Top_AurelionSol(Ratings):
pass
class NA_Sejuani_Top_Azir(Ratings):
pass
class NA_Sejuani_Top_Bard(Ratings):
pass
class NA_Sejuani_Top_Blitzcrank(Ratings):
pass
class NA_Sejuani_Top_Brand(Ratings):
pass
class NA_Sejuani_Top_Braum(Ratings):
pass
class NA_Sejuani_Top_Caitlyn(Ratings):
pass
class NA_Sejuani_Top_Camille(Ratings):
pass
class NA_Sejuani_Top_Cassiopeia(Ratings):
pass
class NA_Sejuani_Top_Chogath(Ratings):
pass
class NA_Sejuani_Top_Corki(Ratings):
pass
class NA_Sejuani_Top_Darius(Ratings):
pass
class NA_Sejuani_Top_Diana(Ratings):
pass
class NA_Sejuani_Top_Draven(Ratings):
pass
class NA_Sejuani_Top_DrMundo(Ratings):
pass
class NA_Sejuani_Top_Ekko(Ratings):
pass
class NA_Sejuani_Top_Elise(Ratings):
pass
class NA_Sejuani_Top_Evelynn(Ratings):
pass
class NA_Sejuani_Top_Ezreal(Ratings):
pass
class NA_Sejuani_Top_Fiddlesticks(Ratings):
pass
class NA_Sejuani_Top_Fiora(Ratings):
pass
class NA_Sejuani_Top_Fizz(Ratings):
pass
class NA_Sejuani_Top_Galio(Ratings):
pass
class NA_Sejuani_Top_Gangplank(Ratings):
pass
class NA_Sejuani_Top_Garen(Ratings):
pass
class NA_Sejuani_Top_Gnar(Ratings):
pass
class NA_Sejuani_Top_Gragas(Ratings):
pass
class NA_Sejuani_Top_Graves(Ratings):
pass
class NA_Sejuani_Top_Hecarim(Ratings):
pass
class NA_Sejuani_Top_Heimerdinger(Ratings):
pass
class NA_Sejuani_Top_Illaoi(Ratings):
pass
class NA_Sejuani_Top_Irelia(Ratings):
pass
class NA_Sejuani_Top_Ivern(Ratings):
pass
class NA_Sejuani_Top_Janna(Ratings):
pass
class NA_Sejuani_Top_JarvanIV(Ratings):
pass
class NA_Sejuani_Top_Jax(Ratings):
pass
class NA_Sejuani_Top_Jayce(Ratings):
pass
class NA_Sejuani_Top_Jhin(Ratings):
pass
class NA_Sejuani_Top_Jinx(Ratings):
pass
class NA_Sejuani_Top_Kalista(Ratings):
pass
class NA_Sejuani_Top_Karma(Ratings):
pass
class NA_Sejuani_Top_Karthus(Ratings):
pass
class NA_Sejuani_Top_Kassadin(Ratings):
pass
class NA_Sejuani_Top_Katarina(Ratings):
pass
class NA_Sejuani_Top_Kayle(Ratings):
pass
class NA_Sejuani_Top_Kayn(Ratings):
pass
class NA_Sejuani_Top_Kennen(Ratings):
pass
class NA_Sejuani_Top_Khazix(Ratings):
pass
class NA_Sejuani_Top_Kindred(Ratings):
pass
class NA_Sejuani_Top_Kled(Ratings):
pass
class NA_Sejuani_Top_KogMaw(Ratings):
pass
class NA_Sejuani_Top_Leblanc(Ratings):
pass
class NA_Sejuani_Top_LeeSin(Ratings):
pass
class NA_Sejuani_Top_Leona(Ratings):
pass
class NA_Sejuani_Top_Lissandra(Ratings):
pass
class NA_Sejuani_Top_Lucian(Ratings):
pass
class NA_Sejuani_Top_Lulu(Ratings):
pass
class NA_Sejuani_Top_Lux(Ratings):
pass
class NA_Sejuani_Top_Malphite(Ratings):
pass
class NA_Sejuani_Top_Malzahar(Ratings):
pass
class NA_Sejuani_Top_Maokai(Ratings):
pass
class NA_Sejuani_Top_MasterYi(Ratings):
pass
class NA_Sejuani_Top_MissFortune(Ratings):
pass
class NA_Sejuani_Top_MonkeyKing(Ratings):
pass
class NA_Sejuani_Top_Mordekaiser(Ratings):
pass
class NA_Sejuani_Top_Morgana(Ratings):
pass
class NA_Sejuani_Top_Nami(Ratings):
pass
class NA_Sejuani_Top_Nasus(Ratings):
pass
class NA_Sejuani_Top_Nautilus(Ratings):
pass
class NA_Sejuani_Top_Nidalee(Ratings):
pass
class NA_Sejuani_Top_Nocturne(Ratings):
pass
class NA_Sejuani_Top_Nunu(Ratings):
pass
class NA_Sejuani_Top_Olaf(Ratings):
pass
class NA_Sejuani_Top_Orianna(Ratings):
pass
class NA_Sejuani_Top_Ornn(Ratings):
pass
class NA_Sejuani_Top_Pantheon(Ratings):
pass
class NA_Sejuani_Top_Poppy(Ratings):
pass
class NA_Sejuani_Top_Quinn(Ratings):
pass
class NA_Sejuani_Top_Rakan(Ratings):
pass
class NA_Sejuani_Top_Rammus(Ratings):
pass
class NA_Sejuani_Top_RekSai(Ratings):
pass
class NA_Sejuani_Top_Renekton(Ratings):
pass
class NA_Sejuani_Top_Rengar(Ratings):
pass
class NA_Sejuani_Top_Riven(Ratings):
pass
class NA_Sejuani_Top_Rumble(Ratings):
pass
class NA_Sejuani_Top_Ryze(Ratings):
pass
class NA_Sejuani_Top_Sejuani(Ratings):
pass
class NA_Sejuani_Top_Shaco(Ratings):
pass
class NA_Sejuani_Top_Shen(Ratings):
pass
class NA_Sejuani_Top_Shyvana(Ratings):
pass
class NA_Sejuani_Top_Singed(Ratings):
pass
class NA_Sejuani_Top_Sion(Ratings):
pass
class NA_Sejuani_Top_Sivir(Ratings):
pass
class NA_Sejuani_Top_Skarner(Ratings):
pass
class NA_Sejuani_Top_Sona(Ratings):
pass
class NA_Sejuani_Top_Soraka(Ratings):
pass
class NA_Sejuani_Top_Swain(Ratings):
pass
class NA_Sejuani_Top_Syndra(Ratings):
pass
class NA_Sejuani_Top_TahmKench(Ratings):
pass
class NA_Sejuani_Top_Taliyah(Ratings):
pass
class NA_Sejuani_Top_Talon(Ratings):
pass
class NA_Sejuani_Top_Taric(Ratings):
pass
class NA_Sejuani_Top_Teemo(Ratings):
pass
class NA_Sejuani_Top_Thresh(Ratings):
pass
class NA_Sejuani_Top_Tristana(Ratings):
pass
class NA_Sejuani_Top_Trundle(Ratings):
pass
class NA_Sejuani_Top_Tryndamere(Ratings):
pass
class NA_Sejuani_Top_TwistedFate(Ratings):
pass
class NA_Sejuani_Top_Twitch(Ratings):
pass
class NA_Sejuani_Top_Udyr(Ratings):
pass
class NA_Sejuani_Top_Urgot(Ratings):
pass
class NA_Sejuani_Top_Varus(Ratings):
pass
class NA_Sejuani_Top_Vayne(Ratings):
pass
class NA_Sejuani_Top_Veigar(Ratings):
pass
class NA_Sejuani_Top_Velkoz(Ratings):
pass
class NA_Sejuani_Top_Vi(Ratings):
pass
class NA_Sejuani_Top_Viktor(Ratings):
pass
class NA_Sejuani_Top_Vladimir(Ratings):
pass
class NA_Sejuani_Top_Volibear(Ratings):
pass
class NA_Sejuani_Top_Warwick(Ratings):
pass
class NA_Sejuani_Top_Xayah(Ratings):
pass
class NA_Sejuani_Top_Xerath(Ratings):
pass
class NA_Sejuani_Top_XinZhao(Ratings):
pass
class NA_Sejuani_Top_Yasuo(Ratings):
pass
class NA_Sejuani_Top_Yorick(Ratings):
pass
class NA_Sejuani_Top_Zac(Ratings):
pass
class NA_Sejuani_Top_Zed(Ratings):
pass
class NA_Sejuani_Top_Ziggs(Ratings):
pass
class NA_Sejuani_Top_Zilean(Ratings):
pass
class NA_Sejuani_Top_Zyra(Ratings):
pass
| 16.026379
| 46
| 0.77151
| 972
| 6,683
| 4.878601
| 0.151235
| 0.203712
| 0.407423
| 0.494728
| 0.808941
| 0.808941
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166243
| 6,683
| 416
| 47
| 16.064904
| 0.851041
| 0
| 0
| 0.498195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.498195
| 0.00361
| 0
| 0.501805
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 8
|
9edcfdfc8bd73e360ed7a359addb217174f87169
| 1,692
|
py
|
Python
|
torch_glow/tests/nodes/div_test.py
|
brightstandlamp/glow
|
e27a74e150a2284300bb7da5529dcbfeb55e1f00
|
[
"Apache-2.0"
] | null | null | null |
torch_glow/tests/nodes/div_test.py
|
brightstandlamp/glow
|
e27a74e150a2284300bb7da5529dcbfeb55e1f00
|
[
"Apache-2.0"
] | null | null | null |
torch_glow/tests/nodes/div_test.py
|
brightstandlamp/glow
|
e27a74e150a2284300bb7da5529dcbfeb55e1f00
|
[
"Apache-2.0"
] | 1
|
2020-02-13T10:46:16.000Z
|
2020-02-13T10:46:16.000Z
|
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests.utils import jitVsGlow
def test_div_basic():
    """Basic check that PyTorch's div node lowers to Glow's aten::div."""

    def test_f(a, b):
        quotient = a.div(b)
        return quotient.div(quotient)

    lhs = torch.randn(4)
    rhs = torch.randn(4)

    jitVsGlow(test_f, lhs, rhs, expected_fused_ops={"aten::div"})
def test_div_broadcast_1():
    """div lowering with broadcasting: rhs matches the trailing dims of lhs."""

    def test_f(a, b):
        quotient = a.div(b)
        return quotient.div(quotient)

    lhs = torch.randn(8, 3, 4, 2)
    rhs = torch.randn(4, 2)

    jitVsGlow(test_f, lhs, rhs, expected_fused_ops={"aten::div"})
def test_div_broadcast_2():
    """div lowering with broadcasting: rhs has a size-1 leading dimension."""

    def test_f(a, b):
        quotient = a.div(b)
        return quotient.div(quotient)

    lhs = torch.randn(8, 3, 4, 2)
    rhs = torch.randn(1, 2)

    jitVsGlow(test_f, lhs, rhs, expected_fused_ops={"aten::div"})
def test_div_broadcast_3():
    """div lowering with broadcasting: the smaller operand is on the left."""

    def test_f(a, b):
        quotient = a.div(b)
        return quotient.div(quotient)

    lhs = torch.randn(4, 2)
    rhs = torch.randn(8, 3, 4, 2)

    jitVsGlow(test_f, lhs, rhs, expected_fused_ops={"aten::div"})
def test_div_float():
    """aten::div lowering when the divisor is a Python float scalar."""

    def test_f(a):
        squared = a * a
        return squared.div(3.9)

    inp = torch.randn(4)

    jitVsGlow(test_f, inp, expected_fused_ops={"aten::div"})
def test_div_int():
    """aten::div lowering when the divisor is a Python int scalar."""

    def test_f(a):
        squared = a * a
        return squared.div(20)

    inp = torch.randn(4)

    jitVsGlow(test_f, inp, expected_fused_ops={"aten::div"})
| 21.15
| 82
| 0.608156
| 286
| 1,692
| 3.437063
| 0.171329
| 0.085453
| 0.061038
| 0.09766
| 0.843337
| 0.841302
| 0.829095
| 0.817904
| 0.715158
| 0.658189
| 0
| 0.022656
| 0.243499
| 1,692
| 79
| 83
| 21.417722
| 0.745313
| 0.191489
| 0
| 0.609756
| 0
| 0
| 0.040389
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.292683
| false
| 0
| 0.073171
| 0.04878
| 0.512195
| 0.02439
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
7df7943af79cb02445f0dc521f91e9c7202e0e2d
| 21,714
|
py
|
Python
|
checkio/Alice In Wonderland/Multiplication Table/test_multiplication_table.py
|
KenMercusLai/checkio
|
c7702221e1bc0b0b30425859ffa6c09722949d65
|
[
"MIT"
] | 39
|
2015-02-09T13:24:12.000Z
|
2019-05-16T17:51:19.000Z
|
checkio/Alice In Wonderland/Multiplication Table/test_multiplication_table.py
|
KenMercusLai/checkio
|
c7702221e1bc0b0b30425859ffa6c09722949d65
|
[
"MIT"
] | 1
|
2019-10-21T16:18:14.000Z
|
2019-10-21T16:18:14.000Z
|
checkio/Alice In Wonderland/Multiplication Table/test_multiplication_table.py
|
KenMercusLai/checkio
|
c7702221e1bc0b0b30425859ffa6c09722949d65
|
[
"MIT"
] | 22
|
2015-01-30T18:00:05.000Z
|
2021-05-22T02:57:23.000Z
|
import unittest
from multiplication_table import checkio
class Tests(unittest.TestCase):
TESTS = {
"0. Basics": [
{
'input': [4, 6],
'answer': 38,
'explanation': [
[6, 19, 13],
[
['1', '1', '0'],
['1', '1', '1', '0', 6],
['0', '0', '0', '0', 0],
['0', '0', '0', '0', 0],
6,
],
[
['1', '1', '0'],
['1', '1', '1', '1', 7],
['0', '1', '1', '0', 6],
['0', '1', '1', '0', 6],
19,
],
[
['1', '1', '0'],
['1', '0', '0', '1', 1],
['0', '1', '1', '0', 6],
['0', '1', '1', '0', 6],
13,
],
],
},
{
'input': [2, 7],
'answer': 28,
'explanation': [
[7, 14, 7],
[
['1', '1', '1'],
['1', '1', '1', '1', 7],
['0', '0', '0', '0', 0],
7,
],
[
['1', '1', '1'],
['1', '1', '1', '1', 7],
['0', '1', '1', '1', 7],
14,
],
[
['1', '1', '1'],
['1', '0', '0', '0', 0],
['0', '1', '1', '1', 7],
7,
],
],
},
{
'input': [7, 2],
'answer': 18,
'explanation': [
[6, 9, 3],
[
['1', '0'],
['1', '1', '0', 2],
['1', '1', '0', 2],
['1', '1', '0', 2],
6,
],
[
['1', '0'],
['1', '1', '1', 3],
['1', '1', '1', 3],
['1', '1', '1', 3],
9,
],
[
['1', '0'],
['1', '0', '1', 1],
['1', '0', '1', 1],
['1', '0', '1', 1],
3,
],
],
},
],
"1. Extra": [
{
'answer': 2,
'explanation': [
[1, 1, 0],
[['1'], ['1', '1', 1], 1],
[['1'], ['1', '1', 1], 1],
[['1'], ['1', '0', 0], 0],
],
'input': [1, 1],
},
{
'answer': 10,
'explanation': [
[2, 5, 3],
[['1', '0'], ['1', '1', '0', 2], ['0', '0', '0', 0], 2],
[['1', '0'], ['1', '1', '1', 3], ['0', '1', '0', 2], 5],
[['1', '0'], ['1', '0', '1', 1], ['0', '1', '0', 2], 3],
],
'input': [2, 2],
},
{
'answer': 60,
'explanation': [
[18, 30, 12],
[
['1', '0', '0', '1'],
['1', '1', '0', '0', '1', 9],
['1', '1', '0', '0', '1', 9],
18,
],
[
['1', '0', '0', '1'],
['1', '1', '1', '1', '1', 15],
['1', '1', '1', '1', '1', 15],
30,
],
[
['1', '0', '0', '1'],
['1', '0', '1', '1', '0', 6],
['1', '0', '1', '1', '0', 6],
12,
],
],
'input': [3, 9],
},
{
'answer': 84,
'explanation': [
[9, 42, 33],
[
['1', '0', '0', '1'],
['1', '1', '0', '0', '1', 9],
['0', '0', '0', '0', '0', 0],
['0', '0', '0', '0', '0', 0],
['0', '0', '0', '0', '0', 0],
9,
],
[
['1', '0', '0', '1'],
['1', '1', '1', '1', '1', 15],
['0', '1', '0', '0', '1', 9],
['0', '1', '0', '0', '1', 9],
['0', '1', '0', '0', '1', 9],
42,
],
[
['1', '0', '0', '1'],
['1', '0', '1', '1', '0', 6],
['0', '1', '0', '0', '1', 9],
['0', '1', '0', '0', '1', 9],
['0', '1', '0', '0', '1', 9],
33,
],
],
'input': [8, 9],
},
{
'answer': 92,
'explanation': [
[16, 46, 30],
[
['1', '0', '0', '0'],
['1', '1', '0', '0', '0', 8],
['0', '0', '0', '0', '0', 0],
['0', '0', '0', '0', '0', 0],
['1', '1', '0', '0', '0', 8],
16,
],
[
['1', '0', '0', '0'],
['1', '1', '1', '1', '1', 15],
['0', '1', '0', '0', '0', 8],
['0', '1', '0', '0', '0', 8],
['1', '1', '1', '1', '1', 15],
46,
],
[
['1', '0', '0', '0'],
['1', '0', '1', '1', '1', 7],
['0', '1', '0', '0', '0', 8],
['0', '1', '0', '0', '0', 8],
['1', '0', '1', '1', '1', 7],
30,
],
],
'input': [9, 8],
},
{
'answer': 28,
'explanation': [
[8, 14, 6],
[
['1', '0', '0'],
['1', '1', '0', '0', 4],
['1', '1', '0', '0', 4],
8,
],
[
['1', '0', '0'],
['1', '1', '1', '1', 7],
['1', '1', '1', '1', 7],
14,
],
[
['1', '0', '0'],
['1', '0', '1', '1', 3],
['1', '0', '1', '1', 3],
6,
],
],
'input': [3, 4],
},
{
'answer': 36,
'explanation': [
[8, 18, 10],
[
['1', '0', '0'],
['1', '1', '0', '0', 4],
['0', '0', '0', '0', 0],
['1', '1', '0', '0', 4],
8,
],
[
['1', '0', '0'],
['1', '1', '1', '1', 7],
['0', '1', '0', '0', 4],
['1', '1', '1', '1', 7],
18,
],
[
['1', '0', '0'],
['1', '0', '1', '1', 3],
['0', '1', '0', '0', 4],
['1', '0', '1', '1', 3],
10,
],
],
'input': [5, 4],
},
{
'answer': 90,
'explanation': [
[24, 45, 21],
[
['1', '0', '0', '0'],
['1', '1', '0', '0', '0', 8],
['1', '1', '0', '0', '0', 8],
['1', '1', '0', '0', '0', 8],
24,
],
[
['1', '0', '0', '0'],
['1', '1', '1', '1', '1', 15],
['1', '1', '1', '1', '1', 15],
['1', '1', '1', '1', '1', 15],
45,
],
[
['1', '0', '0', '0'],
['1', '0', '1', '1', '1', 7],
['1', '0', '1', '1', '1', 7],
['1', '0', '1', '1', '1', 7],
21,
],
],
'input': [7, 8],
},
{
'answer': 12,
'explanation': [
[4, 6, 2],
[['1', '0'], ['1', '1', '0', 2], ['1', '1', '0', 2], 4],
[['1', '0'], ['1', '1', '1', 3], ['1', '1', '1', 3], 6],
[['1', '0'], ['1', '0', '1', 1], ['1', '0', '1', 1], 2],
],
'input': [3, 2],
},
],
"2. Extra": [
{
'explanation': [
[4, 10, 6],
[
['1', '0'],
['1', '1', '0', 2],
['0', '0', '0', 0],
['1', '1', '0', 2],
['0', '0', '0', 0],
4,
],
[
['1', '0'],
['1', '1', '1', 3],
['0', '1', '0', 2],
['1', '1', '1', 3],
['0', '1', '0', 2],
10,
],
[
['1', '0'],
['1', '0', '1', 1],
['0', '1', '0', 2],
['1', '0', '1', 1],
['0', '1', '0', 2],
6,
],
],
'answer': 20,
'input': [10, 2],
},
{
'explanation': [
[10, 25, 15],
[
['1', '0', '1', '0'],
['1', '1', '0', '1', '0', 10],
['0', '0', '0', '0', '0', 0],
10,
],
[
['1', '0', '1', '0'],
['1', '1', '1', '1', '1', 15],
['0', '1', '0', '1', '0', 10],
25,
],
[
['1', '0', '1', '0'],
['1', '0', '1', '0', '1', 5],
['0', '1', '0', '1', '0', 10],
15,
],
],
'answer': 50,
'input': [2, 10],
},
{
'explanation': [
[36, 57, 21],
[
['1', '1', '0', '0'],
['1', '1', '1', '0', '0', 12],
['0', '0', '0', '0', '0', 0],
['1', '1', '1', '0', '0', 12],
['1', '1', '1', '0', '0', 12],
36,
],
[
['1', '1', '0', '0'],
['1', '1', '1', '1', '1', 15],
['0', '1', '1', '0', '0', 12],
['1', '1', '1', '1', '1', 15],
['1', '1', '1', '1', '1', 15],
57,
],
[
['1', '1', '0', '0'],
['1', '0', '0', '1', '1', 3],
['0', '1', '1', '0', '0', 12],
['1', '0', '0', '1', '1', 3],
['1', '0', '0', '1', '1', 3],
21,
],
],
'answer': 114,
'input': [11, 12],
},
{
'explanation': [
[4, 23, 19],
[
['1', '0', '0'],
['1', '1', '0', '0', 4],
['0', '0', '0', '0', 0],
['0', '0', '0', '0', 0],
['0', '0', '0', '0', 0],
['0', '0', '0', '0', 0],
4,
],
[
['1', '0', '0'],
['1', '1', '1', '1', 7],
['0', '1', '0', '0', 4],
['0', '1', '0', '0', 4],
['0', '1', '0', '0', 4],
['0', '1', '0', '0', 4],
23,
],
[
['1', '0', '0'],
['1', '0', '1', '1', 3],
['0', '1', '0', '0', 4],
['0', '1', '0', '0', 4],
['0', '1', '0', '0', 4],
['0', '1', '0', '0', 4],
19,
],
],
'answer': 46,
'input': [16, 4],
},
{
'explanation': [
[8, 30, 22],
[
['1', '0', '0'],
['1', '1', '0', '0', 4],
['0', '0', '0', '0', 0],
['0', '0', '0', '0', 0],
['0', '0', '0', '0', 0],
['0', '0', '0', '0', 0],
['1', '1', '0', '0', 4],
8,
],
[
['1', '0', '0'],
['1', '1', '1', '1', 7],
['0', '1', '0', '0', 4],
['0', '1', '0', '0', 4],
['0', '1', '0', '0', 4],
['0', '1', '0', '0', 4],
['1', '1', '1', '1', 7],
30,
],
[
['1', '0', '0'],
['1', '0', '1', '1', 3],
['0', '1', '0', '0', 4],
['0', '1', '0', '0', 4],
['0', '1', '0', '0', 4],
['0', '1', '0', '0', 4],
['1', '0', '1', '1', 3],
22,
],
],
'answer': 60,
'input': [33, 4],
},
{
'explanation': [
[32, 94, 62],
[
['1', '0', '0', '0', '0'],
['1', '1', '0', '0', '0', '0', 16],
['1', '1', '0', '0', '0', '0', 16],
['0', '0', '0', '0', '0', '0', 0],
['0', '0', '0', '0', '0', '0', 0],
32,
],
[
['1', '0', '0', '0', '0'],
['1', '1', '1', '1', '1', '1', 31],
['1', '1', '1', '1', '1', '1', 31],
['0', '1', '0', '0', '0', '0', 16],
['0', '1', '0', '0', '0', '0', 16],
94,
],
[
['1', '0', '0', '0', '0'],
['1', '0', '1', '1', '1', '1', 15],
['1', '0', '1', '1', '1', '1', 15],
['0', '1', '0', '0', '0', '0', 16],
['0', '1', '0', '0', '0', '0', 16],
62,
],
],
'answer': 188,
'input': [12, 16],
},
{
'explanation': [
[30, 75, 45],
[
['1', '1', '1', '1'],
['1', '1', '1', '1', '1', 15],
['0', '0', '0', '0', '0', 0],
['0', '0', '0', '0', '0', 0],
['1', '1', '1', '1', '1', 15],
['0', '0', '0', '0', '0', 0],
30,
],
[
['1', '1', '1', '1'],
['1', '1', '1', '1', '1', 15],
['0', '1', '1', '1', '1', 15],
['0', '1', '1', '1', '1', 15],
['1', '1', '1', '1', '1', 15],
['0', '1', '1', '1', '1', 15],
75,
],
[
['1', '1', '1', '1'],
['1', '0', '0', '0', '0', 0],
['0', '1', '1', '1', '1', 15],
['0', '1', '1', '1', '1', 15],
['1', '0', '0', '0', '0', 0],
['0', '1', '1', '1', '1', 15],
45,
],
],
'answer': 150,
'input': [18, 15],
},
{
'explanation': [
[16, 95, 79],
[
['1', '0', '0', '0', '0'],
['1', '1', '0', '0', '0', '0', 16],
['0', '0', '0', '0', '0', '0', 0],
['0', '0', '0', '0', '0', '0', 0],
['0', '0', '0', '0', '0', '0', 0],
['0', '0', '0', '0', '0', '0', 0],
16,
],
[
['1', '0', '0', '0', '0'],
['1', '1', '1', '1', '1', '1', 31],
['0', '1', '0', '0', '0', '0', 16],
['0', '1', '0', '0', '0', '0', 16],
['0', '1', '0', '0', '0', '0', 16],
['0', '1', '0', '0', '0', '0', 16],
95,
],
[
['1', '0', '0', '0', '0'],
['1', '0', '1', '1', '1', '1', 15],
['0', '1', '0', '0', '0', '0', 16],
['0', '1', '0', '0', '0', '0', 16],
['0', '1', '0', '0', '0', '0', 16],
['0', '1', '0', '0', '0', '0', 16],
79,
],
],
'answer': 190,
'input': [16, 16],
},
{
'explanation': [
[3, 21, 18],
[
['1', '1'],
['1', '1', '1', 3],
['0', '0', '0', 0],
['0', '0', '0', 0],
['0', '0', '0', 0],
['0', '0', '0', 0],
['0', '0', '0', 0],
['0', '0', '0', 0],
3,
],
[
['1', '1'],
['1', '1', '1', 3],
['0', '1', '1', 3],
['0', '1', '1', 3],
['0', '1', '1', 3],
['0', '1', '1', 3],
['0', '1', '1', 3],
['0', '1', '1', 3],
21,
],
[
['1', '1'],
['1', '0', '0', 0],
['0', '1', '1', 3],
['0', '1', '1', 3],
['0', '1', '1', 3],
['0', '1', '1', 3],
['0', '1', '1', 3],
['0', '1', '1', 3],
18,
],
],
'answer': 42,
'input': [64, 3],
},
],
}
    def test_Basics(self):
        """Run checkio on each '0. Basics' case and compare to its 'answer'."""
        # Only 'input' and 'answer' are consumed; 'explanation' is auxiliary
        # fixture data.
        for i in self.TESTS['0. Basics']:
            assert checkio(*i['input']) == i['answer']
    def test_Extra(self):
        """Run checkio on each '1. Extra' case and compare to its 'answer'."""
        for i in self.TESTS['1. Extra']:
            assert checkio(*i['input']) == i['answer']
def test_Extra2(self):
for i in self.TESTS['2. Extra']:
assert checkio(*i['input']) == i['answer']
# Allow running this test module directly with the stdlib unittest runner.
if __name__ == "__main__": # pragma: no cover
    unittest.main()
| 35.772652
| 76
| 0.115962
| 1,691
| 21,714
| 1.481963
| 0.046718
| 0.284916
| 0.281325
| 0.282522
| 0.648444
| 0.634876
| 0.579409
| 0.543097
| 0.480846
| 0.464485
| 0
| 0.226693
| 0.649765
| 21,714
| 606
| 77
| 35.831683
| 0.102827
| 0.000737
| 0
| 0.662207
| 0
| 0
| 0.078171
| 0
| 0
| 0
| 0
| 0
| 0.005017
| 1
| 0.005017
| false
| 0
| 0.003344
| 0
| 0.011706
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7dfc7898af17c856b75b9ec824a9bce4641b6419
| 1,006
|
py
|
Python
|
tests/virtual_env_test.py
|
zzamboni/pure-x
|
5a61355b907cd20e6d33a38fb36feb374548d25a
|
[
"MIT"
] | 3
|
2021-01-22T11:49:39.000Z
|
2022-01-20T05:33:58.000Z
|
tests/virtual_env_test.py
|
edouard-lopez/pure
|
5a61355b907cd20e6d33a38fb36feb374548d25a
|
[
"MIT"
] | 17
|
2019-02-26T09:02:13.000Z
|
2020-04-18T14:18:10.000Z
|
tests/virtual_env_test.py
|
edouard-lopez/pure
|
5a61355b907cd20e6d33a38fb36feb374548d25a
|
[
"MIT"
] | null | null | null |
import os
from pure import virtual_env, colors, constants
def test_virtual_env_raw_name_is_empty_when_deactivated():
    """With no VIRTUAL_ENV set, raw() must report constants.NOTHING."""
    os.unsetenv('VIRTUAL_ENV')
    # unsetenv does not touch the os.environ mapping, so drop the key there
    # too (it is present when the test suite itself runs inside a virtualenv).
    os.environ.pop('VIRTUAL_ENV', None)
    assert virtual_env.raw() == constants.NOTHING
def test_virtual_env_segment_text_is_empty_when_deactivated():
    """With no VIRTUAL_ENV set, segment() yields empty text in the 'mute' style."""
    os.unsetenv('VIRTUAL_ENV')
    # unsetenv does not touch the os.environ mapping, so drop the key there too.
    os.environ.pop('VIRTUAL_ENV', None)
    colors.load_theme()
    expected = {'text': '', 'style': colors.style('mute')}
    assert virtual_env.segment() == expected
def test_virtual_env_raw_name_is_empty_when_activated():
    # NOTE(review): the name says "is_empty" but the assertion expects the
    # last path component of VIRTUAL_ENV — looks like a copy/paste slip from
    # the deactivated tests; consider renaming to "..._is_env_name...".
    os.environ['VIRTUAL_ENV'] = '/path/to/virtual/env'
    assert virtual_env.raw() == 'env'
def test_virtual_env_segment_text_is_empty_when_activated():
    # NOTE(review): misnamed — it asserts that segment() carries the env name
    # ('env'), not empty text; consider renaming.
    os.environ['VIRTUAL_ENV'] = '/path/to/virtual/env'
    colors.load_theme()
    assert virtual_env.segment() == {'text': 'env', 'style': colors.style('mute')}
| 29.588235
| 82
| 0.719682
| 143
| 1,006
| 4.734266
| 0.251748
| 0.28065
| 0.082718
| 0.100443
| 0.838996
| 0.838996
| 0.782866
| 0.782866
| 0.782866
| 0.629247
| 0
| 0
| 0.152087
| 1,006
| 33
| 83
| 30.484848
| 0.793669
| 0.068588
| 0
| 0.5
| 0
| 0
| 0.171306
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.2
| true
| 0
| 0.1
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b4042b305d90c76f36043398522d900348880bdb
| 725
|
py
|
Python
|
Old/gym_test.py
|
Alden-G878/AI_Traintime
|
3d405952bd2c30171358219591f806efcb157d49
|
[
"MIT"
] | null | null | null |
Old/gym_test.py
|
Alden-G878/AI_Traintime
|
3d405952bd2c30171358219591f806efcb157d49
|
[
"MIT"
] | null | null | null |
Old/gym_test.py
|
Alden-G878/AI_Traintime
|
3d405952bd2c30171358219591f806efcb157d49
|
[
"MIT"
] | null | null | null |
import gym

# Print the observation and action spaces of several classic-control
# environments so their shapes can be compared side by side.
for env_id in ('Acrobot-v1', 'Pendulum-v1', 'CartPole-v1', 'MountainCar-v0'):
    env = gym.make(env_id)
    # CONSISTENCY FIX: the original omitted the trailing colon for
    # MountainCar-v0 only; all labels now share one format.
    print(env_id + ':')
    print(env.observation_space)
    print(env.action_space)

# Kept for reference: interactive demo that renders random actions.
'''
env.reset()
for _ in range(1000):
    env.render()
    env.step(env.action_space.sample()) # take a random action
env.close()
env = gym.make('Pendulum-v0')
env.reset()
for _ in range(1000):
    env.render()
    env.step(env.action_space.sample()) # take a random action
env.close()
'''
| 22.65625
| 62
| 0.721379
| 111
| 725
| 4.603604
| 0.234234
| 0.125245
| 0.164384
| 0.187867
| 0.731898
| 0.731898
| 0.731898
| 0.731898
| 0.731898
| 0.64775
| 0
| 0.026114
| 0.102069
| 725
| 31
| 63
| 23.387097
| 0.758833
| 0
| 0
| 0.470588
| 0
| 0
| 0.218391
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.058824
| 0
| 0.058824
| 0.705882
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
b40e29006e933ddfd5de8a7b6ac4849509a81ca4
| 386
|
py
|
Python
|
calibre_access/__init__.py
|
Laharah/calibre-access
|
256aa858fc598f8f60e7eae93cfbed3eb0d0350f
|
[
"MIT"
] | 5
|
2019-09-21T06:48:50.000Z
|
2020-11-29T02:37:57.000Z
|
calibre_access/__init__.py
|
Laharah/calibre-access
|
256aa858fc598f8f60e7eae93cfbed3eb0d0350f
|
[
"MIT"
] | 1
|
2018-11-05T05:53:43.000Z
|
2018-11-05T05:53:43.000Z
|
calibre_access/__init__.py
|
Laharah/calibre-access
|
256aa858fc598f8f60e7eae93cfbed3eb0d0350f
|
[
"MIT"
] | 1
|
2021-04-07T04:43:57.000Z
|
2021-04-07T04:43:57.000Z
|
from .calibre_access import (print_record, calibre_downloads, calibre_searches,
                             all_records, download_coro, search_coro, download_database,
                             locate_logs, get_database)

# BUG FIX: __all__ must contain the *names* of the public API as strings.
# The original listed the imported objects themselves, which makes
# ``from calibre_access import *`` raise TypeError ("must be str").
__all__ = ('print_record', 'calibre_downloads', 'calibre_searches',
           'all_records', 'download_coro', 'search_coro', 'download_database',
           'locate_logs', 'get_database')
| 55.142857
| 89
| 0.702073
| 41
| 386
| 6.04878
| 0.414634
| 0.08871
| 0.145161
| 0.217742
| 0.895161
| 0.895161
| 0.895161
| 0.895161
| 0.895161
| 0.895161
| 0
| 0
| 0.240933
| 386
| 6
| 90
| 64.333333
| 0.846416
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0.4
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b4336fe9100482bc0e6fc2b62ffcdc9dc6b516f5
| 68
|
py
|
Python
|
Chapter 01/Chap01_Example1.44.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
Chapter 01/Chap01_Example1.44.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
Chapter 01/Chap01_Example1.44.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
# Escape-sequence demo: each '\\' pair in the literal below prints as a single
# backslash, so the output reads:  \\' \\" \\t \\n \\\\
print("\\\\\' \\\\\" \\\\t \\\\n \\\\\\\\")
| 22.666667
| 43
| 0.132353
| 5
| 68
| 1.8
| 0.6
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 68
| 2
| 44
| 34
| 0.160714
| 0.308824
| 0
| 0
| 0
| 0
| 0.266667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
b436eee7d92b793d256a7de170ce9782651cf392
| 15,883
|
py
|
Python
|
tools/cardiac_py/experiments/five.py
|
paulkefer/cardioid
|
59c07b714d8b066b4f84eb50487c36f6eadf634c
|
[
"MIT-0",
"MIT"
] | 33
|
2018-12-12T20:05:06.000Z
|
2021-09-26T13:30:16.000Z
|
tools/cardiac_py/experiments/five.py
|
paulkefer/cardioid
|
59c07b714d8b066b4f84eb50487c36f6eadf634c
|
[
"MIT-0",
"MIT"
] | 5
|
2019-04-25T11:34:43.000Z
|
2021-11-14T04:35:37.000Z
|
tools/cardiac_py/experiments/five.py
|
paulkefer/cardioid
|
59c07b714d8b066b4f84eb50487c36f6eadf634c
|
[
"MIT-0",
"MIT"
] | 15
|
2018-12-21T22:44:59.000Z
|
2021-08-29T10:30:25.000Z
|
'''
Created on Sep 24, 2012
@author: butler
'''
import os

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as lines
import matplotlib.colors as color
from matplotlib import rc
from matplotlib import font_manager
from matplotlib.mlab import griddata
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import LinearLocator
from matplotlib.transforms import Bbox
import scipy.interpolate as interpolate

import plot_schema
class PlotECGs():
    """Load electrode traces for a 'normal' and a 'modified' cardiac run and
    plot or overlay the derived ECG signals.

    Data layout: ``<directory>/<run_dir>/<electrode_file>``, where each
    electrode file is a single column of potentials sampled every
    ``t_gap_ms`` milliseconds (read with numpy.loadtxt).
    """

    def __init__(self, directory, full=True):
        """
        Constructor.

        :param directory: root directory holding the run subdirectories.
        :param full: True -> full 8-electrode set, False -> 2 electrodes.
        """
        self.t_gap_ms = 5.0  # sample spacing of the electrode traces (ms)
        self.directory = directory
        self.normal_dir_name = "run_14"
        self.modified_dir_name = 'run_15'
        self.normal_legend = '$I_{Kr} = 0.153$'
        # NOTE(review): identical to normal_legend — looks like a copy/paste
        # slip; confirm the intended modified-case label.
        self.modified_legend = '$I_{Kr} = 0.153$'
        self.full = full
        if self.full:
            self.electrodes = ['electrode#000448302', 'electrode#000451300',
                               'electrode#000452730', 'electrode#000453393',
                               'electrode#000457525', 'electrode#000458894',
                               'electrode#000438028', 'electrode#000460291']
        else:
            self.electrodes = ['electrode#000094150', 'electrode#000092294']
        self.period_ms = 1000  # length of one beat; used to slice the final beat
        self.template = plot_schema.PlotSchema()
        self.template.set_fontsize(16)
        self.fix_axes = True
        # BUG FIX: the original referenced ``matplotlib.font_manager`` but the
        # bare ``matplotlib`` name was never imported (NameError at
        # construction). ``font_manager`` is now imported at module level.
        self.lfont = font_manager.FontProperties(size=(0.75 * self.template.fontsize))

    def load_data(self):
        """Load both cases and, when fix_axes is set, compute shared y-limits."""
        n_data_path = self.directory + '/' + self.normal_dir_name
        self.n_data = self.load_case_data(n_data_path)
        m_data_path = self.directory + '/' + self.modified_dir_name
        self.m_data = self.load_case_data(m_data_path)
        if self.fix_axes:
            # Presume the normal case gives a representative max/min.
            e_max = np.max(self.n_data[:, :])
            e_min = np.min(self.n_data[:, :])
            if self.full:
                l1_max = np.max(self.n_data[:, 7] - self.n_data[:, 6])
                l1_min = np.min(self.n_data[:, 7] - self.n_data[:, 6])
            else:
                l1_max = 0.0
                # BUG FIX: the original assigned ``l2_min = 0.0`` here,
                # leaving l1_min undefined (NameError two lines below).
                l1_min = 0.0
            self.e_max = max(e_max, l1_max)
            self.e_min = min(e_min, l1_min)

    def load_case_data(self, case_path):
        """Read every electrode file under ``case_path``.

        :returns: array of shape (n_times, n_electrodes); all traces are
                  assumed to have the same length as the first one.
        """
        n_electrodes = len(self.electrodes)
        for ii in range(n_electrodes):
            electrode_path = case_path + '/' + self.electrodes[ii]
            temp_data = np.loadtxt(electrode_path)
            if ii == 0:
                # Size the output array from the first trace.
                n_times = temp_data.size
                data = np.zeros((n_times, n_electrodes))
            data[:, ii] = temp_data[:]
        return data

    def set_ECG_type(self, ECG_lead, flipper=-1):
        """SETUP plot vectors for each of the different ECG types.

        :param ECG_lead: 0 -> lead I (difference of the two limb electrodes);
                         1..N -> the raw trace of that electrode column.
        :param flipper: +/-1 sign applied to the trace.
        """
        if self.full:
            lead_1 = 7
            lead_2 = 8
        else:
            lead_1 = 1
            lead_2 = 2
        ECG_type = ECG_lead - 1
        assert((len(self.electrodes) >= ECG_type))
        n_points = self.n_data[:, 0].size
        # Build the time axis in seconds.
        max_time = self.t_gap_ms / 1000 * n_points
        self.time = np.linspace(0, max_time, n_points)
        if ECG_type == -1:
            col_1 = lead_1 - 1
            col_2 = lead_2 - 1
            self.n_ECG_data = flipper * (self.n_data[:, col_1] - self.n_data[:, col_2])
            self.m_ECG_data = flipper * (self.m_data[:, col_1] - self.m_data[:, col_2])
            self.y_label = r'$ \Delta V$'
        else:
            self.n_ECG_data = flipper * self.n_data[:, ECG_type]
            self.m_ECG_data = flipper * self.m_data[:, ECG_type]
            self.y_label = r'$ V$'

    # --- shared plotting plumbing (extracted from four near-identical methods) ---

    def _final_beat_indices(self):
        """(start, end) indices covering the last ``period_ms`` of the trace."""
        index_end = self.time.size - 1
        # BUG FIX: period_ms / t_gap_ms is a float and float slice indices are
        # rejected by modern numpy — truncate to int as the old code intended.
        index_start = int(self.time.size - 1 - (self.period_ms / self.t_gap_ms))
        return index_start, index_end

    def _new_figure(self):
        """Create a schema-styled figure; return (figure, axes)."""
        self.template.apply_fontsettings(plt)
        f = plt.figure()
        self.template.apply_figuresize_settings(f)
        return f, plt.axes()

    def _finish_figure(self, f, axes, save_file, legend=False):
        """Label, optionally legend, resize tick fonts, save under
        ``self.directory`` and show the figure."""
        plt.xlabel('$t (s)$', fontsize=self.template.get_fontsize())
        plt.ylabel(self.y_label, fontsize=self.template.get_fontsize(),
                   rotation='horizontal')
        if legend:
            plt.legend(prop=self.lfont)
        self.template.apply_figuresize_settings(f)
        for x_ticl_i in axes.get_xticklabels():
            x_ticl_i.set_fontsize(self.template.get_fontsize())
        for y_ticl_i in axes.get_yticklabels():
            y_ticl_i.set_fontsize(self.template.get_fontsize())
        save_loc = self.directory + '/' + save_file
        plt.savefig(save_loc, dpi=100, bbox_inches='tight')
        plt.show()

    def plot_normal_ECG_final(self, save_file):
        """Plot the last beat of the normal-case ECG."""
        index_start, index_end = self._final_beat_indices()
        f, axes = self._new_figure()
        plt.plot(self.time[index_start:index_end],
                 self.n_ECG_data[index_start:index_end])
        self._finish_figure(f, axes, save_file)

    def plot_normal_ECG_full(self, save_file):
        """Plot the full normal-case ECG."""
        f, axes = self._new_figure()
        plt.plot(self.time, self.n_ECG_data)
        self._finish_figure(f, axes, save_file)

    def plot_modified_ECG_full(self, save_file):
        """Plot the full modified-case ECG."""
        f, axes = self._new_figure()
        plt.plot(self.time, self.m_ECG_data)
        self._finish_figure(f, axes, save_file)

    def plot_modified_ECG_final(self, save_file):
        """Plot the last beat of the modified-case ECG."""
        index_start, index_end = self._final_beat_indices()
        f, axes = self._new_figure()
        plt.plot(self.time[index_start:index_end],
                 self.m_ECG_data[index_start:index_end])
        self._finish_figure(f, axes, save_file)

    def overlay_ECG_full(self, save_file):
        """Overlay the full normal and modified traces (x axis is sample index)."""
        f, axes = self._new_figure()
        plt.plot(self.n_ECG_data, label=self.normal_legend)
        plt.plot(self.m_ECG_data, label=self.modified_legend)
        self._finish_figure(f, axes, save_file, legend=True)

    def overlay_ECG_rapid(self, save_file, colour='black', l1=False):
        """Frameless overlay of the final beat for both cases (figure panels).

        :param colour: line colour used for both traces.
        :param l1: whether to show the x/y axes (True for the lead-I panel).
        """
        index_start, index_end = self._final_beat_indices()
        f, axes = self._new_figure()
        if self.fix_axes:
            axes.set_ylim((self.e_min, self.e_max))
            axes.set_xlim((self.time[index_start]), self.time[index_end])
        axes.set_frame_on(False)
        plt.plot(self.time[index_start:index_end],
                 self.m_ECG_data[index_start:index_end],
                 label=self.modified_legend, color=colour, linestyle="--",
                 linewidth=2)
        plt.plot(self.time[index_start:index_end],
                 self.n_ECG_data[index_start:index_end],
                 label=self.normal_legend, color=colour, linestyle="-",
                 linewidth=2)
        axes.get_xaxis().set_visible(l1)
        axes.get_yaxis().set_visible(l1)
        self.template.apply_figuresize_settings(f)
        save_loc = self.directory + '/' + save_file
        plt.savefig(save_loc, dpi=100, bbox_inches='tight', transparent=True)

    def plot_for_rapid_fig_4(self):
        """Emit the panel set (lead I plus V1-V6) used for rapid figure 4."""
        self.set_ECG_type(0, -1)
        self.overlay_ECG_rapid('lead_1.eps', 'black', False)
        self.set_ECG_type(1, 1)
        self.overlay_ECG_rapid('V1.eps', 'red')
        self.set_ECG_type(2, 1)
        self.overlay_ECG_rapid('V2.eps', 'green')
        self.set_ECG_type(3, 1)
        self.overlay_ECG_rapid('V3.eps', 'brown')
        self.set_ECG_type(4, 1)
        self.overlay_ECG_rapid('V4.eps', 'blue')
        self.set_ECG_type(5, 1)
        self.overlay_ECG_rapid('V5.eps', 'orange')
        self.set_ECG_type(6, 1)
        self.overlay_ECG_rapid('V6.eps', 'purple')
class Plot_G_NaL_Changes():
    """Overlay lead-I ECG traces for a sweep of g_NaL conductance values.

    Each case lives in ``<directory>/<g_NaL_dir>/<case_type_dir_name>`` and
    contains one file per electrode (see load_case_data).
    """

    def __init__(self, directory, full=True):
        """
        Constructor.

        :param directory: root directory holding one subdirectory per case.
        :param full: True -> full 8-electrode set, False -> 2 electrodes.
        """
        self.t_gap_ms = 5.0  # sample spacing of the electrode traces (ms)
        self.directory = directory
        # (Earlier sweep configurations removed; see version control history.)
        self.g_NaL_dirs = ['g_NaL_0_15_a', 'g_NaL_0_20_a', 'g_NaL_0_25_a',
                           'g_NaL_0_275', 'g_NaL_0_29_a', 'g_NaL_0_2925',
                           'g_NaL_0_3_a']
        self.legends = ['$g_{Nal}^{new} = 0.15$', '$g_{Nal}^{new} = 0.20$',
                        '$g_{Nal}^{new} = 0.25$', '$g_{Nal}^{new} = 0.2725$',
                        '$g_{Nal}^{new} = 0.29$', '$g_{Nal}^{new} = 0.2925$',
                        '$g_{Nal}^{new} = 0.30$']
        self.case_type_dir_name = "normal_ikr/full"
        self.full = full
        if self.full:
            self.electrodes = ['electrode#000448302', 'electrode#000451300',
                               'electrode#000452730', 'electrode#000453393',
                               'electrode#000457525', 'electrode#000458894',
                               'electrode#000438028', 'electrode#000460291']
        else:
            self.electrodes = ['electrode#000094150', 'electrode#000092294']
        self.period_ms = 1000
        self.template = plot_schema.PlotSchema()
        # BUG FIX: overlay_ECG_full/_final pass self.lfont to plt.legend but
        # it was never set in this class (AttributeError at plot time).
        # Mirrors PlotECGs; assumes PlotSchema exposes a default fontsize —
        # TODO confirm.
        self.lfont = font_manager.FontProperties(size=(0.75 * self.template.fontsize))

    def load_data(self):
        """Load every case into ``self.ECG_data`` with shape
        (n_cases, n_times + 5, n_electrodes); the +5 rows of zero padding
        match the original allocation."""
        n_cases = len(self.g_NaL_dirs)
        self.ECG_data = np.zeros((0))
        # BUG FIX: Python 2 print statements converted to print() calls
        # (single-argument form, compatible with both interpreters).
        for ii, g_NaL_dir in enumerate(self.g_NaL_dirs):
            print(g_NaL_dir)
            data_path = self.directory + os.sep + g_NaL_dir + os.sep + self.case_type_dir_name
            case_data = self.load_case_data(data_path)
            (n_x, n_y) = case_data.shape
            print(case_data.shape)
            if self.ECG_data.size == 0:
                # Allocate on the first case, sized from its trace length.
                self.ECG_data = np.zeros((n_cases, n_x + 5, n_y))
                print('shape changed')
            self.ECG_data[ii, 0:n_x, :] = case_data[:, :]

    def load_case_data(self, case_path):
        """Read every electrode file under ``case_path``.

        :returns: array of shape (n_times, n_electrodes); all traces are
                  assumed to have the same length as the first one.
        """
        n_electrodes = len(self.electrodes)
        for ii in range(n_electrodes):
            electrode_path = case_path + '/' + self.electrodes[ii]
            temp_data = np.loadtxt(electrode_path)
            if ii == 0:
                n_times = temp_data.size
                data = np.zeros((n_times, n_electrodes))
            data[:, ii] = temp_data[:]
        return data

    def set_ECG_type(self, ECG_lead, flipper=1):
        """SETUP plot vectors; only lead I (``ECG_lead == 0``) is implemented.

        :param flipper: +/-1 sign applied to the traces.
        """
        if self.full:
            lead_1 = 7
            lead_2 = 8
        else:
            lead_1 = 1
            lead_2 = 2
        ECG_type = ECG_lead - 1
        assert((len(self.electrodes) >= ECG_type))
        n_points = self.ECG_data[0, :, 0].size
        # Build the time axis in seconds.
        max_time = self.t_gap_ms / 1000 * n_points
        self.time = np.linspace(0, max_time, n_points)
        self.ECG_plot_data = np.zeros((len(self.g_NaL_dirs), n_points))
        if ECG_type == -1:
            col_1 = lead_1 - 1
            col_2 = lead_2 - 1
            for ii in range(len(self.g_NaL_dirs)):
                self.ECG_plot_data[ii, :] = flipper * (self.ECG_data[ii, :, col_1]
                                                       - self.ECG_data[ii, :, col_2])
            self.y_label = r'$ \Delta V$'
        else:
            # BUG FIX: the original hit ``assert(1 == -1)`` and then dead code
            # referencing the non-existent self.n_data; fail explicitly
            # (asserts vanish under python -O).
            raise NotImplementedError('only lead I (ECG_lead == 0) is implemented')

    def overlay_ECG_full(self, save_file):
        """Overlay the full time series of every case and save to save_file."""
        self.template.apply_fontsettings(plt)
        f = plt.figure()
        self.template.apply_figuresize_settings(f)
        axes = plt.axes()
        for ii in range(len(self.g_NaL_dirs)):
            plt.plot(self.time, self.ECG_plot_data[ii, :], label=self.legends[ii])
        plt.xlabel('$t (s)$', fontsize=self.template.get_fontsize())
        plt.ylabel(self.y_label, fontsize=self.template.get_fontsize(),
                   rotation='horizontal')
        plt.legend(prop=self.lfont)
        self.template.apply_figuresize_settings(f)
        for x_ticl_i in axes.get_xticklabels():
            x_ticl_i.set_fontsize(self.template.get_fontsize())
        for y_ticl_i in axes.get_yticklabels():
            y_ticl_i.set_fontsize(self.template.get_fontsize())
        save_loc = self.directory + '/' + save_file
        plt.savefig(save_loc, dpi=100, bbox_inches='tight')
        plt.show()

    def overlay_ECG_final(self, save_file):
        """Overlay only the last ``period_ms`` beat of every case."""
        index_end = self.time.size - 1
        # BUG FIX: period_ms / t_gap_ms is a float and float slice indices
        # are rejected by modern numpy — truncate to int.
        index_start = int(self.time.size - 1 - (self.period_ms / self.t_gap_ms))
        self.template.apply_fontsettings(plt)
        f = plt.figure()
        self.template.apply_figuresize_settings(f)
        axes = plt.axes()
        for ii in range(len(self.g_NaL_dirs)):
            plt.plot(self.time[index_start:index_end],
                     self.ECG_plot_data[ii, index_start:index_end],
                     label=self.legends[ii])
        plt.xlabel('$t (s)$', fontsize=self.template.get_fontsize())
        plt.ylabel(self.y_label, fontsize=self.template.get_fontsize(),
                   rotation='horizontal')
        plt.legend(prop=self.lfont)
        self.template.apply_figuresize_settings(f)
        for x_ticl_i in axes.get_xticklabels():
            x_ticl_i.set_fontsize(self.template.get_fontsize())
        for y_ticl_i in axes.get_yticklabels():
            y_ticl_i.set_fontsize(self.template.get_fontsize())
        save_loc = self.directory + '/' + save_file
        # NOTE: no plt.show() here, matching the original.
        plt.savefig(save_loc, dpi=100, bbox_inches='tight')
| 44.740845
| 225
| 0.612731
| 2,269
| 15,883
| 3.997796
| 0.098281
| 0.074082
| 0.061735
| 0.070995
| 0.814905
| 0.760004
| 0.739279
| 0.738397
| 0.725168
| 0.711498
| 0
| 0.04093
| 0.252408
| 15,883
| 355
| 226
| 44.740845
| 0.723008
| 0.04445
| 0
| 0.67893
| 0
| 0
| 0.066043
| 0
| 0
| 0
| 0
| 0
| 0.010033
| 0
| null | null | 0
| 0.040134
| null | null | 0.010033
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b440b5d4ca9774ceee657784ddfc1d0b226d546a
| 69
|
py
|
Python
|
lang/py/cookbook/v2/source/cb2_3_8_sol_1.py
|
ch1huizong/learning
|
632267634a9fd84a5f5116de09ff1e2681a6cc85
|
[
"MIT"
] | null | null | null |
lang/py/cookbook/v2/source/cb2_3_8_sol_1.py
|
ch1huizong/learning
|
632267634a9fd84a5f5116de09ff1e2681a6cc85
|
[
"MIT"
] | null | null | null |
lang/py/cookbook/v2/source/cb2_3_8_sol_1.py
|
ch1huizong/learning
|
632267634a9fd84a5f5116de09ff1e2681a6cc85
|
[
"MIT"
] | null | null | null |
import time
def is_dst():
return bool(time.localtime().tm_isdst)
| 17.25
| 42
| 0.724638
| 11
| 69
| 4.363636
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144928
| 69
| 3
| 43
| 23
| 0.813559
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
b45e680e7d87244149057c64cdb35fe66f83001b
| 7,440
|
py
|
Python
|
src/models/model_cifar.py
|
itsreddy/Autoencoder
|
59cb5423aad183ac13198c2afa527437786111d8
|
[
"Apache-2.0"
] | null | null | null |
src/models/model_cifar.py
|
itsreddy/Autoencoder
|
59cb5423aad183ac13198c2afa527437786111d8
|
[
"Apache-2.0"
] | null | null | null |
src/models/model_cifar.py
|
itsreddy/Autoencoder
|
59cb5423aad183ac13198c2afa527437786111d8
|
[
"Apache-2.0"
] | null | null | null |
import torch.nn as nn
class Encoder(nn.Module):
    """Convolutional encoder: (N, n_channel, 32, 32) images -> (N, n_z) codes."""

    def __init__(self, args):
        super(Encoder, self).__init__()
        self.n_channel = args.n_channel
        self.dim_h = args.dim_h
        self.n_z = args.n_z
        h = self.dim_h
        # Five stride-2 convolutions: spatial 32 -> 16 -> 8 -> 4 -> 2 -> 1.
        self.main = nn.Sequential(
            nn.Conv2d(self.n_channel, h, 4, 2, 1, bias=False),
            nn.ReLU(True),
            nn.BatchNorm2d(h),
            nn.Conv2d(h, h * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(h * 4),
            nn.ReLU(True),
            nn.Conv2d(h * 4, h * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(h * 8),
            nn.ReLU(True),
            nn.Conv2d(h * 8, h * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(h * 2),
            nn.ReLU(True),
            nn.Conv2d(h * 2, h, 4, 2, 1, bias=False),
            nn.BatchNorm2d(h),
            nn.ReLU(True),
        )
        # The original left an nn.Tanh() commented out here; the head is
        # a single linear projection.
        self.fc = nn.Sequential(nn.Linear(h, self.n_z))

    def forward(self, x):
        features = self.main(x)
        # NOTE: squeeze() also drops the batch dim when N == 1 — kept as-is.
        flat = features.squeeze()
        return self.fc(flat)
class Decoder(nn.Module):
    """Transposed-conv decoder: (N, n_z) codes -> (N, n_channel, 32, 32) images."""

    def __init__(self, args):
        super(Decoder, self).__init__()
        self.n_channel = args.n_channel
        self.dim_h = args.dim_h
        self.n_z = args.n_z
        h = self.dim_h
        # Lift the latent vector to a single 1x1 'pixel' of h channels.
        self.proj = nn.Sequential(
            nn.Linear(self.n_z, h * 1 * 1),
            nn.ReLU(),
        )
        # Five stride-2 transposed convolutions: 1 -> 2 -> 4 -> 8 -> 16 -> 32.
        self.main = nn.Sequential(
            nn.ConvTranspose2d(h * 1, h * 2, 4, 2, 1),
            nn.BatchNorm2d(h * 2),
            nn.ReLU(True),
            nn.ConvTranspose2d(h * 2, h * 8, 4, 2, 1),
            nn.BatchNorm2d(h * 8),
            nn.ReLU(True),
            nn.ConvTranspose2d(h * 8, h * 4, 4, 2, 1),
            nn.BatchNorm2d(h * 4),
            nn.ReLU(True),
            nn.ConvTranspose2d(h * 4, h * 1, 4, 2, 1),
            nn.BatchNorm2d(h * 1),
            nn.ReLU(True),
            nn.ConvTranspose2d(h * 1, self.n_channel, 4, 2, 1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        projected = self.proj(x)
        grid = projected.view(-1, self.dim_h * 1, 1, 1)
        return self.main(grid)
class GanDiscriminator2(nn.Module):
    """Shallow GAN discriminator: (N, n_channel, 32, 32) -> (N, 1) score in (0, 1).

    The flattened feature size is hard-wired to 1024, i.e. it assumes
    (dim_h // 2) * 4 * 4 == 1024 (dim_h == 128 for 32x32 inputs) — TODO confirm.
    """

    def __init__(self, args):
        super(GanDiscriminator2, self).__init__()
        self.n_channel = args.n_channel
        self.dim_h = args.dim_h
        self.n_z = args.n_z
        h = self.dim_h
        self.main = nn.Sequential(
            nn.Conv2d(self.n_channel, h, 4, 2, 1, bias=False),  # 3 -> 128
            nn.LeakyReLU(0.2, inplace=True),
            nn.BatchNorm2d(h),
            nn.Conv2d(h, h * 2, 4, 2, 1, bias=False),  # 128 -> 256
            nn.BatchNorm2d(h * 2),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(h * 2, h // 2, 4, 2, 1, bias=False),  # 256 -> 128
            nn.BatchNorm2d(h // 2),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.fc = nn.Sequential(
            nn.Linear(1024, 1),  # 128 -> 1
            nn.Sigmoid(),
        )

    def forward(self, x):
        features = self.main(x)
        flat = features.view(features.shape[0], -1)
        return self.fc(flat)
class GanDiscriminator(nn.Module):
    """Deep GAN discriminator: (N, n_channel, 32, 32) -> (N, 1) score in (0, 1)."""

    def __init__(self, args):
        super(GanDiscriminator, self).__init__()
        self.n_channel = args.n_channel
        self.dim_h = args.dim_h
        self.n_z = args.n_z
        h = self.dim_h
        # Five stride-2 convolutions: spatial 32 -> 16 -> 8 -> 4 -> 2 -> 1.
        self.main = nn.Sequential(
            nn.Conv2d(self.n_channel, h, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            nn.BatchNorm2d(h),
            nn.Conv2d(h, h * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(h * 4),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(h * 4, h * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(h * 8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(h * 8, h * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(h * 2),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(h * 2, h, 4, 2, 1, bias=False),
            nn.BatchNorm2d(h),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.fc = nn.Sequential(
            nn.Linear(h, 1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        features = self.main(x)
        # NOTE: squeeze() also drops the batch dim when N == 1 — kept as-is.
        flat = features.squeeze()
        return self.fc(flat)
class LinearEncoder(nn.Module):
    """Fully-connected encoder: flattened (N, img_size**2) input -> (N, n_z)."""

    def __init__(self, args):
        super(LinearEncoder, self).__init__()
        self.n_z = args.n_z
        self.dim_h = args.dim_h
        self.dim_input = args.img_size ** 2
        h = self.dim_h
        # Funnel of Linear+ReLU pairs: input -> 16h -> 8h -> 8h -> 8h -> 4h
        # -> 2h -> h, then a final linear projection to n_z.
        widths = [self.dim_input, h * 16, h * 8, h * 8, h * 8, h * 4, h * 2, h]
        layers = []
        for w_in, w_out in zip(widths, widths[1:]):
            layers.append(nn.Linear(w_in, w_out))
            layers.append(nn.ReLU(True))
        layers.append(nn.Linear(h, self.n_z))
        self.main = nn.Sequential(*layers)

    def forward(self, x):
        # NOTE: squeeze() also drops the batch dim when N == 1 — kept as-is.
        return self.main(x).squeeze()
class LinearDecoder(nn.Module):
    """Fully-connected decoder: (N, n_z) codes -> (N, 1, img_size**2) outputs."""

    def __init__(self, args):
        super(LinearDecoder, self).__init__()
        self.n_z = args.n_z
        self.dim_h = args.dim_h
        self.dim_output = args.img_size ** 2
        h = self.dim_h
        # Expanding stack of Linear+ReLU pairs: n_z -> h -> 2h -> 4h -> 8h
        # -> 8h -> 8h -> 16h, then a final projection squashed by a sigmoid.
        widths = [self.n_z, h, h * 2, h * 4, h * 8, h * 8, h * 8, h * 16]
        layers = []
        for w_in, w_out in zip(widths, widths[1:]):
            layers.extend([nn.Linear(w_in, w_out), nn.ReLU(True)])
        layers.extend([nn.Linear(h * 16, self.dim_output), nn.Sigmoid()])
        self.main = nn.Sequential(*layers)

    def forward(self, x):
        # Insert a singleton channel dim before the linear stack, as the
        # original did.
        expanded = x.unsqueeze(dim=1)
        return self.main(expanded)
class Discriminator(nn.Module):
    """Latent-space critic: (N, n_z) codes -> (N, 1) score in [-1, 1] (Tanh)."""

    def __init__(self, args):
        super(Discriminator, self).__init__()
        self.n_channel = args.n_channel
        self.dim_h = args.dim_h
        self.n_z = args.n_z
        h4 = self.dim_h * 4
        # n_z -> 4h, then three 4h -> 4h hidden layers, then 4h -> 1 + Tanh.
        layers = [nn.Linear(self.n_z, h4), nn.ReLU(True)]
        for _ in range(3):
            layers.extend([nn.Linear(h4, h4), nn.ReLU(True)])
        layers.extend([nn.Linear(h4, 1), nn.Tanh()])
        self.main = nn.Sequential(*layers)

    def forward(self, x):
        return self.main(x)
| 31.392405
| 89
| 0.509409
| 1,100
| 7,440
| 3.265455
| 0.054545
| 0.114699
| 0.213808
| 0.065145
| 0.912305
| 0.903118
| 0.89922
| 0.815702
| 0.759744
| 0.744154
| 0
| 0.044752
| 0.348253
| 7,440
| 236
| 90
| 31.525424
| 0.69602
| 0.008468
| 0
| 0.695431
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071066
| false
| 0
| 0.005076
| 0
| 0.147208
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b4788b834e11d2b88b60bc26ba8995c6114fe25f
| 63,073
|
py
|
Python
|
test/azure/low-level/Expected/AcceptanceTests/LroLowLevel/lrolowlevel/rest/lrosads/_request_builders.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | null | null | null |
test/azure/low-level/Expected/AcceptanceTests/LroLowLevel/lrolowlevel/rest/lrosads/_request_builders.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | null | null | null |
test/azure/low-level/Expected/AcceptanceTests/LroLowLevel/lrolowlevel/rest/lrosads/_request_builders.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | 1
|
2022-03-28T08:58:03.000Z
|
2022-03-28T08:58:03.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.rest import HttpRequest
from msrest import Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Dict, Optional, TypeVar
T = TypeVar("T")
JSONType = Any
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_put_non_retry400_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
"""Long running put request, service returns a 400 to the initial request.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.
:keyword json: Pass in a JSON-serializable object (usually a dictionary). See the template in
our example to find the input shape. Product to put.
:paramtype json: JSONType
:keyword content: Pass in binary content you want in the body of the request (typically bytes,
a byte iterator, or stream input). Product to put.
:paramtype content: any
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
json = {
"id": "str", # Optional. Resource Id.
"location": "str", # Optional. Resource Location.
"name": "str", # Optional. Resource Name.
"properties": {
"provisioningState": "str", # Optional.
"provisioningStateValues": "str" # Optional. Possible values include: "Succeeded", "Failed", "canceled", "Accepted", "Creating", "Created", "Updating", "Updated", "Deleting", "Deleted", "OK".
},
"tags": {
"str": "str" # Optional. A set of tags. Dictionary of :code:`<string>`.
},
"type": "str" # Optional. Resource Type.
}
# response body for status code(s): 200, 201
response.json() == {
"id": "str", # Optional. Resource Id.
"location": "str", # Optional. Resource Location.
"name": "str", # Optional. Resource Name.
"properties": {
"provisioningState": "str", # Optional.
"provisioningStateValues": "str" # Optional. Possible values include: "Succeeded", "Failed", "canceled", "Accepted", "Creating", "Created", "Updating", "Updated", "Deleting", "Deleted", "OK".
},
"tags": {
"str": "str" # Optional. A set of tags. Dictionary of :code:`<string>`.
},
"type": "str" # Optional. Resource Type.
}
"""
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = '/lro/nonretryerror/put/400'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
headers=header_parameters,
**kwargs
)
def build_put_non_retry201_creating400_request(
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Long running put request, service returns a Product with 'ProvisioningState' = 'Creating' and
    201 response code.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request
    builder into your code flow.

    :keyword json: A JSON-serializable object (usually a dictionary) with the Product to put.
     The Product template has optional keys ``id``, ``location``, ``name``, ``tags``, ``type``
     and ``properties`` (with ``provisioningState`` / ``provisioningStateValues``).
    :paramtype json: JSONType
    :keyword content: Binary body content (bytes, a byte iterator, or stream input) with the
     Product to put.
    :paramtype content: any
    :return: An :class:`~azure.core.rest.HttpRequest` to pass to the client's ``send_request``
     method. The 200/201 response body has the same Product shape as the input template.
    :rtype: ~azure.core.rest.HttpRequest
    """
    # Pop builder-specific keyword arguments before forwarding the rest to HttpRequest.
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')
    return HttpRequest(method="PUT", url='/lro/nonretryerror/put/201/creating/400', headers=headers, **kwargs)
def build_put_non_retry201_creating400_invalid_json_request(
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Long running put request, service returns a Product with 'ProvisioningState' = 'Creating' and
    201 response code.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request
    builder into your code flow.

    :keyword json: A JSON-serializable object (usually a dictionary) with the Product to put.
     The Product template has optional keys ``id``, ``location``, ``name``, ``tags``, ``type``
     and ``properties`` (with ``provisioningState`` / ``provisioningStateValues``).
    :paramtype json: JSONType
    :keyword content: Binary body content (bytes, a byte iterator, or stream input) with the
     Product to put.
    :paramtype content: any
    :return: An :class:`~azure.core.rest.HttpRequest` to pass to the client's ``send_request``
     method. The 200/201 response body has the same Product shape as the input template.
    :rtype: ~azure.core.rest.HttpRequest
    """
    # Pop builder-specific keyword arguments before forwarding the rest to HttpRequest.
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')
    return HttpRequest(method="PUT", url='/lro/nonretryerror/put/201/creating/400/invalidjson', headers=headers, **kwargs)
def build_put_async_relative_retry400_request(
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Long running put request, service returns a 200 with ProvisioningState='Creating'. Poll the
    endpoint indicated in the Azure-AsyncOperation header for operation status.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request
    builder into your code flow.

    :keyword json: A JSON-serializable object (usually a dictionary) with the Product to put.
     The Product template has optional keys ``id``, ``location``, ``name``, ``tags``, ``type``
     and ``properties`` (with ``provisioningState`` / ``provisioningStateValues``).
    :paramtype json: JSONType
    :keyword content: Binary body content (bytes, a byte iterator, or stream input) with the
     Product to put.
    :paramtype content: any
    :return: An :class:`~azure.core.rest.HttpRequest` to pass to the client's ``send_request``
     method. The 200 response body has the same Product shape as the input template.
    :rtype: ~azure.core.rest.HttpRequest
    """
    # Pop builder-specific keyword arguments before forwarding the rest to HttpRequest.
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')
    return HttpRequest(method="PUT", url='/lro/nonretryerror/putasync/retry/400', headers=headers, **kwargs)
def build_delete_non_retry400_request(
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Long running delete request, service returns a 400 with an error body.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request
    builder into your code flow.

    :return: An :class:`~azure.core.rest.HttpRequest` to pass to the client's ``send_request``
     method.
    :rtype: ~azure.core.rest.HttpRequest
    """
    # Merge caller-provided headers with the JSON Accept header, then build the request.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')
    return HttpRequest(method="DELETE", url='/lro/nonretryerror/delete/400', headers=headers, **kwargs)
def build_delete202_non_retry400_request(
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Long running delete request, service returns a 202 with a location header.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request
    builder into your code flow.

    :return: An :class:`~azure.core.rest.HttpRequest` to pass to the client's ``send_request``
     method.
    :rtype: ~azure.core.rest.HttpRequest
    """
    # Merge caller-provided headers with the JSON Accept header, then build the request.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')
    return HttpRequest(method="DELETE", url='/lro/nonretryerror/delete/202/retry/400', headers=headers, **kwargs)
def build_delete_async_relative_retry400_request(
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Long running delete request, service returns a 202 to the initial request. Poll the endpoint
    indicated in the Azure-AsyncOperation header for operation status.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request
    builder into your code flow.

    :return: An :class:`~azure.core.rest.HttpRequest` to pass to the client's ``send_request``
     method.
    :rtype: ~azure.core.rest.HttpRequest
    """
    # Merge caller-provided headers with the JSON Accept header, then build the request.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')
    return HttpRequest(method="DELETE", url='/lro/nonretryerror/deleteasync/retry/400', headers=headers, **kwargs)
def build_post_non_retry400_request(
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Long running post request, service returns a 400 with no error body.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request
    builder into your code flow.

    :keyword json: A JSON-serializable object (usually a dictionary) with the Product to put.
     The Product template has optional keys ``id``, ``location``, ``name``, ``tags``, ``type``
     and ``properties`` (with ``provisioningState`` / ``provisioningStateValues``).
    :paramtype json: JSONType
    :keyword content: Binary body content (bytes, a byte iterator, or stream input) with the
     Product to put.
    :paramtype content: any
    :return: An :class:`~azure.core.rest.HttpRequest` to pass to the client's ``send_request``
     method.
    :rtype: ~azure.core.rest.HttpRequest
    """
    # Pop builder-specific keyword arguments before forwarding the rest to HttpRequest.
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')
    return HttpRequest(method="POST", url='/lro/nonretryerror/post/400', headers=headers, **kwargs)
def build_post202_non_retry400_request(
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Long running post request, service returns a 202 with a location header.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request
    builder into your code flow.

    :keyword json: A JSON-serializable object (usually a dictionary) with the Product to put.
     The Product template has optional keys ``id``, ``location``, ``name``, ``tags``, ``type``
     and ``properties`` (with ``provisioningState`` / ``provisioningStateValues``).
    :paramtype json: JSONType
    :keyword content: Binary body content (bytes, a byte iterator, or stream input) with the
     Product to put.
    :paramtype content: any
    :return: An :class:`~azure.core.rest.HttpRequest` to pass to the client's ``send_request``
     method.
    :rtype: ~azure.core.rest.HttpRequest
    """
    # Pop builder-specific keyword arguments before forwarding the rest to HttpRequest.
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')
    return HttpRequest(method="POST", url='/lro/nonretryerror/post/202/retry/400', headers=headers, **kwargs)
def build_post_async_relative_retry400_request(
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Long running post request, service returns a 202 to the initial request. Poll the endpoint
    indicated in the Azure-AsyncOperation header for operation status.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request
    builder into your code flow.

    :keyword json: A JSON-serializable object (usually a dictionary) with the Product to put.
     The Product template has optional keys ``id``, ``location``, ``name``, ``tags``, ``type``
     and ``properties`` (with ``provisioningState`` / ``provisioningStateValues``).
    :paramtype json: JSONType
    :keyword content: Binary body content (bytes, a byte iterator, or stream input) with the
     Product to put.
    :paramtype content: any
    :return: An :class:`~azure.core.rest.HttpRequest` to pass to the client's ``send_request``
     method.
    :rtype: ~azure.core.rest.HttpRequest
    """
    # Pop builder-specific keyword arguments before forwarding the rest to HttpRequest.
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')
    return HttpRequest(method="POST", url='/lro/nonretryerror/postasync/retry/400', headers=headers, **kwargs)
def build_put_error201_no_provisioning_state_payload_request(
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Long running put request, service returns a 201 to the initial request with no payload.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request
    builder into your code flow.

    :keyword json: A JSON-serializable object (usually a dictionary) with the Product to put.
     The Product template has optional keys ``id``, ``location``, ``name``, ``tags``, ``type``
     and ``properties`` (with ``provisioningState`` / ``provisioningStateValues``).
    :paramtype json: JSONType
    :keyword content: Binary body content (bytes, a byte iterator, or stream input) with the
     Product to put.
    :paramtype content: any
    :return: An :class:`~azure.core.rest.HttpRequest` to pass to the client's ``send_request``
     method. The 200/201 response body has the same Product shape as the input template.
    :rtype: ~azure.core.rest.HttpRequest
    """
    # Pop builder-specific keyword arguments before forwarding the rest to HttpRequest.
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')
    return HttpRequest(method="PUT", url='/lro/error/put/201/noprovisioningstatepayload', headers=headers, **kwargs)
def build_put_async_relative_retry_no_status_request(
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Long running put request, service returns a 200 to the initial request, with an entity that
    contains ProvisioningState='Creating'. Poll the endpoint indicated in the Azure-AsyncOperation
    header for operation status.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request
    builder into your code flow.

    :keyword json: A JSON-serializable object (usually a dictionary) with the Product to put.
     The Product template has optional keys ``id``, ``location``, ``name``, ``tags``, ``type``
     and ``properties`` (with ``provisioningState`` / ``provisioningStateValues``).
    :paramtype json: JSONType
    :keyword content: Binary body content (bytes, a byte iterator, or stream input) with the
     Product to put.
    :paramtype content: any
    :return: An :class:`~azure.core.rest.HttpRequest` to pass to the client's ``send_request``
     method. The 200 response body has the same Product shape as the input template.
    :rtype: ~azure.core.rest.HttpRequest
    """
    # Pop builder-specific keyword arguments before forwarding the rest to HttpRequest.
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')
    return HttpRequest(method="PUT", url='/lro/error/putasync/retry/nostatus', headers=headers, **kwargs)
def build_put_async_relative_retry_no_status_payload_request(
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Long running put request, service returns a 200 to the initial request, with an entity that
    contains ProvisioningState='Creating'. Poll the endpoint indicated in the Azure-AsyncOperation
    header for operation status.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request
    builder into your code flow.

    :keyword json: A JSON-serializable object (usually a dictionary) with the Product to put.
     The Product template has optional keys ``id``, ``location``, ``name``, ``tags``, ``type``
     and ``properties`` (with ``provisioningState`` / ``provisioningStateValues``).
    :paramtype json: JSONType
    :keyword content: Binary body content (bytes, a byte iterator, or stream input) with the
     Product to put.
    :paramtype content: any
    :return: An :class:`~azure.core.rest.HttpRequest` to pass to the client's ``send_request``
     method. The 200 response body has the same Product shape as the input template.
    :rtype: ~azure.core.rest.HttpRequest
    """
    # Pop builder-specific keyword arguments before forwarding the rest to HttpRequest.
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')
    return HttpRequest(method="PUT", url='/lro/error/putasync/retry/nostatuspayload', headers=headers, **kwargs)
def build_delete204_succeeded_request(
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Long running delete request, service returns a 204 to the initial request, indicating success.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request
    builder into your code flow.

    :return: An :class:`~azure.core.rest.HttpRequest` to pass to the client's ``send_request``
     method.
    :rtype: ~azure.core.rest.HttpRequest
    """
    # Merge caller-provided headers with the JSON Accept header, then build the request.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')
    return HttpRequest(method="DELETE", url='/lro/error/delete/204/nolocation', headers=headers, **kwargs)
def build_delete_async_relative_retry_no_status_request(
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Long running delete request, service returns a 202 to the initial request. Poll the endpoint
    indicated in the Azure-AsyncOperation header for operation status.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request
    builder into your code flow.

    :return: An :class:`~azure.core.rest.HttpRequest` to pass to the client's ``send_request``
     method.
    :rtype: ~azure.core.rest.HttpRequest
    """
    # Merge caller-provided headers with the JSON Accept header, then build the request.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')
    return HttpRequest(method="DELETE", url='/lro/error/deleteasync/retry/nostatus', headers=headers, **kwargs)
def build_post202_no_location_request(
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Long running post request, service returns a 202 to the initial request, without a location
    header.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request
    builder into your code flow.

    :keyword json: A JSON-serializable object (usually a dictionary) with the Product to put.
     The Product template has optional keys ``id``, ``location``, ``name``, ``tags``, ``type``
     and ``properties`` (with ``provisioningState`` / ``provisioningStateValues``).
    :paramtype json: JSONType
    :keyword content: Binary body content (bytes, a byte iterator, or stream input) with the
     Product to put.
    :paramtype content: any
    :return: An :class:`~azure.core.rest.HttpRequest` to pass to the client's ``send_request``
     method.
    :rtype: ~azure.core.rest.HttpRequest
    """
    # Pop builder-specific keyword arguments before forwarding the rest to HttpRequest.
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')
    return HttpRequest(method="POST", url='/lro/error/post/202/nolocation', headers=headers, **kwargs)
def build_post_async_relative_retry_no_payload_request(
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Long running post request, service returns a 202 to the initial request, with an entity that
    contains ProvisioningState='Creating'. Poll the endpoint indicated in the Azure-AsyncOperation
    header for operation status.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request
    builder into your code flow.

    :keyword json: A JSON-serializable object (usually a dictionary) with the Product to put.
     The Product template has optional keys ``id``, ``location``, ``name``, ``tags``, ``type``
     and ``properties`` (with ``provisioningState`` / ``provisioningStateValues``).
    :paramtype json: JSONType
    :keyword content: Binary body content (bytes, a byte iterator, or stream input) with the
     Product to put.
    :paramtype content: any
    :return: An :class:`~azure.core.rest.HttpRequest` to pass to the client's ``send_request``
     method.
    :rtype: ~azure.core.rest.HttpRequest
    """
    # Pop builder-specific keyword arguments before forwarding the rest to HttpRequest.
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')
    return HttpRequest(method="POST", url='/lro/error/postasync/retry/nopayload', headers=headers, **kwargs)
def build_put200_invalid_json_request(
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Long running put request, service returns a 200 to the initial request, with an entity that is
    not a valid json.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
    into your code flow.

    :keyword json: Pass in a JSON-serializable object (usually a dictionary). See the template in
     our example to find the input shape. Product to put.
    :paramtype json: JSONType
    :keyword content: Pass in binary content you want in the body of the request (typically bytes,
     a byte iterator, or stream input). Product to put.
    :paramtype content: any
    :return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
     `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
     incorporate this response into your code flow.
    :rtype: ~azure.core.rest.HttpRequest

    Both the request body template and the 200 response body follow the Product resource shape:
    optional string fields ``id``, ``location``, ``name``, ``type``, a ``tags`` dict of strings,
    and a ``properties`` dict with ``provisioningState`` and ``provisioningStateValues``
    (possible values: "Succeeded", "Failed", "canceled", "Accepted", "Creating", "Created",
    "Updating", "Updated", "Deleting", "Deleted", "OK").
    """
    # An explicit content type is optional; the Accept header is always JSON.
    _content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    # Merge any caller-supplied headers with the ones this builder requires.
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if _content_type is not None:
        _headers['Content-Type'] = _SERIALIZER.header("content_type", _content_type, 'str')
    _headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    # Remaining kwargs (e.g. json/content body) are forwarded to HttpRequest untouched.
    return HttpRequest(
        method="PUT",
        url='/lro/error/put/200/invalidjson',
        headers=_headers,
        **kwargs
    )
def build_put_async_relative_retry_invalid_header_request(
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Long running put request, service returns a 200 to the initial request, with an entity that
    contains ProvisioningState=’Creating’. The endpoint indicated in the Azure-AsyncOperation
    header is invalid.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
    into your code flow.

    :keyword json: Pass in a JSON-serializable object (usually a dictionary). See the template in
     our example to find the input shape. Product to put.
    :paramtype json: JSONType
    :keyword content: Pass in binary content you want in the body of the request (typically bytes,
     a byte iterator, or stream input). Product to put.
    :paramtype content: any
    :return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
     `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
     incorporate this response into your code flow.
    :rtype: ~azure.core.rest.HttpRequest

    Both the request body template and the 200 response body follow the Product resource shape:
    optional string fields ``id``, ``location``, ``name``, ``type``, a ``tags`` dict of strings,
    and a ``properties`` dict with ``provisioningState`` and ``provisioningStateValues``
    (possible values: "Succeeded", "Failed", "canceled", "Accepted", "Creating", "Created",
    "Updating", "Updated", "Deleting", "Deleted", "OK").
    """
    # An explicit content type is optional; the Accept header is always JSON.
    _content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    # Merge any caller-supplied headers with the ones this builder requires.
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if _content_type is not None:
        _headers['Content-Type'] = _SERIALIZER.header("content_type", _content_type, 'str')
    _headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    # Remaining kwargs (e.g. json/content body) are forwarded to HttpRequest untouched.
    return HttpRequest(
        method="PUT",
        url='/lro/error/putasync/retry/invalidheader',
        headers=_headers,
        **kwargs
    )
def build_put_async_relative_retry_invalid_json_polling_request(
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Long running put request, service returns a 200 to the initial request, with an entity that
    contains ProvisioningState=’Creating’. Poll the endpoint indicated in the Azure-AsyncOperation
    header for operation status.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
    into your code flow.

    :keyword json: Pass in a JSON-serializable object (usually a dictionary). See the template in
     our example to find the input shape. Product to put.
    :paramtype json: JSONType
    :keyword content: Pass in binary content you want in the body of the request (typically bytes,
     a byte iterator, or stream input). Product to put.
    :paramtype content: any
    :return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
     `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
     incorporate this response into your code flow.
    :rtype: ~azure.core.rest.HttpRequest

    Both the request body template and the 200 response body follow the Product resource shape:
    optional string fields ``id``, ``location``, ``name``, ``type``, a ``tags`` dict of strings,
    and a ``properties`` dict with ``provisioningState`` and ``provisioningStateValues``
    (possible values: "Succeeded", "Failed", "canceled", "Accepted", "Creating", "Created",
    "Updating", "Updated", "Deleting", "Deleted", "OK").
    """
    # An explicit content type is optional; the Accept header is always JSON.
    _content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    # Merge any caller-supplied headers with the ones this builder requires.
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if _content_type is not None:
        _headers['Content-Type'] = _SERIALIZER.header("content_type", _content_type, 'str')
    _headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    # Remaining kwargs (e.g. json/content body) are forwarded to HttpRequest untouched.
    return HttpRequest(
        method="PUT",
        url='/lro/error/putasync/retry/invalidjsonpolling',
        headers=_headers,
        **kwargs
    )
def build_delete202_retry_invalid_header_request(
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Long running delete request, service returns a 202 to the initial request, receiving a
    response with invalid 'Location' and 'Retry-After' headers.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
    into your code flow.

    :return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
     `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
     incorporate this response into your code flow.
    :rtype: ~azure.core.rest.HttpRequest
    """
    # Merge any caller-supplied headers with the mandatory Accept header.
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(
        method="DELETE",
        url='/lro/error/delete/202/retry/invalidheader',
        headers=_headers,
        **kwargs
    )
def build_delete_async_relative_retry_invalid_header_request(
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Long running delete request, service returns a 202 to the initial request. The endpoint
    indicated in the Azure-AsyncOperation header is invalid.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
    into your code flow.

    :return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
     `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
     incorporate this response into your code flow.
    :rtype: ~azure.core.rest.HttpRequest
    """
    # Merge any caller-supplied headers with the mandatory Accept header.
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(
        method="DELETE",
        url='/lro/error/deleteasync/retry/invalidheader',
        headers=_headers,
        **kwargs
    )
def build_delete_async_relative_retry_invalid_json_polling_request(
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Long running delete request, service returns a 202 to the initial request. Poll the endpoint
    indicated in the Azure-AsyncOperation header for operation status.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
    into your code flow.

    :return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
     `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
     incorporate this response into your code flow.
    :rtype: ~azure.core.rest.HttpRequest
    """
    # Merge any caller-supplied headers with the mandatory Accept header.
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(
        method="DELETE",
        url='/lro/error/deleteasync/retry/invalidjsonpolling',
        headers=_headers,
        **kwargs
    )
def build_post202_retry_invalid_header_request(
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Long running post request, service returns a 202 to the initial request, with invalid
    'Location' and 'Retry-After' headers.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
    into your code flow.

    :keyword json: Pass in a JSON-serializable object (usually a dictionary). See the template in
     our example to find the input shape. Product to put.
    :paramtype json: JSONType
    :keyword content: Pass in binary content you want in the body of the request (typically bytes,
     a byte iterator, or stream input). Product to put.
    :paramtype content: any
    :return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
     `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
     incorporate this response into your code flow.
    :rtype: ~azure.core.rest.HttpRequest

    The request body template follows the Product resource shape: optional string fields ``id``,
    ``location``, ``name``, ``type``, a ``tags`` dict of strings, and a ``properties`` dict with
    ``provisioningState`` and ``provisioningStateValues`` (possible values: "Succeeded", "Failed",
    "canceled", "Accepted", "Creating", "Created", "Updating", "Updated", "Deleting", "Deleted",
    "OK").
    """
    # An explicit content type is optional; the Accept header is always JSON.
    _content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    # Merge any caller-supplied headers with the ones this builder requires.
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if _content_type is not None:
        _headers['Content-Type'] = _SERIALIZER.header("content_type", _content_type, 'str')
    _headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    # Remaining kwargs (e.g. json/content body) are forwarded to HttpRequest untouched.
    return HttpRequest(
        method="POST",
        url='/lro/error/post/202/retry/invalidheader',
        headers=_headers,
        **kwargs
    )
def build_post_async_relative_retry_invalid_header_request(
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Long running post request, service returns a 202 to the initial request, with an entity that
    contains ProvisioningState=’Creating’. The endpoint indicated in the Azure-AsyncOperation
    header is invalid.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
    into your code flow.

    :keyword json: Pass in a JSON-serializable object (usually a dictionary). See the template in
     our example to find the input shape. Product to put.
    :paramtype json: JSONType
    :keyword content: Pass in binary content you want in the body of the request (typically bytes,
     a byte iterator, or stream input). Product to put.
    :paramtype content: any
    :return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
     `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
     incorporate this response into your code flow.
    :rtype: ~azure.core.rest.HttpRequest

    The request body template follows the Product resource shape: optional string fields ``id``,
    ``location``, ``name``, ``type``, a ``tags`` dict of strings, and a ``properties`` dict with
    ``provisioningState`` and ``provisioningStateValues`` (possible values: "Succeeded", "Failed",
    "canceled", "Accepted", "Creating", "Created", "Updating", "Updated", "Deleting", "Deleted",
    "OK").
    """
    # An explicit content type is optional; the Accept header is always JSON.
    _content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    # Merge any caller-supplied headers with the ones this builder requires.
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if _content_type is not None:
        _headers['Content-Type'] = _SERIALIZER.header("content_type", _content_type, 'str')
    _headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    # Remaining kwargs (e.g. json/content body) are forwarded to HttpRequest untouched.
    return HttpRequest(
        method="POST",
        url='/lro/error/postasync/retry/invalidheader',
        headers=_headers,
        **kwargs
    )
def build_post_async_relative_retry_invalid_json_polling_request(
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Long running post request, service returns a 202 to the initial request, with an entity that
    contains ProvisioningState=’Creating’. Poll the endpoint indicated in the Azure-AsyncOperation
    header for operation status.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
    into your code flow.

    :keyword json: Pass in a JSON-serializable object (usually a dictionary). See the template in
     our example to find the input shape. Product to put.
    :paramtype json: JSONType
    :keyword content: Pass in binary content you want in the body of the request (typically bytes,
     a byte iterator, or stream input). Product to put.
    :paramtype content: any
    :return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
     `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
     incorporate this response into your code flow.
    :rtype: ~azure.core.rest.HttpRequest

    The request body template follows the Product resource shape: optional string fields ``id``,
    ``location``, ``name``, ``type``, a ``tags`` dict of strings, and a ``properties`` dict with
    ``provisioningState`` and ``provisioningStateValues`` (possible values: "Succeeded", "Failed",
    "canceled", "Accepted", "Creating", "Created", "Updating", "Updated", "Deleting", "Deleted",
    "OK").
    """
    # An explicit content type is optional; the Accept header is always JSON.
    _content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    # Merge any caller-supplied headers with the ones this builder requires.
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if _content_type is not None:
        _headers['Content-Type'] = _SERIALIZER.header("content_type", _content_type, 'str')
    _headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    # Remaining kwargs (e.g. json/content body) are forwarded to HttpRequest untouched.
    return HttpRequest(
        method="POST",
        url='/lro/error/postasync/retry/invalidjsonpolling',
        headers=_headers,
        **kwargs
    )
| 41.797879
| 212
| 0.610007
| 7,001
| 63,073
| 5.436938
| 0.032281
| 0.056641
| 0.055906
| 0.01776
| 0.979245
| 0.978562
| 0.978195
| 0.978195
| 0.978195
| 0.977012
| 0
| 0.005235
| 0.270147
| 63,073
| 1,508
| 213
| 41.825597
| 0.821632
| 0.712428
| 0
| 0.783042
| 0
| 0
| 0.187622
| 0.066334
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064838
| false
| 0
| 0.009975
| 0
| 0.139651
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c309f7e3c1788ea643f81986be5493b6c1026ec6
| 14,507
|
py
|
Python
|
gbpservice/neutron/plugins/ml2plus/driver_api.py
|
baodongli/group-based-policy
|
f3b892ecdc1051b204376e18679f73bf457ce7dc
|
[
"Apache-2.0"
] | null | null | null |
gbpservice/neutron/plugins/ml2plus/driver_api.py
|
baodongli/group-based-policy
|
f3b892ecdc1051b204376e18679f73bf457ce7dc
|
[
"Apache-2.0"
] | null | null | null |
gbpservice/neutron/plugins/ml2plus/driver_api.py
|
baodongli/group-based-policy
|
f3b892ecdc1051b204376e18679f73bf457ce7dc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2016 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
from neutron.plugins.ml2 import driver_api
@six.add_metaclass(abc.ABCMeta)
class SubnetPoolContext(object):
    """Context passed to MechanismDrivers for changes to subnet pool
    resources.

    A SubnetPoolContext instance wraps a subnet pool resource. It
    provides helper methods for accessing other relevant
    information. Results from expensive operations are cached so that
    other MechanismDrivers can freely access the same information.
    """

    # NOTE: @property + @abc.abstractmethod replaces the deprecated
    # abc.abstractproperty; subclasses override these exactly as before.
    @property
    @abc.abstractmethod
    def current(self):
        """Return the subnet pool in its current configuration.

        Return the subnet pool with all its properties 'current' at
        the time the context was established.
        """
        pass

    @property
    @abc.abstractmethod
    def original(self):
        """Return the subnet pool in its original configuration.

        Return the subnet pool, with all its properties set to their
        original values prior to a call to update_subnetpool. Method is
        only valid within calls to update_subnetpool_precommit and
        update_subnetpool_postcommit.
        """
        pass
@six.add_metaclass(abc.ABCMeta)
class AddressScopeContext(object):
    """Context passed to MechanismDrivers for changes to address scope
    resources.

    An AddressScopeContext instance wraps an address scope
    resource. It provides helper methods for accessing other relevant
    information. Results from expensive operations are cached so that
    other MechanismDrivers can freely access the same information.
    """

    # NOTE: @property + @abc.abstractmethod replaces the deprecated
    # abc.abstractproperty; subclasses override these exactly as before.
    @property
    @abc.abstractmethod
    def current(self):
        """Return the address scope in its current configuration.

        Return the address scope with all its properties 'current' at
        the time the context was established.
        """
        pass

    @property
    @abc.abstractmethod
    def original(self):
        """Return the address scope in its original configuration.

        Return the address scope, with all its properties set to their
        original values prior to a call to update_address_scope. Method is
        only valid within calls to update_address_scope_precommit and
        update_address_scope_postcommit.
        """
        pass
@six.add_metaclass(abc.ABCMeta)
class MechanismDriver(driver_api.MechanismDriver):
    """ML2 mechanism driver extended with subnet pool and address scope
    hooks.

    Every hook below is a no-op by default; concrete drivers override
    only the ones they care about.
    """

    # REVISIT(rkukura): Is this needed for all operations, or just for
    # create operations? If its needed for all operations, should the
    # method be specific to the resource and operation, and include
    # the request data (i.e. update_network_pretransaction(self,
    # data))?
    def ensure_tenant(self, plugin_context, tenant_id):
        """Make sure a tenant is known before a resource is created.

        :param plugin_context: Plugin request context.
        :param tenant_id: Tenant owning resource about to be created.

        Invoked before the transaction creating any new core resource
        is started, giving drivers a chance to do any tenant-specific
        preparation.
        """
        pass

    def create_subnetpool_precommit(self, context):
        """Allocate resources for a new subnet pool.

        :param context: SubnetPoolContext instance describing the new
        subnet pool.

        Runs inside the database transaction; must not block. Raising
        an exception rolls the transaction back.
        """
        pass

    def create_subnetpool_postcommit(self, context):
        """Create a subnet pool.

        :param context: SubnetPoolContext instance describing the new
        subnet pool.

        Runs after the transaction has committed. Blocking is allowed
        but stalls the whole process, so keep it short. Raising an
        exception causes the resource to be deleted.
        """
        pass

    def update_subnetpool_precommit(self, context):
        """Update resources of a subnet pool.

        :param context: SubnetPoolContext instance describing the new
        state of the subnet pool, as well as the original state prior
        to the update_subnetpool call.

        Runs inside the database transaction; raising an exception
        rolls it back. Invoked for every subnet pool change — drivers
        must ignore state (changes) they do not recognize.
        """
        pass

    def update_subnetpool_postcommit(self, context):
        """Update a subnet pool.

        :param context: SubnetPoolContext instance describing the new
        state of the subnet pool, as well as the original state prior
        to the update_subnetpool call.

        Runs after the transaction has committed. Blocking is allowed
        but stalls the whole process. Raising an exception causes the
        resource to be deleted. Invoked for every subnet pool change —
        drivers must ignore state (changes) they do not recognize.
        """
        pass

    def delete_subnetpool_precommit(self, context):
        """Delete resources for a subnet pool.

        :param context: SubnetPoolContext instance describing the
        current state of the subnet pool, prior to the call to delete
        it.

        Releases whatever this driver previously allocated for the
        pool. Runs inside the database transaction; runtime errors are
        not expected, but an exception rolls the transaction back.
        """
        pass

    def delete_subnetpool_postcommit(self, context):
        """Delete a subnet pool.

        :param context: SubnetPoolContext instance describing the
        current state of the subnet pool, prior to the call to delete
        it.

        Runs after the transaction has committed. Blocking is allowed
        but stalls the whole process. Runtime errors are not expected
        and will not prevent the resource from being deleted.
        """
        pass

    def create_address_scope_precommit(self, context):
        """Allocate resources for a new address scope.

        :param context: AddressScopeContext instance describing the
        new address scope.

        Runs inside the database transaction; must not block. Raising
        an exception rolls the transaction back.
        """
        pass

    def create_address_scope_postcommit(self, context):
        """Create an address scope.

        :param context: AddressScopeContext instance describing the
        new address scope.

        Runs after the transaction has committed. Blocking is allowed
        but stalls the whole process, so keep it short. Raising an
        exception causes the resource to be deleted.
        """
        pass

    def update_address_scope_precommit(self, context):
        """Update resources of an address scope.

        :param context: AddressScopeContext instance describing the
        new state of the address scope, as well as the original state
        prior to the update_address_scope call.

        Runs inside the database transaction; raising an exception
        rolls it back. Invoked for every address scope change —
        drivers must ignore state (changes) they do not recognize.
        """
        pass

    def update_address_scope_postcommit(self, context):
        """Update an address scope.

        :param context: AddressScopeContext instance describing the
        new state of the address scope, as well as the original state
        prior to the update_address_scope call.

        Runs after the transaction has committed. Blocking is allowed
        but stalls the whole process. Raising an exception causes the
        resource to be deleted. Invoked for every address scope change
        — drivers must ignore state (changes) they do not recognize.
        """
        pass

    def delete_address_scope_precommit(self, context):
        """Delete resources for an address scope.

        :param context: AddressScopeContext instance describing the
        current state of the address scope, prior to the call to
        delete it.

        Releases whatever this driver previously allocated for the
        scope. Runs inside the database transaction; runtime errors
        are not expected, but an exception rolls the transaction back.
        """
        pass

    def delete_address_scope_postcommit(self, context):
        """Delete an address scope.

        :param context: AddressScopeContext instance describing the
        current state of the address scope, prior to the call to
        delete it.

        Runs after the transaction has committed. Blocking is allowed
        but stalls the whole process. Runtime errors are not expected
        and will not prevent the resource from being deleted.
        """
        pass

    # REVISIT(rkukura): Add precommit/postcommit calls for other
    # resources implemented in ML2, such as security groups and
    # security group rules?
@six.add_metaclass(abc.ABCMeta)
class ExtensionDriver(driver_api.ExtensionDriver):
    """ML2 extension driver extended with subnet pool and address scope
    hooks.

    Every hook below is a no-op by default; concrete drivers override
    only the ones they care about.
    """

    def process_create_subnetpool(self, plugin_context, data, result):
        """Process extended attributes for create subnet pool.

        :param plugin_context: plugin request context
        :param data: dictionary of incoming subnet pool data
        :param result: subnet pool dictionary to extend

        Runs inside the transaction on plugin_context.session to
        validate and persist any extended subnet pool attributes this
        driver defines. The extended values must also be written into
        result.
        """
        pass

    def process_update_subnetpool(self, plugin_context, data, result):
        """Process extended attributes for update subnet pool.

        :param plugin_context: plugin request context
        :param data: dictionary of incoming subnet pool data
        :param result: subnet pool dictionary to extend

        Runs inside the transaction on plugin_context.session to
        validate and update any extended subnet pool attributes this
        driver defines. The extended values — updated or not — must
        also be written into result.
        """
        pass

    def extend_subnetpool_dict(self, session, base_model, result):
        """Add extended attributes to subnet pool dictionary.

        :param session: database session
        :param base_model: subnet pool model data
        :param result: subnet pool dictionary to extend

        Runs inside the transaction on session, adding this driver's
        extended attributes to a subnet pool dictionary used for
        mechanism driver calls and/or returned as the result of a
        subnet pool operation.
        """
        pass

    def process_create_address_scope(self, plugin_context, data, result):
        """Process extended attributes for create address scope.

        :param plugin_context: plugin request context
        :param data: dictionary of incoming address scope data
        :param result: address scope dictionary to extend

        Runs inside the transaction on plugin_context.session to
        validate and persist any extended address scope attributes
        this driver defines. The extended values must also be written
        into result.
        """
        pass

    def process_update_address_scope(self, plugin_context, data, result):
        """Process extended attributes for update address scope.

        :param plugin_context: plugin request context
        :param data: dictionary of incoming address scope data
        :param result: address scope dictionary to extend

        Runs inside the transaction on plugin_context.session to
        validate and update any extended address scope attributes this
        driver defines. The extended values — updated or not — must
        also be written into result.
        """
        pass

    def extend_address_scope_dict(self, session, base_model, result):
        """Add extended attributes to address scope dictionary.

        :param session: database session
        :param base_model: address scope model data
        :param result: address scope dictionary to extend

        Runs inside the transaction on session, adding this driver's
        extended attributes to an address scope dictionary used for
        mechanism driver calls and/or returned as the result of an
        address scope operation.
        """
        pass
| 37.877285
| 78
| 0.695664
| 1,840
| 14,507
| 5.42663
| 0.135326
| 0.067301
| 0.025238
| 0.036054
| 0.813821
| 0.799399
| 0.775363
| 0.764647
| 0.720381
| 0.707161
| 0
| 0.000937
| 0.264493
| 14,507
| 382
| 79
| 37.97644
| 0.934864
| 0.727304
| 0
| 0.57377
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.377049
| false
| 0.377049
| 0.04918
| 0
| 0.491803
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
c33644d5d8a064a54c79001c4011944d4496b501
| 26,049
|
py
|
Python
|
fdasrsf/pcr_regression.py
|
kiranvad/fdasrsf_python
|
e45efa35f53eb04ddfef3dbfbfaf893084413755
|
[
"BSD-3-Clause"
] | null | null | null |
fdasrsf/pcr_regression.py
|
kiranvad/fdasrsf_python
|
e45efa35f53eb04ddfef3dbfbfaf893084413755
|
[
"BSD-3-Clause"
] | null | null | null |
fdasrsf/pcr_regression.py
|
kiranvad/fdasrsf_python
|
e45efa35f53eb04ddfef3dbfbfaf893084413755
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Warping Invariant PCR Regression using SRSF
moduleauthor:: J. Derek Tucker <jdtuck@sandia.gov>
"""
import numpy as np
import fdasrsf as fs
import fdasrsf.utility_functions as uf
import fdasrsf.fPCA as fpca
import fdasrsf.regression as rg
import fdasrsf.geometry as geo
from scipy import dot
from scipy.linalg import inv, norm
from scipy.integrate import trapz, cumtrapz
from scipy.optimize import fmin_l_bfgs_b
class elastic_pcr_regression:
    """
    This class provides elastic pcr regression for functional data using the
    SRVF framework accounting for warping

    Usage: obj = elastic_pcr_regression(f, y, time)

    :param f: (M,N) matrix defining N functions of M samples
    :param y: response vector of length N
    :param warp_data: fdawarp object of alignment
    :param pca: class dependent on fPCA method used object of fPCA
    :param alpha: intercept
    :param b: coefficient vector
    :param SSE: sum of squared errors

    Author : J. D. Tucker (JDT) <jdtuck AT sandia.gov>
    Date : 18-Mar-2018
    """

    def __init__(self, f, y, time):
        """
        Construct an instance of the elastic_pcr_regression class

        :param f: numpy ndarray of shape (M,N) of N functions with M samples
        :param y: response vector
        :param time: vector of size M describing the sample points
        """
        if f.shape[0] != time.shape[0]:
            raise Exception('Columns of f and time must be equal')

        self.f = f
        self.y = y
        self.time = time

    def calc_model(self, pca_method="combined", no=5,
                   smooth_data=False, sparam=25, parallel=False,
                   C=None):
        """
        This function identifies a regression model with phase-variability
        using elastic pca

        :param pca_method: string specifying pca method (options = "combined",
            "vert", or "horiz", default = "combined")
        :param no: scalar specifying number of principal components (default=5)
        :param smooth_data: smooth data using box filter (default = F)
        :param sparam: number of times to apply box filter (default = 25)
        :param parallel: run in parallel (default = F)
        :param C: scale balance parameter for combined method (default = None)
        """
        if smooth_data:
            self.f = fs.smooth_data(self.f, sparam)

        N1 = self.f.shape[1]

        # Align Data
        self.warp_data = fs.fdawarp(self.f, self.time)
        self.warp_data.srsf_align(parallel=parallel)

        # Calculate PCA
        if pca_method == 'combined':
            # NOTE(review): C is accepted for the combined method but is not
            # forwarded to fdajpca here -- confirm intended wiring.
            self.pca = fpca.fdajpca(self.warp_data)
        elif pca_method == 'vert':
            self.pca = fpca.fdavpca(self.warp_data)
        elif pca_method == 'horiz':
            self.pca = fpca.fdahpca(self.warp_data)
        else:
            raise Exception('Invalid fPCA Method')
        self.pca.calc_fpca(no)

        # OLS using the PCA basis; lam/R form an (unused, zero) ridge penalty
        # kept so a regularized fit can be enabled later.
        lam = 0
        R = 0
        Phi = np.ones((N1, no + 1))
        Phi[:, 1:(no + 1)] = self.pca.coef
        # np.dot replaces the removed scipy top-level alias `dot`
        xx = np.dot(Phi.T, Phi)
        inv_xx = inv(xx + lam * R)
        xy = np.dot(Phi.T, self.y)
        b = np.dot(inv_xx, xy)

        alpha = b[0]
        b = b[1:no + 1]

        # compute the SSE on the training data
        int_X = np.zeros(N1)
        for ii in range(N1):
            int_X[ii] = np.sum(self.pca.coef[ii, :] * b)

        SSE = np.sum((self.y - alpha - int_X) ** 2)

        self.alpha = alpha
        self.b = b
        self.SSE = SSE
        self.pca_method = pca_method

        return

    def predict(self, newdata=None):
        """
        This function performs prediction on regression model on new data if
        available or current stored data in object

        Usage:  obj.predict()
                obj.predict(newdata)

        :param newdata: dict containing new data for prediction (needs the
            keys below, if None predicts on training data)
        :type newdata: dict
        :param f: (M,N) matrix of functions
        :param time: vector of time points
        :param y: truth if available
        :param smooth: smooth data if needed
        :param sparam: number of times to run filter
        """
        omethod = self.warp_data.method
        lam = self.warp_data.lam
        M = self.time.shape[0]

        if newdata is not None:
            f = newdata['f']
            time = newdata['time']
            y = newdata['y']
            if newdata['smooth']:
                sparam = newdata['sparam']
                f = fs.smooth_data(f, sparam)

            q1 = fs.f_to_srsf(f, time)
            n = q1.shape[1]
            self.y_pred = np.zeros(n)
            mq = self.warp_data.mqn
            fn = np.zeros((M, n))
            qn = np.zeros((M, n))
            gam = np.zeros((M, n))
            for ii in range(n):
                # align each new function to the training Karcher mean
                gam[:, ii] = uf.optimum_reparam(mq, time, q1[:, ii],
                                                omethod, lam)
                fn[:, ii] = uf.warp_f_gamma(time, f[:, ii], gam[:, ii])
                qn[:, ii] = uf.f_to_srsf(fn[:, ii], time)

            U = self.pca.U
            no = U.shape[1]

            # project the aligned data onto the fitted fPCA basis
            if self.pca.__class__.__name__ == 'fdajpca':
                m_new = np.sign(fn[self.pca.id, :]) * \
                    np.sqrt(np.abs(fn[self.pca.id, :]))
                qn1 = np.vstack((qn, m_new))
                C = self.pca.C
                TT = self.time.shape[0]
                mu_g = self.pca.mu_g
                mu_psi = self.pca.mu_psi
                vec = np.zeros((M, n))
                psi = np.zeros((TT, n))
                binsize = np.mean(np.diff(self.time))
                for i in range(n):
                    psi[:, i] = np.sqrt(np.gradient(gam[:, i], binsize))
                    out, theta = geo.inv_exp_map(mu_psi, psi[:, i])
                    vec[:, i] = out

                g = np.vstack((qn1, C * vec))
                a = np.zeros((n, no))
                for i in range(n):
                    for j in range(no):
                        tmp = (g[:, i] - mu_g)
                        a[i, j] = np.dot(tmp.T, U[:, j])
            elif self.pca.__class__.__name__ == 'fdavpca':
                m_new = np.sign(fn[self.pca.id, :]) * \
                    np.sqrt(np.abs(fn[self.pca.id, :]))
                qn1 = np.vstack((qn, m_new))
                a = np.zeros((n, no))
                for i in range(n):
                    for j in range(no):
                        tmp = (qn1[:, i] - self.pca.mqn)
                        a[i, j] = np.dot(tmp.T, U[:, j])
            elif self.pca.__class__.__name__ == 'fdahpca':
                a = np.zeros((n, no))
                mu_psi = self.pca.psi_mu
                vec = np.zeros((M, n))
                TT = self.time.shape[0]
                psi = np.zeros((TT, n))
                binsize = np.mean(np.diff(self.time))
                for i in range(n):
                    psi[:, i] = np.sqrt(np.gradient(gam[:, i], binsize))
                    out, theta = geo.inv_exp_map(mu_psi, psi[:, i])
                    vec[:, i] = out

                vm = self.pca.vec.mean(axis=1)
                for i in range(n):
                    for j in range(no):
                        a[i, j] = np.sum(np.dot(vec[:, i] - vm, U[:, j]))
            else:
                raise Exception('Invalid fPCA Method')

            for ii in range(n):
                self.y_pred[ii] = self.alpha + np.dot(a[ii, :], self.b)

            if y is None:
                self.SSE = np.nan
            else:
                self.SSE = np.sum((y - self.y_pred) ** 2)
        else:
            # predict on the training coefficients already stored in pca
            n = self.pca.coef.shape[0]
            self.y_pred = np.zeros(n)
            for ii in range(n):
                self.y_pred[ii] = self.alpha + np.dot(self.pca.coef[ii, :],
                                                      self.b)

            self.SSE = np.sum((self.y - self.y_pred) ** 2)

        return
class elastic_lpcr_regression:
    """
    This class provides elastic logistic pcr regression for functional
    data using the SRVF framework accounting for warping

    Usage: obj = elastic_lpcr_regression(f, y, time)

    :param f: (M,N) matrix defining N functions of M samples
    :param y: response vector of length N (-1/1)
    :param warp_data: fdawarp object of alignment
    :param pca: class dependent on fPCA method used object of fPCA
    :param alpha: intercept
    :param b: coefficient vector
    :param Loss: logistic loss
    :param PC: probability of classification
    :param ylabels: predicted labels

    Author : J. D. Tucker (JDT) <jdtuck AT sandia.gov>
    Date : 18-Mar-2018
    """

    def __init__(self, f, y, time):
        """
        Construct an instance of the elastic_lpcr_regression class

        :param f: numpy ndarray of shape (M,N) of N functions with M samples
        :param y: response vector (-1/1)
        :param time: vector of size M describing the sample points
        """
        if f.shape[0] != time.shape[0]:
            raise Exception('Columns of f and time must be equal')

        self.f = f
        self.y = y
        self.time = time

    def calc_model(self, pca_method="combined", no=5,
                   smooth_data=False, sparam=25, parallel=False):
        """
        This function identifies a logistic regression model with
        phase-variability using elastic pca

        :param pca_method: string specifying pca method (options = "combined",
            "vert", or "horiz", default = "combined")
        :param no: scalar specifying number of principal components (default=5)
        :param smooth_data: smooth data using box filter (default = F)
        :param sparam: number of times to apply box filter (default = 25)
        :param parallel: calculate in parallel (default = F)
        """
        if smooth_data:
            self.f = fs.smooth_data(self.f, sparam)

        N1 = self.f.shape[1]

        # Align Data
        self.warp_data = fs.fdawarp(self.f, self.time)
        self.warp_data.srsf_align(parallel=parallel)

        # Calculate PCA
        if pca_method == 'combined':
            self.pca = fpca.fdajpca(self.warp_data)
        elif pca_method == 'vert':
            self.pca = fpca.fdavpca(self.warp_data)
        elif pca_method == 'horiz':
            self.pca = fpca.fdahpca(self.warp_data)
        else:
            raise Exception('Invalid fPCA Method')
        self.pca.calc_fpca(no)

        # Design matrix: intercept column + PCA coefficients
        Phi = np.ones((N1, no + 1))
        Phi[:, 1:(no + 1)] = self.pca.coef

        # Find alpha and beta using l_bfgs
        b0 = np.zeros(no + 1)
        out = fmin_l_bfgs_b(rg.logit_loss, b0, fprime=rg.logit_gradient,
                            args=(Phi, self.y), pgtol=1e-10, maxiter=200,
                            maxfun=250, factr=1e-30)
        b = out[0]
        alpha = b[0]

        # compute the Loss at the optimum
        LL = rg.logit_loss(b, Phi, self.y)

        b = b[1:no + 1]

        self.alpha = alpha
        self.b = b
        self.LL = LL
        self.pca_method = pca_method

        return

    def predict(self, newdata=None):
        """
        This function performs prediction on regression model on new data if
        available or current stored data in object

        Usage:  obj.predict()
                obj.predict(newdata)

        :param newdata: dict containing new data for prediction (needs the
            keys below, if None predicts on training data)
        :type newdata: dict
        :param f: (M,N) matrix of functions
        :param time: vector of time points
        :param y: truth if available
        :param smooth: smooth data if needed
        :param sparam: number of times to run filter
        """
        omethod = self.warp_data.method
        lam = self.warp_data.lam
        M = self.time.shape[0]

        if newdata is not None:
            f = newdata['f']
            time = newdata['time']
            y = newdata['y']
            if newdata['smooth']:
                sparam = newdata['sparam']
                f = fs.smooth_data(f, sparam)

            q1 = fs.f_to_srsf(f, time)
            n = q1.shape[1]
            self.y_pred = np.zeros(n)
            mq = self.warp_data.mqn
            fn = np.zeros((M, n))
            qn = np.zeros((M, n))
            gam = np.zeros((M, n))
            for ii in range(n):
                # align each new function to the training Karcher mean
                # (lam passed for consistency with elastic_pcr_regression)
                gam[:, ii] = uf.optimum_reparam(mq, time, q1[:, ii],
                                                omethod, lam)
                fn[:, ii] = uf.warp_f_gamma(time, f[:, ii], gam[:, ii])
                qn[:, ii] = uf.f_to_srsf(fn[:, ii], time)

            U = self.pca.U
            no = U.shape[1]

            # project the aligned data onto the fitted fPCA basis
            if self.pca.__class__.__name__ == 'fdajpca':
                m_new = np.sign(fn[self.pca.id, :]) * \
                    np.sqrt(np.abs(fn[self.pca.id, :]))
                qn1 = np.vstack((qn, m_new))
                C = self.pca.C
                TT = self.time.shape[0]
                mu_g = self.pca.mu_g
                mu_psi = self.pca.mu_psi
                vec = np.zeros((M, n))
                psi = np.zeros((TT, n))
                binsize = np.mean(np.diff(self.time))
                for i in range(n):
                    psi[:, i] = np.sqrt(np.gradient(gam[:, i], binsize))
                    out, theta = geo.inv_exp_map(mu_psi, psi[:, i])
                    vec[:, i] = out

                g = np.vstack((qn1, C * vec))
                a = np.zeros((n, no))
                for i in range(n):
                    for j in range(no):
                        tmp = (g[:, i] - mu_g)
                        a[i, j] = np.dot(tmp.T, U[:, j])
            elif self.pca.__class__.__name__ == 'fdavpca':
                m_new = np.sign(fn[self.pca.id, :]) * \
                    np.sqrt(np.abs(fn[self.pca.id, :]))
                qn1 = np.vstack((qn, m_new))
                a = np.zeros((n, no))
                for i in range(n):
                    for j in range(no):
                        tmp = (qn1[:, i] - self.pca.mqn)
                        a[i, j] = np.dot(tmp.T, U[:, j])
            elif self.pca.__class__.__name__ == 'fdahpca':
                a = np.zeros((n, no))
                mu_psi = self.pca.psi_mu
                vec = np.zeros((M, n))
                TT = self.time.shape[0]
                psi = np.zeros((TT, n))
                binsize = np.mean(np.diff(self.time))
                for i in range(n):
                    psi[:, i] = np.sqrt(np.gradient(gam[:, i], binsize))
                    out, theta = geo.inv_exp_map(mu_psi, psi[:, i])
                    vec[:, i] = out

                vm = self.pca.vec.mean(axis=1)
                for i in range(n):
                    for j in range(no):
                        a[i, j] = np.sum(np.dot(vec[:, i] - vm, U[:, j]))
            else:
                raise Exception('Invalid fPCA Method')

            for ii in range(n):
                self.y_pred[ii] = self.alpha + np.sum(a[ii, :] * self.b)

            if y is None:
                self.y_pred = rg.phi(self.y_pred)
                self.y_labels = np.ones(n)
                self.y_labels[self.y_pred < 0.5] = -1
                self.PC = np.nan
            else:
                self.y_pred = rg.phi(self.y_pred)
                self.y_labels = np.ones(n)
                self.y_labels[self.y_pred < 0.5] = -1
                TP = np.sum(y[self.y_labels == 1] == 1)
                FP = np.sum(y[self.y_labels == -1] == 1)
                TN = np.sum(y[self.y_labels == -1] == -1)
                FN = np.sum(y[self.y_labels == 1] == -1)
                self.PC = (TP + TN) / (TP + FP + FN + TN)
        else:
            # predict on the training coefficients already stored in pca
            n = self.pca.coef.shape[0]
            self.y_pred = np.zeros(n)
            for ii in range(n):
                self.y_pred[ii] = self.alpha + np.dot(self.pca.coef[ii, :],
                                                      self.b)

            self.y_pred = rg.phi(self.y_pred)
            self.y_labels = np.ones(n)
            self.y_labels[self.y_pred < 0.5] = -1
            TP = np.sum(self.y[self.y_labels == 1] == 1)
            FP = np.sum(self.y[self.y_labels == -1] == 1)
            TN = np.sum(self.y[self.y_labels == -1] == -1)
            FN = np.sum(self.y[self.y_labels == 1] == -1)
            self.PC = (TP + TN) / (TP + FP + FN + TN)

        return
class elastic_mlpcr_regression:
    """
    This class provides elastic multinomial logistic pcr regression for
    functional data using the SRVF framework accounting for warping

    Usage: obj = elastic_mlpcr_regression(f, y, time)

    :param f: (M,N) matrix defining N functions of M samples
    :param y: response vector of length N
    :param Y: coded label matrix
    :param warp_data: fdawarp object of alignment
    :param pca: class dependent on fPCA method used object of fPCA
    :param alpha: intercept
    :param b: coefficient vector
    :param Loss: logistic loss
    :param PC: probability of classification
    :param ylabels: predicted labels

    Author : J. D. Tucker (JDT) <jdtuck AT sandia.gov>
    Date : 18-Mar-2018
    """

    def __init__(self, f, y, time):
        """
        Construct an instance of the elastic_mlpcr_regression class

        :param f: numpy ndarray of shape (M,N) of N functions with M samples
        :param y: response vector with integer labels 1..m
        :param time: vector of size M describing the sample points
        """
        if f.shape[0] != time.shape[0]:
            raise Exception('Columns of f and time must be equal')

        self.f = f
        self.y = y
        self.time = time

        N1 = f.shape[1]

        # Code labels into a one-hot matrix Y; assumes labels are 1..m
        m = y.max()
        self.n_classes = m
        self.Y = np.zeros((N1, m), dtype=int)
        for ii in range(N1):
            self.Y[ii, y[ii] - 1] = 1

    def calc_model(self, pca_method="combined", no=5,
                   smooth_data=False, sparam=25, parallel=False):
        """
        This function identifies a multinomial logistic regression model with
        phase-variability using elastic pca

        :param pca_method: string specifying pca method (options = "combined",
            "vert", or "horiz", default = "combined")
        :param no: scalar specifying number of principal components (default=5)
        :param smooth_data: smooth data using box filter (default = F)
        :param sparam: number of times to apply box filter (default = 25)
        :param parallel: run model in parallel (default = F)
        """
        if smooth_data:
            self.f = fs.smooth_data(self.f, sparam)

        N1 = self.f.shape[1]

        # Align Data
        self.warp_data = fs.fdawarp(self.f, self.time)
        self.warp_data.srsf_align(parallel=parallel)

        # Calculate PCA
        if pca_method == 'combined':
            self.pca = fpca.fdajpca(self.warp_data)
        elif pca_method == 'vert':
            self.pca = fpca.fdavpca(self.warp_data)
        elif pca_method == 'horiz':
            self.pca = fpca.fdahpca(self.warp_data)
        else:
            raise Exception('Invalid fPCA Method')
        self.pca.calc_fpca(no)

        # Design matrix: intercept column + PCA coefficients
        Phi = np.ones((N1, no + 1))
        Phi[:, 1:(no + 1)] = self.pca.coef

        # Find alpha and beta using l_bfgs
        b0 = np.zeros(self.n_classes * (no + 1))
        out = fmin_l_bfgs_b(rg.mlogit_loss, b0, fprime=rg.mlogit_gradient,
                            args=(Phi, self.Y), pgtol=1e-10, maxiter=200,
                            maxfun=250, factr=1e-30)
        b = out[0]
        B0 = b.reshape(no + 1, self.n_classes)
        alpha = B0[0, :]

        # compute the Loss at the optimum against the coded label matrix
        # (was self.y, which is the raw label vector -- mlogit_loss is
        # optimized against self.Y above)
        LL = rg.mlogit_loss(b, Phi, self.Y)

        b = B0[1:no + 1, :]

        self.alpha = alpha
        self.b = b
        self.LL = LL
        self.pca_method = pca_method

        return

    def predict(self, newdata=None):
        """
        This function performs prediction on regression model on new data if
        available or current stored data in object

        Usage:  obj.predict()
                obj.predict(newdata)

        :param newdata: dict containing new data for prediction (needs the
            keys below, if None predicts on training data)
        :type newdata: dict
        :param f: (M,N) matrix of functions
        :param time: vector of time points
        :param y: truth if available
        :param smooth: smooth data if needed
        :param sparam: number of times to run filter
        """
        omethod = self.warp_data.method
        lam = self.warp_data.lam
        m = self.n_classes
        M = self.time.shape[0]

        if newdata is not None:
            f = newdata['f']
            time = newdata['time']
            y = newdata['y']
            if newdata['smooth']:
                sparam = newdata['sparam']
                f = fs.smooth_data(f, sparam)

            q1 = fs.f_to_srsf(f, time)
            n = q1.shape[1]
            self.y_pred = np.zeros((n, m))
            mq = self.warp_data.mqn
            fn = np.zeros((M, n))
            qn = np.zeros((M, n))
            gam = np.zeros((M, n))
            for ii in range(n):
                # align each new function to the training Karcher mean
                # (lam passed for consistency with elastic_pcr_regression)
                gam[:, ii] = uf.optimum_reparam(mq, time, q1[:, ii],
                                                omethod, lam)
                fn[:, ii] = uf.warp_f_gamma(time, f[:, ii], gam[:, ii])
                qn[:, ii] = uf.f_to_srsf(fn[:, ii], time)

            U = self.pca.U
            no = U.shape[1]

            # project the aligned data onto the fitted fPCA basis
            if self.pca.__class__.__name__ == 'fdajpca':
                m_new = np.sign(fn[self.pca.id, :]) * \
                    np.sqrt(np.abs(fn[self.pca.id, :]))
                qn1 = np.vstack((qn, m_new))
                C = self.pca.C
                TT = self.time.shape[0]
                mu_g = self.pca.mu_g
                mu_psi = self.pca.mu_psi
                vec = np.zeros((M, n))
                psi = np.zeros((TT, n))
                binsize = np.mean(np.diff(self.time))
                for i in range(n):
                    psi[:, i] = np.sqrt(np.gradient(gam[:, i], binsize))
                    out, theta = geo.inv_exp_map(mu_psi, psi[:, i])
                    vec[:, i] = out

                g = np.vstack((qn1, C * vec))
                a = np.zeros((n, no))
                for i in range(n):
                    for j in range(no):
                        tmp = (g[:, i] - mu_g)
                        a[i, j] = np.dot(tmp.T, U[:, j])
            elif self.pca.__class__.__name__ == 'fdavpca':
                m_new = np.sign(fn[self.pca.id, :]) * \
                    np.sqrt(np.abs(fn[self.pca.id, :]))
                qn1 = np.vstack((qn, m_new))
                a = np.zeros((n, no))
                for i in range(n):
                    for j in range(no):
                        tmp = (qn1[:, i] - self.pca.mqn)
                        a[i, j] = np.dot(tmp.T, U[:, j])
            elif self.pca.__class__.__name__ == 'fdahpca':
                a = np.zeros((n, no))
                mu_psi = self.pca.psi_mu
                vec = np.zeros((M, n))
                TT = self.time.shape[0]
                psi = np.zeros((TT, n))
                binsize = np.mean(np.diff(self.time))
                for i in range(n):
                    psi[:, i] = np.sqrt(np.gradient(gam[:, i], binsize))
                    out, theta = geo.inv_exp_map(mu_psi, psi[:, i])
                    vec[:, i] = out

                vm = self.pca.vec.mean(axis=1)
                for i in range(n):
                    for j in range(no):
                        a[i, j] = np.sum(np.dot(vec[:, i] - vm, U[:, j]))
            else:
                raise Exception('Invalid fPCA Method')

            for ii in range(n):
                for jj in range(m):
                    self.y_pred[ii, jj] = self.alpha[jj] + \
                        np.sum(a[ii, :] * self.b[:, jj])

            if y is None:
                self.y_pred = rg.phi(self.y_pred.reshape((1, n * m)))
                self.y_pred = self.y_pred.reshape((n, m))
                self.y_labels = np.argmax(self.y_pred, axis=1)
                self.PC = np.nan
            else:
                self.y_pred = rg.phi(self.y_pred.reshape((1, n * m)))
                self.y_pred = self.y_pred.reshape((n, m))
                self.y_labels = np.argmax(self.y_pred, axis=1)
                # NOTE(review): y is coded 1..m while y_labels from argmax is
                # 0..m-1 -- the per-class comparisons below inherit that
                # offset from upstream; confirm intended label coding.
                self.PC = np.zeros(m)
                cls_set = np.arange(0, m)
                for ii in range(m):
                    cls_sub = np.setdiff1d(cls_set, ii)
                    TP = np.sum(y[self.y_labels == ii] == ii)
                    FP = np.sum(y[np.in1d(self.y_labels, cls_sub)] == ii)
                    TN = np.sum(y[np.in1d(self.y_labels, cls_sub)] ==
                                self.y_labels[np.in1d(self.y_labels, cls_sub)])
                    FN = np.sum(np.in1d(y[self.y_labels == ii], cls_sub))
                    self.PC[ii] = (TP + TN) / (TP + FP + FN + TN)

                self.PCo = np.sum(y == self.y_labels) / self.y_labels.shape[0]
        else:
            # predict on the training coefficients already stored in pca
            n = self.pca.coef.shape[0]
            self.y_pred = np.zeros((n, m))
            for ii in range(n):
                for jj in range(m):
                    self.y_pred[ii, jj] = self.alpha[jj] + \
                        np.sum(self.pca.coef[ii, :] * self.b[:, jj])

            self.y_pred = rg.phi(self.y_pred.reshape((1, n * m)))
            self.y_pred = self.y_pred.reshape((n, m))
            self.y_labels = np.argmax(self.y_pred, axis=1)
            self.PC = np.zeros(m)
            cls_set = np.arange(0, m)
            for ii in range(m):
                cls_sub = np.setdiff1d(cls_set, ii)
                TP = np.sum(self.y[self.y_labels == ii] == ii)
                FP = np.sum(self.y[np.in1d(self.y_labels, cls_sub)] == ii)
                TN = np.sum(self.y[np.in1d(self.y_labels, cls_sub)] ==
                            self.y_labels[np.in1d(self.y_labels, cls_sub)])
                # was `y[...]`: undefined name in this branch (NameError);
                # the training branch must use self.y
                FN = np.sum(np.in1d(self.y[self.y_labels == ii], cls_sub))
                self.PC[ii] = (TP + TN) / (TP + FP + FN + TN)

            # was `y == self.y_labels`: same undefined-name fix
            self.PCo = np.sum(self.y == self.y_labels) / \
                self.y_labels.shape[0]

        return
| 36.688732
| 121
| 0.503436
| 3,623
| 26,049
| 3.520839
| 0.078664
| 0.035278
| 0.024459
| 0.016933
| 0.928426
| 0.91502
| 0.902634
| 0.900596
| 0.894167
| 0.87794
| 0
| 0.015258
| 0.370993
| 26,049
| 709
| 122
| 36.74048
| 0.763259
| 0.230066
| 0
| 0.857798
| 0
| 0
| 0.02154
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020642
| false
| 0
| 0.022936
| 0
| 0.06422
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c34f387cf9de69d5f53cab6a2acedbc51b216b8a
| 241,348
|
py
|
Python
|
tests/test_portfolio.py
|
davidandreoletti/vectorbt
|
0cd596e1be975d4af6379d883090ffb5b7375d08
|
[
"Apache-2.0"
] | null | null | null |
tests/test_portfolio.py
|
davidandreoletti/vectorbt
|
0cd596e1be975d4af6379d883090ffb5b7375d08
|
[
"Apache-2.0"
] | null | null | null |
tests/test_portfolio.py
|
davidandreoletti/vectorbt
|
0cd596e1be975d4af6379d883090ffb5b7375d08
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import pandas as pd
from numba import njit, typeof
from numba.typed import List
from datetime import datetime, timedelta
import pytest
import vectorbt as vbt
from vectorbt.portfolio.enums import *
from vectorbt.generic.enums import drawdown_dt
from vectorbt import settings
from vectorbt.utils.random import set_seed
from vectorbt.portfolio import nb
from tests.utils import record_arrays_close
# Fixed RNG seed used by tests that need reproducible randomness.
seed = 42

# One day as a numpy timedelta64 (86400000000000 ns).
day_dt = np.timedelta64(86400000000000)

settings.returns['year_freq'] = '252 days'  # same as empyrical

# Small monotonically increasing price series over 5 consecutive days.
price = pd.Series([1., 2., 3., 4., 5.], index=pd.Index([
    datetime(2020, 1, 1),
    datetime(2020, 1, 2),
    datetime(2020, 1, 3),
    datetime(2020, 1, 4),
    datetime(2020, 1, 5)
]))
# Same series tiled into three columns 'a', 'b', 'c'.
price_wide = price.vbt.tile(3, keys=['a', 'b', 'c'])
# Large random price frame (1000 rows; NOTE: not seeded here) with a daily
# index starting 2018-01-01, tiled to 1000 columns for stress tests.
big_price = pd.DataFrame(np.random.uniform(size=(1000,)))
big_price.index = [datetime(2018, 1, 1) + timedelta(days=i) for i in range(1000)]
big_price_wide = big_price.vbt.tile(1000)
# ############# nb ############# #
def assert_same_tuple(tup1, tup2):
    """Assert element-wise equality of two tuples, treating NaN == NaN."""
    for idx, left in enumerate(tup1):
        right = tup2[idx]
        assert left == right or (np.isnan(left) and np.isnan(right))
def test_execute_order_nb():
# Errors, ignored and rejected orders
with pytest.raises(Exception) as e_info:
_ = nb.create_order_nb()
with pytest.raises(Exception) as e_info:
_ = nb.create_order_nb(10)
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(-100., 100., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(10, 10))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(np.nan, 100., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(10, 10))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(100., np.inf, 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(10, 10))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(100., np.nan, 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(10, 10))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., np.nan, 100., 10., 1100., 0, 0),
nb.create_order_nb(10, 10))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., -10., 100., 10., 1100., 0, 0),
nb.create_order_nb(10, 10))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., np.nan, 10., 1100., 0, 0),
nb.create_order_nb(10, 10))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(10, 10, size_type=-2))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(10, 10, size_type=20))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(10, 10, direction=-2))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(10, 10, direction=20))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(100., -100., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(10, 10, direction=Direction.LongOnly))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(10, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(10, np.inf))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(10, -10))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(10, 10, fees=np.inf))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(10, 10, fees=-1))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(10, 10, fixed_fees=np.inf))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(10, 10, fixed_fees=-1))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(10, 10, slippage=np.inf))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(10, 10, slippage=-1))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(10, 10, min_size=np.inf))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(10, 10, min_size=-1))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(10, 10, max_size=0))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(10, 10, max_size=-10))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(10, 10, reject_prob=np.nan))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(10, 10, reject_prob=-1))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(10, 10, reject_prob=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., np.nan, 0, 0),
nb.create_order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=3))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., -10., 0, 0),
nb.create_order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=4))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.create_order_nb(10, 10, size_type=SizeType.Value))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.create_order_nb(10, 10, size_type=SizeType.Value))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.create_order_nb(10, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.create_order_nb(10, 10, size_type=SizeType.TargetValue))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.create_order_nb(10, 10, size_type=SizeType.TargetValue))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.create_order_nb(10, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(np.inf, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(-np.inf, 10, direction=Direction.All))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 10., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(0, 10))
assert exec_state == ExecuteOrderState(cash=100.0, position=10.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=5))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(15, 10, max_size=10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=9))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(10, 10, reject_prob=1.))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=10))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.create_order_nb(10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.create_order_nb(10, 10, direction=Direction.All))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100, 0., np.inf, np.nan, 1100., 0, 0),
nb.create_order_nb(np.inf, 10, direction=Direction.LongOnly))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.create_order_nb(np.inf, 10, direction=Direction.All))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(-10, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.create_order_nb(-np.inf, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception) as e_info:
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.create_order_nb(-np.inf, 10, direction=Direction.All))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(-10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(10, 10, fixed_fees=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(100, 10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(-10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(-200, 10, direction=Direction.LongOnly, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.create_order_nb(-10, 10, fixed_fees=1000))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
# Calculations
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.create_order_nb(10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.create_order_nb(100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.create_order_nb(-10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=180.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.create_order_nb(-100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=909.0, position=-100.0, debt=900.0, free_cash=-891.0)
assert_same_tuple(order_result, OrderResult(
size=100.0, price=9.0, fees=91.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.create_order_nb(10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.create_order_nb(-10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.create_order_nb(100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.create_order_nb(-100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.create_order_nb(100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.create_order_nb(-100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.create_order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.create_order_nb(-1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.create_order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.create_order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=7.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.create_order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=125.0, position=-2.5, debt=25.0, free_cash=75.0)
assert_same_tuple(order_result, OrderResult(
size=7.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.create_order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.create_order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.create_order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.create_order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-2.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.create_order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.create_order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.create_order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=-2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.create_order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-7.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.create_order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-10.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.create_order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 0., 100., 10., 100., 0, 0),
nb.create_order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.create_order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(150., -5., 0., 150., 10., 100., 0, 0),
nb.create_order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=300.0, position=-20.0, debt=150.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 10., 0., -50., 10., 100., 0, 0),
nb.create_order_nb(-20, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=150.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 1., 0., -50., 10., 100., 0, 0),
nb.create_order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=10.0, position=0.0, debt=0.0, free_cash=-40.0)
assert_same_tuple(order_result, OrderResult(
size=1.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., -100., 10., 100., 0, 0),
nb.create_order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=-100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., 100., 10., 100., 0, 0),
nb.create_order_nb(-20, 10, fees=0.1, slippage=0.1, fixed_fees=1., lock_cash=True))
assert exec_state == ExecuteOrderState(cash=80.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
def test_build_call_seq_nb():
    """Check that the numba and pure-Python call-sequence builders agree."""
    group_lens = np.array([1, 2, 3, 4])
    shape = (10, 10)
    # Deterministic orderings must match element for element.
    for seq_type in (CallSeqType.Default, CallSeqType.Reversed):
        np.testing.assert_array_equal(
            nb.build_call_seq_nb(shape, group_lens, seq_type),
            nb.build_call_seq(shape, group_lens, seq_type)
        )
    # Random ordering must match when both builders start from the same seed.
    set_seed(seed)
    from_nb = nb.build_call_seq_nb(shape, group_lens, CallSeqType.Random)
    set_seed(seed)
    from_py = nb.build_call_seq(shape, group_lens, CallSeqType.Random)
    np.testing.assert_array_equal(from_nb, from_py)
# ############# from_signals ############# #

# Shared signal fixtures: entries fire on the first three bars, exits on the
# last three, so bar index 2 carries a simultaneous entry+exit conflict.
entries = pd.Series([True, True, True, False, False], index=price.index)
entries_wide = entries.vbt.tile(3, keys=['a', 'b', 'c'])
exits = pd.Series([False, False, True, True, True], index=price.index)
exits_wide = exits.vbt.tile(3, keys=['a', 'b', 'c'])
def from_signals_all(price=price, entries=entries, exits=exits, **kwargs):
    """Build a `Portfolio` from signals with two-sided (long and short) direction.

    Defaults bind the module-level fixtures at definition time; extra keyword
    arguments are forwarded to `vbt.Portfolio.from_signals`.
    """
    return vbt.Portfolio.from_signals(price, entries, exits, direction='all', **kwargs)
def from_signals_longonly(price=price, entries=entries, exits=exits, **kwargs):
    """Build a `Portfolio` from signals restricted to long positions only.

    Defaults bind the module-level fixtures at definition time; extra keyword
    arguments are forwarded to `vbt.Portfolio.from_signals`.
    """
    return vbt.Portfolio.from_signals(price, entries, exits, direction='longonly', **kwargs)
def from_signals_shortonly(price=price, entries=entries, exits=exits, **kwargs):
    """Build a `Portfolio` from signals restricted to short positions only.

    Defaults bind the module-level fixtures at definition time; extra keyword
    arguments are forwarded to `vbt.Portfolio.from_signals`.
    """
    return vbt.Portfolio.from_signals(price, entries, exits, direction='shortonly', **kwargs)
class TestFromSignals:
def test_one_column(self):
    """Single-column portfolio: order records per direction plus wrapper metadata."""
    record_arrays_close(
        from_signals_all().order_records,
        np.array([
            (0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 200., 4., 0., 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_longonly().order_records,
        np.array([
            (0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 100., 4., 0., 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_shortonly().order_records,
        np.array([
            (0, 0, 0, 100., 1., 0., 1), (1, 3, 0, 50., 4., 0., 0)
        ], dtype=order_dt)
    )
    # Wrapper metadata: 1-dim series portfolio, daily frequency, no grouping.
    portfolio = from_signals_all()
    pd.testing.assert_index_equal(
        portfolio.wrapper.index,
        pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
    )
    pd.testing.assert_index_equal(
        portfolio.wrapper.columns,
        pd.Int64Index([0], dtype='int64')
    )
    assert portfolio.wrapper.ndim == 1
    assert portfolio.wrapper.freq == day_dt
    assert portfolio.wrapper.grouper.group_by is None
def test_multiple_columns(self):
    """Wide-price portfolio: identical per-column orders and 2-dim wrapper metadata."""
    record_arrays_close(
        from_signals_all(price=price_wide).order_records,
        np.array([
            (0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 200., 4., 0., 1),
            (2, 0, 1, 100., 1., 0., 0), (3, 3, 1, 200., 4., 0., 1),
            (4, 0, 2, 100., 1., 0., 0), (5, 3, 2, 200., 4., 0., 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_longonly(price=price_wide).order_records,
        np.array([
            (0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 100., 4., 0., 1),
            (2, 0, 1, 100., 1., 0., 0), (3, 3, 1, 100., 4., 0., 1),
            (4, 0, 2, 100., 1., 0., 0), (5, 3, 2, 100., 4., 0., 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_shortonly(price=price_wide).order_records,
        np.array([
            (0, 0, 0, 100., 1., 0., 1), (1, 3, 0, 50., 4., 0., 0),
            (2, 0, 1, 100., 1., 0., 1), (3, 3, 1, 50., 4., 0., 0),
            (4, 0, 2, 100., 1., 0., 1), (5, 3, 2, 50., 4., 0., 0)
        ], dtype=order_dt)
    )
    # Wrapper metadata: 2-dim frame portfolio with object columns, no grouping.
    portfolio = from_signals_all(price=price_wide)
    pd.testing.assert_index_equal(
        portfolio.wrapper.index,
        pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
    )
    pd.testing.assert_index_equal(
        portfolio.wrapper.columns,
        pd.Index(['a', 'b', 'c'], dtype='object')
    )
    assert portfolio.wrapper.ndim == 2
    assert portfolio.wrapper.freq == day_dt
    assert portfolio.wrapper.grouper.group_by is None
def test_size(self):
    """`size` broadcast across columns: -1, 0, 1 and np.inf per direction."""
    record_arrays_close(
        from_signals_all(size=[[-1, 0, 1, np.inf]]).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 2, 1.0, 1.0, 0.0, 0),
            (3, 3, 2, 2.0, 4.0, 0.0, 1), (4, 0, 3, 100.0, 1.0, 0.0, 0), (5, 3, 3, 200.0, 4.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_longonly(size=[[-1, 0, 1, np.inf]]).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 2, 1.0, 1.0, 0.0, 0),
            (3, 3, 2, 1.0, 4.0, 0.0, 1), (4, 0, 3, 100.0, 1.0, 0.0, 0), (5, 3, 3, 100.0, 4.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_shortonly(size=[[-1, 0, 1, np.inf]]).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 2, 1.0, 1.0, 0.0, 1),
            (3, 3, 2, 1.0, 4.0, 0.0, 0), (4, 0, 3, 100.0, 1.0, 0.0, 1), (5, 3, 3, 50.0, 4.0, 0.0, 0)
        ], dtype=order_dt)
    )
def test_percent(self):
    """`size_type='percent'`: rejected for two-sided direction unless paired with
    `close_first`/`accumulate`; also checks cash-sharing across a group."""
    # Percent sizing with direction='all' alone must raise.
    with pytest.raises(Exception) as e_info:
        _ = from_signals_all(size=0.5, size_type='percent')
    record_arrays_close(
        from_signals_all(size=0.5, size_type='percent', close_first=True).order_records,
        np.array([
            (0, 0, 0, 50., 1., 0., 0), (1, 3, 0, 50., 4., 0., 1), (2, 4, 0, 25., 5., 0., 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_all(size=0.5, size_type='percent', close_first=True, accumulate=True).order_records,
        np.array([
            (0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 12.5, 2., 0., 0),
            (2, 3, 0, 65.625, 4., 0., 1), (3, 4, 0, 26.25, 5., 0., 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_longonly(size=0.5, size_type='percent').order_records,
        np.array([
            (0, 0, 0, 50., 1., 0., 0), (1, 3, 0, 50., 4., 0., 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_shortonly(size=0.5, size_type='percent').order_records,
        np.array([
            (0, 0, 0, 50., 1., 0., 1), (1, 3, 0, 37.5, 4., 0., 0)
        ], dtype=order_dt)
    )
    # With cash sharing, each successive column gets 50% of what remains.
    record_arrays_close(
        from_signals_longonly(
            price=price_wide, size=0.5, size_type='percent',
            group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
        np.array([
            (0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 25., 1., 0., 0),
            (2, 0, 2, 12.5, 1., 0., 0), (3, 3, 0, 50., 4., 0., 1),
            (4, 3, 1, 25., 4., 0., 1), (5, 3, 2, 12.5, 4., 0., 1)
        ], dtype=order_dt)
    )
def test_price(self):
    """Custom `price` (1% above close) changes fill prices and sizes per direction."""
    record_arrays_close(
        from_signals_all(price=price * 1.01).order_records,
        np.array([
            (0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 3, 0, 198.01980198019803, 4.04, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_longonly(price=price * 1.01).order_records,
        np.array([
            (0, 0, 0, 99.00990099, 1.01, 0., 0), (1, 3, 0, 99.00990099, 4.04, 0., 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_shortonly(price=price * 1.01).order_records,
        np.array([
            (0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 3, 0, 49.504950495049506, 4.04, 0.0, 0)
        ], dtype=order_dt)
    )
def test_fees(self):
    """Proportional `fees` of 0%, 10% and 100% reflected in the fees field."""
    record_arrays_close(
        from_signals_all(size=1, fees=[[0., 0.1, 1.]]).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
            (3, 3, 1, 2.0, 4.0, 0.8, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 2.0, 4.0, 8.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_longonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
            (3, 3, 1, 1.0, 4.0, 0.4, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 1.0, 4.0, 4.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_shortonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.1, 1),
            (3, 3, 1, 1.0, 4.0, 0.4, 0), (4, 0, 2, 1.0, 1.0, 1.0, 1), (5, 3, 2, 1.0, 4.0, 4.0, 0)
        ], dtype=order_dt)
    )
def test_fixed_fees(self):
    """Flat `fixed_fees` of 0, 0.1 and 1 charged per order regardless of size."""
    record_arrays_close(
        from_signals_all(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
            (3, 3, 1, 2.0, 4.0, 0.1, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 2.0, 4.0, 1.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_longonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
            (3, 3, 1, 1.0, 4.0, 0.1, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 1.0, 4.0, 1.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_shortonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.1, 1),
            (3, 3, 1, 1.0, 4.0, 0.1, 0), (4, 0, 2, 1.0, 1.0, 1.0, 1), (5, 3, 2, 1.0, 4.0, 1.0, 0)
        ], dtype=order_dt)
    )
def test_slippage(self):
    """`slippage` of 0%, 10% and 100% shifts fill prices against the order side."""
    record_arrays_close(
        from_signals_all(size=1, slippage=[[0., 0.1, 1.]]).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.1, 0.0, 0),
            (3, 3, 1, 2.0, 3.6, 0.0, 1), (4, 0, 2, 1.0, 2.0, 0.0, 0), (5, 3, 2, 2.0, 0.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_longonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.1, 0.0, 0),
            (3, 3, 1, 1.0, 3.6, 0.0, 1), (4, 0, 2, 1.0, 2.0, 0.0, 0), (5, 3, 2, 1.0, 0.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_shortonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 0.9, 0.0, 1),
            (3, 3, 1, 1.0, 4.4, 0.0, 0), (4, 0, 2, 1.0, 0.0, 0.0, 1), (5, 3, 2, 1.0, 8.0, 0.0, 0)
        ], dtype=order_dt)
    )
def test_min_size(self):
    """`min_size` of 0, 1, 2 with size=1: orders below the minimum are dropped
    (no records for the third column)."""
    record_arrays_close(
        from_signals_all(size=1, min_size=[[0., 1., 2.]]).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.0, 0),
            (3, 3, 1, 2.0, 4.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_longonly(size=1, min_size=[[0., 1., 2.]]).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.0, 0),
            (3, 3, 1, 1.0, 4.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_shortonly(size=1, min_size=[[0., 1., 2.]]).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.0, 1),
            (3, 3, 1, 1.0, 4.0, 0.0, 0)
        ], dtype=order_dt)
    )
def test_max_size(self):
    """`max_size` of 0.5, 1, inf caps each order; a capped reversal spills into
    an extra order on the next bar for direction='all'."""
    record_arrays_close(
        from_signals_all(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
        np.array([
            (0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 3, 0, 0.5, 4.0, 0.0, 1), (2, 4, 0, 0.5, 5.0, 0.0, 1),
            (3, 0, 1, 1.0, 1.0, 0.0, 0), (4, 3, 1, 1.0, 4.0, 0.0, 1), (5, 4, 1, 1.0, 5.0, 0.0, 1),
            (6, 0, 2, 1.0, 1.0, 0.0, 0), (7, 3, 2, 2.0, 4.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_longonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
        np.array([
            (0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 3, 0, 0.5, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.0, 0),
            (3, 3, 1, 1.0, 4.0, 0.0, 1), (4, 0, 2, 1.0, 1.0, 0.0, 0), (5, 3, 2, 1.0, 4.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_shortonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
        np.array([
            (0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 3, 0, 0.5, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.0, 1),
            (3, 3, 1, 1.0, 4.0, 0.0, 0), (4, 0, 2, 1.0, 1.0, 0.0, 1), (5, 3, 2, 1.0, 4.0, 0.0, 0)
        ], dtype=order_dt)
    )
def test_reject_prob(self):
    """`reject_prob` of 0, 0.5, 1 with a fixed seed: probabilistic rejections
    shift or drop orders deterministically (no records for prob=1)."""
    record_arrays_close(
        from_signals_all(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
            (3, 3, 1, 2.0, 4.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_longonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
            (3, 3, 1, 1.0, 4.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_shortonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 1),
            (3, 3, 1, 1.0, 4.0, 0.0, 0)
        ], dtype=order_dt)
    )
def test_close_first(self):
    """`close_first=True` splits a reversal into a closing order followed by an
    opposite-direction order on the next signal; also checked on reversed data."""
    record_arrays_close(
        from_signals_all(close_first=[[False, True]]).order_records,
        np.array([
            (0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
            (3, 3, 1, 100.0, 4.0, 0.0, 1), (4, 4, 1, 80.0, 5.0, 0.0, 1)
        ], dtype=order_dt)
    )
    # Same check with price and signal series reversed in time.
    record_arrays_close(
        from_signals_all(
            price=pd.Series(price.values[::-1], index=price.index),
            entries=pd.Series(entries.values[::-1], index=price.index),
            exits=pd.Series(exits.values[::-1], index=price.index),
            close_first=[[False, True]]
        ).order_records,
        np.array([
            (0, 0, 0, 20.0, 5.0, 0.0, 1), (1, 3, 0, 100.0, 2.0, 0.0, 0), (2, 0, 1, 20.0, 5.0, 0.0, 1),
            (3, 3, 1, 20.0, 2.0, 0.0, 0), (4, 4, 1, 160.0, 1.0, 0.0, 0)
        ], dtype=order_dt)
    )
def test_allow_partial(self):
    """`allow_partial`: partial fills when True; orders rejected outright when
    False and the requested finite size cannot be fully filled. np.inf sizes are
    unaffected by the flag."""
    record_arrays_close(
        from_signals_all(size=1000, allow_partial=[[True, False]]).order_records,
        np.array([
            (0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 1100.0, 4.0, 0.0, 1), (2, 3, 1, 1000.0, 4.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_longonly(size=1000, allow_partial=[[True, False]]).order_records,
        np.array([
            (0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 100.0, 4.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_shortonly(size=1000, allow_partial=[[True, False]]).order_records,
        np.array([
            (0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 3, 0, 275.0, 4.0, 0.0, 0), (2, 0, 1, 1000.0, 1.0, 0.0, 1)
        ], dtype=order_dt)
    )
    # Infinite size: both flag values yield the same fills.
    record_arrays_close(
        from_signals_all(size=np.inf, allow_partial=[[True, False]]).order_records,
        np.array([
            (0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
            (3, 3, 1, 200.0, 4.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_longonly(size=np.inf, allow_partial=[[True, False]]).order_records,
        np.array([
            (0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 100.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
            (3, 3, 1, 100.0, 4.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_shortonly(size=np.inf, allow_partial=[[True, False]]).order_records,
        np.array([
            (0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 3, 0, 50.0, 4.0, 0.0, 0), (2, 0, 1, 100.0, 1.0, 0.0, 1)
        ], dtype=order_dt)
    )
def test_raise_reject(self):
    """`raise_reject=True`: partial fills still succeed, but any rejected order
    raises instead of being recorded silently.

    NOTE(review): `e_info` is unused and the expected exception is the broad
    `Exception` — asserting the concrete rejection error type/message would
    make these checks stricter; verify what the library actually raises.
    """
    record_arrays_close(
        from_signals_all(size=1000, allow_partial=True, raise_reject=True).order_records,
        np.array([
            (0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 1100.0, 4.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_longonly(size=1000, allow_partial=True, raise_reject=True).order_records,
        np.array([
            (0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 100.0, 4.0, 0.0, 1)
        ], dtype=order_dt)
    )
    with pytest.raises(Exception) as e_info:
        _ = from_signals_shortonly(size=1000, allow_partial=True, raise_reject=True).order_records
    with pytest.raises(Exception) as e_info:
        _ = from_signals_all(size=1000, allow_partial=False, raise_reject=True).order_records
    with pytest.raises(Exception) as e_info:
        _ = from_signals_longonly(size=1000, allow_partial=False, raise_reject=True).order_records
    with pytest.raises(Exception) as e_info:
        _ = from_signals_shortonly(size=1000, allow_partial=False, raise_reject=True).order_records
def test_log(self):
    """`log=True` produces full log records (one per executed order)."""
    record_arrays_close(
        from_signals_all(log=True).log_records,
        np.array([
            (0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2, 0.0, 0.0,
             0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 100.0, 0.0, 0.0, 1.0,
             100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
            (1, 3, 0, 0, 0.0, 100.0, 0.0, 0.0, 4.0, 400.0, -np.inf, 4.0, 0, 2, 0.0,
             0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 800.0, -100.0,
             400.0, 0.0, 4.0, 400.0, 200.0, 4.0, 0.0, 1, 0, -1, 1)
        ], dtype=log_dt)
    )
def test_accumulate(self):
    """`accumulate=True`: repeated entry/exit signals add to or reduce the
    position one unit at a time instead of being ignored."""
    record_arrays_close(
        from_signals_all(size=1, accumulate=True).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
            (3, 4, 0, 1.0, 5.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_longonly(size=1, accumulate=True).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
            (3, 4, 0, 1.0, 5.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_shortonly(size=1, accumulate=True).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
            (3, 4, 0, 1.0, 5.0, 0.0, 0)
        ], dtype=order_dt)
    )
def test_conflict_mode(self):
    """`conflict_mode` resolution when entry and exit fire on the same bar:
    'ignore', 'entry', 'exit' and 'opposite' compared across five columns."""
    kwargs = dict(
        price=price.iloc[:3],
        # Entry/exit conflicts on most bars; column 4 differs on bar 1.
        entries=pd.DataFrame([
            [True, True, True, True, True],
            [True, True, True, True, False],
            [True, True, True, True, True]
        ]),
        exits=pd.DataFrame([
            [True, True, True, True, True],
            [False, False, False, False, True],
            [True, True, True, True, True]
        ]),
        size=1.,
        conflict_mode=[[
            'ignore',
            'entry',
            'exit',
            'opposite',
            'opposite'
        ]]
    )
    record_arrays_close(
        from_signals_all(**kwargs).order_records,
        np.array([
            (0, 1, 0, 1.0, 2.0, 0.0, 0), (1, 0, 1, 1.0, 1.0, 0.0, 0), (2, 0, 2, 1.0, 1.0, 0.0, 1),
            (3, 1, 2, 2.0, 2.0, 0.0, 0), (4, 2, 2, 2.0, 3.0, 0.0, 1), (5, 1, 3, 1.0, 2.0, 0.0, 0),
            (6, 2, 3, 2.0, 3.0, 0.0, 1), (7, 1, 4, 1.0, 2.0, 0.0, 1), (8, 2, 4, 2.0, 3.0, 0.0, 0)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_longonly(**kwargs).order_records,
        np.array([
            (0, 1, 0, 1.0, 2.0, 0.0, 0), (1, 0, 1, 1.0, 1.0, 0.0, 0), (2, 1, 2, 1.0, 2.0, 0.0, 0),
            (3, 2, 2, 1.0, 3.0, 0.0, 1), (4, 1, 3, 1.0, 2.0, 0.0, 0), (5, 2, 3, 1.0, 3.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_shortonly(**kwargs).order_records,
        np.array([
            (0, 1, 0, 1.0, 2.0, 0.0, 1), (1, 0, 1, 1.0, 1.0, 0.0, 1), (2, 1, 2, 1.0, 2.0, 0.0, 1),
            (3, 2, 2, 1.0, 3.0, 0.0, 0), (4, 1, 3, 1.0, 2.0, 0.0, 1), (5, 2, 3, 1.0, 3.0, 0.0, 0)
        ], dtype=order_dt)
    )
def test_init_cash(self):
    """Per-column `init_cash` of 0, 1 and 100; infinite initial cash must raise.

    NOTE(review): `e_info` is unused and the expected exception is the broad
    `Exception` — consider asserting the concrete error raised for
    `init_cash=np.inf`.
    """
    record_arrays_close(
        from_signals_all(price=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
        np.array([
            (0, 3, 0, 1.0, 4.0, 0.0, 1), (1, 0, 1, 1.0, 1.0, 0.0, 0), (2, 3, 1, 2.0, 4.0, 0.0, 1),
            (3, 0, 2, 1.0, 1.0, 0.0, 0), (4, 3, 2, 2.0, 4.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_longonly(price=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
        np.array([
            (0, 0, 1, 1.0, 1.0, 0.0, 0), (1, 3, 1, 1.0, 4.0, 0.0, 1), (2, 0, 2, 1.0, 1.0, 0.0, 0),
            (3, 3, 2, 1.0, 4.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_signals_shortonly(price=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 0.25, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.0, 1),
            (3, 3, 1, 0.5, 4.0, 0.0, 0), (4, 0, 2, 1.0, 1.0, 0.0, 1), (5, 3, 2, 1.0, 4.0, 0.0, 0)
        ], dtype=order_dt)
    )
    with pytest.raises(Exception) as e_info:
        _ = from_signals_all(init_cash=np.inf).order_records
    with pytest.raises(Exception) as e_info:
        _ = from_signals_longonly(init_cash=np.inf).order_records
    with pytest.raises(Exception) as e_info:
        _ = from_signals_shortonly(init_cash=np.inf).order_records
def test_group_by(self):
    """Grouping columns (without cash sharing) keeps per-column orders but groups init_cash."""
    portfolio = from_signals_all(price=price_wide, group_by=np.array([0, 0, 1]))
    # Orders are unchanged by grouping alone — each column still trades independently.
    record_arrays_close(
        portfolio.order_records,
        np.array([
            (0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
            (3, 3, 1, 200.0, 4.0, 0.0, 1), (4, 0, 2, 100.0, 1.0, 0.0, 0), (5, 3, 2, 200.0, 4.0, 0.0, 1)
        ], dtype=order_dt)
    )
    pd.testing.assert_index_equal(
        portfolio.wrapper.grouper.group_by,
        pd.Int64Index([0, 0, 1], dtype='int64')
    )
    # init_cash is reported per group: two columns in group 0, one in group 1.
    pd.testing.assert_series_equal(
        portfolio.init_cash,
        pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
    )
    assert not portfolio.cash_sharing
def test_cash_sharing(self):
portfolio = from_signals_all(price=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 200., 4., 0., 1),
(2, 0, 2, 100., 1., 0., 0), (3, 3, 2, 200., 4., 0., 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert portfolio.cash_sharing
with pytest.raises(Exception) as e_info:
_ = portfolio.regroup(group_by=False)
def test_call_seq(self):
    """Call sequence modes ('default', 'reversed', 'random', 'auto') decide per-row column order."""
    # Default call sequence: columns processed in order within each group.
    portfolio = from_signals_all(price=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
    record_arrays_close(
        portfolio.order_records,
        np.array([
            (0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 200., 4., 0., 1),
            (2, 0, 2, 100., 1., 0., 0), (3, 3, 2, 200., 4., 0., 1)
        ], dtype=order_dt)
    )
    np.testing.assert_array_equal(
        portfolio.call_seq.values,
        np.array([
            [0, 1, 0],
            [0, 1, 0],
            [0, 1, 0],
            [0, 1, 0],
            [0, 1, 0]
        ])
    )
    # Reversed call sequence: the second column of the group trades first.
    portfolio = from_signals_all(
        price=price_wide, group_by=np.array([0, 0, 1]),
        cash_sharing=True, call_seq='reversed')
    record_arrays_close(
        portfolio.order_records,
        np.array([
            (0, 0, 1, 100., 1., 0., 0), (1, 3, 1, 200., 4., 0., 1),
            (2, 0, 2, 100., 1., 0., 0), (3, 3, 2, 200., 4., 0., 1)
        ], dtype=order_dt)
    )
    np.testing.assert_array_equal(
        portfolio.call_seq.values,
        np.array([
            [1, 0, 0],
            [1, 0, 0],
            [1, 0, 0],
            [1, 0, 0],
            [1, 0, 0]
        ])
    )
    # Random call sequence: deterministic given the fixed seed.
    portfolio = from_signals_all(
        price=price_wide, group_by=np.array([0, 0, 1]),
        cash_sharing=True, call_seq='random', seed=seed)
    record_arrays_close(
        portfolio.order_records,
        np.array([
            (0, 0, 1, 100., 1., 0., 0), (1, 3, 1, 200., 4., 0., 1),
            (2, 0, 2, 100., 1., 0., 0), (3, 3, 2, 200., 4., 0., 1)
        ], dtype=order_dt)
    )
    np.testing.assert_array_equal(
        portfolio.call_seq.values,
        np.array([
            [1, 0, 0],
            [0, 1, 0],
            [1, 0, 0],
            [1, 0, 0],
            [1, 0, 0]
        ])
    )
    # 'auto' call sequence with rotating entry/exit signals across three columns
    # sharing one cash pool: sells should be executed before buys.
    kwargs = dict(
        price=1.,
        entries=pd.DataFrame([
            [False, False, True],
            [False, True, False],
            [True, False, False],
            [False, False, True],
            [False, True, False],
        ]),
        exits=pd.DataFrame([
            [False, False, False],
            [False, False, True],
            [False, True, False],
            [True, False, False],
            [False, False, True],
        ]),
        group_by=np.array([0, 0, 0]),
        cash_sharing=True,
        call_seq='auto'
    )
    portfolio = from_signals_all(**kwargs)
    record_arrays_close(
        portfolio.order_records,
        np.array([
            (0, 0, 2, 100., 1., 0., 0), (1, 1, 2, 200., 1., 0., 1),
            (2, 1, 1, 200., 1., 0., 0), (3, 2, 1, 200., 1., 0., 1),
            (4, 2, 0, 200., 1., 0., 0), (5, 3, 0, 200., 1., 0., 1),
            (6, 3, 2, 200., 1., 0., 0), (7, 4, 2, 200., 1., 0., 1),
            (8, 4, 1, 200., 1., 0., 0)
        ], dtype=order_dt)
    )
    np.testing.assert_array_equal(
        portfolio.call_seq.values,
        np.array([
            [0, 1, 2],
            [2, 0, 1],
            [1, 2, 0],
            [0, 1, 2],
            [2, 0, 1]
        ])
    )
    portfolio = from_signals_longonly(**kwargs)
    record_arrays_close(
        portfolio.order_records,
        np.array([
            (0, 0, 2, 100., 1., 0., 0), (1, 1, 2, 100., 1., 0., 1),
            (2, 1, 1, 100., 1., 0., 0), (3, 2, 1, 100., 1., 0., 1),
            (4, 2, 0, 100., 1., 0., 0), (5, 3, 0, 100., 1., 0., 1),
            (6, 3, 2, 100., 1., 0., 0), (7, 4, 2, 100., 1., 0., 1),
            (8, 4, 1, 100., 1., 0., 0)
        ], dtype=order_dt)
    )
    np.testing.assert_array_equal(
        portfolio.call_seq.values,
        np.array([
            [0, 1, 2],
            [2, 0, 1],
            [1, 2, 0],
            [0, 1, 2],
            [2, 0, 1]
        ])
    )
    portfolio = from_signals_shortonly(**kwargs)
    record_arrays_close(
        portfolio.order_records,
        np.array([
            (0, 0, 2, 100., 1., 0., 1), (1, 1, 2, 100., 1., 0., 0),
            (2, 2, 0, 100., 1., 0., 1), (3, 3, 0, 100., 1., 0., 0),
            (4, 4, 1, 100., 1., 0., 1)
        ], dtype=order_dt)
    )
    np.testing.assert_array_equal(
        portfolio.call_seq.values,
        np.array([
            [2, 0, 1],
            [1, 0, 2],
            [0, 1, 2],
            [2, 1, 0],
            [1, 0, 2]
        ])
    )
    # 'auto' also works together with percent sizing.
    portfolio = from_signals_longonly(**kwargs, size=1., size_type='percent')
    record_arrays_close(
        portfolio.order_records,
        np.array([
            (0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
            (3, 2, 1, 100.0, 1.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 3, 0, 100.0, 1.0, 0.0, 1),
            (6, 3, 2, 100.0, 1.0, 0.0, 0), (7, 4, 2, 100.0, 1.0, 0.0, 1), (8, 4, 1, 100.0, 1.0, 0.0, 0)
        ], dtype=order_dt)
    )
    np.testing.assert_array_equal(
        portfolio.call_seq.values,
        np.array([
            [0, 1, 2],
            [2, 0, 1],
            [1, 0, 2],
            [0, 1, 2],
            [2, 0, 1]
        ])
    )
def test_max_orders(self):
    """Simulation succeeds while max_orders covers all generated orders, and raises otherwise."""
    _ = from_signals_all(price=price_wide)
    _ = from_signals_all(price=price_wide, max_orders=6)
    # Six orders are generated, so a cap of 5 must overflow the records array.
    with pytest.raises(Exception):
        _ = from_signals_all(price=price_wide, max_orders=5)
def test_max_logs(self):
    """Simulation succeeds while max_logs covers all log records, and raises otherwise."""
    _ = from_signals_all(price=price_wide, log=True)
    _ = from_signals_all(price=price_wide, log=True, max_logs=6)
    # Six log records are generated, so a cap of 5 must overflow the log array.
    with pytest.raises(Exception):
        _ = from_signals_all(price=price_wide, log=True, max_logs=5)
# ############# from_holding ############# #
class TestFromHolding:
    """Tests for ``vbt.Portfolio.from_holding``."""

    def test_from_holding(self):
        """from_holding must produce the same orders as an entry-only, non-accumulating signal run."""
        holding_pf = vbt.Portfolio.from_holding(price)
        signal_pf = vbt.Portfolio.from_signals(price, True, False, accumulate=False)
        record_arrays_close(holding_pf.order_records, signal_pf.order_records)
# ############# from_random_signals ############# #
class TestFromRandomSignals:
    """Tests for ``vbt.Portfolio.from_random_signals``."""

    def test_from_random_n(self):
        """With a fixed seed, n random signals match the expected hand-written signal masks."""
        result = vbt.Portfolio.from_random_signals(price, n=2, seed=seed)
        record_arrays_close(
            result.order_records,
            vbt.Portfolio.from_signals(
                price,
                [True, False, True, False, False],
                [False, True, False, False, True]
            ).order_records
        )
        pd.testing.assert_index_equal(
            result.wrapper.index,
            price.vbt.wrapper.index
        )
        pd.testing.assert_index_equal(
            result.wrapper.columns,
            price.vbt.wrapper.columns
        )
        # A list of n values broadcasts into one column per value, labeled 'rand_n'.
        result = vbt.Portfolio.from_random_signals(price, n=[1, 2], seed=seed)
        record_arrays_close(
            result.order_records,
            vbt.Portfolio.from_signals(
                price,
                [[False, True], [True, False], [False, True], [False, False], [False, False]],
                [[False, False], [False, True], [False, False], [False, True], [True, False]]
            ).order_records
        )
        pd.testing.assert_index_equal(
            result.wrapper.index,
            pd.DatetimeIndex([
                '2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
            ], dtype='datetime64[ns]', freq=None)
        )
        pd.testing.assert_index_equal(
            result.wrapper.columns,
            pd.Int64Index([1, 2], dtype='int64', name='rand_n')
        )

    def test_from_random_prob(self):
        """With a fixed seed, probability-based random signals match the expected masks."""
        result = vbt.Portfolio.from_random_signals(price, prob=0.5, seed=seed)
        record_arrays_close(
            result.order_records,
            vbt.Portfolio.from_signals(
                price,
                [True, False, False, False, False],
                [False, False, False, False, True]
            ).order_records
        )
        pd.testing.assert_index_equal(
            result.wrapper.index,
            price.vbt.wrapper.index
        )
        pd.testing.assert_index_equal(
            result.wrapper.columns,
            price.vbt.wrapper.columns
        )
        # A list of probabilities broadcasts into one column per value with a
        # (entry_prob, exit_prob) MultiIndex.
        result = vbt.Portfolio.from_random_signals(price, prob=[0.25, 0.5], seed=seed)
        record_arrays_close(
            result.order_records,
            vbt.Portfolio.from_signals(
                price,
                [[False, True], [False, False], [False, False], [False, False], [True, False]],
                [[False, False], [False, True], [False, False], [False, False], [False, False]]
            ).order_records
        )
        pd.testing.assert_index_equal(
            result.wrapper.index,
            pd.DatetimeIndex([
                '2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
            ], dtype='datetime64[ns]', freq=None)
        )
        pd.testing.assert_index_equal(
            result.wrapper.columns,
            pd.MultiIndex.from_tuples([(0.25, 0.25), (0.5, 0.5)], names=['rprob_entry_prob', 'rprob_exit_prob'])
        )
# ############# from_orders ############# #
# Shared order-size fixtures for the from_orders tests below:
# order_size      — alternating all-in buy / all-out sell, with a NaN (no order) in the middle
# order_size_wide — the same series tiled into three columns 'a', 'b', 'c'
# order_size_one  — the same pattern but with unit-sized orders
order_size = pd.Series([np.inf, -np.inf, np.nan, np.inf, -np.inf], index=price.index)
order_size_wide = order_size.vbt.tile(3, keys=['a', 'b', 'c'])
order_size_one = pd.Series([1, -1, np.nan, 1, -1], index=price.index)
def from_orders_all(price=price, size=order_size, **kwargs):
    """Shortcut: build a portfolio from orders with direction='all'."""
    portfolio = vbt.Portfolio.from_orders(price, size, direction='all', **kwargs)
    return portfolio
def from_orders_longonly(price=price, size=order_size, **kwargs):
    """Shortcut: build a portfolio from orders with direction='longonly'."""
    portfolio = vbt.Portfolio.from_orders(price, size, direction='longonly', **kwargs)
    return portfolio
def from_orders_shortonly(price=price, size=order_size, **kwargs):
    """Shortcut: build a portfolio from orders with direction='shortonly'."""
    portfolio = vbt.Portfolio.from_orders(price, size, direction='shortonly', **kwargs)
    return portfolio
class TestFromOrders:
def test_one_column(self):
    """Single-column price: check order records and the resulting wrapper metadata."""
    record_arrays_close(
        from_orders_all().order_records,
        np.array([
            (0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 3, 0, 100.0, 4.0, 0.0, 0)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_orders_longonly().order_records,
        np.array([
            (0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
            (3, 4, 0, 50.0, 5.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_orders_shortonly().order_records,
        np.array([
            (0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 1, 0, 100.0, 2.0, 0.0, 0)
        ], dtype=order_dt)
    )
    portfolio = from_orders_all()
    pd.testing.assert_index_equal(
        portfolio.wrapper.index,
        pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
    )
    pd.testing.assert_index_equal(
        portfolio.wrapper.columns,
        pd.Int64Index([0], dtype='int64')
    )
    assert portfolio.wrapper.ndim == 1
    assert portfolio.wrapper.freq == day_dt
    assert portfolio.wrapper.grouper.group_by is None
def test_multiple_columns(self):
    """Wide price frame: each column is simulated independently with identical results."""
    record_arrays_close(
        from_orders_all(price=price_wide).order_records,
        np.array([
            (0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 3, 0, 100.0, 4.0, 0.0, 0),
            (3, 0, 1, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 3, 1, 100.0, 4.0, 0.0, 0),
            (6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1), (8, 3, 2, 100.0, 4.0, 0.0, 0)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_orders_longonly(price=price_wide).order_records,
        np.array([
            (0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
            (3, 4, 0, 50.0, 5.0, 0.0, 1), (4, 0, 1, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
            (6, 3, 1, 50.0, 4.0, 0.0, 0), (7, 4, 1, 50.0, 5.0, 0.0, 1), (8, 0, 2, 100.0, 1.0, 0.0, 0),
            (9, 1, 2, 100.0, 2.0, 0.0, 1), (10, 3, 2, 50.0, 4.0, 0.0, 0), (11, 4, 2, 50.0, 5.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_orders_shortonly(price=price_wide).order_records,
        np.array([
            (0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 1, 0, 100.0, 2.0, 0.0, 0), (2, 0, 1, 100.0, 1.0, 0.0, 1),
            (3, 1, 1, 100.0, 2.0, 0.0, 0), (4, 0, 2, 100.0, 1.0, 0.0, 1), (5, 1, 2, 100.0, 2.0, 0.0, 0)
        ], dtype=order_dt)
    )
    portfolio = from_orders_all(price=price_wide)
    pd.testing.assert_index_equal(
        portfolio.wrapper.index,
        pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
    )
    pd.testing.assert_index_equal(
        portfolio.wrapper.columns,
        pd.Index(['a', 'b', 'c'], dtype='object')
    )
    assert portfolio.wrapper.ndim == 2
    assert portfolio.wrapper.freq == day_dt
    assert portfolio.wrapper.grouper.group_by is None
def test_size_inf(self):
    """Infinite size means "invest everything"; -inf on the first bar only fills where shorting is allowed."""
    record_arrays_close(
        from_orders_all(size=[[np.inf, -np.inf]]).order_records,
        np.array([
            (0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 1.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_orders_longonly(size=[[np.inf, -np.inf]]).order_records,
        np.array([
            (0, 0, 0, 100.0, 1.0, 0.0, 0)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_orders_shortonly(size=[[np.inf, -np.inf]]).order_records,
        np.array([
            (0, 0, 0, 100.0, 1.0, 0.0, 1)
        ], dtype=order_dt)
    )
def test_price(self):
    """A custom (scaled) price is used for fills, adjusting filled sizes accordingly."""
    record_arrays_close(
        from_orders_all(price=price * 1.01).order_records,
        np.array([
            (0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 1, 0, 198.01980198019803, 2.02, 0.0, 1),
            (2, 3, 0, 99.00990099009901, 4.04, 0.0, 0)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_orders_longonly(price=price * 1.01).order_records,
        np.array([
            (0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 1, 0, 99.00990099009901, 2.02, 0.0, 1),
            (2, 3, 0, 49.504950495049506, 4.04, 0.0, 0), (3, 4, 0, 49.504950495049506, 5.05, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_orders_shortonly(price=price * 1.01).order_records,
        np.array([
            (0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 1, 0, 99.00990099009901, 2.02, 0.0, 0)
        ], dtype=order_dt)
    )
def test_fees(self):
    """Percentage fees scale with order value; one test column per fee rate (0%, 10%, 100%)."""
    record_arrays_close(
        from_orders_all(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
            (3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
            (6, 3, 1, 1.0, 4.0, 0.4, 0), (7, 4, 1, 1.0, 5.0, 0.5, 1), (8, 0, 2, 1.0, 1.0, 1.0, 0),
            (9, 1, 2, 1.0, 2.0, 2.0, 1), (10, 3, 2, 1.0, 4.0, 4.0, 0), (11, 4, 2, 1.0, 5.0, 5.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_orders_longonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
            (3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
            (6, 3, 1, 1.0, 4.0, 0.4, 0), (7, 4, 1, 1.0, 5.0, 0.5, 1), (8, 0, 2, 1.0, 1.0, 1.0, 0),
            (9, 1, 2, 1.0, 2.0, 2.0, 1), (10, 3, 2, 1.0, 4.0, 4.0, 0), (11, 4, 2, 1.0, 5.0, 5.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_orders_shortonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
            (3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 0, 1, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.2, 0),
            (6, 3, 1, 1.0, 4.0, 0.4, 1), (7, 4, 1, 1.0, 5.0, 0.5, 0), (8, 0, 2, 1.0, 1.0, 1.0, 1),
            (9, 1, 2, 1.0, 2.0, 2.0, 0), (10, 3, 2, 1.0, 4.0, 4.0, 1), (11, 4, 2, 1.0, 5.0, 5.0, 0)
        ], dtype=order_dt)
    )
def test_fixed_fees(self):
    """Fixed fees are charged per order regardless of order value."""
    record_arrays_close(
        from_orders_all(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
            (3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
            (6, 3, 1, 1.0, 4.0, 0.1, 0), (7, 4, 1, 1.0, 5.0, 0.1, 1), (8, 0, 2, 1.0, 1.0, 1.0, 0),
            (9, 1, 2, 1.0, 2.0, 1.0, 1), (10, 3, 2, 1.0, 4.0, 1.0, 0), (11, 4, 2, 1.0, 5.0, 1.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_orders_longonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
            (3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
            (6, 3, 1, 1.0, 4.0, 0.1, 0), (7, 4, 1, 1.0, 5.0, 0.1, 1), (8, 0, 2, 1.0, 1.0, 1.0, 0),
            (9, 1, 2, 1.0, 2.0, 1.0, 1), (10, 3, 2, 1.0, 4.0, 1.0, 0), (11, 4, 2, 1.0, 5.0, 1.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_orders_shortonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
            (3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 0, 1, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.1, 0),
            (6, 3, 1, 1.0, 4.0, 0.1, 1), (7, 4, 1, 1.0, 5.0, 0.1, 0), (8, 0, 2, 1.0, 1.0, 1.0, 1),
            (9, 1, 2, 1.0, 2.0, 1.0, 0), (10, 3, 2, 1.0, 4.0, 1.0, 1), (11, 4, 2, 1.0, 5.0, 1.0, 0)
        ], dtype=order_dt)
    )
def test_slippage(self):
    """Slippage moves the fill price against the order (up for buys, down for sells)."""
    record_arrays_close(
        from_orders_all(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
            (3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
            (6, 3, 1, 1.0, 4.4, 0.0, 0), (7, 4, 1, 1.0, 4.5, 0.0, 1), (8, 0, 2, 1.0, 2.0, 0.0, 0),
            (9, 1, 2, 1.0, 0.0, 0.0, 1), (10, 3, 2, 1.0, 8.0, 0.0, 0), (11, 4, 2, 1.0, 0.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_orders_longonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
            (3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
            (6, 3, 1, 1.0, 4.4, 0.0, 0), (7, 4, 1, 1.0, 4.5, 0.0, 1), (8, 0, 2, 1.0, 2.0, 0.0, 0),
            (9, 1, 2, 1.0, 0.0, 0.0, 1), (10, 3, 2, 1.0, 8.0, 0.0, 0), (11, 4, 2, 1.0, 0.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_orders_shortonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
            (3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 0, 1, 1.0, 0.9, 0.0, 1), (5, 1, 1, 1.0, 2.2, 0.0, 0),
            (6, 3, 1, 1.0, 3.6, 0.0, 1), (7, 4, 1, 1.0, 5.5, 0.0, 0), (8, 0, 2, 1.0, 0.0, 0.0, 1),
            (9, 1, 2, 1.0, 4.0, 0.0, 0), (10, 3, 2, 1.0, 0.0, 0.0, 1), (11, 4, 2, 1.0, 10.0, 0.0, 0)
        ], dtype=order_dt)
    )
def test_min_size(self):
    """Orders below min_size are dropped: the min_size=2 column produces no records."""
    record_arrays_close(
        from_orders_all(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
            (3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
            (6, 3, 1, 1.0, 4.0, 0.0, 0), (7, 4, 1, 1.0, 5.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_orders_longonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
            (3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
            (6, 3, 1, 1.0, 4.0, 0.0, 0), (7, 4, 1, 1.0, 5.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_orders_shortonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
            (3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 0, 1, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
            (6, 3, 1, 1.0, 4.0, 0.0, 1), (7, 4, 1, 1.0, 5.0, 0.0, 0)
        ], dtype=order_dt)
    )
def test_max_size(self):
    """Orders larger than max_size are capped at max_size (see the 0.5 column)."""
    record_arrays_close(
        from_orders_all(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
        np.array([
            (0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 1, 0, 0.5, 2.0, 0.0, 1), (2, 3, 0, 0.5, 4.0, 0.0, 0),
            (3, 4, 0, 0.5, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
            (6, 3, 1, 1.0, 4.0, 0.0, 0), (7, 4, 1, 1.0, 5.0, 0.0, 1), (8, 0, 2, 1.0, 1.0, 0.0, 0),
            (9, 1, 2, 1.0, 2.0, 0.0, 1), (10, 3, 2, 1.0, 4.0, 0.0, 0), (11, 4, 2, 1.0, 5.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_orders_longonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
        np.array([
            (0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 1, 0, 0.5, 2.0, 0.0, 1), (2, 3, 0, 0.5, 4.0, 0.0, 0),
            (3, 4, 0, 0.5, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
            (6, 3, 1, 1.0, 4.0, 0.0, 0), (7, 4, 1, 1.0, 5.0, 0.0, 1), (8, 0, 2, 1.0, 1.0, 0.0, 0),
            (9, 1, 2, 1.0, 2.0, 0.0, 1), (10, 3, 2, 1.0, 4.0, 0.0, 0), (11, 4, 2, 1.0, 5.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_orders_shortonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
        np.array([
            (0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 1, 0, 0.5, 2.0, 0.0, 0), (2, 3, 0, 0.5, 4.0, 0.0, 1),
            (3, 4, 0, 0.5, 5.0, 0.0, 0), (4, 0, 1, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
            (6, 3, 1, 1.0, 4.0, 0.0, 1), (7, 4, 1, 1.0, 5.0, 0.0, 0), (8, 0, 2, 1.0, 1.0, 0.0, 1),
            (9, 1, 2, 1.0, 2.0, 0.0, 0), (10, 3, 2, 1.0, 4.0, 0.0, 1), (11, 4, 2, 1.0, 5.0, 0.0, 0)
        ], dtype=order_dt)
    )
def test_reject_prob(self):
    """reject_prob randomly drops orders; with seed=42, prob=0.5 drops some and prob=1 drops all."""
    record_arrays_close(
        from_orders_all(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
            (3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 1, 1, 1.0, 2.0, 0.0, 1), (5, 3, 1, 1.0, 4.0, 0.0, 0),
            (6, 4, 1, 1.0, 5.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_orders_longonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
            (3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 3, 1, 1.0, 4.0, 0.0, 0), (5, 4, 1, 1.0, 5.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_orders_shortonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
            (3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 3, 1, 1.0, 4.0, 0.0, 1), (5, 4, 1, 1.0, 5.0, 0.0, 0)
        ], dtype=order_dt)
    )
def test_lock_cash(self):
    """lock_cash=True prevents orders from driving free cash negative (second column)."""
    portfolio = from_orders_all(size=order_size_one * 1000, lock_cash=[[False, True]])
    record_arrays_close(
        portfolio.order_records,
        np.array([
            (0, 0, 0, 100., 1., 0., 0), (1, 1, 0, 1000., 2., 0., 1),
            (2, 3, 0, 500., 4., 0., 0), (3, 4, 0, 1000., 5., 0., 1),
            (4, 0, 1, 100., 1., 0., 0), (5, 1, 1, 200., 2., 0., 1),
            (6, 3, 1, 100., 4., 0., 0)
        ], dtype=order_dt)
    )
    # Without locking, free cash can go deeply negative; with locking it never does.
    np.testing.assert_array_equal(
        portfolio.cash(free=True).values,
        np.array([
            [0.0, 0.0],
            [-1600.0, 0.0],
            [-1600.0, 0.0],
            [-1600.0, 0.0],
            [-6600.0, 0.0]
        ])
    )
    portfolio = from_orders_longonly(size=order_size_one * 1000, lock_cash=[[False, True]])
    record_arrays_close(
        portfolio.order_records,
        np.array([
            (0, 0, 0, 100., 1., 0., 0), (1, 1, 0, 100., 2., 0., 1),
            (2, 3, 0, 50., 4., 0., 0), (3, 4, 0, 50., 5., 0., 1),
            (4, 0, 1, 100., 1., 0., 0), (5, 1, 1, 100., 2., 0., 1),
            (6, 3, 1, 50., 4., 0., 0), (7, 4, 1, 50., 5., 0., 1)
        ], dtype=order_dt)
    )
    # Long-only never borrows, so locking makes no difference here.
    np.testing.assert_array_equal(
        portfolio.cash(free=True).values,
        np.array([
            [0.0, 0.0],
            [200.0, 200.0],
            [200.0, 200.0],
            [0.0, 0.0],
            [250.0, 250.0]
        ])
    )
    portfolio = from_orders_shortonly(size=order_size_one * 1000, lock_cash=[[False, True]])
    record_arrays_close(
        portfolio.order_records,
        np.array([
            (0, 0, 0, 1000., 1., 0., 1), (1, 1, 0, 550., 2., 0., 0),
            (2, 3, 0, 1000., 4., 0., 1), (3, 4, 0, 800., 5., 0., 0),
            (4, 0, 1, 100., 1., 0., 1), (5, 1, 1, 100., 2., 0., 0)
        ], dtype=order_dt)
    )
    np.testing.assert_array_equal(
        portfolio.cash(free=True).values,
        np.array([
            [-900.0, 0.0],
            [-900.0, 0.0],
            [-900.0, 0.0],
            [-4900.0, 0.0],
            [-3989.6551724137926, 0.0]
        ])
    )
def test_allow_partial(self):
    """allow_partial=False rejects orders that cannot be fully filled; infinite-size orders are unaffected."""
    record_arrays_close(
        from_orders_all(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
        np.array([
            (0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 1000.0, 2.0, 0.0, 1), (2, 3, 0, 500.0, 4.0, 0.0, 0),
            (3, 4, 0, 1000.0, 5.0, 0.0, 1), (4, 1, 1, 1000.0, 2.0, 0.0, 1), (5, 4, 1, 1000.0, 5.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_orders_longonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
        np.array([
            (0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
            (3, 4, 0, 50.0, 5.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_orders_shortonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
        np.array([
            (0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 1, 0, 550.0, 2.0, 0.0, 0), (2, 3, 0, 1000.0, 4.0, 0.0, 1),
            (3, 4, 0, 800.0, 5.0, 0.0, 0), (4, 0, 1, 1000.0, 1.0, 0.0, 1), (5, 3, 1, 1000.0, 4.0, 0.0, 1),
            (6, 4, 1, 1000.0, 5.0, 0.0, 0)
        ], dtype=order_dt)
    )
    # With infinite order size the fill is always "complete", so both columns match.
    record_arrays_close(
        from_orders_all(size=order_size, allow_partial=[[True, False]]).order_records,
        np.array([
            (0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 3, 0, 100.0, 4.0, 0.0, 0),
            (3, 0, 1, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 3, 1, 100.0, 4.0, 0.0, 0)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_orders_longonly(size=order_size, allow_partial=[[True, False]]).order_records,
        np.array([
            (0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
            (3, 4, 0, 50.0, 5.0, 0.0, 1), (4, 0, 1, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
            (6, 3, 1, 50.0, 4.0, 0.0, 0), (7, 4, 1, 50.0, 5.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_orders_shortonly(size=order_size, allow_partial=[[True, False]]).order_records,
        np.array([
            (0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 1, 0, 100.0, 2.0, 0.0, 0), (2, 0, 1, 100.0, 1.0, 0.0, 1),
            (3, 1, 1, 100.0, 2.0, 0.0, 0)
        ], dtype=order_dt)
    )
def test_raise_reject(self):
    """raise_reject=True only raises when an order is actually rejected (partial fills allowed pass)."""
    record_arrays_close(
        from_orders_all(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
        np.array([
            (0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 1000.0, 2.0, 0.0, 1), (2, 3, 0, 500.0, 4.0, 0.0, 0),
            (3, 4, 0, 1000.0, 5.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_orders_longonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
        np.array([
            (0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
            (3, 4, 0, 50.0, 5.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_orders_shortonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
        np.array([
            (0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 1, 0, 550.0, 2.0, 0.0, 0), (2, 3, 0, 1000.0, 4.0, 0.0, 1),
            (3, 4, 0, 800.0, 5.0, 0.0, 0)
        ], dtype=order_dt)
    )
    # Disallowing partial fills makes the oversized orders rejectable -> must raise.
    # The exception object is not inspected, so it is not captured.
    with pytest.raises(Exception):
        _ = from_orders_all(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
    with pytest.raises(Exception):
        _ = from_orders_longonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
    with pytest.raises(Exception):
        _ = from_orders_shortonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
def test_log(self):
    """log=True records one detailed log entry per simulated order attempt."""
    record_arrays_close(
        from_orders_all(log=True).log_records,
        np.array([
            (0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2,
             0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0,
             100.0, 0.0, 0.0, 1.0, 100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
            (1, 1, 0, 0, 0.0, 100.0, 0.0, 0.0, 2.0, 200.0, -np.inf, 2.0, 0, 2,
             0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
             -100.0, 200.0, 0.0, 2.0, 200.0, 200.0, 2.0, 0.0, 1, 0, -1, 1),
            (2, 2, 0, 0, 400.0, -100.0, 200.0, 0.0, 3.0, 100.0, np.nan, 3.0, 0,
             2, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
             -100.0, 200.0, 0.0, 3.0, 100.0, np.nan, np.nan, np.nan, -1, 1, 0, -1),
            (3, 3, 0, 0, 400.0, -100.0, 200.0, 0.0, 4.0, 0.0, np.inf, 4.0, 0, 2,
             0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
             0.0, 0.0, 4.0, 0.0, 100.0, 4.0, 0.0, 0, 0, -1, 2),
            (4, 4, 0, 0, 0.0, 0.0, 0.0, 0.0, 5.0, 0.0, -np.inf, 5.0, 0, 2, 0.0,
             0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
             0.0, 0.0, 5.0, 0.0, np.nan, np.nan, np.nan, -1, 2, 6, -1)
        ], dtype=log_dt)
    )
def test_group_by(self):
    """Grouping columns (without cash sharing) keeps per-column orders but groups init_cash."""
    portfolio = from_orders_all(price=price_wide, group_by=np.array([0, 0, 1]))
    # Orders are unchanged by grouping alone — each column still trades independently.
    record_arrays_close(
        portfolio.order_records,
        np.array([
            (0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 3, 0, 100.0, 4.0, 0.0, 0),
            (3, 0, 1, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 3, 1, 100.0, 4.0, 0.0, 0),
            (6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1), (8, 3, 2, 100.0, 4.0, 0.0, 0)
        ], dtype=order_dt)
    )
    pd.testing.assert_index_equal(
        portfolio.wrapper.grouper.group_by,
        pd.Int64Index([0, 0, 1], dtype='int64')
    )
    # init_cash is reported per group: two columns in group 0, one in group 1.
    pd.testing.assert_series_equal(
        portfolio.init_cash,
        pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
    )
    assert not portfolio.cash_sharing
def test_cash_sharing(self):
    """With cash sharing, the first column of a group consumes the shared cash."""
    portfolio = from_orders_all(price=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
    record_arrays_close(
        portfolio.order_records,
        np.array([
            (0, 0, 0, 100., 1., 0., 0), (1, 1, 0, 200., 2., 0., 1),
            (2, 3, 0, 100., 4., 0., 0), (3, 0, 2, 100., 1., 0., 0),
            (4, 1, 2, 200., 2., 0., 1), (5, 3, 2, 100., 4., 0., 0)
        ], dtype=order_dt)
    )
    pd.testing.assert_index_equal(
        portfolio.wrapper.grouper.group_by,
        pd.Int64Index([0, 0, 1], dtype='int64')
    )
    pd.testing.assert_series_equal(
        portfolio.init_cash,
        pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
    )
    assert portfolio.cash_sharing
    # A cash-shared portfolio cannot be ungrouped; the exception itself is not inspected.
    with pytest.raises(Exception):
        _ = portfolio.regroup(group_by=False)
def test_call_seq(self):
    """Call sequence modes ('default', 'reversed', 'random', 'auto') decide per-row column order."""
    # Default call sequence: columns processed in order within each group.
    portfolio = from_orders_all(price=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
    record_arrays_close(
        portfolio.order_records,
        np.array([
            (0, 0, 0, 100., 1., 0., 0), (1, 1, 0, 200., 2., 0., 1),
            (2, 3, 0, 100., 4., 0., 0), (3, 0, 2, 100., 1., 0., 0),
            (4, 1, 2, 200., 2., 0., 1), (5, 3, 2, 100., 4., 0., 0)
        ], dtype=order_dt)
    )
    np.testing.assert_array_equal(
        portfolio.call_seq.values,
        np.array([
            [0, 1, 0],
            [0, 1, 0],
            [0, 1, 0],
            [0, 1, 0],
            [0, 1, 0]
        ])
    )
    # Reversed call sequence: the second column of the group trades first.
    portfolio = from_orders_all(
        price=price_wide, group_by=np.array([0, 0, 1]),
        cash_sharing=True, call_seq='reversed')
    record_arrays_close(
        portfolio.order_records,
        np.array([
            (0, 0, 1, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
            (2, 3, 1, 100., 4., 0., 0), (3, 0, 2, 100., 1., 0., 0),
            (4, 1, 2, 200., 2., 0., 1), (5, 3, 2, 100., 4., 0., 0)
        ], dtype=order_dt)
    )
    np.testing.assert_array_equal(
        portfolio.call_seq.values,
        np.array([
            [1, 0, 0],
            [1, 0, 0],
            [1, 0, 0],
            [1, 0, 0],
            [1, 0, 0]
        ])
    )
    # Random call sequence: deterministic given the fixed seed.
    portfolio = from_orders_all(
        price=price_wide, group_by=np.array([0, 0, 1]),
        cash_sharing=True, call_seq='random', seed=seed)
    record_arrays_close(
        portfolio.order_records,
        np.array([
            (0, 0, 1, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
            (2, 3, 1, 100., 4., 0., 0), (3, 0, 2, 100., 1., 0., 0),
            (4, 1, 2, 200., 2., 0., 1), (5, 3, 2, 100., 4., 0., 0)
        ], dtype=order_dt)
    )
    np.testing.assert_array_equal(
        portfolio.call_seq.values,
        np.array([
            [1, 0, 0],
            [0, 1, 0],
            [1, 0, 0],
            [1, 0, 0],
            [1, 0, 0]
        ])
    )
    # 'auto' call sequence with rotating order sizes across three columns
    # sharing one cash pool: sells should be executed before buys.
    kwargs = dict(
        price=1.,
        size=pd.DataFrame([
            [0., 0., np.inf],
            [0., np.inf, -np.inf],
            [np.inf, -np.inf, 0.],
            [-np.inf, 0., np.inf],
            [0., np.inf, -np.inf],
        ]),
        group_by=np.array([0, 0, 0]),
        cash_sharing=True,
        call_seq='auto'
    )
    portfolio = from_orders_all(**kwargs)
    record_arrays_close(
        portfolio.order_records,
        np.array([
            (0, 0, 2, 100., 1., 0., 0), (1, 1, 2, 200., 1., 0., 1),
            (2, 1, 1, 200., 1., 0., 0), (3, 2, 1, 200., 1., 0., 1),
            (4, 2, 0, 200., 1., 0., 0), (5, 3, 0, 200., 1., 0., 1),
            (6, 3, 2, 200., 1., 0., 0), (7, 4, 2, 200., 1., 0., 1),
            (8, 4, 1, 200., 1., 0., 0)
        ], dtype=order_dt)
    )
    np.testing.assert_array_equal(
        portfolio.call_seq.values,
        np.array([
            [0, 1, 2],
            [2, 0, 1],
            [1, 2, 0],
            [0, 1, 2],
            [2, 0, 1]
        ])
    )
    portfolio = from_orders_longonly(**kwargs)
    record_arrays_close(
        portfolio.order_records,
        np.array([
            (0, 0, 2, 100., 1., 0., 0), (1, 1, 2, 100., 1., 0., 1),
            (2, 1, 1, 100., 1., 0., 0), (3, 2, 1, 100., 1., 0., 1),
            (4, 2, 0, 100., 1., 0., 0), (5, 3, 0, 100., 1., 0., 1),
            (6, 3, 2, 100., 1., 0., 0), (7, 4, 2, 100., 1., 0., 1),
            (8, 4, 1, 100., 1., 0., 0)
        ], dtype=order_dt)
    )
    np.testing.assert_array_equal(
        portfolio.call_seq.values,
        np.array([
            [0, 1, 2],
            [2, 0, 1],
            [1, 2, 0],
            [0, 1, 2],
            [2, 0, 1]
        ])
    )
    portfolio = from_orders_shortonly(**kwargs)
    record_arrays_close(
        portfolio.order_records,
        np.array([
            (0, 0, 2, 100., 1., 0., 1), (1, 1, 2, 100., 1., 0., 0),
            (2, 2, 0, 100., 1., 0., 1), (3, 3, 0, 100., 1., 0., 0),
            (4, 4, 1, 100., 1., 0., 1)
        ], dtype=order_dt)
    )
    np.testing.assert_array_equal(
        portfolio.call_seq.values,
        np.array([
            [2, 0, 1],
            [1, 0, 2],
            [0, 2, 1],
            [2, 1, 0],
            [1, 0, 2]
        ])
    )
def test_value(self):
    """size_type='value' converts a cash value into shares at the current price."""
    record_arrays_close(
        from_orders_all(size=order_size_one, size_type='value').order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 0.5, 2.0, 0.0, 1),
            (2, 3, 0, 0.25, 4.0, 0.0, 0), (3, 4, 0, 0.2, 5.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_orders_longonly(size=order_size_one, size_type='value').order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 0.5, 2.0, 0.0, 1),
            (2, 3, 0, 0.25, 4.0, 0.0, 0), (3, 4, 0, 0.2, 5.0, 0.0, 1)
        ], dtype=order_dt)
    )
    record_arrays_close(
        from_orders_shortonly(size=order_size_one, size_type='value').order_records,
        np.array([
            (0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 0.5, 2.0, 0.0, 0),
            (2, 3, 0, 0.25, 4.0, 0.0, 1), (3, 4, 0, 0.2, 5.0, 0.0, 0)
        ], dtype=order_dt)
    )
    def test_target_amount(self):
        # size_type='targetamount': `size` is the final position (in shares) to
        # hold, so only the first bar produces an order per column.
        record_arrays_close(
            from_orders_all(size=[[75., -75.]], size_type='targetamount').order_records,
            np.array([
                (0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 0, 1, 75.0, 1.0, 0.0, 1)
            ], dtype=order_dt)
        )
        # Direction-restricted portfolios only fill the target that is
        # achievable in that direction — a single order each.
        record_arrays_close(
            from_orders_longonly(size=[[75., -75.]], size_type='targetamount').order_records,
            np.array([
                (0, 0, 0, 75.0, 1.0, 0.0, 0)
            ], dtype=order_dt)
        )
        record_arrays_close(
            from_orders_shortonly(size=[[75., -75.]], size_type='targetamount').order_records,
            np.array([
                (0, 0, 0, 75.0, 1.0, 0.0, 1)
            ], dtype=order_dt)
        )
        # With cash sharing, the second column can only buy with what is left
        # after the first column's purchase (100 - 75 = 25 shares at price 1).
        record_arrays_close(
            from_orders_all(
                price=price_wide, size=75., size_type='targetamount',
                group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
            np.array([
                (0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 1.0, 0.0, 0)
            ], dtype=order_dt)
        )
    def test_target_value(self):
        # size_type='targetvalue': hold a constant cash value (50) per column.
        # As price rises each bar, part of the position is sold to keep the
        # held value at 50 (e.g. 25 @ 2.0 sold so 25 shares remain worth 50).
        record_arrays_close(
            from_orders_all(size=[[50., -50.]], size_type='targetvalue').order_records,
            np.array([
                (0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 25.0, 2.0, 0.0, 1),
                (2, 2, 0, 8.333333333333332, 3.0, 0.0, 1), (3, 3, 0, 4.166666666666668, 4.0, 0.0, 1),
                (4, 4, 0, 2.5, 5.0, 0.0, 1), (5, 0, 1, 50.0, 1.0, 0.0, 1),
                (6, 1, 1, 25.0, 2.0, 0.0, 0), (7, 2, 1, 8.333333333333332, 3.0, 0.0, 0),
                (8, 3, 1, 4.166666666666668, 4.0, 0.0, 0), (9, 4, 1, 2.5, 5.0, 0.0, 0)
            ], dtype=order_dt)
        )
        record_arrays_close(
            from_orders_longonly(size=[[50., -50.]], size_type='targetvalue').order_records,
            np.array([
                (0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 25.0, 2.0, 0.0, 1),
                (2, 2, 0, 8.333333333333332, 3.0, 0.0, 1), (3, 3, 0, 4.166666666666668, 4.0, 0.0, 1),
                (4, 4, 0, 2.5, 5.0, 0.0, 1)
            ], dtype=order_dt)
        )
        # Short-only mirrors long-only with buy/sell sides flipped.
        record_arrays_close(
            from_orders_shortonly(size=[[50., -50.]], size_type='targetvalue').order_records,
            np.array([
                (0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 1, 0, 25.0, 2.0, 0.0, 0),
                (2, 2, 0, 8.333333333333332, 3.0, 0.0, 0), (3, 3, 0, 4.166666666666668, 4.0, 0.0, 0),
                (4, 4, 0, 2.5, 5.0, 0.0, 0)
            ], dtype=order_dt)
        )
        # Grouped with cash sharing: all three columns of price_wide are
        # rebalanced toward 50 each bar.
        record_arrays_close(
            from_orders_all(
                price=price_wide, size=50., size_type='targetvalue',
                group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
            np.array([
                (0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 50.0, 1.0, 0.0, 0),
                (2, 1, 0, 25.0, 2.0, 0.0, 1), (3, 1, 1, 25.0, 2.0, 0.0, 1),
                (4, 1, 2, 25.0, 2.0, 0.0, 0), (5, 2, 0, 8.333333333333332, 3.0, 0.0, 1),
                (6, 2, 1, 8.333333333333332, 3.0, 0.0, 1), (7, 2, 2, 8.333333333333332, 3.0, 0.0, 1),
                (8, 3, 0, 4.166666666666668, 4.0, 0.0, 1), (9, 3, 1, 4.166666666666668, 4.0, 0.0, 1),
                (10, 3, 2, 4.166666666666668, 4.0, 0.0, 1), (11, 4, 0, 2.5, 5.0, 0.0, 1),
                (12, 4, 1, 2.5, 5.0, 0.0, 1), (13, 4, 2, 2.5, 5.0, 0.0, 1)
            ], dtype=order_dt)
        )
    def test_target_percent(self):
        # size_type='targetpercent': hold a fixed fraction (50%) of the current
        # portfolio value, so sell sizes shrink geometrically as price rises.
        record_arrays_close(
            from_orders_all(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
            np.array([
                (0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 12.5, 2.0, 0.0, 1), (2, 2, 0, 6.25, 3.0, 0.0, 1),
                (3, 3, 0, 3.90625, 4.0, 0.0, 1), (4, 4, 0, 2.734375, 5.0, 0.0, 1), (5, 0, 1, 50.0, 1.0, 0.0, 1),
                (6, 1, 1, 37.5, 2.0, 0.0, 0), (7, 2, 1, 6.25, 3.0, 0.0, 0), (8, 3, 1, 2.34375, 4.0, 0.0, 0),
                (9, 4, 1, 1.171875, 5.0, 0.0, 0)
            ], dtype=order_dt)
        )
        record_arrays_close(
            from_orders_longonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
            np.array([
                (0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 12.5, 2.0, 0.0, 1), (2, 2, 0, 6.25, 3.0, 0.0, 1),
                (3, 3, 0, 3.90625, 4.0, 0.0, 1), (4, 4, 0, 2.734375, 5.0, 0.0, 1)
            ], dtype=order_dt)
        )
        record_arrays_close(
            from_orders_shortonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
            np.array([
                (0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 1, 0, 37.5, 2.0, 0.0, 0), (2, 2, 0, 6.25, 3.0, 0.0, 0),
                (3, 3, 0, 2.34375, 4.0, 0.0, 0), (4, 4, 0, 1.171875, 5.0, 0.0, 0)
            ], dtype=order_dt)
        )
        # Grouped with cash sharing: 50% + 50% exhausts the shared cash on the
        # first bar, so only the first two columns ever trade.
        record_arrays_close(
            from_orders_all(
                price=price_wide, size=0.5, size_type='targetpercent',
                group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
            np.array([
                (0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 50.0, 1.0, 0.0, 0)
            ], dtype=order_dt)
        )
    def test_update_value(self):
        # update_value toggles whether group value is recomputed after each
        # filled order. With a single column it makes no difference, so both
        # settings must produce identical records.
        record_arrays_close(
            from_orders_all(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
                update_value=False).order_records,
            from_orders_all(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
                update_value=True).order_records
        )
        # In a shared-cash group of three columns the two settings diverge:
        # with update_value=False later columns in a row see a stale value.
        record_arrays_close(
            from_orders_all(
                price=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
                group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=False).order_records,
            np.array([
                (0, 0, 0, 50.0, 1.01, 0.505, 0),
                (1, 0, 1, 48.02960494069208, 1.01, 0.485099009900992, 0),
                (2, 1, 0, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
                (3, 1, 1, 0.9465661198057499, 2.02, 0.019120635620076154, 0),
                (4, 2, 0, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
                (5, 2, 1, 0.018558300554959377, 3.0300000000000002, 0.0005623165068152705, 0),
                (6, 3, 0, 0.00037870218456959037, 3.96, 1.4996606508955778e-05, 1),
                (7, 3, 1, 0.0003638525743521767, 4.04, 1.4699644003827875e-05, 0),
                (8, 4, 0, 7.424805112066224e-06, 4.95, 3.675278530472781e-07, 1),
                (9, 4, 1, 7.133664827307231e-06, 5.05, 3.6025007377901643e-07, 0)
            ], dtype=order_dt)
        )
        # With update_value=True the third column also trades — NOTE(review):
        # presumably because the freshly updated group value leaves it cash.
        record_arrays_close(
            from_orders_all(
                price=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
                group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=True).order_records,
            np.array([
                (0, 0, 0, 50.0, 1.01, 0.505, 0),
                (1, 0, 1, 48.02960494069208, 1.01, 0.485099009900992, 0),
                (2, 1, 0, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
                (3, 1, 1, 0.7303208018821721, 2.02, 0.014752480198019875, 0),
                (4, 1, 2, 0.21624531792357785, 2.02, 0.0043681554220562635, 0),
                (5, 2, 0, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
                (6, 2, 1, 0.009608602243410758, 2.9699999999999998, 0.00028537548662929945, 1),
                (7, 2, 2, 0.02779013180558861, 3.0300000000000002, 0.0008420409937093393, 0),
                (8, 3, 0, 0.0005670876809631409, 3.96, 2.2456672166140378e-05, 1),
                (9, 3, 1, 0.00037770350099464167, 3.96, 1.4957058639387809e-05, 1),
                (10, 3, 2, 0.0009077441794302741, 4.04, 3.6672864848982974e-05, 0),
                (11, 4, 0, 1.8523501267964093e-05, 4.95, 9.169133127642227e-07, 1),
                (12, 4, 1, 1.2972670177191503e-05, 4.95, 6.421471737709794e-07, 1),
                (13, 4, 2, 3.0261148547590434e-05, 5.05, 1.5281880016533242e-06, 0)
            ], dtype=order_dt)
        )
    def test_percent(self):
        # size_type='percent': each order uses 50% of the currently available
        # resources, so buy sizes halve (and shrink with rising price) each bar.
        record_arrays_close(
            from_orders_all(size=[[0.5, -0.5]], size_type='percent').order_records,
            np.array([
                (0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 12.5, 2., 0., 0),
                (2, 2, 0, 4.16666667, 3., 0., 0), (3, 3, 0, 1.5625, 4., 0., 0),
                (4, 4, 0, 0.625, 5., 0., 0), (5, 0, 1, 50., 1., 0., 1),
                (6, 1, 1, 12.5, 2., 0., 1), (7, 2, 1, 4.16666667, 3., 0., 1),
                (8, 3, 1, 1.5625, 4., 0., 1), (9, 4, 1, 0.625, 5., 0., 1)
            ], dtype=order_dt)
        )
        record_arrays_close(
            from_orders_longonly(size=[[0.5, -0.5]], size_type='percent').order_records,
            np.array([
                (0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 12.5, 2., 0., 0),
                (2, 2, 0, 4.16666667, 3., 0., 0), (3, 3, 0, 1.5625, 4., 0., 0),
                (4, 4, 0, 0.625, 5., 0., 0)
            ], dtype=order_dt)
        )
        record_arrays_close(
            from_orders_shortonly(size=[[0.5, -0.5]], size_type='percent').order_records,
            np.array([
                (0, 0, 0, 50., 1., 0., 1), (1, 1, 0, 12.5, 2., 0., 1),
                (2, 2, 0, 4.16666667, 3., 0., 1), (3, 3, 0, 1.5625, 4., 0., 1),
                (4, 4, 0, 0.625, 5., 0., 1)
            ], dtype=order_dt)
        )
        # With cash sharing, each column in turn takes 50% of whatever cash the
        # previous column left over (50 -> 25 -> 12.5 on the first bar).
        record_arrays_close(
            from_orders_all(
                price=price_wide, size=0.5, size_type='percent',
                group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
            np.array([
                (0, 0, 0, 5.00000000e+01, 1., 0., 0), (1, 0, 1, 2.50000000e+01, 1., 0., 0),
                (2, 0, 2, 1.25000000e+01, 1., 0., 0), (3, 1, 0, 3.12500000e+00, 2., 0., 0),
                (4, 1, 1, 1.56250000e+00, 2., 0., 0), (5, 1, 2, 7.81250000e-01, 2., 0., 0),
                (6, 2, 0, 2.60416667e-01, 3., 0., 0), (7, 2, 1, 1.30208333e-01, 3., 0., 0),
                (8, 2, 2, 6.51041667e-02, 3., 0., 0), (9, 3, 0, 2.44140625e-02, 4., 0., 0),
                (10, 3, 1, 1.22070312e-02, 4., 0., 0), (11, 3, 2, 6.10351562e-03, 4., 0., 0),
                (12, 4, 0, 2.44140625e-03, 5., 0., 0), (13, 4, 1, 1.22070312e-03, 5., 0., 0),
                (14, 4, 2, 6.10351562e-04, 5., 0., 0)
            ], dtype=order_dt)
        )
def test_auto_seq(self):
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
pd.testing.assert_frame_equal(
from_orders_all(
price=1., size=target_hold_value, size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
pd.testing.assert_frame_equal(
from_orders_all(
price=1., size=target_hold_value / 100, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
def test_max_orders(self):
_ = from_orders_all(price=price_wide)
_ = from_orders_all(price=price_wide, max_orders=9)
with pytest.raises(Exception) as e_info:
_ = from_orders_all(price=price_wide, max_orders=8)
def test_max_logs(self):
_ = from_orders_all(price=price_wide, log=True)
_ = from_orders_all(price=price_wide, log=True, max_logs=15)
with pytest.raises(Exception) as e_info:
_ = from_orders_all(price=price_wide, log=True, max_logs=14)
# ############# from_order_func ############# #
@njit
def order_func_nb(c, size):
    """Order callback: buy `size` on even bars, sell `size` on odd bars."""
    if c.i % 2 == 0:
        amount = size
    else:
        amount = -size
    return nb.create_order_nb(amount, c.close[c.i, c.col])
@njit
def log_order_func_nb(c, size):
    """Same alternating buy/sell callback as order_func_nb, with logging on."""
    if c.i % 2 == 0:
        amount = size
    else:
        amount = -size
    return nb.create_order_nb(amount, c.close[c.i, c.col], log=True)
class TestFromOrderFunc:
    @pytest.mark.parametrize(
        "test_row_wise",
        [False, True],
    )
    def test_one_column(self, test_row_wise):
        # A plain list and a Series must simulate identically; with one column
        # row-wise and column-wise execution cannot differ.
        portfolio = vbt.Portfolio.from_order_func(price.tolist(), order_func_nb, np.inf, row_wise=test_row_wise)
        record_arrays_close(
            portfolio.order_records,
            np.array([
                (0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
                (2, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (3, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
                (4, 4, 0, 53.33333333333335, 5.0, 0.0, 0)
            ], dtype=order_dt)
        )
        portfolio = vbt.Portfolio.from_order_func(price, order_func_nb, np.inf, row_wise=test_row_wise)
        record_arrays_close(
            portfolio.order_records,
            np.array([
                (0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
                (2, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (3, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
                (4, 4, 0, 53.33333333333335, 5.0, 0.0, 0)
            ], dtype=order_dt)
        )
        # Wrapper metadata: datetime index, single default integer column,
        # 1-dim shape, daily frequency, no grouping.
        pd.testing.assert_index_equal(
            portfolio.wrapper.index,
            pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
        )
        pd.testing.assert_index_equal(
            portfolio.wrapper.columns,
            pd.Int64Index([0], dtype='int64')
        )
        assert portfolio.wrapper.ndim == 1
        assert portfolio.wrapper.freq == day_dt
        assert portfolio.wrapper.grouper.group_by is None
    @pytest.mark.parametrize(
        "test_row_wise",
        [False, True],
    )
    def test_multiple_columns(self, test_row_wise):
        # Same per-column fills in both modes; only the record order differs:
        # row-wise interleaves columns per bar, column-wise finishes each
        # column before the next.
        portfolio = vbt.Portfolio.from_order_func(price_wide, order_func_nb, np.inf, row_wise=test_row_wise)
        if test_row_wise:
            record_arrays_close(
                portfolio.order_records,
                np.array([
                    (0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 1.0, 0.0, 0),
                    (2, 0, 2, 100.0, 1.0, 0.0, 0), (3, 1, 0, 200.0, 2.0, 0.0, 1),
                    (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 2, 200.0, 2.0, 0.0, 1),
                    (6, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (7, 2, 1, 133.33333333333334, 3.0, 0.0, 0),
                    (8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
                    (10, 3, 1, 66.66666666666669, 4.0, 0.0, 1), (11, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
                    (12, 4, 0, 53.33333333333335, 5.0, 0.0, 0), (13, 4, 1, 53.33333333333335, 5.0, 0.0, 0),
                    (14, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
                ], dtype=order_dt)
            )
        else:
            record_arrays_close(
                portfolio.order_records,
                np.array([
                    (0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
                    (2, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (3, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
                    (4, 4, 0, 53.33333333333335, 5.0, 0.0, 0), (5, 0, 1, 100.0, 1.0, 0.0, 0),
                    (6, 1, 1, 200.0, 2.0, 0.0, 1), (7, 2, 1, 133.33333333333334, 3.0, 0.0, 0),
                    (8, 3, 1, 66.66666666666669, 4.0, 0.0, 1), (9, 4, 1, 53.33333333333335, 5.0, 0.0, 0),
                    (10, 0, 2, 100.0, 1.0, 0.0, 0), (11, 1, 2, 200.0, 2.0, 0.0, 1),
                    (12, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (13, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
                    (14, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
                ], dtype=order_dt)
            )
        # Wrapper metadata: DataFrame input keeps its index and column labels.
        pd.testing.assert_index_equal(
            portfolio.wrapper.index,
            pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
        )
        pd.testing.assert_index_equal(
            portfolio.wrapper.columns,
            pd.Index(['a', 'b', 'c'], dtype='object')
        )
        assert portfolio.wrapper.ndim == 2
        assert portfolio.wrapper.freq == day_dt
        assert portfolio.wrapper.grouper.group_by is None
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_target_shape(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5,), row_wise=test_row_wise)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert portfolio.wrapper.ndim == 1
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5, 1), row_wise=test_row_wise)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64', name='iteration_idx')
)
assert portfolio.wrapper.ndim == 2
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5, 1), row_wise=test_row_wise,
keys=pd.Index(['first'], name='custom'))
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['first'], dtype='object', name='custom')
)
assert portfolio.wrapper.ndim == 2
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5, 3), row_wise=test_row_wise)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0, 1, 2], dtype='int64', name='iteration_idx')
)
assert portfolio.wrapper.ndim == 2
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5, 3), row_wise=test_row_wise,
keys=pd.Index(['first', 'second', 'third'], name='custom'))
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['first', 'second', 'third'], dtype='object', name='custom')
)
assert portfolio.wrapper.ndim == 2
    @pytest.mark.parametrize(
        "test_row_wise",
        [False, True],
    )
    def test_group_by(self, test_row_wise):
        # Grouping without cash sharing: groups only affect aggregation
        # (init_cash per group = 100 per member), not the fills themselves.
        portfolio = vbt.Portfolio.from_order_func(
            price_wide, order_func_nb, np.inf,
            group_by=np.array([0, 0, 1]), row_wise=test_row_wise)
        if test_row_wise:
            record_arrays_close(
                portfolio.order_records,
                np.array([
                    (0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 1.0, 0.0, 0),
                    (2, 0, 2, 100.0, 1.0, 0.0, 0), (3, 1, 0, 200.0, 2.0, 0.0, 1),
                    (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 2, 200.0, 2.0, 0.0, 1),
                    (6, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (7, 2, 1, 133.33333333333334, 3.0, 0.0, 0),
                    (8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
                    (10, 3, 1, 66.66666666666669, 4.0, 0.0, 1), (11, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
                    (12, 4, 0, 53.33333333333335, 5.0, 0.0, 0), (13, 4, 1, 53.33333333333335, 5.0, 0.0, 0),
                    (14, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
                ], dtype=order_dt)
            )
        else:
            # Column-wise iterates group by group: cols 0 and 1 (group 0)
            # interleaved per bar, then col 2 (group 1).
            record_arrays_close(
                portfolio.order_records,
                np.array([
                    (0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 1.0, 0.0, 0),
                    (2, 1, 0, 200.0, 2.0, 0.0, 1), (3, 1, 1, 200.0, 2.0, 0.0, 1),
                    (4, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (5, 2, 1, 133.33333333333334, 3.0, 0.0, 0),
                    (6, 3, 0, 66.66666666666669, 4.0, 0.0, 1), (7, 3, 1, 66.66666666666669, 4.0, 0.0, 1),
                    (8, 4, 0, 53.33333333333335, 5.0, 0.0, 0), (9, 4, 1, 53.33333333333335, 5.0, 0.0, 0),
                    (10, 0, 2, 100.0, 1.0, 0.0, 0), (11, 1, 2, 200.0, 2.0, 0.0, 1),
                    (12, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (13, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
                    (14, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
                ], dtype=order_dt)
            )
        pd.testing.assert_index_equal(
            portfolio.wrapper.grouper.group_by,
            pd.Int64Index([0, 0, 1], dtype='int64')
        )
        pd.testing.assert_series_equal(
            portfolio.init_cash,
            pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
        )
        assert not portfolio.cash_sharing
    @pytest.mark.parametrize(
        "test_row_wise",
        [False, True],
    )
    def test_cash_sharing(self, test_row_wise):
        # With cash sharing, group 0's two columns share 100 in cash: col 0
        # spends it all on the first bar, so col 1 never trades.
        portfolio = vbt.Portfolio.from_order_func(
            price_wide, order_func_nb, np.inf,
            group_by=np.array([0, 0, 1]), cash_sharing=True, row_wise=test_row_wise)
        if test_row_wise:
            record_arrays_close(
                portfolio.order_records,
                np.array([
                    (0, 0, 0, 100., 1., 0., 0), (1, 0, 2, 100., 1., 0., 0),
                    (2, 1, 0, 200., 2., 0., 1), (3, 1, 2, 200., 2., 0., 1),
                    (4, 2, 0, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
                    (6, 3, 0, 66.66666667, 4., 0., 1), (7, 3, 2, 66.66666667, 4., 0., 1),
                    (8, 4, 0, 53.33333333, 5., 0., 0), (9, 4, 2, 53.33333333, 5., 0., 0)
                ], dtype=order_dt)
            )
        else:
            record_arrays_close(
                portfolio.order_records,
                np.array([
                    (0, 0, 0, 100., 1., 0., 0), (1, 1, 0, 200., 2., 0., 1),
                    (2, 2, 0, 133.33333333, 3., 0., 0), (3, 3, 0, 66.66666667, 4., 0., 1),
                    (4, 4, 0, 53.33333333, 5., 0., 0), (5, 0, 2, 100., 1., 0., 0),
                    (6, 1, 2, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
                    (8, 3, 2, 66.66666667, 4., 0., 1), (9, 4, 2, 53.33333333, 5., 0., 0)
                ], dtype=order_dt)
            )
        pd.testing.assert_index_equal(
            portfolio.wrapper.grouper.group_by,
            pd.Int64Index([0, 0, 1], dtype='int64')
        )
        # Shared cash: 100 per group, not per column.
        pd.testing.assert_series_equal(
            portfolio.init_cash,
            pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
        )
        assert portfolio.cash_sharing
    @pytest.mark.parametrize(
        "test_row_wise",
        [False, True],
    )
    def test_call_seq(self, test_row_wise):
        # Default call sequence: columns within each group are processed in
        # their natural order every bar.
        portfolio = vbt.Portfolio.from_order_func(
            price_wide, order_func_nb, np.inf, group_by=np.array([0, 0, 1]),
            cash_sharing=True, row_wise=test_row_wise)
        if test_row_wise:
            record_arrays_close(
                portfolio.order_records,
                np.array([
                    (0, 0, 0, 100., 1., 0., 0), (1, 0, 2, 100., 1., 0., 0),
                    (2, 1, 0, 200., 2., 0., 1), (3, 1, 2, 200., 2., 0., 1),
                    (4, 2, 0, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
                    (6, 3, 0, 66.66666667, 4., 0., 1), (7, 3, 2, 66.66666667, 4., 0., 1),
                    (8, 4, 0, 53.33333333, 5., 0., 0), (9, 4, 2, 53.33333333, 5., 0., 0)
                ], dtype=order_dt)
            )
        else:
            record_arrays_close(
                portfolio.order_records,
                np.array([
                    (0, 0, 0, 100., 1., 0., 0), (1, 1, 0, 200., 2., 0., 1),
                    (2, 2, 0, 133.33333333, 3., 0., 0), (3, 3, 0, 66.66666667, 4., 0., 1),
                    (4, 4, 0, 53.33333333, 5., 0., 0), (5, 0, 2, 100., 1., 0., 0),
                    (6, 1, 2, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
                    (8, 3, 2, 66.66666667, 4., 0., 1), (9, 4, 2, 53.33333333, 5., 0., 0)
                ], dtype=order_dt)
            )
        # call_seq is per-group: [0, 1] for group 0's two columns, [0] for group 1.
        np.testing.assert_array_equal(
            portfolio.call_seq.values,
            np.array([
                [0, 1, 0],
                [0, 1, 0],
                [0, 1, 0],
                [0, 1, 0],
                [0, 1, 0]
            ])
        )
        # 'reversed': columns within each group are called in reverse order,
        # so col 1 now takes group 0's shared cash first.
        portfolio = vbt.Portfolio.from_order_func(
            price_wide, order_func_nb, np.inf, group_by=np.array([0, 0, 1]),
            cash_sharing=True, call_seq='reversed', row_wise=test_row_wise)
        if test_row_wise:
            record_arrays_close(
                portfolio.order_records,
                np.array([
                    (0, 0, 1, 100., 1., 0., 0), (1, 0, 2, 100., 1., 0., 0),
                    (2, 1, 1, 200., 2., 0., 1), (3, 1, 2, 200., 2., 0., 1),
                    (4, 2, 1, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
                    (6, 3, 1, 66.66666667, 4., 0., 1), (7, 3, 2, 66.66666667, 4., 0., 1),
                    (8, 4, 1, 53.33333333, 5., 0., 0), (9, 4, 2, 53.33333333, 5., 0., 0)
                ], dtype=order_dt)
            )
        else:
            record_arrays_close(
                portfolio.order_records,
                np.array([
                    (0, 0, 1, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
                    (2, 2, 1, 133.33333333, 3., 0., 0), (3, 3, 1, 66.66666667, 4., 0., 1),
                    (4, 4, 1, 53.33333333, 5., 0., 0), (5, 0, 2, 100., 1., 0., 0),
                    (6, 1, 2, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
                    (8, 3, 2, 66.66666667, 4., 0., 1), (9, 4, 2, 53.33333333, 5., 0., 0)
                ], dtype=order_dt)
            )
        np.testing.assert_array_equal(
            portfolio.call_seq.values,
            np.array([
                [1, 0, 0],
                [1, 0, 0],
                [1, 0, 0],
                [1, 0, 0],
                [1, 0, 0]
            ])
        )
        # 'random': per-row shuffled call sequence, reproducible via `seed`.
        portfolio = vbt.Portfolio.from_order_func(
            price_wide, order_func_nb, np.inf, group_by=np.array([0, 0, 1]),
            cash_sharing=True, call_seq='random', seed=seed, row_wise=test_row_wise)
        if test_row_wise:
            record_arrays_close(
                portfolio.order_records,
                np.array([
                    (0, 0, 1, 100., 1., 0., 0), (1, 0, 2, 100., 1., 0., 0),
                    (2, 1, 1, 200., 2., 0., 1), (3, 1, 2, 200., 2., 0., 1),
                    (4, 2, 1, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
                    (6, 3, 1, 66.66666667, 4., 0., 1), (7, 3, 2, 66.66666667, 4., 0., 1),
                    (8, 4, 1, 53.33333333, 5., 0., 0), (9, 4, 2, 53.33333333, 5., 0., 0)
                ], dtype=order_dt)
            )
        else:
            record_arrays_close(
                portfolio.order_records,
                np.array([
                    (0, 0, 1, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
                    (2, 2, 1, 133.33333333, 3., 0., 0), (3, 3, 1, 66.66666667, 4., 0., 1),
                    (4, 4, 1, 53.33333333, 5., 0., 0), (5, 0, 2, 100., 1., 0., 0),
                    (6, 1, 2, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
                    (8, 3, 2, 66.66666667, 4., 0., 1), (9, 4, 2, 53.33333333, 5., 0., 0)
                ], dtype=order_dt)
            )
        np.testing.assert_array_equal(
            portfolio.call_seq.values,
            np.array([
                [1, 0, 0],
                [0, 1, 0],
                [1, 0, 0],
                [1, 0, 0],
                [1, 0, 0]
            ])
        )
        # call_seq='auto' is rejected by from_order_func; the equivalent effect
        # is achieved below with nb.sort_call_seq_nb inside a segment prep.
        with pytest.raises(Exception) as e_info:
            _ = vbt.Portfolio.from_order_func(
                price_wide, order_func_nb, np.inf, group_by=np.array([0, 0, 1]),
                cash_sharing=True, call_seq='auto', row_wise=test_row_wise
            )
        target_hold_value = pd.DataFrame({
            'a': [0., 70., 30., 0., 70.],
            'b': [30., 0., 70., 30., 30.],
            'c': [70., 30., 0., 70., 0.]
        }, index=price.index)
        @njit
        def segment_prep_func_nb(c, target_hold_value):
            # Value holdings at the current close, then sort the call sequence
            # by order value so sells free up cash before buys consume it.
            order_size = np.copy(target_hold_value[c.i, c.from_col:c.to_col])
            order_size_type = np.full(c.group_len, SizeType.TargetValue)
            direction = np.full(c.group_len, Direction.All)
            order_value_out = np.empty(c.group_len, dtype=np.float_)
            c.last_val_price[c.from_col:c.to_col] = c.close[c.i, c.from_col:c.to_col]
            nb.sort_call_seq_nb(c, order_size, order_size_type, direction, order_value_out)
            return order_size, order_size_type, direction
        @njit
        def pct_order_func_nb(c, order_size, order_size_type, direction):
            # Resolve this call's column through the sorted call sequence.
            col_i = c.call_seq_now[c.call_idx]
            return nb.create_order_nb(
                order_size[col_i],
                c.close[c.i, col_i],
                size_type=order_size_type[col_i],
                direction=direction[col_i]
            )
        portfolio = vbt.Portfolio.from_order_func(
            price_wide * 0 + 1, pct_order_func_nb, group_by=np.array([0, 0, 0]),
            cash_sharing=True, segment_prep_func_nb=segment_prep_func_nb,
            segment_prep_args=(target_hold_value.values,), row_wise=test_row_wise)
        np.testing.assert_array_equal(
            portfolio.call_seq.values,
            np.array([
                [0, 1, 2],
                [2, 1, 0],
                [0, 2, 1],
                [1, 0, 2],
                [2, 1, 0]
            ])
        )
        # Sorted sequencing makes the targets exactly attainable.
        pd.testing.assert_frame_equal(
            portfolio.asset_value(group_by=False),
            target_hold_value
        )
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_target_value(self, test_row_wise):
@njit
def target_val_segment_prep_func_nb(c, val_price):
c.last_val_price[c.from_col:c.to_col] = val_price[c.i]
return ()
@njit
def target_val_order_func_nb(c):
return nb.create_order_nb(50., c.close[c.i, c.col], size_type=SizeType.TargetValue)
portfolio = vbt.Portfolio.from_order_func(
price.iloc[1:], target_val_order_func_nb, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 1, 0, 25.0, 3.0, 0.0, 0), (1, 2, 0, 8.333333333333332, 4.0, 0.0, 1),
(2, 3, 0, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 1, 0, 25.0, 3.0, 0.0, 0), (1, 2, 0, 8.333333333333332, 4.0, 0.0, 1),
(2, 3, 0, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
portfolio = vbt.Portfolio.from_order_func(
price.iloc[1:], target_val_order_func_nb,
segment_prep_func_nb=target_val_segment_prep_func_nb,
segment_prep_args=(price.iloc[:-1].values,), row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 1, 0, 25.0, 3.0, 0.0, 1),
(2, 2, 0, 8.333333333333332, 4.0, 0.0, 1), (3, 3, 0, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 1, 0, 25.0, 3.0, 0.0, 1),
(2, 2, 0, 8.333333333333332, 4.0, 0.0, 1), (3, 3, 0, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
    @pytest.mark.parametrize(
        "test_row_wise",
        [False, True],
    )
    def test_target_percent(self, test_row_wise):
        # Target holding 50% of portfolio value via an order function; the
        # prep variant supplies the previous close as the valuation price.
        @njit
        def target_pct_segment_prep_func_nb(c, val_price):
            c.last_val_price[c.from_col:c.to_col] = val_price[c.i]
            return ()
        @njit
        def target_pct_order_func_nb(c):
            return nb.create_order_nb(0.5, c.close[c.i, c.col], size_type=SizeType.TargetPercent)
        portfolio = vbt.Portfolio.from_order_func(
            price.iloc[1:], target_pct_order_func_nb, row_wise=test_row_wise)
        # Expected records are identical for row-wise and column-wise runs.
        if test_row_wise:
            record_arrays_close(
                portfolio.order_records,
                np.array([
                    (0, 1, 0, 25.0, 3.0, 0.0, 0), (1, 2, 0, 8.333333333333332, 4.0, 0.0, 1),
                    (2, 3, 0, 1.0416666666666679, 5.0, 0.0, 1)
                ], dtype=order_dt)
            )
        else:
            record_arrays_close(
                portfolio.order_records,
                np.array([
                    (0, 1, 0, 25.0, 3.0, 0.0, 0), (1, 2, 0, 8.333333333333332, 4.0, 0.0, 1),
                    (2, 3, 0, 1.0416666666666679, 5.0, 0.0, 1)
                ], dtype=order_dt)
            )
        # With the custom valuation price, the first bar also trades.
        portfolio = vbt.Portfolio.from_order_func(
            price.iloc[1:], target_pct_order_func_nb,
            segment_prep_func_nb=target_pct_segment_prep_func_nb,
            segment_prep_args=(price.iloc[:-1].values,), row_wise=test_row_wise)
        if test_row_wise:
            record_arrays_close(
                portfolio.order_records,
                np.array([
                    (0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 1, 0, 25.0, 3.0, 0.0, 1),
                    (2, 3, 0, 3.125, 5.0, 0.0, 1)
                ], dtype=order_dt)
            )
        else:
            record_arrays_close(
                portfolio.order_records,
                np.array([
                    (0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 1, 0, 25.0, 3.0, 0.0, 1),
                    (2, 3, 0, 3.125, 5.0, 0.0, 1)
                ], dtype=order_dt)
            )
    @pytest.mark.parametrize(
        "test_row_wise",
        [False, True],
    )
    def test_update_value(self, test_row_wise):
        # Verify c.value_before / c.value_now exposed to the after-order
        # callback: equal when update_value=False, diverging (shifted by one
        # order's cash impact) when update_value=True.
        @njit
        def order_func_nb(c):
            return nb.create_order_nb(
                np.inf if c.i % 2 == 0 else -np.inf,
                c.close[c.i, c.col],
                fees=0.01,
                fixed_fees=1.,
                slippage=0.01
            )
        @njit
        def after_order_func_nb(c, value_before, value_now):
            # Capture the group value before and after the filled order.
            value_before[c.i, c.col] = c.value_before
            value_now[c.i, c.col] = c.value_now
        value_before = np.empty_like(price.values[:, None])
        value_now = np.empty_like(price.values[:, None])
        _ = vbt.Portfolio.from_order_func(
            price,
            order_func_nb,
            after_order_func_nb=after_order_func_nb,
            after_order_args=(value_before, value_now),
            row_wise=test_row_wise,
            update_value=False)
        # No update: the value seen after the order equals the value before it.
        np.testing.assert_array_equal(
            value_before,
            value_now
        )
        _ = vbt.Portfolio.from_order_func(
            price,
            order_func_nb,
            after_order_func_nb=after_order_func_nb,
            after_order_args=(value_before, value_now),
            row_wise=test_row_wise,
            update_value=True)
        np.testing.assert_array_equal(
            value_before,
            np.array([
                [100.0],
                [97.04930889128518],
                [185.46988117104038],
                [82.47853456223025],
                [104.65775576218027]
            ])
        )
        np.testing.assert_array_equal(
            value_now,
            np.array([
                [98.01980198019803],
                [187.36243097890815],
                [83.30331990785257],
                [105.72569204546781],
                [73.54075125567473]
            ])
        )
    @pytest.mark.parametrize(
        "test_row_wise",
        [False, True],
    )
    def test_after_order_context(self, test_row_wise):
        """Inspect every field of the context passed to `after_order_func_nb`.

        Runs a grouped, cash-sharing simulation in pure Python (`use_numba=False`)
        so the context objects can be collected into a list, then verifies the
        static fields, the accumulated order/log records (whose order depends on
        `row_wise`), and the per-column "last" state on the final context.
        """
        def order_func(c):
            # Buy one unit every bar with fees/slippage, logging each order.
            return nb.create_order_nb(
                1.,
                c.close[c.i, c.col],
                fees=0.01,
                fixed_fees=1.,
                slippage=0.01,
                log=True
            )
        def after_order_func(c, lst):
            # Collect each after-order context for inspection.
            lst.append(c)
        lst = []
        _ = vbt.Portfolio.from_order_func(
            price_wide,
            order_func,
            after_order_func_nb=after_order_func,
            after_order_args=(lst,),
            row_wise=test_row_wise,
            update_value=True,
            max_logs=price_wide.shape[0] * price_wide.shape[1],
            use_numba=False,
            group_by=[0, 0, 1],
            cash_sharing=True
        )
        # Inspect the very last context (last column of last row).
        c = lst[-1]
        assert c.target_shape == price_wide.shape
        np.testing.assert_array_equal(
            c.close,
            price_wide.values
        )
        # Groups: columns (a, b) and (c).
        np.testing.assert_array_equal(
            c.group_lens,
            np.array([2, 1])
        )
        np.testing.assert_array_equal(
            c.init_cash,
            np.array([100., 100.])
        )
        assert c.cash_sharing
        np.testing.assert_array_equal(
            c.call_seq,
            np.array([
                [0, 1, 0],
                [0, 1, 0],
                [0, 1, 0],
                [0, 1, 0],
                [0, 1, 0]
            ])
        )
        np.testing.assert_array_equal(
            c.active_mask,
            np.array([
                [True, True],
                [True, True],
                [True, True],
                [True, True],
                [True, True]
            ])
        )
        assert c.update_value
        # Order records: row-wise iterates rows first, column-wise iterates
        # group/column first — same records, different accumulation order.
        if test_row_wise:
            record_arrays_close(
                c.order_records,
                np.array([
                    (0, 0, 0, 1.0, 1.01, 1.0101, 0), (1, 0, 1, 1.0, 1.01, 1.0101, 0),
                    (2, 0, 2, 1.0, 1.01, 1.0101, 0), (3, 1, 0, 1.0, 2.02, 1.0202, 0),
                    (4, 1, 1, 1.0, 2.02, 1.0202, 0), (5, 1, 2, 1.0, 2.02, 1.0202, 0),
                    (6, 2, 0, 1.0, 3.0300000000000002, 1.0303, 0), (7, 2, 1, 1.0, 3.0300000000000002, 1.0303, 0),
                    (8, 2, 2, 1.0, 3.0300000000000002, 1.0303, 0), (9, 3, 0, 1.0, 4.04, 1.0404, 0),
                    (10, 3, 1, 1.0, 4.04, 1.0404, 0), (11, 3, 2, 1.0, 4.04, 1.0404, 0),
                    (12, 4, 0, 1.0, 5.05, 1.0505, 0), (13, 4, 1, 1.0, 5.05, 1.0505, 0),
                    (14, 4, 2, 1.0, 5.05, 1.0505, 0)
                ], dtype=order_dt)
            )
        else:
            record_arrays_close(
                c.order_records,
                np.array([
                    (0, 0, 0, 1.0, 1.01, 1.0101, 0), (1, 0, 1, 1.0, 1.01, 1.0101, 0),
                    (2, 1, 0, 1.0, 2.02, 1.0202, 0), (3, 1, 1, 1.0, 2.02, 1.0202, 0),
                    (4, 2, 0, 1.0, 3.0300000000000002, 1.0303, 0), (5, 2, 1, 1.0, 3.0300000000000002, 1.0303, 0),
                    (6, 3, 0, 1.0, 4.04, 1.0404, 0), (7, 3, 1, 1.0, 4.04, 1.0404, 0),
                    (8, 4, 0, 1.0, 5.05, 1.0505, 0), (9, 4, 1, 1.0, 5.05, 1.0505, 0),
                    (10, 0, 2, 1.0, 1.01, 1.0101, 0), (11, 1, 2, 1.0, 2.02, 1.0202, 0),
                    (12, 2, 2, 1.0, 3.0300000000000002, 1.0303, 0), (13, 3, 2, 1.0, 4.04, 1.0404, 0),
                    (14, 4, 2, 1.0, 5.05, 1.0505, 0)
                ], dtype=order_dt)
            )
        # Log records mirror the same iteration order as the order records.
        if test_row_wise:
            record_arrays_close(
                c.log_records,
                np.array([
                    (0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2, 0.01, 1.0,
                     0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799, 1.0, 0.0, 97.9799,
                     1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 0),
                    (1, 0, 1, 0, 97.9799, 0.0, 0.0, 97.9799, np.nan, 98.9899, 1.0, 1.0, 0, 2,
                     0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 95.9598, 1.0,
                     0.0, 95.9598, 1.01, 97.97980000000001, 1.0, 1.01, 1.0101, 0, 0, -1, 1),
                    (2, 0, 2, 1, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2, 0.01,
                     1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799, 1.0, 0.0,
                     97.9799, 1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 2),
                    (3, 1, 0, 0, 95.9598, 1.0, 0.0, 95.9598, 1.0, 97.9598, 1.0, 2.0, 0,
                     2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 92.9196,
                     2.0, 0.0, 92.9196, 2.02, 97.95960000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 3),
                    (4, 1, 1, 0, 92.9196, 1.0, 0.0, 92.9196, 1.0, 97.95960000000001, 1.0, 2.0,
                     0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 89.8794,
                     2.0, 0.0, 89.8794, 2.02, 97.95940000000002, 1.0, 2.02, 1.0202, 0, 0, -1, 4),
                    (5, 1, 2, 1, 97.9799, 1.0, 0.0, 97.9799, 1.0, 98.9799, 1.0, 2.0, 0, 2,
                     0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 94.9397, 2.0,
                     0.0, 94.9397, 2.02, 98.97970000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 5),
                    (6, 2, 0, 0, 89.8794, 2.0, 0.0, 89.8794, 2.0, 97.8794, 1.0, 3.0, 0, 2,
                     0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.8191, 3.0,
                     0.0, 85.8191, 3.0300000000000002, 98.90910000000001, 1.0,
                     3.0300000000000002, 1.0303, 0, 0, -1, 6),
                    (7, 2, 1, 0, 85.8191, 2.0, 0.0, 85.8191, 2.0, 98.90910000000001,
                     1.0, 3.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True,
                     81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0300000000000002,
                     99.93880000000001, 1.0, 3.0300000000000002, 1.0303, 0, 0, -1, 7),
                    (8, 2, 2, 1, 94.9397, 2.0, 0.0, 94.9397, 2.0, 98.9397, 1.0, 3.0, 0,
                     2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 90.8794,
                     3.0, 0.0, 90.8794, 3.0300000000000002, 99.96940000000001, 1.0,
                     3.0300000000000002, 1.0303, 0, 0, -1, 8),
                    (9, 3, 0, 0, 81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0, 99.75880000000001,
                     1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True,
                     76.67840000000001, 4.0, 0.0, 76.67840000000001, 4.04, 101.83840000000001,
                     1.0, 4.04, 1.0404, 0, 0, -1, 9),
                    (10, 3, 1, 0, 76.67840000000001, 3.0, 0.0, 76.67840000000001, 3.0,
                     101.83840000000001, 1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
                     False, True, False, True, 71.59800000000001, 4.0, 0.0, 71.59800000000001,
                     4.04, 103.918, 1.0, 4.04, 1.0404, 0, 0, -1, 10),
                    (11, 3, 2, 1, 90.8794, 3.0, 0.0, 90.8794, 3.0, 99.8794, 1.0, 4.0, 0, 2,
                     0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.799, 4.0,
                     0.0, 85.799, 4.04, 101.959, 1.0, 4.04, 1.0404, 0, 0, -1, 11),
                    (12, 4, 0, 0, 71.59800000000001, 4.0, 0.0, 71.59800000000001, 4.0,
                     103.59800000000001, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
                     False, True, False, True, 65.49750000000002, 5.0, 0.0, 65.49750000000002,
                     5.05, 106.74750000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 12),
                    (13, 4, 1, 0, 65.49750000000002, 4.0, 0.0, 65.49750000000002, 4.0,
                     106.74750000000002, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
                     False, True, False, True, 59.39700000000002, 5.0, 0.0, 59.39700000000002,
                     5.05, 109.89700000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 13),
                    (14, 4, 2, 1, 85.799, 4.0, 0.0, 85.799, 4.0, 101.799, 1.0, 5.0, 0, 2,
                     0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 79.69850000000001,
                     5.0, 0.0, 79.69850000000001, 5.05, 104.94850000000001, 1.0, 5.05, 1.0505, 0, 0, -1, 14)
                ], dtype=log_dt)
            )
        else:
            record_arrays_close(
                c.log_records,
                np.array([
                    (0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2,
                     0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799,
                     1.0, 0.0, 97.9799, 1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 0),
                    (1, 0, 1, 0, 97.9799, 0.0, 0.0, 97.9799, np.nan, 98.9899, 1.0, 1.0, 0,
                     2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 95.9598,
                     1.0, 0.0, 95.9598, 1.01, 97.97980000000001, 1.0, 1.01, 1.0101, 0, 0, -1, 1),
                    (2, 1, 0, 0, 95.9598, 1.0, 0.0, 95.9598, 1.0, 97.9598, 1.0, 2.0, 0, 2,
                     0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 92.9196, 2.0,
                     0.0, 92.9196, 2.02, 97.95960000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 2),
                    (3, 1, 1, 0, 92.9196, 1.0, 0.0, 92.9196, 1.0, 97.95960000000001, 1.0,
                     2.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 89.8794,
                     2.0, 0.0, 89.8794, 2.02, 97.95940000000002, 1.0, 2.02, 1.0202, 0, 0, -1, 3),
                    (4, 2, 0, 0, 89.8794, 2.0, 0.0, 89.8794, 2.0, 97.8794, 1.0, 3.0, 0, 2,
                     0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.8191,
                     3.0, 0.0, 85.8191, 3.0300000000000002, 98.90910000000001, 1.0,
                     3.0300000000000002, 1.0303, 0, 0, -1, 4),
                    (5, 2, 1, 0, 85.8191, 2.0, 0.0, 85.8191, 2.0, 98.90910000000001, 1.0,
                     3.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True,
                     81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0300000000000002,
                     99.93880000000001, 1.0, 3.0300000000000002, 1.0303, 0, 0, -1, 5),
                    (6, 3, 0, 0, 81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0,
                     99.75880000000001, 1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
                     False, True, False, True, 76.67840000000001, 4.0, 0.0, 76.67840000000001,
                     4.04, 101.83840000000001, 1.0, 4.04, 1.0404, 0, 0, -1, 6),
                    (7, 3, 1, 0, 76.67840000000001, 3.0, 0.0, 76.67840000000001, 3.0,
                     101.83840000000001, 1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
                     False, True, False, True, 71.59800000000001, 4.0, 0.0, 71.59800000000001,
                     4.04, 103.918, 1.0, 4.04, 1.0404, 0, 0, -1, 7),
                    (8, 4, 0, 0, 71.59800000000001, 4.0, 0.0, 71.59800000000001, 4.0,
                     103.59800000000001, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
                     False, True, False, True, 65.49750000000002, 5.0, 0.0, 65.49750000000002,
                     5.05, 106.74750000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 8),
                    (9, 4, 1, 0, 65.49750000000002, 4.0, 0.0, 65.49750000000002, 4.0,
                     106.74750000000002, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
                     False, True, False, True, 59.39700000000002, 5.0, 0.0, 59.39700000000002,
                     5.05, 109.89700000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 9),
                    (10, 0, 2, 1, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2, 0.01,
                     1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799, 1.0, 0.0,
                     97.9799, 1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 10),
                    (11, 1, 2, 1, 97.9799, 1.0, 0.0, 97.9799, 1.0, 98.9799, 1.0, 2.0, 0,
                     2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 94.9397,
                     2.0, 0.0, 94.9397, 2.02, 98.97970000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 11),
                    (12, 2, 2, 1, 94.9397, 2.0, 0.0, 94.9397, 2.0, 98.9397, 1.0, 3.0, 0,
                     2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 90.8794,
                     3.0, 0.0, 90.8794, 3.0300000000000002, 99.96940000000001, 1.0,
                     3.0300000000000002, 1.0303, 0, 0, -1, 12),
                    (13, 3, 2, 1, 90.8794, 3.0, 0.0, 90.8794, 3.0, 99.8794, 1.0, 4.0, 0, 2,
                     0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.799, 4.0,
                     0.0, 85.799, 4.04, 101.959, 1.0, 4.04, 1.0404, 0, 0, -1, 13),
                    (14, 4, 2, 1, 85.799, 4.0, 0.0, 85.799, 4.0, 101.799, 1.0, 5.0, 0, 2, 0.01,
                     1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 79.69850000000001,
                     5.0, 0.0, 79.69850000000001, 5.05, 104.94850000000001, 1.0, 5.05, 1.0505, 0, 0, -1, 14)
                ], dtype=log_dt)
            )
        # "Last" state on the final context: cash/value are per group (2 groups),
        # position/val_price/debt are per column (3 columns).
        np.testing.assert_array_equal(
            c.last_cash,
            np.array([59.39700000000002, 79.69850000000001])
        )
        np.testing.assert_array_equal(
            c.last_position,
            np.array([5., 5., 5.])
        )
        np.testing.assert_array_equal(
            c.last_val_price,
            np.array([5.05, 5.05, 5.05])
        )
        np.testing.assert_array_equal(
            c.last_value,
            np.array([109.89700000000002, 104.94850000000001])
        )
        np.testing.assert_array_equal(
            c.last_debt,
            np.array([0., 0., 0.])
        )
        np.testing.assert_array_equal(
            c.last_free_cash,
            np.array([59.39700000000002, 79.69850000000001])
        )
        # Indices of the last order/log record per column depend on iteration order.
        if test_row_wise:
            np.testing.assert_array_equal(
                c.last_oidx,
                np.array([12, 13, 14])
            )
            np.testing.assert_array_equal(
                c.last_lidx,
                np.array([12, 13, 14])
            )
        else:
            np.testing.assert_array_equal(
                c.last_oidx,
                np.array([8, 9, 14])
            )
            np.testing.assert_array_equal(
                c.last_lidx,
                np.array([8, 9, 14])
            )
        # Sanity check: each "last" index points at a record of the matching column.
        assert c.order_records[c.last_oidx[0]]['col'] == 0
        assert c.order_records[c.last_oidx[1]]['col'] == 1
        assert c.order_records[c.last_oidx[2]]['col'] == 2
        assert c.log_records[c.last_lidx[0]]['col'] == 0
        assert c.log_records[c.last_lidx[1]]['col'] == 1
        assert c.log_records[c.last_lidx[2]]['col'] == 2
        # The final context belongs to the second group (column 'c', last bar).
        assert c.group == 1
        assert c.group_len == 1
        assert c.from_col == 2
        assert c.to_col == 3
        assert c.i == 4
        np.testing.assert_array_equal(
            c.call_seq_now,
            np.array([0])
        )
        assert c.col == 2
        assert c.call_idx == 0
        # State immediately before the last order...
        assert c.cash_before == 85.799
        assert c.position_before == 4.0
        assert c.val_price_before == 4.0
        assert c.value_before == 101.799
        assert c.debt_before == 0.0
        assert c.free_cash_before == 85.799
        assert_same_tuple(
            c.order_result,
            OrderResult(size=1.0, price=5.05, fees=1.0505, side=0, status=0, status_info=-1)
        )
        # ...and immediately after it (update_value=True, so value uses fill price).
        assert c.cash_now == 79.69850000000001
        assert c.position_now == 5.0
        assert c.val_price_now == 5.05
        assert c.value_now == 104.94850000000001
        assert c.debt_now == 0.0
        assert c.free_cash_now == 79.69850000000001
    @pytest.mark.parametrize(
        "test_row_wise",
        [False, True],
    )
    def test_free_cash(self, test_row_wise):
        """Check debt and free-cash tracking across long/short flips and cash sharing.

        Captures `debt_now` and `free_cash_now` from the after-order callback and
        compares them against `portfolio.cash(free=True)` for three setups:
        independent columns, reversed prices, and grouped columns with cash sharing.
        """
        def order_func(c, size):
            # Place the size scheduled for this (row, column) cell.
            return nb.create_order_nb(
                size[c.i, c.col],
                c.close[c.i, c.col],
                fees=0.01,
                fixed_fees=1.,
                slippage=0.01
            )
        def after_order_func(c, debt, free_cash):
            # Record per-column debt; free cash is per group when sharing is on.
            debt[c.i, c.col] = c.debt_now
            if c.cash_sharing:
                free_cash[c.i, c.group] = c.free_cash_now
            else:
                free_cash[c.i, c.col] = c.free_cash_now
        # Mixed long/short sizes so some columns go into debt.
        size = np.array([
            [5, -5, 5],
            [5, -5, -10],
            [-5, 5, 10],
            [-5, 5, -10],
            [-5, 5, 10]
        ])
        debt = np.empty(price_wide.shape, dtype=np.float_)
        free_cash = np.empty(price_wide.shape, dtype=np.float_)
        portfolio = vbt.Portfolio.from_order_func(
            price_wide,
            order_func, size,
            after_order_func_nb=after_order_func,
            after_order_args=(debt, free_cash,),
            row_wise=test_row_wise,
            use_numba=False
        )
        np.testing.assert_array_equal(
            debt,
            np.array([
                [0.0, 4.95, 0.0],
                [0.0, 14.850000000000001, 9.9],
                [0.0, 7.425000000000001, 0.0],
                [0.0, 0.0, 19.8],
                [24.75, 0.0, 0.0]
            ])
        )
        np.testing.assert_array_equal(
            free_cash,
            np.array([
                [93.8995, 94.0005, 93.8995],
                [82.6985, 83.00150000000001, 92.70150000000001],
                [96.39999999999999, 81.55000000000001, 80.8985],
                [115.002, 74.998, 79.5025],
                [89.0045, 48.49550000000001, 67.0975]
            ])
        )
        # Callback-observed free cash must agree with the portfolio property.
        np.testing.assert_almost_equal(
            free_cash,
            portfolio.cash(free=True).values
        )
        # Same scenario with prices reversed in time.
        debt = np.empty(price_wide.shape, dtype=np.float_)
        free_cash = np.empty(price_wide.shape, dtype=np.float_)
        portfolio = vbt.Portfolio.from_order_func(
            price_wide.vbt.wrapper.wrap(price_wide.values[::-1]),
            order_func, size,
            after_order_func_nb=after_order_func,
            after_order_args=(debt, free_cash,),
            row_wise=test_row_wise,
            use_numba=False
        )
        np.testing.assert_array_equal(
            debt,
            np.array([
                [0.0, 24.75, 0.0],
                [0.0, 44.55, 19.8],
                [0.0, 22.275, 0.0],
                [0.0, 0.0, 9.9],
                [4.95, 0.0, 0.0]
            ])
        )
        np.testing.assert_array_equal(
            free_cash,
            np.array([
                [73.4975, 74.0025, 73.4975],
                [52.0955, 53.00449999999999, 72.1015],
                [65.797, 81.25299999999999, 80.0985],
                [74.598, 114.60199999999998, 78.9005],
                [68.5985, 108.50149999999998, 87.49949999999998]
            ])
        )
        np.testing.assert_almost_equal(
            free_cash,
            portfolio.cash(free=True).values
        )
        # Grouped with cash sharing: free_cash is tracked per group (2 groups).
        debt = np.empty(price_wide.shape, dtype=np.float_)
        free_cash = np.empty((price_wide.shape[0], 2), dtype=np.float_)
        portfolio = vbt.Portfolio.from_order_func(
            price_wide,
            order_func, size,
            after_order_func_nb=after_order_func,
            after_order_args=(debt, free_cash,),
            row_wise=test_row_wise,
            use_numba=False,
            group_by=[0, 0, 1],
            cash_sharing=True
        )
        np.testing.assert_array_equal(
            debt,
            np.array([
                [0.0, 4.95, 0.0],
                [0.0, 14.850000000000001, 9.9],
                [0.0, 7.425000000000001, 0.0],
                [0.0, 0.0, 19.8],
                [24.75, 0.0, 0.0]
            ])
        )
        np.testing.assert_array_equal(
            free_cash,
            np.array([
                [87.9, 93.8995],
                [65.70000000000002, 92.70150000000001],
                [77.95000000000002, 80.8985],
                [90.00000000000001, 79.5025],
                [37.500000000000014, 67.0975]
            ])
        )
        np.testing.assert_almost_equal(
            free_cash,
            portfolio.cash(free=True).values
        )
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_init_cash(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, 10., row_wise=test_row_wise, init_cash=[1., 10., np.inf])
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 10.0, 1.0, 0.0, 0),
(2, 0, 2, 10.0, 1.0, 0.0, 0), (3, 1, 0, 10.0, 2.0, 0.0, 1),
(4, 1, 1, 10.0, 2.0, 0.0, 1), (5, 1, 2, 10.0, 2.0, 0.0, 1),
(6, 2, 0, 6.666666666666667, 3.0, 0.0, 0), (7, 2, 1, 6.666666666666667, 3.0, 0.0, 0),
(8, 2, 2, 10.0, 3.0, 0.0, 0), (9, 3, 0, 10.0, 4.0, 0.0, 1),
(10, 3, 1, 10.0, 4.0, 0.0, 1), (11, 3, 2, 10.0, 4.0, 0.0, 1),
(12, 4, 0, 8.0, 5.0, 0.0, 0), (13, 4, 1, 8.0, 5.0, 0.0, 0),
(14, 4, 2, 10.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 10.0, 2.0, 0.0, 1),
(2, 2, 0, 6.666666666666667, 3.0, 0.0, 0), (3, 3, 0, 10.0, 4.0, 0.0, 1),
(4, 4, 0, 8.0, 5.0, 0.0, 0), (5, 0, 1, 10.0, 1.0, 0.0, 0),
(6, 1, 1, 10.0, 2.0, 0.0, 1), (7, 2, 1, 6.666666666666667, 3.0, 0.0, 0),
(8, 3, 1, 10.0, 4.0, 0.0, 1), (9, 4, 1, 8.0, 5.0, 0.0, 0),
(10, 0, 2, 10.0, 1.0, 0.0, 0), (11, 1, 2, 10.0, 2.0, 0.0, 1),
(12, 2, 2, 10.0, 3.0, 0.0, 0), (13, 3, 2, 10.0, 4.0, 0.0, 1),
(14, 4, 2, 10.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
assert type(portfolio._init_cash) == np.ndarray
base_portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, 10., row_wise=test_row_wise, init_cash=np.inf)
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, 10., row_wise=test_row_wise, init_cash=InitCashMode.Auto)
record_arrays_close(
portfolio.order_records,
base_portfolio.orders.values
)
assert portfolio._init_cash == InitCashMode.Auto
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, 10., row_wise=test_row_wise, init_cash=InitCashMode.AutoAlign)
record_arrays_close(
portfolio.order_records,
base_portfolio.orders.values
)
assert portfolio._init_cash == InitCashMode.AutoAlign
    def test_func_calls(self):
        """Verify the exact call order of all callbacks in column-major simulation.

        Each callback appends a global call counter to its own typed list, so the
        recorded sequences pin down how simulation/group/segment/order/after-order
        hooks interleave, both with all segments active and with a sparse mask.
        """
        @njit
        def prep_func_nb(c, call_i, sim_lst):
            # Called once per simulation.
            call_i[0] += 1
            sim_lst.append(call_i[0])
            return (call_i,)
        @njit
        def group_prep_func_nb(c, call_i, group_lst):
            # Called once per group.
            call_i[0] += 1
            group_lst.append(call_i[0])
            return (call_i,)
        @njit
        def segment_prep_func_nb(c, call_i, segment_lst):
            # Called once per (group, row) segment.
            call_i[0] += 1
            segment_lst.append(call_i[0])
            return (call_i,)
        @njit
        def order_func_nb(c, call_i, order_lst):
            # Called once per column within each segment; places no order.
            call_i[0] += 1
            order_lst.append(call_i[0])
            return NoOrder
        @njit
        def after_order_func_nb(c, call_i, after_order_lst):
            # Called right after each order call.
            call_i[0] += 1
            after_order_lst.append(call_i[0])
        # Shared counter and numba typed lists (int64 elements).
        call_i = np.array([0])
        sim_lst = List.empty_list(typeof(0))
        group_lst = List.empty_list(typeof(0))
        segment_lst = List.empty_list(typeof(0))
        order_lst = List.empty_list(typeof(0))
        after_order_lst = List.empty_list(typeof(0))
        _ = vbt.Portfolio.from_order_func(
            price_wide, order_func_nb, order_lst,
            group_by=np.array([0, 0, 1]),
            prep_func_nb=prep_func_nb, prep_args=(call_i, sim_lst),
            group_prep_func_nb=group_prep_func_nb, group_prep_args=(group_lst,),
            segment_prep_func_nb=segment_prep_func_nb, segment_prep_args=(segment_lst,),
            after_order_func_nb=after_order_func_nb, after_order_args=(after_order_lst,)
        )
        # 1 sim + 2 group + 10 segment + 15 order + 15 after-order = 43 calls.
        assert call_i[0] == 43
        assert list(sim_lst) == [1]
        assert list(group_lst) == [2, 28]
        assert list(segment_lst) == [3, 8, 13, 18, 23, 29, 32, 35, 38, 41]
        assert list(order_lst) == [4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 30, 33, 36, 39, 42]
        assert list(after_order_lst) == [5, 7, 10, 12, 15, 17, 20, 22, 25, 27, 31, 34, 37, 40, 43]
        # Repeat with a mask that activates only group 1 on rows 0, 2 and 4.
        call_i = np.array([0])
        sim_lst = List.empty_list(typeof(0))
        group_lst = List.empty_list(typeof(0))
        segment_lst = List.empty_list(typeof(0))
        order_lst = List.empty_list(typeof(0))
        after_order_lst = List.empty_list(typeof(0))
        active_mask = np.array([
            [False, True],
            [False, False],
            [False, True],
            [False, False],
            [False, True],
        ])
        _ = vbt.Portfolio.from_order_func(
            price_wide, order_func_nb, order_lst,
            group_by=np.array([0, 0, 1]),
            prep_func_nb=prep_func_nb, prep_args=(call_i, sim_lst),
            group_prep_func_nb=group_prep_func_nb, group_prep_args=(group_lst,),
            segment_prep_func_nb=segment_prep_func_nb, segment_prep_args=(segment_lst,),
            after_order_func_nb=after_order_func_nb, after_order_args=(after_order_lst,),
            active_mask=active_mask
        )
        # Inactive segments are skipped entirely; group 0 is never prepared... but
        # group 1 is, hence a single group-prep call.
        assert call_i[0] == 11
        assert list(sim_lst) == [1]
        assert list(group_lst) == [2]
        assert list(segment_lst) == [3, 6, 9]
        assert list(order_lst) == [4, 7, 10]
        assert list(after_order_lst) == [5, 8, 11]
    def test_func_calls_row_wise(self):
        """Verify the exact call order of all callbacks in row-major simulation.

        Same instrumentation as `test_func_calls`, but with `row_wise=True`, where a
        row-prep hook replaces the group-prep hook and rows are the outer loop.
        """
        @njit
        def prep_func_nb(c, call_i, sim_lst):
            # Called once per simulation.
            call_i[0] += 1
            sim_lst.append(call_i[0])
            return (call_i,)
        @njit
        def row_prep_func_nb(c, call_i, row_lst):
            # Called once per row (row-wise replaces group-prep with row-prep).
            call_i[0] += 1
            row_lst.append(call_i[0])
            return (call_i,)
        @njit
        def segment_prep_func_nb(c, call_i, segment_lst):
            # Called once per (row, group) segment.
            call_i[0] += 1
            segment_lst.append(call_i[0])
            return (call_i,)
        @njit
        def order_func_nb(c, call_i, order_lst):
            # Called once per column within each segment; places no order.
            call_i[0] += 1
            order_lst.append(call_i[0])
            return NoOrder
        @njit
        def after_order_func_nb(c, call_i, after_order_lst):
            # Called right after each order call.
            call_i[0] += 1
            after_order_lst.append(call_i[0])
        # Shared counter and numba typed lists (int64 elements).
        call_i = np.array([0])
        sim_lst = List.empty_list(typeof(0))
        row_lst = List.empty_list(typeof(0))
        segment_lst = List.empty_list(typeof(0))
        order_lst = List.empty_list(typeof(0))
        after_order_lst = List.empty_list(typeof(0))
        _ = vbt.Portfolio.from_order_func(
            price_wide, order_func_nb, order_lst,
            group_by=np.array([0, 0, 1]),
            prep_func_nb=prep_func_nb, prep_args=(call_i, sim_lst),
            row_prep_func_nb=row_prep_func_nb, row_prep_args=(row_lst,),
            segment_prep_func_nb=segment_prep_func_nb, segment_prep_args=(segment_lst,),
            after_order_func_nb=after_order_func_nb, after_order_args=(after_order_lst,),
            row_wise=True
        )
        # 1 sim + 5 row + 10 segment + 15 order + 15 after-order = 46 calls.
        assert call_i[0] == 46
        assert list(sim_lst) == [1]
        assert list(row_lst) == [2, 11, 20, 29, 38]
        assert list(segment_lst) == [3, 8, 12, 17, 21, 26, 30, 35, 39, 44]
        assert list(order_lst) == [4, 6, 9, 13, 15, 18, 22, 24, 27, 31, 33, 36, 40, 42, 45]
        assert list(after_order_lst) == [5, 7, 10, 14, 16, 19, 23, 25, 28, 32, 34, 37, 41, 43, 46]
        # Repeat with a sparse mask; rows with no active group skip row-prep too.
        call_i = np.array([0])
        sim_lst = List.empty_list(typeof(0))
        row_lst = List.empty_list(typeof(0))
        segment_lst = List.empty_list(typeof(0))
        order_lst = List.empty_list(typeof(0))
        after_order_lst = List.empty_list(typeof(0))
        active_mask = np.array([
            [False, False],
            [False, True],
            [True, False],
            [True, True],
            [False, False],
        ])
        _ = vbt.Portfolio.from_order_func(
            price_wide, order_func_nb, order_lst,
            group_by=np.array([0, 0, 1]),
            prep_func_nb=prep_func_nb, prep_args=(call_i, sim_lst),
            row_prep_func_nb=row_prep_func_nb, row_prep_args=(row_lst,),
            segment_prep_func_nb=segment_prep_func_nb, segment_prep_args=(segment_lst,),
            after_order_func_nb=after_order_func_nb, after_order_args=(after_order_lst,),
            active_mask=active_mask,
            row_wise=True
        )
        assert call_i[0] == 20
        assert list(sim_lst) == [1]
        assert list(row_lst) == [2, 6, 12]
        assert list(segment_lst) == [3, 7, 13, 18]
        assert list(order_lst) == [4, 8, 10, 14, 16, 19]
        assert list(after_order_lst) == [5, 9, 11, 15, 17, 20]
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_max_orders(self, test_row_wise):
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, row_wise=test_row_wise)
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, row_wise=test_row_wise, max_orders=15)
with pytest.raises(Exception) as e_info:
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, row_wise=test_row_wise, max_orders=14)
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_max_logs(self, test_row_wise):
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func_nb, np.inf, row_wise=test_row_wise)
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func_nb, np.inf, row_wise=test_row_wise, max_logs=15)
with pytest.raises(Exception) as e_info:
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func_nb, np.inf, row_wise=test_row_wise, max_logs=14)
# ############# Portfolio ############# #

# Module-level fixtures shared by the TestPortfolio suite below.
# Price frame with one NaN per column, placed at different rows to exercise
# missing-data handling.
price_na = pd.DataFrame({
    'a': [np.nan, 2., 3., 4., 5.],
    'b': [1., 2., np.nan, 4., 5.],
    'c': [1., 2., 3., 4., np.nan]
}, index=price.index)
# Order sequence: open, add, close, reduce-past-zero, reopen.
order_size_new = pd.Series([1., 0.1, -1., -0.1, 1.])
# One trade direction per column: a=longonly, b=shortonly, c=all.
directions = ['longonly', 'shortonly', 'all']
# Columns a and b form group 'first'; column c forms group 'second'.
group_by = pd.Index(['first', 'first', 'second'], name='group')
portfolio = vbt.Portfolio.from_orders(
    price_na, order_size_new, size_type='amount', direction=directions,
    fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
    call_seq='reversed', group_by=None,
    init_cash=[100., 100., 100.], freq='1D'
) # independent
portfolio_grouped = vbt.Portfolio.from_orders(
    price_na, order_size_new, size_type='amount', direction=directions,
    fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
    call_seq='reversed', group_by=group_by, cash_sharing=False,
    init_cash=[100., 100., 100.], freq='1D'
) # grouped
portfolio_shared = vbt.Portfolio.from_orders(
    price_na, order_size_new, size_type='amount', direction=directions,
    fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
    call_seq='reversed', group_by=group_by, cash_sharing=True,
    init_cash=[200., 100.], freq='1D'
) # shared
class TestPortfolio:
def test_config(self, tmp_path):
assert vbt.Portfolio.loads(portfolio['a'].dumps()) == portfolio['a']
assert vbt.Portfolio.loads(portfolio.dumps()) == portfolio
portfolio.save(tmp_path / 'portfolio')
assert vbt.Portfolio.load(tmp_path / 'portfolio') == portfolio
def test_wrapper(self):
pd.testing.assert_index_equal(
portfolio.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
price_na.columns
)
assert portfolio.wrapper.ndim == 2
assert portfolio.wrapper.grouper.group_by is None
assert portfolio.wrapper.grouper.allow_enable
assert portfolio.wrapper.grouper.allow_disable
assert portfolio.wrapper.grouper.allow_modify
pd.testing.assert_index_equal(
portfolio_grouped.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
portfolio_grouped.wrapper.columns,
price_na.columns
)
assert portfolio_grouped.wrapper.ndim == 2
pd.testing.assert_index_equal(
portfolio_grouped.wrapper.grouper.group_by,
group_by
)
assert portfolio_grouped.wrapper.grouper.allow_enable
assert portfolio_grouped.wrapper.grouper.allow_disable
assert portfolio_grouped.wrapper.grouper.allow_modify
pd.testing.assert_index_equal(
portfolio_shared.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
portfolio_shared.wrapper.columns,
price_na.columns
)
assert portfolio_shared.wrapper.ndim == 2
pd.testing.assert_index_equal(
portfolio_shared.wrapper.grouper.group_by,
group_by
)
assert not portfolio_shared.wrapper.grouper.allow_enable
assert portfolio_shared.wrapper.grouper.allow_disable
assert not portfolio_shared.wrapper.grouper.allow_modify
def test_indexing(self):
assert portfolio['a'].wrapper == portfolio.wrapper['a']
assert portfolio['a'].orders == portfolio.orders['a']
assert portfolio['a'].logs == portfolio.logs['a']
assert portfolio['a'].init_cash == portfolio.init_cash['a']
pd.testing.assert_series_equal(portfolio['a'].call_seq, portfolio.call_seq['a'])
assert portfolio['c'].wrapper == portfolio.wrapper['c']
assert portfolio['c'].orders == portfolio.orders['c']
assert portfolio['c'].logs == portfolio.logs['c']
assert portfolio['c'].init_cash == portfolio.init_cash['c']
pd.testing.assert_series_equal(portfolio['c'].call_seq, portfolio.call_seq['c'])
assert portfolio[['c']].wrapper == portfolio.wrapper[['c']]
assert portfolio[['c']].orders == portfolio.orders[['c']]
assert portfolio[['c']].logs == portfolio.logs[['c']]
pd.testing.assert_series_equal(portfolio[['c']].init_cash, portfolio.init_cash[['c']])
pd.testing.assert_frame_equal(portfolio[['c']].call_seq, portfolio.call_seq[['c']])
assert portfolio_grouped['first'].wrapper == portfolio_grouped.wrapper['first']
assert portfolio_grouped['first'].orders == portfolio_grouped.orders['first']
assert portfolio_grouped['first'].logs == portfolio_grouped.logs['first']
assert portfolio_grouped['first'].init_cash == portfolio_grouped.init_cash['first']
pd.testing.assert_frame_equal(portfolio_grouped['first'].call_seq, portfolio_grouped.call_seq[['a', 'b']])
assert portfolio_grouped[['first']].wrapper == portfolio_grouped.wrapper[['first']]
assert portfolio_grouped[['first']].orders == portfolio_grouped.orders[['first']]
assert portfolio_grouped[['first']].logs == portfolio_grouped.logs[['first']]
pd.testing.assert_series_equal(
portfolio_grouped[['first']].init_cash,
portfolio_grouped.init_cash[['first']])
pd.testing.assert_frame_equal(portfolio_grouped[['first']].call_seq, portfolio_grouped.call_seq[['a', 'b']])
assert portfolio_grouped['second'].wrapper == portfolio_grouped.wrapper['second']
assert portfolio_grouped['second'].orders == portfolio_grouped.orders['second']
assert portfolio_grouped['second'].logs == portfolio_grouped.logs['second']
assert portfolio_grouped['second'].init_cash == portfolio_grouped.init_cash['second']
pd.testing.assert_series_equal(portfolio_grouped['second'].call_seq, portfolio_grouped.call_seq['c'])
assert portfolio_grouped[['second']].orders == portfolio_grouped.orders[['second']]
assert portfolio_grouped[['second']].wrapper == portfolio_grouped.wrapper[['second']]
assert portfolio_grouped[['second']].orders == portfolio_grouped.orders[['second']]
assert portfolio_grouped[['second']].logs == portfolio_grouped.logs[['second']]
pd.testing.assert_series_equal(
portfolio_grouped[['second']].init_cash,
portfolio_grouped.init_cash[['second']])
pd.testing.assert_frame_equal(portfolio_grouped[['second']].call_seq, portfolio_grouped.call_seq[['c']])
assert portfolio_shared['first'].wrapper == portfolio_shared.wrapper['first']
assert portfolio_shared['first'].orders == portfolio_shared.orders['first']
assert portfolio_shared['first'].logs == portfolio_shared.logs['first']
assert portfolio_shared['first'].init_cash == portfolio_shared.init_cash['first']
pd.testing.assert_frame_equal(portfolio_shared['first'].call_seq, portfolio_shared.call_seq[['a', 'b']])
assert portfolio_shared[['first']].orders == portfolio_shared.orders[['first']]
assert portfolio_shared[['first']].wrapper == portfolio_shared.wrapper[['first']]
assert portfolio_shared[['first']].orders == portfolio_shared.orders[['first']]
assert portfolio_shared[['first']].logs == portfolio_shared.logs[['first']]
pd.testing.assert_series_equal(
portfolio_shared[['first']].init_cash,
portfolio_shared.init_cash[['first']])
pd.testing.assert_frame_equal(portfolio_shared[['first']].call_seq, portfolio_shared.call_seq[['a', 'b']])
assert portfolio_shared['second'].wrapper == portfolio_shared.wrapper['second']
assert portfolio_shared['second'].orders == portfolio_shared.orders['second']
assert portfolio_shared['second'].logs == portfolio_shared.logs['second']
assert portfolio_shared['second'].init_cash == portfolio_shared.init_cash['second']
pd.testing.assert_series_equal(portfolio_shared['second'].call_seq, portfolio_shared.call_seq['c'])
assert portfolio_shared[['second']].wrapper == portfolio_shared.wrapper[['second']]
assert portfolio_shared[['second']].orders == portfolio_shared.orders[['second']]
assert portfolio_shared[['second']].logs == portfolio_shared.logs[['second']]
pd.testing.assert_series_equal(
portfolio_shared[['second']].init_cash,
portfolio_shared.init_cash[['second']])
pd.testing.assert_frame_equal(portfolio_shared[['second']].call_seq, portfolio_shared.call_seq[['c']])
def test_regroup(self):
assert portfolio.regroup(None) == portfolio
assert portfolio.regroup(False) == portfolio
assert portfolio.regroup(group_by) != portfolio
pd.testing.assert_index_equal(portfolio.regroup(group_by).wrapper.grouper.group_by, group_by)
assert portfolio_grouped.regroup(None) == portfolio_grouped
assert portfolio_grouped.regroup(False) != portfolio_grouped
assert portfolio_grouped.regroup(False).wrapper.grouper.group_by is None
assert portfolio_grouped.regroup(group_by) == portfolio_grouped
assert portfolio_shared.regroup(None) == portfolio_shared
with pytest.raises(Exception) as e_info:
_ = portfolio_shared.regroup(False)
assert portfolio_shared.regroup(group_by) == portfolio_shared
def test_cash_sharing(self):
assert not portfolio.cash_sharing
assert not portfolio_grouped.cash_sharing
assert portfolio_shared.cash_sharing
def test_call_seq(self):
pd.testing.assert_frame_equal(
portfolio.call_seq,
pd.DataFrame(
np.array([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio_grouped.call_seq,
pd.DataFrame(
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio_shared.call_seq,
pd.DataFrame(
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
def test_incl_unrealized(self):
assert not vbt.Portfolio.from_orders(price_na, 1000., incl_unrealized=False).incl_unrealized
assert vbt.Portfolio.from_orders(price_na, 1000., incl_unrealized=True).incl_unrealized
    def test_orders(self):
        """Check order records and per-column/per-group order counts.

        The raw records are compared once against the ungrouped portfolio; counts
        are then cross-checked across all three portfolios with grouping toggled
        on and off via `get_orders(group_by=...)`.
        """
        record_arrays_close(
            portfolio.orders.values,
            np.array([
                (0, 1, 0, 0.1, 2.02, 0.10202, 0), (1, 2, 0, 0.1, 2.9699999999999998, 0.10297, 1),
                (2, 4, 0, 1.0, 5.05, 0.1505, 0), (3, 0, 1, 1.0, 0.99, 0.10990000000000001, 1),
                (4, 1, 1, 0.1, 1.98, 0.10198, 1), (5, 3, 1, 0.1, 4.04, 0.10404000000000001, 0),
                (6, 4, 1, 1.0, 4.95, 0.14950000000000002, 1), (7, 0, 2, 1.0, 1.01, 0.1101, 0),
                (8, 1, 2, 0.1, 2.02, 0.10202, 0), (9, 2, 2, 1.0, 2.9699999999999998, 0.1297, 1),
                (10, 3, 2, 0.1, 3.96, 0.10396000000000001, 1)
            ], dtype=order_dt)
        )
        # Per-column counts must agree regardless of how the portfolio was built,
        # as long as grouping is disabled.
        result = pd.Series(
            np.array([3, 4, 4]),
            index=price_na.columns
        ).rename('count')
        pd.testing.assert_series_equal(
            portfolio.orders.count(),
            result
        )
        pd.testing.assert_series_equal(
            portfolio_grouped.get_orders(group_by=False).count(),
            result
        )
        pd.testing.assert_series_equal(
            portfolio_shared.get_orders(group_by=False).count(),
            result
        )
        # Per-group counts: 'first' = columns a+b (3+4), 'second' = column c (4).
        result = pd.Series(
            np.array([7, 4]),
            index=pd.Index(['first', 'second'], dtype='object', name='group')
        ).rename('count')
        pd.testing.assert_series_equal(
            portfolio.get_orders(group_by=group_by).count(),
            result
        )
        pd.testing.assert_series_equal(
            portfolio_grouped.orders.count(),
            result
        )
        pd.testing.assert_series_equal(
            portfolio_shared.orders.count(),
            result
        )
    def test_logs(self):
        """Log records and their counts per column and per group.

        The record tuples are an exact regression fixture (dtype ``log_dt``);
        every bar produces one log row per column, including NaN-price bars.
        """
        record_arrays_close(
            portfolio.logs.values,
            np.array([
                (0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, np.nan, 0, 0, 0.01, 0.1,
                 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.0, 0.0, 0.0, 100.0,
                 np.nan, 100.0, np.nan, np.nan, np.nan, -1, 1, 1, -1),
                (1, 1, 0, 0, 100.0, 0.0, 0.0, 100.0, 2.0, 100.0, 0.1, 2.0, 0, 0, 0.01,
                 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 99.69598, 0.1, 0.0,
                 99.69598, 2.0, 100.0, 0.1, 2.02, 0.10202, 0, 0, -1, 0),
                (2, 2, 0, 0, 99.69598, 0.1, 0.0, 99.69598, 3.0, 99.99598, -1.0, 3.0, 0, 0,
                 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 99.89001, 0.0,
                 0.0, 99.89001, 3.0, 99.99598, 0.1, 2.9699999999999998, 0.10297, 1, 0, -1, 1),
                (3, 3, 0, 0, 99.89001, 0.0, 0.0, 99.89001, 4.0, 99.89001, -0.1, 4.0,
                 0, 0, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 99.89001,
                 0.0, 0.0, 99.89001, 4.0, 99.89001, np.nan, np.nan, np.nan, -1, 2, 8, -1),
                (4, 4, 0, 0, 99.89001, 0.0, 0.0, 99.89001, 5.0, 99.89001, 1.0, 5.0, 0, 0,
                 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 94.68951,
                 1.0, 0.0, 94.68951, 5.0, 99.89001, 1.0, 5.05, 0.1505, 0, 0, -1, 2),
                (5, 0, 1, 1, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, 1.0, 1.0, 0, 1, 0.01,
                 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.8801, -1.0,
                 0.99, 98.9001, 1.0, 100.0, 1.0, 0.99, 0.10990000000000001, 1, 0, -1, 3),
                (6, 1, 1, 1, 100.8801, -1.0, 0.99, 98.9001, 2.0, 98.8801, 0.1, 2.0, 0, 1,
                 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.97612,
                 -1.1, 1.188, 98.60011999999999, 2.0, 98.8801, 0.1, 1.98, 0.10198, 1, 0, -1, 4),
                (7, 2, 1, 1, 100.97612, -1.1, 1.188, 98.60011999999999, np.nan, np.nan, -1.0, np.nan,
                 0, 1, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.97612,
                 -1.1, 1.188, 98.60011999999999, np.nan, np.nan, np.nan, np.nan, np.nan, -1, 1, 1, -1),
                (8, 3, 1, 1, 100.97612, -1.1, 1.188, 98.60011999999999, 4.0, 96.57611999999999,
                 -0.1, 4.0, 0, 1, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
                 100.46808, -1.0, 1.08, 98.30807999999999, 4.0, 96.57611999999999, 0.1, 4.04,
                 0.10404000000000001, 0, 0, -1, 5),
                (9, 4, 1, 1, 100.46808, -1.0, 1.08, 98.30807999999999, 5.0, 95.46808, 1.0,
                 5.0, 0, 1, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
                 105.26858, -2.0, 6.03, 93.20857999999998, 5.0, 95.46808, 1.0, 4.95,
                 0.14950000000000002, 1, 0, -1, 6),
                (10, 0, 2, 2, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, 1.0, 1.0, 0, 2, 0.01,
                 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 98.8799, 1.0,
                 0.0, 98.8799, 1.0, 100.0, 1.0, 1.01, 0.1101, 0, 0, -1, 7),
                (11, 1, 2, 2, 98.8799, 1.0, 0.0, 98.8799, 2.0, 100.8799, 0.1, 2.0, 0, 2,
                 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 98.57588000000001,
                 1.1, 0.0, 98.57588000000001, 2.0, 100.8799, 0.1, 2.02, 0.10202, 0, 0, -1, 8),
                (12, 2, 2, 2, 98.57588000000001, 1.1, 0.0, 98.57588000000001, 3.0, 101.87588000000001,
                 -1.0, 3.0, 0, 2, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
                 101.41618000000001, 0.10000000000000009, 0.0, 101.41618000000001, 3.0,
                 101.87588000000001, 1.0, 2.9699999999999998, 0.1297, 1, 0, -1, 9),
                (13, 3, 2, 2, 101.41618000000001, 0.10000000000000009, 0.0, 101.41618000000001,
                 4.0, 101.81618000000002, -0.1, 4.0, 0, 2, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0,
                 False, True, False, True, 101.70822000000001, 0.0, 0.0, 101.70822000000001,
                 4.0, 101.81618000000002, 0.1, 3.96, 0.10396000000000001, 1, 0, -1, 10),
                (14, 4, 2, 2, 101.70822000000001, 0.0, 0.0, 101.70822000000001, np.nan,
                 101.70822000000001, 1.0, np.nan, 0, 2, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0,
                 False, True, False, True, 101.70822000000001, 0.0, 0.0, 101.70822000000001,
                 np.nan, 101.70822000000001, np.nan, np.nan, np.nan, -1, 1, 1, -1)
            ], dtype=log_dt)
        )
        # Ungrouped counts: one log record per bar per column (5 bars).
        result = pd.Series(
            np.array([5, 5, 5]),
            index=price_na.columns
        ).rename('count')
        pd.testing.assert_series_equal(
            portfolio.logs.count(),
            result
        )
        pd.testing.assert_series_equal(
            portfolio_grouped.get_logs(group_by=False).count(),
            result
        )
        pd.testing.assert_series_equal(
            portfolio_shared.get_logs(group_by=False).count(),
            result
        )
        # Grouped counts: first group aggregates two columns (5 + 5 = 10).
        result = pd.Series(
            np.array([10, 5]),
            index=pd.Index(['first', 'second'], dtype='object', name='group')
        ).rename('count')
        pd.testing.assert_series_equal(
            portfolio.get_logs(group_by=group_by).count(),
            result
        )
        pd.testing.assert_series_equal(
            portfolio_grouped.logs.count(),
            result
        )
        pd.testing.assert_series_equal(
            portfolio_shared.logs.count(),
            result
        )
    def test_trades(self):
        """Trade records (dtype ``trade_dt``) and their per-column/per-group counts."""
        record_arrays_close(
            portfolio.trades.values,
            np.array([
                (0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998, 0.10297,
                 -0.10999000000000003, -0.5445049504950497, 0, 1, 0),
                (1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0,
                 -0.20049999999999982, -0.03970297029702967, 0, 0, 1),
                (2, 1, 0.1, 0, 1.0799999999999998, 0.019261818181818182,
                 3, 4.04, 0.10404000000000001, -0.4193018181818182, -3.882424242424243, 1, 1, 2),
                (3, 1, 2.0, 0, 3.015, 0.3421181818181819, 4, 5.0, 0.0,
                 -4.312118181818182, -0.7151108095884214, 1, 0, 2),
                (4, 2, 1.0, 0, 1.1018181818181818, 0.19283636363636364, 2,
                 2.9699999999999998, 0.1297, 1.5456454545454543, 1.4028135313531351, 0, 1, 3),
                (5, 2, 0.10000000000000009, 0, 1.1018181818181818, 0.019283636363636378,
                 3, 3.96, 0.10396000000000001, 0.1625745454545457, 1.4755115511551162, 0, 1, 3)
            ], dtype=trade_dt)
        )
        # Ungrouped counts are the same for all portfolio variants.
        result = pd.Series(
            np.array([2, 2, 2]),
            index=price_na.columns
        ).rename('count')
        pd.testing.assert_series_equal(
            portfolio.trades.count(),
            result
        )
        pd.testing.assert_series_equal(
            portfolio_grouped.get_trades(group_by=False).count(),
            result
        )
        pd.testing.assert_series_equal(
            portfolio_shared.get_trades(group_by=False).count(),
            result
        )
        # Grouped counts: first group aggregates the first two columns.
        result = pd.Series(
            np.array([4, 2]),
            index=pd.Index(['first', 'second'], dtype='object', name='group')
        ).rename('count')
        pd.testing.assert_series_equal(
            portfolio.get_trades(group_by=group_by).count(),
            result
        )
        pd.testing.assert_series_equal(
            portfolio_grouped.trades.count(),
            result
        )
        pd.testing.assert_series_equal(
            portfolio_shared.trades.count(),
            result
        )
    def test_positions(self):
        """Position records (dtype ``position_dt``) and their counts.

        Positions merge consecutive trades in the same direction, so counts
        are lower than the trade counts checked in ``test_trades``.
        """
        record_arrays_close(
            portfolio.positions.values,
            np.array([
                (0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998,
                 0.10297, -0.10999000000000003, -0.5445049504950497, 0, 1),
                (1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0,
                 -0.20049999999999982, -0.03970297029702967, 0, 0),
                (2, 1, 2.1, 0, 2.9228571428571426, 0.36138000000000003, 4, 4.954285714285714,
                 0.10404000000000001, -4.731420000000001, -0.7708406647116326, 1, 0),
                (3, 2, 1.1, 0, 1.1018181818181818, 0.21212000000000003, 3,
                 3.06, 0.23366000000000003, 1.7082200000000003, 1.4094224422442245, 0, 1)
            ], dtype=position_dt)
        )
        # Ungrouped counts are the same for all portfolio variants.
        result = pd.Series(
            np.array([2, 1, 1]),
            index=price_na.columns
        ).rename('count')
        pd.testing.assert_series_equal(
            portfolio.positions.count(),
            result
        )
        pd.testing.assert_series_equal(
            portfolio_grouped.get_positions(group_by=False).count(),
            result
        )
        pd.testing.assert_series_equal(
            portfolio_shared.get_positions(group_by=False).count(),
            result
        )
        # Grouped counts: first group aggregates the first two columns.
        result = pd.Series(
            np.array([3, 1]),
            index=pd.Index(['first', 'second'], dtype='object', name='group')
        ).rename('count')
        pd.testing.assert_series_equal(
            portfolio.get_positions(group_by=group_by).count(),
            result
        )
        pd.testing.assert_series_equal(
            portfolio_grouped.positions.count(),
            result
        )
        pd.testing.assert_series_equal(
            portfolio_shared.positions.count(),
            result
        )
def test_drawdowns(self):
record_arrays_close(
portfolio.drawdowns.values,
np.array([
(0, 0, 0, 4, 4, 0), (1, 1, 0, 4, 4, 0), (2, 2, 2, 3, 4, 0)
], dtype=drawdown_dt)
)
result = pd.Series(
np.array([1, 1, 1]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
portfolio.drawdowns.count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.get_drawdowns(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.get_drawdowns(group_by=False).count(),
result
)
result = pd.Series(
np.array([1, 1]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
portfolio.get_drawdowns(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.drawdowns.count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.drawdowns.count(),
result
)
def test_close(self):
pd.testing.assert_frame_equal(portfolio.close, price_na)
pd.testing.assert_frame_equal(portfolio_grouped.close, price_na)
pd.testing.assert_frame_equal(portfolio_shared.close, price_na)
def test_get_fillna_close(self):
pd.testing.assert_frame_equal(
portfolio.get_fillna_close(ffill=False, bfill=False),
price_na
)
pd.testing.assert_frame_equal(
portfolio.get_fillna_close(ffill=True, bfill=False),
price_na.ffill()
)
pd.testing.assert_frame_equal(
portfolio.get_fillna_close(ffill=False, bfill=True),
price_na.bfill()
)
pd.testing.assert_frame_equal(
portfolio.get_fillna_close(ffill=True, bfill=True),
price_na.ffill().bfill()
)
    def test_asset_flow(self):
        """Per-bar change in asset holdings, split by direction.

        The default (both directions) equals longonly minus shortonly flows.
        """
        pd.testing.assert_frame_equal(
            portfolio.asset_flow(direction='longonly'),
            pd.DataFrame(
                np.array([
                    [0., 0., 1.],
                    [0.1, 0., 0.1],
                    [-0.1, 0., -1.],
                    [0., 0., -0.1],
                    [1., 0., 0.]
                ]),
                index=price_na.index,
                columns=price_na.columns
            )
        )
        pd.testing.assert_frame_equal(
            portfolio.asset_flow(direction='shortonly'),
            pd.DataFrame(
                np.array([
                    [0., 1., 0.],
                    [0., 0.1, 0.],
                    [0., 0., 0.],
                    [0., -0.1, 0.],
                    [0., 1., 0.]
                ]),
                index=price_na.index,
                columns=price_na.columns
            )
        )
        # Both-direction flow: identical across all portfolio variants.
        result = pd.DataFrame(
            np.array([
                [0., -1., 1.],
                [0.1, -0.1, 0.1],
                [-0.1, 0., -1.],
                [0., 0.1, -0.1],
                [1., -1., 0.]
            ]),
            index=price_na.index,
            columns=price_na.columns
        )
        pd.testing.assert_frame_equal(
            portfolio.asset_flow(),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_grouped.asset_flow(),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_shared.asset_flow(),
            result
        )
    def test_assets(self):
        """Cumulative asset holdings per bar, split by direction.

        Shorts appear as positive sizes under ``shortonly`` and negative
        sizes in the default (both-direction) view.
        """
        pd.testing.assert_frame_equal(
            portfolio.assets(direction='longonly'),
            pd.DataFrame(
                np.array([
                    [0., 0., 1.],
                    [0.1, 0., 1.1],
                    [0., 0., 0.1],
                    [0., 0., 0.],
                    [1., 0., 0.]
                ]),
                index=price_na.index,
                columns=price_na.columns
            )
        )
        pd.testing.assert_frame_equal(
            portfolio.assets(direction='shortonly'),
            pd.DataFrame(
                np.array([
                    [0., 1., 0.],
                    [0., 1.1, 0.],
                    [0., 1.1, 0.],
                    [0., 1., 0.],
                    [0., 2., 0.]
                ]),
                index=price_na.index,
                columns=price_na.columns
            )
        )
        # Both-direction holdings: identical across all portfolio variants.
        result = pd.DataFrame(
            np.array([
                [0., -1., 1.],
                [0.1, -1.1, 1.1],
                [0., -1.1, 0.1],
                [0., -1., 0.],
                [1., -2., 0.]
            ]),
            index=price_na.index,
            columns=price_na.columns
        )
        pd.testing.assert_frame_equal(
            portfolio.assets(),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_grouped.assets(),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_shared.assets(),
            result
        )
    def test_position_mask(self):
        """Boolean mask of bars with an open position, per direction and per group.

        A group is "in position" if any of its columns is.
        """
        pd.testing.assert_frame_equal(
            portfolio.position_mask(direction='longonly'),
            pd.DataFrame(
                np.array([
                    [False, False, True],
                    [True, False, True],
                    [False, False, True],
                    [False, False, False],
                    [True, False, False]
                ]),
                index=price_na.index,
                columns=price_na.columns
            )
        )
        pd.testing.assert_frame_equal(
            portfolio.position_mask(direction='shortonly'),
            pd.DataFrame(
                np.array([
                    [False, True, False],
                    [False, True, False],
                    [False, True, False],
                    [False, True, False],
                    [False, True, False]
                ]),
                index=price_na.index,
                columns=price_na.columns
            )
        )
        # Both directions, ungrouped: same for all variants.
        result = pd.DataFrame(
            np.array([
                [False, True, True],
                [True, True, True],
                [False, True, True],
                [False, True, False],
                [True, True, False]
            ]),
            index=price_na.index,
            columns=price_na.columns
        )
        pd.testing.assert_frame_equal(
            portfolio.position_mask(),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_grouped.position_mask(group_by=False),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_shared.position_mask(group_by=False),
            result
        )
        # Grouped mask: OR over the columns of each group.
        result = pd.DataFrame(
            np.array([
                [True, True],
                [True, True],
                [True, True],
                [True, False],
                [True, False]
            ]),
            index=price_na.index,
            columns=pd.Index(['first', 'second'], dtype='object', name='group')
        )
        pd.testing.assert_frame_equal(
            portfolio.position_mask(group_by=group_by),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_grouped.position_mask(),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_shared.position_mask(),
            result
        )
def test_position_coverage(self):
pd.testing.assert_series_equal(
portfolio.position_coverage(direction='longonly'),
pd.Series(np.array([0.4, 0., 0.6]), index=price_na.columns).rename('position_coverage')
)
pd.testing.assert_series_equal(
portfolio.position_coverage(direction='shortonly'),
pd.Series(np.array([0., 1., 0.]), index=price_na.columns).rename('position_coverage')
)
result = pd.Series(np.array([0.4, 1., 0.6]), index=price_na.columns).rename('position_coverage')
pd.testing.assert_series_equal(
portfolio.position_coverage(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.position_coverage(group_by=False),
result
)
pd.testing.assert_series_equal(
portfolio_shared.position_coverage(group_by=False),
result
)
result = pd.Series(
np.array([0.7, 0.6]),
pd.Index(['first', 'second'], dtype='object', name='group')
).rename('position_coverage')
pd.testing.assert_series_equal(
portfolio.position_coverage(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.position_coverage(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.position_coverage(),
result
)
    def test_cash_flow(self):
        """Per-bar cash flow, including the ``free=True`` variant.

        ``free=True`` reports flow of free (unlocked) cash, which differs
        from total cash flow when short positions lock collateral.
        """
        pd.testing.assert_frame_equal(
            portfolio.cash_flow(free=True),
            pd.DataFrame(
                np.array([
                    [0.0, -1.0998999999999999, -1.1201],
                    [-0.30402, -0.2999800000000002, -0.3040200000000002],
                    [0.19402999999999998, 0.0, 2.8402999999999996],
                    [0.0, -0.2920400000000002, 0.29204000000000035],
                    [-5.2005, -5.0995, 0.0]
                ]),
                index=price_na.index,
                columns=price_na.columns
            )
        )
        # Total cash flow, ungrouped: identical across all variants.
        result = pd.DataFrame(
            np.array([
                [0., 0.8801, -1.1201],
                [-0.30402, 0.09602, -0.30402],
                [0.19403, 0., 2.8403],
                [0., -0.50804, 0.29204],
                [-5.2005, 4.8005, 0.]
            ]),
            index=price_na.index,
            columns=price_na.columns
        )
        pd.testing.assert_frame_equal(
            portfolio.cash_flow(),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_grouped.cash_flow(group_by=False),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_shared.cash_flow(group_by=False),
            result
        )
        # Grouped cash flow: sums over each group's columns.
        result = pd.DataFrame(
            np.array([
                [0.8801, -1.1201],
                [-0.208, -0.30402],
                [0.19403, 2.8403],
                [-0.50804, 0.29204],
                [-0.4, 0.]
            ]),
            index=price_na.index,
            columns=pd.Index(['first', 'second'], dtype='object', name='group')
        )
        pd.testing.assert_frame_equal(
            portfolio.cash_flow(group_by=group_by),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_grouped.cash_flow(),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_shared.cash_flow(),
            result
        )
    def test_init_cash(self):
        """Initial cash per column/group, plus the Auto/AutoAlign modes.

        With cash sharing, ungrouped ``get_init_cash`` reports the group's
        shared cash for each member column. ``InitCashMode.Auto`` infers the
        minimum required cash; ``AutoAlign`` additionally aligns all
        columns/groups to the same maximum.
        """
        pd.testing.assert_series_equal(
            portfolio.init_cash,
            pd.Series(np.array([100., 100., 100.]), index=price_na.columns).rename('init_cash')
        )
        pd.testing.assert_series_equal(
            portfolio_grouped.get_init_cash(group_by=False),
            pd.Series(np.array([100., 100., 100.]), index=price_na.columns).rename('init_cash')
        )
        pd.testing.assert_series_equal(
            portfolio_shared.get_init_cash(group_by=False),
            pd.Series(np.array([200., 200., 100.]), index=price_na.columns).rename('init_cash')
        )
        result = pd.Series(
            np.array([200., 100.]),
            pd.Index(['first', 'second'], dtype='object', name='group')
        ).rename('init_cash')
        pd.testing.assert_series_equal(
            portfolio.get_init_cash(group_by=group_by),
            result
        )
        pd.testing.assert_series_equal(
            portfolio_grouped.init_cash,
            result
        )
        pd.testing.assert_series_equal(
            portfolio_shared.init_cash,
            result
        )
        pd.testing.assert_series_equal(
            vbt.Portfolio.from_orders(
                price_na, 1000., init_cash=InitCashMode.Auto, group_by=None).init_cash,
            pd.Series(
                np.array([14000., 12000., 10000.]),
                index=price_na.columns
            ).rename('init_cash')
        )
        pd.testing.assert_series_equal(
            vbt.Portfolio.from_orders(
                price_na, 1000., init_cash=InitCashMode.Auto, group_by=group_by).init_cash,
            pd.Series(
                np.array([26000.0, 10000.0]),
                index=pd.Index(['first', 'second'], dtype='object', name='group')
            ).rename('init_cash')
        )
        pd.testing.assert_series_equal(
            vbt.Portfolio.from_orders(
                price_na, 1000., init_cash=InitCashMode.Auto, group_by=group_by, cash_sharing=True).init_cash,
            pd.Series(
                np.array([26000.0, 10000.0]),
                index=pd.Index(['first', 'second'], dtype='object', name='group')
            ).rename('init_cash')
        )
        pd.testing.assert_series_equal(
            vbt.Portfolio.from_orders(
                price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=None).init_cash,
            pd.Series(
                np.array([14000., 14000., 14000.]),
                index=price_na.columns
            ).rename('init_cash')
        )
        pd.testing.assert_series_equal(
            vbt.Portfolio.from_orders(
                price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=group_by).init_cash,
            pd.Series(
                np.array([26000.0, 26000.0]),
                index=pd.Index(['first', 'second'], dtype='object', name='group')
            ).rename('init_cash')
        )
        pd.testing.assert_series_equal(
            vbt.Portfolio.from_orders(
                price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=group_by, cash_sharing=True).init_cash,
            pd.Series(
                np.array([26000.0, 26000.0]),
                index=pd.Index(['first', 'second'], dtype='object', name='group')
            ).rename('init_cash')
        )
    def test_cash(self):
        """Cash balance per bar: free cash, total cash, shared cash, and grouping.

        ``in_sim_order=True`` replays cash in the simulation's call order,
        which shifts balances between columns of the same shared group.
        """
        pd.testing.assert_frame_equal(
            portfolio.cash(free=True),
            pd.DataFrame(
                np.array([
                    [100.0, 98.9001, 98.8799],
                    [99.69598, 98.60011999999999, 98.57588000000001],
                    [99.89001, 98.60011999999999, 101.41618000000001],
                    [99.89001, 98.30807999999999, 101.70822000000001],
                    [94.68951, 93.20857999999998, 101.70822000000001]
                ]),
                index=price_na.index,
                columns=price_na.columns
            )
        )
        result = pd.DataFrame(
            np.array([
                [100., 100.8801, 98.8799],
                [99.69598, 100.97612, 98.57588],
                [99.89001, 100.97612, 101.41618],
                [99.89001, 100.46808, 101.70822],
                [94.68951, 105.26858, 101.70822]
            ]),
            index=price_na.index,
            columns=price_na.columns
        )
        pd.testing.assert_frame_equal(
            portfolio.cash(),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_grouped.cash(group_by=False),
            result
        )
        # Shared cash, ungrouped view: first-group columns report the shared balance.
        pd.testing.assert_frame_equal(
            portfolio_shared.cash(group_by=False),
            pd.DataFrame(
                np.array([
                    [200., 200.8801, 98.8799],
                    [199.69598, 200.97612, 98.57588],
                    [199.89001, 200.97612, 101.41618],
                    [199.89001, 200.46808, 101.70822],
                    [194.68951, 205.26858, 101.70822]
                ]),
                index=price_na.index,
                columns=price_na.columns
            )
        )
        pd.testing.assert_frame_equal(
            portfolio_shared.cash(group_by=False, in_sim_order=True),
            pd.DataFrame(
                np.array([
                    [200.8801, 200.8801, 98.8799],
                    [200.6721, 200.97612, 98.57588000000001],
                    [200.86613, 200.6721, 101.41618000000001],
                    [200.35809, 200.35809, 101.70822000000001],
                    [199.95809, 205.15859, 101.70822000000001]
                ]),
                index=price_na.index,
                columns=price_na.columns
            )
        )
        # Grouped cash: one column per group.
        result = pd.DataFrame(
            np.array([
                [200.8801, 98.8799],
                [200.6721, 98.57588],
                [200.86613, 101.41618],
                [200.35809, 101.70822],
                [199.95809, 101.70822]
            ]),
            index=price_na.index,
            columns=pd.Index(['first', 'second'], dtype='object', name='group')
        )
        pd.testing.assert_frame_equal(
            portfolio.cash(group_by=group_by),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_grouped.cash(),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_shared.cash(),
            result
        )
    def test_asset_value(self):
        """Monetary value of held assets per bar, split by direction and group."""
        pd.testing.assert_frame_equal(
            portfolio.asset_value(direction='longonly'),
            pd.DataFrame(
                np.array([
                    [0., 0., 1.],
                    [0.2, 0., 2.2],
                    [0., 0., 0.3],
                    [0., 0., 0.],
                    [5., 0., 0.]
                ]),
                index=price_na.index,
                columns=price_na.columns
            )
        )
        pd.testing.assert_frame_equal(
            portfolio.asset_value(direction='shortonly'),
            pd.DataFrame(
                np.array([
                    [0., 1., 0.],
                    [0., 2.2, 0.],
                    [0., 2.2, 0.],
                    [0., 4., 0.],
                    [0., 10., 0.]
                ]),
                index=price_na.index,
                columns=price_na.columns
            )
        )
        # Both directions: short value is negative.
        result = pd.DataFrame(
            np.array([
                [0., -1., 1.],
                [0.2, -2.2, 2.2],
                [0., -2.2, 0.3],
                [0., -4., 0.],
                [5., -10., 0.]
            ]),
            index=price_na.index,
            columns=price_na.columns
        )
        pd.testing.assert_frame_equal(
            portfolio.asset_value(),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_grouped.asset_value(group_by=False),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_shared.asset_value(group_by=False),
            result
        )
        # Grouped value: sums over each group's columns.
        result = pd.DataFrame(
            np.array([
                [-1., 1.],
                [-2., 2.2],
                [-2.2, 0.3],
                [-4., 0.],
                [-5., 0.]
            ]),
            index=price_na.index,
            columns=pd.Index(['first', 'second'], dtype='object', name='group')
        )
        pd.testing.assert_frame_equal(
            portfolio.asset_value(group_by=group_by),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_grouped.asset_value(),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_shared.asset_value(),
            result
        )
    def test_gross_exposure(self):
        """Gross exposure per bar, by direction, and for shared/grouped cash.

        Shared-cash exposures are roughly half the single-column ones because
        the first group's columns share twice the capital.
        """
        pd.testing.assert_frame_equal(
            portfolio.gross_exposure(direction='longonly'),
            pd.DataFrame(
                np.array([
                    [0., 0., 0.01001202],
                    [0.00200208, 0., 0.02183062],
                    [0., 0., 0.00294938],
                    [0., 0., 0.],
                    [0.05015573, 0., 0.]
                ]),
                index=price_na.index,
                columns=price_na.columns
            )
        )
        pd.testing.assert_frame_equal(
            portfolio.gross_exposure(direction='shortonly'),
            pd.DataFrame(
                np.array([
                    [0.0, 0.01000999998999, 0.0],
                    [0.0, 0.021825370842812494, 0.0],
                    [0.0, 0.021825370842812494, 0.0],
                    [0.0, 0.03909759620159034, 0.0],
                    [0.0, 0.09689116931945001, 0.0]
                ]),
                index=price_na.index,
                columns=price_na.columns
            )
        )
        result = pd.DataFrame(
            np.array([
                [0.0, -0.010214494162927312, 0.010012024441354066],
                [0.00200208256628545, -0.022821548354919067, 0.021830620581035857],
                [0.0, -0.022821548354919067, 0.002949383274126105],
                [0.0, -0.04241418126633477, 0.0],
                [0.050155728521486365, -0.12017991413866216, 0.0]
            ]),
            index=price_na.index,
            columns=price_na.columns
        )
        pd.testing.assert_frame_equal(
            portfolio.gross_exposure(),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_grouped.gross_exposure(group_by=False),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_shared.gross_exposure(group_by=False),
            pd.DataFrame(
                np.array([
                    [0.0, -0.00505305454620791, 0.010012024441354066],
                    [0.0010005203706447724, -0.011201622483733716, 0.021830620581035857],
                    [0.0, -0.011201622483733716, 0.002949383274126105],
                    [0.0, -0.020585865497718882, 0.0],
                    [0.025038871596209537, -0.0545825965137659, 0.0]
                ]),
                index=price_na.index,
                columns=price_na.columns
            )
        )
        result = pd.DataFrame(
            np.array([
                [-0.00505305454620791, 0.010012024441354066],
                [-0.010188689433972452, 0.021830620581035857],
                [-0.0112078992458765, 0.002949383274126105],
                [-0.02059752492931316, 0.0],
                [-0.027337628293439265, 0.0]
            ]),
            index=price_na.index,
            columns=pd.Index(['first', 'second'], dtype='object', name='group')
        )
        pd.testing.assert_frame_equal(
            portfolio.gross_exposure(group_by=group_by),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_grouped.gross_exposure(),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_shared.gross_exposure(),
            result
        )
    def test_net_exposure(self):
        """Net exposure per bar (long minus short), ungrouped and grouped."""
        result = pd.DataFrame(
            np.array([
                [0.0, -0.01000999998999, 0.010012024441354066],
                [0.00200208256628545, -0.021825370842812494, 0.021830620581035857],
                [0.0, -0.021825370842812494, 0.002949383274126105],
                [0.0, -0.03909759620159034, 0.0],
                [0.050155728521486365, -0.09689116931945001, 0.0]
            ]),
            index=price_na.index,
            columns=price_na.columns
        )
        pd.testing.assert_frame_equal(
            portfolio.net_exposure(),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_grouped.net_exposure(group_by=False),
            result
        )
        # Shared cash changes the denominator, hence the different fixture.
        pd.testing.assert_frame_equal(
            portfolio_shared.net_exposure(group_by=False),
            pd.DataFrame(
                np.array([
                    [0.0, -0.005002498748124688, 0.010012024441354066],
                    [0.0010005203706447724, -0.010956168751293576, 0.021830620581035857],
                    [0.0, -0.010956168751293576, 0.002949383274126105],
                    [0.0, -0.019771825228137207, 0.0],
                    [0.025038871596209537, -0.049210520540028384, 0.0]
                ]),
                index=price_na.index,
                columns=price_na.columns
            )
        )
        result = pd.DataFrame(
            np.array([
                [-0.005002498748124688, 0.010012024441354066],
                [-0.009965205542937988, 0.021830620581035857],
                [-0.010962173376438594, 0.002949383274126105],
                [-0.019782580537729116, 0.0],
                [-0.0246106361476199, 0.0]
            ]),
            index=price_na.index,
            columns=pd.Index(['first', 'second'], dtype='object', name='group')
        )
        pd.testing.assert_frame_equal(
            portfolio.net_exposure(group_by=group_by),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_grouped.net_exposure(),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_shared.net_exposure(),
            result
        )
    def test_value(self):
        """Portfolio value per bar: ungrouped, shared, sim-order, and grouped."""
        result = pd.DataFrame(
            np.array([
                [100., 99.8801, 99.8799],
                [99.89598, 98.77612, 100.77588],
                [99.89001, 98.77612, 101.71618],
                [99.89001, 96.46808, 101.70822],
                [99.68951, 95.26858, 101.70822]
            ]),
            index=price_na.index,
            columns=price_na.columns
        )
        pd.testing.assert_frame_equal(
            portfolio.value(),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_grouped.value(group_by=False),
            result
        )
        # Shared cash: first-group columns carry the shared balance (offset by 100).
        pd.testing.assert_frame_equal(
            portfolio_shared.value(group_by=False),
            pd.DataFrame(
                np.array([
                    [200., 199.8801, 99.8799],
                    [199.89598, 198.77612, 100.77588],
                    [199.89001, 198.77612, 101.71618],
                    [199.89001, 196.46808, 101.70822],
                    [199.68951, 195.26858, 101.70822]
                ]),
                index=price_na.index,
                columns=price_na.columns
            )
        )
        pd.testing.assert_frame_equal(
            portfolio_shared.value(group_by=False, in_sim_order=True),
            pd.DataFrame(
                np.array([
                    [199.8801, 199.8801, 99.8799],
                    [198.6721, 198.77612000000002, 100.77588000000002],
                    [198.66613, 198.6721, 101.71618000000001],
                    [196.35809, 196.35809, 101.70822000000001],
                    [194.95809, 195.15859, 101.70822000000001]
                ]),
                index=price_na.index,
                columns=price_na.columns
            )
        )
        result = pd.DataFrame(
            np.array([
                [199.8801, 99.8799],
                [198.6721, 100.77588],
                [198.66613, 101.71618],
                [196.35809, 101.70822],
                [194.95809, 101.70822]
            ]),
            index=price_na.index,
            columns=pd.Index(['first', 'second'], dtype='object', name='group')
        )
        pd.testing.assert_frame_equal(
            portfolio.value(group_by=group_by),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_grouped.value(),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_shared.value(),
            result
        )
def test_total_profit(self):
result = pd.Series(
np.array([-0.31049, -4.73142, 1.70822]),
index=price_na.columns
).rename('total_profit')
pd.testing.assert_series_equal(
portfolio.total_profit(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_profit(group_by=False),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_profit(group_by=False),
result
)
result = pd.Series(
np.array([-5.04191, 1.70822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_profit')
pd.testing.assert_series_equal(
portfolio.total_profit(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_profit(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_profit(),
result
)
def test_final_value(self):
result = pd.Series(
np.array([99.68951, 95.26858, 101.70822]),
index=price_na.columns
).rename('final_value')
pd.testing.assert_series_equal(
portfolio.final_value(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.final_value(group_by=False),
result
)
pd.testing.assert_series_equal(
portfolio_shared.final_value(group_by=False),
pd.Series(
np.array([199.68951, 195.26858, 101.70822]),
index=price_na.columns
).rename('final_value')
)
result = pd.Series(
np.array([194.95809, 101.70822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('final_value')
pd.testing.assert_series_equal(
portfolio.final_value(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.final_value(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.final_value(),
result
)
def test_total_return(self):
result = pd.Series(
np.array([-0.0031049, -0.0473142, 0.0170822]),
index=price_na.columns
).rename('total_return')
pd.testing.assert_series_equal(
portfolio.total_return(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_return(group_by=False),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_return(group_by=False),
pd.Series(
np.array([-0.00155245, -0.0236571, 0.0170822]),
index=price_na.columns
).rename('total_return')
)
result = pd.Series(
np.array([-0.02520955, 0.0170822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_return')
pd.testing.assert_series_equal(
portfolio.total_return(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_return(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_return(),
result
)
    def test_returns(self):
        """Per-bar returns: ungrouped, shared, sim-order, and grouped."""
        result = pd.DataFrame(
            np.array([
                [0.00000000e+00, -1.19900000e-03, -1.20100000e-03],
                [-1.04020000e-03, -1.10530526e-02, 8.97057366e-03],
                [-5.97621646e-05, 0.0, 9.33060570e-03],
                [0.00000000e+00, -0.023366376407576966, -7.82569695e-05],
                [-2.00720773e-03, -1.24341648e-02, 0.00000000e+00]
            ]),
            index=price_na.index,
            columns=price_na.columns
        )
        pd.testing.assert_frame_equal(
            portfolio.returns(),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_grouped.returns(group_by=False),
            result
        )
        # Shared cash roughly halves the first group's per-column returns.
        pd.testing.assert_frame_equal(
            portfolio_shared.returns(group_by=False),
            pd.DataFrame(
                np.array([
                    [0.00000000e+00, -5.99500000e-04, -1.20100000e-03],
                    [-5.20100000e-04, -5.52321117e-03, 8.97057366e-03],
                    [-2.98655331e-05, 0.0, 9.33060570e-03],
                    [0.00000000e+00, -0.011611253907159497, -7.82569695e-05],
                    [-1.00305163e-03, -6.10531746e-03, 0.00000000e+00]
                ]),
                index=price_na.index,
                columns=price_na.columns
            )
        )
        pd.testing.assert_frame_equal(
            portfolio_shared.returns(group_by=False, in_sim_order=True),
            pd.DataFrame(
                np.array([
                    [0.0, -0.0005995000000000062, -1.20100000e-03],
                    [-0.0005233022960706736, -0.005523211165093367, 8.97057366e-03],
                    [-3.0049513746473233e-05, 0.0, 9.33060570e-03],
                    [0.0, -0.011617682390048093, -7.82569695e-05],
                    [-0.0010273695869600474, -0.0061087373583639994, 0.00000000e+00]
                ]),
                index=price_na.index,
                columns=price_na.columns
            )
        )
        result = pd.DataFrame(
            np.array([
                [-5.99500000e-04, -1.20100000e-03],
                [-6.04362315e-03, 8.97057366e-03],
                [-3.0049513746473233e-05, 9.33060570e-03],
                [-0.011617682390048093, -7.82569695e-05],
                [-7.12983101e-03, 0.00000000e+00]
            ]),
            index=price_na.index,
            columns=pd.Index(['first', 'second'], dtype='object', name='group')
        )
        pd.testing.assert_frame_equal(
            portfolio.returns(group_by=group_by),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_grouped.returns(),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_shared.returns(),
            result
        )
    def test_active_returns(self):
        """Returns computed only over active (invested) capital.

        ``-inf`` entries are expected where a position opens from zero
        invested capital on that bar.
        """
        result = pd.DataFrame(
            np.array([
                [0., -np.inf, -np.inf],
                [-np.inf, -1.10398, 0.89598],
                [-0.02985, 0.0, 0.42740909],
                [0., -1.0491090909090908, -0.02653333],
                [-np.inf, -0.299875, 0.]
            ]),
            index=price_na.index,
            columns=price_na.columns
        )
        pd.testing.assert_frame_equal(
            portfolio.active_returns(),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_grouped.active_returns(group_by=False),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_shared.active_returns(group_by=False),
            result
        )
        result = pd.DataFrame(
            np.array([
                [-np.inf, -np.inf],
                [-1.208, 0.89598],
                [-0.0029850000000000154, 0.42740909],
                [-1.0491090909090908, -0.02653333],
                [-0.35, 0.]
            ]),
            index=price_na.index,
            columns=pd.Index(['first', 'second'], dtype='object', name='group')
        )
        pd.testing.assert_frame_equal(
            portfolio.active_returns(group_by=group_by),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_grouped.active_returns(),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_shared.active_returns(),
            result
        )
    def test_market_value(self):
        """Buy-and-hold (market) value of the initial capital per bar."""
        result = pd.DataFrame(
            np.array([
                [100., 100., 100.],
                [100., 200., 200.],
                [150., 200., 300.],
                [200., 400., 400.],
                [250., 500., 400.]
            ]),
            index=price_na.index,
            columns=price_na.columns
        )
        pd.testing.assert_frame_equal(
            portfolio.market_value(),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_grouped.market_value(group_by=False),
            result
        )
        # Shared cash doubles the first group's starting capital.
        pd.testing.assert_frame_equal(
            portfolio_shared.market_value(group_by=False),
            pd.DataFrame(
                np.array([
                    [200., 200., 100.],
                    [200., 400., 200.],
                    [300., 400., 300.],
                    [400., 800., 400.],
                    [500., 1000., 400.]
                ]),
                index=price_na.index,
                columns=price_na.columns
            )
        )
        result = pd.DataFrame(
            np.array([
                [200., 100.],
                [300., 200.],
                [350., 300.],
                [600., 400.],
                [750., 400.]
            ]),
            index=price_na.index,
            columns=pd.Index(['first', 'second'], dtype='object', name='group')
        )
        pd.testing.assert_frame_equal(
            portfolio.market_value(group_by=group_by),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_grouped.market_value(),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_shared.market_value(),
            result
        )
    def test_market_returns(self):
        """Per-bar returns of the buy-and-hold benchmark."""
        result = pd.DataFrame(
            np.array([
                [0., 0., 0.],
                [0., 1., 1.],
                [0.5, 0., 0.5],
                [0.33333333, 1., 0.33333333],
                [0.25, 0.25, 0.]
            ]),
            index=price_na.index,
            columns=price_na.columns
        )
        pd.testing.assert_frame_equal(
            portfolio.market_returns(),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_grouped.market_returns(group_by=False),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_shared.market_returns(group_by=False),
            result
        )
        result = pd.DataFrame(
            np.array([
                [0., 0.],
                [0.5, 1.],
                [0.16666667, 0.5],
                [0.71428571, 0.33333333],
                [0.25, 0.]
            ]),
            index=price_na.index,
            columns=pd.Index(['first', 'second'], dtype='object', name='group')
        )
        pd.testing.assert_frame_equal(
            portfolio.market_returns(group_by=group_by),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_grouped.market_returns(),
            result
        )
        pd.testing.assert_frame_equal(
            portfolio_shared.market_returns(),
            result
        )
def test_total_market_return(self):
    """Total benchmark return per column and per group."""
    per_column = pd.Series(
        np.array([1.5, 4., 3.]),
        index=price_na.columns,
        name='total_market_return'
    )
    pd.testing.assert_series_equal(
        portfolio.total_market_return(), per_column)
    pd.testing.assert_series_equal(
        portfolio_grouped.total_market_return(group_by=False), per_column)
    pd.testing.assert_series_equal(
        portfolio_shared.total_market_return(group_by=False), per_column)
    per_group = pd.Series(
        np.array([2.75, 3.]),
        index=pd.Index(['first', 'second'], dtype='object', name='group'),
        name='total_market_return'
    )
    pd.testing.assert_series_equal(
        portfolio.total_market_return(group_by=group_by), per_group)
    pd.testing.assert_series_equal(
        portfolio_grouped.total_market_return(), per_group)
    pd.testing.assert_series_equal(
        portfolio_shared.total_market_return(), per_group)
def test_return_method(self):
"""Return-derived methods on the shared portfolio: cumulative returns
(grouped and per-column) and sharpe_ratio under several parameterizations."""
# Grouped cumulative returns (default group_by).
pd.testing.assert_frame_equal(
portfolio_shared.cumulative_returns(),
pd.DataFrame(
np.array([
[-0.000599499999999975, -0.0012009999999998966],
[-0.006639499999999909, 0.007758800000000177],
[-0.006669349999999907, 0.017161800000000005],
[-0.01820955000000002, 0.017082199999999936],
[-0.025209550000000136, 0.017082199999999936]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
)
# Per-column cumulative returns (grouping disabled).
pd.testing.assert_frame_equal(
portfolio_shared.cumulative_returns(group_by=False),
pd.DataFrame(
np.array([
[0.0, -0.000599499999999975, -0.0012009999999998966],
[-0.0005201000000001343, -0.006119399999999886, 0.007758800000000177],
[-0.0005499500000001323, -0.006119399999999886, 0.017161800000000005],
[-0.0005499500000001323, -0.017659599999999886, 0.017082199999999936],
[-0.0015524500000001495, -0.023657099999999875, 0.017082199999999936]
]),
index=price_na.index,
columns=price_na.columns
)
)
# Default Sharpe ratio per group.
pd.testing.assert_series_equal(
portfolio_shared.sharpe_ratio(),
pd.Series(
np.array([-16.697884366310568, 10.257634695847853]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
# Non-zero risk-free rate shifts the ratio.
pd.testing.assert_series_equal(
portfolio_shared.sharpe_ratio(risk_free=0.01),
pd.Series(
np.array([-49.54098765664797, -19.873024060759022]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
# Custom annualization frequency.
pd.testing.assert_series_equal(
portfolio_shared.sharpe_ratio(year_freq='365D'),
pd.Series(
np.array([-20.095906945591288, 12.345065267401496]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
# Per-column Sharpe ratio with grouping disabled.
pd.testing.assert_series_equal(
portfolio_shared.sharpe_ratio(group_by=False),
pd.Series(
np.array([-11.058998255347488, -16.018796953152307, 10.257634695847853]),
index=price_na.columns
).rename('sharpe_ratio')
)
def test_stats(self):
"""Portfolio.stats(): aggregated means, single-column selection, parameter
overrides (risk_free/required_return, active_returns, incl_unrealized),
grouped stats, and equivalence of `pf[col].stats()` vs `stats(column=col)`."""
# Mean stats across all columns.
pd.testing.assert_series_equal(
portfolio.stats(),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, -1.1112299999999966,
-1.1112299999999966, 283.3333333333333, 66.66666666666667,
1.6451238489727062, 1.6451238489727062,
pd.Timedelta('3 days 08:00:00'), pd.Timedelta('3 days 08:00:00'),
1.3333333333333333, 33.333333333333336, -98.38058805880588,
-100.8038553855386, -99.59222172217225,
pd.Timedelta('2 days 08:00:00'), pd.Timedelta('2 days 04:00:00'),
0.10827272727272726, 1.2350921335789007, -0.008766789792898303,
-5.609478162762282, 26.256548486255838, 5720.684444410799
]),
index=pd.Index([
'Start', 'End', 'Duration', 'Init. Cash', 'Total Profit',
'Total Return [%]', 'Benchmark Return [%]', 'Position Coverage [%]',
'Max. Drawdown [%]', 'Avg. Drawdown [%]', 'Max. Drawdown Duration',
'Avg. Drawdown Duration', 'Num. Trades', 'Win Rate [%]',
'Best Trade [%]', 'Worst Trade [%]', 'Avg. Trade [%]',
'Max. Trade Duration', 'Avg. Trade Duration', 'Expectancy', 'SQN',
'Gross Exposure', 'Sharpe Ratio', 'Sortino Ratio', 'Calmar Ratio'
], dtype='object'),
name='stats_mean')
)
# Single column 'a' with default settings.
pd.testing.assert_series_equal(
portfolio['a'].stats(),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, -0.3104900000000015,
-0.3104900000000015, 150.0, 40.0, 0.3104900000000015,
0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
pd.Timedelta('4 days 00:00:00'), 1, 0.0, -54.450495049504966,
-54.450495049504966, -54.450495049504966,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'),
-0.10999000000000003, np.nan, 0.010431562217554364,
-11.057783842772304, -9.75393669809172, -46.721467294341814
]),
index=pd.Index([
'Start', 'End', 'Duration', 'Init. Cash', 'Total Profit',
'Total Return [%]', 'Benchmark Return [%]', 'Position Coverage [%]',
'Max. Drawdown [%]', 'Avg. Drawdown [%]', 'Max. Drawdown Duration',
'Avg. Drawdown Duration', 'Num. Trades', 'Win Rate [%]',
'Best Trade [%]', 'Worst Trade [%]', 'Avg. Trade [%]',
'Max. Trade Duration', 'Avg. Trade Duration', 'Expectancy', 'SQN',
'Gross Exposure', 'Sharpe Ratio', 'Sortino Ratio', 'Calmar Ratio'
], dtype='object'),
name='a')
)
# required_return/risk_free only affect the risk-adjusted ratios.
pd.testing.assert_series_equal(
portfolio['a'].stats(required_return=0.1, risk_free=0.01),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, -0.3104900000000015,
-0.3104900000000015, 150.0, 40.0, 0.3104900000000015,
0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
pd.Timedelta('4 days 00:00:00'), 1, 0.0, -54.450495049504966,
-54.450495049504966, -54.450495049504966,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'),
-0.10999000000000003, np.nan, 0.010431562217554364,
-188.9975847831419, -15.874008737030774, -46.721467294341814
]),
index=pd.Index([
'Start', 'End', 'Duration', 'Init. Cash', 'Total Profit',
'Total Return [%]', 'Benchmark Return [%]', 'Position Coverage [%]',
'Max. Drawdown [%]', 'Avg. Drawdown [%]', 'Max. Drawdown Duration',
'Avg. Drawdown Duration', 'Num. Trades', 'Win Rate [%]',
'Best Trade [%]', 'Worst Trade [%]', 'Avg. Trade [%]',
'Max. Trade Duration', 'Avg. Trade Duration', 'Expectancy', 'SQN',
'Gross Exposure', 'Sharpe Ratio', 'Sortino Ratio', 'Calmar Ratio'
], dtype='object'),
name='a')
)
# Active (benchmark-relative) returns: the trailing ratios become NaN here.
pd.testing.assert_series_equal(
portfolio['a'].stats(active_returns=True),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, -0.3104900000000015,
-0.3104900000000015, 150.0, 40.0, 0.3104900000000015,
0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
pd.Timedelta('4 days 00:00:00'), 1, 0.0, -54.450495049504966,
-54.450495049504966, -54.450495049504966,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'),
-0.10999000000000003, np.nan, 0.010431562217554364, np.nan, np.nan, np.nan
]),
index=pd.Index([
'Start', 'End', 'Duration', 'Init. Cash', 'Total Profit',
'Total Return [%]', 'Benchmark Return [%]', 'Position Coverage [%]',
'Max. Drawdown [%]', 'Avg. Drawdown [%]', 'Max. Drawdown Duration',
'Avg. Drawdown Duration', 'Num. Trades', 'Win Rate [%]',
'Best Trade [%]', 'Worst Trade [%]', 'Avg. Trade [%]',
'Max. Trade Duration', 'Avg. Trade Duration', 'Expectancy', 'SQN',
'Gross Exposure', 'Sharpe Ratio', 'Sortino Ratio', 'Calmar Ratio'
], dtype='object'),
name='a')
)
# Including unrealized trades changes trade counts and trade-based metrics.
pd.testing.assert_series_equal(
portfolio['a'].stats(incl_unrealized=True),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, -0.3104900000000015,
-0.3104900000000015, 150.0, 40.0, 0.3104900000000015,
0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
pd.Timedelta('4 days 00:00:00'), 2, 0.0, -3.9702970297029667,
-54.450495049504966, -29.210396039603964,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('0 days 12:00:00'),
-0.1552449999999999, -3.43044967406917, 0.010431562217554364,
-11.057783842772304, -9.75393669809172, -46.721467294341814
]),
index=pd.Index([
'Start', 'End', 'Duration', 'Init. Cash', 'Total Profit',
'Total Return [%]', 'Benchmark Return [%]', 'Position Coverage [%]',
'Max. Drawdown [%]', 'Avg. Drawdown [%]', 'Max. Drawdown Duration',
'Avg. Drawdown Duration', 'Num. Trades', 'Win Rate [%]',
'Best Trade [%]', 'Worst Trade [%]', 'Avg. Trade [%]',
'Max. Trade Duration', 'Avg. Trade Duration', 'Expectancy', 'SQN',
'Gross Exposure', 'Sharpe Ratio', 'Sortino Ratio', 'Calmar Ratio'
], dtype='object'),
name='first')
)
# Stats of one whole group.
pd.testing.assert_series_equal(
portfolio_grouped['first'].stats(),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 200.0, -5.04191,
-2.520955, 275.0, 70.0, 2.46248125751388,
2.46248125751388, pd.Timedelta('4 days 00:00:00'),
pd.Timedelta('4 days 00:00:00'), 2, 0.0, -54.450495049504966,
-388.2424242424243, -221.34645964596461,
pd.Timedelta('3 days 00:00:00'), pd.Timedelta('2 days 00:00:00'),
-0.2646459090909091, -1.711191707103453, -0.01716935548563326,
-17.828382866511035, -12.417661888716555, -29.395593091285203
]),
index=pd.Index([
'Start', 'End', 'Duration', 'Init. Cash', 'Total Profit',
'Total Return [%]', 'Benchmark Return [%]', 'Position Coverage [%]',
'Max. Drawdown [%]', 'Avg. Drawdown [%]', 'Max. Drawdown Duration',
'Avg. Drawdown Duration', 'Num. Trades', 'Win Rate [%]',
'Best Trade [%]', 'Worst Trade [%]', 'Avg. Trade [%]',
'Max. Trade Duration', 'Avg. Trade Duration', 'Expectancy', 'SQN',
'Gross Exposure', 'Sharpe Ratio', 'Sortino Ratio', 'Calmar Ratio'
], dtype='object'),
name='first')
)
# Column selection via stats(column=...) must match indexing.
pd.testing.assert_series_equal(
portfolio['c'].stats(),
portfolio.stats(column='c')
)
pd.testing.assert_series_equal(
portfolio['c'].stats(),
portfolio_grouped.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
portfolio_grouped['second'].stats(),
portfolio_grouped.stats(column='second')
)
def test_returns_stats(self):
"""Portfolio.returns_stats(): aggregated means, single column, single group,
and equivalence of `pf[col].returns_stats()` vs `returns_stats(column=col)`."""
# Mean return stats across all columns.
pd.testing.assert_series_equal(
portfolio.returns_stats(),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), -1.1112300000000113, 283.3333333333333,
9.669922456336872, 8.29654627059829, -5.609478162762282, 5720.684444410799,
-1.6451238489727107, 4.768700318817701, 26.256548486255838, -0.3997971268456455,
-1.2025410695003063, 3.1644021626949534, 7.42228636406823, -0.007990063884177678,
-0.26918960772379186, -0.00123384949617063
]),
index=pd.Index([
'Start', 'End', 'Duration', 'Total Return [%]', 'Benchmark Return [%]',
'Annual Return [%]', 'Annual Volatility [%]', 'Sharpe Ratio',
'Calmar Ratio', 'Max. Drawdown [%]', 'Omega Ratio', 'Sortino Ratio',
'Skew', 'Kurtosis', 'Tail Ratio', 'Common Sense Ratio', 'Value at Risk',
'Alpha', 'Beta'
], dtype='object'),
name='stats_mean')
)
# Single column 'a'.
pd.testing.assert_series_equal(
portfolio['a'].returns_stats(),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), -0.3104900000000077,
150.0, -14.50654838022003, 1.4162092947628355,
-11.057783842772304, -46.721467294341814, -0.3104899999999966,
0.0, -9.75393669809172, -1.2191070234483876,
0.12297560887596681, 0.0, 0.0,
-0.0018138061822238526, -0.1792948451549693, 0.0007493142128979539
]),
index=pd.Index([
'Start', 'End', 'Duration', 'Total Return [%]', 'Benchmark Return [%]',
'Annual Return [%]', 'Annual Volatility [%]', 'Sharpe Ratio',
'Calmar Ratio', 'Max. Drawdown [%]', 'Omega Ratio', 'Sortino Ratio',
'Skew', 'Kurtosis', 'Tail Ratio', 'Common Sense Ratio', 'Value at Risk',
'Alpha', 'Beta'
], dtype='object'),
name='a')
)
# Single group 'first'.
pd.testing.assert_series_equal(
portfolio_grouped['first'].returns_stats(),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), -2.5209550000000025, 275.0, -72.38609704079437,
7.187935871660704, -17.828382866511035, -29.395593091285203, -2.462481257513882,
0.0, -12.417661888716555, -0.19681929158210584, -1.5821971095816858,
0.05430622731792859, 0.014996068912378419, -0.010308823936793889,
-0.21603224384824826, -0.012303869004819437
]),
index=pd.Index([
'Start', 'End', 'Duration', 'Total Return [%]', 'Benchmark Return [%]',
'Annual Return [%]', 'Annual Volatility [%]', 'Sharpe Ratio',
'Calmar Ratio', 'Max. Drawdown [%]', 'Omega Ratio', 'Sortino Ratio',
'Skew', 'Kurtosis', 'Tail Ratio', 'Common Sense Ratio', 'Value at Risk',
'Alpha', 'Beta'
], dtype='object'),
name='first')
)
# Column selection via returns_stats(column=...) must match indexing.
pd.testing.assert_series_equal(
portfolio['c'].returns_stats(),
portfolio.returns_stats(column='c')
)
pd.testing.assert_series_equal(
portfolio['c'].returns_stats(),
portfolio_grouped.returns_stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
portfolio_grouped['second'].returns_stats(),
portfolio_grouped.returns_stats(column='second')
)
def test_plot_methods(self):
    """Plot with an explicit column for every portfolio variant; plotting a
    multi-column/multi-group portfolio without selecting a column must raise.

    Fix: the original bound each raises-context to an unused ``e_info``
    variable; the binding is dropped.
    """
    _ = portfolio.plot(column='a', subplots='all')
    _ = portfolio_grouped.plot(column='first', subplots='all')
    _ = portfolio_grouped.plot(column='a', subplots='all', group_by=False)
    _ = portfolio_shared.plot(column='a', subplots='all', group_by=False)
    with pytest.raises(Exception):
        portfolio.plot(subplots='all')
    with pytest.raises(Exception):
        portfolio_grouped.plot(subplots='all')
| 44.801931
| 119
| 0.503149
| 34,583
| 241,348
| 3.363734
| 0.02562
| 0.0561
| 0.03693
| 0.01623
| 0.897806
| 0.872756
| 0.854283
| 0.829491
| 0.800031
| 0.754522
| 0
| 0.205596
| 0.333336
| 241,348
| 5,386
| 120
| 44.810249
| 0.517396
| 0.000833
| 0
| 0.579559
| 0
| 0
| 0.028413
| 0
| 0
| 0
| 0
| 0
| 0.109756
| 1
| 0.026713
| false
| 0
| 0.002516
| 0.002516
| 0.03523
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c368f9d9d68e3e7f223f4117c4f4f8d90c4a315f
| 124
|
py
|
Python
|
dl_lib/network/loss/__init__.py
|
harrylin-hyl/CenterNet-better
|
567d4370ddf2b52386b46321dbd9b77e7e9c8b8b
|
[
"Apache-2.0"
] | 543
|
2020-02-23T08:58:46.000Z
|
2022-03-24T04:56:14.000Z
|
dl_lib/network/loss/__init__.py
|
harrylin-hyl/CenterNet-better
|
567d4370ddf2b52386b46321dbd9b77e7e9c8b8b
|
[
"Apache-2.0"
] | 50
|
2020-02-25T08:58:42.000Z
|
2022-01-27T06:01:31.000Z
|
dl_lib/network/loss/__init__.py
|
harrylin-hyl/CenterNet-better
|
567d4370ddf2b52386b46321dbd9b77e7e9c8b8b
|
[
"Apache-2.0"
] | 99
|
2020-02-23T14:13:57.000Z
|
2022-02-27T08:46:48.000Z
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
from .focal_loss import modified_focal_loss
from .reg_l1_loss import reg_l1_loss
| 20.666667
| 43
| 0.758065
| 21
| 124
| 4.142857
| 0.619048
| 0.206897
| 0.206897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036364
| 0.112903
| 124
| 5
| 44
| 24.8
| 0.754545
| 0.306452
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
6f1a0329d5efbadf0d8e840b9a3fde0344866bc3
| 69
|
py
|
Python
|
Warmup-1/diff21.py
|
VivekM27/Coding-Bat-Python-Solutions
|
14d5c6ccaa2129e56a5898374dec60740fe6761b
|
[
"Apache-2.0"
] | null | null | null |
Warmup-1/diff21.py
|
VivekM27/Coding-Bat-Python-Solutions
|
14d5c6ccaa2129e56a5898374dec60740fe6761b
|
[
"Apache-2.0"
] | null | null | null |
Warmup-1/diff21.py
|
VivekM27/Coding-Bat-Python-Solutions
|
14d5c6ccaa2129e56a5898374dec60740fe6761b
|
[
"Apache-2.0"
] | null | null | null |
# DIFF21
def diff21(n):
    """Return the absolute difference between n and 21, doubled if n > 21.

    Fix: compute abs(n - 21) once instead of duplicating the expression.
    """
    distance = abs(n - 21)
    return 2 * distance if n > 21 else distance
| 23
| 43
| 0.637681
| 16
| 69
| 2.75
| 0.5625
| 0.204545
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.196429
| 0.188406
| 69
| 3
| 43
| 23
| 0.589286
| 0.086957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
6f2acae20752d379e98663266d51c6afd9079e2a
| 11,361
|
py
|
Python
|
graphtheory/shortestpaths/allpairs.py
|
mashal02/graphs-dict
|
39917d8a7f3bdcd5d95f3549ca054d16ba535e90
|
[
"BSD-3-Clause"
] | 36
|
2015-09-20T20:55:39.000Z
|
2021-09-20T05:49:03.000Z
|
graphtheory/shortestpaths/allpairs.py
|
mashal02/graphs-dict
|
39917d8a7f3bdcd5d95f3549ca054d16ba535e90
|
[
"BSD-3-Clause"
] | 6
|
2016-03-25T21:41:46.000Z
|
2020-02-12T03:18:59.000Z
|
graphtheory/shortestpaths/allpairs.py
|
mashal02/graphs-dict
|
39917d8a7f3bdcd5d95f3549ca054d16ba535e90
|
[
"BSD-3-Clause"
] | 9
|
2016-09-12T07:57:27.000Z
|
2022-03-21T16:15:39.000Z
|
#!/usr/bin/python
# Python 2/3 compatibility shim: `long` and `xrange` exist only on Python 2;
# on Python 3 the NameError branch falls back to `int` alone and aliases
# `xrange` to the built-in `range`.
try:
integer_types = (int, long)
except NameError: # Python 3
integer_types = (int,)
xrange = range
class SlowAllPairs:
    """All-pairs shortest paths in O(V^4) time (repeated relaxation).

    Attributes
    ----------
    graph : input directed weighted graph
    distance : dict-of-dict, distance[source][target]
    weights : dict-of-dict, private copy of the initial edge weights

    Examples
    --------
    >>> from graphtheory.structures.edges import Edge
    >>> from graphtheory.structures.graphs import Graph
    >>> from graphtheory.shortestpaths.allpairs import SlowAllPairs
    >>> G = Graph(n=10, True)   # an exemplary directed graph
    # Add nodes and edges here.
    >>> algorithm = SlowAllPairs(G)
    >>> algorithm.run()
    >>> algorithm.distance[source][target]

    Notes
    -----
    Based on: Cormen, Leiserson, Rivest, Stein, Introduction to Algorithms,
    third edition, The MIT Press, 2009 (extended-shortest-paths).
    """

    def __init__(self, graph):
        """Build the initial distance matrix from *graph*.

        Raises ValueError if the graph is not directed.
        """
        if not graph.is_directed():
            raise ValueError("the graph is not directed")
        self.graph = graph
        self.distance = dict()
        self.weights = dict()
        for source in self.graph.iternodes():   # O(V^2) time
            self.distance[source] = dict()
            for target in self.graph.iternodes():
                self.distance[source][target] = float("inf")
            self.distance[source][source] = 0
        for edge in self.graph.iteredges():   # O(E) time
            self.distance[edge.source][edge.target] = edge.weight
        for source in self.graph.iternodes():
            # Keep a private copy of the one-edge weights for relaxation.
            self.weights[source] = dict(self.distance[source])

    def run(self):
        """Relax |V|-2 times, then detect negative cycles.

        Fix: use the built-in ``range`` (valid on both Python 2 and 3)
        instead of relying on the module-level ``xrange`` shim, making
        the class self-contained.
        """
        for _ in range(2, self.graph.v()):   # |V|-2 times
            self.distance = self.extended_shortest_paths(self.distance)
        if any(self.distance[node][node] < 0 for node in self.graph.iternodes()):
            raise ValueError("negative cycle detected")

    def extended_shortest_paths(self, old_distance):
        """One relaxation pass over all node triples, O(V^3) time."""
        new_distance = dict()
        for source in self.graph.iternodes():
            new_distance[source] = dict()
            for target in self.graph.iternodes():
                new_distance[source][target] = float("inf")
                for node in self.graph.iternodes():
                    new_distance[source][target] = min(new_distance[source][target],
                        old_distance[source][node] + self.weights[node][target])
        return new_distance
class SlowAllPairsEdges:
    """All-pairs shortest paths in O(V^2 (V+E)) time (edge-driven relaxation).

    Attributes
    ----------
    graph : input directed weighted graph
    distance : dict-of-dict, distance[source][target]
    weights : dict-of-dict, private copy of the initial edge weights

    Examples
    --------
    >>> from graphtheory.structures.edges import Edge
    >>> from graphtheory.structures.graphs import Graph
    >>> from graphtheory.shortestpaths.allpairs import SlowAllPairsEdges
    >>> G = Graph(n=10, True)   # an exemplary directed graph
    # Add nodes and edges here.
    >>> algorithm = SlowAllPairsEdges(G)
    >>> algorithm.run()
    >>> algorithm.distance[source][target]

    Notes
    -----
    Based on: Cormen, Leiserson, Rivest, Stein, Introduction to Algorithms,
    third edition, The MIT Press, 2009.
    """

    def __init__(self, graph):
        """Build the initial distance matrix from *graph*.

        Raises ValueError if the graph is not directed.
        """
        if not graph.is_directed():
            raise ValueError("the graph is not directed")
        self.graph = graph
        self.distance = dict()
        self.weights = dict()
        for source in self.graph.iternodes():   # O(V^2) time
            self.distance[source] = dict()
            for target in self.graph.iternodes():
                self.distance[source][target] = float("inf")
            self.distance[source][source] = 0
        for edge in self.graph.iteredges():   # O(E) time
            self.distance[edge.source][edge.target] = edge.weight
        for source in self.graph.iternodes():
            self.weights[source] = dict(self.distance[source])

    def run(self):
        """Relax |V|-2 times, then detect negative cycles.

        Fix: use the built-in ``range`` (valid on both Python 2 and 3)
        instead of relying on the module-level ``xrange`` shim.
        """
        for _ in range(2, self.graph.v()):   # |V|-2 times
            self.distance = self.extended_shortest_paths(self.distance)
        if any(self.distance[node][node] < 0 for node in self.graph.iternodes()):
            raise ValueError("negative cycle detected")

    def extended_shortest_paths(self, old_distance):
        """One relaxation pass over all edges per source, O(V*(V+E)) time."""
        new_distance = dict()
        for source in self.graph.iternodes():   # |V| times
            new_distance[source] = dict(old_distance[source])   # IMPORTANT, O(V) copy
            for edge in self.graph.iteredges():   # O(E) time
                new_distance[source][edge.target] = min(
                    new_distance[source][edge.target],
                    old_distance[source][edge.source] + edge.weight)
        return new_distance
class SlowAllPairsWithPaths:   # not for FasterAllPairsSP
    """All-pairs shortest paths with path reconstruction, O(V^4) time.

    Attributes
    ----------
    graph : input directed weighted graph
    distance : dict-of-dict, distance[source][target]
    weights : dict-of-dict, private copy of the initial edge weights
    parent : dict-of-dict, predecessor of target on the best source->target path

    Examples
    --------
    >>> from graphtheory.structures.edges import Edge
    >>> from graphtheory.structures.graphs import Graph
    >>> from graphtheory.shortestpaths.allpairs import SlowAllPairsWithPaths
    >>> G = Graph(n=10, True)   # an exemplary directed graph
    # Add nodes and edges here.
    >>> algorithm = SlowAllPairsWithPaths(G)
    >>> algorithm.run()
    >>> algorithm.distance[source][target]
    >>> algorithm.path(source, target)

    Notes
    -----
    Based on: Cormen, Leiserson, Rivest, Stein, Introduction to Algorithms,
    third edition, The MIT Press, 2009.
    """

    def __init__(self, graph):
        """Build the initial distance and parent matrices from *graph*.

        Raises ValueError if the graph is not directed.
        """
        if not graph.is_directed():
            raise ValueError("the graph is not directed")
        self.graph = graph
        self.distance = dict()
        self.weights = dict()
        self.parent = dict()
        for source in self.graph.iternodes():   # O(V^2) time
            self.distance[source] = dict()
            self.parent[source] = dict()
            for target in self.graph.iternodes():
                self.distance[source][target] = float("inf")
                self.parent[source][target] = None
            self.distance[source][source] = 0
        for edge in self.graph.iteredges():   # O(E) time
            self.distance[edge.source][edge.target] = edge.weight
            self.parent[edge.source][edge.target] = edge.source
        for source in self.graph.iternodes():
            self.weights[source] = dict(self.distance[source])

    def run(self):
        """Relax |V|-2 times, then detect negative cycles.

        Fix: use the built-in ``range`` (valid on both Python 2 and 3)
        instead of relying on the module-level ``xrange`` shim.
        """
        for _ in range(2, self.graph.v()):   # |V|-2 times
            self.distance = self.extended_shortest_paths(self.distance)
        if any(self.distance[node][node] < 0 for node in self.graph.iternodes()):
            raise ValueError("negative cycle detected")

    def extended_shortest_paths(self, old_distance):
        """One relaxation pass, O(V^3) time; also updates parent pointers."""
        new_distance = dict()
        for source in self.graph.iternodes():
            new_distance[source] = dict(old_distance[source])   # IMPORTANT, copy
            for target in self.graph.iternodes():
                for node in self.graph.iternodes():
                    alt = old_distance[source][node] + self.weights[node][target]
                    if new_distance[source][target] > alt:
                        new_distance[source][target] = alt
                        self.parent[source][target] = node
        return new_distance

    def path(self, source, target):
        """Reconstruct the shortest path from source to target (recursive).

        Raises ValueError when no path exists.
        """
        if source == target:
            return [source]
        elif self.parent[source][target] is None:
            raise ValueError("no path to target")
        else:
            return self.path(source, self.parent[source][target]) + [target]
class FasterAllPairs:
    """All-pairs shortest paths in O(V^3 log V) time via repeated squaring.

    Attributes
    ----------
    graph : input directed weighted graph
    distance : dict-of-dict, distance[source][target]

    Examples
    --------
    >>> from graphtheory.structures.edges import Edge
    >>> from graphtheory.structures.graphs import Graph
    >>> from graphtheory.shortestpaths.allpairs import FasterAllPairs
    >>> G = Graph(n=10, True)   # an exemplary directed graph
    # Add nodes and edges here.
    >>> algorithm = FasterAllPairs(G)
    >>> algorithm.run()
    >>> algorithm.distance[source][target]

    Notes
    -----
    Based on: Cormen, Leiserson, Rivest, Stein, Introduction to Algorithms,
    third edition, The MIT Press, 2009.
    """

    def __init__(self, graph):
        """Initialize the distance matrix; raise ValueError for undirected input."""
        if not graph.is_directed():
            raise ValueError("the graph is not directed")
        self.graph = graph
        self.distance = dict()
        for u in self.graph.iternodes():   # O(V^2) time
            row = dict()
            for w in self.graph.iternodes():
                row[w] = float("inf")   # IMPORTANT
            row[u] = 0
            self.distance[u] = row
        for edge in self.graph.iteredges():   # O(E) time
            self.distance[edge.source][edge.target] = edge.weight

    def run(self):
        """Square the distance matrix until paths of |V|-1 edges are covered."""
        span = 1
        while span < (self.graph.v() - 1):   # log(V) squarings
            self.distance = self.extended_shortest_paths(self.distance)
            span = 2 * span
        if any(self.distance[u][u] < 0 for u in self.graph.iternodes()):
            raise ValueError("negative cycle detected")

    def extended_shortest_paths(self, old_distance):
        """One squaring step (min-plus product of the matrix with itself), O(V^3)."""
        result = dict()
        for u in self.graph.iternodes():
            result[u] = dict()
            for w in self.graph.iternodes():
                best = float("inf")
                for mid in self.graph.iternodes():
                    candidate = old_distance[u][mid] + old_distance[mid][w]
                    if candidate < best:
                        best = candidate
                result[u][w] = best
        return result
# EOF
| 37.006515
| 84
| 0.585688
| 1,284
| 11,361
| 5.126947
| 0.104361
| 0.057421
| 0.050129
| 0.075953
| 0.882273
| 0.850828
| 0.842321
| 0.842321
| 0.811332
| 0.798116
| 0
| 0.006569
| 0.289851
| 11,361
| 306
| 85
| 37.127451
| 0.80937
| 0.352874
| 0
| 0.769784
| 0
| 0
| 0.034094
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.093525
| false
| 0
| 0
| 0
| 0.165468
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6f54976c6b707dc73d721e83208db529f0656584
| 2,899
|
py
|
Python
|
core/fitness.py
|
binh234/genetic_sudoku
|
57985493d53af0e7e085de3a72c901b64bb56be8
|
[
"MIT"
] | null | null | null |
core/fitness.py
|
binh234/genetic_sudoku
|
57985493d53af0e7e085de3a72c901b64bb56be8
|
[
"MIT"
] | null | null | null |
core/fitness.py
|
binh234/genetic_sudoku
|
57985493d53af0e7e085de3a72c901b64bb56be8
|
[
"MIT"
] | null | null | null |
import numpy as np
from .helper import same_column_indexes, same_row_indexes, get_cells_from_indexes
from .settings import DIGIT_NUMBER, BLOCK_NUMBER, GOAL
class DifferentFitness():
def cal_fitness(self, candidate, tracker):
""" The fitness of a candidate solution is the total count of distinct
numerals over every row and every column of the grid.

Also fills candidate.fitness_matrix (2 x BLOCK_NUMBER) with per-band
distinct counts: row 0 for row bands, row 1 for column bands.

Parameters:
- candidate (Candidate): The candidate to evaluate
- tracker (array): Helper array that determines all possible values for each cell in the chromosome
(unused by this fitness variant)
"""
row_fitness = 0
col_fitness = 0
candidate.fitness_matrix = np.zeros((2, BLOCK_NUMBER), dtype=int)
# calculate rows duplicates
for a, b in same_column_indexes(0, 0):
row = set()
for x, y in same_row_indexes(a, b):
value = candidate.gene[x][y]
row.add(value)
row_fitness += len(row)
candidate.fitness_matrix[0][a // BLOCK_NUMBER] += len(row)
# calculate column duplicates (distinct values per column)
for a, b in same_row_indexes(0, 0):
col = set()
for x, y in same_column_indexes(a, b):
value = candidate.gene[x][y]
col.add(value)
col_fitness += len(col)
candidate.fitness_matrix[1][a] += len(col)
return row_fitness + col_fitness
class PerfectFitness:
def cal_fitness(self, candidate, tracker=None):
""" The fitness of a candidate solution is the total count of distinct
numerals over every row and column, minus the number of cells whose
value is not permitted by the tracker.

Also fills candidate.fitness_matrix (2 x BLOCK_NUMBER) with per-band
distinct counts: row 0 for row bands, row 1 for column bands.

Parameters:
- candidate (Candidate): The candidate to evaluate
- tracker (array): Helper array that determines all possible values for each cell in the chromosome
"""
row_fitness = 0
col_fitness = 0
duplicates_count = 0
candidate.fitness_matrix = np.zeros((2, BLOCK_NUMBER), dtype=int)
# calculate rows duplicates
for a, b in same_column_indexes(0, 0):
row = set()
for x, y in same_row_indexes(a, b):
value = candidate.gene[x][y]
row.add(value)
# penalize values the tracker does not allow for this cell
if value not in tracker[x][y]:
duplicates_count += 1
row_fitness += len(row)
candidate.fitness_matrix[0][a // BLOCK_NUMBER] += len(row)
# calculate column duplicates (distinct values per column)
for a, b in same_row_indexes(0, 0):
col = set()
for x, y in same_column_indexes(a, b):
value = candidate.gene[x][y]
col.add(value)
if value not in tracker[x][y]:
duplicates_count += 1
col_fitness += len(col)
candidate.fitness_matrix[1][a] += len(col)
return row_fitness + col_fitness - duplicates_count
| 37.649351
| 111
| 0.57951
| 369
| 2,899
| 4.409214
| 0.214092
| 0.012293
| 0.081131
| 0.01721
| 0.846343
| 0.846343
| 0.805778
| 0.805778
| 0.805778
| 0.75169
| 0
| 0.010943
| 0.338048
| 2,899
| 77
| 112
| 37.649351
| 0.836894
| 0.235598
| 0
| 0.791667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.0625
| 0
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6f5e28a68d05c9f4e7e69a979fcc4e413bc4f707
| 36
|
py
|
Python
|
src/lib/wave.py
|
DTenore/skulpt
|
098d20acfb088d6db85535132c324b7ac2f2d212
|
[
"MIT"
] | 2,671
|
2015-01-03T08:23:25.000Z
|
2022-03-31T06:15:48.000Z
|
src/lib/wave.py
|
wakeupmuyunhe/skulpt
|
a8fb11a80fb6d7c016bab5dfe3712517a350b347
|
[
"MIT"
] | 972
|
2015-01-05T08:11:00.000Z
|
2022-03-29T13:47:15.000Z
|
src/lib/wave.py
|
wakeupmuyunhe/skulpt
|
a8fb11a80fb6d7c016bab5dfe3712517a350b347
|
[
"MIT"
] | 845
|
2015-01-03T19:53:36.000Z
|
2022-03-29T18:34:22.000Z
|
# Stub module: presumably _sk_fail._("wave") signals that the "wave" module
# is not implemented in this environment — TODO confirm _sk_fail semantics.
import _sk_fail; _sk_fail._("wave")
| 18
| 35
| 0.75
| 6
| 36
| 3.666667
| 0.666667
| 0.545455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 36
| 1
| 36
| 36
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
6f756ee9ea0b4a0c9ed3b590d07a191b6b37b74c
| 1,762
|
py
|
Python
|
src/browserist/exception/window_handle.py
|
jakob-bagterp/browserist
|
76bd916dd217b7da3759fd6ec3374191002dc091
|
[
"Apache-2.0"
] | 2
|
2022-02-20T10:03:19.000Z
|
2022-03-22T11:17:10.000Z
|
src/browserist/exception/window_handle.py
|
jakob-bagterp/browserist
|
76bd916dd217b7da3759fd6ec3374191002dc091
|
[
"Apache-2.0"
] | null | null | null |
src/browserist/exception/window_handle.py
|
jakob-bagterp/browserist
|
76bd916dd217b7da3759fd6ec3374191002dc091
|
[
"Apache-2.0"
] | null | null | null |
class WindowHandleIdNotFoundError(Exception):
    """Raised when a window handle ID cannot be resolved."""

    __slots__ = ["message"]

    def __init__(self, id: str) -> None:
        message = f"Window handle ID not found or doesn't exist: {id}"
        self.message = message
        super().__init__(message)

    def __str__(self) -> str:
        return self.message
class WindowHandleNameNotFoundError(Exception):
    """Raised when a window handle name cannot be resolved."""

    __slots__ = ["message"]

    def __init__(self, name: str) -> None:
        message = f"Window handle name not found or doesn't exist: {name}"
        self.message = message
        super().__init__(message)

    def __str__(self) -> str:
        return self.message
class WindowHandleIdNotUniqueError(Exception):
    """Raised when a window handle ID is already registered."""

    __slots__ = ["message"]

    def __init__(self, id: str) -> None:
        message = f"Window handle ID already exists: {id}"
        self.message = message
        super().__init__(message)

    def __str__(self) -> str:
        return self.message
class WindowHandleNameNotUniqueError(Exception):
    """Raised when a window handle name is already registered."""

    __slots__ = ["message"]

    def __init__(self, name: str) -> None:
        message = f"Window handle name already exists: {name}"
        self.message = message
        super().__init__(message)

    def __str__(self) -> str:
        return self.message
class WindowHandleIdNotValidError(Exception):
    """Raised when a window handle ID fails format validation."""

    __slots__ = ["message"]

    def __init__(self, id: str) -> None:
        message = f"Window handle ID has invalid format: {id}"
        self.message = message
        super().__init__(message)

    def __str__(self) -> str:
        return self.message
class WindowHandleNameNotValidError(Exception):
    """Raised when a window handle name is rejected as invalid."""

    __slots__ = ["message"]

    def __init__(self, name: str) -> None:
        detail = f"Window handle name is invalid. Try using another name than this: {name}"
        self.message = detail
        super().__init__(detail)

    def __str__(self) -> str:
        return self.message
| 27.107692
| 97
| 0.651532
| 201
| 1,762
| 5.233831
| 0.19403
| 0.188213
| 0.119772
| 0.136882
| 0.759506
| 0.759506
| 0.719582
| 0.719582
| 0.719582
| 0.719582
| 0
| 0
| 0.230988
| 1,762
| 64
| 98
| 27.53125
| 0.776384
| 0
| 0
| 0.714286
| 0
| 0
| 0.189557
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0
| 0.142857
| 0.714286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 9
|
48ebaa24c7a1bc6cd52f2b9cd0db72d00adfa0e0
| 1,017
|
py
|
Python
|
1401-1500/1425-Constrained Subsequence Sum/1425-Constrained Subsequence Sum.py
|
jiadaizhao/LeetCode
|
4ddea0a532fe7c5d053ffbd6870174ec99fc2d60
|
[
"MIT"
] | 49
|
2018-05-05T02:53:10.000Z
|
2022-03-30T12:08:09.000Z
|
1401-1500/1425-Constrained Subsequence Sum/1425-Constrained Subsequence Sum.py
|
jolly-fellow/LeetCode
|
ab20b3ec137ed05fad1edda1c30db04ab355486f
|
[
"MIT"
] | 11
|
2017-12-15T22:31:44.000Z
|
2020-10-02T12:42:49.000Z
|
1401-1500/1425-Constrained Subsequence Sum/1425-Constrained Subsequence Sum.py
|
jolly-fellow/LeetCode
|
ab20b3ec137ed05fad1edda1c30db04ab355486f
|
[
"MIT"
] | 28
|
2017-12-05T10:56:51.000Z
|
2022-01-26T18:18:27.000Z
|
import collections
from typing import List  # fix: List was used in annotations without being imported


class Solution:
    """LeetCode 1425 - Constrained Subsequence Sum.

    dp[i] is the maximum sum of a non-empty subsequence ending at nums[i]
    where consecutive chosen indices differ by at most k. A monotonically
    decreasing deque of dp *values* tracks the best dp over the last k
    positions, giving an O(n) sweep.
    """

    def constrainedSubsetSum(self, nums: List[int], k: int) -> int:
        dp = nums[:]  # base case: the subsequence consisting of nums[i] alone
        dq = collections.deque()  # decreasing deque of positive dp values in the window
        for i, num in enumerate(nums):
            # Extend the best positive chain inside the last k indices (0 if none helps).
            dp[i] += dq[0] if dq else 0
            # Keep the deque decreasing: smaller older values can never win again.
            while dq and dp[i] >= dq[-1]:
                dq.pop()
            # Only positive sums can improve a later element.
            if dp[i] > 0:
                dq.append(dp[i])
            # Evict the front when it corresponds to the value leaving the window.
            # Comparing by value may pop a duplicate, but since the front is the
            # window maximum this still keeps the front maximum correct.
            if i >= k and dq and dq[0] == dp[i - k]:
                dq.popleft()
        return max(dp)  # assumes nums is non-empty (per problem constraints)
# Variant: put the index instead of the value in the deque, which makes the
# window-eviction check a simple index comparison.
import collections
from typing import List  # fix: List was used in annotations without being imported


class Solution:
    """LeetCode 1425 - Constrained Subsequence Sum (index-based deque).

    Same O(n) monotonic-deque DP as above, but the deque stores indices, so
    elements that fall out of the k-window are evicted by position rather
    than by value.
    """

    def constrainedSubsetSum(self, nums: List[int], k: int) -> int:
        dp = nums[:]  # dp[i] = best sum of a valid subsequence ending at i
        dq = collections.deque()  # indices with decreasing dp values, all within the window
        for i, num in enumerate(nums):
            # Best positive dp value within the last k indices (0 if none helps).
            dp[i] += dp[dq[0]] if dq else 0
            # Maintain decreasing dp order among stored indices.
            while dq and dp[i] >= dp[dq[-1]]:
                dq.pop()
            # Only positive sums can improve a later element.
            if dp[i] > 0:
                dq.append(i)
            # Drop the front index once it is more than k positions behind.
            if dq and dq[0] <= i - k:
                dq.popleft()
        return max(dp)  # assumes nums is non-empty (per problem constraints)
| 29.911765
| 67
| 0.443461
| 134
| 1,017
| 3.365672
| 0.261194
| 0.053215
| 0.097561
| 0.133038
| 0.855876
| 0.855876
| 0.855876
| 0.758315
| 0.758315
| 0.758315
| 0
| 0.017241
| 0.429695
| 1,017
| 33
| 68
| 30.818182
| 0.760345
| 0.035398
| 0
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.071429
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
48f5d5c7c6c64bda1aea7f5c2c3f117c325b2c1c
| 30,379
|
py
|
Python
|
general/tests/tests_views.py
|
GuillaumeStaub/client-manager
|
81298dc43956499f05c0f4d55992fd7dfced49b0
|
[
"MIT"
] | null | null | null |
general/tests/tests_views.py
|
GuillaumeStaub/client-manager
|
81298dc43956499f05c0f4d55992fd7dfced49b0
|
[
"MIT"
] | null | null | null |
general/tests/tests_views.py
|
GuillaumeStaub/client-manager
|
81298dc43956499f05c0f4d55992fd7dfced49b0
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.urls import reverse, reverse_lazy
import json
from django.contrib.auth.models import User
from general.models import Client, Commande, InfosTechniques, Forfait, Saison, Evenement
class ClientsListViewTest(TestCase):
    """Tests for the client list ("home") view: login redirect, template,
    pagination and the extra context variables it exposes."""

    @classmethod
    def setUpTestData(cls):
        # Create one user for login-protected views.
        test_user1 = User.objects.create_user(username='testuser1', password='1X<ISRUkw+tuK')
        test_user1.save()
        # Create 62 Clients for pagination tests (plus one more below => 63 total).
        number_of_clients = 62
        for client_id in range(number_of_clients):
            Client.objects.create(
                prenom=f'Christian {client_id}',
                nom=f'Surname {client_id}',
                adresse=f' {client_id} rue de paris',
                code_postal=f'33000',
                commune='Bordeaux',
                telephone=f'07875478{client_id}',
            )
        # One full Commande chain so the totals in context are non-zero.
        infos_techniques = InfosTechniques.objects.create(matricule_compteur='674', num_armoire='CH02')
        saison = Saison.objects.create(nom='2020 - Octobre')
        forfait = Forfait.objects.create(nom='Forfait 2', description='Puissance inférieure à 18kVA', prix_ht=14.17,
                                         taxe=20.00,
                                         prix_ttc=17.00, saison=saison)
        client = Client.objects.create(nom='Rodriguez', prenom='Jean', adresse='13 rue de la paix', code_postal=75000,
                                       commune='Paris', telephone='0600112233', email='jean.villard@yahooo.com', )
        event = Evenement.objects.create(nom='Brocante des Quinquonces', ville='Bordeaux', type='Brocante')
        Commande.objects.create(saison=saison, puissance=18, forfait=forfait, nb_jours=23, client=client,
                                infos_techniques=infos_techniques,
                                evenement=event, payee=True)

    def test_redirect_if_not_logged_in(self):
        # Anonymous access must redirect to the login page with a next param.
        response = self.client.get(reverse('home'))
        self.assertRedirects(response, '/users/login/?next=%2F')

    def test_logged_in_uses_correct_template(self):
        login = self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        response = self.client.get(reverse('home'))
        # Check our user is logged in
        self.assertEqual(str(response.context['user']), 'testuser1')
        # Check that we got a response "success"
        self.assertEqual(response.status_code, 200)
        # Check we used correct template
        self.assertTemplateUsed(response, 'general/home.html')

    def test_view_url_exists_at_desired_location(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        response = self.client.get('')
        self.assertEqual(str(response.context['user']), 'testuser1')
        assert response.status_code == 200

    def test_view_url_accessible_by_name(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        response = self.client.get(reverse('home'))
        self.assertEqual(str(response.context['user']), 'testuser1')
        assert response.status_code == 200

    def test_view_uses_correct_template(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        response = self.client.get(reverse('home'))
        assert response.status_code == 200
        self.assertEqual(str(response.context['user']), 'testuser1')
        self.assertTemplateUsed(response, 'general/home.html')

    def test_lists_all_clients(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        # NOTE(review): asserts all 63 clients appear on page 2, which suggests
        # the view exposes the full queryset regardless of page — confirm
        # against the view's pagination behavior.
        response = self.client.get(reverse('home') + '?page=2')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(str(response.context['user']), 'testuser1')
        self.assertTrue(len(response.context['clients']) == 63)

    def test_nb_clients_in_context(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        response = self.client.get(reverse('home'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(str(response.context['user']), 'testuser1')
        self.assertTrue('nb_clients' in response.context)
        self.assertTrue(response.context['nb_clients'] == 63)

    def test_total_commandes_in_context(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        response = self.client.get(reverse('home'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(str(response.context['user']), 'testuser1')
        self.assertTrue('total_commandes' in response.context)
        # 23 days * 17.00 TTC = 391.00 for the single paid Commande above.
        assert float(response.context['total_commandes']) == 391.00

    def test_field_names_in_context(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        response = self.client.get(reverse('home'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(str(response.context['user']), 'testuser1')
        self.assertTrue('field_names' in response.context)
        assert response.context['field_names'] == ['Nom', 'Prenom', 'Téléphone', 'Commune']
class ClientsCreateViewTest(TestCase):
    """Tests for the client creation view and its inline command formsets."""

    @classmethod
    def setUpTestData(cls):
        test_user1 = User.objects.create_user(username='testuser1', password='1X<ISRUkw+tuK')
        test_user1.save()
        Client.objects.create(
            prenom='Christian', nom='Surname', adresse='rue de paris',
            code_postal='33000', commune='Bordeaux', telephone='0787547810'
        )
        InfosTechniques.objects.create(matricule_compteur='674', num_armoire='CH02')
        saison = Saison.objects.create(nom='2020 - Octobre')
        Forfait.objects.create(nom='Forfait 2', description='Puissance inférieure à 18kVA', prix_ht=14.17,
                               taxe=20.00,
                               prix_ttc=17.00, saison=saison)
        Evenement.objects.create(nom='Brocante des Quinquonces', ville='Bordeaux', type='Brocante')

    def test_redirect_if_not_logged_in(self):
        response = self.client.get(reverse('create_client'))
        self.assertRedirects(response, '/users/login/?next=/create/client')

    def test_view_url_exists_at_desired_location(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        response = self.client.get('/create/client')
        assert response.status_code == 200

    def test_view_url_accessible_by_name(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        response = self.client.get(reverse('create_client'))
        assert response.status_code == 200

    def test_view_uses_correct_template(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        response = self.client.get(reverse('create_client'))
        assert response.status_code == 200
        self.assertTemplateUsed(response, 'general/create_client.html')

    def test_create_client(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        data = {}
        response = self.client.get(reverse('create_client'))
        assert response.status_code == 200
        # Rebuild the management-form plus inline form fields exactly as
        # rendered so the POST passes formset validation.
        formsets = [formset for formset in response.context['inlines']]
        for formset in formsets:
            for field in formset.management_form:
                data["-".join((formset.management_form.prefix, field.name))] = field.value()
            for form in formset:
                for field in form:
                    data["-".join((form.prefix, field.name))] = field.value() if field.value() is not None else ''
        client = {'prenom': 'ChriTest', 'nom': 'Test', 'adresse': 'rue de paris',
                  'code_postal': '33000', 'commune': 'Bordeaux', 'telephone': '0787547810'}
        for key, value in client.items():
            data[key] = value
        self.client.post(reverse('create_client'), data)
        # NOTE(review): 72 is a magic PK that depends on the DB sequence state
        # from the fixtures above — fragile if fixtures change.
        self.assertEqual(Client.objects.last().id, 72)

    def test_create_client_and_command(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        data = {}
        response = self.client.get(reverse('create_client'))
        assert response.status_code == 200
        formsets = [formset for formset in response.context['inlines']]
        for formset in formsets:
            for field in formset.management_form:
                data["-".join((formset.management_form.prefix, field.name))] = field.value()
        client = {'prenom': 'ChriTest', 'nom': 'Test2', 'adresse': 'rue de paris',
                  'code_postal': '33000', 'commune': 'Bordeaux', 'telephone': '0787547810'}
        # Hand-built inline formset payload: one filled command (client-0-*)
        # and one blank extra form (client-1-*).
        commande = {'client-0-saison': Saison.objects.last().nom, 'client-0-evenement': Evenement.objects.last().id,
                    'client-0-puissance': 18,
                    'client-0-forfait': Forfait.objects.last().nom,
                    'client-0-nb_jours': 23, 'client-0-client': '',
                    'client-0-infos_techniques': InfosTechniques.objects.last().id,
                    'client-0-total_ht': 0.0, 'client-0-total_ttc': 0.0, 'client-0-payee': False,
                    'client-0-id': '',
                    'client-1-saison': '', 'client-1-evenement': '', 'client-1-puissance': 0,
                    'client-1-forfait': '',
                    'client-1-nb_jours': 23, 'client-1-client': '', 'client-1-infos_techniques': '',
                    'client-1-total_ht': 0.0, 'client-1-total_ttc': 0.0, 'client-1-payee': False,
                    'client-1-id': ''}
        for key, value in client.items():
            data[key] = value
        for key, value in commande.items():
            data[key] = value
        self.client.post(reverse('create_client'), data)
        # NOTE(review): magic PK 71 — depends on fixture insertion order.
        self.assertEqual(Client.objects.last().id, 71)

    def test_view_inlines_in_context(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        response = self.client.get(reverse('create_client'))
        assert response.status_code == 200
        self.assertTrue('inlines' in response.context)

    def test_view_two_formset_in_context(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        response = self.client.get(reverse('create_client'))
        assert response.status_code == 200
        formsets = [formset for formset in response.context['inlines']]
        # NOTE(review): assertTrue(x, msg) — the 2 is treated as a message, so
        # this only checks that formsets is non-empty; likely meant assertEqual.
        self.assertTrue(len(formsets), 2)

    def test_view_form_in_context(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        response = self.client.get(reverse('create_client'))
        assert response.status_code == 200
        self.assertTrue('form' in response.context)
class ClientDeleteTest(TestCase):
    """Tests for the client delete view (login redirect and POST deletion)."""

    @classmethod
    def setUpTestData(cls):
        test_user1 = User.objects.create_user(username='testuser1', password='1X<ISRUkw+tuK')
        test_user1.save()
        Client.objects.create(
            prenom='TestDelete', nom='Jean', adresse='rue de paris',
            code_postal='33000', commune='Bordeaux', telephone='0787547810'
        )

    def test_redirect_if_not_logged_in(self):
        id_client = Client.objects.last().id
        response = self.client.get('/delete/{}'.format(id_client))
        self.assertRedirects(response, '/users/login/?next=/delete/{}'.format(id_client))

    def test_view_url_exists_at_desired_location(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        response = self.client.get(f'/delete/{Client.objects.get(prenom="TestDelete").id}', follow=True)
        assert response.status_code == 200

    def test_object_is_delete_with_post(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        # Create a dedicated client so the deletion does not affect other tests.
        Client.objects.create(
            prenom='TestDelete2', nom='Jean', adresse='rue de paris',
            code_postal='33000', commune='Bordeaux', telephone='0787547810'
        )
        id_client = Client.objects.get(prenom="TestDelete2").id
        self.client.post(f'/delete/{id_client}')
        # verifies that a non-existent object returns a 404 error.
        null_response = self.client.get(f'/delete/{id_client}')
        self.assertEqual(null_response.status_code, 404)

    # NOTE(review): the following tests are disabled via a bare string literal.
    """
    def test_object_is_delete_with_get(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        Client.objects.create(
            prenom='TestDelete3', nom='Jean', adresse='rue de paris',
            code_postal='33000', commune='Bordeaux', telephone='0787547810'
        )
        id_client = Client.objects.get(prenom="TestDelete3").id
        self.client.get(f'/delete/{id_client}')
        # verifies that a non-existent object returns a 404 error.
        null_response = self.client.get(f'/delete/{id_client}')
        self.assertEqual(null_response.status_code, 404)
    def test_success_redirect_after_delete(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        Client.objects.create(
            prenom='TestDelete4', nom='Jean', adresse='rue de paris',
            code_postal='33000', commune='Bordeaux', telephone='0787547810'
        )
        id_client = Client.objects.get(prenom="TestDelete4").id
        success_url = reverse_lazy('home')
        response = self.client.get(f'/delete/{id_client}', follow=True)
        self.assertRedirects(response, success_url)
    """
class UpdateClientViewTest(TestCase):
    """Tests for the client update view and its inline command formsets."""

    @classmethod
    def setUpTestData(cls):
        test_user1 = User.objects.create_user(username='testuser1', password='1X<ISRUkw+tuK')
        test_user1.save()
        client = Client.objects.create(
            prenom='Michel', nom='Antoine', adresse='rue de paris',
            code_postal='33000', commune='Bordeaux', telephone='0787547810'
        )
        infos_techniques = InfosTechniques.objects.create(matricule_compteur='670', num_armoire='CH02')
        saison = Saison.objects.create(nom='2020 - Octobre')
        forfait = Forfait.objects.create(nom='Forfait 2', description='Puissance inférieure à 18kVA', prix_ht=14.17,
                                         taxe=20.00,
                                         prix_ttc=17.00, saison=saison)
        event = Evenement.objects.create(nom='Brocante des Quinquonces', ville='Bordeaux', type='Brocante')
        Commande.objects.create(saison=saison, puissance=20, forfait=forfait, nb_jours=23, client=client,
                                infos_techniques=infos_techniques,
                                evenement=event, payee=True)

    def test_redirect_if_not_logged_in(self):
        id_client = Client.objects.last().id
        response = self.client.get('/update/{}'.format(id_client))
        self.assertRedirects(response, '/users/login/?next=/update/{}'.format(id_client))

    def test_view_url_exists_at_desired_location(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        id_client_to_update = Client.objects.get(nom='Antoine').id
        response = self.client.get(f'/update/{id_client_to_update}')
        assert response.status_code == 200

    def test_view_url_accessible_by_name(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        id_client_to_update = Client.objects.get(nom='Antoine').id
        response = self.client.get(reverse('update_client', kwargs={'pk': id_client_to_update}))
        assert response.status_code == 200

    def test_view_uses_correct_template(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        id_client_to_update = Client.objects.get(nom='Antoine').id
        response = self.client.get(reverse('update_client', kwargs={'pk': id_client_to_update}))
        assert response.status_code == 200
        self.assertTemplateUsed(response, 'general/update_client.html')

    def test_update_client(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        id_client_to_update = Client.objects.get(nom='Antoine').id
        data = {}
        response = self.client.get(reverse('update_client', kwargs={'pk': id_client_to_update}))
        assert response.status_code == 200
        # Rebuild the management-form plus inline form fields exactly as
        # rendered so the POST passes formset validation.
        formsets = [formset for formset in response.context['inlines']]
        for formset in formsets:
            for field in formset.management_form:
                data["-".join((formset.management_form.prefix, field.name))] = field.value()
            for form in formset:
                for field in form:
                    data["-".join((form.prefix, field.name))] = field.value() if field.value() is not None else ''
        client = {'prenom': 'Marc', 'nom': 'Antoine', 'adresse': 'rue de paris',
                  'code_postal': '33000', 'commune': 'Bordeaux', 'telephone': '0787547810'}
        for key, value in client.items():
            data[key] = value
        # fix: removed leftover debug print(data) that cluttered test output
        self.client.post(reverse('update_client', kwargs={'pk': id_client_to_update}), data, follow=True)
        self.assertEqual(Client.objects.last().prenom, 'Marc')

    def test_view_inlines_in_context(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        id_client_to_update = Client.objects.get(nom='Antoine').id
        response = self.client.get(reverse('update_client', kwargs={'pk': id_client_to_update}))
        assert response.status_code == 200
        self.assertTrue('inlines' in response.context)

    def test_view_two_formset_in_context(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        id_client_to_update = Client.objects.get(nom='Antoine').id
        response = self.client.get(reverse('update_client', kwargs={'pk': id_client_to_update}))
        assert response.status_code == 200
        formsets = [formset for formset in response.context['inlines']]
        # NOTE(review): assertTrue(x, msg) — the 3 is treated as a message, so
        # this only checks that formsets is non-empty; likely meant assertEqual.
        self.assertTrue(len(formsets), 3)

    def test_view_form_in_context(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        id_client_to_update = Client.objects.get(nom='Antoine').id
        response = self.client.get(reverse('update_client', kwargs={'pk': id_client_to_update}))
        assert response.status_code == 200
        self.assertTrue('form' in response.context)

    # NOTE(review): the following test is disabled via a bare string literal.
    """
    def test_update_client_and_command(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        id_client_to_update = Client.objects.get(nom='Antoine').id
        data = {}
        response = self.client.get(reverse('update_client', kwargs={'pk': id_client_to_update}))
        assert response.status_code == 200
        formsets = [formset for formset in response.context['inlines']]
        for formset in formsets:
            for field in formset.management_form:
                data["-".join((formset.management_form.prefix, field.name))] = field.value()
        client = {'prenom': 'Paul', 'nom': 'Antoine', 'adresse': 'rue de paris',
                  'code_postal': '33000', 'commune': 'Bordeaux', 'telephone': '0787547810'}
        commande = {'client-0-saison': Saison.objects.last().nom, 'client-0-evenement': Evenement.objects.last().id,
                    'client-0-puissance': 24,
                    'client-0-forfait': Forfait.objects.last().nom,
                    'client-0-nb_jours': 23, 'client-0-client': id_client_to_update,
                    'client-0-infos_techniques': InfosTechniques.objects.last().id,
                    'client-0-total_ht': 0.0, 'client-0-total_ttc': 0.0, 'client-0-payee': True,
                    'client-0-id': Commande.objects.last().id,
                    'client-1-saison': '', 'client-1-evenement': '', 'client-1-puissance': 0,
                    'client-1-forfait': '',
                    'client-1-nb_jours': 23, 'client-1-client': '', 'client-1-infos_techniques': '',
                    'client-1-total_ht': 0.0, 'client-1-total_ttc': 0.0, 'client-1-payee': False,
                    'client-1-id': '',
                    'client-2-saison': '', 'client-2-evenement': '', 'client-2-puissance': 0,
                    'client-2-forfait': '',
                    'client-2-nb_jours': 23, 'client-2-client': '', 'client-2-infos_techniques': '',
                    'client-2-total_ht': 0.0, 'client-2-total_ttc': 0.0, 'client-2-payee': False,
                    'client-2-id': ''
                    }
        for key, value in client.items():
            data[key] = value
        for key, value in commande.items():
            data[key] = value
        self.client.post(reverse('update_client', kwargs={'pk': id_client_to_update}), data)
        self.assertEqual(Commande.objects.last().puissance, 20)
        self.assertEqual(Commande.objects.last().client.prenom, 'Paul')
    """
class AjaxForfaitTest(TestCase):
    """Tests for the AJAX endpoint that returns a Forfait's prices by name."""

    @classmethod
    def setUpTestData(cls):
        test_user1 = User.objects.create_user(username='testuser1', password='1X<ISRUkw+tuK')
        test_user1.save()
        saison = Saison.objects.create(nom='2020 - Octobre')
        Forfait.objects.create(nom='Forfait 2', description='Puissance inférieure à 18kVA', prix_ht=14.17,
                               taxe=20.00, prix_ttc=17.00, saison=saison)

    def test_redirect_if_not_logged_in(self):
        response = self.client.get('/ajax_forfait/')
        self.assertRedirects(response, '/users/login/?next=/ajax_forfait/')

    def test_view_url_exists_at_desired_location(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        response = self.client.get('/ajax_forfait/', {'forfait_name': ''})
        assert response.status_code == 200

    def test_view_url_accessible_by_name(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        response = self.client.get(reverse('ajax_forfait'), {'forfait_name': ''})
        assert response.status_code == 200

    # NOTE(review): the following tests are disabled via a bare string literal.
    """
    def test_response_view_if_forfait_name(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        correct_response = json.dumps({"forfait_price_ht": '14.17', "forfait_price_ttc": '17.00',
                                       "forfait_taxe": '20.00'})
        response = self.client.get(reverse('ajax_forfait'), {'forfait_name': 'Forfait 2'})
        assert response.status_code == 200
        self.assertEqual(json.loads(response.content), json.loads(correct_response))
    def test_response_view_if_not_forfait_name(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        correct_response = json.dumps({"forfait_price_ht": 0.0, "forfait_price_ttc": 0.0,
                                       "forfait_taxe": 20.0})
        response = self.client.get(reverse('ajax_forfait'), {'forfait_name': ''})
        assert response.status_code == 200
        self.assertEqual(json.loads(response.content), json.loads(correct_response))
    """
class CommandesListViewTest(TestCase):
    """Tests for the command list view: login redirect, template, pagination
    and the aggregate totals exposed in the context."""

    @classmethod
    def setUpTestData(cls):
        # Create one user for login-protected views.
        test_user1 = User.objects.create_user(username='testuser1', password='1X<ISRUkw+tuK')
        test_user1.save()
        client = Client.objects.create(nom='Rodriguez', prenom='Jean', adresse='13 rue de la paix', code_postal=75000,
                                       commune='Paris', telephone='0600112233', email='jean.villard@yahooo.com', )
        infos_techniques = InfosTechniques.objects.create(matricule_compteur='674', num_armoire='CH02')
        saison = Saison.objects.create(nom='2020 - Octobre')
        forfait = Forfait.objects.create(nom='Forfait 2', description='Puissance inférieure à 18kVA', prix_ht=14.17,
                                         taxe=20.00,
                                         prix_ttc=17.00, saison=saison)
        event = Evenement.objects.create(nom='Brocante des Quinquonces', ville='Bordeaux', type='Brocante')
        # Create 62 Commandes for pagination tests.
        number_of_commandes = 62
        for commande_id in range(number_of_commandes):
            Commande.objects.create(saison=saison, puissance=18, forfait=forfait, nb_jours=23, client=client,
                                    infos_techniques=infos_techniques,
                                    evenement=event, payee=True)

    def test_redirect_if_not_logged_in(self):
        response = self.client.get(reverse('commandes'))
        self.assertRedirects(response, '/users/login/?next=/commandes/')

    def test_view_url_exists_at_desired_location(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        response = self.client.get('/commandes/')
        self.assertEqual(str(response.context['user']), 'testuser1')
        assert response.status_code == 200

    def test_view_url_accessible_by_name(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        response = self.client.get(reverse('commandes'))
        self.assertEqual(str(response.context['user']), 'testuser1')
        assert response.status_code == 200

    def test_view_uses_correct_template(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        response = self.client.get(reverse('commandes'))
        assert response.status_code == 200
        self.assertEqual(str(response.context['user']), 'testuser1')
        self.assertTemplateUsed(response, 'general/commandes_list.html')

    def test_lists_all_commandes(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        # NOTE(review): asserts all 62 commands appear on page 2, which suggests
        # the view exposes the full queryset regardless of page — confirm
        # against the view's pagination behavior.
        response = self.client.get(reverse('commandes') + '?page=2')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(str(response.context['user']), 'testuser1')
        self.assertTrue(len(response.context['commandes']) == 62)

    def test_nb_commandes_in_context(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        response = self.client.get(reverse('commandes'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(str(response.context['user']), 'testuser1')
        self.assertTrue('nb_commandes' in response.context)
        self.assertTrue(response.context['nb_commandes'] == 62)

    def test_total_commandes_in_context(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        response = self.client.get(reverse('commandes'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(str(response.context['user']), 'testuser1')
        self.assertTrue('total_commandes_payees' in response.context)
        # 62 paid commands * 23 days * 17.00 TTC = 24242.00.
        assert float(response.context['total_commandes_payees']) == 24242.00

    def test_nb_commandes_non_payee_in_context(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        response = self.client.get(reverse('commandes'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(str(response.context['user']), 'testuser1')
        self.assertTrue('total_commandes_non_payees' in response.context)
        assert response.context['total_commandes_non_payees'] == 0.0

    def test_field_names_in_context(self):
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        response = self.client.get(reverse('commandes'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(str(response.context['user']), 'testuser1')
        self.assertTrue('field_names' in response.context)
        assert response.context['field_names'] == ['Evènement', 'Saison', 'Client', 'Forfait', 'Nb jours', 'Total',
                                                   'Payée', 'Moyen de paiement', 'Traitée par ACH', 'Date']
class CommandesDetailViewTest(TestCase):
    """Tests for the command detail view (URL, name resolution, template)."""

    @classmethod
    def setUpTestData(cls):
        # Create one user for login-protected views.
        test_user1 = User.objects.create_user(username='testuser1', password='1X<ISRUkw+tuK')
        test_user1.save()
        client = Client.objects.create(nom='Rodriguez', prenom='Jean', adresse='13 rue de la paix', code_postal=75000,
                                       commune='Paris', telephone='0600112233', email='jean.villard@yahooo.com', )
        infos_techniques = InfosTechniques.objects.create(matricule_compteur='674', num_armoire='CH02')
        saison = Saison.objects.create(nom='2020 - Octobre')
        forfait = Forfait.objects.create(nom='Forfait 2', description='Puissance inférieure à 18kVA', prix_ht=14.17,
                                         taxe=20.00,
                                         prix_ttc=17.00, saison=saison)
        event = Evenement.objects.create(nom='Brocante des Quinquonces', ville='Bordeaux', type='Brocante')
        Commande.objects.create(saison=saison, puissance=18, forfait=forfait, nb_jours=23, client=client,
                                infos_techniques=infos_techniques,
                                evenement=event, payee=True)

    def test_redirect_if_not_logged_in(self):
        id_commande = Commande.objects.last().id
        # NOTE(review): 'comande_detail' (single m) presumably matches the
        # URLconf name — verify against the project's urls.py before renaming.
        response = self.client.get(reverse('comande_detail', kwargs={'pk': id_commande}))
        self.assertRedirects(response, '/users/login/?next=/commande/{}'.format(id_commande))

    def test_view_url_exists_at_desired_location(self):
        id_commande = Commande.objects.last().id
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        response = self.client.get('/commande/{}'.format(id_commande))
        self.assertEqual(str(response.context['user']), 'testuser1')
        assert response.status_code == 200

    def test_view_url_accessible_by_name(self):
        id_commande = Commande.objects.last().id
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        response = self.client.get(reverse('comande_detail', kwargs={'pk': id_commande}))
        self.assertEqual(str(response.context['user']), 'testuser1')
        assert response.status_code == 200

    def test_view_uses_correct_template(self):
        id_commande = Commande.objects.last().id
        self.client.login(username='testuser1', password='1X<ISRUkw+tuK')
        response = self.client.get(reverse('comande_detail', kwargs={'pk': id_commande}))
        assert response.status_code == 200
        self.assertEqual(str(response.context['user']), 'testuser1')
        self.assertTemplateUsed(response, 'general/commande_detail.html')
| 51.929915
| 118
| 0.643504
| 3,565
| 30,379
| 5.330435
| 0.070126
| 0.052097
| 0.034889
| 0.071041
| 0.906173
| 0.89128
| 0.875862
| 0.86239
| 0.847971
| 0.828132
| 0
| 0.033665
| 0.219724
| 30,379
| 584
| 119
| 52.018836
| 0.768014
| 0.013529
| 0
| 0.722628
| 0
| 0
| 0.172588
| 0.024108
| 0
| 0
| 0
| 0
| 0.231144
| 1
| 0.126521
| false
| 0.109489
| 0.012165
| 0
| 0.155718
| 0.002433
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
d2a173a4cf728c83aadc5a1a23497211c7fd2889
| 146,578
|
py
|
Python
|
nayan/test_views.py
|
patilnayan92/etonlinetest
|
42b57cb6f10e518be99faa47e3f9f57a1a54b413
|
[
"Python-2.0"
] | 2
|
2019-03-06T02:17:25.000Z
|
2019-10-03T17:43:26.000Z
|
nayan/test_views.py
|
patilnayan92/etonlinetest
|
42b57cb6f10e518be99faa47e3f9f57a1a54b413
|
[
"Python-2.0"
] | null | null | null |
nayan/test_views.py
|
patilnayan92/etonlinetest
|
42b57cb6f10e518be99faa47e3f9f57a1a54b413
|
[
"Python-2.0"
] | 4
|
2019-02-01T16:10:40.000Z
|
2020-08-30T02:44:39.000Z
|
from datetime import datetime
import pytz
import os
import json
try:
from StringIO import StringIO as string_io
except ImportError:
from io import BytesIO as string_io
import zipfile
import shutil
from textwrap import dedent
from django.contrib.auth.models import Group
from django.contrib.auth import authenticate
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test import Client
from django.utils import timezone
from django.core import mail
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
from nayan.models import User, Profile, Question, Quiz, QuestionPaper,\
QuestionSet, AnswerPaper, Answer, Course, StandardTestCase,\
AssignmentUpload, FileUpload, McqTestCase, IntegerTestCase, StringTestCase,\
FloatTestCase, FIXTURES_DIR_PATH
from nayan.decorators import user_has_profile
class TestUserRegistration(TestCase):
    """Registration view test: POSTing the form creates a User with an
    attached Profile populated from the submitted fields."""

    def setUp(self):
        self.client = Client()

    def tearDown(self):
        # NOTE(review): relies on test_register_user_post having set
        # self.registered_user; adding another test to this class without
        # creating that attribute would make tearDown raise AttributeError.
        self.registered_user.delete()

    def test_register_user_post(self):
        response = self.client.post(reverse('nayan:register'),
                                    data={'username': 'register_user',
                                          'email':'register_user@mail.com', 'password': 'reg_user',
                                          'confirm_password': 'reg_user', 'first_name': 'user1_f_name',
                                          'last_name': 'user1_l_name', 'roll_number': '1',
                                          'institute': 'demo_institute', 'department': 'demo_dept',
                                          'position': 'student', 'timezone': pytz.utc.zone
                                          }
                                    )
        # The view must have created both the User and its related Profile.
        self.registered_user = User.objects.get(username='register_user')
        self.assertEqual(self.registered_user.email, 'register_user@mail.com')
        self.assertEqual(self.registered_user.first_name, 'user1_f_name')
        self.assertEqual(self.registered_user.last_name, 'user1_l_name')
        self.assertEqual(self.registered_user.profile.roll_number, '1')
        self.assertEqual(self.registered_user.profile.institute, 'demo_institute')
        self.assertEqual(self.registered_user.profile.department, 'demo_dept')
        self.assertEqual(self.registered_user.profile.position, 'student')
        self.assertEqual(self.registered_user.profile.timezone, 'UTC')
class TestProfile(TestCase):
    """Tests for the profile view/edit views and the email-verification flow.

    Two fixtures are used throughout: ``user1`` has no Profile row,
    ``user2`` has a complete Profile.
    """

    def setUp(self):
        self.client = Client()
        # Create User without profile
        self.user1_plaintext_pass = 'demo1'
        self.user1 = User.objects.create_user(
            username='demo_user1',
            password=self.user1_plaintext_pass,
            email='demo1@test.com'
        )
        # Create User with profile
        self.user2_plaintext_pass = 'demo2'
        self.user2 = User.objects.create_user(
            username='demo_user2',
            password=self.user2_plaintext_pass,
            first_name='first_name',
            last_name='last_name',
            email='demo2@test.com'
        )
        Profile.objects.create(
            user=self.user2,
            roll_number=10,
            institute='IIT',
            department='Chemical',
            position='Student',
            timezone='UTC'
        )

    def tearDown(self):
        self.client.logout()
        self.user1.delete()
        self.user2.delete()

    def test_user_has_profile_for_user_without_profile(self):
        """
        If no profile exists for user passed as argument return False
        """
        has_profile_status = user_has_profile(self.user1)
        self.assertFalse(has_profile_status)

    def test_user_has_profile_for_user_with_profile(self):
        """
        If profile exists for user passed as argument return True
        """
        has_profile_status = user_has_profile(self.user2)
        self.assertTrue(has_profile_status)

    def test_view_profile_denies_anonymous(self):
        """
        If not logged in redirect to login page
        """
        response = self.client.get(reverse('nayan:view_profile'), follow=True)
        redirect_destination = '/exam/login/?next=/exam/viewprofile/'
        self.assertRedirects(response, redirect_destination)

    def test_view_profile_get_for_user_without_profile(self):
        """
        If no profile exists a blank profile form will be displayed
        """
        self.client.login(
            username=self.user1.username,
            password=self.user1_plaintext_pass
        )
        response = self.client.get(reverse('nayan:view_profile'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'nayan/editprofile.html')

    def test_view_profile_get_for_user_with_profile(self):
        """
        If profile exists a viewprofile.html template will be rendered
        """
        self.client.login(
            username=self.user2.username,
            password=self.user2_plaintext_pass
        )
        response = self.client.get(reverse('nayan:view_profile'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'nayan/view_profile.html')

    def test_email_verification_for_user_post(self):
        """
        POST request to verify email
        """
        self.client.login(
            username=self.user2.username,
            password=self.user2_plaintext_pass
        )
        post_response = self.client.post(reverse('nayan:new_activation'),
            data={'email': self.user2.email}
        )
        subject = mail.outbox[0].subject.replace(" ", "_")
        # The activation key is the last path segment on the third line of
        # the verification email body.
        activation_key = mail.outbox[0].body.split("\n")[2].split("/")[-1]
        get_response = self.client.get(reverse('nayan:activate',
            kwargs={'key': activation_key}),
            follow=True
        )
        # Re-fetch to observe the flag written by the activate view.
        updated_profile_user = User.objects.get(id=self.user2.id)
        updated_profile = Profile.objects.get(user=updated_profile_user)
        self.assertEqual(post_response.status_code, 200)
        self.assertEqual(subject, "nayan_Email_Verification")
        self.assertEqual(get_response.status_code, 200)
        self.assertEqual(updated_profile.is_email_verified, True)
        self.assertTemplateUsed(get_response, 'nayan/activation_status.html')

    def test_edit_profile_post(self):
        """
        POST request to edit_profile view should update the user's profile
        """
        self.client.login(
            username=self.user2.username,
            password=self.user2_plaintext_pass
        )
        response = self.client.post(reverse('nayan:edit_profile'),
            data={
                'user': self.user2,
                'first_name': 'new_first_name',
                'last_name': 'new_last_name',
                'roll_number': 20,
                'institute': 'new_institute',
                'department': 'Aerospace',
                'position': 'new_position',
                'timezone': 'UTC'
            }
        )
        updated_profile_user = User.objects.get(id=self.user2.id)
        updated_profile = Profile.objects.get(user=updated_profile_user)
        self.assertEqual(updated_profile_user.first_name, 'new_first_name')
        self.assertEqual(updated_profile_user.last_name, 'new_last_name')
        # roll_number is stored as a string field, hence '20' not 20.
        self.assertEqual(updated_profile.roll_number, '20')
        self.assertEqual(updated_profile.institute, 'new_institute')
        self.assertEqual(updated_profile.department, 'Aerospace')
        self.assertEqual(updated_profile.position, 'new_position')
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'nayan/profile_updated.html')

    def test_edit_profile_post_for_user_without_profile(self):
        """
        POST request to edit_profile view should update the user's profile
        """
        self.client.login(
            username=self.user1.username,
            password=self.user1_plaintext_pass
        )
        response = self.client.post(reverse('nayan:edit_profile'),
            data={
                'user': self.user1,
                'first_name': 'new_first_name',
                'last_name': 'new_last_name',
                'roll_number': 21,
                'institute': 'new_institute',
                'department': 'Aerospace',
                'position': 'new_position',
                'timezone': 'UTC'
            }
        )
        updated_profile_user = User.objects.get(id=self.user1.id)
        updated_profile = Profile.objects.get(user=updated_profile_user)
        self.assertEqual(updated_profile_user.first_name, 'new_first_name')
        self.assertEqual(updated_profile_user.last_name, 'new_last_name')
        self.assertEqual(updated_profile.roll_number, '21')
        self.assertEqual(updated_profile.institute, 'new_institute')
        self.assertEqual(updated_profile.department, 'Aerospace')
        self.assertEqual(updated_profile.position, 'new_position')
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'nayan/profile_updated.html')

    def test_edit_profile_get(self):
        """
        GET request to edit profile should display profile form
        """
        self.client.login(
            username=self.user2.username,
            password=self.user2_plaintext_pass
        )
        response = self.client.get(reverse('nayan:edit_profile'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'nayan/editprofile.html')

    def test_edit_profile_get_for_user_without_profile(self):
        """
        If no profile exists a blank profile form will be displayed
        """
        self.client.login(
            username=self.user1.username,
            password=self.user1_plaintext_pass
        )
        response = self.client.get(reverse('nayan:edit_profile'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'nayan/editprofile.html')

    def test_edit_profile_get_for_user_with_profile(self):
        """
        If profile exists a editprofile.html template will be rendered
        """
        self.client.login(
            username=self.user2.username,
            password=self.user2_plaintext_pass
        )
        response = self.client.get(reverse('nayan:edit_profile'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'nayan/editprofile.html')

    def test_update_email_for_user_post(self):
        """ POST request to update email if multiple users with same email are
        found
        """
        self.client.login(
            username=self.user2.username,
            password=self.user2_plaintext_pass
        )
        response = self.client.post(reverse('nayan:update_email'),
            data={
                'username': self.user2.username,
                'email': "demo_user2@mail.com"
            }
        )
        updated_user = User.objects.get(id=self.user2.id)
        self.assertEqual(updated_user.email, "demo_user2@mail.com")
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'nayan/activation_status.html')
class TestStudentDashboard(TestCase):
    """Tests for the student quiz-list dashboard (all / enrolled / hidden courses)."""

    def setUp(self):
        self.client = Client()
        # student
        self.student_plaintext_pass = 'student'
        self.student = User.objects.create_user(
            username='student',
            password=self.student_plaintext_pass,
            first_name='first_name',
            last_name='last_name',
            email='student@test.com'
        )
        Profile.objects.create(
            user=self.student,
            roll_number=10,
            institute='IIT',
            department='Chemical',
            position='student',
            timezone='UTC'
        )
        # student without profile
        self.student_no_profile_plaintext_pass = 'student2'
        self.student_no_profile = User.objects.create_user(
            username='student_no_profile',
            password=self.student_no_profile_plaintext_pass,
            first_name='first_name',
            last_name='last_name',
            email='student_no_profile@test.com'
        )
        # moderator
        self.user_plaintext_pass = 'demo'
        self.user = User.objects.create_user(
            username='demo_user',
            password=self.user_plaintext_pass,
            first_name='first_name',
            last_name='last_name',
            email='demo@test.com'
        )
        Profile.objects.create(
            user=self.user,
            roll_number=10,
            institute='IIT',
            department='Chemical',
            position='Moderator',
            timezone='UTC'
        )
        self.course = Course.objects.create(name="Python Course",
            enrollment="Enroll Request", creator=self.user)
        # Hidden course is only discoverable via its course code ("hide").
        self.hidden_course = Course.objects.create(name="Hidden Course",
            enrollment="Enroll Request", creator=self.user, code="hide",
            hidden=True)

    def tearDown(self):
        self.client.logout()
        self.user.delete()
        # Delete every object created in setUp, matching the other test
        # classes; previously student, student_no_profile and hidden_course
        # leaked out of this test case.
        self.student.delete()
        self.student_no_profile.delete()
        self.course.delete()
        self.hidden_course.delete()

    def test_student_dashboard_denies_anonymous_user(self):
        """
        Check student dashboard denies anonymous user
        """
        response = self.client.get(reverse('nayan:quizlist_user'),
            follow=True
        )
        self.assertEqual(response.status_code, 200)
        redirection_url = '/exam/login/?next=/exam/quizzes/'
        self.assertRedirects(response, redirection_url)

    def test_student_dashboard_get_for_user_without_profile(self):
        """
        If no profile exists a blank profile form will be displayed
        """
        self.client.login(
            username=self.student_no_profile.username,
            password=self.student_no_profile_plaintext_pass
        )
        response = self.client.get(reverse('nayan:quizlist_user'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'nayan/editprofile.html')

    def test_student_dashboard_get_for_user_with_profile(self):
        """
        If profile exists a editprofile.html template will be rendered
        """
        self.client.login(
            username=self.student.username,
            password=self.student_plaintext_pass
        )
        response = self.client.get(reverse('nayan:quizlist_user'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'nayan/quizzes_user.html')

    def test_student_dashboard_all_courses_get(self):
        """
        Check student dashboard for all non hidden courses
        """
        self.client.login(
            username=self.student.username,
            password=self.student_plaintext_pass
        )
        response = self.client.get(reverse('nayan:quizlist_user'),
            follow=True
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "nayan/quizzes_user.html")
        self.assertEqual(response.context['title'], 'All Courses')
        self.assertEqual(response.context['courses'][0], self.course)

    def test_student_dashboard_enrolled_courses_get(self):
        """
        Check student dashboard for all courses in which student is
        enrolled
        """
        self.client.login(
            username=self.student.username,
            password=self.student_plaintext_pass
        )
        self.course.students.add(self.student)
        response = self.client.get(reverse('nayan:quizlist_user',
            kwargs={'enrolled': "enrolled"}),
            follow=True
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "nayan/quizzes_user.html")
        self.assertEqual(response.context['title'], 'Enrolled Courses')
        self.assertEqual(response.context['courses'][0], self.course)

    def test_student_dashboard_hidden_courses_post(self):
        """
        Get courses for student based on the course code
        """
        self.client.login(
            username=self.student.username,
            password=self.student_plaintext_pass
        )
        response = self.client.post(reverse('nayan:quizlist_user'),
            data={'course_code': 'hide'}
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "nayan/quizzes_user.html")
        self.assertEqual(response.context['title'], 'Search')
        self.assertEqual(response.context['courses'][0], self.hidden_course)
class TestMonitor(TestCase):
    """Tests for the moderator monitor views (quiz list, results, user data)."""

    def setUp(self):
        self.client = Client()
        self.mod_group = Group.objects.create(name='moderator')
        tzone = pytz.timezone('UTC')
        # Create Moderator with profile
        self.user_plaintext_pass = 'demo'
        self.user = User.objects.create_user(
            username='demo_user',
            password=self.user_plaintext_pass,
            first_name='first_name',
            last_name='last_name',
            email='demo@test.com'
        )
        Profile.objects.create(
            user=self.user,
            roll_number=10,
            institute='IIT',
            department='Chemical',
            position='Moderator',
            timezone='UTC'
        )
        # Create Student
        self.student_plaintext_pass = 'demo_student'
        self.student = User.objects.create_user(
            username='demo_student',
            password=self.student_plaintext_pass,
            first_name='student_first_name',
            last_name='student_last_name',
            email='demo_student@test.com'
        )
        Profile.objects.create(
            user=self.student,
            roll_number=10,
            institute='IIT',
            department='Chemical',
            position='Moderator',
            timezone='UTC'
        )
        # Add to moderator group
        self.mod_group.user_set.add(self.user)
        self.course = Course.objects.create(name="Python Course",
            enrollment="Open Enrollment", creator=self.user)
        self.quiz = Quiz.objects.create(
            start_date_time=datetime(2014, 10, 9, 10, 8, 15, 0, tzone),
            end_date_time=datetime(2015, 10, 9, 10, 8, 15, 0, tzone),
            duration=30, active=True, instructions="Demo Instructions",
            attempts_allowed=-1, time_between_attempts=0,
            description='demo quiz', pass_criteria=40,
            language='Python', course=self.course
        )
        self.question = Question.objects.create(
            summary="Test_question", description="Add two numbers",
            points=1.0, language="python", type="code", user=self.user
        )
        # fixed_question_order holds question *ids*; str(self.question)
        # stored the model's string form instead (the other test classes
        # correctly use str(self.question.id)).
        self.question_paper = QuestionPaper.objects.create(quiz=self.quiz,
            total_marks=1.0, fixed_question_order=str(self.question.id)
        )
        self.question_paper.fixed_questions.add(self.question)
        user_answer = "def add(a, b)\n\treturn a+b"
        self.new_answer = Answer(question=self.question, answer=user_answer,
            correct=True, error=json.dumps([]))
        self.new_answer.save()
        self.answerpaper = AnswerPaper.objects.create(
            user=self.student, question_paper=self.question_paper,
            attempt_number=1,
            start_time=datetime(2014, 10, 9, 10, 8, 15, 0, tzone),
            end_time=datetime(2014, 10, 9, 10, 15, 15, 0, tzone),
            user_ip="127.0.0.1", status="completed", passed=True,
            percent=1, marks_obtained=1
        )
        self.answerpaper.answers.add(self.new_answer)
        self.answerpaper.questions_answered.add(self.question)

    def tearDown(self):
        self.client.logout()
        self.user.delete()
        self.student.delete()
        self.quiz.delete()
        self.course.delete()
        self.answerpaper.delete()
        self.question.delete()
        self.question_paper.delete()
        self.new_answer.delete()

    def test_monitor_denies_student(self):
        """
        Check Monitor denies student
        """
        self.client.login(
            username=self.student.username,
            password=self.student_plaintext_pass
        )
        response = self.client.get(reverse('nayan:monitor'),
            follow=True
        )
        self.assertEqual(response.status_code, 404)

    def test_monitor_display_quizzes(self):
        """
        Check all the available quizzes in monitor
        """
        self.client.login(
            username=self.user.username,
            password=self.user_plaintext_pass
        )
        response = self.client.get(reverse('nayan:monitor'),
            follow=True
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "nayan/monitor.html")
        self.assertEqual(response.context['course_details'][0], self.course)
        self.assertEqual(response.context['msg'], "Monitor")

    def test_monitor_display_quiz_results(self):
        """
        Check all the quiz results in monitor
        """
        self.client.login(
            username=self.user.username,
            password=self.user_plaintext_pass
        )
        response = self.client.get(reverse('nayan:monitor',
            kwargs={'quiz_id': self.quiz.id}),
            follow=True
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "nayan/monitor.html")
        self.assertEqual(response.context['msg'], "Quiz Results")
        self.assertEqual(response.context['papers'][0], self.answerpaper)
        self.assertEqual(response.context['latest_attempts'][0], self.answerpaper)

    def test_get_quiz_user_data(self):
        """
        Check for getting user data for a quiz
        """
        self.client.login(
            username=self.user.username,
            password=self.user_plaintext_pass
        )
        response = self.client.get(reverse('nayan:user_data',
            kwargs={'user_id': self.student.id,
                    'questionpaper_id': self.question_paper.id}),
            follow=True
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'nayan/user_data.html')
        self.assertEqual(response.context['data']['papers'][0], self.answerpaper)
        self.assertEqual(response.context['data']['profile'], self.student.profile)
        self.assertEqual(response.context['data']['user'], self.student)
        # URL kwargs arrive as strings, so the id is compared as str.
        self.assertEqual(response.context['data']['questionpaperid'],
            str(self.question_paper.id))
class TestGradeUser(TestCase):
    """Tests for the grade_user moderator views: listing quizzes/users,
    viewing a student's attempts and updating marks.
    """

    def setUp(self):
        self.client = Client()
        self.mod_group = Group.objects.create(name='moderator')
        tzone = pytz.timezone('UTC')
        # Create Moderator with profile
        self.user_plaintext_pass = 'demo'
        self.user = User.objects.create_user(
            username='demo_user',
            password=self.user_plaintext_pass,
            first_name='first_name',
            last_name='last_name',
            email='demo@test.com'
        )
        Profile.objects.create(
            user=self.user,
            roll_number=10,
            institute='IIT',
            department='Chemical',
            position='Moderator',
            timezone='UTC'
        )
        # Create Student
        self.student_plaintext_pass = 'demo_student'
        self.student = User.objects.create_user(
            username='demo_student',
            password=self.student_plaintext_pass,
            first_name='student_first_name',
            last_name='student_last_name',
            email='demo_student@test.com'
        )
        Profile.objects.create(
            user=self.student,
            roll_number=10,
            institute='IIT',
            department='Chemical',
            position='Moderator',
            timezone='UTC'
        )
        # Add to moderator group
        self.mod_group.user_set.add(self.user)
        self.course = Course.objects.create(name="Python Course",
            enrollment="Open Enrollment", creator=self.user)
        self.quiz = Quiz.objects.create(
            start_date_time=datetime(2014, 10, 9, 10, 8, 15, 0, tzone),
            end_date_time=datetime(2015, 10, 9, 10, 8, 15, 0, tzone),
            duration=30, active=True, instructions="Demo Instructions",
            attempts_allowed=-1, time_between_attempts=0,
            description='demo quiz', pass_criteria=40,
            language='Python', course=self.course
        )
        self.question = Question.objects.create(
            summary="Test_question", description="Add two numbers",
            points=1.0, language="python", type="code", user=self.user
        )
        self.question_paper = QuestionPaper.objects.create(quiz=self.quiz,
            total_marks=1.0, fixed_question_order=str(self.question.id)
        )
        self.question_paper.fixed_questions.add(self.question)
        user_answer = "def add(a, b)\n\treturn a+b"
        # Initial marks are 0.5 so test_grade_user_update_user_marks can
        # observe the change to 1.0.
        self.new_answer = Answer(question=self.question, answer=user_answer,
            correct=True, error=json.dumps([]), marks=0.5)
        self.new_answer.save()
        self.answerpaper = AnswerPaper.objects.create(
            user=self.student, question_paper=self.question_paper,
            attempt_number=1,
            start_time=datetime(2014, 10, 9, 10, 8, 15, 0, tzone),
            end_time=datetime(2014, 10, 9, 10, 15, 15, 0, tzone),
            user_ip="127.0.0.1", status="completed", passed=True,
            marks_obtained=0.5
        )
        self.answerpaper.answers.add(self.new_answer)
        self.answerpaper.questions_answered.add(self.question)
        self.answerpaper.questions.add(self.question)

    def tearDown(self):
        self.client.logout()
        self.user.delete()
        self.student.delete()
        self.quiz.delete()
        self.course.delete()
        self.answerpaper.delete()
        self.question.delete()
        self.question_paper.delete()
        self.new_answer.delete()

    def test_grade_user_denies_student(self):
        """
        Check Grade User denies student
        """
        self.client.login(
            username=self.student.username,
            password=self.student_plaintext_pass
        )
        response = self.client.get(reverse('nayan:grade_user'),
            follow=True
        )
        self.assertEqual(response.status_code, 404)

    def test_grade_user_display_quizzes(self):
        """
        Check all the available quizzes in grade user
        """
        self.client.login(
            username=self.user.username,
            password=self.user_plaintext_pass
        )
        response = self.client.get(reverse('nayan:grade_user'),
            follow=True
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "nayan/grade_user.html")
        self.assertEqual(response.context['course_details'][0], self.course)

    def test_grade_user_get_quiz_users(self):
        """
        Check all the available users in quiz in grade user
        """
        self.client.login(
            username=self.user.username,
            password=self.user_plaintext_pass
        )
        response = self.client.get(reverse('nayan:grade_user',
            kwargs={"quiz_id": self.quiz.id}),
            follow=True
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "nayan/grade_user.html")
        # 'users' is a values() queryset, hence dict-style access.
        self.assertEqual(response.context['users'][0]['user__first_name'],
            self.student.first_name)
        self.assertEqual(response.context['quiz'], self.quiz)
        self.assertFalse(response.context['has_quiz_assignments'])

    def test_grade_user_get_quiz_user_data(self):
        """
        Check student attempts and answers
        """
        self.client.login(
            username=self.user.username,
            password=self.user_plaintext_pass
        )
        response = self.client.get(reverse('nayan:grade_user',
            kwargs={"quiz_id": self.quiz.id,
                    "user_id": self.student.id}),
            follow=True
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "nayan/grade_user.html")
        self.assertFalse(response.context['has_user_assignments'])
        # URL kwargs reach the context as strings.
        self.assertEqual(response.context['quiz_id'], str(self.quiz.id))
        self.assertEqual(response.context['user_id'], str(self.student.id))
        self.assertEqual(response.context['attempts'][0], self.answerpaper)

    def test_grade_user_update_user_marks(self):
        """
        Check update marks of student
        """
        self.client.login(
            username=self.user.username,
            password=self.user_plaintext_pass
        )
        self.client.get(reverse('nayan:grade_user',
            kwargs={"quiz_id": self.quiz.id,
                    "user_id": self.student.id}),
            follow=True
        )
        # The grading form posts per-question marks as "q<id>_marks".
        question_marks = "q{0}_marks".format(self.question.id)
        response = self.client.post(reverse('nayan:grade_user',
            kwargs={"quiz_id": self.quiz.id,
                    "user_id": self.student.id,
                    "attempt_number": self.answerpaper.attempt_number}),
            data={question_marks: 1.0}
        )
        updated_ans_paper = AnswerPaper.objects.get(user=self.student,
            question_paper=self.question_paper,
            attempt_number=self.answerpaper.attempt_number
        )
        updated_ans = Answer.objects.get(question=self.question)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "nayan/grade_user.html")
        self.assertEqual(updated_ans.marks, 1.0)
        self.assertEqual(updated_ans_paper.marks_obtained, 1.0)
class TestDownloadAssignment(TestCase):
    """Tests for downloading uploaded assignment files as zip archives,
    both per quiz and per user.
    """

    def setUp(self):
        self.client = Client()
        self.mod_group = Group.objects.create(name='moderator')
        tzone = pytz.timezone('UTC')
        # Create Moderator with profile
        self.user_plaintext_pass = 'demo'
        self.user = User.objects.create_user(
            username='demo_user',
            password=self.user_plaintext_pass,
            first_name='first_name',
            last_name='last_name',
            email='demo@test.com'
        )
        Profile.objects.create(
            user=self.user,
            roll_number=10,
            institute='IIT',
            department='Chemical',
            position='Moderator',
            timezone='UTC'
        )
        # Add to moderator group
        self.mod_group.user_set.add(self.user)
        # Create Student 1
        self.student1_plaintext_pass = 'demo_student1'
        self.student1 = User.objects.create_user(
            username='demo_student1',
            password=self.student1_plaintext_pass,
            first_name='student1_first_name',
            last_name='student1_last_name',
            email='demo_student1@test.com'
        )
        # Create Student 2
        self.student2_plaintext_pass = 'demo_student2'
        self.student2 = User.objects.create_user(
            username='demo_student2',
            password=self.student2_plaintext_pass,
            first_name='student2_first_name',
            last_name='student2_last_name',
            email='demo_student2@test.com'
        )
        self.course = Course.objects.create(name="Python Course",
            enrollment="Enroll Request", creator=self.user)
        self.quiz = Quiz.objects.create(
            start_date_time=datetime(2014, 10, 9, 10, 8, 15, 0, tzone),
            end_date_time=datetime(2015, 10, 9, 10, 8, 15, 0, tzone),
            duration=30, active=True, instructions="Demo Instructions",
            attempts_allowed=-1, time_between_attempts=0,
            description='demo_quiz', pass_criteria=40,
            language='Python', course=self.course
        )
        self.question = Question.objects.create(
            summary="Test_question", description="Assignment Upload",
            points=1.0, language="python", type="upload", user=self.user
        )
        self.question_paper = QuestionPaper.objects.create(quiz=self.quiz,
            total_marks=1.0, fixed_question_order=str(self.question.id)
        )
        self.question_paper.fixed_questions.add(self.question)
        # Create one assignment upload per student.  (A third file was
        # previously created here but never attached to any upload.)
        assignment_file1 = SimpleUploadedFile("file1.txt", b"Test")
        assignment_file2 = SimpleUploadedFile("file2.txt", b"Test")
        self.assignment1 = AssignmentUpload.objects.create(user=self.student1,
            assignmentQuestion=self.question,
            assignmentFile=assignment_file1,
            question_paper=self.question_paper
        )
        self.assignment2 = AssignmentUpload.objects.create(user=self.student2,
            assignmentQuestion=self.question,
            assignmentFile=assignment_file2,
            question_paper=self.question_paper
        )

    def tearDown(self):
        self.client.logout()
        self.user.delete()
        self.student1.delete()
        self.student2.delete()
        self.assignment1.delete()
        self.assignment2.delete()
        self.quiz.delete()
        self.course.delete()
        # Remove the on-disk upload directory created under MEDIA_ROOT.
        dir_name = self.quiz.description.replace(" ", "_")
        file_path = os.sep.join((settings.MEDIA_ROOT, dir_name))
        if os.path.exists(file_path):
            shutil.rmtree(file_path)

    def test_download_assignment_denies_student(self):
        """
        Check download assignment denies student
        """
        self.client.login(
            username=self.student1.username,
            password=self.student1_plaintext_pass
        )
        response = self.client.get(reverse('nayan:download_quiz_assignment',
            kwargs={'quiz_id': self.quiz.id}),
            follow=True
        )
        self.assertEqual(response.status_code, 404)

    def test_download_assignment_per_quiz(self):
        """
        Check for download assignments per quiz
        """
        self.client.login(
            username=self.user.username,
            password=self.user_plaintext_pass
        )
        response = self.client.get(reverse('nayan:download_quiz_assignment',
            kwargs={'quiz_id': self.quiz.id}),
            follow=True
        )
        file_name = "{0}_Assignment_files.zip".format(self.quiz.description)
        file_name = file_name.replace(" ", "_")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.get('Content-Disposition'),
            "attachment; filename={0}".format(file_name))
        # The response body is a zip archive containing both uploads.
        zip_file = string_io(response.content)
        zipped_file = zipfile.ZipFile(zip_file, 'r')
        self.assertIsNone(zipped_file.testzip())
        self.assertIn('file1.txt', zipped_file.namelist()[0])
        self.assertIn('file2.txt', zipped_file.namelist()[1])
        zip_file.close()
        zipped_file.close()

    def test_download_assignment_per_user(self):
        """
        Check for download assignments per quiz
        """
        self.client.login(
            username=self.user.username,
            password=self.user_plaintext_pass
        )
        response = self.client.get(reverse('nayan:download_user_assignment',
            kwargs={'quiz_id': self.quiz.id,
                    'question_id': self.question.id,
                    'user_id': self.student2.id
                    }),
            follow=True
        )
        file_name = "{0}.zip".format(self.student2.get_full_name())
        file_name = file_name.replace(" ", "_")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.get('Content-Disposition'),
            "attachment; filename={0}".format(file_name))
        # Only student2's upload should be present.
        zip_file = string_io(response.content)
        zipped_file = zipfile.ZipFile(zip_file, 'r')
        self.assertIsNone(zipped_file.testzip())
        self.assertIn('file2.txt', zipped_file.namelist()[0])
        zip_file.close()
        zipped_file.close()
class TestAddQuiz(TestCase):
    """Tests for the add_quiz / edit_quiz moderator views."""

    def setUp(self):
        self.client = Client()
        self.mod_group = Group.objects.create(name='moderator')
        tzone = pytz.timezone('UTC')
        # Create Moderator with profile
        self.user_plaintext_pass = 'demo'
        self.user = User.objects.create_user(
            username='demo_user',
            password=self.user_plaintext_pass,
            first_name='first_name',
            last_name='last_name',
            email='demo@test.com'
        )
        Profile.objects.create(
            user=self.user,
            roll_number=10,
            institute='IIT',
            department='Chemical',
            position='Moderator',
            timezone='UTC'
        )
        # Create Student
        self.student_plaintext_pass = 'demo_student'
        self.student = User.objects.create_user(
            username='demo_student',
            password=self.student_plaintext_pass,
            first_name='student_first_name',
            last_name='student_last_name',
            email='demo_student@test.com'
        )
        # Add to moderator group
        self.mod_group.user_set.add(self.user)
        self.course = Course.objects.create(name="Python Course",
            enrollment="Enroll Request", creator=self.user)
        self.pre_req_quiz = Quiz.objects.create(
            start_date_time=datetime(2014, 2, 1, 5, 8, 15, 0, tzone),
            end_date_time=datetime(2015, 10, 9, 10, 8, 15, 0, tzone),
            duration=30, active=True, instructions="Demo Instructions",
            attempts_allowed=-1, time_between_attempts=0,
            description='pre requisite quiz', pass_criteria=40,
            language='Python', prerequisite=None,
            course=self.course
        )
        self.quiz = Quiz.objects.create(
            start_date_time=datetime(2014, 10, 9, 10, 8, 15, 0, tzone),
            end_date_time=datetime(2015, 10, 9, 10, 8, 15, 0, tzone),
            duration=30, active=True, instructions="Demo Instructions",
            attempts_allowed=-1, time_between_attempts=0,
            description='demo quiz', pass_criteria=40,
            language='Python', prerequisite=self.pre_req_quiz,
            course=self.course
        )

    def tearDown(self):
        self.client.logout()
        self.user.delete()
        self.student.delete()
        self.quiz.delete()
        self.pre_req_quiz.delete()
        self.course.delete()

    def test_add_quiz_denies_anonymous(self):
        """
        If not logged in redirect to login page
        """
        response = self.client.get(reverse('nayan:add_quiz',
            kwargs={'course_id': self.course.id}),
            follow=True
        )
        redirect_destination = '/exam/login/?next=/exam/manage/addquiz/{0}/'.format(self.course.id)
        self.assertRedirects(response, redirect_destination)

    def test_add_quiz_denies_non_moderator(self):
        """
        If not moderator in redirect to login page
        """
        self.client.login(
            username=self.student.username,
            password=self.student_plaintext_pass
        )
        response = self.client.get(reverse('nayan:add_quiz',
            kwargs={'course_id': self.course.id}),
            follow=True
        )
        self.assertEqual(response.status_code, 404)

    def test_add_quiz_get(self):
        """
        GET request to add question should display add quiz form
        """
        self.client.login(
            username=self.user.username,
            password=self.user_plaintext_pass
        )
        response = self.client.get(reverse('nayan:add_quiz',
            kwargs={'course_id': self.course.id})
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'nayan/add_quiz.html')
        self.assertIsNotNone(response.context['form'])

    def test_add_quiz_post_existing_quiz(self):
        """
        POST request to add quiz should edit quiz if quiz exists
        """
        self.client.login(
            username=self.user.username,
            password=self.user_plaintext_pass
        )
        tzone = pytz.timezone('UTC')
        response = self.client.post(reverse('nayan:edit_quiz',
            kwargs={'course_id': self.course.id, 'quiz_id': self.quiz.id}),
            data={
                'start_date_time': '2016-01-10 09:00:15',
                'end_date_time': '2016-01-15 09:00:15',
                'duration': 30,
                'active': False,
                'attempts_allowed': 5,
                'time_between_attempts': 1,
                'description': 'updated demo quiz',
                'pass_criteria': 40,
                'language': 'java',
                'instructions': "Demo Instructions",
                'prerequisite': self.pre_req_quiz.id,
                'course': self.course.id
            }
        )
        updated_quiz = Quiz.objects.get(id=self.quiz.id)
        self.assertEqual(updated_quiz.start_date_time,
            datetime(2016, 1, 10, 9, 0, 15, 0, tzone)
        )
        self.assertEqual(updated_quiz.end_date_time,
            datetime(2016, 1, 15, 9, 0, 15, 0, tzone)
        )
        self.assertEqual(updated_quiz.duration, 30)
        self.assertEqual(updated_quiz.active, False)
        self.assertEqual(updated_quiz.attempts_allowed, 5)
        self.assertEqual(updated_quiz.time_between_attempts, 1)
        self.assertEqual(updated_quiz.description, 'updated demo quiz')
        self.assertEqual(updated_quiz.pass_criteria, 40)
        self.assertEqual(updated_quiz.language, 'java')
        self.assertEqual(updated_quiz.prerequisite, self.pre_req_quiz)
        self.assertEqual(updated_quiz.course, self.course)
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(response, '/exam/manage/courses/')

    def test_add_quiz_post_new_quiz(self):
        """
        POST request to add quiz should add new quiz if no quiz exists
        """
        self.client.login(
            username=self.user.username,
            password=self.user_plaintext_pass
        )
        tzone = pytz.timezone('UTC')
        response = self.client.post(reverse('nayan:add_quiz',
            kwargs={"course_id": self.course.id}),
            data={
                'start_date_time': '2016-01-10 09:00:15',
                'end_date_time': '2016-01-15 09:00:15',
                'duration': 50,
                'active': True,
                'attempts_allowed': -1,
                'time_between_attempts': 2,
                'description': 'new demo quiz',
                'pass_criteria': 50,
                'language': 'python',
                'instructions': "Demo Instructions",
                'prerequisite': self.pre_req_quiz.id,
                'course': self.course.id
            }
        )
        # The most recently created quiz is the one just added.
        quiz_list = Quiz.objects.all().order_by('-id')
        new_quiz = quiz_list[0]
        self.assertEqual(new_quiz.start_date_time,
            datetime(2016, 1, 10, 9, 0, 15, 0, tzone)
        )
        self.assertEqual(new_quiz.end_date_time,
            datetime(2016, 1, 15, 9, 0, 15, 0, tzone)
        )
        self.assertEqual(new_quiz.duration, 50)
        self.assertEqual(new_quiz.active, True)
        self.assertEqual(new_quiz.attempts_allowed, -1)
        self.assertEqual(new_quiz.time_between_attempts, 2)
        self.assertEqual(new_quiz.description, 'new demo quiz')
        self.assertEqual(new_quiz.pass_criteria, 50)
        self.assertEqual(new_quiz.language, 'python')
        self.assertEqual(new_quiz.prerequisite, self.pre_req_quiz)
        self.assertEqual(new_quiz.course, self.course)
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(response, '/exam/manage/courses/')
class TestAddTeacher(TestCase):
    """Tests for the ``nayan:add_teacher`` view (access control and POST)."""

    def setUp(self):
        """Create a moderator, a student, a course and two quizzes."""
        self.client = Client()
        self.mod_group = Group.objects.create(name='moderator')
        tzone = pytz.timezone('UTC')
        # Create Moderator with profile
        self.user_plaintext_pass = 'demo'
        self.user = User.objects.create_user(
            username='demo_user',
            password=self.user_plaintext_pass,
            first_name='first_name',
            last_name='last_name',
            email='demo@test.com'
        )
        Profile.objects.create(
            user=self.user,
            roll_number=10,
            institute='IIT',
            department='Chemical',
            position='Moderator',
            timezone='UTC'
        )
        # Create Student
        self.student_plaintext_pass = 'demo_student'
        self.student = User.objects.create_user(
            username='demo_student',
            password=self.student_plaintext_pass,
            first_name='student_first_name',
            last_name='student_last_name',
            email='demo_student@test.com'
        )
        # Add to moderator group
        self.mod_group.user_set.add(self.user)
        self.course = Course.objects.create(name="Python Course",
            enrollment="Enroll Request", creator=self.user)
        self.pre_req_quiz = Quiz.objects.create(
            start_date_time=datetime(2014, 2, 1, 5, 8, 15, 0, tzone),
            end_date_time=datetime(2015, 10, 9, 10, 8, 15, 0, tzone),
            duration=30, active=True,
            attempts_allowed=-1, time_between_attempts=0,
            description='pre requisite quiz', pass_criteria=40,
            language='Python', prerequisite=None,
            course=self.course
        )
        self.quiz = Quiz.objects.create(
            start_date_time=datetime(2014, 10, 9, 10, 8, 15, 0, tzone),
            end_date_time=datetime(2015, 10, 9, 10, 8, 15, 0, tzone),
            duration=30, active=True,
            attempts_allowed=-1, time_between_attempts=0,
            description='demo quiz', pass_criteria=40,
            language='Python', prerequisite=self.pre_req_quiz,
            course=self.course
        )

    def tearDown(self):
        """Log out and delete every fixture object created in setUp."""
        self.client.logout()
        self.user.delete()
        self.student.delete()
        self.quiz.delete()
        self.pre_req_quiz.delete()
        self.course.delete()

    def test_add_teacher_denies_anonymous(self):
        """
        If not logged in redirect to login page
        """
        response = self.client.get(reverse('nayan:add_teacher',
            kwargs={'course_id': self.course.id}),
            follow=True
        )
        redirect_destination = ('/exam/login/?next=/exam'
            '/manage/addteacher/{0}/'.format(self.course.id))
        self.assertRedirects(response, redirect_destination)

    def test_add_teacher_denies_non_moderator(self):
        """
        If not moderator redirect to login page
        """
        self.client.login(
            username=self.student.username,
            password=self.student_plaintext_pass
        )
        response = self.client.get(reverse('nayan:add_teacher',
            kwargs={'course_id': self.course.id}),
            follow=True
        )
        self.assertEqual(response.status_code, 404)

    def test_add_teacher_get(self):
        """
        GET request to add teacher should display list of teachers
        """
        self.client.login(
            username=self.user.username,
            password=self.user_plaintext_pass
        )
        response = self.client.get(reverse('nayan:add_teacher',
            kwargs={'course_id': self.course.id})
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'nayan/addteacher.html')
        self.assertEqual(response.context['course'], self.course)

    def test_add_teacher_post(self):
        """
        POST request to add teacher should add teachers to a course
        """
        self.client.login(
            username=self.user.username,
            password=self.user_plaintext_pass
        )
        teacher_id_list = []
        for i in range(5):
            teacher = User.objects.create_user(
                username='demo_teacher{}'.format(i),
                password='demo_teacher_pass{}'.format(i),
                first_name='teacher_first_name{}'.format(i),
                last_name='teacher_last_name{}'.format(i),
                email='demo{}@test.com'.format(i)
            )
            # The profile only needs to exist for the view to treat this
            # user as a teacher; the original unused local binding
            # (teacher_profile) has been dropped.
            Profile.objects.create(
                user=teacher,
                roll_number='T{}'.format(i),
                institute='IIT',
                department='Chemical',
                position='Teacher',
                timezone='UTC'
            )
            teacher_id_list.append(teacher.id)
        response = self.client.post(reverse('nayan:add_teacher',
            kwargs={'course_id': self.course.id}),
            data={'check': teacher_id_list}
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'nayan/addteacher.html')
        self.assertEqual(response.context['status'], True)
        for t_id in teacher_id_list:
            teacher_object = User.objects.get(id=t_id)
            self.assertIn(teacher_object, response.context['teachers_added'])
            self.assertIn(teacher_object, self.course.teachers.all())
class TestRemoveTeacher(TestCase):
    """Tests for the ``nayan:remove_teacher`` view (access control and POST)."""

    def setUp(self):
        """Create a moderator, a student, a course and two quizzes."""
        self.client = Client()
        self.mod_group = Group.objects.create(name='moderator')
        tzone = pytz.timezone('UTC')
        # Create Moderator with profile
        self.user_plaintext_pass = 'demo'
        self.user = User.objects.create_user(
            username='demo_user',
            password=self.user_plaintext_pass,
            first_name='first_name',
            last_name='last_name',
            email='demo@test.com'
        )
        Profile.objects.create(
            user=self.user,
            roll_number=10,
            institute='IIT',
            department='Chemical',
            position='Moderator',
            timezone='UTC'
        )
        # Create Student
        self.student_plaintext_pass = 'demo_student'
        self.student = User.objects.create_user(
            username='demo_student',
            password=self.student_plaintext_pass,
            first_name='student_first_name',
            last_name='student_last_name',
            email='demo_student@test.com'
        )
        # Add to moderator group
        self.mod_group.user_set.add(self.user)
        self.course = Course.objects.create(name="Python Course",
            enrollment="Enroll Request", creator=self.user)
        self.pre_req_quiz = Quiz.objects.create(
            start_date_time=datetime(2014, 2, 1, 5, 8, 15, 0, tzone),
            end_date_time=datetime(2015, 10, 9, 10, 8, 15, 0, tzone),
            duration=30, active=True,
            attempts_allowed=-1, time_between_attempts=0,
            description='pre requisite quiz', pass_criteria=40,
            language='Python', prerequisite=None,
            course=self.course
        )
        self.quiz = Quiz.objects.create(
            start_date_time=datetime(2014, 10, 9, 10, 8, 15, 0, tzone),
            end_date_time=datetime(2015, 10, 9, 10, 8, 15, 0, tzone),
            duration=30, active=True,
            attempts_allowed=-1, time_between_attempts=0,
            description='demo quiz', pass_criteria=40,
            language='Python', prerequisite=self.pre_req_quiz,
            course=self.course
        )

    def tearDown(self):
        """Log out and delete every fixture object created in setUp."""
        self.client.logout()
        self.user.delete()
        self.student.delete()
        self.quiz.delete()
        self.pre_req_quiz.delete()
        self.course.delete()

    def test_remove_teacher_denies_anonymous(self):
        """
        If not logged in redirect to login page
        """
        response = self.client.get(reverse('nayan:remove_teacher',
            kwargs={'course_id': self.course.id}),
            follow=True
        )
        redirect_destination = ('/exam/login/?next=/exam'
            '/manage/remove_teachers/{0}/'.format(self.course.id))
        self.assertRedirects(response, redirect_destination)

    def test_remove_teacher_denies_non_moderator(self):
        """
        If not moderator redirect to login page
        """
        self.client.login(
            username=self.student.username,
            password=self.student_plaintext_pass
        )
        response = self.client.get(reverse('nayan:remove_teacher',
            kwargs={'course_id': self.course.id}),
            follow=True
        )
        self.assertEqual(response.status_code, 404)

    def test_remove_teacher_post(self):
        """
        POST request should remove moderator from course
        """
        teacher_id_list = []
        self.client.login(
            username=self.user.username,
            password=self.user_plaintext_pass
        )
        for i in range(5):
            teacher = User.objects.create_user(
                username='remove_teacher{}'.format(i),
                password='remove_teacher_pass{}'.format(i),
                first_name='remove_teacher_first_name{}'.format(i),
                last_name='remove_teacher_last_name{}'.format(i),
                email='remove_teacher{}@test.com'.format(i)
            )
            # The profile only needs to exist; the original unused local
            # binding (teacher_profile) has been dropped.
            Profile.objects.create(
                user=teacher,
                roll_number='RT{}'.format(i),
                institute='IIT',
                department='Aeronautical',
                position='Teacher',
                timezone='UTC'
            )
            teacher_id_list.append(teacher.id)
            self.course.teachers.add(teacher)
        response = self.client.post(reverse('nayan:remove_teacher',
            kwargs={'course_id': self.course.id}),
            data={'remove': teacher_id_list}
        )
        self.assertEqual(response.status_code, 302)
        # The view redirects without a trailing slash; Django then issues
        # a permanent (301) redirect to the canonical URL.
        redirect_destination = '/exam/manage/courses'
        self.assertRedirects(response, redirect_destination,
                             status_code=302,
                             target_status_code=301
                             )
        for t_id in teacher_id_list:
            teacher = User.objects.get(id=t_id)
            self.assertNotIn(teacher, self.course.teachers.all())
class TestCourses(TestCase):
    """Tests for the moderator course-listing view."""

    def setUp(self):
        self.client = Client()
        self.mod_group = Group.objects.create(name='moderator')
        # Two moderators, each owning one course.
        self.user1_plaintext_pass = 'demo1'
        self.user1 = User.objects.create_user(
            username='demo_user1',
            password=self.user1_plaintext_pass,
            first_name='user1_first_name',
            last_name='user1_last_name',
            email='demo@test.com'
        )
        Profile.objects.create(
            user=self.user1, roll_number=10, institute='IIT',
            department='Chemical', position='Moderator', timezone='UTC'
        )
        self.user2_plaintext_pass = 'demo2'
        self.user2 = User.objects.create_user(
            username='demo_user2',
            password=self.user2_plaintext_pass,
            first_name='user2_first_name',
            last_name='user2_last_name',
            email='demo2@test.com'
        )
        Profile.objects.create(
            user=self.user2, roll_number=10, institute='IIT',
            department='Aeronautical', position='Moderator', timezone='UTC'
        )
        # A plain student who is not a moderator.
        self.student_plaintext_pass = 'demo_student'
        self.student = User.objects.create_user(
            username='demo_student',
            password=self.student_plaintext_pass,
            first_name='student_first_name',
            last_name='student_last_name',
            email='demo_student@test.com'
        )
        self.mod_group.user_set.add(self.user1)
        self.mod_group.user_set.add(self.user2)
        self.user1_course = Course.objects.create(
            name="Python Course", enrollment="Enroll Request",
            creator=self.user1)
        self.user2_course = Course.objects.create(
            name="Java Course", enrollment="Enroll Request",
            creator=self.user2)

    def tearDown(self):
        self.client.logout()
        for fixture in (self.user1, self.user2, self.student,
                        self.user1_course, self.user2_course):
            fixture.delete()

    def test_courses_denies_anonymous(self):
        """
        If not logged in redirect to login page
        """
        resp = self.client.get(reverse('nayan:courses'), follow=True)
        expected = ('/exam/login/?next=/exam'
                    '/manage/courses/')
        self.assertRedirects(resp, expected)

    def test_courses_denies_non_moderator(self):
        """
        If not moderator redirect to login page
        """
        self.client.login(username=self.student.username,
                          password=self.student_plaintext_pass)
        resp = self.client.get(reverse('nayan:courses'), follow=True)
        self.assertEqual(resp.status_code, 404)

    def test_courses_get(self):
        """
        GET request should return courses page
        """
        self.client.login(username=self.user1.username,
                          password=self.user1_plaintext_pass)
        resp = self.client.get(reverse('nayan:courses'), follow=True)
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'nayan/courses.html')
        # A moderator sees their own course but not another moderator's.
        self.assertIn(self.user1_course, resp.context['courses'])
        self.assertNotIn(self.user2_course, resp.context['courses'])
class TestAddCourse(TestCase):
    """Tests for the ``nayan:add_course`` view (access control, GET form, POST)."""

    def setUp(self):
        """Create a moderator, a student, a course and two quizzes."""
        self.client = Client()
        self.mod_group = Group.objects.create(name='moderator')
        tzone = pytz.timezone('UTC')
        # Create Moderator with profile
        self.user_plaintext_pass = 'demo'
        self.user = User.objects.create_user(
            username='demo_user',
            password=self.user_plaintext_pass,
            first_name='first_name',
            last_name='last_name',
            email='demo@test.com'
        )
        Profile.objects.create(
            user=self.user,
            roll_number=10,
            institute='IIT',
            department='Chemical',
            position='Moderator',
            timezone='UTC'
        )
        # Create Student
        self.student_plaintext_pass = 'demo_student'
        self.student = User.objects.create_user(
            username='demo_student',
            password=self.student_plaintext_pass,
            first_name='student_first_name',
            last_name='student_last_name',
            email='demo_student@test.com'
        )
        # Add to moderator group
        self.mod_group.user_set.add(self.user)
        self.course = Course.objects.create(name="Python Course",
            enrollment="Enroll Request", creator=self.user)
        self.pre_req_quiz = Quiz.objects.create(
            start_date_time=datetime(2014, 2, 1, 5, 8, 15, 0, tzone),
            end_date_time=datetime(2015, 10, 9, 10, 8, 15, 0, tzone),
            duration=30, active=True,
            attempts_allowed=-1, time_between_attempts=0,
            description='pre requisite quiz', pass_criteria=40,
            language='Python', prerequisite=None,
            course=self.course
        )
        self.quiz = Quiz.objects.create(
            start_date_time=datetime(2014, 10, 9, 10, 8, 15, 0, tzone),
            end_date_time=datetime(2015, 10, 9, 10, 8, 15, 0, tzone),
            duration=30, active=True,
            attempts_allowed=-1, time_between_attempts=0,
            description='demo quiz', pass_criteria=40,
            language='Python', prerequisite=self.pre_req_quiz,
            course=self.course
        )

    def tearDown(self):
        """Log out and delete every fixture object created in setUp."""
        self.client.logout()
        self.user.delete()
        self.student.delete()
        self.quiz.delete()
        self.pre_req_quiz.delete()
        self.course.delete()

    def test_add_course_denies_anonymous(self):
        """
        If not logged in redirect to login page
        """
        response = self.client.get(reverse('nayan:add_course'),
                                   follow=True
                                   )
        redirect_destination = ('/exam/login/?next=/'
            'exam/manage/add_course/')
        self.assertRedirects(response, redirect_destination)

    def test_add_course_denies_non_moderator(self):
        """
        If not moderator in redirect to login page
        """
        self.client.login(
            username=self.student.username,
            password=self.student_plaintext_pass
        )
        # (Removed the unused local `course_id` from the original code.)
        response = self.client.get(reverse('nayan:add_course'),
                                   follow=True
                                   )
        self.assertEqual(response.status_code, 404)

    def test_add_course_get(self):
        """
        GET request to add course should display add course form
        """
        self.client.login(
            username=self.user.username,
            password=self.user_plaintext_pass
        )
        response = self.client.get(reverse('nayan:add_course'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'nayan/add_course.html')
        self.assertIsNotNone(response.context['form'])

    def test_add_course_post_new_course(self):
        """
        POST request to add course should add new courses if no course exists
        """
        self.client.login(
            username=self.user.username,
            password=self.user_plaintext_pass
        )
        response = self.client.post(reverse('nayan:add_course'),
            data={'name': 'new_demo_course_1',
                  'active': True,
                  'enrollment': 'open',
                  'start_enroll_time': '2016-01-10 09:00:15',
                  'end_enroll_time': '2016-01-15 09:00:15',
                  }
        )
        new_course = Course.objects.latest('created_on')
        self.assertEqual(new_course.name, 'new_demo_course_1')
        self.assertEqual(new_course.enrollment, 'open')
        self.assertEqual(new_course.active, True)
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(response, '/exam/manage/')
class TestCourseDetail(TestCase):
    """Tests for course detail, CSV user upload, enrollment/rejection,
    course status toggling and bulk mail views."""

    def setUp(self):
        """Create two moderators, two students and a course owned by user1."""
        self.client = Client()
        self.mod_group = Group.objects.create(name='moderator')
        # Create Moderator with profile
        self.user1_plaintext_pass = 'demo1'
        self.user1 = User.objects.create_user(
            username='demo_user1',
            password=self.user1_plaintext_pass,
            first_name='user1_first_name',
            last_name='user1_last_name',
            email='demo@test.com'
        )
        Profile.objects.create(
            user=self.user1,
            roll_number=10,
            institute='IIT',
            department='Chemical',
            position='Moderator',
            timezone='UTC'
        )
        self.user2_plaintext_pass = 'demo2'
        self.user2 = User.objects.create_user(
            username='demo_user2',
            password=self.user2_plaintext_pass,
            first_name='user2_first_name',
            last_name='user2_last_name',
            email='demo2@test.com'
        )
        Profile.objects.create(
            user=self.user2,
            roll_number=10,
            institute='IIT',
            department='Aeronautical',
            position='Moderator',
            timezone='UTC'
        )
        # Create Student
        self.student_plaintext_pass = 'demo_student'
        self.student = User.objects.create_user(
            username='demo_student',
            password=self.student_plaintext_pass,
            first_name='student_first_name',
            last_name='student_last_name',
            email='demo_student@test.com'
        )
        self.student1_plaintext_pass = 'demo_student1'
        self.student1 = User.objects.create_user(
            username='demo_student1',
            password=self.student1_plaintext_pass,
            first_name='student_first_name',
            last_name='student_last_name',
            email='demo_student1@test.com'
        )
        # Add to moderator group
        self.mod_group.user_set.add(self.user1)
        self.mod_group.user_set.add(self.user2)
        self.user1_course = Course.objects.create(name="Python Course",
            enrollment="Enroll Request", creator=self.user1)

    def tearDown(self):
        """Log out and delete every fixture object created in setUp."""
        self.client.logout()
        self.user1.delete()
        self.user2.delete()
        self.student.delete()
        # Fix: student1 is created in setUp but was never deleted here.
        self.student1.delete()
        self.user1_course.delete()

    def _login_user1(self):
        """Authenticate the test client as the course creator (user1)."""
        self.client.login(
            username=self.user1.username,
            password=self.user1_plaintext_pass
        )

    def _upload_users(self, fixture_name):
        """Log in as user1 and POST the given fixture to ``upload_users``.

        The fixture file is opened with a context manager so the handle is
        always closed, even if a later assertion fails (the original code
        closed it manually and leaked it on failure). Returns the response.
        """
        self._login_user1()
        csv_file_path = os.path.join(FIXTURES_DIR_PATH, fixture_name)
        with open(csv_file_path, 'rb') as csv_file:
            upload_file = SimpleUploadedFile(csv_file_path, csv_file.read())
        return self.client.post(reverse('nayan:upload_users',
            kwargs={'course_id': self.user1_course.id}),
            data={'csv_file': upload_file})

    def test_upload_users_with_correct_csv(self):
        # When
        response = self._upload_users("users_correct.csv")
        # Then
        uploaded_user = User.objects.filter(email="abc@xyz.com")
        self.assertEqual(uploaded_user.count(), 1)
        self.assertEqual(response.status_code, 200)
        self.assertIn('upload_details', response.context)
        self.assertTemplateUsed(response, 'nayan/course_detail.html')

    def test_upload_users_add_update_reject(self):
        # When
        response = self._upload_users("users_add_update_reject.csv")
        # Then
        uploaded_user = User.objects.filter(username="test")
        user = uploaded_user[0]
        self.assertEqual(uploaded_user.count(), 1)
        self.assertEqual(user.first_name, "test2")
        self.assertIn(user, self.user1_course.get_rejected())
        self.assertEqual(response.status_code, 200)
        self.assertIn('upload_details', response.context)
        self.assertTemplateUsed(response, 'nayan/course_detail.html')

    def test_upload_users_with_wrong_csv(self):
        # Given: a zip archive is not a valid CSV upload.
        message = "The file uploaded is not a CSV file."
        # When
        response = self._upload_users("demo_questions.zip")
        # Then
        self.assertEqual(response.status_code, 200)
        self.assertNotIn('upload_details', response.context)
        self.assertIn('message', response.context)
        self.assertEqual(response.context['message'], message)
        self.assertTemplateUsed(response, 'nayan/course_detail.html')

    def test_upload_users_csv_with_missing_headers(self):
        # Given
        message = "The CSV file does not contain the required headers"
        # When
        response = self._upload_users("users_some_headers_missing.csv")
        # Then
        self.assertEqual(response.status_code, 200)
        self.assertNotIn('upload_details', response.context)
        self.assertIn('message', response.context)
        self.assertEqual(response.context['message'], message)
        self.assertTemplateUsed(response, 'nayan/course_detail.html')

    def test_upload_users_csv_with_no_values(self):
        # When
        response = self._upload_users("users_with_no_values.csv")
        # Then
        self.assertEqual(response.status_code, 200)
        self.assertIn('upload_details', response.context)
        self.assertNotIn('message', response.context)
        self.assertIn("No rows in the CSV file",
                      response.context['upload_details'])
        self.assertTemplateUsed(response, 'nayan/course_detail.html')

    def test_upload_users_csv_with_missing_values(self):
        '''
        This test takes csv with 3 row values.
        1st row has a missing row.
        2nd has a proper row.
        3rd has a same row has 2nd
        Only 2nd user will be added.
        This test proves that:
            - Row with missing values is ignored and continued with next row.
            - Duplicate user is not created.
        '''
        # When
        response = self._upload_users("users_some_values_missing.csv")
        # Then
        uploaded_user = User.objects.filter(email="dummy@xyz.com")
        self.assertEqual(uploaded_user.count(), 1)
        self.assertEqual(response.status_code, 200)
        self.assertIn('upload_details', response.context)
        self.assertNotIn('message', response.context)
        self.assertTemplateUsed(response, 'nayan/course_detail.html')

    def test_course_detail_denies_anonymous(self):
        """
        If not logged in redirect to login page
        """
        response = self.client.get(reverse('nayan:course_detail',
            kwargs={'course_id': self.user1_course.id}),
            follow=True
        )
        redirect_destination = ('/exam/login/?next=/exam/'
            'manage/course_detail/{0}/'.format(self.user1_course.id))
        self.assertRedirects(response, redirect_destination)

    def test_course_detail_denies_non_moderator(self):
        """
        If not moderator redirect to 404
        """
        self.client.login(
            username=self.student.username,
            password=self.student_plaintext_pass
        )
        response = self.client.get(reverse('nayan:course_detail',
            kwargs={'course_id': self.user1_course.id}),
            follow=True
        )
        self.assertEqual(response.status_code, 404)

    def test_course_detail_denies_unrelated_moderators(self):
        """
        If not creator of course or related teacher redirect to 404
        """
        self.client.login(
            username=self.user2.username,
            password=self.user2_plaintext_pass
        )
        response = self.client.get(reverse('nayan:course_detail',
            kwargs={'course_id': self.user1_course.id}),
            follow=True
        )
        self.assertEqual(response.status_code, 404)

    def test_course_detail_get(self):
        """
        The course creator can view the course detail page.
        """
        self._login_user1()
        response = self.client.get(reverse('nayan:course_detail',
            kwargs={'course_id': self.user1_course.id}),
            follow=True
        )
        self.assertEqual(self.user1_course, response.context['course'])
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'nayan/course_detail.html')

    def test_student_course_enroll_get(self):
        """
        Enroll student in a course using get request
        """
        self._login_user1()
        response = self.client.get(reverse('nayan:enroll_user',
            kwargs={'course_id': self.user1_course.id,
                    'user_id': self.student.id})
        )
        enrolled_student = self.user1_course.students.all()
        self.assertEqual(response.status_code, 200)
        self.assertSequenceEqual([self.student], enrolled_student)

    def test_student_course_enroll_post(self):
        """
        Enroll student in a course using post request
        """
        self._login_user1()
        response = self.client.post(reverse('nayan:enroll_users',
            kwargs={'course_id': self.user1_course.id}),
            data={'check': self.student1.id}
        )
        enrolled_student = self.user1_course.students.all()
        self.assertEqual(response.status_code, 200)
        self.assertSequenceEqual([self.student1], enrolled_student)

    def test_student_course_reject_get(self):
        """
        Reject student in a course using get request
        """
        self._login_user1()
        response = self.client.get(reverse('nayan:reject_user',
            kwargs={'course_id': self.user1_course.id,
                    'user_id': self.student.id})
        )
        enrolled_student = self.user1_course.rejected.all()
        self.assertEqual(response.status_code, 200)
        self.assertSequenceEqual([self.student], enrolled_student)

    def test_student_course_reject_post(self):
        """
        Reject student in a course using post request
        """
        self._login_user1()
        response = self.client.post(reverse('nayan:reject_users',
            kwargs={'course_id': self.user1_course.id}),
            data={'check': self.student1.id}
        )
        enrolled_student = self.user1_course.rejected.all()
        self.assertEqual(response.status_code, 200)
        self.assertSequenceEqual([self.student1], enrolled_student)

    def test_toggle_course_status_get(self):
        """Toggling flips the course from its default active state to inactive."""
        self._login_user1()
        response = self.client.post(reverse('nayan:toggle_course_status',
            kwargs={'course_id': self.user1_course.id})
        )
        self.assertEqual(response.status_code, 200)
        course = Course.objects.get(name="Python Course")
        self.assertFalse(course.active)
        self.assertEqual(self.user1_course, response.context['course'])
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'nayan/course_detail.html')

    def test_send_mail_to_course_students(self):
        """ Check if bulk mail is sent to multiple students enrolled in a course
        """
        self._login_user1()
        self.student2 = User.objects.create_user(
            username='demo_student2',
            password=self.student_plaintext_pass,
            first_name='student_first_name',
            last_name='student_last_name',
            email='demo_student2@test.com'
        )
        self.student3 = User.objects.create_user(
            username='demo_student3',
            password=self.student_plaintext_pass,
            first_name='student_first_name',
            last_name='student_last_name',
            email='demo_student3@test.com'
        )
        self.student4 = User.objects.create_user(
            username='demo_student4',
            password=self.student_plaintext_pass,
            first_name='student_first_name',
            last_name='student_last_name',
            email='demo_student4@test.com'
        )
        user_ids = [self.student.id, self.student2.id, self.student3.id,
                    self.student4.id]
        user_emails = [self.student.email, self.student2.email,
                       self.student3.email, self.student4.email]
        self.user1_course.students.add(*user_ids)
        attachment = SimpleUploadedFile("file.txt", b"Test")
        email_data = {
            'send_mail': 'send_mail', 'email_attach': [attachment],
            'subject': 'test_bulk_mail', 'body': 'Test_Mail',
            'check': user_ids
        }
        self.client.post(reverse(
            'nayan:send_mail', kwargs={'course_id': self.user1_course.id}),
            data=email_data
        )
        # Inspect the captured outbox to verify the message that went out.
        attachment_file = mail.outbox[0].attachments[0][0]
        subject = mail.outbox[0].subject
        body = mail.outbox[0].alternatives[0][0]
        recipients = mail.outbox[0].recipients()
        self.assertEqual(attachment_file, "file.txt")
        self.assertEqual(subject, "test_bulk_mail")
        self.assertEqual(body, "Test_Mail")
        self.assertSequenceEqual(recipients, user_emails)
        # Test for get request in send mail
        get_response = self.client.get(reverse(
            'nayan:send_mail', kwargs={'course_id': self.user1_course.id})
        )
        self.assertEqual(get_response.status_code, 200)
        self.assertEqual(get_response.context['course'], self.user1_course)
        self.assertEqual(get_response.context['state'], 'mail')
class TestEnrollRequest(TestCase):
    """Tests for the student enroll-request view."""

    def setUp(self):
        self.client = Client()
        self.mod_group = Group.objects.create(name='moderator')
        # Two moderators with profiles.
        self.user1_plaintext_pass = 'demo1'
        self.user1 = User.objects.create_user(
            username='demo_user1',
            password=self.user1_plaintext_pass,
            first_name='user1_first_name',
            last_name='user1_last_name',
            email='demo@test.com'
        )
        Profile.objects.create(
            user=self.user1, roll_number=10, institute='IIT',
            department='Chemical', position='Moderator', timezone='UTC'
        )
        self.user2_plaintext_pass = 'demo2'
        self.user2 = User.objects.create_user(
            username='demo_user2',
            password=self.user2_plaintext_pass,
            first_name='user2_first_name',
            last_name='user2_last_name',
            email='demo2@test.com'
        )
        Profile.objects.create(
            user=self.user2, roll_number=10, institute='IIT',
            department='Aeronautical', position='Moderator', timezone='UTC'
        )
        # A plain student who is not a moderator.
        self.student_plaintext_pass = 'demo_student'
        self.student = User.objects.create_user(
            username='demo_student',
            password=self.student_plaintext_pass,
            first_name='student_first_name',
            last_name='student_last_name',
            email='demo_student@test.com'
        )
        self.mod_group.user_set.add(self.user1)
        self.mod_group.user_set.add(self.user2)
        self.course = Course.objects.create(
            name="Python Course", enrollment="Enroll Request",
            creator=self.user1)

    def tearDown(self):
        self.client.logout()
        for fixture in (self.user1, self.user2, self.student, self.course):
            fixture.delete()

    def test_enroll_request_denies_anonymous(self):
        """
        If not logged in redirect to login page
        """
        resp = self.client.get(
            reverse('nayan:enroll_request',
                    kwargs={'course_id': self.course.id}),
            follow=True
        )
        expected = ('/exam/login/?next=/exam'
                    '/enroll_request/{}/'.format(self.course.id))
        self.assertRedirects(resp, expected)

    def test_enroll_request_get_for_student(self):
        """A student's enroll request redirects back to the quiz list."""
        self.client.login(username=self.student.username,
                          password=self.student_plaintext_pass)
        resp = self.client.get(
            reverse('nayan:enroll_request',
                    kwargs={'course_id': self.course.id}),
            follow=True
        )
        self.assertRedirects(resp, '/exam/quizzes/')

    def test_enroll_request_get_for_moderator(self):
        """A moderator's enroll request redirects to the course list."""
        self.client.login(username=self.user2.username,
                          password=self.user2_plaintext_pass)
        resp = self.client.get(
            reverse('nayan:enroll_request',
                    kwargs={'course_id': self.course.id}),
            follow=True
        )
        self.assertRedirects(resp, '/exam/manage/courses/')
class TestViewAnswerPaper(TestCase):
    """Tests for the ``nayan:view_answerpaper`` view permissions."""

    def setUp(self):
        """Create three users, a course, a quiz with one question, and
        an answer paper for user3."""
        self.client = Client()
        self.plaintext_pass = 'demo'
        for i in range(1, 4):
            User.objects.create_user(
                username='demo_user{0}'.format(i),
                password=self.plaintext_pass,
                first_name='first_name',
                last_name='last_name',
                email='demo@test.com'
            )
        self.user1 = User.objects.get(username="demo_user1")
        self.course = Course.objects.create(name="Python Course",
            enrollment="Enroll Request",
            creator=self.user1)
        self.question = Question.objects.create(summary='Dummy', points=1,
            type='code', user=self.user1)
        self.quiz = Quiz.objects.create(time_between_attempts=0,
            course=self.course,
            description='demo quiz', language='Python')
        self.user3 = User.objects.get(username="demo_user3")
        self.question_paper = QuestionPaper.objects.create(quiz=self.quiz,
            total_marks=1.0)
        self.question_paper.fixed_questions.add(self.question)
        self.question_paper.save()
        # Give user3 a completed attempt so the "can view" test has data.
        self.ans_paper = AnswerPaper.objects.create(user=self.user3,
            attempt_number=1, question_paper=self.question_paper,
            start_time=timezone.now(), user_ip='101.0.0.1',
            end_time=timezone.now()+timezone.timedelta(minutes=20))

    def tearDown(self):
        """Remove every model object created during the tests."""
        User.objects.all().delete()
        Course.objects.all().delete()
        Question.objects.all().delete()
        Quiz.objects.all().delete()
        QuestionPaper.objects.all().delete()
        AnswerPaper.objects.all().delete()

    def test_anonymous_user(self):
        # Given, user not logged in
        redirect_destination = ('/exam/login/?next=/exam'
            '/view_answerpaper/{0}/'.format(self.question_paper.id))
        # When
        response = self.client.get(reverse('nayan:view_answerpaper',
            kwargs={'questionpaper_id': self.question_paper.id}),
            follow=True
        )
        # Then
        self.assertRedirects(response, redirect_destination)

    def test_cannot_view(self):
        # Given, enrolled user tries to view when not permitted by moderator
        user2 = User.objects.get(username="demo_user2")
        self.course.students.add(user2)
        self.course.save()
        self.quiz.view_answerpaper = False
        self.quiz.save()
        self.client.login(
            username=user2.username,
            password=self.plaintext_pass
        )
        # When
        response = self.client.get(reverse('nayan:view_answerpaper',
            kwargs={'questionpaper_id': self.question_paper.id}),
            follow=True
        )
        # Then
        self.assertRedirects(response, '/exam/quizzes/')

    def test_can_view_answerpaper(self):
        # Given, user enrolled and can view
        user3 = User.objects.get(username="demo_user3")
        self.course.students.add(user3)
        self.course.save()
        # (Removed an unused lookup of the answer paper that the original
        # code assigned to a local variable and never used.)
        self.quiz.view_answerpaper = True
        self.quiz.save()
        self.client.login(
            username=user3.username,
            password=self.plaintext_pass
        )
        # When
        response = self.client.get(reverse('nayan:view_answerpaper',
            kwargs={'questionpaper_id': self.question_paper.id}),
            follow=True
        )
        # Then
        self.assertEqual(response.status_code, 200)
        self.assertTrue('data' in response.context)
        self.assertTrue('quiz' in response.context)
        self.assertTemplateUsed(response, 'nayan/view_answerpaper.html')
        # When, wrong question paper id
        response = self.client.get(reverse('nayan:view_answerpaper',
            kwargs={'questionpaper_id': 190}),
            follow=True
        )
        # Then
        self.assertEqual(response.status_code, 404)

    def test_view_when_not_enrolled(self):
        # Given, user tries to view when not enrolled in the course
        user2 = User.objects.get(username="demo_user2")
        self.client.login(
            username=user2.username,
            password=self.plaintext_pass
        )
        self.course.students.remove(user2)
        self.course.save()
        self.quiz.view_answerpaper = True
        self.quiz.save()
        # When
        response = self.client.get(reverse('nayan:view_answerpaper',
            kwargs={'questionpaper_id': self.question_paper.id}),
            follow=True
        )
        # Then
        self.assertRedirects(response, '/exam/quizzes/')
class TestSelfEnroll(TestCase):
    """Exercise the self_enroll view for anonymous visitors, students and
    moderators."""

    def setUp(self):
        self.client = Client()
        self.mod_group = Group.objects.create(name='moderator')
        # First moderator, with a profile.
        self.user1_plaintext_pass = 'demo1'
        self.user1 = User.objects.create_user(
            username='demo_user1',
            password=self.user1_plaintext_pass,
            first_name='user1_first_name',
            last_name='user1_last_name',
            email='demo@test.com',
        )
        Profile.objects.create(
            user=self.user1, roll_number=10, institute='IIT',
            department='Chemical', position='Moderator', timezone='UTC',
        )
        # Second moderator, with a profile.
        self.user2_plaintext_pass = 'demo2'
        self.user2 = User.objects.create_user(
            username='demo_user2',
            password=self.user2_plaintext_pass,
            first_name='user2_first_name',
            last_name='user2_last_name',
            email='demo2@test.com',
        )
        Profile.objects.create(
            user=self.user2, roll_number=10, institute='IIT',
            department='Aeronautical', position='Moderator', timezone='UTC',
        )
        # A plain student account.
        self.student_plaintext_pass = 'demo_student'
        self.student = User.objects.create_user(
            username='demo_student',
            password=self.student_plaintext_pass,
            first_name='student_first_name',
            last_name='student_last_name',
            email='demo_student@test.com',
        )
        # Grant both users moderator rights.
        self.mod_group.user_set.add(self.user1)
        self.mod_group.user_set.add(self.user2)
        self.course = Course.objects.create(
            name="Python Course", enrollment="Enroll Request",
            creator=self.user1,
        )

    def tearDown(self):
        self.client.logout()
        self.user1.delete()
        self.user2.delete()
        self.student.delete()
        self.course.delete()

    def test_self_enroll_denies_anonymous(self):
        """Anonymous requests are redirected to the login page."""
        url = reverse('nayan:self_enroll',
                      kwargs={'course_id': self.course.id})
        response = self.client.get(url, follow=True)
        expected = ('/exam/login/?next=/exam'
                    '/self_enroll/{}/'.format(self.course.id))
        self.assertRedirects(response, expected)

    def test_enroll_request_get_for_student(self):
        """A student's enroll request redirects to the quiz list."""
        self.client.login(username=self.student.username,
                          password=self.student_plaintext_pass)
        url = reverse('nayan:self_enroll',
                      kwargs={'course_id': self.course.id})
        response = self.client.get(url, follow=True)
        self.assertRedirects(response, '/exam/quizzes/')

    def test_enroll_request_get_for_moderator(self):
        """A moderator's enroll request redirects to the management page."""
        self.client.login(username=self.user2.username,
                          password=self.user2_plaintext_pass)
        url = reverse('nayan:self_enroll',
                      kwargs={'course_id': self.course.id})
        response = self.client.get(url, follow=True)
        self.assertRedirects(response, '/exam/manage/')
class TestGrader(TestCase):
    """Tests for the grader and regrade views.

    Both views are moderator-only; regrade additionally requires the
    moderator to belong to the course being regraded.
    """

    def setUp(self):
        self.client = Client()
        self.mod_group = Group.objects.create(name='moderator')
        # Create Moderator with profile
        self.user1_plaintext_pass = 'demo1'
        self.user1 = User.objects.create_user(
            username='demo_user1',
            password=self.user1_plaintext_pass,
            first_name='user1_first_name',
            last_name='user1_last_name',
            email='demo@test.com'
        )
        Profile.objects.create(
            user=self.user1,
            roll_number=10,
            institute='IIT',
            department='Chemical',
            position='Moderator',
            timezone='UTC'
        )
        # Second moderator; NOT the course creator, used to test denial.
        self.user2_plaintext_pass = 'demo2'
        self.user2 = User.objects.create_user(
            username='demo_user2',
            password=self.user2_plaintext_pass,
            first_name='user2_first_name',
            last_name='user2_last_name',
            email='demo2@test.com'
        )
        Profile.objects.create(
            user=self.user2,
            roll_number=10,
            institute='IIT',
            department='Aeronautical',
            position='Moderator',
            timezone='UTC'
        )
        # Create Student
        self.student_plaintext_pass = 'demo_student'
        self.student = User.objects.create_user(
            username='demo_student',
            password=self.student_plaintext_pass,
            first_name='student_first_name',
            last_name='student_last_name',
            email='demo_student@test.com'
        )
        # Add to moderator group
        self.mod_group.user_set.add(self.user1)
        self.mod_group.user_set.add(self.user2)
        # Course is created by user1 only; user2 is a moderator elsewhere.
        self.course = Course.objects.create(name="Python Course",
            enrollment="Enroll Request", creator=self.user1)
        self.question = Question.objects.create(summary='Dummy', points=1,
            type='code', user=self.user1)
        self.quiz = Quiz.objects.create(time_between_attempts=0, course=self.course,
            description='demo quiz', language='Python')
        self.question_paper = QuestionPaper.objects.create(quiz=self.quiz,
            total_marks=1.0)
        self.question_paper.fixed_questions.add(self.question)
        self.question_paper.save()
        # An answer paper by user2, the target of the regrade tests.
        self.answerpaper = AnswerPaper.objects.create(user=self.user2,
            attempt_number=1, question_paper=self.question_paper,
            start_time=timezone.now(), user_ip='101.0.0.1',
            end_time=timezone.now()+timezone.timedelta(minutes=20))

    def tearDown(self):
        # Bulk-delete everything created in setUp.
        User.objects.all().delete()
        Course.objects.all().delete()
        Question.objects.all().delete()
        Quiz.objects.all().delete()
        QuestionPaper.objects.all().delete()
        AnswerPaper.objects.all().delete()

    def test_grader_denies_anonymous(self):
        """Anonymous access to the grader redirects to login."""
        # Given
        redirect_destination = ('/exam/login/?next=/exam/manage/grader/')
        # When
        response = self.client.get(reverse('nayan:grader'), follow=True)
        # Then
        self.assertRedirects(response, redirect_destination)

    def test_grader_denies_students(self):
        """A student requesting the grader gets a 404."""
        # Given
        self.client.login(
            username=self.student.username,
            password=self.student_plaintext_pass
        )
        # When
        response = self.client.get(reverse('nayan:grader'), follow=True)
        # Then
        self.assertEqual(response.status_code, 404)

    def test_regrade_denies_anonymous(self):
        """Anonymous access to regrade redirects to login."""
        # Given
        redirect_destination = dedent('''\
            /exam/login/?next=/exam/manage/regrade/answerpaper/{}/{}/{}/'''.format(
            self.course.id, self.question.id, self.answerpaper.id)
        )
        # When
        response = self.client.get(reverse('nayan:regrade',
            kwargs={'course_id': self.course.id,
                    'question_id': self.question.id,
                    'answerpaper_id': self.answerpaper.id}),
            follow=True)
        # Then
        self.assertRedirects(response, redirect_destination)

    def test_regrade_denies_students(self):
        """A student requesting regrade gets a 404."""
        # Given
        self.client.login(
            username=self.student.username,
            password=self.student_plaintext_pass
        )
        # When
        response = self.client.get(reverse('nayan:regrade',
            kwargs={'course_id': self.course.id,
                    'question_id': self.question.id,
                    'answerpaper_id': self.answerpaper.id}),
            follow=True)
        # Then
        self.assertEqual(response.status_code, 404)

    def test_grader_by_moderator(self):
        """The course-creating moderator sees the grader page."""
        # Given
        self.client.login(
            username=self.user1.username,
            password=self.user1_plaintext_pass
        )
        # When
        response = self.client.get(reverse('nayan:grader'),
            follow=True)
        # Then
        self.assertEqual(response.status_code, 200)
        self.assertTrue('courses' in response.context)
        self.assertTemplateUsed(response, 'nayan/regrade.html')

    def test_regrade_by_moderator(self):
        """The course-creating moderator can regrade an answer paper."""
        # Given
        self.client.login(
            username=self.user1.username,
            password=self.user1_plaintext_pass
        )
        # When
        response = self.client.get(reverse('nayan:regrade',
            kwargs={'course_id': self.course.id,
                    'question_id': self.question.id,
                    'answerpaper_id': self.answerpaper.id}),
            follow=True)
        # Then
        self.assertEqual(response.status_code, 200)
        self.assertTrue('courses' in response.context)
        self.assertTrue('details' in response.context)
        self.assertTemplateUsed(response, 'nayan/regrade.html')

    def test_regrade_denies_moderator_not_in_course(self):
        """A moderator who is not part of the course gets a 404."""
        # Given
        self.client.login(
            username=self.user2.username,
            password=self.user2_plaintext_pass
        )
        # When
        response = self.client.get(reverse('nayan:regrade',
            kwargs={'course_id': self.course.id,
                    'question_id': self.question.id,
                    'answerpaper_id': self.answerpaper.id}),
            follow=True)
        # Then
        self.assertEqual(response.status_code, 404)
class TestPasswordReset(TestCase):
    """Tests for the password reset and password change flows."""

    def setUp(self):
        # A single student account with a profile.
        self.user1_plaintext_pass = 'demo1'
        self.user1 = User.objects.create_user(
            username='demo_user1',
            password=self.user1_plaintext_pass,
            first_name='first_name',
            last_name='last_name',
            email='demo1@test.com',
        )
        Profile.objects.create(
            user=self.user1, roll_number=10, institute='IIT',
            department='Chemical', position='Student', timezone='UTC',
        )

    def tearDown(self):
        self.user1.delete()

    def test_password_reset_post(self):
        """
        POST request to password_reset view should return a valid response
        """
        response = self.client.post(
            reverse('password_reset'),
            data={'email': self.user1.email},
        )
        # The submitted address is echoed back and the flow redirects to
        # the mail-sent confirmation page.
        self.assertEqual(response.context['email'], self.user1.email)
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(response, '/exam/reset/password_reset/mail_sent/')

    def test_password_change_post(self):
        """
        POST request to password_change view should change the user password
        """
        self.client.login(username=self.user1.username,
                          password=self.user1_plaintext_pass)
        response = self.client.post(
            reverse('password_change'),
            data={
                'old_password': self.user1_plaintext_pass,
                'new_password1': 'new_demo1_pass',
                'new_password2': 'new_demo1_pass',
            },
        )
        # The new credentials now authenticate and the flow redirects to
        # the done page.
        self.assertIsNotNone(
            authenticate(username='demo_user1', password='new_demo1_pass'))
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(response, '/exam/reset/password_change/done/')
        self.client.logout()
class TestModeratorDashboard(TestCase):
    """Tests for the moderator dashboard (manage) view: access control,
    listing quizzes/papers, and deleting trial papers."""

    def setUp(self):
        self.client = Client()
        tzone = pytz.timezone("utc")
        self.mod_group = Group.objects.create(name='moderator')
        # student
        self.student_plaintext_pass = 'student'
        self.student = User.objects.create_user(
            username='student',
            password=self.student_plaintext_pass,
            first_name='first_name',
            last_name='last_name',
            email='student@test.com'
        )
        Profile.objects.create(
            user=self.student,
            roll_number=10,
            institute='IIT',
            department='Chemical',
            position='student',
            timezone='UTC'
        )
        # moderator
        self.user_plaintext_pass = 'demo'
        self.user = User.objects.create_user(
            username='demo_user',
            password=self.user_plaintext_pass,
            first_name='user_first_name',
            last_name='user_last_name',
            email='demo@test.com'
        )
        Profile.objects.create(
            user=self.user,
            roll_number=10,
            institute='IIT',
            department='Chemical',
            position='Moderator',
            timezone='UTC'
        )
        # A user with no Profile row; exercises the profile-missing path.
        self.mod_no_profile_plaintext_pass = 'demo2'
        self.mod_no_profile = User.objects.create_user(
            username='demo_user2',
            password=self.mod_no_profile_plaintext_pass,
            first_name='user_first_name22',
            last_name='user_last_name',
            email='demo2@test.com'
        )
        self.mod_group.user_set.add(self.user)
        self.course = Course.objects.create(name="Python Course",
            enrollment="Enroll Request", creator=self.user)
        self.quiz = Quiz.objects.create(
            start_date_time=datetime(2014, 10, 9, 10, 8, 15, 0, tzone),
            end_date_time=datetime(2015, 10, 9, 10, 8, 15, 0, tzone),
            duration=30, active=True, instructions="Demo Instructions",
            attempts_allowed=-1, time_between_attempts=0,
            description='demo quiz', pass_criteria=40,
            language='Python', course=self.course
        )
        self.question = Question.objects.create(
            summary="Test_question", description="Add two numbers",
            points=1.0, language="python", type="code", user=self.user
        )
        self.question_paper = QuestionPaper.objects.create(quiz=self.quiz,
            total_marks=1.0, fixed_question_order=str(self.question.id)
        )
        self.question_paper.fixed_questions.add(self.question)
        # student answerpaper
        user_answer = "def add(a, b)\n\treturn a+b"
        self.new_answer = Answer(question=self.question, answer=user_answer,
                                 correct=True, error=json.dumps([]), marks=0.5)
        self.new_answer.save()
        self.answerpaper = AnswerPaper.objects.create(
            user=self.student, question_paper=self.question_paper,
            attempt_number=1,
            start_time=datetime(2014, 10, 9, 10, 8, 15, 0, tzone),
            end_time=datetime(2014, 10, 9, 10, 15, 15, 0, tzone),
            user_ip="127.0.0.1", status="completed", passed=True,
            marks_obtained=0.5
        )
        self.answerpaper.answers.add(self.new_answer)
        self.answerpaper.questions_answered.add(self.question)
        self.answerpaper.questions.add(self.question)
        # moderator trial answerpaper
        self.trial_quiz = Quiz.objects.create(
            start_date_time=datetime(2014, 10, 9, 10, 8, 15, 0, tzone),
            end_date_time=datetime(2015, 10, 9, 10, 8, 15, 0, tzone),
            duration=30, active=True, instructions="Demo Instructions",
            attempts_allowed=-1, time_between_attempts=0,
            description='trial quiz', pass_criteria=40,
            language='Python', course=self.course, is_trial=True
        )
        self.trial_question_paper = QuestionPaper.objects.create(
            quiz=self.trial_quiz,
            total_marks=1.0, fixed_question_order=str(self.question.id)
        )
        self.trial_question_paper.fixed_questions.add(self.question)
        self.new_answer1 = Answer(question=self.question, answer=user_answer,
                                  correct=True, error=json.dumps([]), marks=0.5)
        self.new_answer1.save()
        self.trial_answerpaper = AnswerPaper.objects.create(
            user=self.user, question_paper=self.trial_question_paper,
            attempt_number=1,
            start_time=datetime(2014, 10, 9, 10, 8, 15, 0, tzone),
            end_time=datetime(2014, 10, 9, 10, 15, 15, 0, tzone),
            user_ip="127.0.0.1", status="completed", passed=True,
            marks_obtained=0.5
        )
        self.trial_answerpaper.answers.add(self.new_answer1)
        self.trial_answerpaper.questions_answered.add(self.question)
        self.trial_answerpaper.questions.add(self.question)

    def tearDown(self):
        # NOTE(review): student/course/trial objects are not deleted here;
        # TestCase transaction rollback presumably covers them — confirm.
        self.client.logout()
        self.user.delete()
        self.quiz.delete()
        self.question_paper.delete()
        self.answerpaper.delete()
        self.new_answer.delete()

    def test_moderator_dashboard_denies_student(self):
        """
        Check moderator dashboard denies student
        """
        self.client.login(
            username=self.student.username,
            password=self.student_plaintext_pass
        )
        response = self.client.get(reverse('nayan:manage'),
            follow=True
        )
        self.assertEqual(response.status_code, 200)
        self.assertRedirects(response, '/exam/quizzes/')

    def test_moderator_dashboard_get_for_user_without_profile(self):
        """
        If no profile exists, the editprofile.html template (a blank
        profile form) is rendered.
        """
        self.client.login(
            username=self.mod_no_profile.username,
            password=self.mod_no_profile_plaintext_pass
        )
        response = self.client.get(reverse('nayan:quizlist_user'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'nayan/editprofile.html')

    def test_moderator_dashboard_get_for_user_with_profile(self):
        """
        If a profile exists, the quizzes_user.html template is rendered.
        """
        self.client.login(
            username=self.user.username,
            password=self.user_plaintext_pass
        )
        response = self.client.get(reverse('nayan:quizlist_user'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'nayan/quizzes_user.html')

    def test_moderator_dashboard_get_all_quizzes(self):
        """
        Check moderator dashboard to get all the moderator created quizzes
        """
        self.client.login(
            username=self.user.username,
            password=self.user_plaintext_pass
        )
        response = self.client.get(reverse('nayan:manage'),
            follow=True
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "nayan/moderator_dashboard.html")
        self.assertEqual(response.context['trial_paper'][0], self.trial_answerpaper)
        # users_per_paper entries are (paper, answer_papers, passed, failed).
        paper, answer_papers, users_passed, users_failed =\
            response.context['users_per_paper'][0]
        self.assertEqual(paper, self.question_paper)
        self.assertEqual(answer_papers[0], self.answerpaper)
        self.assertEqual(users_passed, 1)
        self.assertEqual(users_failed, 0)

    def test_moderator_dashboard_delete_trial_papers(self):
        """
        Check moderator dashboard to delete trial papers
        """
        self.client.login(
            username=self.user.username,
            password=self.user_plaintext_pass
        )
        # Mark the course as trial so its papers qualify for deletion.
        self.course.is_trial=True
        self.course.save()
        response = self.client.post(reverse('nayan:manage'),
            data={'delete_paper': [self.trial_answerpaper.id]}
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "nayan/moderator_dashboard.html")
        # Deleting the paper should cascade to its quiz and course.
        updated_answerpaper = AnswerPaper.objects.filter(user=self.user)
        updated_quiz = Quiz.objects.filter(
            description=self.trial_question_paper.quiz.description
        )
        updated_course = Course.objects.filter(
            name=self.trial_question_paper.quiz.course.name)
        self.assertSequenceEqual(updated_answerpaper, [])
        self.assertSequenceEqual(updated_quiz, [])
        self.assertSequenceEqual(updated_course, [])
class TestUserLogin(TestCase):
    """Tests for the login view, including the email-verification decorator."""

    def setUp(self):
        self.client = Client()
        # A moderator account with a profile.
        self.user1_plaintext_pass = 'demo1'
        self.user1 = User.objects.create_user(
            username='demo_user1',
            password=self.user1_plaintext_pass,
            first_name='user1_first_name',
            last_name='user1_last_name',
            email='demo@test.com',
        )
        Profile.objects.create(
            user=self.user1, roll_number=10, institute='IIT',
            department='Chemical', position='Moderator', timezone='UTC',
        )

    def tearDown(self):
        self.client.logout()
        # Restore the development flag toggled by the decorator test.
        settings.IS_DEVELOPMENT = True
        self.user1.delete()

    def test_successful_user_login(self):
        """Valid credentials log the user in and redirect to the quiz list."""
        response = self.client.post(
            reverse('nayan:login'),
            data={'username': self.user1.username,
                  'password': self.user1_plaintext_pass},
        )
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(response, '/exam/quizzes/')

    def test_unsuccessful_user_login(self):
        """An incorrect password re-renders the login page."""
        response = self.client.post(
            reverse('nayan:login'),
            data={'username': self.user1.username,
                  'password': "demo"},
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'nayan/login.html')

    def test_email_verified_decorator_for_user_login(self):
        """Outside development mode the activation-status page is shown."""
        settings.IS_DEVELOPMENT = False
        response = self.client.post(
            reverse('nayan:login'),
            data={'username': self.user1.username,
                  'password': self.user1_plaintext_pass},
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "nayan/activation_status.html")
class TestDownloadcsv(TestCase):
    """Tests for the quiz/course CSV download views: access control and
    the Content-Disposition of the generated files."""

    def setUp(self):
        self.client = Client()
        tzone = pytz.timezone("utc")
        self.mod_group = Group.objects.create(name='moderator')
        # student
        self.student_plaintext_pass = 'student'
        self.student = User.objects.create_user(
            username='student',
            password=self.student_plaintext_pass,
            first_name='first_name',
            last_name='last_name',
            email='student@test.com'
        )
        Profile.objects.create(
            user=self.student,
            roll_number=10,
            institute='IIT',
            department='Chemical',
            position='student',
            timezone='UTC'
        )
        # moderator
        self.user_plaintext_pass = 'demo'
        self.user = User.objects.create_user(
            username='demo_user',
            password=self.user_plaintext_pass,
            first_name='user_first_name',
            last_name='user_last_name',
            email='demo@test.com'
        )
        Profile.objects.create(
            user=self.user,
            roll_number=10,
            institute='IIT',
            department='Chemical',
            position='Moderator',
            timezone='UTC'
        )
        self.mod_group.user_set.add(self.user)
        self.course = Course.objects.create(name="Python Course",
            enrollment="Enroll Request", creator=self.user)
        self.course.students.add(self.student)
        self.quiz = Quiz.objects.create(
            start_date_time=datetime(2014, 10, 9, 10, 8, 15, 0, tzone),
            end_date_time=datetime(2015, 10, 9, 10, 8, 15, 0, tzone),
            duration=30, active=True, instructions="Demo Instructions",
            attempts_allowed=-1, time_between_attempts=0,
            description='demo quiz', pass_criteria=40,
            language='Python', course=self.course
        )
        self.question = Question.objects.create(
            summary="Test_question", description="Add two numbers",
            points=1.0, language="python", type="code", user=self.user
        )
        self.question_paper = QuestionPaper.objects.create(quiz=self.quiz,
            total_marks=1.0, fixed_question_order=str(self.question.id)
        )
        self.question_paper.fixed_questions.add(self.question)
        # student answerpaper
        user_answer = "def add(a, b)\n\treturn a+b"
        self.new_answer = Answer(question=self.question, answer=user_answer,
                                 correct=True, error=json.dumps([]), marks=0.5)
        self.new_answer.save()
        self.answerpaper = AnswerPaper.objects.create(
            user=self.student, question_paper=self.question_paper,
            attempt_number=1,
            start_time=datetime(2014, 10, 9, 10, 8, 15, 0, tzone),
            end_time=datetime(2014, 10, 9, 10, 15, 15, 0, tzone),
            user_ip="127.0.0.1", status="completed", passed=True,
            marks_obtained=0.5
        )
        self.answerpaper.answers.add(self.new_answer)
        self.answerpaper.questions_answered.add(self.question)
        self.answerpaper.questions.add(self.question)

    def tearDown(self):
        self.client.logout()
        self.user.delete()
        self.student.delete()
        self.quiz.delete()
        self.course.delete()

    def test_download_csv_denies_student(self):
        """
        Check download csv denies student
        """
        self.client.login(
            username=self.student.username,
            password=self.student_plaintext_pass
        )
        response = self.client.get(reverse('nayan:download_quiz_csv',
            kwargs={"course_id": self.course.id,
                    "quiz_id": self.quiz.id}),
            follow=True
        )
        self.assertEqual(response.status_code, 404)

    def test_download_course_csv_denies_student(self):
        """
        Check download course csv denies student
        """
        self.client.login(
            username=self.student.username,
            password=self.student_plaintext_pass
        )
        response = self.client.get(reverse('nayan:download_course_csv',
            kwargs={"course_id": self.course.id}),
            follow=True
        )
        self.assertEqual(response.status_code, 404)

    def test_download_csv_denies_non_course_creator(self):
        """
        Check download csv denies non course creator
        """
        # Promote the student to moderator; still not the course creator.
        self.mod_group.user_set.add(self.student)
        self.client.login(
            username=self.student.username,
            password=self.student_plaintext_pass
        )
        response = self.client.get(reverse('nayan:download_quiz_csv',
            kwargs={"course_id": self.course.id,
                    "quiz_id": self.quiz.id}),
            follow=True
        )
        self.assertEqual(response.status_code, 404)

    def test_download_course_csv_denies_non_course_creator(self):
        """
        Check download course csv denies non course creator
        """
        # Promote the student to moderator; still not the course creator.
        self.mod_group.user_set.add(self.student)
        self.client.login(
            username=self.student.username,
            password=self.student_plaintext_pass
        )
        response = self.client.get(reverse('nayan:download_course_csv',
            kwargs={"course_id": self.course.id}),
            follow=True
        )
        self.assertEqual(response.status_code, 404)

    def test_download_course_csv(self):
        """
        Check for csv result of a course
        """
        self.client.login(
            username=self.user.username,
            password=self.user_plaintext_pass
        )
        response = self.client.get(reverse('nayan:download_course_csv',
            kwargs={'course_id': self.course.id}),
            follow=True
        )
        # Filename is the lowercased course name.
        file_name = "{0}.csv".format(self.course.name.lower())
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.get('Content-Disposition'),
                         'attachment; filename="{0}"'.format(file_name))

    def test_download_quiz_csv(self):
        """
        Check for csv result of a quiz
        """
        self.client.login(
            username=self.user.username,
            password=self.user_plaintext_pass
        )
        response = self.client.get(reverse('nayan:download_quiz_csv',
            kwargs={"course_id": self.course.id,
                    "quiz_id": self.quiz.id}),
            follow=True
        )
        # Filename combines course, quiz description and attempt number,
        # with dots stripped.
        file_name = "{0}-{1}-attempt{2}.csv".format(self.course.name.replace('.', ''),
            self.quiz.description.replace('.', ''), 1)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.get('Content-Disposition'),
                         'attachment; filename="{0}"'.format(file_name))
class TestShowQuestions(TestCase):
    """Tests for the show_questions view: listing, downloading/uploading
    question zips, trial attempts, and the AJAX question filter."""

    def setUp(self):
        self.client = Client()
        tzone = pytz.timezone("utc")
        self.mod_group = Group.objects.create(name='moderator')
        # student
        self.student_plaintext_pass = 'student'
        self.student = User.objects.create_user(
            username='student',
            password=self.student_plaintext_pass,
            first_name='first_name',
            last_name='last_name',
            email='student@test.com'
        )
        Profile.objects.create(
            user=self.student,
            roll_number=10,
            institute='IIT',
            department='Chemical',
            position='student',
            timezone='UTC'
        )
        # moderator
        self.user_plaintext_pass = 'demo'
        self.user = User.objects.create_user(
            username='demo_user',
            password=self.user_plaintext_pass,
            first_name='user_first_name',
            last_name='user_last_name',
            email='demo@test.com'
        )
        Profile.objects.create(
            user=self.user,
            roll_number=10,
            institute='IIT',
            department='Chemical',
            position='Moderator',
            timezone='UTC'
        )
        self.mod_group.user_set.add(self.user)
        # Two questions: one code question, one mcq (used by the filter test).
        self.question = Question.objects.create(
            summary="Test_question1", description="Add two numbers",
            points=2.0, language="python", type="code", user=self.user,
            active=True
        )
        self.question1 = Question.objects.create(
            summary="Test_question2", description="Add two numbers",
            points=1.0, language="python", type="mcq", user=self.user,
            active=True
        )

    def test_show_questions_denies_student(self):
        """
        Check show questions denies student
        """
        self.client.login(
            username=self.student.username,
            password=self.student_plaintext_pass
        )
        response = self.client.get(reverse('nayan:show_questions'),
            follow=True
        )
        self.assertEqual(response.status_code, 404)

    def test_show_all_questions(self):
        """
        Check if all the user created questions are shown
        """
        self.client.login(
            username=self.user.username,
            password=self.user_plaintext_pass
        )
        response = self.client.get(reverse('nayan:show_questions'),
            follow=True
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'nayan/showquestions.html')
        self.assertEqual(response.context['questions'][0], self.question)

    def test_download_questions(self):
        """
        Check for downloading questions zip file
        """
        self.client.login(
            username=self.user.username,
            password=self.user_plaintext_pass
        )
        response = self.client.post(reverse('nayan:show_questions'),
            data={'question': [self.question.id],
                  'download': 'download'}
        )
        file_name = "{0}_questions.zip".format(self.user)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.get('Content-Disposition'),
                         "attachment; filename={0}".format(file_name))
        # The response body should be a valid zip containing the dump.
        zip_file = string_io(response.content)
        zipped_file = zipfile.ZipFile(zip_file, 'r')
        self.assertIsNone(zipped_file.testzip())
        self.assertIn('questions_dump.yaml', zipped_file.namelist())
        zip_file.close()
        zipped_file.close()
        # Downloading with no questions selected re-renders with a message.
        response = self.client.post(reverse('nayan:show_questions'),
            data={'question': [],
                  'download': 'download'}
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'nayan/showquestions.html')
        self.assertIn("download", response.context['msg'])

    def test_upload_questions(self):
        """
        Check for uploading questions zip file
        """
        self.client.login(
            username=self.user.username,
            password=self.user_plaintext_pass
        )
        ques_file = os.path.join(FIXTURES_DIR_PATH, "demo_questions.zip")
        f = open(ques_file, 'rb')
        questions_file = SimpleUploadedFile(ques_file, f.read(),
                                            content_type="application/zip")
        response = self.client.post(reverse('nayan:show_questions'),
            data={'file': questions_file,
                  'upload': 'upload'}
        )
        # Summaries of the nine questions inside the fixture zip.
        summaries = ['Roots of quadratic equation', 'Print Output',
                     'Adding decimals', 'For Loop over String',
                     'Hello World in File', 'Extract columns from files',
                     'Check Palindrome', 'Add 3 numbers', 'Reverse a string'
                     ]
        uploaded_ques = Question.objects.filter(active=True,
                                                summary__in=summaries,
                                                user=self.user).count()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'nayan/showquestions.html')
        self.assertEqual(uploaded_ques, 9)
        f.close()
        # A non-zip upload is rejected with a message.
        dummy_file = SimpleUploadedFile("test.txt", b"test")
        response = self.client.post(reverse('nayan:show_questions'),
            data={'file': dummy_file,
                  'upload': 'upload'}
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'nayan/showquestions.html')
        self.assertIn("ZIP file", response.context['message'])

    def test_attempt_questions(self):
        """
        Check for testing questions
        """
        self.client.login(
            username=self.user.username,
            password=self.user_plaintext_pass
        )
        response = self.client.post(reverse('nayan:show_questions'),
            data={'question': [self.question.id],
                  'test': 'test'}
        )
        # Testing questions creates a trial quiz/paper and redirects into it.
        trial_que_paper = QuestionPaper.objects.get(
            quiz__description="trial_questions"
        )
        redirection_url = "/exam/start/1/{}".format(trial_que_paper.id)
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(response, redirection_url, target_status_code=301)

    def test_ajax_questions_filter(self):
        """
        Check for filter questions based type, marks and
        language of a question
        """
        self.client.login(
            username=self.user.username,
            password=self.user_plaintext_pass
        )
        response = self.client.post(reverse('nayan:questions_filter'),
            data={'question_type': 'mcq',
                  'marks': '1.0', 'language': 'python'
                  }
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'nayan/ajax_question_filter.html')
        self.assertEqual(response.context['questions'][0], self.question1)
class TestShowStatistics(TestCase):
    """Tests for the show_statistics view: access control, the attempt
    list, and per-attempt question statistics."""

    def setUp(self):
        self.client = Client()
        self.mod_group = Group.objects.create(name='moderator')
        tzone = pytz.timezone('UTC')
        # Create Moderator with profile
        self.user_plaintext_pass = 'demo'
        self.user = User.objects.create_user(
            username='demo_user',
            password=self.user_plaintext_pass,
            first_name='first_name',
            last_name='last_name',
            email='demo@test.com'
        )
        Profile.objects.create(
            user=self.user,
            roll_number=10,
            institute='IIT',
            department='Chemical',
            position='Moderator',
            timezone='UTC'
        )
        # Create Student
        self.student_plaintext_pass = 'demo_student'
        self.student = User.objects.create_user(
            username='demo_student',
            password=self.student_plaintext_pass,
            first_name='student_first_name',
            last_name='student_last_name',
            email='demo_student@test.com'
        )
        Profile.objects.create(
            user=self.student,
            roll_number=10,
            institute='IIT',
            department='Chemical',
            position='Moderator',
            timezone='UTC'
        )
        # Add to moderator group
        self.mod_group.user_set.add(self.user)
        self.course = Course.objects.create(name="Python Course",
            enrollment="Open Enrollment", creator=self.user)
        self.quiz = Quiz.objects.create(
            start_date_time=datetime(2014, 10, 9, 10, 8, 15, 0, tzone),
            end_date_time=datetime(2015, 10, 9, 10, 8, 15, 0, tzone),
            duration=30, active=True, instructions="Demo Instructions",
            attempts_allowed=-1, time_between_attempts=0,
            description='demo quiz', pass_criteria=40,
            language='Python', course=self.course
        )
        self.question = Question.objects.create(
            summary="Test_question", description="Add two numbers",
            points=1.0, language="python", type="code", user=self.user
        )
        # NOTE(review): other fixtures in this file pass
        # str(self.question.id) here; str(self.question) looks
        # unintended — confirm against QuestionPaper's expectations.
        self.question_paper = QuestionPaper.objects.create(quiz=self.quiz,
            total_marks=1.0, fixed_question_order=str(self.question)
        )
        self.question_paper.fixed_questions.add(self.question)
        user_answer = "def add(a, b)\n\treturn a+b"
        self.new_answer = Answer(question=self.question, answer=user_answer,
                                 correct=True, error=json.dumps([]))
        self.new_answer.save()
        self.answerpaper = AnswerPaper.objects.create(
            user=self.student, question_paper=self.question_paper,
            attempt_number=1,
            start_time=datetime(2014, 10, 9, 10, 8, 15, 0, tzone),
            end_time=datetime(2014, 10, 9, 10, 15, 15, 0, tzone),
            user_ip="127.0.0.1", status="completed", passed=True,
            percent=1, marks_obtained=1
        )
        self.answerpaper.answers.add(self.new_answer)
        self.answerpaper.questions_answered.add(self.question)
        self.answerpaper.questions.add(self.question)

    def tearDown(self):
        self.client.logout()
        self.user.delete()
        self.student.delete()
        self.quiz.delete()
        self.course.delete()
        self.answerpaper.delete()
        self.question.delete()
        self.question_paper.delete()
        self.new_answer.delete()

    def test_show_statistics_denies_student(self):
        """
        Check show statistics denies student
        """
        self.client.login(
            username=self.student.username,
            password=self.student_plaintext_pass
        )
        response = self.client.get(reverse('nayan:show_statistics',
            kwargs={"questionpaper_id": self.question_paper.id}),
            follow=True
        )
        self.assertEqual(response.status_code, 404)

    def test_show_statistics_for_student(self):
        """
        Check for student statistics
        """
        self.client.login(
            username=self.user.username,
            password=self.user_plaintext_pass
        )
        response = self.client.get(reverse('nayan:show_statistics',
            kwargs={'questionpaper_id': self.question_paper.id}),
            follow=True
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'nayan/statistics_question.html')
        self.assertEqual(response.context['quiz'], self.quiz)
        self.assertEqual(response.context['attempts'][0],
                         self.answerpaper.attempt_number)
        self.assertEqual(response.context['questionpaper_id'],
                         str(self.question_paper.id))

    def test_show_statistics_for_student_per_attempt(self):
        """
        Check for student statistics per attempt
        """
        self.client.login(
            username=self.user.username,
            password=self.user_plaintext_pass
        )
        response = self.client.get(reverse('nayan:show_statistics',
            kwargs={'questionpaper_id': self.question_paper.id,
                    'attempt_number': self.answerpaper.attempt_number}),
            follow=True
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'nayan/statistics_question.html')
        # question_stats maps each question to [answered_count, total_count].
        self.assertSequenceEqual(response.context['question_stats'][self.question],
                                 [1, 1])
        self.assertEqual(response.context['attempts'][0], 1)
        self.assertEqual(response.context['total'], 1)
class TestQuestionPaper(TestCase):
    """Verify that re-submitting an answer to the same question updates
    the answer paper's ``marks_obtained`` correctly, in both directions
    (wrong-then-right and right-then-wrong), for every objective
    question type: MCQ, MCC, integer, string and float.
    """

    def setUp(self):
        self.client = Client()
        self.mod_group = Group.objects.create(name='moderator')
        tzone = pytz.timezone('UTC')
        # Create Moderator with profile
        self.user_plaintext_pass = 'demo'
        self.user = User.objects.create_user(
            username='demo_user',
            password=self.user_plaintext_pass,
            first_name='first_name',
            last_name='last_name',
            email='demo@test.com'
        )
        Profile.objects.create(
            user=self.user,
            roll_number=10,
            institute='IIT',
            department='Chemical',
            position='Moderator',
            timezone='UTC'
        )
        # Add to moderator group
        self.mod_group.user_set.add(self.user)
        self.course = Course.objects.create(
            name="Python Course",
            enrollment="Open Enrollment", creator=self.user)
        self.quiz = Quiz.objects.create(
            start_date_time=datetime(2014, 10, 9, 10, 8, 15, 0, tzone),
            end_date_time=datetime(2015, 10, 9, 10, 8, 15, 0, tzone),
            duration=30, active=True, instructions="Demo Instructions",
            attempts_allowed=-1, time_between_attempts=0,
            description='demo quiz', pass_criteria=40,
            language='Python', course=self.course
        )
        # Mcq Question
        self.question_mcq = Question.objects.create(
            summary="Test_mcq_question", description="Test MCQ",
            points=1.0, language="python", type="mcq", user=self.user
        )
        self.mcq_based_testcase = McqTestCase(
            options="a",
            question=self.question_mcq,
            correct=True,
            type='mcqtestcase'
        )
        self.mcq_based_testcase.save()
        # Mcc Question
        # NOTE(review): created with type="mcq" even though it is used as
        # the MCC fixture — confirm whether type="mcc" was intended.
        self.question_mcc = Question.objects.create(
            summary="Test_mcc_question", description="Test MCC",
            points=1.0, language="python", type="mcq", user=self.user
        )
        self.mcc_based_testcase = McqTestCase(
            options="a",
            question=self.question_mcc,
            correct=True,
            type='mcqtestcase'
        )
        self.mcc_based_testcase.save()
        # Integer Question (summary/description fixed — they were
        # copy-pasted from the MCC question above)
        self.question_int = Question.objects.create(
            summary="Test_int_question", description="Test Integer",
            points=1.0, language="python", type="integer", user=self.user
        )
        self.int_based_testcase = IntegerTestCase(
            correct=1,
            question=self.question_int,
            type='integertestcase'
        )
        self.int_based_testcase.save()
        # String Question
        self.question_str = Question.objects.create(
            summary="Test_string_question", description="Test String",
            points=1.0, language="python", type="string", user=self.user
        )
        self.str_based_testcase = StringTestCase(
            correct="abc",
            string_check="lower",
            question=self.question_str,
            type='stringtestcase'
        )
        self.str_based_testcase.save()
        # Float Question
        self.question_float = Question.objects.create(
            summary="Test_float_question", description="Test Float",
            points=1.0, language="python", type="float", user=self.user
        )
        self.float_based_testcase = FloatTestCase(
            correct=2.0,
            error_margin=0,
            question=self.question_float,
            type='floattestcase'
        )
        self.float_based_testcase.save()
        questions_list = [self.question_mcq, self.question_mcc,
                          self.question_int, self.question_str,
                          self.question_float]
        # Bug fix: the order string was previously built by plain string
        # concatenation, fusing the ids together with no separator, and
        # question_str's id was never appended at all.  Build a
        # comma-separated list of every fixed question id instead.
        ordered_questions = ",".join(str(q.id) for q in questions_list)
        self.question_paper = QuestionPaper.objects.create(
            quiz=self.quiz,
            total_marks=5.0, fixed_question_order=ordered_questions
        )
        self.question_paper.fixed_questions.add(*questions_list)
        self.answerpaper = AnswerPaper.objects.create(
            user=self.user, question_paper=self.question_paper,
            attempt_number=1,
            start_time=datetime(2014, 10, 9, 10, 8, 15, 0, tzone),
            end_time=datetime(2014, 10, 9, 10, 15, 15, 0, tzone),
            user_ip="127.0.0.1", status="inprogress", passed=False,
            percent=0, marks_obtained=0
        )
        self.answerpaper.questions.add(*questions_list)

    def tearDown(self):
        self.client.logout()
        self.user.delete()
        self.quiz.delete()
        self.course.delete()
        self.answerpaper.delete()
        self.question_mcq.delete()
        self.question_mcc.delete()
        self.question_int.delete()
        # Bug fix: question_str, question_float and mod_group were
        # previously never deleted, leaking rows between tests.
        self.question_str.delete()
        self.question_float.delete()
        self.question_paper.delete()
        self.mod_group.delete()

    # ------------------------------------------------------------------
    # Private helpers shared by the resubmission tests below.
    # ------------------------------------------------------------------

    def _login_moderator(self):
        """Log the moderator fixture in through the test client."""
        self.client.login(
            username=self.user.username,
            password=self.user_plaintext_pass
        )

    def _submit_answer(self, question, answer):
        """POST *answer* for *question* to the check view (attempt 1)."""
        self.client.post(
            reverse('nayan:check',
                    kwargs={"q_id": question.id, "attempt_num": 1,
                            "questionpaper_id": self.question_paper.id}),
            data={"answer": answer}
        )

    def _paper_marks(self):
        """Reload the answer paper from the DB and return marks_obtained."""
        return AnswerPaper.objects.get(id=self.answerpaper.id).marks_obtained

    def _assert_resubmission_marks(self, question, first_answer, first_marks,
                                   second_answer, second_marks):
        """Submit two answers in sequence; check marks after each POST."""
        self._login_moderator()
        # Given / When: first submission
        self._submit_answer(question, first_answer)
        # Then
        self.assertEqual(self._paper_marks(), first_marks)
        # Given / When: resubmission of the same question
        self._submit_answer(question, second_answer)
        # Then
        self.assertEqual(self._paper_marks(), second_marks)

    # ------------------------------------------------------------------
    # Tests
    # ------------------------------------------------------------------

    def test_mcq_attempt_right_after_wrong(self):
        """ Case:- Check if answerpaper and answer marks are updated after
        attempting same mcq question with wrong answer and then right
        answer
        """
        self._assert_resubmission_marks(
            self.question_mcq,
            "25", 0,
            str(self.mcq_based_testcase.id), 1
        )

    def test_mcq_question_attempt_wrong_after_right(self):
        """ Case:- Check if answerpaper and answer marks are updated after
        attempting same mcq question with right answer and then wrong
        answer
        """
        self._assert_resubmission_marks(
            self.question_mcq,
            str(self.mcq_based_testcase.id), 1,
            "25", 0
        )

    def test_mcc_question_attempt_wrong_after_right(self):
        """ Case:- Check if answerpaper and answer marks are updated after
        attempting same mcc question with right answer and then wrong
        answer
        """
        self._assert_resubmission_marks(
            self.question_mcc,
            str(self.mcc_based_testcase.id), 1,
            "b", 0
        )

    def test_integer_question_attempt_wrong_after_right(self):
        """ Case:- Check if answerpaper and answer marks are updated after
        attempting same integer question with right answer and then wrong
        answer
        """
        self._assert_resubmission_marks(
            self.question_int,
            1, 1,
            -1, 0
        )

    def test_string_question_attempt_wrong_after_right(self):
        """ Case:- Check if answerpaper and answer marks are updated after
        attempting same string question with right answer and then wrong
        answer
        """
        self._assert_resubmission_marks(
            self.question_str,
            "abc", 1,
            "c", 0
        )

    def test_float_question_attempt_wrong_after_right(self):
        """ Case:- Check if answerpaper and answer marks are updated after
        attempting same float question with right answer and then wrong
        answer
        """
        self._assert_resubmission_marks(
            self.question_float,
            2.0, 1,
            -1, 0
        )
| 36.986626
| 99
| 0.583157
| 15,419
| 146,578
| 5.350736
| 0.032882
| 0.031393
| 0.035684
| 0.026205
| 0.864369
| 0.827861
| 0.804637
| 0.784432
| 0.758348
| 0.748046
| 0
| 0.018357
| 0.312073
| 146,578
| 3,962
| 100
| 36.995962
| 0.799841
| 0.050294
| 0
| 0.674006
| 0
| 0
| 0.111667
| 0.021048
| 0
| 0
| 0
| 0
| 0.110089
| 1
| 0.052251
| false
| 0.079527
| 0.006901
| 0
| 0.06671
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
d2a29e9e8729d5d5bea4327d668e0e4ac230094b
| 3,700
|
py
|
Python
|
robot_fan/interval_test.py
|
thewizardplusplus/robot-fan
|
4ae883e53c4c585bc1ea5012738f6cf26f626d0e
|
[
"MIT"
] | 1
|
2021-08-30T21:27:36.000Z
|
2021-08-30T21:27:36.000Z
|
robot_fan/interval_test.py
|
thewizardplusplus/robot-fan
|
4ae883e53c4c585bc1ea5012738f6cf26f626d0e
|
[
"MIT"
] | null | null | null |
robot_fan/interval_test.py
|
thewizardplusplus/robot-fan
|
4ae883e53c4c585bc1ea5012738f6cf26f626d0e
|
[
"MIT"
] | null | null | null |
import unittest
from robot_fan.interval import Interval
class TestInterval(unittest.TestCase):
    """Behavioural tests for robot_fan.interval.Interval.

    Covers construction (in either bound order), length, and the
    proportion<->value conversions, including out-of-range errors.
    """

    def test_init_success_with_maximum_greater_than_minimum(self):
        bounds = Interval(23, 42)
        self.assertEqual((bounds.minimum, bounds.maximum), (23, 42))

    def test_init_success_with_maximum_less_than_minimum(self):
        bounds = Interval(42, 23)
        self.assertEqual((bounds.minimum, bounds.maximum), (42, 23))

    def test_init_success_with_same_values(self):
        bounds = Interval(23, 23)
        self.assertEqual((bounds.minimum, bounds.maximum), (23, 23))

    def test_len_with_maximum_greater_than_minimum(self):
        self.assertEqual(len(Interval(23, 42)), 19)

    def test_len_with_maximum_less_than_minimum(self):
        self.assertEqual(len(Interval(42, 23)), 19)

    def test_len_with_same_values(self):
        self.assertEqual(len(Interval(23, 23)), 0)

    def test_get_proportion_by_value_success_with_maximum_greater_than_minimum(self):
        result = Interval(23, 42).get_proportion_by_value(37.25)
        self.assertAlmostEqual(result, 0.75)

    def test_get_proportion_by_value_success_with_maximum_less_than_minimum(self):
        result = Interval(42, 23).get_proportion_by_value(37.25)
        self.assertAlmostEqual(result, 0.25)

    def test_get_proportion_by_value_error_with_too_small_value_and_maximum_greater_than_minimum(self):
        with self.assertRaisesRegex(RuntimeError,
                                    "the value is out of the interval"):
            Interval(23, 42).get_proportion_by_value(-100)

    def test_get_proportion_by_value_error_with_too_small_value_and_maximum_less_than_minimum(self):
        with self.assertRaisesRegex(RuntimeError,
                                    "the value is out of the interval"):
            Interval(42, 23).get_proportion_by_value(-100)

    def test_get_proportion_by_value_error_with_too_great_value_and_maximum_greater_than_minimum(self):
        with self.assertRaisesRegex(RuntimeError,
                                    "the value is out of the interval"):
            Interval(23, 42).get_proportion_by_value(100)

    def test_get_proportion_by_value_error_with_too_great_value_and_maximum_less_than_minimum(self):
        with self.assertRaisesRegex(RuntimeError,
                                    "the value is out of the interval"):
            Interval(42, 23).get_proportion_by_value(100)

    def test_get_value_by_proportion_success(self):
        result = Interval(23, 42).get_value_by_proportion(0.75)
        self.assertAlmostEqual(result, 37.25)

    def test_get_value_by_proportion_error_with_too_small_proportion(self):
        with self.assertRaisesRegex(RuntimeError,
                                    "the proportion is incorrect"):
            Interval(23, 42).get_value_by_proportion(-2)

    def test_get_value_by_proportion_error_with_too_great_proportion(self):
        with self.assertRaisesRegex(RuntimeError,
                                    "the proportion is incorrect"):
            Interval(23, 42).get_value_by_proportion(2)
| 38.947368
| 103
| 0.726216
| 467
| 3,700
| 5.368308
| 0.111349
| 0.041883
| 0.071799
| 0.095732
| 0.942162
| 0.903869
| 0.888313
| 0.869166
| 0.841245
| 0.716793
| 0
| 0.038005
| 0.203514
| 3,700
| 94
| 104
| 39.361702
| 0.812691
| 0
| 0
| 0.608696
| 0
| 0
| 0.049189
| 0
| 0
| 0
| 0
| 0
| 0.26087
| 1
| 0.217391
| false
| 0
| 0.028986
| 0
| 0.26087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d2a7c798ca6796735ae28b80f2fbcd74567fc1e2
| 27,780
|
py
|
Python
|
cwl-wdl/WdlV1_1ParserListener.py
|
Anu-123-gif/wdl-cwl-translator
|
af9ff9ef217dec1bb2352ae99048871027542aeb
|
[
"Apache-2.0"
] | null | null | null |
cwl-wdl/WdlV1_1ParserListener.py
|
Anu-123-gif/wdl-cwl-translator
|
af9ff9ef217dec1bb2352ae99048871027542aeb
|
[
"Apache-2.0"
] | null | null | null |
cwl-wdl/WdlV1_1ParserListener.py
|
Anu-123-gif/wdl-cwl-translator
|
af9ff9ef217dec1bb2352ae99048871027542aeb
|
[
"Apache-2.0"
] | null | null | null |
# Generated from WdlV1_1Parser.g4 by ANTLR 4.9
from antlr4 import *
if __name__ is not None and "." in __name__:
from .WdlV1_1Parser import WdlV1_1Parser
else:
from WdlV1_1Parser import WdlV1_1Parser
# This class defines a complete listener for a parse tree produced by WdlV1_1Parser.
class WdlV1_1ParserListener(ParseTreeListener):
# Enter a parse tree produced by WdlV1_1Parser#map_type.
def enterMap_type(self, ctx:WdlV1_1Parser.Map_typeContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#map_type.
def exitMap_type(self, ctx:WdlV1_1Parser.Map_typeContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#array_type.
def enterArray_type(self, ctx:WdlV1_1Parser.Array_typeContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#array_type.
def exitArray_type(self, ctx:WdlV1_1Parser.Array_typeContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#pair_type.
def enterPair_type(self, ctx:WdlV1_1Parser.Pair_typeContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#pair_type.
def exitPair_type(self, ctx:WdlV1_1Parser.Pair_typeContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#type_base.
def enterType_base(self, ctx:WdlV1_1Parser.Type_baseContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#type_base.
def exitType_base(self, ctx:WdlV1_1Parser.Type_baseContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#wdl_type.
def enterWdl_type(self, ctx:WdlV1_1Parser.Wdl_typeContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#wdl_type.
def exitWdl_type(self, ctx:WdlV1_1Parser.Wdl_typeContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#unbound_decls.
def enterUnbound_decls(self, ctx:WdlV1_1Parser.Unbound_declsContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#unbound_decls.
def exitUnbound_decls(self, ctx:WdlV1_1Parser.Unbound_declsContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#bound_decls.
def enterBound_decls(self, ctx:WdlV1_1Parser.Bound_declsContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#bound_decls.
def exitBound_decls(self, ctx:WdlV1_1Parser.Bound_declsContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#any_decls.
def enterAny_decls(self, ctx:WdlV1_1Parser.Any_declsContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#any_decls.
def exitAny_decls(self, ctx:WdlV1_1Parser.Any_declsContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#number.
def enterNumber(self, ctx:WdlV1_1Parser.NumberContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#number.
def exitNumber(self, ctx:WdlV1_1Parser.NumberContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#expression_placeholder_option.
def enterExpression_placeholder_option(self, ctx:WdlV1_1Parser.Expression_placeholder_optionContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#expression_placeholder_option.
def exitExpression_placeholder_option(self, ctx:WdlV1_1Parser.Expression_placeholder_optionContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#string_part.
def enterString_part(self, ctx:WdlV1_1Parser.String_partContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#string_part.
def exitString_part(self, ctx:WdlV1_1Parser.String_partContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#string_expr_part.
def enterString_expr_part(self, ctx:WdlV1_1Parser.String_expr_partContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#string_expr_part.
def exitString_expr_part(self, ctx:WdlV1_1Parser.String_expr_partContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#string_expr_with_string_part.
def enterString_expr_with_string_part(self, ctx:WdlV1_1Parser.String_expr_with_string_partContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#string_expr_with_string_part.
def exitString_expr_with_string_part(self, ctx:WdlV1_1Parser.String_expr_with_string_partContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#string.
def enterString(self, ctx:WdlV1_1Parser.StringContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#string.
def exitString(self, ctx:WdlV1_1Parser.StringContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#primitive_literal.
def enterPrimitive_literal(self, ctx:WdlV1_1Parser.Primitive_literalContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#primitive_literal.
def exitPrimitive_literal(self, ctx:WdlV1_1Parser.Primitive_literalContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#expr.
def enterExpr(self, ctx:WdlV1_1Parser.ExprContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#expr.
def exitExpr(self, ctx:WdlV1_1Parser.ExprContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#infix0.
def enterInfix0(self, ctx:WdlV1_1Parser.Infix0Context):
pass
# Exit a parse tree produced by WdlV1_1Parser#infix0.
def exitInfix0(self, ctx:WdlV1_1Parser.Infix0Context):
pass
# Enter a parse tree produced by WdlV1_1Parser#infix1.
def enterInfix1(self, ctx:WdlV1_1Parser.Infix1Context):
pass
# Exit a parse tree produced by WdlV1_1Parser#infix1.
def exitInfix1(self, ctx:WdlV1_1Parser.Infix1Context):
pass
# Enter a parse tree produced by WdlV1_1Parser#lor.
def enterLor(self, ctx:WdlV1_1Parser.LorContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#lor.
def exitLor(self, ctx:WdlV1_1Parser.LorContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#infix2.
def enterInfix2(self, ctx:WdlV1_1Parser.Infix2Context):
pass
# Exit a parse tree produced by WdlV1_1Parser#infix2.
def exitInfix2(self, ctx:WdlV1_1Parser.Infix2Context):
pass
# Enter a parse tree produced by WdlV1_1Parser#land.
def enterLand(self, ctx:WdlV1_1Parser.LandContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#land.
def exitLand(self, ctx:WdlV1_1Parser.LandContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#eqeq.
def enterEqeq(self, ctx:WdlV1_1Parser.EqeqContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#eqeq.
def exitEqeq(self, ctx:WdlV1_1Parser.EqeqContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#lt.
def enterLt(self, ctx:WdlV1_1Parser.LtContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#lt.
def exitLt(self, ctx:WdlV1_1Parser.LtContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#infix3.
def enterInfix3(self, ctx:WdlV1_1Parser.Infix3Context):
pass
# Exit a parse tree produced by WdlV1_1Parser#infix3.
def exitInfix3(self, ctx:WdlV1_1Parser.Infix3Context):
pass
# Enter a parse tree produced by WdlV1_1Parser#gte.
def enterGte(self, ctx:WdlV1_1Parser.GteContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#gte.
def exitGte(self, ctx:WdlV1_1Parser.GteContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#neq.
def enterNeq(self, ctx:WdlV1_1Parser.NeqContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#neq.
def exitNeq(self, ctx:WdlV1_1Parser.NeqContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#lte.
def enterLte(self, ctx:WdlV1_1Parser.LteContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#lte.
def exitLte(self, ctx:WdlV1_1Parser.LteContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#gt.
def enterGt(self, ctx:WdlV1_1Parser.GtContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#gt.
def exitGt(self, ctx:WdlV1_1Parser.GtContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#add.
def enterAdd(self, ctx:WdlV1_1Parser.AddContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#add.
def exitAdd(self, ctx:WdlV1_1Parser.AddContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#sub.
def enterSub(self, ctx:WdlV1_1Parser.SubContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#sub.
def exitSub(self, ctx:WdlV1_1Parser.SubContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#infix4.
def enterInfix4(self, ctx:WdlV1_1Parser.Infix4Context):
pass
# Exit a parse tree produced by WdlV1_1Parser#infix4.
def exitInfix4(self, ctx:WdlV1_1Parser.Infix4Context):
pass
# Enter a parse tree produced by WdlV1_1Parser#mod.
def enterMod(self, ctx:WdlV1_1Parser.ModContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#mod.
def exitMod(self, ctx:WdlV1_1Parser.ModContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#mul.
def enterMul(self, ctx:WdlV1_1Parser.MulContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#mul.
def exitMul(self, ctx:WdlV1_1Parser.MulContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#divide.
def enterDivide(self, ctx:WdlV1_1Parser.DivideContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#divide.
def exitDivide(self, ctx:WdlV1_1Parser.DivideContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#infix5.
def enterInfix5(self, ctx:WdlV1_1Parser.Infix5Context):
pass
# Exit a parse tree produced by WdlV1_1Parser#infix5.
def exitInfix5(self, ctx:WdlV1_1Parser.Infix5Context):
pass
# Enter a parse tree produced by WdlV1_1Parser#expr_infix5.
def enterExpr_infix5(self, ctx:WdlV1_1Parser.Expr_infix5Context):
pass
# Exit a parse tree produced by WdlV1_1Parser#expr_infix5.
def exitExpr_infix5(self, ctx:WdlV1_1Parser.Expr_infix5Context):
pass
# Enter a parse tree produced by WdlV1_1Parser#member.
def enterMember(self, ctx:WdlV1_1Parser.MemberContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#member.
def exitMember(self, ctx:WdlV1_1Parser.MemberContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#pair_literal.
def enterPair_literal(self, ctx:WdlV1_1Parser.Pair_literalContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#pair_literal.
def exitPair_literal(self, ctx:WdlV1_1Parser.Pair_literalContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#unarysigned.
def enterUnarysigned(self, ctx:WdlV1_1Parser.UnarysignedContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#unarysigned.
def exitUnarysigned(self, ctx:WdlV1_1Parser.UnarysignedContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#apply.
def enterApply(self, ctx:WdlV1_1Parser.ApplyContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#apply.
def exitApply(self, ctx:WdlV1_1Parser.ApplyContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#expression_group.
def enterExpression_group(self, ctx:WdlV1_1Parser.Expression_groupContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#expression_group.
def exitExpression_group(self, ctx:WdlV1_1Parser.Expression_groupContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#primitives.
def enterPrimitives(self, ctx:WdlV1_1Parser.PrimitivesContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#primitives.
def exitPrimitives(self, ctx:WdlV1_1Parser.PrimitivesContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#left_name.
def enterLeft_name(self, ctx:WdlV1_1Parser.Left_nameContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#left_name.
def exitLeft_name(self, ctx:WdlV1_1Parser.Left_nameContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#at.
def enterAt(self, ctx:WdlV1_1Parser.AtContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#at.
def exitAt(self, ctx:WdlV1_1Parser.AtContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#negate.
def enterNegate(self, ctx:WdlV1_1Parser.NegateContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#negate.
def exitNegate(self, ctx:WdlV1_1Parser.NegateContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#map_literal.
def enterMap_literal(self, ctx:WdlV1_1Parser.Map_literalContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#map_literal.
def exitMap_literal(self, ctx:WdlV1_1Parser.Map_literalContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#ifthenelse.
def enterIfthenelse(self, ctx:WdlV1_1Parser.IfthenelseContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#ifthenelse.
def exitIfthenelse(self, ctx:WdlV1_1Parser.IfthenelseContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#get_name.
def enterGet_name(self, ctx:WdlV1_1Parser.Get_nameContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#get_name.
def exitGet_name(self, ctx:WdlV1_1Parser.Get_nameContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#object_literal.
def enterObject_literal(self, ctx:WdlV1_1Parser.Object_literalContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#object_literal.
def exitObject_literal(self, ctx:WdlV1_1Parser.Object_literalContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#array_literal.
def enterArray_literal(self, ctx:WdlV1_1Parser.Array_literalContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#array_literal.
def exitArray_literal(self, ctx:WdlV1_1Parser.Array_literalContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#struct_literal.
def enterStruct_literal(self, ctx:WdlV1_1Parser.Struct_literalContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#struct_literal.
def exitStruct_literal(self, ctx:WdlV1_1Parser.Struct_literalContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#version.
def enterVersion(self, ctx:WdlV1_1Parser.VersionContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#version.
def exitVersion(self, ctx:WdlV1_1Parser.VersionContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#import_alias.
def enterImport_alias(self, ctx:WdlV1_1Parser.Import_aliasContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#import_alias.
def exitImport_alias(self, ctx:WdlV1_1Parser.Import_aliasContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#import_as.
def enterImport_as(self, ctx:WdlV1_1Parser.Import_asContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#import_as.
def exitImport_as(self, ctx:WdlV1_1Parser.Import_asContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#import_doc.
def enterImport_doc(self, ctx:WdlV1_1Parser.Import_docContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#import_doc.
def exitImport_doc(self, ctx:WdlV1_1Parser.Import_docContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#struct.
def enterStruct(self, ctx:WdlV1_1Parser.StructContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#struct.
def exitStruct(self, ctx:WdlV1_1Parser.StructContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#meta_value.
def enterMeta_value(self, ctx:WdlV1_1Parser.Meta_valueContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#meta_value.
def exitMeta_value(self, ctx:WdlV1_1Parser.Meta_valueContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#meta_string_part.
def enterMeta_string_part(self, ctx:WdlV1_1Parser.Meta_string_partContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#meta_string_part.
def exitMeta_string_part(self, ctx:WdlV1_1Parser.Meta_string_partContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#meta_string.
def enterMeta_string(self, ctx:WdlV1_1Parser.Meta_stringContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#meta_string.
def exitMeta_string(self, ctx:WdlV1_1Parser.Meta_stringContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#meta_array.
def enterMeta_array(self, ctx:WdlV1_1Parser.Meta_arrayContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#meta_array.
def exitMeta_array(self, ctx:WdlV1_1Parser.Meta_arrayContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#meta_object.
def enterMeta_object(self, ctx:WdlV1_1Parser.Meta_objectContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#meta_object.
def exitMeta_object(self, ctx:WdlV1_1Parser.Meta_objectContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#meta_object_kv.
def enterMeta_object_kv(self, ctx:WdlV1_1Parser.Meta_object_kvContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#meta_object_kv.
def exitMeta_object_kv(self, ctx:WdlV1_1Parser.Meta_object_kvContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#meta_kv.
def enterMeta_kv(self, ctx:WdlV1_1Parser.Meta_kvContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#meta_kv.
def exitMeta_kv(self, ctx:WdlV1_1Parser.Meta_kvContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#parameter_meta.
def enterParameter_meta(self, ctx:WdlV1_1Parser.Parameter_metaContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#parameter_meta.
def exitParameter_meta(self, ctx:WdlV1_1Parser.Parameter_metaContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#meta.
def enterMeta(self, ctx:WdlV1_1Parser.MetaContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#meta.
def exitMeta(self, ctx:WdlV1_1Parser.MetaContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#task_runtime_kv.
def enterTask_runtime_kv(self, ctx:WdlV1_1Parser.Task_runtime_kvContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#task_runtime_kv.
def exitTask_runtime_kv(self, ctx:WdlV1_1Parser.Task_runtime_kvContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#task_runtime.
def enterTask_runtime(self, ctx:WdlV1_1Parser.Task_runtimeContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#task_runtime.
def exitTask_runtime(self, ctx:WdlV1_1Parser.Task_runtimeContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#task_input.
def enterTask_input(self, ctx:WdlV1_1Parser.Task_inputContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#task_input.
def exitTask_input(self, ctx:WdlV1_1Parser.Task_inputContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#task_output.
def enterTask_output(self, ctx:WdlV1_1Parser.Task_outputContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#task_output.
def exitTask_output(self, ctx:WdlV1_1Parser.Task_outputContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#task_command_string_part.
def enterTask_command_string_part(self, ctx:WdlV1_1Parser.Task_command_string_partContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#task_command_string_part.
def exitTask_command_string_part(self, ctx:WdlV1_1Parser.Task_command_string_partContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#task_command_expr_part.
def enterTask_command_expr_part(self, ctx:WdlV1_1Parser.Task_command_expr_partContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#task_command_expr_part.
def exitTask_command_expr_part(self, ctx:WdlV1_1Parser.Task_command_expr_partContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#task_command_expr_with_string.
def enterTask_command_expr_with_string(self, ctx:WdlV1_1Parser.Task_command_expr_with_stringContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#task_command_expr_with_string.
def exitTask_command_expr_with_string(self, ctx:WdlV1_1Parser.Task_command_expr_with_stringContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#task_command.
def enterTask_command(self, ctx:WdlV1_1Parser.Task_commandContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#task_command.
def exitTask_command(self, ctx:WdlV1_1Parser.Task_commandContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#task_element.
def enterTask_element(self, ctx:WdlV1_1Parser.Task_elementContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#task_element.
def exitTask_element(self, ctx:WdlV1_1Parser.Task_elementContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#task.
def enterTask(self, ctx:WdlV1_1Parser.TaskContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#task.
def exitTask(self, ctx:WdlV1_1Parser.TaskContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#inner_workflow_element.
def enterInner_workflow_element(self, ctx:WdlV1_1Parser.Inner_workflow_elementContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#inner_workflow_element.
def exitInner_workflow_element(self, ctx:WdlV1_1Parser.Inner_workflow_elementContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#call_alias.
def enterCall_alias(self, ctx:WdlV1_1Parser.Call_aliasContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#call_alias.
def exitCall_alias(self, ctx:WdlV1_1Parser.Call_aliasContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#call_input.
def enterCall_input(self, ctx:WdlV1_1Parser.Call_inputContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#call_input.
def exitCall_input(self, ctx:WdlV1_1Parser.Call_inputContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#call_inputs.
def enterCall_inputs(self, ctx:WdlV1_1Parser.Call_inputsContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#call_inputs.
def exitCall_inputs(self, ctx:WdlV1_1Parser.Call_inputsContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#call_body.
def enterCall_body(self, ctx:WdlV1_1Parser.Call_bodyContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#call_body.
def exitCall_body(self, ctx:WdlV1_1Parser.Call_bodyContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#call_after.
def enterCall_after(self, ctx:WdlV1_1Parser.Call_afterContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#call_after.
def exitCall_after(self, ctx:WdlV1_1Parser.Call_afterContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#call_name.
def enterCall_name(self, ctx:WdlV1_1Parser.Call_nameContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#call_name.
def exitCall_name(self, ctx:WdlV1_1Parser.Call_nameContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#call.
def enterCall(self, ctx:WdlV1_1Parser.CallContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#call.
def exitCall(self, ctx:WdlV1_1Parser.CallContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#scatter.
def enterScatter(self, ctx:WdlV1_1Parser.ScatterContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#scatter.
def exitScatter(self, ctx:WdlV1_1Parser.ScatterContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#conditional.
def enterConditional(self, ctx:WdlV1_1Parser.ConditionalContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#conditional.
def exitConditional(self, ctx:WdlV1_1Parser.ConditionalContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#workflow_input.
def enterWorkflow_input(self, ctx:WdlV1_1Parser.Workflow_inputContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#workflow_input.
def exitWorkflow_input(self, ctx:WdlV1_1Parser.Workflow_inputContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#workflow_output.
def enterWorkflow_output(self, ctx:WdlV1_1Parser.Workflow_outputContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#workflow_output.
def exitWorkflow_output(self, ctx:WdlV1_1Parser.Workflow_outputContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#input.
def enterInput(self, ctx:WdlV1_1Parser.InputContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#input.
def exitInput(self, ctx:WdlV1_1Parser.InputContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#output.
def enterOutput(self, ctx:WdlV1_1Parser.OutputContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#output.
def exitOutput(self, ctx:WdlV1_1Parser.OutputContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#inner_element.
def enterInner_element(self, ctx:WdlV1_1Parser.Inner_elementContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#inner_element.
def exitInner_element(self, ctx:WdlV1_1Parser.Inner_elementContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#parameter_meta_element.
def enterParameter_meta_element(self, ctx:WdlV1_1Parser.Parameter_meta_elementContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#parameter_meta_element.
def exitParameter_meta_element(self, ctx:WdlV1_1Parser.Parameter_meta_elementContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#meta_element.
def enterMeta_element(self, ctx:WdlV1_1Parser.Meta_elementContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#meta_element.
def exitMeta_element(self, ctx:WdlV1_1Parser.Meta_elementContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#workflow.
def enterWorkflow(self, ctx:WdlV1_1Parser.WorkflowContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#workflow.
def exitWorkflow(self, ctx:WdlV1_1Parser.WorkflowContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#document_element.
def enterDocument_element(self, ctx:WdlV1_1Parser.Document_elementContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#document_element.
def exitDocument_element(self, ctx:WdlV1_1Parser.Document_elementContext):
pass
# Enter a parse tree produced by WdlV1_1Parser#document.
def enterDocument(self, ctx:WdlV1_1Parser.DocumentContext):
pass
# Exit a parse tree produced by WdlV1_1Parser#document.
def exitDocument(self, ctx:WdlV1_1Parser.DocumentContext):
pass
del WdlV1_1Parser
| 32.041522
| 105
| 0.734089
| 3,716
| 27,780
| 5.26507
| 0.076695
| 0.237363
| 0.097623
| 0.175722
| 0.897163
| 0.897163
| 0.771633
| 0.769997
| 0.625045
| 0.483312
| 0
| 0.037164
| 0.203816
| 27,780
| 867
| 106
| 32.041522
| 0.847409
| 0.383729
| 0
| 0.490956
| 1
| 0
| 0.00006
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.490956
| false
| 0.490956
| 0.023256
| 0
| 0.516796
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 10
|
d2b3ac819e4d1e117fa75c84706aae7e2041b204
| 58
|
py
|
Python
|
metrology/utils/__init__.py
|
miracle2k/metrology
|
f7341372055da0713a6db634c336b452cb4dc491
|
[
"MIT"
] | 52
|
2015-02-02T22:04:41.000Z
|
2021-09-07T12:27:51.000Z
|
metrology/utils/__init__.py
|
miracle2k/metrology
|
f7341372055da0713a6db634c336b452cb4dc491
|
[
"MIT"
] | 17
|
2016-06-06T07:10:03.000Z
|
2020-07-05T00:06:51.000Z
|
metrology/utils/__init__.py
|
miracle2k/metrology
|
f7341372055da0713a6db634c336b452cb4dc491
|
[
"MIT"
] | 13
|
2015-04-05T10:40:41.000Z
|
2021-07-01T12:55:53.000Z
|
from time import time
def now():
return int(time())
| 9.666667
| 22
| 0.637931
| 9
| 58
| 4.111111
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.241379
| 58
| 5
| 23
| 11.6
| 0.840909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
d2d307f4bdec9f8af31b392a83327d120ba45735
| 73,494
|
py
|
Python
|
tasks/task_generator/utils/generate_variations.py
|
danarosen/Hi-Phy
|
ef3b5b744fae0efa3f9e004380ab724af4e1a0ec
|
[
"MIT"
] | 24
|
2021-09-01T09:50:06.000Z
|
2022-02-01T16:10:23.000Z
|
tasks/task_generator/utils/generate_variations.py
|
danarosen/Hi-Phy
|
ef3b5b744fae0efa3f9e004380ab724af4e1a0ec
|
[
"MIT"
] | 4
|
2021-10-06T04:55:39.000Z
|
2021-12-07T03:56:46.000Z
|
tasks/task_generator/utils/generate_variations.py
|
danarosen/Hi-Phy
|
ef3b5b744fae0efa3f9e004380ab724af4e1a0ec
|
[
"MIT"
] | 3
|
2021-09-15T05:48:58.000Z
|
2021-10-18T06:07:13.000Z
|
import random
import copy
import math
import sys
from utils.constants import *
from utils.data_classes import *
class GenerateLevels:
# check if the block is slanted
def is_slanted_block(self, block):
# the threshold degree by which the object's rotation is ignored
slanting_threshold = 5
if abs(block.rotation) < slanting_threshold or abs(block.rotation - 90) < slanting_threshold or abs(
block.rotation - 180) < slanting_threshold or abs(block.rotation - 270) < slanting_threshold or abs(
block.rotation - 360) < slanting_threshold or abs(block.rotation + 90) < slanting_threshold or abs(
block.rotation + 180) < slanting_threshold or abs(block.rotation + 270) < slanting_threshold or abs(
block.rotation + 360) < slanting_threshold:
return False
return True
# check the rotation of the block and return the round offed rotation if not slanted
def get_adjusted_block_rotation(self, block):
# the threshold degree by which the object's rotation is ignored
slanting_threshold = 5
if self.is_slanted_block(block):
return block.rotation
else:
rotation = abs(block.rotation)
if rotation < slanting_threshold:
return 0
elif abs(block.rotation - 90) < slanting_threshold:
return 90
elif abs(block.rotation - 180) < slanting_threshold:
return 180
elif abs(block.rotation - 270) < slanting_threshold:
return 270
elif abs(block.rotation - 360) < slanting_threshold:
return 360
def get_horizontal_and_vertical_span(self, block_considered):
# returns the horizontal and vertical span of a given object
location_offset_x = 0.1 # used to reduce the horizontal span of the round blocks (which's base is not fully touched)
horizontal_span_of_the_block = 0
vertical_span_of_the_block = 0
# print('block_considered', type(block_considered))
# print('xxx', '<class \'__main__.Pig\'>' == str(type(block_considered)))
# print('xxx', '<class \'__main__.Block\'>' == str(type(block_considered)))
block_rotation = self.get_adjusted_block_rotation(block_considered)
# if isinstance(block_considered, Block):
if '<class \'utils.data_classes.Block\'>' == str(type(block_considered)):
vertical_span_of_the_block = abs(
(blocks[block_considered.type][0] * block_considered.scale_x) * math.sin(
math.radians(block_rotation))) + abs(
(blocks[block_considered.type][1] * block_considered.scale_y) * math.cos(
math.radians(block_rotation)))
horizontal_span_of_the_block = abs(
(blocks[block_considered.type][0] * block_considered.scale_x) * math.cos(
math.radians(block_rotation))) + abs(
(blocks[block_considered.type][1] * block_considered.scale_y) * math.sin(
math.radians(block_rotation)))
# elif isinstance(block_considered, Pig):
elif '<class \'utils.data_classes.Pig\'>' == str(type(block_considered)):
vertical_span_of_the_block = abs(
(pigs[block_considered.type][0]) * math.sin(math.radians(block_rotation))) + abs(
(pigs[block_considered.type][1]) * math.cos(math.radians(block_rotation)))
horizontal_span_of_the_block = abs(
(pigs[block_considered.type][0]) * math.cos(math.radians(block_rotation))) + abs(
(pigs[block_considered.type][1]) * math.sin(math.radians(block_rotation))) - location_offset_x
elif '<class \'utils.data_classes.Tnt\'>' == str(type(block_considered)):
vertical_span_of_the_block = abs(
(tnts[block_considered.type][0]) * math.sin(math.radians(block_rotation))) + abs(
(tnts[block_considered.type][1]) * math.cos(math.radians(block_rotation)))
horizontal_span_of_the_block = abs(
(tnts[block_considered.type][0]) * math.cos(math.radians(block_rotation))) + abs(
(tnts[block_considered.type][1]) * math.sin(math.radians(block_rotation))) - location_offset_x
else:
print('Unknown Object!')
return horizontal_span_of_the_block, vertical_span_of_the_block
# returns the blocks which are cut by a horizontal line
def find_blocks_which_cut_a_horizontal_line(self, template_data, line):
selected_blocks = []
for block in template_data[0] + template_data[1]:
vertical_span = self.get_horizontal_and_vertical_span(block)[1]
# check if the block lies on the line
# print('line, min mid max', line, block.y - vertical_span / 2, block.y, block.y + vertical_span / 2)
if block.y - vertical_span / 2 < line and line < block.y + vertical_span / 2:
# print('added')
selected_blocks.append(block)
return selected_blocks
def get_occupied_x_spans_below_y_axis(self, template_data, y_axis):
selected_blocks_x_spans = []
for block in template_data[0] + template_data[1]:
horizontal_span, vertical_span = self.get_horizontal_and_vertical_span(block)
# check the vertical span is below the y_axis considered
if block.y - vertical_span / 2 < y_axis:
# save the covered x span
selected_blocks_x_spans.append([block.x - horizontal_span / 2, block.x + horizontal_span / 2])
return selected_blocks_x_spans
def does_coordinate_overlap_ranges(self, coordinate, coordinate_ranges):
for coordinate_range in coordinate_ranges:
if coordinate_range[0] - 1.05 < coordinate < coordinate_range[1] + 1.05: # 1.05 is the half-length of the longest block (rect big)
return True
return False
def place_a_random_block_on_ground(self, template_data, restricted_x_areas):
random_block, random_block_size = random.choice(list(blocks.items()))
random_block_material = random.choice(['ice', 'wood', 'stone'])
random_block_angle = 0
y_coordinate = GROUND_LEVEL + random_block_size[1] / 2
# get blocks on the ground
# blocks_on_ground = self.find_blocks_which_cut_a_horizontal_line(template_data, -3.39)
occupied_x_spans_on_ground = self.get_occupied_x_spans_below_y_axis(template_data, -2.5) # -2.5 is the width of the tallest block (squarehole)
# add the restricted areas as well
occupied_x_spans_on_ground += restricted_x_areas
# print('occupied_x_spans', occupied_x_spans_on_ground)
# randomly pick a x location that doesn't overlap with existing objects
no_of_tries = 0
max_no_of_tries = 100
x_coordinate = round(random.uniform(X_MIN_REACHABLE, X_MAX_REACHABLE), 5)
while self.does_coordinate_overlap_ranges(x_coordinate, occupied_x_spans_on_ground):
x_coordinate = round(random.uniform(X_MIN_REACHABLE, X_MAX_REACHABLE), 5)
no_of_tries += 1
if no_of_tries > max_no_of_tries:
print('could not find a feasible location on ground to place a block')
return False, template_data
# if successfully found a x coordinate add the new block to the template_data
template_data[0].append(Block(0, random_block, random_block_material, x_coordinate, y_coordinate, random_block_angle))
# print('placed a ', random_block_material, random_block, 'at', x_coordinate, y_coordinate)
return True, template_data
def place_random_blocks_on_ground(self, template_data, restricted_areas):
# place random blocks in the level
for j in range(NUM_OF_RANDOM_BLOCKS_TO_PLACE):
self.place_a_random_block_on_ground(template_data, restricted_areas)
def get_reachable_x_location_using_reachability_line(self, x_min, x_max, y_location):
# get the possible max x location considering the closest y
y_coordinates = [row[1] for row in reachability_line]
closest_y_on_reachability_line = min(y_coordinates, key=lambda y: abs(y - y_location))
# get the possible max x location considering the closest y
x_max_theoretical = reachability_line[y_coordinates.index(closest_y_on_reachability_line)][0]
# if the theoretical value is lesser than the user given value, overwrite!
if x_max_theoretical < x_max:
x_max = x_max_theoretical
# get a random x location in the feasible range
random_x_location = round(random.uniform(x_min, x_max), 5)
return random_x_location
def get_reachable_location_using_reachability_line(self, x_min, x_max, y_min, y_max):
# print('input', y_min, y_max)
random_x_location, random_y_location = 0, 0
while True:
# get a random x location
random_x_location = round(random.uniform(x_min, x_max), 5)
# find the closest x location form the reachability line
x_coordinates = [row[0] for row in reachability_line]
closest_x_on_reachability_line = min(x_coordinates, key=lambda x: abs(x - random_x_location))
# get the possible max y location considering the closest x
y_max_theoretical = reachability_line[x_coordinates.index(closest_x_on_reachability_line)][1]
# if the theoretical value is lesser than the user given value, overwrite!
if y_max_theoretical < y_max:
updated_y_max = y_max_theoretical
else:
updated_y_max = y_max
# print('closest_x_on_reachability_line', closest_x_on_reachability_line)
# print(y_min, updated_y_max)
if y_min > updated_y_max:
# print('updated_y_max', updated_y_max)
print('y_locations are not feasible for the selected x location, retrying')
continue
random_y_location = round(random.uniform(y_min, updated_y_max), 5)
break
return random_x_location, random_y_location
def get_location_in_reachability_line(self):
# get a random x location
random_x_location = round(random.uniform(reachability_line[0][0], reachability_line[-1][0]), 5)
# find the closest x location form the reachability line
x_coordinates = [row[0] for row in reachability_line]
closest_x_on_reachability_line = min(x_coordinates, key=lambda x: abs(x - random_x_location))
# get the y location considering the closest x
y_location = reachability_line[x_coordinates.index(closest_x_on_reachability_line)][1]
# print('random_x_location', random_x_location)
# print('closest x', closest_x_on_reachability_line)
# print('y_location', y_location)
#
return random_x_location, y_location
def get_location_in_reachable_space(self, x_min, x_max, y_min, y_max):
# select a random y location
random_x_location = round(random.uniform(x_min, x_max), 5)
# if x is larger than the middle point, reduce y_max to half
if random_x_location > (x_min + (x_max - x_min) / 2):
random_y_location = round(random.uniform(y_min, y_max), 5)
else:
random_y_location = round(random.uniform(y_min, y_min + (y_max - y_min) / 2), 5)
return random_x_location, random_y_location
def get_location_in_reachable_space_2(self, x_min, x_max, y_min, y_max):
# select a random y location
random_x_location = round(random.uniform(x_min, x_max), 5)
# if x is larger than the middle point, reduce y_max to half
if random_x_location > (x_min + (x_max - x_min) / 2):
random_y_location = round(random.uniform(y_min, y_min + (y_max - y_min) / 2), 5)
else:
random_y_location = round(random.uniform(y_min, y_max), 5)
return random_x_location, random_y_location
def get_location_in_unreachable_space(self, x_min_unreachable, x_max_unreachable, y_min_reachable, y_max_reachable):
# select a random y location
random_y_location = round(random.uniform(y_min_reachable, y_max_reachable), 5)
# if y is larger than the middle point, shift the x unreachable range
if random_y_location > (y_min_reachable + (y_max_reachable - y_min_reachable) / 2):
random_x_location = round(random.uniform(x_min_unreachable - 7, x_min_unreachable), 5)
else:
random_x_location = round(random.uniform(x_max_unreachable, x_min_unreachable), 5)
return random_x_location, random_y_location
def get_location_in_unreachable_space_2(self, x_min_unreachable, x_max_unreachable, y_min_reachable, y_max_reachable):
# select a random y location
random_y_location = round(random.uniform(y_min_reachable, y_max_reachable), 5)
# if y is larger than the middle point, shift the x unreachable range
if random_y_location > (y_min_reachable + (y_max_reachable - y_min_reachable) / 2):
random_x_location = round(random.uniform(x_min_unreachable - 3, x_min_unreachable), 5)
else:
random_x_location = round(random.uniform(x_max_unreachable, x_min_unreachable), 5)
return random_x_location, random_y_location
def template_1_1_1(self, template_data):
# only x axis can be changed, pig should be within the reachable range
for pig in template_data[1]:
# generate a random x location with the reachable range
# random_x_location = round(random.uniform(X_MIN_REACHABLE, X_MAX_REACHABLE), 5)
random_x_location = self.get_reachable_x_location_using_reachability_line(X_MIN_REACHABLE, X_MAX_REACHABLE, -3.263795)
pig.x = random_x_location
self.place_random_blocks_on_ground(template_data, [])
return template_data
def template_1_1_2(self, template_data):
# generate a random x and y location within the reachable range for the pig
random_x, random_y = self.get_reachable_location_using_reachability_line(-7, X_HIGH_REACHABLE, -2.829752, Y_HIGH_REACHABLE)
shift_x_value = 0
shift_y_value = 0
# place the pig in the random location
for pig in template_data[1]:
shift_x_value = pig.x - random_x
shift_y_value = pig.y - random_y
pig.x = random_x
pig.y = random_y
# adjust the platform to the pig's location
for block in template_data[0]:
block.x -= shift_x_value
block.y -= shift_y_value
self.place_random_blocks_on_ground(template_data, [])
return template_data
# def template_1_1_5(self, template_data):
# # generate a random x and y location within the reachable range for the pig
# random_x, random_y = self.get_location_in_reachable_space(X_LOW_REACHABLE + 1.5, X_HIGH_REACHABLE - 1.5, Y_LOW_REACHABLE + 1, Y_HIGH_REACHABLE - 1.5)
# shift_x_value = 0
# shift_y_value = 0
#
# # place the pig in the random location
# for pig in template_data[1]:
# shift_x_value = pig.x - random_x
# shift_y_value = pig.y - random_y
# pig.x = random_x
# pig.y = random_y
#
# # adjust the platform to the pig's location
# for block in template_data[0]:
# block.x -= shift_x_value
# block.y -= shift_y_value
#
# self.place_random_blocks_on_ground(template_data, [])
# return template_data
def template_1_2_1(self, template_data):
# only x axis can be changed, pig should be within the reachable range
for pig in template_data[1]:
# generate a random x location with the reachable range
random_x_location = round(random.uniform(X_MIN_REACHABLE + 1, X_MAX_REACHABLE - 1), 5)
pig.x = random_x_location
self.place_random_blocks_on_ground(template_data, [])
return template_data
def template_1_2_2(self, template_data):
# generate a random x and y location within the reachable range for the pig
random_x, random_y = self.get_location_in_reachable_space(X_LOW_REACHABLE + 2, X_HIGH_REACHABLE - 2, Y_LOW_REACHABLE + 2, Y_HIGH_REACHABLE - 2)
shift_x_value = 0
shift_y_value = 0
# place the pig in the random location
for pig in template_data[1]:
shift_x_value = pig.x - random_x
shift_y_value = pig.y - random_y
pig.x = random_x
pig.y = random_y
# adjust the platform to the pig's location
for block in template_data[0]:
block.x -= shift_x_value
block.y -= shift_y_value
self.place_random_blocks_on_ground(template_data, [])
return template_data
def template_1_2_5(self, template_data):
# generate a random x and y location within the reachable range for the pig
random_x, random_y = self.get_location_in_reachable_space(X_LOW_REACHABLE + 5, X_HIGH_REACHABLE - 1.5, Y_LOW_REACHABLE + 1, Y_HIGH_REACHABLE - 1.5)
shift_x_value = 0
shift_y_value = 0
# place the pig in the random location
for pig in template_data[1]:
shift_x_value = pig.x - random_x
shift_y_value = pig.y - random_y
pig.x = random_x
pig.y = random_y
# adjust the platform to the pig's location
for block in template_data[0]:
block.x -= shift_x_value
block.y -= shift_y_value
self.place_random_blocks_on_ground(template_data, [])
return template_data
def template_1_1_6(self, template_data):
# generate a random x and y location within the reachable range for the pig
random_x, random_y = self.get_reachable_location_using_reachability_line(X_LOW_REACHABLE + 5, X_HIGH_REACHABLE - 5, Y_LOW_REACHABLE + 1, Y_HIGH_REACHABLE - 4)
shift_x_value = 0
shift_y_value = 0
# place the pig in the random location
for pig in template_data[1]:
shift_x_value = pig.x - random_x
shift_y_value = pig.y - random_y
pig.x = random_x
pig.y = random_y
# adjust the platform to the pig's location
for block in template_data[0]:
block.x -= shift_x_value
block.y -= shift_y_value
self.place_random_blocks_on_ground(template_data, [])
return template_data
def template_2_1_1(self, template_data):
# only x axis can be changed, pig should be within the unreachable range
for pig in template_data[1]:
# generate a random x location with the reachable range
random_x_location = round(random.uniform(X_MAX_REACHABLE + 1, X_MAX_UNREACHABLE), 5)
pig.x = random_x_location
self.place_random_blocks_on_ground(template_data, [])
return template_data
def template_2_1_2(self, template_data):
# generate a random x and y location within the unreachable range for the pig
random_x, random_y = self.get_location_in_unreachable_space(X_MAX_REACHABLE + 1, X_MAX_REACHABLE + 5, Y_LOW_REACHABLE + 1, Y_HIGH_REACHABLE)
shift_x_value = 0
shift_y_value = 0
# place the pig in the random location
for pig in template_data[1]:
shift_x_value = pig.x - random_x
shift_y_value = pig.y - random_y
pig.x = random_x
pig.y = random_y
# adjust the platform to the pig's location
for block in template_data[0]:
block.x -= shift_x_value
block.y -= shift_y_value
self.place_random_blocks_on_ground(template_data, [])
return template_data
def template_2_1_4(self, template_data):
shift_x_value = 0
# only x axis can be changed, pig should be within the unreachable range
for pig in template_data[1]:
# generate a random x location with the reachable range
random_x_location = round(random.uniform(X_MAX_REACHABLE + 1, X_MAX_UNREACHABLE), 5)
shift_x_value = pig.x - random_x_location
pig.x = random_x_location
# adjust the ball's x location
for block in template_data[0]:
block.x -= shift_x_value
self.place_random_blocks_on_ground(template_data, [[-3, X_MAX_REACHABLE]])
return template_data
def template_2_1_5(self, template_data):
# generate a random x and y location within the unreachable range for the pig
random_x, random_y = self.get_location_in_unreachable_space(X_MAX_REACHABLE + 2, X_MAX_REACHABLE + 1, Y_LOW_REACHABLE + 1, Y_HIGH_REACHABLE)
shift_x_value = 0
shift_y_value = 0
# place the pig in the random location
for pig in template_data[1]:
shift_x_value = pig.x - random_x
shift_y_value = pig.y - random_y
pig.x = random_x
pig.y = random_y
# adjust the platform to the pig's location
for block in template_data[0]:
block.x -= shift_x_value
block.y -= shift_y_value
self.place_random_blocks_on_ground(template_data, [])
return template_data
def template_2_1_6(self, template_data):
# generate a random x and y location within the unreachable range for the pig
random_x, random_y = self.get_location_in_unreachable_space_2(X_MAX_REACHABLE, X_MAX_REACHABLE + 3, Y_LOW_REACHABLE + 1, Y_HIGH_REACHABLE - 5)
shift_x_value = 0
shift_y_value = 0
# place the pig in the random location
for pig in template_data[1]:
shift_x_value = pig.x - random_x
shift_y_value = pig.y - random_y
pig.x = random_x
pig.y = random_y
# adjust the platform to the pig's location
for block in template_data[0]:
block.x -= shift_x_value
block.y -= shift_y_value
self.place_random_blocks_on_ground(template_data, [])
return template_data
def template_2_1_7(self, template_data):
shift_x_value = 0
# only x axis can be changed, pig should be within the reachable range
for pig in template_data[1]:
# generate a random x location with the reachable range
random_x_location = round(random.uniform(X_MIN_REACHABLE + 6, X_MAX_REACHABLE), 5)
shift_x_value = pig.x - random_x_location
pig.x = random_x_location
# adjust the platform to the pig's location
for block in template_data[0]:
block.x -= shift_x_value
self.place_random_blocks_on_ground(template_data, [])
return template_data
def template_2_1_8(self, template_data):
# generate a random x and y location
random_x, random_y = self.get_location_in_reachable_space(X_MIN_REACHABLE + 5, X_MAX_REACHABLE - 3, Y_LOW_REACHABLE + 1, Y_HIGH_REACHABLE - 2)
shift_x_value = 0
shift_y_value = 0
# place the pig in the random location
for pig in template_data[1]:
shift_x_value = pig.x - random_x
shift_y_value = pig.y - random_y
pig.x = random_x
pig.y = random_y
# adjust the platform to the pig's location
for block in template_data[0]:
block.x -= shift_x_value
block.y -= shift_y_value
self.place_random_blocks_on_ground(template_data, [])
return template_data
def template_2_1_9(self, template_data):
# generate a random x and y location
random_x, random_y = self.get_location_in_reachable_space(X_MIN_REACHABLE + 5, X_MAX_REACHABLE, Y_LOW_REACHABLE + 1, Y_HIGH_REACHABLE - 5)
shift_x_value = 0
shift_y_value = 0
# place the pig in the random location
for pig in template_data[1]:
shift_x_value = pig.x - random_x
shift_y_value = pig.y - random_y
pig.x = random_x
pig.y = random_y
# adjust the platform to the pig's location
for block in template_data[0]:
block.x -= shift_x_value
block.y -= shift_y_value
self.place_random_blocks_on_ground(template_data, [])
return template_data
def template_2_2_1(self, template_data):
# point that needs to be reachable (big rock circle)
reachable_point = [-5.31003, -0.7899181]
# get a reachable location for the reachable point
random_x, random_y = self.get_reachable_location_using_reachability_line(-8.747027, 1.549163, -0.5899642, 1.23003)
shift_x_value = reachable_point[0] - random_x
shift_y_value = reachable_point[1] - random_y
# shift the pig
for pig in template_data[1]:
pig.x -= shift_x_value
pig.y -= shift_y_value
# shift the other blocks and platforms
for block in template_data[0]:
block.x -= shift_x_value
block.y -= shift_y_value
self.place_random_blocks_on_ground(template_data, [])
return template_data
def template_2_2_2(self, template_data):
# generate a random x and y location
random_x, random_y = self.get_location_in_reachable_space_2(X_MIN_REACHABLE + 5, X_MAX_REACHABLE - 7, Y_LOW_REACHABLE + 1, Y_HIGH_REACHABLE - 3)
shift_x_value = 0
shift_y_value = 0
# place the pig in the random location
for pig in template_data[1]:
shift_x_value = pig.x - random_x
shift_y_value = pig.y - random_y
pig.x = random_x
pig.y = random_y
# adjust the platform to the pig's location
for block in template_data[0]:
block.x -= shift_x_value
block.y -= shift_y_value
self.place_random_blocks_on_ground(template_data, [])
return template_data
def template_2_2_8(self, template_data):
    """Re-seat the first pig on a random reachable point; the second pig and all
    blocks follow by the same rigid offset."""
    dx = dy = 0
    new_x, new_y = self.get_location_in_reachable_space_2(
        X_MIN_REACHABLE + 5, X_MAX_REACHABLE - 7, Y_LOW_REACHABLE + 1, Y_HIGH_REACHABLE - 3)
    for lead_pig in template_data[1]:
        dx, dy = lead_pig.x - new_x, lead_pig.y - new_y
        lead_pig.x, lead_pig.y = new_x, new_y
        break  # only the first pig is placed directly
    # the second pig is shifted, not re-seated
    template_data[1][1].x -= dx
    template_data[1][1].y -= dy
    for block in template_data[0]:
        block.x -= dx
        block.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_2_3_1(self, template_data):
    """Translate the layout so the square hole sits on the reachability line."""
    anchor_x, anchor_y = 2.44, 1.71176  # square hole — must stay reachable
    target_x, target_y = self.get_location_in_reachability_line()
    dx, dy = anchor_x - target_x, anchor_y - target_y
    # one rigid shift for pig, platform and blocks
    for obj in template_data[1] + template_data[0]:
        obj.x -= dx
        obj.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_2_3_2(self, template_data):
    """Translate the layout so the square hole lands on a sampled reachable spot."""
    anchor_x, anchor_y = -5.59846, 1.02442  # square hole — must stay reachable
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        X_MIN_REACHABLE + 5, X_MAX_REACHABLE, Y_LOW_REACHABLE + 1, 100)
    dx, dy = anchor_x - target_x, anchor_y - target_y
    # one rigid shift for pigs, blocks and platforms
    for obj in template_data[1] + template_data[0]:
        obj.x -= dx
        obj.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_2_3_3(self, template_data):
    """Slide the level horizontally so the pig sits in the unreachable x band.

    Only the x axis may move for this template; y stays fixed.
    """
    dx = 0
    for pig in template_data[1]:
        new_x = round(random.uniform(X_MAX_REACHABLE + 1, X_MAX_UNREACHABLE - 4), 5)
        dx = pig.x - new_x
        pig.x = new_x
    # blocks follow the pig's horizontal displacement
    for block in template_data[0]:
        block.x -= dx
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_2_3_4(self, template_data):
    """Slide the level horizontally so the pig sits inside the reachable x range."""
    dx = 0
    for pig in template_data[1]:
        new_x = round(random.uniform(X_MIN_REACHABLE + 3, X_MAX_REACHABLE), 5)
        dx = pig.x - new_x
        pig.x = new_x
    # other objects follow the pig's horizontal displacement
    for block in template_data[0]:
        block.x -= dx
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_2_3_5(self, template_data):
    """Translate the layout so the wood hole lands on a sampled reachable spot."""
    anchor_x, anchor_y = -1.51509, -1.64602  # wood hole — must stay reachable
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        X_MIN_REACHABLE + 5, X_MAX_REACHABLE, Y_LOW_REACHABLE + 2, 100)
    dx, dy = anchor_x - target_x, anchor_y - target_y
    # one rigid shift for pigs, blocks and platforms
    for obj in template_data[1] + template_data[0]:
        obj.x -= dx
        obj.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_2_4_1(self, template_data):
    """Re-seat the pig on a sampled reachable point and drag the platforms along."""
    dx = dy = 0
    new_x, new_y = self.get_reachable_location_using_reachability_line(
        X_MIN_REACHABLE + 5, 0, Y_LOW_REACHABLE + 2, 4)
    for pig in template_data[1]:
        dx, dy = pig.x - new_x, pig.y - new_y
        pig.x, pig.y = new_x, new_y
    # platforms follow the pig's displacement
    for block in template_data[0]:
        block.x -= dx
        block.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_2_4_3(self, template_data):
    """Re-seat the pig on a sampled reachable point and drag the platforms along."""
    dx = dy = 0
    new_x, new_y = self.get_reachable_location_using_reachability_line(
        X_MIN_REACHABLE + 5, 0, Y_LOW_REACHABLE + 2, 2)
    for pig in template_data[1]:
        dx, dy = pig.x - new_x, pig.y - new_y
        pig.x, pig.y = new_x, new_y
    # platforms follow the pig's displacement
    for block in template_data[0]:
        block.x -= dx
        block.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_2_4_4(self, template_data):
    """Re-seat the pig on a sampled reachable point and drag the platforms along."""
    dx = dy = 0
    new_x, new_y = self.get_reachable_location_using_reachability_line(
        X_MIN_REACHABLE + 5, X_MAX_REACHABLE - 5, Y_LOW_REACHABLE + 2, 1)
    for pig in template_data[1]:
        dx, dy = pig.x - new_x, pig.y - new_y
        pig.x, pig.y = new_x, new_y
    # platforms follow the pig's displacement
    for block in template_data[0]:
        block.x -= dx
        block.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_2_4_6(self, template_data):
    """Re-seat the pig on a sampled reachable point and drag the platforms along."""
    dx = dy = 0
    new_x, new_y = self.get_reachable_location_using_reachability_line(
        X_MIN_REACHABLE + 5, 0, Y_LOW_REACHABLE + 2, 0)
    for pig in template_data[1]:
        dx, dy = pig.x - new_x, pig.y - new_y
        pig.x, pig.y = new_x, new_y
    # platforms follow the pig's displacement
    for block in template_data[0]:
        block.x -= dx
        block.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_1_1(self, template_data):
    """Pin the stone SquareHole to a reachable spot; everything else follows rigidly."""
    dx = dy = 0
    target_x, target_y = self.get_reachable_location_using_reachability_line(-7.5, 5, 2.3, 5.3)
    is_key = lambda b: b.material == 'stone' and b.type == 'SquareHole'
    # move the key block directly onto the target
    for block in template_data[0]:
        if is_key(block):
            dx, dy = block.x - target_x, block.y - target_y
            block.x, block.y = target_x, target_y
    # remaining blocks/platforms and the pig follow the same offset
    for block in template_data[0]:
        if not is_key(block):
            block.x -= dx
            block.y -= dy
    for pig in template_data[1]:
        pig.x -= dx
        pig.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_1_2(self, template_data):
    """Translate the layout so the wood square hole lands on a sampled reachable spot."""
    anchor_x, anchor_y = 3.48, 1.410032  # wood square hole — must stay reachable
    target_x, target_y = self.get_reachable_location_using_reachability_line(3.3, 3.5, 1.47, 3.3)
    dx, dy = anchor_x - target_x, anchor_y - target_y
    # one rigid shift for pigs, blocks and platforms
    for obj in template_data[1] + template_data[0]:
        obj.x -= dx
        obj.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_1_3(self, template_data):
    """Pin the small stone ball to a reachable spot; everything else follows rigidly."""
    dx = dy = 0
    target_x, target_y = self.get_reachable_location_using_reachability_line(-7, 0, 2.3, 4)
    is_key = lambda b: b.type == 'CircleSmall' and b.material == 'stone'
    # move the ball directly onto the target
    for block in template_data[0]:
        if is_key(block):
            dx, dy = block.x - target_x, block.y - target_y
            block.x, block.y = target_x, target_y
    # remaining blocks/platforms and the pig follow the same offset
    for block in template_data[0]:
        if not is_key(block):
            block.x -= dx
            block.y -= dy
    for pig in template_data[1]:
        pig.x -= dx
        pig.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_1_4(self, template_data):
    """Pin the small stone ball to a reachable spot; everything else follows rigidly."""
    dx = dy = 0
    target_x, target_y = self.get_reachable_location_using_reachability_line(-5, 0, 0.3, 4)
    is_key = lambda b: b.type == 'CircleSmall' and b.material == 'stone'
    # move the ball directly onto the target
    for block in template_data[0]:
        if is_key(block):
            dx, dy = block.x - target_x, block.y - target_y
            block.x, block.y = target_x, target_y
    # remaining blocks/platforms and the pig follow the same offset
    for block in template_data[0]:
        if not is_key(block):
            block.x -= dx
            block.y -= dy
    for pig in template_data[1]:
        pig.x -= dx
        pig.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_1_6(self, template_data):
    """Pin the wood SquareHole block to a reachable spot; everything else follows rigidly."""
    dx = dy = 0
    target_x, target_y = self.get_reachable_location_using_reachability_line(-5, 0, -0.5, 2)
    is_key = lambda b: b.material == 'wood' and b.type == 'SquareHole'
    # move the key block directly onto the target
    for block in template_data[0]:
        if is_key(block):
            dx, dy = block.x - target_x, block.y - target_y
            block.x, block.y = target_x, target_y
    # remaining blocks/platforms and the pig follow the same offset
    for block in template_data[0]:
        if not is_key(block):
            block.x -= dx
            block.y -= dy
    for pig in template_data[1]:
        pig.x -= dx
        pig.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_2_1(self, template_data):
    """Pin the RectBig block onto the reachability line; everything else follows rigidly."""
    dx = dy = 0
    target_x, target_y = self.get_location_in_reachability_line()
    # move the RectBig directly onto the target
    for block in template_data[0]:
        if block.type == 'RectBig':
            dx, dy = block.x - target_x, block.y - target_y
            block.x, block.y = target_x, target_y
    # pigs and the remaining blocks follow the same offset
    for pig in template_data[1]:
        pig.x -= dx
        pig.y -= dy
    for block in template_data[0]:
        if block.type != 'RectBig':
            block.x -= dx
            block.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_2_2(self, template_data):
    """Place the higher stone SquareHole at a reachable x (y is too high to move)
    and shift every other object and the pig by the same horizontal offset.

    Fix: the original seeded the search with a dummy ``Block`` at y=0.0, so a
    stone SquareHole sitting at y <= 0 was never selected, and when no candidate
    existed at all the dummy produced a garbage shift of ``0.0 - random_x``
    applied to the whole level. The shift is now skipped when nothing matches.
    """
    random_x = self.get_reachable_x_location_using_reachability_line(-8, X_MAX_REACHABLE, 4.27)
    # find the stone SquareHole with the greatest y (the higher of the pair)
    higher_stone_block = None
    for block in template_data[0]:
        if block.material == 'stone' and block.type == 'SquareHole':
            if higher_stone_block is None or block.y > higher_stone_block.y:
                higher_stone_block = block
    if higher_stone_block is not None:
        shift_x_value = higher_stone_block.x - random_x
        higher_stone_block.x = random_x
        # shift the other blocks and platforms
        for block in template_data[0]:
            if block.identifier != higher_stone_block.identifier:
                block.x -= shift_x_value
        # shift the pig
        for pig in template_data[1]:
            pig.x -= shift_x_value
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_2_3(self, template_data):
    """Place the lower stone SquareHole at a sampled reachable location and
    translate every other object and the pig by the same offset.

    Fix: the original seeded the search with a dummy ``Block`` at y=100.0; a
    candidate at or above y=100 was never selected, and when no stone
    SquareHole existed the dummy produced a garbage shift applied to the whole
    level. The shift is now skipped when nothing matches.
    """
    random_x, random_y = self.get_reachable_location_using_reachability_line(
        -6, X_MAX_REACHABLE, 0.9341, Y_HIGH_REACHABLE)
    # find the stone SquareHole with the smallest y (the lower of the pair)
    lower_stone_block = None
    for block in template_data[0]:
        if block.material == 'stone' and block.type == 'SquareHole':
            if lower_stone_block is None or block.y < lower_stone_block.y:
                lower_stone_block = block
    if lower_stone_block is not None:
        shift_x_value = lower_stone_block.x - random_x
        shift_y_value = lower_stone_block.y - random_y
        lower_stone_block.x = random_x
        lower_stone_block.y = random_y
        # shift the other blocks and platforms
        for block in template_data[0]:
            if block.identifier != lower_stone_block.identifier:
                block.x -= shift_x_value
                block.y -= shift_y_value
        # shift the pig
        for pig in template_data[1]:
            pig.x -= shift_x_value
            pig.y -= shift_y_value
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_2_4(self, template_data):
    """Place the bouncing platform (the platform with the highest x) on a spot
    reachable by the lower trajectory, then translate everything else with it.

    Fix: the original seeded the search with a dummy ``Block`` at x=-100.0;
    when the template contained no platform the dummy produced a garbage shift
    of ``-100 - random_x`` applied to the whole level. The shift is now
    skipped when no platform is found.
    """
    random_x, random_y = self.get_reachable_location_using_reachability_line(-2, 2.6, -0.97782, 1.3)
    # find the bouncing platform: the platform with the highest x
    bouncing_platform = None
    for block in template_data[0]:
        if block.type == 'Platform':
            if bouncing_platform is None or block.x > bouncing_platform.x:
                bouncing_platform = block
    if bouncing_platform is not None:
        shift_x_value = bouncing_platform.x - random_x
        shift_y_value = bouncing_platform.y - random_y
        bouncing_platform.x = random_x
        bouncing_platform.y = random_y
        # shift the other blocks and platforms
        for block in template_data[0]:
            if block.identifier != bouncing_platform.identifier:
                block.x -= shift_x_value
                block.y -= shift_y_value
        # shift the pig
        for pig in template_data[1]:
            pig.x -= shift_x_value
            pig.y -= shift_y_value
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_3_1(self, template_data):
    """Translate the layout so the wide opening lands on a sampled reachable spot."""
    anchor_x, anchor_y = -4.89, 0.95  # wide opening — must stay reachable
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        X_MIN_REACHABLE + 3, X_MAX_REACHABLE, 0.95, Y_HIGH_REACHABLE)
    dx, dy = anchor_x - target_x, anchor_y - target_y
    # one rigid shift for pigs, blocks and platforms
    for obj in template_data[1] + template_data[0]:
        obj.x -= dx
        obj.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_3_2(self, template_data):
    """Translate the layout so the big rock circle lands on a sampled reachable spot."""
    anchor_x, anchor_y = -6.21018, 2.18042  # big rock circle — must stay reachable
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        -9.850999, X_MAX_REACHABLE, 3.319321, Y_HIGH_REACHABLE)
    dx, dy = anchor_x - target_x, anchor_y - target_y
    # one rigid shift for pigs, blocks and platforms
    for obj in template_data[1] + template_data[0]:
        obj.x -= dx
        obj.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_3_3(self, template_data):
    """Translate the layout so the wide opening is reachable via the lower trajectory."""
    anchor_x, anchor_y = -0.986, 2.635  # wide opening — must stay reachable
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        -5, X_MAX_REACHABLE, -0.97, 2.8)
    dx, dy = anchor_x - target_x, anchor_y - target_y
    # one rigid shift for pigs, blocks and platforms
    for obj in template_data[1] + template_data[0]:
        obj.x -= dx
        obj.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_3_4(self, template_data):
    """Translate the layout so the wide opening is reachable via the lower trajectory."""
    anchor_x, anchor_y = -0.831, 2.684  # wide opening — must stay reachable
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        -5, X_MAX_REACHABLE, -0.363, 2.8)
    dx, dy = anchor_x - target_x, anchor_y - target_y
    # one rigid shift for pigs, blocks and platforms
    for obj in template_data[1] + template_data[0]:
        obj.x -= dx
        obj.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_4_1(self, template_data):
    """Translate the layout so the wide opening is reachable via the lower trajectory."""
    anchor_x, anchor_y = 2.25871, 1.39156  # wide opening — must stay reachable
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        -7.94429, X_MAX_REACHABLE, 1.39156, 2.8)
    dx, dy = anchor_x - target_x, anchor_y - target_y
    # one rigid shift for pigs, blocks and platforms
    for obj in template_data[1] + template_data[0]:
        obj.x -= dx
        obj.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_4_2(self, template_data):
    """Translate the layout so the wood triangle lands on a sampled reachable spot."""
    anchor_x, anchor_y = -6.912, 1.2258  # wood triangle — must stay reachable
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        -8.729, X_MAX_REACHABLE, 1.2258, Y_HIGH_REACHABLE)
    dx, dy = anchor_x - target_x, anchor_y - target_y
    # one rigid shift for pigs, blocks and platforms
    for obj in template_data[1] + template_data[0]:
        obj.x -= dx
        obj.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_4_3(self, template_data):
    """Translate the layout so the ice triangle lands on a sampled reachable spot."""
    anchor_x, anchor_y = -4.795, -1.19961  # ice triangle — must stay reachable
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        -7.485002, X_MAX_REACHABLE, -1.19961, Y_HIGH_REACHABLE)
    dx, dy = anchor_x - target_x, anchor_y - target_y
    # one rigid shift for pigs, blocks and platforms
    for obj in template_data[1] + template_data[0]:
        obj.x -= dx
        obj.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_4_4(self, template_data):
    """Translate the layout so the upper stone block's edge is reachable via the
    lower trajectory."""
    anchor_x, anchor_y = -5.048, 1.88  # edge of the upper stone block
    target_x, target_y = self.get_reachable_location_using_reachability_line(-8, -4, 1.88, 2.8)
    dx, dy = anchor_x - target_x, anchor_y - target_y
    # one rigid shift for pigs, blocks and platforms
    for obj in template_data[1] + template_data[0]:
        obj.x -= dx
        obj.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_5_1(self, template_data):
    """Translate the layout so the furthest pig is reachable via the lower trajectory."""
    anchor_x, anchor_y = 2.820013, 1.026101  # furthest pig — must stay reachable
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        -0.89, X_MAX_REACHABLE, -0.7995539, 2.8)
    dx, dy = anchor_x - target_x, anchor_y - target_y
    # one rigid shift for pigs, blocks and platforms
    for obj in template_data[1] + template_data[0]:
        obj.x -= dx
        obj.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_5_2(self, template_data):
    """Translate the layout so the top antenna point lands on a sampled reachable spot."""
    anchor_x, anchor_y = -2.4, 5.8  # top antenna point — must stay reachable
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        -6.45, X_MAX_REACHABLE, 3.68, Y_HIGH_REACHABLE)
    dx, dy = anchor_x - target_x, anchor_y - target_y
    # one rigid shift for pigs, blocks and platforms
    for obj in template_data[1] + template_data[0]:
        obj.x -= dx
        obj.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_5_3(self, template_data):
    """Translate the layout so the topmost pig is reachable via the lower trajectory."""
    anchor_x, anchor_y = -3.379822, -2.420288  # topmost pig — must stay reachable
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        -6, 1.558355, -0.9386116, 1.5)
    dx, dy = anchor_x - target_x, anchor_y - target_y
    # one rigid shift for pigs, blocks and platforms
    for obj in template_data[1] + template_data[0]:
        obj.x -= dx
        obj.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_5_4(self, template_data):
    """Translate the layout so the pig in the bucket is reachable via the lower trajectory."""
    anchor_x, anchor_y = -6.1519, -2.8896  # pig in the bucket — must stay reachable
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        -6.1519, 4, -3.174598, 0.3)
    dx, dy = anchor_x - target_x, anchor_y - target_y
    # one rigid shift for pigs, blocks and platforms
    for obj in template_data[1] + template_data[0]:
        obj.x -= dx
        obj.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_5_5(self, template_data):
    """Translate the layout so the topmost pig lands on a sampled reachable spot."""
    anchor_x, anchor_y = -1.20842, 2.02951  # topmost pig — must stay reachable
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        -3.914959, X_MAX_REACHABLE, 2.02951, Y_HIGH_REACHABLE)
    dx, dy = anchor_x - target_x, anchor_y - target_y
    # one rigid shift for pigs, blocks and platforms
    for obj in template_data[1] + template_data[0]:
        obj.x -= dx
        obj.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_6_1(self, template_data):
    """Translate the layout so the left ice block lands on a sampled reachable spot."""
    anchor_x, anchor_y = -3.709073, -2.15922  # left ice block — must stay reachable
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        -7.66196, X_MAX_REACHABLE, -2.417462, Y_HIGH_REACHABLE - 2)
    dx, dy = anchor_x - target_x, anchor_y - target_y
    # one rigid shift for pigs, blocks and platforms
    for obj in template_data[1] + template_data[0]:
        obj.x -= dx
        obj.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_6_2(self, template_data):
    """Translate the layout so the pig is reachable via the lower trajectory."""
    anchor_x, anchor_y = 1.79002, -2.535197  # pig — must stay reachable
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        -5.311675, -2.535197, -2.535197, -1.0)
    dx, dy = anchor_x - target_x, anchor_y - target_y
    # one rigid shift for pigs, blocks and platforms
    for obj in template_data[1] + template_data[0]:
        obj.x -= dx
        obj.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_6_3(self, template_data):
    """Translate the layout so the right-most pig is reachable via the lower trajectory."""
    anchor_x, anchor_y = -2.219439, -0.03752904  # right-most pig
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        -5.379386, -0.03903953, -0.03752904, 0.29331)
    dx, dy = anchor_x - target_x, anchor_y - target_y
    # one rigid shift for pigs, blocks and platforms
    for obj in template_data[1] + template_data[0]:
        obj.x -= dx
        obj.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_6_4(self, template_data):
    """Translate the layout so the top-most pig is reachable via the lower trajectory."""
    anchor_x, anchor_y = -3.399167, 0.2950496  # top-most pig
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        -4.354696, 1.563726, 0.2950496, 1.565926)
    dx, dy = anchor_x - target_x, anchor_y - target_y
    # one rigid shift for pigs, blocks and platforms
    for obj in template_data[1] + template_data[0]:
        obj.x -= dx
        obj.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_6_5(self, template_data):
    """Translate the layout so the right-most pig is reachable via the lower trajectory."""
    anchor_x, anchor_y = -2.170179, -1.350999  # right-most pig
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        -4.354696, -1.353722, -1.350999, 0.4000222)
    dx, dy = anchor_x - target_x, anchor_y - target_y
    # one rigid shift for pigs, blocks and platforms
    for obj in template_data[1] + template_data[0]:
        obj.x -= dx
        obj.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_6_6(self, template_data):
    """Translate the layout so the right-most pig is reachable via the lower trajectory."""
    anchor_x, anchor_y = -2.221663, -0.5047105  # right-most pig
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        -7.040801, -0.7267894, -1.064094, 0.5543331)
    dx, dy = anchor_x - target_x, anchor_y - target_y
    # one rigid shift for pigs, blocks and platforms
    for obj in template_data[1] + template_data[0]:
        obj.x -= dx
        obj.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_6_7(self, template_data):
    """Translate the layout so the right-most pig is reachable via the lower trajectory."""
    anchor_x, anchor_y = -2.384823, -0.1313629  # right-most pig
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        -5.755874, 5.775358, -0.575754, 1.737329)
    dx, dy = anchor_x - target_x, anchor_y - target_y
    # one rigid shift for pigs, blocks and platforms
    for obj in template_data[1] + template_data[0]:
        obj.x -= dx
        obj.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_7_1(self, template_data):
    """Translate the layout so the pig is reachable via the lower trajectory."""
    anchor_x, anchor_y = -1.11452, 0.0996229  # pig — must stay reachable
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        -4.312766, X_MAX_REACHABLE, 0.7, 2.8)
    dx, dy = anchor_x - target_x, anchor_y - target_y
    # one rigid shift for pigs, blocks and platforms
    for obj in template_data[1] + template_data[0]:
        obj.x -= dx
        obj.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_7_2(self, template_data):
    """Translate the layout so the pig is reachable via the lower trajectory."""
    anchor_x, anchor_y = -0.1495501, 0.7360905  # pig — must stay reachable
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        -4.70958, 4.69, 0.7360905, 2.8)
    dx, dy = anchor_x - target_x, anchor_y - target_y
    # one rigid shift for pigs, blocks and platforms
    for obj in template_data[1] + template_data[0]:
        obj.x -= dx
        obj.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_7_3(self, template_data):
    """Translate the layout so the pig is reachable via the lower trajectory."""
    anchor_x, anchor_y = -0.09999022, 0.3341722  # pig — must stay reachable
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        -4.70958, 4.69, 0.3341722, 2.8)
    dx, dy = anchor_x - target_x, anchor_y - target_y
    # one rigid shift for pigs, blocks and platforms
    for obj in template_data[1] + template_data[0]:
        obj.x -= dx
        obj.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_7_4(self, template_data):
    """Translate the layout so the stone ball lands on a sampled reachable spot."""
    anchor_x, anchor_y = -3.74237, 2.16261  # stone ball — must stay reachable
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        -9.347918, X_MAX_REACHABLE - 2, 2.16261, Y_HIGH_REACHABLE)
    dx, dy = anchor_x - target_x, anchor_y - target_y
    # one rigid shift for pigs, blocks and platforms
    for obj in template_data[1] + template_data[0]:
        obj.x -= dx
        obj.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_7_5(self, template_data):
    """Translate the layout so the stone ball lands on a sampled reachable spot."""
    anchor_x, anchor_y = -3.06, 4.45124  # stone ball — must stay reachable
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        -7.828814, X_MAX_REACHABLE - 5, 3.999, Y_HIGH_REACHABLE)
    dx, dy = anchor_x - target_x, anchor_y - target_y
    # one rigid shift for pigs, blocks and platforms
    for obj in template_data[1] + template_data[0]:
        obj.x -= dx
        obj.y -= dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_8_1(self, template_data):
    """Timing-level template: layout is kept as-is, only distraction blocks are added."""
    # only add distract objects for the correct timing levels
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_9_1(self, template_data):
    """Move the pig into the unreachable x band (reachable y) and drag platforms along."""
    dx = dy = 0
    for pig in template_data[1]:
        # random x strictly beyond the reachable limit (with a 4-unit margin)
        new_x = round(random.uniform(X_MAX_REACHABLE + 4, X_MAX_UNREACHABLE), 5)
        # random y inside the reachable band
        new_y = round(random.uniform(Y_LOW_REACHABLE, Y_HIGH_REACHABLE), 5)
        # remember how far the (last) pig travelled so platforms can follow
        dx = new_x - pig.x
        dy = new_y - pig.y
        pig.x = new_x
        pig.y = new_y
    # apply the same displacement to the blocks/platforms
    for block in template_data[0]:
        block.x += dx
        block.y += dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_9_2(self, template_data):
    """Place the pig just past the reachable x limit and shift the platforms with it."""
    dx = dy = 0
    for pig in template_data[1]:
        # random x beyond the reachable limit (1-unit margin for this template)
        new_x = round(random.uniform(X_MAX_REACHABLE + 1, X_MAX_UNREACHABLE), 5)
        # random y inside the reachable band
        new_y = round(random.uniform(Y_LOW_REACHABLE, Y_HIGH_REACHABLE), 5)
        # track the displacement of the (last) pig for the platforms below
        dx = new_x - pig.x
        dy = new_y - pig.y
        pig.x = new_x
        pig.y = new_y
    for block in template_data[0]:
        block.x += dx
        block.y += dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_9_3(self, template_data):
    """Relocate 3_9_3 so the furthest pig lands at a random reachable position."""
    # furthest pig's original position — must stay reachable
    anchor_x, anchor_y = 1.90999, -0.05851
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        -2.910033, X_MAX_REACHABLE, -0.05851, 2.8)
    dx, dy = target_x - anchor_x, target_y - anchor_y
    # shift pigs, then the remaining blocks and platforms, by the same offset
    for group in (template_data[1], template_data[0]):
        for obj in group:
            obj.x += dx
            obj.y += dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_9_4(self, template_data):
    """Relocate 3_9_4 so the top of the divider stays reachable."""
    # top-of-divider point in the template that must remain reachable
    anchor_x, anchor_y = 0.97, 2.33
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        -2.910033, X_MAX_REACHABLE - 5, 2.33, Y_HIGH_REACHABLE)
    dx, dy = target_x - anchor_x, target_y - anchor_y
    # pigs first, then blocks/platforms, all by the identical displacement
    for group in (template_data[1], template_data[0]):
        for obj in group:
            obj.x += dx
            obj.y += dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_9_5(self, template_data):
    """Relocate 3_9_5 keeping the pig reachable via a low trajectory."""
    # pig position that must stay reachable (low trajectory)
    anchor_x, anchor_y = 3.35, 0.1286316
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        -5.369997, X_MAX_REACHABLE, -1.421352, 2.8)
    dx, dy = target_x - anchor_x, target_y - anchor_y
    # move pigs and blocks/platforms together so the layout is preserved
    for group in (template_data[1], template_data[0]):
        for obj in group:
            obj.x += dx
            obj.y += dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_9_6(self, template_data):
    """Relocate 3_9_6 so the middle of the bridge stays reachable (high trajectory)."""
    # middle-of-bridge point that must remain reachable
    anchor_x, anchor_y = -0.8199974, -1.90136
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        -6.249995, X_MAX_REACHABLE - 5, -2.330358, Y_HIGH_REACHABLE)
    dx, dy = target_x - anchor_x, target_y - anchor_y
    # shift every pig and then every block/platform by the same offset
    for group in (template_data[1], template_data[0]):
        for obj in group:
            obj.x += dx
            obj.y += dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_9_7(self, template_data):
    """Relocate 3_9_7 so the opening stays reachable with a high trajectory."""
    # the opening in the structure that must remain reachable
    anchor_x, anchor_y = -0.32, -1.43
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        -8.876295, X_MAX_REACHABLE - 5, -1.43, Y_HIGH_REACHABLE)
    dx, dy = target_x - anchor_x, target_y - anchor_y
    # keep relative layout: pigs and blocks/platforms all translate identically
    for group in (template_data[1], template_data[0]):
        for obj in group:
            obj.x += dx
            obj.y += dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_3_9_8(self, template_data):
    """Relocate 3_9_8 so the topmost pig stays reachable (high trajectory)."""
    # topmost pig's position — must remain reachable after the shift
    anchor_x, anchor_y = 0.2198468, 0.8202759
    target_x, target_y = self.get_reachable_location_using_reachability_line(
        -5.219497, X_MAX_REACHABLE - 5, 0.8202759, Y_HIGH_REACHABLE)
    dx, dy = target_x - anchor_x, target_y - anchor_y
    # pigs (index 1) and other blocks/platforms (index 0) shift together
    for group in (template_data[1], template_data[0]):
        for obj in group:
            obj.x += dx
            obj.y += dy
    self.place_random_blocks_on_ground(template_data, [])
    return template_data
def template_test(self, template_data):
    """Debug template: scatter three random blocks on the ground."""
    for _ in range(3):
        self.place_a_random_block_on_ground(template_data, [])
    return template_data
def generate_levels_from_template(self, template_name, template_data):
    """Generate a batch of level variants from the named template.

    The number of variants is taken from ``sys.argv[1]`` when it is
    present and a valid integer; otherwise DEFAULT_NUM_OF_VARIANTS_TO_GEN
    is used. Each variant is produced from a deep copy of
    ``template_data`` so generators can mutate it freely.

    :param template_name: template identifier such as '3_9_5'
    :param template_data: (all_blocks, all_pigs, all_tnts) template tuple
    :return: list of generated level variants (empty for unknown names)
    """
    # Was a bare `except:` which also swallowed SystemExit/KeyboardInterrupt;
    # only the two failures we expect (missing or non-integer argv) are caught.
    try:
        num_variants = int(sys.argv[1])
        print('user given number of tasks to generate:', num_variants)
    except (IndexError, ValueError):
        print('the number of tasks to generate is invalid/not given using the default number:', DEFAULT_NUM_OF_VARIANTS_TO_GEN)
        num_variants = DEFAULT_NUM_OF_VARIANTS_TO_GEN
    print('template considered: ', template_name)
    # Dispatch table replacing the former ~100-line if/elif chain. Several
    # names are aliases that reuse another template's generator, exactly as
    # the original chain did (e.g. '1_1_3'..'1_1_5' -> template_1_1_2).
    generators = {
        '1_1_1': self.template_1_1_1,
        '1_1_2': self.template_1_1_2,
        '1_1_3': self.template_1_1_2,
        '1_1_4': self.template_1_1_2,
        '1_1_5': self.template_1_1_2,
        '1_1_6': self.template_1_1_6,
        '1_2_6': self.template_1_1_6,
        '1_2_1': self.template_1_2_1,
        '1_2_2': self.template_1_2_2,
        '1_2_3': self.template_1_2_2,
        '1_2_4': self.template_1_2_2,
        '1_2_5': self.template_1_2_5,
        '2_1_1': self.template_2_1_1,
        '2_1_2': self.template_2_1_2,
        '2_1_3': self.template_2_1_2,
        '2_1_4': self.template_2_1_4,
        '2_1_5': self.template_2_1_5,
        '2_1_6': self.template_2_1_6,
        '2_1_7': self.template_2_1_7,
        '2_1_8': self.template_2_1_8,
        '2_1_9': self.template_2_1_9,
        '2_2_1': self.template_2_2_1,
        '2_2_2': self.template_2_2_2,
        '2_2_3': self.template_2_2_2,
        '2_2_4': self.template_2_2_2,
        '2_2_5': self.template_2_2_2,
        '2_2_6': self.template_2_2_2,
        '2_2_7': self.template_2_2_2,
        '2_2_8': self.template_2_2_8,
        '2_3_1': self.template_2_3_1,
        '2_3_2': self.template_2_3_2,
        '2_3_3': self.template_2_3_3,
        '2_3_4': self.template_2_3_4,
        '2_3_5': self.template_2_3_5,
        '2_4_1': self.template_2_4_1,
        '2_4_2': self.template_2_4_1,
        '2_4_3': self.template_2_4_3,
        '2_4_4': self.template_2_4_4,
        '2_4_5': self.template_2_4_4,
        '2_4_6': self.template_2_4_6,
        '3_1_1': self.template_3_1_1,
        # 3_1_2 is known not to work properly (both blocks can be fallen)
        '3_1_2': self.template_3_1_2,
        '3_1_3': self.template_3_1_3,
        '3_1_4': self.template_3_1_4,
        '3_1_6': self.template_3_1_6,
        '3_2_1': self.template_3_2_1,
        '3_2_2': self.template_3_2_2,
        '3_2_3': self.template_3_2_3,
        '3_2_4': self.template_3_2_4,
        '3_3_1': self.template_3_3_1,
        '3_3_2': self.template_3_3_2,
        '3_3_3': self.template_3_3_3,
        '3_3_4': self.template_3_3_4,
        '3_4_1': self.template_3_4_1,
        '3_4_2': self.template_3_4_2,
        '3_4_3': self.template_3_4_3,
        '3_4_4': self.template_3_4_4,
        '3_5_1': self.template_3_5_1,
        '3_5_2': self.template_3_5_2,
        '3_5_3': self.template_3_5_3,
        '3_5_4': self.template_3_5_4,
        '3_5_5': self.template_3_5_5,
        '3_6_1': self.template_3_6_1,
        '3_6_2': self.template_3_6_2,
        '3_6_3': self.template_3_6_3,
        '3_6_4': self.template_3_6_4,
        '3_6_5': self.template_3_6_5,
        '3_6_6': self.template_3_6_6,
        '3_6_7': self.template_3_6_7,
        '3_7_1': self.template_3_7_1,
        '3_7_2': self.template_3_7_2,
        '3_7_3': self.template_3_7_3,
        '3_7_4': self.template_3_7_4,
        '3_7_5': self.template_3_7_5,
        '3_8_1': self.template_3_8_1,
        '3_8_2': self.template_3_8_1,
        '3_9_1': self.template_3_9_1,
        '3_9_2': self.template_3_9_2,
        '3_9_3': self.template_3_9_3,
        '3_9_4': self.template_3_9_4,
        '3_9_5': self.template_3_9_5,
        '3_9_6': self.template_3_9_6,
        '3_9_7': self.template_3_9_7,
        '3_9_8': self.template_3_9_8,
    }
    generated_levels = []
    for i in range(num_variants):
        print('generating task', i)
        if template_name == '3_1_5':
            # 3_1_5 duplicates 3_1_4 and is deliberately disabled
            print('this template is not used! (same as 3_1_4)')
            continue
        generator = generators.get(template_name)
        if generator is not None:
            # deep copy so each variant starts from the pristine template
            generated_levels.append(generator(copy.deepcopy(template_data)))
    print('total generated_levels', len(generated_levels))
    return generated_levels
| 34.650636
| 173
| 0.74583
| 12,050
| 73,494
| 4.213112
| 0.033278
| 0.108257
| 0.046368
| 0.027655
| 0.900626
| 0.876123
| 0.861172
| 0.802297
| 0.794181
| 0.736231
| 0
| 0.036353
| 0.164204
| 73,494
| 2,120
| 174
| 34.666981
| 0.790138
| 0.21712
| 0
| 0.628916
| 1
| 0
| 0.017447
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069076
| false
| 0
| 0.004819
| 0
| 0.149398
| 0.007229
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
961baba3ea73519cb2da4dc0686de0cac0ebb39c
| 2,563
|
py
|
Python
|
mapclientplugins/zincpcaembeddedstep/resources_rc.py
|
hyu754/mapclientplugins.zincpcaembeddedstep
|
a3dc9b47d3b18422aa476e533dec93b7291c749e
|
[
"Apache-2.0"
] | null | null | null |
mapclientplugins/zincpcaembeddedstep/resources_rc.py
|
hyu754/mapclientplugins.zincpcaembeddedstep
|
a3dc9b47d3b18422aa476e533dec93b7291c749e
|
[
"Apache-2.0"
] | null | null | null |
mapclientplugins/zincpcaembeddedstep/resources_rc.py
|
hyu754/mapclientplugins.zincpcaembeddedstep
|
a3dc9b47d3b18422aa476e533dec93b7291c749e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Fri 20. Apr 15:51:18 2018
# by: The Resource Compiler for PySide (Qt v4.8.7)
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore
# Embedded rcc payload: a PNG logo image plus the binary name table and
# directory structure consumed by qRegisterResourceData() below.
# Auto-generated by pyside-rcc — do not edit these byte blobs by hand.
qt_resource_data = b"\x00\x00\x02\x18\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00@\x00\x00\x00@\x08\x02\x00\x00\x00%\x0b\xe6\x89\x00\x00\x00\x03sBIT\x08\x08\x08\xdb\xe1O\xe0\x00\x00\x00\x09pHYs\x00\x00\x0e\xc4\x00\x00\x0e\xc4\x01\x95+\x0e\x1b\x00\x00\x01\xbbIDATh\x81\xed\x9a=r\xc20\x10\x85\xe5LN@\xe3\x8a\x92c\xf9t9\x16%U\x1a\xae\xa0\x14b<\x8a1\xfay\xef\xad7L\xf4*`\xf0\xea}\xb2${\xb5\x9ab\x8c\xe1\x9d\xf5\xe1m\x80\xd5\x00\xf0\xd6\x00\xf0\xd6\xa7]\xe8\xfb\xf9\x92>\x9cnW\xbbV&\xf92\xba\xfa\x0e!\x9cn\xd7\xcdWm[A\x0e\xf0\xaa\xd7\xed0d\x00-\x03\xc6\x02C\x00\xd0;\xd6\xb5\x18\x14\x003MUS\x1c\x01\x10v!\x1f\xaa\x0f\xc0h.2a[\x01\xacWC\xb8\x89&\x80c\x1eI\x9b\xb6\x1a\x9b\xab\x00\x1ci\x1dkw\x1f@5`\xa6\xaf\xef\x10B\x5cf\xec\xf2\x16\x1b[\x00\xad\xf5\x5cF\x18\xfb\x00*\xeb\xab\xe9\xf5G\x12\xc3\x1c\xa0l\x94\xc4\xb8\x9f/\xcf\xc6d\xaf\xd3-\xe6\xe22\xa7\xbf\x91s#\x17\x0b\xb0;`\x0a\xca\x07\x95\x04\x03\x07\xe8\xb5\xbeQ\xba\x84\xc7@\x00H\xeb\xb9x\x0c<'\x8e\xcb,\x19\xc4y\xa8\xe7\xc5\xb7*\x10@e\x9d\x8f\xf9\xf6\xbb\x12\x03\xc0[\x03\xc0[\x03\xc0[\x03\xc0[\x03\xc0[\x03\xc0[ \x00\xf0\xdek\x14\x93\xcd\xc8$\xef\xd5L\xb2\x8f\x00\xa8\xf2ZIfG%\xf5pB(LJ\x05\xdb*]\x18B\xeb\x8f\x80\xaf\xb6\x16\xc1r\x83\xcd\xc6V\xc1Ues\x17\xc0\xd0n-V\xf7jK\xdb\xeb\xaa\xbb\x91$\xec\xf5_\xadT\x0b\x1c<\x86\x91\xf5G\xfc\xde\x12\x93i\xa5\x03h\x05,\xf2\xc91\xe0\xc8T\x99U\x82A\x96T\xf0B7_\xcb\x91t\x04{\xd4\x00\xc3\x10\xdeC\xcda\x8fv\x0c\xf9,R\x1e\xb7)\x9b3Z\x00\x0c\x0f<\xadFM\xab\xfcz\x80\xa4\xdct\xb0|zX\x01$\xed\xd6\x15\xb5\xb2\x058@\xff5\xa9\xff;\x1a\x00\xde\xfa\x01\xc7Xu\x83\xfca/,\x00\x00\x00\x00IEND\xaeB`\x82"
# Resource path names (UTF-16 encoded), e.g. zincpcaembeddedstep/images/logo.png.
qt_resource_name = b"\x00\x13\x01.#\xc0\x00z\x00i\x00n\x00c\x00p\x00c\x00a\x00e\x00m\x00b\x00e\x00d\x00d\x00e\x00d\x00s\x00t\x00e\x00p\x00\x06\x07\x03}\xc3\x00i\x00m\x00a\x00g\x00e\x00s\x00\x08\x05\xe2Y'\x00l\x00o\x00g\x00o\x00.\x00p\x00n\x00g"
# Tree structure linking the names above to offsets in qt_resource_data.
qt_resource_struct = b"\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00,\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\x00\x00\x00>\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00"
def qInitResources():
    """Register the embedded resource data with the Qt resource system."""
    # 0x01 is the rcc resource-format version expected by PySide/Qt 4.
    QtCore.qRegisterResourceData(
        0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    """Unregister the embedded resource data from the Qt resource system."""
    # must pass the same version and tables that qInitResources registered
    QtCore.qUnregisterResourceData(
        0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| 116.5
| 1,588
| 0.745611
| 531
| 2,563
| 3.561205
| 0.483992
| 0.164992
| 0.147541
| 0.088842
| 0.163406
| 0.163406
| 0.153887
| 0.144368
| 0.13009
| 0.13009
| 0
| 0.257258
| 0.032384
| 2,563
| 21
| 1,589
| 122.047619
| 0.505242
| 0.071791
| 0
| 0
| 0
| 0.333333
| 0.846057
| 0.845635
| 0
| 0
| 0.003374
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.111111
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
82a956d4267d7f63afa94c24b88fcde7322ad060
| 12,076
|
py
|
Python
|
fuelweb_test/tests/test_bonding.py
|
Fiware/ops.Fuel-main-dev
|
779ffdcc9630d780777c60270fdc2f8baf87750a
|
[
"Apache-2.0"
] | null | null | null |
fuelweb_test/tests/test_bonding.py
|
Fiware/ops.Fuel-main-dev
|
779ffdcc9630d780777c60270fdc2f8baf87750a
|
[
"Apache-2.0"
] | null | null | null |
fuelweb_test/tests/test_bonding.py
|
Fiware/ops.Fuel-main-dev
|
779ffdcc9630d780777c60270fdc2f8baf87750a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis.asserts import assert_equal
from proboscis import SkipTest
from proboscis import test
from fuelweb_test.helpers.decorators import log_snapshot_on_error
from fuelweb_test.settings import DEPLOYMENT_MODE
from fuelweb_test.settings import OPENSTACK_RELEASE
from fuelweb_test.settings import OPENSTACK_RELEASE_REDHAT
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
@test(groups=["bonding_ha_one_controller", "bonding"])
class BondingHAOneController(TestBasic):
    """Bonding tests for a neutron cluster with a single controller.

    The two scenarios differ only in bond mode, segmentation type and the
    networks assigned to the bond, so the shared flow lives in
    _deploy_bonding_cluster().
    """

    def _deploy_bonding_cluster(self, bond_mode, segment_type,
                                bond_networks, snapshot_name):
        """Create, bond, deploy and verify a 1-controller/1-compute cluster.

        :param bond_mode: OVS bond mode ('active-backup' or 'balance-slb')
        :param segment_type: neutron segmentation type ('gre' or 'vlan')
        :param bond_networks: network roles assigned to ovs-bond0
        :param snapshot_name: environment snapshot created on success
        """
        # Bonding scenarios are not supported on the RedHat release.
        if OPENSTACK_RELEASE == OPENSTACK_RELEASE_REDHAT:
            raise SkipTest()

        self.env.revert_snapshot("ready_with_3_slaves")

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": segment_type,
            }
        )
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute']
            }
        )
        # Bond eth1..eth4 into one OVS bond; eth0 stays on the admin
        # network and therefore outside the bond.
        raw_data = {
            'mac': None,
            'mode': bond_mode,
            'name': 'ovs-bond0',
            'slaves': [
                {'name': 'eth4'},
                {'name': 'eth3'},
                {'name': 'eth2'},
                {'name': 'eth1'}
            ],
            'state': None,
            'type': 'bond',
            'assigned_networks': []
        }
        interfaces = {
            'eth0': ['fuelweb_admin'],
            'ovs-bond0': bond_networks
        }
        net_params = self.fuel_web.client.get_networks(cluster_id)
        nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
        for node in nailgun_nodes:
            self.fuel_web.update_node_networks(
                node['id'], interfaces_dict=interfaces,
                raw_data=raw_data
            )

        self.fuel_web.deploy_cluster_wait(cluster_id)

        # Sanity-check that the cluster really deployed with the requested
        # provider and segmentation type before running the full test suites.
        cluster = self.fuel_web.client.get_cluster(cluster_id)
        assert_equal(str(cluster['net_provider']), 'neutron')
        assert_equal(str(net_params["networking_parameters"]
                         ['segmentation_type']), segment_type)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot(snapshot_name)

    @test(depends_on=[SetupEnvironment.prepare_slaves_3],
          groups=["deploy_bonding_active_backup"])
    @log_snapshot_on_error
    def deploy_bonding_active_backup(self):
        """Deploy cluster in ha mode with one controller bonding

        Scenario:
            1. Create cluster
            2. Add 1 node with controller role
            3. Add 1 node with compute role
            4. Setup bonding for all interfaces
            5. Deploy the cluster
            6. Run network verification
            7. Run OSTF

        Duration 30m
        Snapshot deploy_bonding_active_backup

        """
        self._deploy_bonding_cluster(
            bond_mode='active-backup',
            segment_type='gre',
            bond_networks=['public', 'management', 'storage'],
            snapshot_name='deploy_bonding_active_backup')

    @test(depends_on=[SetupEnvironment.prepare_slaves_3],
          groups=["deploy_bonding_balance_slb"])
    @log_snapshot_on_error
    def deploy_bonding_balance_slb(self):
        """Deploy cluster in ha mode with 1 controller and bonding

        Scenario:
            1. Create cluster
            2. Add 1 node with controller role
            3. Add 1 node with compute role
            4. Setup bonding for all interfaces
            5. Deploy the cluster
            6. Run network verification
            7. Run OSTF

        Duration 30m
        Snapshot deploy_bonding_balance_slb

        """
        self._deploy_bonding_cluster(
            bond_mode='balance-slb',
            segment_type='vlan',
            bond_networks=['public', 'management', 'storage', 'private'],
            snapshot_name='deploy_bonding_balance_slb')
@test(groups=["bonding_ha", "bonding"])
class BondingHA(TestBasic):
    """Bonding tests for a full HA cluster (3 controllers + 2 computes).

    Both scenarios share the same deployment flow and differ only in bond
    mode, segmentation type and the networks assigned to the bond; the
    common flow is factored into _deploy_bonding_cluster().
    """

    def _deploy_bonding_cluster(self, bond_mode, segment_type,
                                bond_networks, snapshot_name):
        """Create, bond, deploy and verify a 3-controller/2-compute cluster.

        :param bond_mode: OVS bond mode ('active-backup' or 'balance-slb')
        :param segment_type: neutron segmentation type ('gre' or 'vlan')
        :param bond_networks: network roles assigned to ovs-bond0
        :param snapshot_name: environment snapshot created on success
        """
        # Bonding scenarios are not supported on the RedHat release.
        if OPENSTACK_RELEASE == OPENSTACK_RELEASE_REDHAT:
            raise SkipTest()

        self.env.revert_snapshot("ready_with_5_slaves")

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": segment_type,
            }
        )
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['compute']
            }
        )
        # Bond eth1..eth4 into one OVS bond on every node; eth0 remains
        # the dedicated admin interface and is kept out of the bond.
        raw_data = {
            'mac': None,
            'mode': bond_mode,
            'name': 'ovs-bond0',
            'slaves': [
                {'name': 'eth4'},
                {'name': 'eth3'},
                {'name': 'eth2'},
                {'name': 'eth1'}
            ],
            'state': None,
            'type': 'bond',
            'assigned_networks': []
        }
        interfaces = {
            'eth0': ['fuelweb_admin'],
            'ovs-bond0': bond_networks
        }
        net_params = self.fuel_web.client.get_networks(cluster_id)
        nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
        for node in nailgun_nodes:
            self.fuel_web.update_node_networks(
                node['id'], interfaces_dict=interfaces,
                raw_data=raw_data
            )

        self.fuel_web.deploy_cluster_wait(cluster_id)

        # Confirm the deployed provider and segmentation type before the
        # heavier network verification and OSTF runs.
        cluster = self.fuel_web.client.get_cluster(cluster_id)
        assert_equal(str(cluster['net_provider']), 'neutron')
        assert_equal(str(net_params["networking_parameters"]
                         ['segmentation_type']), segment_type)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot(snapshot_name)

    @test(depends_on=[SetupEnvironment.prepare_slaves_5],
          groups=["deploy_bonding_ha_active_backup"])
    @log_snapshot_on_error
    def deploy_bonding_ha_active_backup(self):
        """Deploy cluster in HA mode with bonding (active backup)

        Scenario:
            1. Create cluster
            2. Add 3 nodes with controller role
            3. Add 2 node with compute role
            4. Setup bonding for all interfaces
            5. Deploy the cluster
            6. Run network verification
            7. Run OSTF

        Duration 70m
        Snapshot deploy_bonding_ha_active_backup

        """
        self._deploy_bonding_cluster(
            bond_mode='active-backup',
            segment_type='vlan',
            bond_networks=['public', 'management', 'storage', 'private'],
            snapshot_name='deploy_bonding_ha_active_backup')

    @test(depends_on=[SetupEnvironment.prepare_slaves_5],
          groups=["deploy_bonding_ha_balance_slb"])
    @log_snapshot_on_error
    def deploy_bonding_ha_balance_slb(self):
        """Deploy cluster in HA mode with bonding (balance SLB)

        Scenario:
            1. Create cluster
            2. Add 3 nodes with controller role
            3. Add 2 node with compute role
            4. Setup bonding for all interfaces
            5. Deploy the cluster
            6. Run network verification
            7. Run OSTF

        Duration 70m
        Snapshot deploy_bonding_ha_balance_slb

        """
        self._deploy_bonding_cluster(
            bond_mode='balance-slb',
            segment_type='gre',
            bond_networks=['public', 'management', 'storage'],
            snapshot_name='deploy_bonding_ha_balance_slb')
| 30.806122
| 78
| 0.553495
| 1,249
| 12,076
| 5.052042
| 0.148118
| 0.051347
| 0.062758
| 0.03233
| 0.875911
| 0.869731
| 0.868146
| 0.85103
| 0.83233
| 0.778447
| 0
| 0.014838
| 0.347052
| 12,076
| 391
| 79
| 30.88491
| 0.785415
| 0.157502
| 0
| 0.781609
| 0
| 0
| 0.162253
| 0.034455
| 0
| 0
| 0
| 0
| 0.034483
| 1
| 0.015326
| false
| 0
| 0.034483
| 0
| 0.057471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
82c3e764fe21663b6505b8df8cd5ae798685cc27
| 27,858
|
py
|
Python
|
sdk/python/pulumi_splunk/outputs_tcp_syslog.py
|
pulumi/pulumi-splunk
|
a593a4b65e7de94d61b93676231606820193f212
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-12-23T01:26:49.000Z
|
2020-12-23T01:26:49.000Z
|
sdk/python/pulumi_splunk/outputs_tcp_syslog.py
|
pulumi/pulumi-splunk
|
a593a4b65e7de94d61b93676231606820193f212
|
[
"ECL-2.0",
"Apache-2.0"
] | 36
|
2020-12-22T16:57:47.000Z
|
2022-03-25T20:12:26.000Z
|
sdk/python/pulumi_splunk/outputs_tcp_syslog.py
|
pulumi/pulumi-splunk
|
a593a4b65e7de94d61b93676231606820193f212
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['OutputsTcpSyslogArgs', 'OutputsTcpSyslog']
@pulumi.input_type
class OutputsTcpSyslogArgs:
def __init__(__self__, *,
             acl: Optional[pulumi.Input['OutputsTcpSyslogAclArgs']] = None,
             disabled: Optional[pulumi.Input[bool]] = None,
             name: Optional[pulumi.Input[str]] = None,
             priority: Optional[pulumi.Input[int]] = None,
             server: Optional[pulumi.Input[str]] = None,
             syslog_sourcetype: Optional[pulumi.Input[str]] = None,
             timestamp_format: Optional[pulumi.Input[str]] = None,
             type: Optional[pulumi.Input[str]] = None):
    """
    The set of arguments for constructing a OutputsTcpSyslog resource.
    :param pulumi.Input['OutputsTcpSyslogAclArgs'] acl: The app/user context that is the namespace for the resource
    :param pulumi.Input[bool] disabled: If true, disables global syslog settings.
    :param pulumi.Input[str] name: Name of the syslog output group, as used in outputs.conf.
    :param pulumi.Input[int] priority: Syslog priority value, specified as an integer. See $SPLUNK_HOME/etc/system/README/outputs.conf.spec for details.
    :param pulumi.Input[str] server: host:port of the server where syslog data should be sent
    :param pulumi.Input[str] syslog_sourcetype: Rule for handling data in addition to that provided by the "syslog" sourcetype; matched as a substring against the sourcetype key (use "sourcetype::name" for an exact match). Matching data is assumed to already be in syslog format; non-matching data gets a header, timestamp and hostname prepended.
    :param pulumi.Input[str] timestamp_format: strftime-style format of the timestamp added at the start of forwarded events. See $SPLUNK_HOME/etc/system/README/outputs.conf.spec for details.
    :param pulumi.Input[str] type: Protocol to use to send syslog data. Valid values: (tcp | udp ).
    """
    # Record only the arguments the caller actually supplied; unset
    # (None) values are omitted so Splunk defaults apply.
    optional_args = (
        ("acl", acl),
        ("disabled", disabled),
        ("name", name),
        ("priority", priority),
        ("server", server),
        ("syslog_sourcetype", syslog_sourcetype),
        ("timestamp_format", timestamp_format),
        ("type", type),
    )
    for key, value in optional_args:
        if value is not None:
            pulumi.set(__self__, key, value)
@property
@pulumi.getter
def acl(self) -> Optional[pulumi.Input['OutputsTcpSyslogAclArgs']]:
    """
    The app/user context that is the namespace for the resource
    """
    return pulumi.get(self, "acl")

@acl.setter
def acl(self, value: Optional[pulumi.Input['OutputsTcpSyslogAclArgs']]):
    # Stores through Pulumi's input-type property table, not a plain attribute.
    pulumi.set(self, "acl", value)
@property
@pulumi.getter
def disabled(self) -> Optional[pulumi.Input[bool]]:
    """
    If true, disables global syslog settings.
    """
    return pulumi.get(self, "disabled")

@disabled.setter
def disabled(self, value: Optional[pulumi.Input[bool]]):
    # Stores through Pulumi's input-type property table, not a plain attribute.
    pulumi.set(self, "disabled", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
    """
    Name of the syslog output group. This is name used when creating syslog configuration in outputs.conf.
    """
    return pulumi.get(self, "name")

@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
    # Stores through Pulumi's input-type property table, not a plain attribute.
    pulumi.set(self, "name", value)
@property
@pulumi.getter
def priority(self) -> Optional[pulumi.Input[int]]:
    """
    Sets syslog priority value. The priority value should specified as an integer. See $SPLUNK_HOME/etc/system/README/outputs.conf.spec for details.
    """
    return pulumi.get(self, "priority")

@priority.setter
def priority(self, value: Optional[pulumi.Input[int]]):
    # Stores through Pulumi's input-type property table, not a plain attribute.
    pulumi.set(self, "priority", value)
@property
@pulumi.getter
def server(self) -> Optional[pulumi.Input[str]]:
    """
    host:port of the server where syslog data should be sent
    """
    return pulumi.get(self, "server")

@server.setter
def server(self, value: Optional[pulumi.Input[str]]):
    # Stores through Pulumi's input-type property table, not a plain attribute.
    pulumi.set(self, "server", value)
@property
@pulumi.getter(name="syslogSourcetype")
def syslog_sourcetype(self) -> Optional[pulumi.Input[str]]:
    """
    Specifies a rule for handling data in addition to that provided by the "syslog" sourcetype. By default, there is no value for syslogSourceType.
    <br>This string is used as a substring match against the sourcetype key. For example, if the string is set to 'syslog', then all source types containing the string "syslog" receives this special treatment.
    To match a source type explicitly, use the pattern "sourcetype::sourcetype_name." For example
    syslogSourcetype = sourcetype::apache_common
    Data that is "syslog" or matches this setting is assumed to already be in syslog format.
    Data that does not match the rules has a header, potentially a timestamp, and a hostname added to the front of the event. This is how Splunk software causes arbitrary log data to match syslog expectations.
    """
    return pulumi.get(self, "syslog_sourcetype")

@syslog_sourcetype.setter
def syslog_sourcetype(self, value: Optional[pulumi.Input[str]]):
    # Wire name is camelCase ("syslogSourcetype"); Python name is snake_case.
    pulumi.set(self, "syslog_sourcetype", value)
@property
@pulumi.getter(name="timestampFormat")
def timestamp_format(self) -> Optional[pulumi.Input[str]]:
    """
    Format of timestamp to add at start of the events to be forwarded.
    The format is a strftime-style timestamp formatting string. See $SPLUNK_HOME/etc/system/README/outputs.conf.spec for details.
    """
    return pulumi.get(self, "timestamp_format")

@timestamp_format.setter
def timestamp_format(self, value: Optional[pulumi.Input[str]]):
    # Wire name is camelCase ("timestampFormat"); Python name is snake_case.
    pulumi.set(self, "timestamp_format", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
    """
    Protocol to use to send syslog data. Valid values: (tcp | udp ).
    """
    return pulumi.get(self, "type")

@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
    # NOTE: shadows the builtin `type` by design of the generated schema name.
    pulumi.set(self, "type", value)
@pulumi.input_type
class _OutputsTcpSyslogState:
    # Generated state container: mirrors every OutputsTcpSyslog property, all
    # optional, so OutputsTcpSyslog.get() can look up / filter an existing
    # resource. Do not restructure: @pulumi.input_type introspects this shape.
    def __init__(__self__, *,
                 acl: Optional[pulumi.Input['OutputsTcpSyslogAclArgs']] = None,
                 disabled: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 priority: Optional[pulumi.Input[int]] = None,
                 server: Optional[pulumi.Input[str]] = None,
                 syslog_sourcetype: Optional[pulumi.Input[str]] = None,
                 timestamp_format: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering OutputsTcpSyslog resources.
        :param pulumi.Input['OutputsTcpSyslogAclArgs'] acl: The app/user context that is the namespace for the resource
        :param pulumi.Input[bool] disabled: If true, disables global syslog settings.
        :param pulumi.Input[str] name: Name of the syslog output group. This is name used when creating syslog configuration in outputs.conf.
        :param pulumi.Input[int] priority: Sets syslog priority value. The priority value should specified as an integer. See $SPLUNK_HOME/etc/system/README/outputs.conf.spec for details.
        :param pulumi.Input[str] server: host:port of the server where syslog data should be sent
        :param pulumi.Input[str] syslog_sourcetype: Specifies a rule for handling data in addition to that provided by the "syslog" sourcetype. By default, there is no value for syslogSourceType.
               <br>This string is used as a substring match against the sourcetype key. For example, if the string is set to 'syslog', then all source types containing the string "syslog" receives this special treatment.
               To match a source type explicitly, use the pattern "sourcetype::sourcetype_name." For example
               syslogSourcetype = sourcetype::apache_common
               Data that is "syslog" or matches this setting is assumed to already be in syslog format.
               Data that does not match the rules has a header, potentially a timestamp, and a hostname added to the front of the event. This is how Splunk software causes arbitrary log data to match syslog expectations.
        :param pulumi.Input[str] timestamp_format: Format of timestamp to add at start of the events to be forwarded.
               The format is a strftime-style timestamp formatting string. See $SPLUNK_HOME/etc/system/README/outputs.conf.spec for details.
        :param pulumi.Input[str] type: Protocol to use to send syslog data. Valid values: (tcp | udp ).
        """
        # Only values explicitly supplied are recorded; omitted (None) values
        # stay absent from Pulumi's property table rather than stored as None.
        if acl is not None:
            pulumi.set(__self__, "acl", acl)
        if disabled is not None:
            pulumi.set(__self__, "disabled", disabled)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if priority is not None:
            pulumi.set(__self__, "priority", priority)
        if server is not None:
            pulumi.set(__self__, "server", server)
        if syslog_sourcetype is not None:
            pulumi.set(__self__, "syslog_sourcetype", syslog_sourcetype)
        if timestamp_format is not None:
            pulumi.set(__self__, "timestamp_format", timestamp_format)
        if type is not None:
            pulumi.set(__self__, "type", type)

    # Accessors below delegate to pulumi.get/pulumi.set; @pulumi.getter(name=...)
    # maps camelCase wire names onto snake_case Python names.
    @property
    @pulumi.getter
    def acl(self) -> Optional[pulumi.Input['OutputsTcpSyslogAclArgs']]:
        """
        The app/user context that is the namespace for the resource
        """
        return pulumi.get(self, "acl")

    @acl.setter
    def acl(self, value: Optional[pulumi.Input['OutputsTcpSyslogAclArgs']]):
        pulumi.set(self, "acl", value)

    @property
    @pulumi.getter
    def disabled(self) -> Optional[pulumi.Input[bool]]:
        """
        If true, disables global syslog settings.
        """
        return pulumi.get(self, "disabled")

    @disabled.setter
    def disabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disabled", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the syslog output group. This is name used when creating syslog configuration in outputs.conf.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def priority(self) -> Optional[pulumi.Input[int]]:
        """
        Sets syslog priority value. The priority value should specified as an integer. See $SPLUNK_HOME/etc/system/README/outputs.conf.spec for details.
        """
        return pulumi.get(self, "priority")

    @priority.setter
    def priority(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "priority", value)

    @property
    @pulumi.getter
    def server(self) -> Optional[pulumi.Input[str]]:
        """
        host:port of the server where syslog data should be sent
        """
        return pulumi.get(self, "server")

    @server.setter
    def server(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "server", value)

    @property
    @pulumi.getter(name="syslogSourcetype")
    def syslog_sourcetype(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies a rule for handling data in addition to that provided by the "syslog" sourcetype. By default, there is no value for syslogSourceType.
        <br>This string is used as a substring match against the sourcetype key. For example, if the string is set to 'syslog', then all source types containing the string "syslog" receives this special treatment.
        To match a source type explicitly, use the pattern "sourcetype::sourcetype_name." For example
        syslogSourcetype = sourcetype::apache_common
        Data that is "syslog" or matches this setting is assumed to already be in syslog format.
        Data that does not match the rules has a header, potentially a timestamp, and a hostname added to the front of the event. This is how Splunk software causes arbitrary log data to match syslog expectations.
        """
        return pulumi.get(self, "syslog_sourcetype")

    @syslog_sourcetype.setter
    def syslog_sourcetype(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "syslog_sourcetype", value)

    @property
    @pulumi.getter(name="timestampFormat")
    def timestamp_format(self) -> Optional[pulumi.Input[str]]:
        """
        Format of timestamp to add at start of the events to be forwarded.
        The format is a strftime-style timestamp formatting string. See $SPLUNK_HOME/etc/system/README/outputs.conf.spec for details.
        """
        return pulumi.get(self, "timestamp_format")

    @timestamp_format.setter
    def timestamp_format(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "timestamp_format", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        Protocol to use to send syslog data. Valid values: (tcp | udp ).
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
class OutputsTcpSyslog(pulumi.CustomResource):
    # Generated Pulumi resource wrapping Splunk's outputs.conf syslog group.
    # Construction is exposed through two typing overloads (keyword properties
    # vs. an OutputsTcpSyslogArgs bundle); the real __init__ dispatches between
    # them and both paths converge on _internal_init.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 acl: Optional[pulumi.Input[pulumi.InputType['OutputsTcpSyslogAclArgs']]] = None,
                 disabled: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 priority: Optional[pulumi.Input[int]] = None,
                 server: Optional[pulumi.Input[str]] = None,
                 syslog_sourcetype: Optional[pulumi.Input[str]] = None,
                 timestamp_format: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        ## # Resource: OutputsTcpSyslog
        Access the configuration of a forwarded server configured to provide data in standard syslog format.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_splunk as splunk
        tcp_syslog = splunk.OutputsTcpSyslog("tcpSyslog",
            priority=5,
            server="new-host-1:1234")
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[pulumi.InputType['OutputsTcpSyslogAclArgs']] acl: The app/user context that is the namespace for the resource
        :param pulumi.Input[bool] disabled: If true, disables global syslog settings.
        :param pulumi.Input[str] name: Name of the syslog output group. This is name used when creating syslog configuration in outputs.conf.
        :param pulumi.Input[int] priority: Sets syslog priority value. The priority value should specified as an integer. See $SPLUNK_HOME/etc/system/README/outputs.conf.spec for details.
        :param pulumi.Input[str] server: host:port of the server where syslog data should be sent
        :param pulumi.Input[str] syslog_sourcetype: Specifies a rule for handling data in addition to that provided by the "syslog" sourcetype. By default, there is no value for syslogSourceType.
               <br>This string is used as a substring match against the sourcetype key. For example, if the string is set to 'syslog', then all source types containing the string "syslog" receives this special treatment.
               To match a source type explicitly, use the pattern "sourcetype::sourcetype_name." For example
               syslogSourcetype = sourcetype::apache_common
               Data that is "syslog" or matches this setting is assumed to already be in syslog format.
               Data that does not match the rules has a header, potentially a timestamp, and a hostname added to the front of the event. This is how Splunk software causes arbitrary log data to match syslog expectations.
        :param pulumi.Input[str] timestamp_format: Format of timestamp to add at start of the events to be forwarded.
               The format is a strftime-style timestamp formatting string. See $SPLUNK_HOME/etc/system/README/outputs.conf.spec for details.
        :param pulumi.Input[str] type: Protocol to use to send syslog data. Valid values: (tcp | udp ).
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: Optional[OutputsTcpSyslogArgs] = None,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        ## # Resource: OutputsTcpSyslog
        Access the configuration of a forwarded server configured to provide data in standard syslog format.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_splunk as splunk
        tcp_syslog = splunk.OutputsTcpSyslog("tcpSyslog",
            priority=5,
            server="new-host-1:1234")
        ```
        :param str resource_name: The name of the resource.
        :param OutputsTcpSyslogArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Decide which overload the caller used: get_resource_args_opts returns
        # a populated args object when the bundled form was used, else None.
        resource_args, opts = _utilities.get_resource_args_opts(OutputsTcpSyslogArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       acl: Optional[pulumi.Input[pulumi.InputType['OutputsTcpSyslogAclArgs']]] = None,
                       disabled: Optional[pulumi.Input[bool]] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       priority: Optional[pulumi.Input[int]] = None,
                       server: Optional[pulumi.Input[str]] = None,
                       syslog_sourcetype: Optional[pulumi.Input[str]] = None,
                       timestamp_format: Optional[pulumi.Input[str]] = None,
                       type: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        # Normalize opts and stamp the provider SDK version onto it.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means "adopt existing resource": then __props__ carries
        # the recovered state; otherwise we build props from the arguments.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = OutputsTcpSyslogArgs.__new__(OutputsTcpSyslogArgs)
            __props__.__dict__["acl"] = acl
            __props__.__dict__["disabled"] = disabled
            __props__.__dict__["name"] = name
            __props__.__dict__["priority"] = priority
            __props__.__dict__["server"] = server
            __props__.__dict__["syslog_sourcetype"] = syslog_sourcetype
            __props__.__dict__["timestamp_format"] = timestamp_format
            __props__.__dict__["type"] = type
        super(OutputsTcpSyslog, __self__).__init__(
            'splunk:index/outputsTcpSyslog:OutputsTcpSyslog',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            acl: Optional[pulumi.Input[pulumi.InputType['OutputsTcpSyslogAclArgs']]] = None,
            disabled: Optional[pulumi.Input[bool]] = None,
            name: Optional[pulumi.Input[str]] = None,
            priority: Optional[pulumi.Input[int]] = None,
            server: Optional[pulumi.Input[str]] = None,
            syslog_sourcetype: Optional[pulumi.Input[str]] = None,
            timestamp_format: Optional[pulumi.Input[str]] = None,
            type: Optional[pulumi.Input[str]] = None) -> 'OutputsTcpSyslog':
        """
        Get an existing OutputsTcpSyslog resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[pulumi.InputType['OutputsTcpSyslogAclArgs']] acl: The app/user context that is the namespace for the resource
        :param pulumi.Input[bool] disabled: If true, disables global syslog settings.
        :param pulumi.Input[str] name: Name of the syslog output group. This is name used when creating syslog configuration in outputs.conf.
        :param pulumi.Input[int] priority: Sets syslog priority value. The priority value should specified as an integer. See $SPLUNK_HOME/etc/system/README/outputs.conf.spec for details.
        :param pulumi.Input[str] server: host:port of the server where syslog data should be sent
        :param pulumi.Input[str] syslog_sourcetype: Specifies a rule for handling data in addition to that provided by the "syslog" sourcetype. By default, there is no value for syslogSourceType.
               <br>This string is used as a substring match against the sourcetype key. For example, if the string is set to 'syslog', then all source types containing the string "syslog" receives this special treatment.
               To match a source type explicitly, use the pattern "sourcetype::sourcetype_name." For example
               syslogSourcetype = sourcetype::apache_common
               Data that is "syslog" or matches this setting is assumed to already be in syslog format.
               Data that does not match the rules has a header, potentially a timestamp, and a hostname added to the front of the event. This is how Splunk software causes arbitrary log data to match syslog expectations.
        :param pulumi.Input[str] timestamp_format: Format of timestamp to add at start of the events to be forwarded.
               The format is a strftime-style timestamp formatting string. See $SPLUNK_HOME/etc/system/README/outputs.conf.spec for details.
        :param pulumi.Input[str] type: Protocol to use to send syslog data. Valid values: (tcp | udp ).
        """
        # Fold the provider id into opts, then re-construct the resource with
        # the looked-up state so the engine reads instead of creates it.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _OutputsTcpSyslogState.__new__(_OutputsTcpSyslogState)
        __props__.__dict__["acl"] = acl
        __props__.__dict__["disabled"] = disabled
        __props__.__dict__["name"] = name
        __props__.__dict__["priority"] = priority
        __props__.__dict__["server"] = server
        __props__.__dict__["syslog_sourcetype"] = syslog_sourcetype
        __props__.__dict__["timestamp_format"] = timestamp_format
        __props__.__dict__["type"] = type
        return OutputsTcpSyslog(resource_name, opts=opts, __props__=__props__)

    # Read-only output properties resolved by the Pulumi engine at deploy time.
    @property
    @pulumi.getter
    def acl(self) -> pulumi.Output['outputs.OutputsTcpSyslogAcl']:
        """
        The app/user context that is the namespace for the resource
        """
        return pulumi.get(self, "acl")

    @property
    @pulumi.getter
    def disabled(self) -> pulumi.Output[bool]:
        """
        If true, disables global syslog settings.
        """
        return pulumi.get(self, "disabled")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Name of the syslog output group. This is name used when creating syslog configuration in outputs.conf.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def priority(self) -> pulumi.Output[int]:
        """
        Sets syslog priority value. The priority value should specified as an integer. See $SPLUNK_HOME/etc/system/README/outputs.conf.spec for details.
        """
        return pulumi.get(self, "priority")

    @property
    @pulumi.getter
    def server(self) -> pulumi.Output[str]:
        """
        host:port of the server where syslog data should be sent
        """
        return pulumi.get(self, "server")

    @property
    @pulumi.getter(name="syslogSourcetype")
    def syslog_sourcetype(self) -> pulumi.Output[str]:
        """
        Specifies a rule for handling data in addition to that provided by the "syslog" sourcetype. By default, there is no value for syslogSourceType.
        <br>This string is used as a substring match against the sourcetype key. For example, if the string is set to 'syslog', then all source types containing the string "syslog" receives this special treatment.
        To match a source type explicitly, use the pattern "sourcetype::sourcetype_name." For example
        syslogSourcetype = sourcetype::apache_common
        Data that is "syslog" or matches this setting is assumed to already be in syslog format.
        Data that does not match the rules has a header, potentially a timestamp, and a hostname added to the front of the event. This is how Splunk software causes arbitrary log data to match syslog expectations.
        """
        return pulumi.get(self, "syslog_sourcetype")

    @property
    @pulumi.getter(name="timestampFormat")
    def timestamp_format(self) -> pulumi.Output[str]:
        """
        Format of timestamp to add at start of the events to be forwarded.
        The format is a strftime-style timestamp formatting string. See $SPLUNK_HOME/etc/system/README/outputs.conf.spec for details.
        """
        return pulumi.get(self, "timestamp_format")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Protocol to use to send syslog data. Valid values: (tcp | udp ).
        """
        return pulumi.get(self, "type")
| 50.928702
| 220
| 0.665087
| 3,454
| 27,858
| 5.239143
| 0.068616
| 0.06565
| 0.075597
| 0.054708
| 0.898265
| 0.892076
| 0.8813
| 0.876934
| 0.874116
| 0.867263
| 0
| 0.000619
| 0.245818
| 27,858
| 546
| 221
| 51.021978
| 0.860685
| 0.473042
| 0
| 0.828179
| 1
| 0
| 0.089084
| 0.021211
| 0
| 0
| 0
| 0
| 0
| 1
| 0.161512
| false
| 0.003436
| 0.024055
| 0
| 0.281787
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
82cf0f310e55933e675402a226782f037cff1ace
| 13,547
|
py
|
Python
|
Networks/Models/SMILES-Net.py
|
FrancescoZ/T-Tox
|
766f4fbc11bd09789ba198d00244dac2913dcbf1
|
[
"BSD-2-Clause",
"MIT"
] | null | null | null |
Networks/Models/SMILES-Net.py
|
FrancescoZ/T-Tox
|
766f4fbc11bd09789ba198d00244dac2913dcbf1
|
[
"BSD-2-Clause",
"MIT"
] | null | null | null |
Networks/Models/SMILES-Net.py
|
FrancescoZ/T-Tox
|
766f4fbc11bd09789ba198d00244dac2913dcbf1
|
[
"BSD-2-Clause",
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import keras
from keras.preprocessing.text import Tokenizer
from keras.engine.topology import Layer
from keras import initializers as initializers, regularizers, constraints
from keras.callbacks import Callback
from keras.layers import Embedding, Input, Dense, LSTM, GRU, Bidirectional, TimeDistributed, Dropout
from keras import backend as K
from keras.models import Model
from sklearn.metrics import roc_auc_score
import input as dataset
import tensorflow as tf
from network.layers import AttentionDecoder
import input as data
from utils import helpers
from network.optimizer import Optimizer
from network.evaluation import Metrics
from keras.utils import plot_model
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD
from keras.callbacks import TensorBoard
import keras.backend as K
import os
import sys
import time
import statistics
import shutil
import numpy as nu
from sklearn.model_selection import train_test_split
from string import punctuation
from os import listdir
from numpy import array
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Embedding
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.models import Sequential, Model
from keras.layers import Dropout, Dense, Input
from keras.optimizers import Adam, Nadam
from keras.activations import relu, elu, sigmoid
from keras.losses import binary_crossentropy
class SMILESNet:
    """Bidirectional-LSTM + attention-decoder classifier over tokenized SMILES strings.

    The embedding -> BiLSTM -> AttentionDecoder stack was previously copy-pasted
    three times (in ``__init__``, ``Concat`` and ``Visual``); it is now built by
    the single private helper ``_build_attention_stack``.
    """

    def __init__(self,
                 vocab_size,
                 max_length,
                 X_train,
                 Y_train,
                 X_test,
                 Y_test,
                 metrics,
                 tensorBoard,
                 early,
                 learning_rate='',
                 rho='',
                 epsilon='',
                 epochs='',
                 loss_function='',
                 log_dir='',
                 batch_size='',
                 return_probabilities='',
                 classes=2):
        """Build the classification model and store the training configuration.

        :param vocab_size: SMILES token vocabulary size (embedding input dim is vocab_size + 1).
        :param max_length: padded length of each tokenized SMILES sequence.
        :param X_train, Y_train, X_test, Y_test: train/validation arrays passed to fit().
        :param metrics, tensorBoard, early: Keras callbacks used by run().
        :param return_probabilities: forwarded to the AttentionDecoder layer.
        :param classes: number of output classes for the final softmax layer.
        """
        self.vocab_size = vocab_size
        self.max_length = max_length
        # Shared network stack; the training model adds a softmax head on top.
        input_, y_hat = self._build_attention_stack(return_probabilities)
        dense = Dense(classes, activation='softmax', name='dense_smile')(y_hat)
        self.model = Model(inputs=input_, outputs=dense)
        plot_model(self.model, to_file='modelHATT.png')
        self.X_train = X_train
        self.Y_train = Y_train
        self.X_test = X_test
        self.Y_test = Y_test
        self.learning_rate = learning_rate
        self.rho = rho
        self.epsilon = epsilon
        self.epochs = epochs
        self.loss_function = loss_function
        self.log_dir = log_dir
        self.batch_size = batch_size
        self.metrics = metrics
        self.tensorBoard = tensorBoard
        self.early = early
        self.classes = classes
        self.opt = 'Adam'
        # BUG FIX: model.summary() prints its table and returns None, so the
        # original print(self.model.summary()) also printed a stray "None".
        self.model.summary()

    def _build_attention_stack(self, return_probabilities, return_attention=False):
        """Return ``(input_tensor, attention_output)`` for the shared sub-network."""
        input_ = Input(shape=(self.max_length,), dtype='float32', name='text_input')
        input_embed = Embedding(self.vocab_size + 1, 100,
                                input_length=self.max_length,
                                trainable=True,
                                mask_zero=True,
                                name='OneHot_smile')(input_)
        rnn_encoded = Bidirectional(LSTM(100, return_sequences=True),
                                    name='bidirectional_smile',
                                    merge_mode='concat',
                                    trainable=True)(input_embed)
        decoder_kwargs = {
            'units': 100,
            'name': 'attention_decoder_smile',
            'output_dim': 2,
            'return_sequence': True,
            'return_probabilities': return_probabilities,
            'trainable': True,
        }
        # Pass return_attention only when requested so the training model built
        # in __init__ keeps the layer's default behaviour, as before.
        if return_attention:
            decoder_kwargs['return_attention'] = True
        y_hat = AttentionDecoder(**decoder_kwargs)(rnn_encoded)
        return input_, y_hat

    def Concat(self):
        """Return (input tensor, attention output) for embedding into a larger model."""
        return self._build_attention_stack(True, return_attention=True)

    def Visual(self):
        """Return a Model whose output is the attention signal, for visualization."""
        input_, y_hat = self._build_attention_stack(True, return_attention=True)
        return Model(inputs=input_, outputs=y_hat)

    def run(self):
        """Compile and train the model; returns the Keras History object."""
        self.model.compile(loss=self.loss_function,
                           optimizer=self.opt,
                           metrics=['acc'])
        return self.model.fit(self.X_train,
                              self.Y_train,
                              validation_data=(self.X_test, self.Y_test),
                              epochs=self.epochs,
                              batch_size=self.batch_size,
                              callbacks=[self.early, self.metrics, self.tensorBoard])
# import pandas as pd
# import numpy as np
# import keras
# from keras.preprocessing.text import Tokenizer
# from keras.engine.topology import Layer
# from keras import initializers as initializers, regularizers, constraints
# from keras.callbacks import Callback
# from keras.layers import Embedding, Input, Dense, LSTM, GRU, Bidirectional, TimeDistributed, Dropout
# from keras import backend as K
# from keras.models import Model
# from sklearn.metrics import roc_auc_score
# import input as dataset
# import tensorflow as tf
# from network.layers import AttentionDecoder
# import input as data
# from utils import helpers
# from network.optimizer import Optimizer
# from network.evaluation import Metrics
# from keras.utils import plot_model
# import keras
# from keras.preprocessing.image import ImageDataGenerator
# from keras.optimizers import SGD
# from keras.callbacks import TensorBoard
# import keras.backend as K
# import os
# import sys
# import time
# import statistics
# import shutil
# import numpy as nu
# from sklearn.model_selection import train_test_split
# from string import punctuation
# from os import listdir
# from numpy import array
# from keras.preprocessing.text import Tokenizer
# from keras.preprocessing.sequence import pad_sequences
# from keras.models import Sequential
# from keras.layers import Dense
# from keras.layers import Flatten
# from keras.layers import Embedding
# from keras.layers.convolutional import Conv1D
# from keras.layers.convolutional import MaxPooling1D
# from keras.models import Sequential, Model
# from keras.layers import Dropout, Dense, Input
# from keras.optimizers import Adam, Nadam
# from keras.activations import relu, elu, sigmoid
# from keras.losses import binary_crossentropy
# class VisualATT:
# def __init__(self,
# vocab_size,
# max_length,
# X_train,
# Y_train,
# X_test,
# Y_test,
# metrics,
# tensorBoard,
# early,
# learning_rate='',
# rho='',
# epsilon='',
# epochs='',
# loss_function='',
# log_dir='',
# batch_size='',
# return_probabilities='',
# classes = 2):
# self.vocab_size = vocab_size
# self.max_length = max_length
# input_ = Input(shape=(max_length,), dtype='float32',name='text_input')
# input_embed = Embedding(vocab_size+1, 400,
# input_length=max_length,
# trainable=True,
# mask_zero=True,
# name='OneHot_smile')(input_)
# rnn_encoded = Bidirectional(LSTM(400, return_sequences=True),
# name='bidirectional_smile',
# merge_mode='sum',
# trainable=True)(input_embed)
# y_hat = AttentionDecoder(units =400,
# name='attention_decoder_smile',
# output_dim=2,
# return_sequence=True,
# return_probabilities=return_probabilities,
# trainable=True)(rnn_encoded)
# dense = Dense(classes, activation='softmax',name ='dense_smile')(y_hat)
# self.model = Model(inputs = input_, outputs = dense)
# plot_model(self.model, to_file='modelHATT.png')
# self.X_train = X_train
# self.Y_train = Y_train
# self.X_test = X_test
# self.Y_test = Y_test
# self.learning_rate = learning_rate
# self.rho = rho
# self.epsilon = epsilon
# self.epochs = epochs
# self.loss_function = loss_function
# self.log_dir = log_dir
# self.batch_size = batch_size
# self.metrics = metrics
# self.tensorBoard = tensorBoard
# self.early = early
# self.classes = classes
# self.opt = 'Adam'
# print(self.model.summary())
# def Concat(self):
# input_ = Input(shape=(self.max_length,), dtype='float32',name='text_input')
# input_embed = Embedding(self.vocab_size+1, 400,
# input_length=self.max_length,
# trainable=True,
# mask_zero=True,
# name='OneHot_smile')(input_)
# rnn_encoded = Bidirectional(LSTM(400, return_sequences=True),
# name='bidirectional_smile',
# merge_mode='sum',
# trainable=True)(input_embed)
# y_hat = AttentionDecoder(units =400,
# name='attention_decoder_smile',
# output_dim=2,
# return_sequence=True,
# return_probabilities=True,
# return_attention=True,
# trainable=True)(rnn_encoded)
# return input_, y_hat[0]
# def Visual(self):
# input_ = Input(shape=(self.max_length,), dtype='float32',name='text_input')
# input_embed = Embedding(self.vocab_size+1, 400,
# input_length=self.max_length,
# trainable=True,
# mask_zero=True,
# name='OneHot_smile')(input_)
# rnn_encoded = Bidirectional(LSTM(400, return_sequences=True),
# name='bidirectional_smile',
# merge_mode='sum',
# trainable=True)(input_embed)
# y_hat = AttentionDecoder(units =400,
# name='attention_decoder_smile',
# output_dim=2,
# return_sequence=True,
# return_probabilities=True,
# return_attention=True,
# trainable=True)(rnn_encoded)
# return Model(inputs = input_, outputs = y_hat)
# def run(self):
# self.model.compile(loss=self.loss_function,
# optimizer=self.opt,
# metrics=['acc'])
# return self.model.fit(self.X_train,
# self.Y_train,
# validation_data=(self.X_test, self.Y_test),
# epochs=self.epochs,
# batch_size=self.batch_size,
# callbacks = [self.early,self.metrics,self.tensorBoard])
| 37.422652
| 102
| 0.545065
| 1,325
| 13,547
| 5.378113
| 0.116981
| 0.060623
| 0.02947
| 0.02947
| 0.997334
| 0.997334
| 0.996492
| 0.996492
| 0.996492
| 0.996492
| 0
| 0.010084
| 0.377796
| 13,547
| 362
| 103
| 37.422652
| 0.83533
| 0.48424
| 0
| 0.326797
| 0
| 0
| 0.039362
| 0.010097
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026144
| false
| 0
| 0.30719
| 0
| 0.359477
| 0.006536
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
7d54fee829a17a8af4e3a098f5f415c141d06c1d
| 20,190
|
py
|
Python
|
sandbox/lib/jumpscale/JumpscaleLibs/clients/gitea/client/user_service.py
|
threefoldtech/threebot_prebuilt
|
1f0e1c65c14cef079cd80f73927d7c8318755c48
|
[
"Apache-2.0"
] | 2
|
2019-05-09T07:21:25.000Z
|
2019-08-05T06:37:53.000Z
|
sandbox/lib/jumpscale/JumpscaleLibs/clients/gitea/client/user_service.py
|
threefoldtech/threebot_prebuilt
|
1f0e1c65c14cef079cd80f73927d7c8318755c48
|
[
"Apache-2.0"
] | 664
|
2018-12-19T12:43:44.000Z
|
2019-08-23T04:24:42.000Z
|
Jumpscale/clients/gitea/client/user_service.py
|
threefoldtech/jumpscale10
|
5fb073a82aeb0e66fc7d9660c45a1e31bc094bfa
|
[
"Apache-2.0"
] | 7
|
2019-05-03T07:14:37.000Z
|
2019-08-05T12:36:52.000Z
|
# DO NOT EDIT THIS FILE. This file will be overwritten when re-running go-raml.
from .Email import Email
from .GPGKey import GPGKey
from .Organization import Organization
from .PublicKey import PublicKey
from .Repository import Repository
from .TrackedTime import TrackedTime
from .User import User
from .unhandled_api_error import UnhandledAPIError
from .unmarshall_error import UnmarshallError
class UserService:
    """Generated client for the Gitea ``/user`` REST endpoints.

    Each public method mirrors one HTTP route.  List endpoints return a
    tuple ``([model, ...], response)``, single-object endpoints return
    ``(model, response)``, and plain pass-through endpoints (DELETE, PUT,
    existence-check GET) return the raw response of the underlying client.

    On an unexpected status code the decode helpers raise
    ``UnhandledAPIError``; on any payload-decoding failure they raise
    ``UnmarshallError``.
    """

    def __init__(self, client):
        # ``client`` must expose get/post/put/delete and a ``base_url``.
        self.client = client

    def _unmarshall_one(self, resp, ok_code, model_cls):
        """Decode ``resp`` into a single ``model_cls`` instance.

        Raises UnhandledAPIError when the status code is not ``ok_code``
        and UnmarshallError when the payload cannot be decoded.
        """
        try:
            if resp.status_code == ok_code:
                return model_cls(resp.json()), resp
            message = "unknown status code={}".format(resp.status_code)
            raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
        except ValueError as msg:
            # resp.json() raises ValueError on a non-JSON body
            raise UnmarshallError(resp, msg)
        except UnhandledAPIError:
            raise
        except Exception as e:
            # str(e), not e.message: Exception has no ``message`` attribute
            # on Python 3, so the original re-raise itself raised
            # AttributeError and masked the real error.
            raise UnmarshallError(resp, str(e))

    def _unmarshall_list(self, resp, ok_code, model_cls):
        """Decode ``resp`` into a list of ``model_cls`` instances.

        Same error behavior as ``_unmarshall_one``.
        """
        try:
            if resp.status_code == ok_code:
                return [model_cls(elem) for elem in resp.json()], resp
            message = "unknown status code={}".format(resp.status_code)
            raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
        except ValueError as msg:
            raise UnmarshallError(resp, msg)
        except UnhandledAPIError:
            raise
        except Exception as e:
            raise UnmarshallError(resp, str(e))

    def userDeleteEmail(self, headers=None, query_params=None, content_type="application/json"):
        """
        Delete email addresses
        It is method for DELETE /user/emails
        """
        uri = self.client.base_url + "/user/emails"
        return self.client.delete(uri, None, headers, query_params, content_type)

    def userListEmails(self, headers=None, query_params=None, content_type="application/json"):
        """
        List the authenticated user's email addresses
        It is method for GET /user/emails
        """
        uri = self.client.base_url + "/user/emails"
        resp = self.client.get(uri, None, headers, query_params, content_type)
        return self._unmarshall_list(resp, 200, Email)

    def userAddEmail(self, data, headers=None, query_params=None, content_type="application/json"):
        """
        Add email addresses
        It is method for POST /user/emails
        """
        uri = self.client.base_url + "/user/emails"
        resp = self.client.post(uri, data, headers, query_params, content_type)
        return self._unmarshall_list(resp, 201, Email)

    def userCurrentListFollowers(self, headers=None, query_params=None, content_type="application/json"):
        """
        List the authenticated user's followers
        It is method for GET /user/followers
        """
        uri = self.client.base_url + "/user/followers"
        resp = self.client.get(uri, None, headers, query_params, content_type)
        return self._unmarshall_list(resp, 200, User)

    def userCurrentCheckFollowing(self, followee, headers=None, query_params=None, content_type="application/json"):
        """
        Check whether a user is followed by the authenticated user
        It is method for GET /user/following/{followee}
        """
        uri = self.client.base_url + "/user/following/" + followee
        return self.client.get(uri, None, headers, query_params, content_type)

    def userCurrentDeleteFollow(self, username, headers=None, query_params=None, content_type="application/json"):
        """
        Unfollow a user
        It is method for DELETE /user/following/{username}
        """
        uri = self.client.base_url + "/user/following/" + username
        return self.client.delete(uri, None, headers, query_params, content_type)

    def userCurrentPutFollow(self, data, username, headers=None, query_params=None, content_type="application/json"):
        """
        Follow a user
        It is method for PUT /user/following/{username}
        """
        uri = self.client.base_url + "/user/following/" + username
        return self.client.put(uri, data, headers, query_params, content_type)

    def userCurrentListFollowing(self, headers=None, query_params=None, content_type="application/json"):
        """
        List the users that the authenticated user is following
        It is method for GET /user/following
        """
        uri = self.client.base_url + "/user/following"
        resp = self.client.get(uri, None, headers, query_params, content_type)
        return self._unmarshall_list(resp, 200, User)

    def userCurrentDeleteGPGKey(self, id, headers=None, query_params=None, content_type="application/json"):
        """
        Remove a GPG key
        It is method for DELETE /user/gpg_keys/{id}
        """
        uri = self.client.base_url + "/user/gpg_keys/" + id
        return self.client.delete(uri, None, headers, query_params, content_type)

    def userCurrentGetGPGKey(self, id, headers=None, query_params=None, content_type="application/json"):
        """
        Get a GPG key
        It is method for GET /user/gpg_keys/{id}
        """
        uri = self.client.base_url + "/user/gpg_keys/" + id
        resp = self.client.get(uri, None, headers, query_params, content_type)
        return self._unmarshall_one(resp, 200, GPGKey)

    def userCurrentListGPGKeys(self, headers=None, query_params=None, content_type="application/json"):
        """
        List the authenticated user's GPG keys
        It is method for GET /user/gpg_keys
        """
        uri = self.client.base_url + "/user/gpg_keys"
        resp = self.client.get(uri, None, headers, query_params, content_type)
        return self._unmarshall_list(resp, 200, GPGKey)

    def userCurrentPostGPGKey(self, data, headers=None, query_params=None, content_type="application/json"):
        """
        Create a GPG key
        It is method for POST /user/gpg_keys
        """
        uri = self.client.base_url + "/user/gpg_keys"
        resp = self.client.post(uri, data, headers, query_params, content_type)
        return self._unmarshall_one(resp, 201, GPGKey)

    def userCurrentDeleteKey(self, id, headers=None, query_params=None, content_type="application/json"):
        """
        Delete a public key
        It is method for DELETE /user/keys/{id}
        """
        uri = self.client.base_url + "/user/keys/" + id
        return self.client.delete(uri, None, headers, query_params, content_type)

    def userCurrentGetKey(self, id, headers=None, query_params=None, content_type="application/json"):
        """
        Get a public key
        It is method for GET /user/keys/{id}
        """
        uri = self.client.base_url + "/user/keys/" + id
        resp = self.client.get(uri, None, headers, query_params, content_type)
        return self._unmarshall_one(resp, 200, PublicKey)

    def userCurrentListKeys(self, headers=None, query_params=None, content_type="application/json"):
        """
        List the authenticated user's public keys
        It is method for GET /user/keys
        """
        uri = self.client.base_url + "/user/keys"
        resp = self.client.get(uri, None, headers, query_params, content_type)
        return self._unmarshall_list(resp, 200, PublicKey)

    def userCurrentPostKey(self, data, headers=None, query_params=None, content_type="application/json"):
        """
        Create a public key
        It is method for POST /user/keys
        """
        uri = self.client.base_url + "/user/keys"
        resp = self.client.post(uri, data, headers, query_params, content_type)
        return self._unmarshall_one(resp, 201, PublicKey)

    def orgListCurrentUserOrgs(self, headers=None, query_params=None, content_type="application/json"):
        """
        List the current user's organizations
        It is method for GET /user/orgs
        """
        uri = self.client.base_url + "/user/orgs"
        resp = self.client.get(uri, None, headers, query_params, content_type)
        return self._unmarshall_list(resp, 200, Organization)

    def userCurrentListRepos(self, headers=None, query_params=None, content_type="application/json"):
        """
        List the repos that the authenticated user owns or has access to
        It is method for GET /user/repos
        """
        uri = self.client.base_url + "/user/repos"
        resp = self.client.get(uri, None, headers, query_params, content_type)
        return self._unmarshall_list(resp, 200, Repository)

    def createCurrentUserRepo(self, data, headers=None, query_params=None, content_type="application/json"):
        """
        Create a repository
        It is method for POST /user/repos
        """
        uri = self.client.base_url + "/user/repos"
        resp = self.client.post(uri, data, headers, query_params, content_type)
        return self._unmarshall_one(resp, 201, Repository)

    def userCurrentDeleteStar(self, owner, repo, headers=None, query_params=None, content_type="application/json"):
        """
        Unstar the given repo
        It is method for DELETE /user/starred/{owner}/{repo}
        """
        uri = self.client.base_url + "/user/starred/" + owner + "/" + repo
        return self.client.delete(uri, None, headers, query_params, content_type)

    def userCurrentCheckStarring(self, owner, repo, headers=None, query_params=None, content_type="application/json"):
        """
        Whether the authenticated is starring the repo
        It is method for GET /user/starred/{owner}/{repo}
        """
        uri = self.client.base_url + "/user/starred/" + owner + "/" + repo
        return self.client.get(uri, None, headers, query_params, content_type)

    def userCurrentPutStar(self, data, owner, repo, headers=None, query_params=None, content_type="application/json"):
        """
        Star the given repo
        It is method for PUT /user/starred/{owner}/{repo}
        """
        uri = self.client.base_url + "/user/starred/" + owner + "/" + repo
        return self.client.put(uri, data, headers, query_params, content_type)

    def userCurrentListStarred(self, headers=None, query_params=None, content_type="application/json"):
        """
        The repos that the authenticated user has starred
        It is method for GET /user/starred
        """
        uri = self.client.base_url + "/user/starred"
        resp = self.client.get(uri, None, headers, query_params, content_type)
        return self._unmarshall_list(resp, 200, Repository)

    def userCurrentListSubscriptions(self, headers=None, query_params=None, content_type="application/json"):
        """
        List repositories watched by the authenticated user
        It is method for GET /user/subscriptions
        """
        uri = self.client.base_url + "/user/subscriptions"
        resp = self.client.get(uri, None, headers, query_params, content_type)
        return self._unmarshall_list(resp, 200, Repository)

    def userCurrentTrackedTimes(self, headers=None, query_params=None, content_type="application/json"):
        """
        List the current user's tracked times
        It is method for GET /user/times
        """
        uri = self.client.base_url + "/user/times"
        resp = self.client.get(uri, None, headers, query_params, content_type)
        return self._unmarshall_list(resp, 200, TrackedTime)

    def orgListUserOrgs(self, username, headers=None, query_params=None, content_type="application/json"):
        """
        List a user's organizations
        It is method for GET /user/{username}/orgs
        """
        uri = self.client.base_url + "/user/" + username + "/orgs"
        resp = self.client.get(uri, None, headers, query_params, content_type)
        return self._unmarshall_list(resp, 200, Organization)

    def userGetCurrent(self, headers=None, query_params=None, content_type="application/json"):
        """
        Get the authenticated user
        It is method for GET /user
        """
        uri = self.client.base_url + "/user"
        resp = self.client.get(uri, None, headers, query_params, content_type)
        return self._unmarshall_one(resp, 200, User)
| 41.628866
| 118
| 0.611491
| 2,298
| 20,190
| 5.283725
| 0.067015
| 0.059298
| 0.062263
| 0.048921
| 0.892357
| 0.892028
| 0.860896
| 0.824411
| 0.821776
| 0.805057
| 0
| 0.003789
| 0.294205
| 20,190
| 484
| 119
| 41.714876
| 0.848281
| 0.099208
| 0
| 0.835366
| 1
| 0
| 0.0679
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085366
| false
| 0.003049
| 0.027439
| 0
| 0.198171
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
7db1e656907324d28e9b0db6d89704c25618c141
| 19,676
|
py
|
Python
|
data_generator.py
|
ju-leon/bmaml
|
6e55e8fbc317d66d93323e0ac49804af05f551ed
|
[
"MIT"
] | null | null | null |
data_generator.py
|
ju-leon/bmaml
|
6e55e8fbc317d66d93323e0ac49804af05f551ed
|
[
"MIT"
] | null | null | null |
data_generator.py
|
ju-leon/bmaml
|
6e55e8fbc317d66d93323e0ac49804af05f551ed
|
[
"MIT"
] | null | null | null |
from collections import OrderedDict
import numpy as np
from tensorflow.python.platform import flags
FLAGS = flags.FLAGS
class SinusoidGenerator(object):
    """Few-shot regression task generator: noisy sinusoids.

    Each task is a sine wave y = amp * sin(x - phs) with per-task amplitude
    and phase; Gaussian noise (scaled by the task amplitude) is stored
    separately in 'z' and only added to targets at batch time.
    Reads hyper-parameters from the TensorFlow FLAGS object (num_tasks,
    few_k_shot, val_k_shot, phase, noise_factor, finite, train,
    train_total_num_tasks, test_total_num_tasks); flags must be parsed
    before construction.
    """

    def __init__(self, split_data=True):
        print("Using Sinusoid")
        # data size
        self.split_data = split_data
        self.num_tasks = FLAGS.num_tasks    # tasks per meta-batch
        self.few_k_shot = FLAGS.few_k_shot  # support ("follower") shots
        self.val_k_shot = FLAGS.val_k_shot  # query/validation shots
        if self.split_data:
            # leader split spans support + query; an extra val_k_shot
            # samples are appended per task for the held-out split
            self.all_k_shot = FLAGS.few_k_shot + FLAGS.val_k_shot
            self.total_samples = self.all_k_shot + self.val_k_shot
        else:
            self.all_k_shot = None
            self.total_samples = self.few_k_shot + self.val_k_shot
        self.dim_input = 1
        self.dim_output = 1
        # train set: sampling ranges for amplitude, phase and inputs
        self.amp_range_train = [0.1, 5.0]
        self.phs_range_train = [0, np.pi*FLAGS.phase]
        self.inp_range_train = [-5.0, 5.0]
        # data size (duplicate assignment kept from original; harmless)
        self.dim_input = 1
        self.dim_output = 1
        # set valid/test tasks (two independent fixed pools, same ranges)
        self.valid_tasks = self.generate_finite_dataset(total_num_tasks=int(FLAGS.test_total_num_tasks),
                                                        is_training=False)
        self.test_tasks = self.generate_finite_dataset(total_num_tasks=int(FLAGS.test_total_num_tasks),
                                                       is_training=False)
        self.valid_batch_idx = 0
        self.test_batch_idx = 0
        # set train tasks: finite pool vs. freshly sampled every batch
        if FLAGS.finite:
            self.train_tasks = self.generate_finite_dataset(total_num_tasks=int(FLAGS.train_total_num_tasks),
                                                            is_training=True)
            self.train_batch_idx = 0
            self.generate_batch = self.generate_finite_batch
        else:
            self.generate_batch = self.generate_infinite_batch

    # generate finite dataset
    def generate_finite_dataset(self, total_num_tasks, is_training=True):
        """Pre-sample ``total_num_tasks`` sinusoid tasks.

        Returns an OrderedDict with per-task inputs 'x', clean targets 'y',
        noise 'z', the task parameters ('amp', 'phs', 'frq') and 'size'.
        NOTE(review): ``is_training`` is accepted but unused here -- train
        and eval tasks are drawn from the same ranges.
        """
        # get range
        amp_range = self.amp_range_train
        phs_range = self.phs_range_train
        inp_range = self.inp_range_train
        # sample tasks
        amp_list = np.random.uniform(low=amp_range[0], high=amp_range[1], size=[total_num_tasks])
        phs_list = np.random.uniform(low=phs_range[0], high=phs_range[1], size=[total_num_tasks])
        frq_list = np.ones(shape=[total_num_tasks])  # frequency fixed at 1
        x_list = np.random.uniform(low=inp_range[0], high=inp_range[1], size=[total_num_tasks, self.total_samples, 1])
        y_list = np.zeros(shape=[total_num_tasks, self.total_samples, 1])
        z_list = np.zeros(shape=[total_num_tasks, self.total_samples, 1])
        # for each task
        for t in range(total_num_tasks):
            # sample noise (std scales with the task amplitude)
            z_list[t] = np.random.normal(loc=0.0, scale=FLAGS.noise_factor * amp_list[t], size=[self.total_samples, 1])
            # compute output
            y_list[t] = amp_list[t] * np.sin(frq_list[t] * x_list[t] - phs_list[t])
        # task dataset
        tasks = OrderedDict()
        tasks['x'] = x_list
        tasks['y'] = y_list
        tasks['z'] = z_list
        tasks['amp'] = amp_list
        tasks['phs'] = phs_list
        tasks['frq'] = frq_list
        tasks['size'] = total_num_tasks
        return tasks

    # get batch from finite dataset
    def generate_finite_batch(self,
                              is_training=True,
                              batch_idx=None,
                              inc_follow=True):
        """Draw a meta-batch of ``num_tasks`` tasks from a fixed pool.

        ``batch_idx`` selects a contiguous slice; if None, tasks are drawn
        uniformly without replacement.  With ``split_data`` the return is
        [follow_x, leader_x, valid_x, follow_y, leader_y, valid_y]; the
        leader split includes the follower samples iff ``inc_follow``.
        Targets get noise added; validation targets stay clean at eval time.
        """
        # get dataset: train pool, else valid pool during training runs,
        # else the test pool
        if is_training:
            task_list = self.train_tasks
            num_tasks = task_list['size']
        elif FLAGS.train:
            task_list = self.valid_tasks
            num_tasks = task_list['size']
        else:
            task_list = self.test_tasks
            num_tasks = task_list['size']
        # get batch-wise data
        if batch_idx is not None:
            x_list = task_list['x'][batch_idx:(batch_idx + self.num_tasks)]
            y_list = task_list['y'][batch_idx:(batch_idx + self.num_tasks)]
            z_list = task_list['z'][batch_idx:(batch_idx + self.num_tasks)]
        else:
            idx_list = np.arange(num_tasks)
            np.random.shuffle(idx_list)
            idx_list = idx_list[:self.num_tasks]
            x_list = task_list['x'][idx_list]
            y_list = task_list['y'][idx_list]
            z_list = task_list['z'][idx_list]
        # split data
        if self.split_data:
            follow_x = x_list[:, :self.few_k_shot]
            follow_y = y_list[:, :self.few_k_shot]
            follow_z = z_list[:, :self.few_k_shot]
            if inc_follow:
                # leader set contains the follower samples too
                leader_x = x_list[:, :self.all_k_shot]
                leader_y = y_list[:, :self.all_k_shot]
                leader_z = z_list[:, :self.all_k_shot]
            else:
                leader_x = x_list[:, self.few_k_shot:self.all_k_shot]
                leader_y = y_list[:, self.few_k_shot:self.all_k_shot]
                leader_z = z_list[:, self.few_k_shot:self.all_k_shot]
            valid_x = x_list[:, self.all_k_shot:]
            valid_y = y_list[:, self.all_k_shot:]
            valid_z = z_list[:, self.all_k_shot:]
            # add noise
            return [follow_x,
                    leader_x,
                    valid_x,
                    follow_y + follow_z,
                    leader_y + leader_z,
                    valid_y + valid_z if is_training else valid_y]
        else:
            train_x, valid_x = x_list[:, :self.few_k_shot], x_list[:, self.few_k_shot:]
            train_y, valid_y = y_list[:, :self.few_k_shot], y_list[:, self.few_k_shot:]
            train_z, valid_z = z_list[:, :self.few_k_shot], z_list[:, self.few_k_shot:]
            # add noise
            return [train_x,
                    valid_x,
                    train_y + train_z,
                    valid_y + valid_z if is_training else valid_y]

    # get batch from infinite dataset
    def generate_infinite_batch(self,
                                is_training=True,
                                batch_idx=None,
                                inc_follow=True):
        """Like ``generate_finite_batch``, but training batches sample a
        fresh set of tasks every call; evaluation batches still come from
        the fixed valid/test pools.
        """
        if is_training:
            # get range
            amp_range = self.amp_range_train
            phs_range = self.phs_range_train
            inp_range = self.inp_range_train
            # sample tasks
            amp_list = np.random.uniform(low=amp_range[0], high=amp_range[1], size=[self.num_tasks])
            phs_list = np.random.uniform(low=phs_range[0], high=phs_range[1], size=[self.num_tasks])
            frq_list = np.ones(shape=[self.num_tasks])
            x_list = np.random.uniform(low=inp_range[0], high=inp_range[1], size=[self.num_tasks, self.total_samples, 1])
            y_list = np.zeros(shape=[self.num_tasks, self.total_samples, 1])
            z_list = np.zeros(shape=[self.num_tasks, self.total_samples, 1])
            # for each task
            for t in range(self.num_tasks):
                # sample noise
                z_list[t] = np.random.normal(loc=0.0, scale=FLAGS.noise_factor * amp_list[t], size=[self.total_samples, 1])
                # compute output
                y_list[t] = amp_list[t] * np.sin(frq_list[t] * x_list[t] - phs_list[t])
        else:
            if FLAGS.train:
                task_list = self.valid_tasks
                num_tasks = task_list['size']
            else:
                task_list = self.test_tasks
                num_tasks = task_list['size']
            # get batch-wise data
            if batch_idx is not None:
                x_list = task_list['x'][batch_idx:(batch_idx + self.num_tasks)]
                y_list = task_list['y'][batch_idx:(batch_idx + self.num_tasks)]
                z_list = task_list['z'][batch_idx:(batch_idx + self.num_tasks)]
            else:
                idx_list = np.arange(num_tasks)
                np.random.shuffle(idx_list)
                idx_list = idx_list[:self.num_tasks]
                x_list = task_list['x'][idx_list]
                y_list = task_list['y'][idx_list]
                z_list = task_list['z'][idx_list]
        # split data
        if self.split_data:
            follow_x = x_list[:, :self.few_k_shot]
            follow_y = y_list[:, :self.few_k_shot]
            follow_z = z_list[:, :self.few_k_shot]
            if inc_follow:
                leader_x = x_list[:, :self.all_k_shot]
                leader_y = y_list[:, :self.all_k_shot]
                leader_z = z_list[:, :self.all_k_shot]
            else:
                leader_x = x_list[:, self.few_k_shot:self.all_k_shot]
                leader_y = y_list[:, self.few_k_shot:self.all_k_shot]
                leader_z = z_list[:, self.few_k_shot:self.all_k_shot]
            valid_x = x_list[:, self.all_k_shot:]
            valid_y = y_list[:, self.all_k_shot:]
            valid_z = z_list[:, self.all_k_shot:]
            # add noise
            return [follow_x,
                    leader_x,
                    valid_x,
                    follow_y + follow_z,
                    leader_y + leader_z,
                    valid_y + valid_z if is_training else valid_y]
        else:
            train_x, valid_x = x_list[:, :self.few_k_shot], x_list[:, self.few_k_shot:]
            train_y, valid_y = y_list[:, :self.few_k_shot], y_list[:, self.few_k_shot:]
            train_z, valid_z = z_list[:, :self.few_k_shot], z_list[:, self.few_k_shot:]
            # add noise
            return [train_x,
                    valid_x,
                    train_y + train_z,
                    valid_y + valid_z if is_training else valid_y]
class LineSineGenerator(object):
    """Few-shot regression task generator: mixture of sinusoid and linear tasks.

    Identical to SinusoidGenerator except each task is, with probability
    1/2, a line y = slope * x instead of a sine wave.  Noise 'z' is kept
    separate and added to targets at batch time.  Hyper-parameters are read
    from the TensorFlow FLAGS object; flags must be parsed before
    construction.
    """

    def __init__(self, split_data=True):
        print("Using LineSine")
        # data size
        self.split_data = split_data
        self.num_tasks = FLAGS.num_tasks    # tasks per meta-batch
        self.few_k_shot = FLAGS.few_k_shot  # support ("follower") shots
        self.val_k_shot = FLAGS.val_k_shot  # query/validation shots
        if self.split_data:
            # leader split spans support + query; extra val_k_shot samples
            # are appended per task for the held-out split
            self.all_k_shot = FLAGS.few_k_shot + FLAGS.val_k_shot
            self.total_samples = self.all_k_shot + self.val_k_shot
        else:
            self.all_k_shot = None
            self.total_samples = self.few_k_shot + self.val_k_shot
        self.dim_input = 1
        self.dim_output = 1
        # train set: sampling ranges for amplitude, phase, slope and inputs
        self.amp_range_train = [0.1, 5.0]
        self.phs_range_train = [0, np.pi*FLAGS.phase]
        self.slope_range_train = [-1.0, 1.0]
        self.inp_range_train = [-5.0, 5.0]
        # data size (duplicate assignment kept from original; harmless)
        self.dim_input = 1
        self.dim_output = 1
        # set valid/test tasks (two independent fixed pools, same ranges)
        self.valid_tasks = self.generate_finite_dataset(total_num_tasks=int(FLAGS.test_total_num_tasks),
                                                        is_training=False)
        self.test_tasks = self.generate_finite_dataset(total_num_tasks=int(FLAGS.test_total_num_tasks),
                                                       is_training=False)
        self.valid_batch_idx = 0
        self.test_batch_idx = 0
        # set train tasks: finite pool vs. freshly sampled every batch
        if FLAGS.finite:
            self.train_tasks = self.generate_finite_dataset(total_num_tasks=int(FLAGS.train_total_num_tasks),
                                                            is_training=True)
            self.train_batch_idx = 0
            self.generate_batch = self.generate_finite_batch
        else:
            self.generate_batch = self.generate_infinite_batch

    # generate finite dataset
    def generate_finite_dataset(self, total_num_tasks, is_training=True):
        """Pre-sample ``total_num_tasks`` tasks, each randomly a sinusoid
        or a line.

        Returns an OrderedDict with 'x', 'y', 'z', the sinusoid parameters
        and 'size'.  NOTE(review): the sampled slopes are not stored in the
        returned dict, and ``is_training`` is unused.
        """
        # get range
        amp_range = self.amp_range_train
        phs_range = self.phs_range_train
        inp_range = self.inp_range_train
        slope_range = self.slope_range_train
        amp_list = np.random.uniform(low=amp_range[0], high=amp_range[1], size=[total_num_tasks])
        phs_list = np.random.uniform(low=phs_range[0], high=phs_range[1], size=[total_num_tasks])
        frq_list = np.ones(shape=[total_num_tasks])  # frequency fixed at 1
        slope_list = np.random.uniform(low=slope_range[0], high=slope_range[1], size=[total_num_tasks])
        x_list = np.random.uniform(low=inp_range[0], high=inp_range[1], size=[total_num_tasks, self.total_samples, 1])
        y_list = np.zeros(shape=[total_num_tasks, self.total_samples, 1])
        z_list = np.zeros(shape=[total_num_tasks, self.total_samples, 1])
        # for each task
        for t in range(total_num_tasks):
            # sample noise (std scales with the task amplitude even for lines)
            z_list[t] = np.random.normal(loc=0.0, scale=FLAGS.noise_factor * amp_list[t], size=[self.total_samples, 1])
            # Randomly select sinusiod or linear task
            if (np.random.randint(2)):
                y_list[t] = amp_list[t] * np.sin(frq_list[t] * x_list[t] - phs_list[t])
            else:
                y_list[t] = x_list[t] * slope_list[t]
        # task dataset
        tasks = OrderedDict()
        tasks['x'] = x_list
        tasks['y'] = y_list
        tasks['z'] = z_list
        tasks['amp'] = amp_list
        tasks['phs'] = phs_list
        tasks['frq'] = frq_list
        tasks['size'] = total_num_tasks
        return tasks

    # get batch from finite dataset
    def generate_finite_batch(self,
                              is_training=True,
                              batch_idx=None,
                              inc_follow=True):
        """Draw a meta-batch of ``num_tasks`` tasks from a fixed pool.

        ``batch_idx`` selects a contiguous slice; if None, tasks are drawn
        uniformly without replacement.  With ``split_data`` the return is
        [follow_x, leader_x, valid_x, follow_y, leader_y, valid_y]; the
        leader split includes the follower samples iff ``inc_follow``.
        Targets get noise added; validation targets stay clean at eval time.
        """
        # get dataset: train pool, else valid pool during training runs,
        # else the test pool
        if is_training:
            task_list = self.train_tasks
            num_tasks = task_list['size']
        elif FLAGS.train:
            task_list = self.valid_tasks
            num_tasks = task_list['size']
        else:
            task_list = self.test_tasks
            num_tasks = task_list['size']
        # get batch-wise data
        if batch_idx is not None:
            x_list = task_list['x'][batch_idx:(batch_idx + self.num_tasks)]
            y_list = task_list['y'][batch_idx:(batch_idx + self.num_tasks)]
            z_list = task_list['z'][batch_idx:(batch_idx + self.num_tasks)]
        else:
            idx_list = np.arange(num_tasks)
            np.random.shuffle(idx_list)
            idx_list = idx_list[:self.num_tasks]
            x_list = task_list['x'][idx_list]
            y_list = task_list['y'][idx_list]
            z_list = task_list['z'][idx_list]
        # split data
        if self.split_data:
            follow_x = x_list[:, :self.few_k_shot]
            follow_y = y_list[:, :self.few_k_shot]
            follow_z = z_list[:, :self.few_k_shot]
            if inc_follow:
                # leader set contains the follower samples too
                leader_x = x_list[:, :self.all_k_shot]
                leader_y = y_list[:, :self.all_k_shot]
                leader_z = z_list[:, :self.all_k_shot]
            else:
                leader_x = x_list[:, self.few_k_shot:self.all_k_shot]
                leader_y = y_list[:, self.few_k_shot:self.all_k_shot]
                leader_z = z_list[:, self.few_k_shot:self.all_k_shot]
            valid_x = x_list[:, self.all_k_shot:]
            valid_y = y_list[:, self.all_k_shot:]
            valid_z = z_list[:, self.all_k_shot:]
            # add noise
            return [follow_x,
                    leader_x,
                    valid_x,
                    follow_y + follow_z,
                    leader_y + leader_z,
                    valid_y + valid_z if is_training else valid_y]
        else:
            train_x, valid_x = x_list[:, :self.few_k_shot], x_list[:, self.few_k_shot:]
            train_y, valid_y = y_list[:, :self.few_k_shot], y_list[:, self.few_k_shot:]
            train_z, valid_z = z_list[:, :self.few_k_shot], z_list[:, self.few_k_shot:]
            # add noise
            return [train_x,
                    valid_x,
                    train_y + train_z,
                    valid_y + valid_z if is_training else valid_y]

    # get batch from infinite dataset
    def generate_infinite_batch(self,
                                is_training=True,
                                batch_idx=None,
                                inc_follow=True):
        """Like ``generate_finite_batch``, but training batches sample a
        fresh set of sinusoid/line tasks every call; evaluation batches
        still come from the fixed valid/test pools.
        """
        if is_training:
            # get range
            amp_range = self.amp_range_train
            phs_range = self.phs_range_train
            inp_range = self.inp_range_train
            slope_range = self.slope_range_train
            # sample tasks
            amp_list = np.random.uniform(low=amp_range[0], high=amp_range[1], size=[self.num_tasks])
            phs_list = np.random.uniform(low=phs_range[0], high=phs_range[1], size=[self.num_tasks])
            frq_list = np.ones(shape=[self.num_tasks])
            slope_list = np.random.uniform(low=slope_range[0], high=slope_range[1], size=[self.num_tasks])
            x_list = np.random.uniform(low=inp_range[0], high=inp_range[1], size=[self.num_tasks, self.total_samples, 1])
            y_list = np.zeros(shape=[self.num_tasks, self.total_samples, 1])
            z_list = np.zeros(shape=[self.num_tasks, self.total_samples, 1])
            # for each task
            for t in range(self.num_tasks):
                # sample noise
                z_list[t] = np.random.normal(loc=0.0, scale=FLAGS.noise_factor * amp_list[t], size=[self.total_samples, 1])
                # Randomly select sinusiod or linear task
                if (np.random.randint(2)):
                    y_list[t] = amp_list[t] * np.sin(frq_list[t] * x_list[t] - phs_list[t])
                else:
                    y_list[t] = x_list[t] * slope_list[t]
        else:
            if FLAGS.train:
                task_list = self.valid_tasks
                num_tasks = task_list['size']
            else:
                task_list = self.test_tasks
                num_tasks = task_list['size']
            # get batch-wise data
            if batch_idx is not None:
                x_list = task_list['x'][batch_idx:(batch_idx + self.num_tasks)]
                y_list = task_list['y'][batch_idx:(batch_idx + self.num_tasks)]
                z_list = task_list['z'][batch_idx:(batch_idx + self.num_tasks)]
            else:
                idx_list = np.arange(num_tasks)
                np.random.shuffle(idx_list)
                idx_list = idx_list[:self.num_tasks]
                x_list = task_list['x'][idx_list]
                y_list = task_list['y'][idx_list]
                z_list = task_list['z'][idx_list]
        # split data
        if self.split_data:
            follow_x = x_list[:, :self.few_k_shot]
            follow_y = y_list[:, :self.few_k_shot]
            follow_z = z_list[:, :self.few_k_shot]
            if inc_follow:
                leader_x = x_list[:, :self.all_k_shot]
                leader_y = y_list[:, :self.all_k_shot]
                leader_z = z_list[:, :self.all_k_shot]
            else:
                leader_x = x_list[:, self.few_k_shot:self.all_k_shot]
                leader_y = y_list[:, self.few_k_shot:self.all_k_shot]
                leader_z = z_list[:, self.few_k_shot:self.all_k_shot]
            valid_x = x_list[:, self.all_k_shot:]
            valid_y = y_list[:, self.all_k_shot:]
            valid_z = z_list[:, self.all_k_shot:]
            # add noise
            return [follow_x,
                    leader_x,
                    valid_x,
                    follow_y + follow_z,
                    leader_y + leader_z,
                    valid_y + valid_z if is_training else valid_y]
        else:
            train_x, valid_x = x_list[:, :self.few_k_shot], x_list[:, self.few_k_shot:]
            train_y, valid_y = y_list[:, :self.few_k_shot], y_list[:, self.few_k_shot:]
            train_z, valid_z = z_list[:, :self.few_k_shot], z_list[:, self.few_k_shot:]
            # add noise
            return [train_x,
                    valid_x,
                    train_y + train_z,
                    valid_y + valid_z if is_training else valid_y]
| 40.821577
| 123
| 0.553822
| 2,717
| 19,676
| 3.651086
| 0.03975
| 0.054435
| 0.045161
| 0.062903
| 0.981855
| 0.981855
| 0.981855
| 0.981855
| 0.981855
| 0.97379
| 0
| 0.00697
| 0.343718
| 19,676
| 481
| 124
| 40.906445
| 0.761248
| 0.043505
| 0
| 0.969359
| 0
| 0
| 0.006609
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022284
| false
| 0
| 0.008357
| 0
| 0.064067
| 0.005571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7df2061aef00ba186680b530104c67cab73de17e
| 3,297
|
py
|
Python
|
z2/part3/updated_part2_batch/jm/parser_errors_2/854462788.py
|
kozakusek/ipp-2020-testy
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
[
"MIT"
] | 1
|
2020-04-16T12:13:47.000Z
|
2020-04-16T12:13:47.000Z
|
z2/part3/updated_part2_batch/jm/parser_errors_2/854462788.py
|
kozakusek/ipp-2020-testy
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
[
"MIT"
] | 18
|
2020-03-06T17:50:15.000Z
|
2020-05-19T14:58:30.000Z
|
z2/part3/updated_part2_batch/jm/parser_errors_2/854462788.py
|
kozakusek/ipp-2020-testy
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
[
"MIT"
] | 18
|
2020-03-06T17:45:13.000Z
|
2020-06-09T19:18:31.000Z
|
# Auto-generated random-action regression test for the "gamma" game engine.
# Each assert records the exact return value observed for a scripted sequence
# of moves; any behavioral change in part1 will trip one of them.
from part1 import (
    gamma_board,
    gamma_busy_fields,
    gamma_delete,
    gamma_free_fields,
    gamma_golden_move,
    gamma_golden_possible,
    gamma_move,
    gamma_new,
)

"""
scenario: test_random_actions
uuid: 854462788
"""
"""
random actions, total chaos
"""
# create the game; gamma_new(4, 5, 2, 19) -- see part1 for parameter semantics
board = gamma_new(4, 5, 2, 19)
assert board is not None
assert gamma_move(board, 1, 1, 0) == 1
assert gamma_move(board, 2, 2, 2) == 1
assert gamma_move(board, 2, 3, 1) == 1
assert gamma_free_fields(board, 2) == 17
assert gamma_move(board, 1, 1, 0) == 0
assert gamma_move(board, 1, 2, 2) == 0
assert gamma_move(board, 2, 1, 1) == 1
assert gamma_move(board, 1, 3, 2) == 1
assert gamma_move(board, 2, 0, 2) == 1
assert gamma_busy_fields(board, 2) == 4
assert gamma_move(board, 1, 1, 2) == 1
assert gamma_move(board, 1, 2, 3) == 1
assert gamma_golden_move(board, 1, 2, 0) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 2, 4, 2) == 0
assert gamma_move(board, 2, 0, 4) == 1
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_move(board, 2, 3, 1) == 0
assert gamma_move(board, 2, 2, 0) == 1
assert gamma_move(board, 1, 1, 2) == 0
assert gamma_move(board, 2, 3, 0) == 1
assert gamma_busy_fields(board, 2) == 7
assert gamma_move(board, 1, 1, 2) == 0
assert gamma_move(board, 1, 2, 3) == 0
assert gamma_move(board, 2, 4, 1) == 0
assert gamma_move(board, 1, 4, 3) == 0
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_free_fields(board, 2) == 9
# snapshot the board rendering and compare it against the recorded state
board400468228 = gamma_board(board)
assert board400468228 is not None
assert board400468228 == ("2...\n"
                          "..1.\n"
                          "2121\n"
                          ".2.2\n"
                          ".122\n")
del board400468228
board400468228 = None
assert gamma_move(board, 1, 3, 3) == 1
assert gamma_move(board, 2, 4, 3) == 0
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_busy_fields(board, 2) == 7
assert gamma_move(board, 1, 3, 4) == 1
assert gamma_move(board, 1, 2, 1) == 1
assert gamma_golden_move(board, 1, 2, 0) == 1
assert gamma_move(board, 2, 4, 1) == 0
assert gamma_move(board, 1, 3, 0) == 0
assert gamma_free_fields(board, 1) == 6
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_move(board, 2, 2, 2) == 0
assert gamma_move(board, 1, 3, 0) == 0
assert gamma_free_fields(board, 1) == 6
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_move(board, 1, 1, 0) == 0
assert gamma_move(board, 1, 1, 1) == 0
assert gamma_free_fields(board, 1) == 6
assert gamma_move(board, 2, 0, 0) == 1
assert gamma_move(board, 2, 3, 4) == 0
assert gamma_move(board, 1, 3, 0) == 0
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 1, 1, 0) == 0
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_move(board, 2, 2, 3) == 0
assert gamma_move(board, 1, 1, 4) == 1
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_move(board, 2, 0, 1) == 1
assert gamma_golden_move(board, 2, 4, 3) == 0
assert gamma_move(board, 1, 3, 1) == 0
assert gamma_move(board, 1, 1, 4) == 0
assert gamma_busy_fields(board, 1) == 9
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 2, 2, 2) == 0
assert gamma_move(board, 1, 3, 0) == 0
assert gamma_move(board, 1, 2, 0) == 0
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_move(board, 2, 2, 4) == 1
assert gamma_busy_fields(board, 2) == 9
# release the game's resources
gamma_delete(board)
| 31.4
| 46
| 0.654838
| 610
| 3,297
| 3.377049
| 0.067213
| 0.363107
| 0.393204
| 0.524272
| 0.813107
| 0.812621
| 0.758738
| 0.568932
| 0.52233
| 0.497087
| 0
| 0.120239
| 0.187746
| 3,297
| 104
| 47
| 31.701923
| 0.648992
| 0
| 0
| 0.288889
| 0
| 0
| 0.009349
| 0
| 0
| 0
| 0
| 0
| 0.788889
| 1
| 0
| false
| 0
| 0.011111
| 0
| 0.011111
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
81b5d0e86b5a405d111e333db35cafd860817875
| 57,252
|
py
|
Python
|
pyNastran/bdf/cards/bdf_tables.py
|
luzpaz/pyNastran
|
939e9eefdc87a3bf67939a23dc09f155b93969a0
|
[
"BSD-3-Clause"
] | 293
|
2015-03-22T20:22:01.000Z
|
2022-03-14T20:28:24.000Z
|
pyNastran/bdf/cards/bdf_tables.py
|
luzpaz/pyNastran
|
939e9eefdc87a3bf67939a23dc09f155b93969a0
|
[
"BSD-3-Clause"
] | 512
|
2015-03-14T18:39:27.000Z
|
2022-03-31T16:15:43.000Z
|
pyNastran/bdf/cards/bdf_tables.py
|
luzpaz/pyNastran
|
939e9eefdc87a3bf67939a23dc09f155b93969a0
|
[
"BSD-3-Clause"
] | 136
|
2015-03-19T03:26:06.000Z
|
2022-03-25T22:14:54.000Z
|
# pylint: disable=R0902,R0904,R0914,C0111
"""
All table cards are defined in this file. This includes:
* table_d
* TABLED1 - Dynamic Table = f(Time, Frequency)
* TABLED2
* TABLED3
* table_m
* TABLEM1 - Material table = f(Temperature)
* TABLEM2
* TABLEM3
* TABLEM4
*tables
* TABLEST - Material table = f(Stress)
* TABLES1
* TABLEHT - Material table = f(Temperature)
* TABLEH1
*random_tables
* TABRND1
* TABRNDG
"""
from typing import List, Any
import numpy as np
from pyNastran.bdf.field_writer_8 import set_blank_if_default, print_card_8
from pyNastran.bdf.field_writer_16 import print_card_16
from pyNastran.bdf.field_writer_double import print_card_double
from pyNastran.bdf import MAX_INT
from pyNastran.bdf.cards.base_card import BaseCard
from pyNastran.bdf.bdf_interface.assign_type import (
integer, integer_or_blank, double, string, string_or_blank,
double_or_string, double_or_blank, integer_or_string)
def make_xy(table_id, table_type, xy):
    """Split an (x, y) pair sequence into two float64 columns.

    Raises ValueError with each non-numeric entry marked by a leading
    '*' so the offending row is easy to spot.
    """
    try:
        pairs = np.array(xy, dtype='float64')
    except ValueError:
        msg = 'cannot parse %s table_id=%r\n' % (table_type, table_id)
        for xi, yi in xy:
            marked = []
            for value in (xi, yi):
                try:
                    marked.append(float(value))
                except ValueError:
                    # flag the unparsable token
                    marked.append('*' + value)
            msg += ' %s %s\n' % (marked[0], marked[1])
        raise ValueError(msg)
    return pairs[:, 0], pairs[:, 1]
class Table(BaseCard):
    """Shared base class for the TABLEx cards; provides card writing."""

    def __init__(self):
        BaseCard.__init__(self)

    #def parse_fields(self, xy, nrepeated, is_data=False):
        #self.table = TableObj(xy, nrepeated, is_data)

    def write_card(self, size: int=8, is_double: bool=False) -> str:
        """Write the card in small/large (or double-precision) field format."""
        fields = self.repr_fields()
        if size != 8:
            writer = print_card_double if is_double else print_card_16
            return self.comment + writer(fields)
        # a table id too wide for an 8-character field forces large format
        writer = print_card_16 if self.tid > MAX_INT else print_card_8
        return self.comment + writer(fields)

    # scratch notes for a future interpolate() helper:
    #cxy = np.array(self.tc.table.table)
    #fc = cxy[:, 0]
    #yc = cxy[:, 1]
    #assert fc.shape == yc.shape, 'fc.shape=%s yc.shape=%s' % (str(fc.shape), str(yc.shape))
    #self.tc.interpolate(freq)
    #c = interp1d(fc, yc, freq)
class DTABLE(BaseCard):
    """Table of named real constants (label -> float value)."""
    type = 'DTABLE'

    @classmethod
    def _init_from_empty(cls):
        # minimal instance used by the card-initialization machinery
        default_values = {'CAT' : 1}
        return DTABLE(default_values, comment='')

    def _finalize_hdf5(self, encoding):
        """hdf5 helper function"""
        # hdf5 stores (keys, values) arrays; NaN encodes a missing value
        keys, values = self.default_values
        self.default_values = {key : value if not np.isnan(value) else None
                               for key, value in zip(keys, values)}

    def __init__(self, default_values, comment=''):
        """
        Creates a DTABLE card

        Parameters
        ----------
        default_values : dict
            key : str
                the parameter name
            value : float
                the value
        comment : str; default=''
            a comment for the card
        """
        BaseCard.__init__(self)
        if comment:
            self.comment = comment
        self.default_values = default_values
        #print('default_values = %s' % default_values)
        #for key, value in self.default_values.items():
            #print(key, type(key))
        # an empty DTABLE is meaningless
        assert len(self.default_values) > 0, self.default_values
        #print(self)

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a DTABLE card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        nfields = len(card) - 1
        assert nfields % 2 == 0, nfields
        default_values = {}
        j = 1
        # fields alternate (label, value) pairs starting at field 1
        for i in range(1, nfields + 1, 2):
            label = string(card, i, 'label_%i' % j)
            value = double(card, i + 1, 'value_%i' % j)
            assert label not in default_values, 'label_%i=%r is not unique' % (j, label)
            default_values[label] = value
            j += 1
        assert j >= 2, j
        return DTABLE(default_values, comment=comment)

    def __getitem__(self, key):
        """Look up a constant by label; KeyError lists every known entry."""
        try:
            item = self.default_values[key]
        except KeyError:
            msg = 'expected_key=%r\n' % str(key)
            for keyi, value in self.default_values.items():
                msg += 'DTABLE; key=%r value=%r\n' % (keyi, value)
            raise KeyError(msg)
        return item

    def raw_fields(self):
        """Returns the card's fields (labels sorted for a stable order)."""
        list_fields = ['DTABLE']
        #print('***default_values = %s' % self.default_values)
        assert len(self.default_values) > 0, self.default_values
        for label, value in sorted(self.default_values.items()):
            list_fields += [label, value]
        return list_fields

    #def repr_fields(self):
        #return self.raw_fields()

    def write_card(self, size: int=8, is_double: bool=False) -> str:
        """Write the card in small/large (or double-precision) field format."""
        card = self.repr_fields()
        if size == 8:
            return self.comment + print_card_8(card)
        if is_double:
            return self.comment + print_card_double(card)
        return self.comment + print_card_16(card)
class TABLED1(Table):
    """
    Dynamic Load Tabular Function, Form 1

    Defines a tabular function for use in generating frequency-dependent and
    time-dependent dynamic loads.

    +---------+------+-------+-------+--------+-----+-----+------+------+
    | 1       | 2    | 3     | 4     | 5      | 6   | 7   | 8    | 9    |
    +=========+======+=======+=======+========+=====+=====+======+======+
    | TABLED1 | TID  | XAXIS | YAXIS | EXTRAP |     |     |      |      |
    +---------+------+-------+-------+--------+-----+-----+------+------+
    |         | x1   | y1    | x2    | y2     | x3  | y3  | etc. | ENDT |
    +---------+------+-------+-------+--------+-----+-----+------+------+
    | TABLED1 | 32   |       |       |        |     |     |      |      |
    +---------+------+-------+-------+--------+-----+-----+------+------+
    |         | -3.0 | 6.9   | 2.0   | 5.6    | 3.0 | 5.6 | ENDT |      |
    +---------+------+-------+-------+--------+-----+-----+------+------+

    ..note:: EXTRAP is NX specific
    """
    type = 'TABLED1'

    @classmethod
    def _init_from_empty(cls):
        # minimal instance used by the card-initialization machinery
        tid = 1
        x = [0., 1.]
        y = [0., 1.]
        return TABLED1(tid, x, y, xaxis='LINEAR', yaxis='LINEAR', extrap=0, comment='')

    def __init__(self, tid: int, x: np.ndarray, y: np.ndarray,
                 xaxis: str='LINEAR', yaxis: str='LINEAR',
                 extrap: int=0, comment: str=''):
        """
        Creates a TABLED1, which is a dynamic load card that is applied
        by the DAREA card

        Parameters
        ----------
        tid : int
            table id
        x : List[float]
            nvalues
        y : List[float]
            nvalues
        xaxis : str
            LINEAR, LOG
        yaxis : str
            LINEAR, LOG
        extrap : int; default=0
            Extrapolation method:
                0 : linear
                1 : constant
            .. note:: this is NX specific
        comment : str; default=''
            a comment for the card
        """
        Table.__init__(self)
        if comment:
            self.comment = comment
        self.tid = tid
        self.extrap = extrap
        self.x = np.asarray(x, dtype='float64')
        self.y = np.asarray(y, dtype='float64')
        self.xaxis = xaxis
        self.yaxis = yaxis
        assert self.xaxis in ['LINEAR', 'LOG'], 'xaxis=%r' % (self.xaxis)
        assert self.yaxis in ['LINEAR', 'LOG'], 'yaxis=%r' % (self.yaxis)

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a TABLED1 card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        table_id = integer(card, 1, 'tid')
        xaxis = string_or_blank(card, 2, 'xaxis', 'LINEAR')
        yaxis = string_or_blank(card, 3, 'yaxis', 'LINEAR')
        # bug fix: this field was mislabeled 'yaxis'; it reads field 4 (EXTRAP),
        # so parse errors reported the wrong field name
        extrap = integer_or_blank(card, 4, 'extrap', 0)
        x, y = read_table(card, table_id, 'TABLED1')
        return TABLED1(table_id, x, y, xaxis=xaxis, yaxis=yaxis, extrap=extrap, comment=comment)

    @classmethod
    def add_op2_data(cls, data, comment=''):
        """Adds a TABLED1 from OP2 data: [tid, xaxis, yaxis, x1, y1, x2, y2, ...]."""
        table_id = data[0]
        xaxis = _map_axis(data[1])
        yaxis = _map_axis(data[2])
        xy = data[3:]
        # flat (x, y, x, y, ...) list -> (n, 2) columns
        xy = np.array(xy, dtype='float64')
        xy = xy.reshape(xy.size // 2, 2)
        x = xy[:, 0]
        y = xy[:, 1]
        return TABLED1(table_id, x, y, xaxis=xaxis, yaxis=yaxis, comment=comment)

    def raw_fields(self):
        """Returns the card's fields in unmodified form."""
        xy = []
        for xi, yi in zip(self.x, self.y):
            xy.extend([xi, yi])
        list_fields = ['TABLED1', self.tid, self.xaxis, self.yaxis, self.extrap,
                       None, None, None, None] + xy + ['ENDT']
        return list_fields

    def repr_fields(self):
        """Returns the card's fields with defaults blanked out."""
        extrap = set_blank_if_default(self.extrap, 0)
        xy = []
        for xi, yi in zip(self.x, self.y):
            xy.extend([xi, yi])
        list_fields = ['TABLED1', self.tid, self.xaxis, self.yaxis, extrap,
                       None, None, None, None] + xy + ['ENDT']
        return list_fields

    def interpolate(self, x):
        """Interpolate y at x, honoring the LINEAR/LOG axis settings."""
        if isinstance(x, float):
            x = [x]
        x = np.asarray(x)
        # index of the table point at/below each x; j is the point above
        i = np.searchsorted(self.x, x, side='left') - 1
        j = i + 1
        # TODO: handle out of range errors
        xi = self.x[i]
        yi = self.y[i]
        try:
            xj = self.x[j]
            yj = self.y[j]
        except IndexError:
            # x is beyond the last table point; fall back to the last y
            return yi

        # TODO: could probably speed this up with log rules
        if self.xaxis == 'LINEAR' and self.yaxis == 'LINEAR':
            dx = xj - xi
            y = (xj - x) / dx * yi + (x - xi) / dx * yj
        elif self.xaxis == 'LOG' and self.yaxis == 'LINEAR':
            dx = np.log(xj / xi)
            y = np.log(xj / x) / dx * yi + np.log(x / xi) / dx * yj
        elif self.xaxis == 'LINEAR' and self.yaxis == 'LOG':
            dx = xj - xi
            lny = (xj - x) / dx * np.log(yi) + (x - xi) / dx * np.log(yj)
            y = np.exp(lny)
        elif self.xaxis == 'LOG' and self.yaxis == 'LOG':
            dx = np.log(xj / xi)
            lny = (xj - x) / dx * np.log(yi) + (x - xi) / dx * np.log(yj)
            y = np.exp(lny)
        else:
            raise NotImplementedError('xaxis=%r yaxis=%r' % (self.xaxis, self.yaxis))
        return y
class TABLED2(Table):
    """
    Dynamic Load Tabular Function, Form 2

    Defines a tabular function for use in generating frequency-dependent and
    time-dependent dynamic loads. Also contains parametric data for use with the
    table.
    """
    type = 'TABLED2'

    @classmethod
    def _init_from_empty(cls):
        # minimal instance used by the card-initialization machinery
        tid = 1
        x1 = 1.
        x = [0., 1.]
        y = [0., 1.]
        return TABLED2(tid, x1, x, y, extrap=0, comment='')

    def __init__(self, tid: int, x1: float,
                 x: np.ndarray, y: np.ndarray,
                 extrap: int=0, comment: str=''):
        """
        Parameters
        ----------
        tid : int
            table id
        x1 : float
            y = yT(x - x1)
        x : List[float]
            the x values
        y : List[float]
            the y values
        extrap : int; default=0
            Extrapolation method:
                0 : linear
                1 : constant
            .. note:: this is NX specific
        comment : str; default=''
            a comment for the card
        """
        Table.__init__(self)
        if comment:
            self.comment = comment
        self.tid = tid
        self.x1 = x1
        self.extrap = extrap
        self.x = np.asarray(x, dtype='float64')
        self.y = np.asarray(y, dtype='float64')

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a TABLED2 card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        table_id = integer(card, 1, 'tid')
        x1 = double(card, 2, 'x1')
        extrap = integer_or_blank(card, 3, 'extrap', default=0)
        x, y = read_table(card, table_id, 'TABLED2')
        return TABLED2(table_id, x1, x, y, extrap=extrap, comment=comment)

    @classmethod
    def add_op2_data(cls, data, comment=''):
        """Adds a TABLED2 from OP2 data: [tid, x1, x1, y1, x2, y2, ...]."""
        table_id = data[0]
        x1 = data[1]
        xy = data[2:]
        # flat (x, y, x, y, ...) list -> (n, 2) columns
        xy = np.array(xy, dtype='float64')
        xy = xy.reshape(xy.size // 2, 2)
        x = xy[:, 0]
        y = xy[:, 1]
        return TABLED2(table_id, x1, x, y, comment=comment)

    def interpolate(self, x):
        """Linearly interpolate y at the given x value(s).

        NOTE(review): the x1 shift implied by the card (y = yT(x - x1)) is
        not applied here; presumably the caller pre-shifts x -- confirm.
        """
        if isinstance(x, float):
            x = [x]
        x = np.asarray(x)
        # index of the table point at/below each x; j is the point above
        i = np.searchsorted(self.x, x, side='left') - 1
        j = i + 1
        # TODO: handle out of range errors
        xi = self.x[i]
        yi = self.y[i]
        try:
            xj = self.x[j]
            yj = self.y[j]
        except IndexError:
            # x is beyond the last table point; fall back to the last y
            return yi
        # linear interpolation between (xi, yi) and (xj, yj)
        dx = xj - xi
        y = (xj - x) / dx * yi + (x - xi) / dx * yj
        return y

    def raw_fields(self):
        """Returns the card's fields in unmodified form."""
        xy = []
        for xi, yi in zip(self.x, self.y):
            xy.extend([xi, yi])
        list_fields = ['TABLED2', self.tid, self.x1, self.extrap, None, None,
                       None, None, None] + xy + ['ENDT']
        return list_fields

    def repr_fields(self):
        """Returns the card's fields with defaults blanked out."""
        xy = []
        for xi, yi in zip(self.x, self.y):
            xy.extend([xi, yi])
        extrap = set_blank_if_default(self.extrap, 0)
        list_fields = ['TABLED2', self.tid, self.x1, extrap, None, None,
                       None, None, None] + xy + ['ENDT']
        return list_fields
class TABLED3(Table):
    """
    Dynamic Load Tabular Function, Form 3

    Defines a tabular function for use in generating frequency-dependent and
    time-dependent dynamic loads. Also contains parametric data for use with
    the table.
    """
    type = 'TABLED3'

    @classmethod
    def _init_from_empty(cls):
        # minimal instance used by the card-initialization machinery
        return TABLED3(1, 1., 2., [0., 1.], [0., 1.], extrap=0, comment='')

    def __init__(self, tid: int, x1: float, x2: float,
                 x: np.ndarray, y: np.ndarray,
                 extrap: int=0, comment: str=''):
        """
        Creates a TABLED3 card.

        Parameters
        ----------
        tid : int
            table id
        x1 : float
            y = yT(x - x1)
        x2 : ???
            ???
        x : List[float]
            the x values
        y : List[float]
            the y values
        extrap : int; default=0
            Extrapolation method (0=linear, 1=constant)
            .. note:: this is NX specific
        comment : str; default=''
            a comment for the card
        """
        Table.__init__(self)
        if comment:
            self.comment = comment
        self.tid = tid
        self.x1 = x1
        self.x2 = x2
        self.extrap = extrap
        self.x = np.asarray(x, dtype='float64')
        self.y = np.asarray(y, dtype='float64')
        # x2 is used as a divisor by the table evaluation
        assert self.x2 != 0.0

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a TABLED3 card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        tid = integer(card, 1, 'tid')
        x1 = double(card, 2, 'x1')
        x2 = double(card, 3, 'x2')
        extrap = integer_or_blank(card, 4, 'extrap', default=0)
        x, y = read_table(card, tid, 'TABLED3')
        return TABLED3(tid, x1, x2, x, y, extrap=extrap, comment=comment)

    @classmethod
    def add_op2_data(cls, data, comment=''):
        """Adds a TABLED3 from OP2 data: [tid, x1, x2, x1, y1, x2, y2, ...]."""
        tid, x1, x2 = data[0], data[1], data[2]
        # flat (x, y, x, y, ...) tail -> (n, 2) columns
        pairs = np.array(data[3:], dtype='float64')
        pairs = pairs.reshape(pairs.size // 2, 2)
        return TABLED3(tid, x1, x2, pairs[:, 0], pairs[:, 1], comment=comment)

    def raw_fields(self):
        """Returns the card's fields in unmodified form."""
        xy = [value for pair in zip(self.x, self.y) for value in pair]
        return ['TABLED3', self.tid, self.x1, self.x2, self.extrap,
                None, None, None, None] + xy + ['ENDT']

    def repr_fields(self):
        """Returns the card's fields with defaults blanked out."""
        xy = [value for pair in zip(self.x, self.y) for value in pair]
        extrap = set_blank_if_default(self.extrap, 0)
        return ['TABLED3', self.tid, self.x1, self.x2, extrap,
                None, None, None, None] + xy + ['ENDT']
class TABLED4(Table):
    """
    Dynamic Load Tabular Function, Form 4

    Defines the coefficients of a power series for use in generating
    frequency-dependent and time-dependent dynamic loads. Also contains
    parametric data for use with the table.
    """
    type = 'TABLED4'

    @classmethod
    def _init_from_empty(cls):
        # minimal instance used by the card-initialization machinery
        tid = 1
        x1 = 1.
        x2 = 1.
        x3 = 1.
        x4 = 1.
        a = [1., 2.]
        return TABLED4(tid, x1, x2, x3, x4, a, comment='')

    def __init__(self, tid: int,
                 x1: float, x2: float, x3: float, x4: float,
                 a: List[float], comment: str=''):
        """
        Creates a TABLED4 card.

        Parameters
        ----------
        tid : int
            table id
        x1, x2 : float
            shift/scale of the series argument: ((x - x1) / x2)
        x3, x4 : float
            lower/upper clamp applied to x before evaluation
        a : List[float]
            power-series coefficients
        comment : str; default=''
            a comment for the card
        """
        Table.__init__(self)
        if comment:
            self.comment = comment
        self.tid = tid
        self.x1 = x1
        self.x2 = x2
        self.x3 = x3
        self.x4 = x4
        self.a = np.array(a)
        # x2 is a divisor; [x3, x4] must be a valid interval
        assert self.x2 != 0.0, 'x2=%s\n%s' % (self.x2, str(self))
        assert self.x3 <= self.x4, 'x3=%s x4=%s\n%s' % (self.x3, self.x4, str(self))

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a TABLED4 card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        table_id = integer(card, 1, 'tid')
        x1 = double(card, 2, 'x1')
        x2 = double(card, 3, 'x2')
        x3 = double(card, 4, 'x3')
        x4 = double(card, 5, 'x4')
        nfields = len(card) - 1
        nterms = nfields - 9
        if nterms < 0:
            raise SyntaxError('%r card is too short' % cls.type)
        # coefficients occupy fields 9..nfields-1; the last field is ENDT
        a = []
        j = 0
        for i in range(9, nfields):
            ai = double(card, i, 'a%i' % (j))
            a.append(ai)
            j += 1
        string(card, nfields, 'ENDT')
        return TABLED4(table_id, x1, x2, x3, x4, a, comment=comment)

    @classmethod
    def add_op2_data(cls, data, comment=''):
        """Adds a TABLED4 from OP2 data: [tid, x1, x2, x3, x4, a0, a1, ...]."""
        table_id = data[0]
        x1 = data[1]
        x2 = data[2]
        x3 = data[3]
        x4 = data[4]
        a = data[5:]
        return TABLED4(table_id, x1, x2, x3, x4, a, comment=comment)

    def raw_fields(self):
        """Returns the card's fields in unmodified form."""
        list_fields = ['TABLED4', self.tid, self.x1, self.x2, self.x3, self.x4,
                       None, None, None] + list(self.a) + ['ENDT']
        return list_fields

    def repr_fields(self):
        return self.raw_fields()

    def interpolate(self, x):
        """
        y = sum_{i=0}^N Ai * ((x-x1)/x2))^i

        NOTE(review): np.asarray does not copy when x is already an ndarray,
        so the clamping writes below (x[i] = ..., x[j] = ...) can mutate the
        caller's array -- confirm whether that is intended.
        NOTE(review): the broadcast ``a * base ** n`` only lines up when x is
        scalar-like, and ``yi.sum()`` collapses everything to one value --
        confirm vector inputs are not expected here.
        """
        if isinstance(x, float):
            x = [x]
        x = np.asarray(x)
        nx = x.size
        na = self.a.size
        # exponents 0..na-1 of the power series
        n = np.arange(0., na)
        x1 = np.ones(nx) * self.x1
        x2 = np.ones(nx) * self.x2
        # clamp x into [x3, x4]
        i = np.where(x < self.x3)[0]
        x[i] = self.x3
        j = np.where(x > self.x4)[0]
        x[j] = self.x4
        #yi = np.zeros(x.shape, dtype=x.dtype)
        yi = self.a * ((x - x1) / x2) ** n
        return yi.sum()
class TABLED5(Table):
    """
    Dynamic Load Tabular Function, Form 5

    Defines a value as a function of two variables for use in generating
    frequency-dependent and time-dependent dynamic loads.
    """
    type = 'TABLED5'

    def __init__(self, tid, xs, table_ids, comment=''):
        """
        Creates a TABLED5 card.

        Parameters
        ----------
        tid : int
            table id
        xs : List[float]
            values of the outer variable
        table_ids : List[int]
            table id referenced at each xs value
        comment : str; default=''
            a comment for the card
        """
        Table.__init__(self)
        if comment:
            self.comment = comment
        self.tid = tid
        self.xs = xs
        self.table_ids = table_ids

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a TABLED5 card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        table_id = integer(card, 1, 'tid')
        nfields = len(card) - 1
        nterms = (nfields - 9) // 2
        if nterms < 0:
            raise SyntaxError('%r card is too short' % cls.type)
        xs = []
        table_ids = []
        for i in range(nterms):
            n = 9 + i * 2
            if card.field(n) == 'ENDT':
                break
            x = double_or_string(card, n, 'x' + str(i + 1))
            # bug fix: this used to be stored in 'table_id', clobbering the
            # card's own id read at field 1, so the returned TABLED5 carried
            # the last referenced table's id as its tid
            tablei = integer_or_string(card, n + 1, 'table_id' + str(i + 1))
            if x == 'SKIP' or tablei == 'SKIP':
                # 'SKIP' entries are placeholders and are dropped
                continue
            xs.append(x)
            table_ids.append(tablei)
        string(card, nfields, 'ENDT')
        return TABLED5(table_id, xs, table_ids, comment=comment)

    def raw_fields(self):
        """Returns the card's fields in unmodified form."""
        x_table = []
        for xi, tablei in zip(self.xs, self.table_ids):
            x_table.extend([xi, tablei])
        list_fields = ['TABLED5', self.tid, None, None, None, None,
                       None, None, None] + x_table + ['ENDT']
        return list_fields

    def repr_fields(self):
        return self.raw_fields()
class TABDMP1(Table):
    """Damping table; TYPE selects the damping measure ('G', 'CRIT' or 'Q')."""
    type = 'TABDMP1'

    @classmethod
    def _init_from_empty(cls):
        # minimal instance used by the card-initialization machinery
        tid = 1
        x = [0., 1.]
        y = [0., 1.]
        return TABDMP1(tid, x, y, Type='G', comment='')

    def __init__(self, tid: int, x: Any, y: Any, Type: str='G', comment: str='') -> None:
        """
        Creates a TABDMP1 card.

        Parameters
        ----------
        tid : int
            table id
        x, y : List[float]
            table values
        Type : str; default='G'
            damping measure; one of {'G', 'CRIT', 'Q'}
        comment : str; default=''
            a comment for the card
        """
        Table.__init__(self)
        if comment:
            self.comment = comment
        self.tid = tid
        self.Type = Type
        self.x = np.asarray(x, dtype='float64')
        self.y = np.asarray(y, dtype='float64')
        assert self.Type in ['G', 'CRIT', 'Q'], 'Type=%r' % self.Type

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a TABDMP1 card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        table_id = integer(card, 1, 'tid')
        Type = string_or_blank(card, 2, 'Type', 'G')
        x, y = read_table(card, table_id, 'TABDMP1')
        return TABDMP1(table_id, x, y, Type=Type, comment=comment)

    @classmethod
    def add_op2_data(cls, data, comment=''):
        """
        Adds a TABDMP1 card from the OP2.

        data layout: [tid, x1, Type, ?, ?, x1, y1, x2, y2, ...]
        (slots 3-4 are skipped here; their meaning is not visible in this
        module -- TODO confirm against the OP2 reader)
        """
        table_id = data[0]
        unused_x1 = data[1]
        Type = data[2]
        xy = np.array(data[5:], dtype='float64')
        # bug fix: the flat (x, y, x, y, ...) list must be reshaped to
        # (n, 2) before column slicing, as every sibling add_op2_data does;
        # without it xy[:, 0] raises on the 1-D array
        xy = xy.reshape(xy.size // 2, 2)
        x = xy[:, 0]
        y = xy[:, 1]
        return TABDMP1(table_id, x, y, Type=Type, comment=comment)

    def raw_fields(self):
        """Returns the card's fields in unmodified form."""
        xy = []
        for xi, yi in zip(self.x, self.y):
            xy.extend([xi, yi])
        list_fields = ['TABDMP1', self.tid, self.Type, None, None, None, None,
                       None, None] + xy + ['ENDT']
        return list_fields

    def repr_fields(self):
        return self.raw_fields()
class TABLEM1(Table):
    """
    Material property table (see module docstring: f(Temperature)).

    MSC
    ===
    +---------+------+-------+-------+--------+-----+-----+------+------+
    | 1       | 2    | 3     | 4     | 5      | 6   | 7   | 8    | 9    |
    +=========+======+=======+=======+========+=====+=====+======+======+
    | TABLEM1 | TID  |       |       |        |     |     |      |      |
    +---------+------+-------+-------+--------+-----+-----+------+------+
    |         | x1   | y1    | x2    | y2     | x3  | y3  | etc. | ENDT |
    +---------+------+-------+-------+--------+-----+-----+------+------+

    NX
    ==
    +---------+------+-------+-------+--------+-----+-----+------+------+
    | TABLEM1 | TID  | XAXIS | YAXIS | EXTRAP |     |     |      |      |
    +---------+------+-------+-------+--------+-----+-----+------+------+
    |         | x1   | y1    | x2    | y2     | x3  | y3  | etc. | ENDT |
    +---------+------+-------+-------+--------+-----+-----+------+------+
    """
    type = 'TABLEM1'

    @classmethod
    def _init_from_empty(cls):
        # minimal instance used by the card-initialization machinery
        return TABLEM1(1, [0., 1.], [0., 1.], xaxis='LINEAR', yaxis='LINEAR', comment='')

    def __init__(self, tid, x, y, xaxis='LINEAR', yaxis='LINEAR', comment=''):
        """Creates a TABLEM1 card from a table id and (x, y) values."""
        Table.__init__(self)
        if comment:
            self.comment = comment
        self.tid = tid
        self.xaxis = xaxis  # LINEAR/LOG
        self.yaxis = yaxis  # LINEAR/LOG
        self.x = np.asarray(x, dtype='float64')
        self.y = np.asarray(y, dtype='float64')

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a TABLEM1 card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        tid = integer(card, 1, 'tid')
        xaxis = string_or_blank(card, 2, 'xaxis', 'LINEAR')
        yaxis = string_or_blank(card, 3, 'yaxis', 'LINEAR')
        x, y = read_table(card, tid, 'TABLEM1')
        return TABLEM1(tid, x, y, xaxis=xaxis, yaxis=yaxis, comment=comment)

    @classmethod
    def add_op2_data(cls, data, comment=''):
        """Adds a TABLEM1 from OP2 data: [tid, x1, y1, x2, y2, ...]."""
        tid = data[0]
        # flat (x, y, x, y, ...) tail -> (n, 2) columns
        pairs = np.array(data[1:], dtype='float64')
        pairs = pairs.reshape(pairs.size // 2, 2)
        return TABLEM1(tid, pairs[:, 0], pairs[:, 1], comment=comment)

    def raw_fields(self):
        """Returns the card's fields in unmodified form."""
        xy = [value for pair in zip(self.x, self.y) for value in pair]
        return ['TABLEM1', self.tid, self.xaxis, self.yaxis, None, None,
                None, None, None] + xy + ['ENDT']

    def repr_fields(self):
        """Returns the card's fields with default axes blanked out."""
        xaxis = set_blank_if_default(self.xaxis, 'LINEAR')
        yaxis = set_blank_if_default(self.yaxis, 'LINEAR')
        xy = [value for pair in zip(self.x, self.y) for value in pair]
        return ['TABLEM1', self.tid, xaxis, yaxis, None, None,
                None, None, None] + xy + ['ENDT']
class TABLEM2(Table):
    """
    Material property table, Form 2 (see module docstring: f(Temperature)).

    +---------+------+-------+--------+-----+-----+-----+------+------+
    | 1       | 2    | 3     | 4      | 5   | 6   | 7   | 8    | 9    |
    +=========+======+=======+========+=====+=====+=====+======+======+
    | TABLEM2 | TID  | X1    | EXTRAP |     |     |     |      |      |
    +---------+------+-------+--------+-----+-----+-----+------+------+
    |         | x1   | y1    | x2     | y2  | x3  | y3  | etc. | ENDT |
    +---------+------+-------+--------+-----+-----+-----+------+------+
    | TABLEM2 | 32   | -10.5 |        |     |     |     |      |      |
    +---------+------+-------+--------+-----+-----+-----+------+------+
    |         | -3.0 | 6.9   | 2.0    | 5.6 | 3.0 | 5.6 | ENDT |      |
    +---------+------+-------+--------+-----+-----+-----+------+------+

    ..note:: EXTRAP is NX specific
    """
    type = 'TABLEM2'

    @classmethod
    def _init_from_empty(cls):
        # minimal instance used by the card-initialization machinery
        tid = 1
        x1 = 1.
        x = [0., 1.]
        y = [0., 1.]
        return TABLEM2(tid, x1, x, y, extrap=0, comment='')

    def __init__(self, tid, x1, x, y, extrap=0, comment=''):
        """
        Creates a TABLEM2 card.

        Parameters
        ----------
        tid : int
            table id
        x1 : float
            table shift (presumably y = yT(x - x1), as with TABLED2 --
            TODO confirm)
        x, y : List[float]
            table values
        extrap : int; default=0
            extrapolation method (0=linear, 1=constant); NX specific
        comment : str; default=''
            a comment for the card
        """
        Table.__init__(self)
        if comment:
            self.comment = comment
        self.tid = tid
        self.x1 = x1
        self.extrap = extrap
        self.x = np.asarray(x, dtype='float64')
        self.y = np.asarray(y, dtype='float64')

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a TABLEM2 card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        table_id = integer(card, 1, 'tid')
        # defined in MSC as an integer and used as a float...int > 0
        # defined in NX as a float; real
        # no default given in either, but from context, let's assume 0.0
        x1 = double_or_blank(card, 2, 'x1', 0.0)
        extrap = integer_or_blank(card, 3, 'EXTRAP', default=0)
        x, y = read_table(card, table_id, 'TABLEM2')
        return TABLEM2(table_id, x1, x, y, extrap=extrap, comment=comment)

    @classmethod
    def add_op2_data(cls, data, comment=''):
        """Adds a TABLEM2 from OP2 data: [tid, x1, x1, y1, x2, y2, ...]."""
        table_id = data[0]
        x1 = data[1]
        xy = data[2:]
        # flat (x, y, x, y, ...) list -> (n, 2) columns
        xy = np.array(xy, dtype='float64')
        xy = xy.reshape(xy.size // 2, 2)
        x = xy[:, 0]
        y = xy[:, 1]
        return TABLEM2(table_id, x1, x, y, comment=comment)

    def raw_fields(self):
        """Returns the card's fields in unmodified form."""
        xy = []
        for xi, yi in zip(self.x, self.y):
            xy.extend([xi, yi])
        list_fields = ['TABLEM2', self.tid, self.x1, self.extrap, None, None,
                       None, None, None] + xy + ['ENDT']
        return list_fields

    def repr_fields(self):
        """Returns the card's fields with defaults blanked out."""
        extrap = set_blank_if_default(self.extrap, 0)
        xy = []
        for xi, yi in zip(self.x, self.y):
            xy.extend([xi, yi])
        list_fields = ['TABLEM2', self.tid, self.x1, extrap, None, None,
                       None, None, None] + xy + ['ENDT']
        return list_fields
class TABLEM3(Table):
    """
    Material property table, Form 3 (see module docstring: f(Temperature)).

    +---------+------+-------+-------+--------+-----+-----+------+------+
    | 1       | 2    | 3     | 4     | 5      | 6   | 7   | 8    | 9    |
    +=========+======+=======+=======+========+=====+=====+======+======+
    | TABLEM3 | TID  | X1    | X2    | EXTRAP |     |     |      |      |
    +---------+------+-------+-------+--------+-----+-----+------+------+
    |         | x1   | y1    | x2    | y2     | x3  | y3  | etc. | ENDT |
    +---------+------+-------+-------+--------+-----+-----+------+------+
    | TABLEM3 | 32   | 126.9 | 30.0  |        |     |     |      |      |
    +---------+------+-------+-------+--------+-----+-----+------+------+
    |         | -3.0 | 6.9   | 2.0   | 5.6    | 3.0 | 5.6 | ENDT |      |
    +---------+------+-------+-------+--------+-----+-----+------+------+

    ..note:: EXTRAP is NX specific
    """
    type = 'TABLEM3'

    @classmethod
    def _init_from_empty(cls):
        # minimal instance used by the card-initialization machinery
        tid = 1
        x1 = 1.
        x2 = 2.
        x = [0., 1.]
        y = [0., 1.]
        return TABLEM3(tid, x1, x2, x, y, extrap=0, comment='')

    def __init__(self, tid, x1, x2, x, y, extrap=0, comment=''):
        """
        Creates a TABLEM3 card.

        Parameters
        ----------
        tid : int
            table id
        x1, x2 : float
            table parameters; x2 is used as a divisor (must be nonzero)
        x, y : List[float]
            table values
        extrap : int; default=0
            extrapolation method (0=linear, 1=constant); NX specific
        comment : str; default=''
            a comment for the card
        """
        Table.__init__(self)
        if comment:
            self.comment = comment
        self.tid = tid
        self.x1 = x1
        self.x2 = x2
        self.extrap = extrap
        self.x = np.asarray(x, dtype='float64')
        self.y = np.asarray(y, dtype='float64')
        assert self.x2 != 0.0

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a TABLEM3 card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        table_id = integer(card, 1, 'tid')
        x1 = double(card, 2, 'x1')
        x2 = double(card, 3, 'x2')
        extrap = integer_or_blank(card, 4, 'extrap', default=0)
        x, y = read_table(card, table_id, 'TABLEM3')
        return TABLEM3(table_id, x1, x2, x, y, extrap=extrap, comment=comment)

    @classmethod
    def add_op2_data(cls, data, comment=''):
        """Adds a TABLEM3 from OP2 data: [tid, x1, x2, x1, y1, x2, y2, ...]."""
        table_id = data[0]
        x1 = data[1]
        x2 = data[2]
        xy = data[3:]
        # flat (x, y, x, y, ...) list -> (n, 2) columns
        xy = np.array(xy, dtype='float64')
        xy = xy.reshape(xy.size // 2, 2)
        x = xy[:, 0]
        y = xy[:, 1]
        return TABLEM3(table_id, x1, x2, x, y, comment=comment)

    def raw_fields(self):
        """Returns the card's fields in unmodified form."""
        xy = []
        for xi, yi in zip(self.x, self.y):
            xy.extend([xi, yi])
        list_fields = ['TABLEM3', self.tid, self.x1, self.x2, self.extrap,
                       None, None, None, None] + xy + ['ENDT']
        return list_fields

    def repr_fields(self):
        """Returns the card's fields with defaults blanked out."""
        xy = []
        extrap = set_blank_if_default(self.extrap, 0)
        for xi, yi in zip(self.x, self.y):
            xy.extend([xi, yi])
        list_fields = ['TABLEM3', self.tid, self.x1, self.x2, extrap,
                       None, None, None, None] + xy + ['ENDT']
        return list_fields
class TABLEM4(Table):
    """
    Material property table, Form 4: power-series coefficients
    (evaluated analogously to TABLED4.interpolate).

    +---------+------+---------+--------+-----+--------+------+------+
    | 1       | 2    | 3       | 4      | 5   | 6      | 7    | 8    |
    +=========+======+=========+========+=====+========+======+======+
    | TABLEM4 | TID  | X1      | X2     | X3  | X4     |      |      |
    +---------+------+---------+--------+-----+--------+------+------+
    |         | A1   | A2      | A3     | A4  | A5     | etc. | ENDT |
    +---------+------+---------+--------+-----+--------+------+------+
    | TABLEM4 | 32   | 0.0     | 1.0    | 0.0 | 100.   |      |      |
    +---------+------+---------+--------+-----+--------+------+------+
    |         | 2.91 | -0.0329 | 6.51-5 | 0.0 | -3.4-7 | ENDT |      |
    +---------+------+---------+--------+-----+--------+------+------+
    """
    type = 'TABLEM4'

    @classmethod
    def _init_from_empty(cls):
        # minimal instance used by the card-initialization machinery
        tid = 1
        x1 = 1.
        x2 = 1.
        x3 = 1.
        x4 = 2.
        a = [1., 2.]
        return TABLEM4(tid, x1, x2, x3, x4, a, comment='')

    def __init__(self, tid, x1, x2, x3, x4, a, comment=''):
        """
        Creates a TABLEM4 card.

        Parameters
        ----------
        tid : int
            table id
        x1, x2 : float
            shift/scale of the series argument: ((x - x1) / x2)
        x3, x4 : float
            lower/upper bounds of the valid x range
        a : List[float]
            power-series coefficients
        comment : str; default=''
            a comment for the card
        """
        Table.__init__(self)
        if comment:
            self.comment = comment
        self.tid = tid
        self.x1 = x1
        self.x2 = x2
        self.x3 = x3
        self.x4 = x4
        self.a = np.asarray(a)
        # x2 is a divisor; [x3, x4] must be a valid interval
        assert self.x2 != 0.0, 'x2=%s\n%s' % (self.x2, str(self))
        assert self.x3 <= self.x4, 'x3=%s x4=%s\n%s' % (self.x3, self.x4, str(self))

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a TABLEM4 card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        table_id = integer(card, 1, 'tid')
        x1 = double(card, 2, 'x1')
        x2 = double(card, 3, 'x2')
        x3 = double(card, 4, 'x3')
        x4 = double(card, 5, 'x4')
        nfields = len(card) - 1
        nterms = nfields - 9
        if nterms < 0:
            raise SyntaxError('%r card is too short' % cls.type)
        # coefficients occupy fields 9..nfields-1; blanks default to 0.0
        a = []
        j = 0
        for i in range(9, nfields):
            ai = double_or_blank(card, i, 'a%i' % (j), 0.0)
            a.append(ai)
            j += 1
        string(card, nfields, 'ENDT')
        return TABLEM4(table_id, x1, x2, x3, x4, a, comment=comment)

    @classmethod
    def add_op2_data(cls, data, comment=''):
        """
        Adds a TABLEM4 card from the OP2

        Parameters
        ----------
        data : List[varies]
            a list of fields defined in OP2 format
        comment : str; default=''
            a comment for the card
        """
        table_id = data[0]
        x1 = data[1]
        x2 = data[2]
        x3 = data[3]
        x4 = data[4]
        # bug fix: coefficients start after x4, i.e. data[5:]; the old
        # data[3:] wrongly prepended x3 and x4 to the coefficient list
        # (compare TABLED4.add_op2_data)
        a = data[5:]
        return TABLEM4(table_id, x1, x2, x3, x4, a, comment=comment)

    def raw_fields(self):
        """Returns the card's fields in unmodified form."""
        list_fields = ['TABLEM4', self.tid, self.x1, self.x2, self.x3, self.x4,
                       None, None, None] + list(self.a) + ['ENDT']
        return list_fields

    def repr_fields(self):
        return self.raw_fields()
class TABLES1(Table):
    """
    Defines a tabular function for stress-dependent material properties.

    +---------+------+-------+-------+--------+-----+-------+------+------+
    |    1    |  2   |   3   |   4   |   5    |  6  |   7   |  8   |  9   |
    +=========+======+=======+=======+========+=====+=======+======+======+
    | TABLES1 | TID  | TYPE  |       |        |     |       |      |      |
    +---------+------+-------+-------+--------+-----+-------+------+------+
    |         |  x1  |  y1   |  x2   |   y2   |  x3 |  y3   | etc. | ENDT |
    +---------+------+-------+-------+--------+-----+-------+------+------+
    | TABLES1 |  32  |       |       |        |     |       |      |      |
    +---------+------+-------+-------+--------+-----+-------+------+------+
    |         | 0.0  |  0.0  | 0.01  | 1000.  | 0.2 | 1500. | ENDT |      |
    +---------+------+-------+-------+--------+-----+-------+------+------+
    """
    type = 'TABLES1'

    @classmethod
    def _init_from_empty(cls):
        """minimal two-point TABLES1 used by the card test framework"""
        return TABLES1(1, [0., 1.], [0., 1.], Type=1, comment='')

    def __init__(self, tid, x, y, Type=1, comment=''):
        """
        Adds a TABLES1 card, which defines a stress dependent material

        Parameters
        ----------
        tid : int
            Table ID
        Type : int; default=1
            Type of stress-strain curve (1 or 2)
            1 - Cauchy (true) stress vs. total true strain
            2 - Cauchy (true) stress vs. plastic true strain (MSC only)
            Type is MSC-specific and was added somewhere between
            2006 and 2016.
        x, y : List[float]
            table values
        comment : str; default=''
            a comment for the card
        """
        Table.__init__(self)
        if comment:
            self.comment = comment
        self.tid = tid
        self.Type = Type
        self.x = np.asarray(x, dtype='float64')
        self.y = np.asarray(y, dtype='float64')
        assert self.Type in [1, 2], 'TABLES1 Type=%s' % self.Type

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a TABLES1 card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        tid = integer(card, 1, 'tid')
        curve_type = integer_or_blank(card, 2, 'Type', 1)
        xs, ys = read_table(card, tid, 'TABLES1')
        return TABLES1(tid, xs, ys, Type=curve_type, comment=comment)

    @classmethod
    def add_op2_data(cls, data, comment=''):
        """
        Adds a TABLES1 card from the OP2

        Parameters
        ----------
        data : List[varies]
            [tid, x1, y1, x2, y2, ...] in OP2 format
        comment : str; default=''
            a comment for the card
        """
        tid = data[0]
        # flat [x1, y1, x2, y2, ...] stream -> (n, 2)
        pairs = np.array(data[1:], dtype='float64').reshape(-1, 2)
        return TABLES1(tid, pairs[:, 0], pairs[:, 1], Type=1, comment=comment)

    def raw_fields(self):
        """Returns the unmodified card fields as a list."""
        interleaved = [value for pair in zip(self.x, self.y) for value in pair]
        return (['TABLES1', self.tid, self.Type, None, None, None,
                 None, None, None] + interleaved + ['ENDT'])

    def repr_fields(self):
        """Returns the card fields with default values blanked out."""
        interleaved = [value for pair in zip(self.x, self.y) for value in pair]
        # MSC 2005.2 doesn't support Type; 2016.1 does
        stress_strain_curve_type = set_blank_if_default(self.Type, 1)
        return (['TABLES1', self.tid, stress_strain_curve_type, None, None, None,
                 None, None, None] + interleaved + ['ENDT'])
class TABLEST(Table):
    """
    +---------+-------+-------+-------+--------+------+------+------+------+
    |    1    |   2   |   3   |   4   |   5    |  6   |  7   |  8   |  9   |
    +=========+=======+=======+=======+========+======+======+======+======+
    | TABLEST |  TID  |       |       |        |      |      |      |      |
    +---------+-------+-------+-------+--------+------+------+------+------+
    |         |  x1   |  y1   |  x2   |   y2   |  x3  |  y3  | etc. | ENDT |
    +---------+-------+-------+-------+--------+------+------+------+------+
    | TABLEST |  32   |       |       |        |      |      |      |      |
    +---------+-------+-------+-------+--------+------+------+------+------+
    |         | 150.0 | 10.0  | 175.0 |  20.   | ENDT |      |      |      |
    +---------+-------+-------+-------+--------+------+------+------+------+
    """
    type = 'TABLEST'

    def __init__(self, tid, x, y, comment=''):
        """
        Creates a TABLEST card

        Parameters
        ----------
        tid : int
            Table ID
        x, y : List[float]
            table values
        comment : str; default=''
            a comment for the card
        """
        Table.__init__(self)
        if comment:
            self.comment = comment
        self.tid = tid
        self.x = np.asarray(x, dtype='float64')
        self.y = np.asarray(y, dtype='float64')

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a TABLEST card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        tid = integer(card, 1, 'tid')
        xs, ys = read_table(card, tid, 'TABLEST')
        return TABLEST(tid, xs, ys, comment=comment)

    @classmethod
    def add_op2_data(cls, data, comment=''):
        """
        Adds a TABLEST card from the OP2

        Parameters
        ----------
        data : List[varies]
            [tid, x1, y1, x2, y2, ...] in OP2 format
        comment : str; default=''
            a comment for the card
        """
        tid = data[0]
        # flat [x1, y1, x2, y2, ...] stream -> (n, 2)
        pairs = np.array(data[1:], dtype='float64').reshape(-1, 2)
        return TABLEST(tid, pairs[:, 0], pairs[:, 1], comment=comment)

    def raw_fields(self):
        """Returns the unmodified card fields as a list."""
        interleaved = [value for pair in zip(self.x, self.y) for value in pair]
        return (['TABLEST', self.tid, None, None, None, None,
                 None, None, None] + interleaved + ['ENDT'])

    def repr_fields(self):
        """Returns the card fields; TABLEST has no defaults to blank out."""
        return self.raw_fields()
class TABLEH1(Table):
    """
    +---------+------+-------+-------+--------+-----+-------+------+------+
    |    1    |  2   |   3   |   4   |   5    |  6  |   7   |  8   |  9   |
    +=========+======+=======+=======+========+=====+=======+======+======+
    | TABLEH1 | TID  |       |       |        |     |       |      |      |
    +---------+------+-------+-------+--------+-----+-------+------+------+
    |         |  x1  |  y1   |  x2   |   y2   |  x3 |  y3   | etc. | ENDT |
    +---------+------+-------+-------+--------+-----+-------+------+------+
    | TABLEH1 |  32  |       |       |        |     |       |      |      |
    +---------+------+-------+-------+--------+-----+-------+------+------+
    |         | 0.0  |  0.0  | 0.01  | 1000.  | 0.2 | 1500. | ENDT |      |
    +---------+------+-------+-------+--------+-----+-------+------+------+
    """
    type = 'TABLEH1'

    @classmethod
    def _init_from_empty(cls):
        """minimal two-point TABLEH1 used by the card test framework"""
        return TABLEH1(1, [0., 1.], [0., 1.], comment='')

    def __init__(self, tid, x, y, comment=''):
        """
        Adds a TABLEH1 card, which defines convection heat transfer
        coefficient.  It's referenced by a TABLEHT.

        Parameters
        ----------
        tid : int
            Table ID
        x, y : List[float]
            table values
        comment : str; default=''
            a comment for the card
        """
        Table.__init__(self)
        if comment:
            self.comment = comment
        self.tid = tid
        self.x = np.asarray(x, dtype='float64')
        self.y = np.asarray(y, dtype='float64')

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a TABLEH1 card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        tid = integer(card, 1, 'tid')
        xs, ys = read_table(card, tid, 'TABLEH1')
        return TABLEH1(tid, xs, ys, comment=comment)

    @classmethod
    def add_op2_data(cls, data, comment=''):
        """
        Adds a TABLEH1 card from the OP2

        Parameters
        ----------
        data : List[varies]
            [tid, x1, y1, x2, y2, ...] in OP2 format
        comment : str; default=''
            a comment for the card
        """
        tid = data[0]
        # flat [x1, y1, x2, y2, ...] stream -> (n, 2)
        pairs = np.array(data[1:], dtype='float64').reshape(-1, 2)
        return TABLEH1(tid, pairs[:, 0], pairs[:, 1], comment=comment)

    def raw_fields(self):
        """Returns the unmodified card fields as a list."""
        interleaved = [value for pair in zip(self.x, self.y) for value in pair]
        return (['TABLEH1', self.tid, None, None, None, None,
                 None, None, None] + interleaved + ['ENDT'])

    def repr_fields(self):
        """Returns the card fields; TABLEH1 has no defaults to blank out."""
        return self.raw_fields()
class TABLEHT(Table):
    """
    +---------+-------+-------+-------+--------+------+------+------+------+
    |    1    |   2   |   3   |   4   |   5    |  6   |  7   |  8   |  9   |
    +=========+=======+=======+=======+========+======+======+======+======+
    | TABLEHT |  TID  |       |       |        |      |      |      |      |
    +---------+-------+-------+-------+--------+------+------+------+------+
    |         |  x1   | tid1  |  x2   |  tid2  |  x3  | tid3 | etc. | ENDT |
    +---------+-------+-------+-------+--------+------+------+------+------+
    | TABLEHT |  32   |       |       |        |      |      |      |      |
    +---------+-------+-------+-------+--------+------+------+------+------+
    |         |  1.   |  10   |  5.   |  11    | ENDT |      |      |      |
    +---------+-------+-------+-------+--------+------+------+------+------+
    """
    type = 'TABLEHT'

    @classmethod
    def _init_from_empty(cls):
        """minimal two-point TABLEHT used by the card test framework"""
        return TABLEHT(1, [0., 1.], [0., 1.], comment='')

    def __init__(self, tid: int, x, y, comment=''):
        """
        Adds a TABLEHT card, which a function of two variables for
        convection heat transfer coefficient.

        Parameters
        ----------
        tid : int
            Table ID
        x : List[float]
            independent values
        y : List[int]
            table ids (tid1, tid2, ... per the card format above)
        comment : str; default=''
            a comment for the card
        """
        Table.__init__(self)
        if comment:
            self.comment = comment
        self.tid = tid
        self.x = np.asarray(x, dtype='float64')
        # y holds referenced table ids, so it's stored as integers
        self.y = np.asarray(y, dtype='int32')

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a TABLEHT card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        tid = integer(card, 1, 'tid')
        # the y column is integer table ids, so use the float/int reader
        xs, ys = read_table_float_int(card, tid, 'TABLEHT')
        return TABLEHT(tid, xs, ys, comment=comment)

    @classmethod
    def add_op2_data(cls, data, comment=''):
        """
        Adds a TABLEHT card from the OP2

        Parameters
        ----------
        data : List[varies]
            [tid, x1, y1, x2, y2, ...] in OP2 format
        comment : str; default=''
            a comment for the card
        """
        tid = data[0]
        # flat [x1, y1, x2, y2, ...] stream -> (n, 2)
        pairs = np.array(data[1:], dtype='float64').reshape(-1, 2)
        return TABLEHT(tid, pairs[:, 0], pairs[:, 1], comment=comment)

    def raw_fields(self):
        """Returns the unmodified card fields as a list."""
        interleaved = [value for pair in zip(self.x, self.y) for value in pair]
        return (['TABLEHT', self.tid, None, None, None, None,
                 None, None, None] + interleaved + ['ENDT'])

    def repr_fields(self):
        """Returns the card fields; TABLEHT has no defaults to blank out."""
        return self.raw_fields()
#class RandomTable(Table):
#type = 'TABLE??'
#def __init__(self):
#Table.__init__(self)
class TABRND1(Table):
    """
    Defines a tabular power spectral density for random analysis
    (x/y pairs with LINEAR/LOG axis-interpolation flags); see the
    Nastran QRG for the full TABRND1 semantics.
    """
    type = 'TABRND1'

    @classmethod
    def _init_from_empty(cls):
        """minimal two-point TABRND1 used by the card test framework"""
        tid = 1
        x = [0., 1.]
        y = [0., 1.]
        return TABRND1(tid, x, y, xaxis='LINEAR', yaxis='LINEAR', comment='')

    def __init__(self, tid, x, y, xaxis='LINEAR', yaxis='LINEAR', comment=''):
        """
        Creates a TABRND1 card

        Parameters
        ----------
        tid : int
            Table ID
        x, y : List[float]
            table values
        xaxis, yaxis : str; default='LINEAR'
            axis interpolation type: 'LINEAR' or 'LOG'
        comment : str; default=''
            a comment for the card
        """
        Table.__init__(self)
        if comment:
            self.comment = comment
        self.tid = tid
        self.x = np.asarray(x, dtype='float64')
        self.y = np.asarray(y, dtype='float64')
        self.xaxis = xaxis
        self.yaxis = yaxis
        assert self.xaxis in ['LINEAR', 'LOG'], 'xaxis=%r' % (self.xaxis)
        assert self.yaxis in ['LINEAR', 'LOG'], 'yaxis=%r' % (self.yaxis)

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a TABRND1 card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        table_id = integer(card, 1, 'tid')
        xaxis = string_or_blank(card, 2, 'xaxis', 'LINEAR')
        yaxis = string_or_blank(card, 3, 'yaxis', 'LINEAR')
        x, y = read_table(card, table_id, 'TABRND1')
        return TABRND1(table_id, x, y, xaxis=xaxis, yaxis=yaxis, comment=comment)

    @classmethod
    def add_op2_data(cls, data, comment=''):
        """
        Adds a TABRND1 card from the OP2

        Parameters
        ----------
        data : List[varies]
            [tid, xaxis_flag, yaxis_flag, x1, y1, x2, y2, ...] in OP2 format
        comment : str; default=''
            a comment for the card
        """
        table_id = data[0]
        xaxis = _map_axis(data[1])
        yaxis = _map_axis(data[2])
        xy = data[3:]
        xy = np.array(xy, dtype='float64')
        # bug fix: the flat [x1, y1, x2, y2, ...] stream must be reshaped
        # to (n, 2) before column indexing; without this line xy is 1-D and
        # xy[:, 0] raises IndexError (matches TABLEM3/TABLES1/TABLEST/etc.)
        xy = xy.reshape(xy.size // 2, 2)
        x = xy[:, 0]
        y = xy[:, 1]
        return TABRND1(table_id, x, y, xaxis=xaxis, yaxis=yaxis, comment=comment)

    def raw_fields(self):
        """Returns the unmodified card fields as a list."""
        xy = []
        for xi, yi in zip(self.x, self.y):
            xy.extend([xi, yi])
        list_fields = ['TABRND1', self.tid, self.xaxis, self.yaxis, None, None,
                       None, None, None] + xy + ['ENDT']
        return list_fields

    def repr_fields(self):
        """Returns the card fields with default ('LINEAR') axes blanked out."""
        xy = []
        for xi, yi in zip(self.x, self.y):
            xy.extend([xi, yi])
        xaxis = set_blank_if_default(self.xaxis, 'LINEAR')
        yaxis = set_blank_if_default(self.yaxis, 'LINEAR')
        list_fields = ['TABRND1', self.tid, xaxis, yaxis, None, None,
                       None, None, None] + xy + ['ENDT']
        return list_fields
class TABRNDG(Table):
r"""
Gust Power Spectral Density
Defines the power spectral density (PSD) of a gust for aeroelastic response
analysis.
"""
type = 'TABRNDG'
@classmethod
def _init_from_empty(cls):
tid = 1
Type = 1
LU = 1.
WG = 1.
return TABRNDG(tid, Type, LU, WG, comment='')
def __init__(self, tid, Type, LU, WG, comment=''):
"""
Creates a TABRNDG card
Parameters
----------
tid : int
table id
Type : int
PSD type
1 : von Karman
2 : Dryden
LU : float
Scale of turbulence divided by velocity (units of time)
WG : float
Root-mean-square gust velocity
comment : str; default=''
a comment for the card
"""
Table.__init__(self)
if comment:
self.comment = comment
#: Table identification number. (Integer >0)
self.tid = tid
#: PSD Type: 1. von Karman; 2. Dryden
self.Type = Type
#: Scale of turbulence divided by velocity (units of time; Real)
self.LU = LU
#: Root-mean-square gust velocity. (Real)
self.WG = WG
assert self.Type in [1, 2], ('Type must be 1 or 2. '
'Type=%s' % (self.Type))
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a TABRNDG card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
table_id = integer(card, 1, 'tid')
Type = integer(card, 2, 'Type')
LU = double(card, 3, 'LU')
WG = double(card, 4, 'WG')
return TABRNDG(table_id, Type, LU, WG, comment=comment)
def raw_fields(self):
list_fields = ['TABRNDG', self.tid, self.Type, self.LU, self.WG]
return list_fields
def repr_fields(self):
return self.raw_fields()
def _map_axis(axis):
if axis == 0:
axis_type = 'LINEAR'
elif axis == 1:
axis_type = 'LOG'
else: # pragma: no cover
raise ValueError('axis=%r' % axis)
return axis_type
def read_table(card, table_id, table_type):
    """common method for reading tables that handles SKIP"""
    nfields = len(card) - 1
    npairs = (nfields - 9) // 2
    if npairs < 0:
        raise SyntaxError('%r card is too short' % table_type)
    pairs = []
    for ipair in range(npairs):
        ifield = 9 + 2 * ipair
        # an early ENDT terminates the table before the final field
        if card.field(ifield) == 'ENDT':
            break
        xi = double_or_string(card, ifield, 'x%d' % (ipair + 1))
        yi = double_or_string(card, ifield + 1, 'y%d' % (ipair + 1))
        # SKIP in either column drops the whole pair
        if 'SKIP' in (xi, yi):
            continue
        pairs.append([xi, yi])
    string(card, nfields, 'ENDT')
    return make_xy(table_id, table_type, pairs)
def read_table_float_int(card, table_id, table_type):
    """common method for reading tables that handles SKIP"""
    nfields = len(card) - 1
    npairs = (nfields - 9) // 2
    if npairs < 0:
        raise SyntaxError('%r card is too short' % table_type)
    pairs = []
    for ipair in range(npairs):
        ifield = 9 + 2 * ipair
        # an early ENDT terminates the table before the final field
        if card.field(ifield) == 'ENDT':
            break
        xi = double_or_string(card, ifield, 'x%d' % (ipair + 1))
        # y column holds integers (e.g., TABLEHT table ids)
        yi = integer_or_string(card, ifield + 1, 'y%d' % (ipair + 1))
        # SKIP in either column drops the whole pair
        if 'SKIP' in (xi, yi):
            continue
        pairs.append([xi, yi])
    string(card, nfields, 'ENDT')
    return make_xy(table_id, table_type, pairs)
| 31.302351
| 96
| 0.448002
| 6,633
| 57,252
| 3.766018
| 0.055932
| 0.031705
| 0.036029
| 0.032666
| 0.818295
| 0.797038
| 0.769776
| 0.748359
| 0.730625
| 0.71269
| 0
| 0.03277
| 0.352389
| 57,252
| 1,828
| 97
| 31.319475
| 0.640963
| 0.312373
| 0
| 0.751559
| 0
| 0
| 0.044487
| 0
| 0
| 0
| 0
| 0.001094
| 0.018711
| 1
| 0.112266
| false
| 0
| 0.008316
| 0.007277
| 0.256757
| 0.010395
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
81cee5d788887086397aab5de9efa1cec0fa8b6b
| 332
|
py
|
Python
|
src/generator.py
|
FlingJLJ/ThrowawayNameGenerator
|
42324c5b13ab91f7eb9047e7c704fbd6812f07ec
|
[
"Unlicense"
] | null | null | null |
src/generator.py
|
FlingJLJ/ThrowawayNameGenerator
|
42324c5b13ab91f7eb9047e7c704fbd6812f07ec
|
[
"Unlicense"
] | null | null | null |
src/generator.py
|
FlingJLJ/ThrowawayNameGenerator
|
42324c5b13ab91f7eb9047e7c704fbd6812f07ec
|
[
"Unlicense"
] | null | null | null |
import wordlists as wl
from random import randint

# Split the newline-delimited word blobs once at import time.
adjlist = wl.adjectives.split('\n')
nounlist = wl.nouns.split('\n')


def generate():
    """Return a throwaway name: Adjective + Noun + 5 random digits.

    Bug fix: ``random.randint`` is inclusive of BOTH endpoints, so the
    original ``randint(0, len(seq))`` could produce ``len(seq)`` and
    raise IndexError about 1/(len+1) of the time.
    """
    digits = ''.join(str(randint(0, 9)) for _ in range(5))
    return (adjlist[randint(0, len(adjlist) - 1)]
            + nounlist[randint(0, len(nounlist) - 1)]
            + digits)
| 36.888889
| 188
| 0.653614
| 51
| 332
| 4.254902
| 0.392157
| 0.258065
| 0.253456
| 0.276498
| 0.276498
| 0.276498
| 0.276498
| 0.276498
| 0.276498
| 0.276498
| 0
| 0.042705
| 0.153614
| 332
| 8
| 189
| 41.5
| 0.729537
| 0
| 0
| 0
| 1
| 0
| 0.012346
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0.166667
| 0.666667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
c48f93c2925e1aa2a4b6e65440e9d18168593bc3
| 22,011
|
py
|
Python
|
tests/fetch_test.py
|
sublinus/sptygenre
|
94716353df98ee75d5a546e76f7259f30b6f70dd
|
[
"MIT"
] | 1
|
2018-02-02T14:17:19.000Z
|
2018-02-02T14:17:19.000Z
|
tests/fetch_test.py
|
sublinus/sptygenre
|
94716353df98ee75d5a546e76f7259f30b6f70dd
|
[
"MIT"
] | 2
|
2018-01-18T17:50:20.000Z
|
2018-01-18T20:53:44.000Z
|
tests/fetch_test.py
|
sublinus/sptygenre
|
94716353df98ee75d5a546e76f7259f30b6f70dd
|
[
"MIT"
] | null | null | null |
import mock
import pytest
import sptygenre.fetch.fetch as fetch
@pytest.fixture
def spotipyAPI():
m = mock.MagicMock()
m.user_playlist.return_value = {'collaborative': False, 'description': None, 'external_urls': {'spotify': 'https://open.spotify.com/user/g051x7db/playlist/3UVCafpPe8O9Nn47A6D363'}, 'followers': {'href': None, 'total': 0}, 'href': 'https://api.spotify.com/v1/users/g051x7db/playlists/3UVCafpPe8O9Nn47A6D363', 'id': '3UVCafpPe8O9Nn47A6D363', 'images': [{'height': 640, 'url': 'https://i.scdn.co/image/bf3561a432a4ceac784c5f9dfd551c0de4d29a94', 'width': 640}], 'name': 'test_playlist', 'owner': {'display_name': None, 'external_urls': {'spotify': 'https://open.spotify.com/user/g051x7db'}, 'href': 'https://api.spotify.com/v1/users/g051x7db', 'id': 'g051x7db', 'type': 'user', 'uri': 'spotify:user:g051x7db'}, 'public': True, 'snapshot_id': 'GD0hSZPTJ/sviwvbcx6CTTV8P3R83E+rTuIZLzvoPpc+eYkGRCjibRV+yUFkqV90', 'tracks': {'href': 'https://api.spotify.com/v1/users/g051x7db/playlists/3UVCafpPe8O9Nn47A6D363/tracks?offset=0&limit=100', 'items': [{'added_at': '2018-02-02T15:42:29Z', 'added_by': {'external_urls': {'spotify': 'https://open.spotify.com/user/g051x7db'}, 'href': 'https://api.spotify.com/v1/users/g051x7db', 'id': 'g051x7db', 'type': 'user', 'uri': 'spotify:user:g051x7db'}, 'is_local': False, 'track': {'album': {'album_type': 'single', 'artists': [{'external_urls': {'spotify': 'https://open.spotify.com/artist/1XKjtpH5P81gpOXDB91IEB'}, 'href': 'https://api.spotify.com/v1/artists/1XKjtpH5P81gpOXDB91IEB', 'id': '1XKjtpH5P81gpOXDB91IEB', 'name': 'Miami Yacine', 'type': 'artist', 'uri': 'spotify:artist:1XKjtpH5P81gpOXDB91IEB'}], 'available_markets': ['AD', 'AR', 'AT', 'AU', 'BE', 'BG', 'BO', 'BR', 'CA', 'CH', 'CL', 'CO', 'CR', 'CY', 'CZ', 'DE', 'DK', 'DO', 'EC', 'EE', 'ES', 'FI', 'FR', 'GB', 'GR', 'GT', 'HK', 'HN', 'HU', 'ID', 'IE', 'IS', 'IT', 'JP', 'LI', 'LT', 'LU', 'LV', 'MC', 'MT', 'MX', 'MY', 'NI', 'NL', 'NO', 'NZ', 'PA', 'PE', 'PH', 'PL', 'PT', 'PY', 'SE', 'SG', 'SK', 'SV', 'TH', 'TR', 'TW', 'US', 'UY'], 'external_urls': {'spotify': 
'https://open.spotify.com/album/40CYJrASNrMCDr2x4UE60E'}, 'href': 'https://api.spotify.com/v1/albums/40CYJrASNrMCDr2x4UE60E', 'id': '40CYJrASNrMCDr2x4UE60E', 'images': [{'height': 640, 'url': 'https://i.scdn.co/image/bf3561a432a4ceac784c5f9dfd551c0de4d29a94', 'width': 640}, {'height': 300, 'url': 'https://i.scdn.co/image/ee3625453f33e989380a67d0a25299840fdcde8a', 'width': 300}, {'height': 64, 'url': 'https://i.scdn.co/image/93b12946028a5a172008f5c2f25e2dcd3bc61e48', 'width': 64}], 'name': 'Kokaina', 'type': 'album', 'uri': 'spotify:album:40CYJrASNrMCDr2x4UE60E'}, 'artists': [{'external_urls': {'spotify': 'https://open.spotify.com/artist/1XKjtpH5P81gpOXDB91IEB'}, 'href': 'https://api.spotify.com/v1/artists/1XKjtpH5P81gpOXDB91IEB', 'id': '1XKjtpH5P81gpOXDB91IEB', 'name': 'Miami Yacine', 'type': 'artist', 'uri': 'spotify:artist:1XKjtpH5P81gpOXDB91IEB'}], 'available_markets': ['AD', 'AR', 'AT', 'AU', 'BE', 'BG', 'BO', 'BR', 'CA', 'CH', 'CL', 'CO', 'CR', 'CY', 'CZ', 'DE', 'DK', 'DO', 'EC', 'EE', 'ES', 'FI', 'FR', 'GB', 'GR', 'GT', 'HK', 'HN', 'HU', 'ID', 'IE', 'IS', 'IT', 'JP', 'LI', 'LT', 'LU', 'LV', 'MC', 'MT', 'MX', 'MY', 'NI', 'NL', 'NO', 'NZ', 'PA', 'PE', 'PH', 'PL', 'PT', 'PY', 'SE', 'SG', 'SK', 'SV', 'TH', 'TR', 'TW', 'US', 'UY'], 'disc_number': 1, 'duration_ms': 201979, 'explicit': True, 'external_ids': {'isrc': 'DELJ81616750'}, 'external_urls': {'spotify': 'https://open.spotify.com/track/2KHbPYI22yWIOgWUsApYIS'}, 'href': 'https://api.spotify.com/v1/tracks/2KHbPYI22yWIOgWUsApYIS', 'id': '2KHbPYI22yWIOgWUsApYIS', 'name': 'Kokaina', 'popularity': 68, 'preview_url': 'https://p.scdn.co/mp3-preview/f06860acc59525ee6758536914c7b07001f1470d?cid=3dab3c84034247168f7021f1f1128754', 'track_number': 1, 'type': 'track', 'uri': 'spotify:track:2KHbPYI22yWIOgWUsApYIS'}}, {'added_at': '2018-02-02T15:42:33Z', 'added_by': {'external_urls': {'spotify': 'https://open.spotify.com/user/g051x7db'}, 'href': 'https://api.spotify.com/v1/users/g051x7db', 'id': 'g051x7db', 'type': 'user', 
'uri': 'spotify:user:g051x7db'}, 'is_local': False, 'track': {'album': {'album_type': 'album', 'artists': [{'external_urls': {'spotify': 'https://open.spotify.com/artist/7Ln80lUS6He07XvHI8qqHH'}, 'href': 'https://api.spotify.com/v1/artists/7Ln80lUS6He07XvHI8qqHH', 'id': '7Ln80lUS6He07XvHI8qqHH', 'name': 'Arctic Monkeys', 'type': 'artist', 'uri': 'spotify:artist:7Ln80lUS6He07XvHI8qqHH'}], 'available_markets': ['AD', 'AR', 'AT', 'AU', 'BE', 'BG', 'BO', 'BR', 'CA', 'CH', 'CL', 'CO', 'CR', 'CY', 'CZ', 'DE', 'DK', 'DO', 'EC', 'EE', 'ES', 'FI', 'FR', 'GB', 'GR', 'GT', 'HK', 'HN', 'HU', 'ID', 'IE', 'IS', 'IT', 'JP', 'LI', 'LT', 'LU', 'LV', 'MC', 'MT', 'MX', 'MY', 'NI', 'NL', 'NO', 'NZ', 'PA', 'PE', 'PH', 'PL', 'PT', 'PY', 'SE', 'SG', 'SK', 'SV', 'TH', 'TR', 'TW', 'US', 'UY'], 'external_urls': {'spotify': 'https://open.spotify.com/album/1XkGORuUX2QGOEIL4EbJKm'}, 'href': 'https://api.spotify.com/v1/albums/1XkGORuUX2QGOEIL4EbJKm', 'id': '1XkGORuUX2QGOEIL4EbJKm', 'images': [{'height': 640, 'url': 'https://i.scdn.co/image/637c657bc3053d9f5fecad1c9ae68e20d8ab2eb6', 'width': 640}, {'height': 300, 'url': 'https://i.scdn.co/image/b370274ac9b00a04c502f401b34bf577764f4c4f', 'width': 300}, {'height': 64, 'url': 'https://i.scdn.co/image/660b217e14669a23e1d70273d0150f6828b1cb3f', 'width': 64}], 'name': 'Favourite Worst Nightmare', 'type': 'album', 'uri': 'spotify:album:1XkGORuUX2QGOEIL4EbJKm'}, 'artists': [{'external_urls': {'spotify': 'https://open.spotify.com/artist/7Ln80lUS6He07XvHI8qqHH'}, 'href': 'https://api.spotify.com/v1/artists/7Ln80lUS6He07XvHI8qqHH', 'id': '7Ln80lUS6He07XvHI8qqHH', 'name': 'Arctic Monkeys', 'type': 'artist', 'uri': 'spotify:artist:7Ln80lUS6He07XvHI8qqHH'}], 'available_markets': ['AD', 'AR', 'AT', 'AU', 'BE', 'BG', 'BO', 'BR', 'CA', 'CH', 'CL', 'CO', 'CR', 'CY', 'CZ', 'DE', 'DK', 'DO', 'EC', 'EE', 'ES', 'FI', 'FR', 'GB', 'GR', 'GT', 'HK', 'HN', 'HU', 'ID', 'IE', 'IS', 'IT', 'JP', 'LI', 'LT', 'LU', 'LV', 'MC', 'MT', 'MX', 'MY', 'NI', 'NL', 'NO', 'NZ', 'PA', 
'PE', 'PH', 'PL', 'PT', 'PY', 'SE', 'SG', 'SK', 'SV', 'TH', 'TR', 'TW', 'US', 'UY'], 'disc_number': 1, 'duration_ms': 183893, 'explicit': False, 'external_ids': {'isrc': 'GBCEL0700067'}, 'external_urls': {'spotify': 'https://open.spotify.com/track/2x8evxqUlF0eRabbW2JBJd'}, 'href': 'https://api.spotify.com/v1/tracks/2x8evxqUlF0eRabbW2JBJd', 'id': '2x8evxqUlF0eRabbW2JBJd', 'name': 'Fluorescent Adolescent', 'popularity': 80, 'preview_url': 'https://p.scdn.co/mp3-preview/cb8560b45aa0d4028e94a2648686c460641074d0?cid=3dab3c84034247168f7021f1f1128754', 'track_number': 5, 'type': 'track', 'uri': 'spotify:track:2x8evxqUlF0eRabbW2JBJd'}}, {'added_at': '2018-02-02T15:42:39Z', 'added_by': {'external_urls': {'spotify': 'https://open.spotify.com/user/g051x7db'}, 'href': 'https://api.spotify.com/v1/users/g051x7db', 'id': 'g051x7db', 'type': 'user', 'uri': 'spotify:user:g051x7db'}, 'is_local': False, 'track': {'album': {'album_type': 'album', 'artists': [{'external_urls': {'spotify': 'https://open.spotify.com/artist/0SfsnGyD8FpIN4U4WCkBZ5'}, 'href': 'https://api.spotify.com/v1/artists/0SfsnGyD8FpIN4U4WCkBZ5', 'id': '0SfsnGyD8FpIN4U4WCkBZ5', 'name': 'Armin van Buuren', 'type': 'artist', 'uri': 'spotify:artist:0SfsnGyD8FpIN4U4WCkBZ5'}], 'available_markets': ['AD', 'AR', 'AT', 'AU', 'BE', 'BG', 'BO', 'BR', 'CA', 'CH', 'CL', 'CO', 'CR', 'CY', 'CZ', 'DE', 'DK', 'DO', 'EC', 'EE', 'ES', 'FI', 'FR', 'GB', 'GR', 'GT', 'HK', 'HN', 'HU', 'ID', 'IE', 'IS', 'IT', 'JP', 'LI', 'LT', 'LU', 'LV', 'MC', 'MT', 'MX', 'MY', 'NI', 'NL', 'NO', 'NZ', 'PA', 'PE', 'PH', 'PL', 'PT', 'PY', 'SE', 'SG', 'SK', 'SV', 'TH', 'TR', 'TW', 'US', 'UY'], 'external_urls': {'spotify': 'https://open.spotify.com/album/0P0UYHedjYpAMpDZy3Lxk1'}, 'href': 'https://api.spotify.com/v1/albums/0P0UYHedjYpAMpDZy3Lxk1', 'id': '0P0UYHedjYpAMpDZy3Lxk1', 'images': [{'height': 640, 'url': 'https://i.scdn.co/image/9aca103482a3985c1e50d3d36e4cde1c98aa20cf', 'width': 640}, {'height': 300, 'url': 
'https://i.scdn.co/image/4b6fdbd853a858c53db031c1ddb79f07d7774f6e', 'width': 300}, {'height': 64, 'url': 'https://i.scdn.co/image/09a3625136a9a32b70b7a322c8eb982874af2fd8', 'width': 64}], 'name': 'A State Of Trance Year Mix 2017 (Mixed by Armin van Buuren)', 'type': 'album', 'uri': 'spotify:album:0P0UYHedjYpAMpDZy3Lxk1'}, 'artists': [{'external_urls': {'spotify': 'https://open.spotify.com/artist/5B9q1NRokzWYB7nSgnlHyv'}, 'href': 'https://api.spotify.com/v1/artists/5B9q1NRokzWYB7nSgnlHyv', 'id': '5B9q1NRokzWYB7nSgnlHyv', 'name': 'Giuseppe Ottaviani', 'type': 'artist', 'uri': 'spotify:artist:5B9q1NRokzWYB7nSgnlHyv'}], 'available_markets': ['AD', 'AR', 'AT', 'AU', 'BE', 'BG', 'BO', 'BR', 'CA', 'CH', 'CL', 'CO', 'CR', 'CY', 'CZ', 'DE', 'DK', 'DO', 'EC', 'EE', 'ES', 'FI', 'FR', 'GB', 'GR', 'GT', 'HK', 'HN', 'HU', 'ID', 'IE', 'IS', 'IT', 'JP', 'LI', 'LT', 'LU', 'LV', 'MC', 'MT', 'MX', 'MY', 'NI', 'NL', 'NO', 'NZ', 'PA', 'PE', 'PH', 'PL', 'PT', 'PY', 'SE', 'SG', 'SK', 'SV', 'TH', 'TR', 'TW', 'US', 'UY'], 'disc_number': 2, 'duration_ms': 194344, 'explicit': False, 'external_ids': {'isrc': 'NLF711711719'}, 'external_urls': {'spotify': 'https://open.spotify.com/track/4cnckyCmxL79byCK2wCI3f'}, 'href': 'https://api.spotify.com/v1/tracks/4cnckyCmxL79byCK2wCI3f', 'id': '4cnckyCmxL79byCK2wCI3f', 'name': 'Lumina', 'popularity': 44, 'preview_url': 'https://p.scdn.co/mp3-preview/71d43597c080253c62cc7690449a7a473ba0c57f?cid=3dab3c84034247168f7021f1f1128754', 'track_number': 78, 'type': 'track', 'uri': 'spotify:track:4cnckyCmxL79byCK2wCI3f'}}], 'limit': 100, 'next': None, 'offset': 0, 'previous': None, 'total': 3}, 'type': 'playlist', 'uri': 'spotify:user:g051x7db:playlist:3UVCafpPe8O9Nn47A6D363'}
m.next.return_value = None
m.artists.return_value = {'artists': [{'external_urls': {'spotify': 'https://open.spotify.com/artist/1XKjtpH5P81gpOXDB91IEB'}, 'followers': {'href': None, 'total': 168655}, 'genres': ['deep german hip hop'], 'href': 'https://api.spotify.com/v1/artists/1XKjtpH5P81gpOXDB91IEB', 'id': '1XKjtpH5P81gpOXDB91IEB', 'images': [{'height': 640, 'url': 'https://i.scdn.co/image/2b07998f4b60731056fe325c79386f5c2ce5cd00', 'width': 640}, {'height': 320, 'url': 'https://i.scdn.co/image/d87d745e975ac979c23b0524928828c585d3303e', 'width': 320}, {'height': 160, 'url': 'https://i.scdn.co/image/27697724dce51810319f472962a70674067e161f', 'width': 160}], 'name': 'Miami Yacine', 'popularity': 72, 'type': 'artist', 'uri': 'spotify:artist:1XKjtpH5P81gpOXDB91IEB'}, {'external_urls': {'spotify': 'https://open.spotify.com/artist/7Ln80lUS6He07XvHI8qqHH'}, 'followers': {'href': None, 'total': 4938610}, 'genres': ['garage rock', 'indie rock', 'modern rock', 'permanent wave', 'sheffield indie'], 'href': 'https://api.spotify.com/v1/artists/7Ln80lUS6He07XvHI8qqHH', 'id': '7Ln80lUS6He07XvHI8qqHH', 'images': [{'height': 1333, 'url': 'https://i.scdn.co/image/c488bf987b2f716a539a768a102855450345113d', 'width': 1000}, {'height': 853, 'url': 'https://i.scdn.co/image/b30c0e39cfa70b2124b9d0d24e83761ef48e5540', 'width': 640}, {'height': 267, 'url': 'https://i.scdn.co/image/e1f00ceabce8dd0480bad7e873993082d9ac3fe9', 'width': 200}, {'height': 85, 'url': 'https://i.scdn.co/image/eba02729ec372fd9954cafd3fd71950bb8fc385f', 'width': 64}], 'name': 'Arctic Monkeys', 'popularity': 85, 'type': 'artist', 'uri': 'spotify:artist:7Ln80lUS6He07XvHI8qqHH'}, {'external_urls': {'spotify': 'https://open.spotify.com/artist/5B9q1NRokzWYB7nSgnlHyv'}, 'followers': {'href': None, 'total': 22068}, 'genres': ['deep uplifting trance', 'edm', 'progressive house', 'progressive trance', 'trance', 'uplifting trance'], 'href': 'https://api.spotify.com/v1/artists/5B9q1NRokzWYB7nSgnlHyv', 'id': '5B9q1NRokzWYB7nSgnlHyv', 'images': [{'height': 
1000, 'url': 'https://i.scdn.co/image/71f3f50192efe45992a542d3dca9888957b8d647', 'width': 1000}, {'height': 640, 'url': 'https://i.scdn.co/image/4f2ca00f94641cc466fde8e67bfed36fcec344e1', 'width': 640}, {'height': 200, 'url': 'https://i.scdn.co/image/e6ced34336070d14d3fbb2b8c81e7121033e5fb3', 'width': 200}, {'height': 64, 'url': 'https://i.scdn.co/image/081aa8fd80e374b0e3446d98d8c32236f7902003', 'width': 64}], 'name': 'Giuseppe Ottaviani', 'popularity': 52, 'type': 'artist', 'uri': 'spotify:artist:5B9q1NRokzWYB7nSgnlHyv'}]}
return m
@pytest.fixture()
def fetcher(spotipyAPI):
    # Fetcher under test, wired to the mocked Spotipy client and the
    # playlist URI that the mock's canned responses describe.
    return fetch.Fetcher("spotify:user:g051x7db:playlist:3UVCafpPe8O9Nn47A6D363", spotipyAPI)
def test_fetch_playlist(fetcher):
assert fetcher.fetch_playlist() == {'href': 'https://api.spotify.com/v1/users/g051x7db/playlists/3UVCafpPe8O9Nn47A6D363/tracks?offset=0&limit=100', 'items': [{'added_at': '2018-02-02T15:42:29Z', 'added_by': {'external_urls': {'spotify': 'https://open.spotify.com/user/g051x7db'}, 'href': 'https://api.spotify.com/v1/users/g051x7db', 'id': 'g051x7db', 'type': 'user', 'uri': 'spotify:user:g051x7db'}, 'is_local': False, 'track': {'album': {'album_type': 'single', 'artists': [{'external_urls': {'spotify': 'https://open.spotify.com/artist/1XKjtpH5P81gpOXDB91IEB'}, 'href': 'https://api.spotify.com/v1/artists/1XKjtpH5P81gpOXDB91IEB', 'id': '1XKjtpH5P81gpOXDB91IEB', 'name': 'Miami Yacine', 'type': 'artist', 'uri': 'spotify:artist:1XKjtpH5P81gpOXDB91IEB'}], 'available_markets': ['AD', 'AR', 'AT', 'AU', 'BE', 'BG', 'BO', 'BR', 'CA', 'CH', 'CL', 'CO', 'CR', 'CY', 'CZ', 'DE', 'DK', 'DO', 'EC', 'EE', 'ES', 'FI', 'FR', 'GB', 'GR', 'GT', 'HK', 'HN', 'HU', 'ID', 'IE', 'IS', 'IT', 'JP', 'LI', 'LT', 'LU', 'LV', 'MC', 'MT', 'MX', 'MY', 'NI', 'NL', 'NO', 'NZ', 'PA', 'PE', 'PH', 'PL', 'PT', 'PY', 'SE', 'SG', 'SK', 'SV', 'TH', 'TR', 'TW', 'US', 'UY'], 'external_urls': {'spotify': 'https://open.spotify.com/album/40CYJrASNrMCDr2x4UE60E'}, 'href': 'https://api.spotify.com/v1/albums/40CYJrASNrMCDr2x4UE60E', 'id': '40CYJrASNrMCDr2x4UE60E', 'images': [{'height': 640, 'url': 'https://i.scdn.co/image/bf3561a432a4ceac784c5f9dfd551c0de4d29a94', 'width': 640}, {'height': 300, 'url': 'https://i.scdn.co/image/ee3625453f33e989380a67d0a25299840fdcde8a', 'width': 300}, {'height': 64, 'url': 'https://i.scdn.co/image/93b12946028a5a172008f5c2f25e2dcd3bc61e48', 'width': 64}], 'name': 'Kokaina', 'type': 'album', 'uri': 'spotify:album:40CYJrASNrMCDr2x4UE60E'}, 'artists': [{'external_urls': {'spotify': 'https://open.spotify.com/artist/1XKjtpH5P81gpOXDB91IEB'}, 'href': 'https://api.spotify.com/v1/artists/1XKjtpH5P81gpOXDB91IEB', 'id': '1XKjtpH5P81gpOXDB91IEB', 'name': 'Miami Yacine', 'type': 'artist', 'uri': 
'spotify:artist:1XKjtpH5P81gpOXDB91IEB'}], 'available_markets': ['AD', 'AR', 'AT', 'AU', 'BE', 'BG', 'BO', 'BR', 'CA', 'CH', 'CL', 'CO', 'CR', 'CY', 'CZ', 'DE', 'DK', 'DO', 'EC', 'EE', 'ES', 'FI', 'FR', 'GB', 'GR', 'GT', 'HK', 'HN', 'HU', 'ID', 'IE', 'IS', 'IT', 'JP', 'LI', 'LT', 'LU', 'LV', 'MC', 'MT', 'MX', 'MY', 'NI', 'NL', 'NO', 'NZ', 'PA', 'PE', 'PH', 'PL', 'PT', 'PY', 'SE', 'SG', 'SK', 'SV', 'TH', 'TR', 'TW', 'US', 'UY'], 'disc_number': 1, 'duration_ms': 201979, 'explicit': True, 'external_ids': {'isrc': 'DELJ81616750'}, 'external_urls': {'spotify': 'https://open.spotify.com/track/2KHbPYI22yWIOgWUsApYIS'}, 'href': 'https://api.spotify.com/v1/tracks/2KHbPYI22yWIOgWUsApYIS', 'id': '2KHbPYI22yWIOgWUsApYIS', 'name': 'Kokaina', 'popularity': 68, 'preview_url': 'https://p.scdn.co/mp3-preview/f06860acc59525ee6758536914c7b07001f1470d?cid=3dab3c84034247168f7021f1f1128754', 'track_number': 1, 'type': 'track', 'uri': 'spotify:track:2KHbPYI22yWIOgWUsApYIS'}}, {'added_at': '2018-02-02T15:42:33Z', 'added_by': {'external_urls': {'spotify': 'https://open.spotify.com/user/g051x7db'}, 'href': 'https://api.spotify.com/v1/users/g051x7db', 'id': 'g051x7db', 'type': 'user', 'uri': 'spotify:user:g051x7db'}, 'is_local': False, 'track': {'album': {'album_type': 'album', 'artists': [{'external_urls': {'spotify': 'https://open.spotify.com/artist/7Ln80lUS6He07XvHI8qqHH'}, 'href': 'https://api.spotify.com/v1/artists/7Ln80lUS6He07XvHI8qqHH', 'id': '7Ln80lUS6He07XvHI8qqHH', 'name': 'Arctic Monkeys', 'type': 'artist', 'uri': 'spotify:artist:7Ln80lUS6He07XvHI8qqHH'}], 'available_markets': ['AD', 'AR', 'AT', 'AU', 'BE', 'BG', 'BO', 'BR', 'CA', 'CH', 'CL', 'CO', 'CR', 'CY', 'CZ', 'DE', 'DK', 'DO', 'EC', 'EE', 'ES', 'FI', 'FR', 'GB', 'GR', 'GT', 'HK', 'HN', 'HU', 'ID', 'IE', 'IS', 'IT', 'JP', 'LI', 'LT', 'LU', 'LV', 'MC', 'MT', 'MX', 'MY', 'NI', 'NL', 'NO', 'NZ', 'PA', 'PE', 'PH', 'PL', 'PT', 'PY', 'SE', 'SG', 'SK', 'SV', 'TH', 'TR', 'TW', 'US', 'UY'], 'external_urls': {'spotify': 
'https://open.spotify.com/album/1XkGORuUX2QGOEIL4EbJKm'}, 'href': 'https://api.spotify.com/v1/albums/1XkGORuUX2QGOEIL4EbJKm', 'id': '1XkGORuUX2QGOEIL4EbJKm', 'images': [{'height': 640, 'url': 'https://i.scdn.co/image/637c657bc3053d9f5fecad1c9ae68e20d8ab2eb6', 'width': 640}, {'height': 300, 'url': 'https://i.scdn.co/image/b370274ac9b00a04c502f401b34bf577764f4c4f', 'width': 300}, {'height': 64, 'url': 'https://i.scdn.co/image/660b217e14669a23e1d70273d0150f6828b1cb3f', 'width': 64}], 'name': 'Favourite Worst Nightmare', 'type': 'album', 'uri': 'spotify:album:1XkGORuUX2QGOEIL4EbJKm'}, 'artists': [{'external_urls': {'spotify': 'https://open.spotify.com/artist/7Ln80lUS6He07XvHI8qqHH'}, 'href': 'https://api.spotify.com/v1/artists/7Ln80lUS6He07XvHI8qqHH', 'id': '7Ln80lUS6He07XvHI8qqHH', 'name': 'Arctic Monkeys', 'type': 'artist', 'uri': 'spotify:artist:7Ln80lUS6He07XvHI8qqHH'}], 'available_markets': ['AD', 'AR', 'AT', 'AU', 'BE', 'BG', 'BO', 'BR', 'CA', 'CH', 'CL', 'CO', 'CR', 'CY', 'CZ', 'DE', 'DK', 'DO', 'EC', 'EE', 'ES', 'FI', 'FR', 'GB', 'GR', 'GT', 'HK', 'HN', 'HU', 'ID', 'IE', 'IS', 'IT', 'JP', 'LI', 'LT', 'LU', 'LV', 'MC', 'MT', 'MX', 'MY', 'NI', 'NL', 'NO', 'NZ', 'PA', 'PE', 'PH', 'PL', 'PT', 'PY', 'SE', 'SG', 'SK', 'SV', 'TH', 'TR', 'TW', 'US', 'UY'], 'disc_number': 1, 'duration_ms': 183893, 'explicit': False, 'external_ids': {'isrc': 'GBCEL0700067'}, 'external_urls': {'spotify': 'https://open.spotify.com/track/2x8evxqUlF0eRabbW2JBJd'}, 'href': 'https://api.spotify.com/v1/tracks/2x8evxqUlF0eRabbW2JBJd', 'id': '2x8evxqUlF0eRabbW2JBJd', 'name': 'Fluorescent Adolescent', 'popularity': 80, 'preview_url': 'https://p.scdn.co/mp3-preview/cb8560b45aa0d4028e94a2648686c460641074d0?cid=3dab3c84034247168f7021f1f1128754', 'track_number': 5, 'type': 'track', 'uri': 'spotify:track:2x8evxqUlF0eRabbW2JBJd'}}, {'added_at': '2018-02-02T15:42:39Z', 'added_by': {'external_urls': {'spotify': 'https://open.spotify.com/user/g051x7db'}, 'href': 'https://api.spotify.com/v1/users/g051x7db', 
'id': 'g051x7db', 'type': 'user', 'uri': 'spotify:user:g051x7db'}, 'is_local': False, 'track': {'album': {'album_type': 'album', 'artists': [{'external_urls': {'spotify': 'https://open.spotify.com/artist/0SfsnGyD8FpIN4U4WCkBZ5'}, 'href': 'https://api.spotify.com/v1/artists/0SfsnGyD8FpIN4U4WCkBZ5', 'id': '0SfsnGyD8FpIN4U4WCkBZ5', 'name': 'Armin van Buuren', 'type': 'artist', 'uri': 'spotify:artist:0SfsnGyD8FpIN4U4WCkBZ5'}], 'available_markets': ['AD', 'AR', 'AT', 'AU', 'BE', 'BG', 'BO', 'BR', 'CA', 'CH', 'CL', 'CO', 'CR', 'CY', 'CZ', 'DE', 'DK', 'DO', 'EC', 'EE', 'ES', 'FI', 'FR', 'GB', 'GR', 'GT', 'HK', 'HN', 'HU', 'ID', 'IE', 'IS', 'IT', 'JP', 'LI', 'LT', 'LU', 'LV', 'MC', 'MT', 'MX', 'MY', 'NI', 'NL', 'NO', 'NZ', 'PA', 'PE', 'PH', 'PL', 'PT', 'PY', 'SE', 'SG', 'SK', 'SV', 'TH', 'TR', 'TW', 'US', 'UY'], 'external_urls': {'spotify': 'https://open.spotify.com/album/0P0UYHedjYpAMpDZy3Lxk1'}, 'href': 'https://api.spotify.com/v1/albums/0P0UYHedjYpAMpDZy3Lxk1', 'id': '0P0UYHedjYpAMpDZy3Lxk1', 'images': [{'height': 640, 'url': 'https://i.scdn.co/image/9aca103482a3985c1e50d3d36e4cde1c98aa20cf', 'width': 640}, {'height': 300, 'url': 'https://i.scdn.co/image/4b6fdbd853a858c53db031c1ddb79f07d7774f6e', 'width': 300}, {'height': 64, 'url': 'https://i.scdn.co/image/09a3625136a9a32b70b7a322c8eb982874af2fd8', 'width': 64}], 'name': 'A State Of Trance Year Mix 2017 (Mixed by Armin van Buuren)', 'type': 'album', 'uri': 'spotify:album:0P0UYHedjYpAMpDZy3Lxk1'}, 'artists': [{'external_urls': {'spotify': 'https://open.spotify.com/artist/5B9q1NRokzWYB7nSgnlHyv'}, 'href': 'https://api.spotify.com/v1/artists/5B9q1NRokzWYB7nSgnlHyv', 'id': '5B9q1NRokzWYB7nSgnlHyv', 'name': 'Giuseppe Ottaviani', 'type': 'artist', 'uri': 'spotify:artist:5B9q1NRokzWYB7nSgnlHyv'}], 'available_markets': ['AD', 'AR', 'AT', 'AU', 'BE', 'BG', 'BO', 'BR', 'CA', 'CH', 'CL', 'CO', 'CR', 'CY', 'CZ', 'DE', 'DK', 'DO', 'EC', 'EE', 'ES', 'FI', 'FR', 'GB', 'GR', 'GT', 'HK', 'HN', 'HU', 'ID', 'IE', 'IS', 'IT', 'JP', 'LI', 
'LT', 'LU', 'LV', 'MC', 'MT', 'MX', 'MY', 'NI', 'NL', 'NO', 'NZ', 'PA', 'PE', 'PH', 'PL', 'PT', 'PY', 'SE', 'SG', 'SK', 'SV', 'TH', 'TR', 'TW', 'US', 'UY'], 'disc_number': 2, 'duration_ms': 194344, 'explicit': False, 'external_ids': {'isrc': 'NLF711711719'}, 'external_urls': {'spotify': 'https://open.spotify.com/track/4cnckyCmxL79byCK2wCI3f'}, 'href': 'https://api.spotify.com/v1/tracks/4cnckyCmxL79byCK2wCI3f', 'id': '4cnckyCmxL79byCK2wCI3f', 'name': 'Lumina', 'popularity': 44, 'preview_url': 'https://p.scdn.co/mp3-preview/71d43597c080253c62cc7690449a7a473ba0c57f?cid=3dab3c84034247168f7021f1f1128754', 'track_number': 78, 'type': 'track', 'uri': 'spotify:track:4cnckyCmxL79byCK2wCI3f'}}], 'limit': 100, 'next': None, 'offset': 0, 'previous': None, 'total': 3}
def test_get_genres(fetcher, spotipyAPI):
assert fetcher.get_genres_from_playlist(spotipyAPI.user_playlist()["tracks"]) == [['deep german hip hop'], ['garage rock', 'indie rock', 'modern rock', 'permanent wave', 'sheffield indie'], ['deep uplifting trance', 'edm', 'progressive house', 'progressive trance', 'trance', 'uplifting trance']]
@pytest.mark.parametrize("test_input, expected", [
([["pop", "trance"],["rock", "mellow trap"],["hip hop", "hip hip hop"]],{"pop": 1, "trance": 1, "rock": 1, "mellow trap": 1, "hip hop": 1, "hip hip hop": 1}),
([["pop"], ["pop"], ["pop"], ["pop"]], {"pop":4}),
([["trap"]], {"trap": 1})
])
def test_quantize_genres(fetcher, test_input, expected):
assert fetcher.quantize_genres(test_input) == expected
| 628.885714
| 9,616
| 0.641179
| 2,639
| 22,011
| 5.30125
| 0.10269
| 0.051465
| 0.031737
| 0.05025
| 0.879771
| 0.872766
| 0.851036
| 0.851036
| 0.846962
| 0.835025
| 0
| 0.114746
| 0.083413
| 22,011
| 34
| 9,617
| 647.382353
| 0.578686
| 0
| 0
| 0
| 0
| 0.083333
| 0.647676
| 0.087229
| 0
| 0
| 0
| 0
| 0.125
| 1
| 0.208333
| false
| 0
| 0.125
| 0.041667
| 0.416667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
c48fccaf3a3aa706a714dd1319b5031773366843
| 127
|
py
|
Python
|
src/mylib/training/__init__.py
|
takedarts/swgridnet
|
e2fede53a60047b40abeaf016a1c52d51a3e3e48
|
[
"MIT"
] | 8
|
2017-09-25T08:54:18.000Z
|
2018-10-18T11:47:43.000Z
|
src/mylib/training/__init__.py
|
takedarts/swgridnet
|
e2fede53a60047b40abeaf016a1c52d51a3e3e48
|
[
"MIT"
] | null | null | null |
src/mylib/training/__init__.py
|
takedarts/swgridnet
|
e2fede53a60047b40abeaf016a1c52d51a3e3e48
|
[
"MIT"
] | null | null | null |
from mylib.training import extensions
from mylib.training import trigger
from mylib.training.updater import StandardUpdater
| 31.75
| 51
| 0.850394
| 16
| 127
| 6.75
| 0.5
| 0.25
| 0.472222
| 0.425926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11811
| 127
| 3
| 52
| 42.333333
| 0.964286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
c496af5eb31c0a4e30c8770ecc0d9236d31676ca
| 55
|
py
|
Python
|
Heapsort/__init__.py
|
billyateallcookies/python
|
61e1983ce9e992495c90904131d35bca964aca01
|
[
"MIT"
] | 1
|
2021-05-23T19:31:04.000Z
|
2021-05-23T19:31:04.000Z
|
Heapsort/__init__.py
|
billyateallcookies/python
|
61e1983ce9e992495c90904131d35bca964aca01
|
[
"MIT"
] | null | null | null |
Heapsort/__init__.py
|
billyateallcookies/python
|
61e1983ce9e992495c90904131d35bca964aca01
|
[
"MIT"
] | null | null | null |
import Heapsort.heapsort
import Heapsort.heapsort_tests
| 27.5
| 30
| 0.909091
| 7
| 55
| 7
| 0.428571
| 0.571429
| 0.897959
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054545
| 55
| 2
| 30
| 27.5
| 0.942308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f20d915d5f72d44537a93cf79ed67a6a06d45e8f
| 47
|
py
|
Python
|
PyExercises - CeV - Mundo 3/Exercises (35 - 43)/Ex 43/Functions/main/line/__init__.py
|
PatrickAMenezes/PyExercises-CursoEmVideo-Mundo3
|
3c02768eb720c2112ececc95be95caf2bdd98fb1
|
[
"MIT"
] | null | null | null |
PyExercises - CeV - Mundo 3/Exercises (35 - 43)/Ex 43/Functions/main/line/__init__.py
|
PatrickAMenezes/PyExercises-CursoEmVideo-Mundo3
|
3c02768eb720c2112ececc95be95caf2bdd98fb1
|
[
"MIT"
] | null | null | null |
PyExercises - CeV - Mundo 3/Exercises (35 - 43)/Ex 43/Functions/main/line/__init__.py
|
PatrickAMenezes/PyExercises-CursoEmVideo-Mundo3
|
3c02768eb720c2112ececc95be95caf2bdd98fb1
|
[
"MIT"
] | null | null | null |
def line():
return '\033[1m-\033[m'*45
| 15.666667
| 30
| 0.510638
| 8
| 47
| 3
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.257143
| 0.255319
| 47
| 3
| 31
| 15.666667
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0.291667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 9
|
f21600d04e10af36079fad2067a130970995f340
| 62
|
py
|
Python
|
tests/tests_xyy.py
|
gabrielmontagne/xyy
|
47206f7770d9c5494aa7964603b0a051b17b5f65
|
[
"MIT"
] | null | null | null |
tests/tests_xyy.py
|
gabrielmontagne/xyy
|
47206f7770d9c5494aa7964603b0a051b17b5f65
|
[
"MIT"
] | null | null | null |
tests/tests_xyy.py
|
gabrielmontagne/xyy
|
47206f7770d9c5494aa7964603b0a051b17b5f65
|
[
"MIT"
] | null | null | null |
from context import xyy
def test_fail(): assert False, 'x_x'
| 15.5
| 36
| 0.741935
| 11
| 62
| 4
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16129
| 62
| 3
| 37
| 20.666667
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0.048387
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.5
| true
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4800d236a49085c1704a49502a2530fe830416ac
| 99
|
py
|
Python
|
fluidity/__init__.py
|
MaximeRedstone/UnstructuredCAE-DA
|
b54bd53540c11aa1b70e5160751905141f463217
|
[
"MIT"
] | null | null | null |
fluidity/__init__.py
|
MaximeRedstone/UnstructuredCAE-DA
|
b54bd53540c11aa1b70e5160751905141f463217
|
[
"MIT"
] | null | null | null |
fluidity/__init__.py
|
MaximeRedstone/UnstructuredCAE-DA
|
b54bd53540c11aa1b70e5160751905141f463217
|
[
"MIT"
] | null | null | null |
from UnstructuredCAEDA.fluidity.VtkSave import VtkSave
from UnstructuredCAEDA.fluidity import utils
| 49.5
| 54
| 0.89899
| 11
| 99
| 8.090909
| 0.545455
| 0.47191
| 0.651685
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070707
| 99
| 2
| 55
| 49.5
| 0.967391
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
482d2de86f4a9535d08b21e7bbdd4865e9da7061
| 118
|
py
|
Python
|
platform/hwconf_data/efr32mg14p/PythonSnippet/__init__.py
|
lenloe1/v2.7
|
9ac9c4a7bb37987af382c80647f42d84db5f2e1d
|
[
"Zlib"
] | null | null | null |
platform/hwconf_data/efr32mg14p/PythonSnippet/__init__.py
|
lenloe1/v2.7
|
9ac9c4a7bb37987af382c80647f42d84db5f2e1d
|
[
"Zlib"
] | 1
|
2020-08-25T02:36:22.000Z
|
2020-08-25T02:36:22.000Z
|
platform/hwconf_data/efr32mg14p/PythonSnippet/__init__.py
|
lenloe1/v2.7
|
9ac9c4a7bb37987af382c80647f42d84db5f2e1d
|
[
"Zlib"
] | 1
|
2020-08-25T01:56:04.000Z
|
2020-08-25T01:56:04.000Z
|
from efr32mg14p.halconfig import halconfig_types as types
from efr32mg14p.halconfig import halconfig_dependency as dep
| 59
| 60
| 0.889831
| 16
| 118
| 6.4375
| 0.5
| 0.271845
| 0.446602
| 0.563107
| 0.737864
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074766
| 0.09322
| 118
| 2
| 60
| 59
| 0.88785
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
48832477fa66aeef8828e03e95d63f4e0935cd36
| 36,187
|
py
|
Python
|
sdk/python/pulumi_oci/dns/resolver.py
|
EladGabay/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 5
|
2021-08-17T11:14:46.000Z
|
2021-12-31T02:07:03.000Z
|
sdk/python/pulumi_oci/dns/resolver.py
|
pulumi-oci/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-09-06T11:21:29.000Z
|
2021-09-06T11:21:29.000Z
|
sdk/python/pulumi_oci/dns/resolver.py
|
pulumi-oci/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2021-08-24T23:31:30.000Z
|
2022-01-02T19:26:54.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ResolverArgs', 'Resolver']
@pulumi.input_type
class ResolverArgs:
def __init__(__self__, *,
resolver_id: pulumi.Input[str],
scope: pulumi.Input[str],
attached_views: Optional[pulumi.Input[Sequence[pulumi.Input['ResolverAttachedViewArgs']]]] = None,
compartment_id: Optional[pulumi.Input[str]] = None,
defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
display_name: Optional[pulumi.Input[str]] = None,
freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
rules: Optional[pulumi.Input[Sequence[pulumi.Input['ResolverRuleArgs']]]] = None):
"""
The set of arguments for constructing a Resolver resource.
:param pulumi.Input[str] resolver_id: The OCID of the target resolver.
:param pulumi.Input[str] scope: Value must be `PRIVATE` when creating private name resolvers.
:param pulumi.Input[Sequence[pulumi.Input['ResolverAttachedViewArgs']]] attached_views: (Updatable) The attached views. Views are evaluated in order.
:param pulumi.Input[str] compartment_id: (Updatable) The OCID of the owning compartment.
:param pulumi.Input[Mapping[str, Any]] defined_tags: (Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
:param pulumi.Input[str] display_name: (Updatable) The display name of the resolver.
:param pulumi.Input[Mapping[str, Any]] freeform_tags: (Updatable) Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
:param pulumi.Input[Sequence[pulumi.Input['ResolverRuleArgs']]] rules: (Updatable) Rules for the resolver. Rules are evaluated in order.
"""
pulumi.set(__self__, "resolver_id", resolver_id)
pulumi.set(__self__, "scope", scope)
if attached_views is not None:
pulumi.set(__self__, "attached_views", attached_views)
if compartment_id is not None:
pulumi.set(__self__, "compartment_id", compartment_id)
if defined_tags is not None:
pulumi.set(__self__, "defined_tags", defined_tags)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if freeform_tags is not None:
pulumi.set(__self__, "freeform_tags", freeform_tags)
if rules is not None:
pulumi.set(__self__, "rules", rules)
@property
@pulumi.getter(name="resolverId")
def resolver_id(self) -> pulumi.Input[str]:
"""
The OCID of the target resolver.
"""
return pulumi.get(self, "resolver_id")
@resolver_id.setter
def resolver_id(self, value: pulumi.Input[str]):
pulumi.set(self, "resolver_id", value)
@property
@pulumi.getter
def scope(self) -> pulumi.Input[str]:
"""
Value must be `PRIVATE` when creating private name resolvers.
"""
return pulumi.get(self, "scope")
@scope.setter
def scope(self, value: pulumi.Input[str]):
pulumi.set(self, "scope", value)
@property
@pulumi.getter(name="attachedViews")
def attached_views(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResolverAttachedViewArgs']]]]:
"""
(Updatable) The attached views. Views are evaluated in order.
"""
return pulumi.get(self, "attached_views")
@attached_views.setter
def attached_views(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ResolverAttachedViewArgs']]]]):
pulumi.set(self, "attached_views", value)
@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> Optional[pulumi.Input[str]]:
"""
(Updatable) The OCID of the owning compartment.
"""
return pulumi.get(self, "compartment_id")
@compartment_id.setter
def compartment_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "compartment_id", value)
@property
@pulumi.getter(name="definedTags")
def defined_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
(Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
"""
return pulumi.get(self, "defined_tags")
@defined_tags.setter
def defined_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "defined_tags", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
(Updatable) The display name of the resolver.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter(name="freeformTags")
def freeform_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
(Updatable) Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
"""
return pulumi.get(self, "freeform_tags")
@freeform_tags.setter
def freeform_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "freeform_tags", value)
@property
@pulumi.getter
def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResolverRuleArgs']]]]:
"""
(Updatable) Rules for the resolver. Rules are evaluated in order.
"""
return pulumi.get(self, "rules")
@rules.setter
def rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ResolverRuleArgs']]]]):
pulumi.set(self, "rules", value)
@pulumi.input_type
class _ResolverState:
def __init__(__self__, *,
attached_vcn_id: Optional[pulumi.Input[str]] = None,
attached_views: Optional[pulumi.Input[Sequence[pulumi.Input['ResolverAttachedViewArgs']]]] = None,
compartment_id: Optional[pulumi.Input[str]] = None,
default_view_id: Optional[pulumi.Input[str]] = None,
defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
display_name: Optional[pulumi.Input[str]] = None,
endpoints: Optional[pulumi.Input[Sequence[pulumi.Input['ResolverEndpointArgs']]]] = None,
freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
is_protected: Optional[pulumi.Input[bool]] = None,
resolver_id: Optional[pulumi.Input[str]] = None,
rules: Optional[pulumi.Input[Sequence[pulumi.Input['ResolverRuleArgs']]]] = None,
scope: Optional[pulumi.Input[str]] = None,
self: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
time_created: Optional[pulumi.Input[str]] = None,
time_updated: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Resolver resources.
:param pulumi.Input[str] attached_vcn_id: The OCID of the attached VCN.
:param pulumi.Input[Sequence[pulumi.Input['ResolverAttachedViewArgs']]] attached_views: (Updatable) The attached views. Views are evaluated in order.
:param pulumi.Input[str] compartment_id: (Updatable) The OCID of the owning compartment.
:param pulumi.Input[str] default_view_id: The OCID of the default view.
:param pulumi.Input[Mapping[str, Any]] defined_tags: (Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
:param pulumi.Input[str] display_name: (Updatable) The display name of the resolver.
:param pulumi.Input[Sequence[pulumi.Input['ResolverEndpointArgs']]] endpoints: Read-only array of endpoints for the resolver.
:param pulumi.Input[Mapping[str, Any]] freeform_tags: (Updatable) Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
:param pulumi.Input[bool] is_protected: A Boolean flag indicating whether or not parts of the resource are unable to be explicitly managed.
:param pulumi.Input[str] resolver_id: The OCID of the target resolver.
:param pulumi.Input[Sequence[pulumi.Input['ResolverRuleArgs']]] rules: (Updatable) Rules for the resolver. Rules are evaluated in order.
:param pulumi.Input[str] scope: Value must be `PRIVATE` when creating private name resolvers.
:param pulumi.Input[str] self: The canonical absolute URL of the resource.
:param pulumi.Input[str] state: The current state of the resource.
:param pulumi.Input[str] time_created: The date and time the resource was created in "YYYY-MM-ddThh:mm:ssZ" format with a Z offset, as defined by RFC 3339.
:param pulumi.Input[str] time_updated: The date and time the resource was last updated in "YYYY-MM-ddThh:mm:ssZ" format with a Z offset, as defined by RFC 3339.
"""
if attached_vcn_id is not None:
pulumi.set(__self__, "attached_vcn_id", attached_vcn_id)
if attached_views is not None:
pulumi.set(__self__, "attached_views", attached_views)
if compartment_id is not None:
pulumi.set(__self__, "compartment_id", compartment_id)
if default_view_id is not None:
pulumi.set(__self__, "default_view_id", default_view_id)
if defined_tags is not None:
pulumi.set(__self__, "defined_tags", defined_tags)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if endpoints is not None:
pulumi.set(__self__, "endpoints", endpoints)
if freeform_tags is not None:
pulumi.set(__self__, "freeform_tags", freeform_tags)
if is_protected is not None:
pulumi.set(__self__, "is_protected", is_protected)
if resolver_id is not None:
pulumi.set(__self__, "resolver_id", resolver_id)
if rules is not None:
pulumi.set(__self__, "rules", rules)
if scope is not None:
pulumi.set(__self__, "scope", scope)
if self is not None:
pulumi.set(__self__, "self", self)
if state is not None:
pulumi.set(__self__, "state", state)
if time_created is not None:
pulumi.set(__self__, "time_created", time_created)
if time_updated is not None:
pulumi.set(__self__, "time_updated", time_updated)
@property
@pulumi.getter(name="attachedVcnId")
def attached_vcn_id(self) -> Optional[pulumi.Input[str]]:
"""
The OCID of the attached VCN.
"""
return pulumi.get(self, "attached_vcn_id")
@attached_vcn_id.setter
def attached_vcn_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "attached_vcn_id", value)
@property
@pulumi.getter(name="attachedViews")
def attached_views(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResolverAttachedViewArgs']]]]:
"""
(Updatable) The attached views. Views are evaluated in order.
"""
return pulumi.get(self, "attached_views")
@attached_views.setter
def attached_views(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ResolverAttachedViewArgs']]]]):
pulumi.set(self, "attached_views", value)
@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> Optional[pulumi.Input[str]]:
"""
(Updatable) The OCID of the owning compartment.
"""
return pulumi.get(self, "compartment_id")
@compartment_id.setter
def compartment_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "compartment_id", value)
@property
@pulumi.getter(name="defaultViewId")
def default_view_id(self) -> Optional[pulumi.Input[str]]:
"""
The OCID of the default view.
"""
return pulumi.get(self, "default_view_id")
@default_view_id.setter
def default_view_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "default_view_id", value)
@property
@pulumi.getter(name="definedTags")
def defined_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
(Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
"""
return pulumi.get(self, "defined_tags")
@defined_tags.setter
def defined_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "defined_tags", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
(Updatable) The display name of the resolver.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter
def endpoints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResolverEndpointArgs']]]]:
"""
Read-only array of endpoints for the resolver.
"""
return pulumi.get(self, "endpoints")
@endpoints.setter
def endpoints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ResolverEndpointArgs']]]]):
pulumi.set(self, "endpoints", value)
@property
@pulumi.getter(name="freeformTags")
def freeform_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
(Updatable) Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
"""
return pulumi.get(self, "freeform_tags")
@freeform_tags.setter
def freeform_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "freeform_tags", value)
@property
@pulumi.getter(name="isProtected")
def is_protected(self) -> Optional[pulumi.Input[bool]]:
"""
A Boolean flag indicating whether or not parts of the resource are unable to be explicitly managed.
"""
return pulumi.get(self, "is_protected")
@is_protected.setter
def is_protected(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_protected", value)
@property
@pulumi.getter(name="resolverId")
def resolver_id(self) -> Optional[pulumi.Input[str]]:
"""
The OCID of the target resolver.
"""
return pulumi.get(self, "resolver_id")
@resolver_id.setter
def resolver_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resolver_id", value)
@property
@pulumi.getter
def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResolverRuleArgs']]]]:
"""
(Updatable) Rules for the resolver. Rules are evaluated in order.
"""
return pulumi.get(self, "rules")
@rules.setter
def rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ResolverRuleArgs']]]]):
pulumi.set(self, "rules", value)
@property
@pulumi.getter
def scope(self) -> Optional[pulumi.Input[str]]:
"""
Value must be `PRIVATE` when creating private name resolvers.
"""
return pulumi.get(self, "scope")
@scope.setter
def scope(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scope", value)
@property
@pulumi.getter
def self(self) -> Optional[pulumi.Input[str]]:
"""
The canonical absolute URL of the resource.
"""
return pulumi.get(self, "self")
@self.setter
def self(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "self", value)
@property
@pulumi.getter
def state(self) -> Optional[pulumi.Input[str]]:
"""
The current state of the resource.
"""
return pulumi.get(self, "state")
@state.setter
def state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "state", value)
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> Optional[pulumi.Input[str]]:
    """
    The date and time the resource was created in "YYYY-MM-ddThh:mm:ssZ" format with a Z offset, as defined by RFC 3339.
    """
    return pulumi.get(self, "time_created")

@time_created.setter
def time_created(self, value: Optional[pulumi.Input[str]]):
    # Mirror of the getter above; stores the RFC 3339 timestamp string.
    pulumi.set(self, "time_created", value)
@property
@pulumi.getter(name="timeUpdated")
def time_updated(self) -> Optional[pulumi.Input[str]]:
    """
    The date and time the resource was last updated in "YYYY-MM-ddThh:mm:ssZ" format with a Z offset, as defined by RFC 3339.
    """
    return pulumi.get(self, "time_updated")

@time_updated.setter
def time_updated(self, value: Optional[pulumi.Input[str]]):
    # Mirror of the getter above; stores the RFC 3339 timestamp string.
    pulumi.set(self, "time_updated", value)
class Resolver(pulumi.CustomResource):
    """
    The Resolver resource of the Oracle Cloud Infrastructure DNS service.

    NOTE(review): per the provider docs reproduced below, a DNS resolver is
    created implicitly when a VCN is created; this resource therefore adopts
    and updates an existing resolver (identified by ``resolver_id``) rather
    than creating one, and there is no true delete.
    """

    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 attached_views: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResolverAttachedViewArgs']]]]] = None,
                 compartment_id: Optional[pulumi.Input[str]] = None,
                 defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 resolver_id: Optional[pulumi.Input[str]] = None,
                 rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResolverRuleArgs']]]]] = None,
                 scope: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        This resource provides the Resolver resource in Oracle Cloud Infrastructure DNS service.

        Updates the specified resolver with your new information. Requires a `PRIVATE` scope query parameter.

        Note: Resolvers are associated with VCNs and created when a VCN is created. Wait until created VCN's
        state shows as Available in OCI console before updating DNS resolver properties.
        Also a VCN cannot be deleted while its resolver has resolver endpoints. Additionally a resolver
        endpoint cannot be deleted if it is referenced in the resolver's rules. To remove the rules from a
        resolver user needs to update the resolver resource. Since DNS Resolver gets deleted when VCN is
        deleted there is no support for Delete for DNS Resolver.

        ## Import

        For legacy Resolvers that were created without using `scope`, these Resolvers can be imported using the `id`, e.g.

        ```sh
        $ pulumi import oci:dns/resolver:Resolver test_resolver "id"
        ```

        For Resolvers created using `scope`, these Resolvers can be imported using the `id`, e.g.

        ```sh
        $ pulumi import oci:dns/resolver:Resolver test_resolver "resolverId/{resolverId}/scope/{scope}"
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResolverAttachedViewArgs']]]] attached_views: (Updatable) The attached views. Views are evaluated in order.
        :param pulumi.Input[str] compartment_id: (Updatable) The OCID of the owning compartment.
        :param pulumi.Input[Mapping[str, Any]] defined_tags: (Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
        :param pulumi.Input[str] display_name: (Updatable) The display name of the resolver.
        :param pulumi.Input[Mapping[str, Any]] freeform_tags: (Updatable) Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
        :param pulumi.Input[str] resolver_id: The OCID of the target resolver.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResolverRuleArgs']]]] rules: (Updatable) Rules for the resolver. Rules are evaluated in order.
        :param pulumi.Input[str] scope: Value must be `PRIVATE` when creating private name resolvers.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: ResolverArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        This resource provides the Resolver resource in Oracle Cloud Infrastructure DNS service.

        Updates the specified resolver with your new information. Requires a `PRIVATE` scope query parameter.

        Note: Resolvers are associated with VCNs and created when a VCN is created. Wait until created VCN's
        state shows as Available in OCI console before updating DNS resolver properties.
        Also a VCN cannot be deleted while its resolver has resolver endpoints. Additionally a resolver
        endpoint cannot be deleted if it is referenced in the resolver's rules. To remove the rules from a
        resolver user needs to update the resolver resource. Since DNS Resolver gets deleted when VCN is
        deleted there is no support for Delete for DNS Resolver.

        ## Import

        For legacy Resolvers that were created without using `scope`, these Resolvers can be imported using the `id`, e.g.

        ```sh
        $ pulumi import oci:dns/resolver:Resolver test_resolver "id"
        ```

        For Resolvers created using `scope`, these Resolvers can be imported using the `id`, e.g.

        ```sh
        $ pulumi import oci:dns/resolver:Resolver test_resolver "resolverId/{resolverId}/scope/{scope}"
        ```

        :param str resource_name: The name of the resource.
        :param ResolverArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either a single ResolverArgs
        # bundle or individual keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(ResolverArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       attached_views: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResolverAttachedViewArgs']]]]] = None,
                       compartment_id: Optional[pulumi.Input[str]] = None,
                       defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                       display_name: Optional[pulumi.Input[str]] = None,
                       freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                       resolver_id: Optional[pulumi.Input[str]] = None,
                       rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResolverRuleArgs']]]]] = None,
                       scope: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        # Normalize resource options before handing off to the Pulumi engine.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id is None when registering a new resource; a non-None id means
        # we are rehydrating an existing one via Resolver.get and __props__
        # carries the recovered state.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ResolverArgs.__new__(ResolverArgs)

            __props__.__dict__["attached_views"] = attached_views
            __props__.__dict__["compartment_id"] = compartment_id
            __props__.__dict__["defined_tags"] = defined_tags
            __props__.__dict__["display_name"] = display_name
            __props__.__dict__["freeform_tags"] = freeform_tags
            # resolver_id and scope are required unless the resource is being
            # looked up by URN.
            if resolver_id is None and not opts.urn:
                raise TypeError("Missing required property 'resolver_id'")
            __props__.__dict__["resolver_id"] = resolver_id
            __props__.__dict__["rules"] = rules
            if scope is None and not opts.urn:
                raise TypeError("Missing required property 'scope'")
            __props__.__dict__["scope"] = scope
            # Output-only properties start as None and are populated by the provider.
            __props__.__dict__["attached_vcn_id"] = None
            __props__.__dict__["default_view_id"] = None
            __props__.__dict__["endpoints"] = None
            __props__.__dict__["is_protected"] = None
            __props__.__dict__["self"] = None
            __props__.__dict__["state"] = None
            __props__.__dict__["time_created"] = None
            __props__.__dict__["time_updated"] = None
        super(Resolver, __self__).__init__(
            'oci:dns/resolver:Resolver',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            attached_vcn_id: Optional[pulumi.Input[str]] = None,
            attached_views: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResolverAttachedViewArgs']]]]] = None,
            compartment_id: Optional[pulumi.Input[str]] = None,
            default_view_id: Optional[pulumi.Input[str]] = None,
            defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
            display_name: Optional[pulumi.Input[str]] = None,
            endpoints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResolverEndpointArgs']]]]] = None,
            freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
            is_protected: Optional[pulumi.Input[bool]] = None,
            resolver_id: Optional[pulumi.Input[str]] = None,
            rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResolverRuleArgs']]]]] = None,
            scope: Optional[pulumi.Input[str]] = None,
            self: Optional[pulumi.Input[str]] = None,
            state: Optional[pulumi.Input[str]] = None,
            time_created: Optional[pulumi.Input[str]] = None,
            time_updated: Optional[pulumi.Input[str]] = None) -> 'Resolver':
        """
        Get an existing Resolver resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] attached_vcn_id: The OCID of the attached VCN.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResolverAttachedViewArgs']]]] attached_views: (Updatable) The attached views. Views are evaluated in order.
        :param pulumi.Input[str] compartment_id: (Updatable) The OCID of the owning compartment.
        :param pulumi.Input[str] default_view_id: The OCID of the default view.
        :param pulumi.Input[Mapping[str, Any]] defined_tags: (Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
        :param pulumi.Input[str] display_name: (Updatable) The display name of the resolver.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResolverEndpointArgs']]]] endpoints: Read-only array of endpoints for the resolver.
        :param pulumi.Input[Mapping[str, Any]] freeform_tags: (Updatable) Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
        :param pulumi.Input[bool] is_protected: A Boolean flag indicating whether or not parts of the resource are unable to be explicitly managed.
        :param pulumi.Input[str] resolver_id: The OCID of the target resolver.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResolverRuleArgs']]]] rules: (Updatable) Rules for the resolver. Rules are evaluated in order.
        :param pulumi.Input[str] scope: Value must be `PRIVATE` when creating private name resolvers.
        :param pulumi.Input[str] self: The canonical absolute URL of the resource.
        :param pulumi.Input[str] state: The current state of the resource.
        :param pulumi.Input[str] time_created: The date and time the resource was created in "YYYY-MM-ddThh:mm:ssZ" format with a Z offset, as defined by RFC 3339.
        :param pulumi.Input[str] time_updated: The date and time the resource was last updated in "YYYY-MM-ddThh:mm:ssZ" format with a Z offset, as defined by RFC 3339.
        """
        # Merging in id triggers the "rehydrate existing resource" path of
        # _internal_init (opts.id is not None there).
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _ResolverState.__new__(_ResolverState)

        __props__.__dict__["attached_vcn_id"] = attached_vcn_id
        __props__.__dict__["attached_views"] = attached_views
        __props__.__dict__["compartment_id"] = compartment_id
        __props__.__dict__["default_view_id"] = default_view_id
        __props__.__dict__["defined_tags"] = defined_tags
        __props__.__dict__["display_name"] = display_name
        __props__.__dict__["endpoints"] = endpoints
        __props__.__dict__["freeform_tags"] = freeform_tags
        __props__.__dict__["is_protected"] = is_protected
        __props__.__dict__["resolver_id"] = resolver_id
        __props__.__dict__["rules"] = rules
        __props__.__dict__["scope"] = scope
        __props__.__dict__["self"] = self
        __props__.__dict__["state"] = state
        __props__.__dict__["time_created"] = time_created
        __props__.__dict__["time_updated"] = time_updated
        return Resolver(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="attachedVcnId")
    def attached_vcn_id(self) -> pulumi.Output[str]:
        """
        The OCID of the attached VCN.
        """
        return pulumi.get(self, "attached_vcn_id")

    @property
    @pulumi.getter(name="attachedViews")
    def attached_views(self) -> pulumi.Output[Optional[Sequence['outputs.ResolverAttachedView']]]:
        """
        (Updatable) The attached views. Views are evaluated in order.
        """
        return pulumi.get(self, "attached_views")

    @property
    @pulumi.getter(name="compartmentId")
    def compartment_id(self) -> pulumi.Output[str]:
        """
        (Updatable) The OCID of the owning compartment.
        """
        return pulumi.get(self, "compartment_id")

    @property
    @pulumi.getter(name="defaultViewId")
    def default_view_id(self) -> pulumi.Output[str]:
        """
        The OCID of the default view.
        """
        return pulumi.get(self, "default_view_id")

    @property
    @pulumi.getter(name="definedTags")
    def defined_tags(self) -> pulumi.Output[Mapping[str, Any]]:
        """
        (Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
        """
        return pulumi.get(self, "defined_tags")

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> pulumi.Output[str]:
        """
        (Updatable) The display name of the resolver.
        """
        return pulumi.get(self, "display_name")

    @property
    @pulumi.getter
    def endpoints(self) -> pulumi.Output[Sequence['outputs.ResolverEndpoint']]:
        """
        Read-only array of endpoints for the resolver.
        """
        return pulumi.get(self, "endpoints")

    @property
    @pulumi.getter(name="freeformTags")
    def freeform_tags(self) -> pulumi.Output[Mapping[str, Any]]:
        """
        (Updatable) Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
        """
        return pulumi.get(self, "freeform_tags")

    @property
    @pulumi.getter(name="isProtected")
    def is_protected(self) -> pulumi.Output[bool]:
        """
        A Boolean flag indicating whether or not parts of the resource are unable to be explicitly managed.
        """
        return pulumi.get(self, "is_protected")

    @property
    @pulumi.getter(name="resolverId")
    def resolver_id(self) -> pulumi.Output[str]:
        """
        The OCID of the target resolver.
        """
        return pulumi.get(self, "resolver_id")

    @property
    @pulumi.getter
    def rules(self) -> pulumi.Output[Optional[Sequence['outputs.ResolverRule']]]:
        """
        (Updatable) Rules for the resolver. Rules are evaluated in order.
        """
        return pulumi.get(self, "rules")

    @property
    @pulumi.getter
    def scope(self) -> pulumi.Output[str]:
        """
        Value must be `PRIVATE` when creating private name resolvers.
        """
        return pulumi.get(self, "scope")

    @property
    @pulumi.getter
    def self(self) -> pulumi.Output[str]:
        """
        The canonical absolute URL of the resource.
        """
        # NOTE: property deliberately named `self` to mirror the API field name.
        return pulumi.get(self, "self")

    @property
    @pulumi.getter
    def state(self) -> pulumi.Output[str]:
        """
        The current state of the resource.
        """
        return pulumi.get(self, "state")

    @property
    @pulumi.getter(name="timeCreated")
    def time_created(self) -> pulumi.Output[str]:
        """
        The date and time the resource was created in "YYYY-MM-ddThh:mm:ssZ" format with a Z offset, as defined by RFC 3339.
        """
        return pulumi.get(self, "time_created")

    @property
    @pulumi.getter(name="timeUpdated")
    def time_updated(self) -> pulumi.Output[str]:
        """
        The date and time the resource was last updated in "YYYY-MM-ddThh:mm:ssZ" format with a Z offset, as defined by RFC 3339.
        """
        return pulumi.get(self, "time_updated")
| 48.703903
| 356
| 0.663194
| 4,421
| 36,187
| 5.245646
| 0.061977
| 0.089173
| 0.08029
| 0.051227
| 0.901427
| 0.882282
| 0.854254
| 0.831271
| 0.825363
| 0.793325
| 0
| 0.001179
| 0.226767
| 36,187
| 742
| 357
| 48.769542
| 0.827633
| 0.365076
| 0
| 0.666667
| 1
| 0
| 0.112734
| 0.013763
| 0
| 0
| 0
| 0
| 0
| 1
| 0.165501
| false
| 0.002331
| 0.016317
| 0
| 0.284382
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6fbc2e08b711842a22773ddba409819e00b48b92
| 1,257
|
py
|
Python
|
Helpers/SortFxts.py
|
whirledsol/PythonUtilities
|
1f6cc29fb99cc093416e4f3b72bc3835820bc5c5
|
[
"CC0-1.0"
] | null | null | null |
Helpers/SortFxts.py
|
whirledsol/PythonUtilities
|
1f6cc29fb99cc093416e4f3b72bc3835820bc5c5
|
[
"CC0-1.0"
] | null | null | null |
Helpers/SortFxts.py
|
whirledsol/PythonUtilities
|
1f6cc29fb99cc093416e4f3b72bc3835820bc5c5
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 19 17:37:29 2013
@author: Will
"""
import Helpers.Models.SkyObjects
def sort_RA(list):
    """Sort sky objects in place by right ascension, then declination.

    The original code wrapped ``import operator`` in a try/except with a
    lambda fallback, but ``operator`` is part of the standard library and
    always importable, so the fallback branch was dead. The two branches
    also disagreed: the (dead) lambda sorted by ``(RA, DEC)`` while the live
    branch sorted by ``RA`` alone. The ``(RA, DEC)`` key is used here so
    objects with equal RA are ordered deterministically by DEC.

    :param list: mutable sequence of objects with ``RA`` and ``DEC``
        attributes (parameter name kept for API compatibility even though
        it shadows the builtin).
    :return: the same sequence, sorted in place (ascending).
    """
    import operator
    # attrgetter is faster than an equivalent lambda and reads clearer.
    list.sort(key=operator.attrgetter("RA", "DEC"), reverse=False)
    return list
def sort_DEC(list):
    """Sort sky objects in place by declination, ascending.

    The original try/except around ``import operator`` was dead code:
    ``operator`` is always available in the standard library, so the lambda
    fallback could never run. Behavior is unchanged — sort key is ``DEC``.

    :param list: mutable sequence of objects with a ``DEC`` attribute
        (parameter name kept for API compatibility even though it shadows
        the builtin).
    :return: the same sequence, sorted in place (ascending).
    """
    import operator
    # attrgetter is faster than an equivalent lambda and reads clearer.
    list.sort(key=operator.attrgetter("DEC"), reverse=False)
    return list
def sort_Z(list):
    """Sort sky objects in place by redshift ``z``, then right ascension.

    The original try/except around ``import operator`` was dead code
    (``operator`` is always importable), and its lambda fallback disagreed
    with the live branch by sorting on ``z`` alone. The live behavior —
    key ``(z, RA)`` — is kept.

    :param list: mutable sequence of objects with ``z`` and ``RA``
        attributes (parameter name kept for API compatibility even though
        it shadows the builtin).
    :return: the same sequence, sorted in place (ascending).
    """
    import operator
    # attrgetter is faster than an equivalent lambda and reads clearer.
    list.sort(key=operator.attrgetter("z", "RA"), reverse=False)
    return list
| 28.568182
| 92
| 0.633254
| 165
| 1,257
| 4.806061
| 0.29697
| 0.075662
| 0.04918
| 0.079445
| 0.878941
| 0.878941
| 0.878941
| 0.878941
| 0.878941
| 0.878941
| 0
| 0.013978
| 0.260143
| 1,257
| 44
| 93
| 28.568182
| 0.83871
| 0.354018
| 0
| 0.6
| 0
| 0
| 0.010088
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12
| false
| 0
| 0.28
| 0
| 0.52
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
6fe3352a2828d2c7afd79232803a554eea6b0bbb
| 27,712
|
py
|
Python
|
ext/ANTsPyNet/antspynet/architectures/create_resunet_model.py
|
tsmonteiro/fmri_proc
|
ee740cfa3c3a7ef8e1ee1ebd3b286a66712e0ec1
|
[
"MIT"
] | 2
|
2021-11-16T10:00:33.000Z
|
2021-12-13T02:57:40.000Z
|
ext/ANTsPyNet/antspynet/architectures/create_resunet_model.py
|
tsmonteiro/fmri_proc
|
ee740cfa3c3a7ef8e1ee1ebd3b286a66712e0ec1
|
[
"MIT"
] | null | null | null |
ext/ANTsPyNet/antspynet/architectures/create_resunet_model.py
|
tsmonteiro/fmri_proc
|
ee740cfa3c3a7ef8e1ee1ebd3b286a66712e0ec1
|
[
"MIT"
] | 1
|
2021-12-13T02:57:27.000Z
|
2021-12-13T02:57:27.000Z
|
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (Input, Dropout, BatchNormalization, Add,
ThresholdedReLU, Concatenate, Dense,
Conv2D, Conv2DTranspose,
MaxPooling2D, UpSampling2D,
Conv3D, Conv3DTranspose,
MaxPooling3D, UpSampling3D)
from tensorflow.keras import initializers
from tensorflow.keras import regularizers
def create_resunet_model_2d(input_image_size,
                            number_of_outputs=1,
                            number_of_filters_at_base_layer=32,
                            bottle_neck_block_depth_schedule=(3, 4),
                            convolution_kernel_size=(3, 3),
                            deconvolution_kernel_size=(2, 2),
                            dropout_rate=0.0,
                            weight_decay=0.0,
                            mode='classification'
                           ):
    """
    2-D implementation of the Resnet + U-net deep learning architecture.

    Creates a keras model of the U-net + ResNet deep learning architecture for
    image segmentation and regression with the paper available here:

            https://arxiv.org/abs/1608.04117

    This particular implementation was ported from the following python
    implementation:

            https://github.com/veugene/fcn_maker/

    Arguments
    ---------
    input_image_size : tuple of length 3
        Used for specifying the input tensor shape.  The
        shape (or dimension) of that tensor is the image dimensions followed by
        the number of channels (e.g., red, green, and blue).  The batch size
        (i.e., number of training images) is not specified a priori.

    number_of_outputs : integer
        Meaning depends on the mode.  For 'classification' this is the number of
        segmentation labels.  For 'regression' this is the number of outputs.

    number_of_filters_at_base_layer : integer
        Number of filters at the beginning and end of the 'U'.  Doubles at each
        descending/ascending layer.

    bottle_neck_block_depth_schedule : tuple
        Tuple that provides the encoding layer schedule for the number of bottleneck
        blocks per long skip connection.

    convolution_kernel_size : tuple of length 2
        2-d vector defining the kernel size during the encoding path

    deconvolution_kernel_size : tuple of length 2
        2-d vector defining the kernel size during the decoding

    dropout_rate : scalar
        Float between 0 and 1 to use between dense layers.

    weight_decay : scalar
        Weighting parameter for L2 regularization of the kernel weights of the
        convolution layers.  Default = 0.0.

    mode : string
        'classification' or 'regression'.  Default = 'classification'.

    Returns
    -------
    Keras model
        A 2-D Keras model defining the network.

    Example
    -------
    >>> model = create_resunet_model_2d((128, 128, 1))
    >>> model.summary()
    """

    def simple_block_2d(input, number_of_filters,
                        downsample=False, upsample=False,
                        convolution_kernel_size=(3, 3),
                        deconvolution_kernel_size=(2, 2),
                        weight_decay=0.0, dropout_rate=0.0):
        # Pre-activation residual block: BN -> ReLU -> conv on the main path,
        # with the (possibly resampled) input added back via skip_connection.
        number_of_output_filters = number_of_filters

        output = BatchNormalization()(input)
        # ThresholdedReLU(theta=0) acts as a plain ReLU here.
        output = ThresholdedReLU(theta = 0)(output)

        if downsample:
            output = MaxPooling2D(pool_size=(2, 2))(output)

        output = Conv2D(filters=number_of_filters,
                        kernel_size=convolution_kernel_size,
                        padding='same',
                        kernel_regularizer=regularizers.l2(weight_decay))(output)

        if upsample:
            output = Conv2DTranspose(filters=number_of_filters,
                                     kernel_size=deconvolution_kernel_size,
                                     padding='same',
                                     kernel_initializer=initializers.he_normal(),
                                     kernel_regularizer=regularizers.l2(weight_decay))(output)
            output = UpSampling2D(size=(2, 2))(output)

        if dropout_rate > 0.0:
            output=Dropout(rate=dropout_rate)(output)

        # Modify the input so that it has the same size as the output
        # (1x1 projection and/or resampling on the identity path).
        if downsample:
            input = Conv2D(filters=number_of_output_filters,
                           kernel_size=(1, 1),
                           strides=(2, 2),
                           padding='same')(input)
        elif upsample:
            input = Conv2DTranspose(filters=number_of_output_filters,
                                    kernel_size=(1, 1),
                                    padding='same')(input)
            input = UpSampling2D(size=(2, 2))(input)
        elif number_of_filters != number_of_output_filters:
            input = Conv2D(filters=number_of_output_filters,
                           kernel_size=(1, 1),
                           padding='same')(input)

        output = skip_connection(input, output)

        return(output)

    def bottle_neck_block_2d(input, number_of_filters, downsample=False,
                             upsample=False, deconvolution_kernel_size=(2, 2),
                             weight_decay=0.0, dropout_rate=0.0):
        # ResNet bottleneck: 1x1 reduce -> 1x1 -> 1x1 expand (x4 filters),
        # each preceded by BN + ReLU; identity path projected to match.
        output = input
        number_of_output_filters = number_of_filters

        if downsample:
            output = BatchNormalization()(output)
            output = ThresholdedReLU(theta = 0)(output)
            # Strided 1x1 conv performs the spatial downsampling.
            output = Conv2D(filters=number_of_filters,
                            kernel_size=(1, 1),
                            strides=(2, 2),
                            kernel_initializer=initializers.he_normal(),
                            kernel_regularizer=regularizers.l2(weight_decay))(output)

        output = BatchNormalization()(output)
        output = ThresholdedReLU(theta = 0)(output)
        output = Conv2D(filters=number_of_filters,
                        kernel_size=(1, 1),
                        kernel_initializer=initializers.he_normal(),
                        kernel_regularizer=regularizers.l2(weight_decay))(output)

        output = BatchNormalization()(output)
        output = ThresholdedReLU(theta = 0)(output)

        if upsample:
            output = Conv2DTranspose(filters=number_of_filters,
                                     kernel_size=deconvolution_kernel_size,
                                     padding='same',
                                     kernel_initializer=initializers.he_normal(),
                                     kernel_regularizer=regularizers.l2(weight_decay))(output)
            output = UpSampling2D(size=(2, 2))(output)

        # Expansion back out to 4x the bottleneck width.
        output = Conv2D(filters=(number_of_filters * 4),
                        kernel_size=(1, 1),
                        kernel_initializer=initializers.he_normal(),
                        kernel_regularizer=regularizers.l2(weight_decay))(output)
        number_of_output_filters = number_of_filters * 4

        if dropout_rate > 0.0:
            output=Dropout(rate=dropout_rate)(output)

        # Modify the input so that it has the same size as the output
        if downsample:
            input = Conv2D(filters=number_of_output_filters,
                           kernel_size=(1, 1),
                           strides=(2, 2),
                           padding='same')(input)
        elif upsample:
            input = Conv2DTranspose(filters=number_of_output_filters,
                                    kernel_size=(1, 1),
                                    padding='same')(input)
            input = UpSampling2D(size=(2, 2))(input)
        elif number_of_filters != number_of_output_filters:
            input = Conv2D(filters=number_of_output_filters,
                           kernel_size=(1, 1),
                           padding='valid')(input)

        output = skip_connection(input, output)

        return(output)

    def skip_connection(source, target, merge_mode='sum'):
        # Merge a residual/skip pair either by element-wise sum (default)
        # or by channel-axis concatenation.
        layer_list = [source, target]

        output = None
        if merge_mode == 'sum':
            output = Add()(layer_list)
        else:
            channel_axis = 0
            if K.image_data_format() == 'channels_last':
                channel_axis = -1
            output = Concatenate(axis=channel_axis)(layer_list)

        return(output)

    inputs = Input(shape = input_image_size)

    # Tracks the encoder feature maps consumed later by the decoder's long
    # skip connections (LIFO via encoding_layer_count).
    encoding_layers_with_long_skip_connections = []
    encoding_layer_count = 1

    # Preprocessing layer
    model = Conv2D(filters=number_of_filters_at_base_layer,
                   kernel_size=convolution_kernel_size,
                   activation='relu',
                   padding='same',
                   kernel_initializer=initializers.he_normal(),
                   kernel_regularizer=regularizers.l2(weight_decay))(inputs)
    encoding_layers_with_long_skip_connections.append(model)
    encoding_layer_count += 1

    # Encoding initialization path
    model = simple_block_2d(model, number_of_filters_at_base_layer, downsample=True,
                            convolution_kernel_size=convolution_kernel_size,
                            deconvolution_kernel_size=deconvolution_kernel_size,
                            weight_decay=weight_decay, dropout_rate=dropout_rate)
    encoding_layers_with_long_skip_connections.append(model)
    encoding_layer_count += 1

    # Encoding main path: one group of bottleneck blocks per schedule entry,
    # downsampling on the first block of each group.
    number_of_bottle_neck_layers = len(bottle_neck_block_depth_schedule)
    for i in range(number_of_bottle_neck_layers):
        number_of_filters = number_of_filters_at_base_layer * 2**i
        for j in range(bottle_neck_block_depth_schedule[i]):
            do_downsample = False
            if j == 0:
                do_downsample = True
            else:
                do_downsample = False
            model = bottle_neck_block_2d(model, number_of_filters=number_of_filters,
                                         downsample=do_downsample,
                                         deconvolution_kernel_size=deconvolution_kernel_size,
                                         weight_decay=weight_decay, dropout_rate=dropout_rate)
            # Record the last block of each group for the decoder's long skips.
            if j == (bottle_neck_block_depth_schedule[i] - 1):
                encoding_layers_with_long_skip_connections.append(model)
                encoding_layer_count += 1
    encoding_layer_count -= 1

    # Transition path (bottom of the 'U'): one downsampling then one
    # upsampling bottleneck block.
    number_of_filters = number_of_filters_at_base_layer * 2**number_of_bottle_neck_layers
    model = bottle_neck_block_2d(model, number_of_filters=number_of_filters,
                                 downsample=True,
                                 deconvolution_kernel_size=deconvolution_kernel_size,
                                 weight_decay=weight_decay, dropout_rate=dropout_rate)
    model = bottle_neck_block_2d(model, number_of_filters=number_of_filters,
                                 upsample=True,
                                 deconvolution_kernel_size=deconvolution_kernel_size,
                                 weight_decay=weight_decay, dropout_rate=dropout_rate)

    # Decoding main path: mirror of the encoder, upsampling on the last block
    # of each group and merging the saved encoder features on the first.
    number_of_bottle_neck_layers = len(bottle_neck_block_depth_schedule)
    for i in range(number_of_bottle_neck_layers):
        number_of_filters = (number_of_filters_at_base_layer *
                             2**(number_of_bottle_neck_layers - i - 1))
        for j in range(bottle_neck_block_depth_schedule[number_of_bottle_neck_layers - i - 1]):
            do_upsample = False
            if j == bottle_neck_block_depth_schedule[number_of_bottle_neck_layers - i - 1] - 1:
                do_upsample = True
            else:
                do_upsample = False
            model = bottle_neck_block_2d(model, number_of_filters=number_of_filters,
                                         upsample=do_upsample,
                                         deconvolution_kernel_size=deconvolution_kernel_size,
                                         weight_decay=weight_decay, dropout_rate=dropout_rate)
            if j == 0:
                # Project to 4x filters to match the bottleneck expansion
                # before merging the long skip connection.
                model = Conv2D(filters=(number_of_filters * 4),
                               kernel_size=(1, 1),
                               padding='same')(model)
                model = skip_connection(encoding_layers_with_long_skip_connections[encoding_layer_count - 1], model)
                encoding_layer_count -= 1

    # Decoding initialization path
    model = simple_block_2d(model, number_of_filters_at_base_layer, upsample=True,
                            convolution_kernel_size=convolution_kernel_size,
                            deconvolution_kernel_size=deconvolution_kernel_size,
                            weight_decay=weight_decay, dropout_rate=dropout_rate)

    # Postprocessing layer
    model = Conv2D(filters=number_of_filters_at_base_layer,
                   kernel_size=convolution_kernel_size,
                   activation='relu',
                   padding='same',
                   kernel_initializer=initializers.he_normal(),
                   kernel_regularizer=regularizers.l2(weight_decay))(model)
    encoding_layer_count -= 1
    model = skip_connection(encoding_layers_with_long_skip_connections[encoding_layer_count - 1], model)

    model = BatchNormalization()(model)
    model = ThresholdedReLU(theta = 0)(model)

    # Final 1x1 conv head: softmax for segmentation, linear for regression.
    convActivation = ''
    if mode == 'classification':
        convActivation = 'softmax'
    elif mode == 'regression':
        convActivation = 'linear'
    else:
        raise ValueError('mode must be either `classification` or `regression`.')

    outputs = Conv2D(filters=number_of_outputs,
                     kernel_size=(1, 1),
                     activation = convActivation,
                     kernel_regularizer=regularizers.l2(weight_decay))(model)

    resunet_model = Model(inputs=inputs, outputs=outputs)

    return resunet_model
def create_resunet_model_3d(input_image_size,
number_of_outputs=1,
number_of_filters_at_base_layer=32,
bottle_neck_block_depth_schedule=(3, 4),
convolution_kernel_size=(3, 3, 3),
deconvolution_kernel_size=(2, 2, 2),
dropout_rate=0.0,
weight_decay=0.0,
mode='classification'
):
"""
3-D implementation of the Resnet + U-net deep learning architecture.
Creates a keras model of the U-net + ResNet deep learning architecture for
image segmentation and regression with the paper available here:
https://arxiv.org/abs/1608.04117
This particular implementation was ported from the following python
implementation:
https://github.com/veugene/fcn_maker/
Arguments
---------
input_image_size : tuple of length 4
Used for specifying the input tensor shape. The
shape (or dimension) of that tensor is the image dimensions followed by
the number of channels (e.g., red, green, and blue). The batch size
(i.e., number of training images) is not specified a priori.
number_of_outputs : integer
Meaning depends on the mode. For 'classification' this is the number of
segmentation labels. For 'regression' this is the number of outputs.
number_of_filters_at_base_layer : integer
Number of filters at the beginning and end of the 'U'. Doubles at each
descending/ascending layer.
bottle_neck_block_depth_schedule : tuple
Tuple that provides the encoding layer schedule for the number of bottleneck
blocks per long skip connection.
convolution_kernel_size : tuple of length 3
3-d vector defining the kernel size during the encoding path
deconvolution_kernel_size : tuple of length 3
3-d vector defining the kernel size during the decoding
dropout_rate : scalar
Float between 0 and 1 to use between dense layers.
weight_decay : scalar
Weighting parameter for L2 regularization of the kernel weights of the
convolution layers. Default = 0.0.
mode : string
'classification' or 'regression'. Default = 'classification'.
Returns
-------
Keras model
A 3-D Keras model defining the network.
Example
-------
>>> model = create_resunet_model_3d((128, 128, 128, 1))
>>> model.summary()
"""
def simple_block_3d(input, number_of_filters,
downsample=False, upsample=False,
convolution_kernel_size=(3, 3, 3),
deconvolution_kernel_size=(2, 2, 2),
weight_decay=0.0, dropout_rate=0.0):
number_of_output_filters = number_of_filters
output = BatchNormalization()(input)
output = ThresholdedReLU(theta = 0)(output)
if downsample:
output = MaxPooling3D(pool_size=(2, 2, 2))(output)
output = Conv3D(filters=number_of_filters,
kernel_size=convolution_kernel_size,
padding='same',
kernel_regularizer=regularizers.l2(weight_decay))(output)
if upsample:
output = Conv3DTranspose(filters=number_of_filters,
kernel_size=deconvolution_kernel_size,
padding='same',
kernel_initializer=initializers.he_normal(),
kernel_regularizer=regularizers.l2(weight_decay))(output)
output = UpSampling3D(size=(2, 2, 2))(output)
if dropout_rate > 0.0:
output=Dropout(rate=dropout_rate)(output)
# Modify the input so that it has the same size as the output
if downsample:
input = Conv3D(filters=number_of_output_filters,
kernel_size=(1, 1, 1),
strides=(2, 2, 2),
padding='same')(input)
elif upsample:
input = Conv3DTranspose(filters=number_of_output_filters,
kernel_size=(1, 1, 1),
padding='same')(input)
input = UpSampling3D(size=(2, 2, 2))(input)
elif number_of_filters != number_of_output_filters:
input = Conv3D(filters=number_of_output_filters,
kernel_size=(1, 1, 1),
padding='same')(input)
output = skip_connection(input, output)
return(output)
def bottle_neck_block_3d(input, number_of_filters, downsample=False,
upsample=False, deconvolution_kernel_size=(2, 2, 2),
weight_decay=0.0, dropout_rate=0.0):
output = input
number_of_output_filters = number_of_filters
if downsample:
output = BatchNormalization()(output)
output = ThresholdedReLU(theta = 0)(output)
output = Conv3D(filters=number_of_filters,
kernel_size=(1, 1, 1),
strides=(2, 2, 2),
kernel_initializer=initializers.he_normal(),
kernel_regularizer=regularizers.l2(weight_decay))(output)
output = BatchNormalization()(output)
output = ThresholdedReLU(theta = 0)(output)
output = Conv3D(filters=number_of_filters,
kernel_size=(1, 1, 1),
kernel_initializer=initializers.he_normal(),
kernel_regularizer=regularizers.l2(weight_decay))(output)
output = BatchNormalization()(output)
output = ThresholdedReLU(theta = 0)(output)
if upsample:
output = Conv3DTranspose(filters=number_of_filters,
kernel_size=deconvolution_kernel_size,
padding='same',
kernel_initializer=initializers.he_normal(),
kernel_regularizer=regularizers.l2(weight_decay))(output)
output = UpSampling3D(size=(2, 2, 2))(output)
output = Conv3D(filters=(number_of_filters * 4),
kernel_size=(1, 1, 1),
kernel_initializer=initializers.he_normal(),
kernel_regularizer=regularizers.l2(weight_decay))(output)
number_of_output_filters = number_of_filters * 4
if dropout_rate > 0.0:
output=Dropout(rate=dropout_rate)(output)
# Modify the input so that it has the same size as the output
if downsample:
input = Conv3D(filters=number_of_output_filters,
kernel_size=(1, 1, 1),
strides=(2, 2, 2),
padding='same')(input)
elif upsample:
input = Conv3DTranspose(filters=number_of_output_filters,
kernel_size=(1, 1, 1),
padding='same')(input)
input = UpSampling3D(size=(2, 2, 2))(input)
elif number_of_filters != number_of_output_filters:
input = Conv3D(filters=number_of_output_filters,
kernel_size=(1, 1, 1),
padding='valid')(input)
output = skip_connection(input, output)
return(output)
def skip_connection(source, target, merge_mode='sum'):
layer_list = [source, target]
output = None
if merge_mode == 'sum':
output = Add()(layer_list)
else:
channel_axis = 0
if K.image_data_format() == 'channels_last':
channel_axis = -1
output = Concatenate(axis=channel_axis)(layer_list)
return(output)
inputs = Input(shape = input_image_size)
encoding_layers_with_long_skip_connections = []
encoding_layer_count = 1
# Preprocessing layer
model = Conv3D(filters=number_of_filters_at_base_layer,
kernel_size=convolution_kernel_size,
activation='relu',
padding='same',
kernel_initializer=initializers.he_normal(),
kernel_regularizer=regularizers.l2(weight_decay))(inputs)
encoding_layers_with_long_skip_connections.append(model)
encoding_layer_count += 1
# Encoding initialization path
model = simple_block_3d(model, number_of_filters_at_base_layer, downsample=True,
convolution_kernel_size=convolution_kernel_size,
deconvolution_kernel_size=deconvolution_kernel_size,
weight_decay=weight_decay, dropout_rate=dropout_rate)
encoding_layers_with_long_skip_connections.append(model)
encoding_layer_count += 1
# Encoding main path
number_of_bottle_neck_layers = len(bottle_neck_block_depth_schedule)
for i in range(number_of_bottle_neck_layers):
number_of_filters = number_of_filters_at_base_layer * 2**i
for j in range(bottle_neck_block_depth_schedule[i]):
do_downsample = False
if j == 0:
do_downsample = True
else:
do_downsample = False
model = bottle_neck_block_3d(model, number_of_filters=number_of_filters,
downsample=do_downsample,
deconvolution_kernel_size=deconvolution_kernel_size,
weight_decay=weight_decay, dropout_rate=dropout_rate)
if j == (bottle_neck_block_depth_schedule[i] - 1):
encoding_layers_with_long_skip_connections.append(model)
encoding_layer_count += 1
encoding_layer_count -= 1
# Transition path
number_of_filters = number_of_filters_at_base_layer * 2**number_of_bottle_neck_layers
model = bottle_neck_block_3d(model, number_of_filters=number_of_filters,
downsample=True,
deconvolution_kernel_size=deconvolution_kernel_size,
weight_decay=weight_decay, dropout_rate=dropout_rate)
model = bottle_neck_block_3d(model, number_of_filters=number_of_filters,
upsample=True,
deconvolution_kernel_size=deconvolution_kernel_size,
weight_decay=weight_decay, dropout_rate=dropout_rate)
# Decoding main path
number_of_bottle_neck_layers = len(bottle_neck_block_depth_schedule)
for i in range(number_of_bottle_neck_layers):
number_of_filters = (number_of_filters_at_base_layer *
2**(number_of_bottle_neck_layers - i - 1))
for j in range(bottle_neck_block_depth_schedule[number_of_bottle_neck_layers - i - 1]):
do_upsample = False
if j == bottle_neck_block_depth_schedule[number_of_bottle_neck_layers - i - 1] - 1:
do_upsample = True
else:
do_upsample = False
model = bottle_neck_block_3d(model, number_of_filters=number_of_filters,
upsample=do_upsample,
deconvolution_kernel_size=deconvolution_kernel_size,
weight_decay=weight_decay, dropout_rate=dropout_rate)
if j == 0:
model = Conv3D(filters=(number_of_filters * 4),
kernel_size=(1, 1, 1),
padding='same')(model)
model = skip_connection(encoding_layers_with_long_skip_connections[encoding_layer_count - 1], model)
encoding_layer_count -= 1
# Decoding initialization path
model = simple_block_3d(model, number_of_filters_at_base_layer, upsample=True,
convolution_kernel_size=convolution_kernel_size,
deconvolution_kernel_size=deconvolution_kernel_size,
weight_decay=weight_decay, dropout_rate=dropout_rate)
# Postprocessing layer
model = Conv3D(filters=number_of_filters_at_base_layer,
kernel_size=convolution_kernel_size,
activation='relu',
padding='same',
kernel_initializer=initializers.he_normal(),
kernel_regularizer=regularizers.l2(weight_decay))(model)
encoding_layer_count -= 1
model = skip_connection(encoding_layers_with_long_skip_connections[encoding_layer_count - 1], model)
model = BatchNormalization()(model)
model = ThresholdedReLU(theta = 0)(model)
convActivation = ''
if mode == 'classification':
convActivation = 'softmax'
elif mode == 'regression':
convActivation = 'linear'
else:
raise ValueError('mode must be either `classification` or `regression`.')
outputs = Conv3D(filters=number_of_outputs,
kernel_size=(1, 1, 1),
activation = convActivation,
kernel_regularizer=regularizers.l2(weight_decay))(model)
resunet_model = Model(inputs=inputs, outputs=outputs)
return resunet_model
| 40.27907
| 115
| 0.587074
| 2,901
| 27,712
| 5.299897
| 0.077215
| 0.06452
| 0.068293
| 0.054374
| 0.970862
| 0.970797
| 0.970797
| 0.970797
| 0.962472
| 0.962472
| 0
| 0.020145
| 0.342595
| 27,712
| 687
| 116
| 40.3377
| 0.823801
| 0.155348
| 0
| 0.924939
| 0
| 0
| 0.015632
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.01937
| false
| 0
| 0.014528
| 0
| 0.038741
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d208130aa75e864a9b7f50e78433df7f3f070892
| 126
|
py
|
Python
|
env/lib/python3.8/site-packages/plotly/dashboard_objs.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11,750
|
2015-10-12T07:03:39.000Z
|
2022-03-31T20:43:15.000Z
|
env/lib/python3.8/site-packages/plotly/dashboard_objs.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 2,951
|
2015-10-12T00:41:25.000Z
|
2022-03-31T22:19:26.000Z
|
env/lib/python3.8/site-packages/plotly/dashboard_objs.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 2,623
|
2015-10-15T14:40:27.000Z
|
2022-03-28T16:05:50.000Z
|
from __future__ import absolute_import
from _plotly_future_ import _chart_studio_error
_chart_studio_error("dashboard_objs")
| 25.2
| 47
| 0.888889
| 17
| 126
| 5.705882
| 0.588235
| 0.247423
| 0.329897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.079365
| 126
| 4
| 48
| 31.5
| 0.836207
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d271fde1f059ddf73f9cf07bd0a8977212de8f1b
| 17,685
|
py
|
Python
|
volleyball/__init__.py
|
agucova/cs42
|
83640ee6b8a38437563e476a3bfb3805e54930b8
|
[
"MIT"
] | null | null | null |
volleyball/__init__.py
|
agucova/cs42
|
83640ee6b8a38437563e476a3bfb3805e54930b8
|
[
"MIT"
] | null | null | null |
volleyball/__init__.py
|
agucova/cs42
|
83640ee6b8a38437563e476a3bfb3805e54930b8
|
[
"MIT"
] | null | null | null |
import check50
@check50.check()
def partida_0():
"""partida_0"""
check50.run("python3 volleyball.py").stdin("A\nB\nA\nA\nA\nB\nB\nA\nA\nA", prompt=False).stdout("EMPIEZA\nSACA A\nGANA A\nA 1 B 0\nSACA A\nGANA B\nA 1 B 0\nSACA B\nGANA A\nA 1 B 0\nSACA A\nGANA A\nA 2 B 0\nSACA A\nGANA A\nA 3 B 0\nSACA A\nGANA B\nA 3 B 0\nSACA B\nGANA B\nA 3 B 1\nSACA B\nGANA A\nA 3 B 1\nSACA A\nGANA A\nA 4 B 1\nSACA A\nGANA A\nA 5 B 1\nFINAL", regex=False).exit(0)
@check50.check()
def partida_1():
"""partida_1"""
check50.run("python3 volleyball.py").stdin("A\nA\nA\nA\nA", prompt=False).stdout("EMPIEZA\nSACA A\nGANA A\nA 1 B 0\nSACA A\nGANA A\nA 2 B 0\nSACA A\nGANA A\nA 3 B 0\nSACA A\nGANA A\nA 4 B 0\nSACA A\nGANA A\nA 5 B 0\nFINAL", regex=False).exit(0)
@check50.check()
def partida_2():
"""partida_2"""
check50.run("python3 volleyball.py").stdin("A\nB\nB\nB\nB\nB\nA\nA\nA\nA\nA\nA", prompt=False).stdout("EMPIEZA\nSACA A\nGANA A\nA 1 B 0\nSACA A\nGANA B\nA 1 B 0\nSACA B\nGANA B\nA 1 B 1\nSACA B\nGANA B\nA 1 B 2\nSACA B\nGANA B\nA 1 B 3\nSACA B\nGANA B\nA 1 B 4\nSACA B\nGANA A\nA 1 B 4\nSACA A\nGANA A\nA 2 B 4\nSACA A\nGANA A\nA 3 B 4\nSACA A\nGANA A\nA 4 B 4\nSACA A\nGANA A\nA 5 B 4\nSACA A\nGANA A\nA 6 B 4\nFINAL", regex=False).exit(0)
@check50.check()
def partida_3():
"""partida_3"""
check50.run("python3 volleyball.py").stdin("A\nA\nA\nA\nB\nB\nB\nB\nB\nB\nA\nB\nA\nA\nA\nB\nA\nA", prompt=False).stdout("EMPIEZA\nSACA A\nGANA A\nA 1 B 0\nSACA A\nGANA A\nA 2 B 0\nSACA A\nGANA A\nA 3 B 0\nSACA A\nGANA A\nA 4 B 0\nSACA A\nGANA B\nA 4 B 0\nSACA B\nGANA B\nA 4 B 1\nSACA B\nGANA B\nA 4 B 2\nSACA B\nGANA B\nA 4 B 3\nSACA B\nGANA B\nA 4 B 4\nSACA B\nGANA B\nA 4 B 5\nSACA B\nGANA A\nA 4 B 5\nSACA A\nGANA B\nA 4 B 5\nSACA B\nGANA A\nA 4 B 5\nSACA A\nGANA A\nA 5 B 5\nSACA A\nGANA A\nA 6 B 5\nSACA A\nGANA B\nA 6 B 5\nSACA B\nGANA A\nA 6 B 5\nSACA A\nGANA A\nA 7 B 5\nFINAL", regex=False).exit(0)
@check50.check()
def partida_4():
"""partida_4"""
check50.run("python3 volleyball.py").stdin("B\nA\nB\nB\nA\nA\nA\nA\nA\nB\nB\nB\nB\nB\nB", prompt=False).stdout("EMPIEZA\nSACA A\nGANA B\nA 0 B 0\nSACA B\nGANA A\nA 0 B 0\nSACA A\nGANA B\nA 0 B 0\nSACA B\nGANA B\nA 0 B 1\nSACA B\nGANA A\nA 0 B 1\nSACA A\nGANA A\nA 1 B 1\nSACA A\nGANA A\nA 2 B 1\nSACA A\nGANA A\nA 3 B 1\nSACA A\nGANA A\nA 4 B 1\nSACA A\nGANA B\nA 4 B 1\nSACA B\nGANA B\nA 4 B 2\nSACA B\nGANA B\nA 4 B 3\nSACA B\nGANA B\nA 4 B 4\nSACA B\nGANA B\nA 4 B 5\nSACA B\nGANA B\nA 4 B 6\nFINAL", regex=False).exit(0)
@check50.check()
def partida_5():
"""partida_5"""
check50.run("python3 volleyball.py").stdin("B\nB\nA\nA\nB\nA\nB\nB\nA\nB\nA\nB\nA\nA\nB\nB\nA\nB\nB\nB", prompt=False).stdout("EMPIEZA\nSACA A\nGANA B\nA 0 B 0\nSACA B\nGANA B\nA 0 B 1\nSACA B\nGANA A\nA 0 B 1\nSACA A\nGANA A\nA 1 B 1\nSACA A\nGANA B\nA 1 B 1\nSACA B\nGANA A\nA 1 B 1\nSACA A\nGANA B\nA 1 B 1\nSACA B\nGANA B\nA 1 B 2\nSACA B\nGANA A\nA 1 B 2\nSACA A\nGANA B\nA 1 B 2\nSACA B\nGANA A\nA 1 B 2\nSACA A\nGANA B\nA 1 B 2\nSACA B\nGANA A\nA 1 B 2\nSACA A\nGANA A\nA 2 B 2\nSACA A\nGANA B\nA 2 B 2\nSACA B\nGANA B\nA 2 B 3\nSACA B\nGANA A\nA 2 B 3\nSACA A\nGANA B\nA 2 B 3\nSACA B\nGANA B\nA 2 B 4\nSACA B\nGANA B\nA 2 B 5\nFINAL", regex=False).exit(0)
@check50.check()
def partida_6():
"""partida_6"""
check50.run("python3 volleyball.py").stdin("A\nA\nB\nB\nA\nA\nB\nA\nA\nB\nA\nA", prompt=False).stdout("EMPIEZA\nSACA A\nGANA A\nA 1 B 0\nSACA A\nGANA A\nA 2 B 0\nSACA A\nGANA B\nA 2 B 0\nSACA B\nGANA B\nA 2 B 1\nSACA B\nGANA A\nA 2 B 1\nSACA A\nGANA A\nA 3 B 1\nSACA A\nGANA B\nA 3 B 1\nSACA B\nGANA A\nA 3 B 1\nSACA A\nGANA A\nA 4 B 1\nSACA A\nGANA B\nA 4 B 1\nSACA B\nGANA A\nA 4 B 1\nSACA A\nGANA A\nA 5 B 1\nFINAL", regex=False).exit(0)
@check50.check()
def partida_7():
"""partida_7"""
check50.run("python3 volleyball.py").stdin("A\nA\nA\nB\nA\nB\nA\nA\nB\nA\nA", prompt=False).stdout("EMPIEZA\nSACA A\nGANA A\nA 1 B 0\nSACA A\nGANA A\nA 2 B 0\nSACA A\nGANA A\nA 3 B 0\nSACA A\nGANA B\nA 3 B 0\nSACA B\nGANA A\nA 3 B 0\nSACA A\nGANA B\nA 3 B 0\nSACA B\nGANA A\nA 3 B 0\nSACA A\nGANA A\nA 4 B 0\nSACA A\nGANA B\nA 4 B 0\nSACA B\nGANA A\nA 4 B 0\nSACA A\nGANA A\nA 5 B 0\nFINAL", regex=False).exit(0)
@check50.check()
def partida_8():
"""partida_8"""
check50.run("python3 volleyball.py").stdin("B\nB\nB\nA\nA\nB\nB\nA\nB\nB\nB", prompt=False).stdout("EMPIEZA\nSACA A\nGANA B\nA 0 B 0\nSACA B\nGANA B\nA 0 B 1\nSACA B\nGANA B\nA 0 B 2\nSACA B\nGANA A\nA 0 B 2\nSACA A\nGANA A\nA 1 B 2\nSACA A\nGANA B\nA 1 B 2\nSACA B\nGANA B\nA 1 B 3\nSACA B\nGANA A\nA 1 B 3\nSACA A\nGANA B\nA 1 B 3\nSACA B\nGANA B\nA 1 B 4\nSACA B\nGANA B\nA 1 B 5\nFINAL", regex=False).exit(0)
@check50.check()
def partida_9():
"""partida_9"""
check50.run("python3 volleyball.py").stdin("B\nB\nA\nB\nB\nB\nB\nA\nA\nA\nB\nB", prompt=False).stdout("EMPIEZA\nSACA A\nGANA B\nA 0 B 0\nSACA B\nGANA B\nA 0 B 1\nSACA B\nGANA A\nA 0 B 1\nSACA A\nGANA B\nA 0 B 1\nSACA B\nGANA B\nA 0 B 2\nSACA B\nGANA B\nA 0 B 3\nSACA B\nGANA B\nA 0 B 4\nSACA B\nGANA A\nA 0 B 4\nSACA A\nGANA A\nA 1 B 4\nSACA A\nGANA A\nA 2 B 4\nSACA A\nGANA B\nA 2 B 4\nSACA B\nGANA B\nA 2 B 5\nFINAL", regex=False).exit(0)
@check50.check()
def partida_10():
"""partida_10"""
check50.run("python3 volleyball.py").stdin("A\nB\nB\nB\nB\nB\nA\nA\nA\nA\nB\nB\nB", prompt=False).stdout("EMPIEZA\nSACA A\nGANA A\nA 1 B 0\nSACA A\nGANA B\nA 1 B 0\nSACA B\nGANA B\nA 1 B 1\nSACA B\nGANA B\nA 1 B 2\nSACA B\nGANA B\nA 1 B 3\nSACA B\nGANA B\nA 1 B 4\nSACA B\nGANA A\nA 1 B 4\nSACA A\nGANA A\nA 2 B 4\nSACA A\nGANA A\nA 3 B 4\nSACA A\nGANA A\nA 4 B 4\nSACA A\nGANA B\nA 4 B 4\nSACA B\nGANA B\nA 4 B 5\nSACA B\nGANA B\nA 4 B 6\nFINAL", regex=False).exit(0)
@check50.check()
def partida_11():
"""partida_11"""
check50.run("python3 volleyball.py").stdin("A\nA\nA\nA\nB\nB\nB\nB\nB\nB\nA\nA\nA\nB\nB\nB\nA\nA\nA\nA", prompt=False).stdout("EMPIEZA\nSACA A\nGANA A\nA 1 B 0\nSACA A\nGANA A\nA 2 B 0\nSACA A\nGANA A\nA 3 B 0\nSACA A\nGANA A\nA 4 B 0\nSACA A\nGANA B\nA 4 B 0\nSACA B\nGANA B\nA 4 B 1\nSACA B\nGANA B\nA 4 B 2\nSACA B\nGANA B\nA 4 B 3\nSACA B\nGANA B\nA 4 B 4\nSACA B\nGANA B\nA 4 B 5\nSACA B\nGANA A\nA 4 B 5\nSACA A\nGANA A\nA 5 B 5\nSACA A\nGANA A\nA 6 B 5\nSACA A\nGANA B\nA 6 B 5\nSACA B\nGANA B\nA 6 B 6\nSACA B\nGANA B\nA 6 B 7\nSACA B\nGANA A\nA 6 B 7\nSACA A\nGANA A\nA 7 B 7\nSACA A\nGANA A\nA 8 B 7\nSACA A\nGANA A\nA 9 B 7\nFINAL", regex=False).exit(0)
@check50.check()
def partida_12():
"""partida_12"""
check50.run("python3 volleyball.py").stdin("B\nB\nB\nB\nB\nB", prompt=False).stdout("EMPIEZA\nSACA A\nGANA B\nA 0 B 0\nSACA B\nGANA B\nA 0 B 1\nSACA B\nGANA B\nA 0 B 2\nSACA B\nGANA B\nA 0 B 3\nSACA B\nGANA B\nA 0 B 4\nSACA B\nGANA B\nA 0 B 5\nFINAL", regex=False).exit(0)
@check50.check()
def partida_13():
"""partida_13"""
check50.run("python3 volleyball.py").stdin("B\nA\nB\nA\nA\nA\nA\nB\nA\nA\nB\nA\nB\nB\nA\nA", prompt=False).stdout("EMPIEZA\nSACA A\nGANA B\nA 0 B 0\nSACA B\nGANA A\nA 0 B 0\nSACA A\nGANA B\nA 0 B 0\nSACA B\nGANA A\nA 0 B 0\nSACA A\nGANA A\nA 1 B 0\nSACA A\nGANA A\nA 2 B 0\nSACA A\nGANA A\nA 3 B 0\nSACA A\nGANA B\nA 3 B 0\nSACA B\nGANA A\nA 3 B 0\nSACA A\nGANA A\nA 4 B 0\nSACA A\nGANA B\nA 4 B 0\nSACA B\nGANA A\nA 4 B 0\nSACA A\nGANA B\nA 4 B 0\nSACA B\nGANA B\nA 4 B 1\nSACA B\nGANA A\nA 4 B 1\nSACA A\nGANA A\nA 5 B 1\nFINAL", regex=False).exit(0)
@check50.check()
def partida_14():
"""partida_14"""
check50.run("python3 volleyball.py").stdin("A\nB\nB\nA\nA\nA\nB\nA\nB\nB\nA\nB\nB\nB\nA\nB\nA\nB\nA\nA\nB\nA\nA\nA", prompt=False).stdout("EMPIEZA\nSACA A\nGANA A\nA 1 B 0\nSACA A\nGANA B\nA 1 B 0\nSACA B\nGANA B\nA 1 B 1\nSACA B\nGANA A\nA 1 B 1\nSACA A\nGANA A\nA 2 B 1\nSACA A\nGANA A\nA 3 B 1\nSACA A\nGANA B\nA 3 B 1\nSACA B\nGANA A\nA 3 B 1\nSACA A\nGANA B\nA 3 B 1\nSACA B\nGANA B\nA 3 B 2\nSACA B\nGANA A\nA 3 B 2\nSACA A\nGANA B\nA 3 B 2\nSACA B\nGANA B\nA 3 B 3\nSACA B\nGANA B\nA 3 B 4\nSACA B\nGANA A\nA 3 B 4\nSACA A\nGANA B\nA 3 B 4\nSACA B\nGANA A\nA 3 B 4\nSACA A\nGANA B\nA 3 B 4\nSACA B\nGANA A\nA 3 B 4\nSACA A\nGANA A\nA 4 B 4\nSACA A\nGANA B\nA 4 B 4\nSACA B\nGANA A\nA 4 B 4\nSACA A\nGANA A\nA 5 B 4\nSACA A\nGANA A\nA 6 B 4\nFINAL", regex=False).exit(0)
@check50.check()
def partida_15():
"""partida_15"""
check50.run("python3 volleyball.py").stdin("B\nB\nB\nA\nB\nA\nB\nA\nA\nB\nA\nA\nA\nA\nA", prompt=False).stdout("EMPIEZA\nSACA A\nGANA B\nA 0 B 0\nSACA B\nGANA B\nA 0 B 1\nSACA B\nGANA B\nA 0 B 2\nSACA B\nGANA A\nA 0 B 2\nSACA A\nGANA B\nA 0 B 2\nSACA B\nGANA A\nA 0 B 2\nSACA A\nGANA B\nA 0 B 2\nSACA B\nGANA A\nA 0 B 2\nSACA A\nGANA A\nA 1 B 2\nSACA A\nGANA B\nA 1 B 2\nSACA B\nGANA A\nA 1 B 2\nSACA A\nGANA A\nA 2 B 2\nSACA A\nGANA A\nA 3 B 2\nSACA A\nGANA A\nA 4 B 2\nSACA A\nGANA A\nA 5 B 2\nFINAL", regex=False).exit(0)
@check50.check()
def partida_16():
"""partida_16"""
check50.run("python3 volleyball.py").stdin("A\nB\nB\nB\nB\nB\nB", prompt=False).stdout("EMPIEZA\nSACA A\nGANA A\nA 1 B 0\nSACA A\nGANA B\nA 1 B 0\nSACA B\nGANA B\nA 1 B 1\nSACA B\nGANA B\nA 1 B 2\nSACA B\nGANA B\nA 1 B 3\nSACA B\nGANA B\nA 1 B 4\nSACA B\nGANA B\nA 1 B 5\nFINAL", regex=False).exit(0)
@check50.check()
def partida_17():
"""partida_17"""
check50.run("python3 volleyball.py").stdin("A\nA\nA\nB\nB\nA\nB\nA\nB\nB\nB\nA\nA\nA", prompt=False).stdout("EMPIEZA\nSACA A\nGANA A\nA 1 B 0\nSACA A\nGANA A\nA 2 B 0\nSACA A\nGANA A\nA 3 B 0\nSACA A\nGANA B\nA 3 B 0\nSACA B\nGANA B\nA 3 B 1\nSACA B\nGANA A\nA 3 B 1\nSACA A\nGANA B\nA 3 B 1\nSACA B\nGANA A\nA 3 B 1\nSACA A\nGANA B\nA 3 B 1\nSACA B\nGANA B\nA 3 B 2\nSACA B\nGANA B\nA 3 B 3\nSACA B\nGANA A\nA 3 B 3\nSACA A\nGANA A\nA 4 B 3\nSACA A\nGANA A\nA 5 B 3\nFINAL", regex=False).exit(0)
@check50.check()
def partida_18():
"""partida_18"""
check50.run("python3 volleyball.py").stdin("A\nB\nA\nB\nA\nB\nA\nA\nA\nA\nA", prompt=False).stdout("EMPIEZA\nSACA A\nGANA A\nA 1 B 0\nSACA A\nGANA B\nA 1 B 0\nSACA B\nGANA A\nA 1 B 0\nSACA A\nGANA B\nA 1 B 0\nSACA B\nGANA A\nA 1 B 0\nSACA A\nGANA B\nA 1 B 0\nSACA B\nGANA A\nA 1 B 0\nSACA A\nGANA A\nA 2 B 0\nSACA A\nGANA A\nA 3 B 0\nSACA A\nGANA A\nA 4 B 0\nSACA A\nGANA A\nA 5 B 0\nFINAL", regex=False).exit(0)
@check50.check()
def partida_19():
"""partida_19"""
check50.run("python3 volleyball.py").stdin("A\nA\nA\nB\nB\nB\nA\nB\nA\nA\nB\nA\nA", prompt=False).stdout("EMPIEZA\nSACA A\nGANA A\nA 1 B 0\nSACA A\nGANA A\nA 2 B 0\nSACA A\nGANA A\nA 3 B 0\nSACA A\nGANA B\nA 3 B 0\nSACA B\nGANA B\nA 3 B 1\nSACA B\nGANA B\nA 3 B 2\nSACA B\nGANA A\nA 3 B 2\nSACA A\nGANA B\nA 3 B 2\nSACA B\nGANA A\nA 3 B 2\nSACA A\nGANA A\nA 4 B 2\nSACA A\nGANA B\nA 4 B 2\nSACA B\nGANA A\nA 4 B 2\nSACA A\nGANA A\nA 5 B 2\nFINAL", regex=False).exit(0)
@check50.check()
def partida_20():
"""partida_20"""
check50.run("python3 volleyball.py").stdin("B\nA\nB\nB\nA\nA\nB\nA\nA\nB\nA\nB\nA\nB\nA\nB\nA\nB\nB\nB\nB\nB", prompt=False).stdout("EMPIEZA\nSACA A\nGANA B\nA 0 B 0\nSACA B\nGANA A\nA 0 B 0\nSACA A\nGANA B\nA 0 B 0\nSACA B\nGANA B\nA 0 B 1\nSACA B\nGANA A\nA 0 B 1\nSACA A\nGANA A\nA 1 B 1\nSACA A\nGANA B\nA 1 B 1\nSACA B\nGANA A\nA 1 B 1\nSACA A\nGANA A\nA 2 B 1\nSACA A\nGANA B\nA 2 B 1\nSACA B\nGANA A\nA 2 B 1\nSACA A\nGANA B\nA 2 B 1\nSACA B\nGANA A\nA 2 B 1\nSACA A\nGANA B\nA 2 B 1\nSACA B\nGANA A\nA 2 B 1\nSACA A\nGANA B\nA 2 B 1\nSACA B\nGANA A\nA 2 B 1\nSACA A\nGANA B\nA 2 B 1\nSACA B\nGANA B\nA 2 B 2\nSACA B\nGANA B\nA 2 B 3\nSACA B\nGANA B\nA 2 B 4\nSACA B\nGANA B\nA 2 B 5\nFINAL", regex=False).exit(0)
@check50.check()
def partida_21():
"""partida_21"""
check50.run("python3 volleyball.py").stdin("B\nA\nA\nB\nA\nB\nA\nA\nB\nB\nA\nB\nA\nA\nA\nB\nA\nB\nA\nB\nA\nA", prompt=False).stdout("EMPIEZA\nSACA A\nGANA B\nA 0 B 0\nSACA B\nGANA A\nA 0 B 0\nSACA A\nGANA A\nA 1 B 0\nSACA A\nGANA B\nA 1 B 0\nSACA B\nGANA A\nA 1 B 0\nSACA A\nGANA B\nA 1 B 0\nSACA B\nGANA A\nA 1 B 0\nSACA A\nGANA A\nA 2 B 0\nSACA A\nGANA B\nA 2 B 0\nSACA B\nGANA B\nA 2 B 1\nSACA B\nGANA A\nA 2 B 1\nSACA A\nGANA B\nA 2 B 1\nSACA B\nGANA A\nA 2 B 1\nSACA A\nGANA A\nA 3 B 1\nSACA A\nGANA A\nA 4 B 1\nSACA A\nGANA B\nA 4 B 1\nSACA B\nGANA A\nA 4 B 1\nSACA A\nGANA B\nA 4 B 1\nSACA B\nGANA A\nA 4 B 1\nSACA A\nGANA B\nA 4 B 1\nSACA B\nGANA A\nA 4 B 1\nSACA A\nGANA A\nA 5 B 1\nFINAL", regex=False).exit(0)
@check50.check()
def partida_22():
"""partida_22"""
check50.run("python3 volleyball.py").stdin("B\nB\nB\nB\nB\nA\nA\nB\nA\nB\nA\nB\nB", prompt=False).stdout("EMPIEZA\nSACA A\nGANA B\nA 0 B 0\nSACA B\nGANA B\nA 0 B 1\nSACA B\nGANA B\nA 0 B 2\nSACA B\nGANA B\nA 0 B 3\nSACA B\nGANA B\nA 0 B 4\nSACA B\nGANA A\nA 0 B 4\nSACA A\nGANA A\nA 1 B 4\nSACA A\nGANA B\nA 1 B 4\nSACA B\nGANA A\nA 1 B 4\nSACA A\nGANA B\nA 1 B 4\nSACA B\nGANA A\nA 1 B 4\nSACA A\nGANA B\nA 1 B 4\nSACA B\nGANA B\nA 1 B 5\nFINAL", regex=False).exit(0)
@check50.check()
def partida_23():
"""partida_23"""
check50.run("python3 volleyball.py").stdin("A\nA\nA\nB\nA\nB\nB\nB\nB\nA\nB\nB\nA\nA\nA\nB\nA\nA", prompt=False).stdout("EMPIEZA\nSACA A\nGANA A\nA 1 B 0\nSACA A\nGANA A\nA 2 B 0\nSACA A\nGANA A\nA 3 B 0\nSACA A\nGANA B\nA 3 B 0\nSACA B\nGANA A\nA 3 B 0\nSACA A\nGANA B\nA 3 B 0\nSACA B\nGANA B\nA 3 B 1\nSACA B\nGANA B\nA 3 B 2\nSACA B\nGANA B\nA 3 B 3\nSACA B\nGANA A\nA 3 B 3\nSACA A\nGANA B\nA 3 B 3\nSACA B\nGANA B\nA 3 B 4\nSACA B\nGANA A\nA 3 B 4\nSACA A\nGANA A\nA 4 B 4\nSACA A\nGANA A\nA 5 B 4\nSACA A\nGANA B\nA 5 B 4\nSACA B\nGANA A\nA 5 B 4\nSACA A\nGANA A\nA 6 B 4\nFINAL", regex=False).exit(0)
@check50.check()
def partida_24():
"""partida_24"""
check50.run("python3 volleyball.py").stdin("B\nA\nB\nB\nB\nB\nB\nB", prompt=False).stdout("EMPIEZA\nSACA A\nGANA B\nA 0 B 0\nSACA B\nGANA A\nA 0 B 0\nSACA A\nGANA B\nA 0 B 0\nSACA B\nGANA B\nA 0 B 1\nSACA B\nGANA B\nA 0 B 2\nSACA B\nGANA B\nA 0 B 3\nSACA B\nGANA B\nA 0 B 4\nSACA B\nGANA B\nA 0 B 5\nFINAL", regex=False).exit(0)
@check50.check()
def partida_25():
"""partida_25"""
check50.run("python3 volleyball.py").stdin("A\nA\nA\nB\nB\nB\nA\nA\nB\nB\nB\nB\nA\nB\nB", prompt=False).stdout("EMPIEZA\nSACA A\nGANA A\nA 1 B 0\nSACA A\nGANA A\nA 2 B 0\nSACA A\nGANA A\nA 3 B 0\nSACA A\nGANA B\nA 3 B 0\nSACA B\nGANA B\nA 3 B 1\nSACA B\nGANA B\nA 3 B 2\nSACA B\nGANA A\nA 3 B 2\nSACA A\nGANA A\nA 4 B 2\nSACA A\nGANA B\nA 4 B 2\nSACA B\nGANA B\nA 4 B 3\nSACA B\nGANA B\nA 4 B 4\nSACA B\nGANA B\nA 4 B 5\nSACA B\nGANA A\nA 4 B 5\nSACA A\nGANA B\nA 4 B 5\nSACA B\nGANA B\nA 4 B 6\nFINAL", regex=False).exit(0)
@check50.check()
def partida_26():
"""partida_26"""
check50.run("python3 volleyball.py").stdin("B\nA\nB\nB\nB\nA\nA\nA\nB\nA\nB\nA\nA\nB\nB\nB\nA\nB\nB", prompt=False).stdout("EMPIEZA\nSACA A\nGANA B\nA 0 B 0\nSACA B\nGANA A\nA 0 B 0\nSACA A\nGANA B\nA 0 B 0\nSACA B\nGANA B\nA 0 B 1\nSACA B\nGANA B\nA 0 B 2\nSACA B\nGANA A\nA 0 B 2\nSACA A\nGANA A\nA 1 B 2\nSACA A\nGANA A\nA 2 B 2\nSACA A\nGANA B\nA 2 B 2\nSACA B\nGANA A\nA 2 B 2\nSACA A\nGANA B\nA 2 B 2\nSACA B\nGANA A\nA 2 B 2\nSACA A\nGANA A\nA 3 B 2\nSACA A\nGANA B\nA 3 B 2\nSACA B\nGANA B\nA 3 B 3\nSACA B\nGANA B\nA 3 B 4\nSACA B\nGANA A\nA 3 B 4\nSACA A\nGANA B\nA 3 B 4\nSACA B\nGANA B\nA 3 B 5\nFINAL", regex=False).exit(0)
@check50.check()
def partida_27():
"""partida_27"""
check50.run("python3 volleyball.py").stdin("A\nB\nB\nA\nA\nA\nB\nA\nB\nA\nA\nB\nB\nA\nB\nA\nA", prompt=False).stdout("EMPIEZA\nSACA A\nGANA A\nA 1 B 0\nSACA A\nGANA B\nA 1 B 0\nSACA B\nGANA B\nA 1 B 1\nSACA B\nGANA A\nA 1 B 1\nSACA A\nGANA A\nA 2 B 1\nSACA A\nGANA A\nA 3 B 1\nSACA A\nGANA B\nA 3 B 1\nSACA B\nGANA A\nA 3 B 1\nSACA A\nGANA B\nA 3 B 1\nSACA B\nGANA A\nA 3 B 1\nSACA A\nGANA A\nA 4 B 1\nSACA A\nGANA B\nA 4 B 1\nSACA B\nGANA B\nA 4 B 2\nSACA B\nGANA A\nA 4 B 2\nSACA A\nGANA B\nA 4 B 2\nSACA B\nGANA A\nA 4 B 2\nSACA A\nGANA A\nA 5 B 2\nFINAL", regex=False).exit(0)
@check50.check()
def partida_28():
"""partida_28"""
check50.run("python3 volleyball.py").stdin("A\nB\nA\nB\nB\nA\nB\nA\nB\nB\nB\nA\nB\nA\nA\nA\nA\nB\nA\nA", prompt=False).stdout("EMPIEZA\nSACA A\nGANA A\nA 1 B 0\nSACA A\nGANA B\nA 1 B 0\nSACA B\nGANA A\nA 1 B 0\nSACA A\nGANA B\nA 1 B 0\nSACA B\nGANA B\nA 1 B 1\nSACA B\nGANA A\nA 1 B 1\nSACA A\nGANA B\nA 1 B 1\nSACA B\nGANA A\nA 1 B 1\nSACA A\nGANA B\nA 1 B 1\nSACA B\nGANA B\nA 1 B 2\nSACA B\nGANA B\nA 1 B 3\nSACA B\nGANA A\nA 1 B 3\nSACA A\nGANA B\nA 1 B 3\nSACA B\nGANA A\nA 1 B 3\nSACA A\nGANA A\nA 2 B 3\nSACA A\nGANA A\nA 3 B 3\nSACA A\nGANA A\nA 4 B 3\nSACA A\nGANA B\nA 4 B 3\nSACA B\nGANA A\nA 4 B 3\nSACA A\nGANA A\nA 5 B 3\nFINAL", regex=False).exit(0)
@check50.check()
def partida_29():
"""partida_29"""
check50.run("python3 volleyball.py").stdin("B\nB\nA\nB\nA\nA\nA\nB\nA\nB\nA\nB\nB\nA\nB\nB\nA\nB\nA\nB\nB\nA\nA\nA\nA\nB\nB\nB\nA\nA\nA\nB\nB\nB\nB", prompt=False).stdout("EMPIEZA\nSACA A\nGANA B\nA 0 B 0\nSACA B\nGANA B\nA 0 B 1\nSACA B\nGANA A\nA 0 B 1\nSACA A\nGANA B\nA 0 B 1\nSACA B\nGANA A\nA 0 B 1\nSACA A\nGANA A\nA 1 B 1\nSACA A\nGANA A\nA 2 B 1\nSACA A\nGANA B\nA 2 B 1\nSACA B\nGANA A\nA 2 B 1\nSACA A\nGANA B\nA 2 B 1\nSACA B\nGANA A\nA 2 B 1\nSACA A\nGANA B\nA 2 B 1\nSACA B\nGANA B\nA 2 B 2\nSACA B\nGANA A\nA 2 B 2\nSACA A\nGANA B\nA 2 B 2\nSACA B\nGANA B\nA 2 B 3\nSACA B\nGANA A\nA 2 B 3\nSACA A\nGANA B\nA 2 B 3\nSACA B\nGANA A\nA 2 B 3\nSACA A\nGANA B\nA 2 B 3\nSACA B\nGANA B\nA 2 B 4\nSACA B\nGANA A\nA 2 B 4\nSACA A\nGANA A\nA 3 B 4\nSACA A\nGANA A\nA 4 B 4\nSACA A\nGANA A\nA 5 B 4\nSACA A\nGANA B\nA 5 B 4\nSACA B\nGANA B\nA 5 B 5\nSACA B\nGANA B\nA 5 B 6\nSACA B\nGANA A\nA 5 B 6\nSACA A\nGANA A\nA 6 B 6\nSACA A\nGANA A\nA 7 B 6\nSACA A\nGANA B\nA 7 B 6\nSACA B\nGANA B\nA 7 B 7\nSACA B\nGANA B\nA 7 B 8\nSACA B\nGANA B\nA 7 B 9\nFINAL", regex=False).exit()
| 117.119205
| 1,087
| 0.678993
| 4,747
| 17,685
| 2.516958
| 0.011165
| 0.120522
| 0.220957
| 0.125544
| 0.968949
| 0.967359
| 0.967024
| 0.948945
| 0.938735
| 0.914881
| 0
| 0.079777
| 0.157252
| 17,685
| 151
| 1,087
| 117.119205
| 0.721887
| 0.018038
| 0
| 0.32967
| 0
| 0.604396
| 0.795888
| 0.072839
| 0
| 0
| 0
| 0
| 0
| 1
| 0.32967
| true
| 0
| 0.010989
| 0
| 0.340659
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 13
|
96774631e85032457eb5a0fd2bbc4b78ca60deb2
| 195
|
py
|
Python
|
attention_generator/layers/__init__.py
|
wenhuchen/ethz-bootstrapped-captioner
|
ee68bbf2bd2f1ec4d2e4163c6ec794005a4a8f1c
|
[
"BSD-3-Clause"
] | 6
|
2016-12-05T21:27:30.000Z
|
2018-07-26T18:19:33.000Z
|
attention_generator/layers/__init__.py
|
wenhuchen/ethz-bootstrapped-captioner
|
ee68bbf2bd2f1ec4d2e4163c6ec794005a4a8f1c
|
[
"BSD-3-Clause"
] | 3
|
2016-12-22T07:50:29.000Z
|
2018-05-03T10:43:21.000Z
|
attention_generator/layers/__init__.py
|
wenhuchen/ethz-bootstrapped-captioner
|
ee68bbf2bd2f1ec4d2e4163c6ec794005a4a8f1c
|
[
"BSD-3-Clause"
] | 3
|
2017-07-23T12:50:43.000Z
|
2018-04-22T11:26:53.000Z
|
from lstm_cond_nox_layer import lstm_cond_nox_layer, param_init_lstm_cond_nox
from lstm_cond_layer import lstm_cond_layer, param_init_lstm_cond
from lstm_layer import lstm_layer, param_init_lstm
| 48.75
| 77
| 0.907692
| 36
| 195
| 4.333333
| 0.222222
| 0.307692
| 0.211538
| 0.346154
| 0.282051
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 195
| 3
| 78
| 65
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
9679da78c5f9f20082522fa3ced12c562d7bbb9e
| 19,904
|
py
|
Python
|
testscripts/RDKB/component/TAD/TS_TAD_CheckCPUProcAnalyzer.py
|
rdkcmf/rdkb-tools-tdkb
|
9f9c3600cd701d5fc90ac86a6394ebd28d49267e
|
[
"Apache-2.0"
] | null | null | null |
testscripts/RDKB/component/TAD/TS_TAD_CheckCPUProcAnalyzer.py
|
rdkcmf/rdkb-tools-tdkb
|
9f9c3600cd701d5fc90ac86a6394ebd28d49267e
|
[
"Apache-2.0"
] | null | null | null |
testscripts/RDKB/component/TAD/TS_TAD_CheckCPUProcAnalyzer.py
|
rdkcmf/rdkb-tools-tdkb
|
9f9c3600cd701d5fc90ac86a6394ebd28d49267e
|
[
"Apache-2.0"
] | null | null | null |
##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2020 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version='1.0' encoding='utf-8'?>
<xml>
<id></id>
<!-- Do not edit id. This will be auto filled while exporting. If you are adding a new script keep the id empty -->
<version>4</version>
<!-- Do not edit version. This will be auto incremented while updating. If you are adding a new script you can keep the vresion as 1 -->
<name>TS_TAD_CheckCPUProcAnalyzer</name>
<!-- If you are adding a new script you can specify the script name. Script Name should be unique same as this file name with out .py extension -->
<primitive_test_id> </primitive_test_id>
<!-- Do not change primitive_test_id if you are editing an existing script. -->
<primitive_test_name>TADstub_Get</primitive_test_name>
<!-- -->
<primitive_test_version>3</primitive_test_version>
<!-- -->
<status>FREE</status>
<!-- -->
<synopsis>To check if cpuprocanalyzer process is running after enabling Device.SelfHeal.X_RDK_CPUProcAnalyzer_Enable</synopsis>
<!-- -->
<groups_id />
<!-- -->
<execution_time>20</execution_time>
<!-- -->
<long_duration>false</long_duration>
<!-- -->
<advanced_script>false</advanced_script>
<!-- execution_time is the time out time for test execution -->
<remarks></remarks>
<!-- Reason for skipping the tests if marked to skip -->
<skip>false</skip>
<!-- -->
<box_types>
<box_type>Broadband</box_type>
<!-- -->
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
<!-- -->
</rdk_versions>
<test_cases>
<test_case_id>TC_TAD_81</test_case_id>
<test_objective>To check if cpuprocanalyzer process is running after enabling Device.SelfHeal.X_RDK_CPUProcAnalyzer_Enable</test_objective>
<test_type>Positive</test_type>
<test_setup>Broadband</test_setup>
<pre_requisite>1.Ccsp Components in DUT should be in a running state that includes component under test Cable Modem
2.TDK Agent should be in running state or invoke it through StartTdk.sh script</pre_requisite>
<api_or_interface_used>TDKB_TR181Stub_Get
TDKB_TR181Stub_SetOnly
</api_or_interface_used>
<input_parameters>Device.SelfHeal.X_RDK_CPUProcAnalyzer_Enable</input_parameters>
<automation_approch>1.Load the module
2.Set the value to true "sysevent set UPLOAD_LOGS_VAL_DCM true" if it is false.
3.Trigger the process by enabling Device.SelfHeal.X_RDK_CPUProcAnalyzer_Enable
4.Check the process ps | grep -i /usr/bin/cpuprocanalyzer
5.Verify no Error messages are seen in /rdklogs/logs/CPUPROCANALYZERlog.txt.0
6.Unload the Module</automation_approch>
<expected_output>After enabling Device.SelfHeal.X_RDK_CPUProcAnalyzer_Enable cpuprocanalyzer process should be running</expected_output>
<priority>High</priority>
<test_stub_interface>TAD</test_stub_interface>
<test_script>TS_TAD_CheckCPUProcAnalyzer</test_script>
<skipped>No</skipped>
<release_version>M83</release_version>
<remarks>None</remarks>
</test_cases>
<script_tags />
</xml>
'''
# use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
from time import sleep;
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("sysutil","1");
obj1= tdklib.TDKScriptingLibrary("tdkbtr181","1");
#IP and Port of box, No need to change,
#This will be replaced with corresponding DUT Ip and port while executing script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_TAD_CheckCPUProcAnalyzer');
obj1.configureTestCase(ip,port,'TS_TAD_CheckCPUProcAnalyzer');
#Get the result of connection with test component and DUT
loadmodulestatus=obj.getLoadModuleResult();
loadmodulestatus1=obj1.getLoadModuleResult();
if "SUCCESS" in loadmodulestatus.upper() and "SUCCESS" in loadmodulestatus1.upper():
#Set the result status of execution
obj.setLoadModuleStatus("SUCCESS");
obj1.setLoadModuleStatus("SUCCESS");
tdkTestObj = obj1.createTestStep('TDKB_TR181Stub_Get');
tdkTestObj.addParameter("ParamName","Device.SelfHeal.X_RDK_CPUProcAnalyzer_Enable");
expectedresult="SUCCESS";
#Execute the test case in DUT
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult :
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 1: Get the CPUProc Analyzer Enable status";
print "EXPECTED RESULT 1: Should get the CPUProc Analyzer Enable status";
print "ACTUAL RESULT 1: CPUProc Analyzer Enable status is:",details
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj = obj.createTestStep('ExecuteCmd');
cmd= "sysevent get UPLOAD_LOGS_VAL_DCM";
expectedresult="SUCCESS";
tdkTestObj.addParameter("command",cmd);
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
default = tdkTestObj.getResultDetails().strip().replace("\\n", "");
if expectedresult in actualresult and default !="":
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 2: Get the UPLOAD_LOGS_VAL_DCM value";
print "EXPECTED RESULT 2: Should get the UPLOAD_LOGS_VAL_DCM value";
print "ACTUAL RESULT 2: UPLOAD_LOGS_VAL_DCM status is:",default
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj = obj.createTestStep('ExecuteCmd');
cmd= "sysevent set UPLOAD_LOGS_VAL_DCM true";
expectedresult="SUCCESS";
tdkTestObj.addParameter("command",cmd);
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails().strip().replace("\\n", "");
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 3: Set the UPLOAD_LOGS_VAL_DCM to true";
print "EXPECTED RESULT 3 : Should set the UPLOAD_LOGS_VAL_DCM value to true";
print "ACTUAL RESULT 3:",details
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj = obj.createTestStep('ExecuteCmd');
cmd= "sysevent get UPLOAD_LOGS_VAL_DCM";
expectedresult="SUCCESS";
tdkTestObj.addParameter("command",cmd);
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails().strip().replace("\\n", "");
if expectedresult in actualresult and details =="true":
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 4: Check if UPLOAD_LOGS_VAL_DCM is true";
print "EXPECTED RESULT 4: Should get the UPLOAD_LOGS_VAL_DCM as true";
print "ACTUAL RESULT 4: UPLOAD_LOGS_VAL_DCM status is:",details
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj = obj1.createTestStep('TDKB_TR181Stub_SetOnly');
tdkTestObj.addParameter("ParamName","Device.SelfHeal.X_RDK_CPUProcAnalyzer_Enable");
tdkTestObj.addParameter("ParamValue","true");
tdkTestObj.addParameter("Type","bool");
expectedresult="SUCCESS";
#Execute the test case in DUT
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 5: Enable CPUProc Analyzer";
print "EXPECTED RESULT 5: Should enable CPUProc Analyzer";
print "ACTUAL RESULT 5:",details
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
sleep(10);
tdkTestObj = obj.createTestStep('ExecuteCmd');
cmd= "pidof cpuprocanalyzer";
expectedresult="SUCCESS";
tdkTestObj.addParameter("command",cmd);
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails().strip().replace("\\n", "");
if expectedresult in actualresult and details != "":
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 6: Check if cpuprocanalyzer process is running";
print "EXPECTED RESULT 6: cpuprocanalyzer process should be running";
print "ACTUAL RESULT 6:pidof cpuprocanalyzer is :",details
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj = obj.createTestStep('ExecuteCmd');
cmd= "grep -rin \"error\" /rdklogs/logs/CPUPROCANALYZERlog.txt.0";
expectedresult="SUCCESS";
tdkTestObj.addParameter("command",cmd);
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails().strip().replace("\\n", "");
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 7: Check if any Error mesages are present in CPUPROCANALYZERlog.txt.0";
print "EXPECTED RESULT 7: No Error messages should be present in CPUPROCANALYZERlog.txt.0";
print "ACTUAL RESULT 7:",details
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 7: Check if any Error mesages are present in CPUPROCANALYZERlog.txt.0";
print "EXPECTED RESULT 7: No Error messages should be present in CPUPROCANALYZERlog.txt.0";
print "ACTUAL RESULT 7:",details
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 6: Check if cpuprocanalyzer process is running";
print "EXPECTED RESULT 6: cpuprocanalyzer process should be running";
print "ACTUAL RESULT 6:pidof cpuprocanalyzer is :",details
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
#Revert the Value
tdkTestObj = obj1.createTestStep('TDKB_TR181Stub_SetOnly');
tdkTestObj.addParameter("ParamName","Device.SelfHeal.X_RDK_CPUProcAnalyzer_Enable");
tdkTestObj.addParameter("ParamValue","false");
tdkTestObj.addParameter("Type","bool");
expectedresult="SUCCESS";
#Execute the test case in DUT
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 7: Disable the CPUProcAnalyzer";
print "EXPECTED RESULT 7:Should disable the CPUProcAnalyzer";
print "ACTUAL RESULT 7:",details
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj = obj.createTestStep('ExecuteCmd');
cmd= "sysevent set UPLOAD_LOGS_VAL_DCM %s" %default;
expectedresult="SUCCESS";
tdkTestObj.addParameter("command",cmd);
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails().strip().replace("\\n", "");
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 8: Revert the UPLOAD_LOGS_VAL_DCM to %s" %default;
print "EXPECTED RESULT 8 : Should revert the UPLOAD_LOGS_VAL_DCM to previous";
print "ACTUAL RESULT 8:Revert success";
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
#rebooting the device to stop the running process
print "**Device is going for a reboot to stop the running process as a part of revert operation **";
obj.initiateReboot();
sleep(300);
tdkTestObj = obj.createTestStep('ExecuteCmd');
cmd= "pidof cpuprocanalyzer";
expectedresult="SUCCESS";
tdkTestObj.addParameter("command",cmd);
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails().strip().replace("\\n", "");
if expectedresult in actualresult and details == "":
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 9:Check if cpuprocanalyzer process is running";
print "EXPECTED RESULT 9 : cpuprocanalyzer process should not be running";
print "ACTUAL RESULT 9: ",details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 9:Check if cpuprocanalyzer process is running";
print "EXPECTED RESULT 9 : cpuprocanalyzer process should not be running";
print "ACTUAL RESULT 9: ",details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 8: Revert the UPLOAD_LOGS_VAL_DCM to %s" %default;
print "EXPECTED RESULT 8 : Should revert the UPLOAD_LOGS_VAL_DCM to previous";
print "ACTUAL RESULT 8:Revert failed";
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 7: Disable the CPUProcAnalyzer";
print "EXPECTED RESULT 7:Should disable the CPUProcAnalyzer";
print "ACTUAL RESULT 7:",details
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 5: Enable CPUProc Analyzer";
print "EXPECTED RESULT 5: Should enable CPUProc Analyzer";
print "ACTUAL RESULT 5:",details
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 4: Check if UPLOAD_LOGS_VAL_DCM is true";
print "EXPECTED RESULT 4: Should get the UPLOAD_LOGS_VAL_DCM as true";
print "ACTUAL RESULT 4: UPLOAD_LOGS_VAL_DCM status is:",details
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 3: Set the UPLOAD_LOGS_VAL_DCM to true";
print "EXPECTED RESULT 3 : Should set the UPLOAD_LOGS_VAL_DCM value to true";
print "ACTUAL RESULT 3:",details
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 2: Get the UPLOAD_LOGS_VAL_DCM value";
print "EXPECTED RESULT 2: Should get the UPLOAD_LOGS_VAL_DCM value";
print "ACTUAL RESULT 2: UPLOAD_LOGS_VAL_DCM status is:",default
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 1: Get the CPUProc Analyzer Enable status";
print "EXPECTED RESULT 1: Should get the CPUProc Analyzer Enable status";
print "ACTUAL RESULT 1: CPUProc Analyzer Enable status is:",details
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
obj1.unloadModule("tdkbtr181");
obj.unloadModule("sysutil");
else:
print "Failed to load module";
obj.setLoadModuleStatus("FAILURE");
| 54.831956
| 149
| 0.594453
| 2,021
| 19,904
| 5.762989
| 0.146957
| 0.032455
| 0.027904
| 0.034344
| 0.746716
| 0.733923
| 0.715721
| 0.70662
| 0.702155
| 0.690993
| 0
| 0.009724
| 0.317976
| 19,904
| 362
| 150
| 54.983425
| 0.84825
| 0.117363
| 0
| 0.840376
| 0
| 0
| 0.321034
| 0.025294
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.00939
| null | null | 0.384977
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
967ff84ccc04452970f5b3435f68ee9b828d747e
| 44
|
py
|
Python
|
tkmvvm/model.py
|
jannesh/tkmvvm
|
339251d1af7abe18bd98628cf0e4efea5594189d
|
[
"MIT"
] | 21
|
2018-04-26T08:01:50.000Z
|
2022-03-22T05:32:25.000Z
|
tkmvvm/model.py
|
jannesh/tkmvvm
|
339251d1af7abe18bd98628cf0e4efea5594189d
|
[
"MIT"
] | null | null | null |
tkmvvm/model.py
|
jannesh/tkmvvm
|
339251d1af7abe18bd98628cf0e4efea5594189d
|
[
"MIT"
] | 6
|
2018-09-26T16:55:21.000Z
|
2021-12-15T15:33:48.000Z
|
import abc
class Model(abc.ABC):
pass
| 7.333333
| 21
| 0.659091
| 7
| 44
| 4.142857
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 44
| 5
| 22
| 8.8
| 0.878788
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
969191f117a9a5abdfbc4e13e6d70e00c7f22e14
| 9,214
|
py
|
Python
|
vis_services/tests/test_endpoints.py
|
spacemansteve/vis-services
|
9e49f8ab9f359aa135e77426e145c2571af2275d
|
[
"MIT"
] | null | null | null |
vis_services/tests/test_endpoints.py
|
spacemansteve/vis-services
|
9e49f8ab9f359aa135e77426e145c2571af2275d
|
[
"MIT"
] | 14
|
2015-04-15T00:29:42.000Z
|
2019-05-04T01:41:20.000Z
|
vis_services/tests/test_endpoints.py
|
spacemansteve/vis-services
|
9e49f8ab9f359aa135e77426e145c2571af2275d
|
[
"MIT"
] | 8
|
2015-04-06T12:12:24.000Z
|
2021-05-04T21:41:35.000Z
|
import sys
import os
PROJECT_HOME = os.path.abspath(os.path.join(os.path.dirname(__file__),'../../'))
sys.path.append(PROJECT_HOME)
from flask_testing import TestCase
from flask import request
from flask import url_for, Flask
import unittest
import requests
import time
from vis_services import app
import json
import httpretty
STUBDATA_DIR = PROJECT_HOME + "/vis_services/tests/stubdata"
solr_data = json.load(open(STUBDATA_DIR + "/test_input/paper_network_before_groups_func_large.json"))
wordcloud = json.load(open(STUBDATA_DIR + "/test_output/wordcloud.json"))
class TestExpectedResults(TestCase):
'''Check if the service returns expected results'''
def create_app(self):
'''Create the wsgi application'''
app_ = app.create_app()
return app_
@httpretty.activate
def test_word_cloud_200(self):
'''test query for generating a word cloud'''
SOLRQUERY_URL = self.app.config.get("VIS_SERVICE_SOLR_PATH")
query_params = {'query': ['{"q": "author:\\"Henneken,E\\""}']}
httpretty.register_uri(
httpretty.GET, SOLRQUERY_URL,
content_type='application/json',
status=200,
body='%s'%json.dumps(solr_data))
r = self.client.post(
url_for('wordcloud'),
content_type='application/json',
data=json.dumps(query_params))
self.assertTrue(r.status_code == 200)
@httpretty.activate
def test_word_cloud_empty_request(self):
'''test query for generating a word cloud - empty request should throw 403'''
SOLRQUERY_URL = self.app.config.get("VIS_SERVICE_SOLR_PATH")
query_params = {}
httpretty.register_uri(
httpretty.GET, SOLRQUERY_URL,
content_type='application/json',
status=200,
body='%s'%json.dumps(solr_data))
r = self.client.post(
url_for('wordcloud'),
content_type='application/json',
data=json.dumps(query_params))
expected = {'Error Info': 'no data provided with request', 'Error': 'there was a problem with your request'}
self.assertEqual(r.status_code, 403)
self.assertEqual(r.json, expected)
@httpretty.activate
def test_word_cloud_wrong_solr_data(self):
'''test query for generating a word cloud - incorrect Solr request should throw 403'''
SOLRQUERY_URL = self.app.config.get("VIS_SERVICE_SOLR_PATH")
query_params = {'query': None}
httpretty.register_uri(
httpretty.GET, SOLRQUERY_URL,
content_type='application/json',
status=200,
body='%s'%json.dumps(solr_data))
r = self.client.post(
url_for('wordcloud'),
content_type='application/json',
data=json.dumps(query_params))
expected = {'Error Info': "couldn't decode query, it should be json-encoded before being sent (so double encoded)", 'Error': 'there was a problem with your request'}
self.assertEqual(r.status_code, 403)
@httpretty.activate
def test_word_cloud_solr_error(self):
'''test query for generating a word cloud - Solr comes back with an HTTP error code'''
SOLRQUERY_URL = self.app.config.get("VIS_SERVICE_SOLR_PATH")
query_params = {'query': ['{"q": "author:\\"Henneken,E\\""}']}
httpretty.register_uri(
httpretty.GET, SOLRQUERY_URL,
content_type='application/json',
status=500,
body='Oops. Something went wrong!')
r = self.client.post(
url_for('wordcloud'),
content_type='application/json',
data=json.dumps(query_params))
expected = {'Error Info': 'Oops. Something went wrong!', 'Error': 'There was a connection error. Please try again later'}
self.assertEqual(r.status_code, 500)
self.assertEqual(r.json, expected)
@httpretty.activate
def test_author_network_200(self):
'''test query for generating an author network'''
SOLRQUERY_URL = self.app.config.get("VIS_SERVICE_SOLR_PATH")
query_params = {'query': ['{"q": "author:\\"Henneken,E\\""}']}
httpretty.register_uri(
httpretty.GET, SOLRQUERY_URL,
content_type='application/json',
status=200,
body='%s'%json.dumps(solr_data))
r = self.client.post(
url_for('authornetwork'),
content_type='application/json',
data=json.dumps(query_params))
self.assertTrue(r.status_code == 200)
@httpretty.activate
def test_author_network_solr_error(self):
'''test query for generating an author network - Solr comes back with an HTTP error code'''
SOLRQUERY_URL = self.app.config.get("VIS_SERVICE_SOLR_PATH")
query_params = {'query': ['{"q": "author:\\"Henneken,E\\""}']}
httpretty.register_uri(
httpretty.GET, SOLRQUERY_URL,
content_type='application/json',
status=500,
body='Oops. Something went wrong!')
r = self.client.post(
url_for('authornetwork'),
content_type='application/json',
data=json.dumps(query_params))
expected = {'Error Info': 'Oops. Something went wrong!', 'Error': 'There was a connection error. Please try again later'}
self.assertEqual(r.status_code, 500)
self.assertEqual(r.json, expected)
@httpretty.activate
def test_author_network_data_error(self):
'''test query for generating an author network'''
SOLRQUERY_URL = self.app.config.get("VIS_SERVICE_SOLR_PATH")
query_params = {'bibcodes': [], 'query': ['{"q": "author:\\"Henneken,E\\""}']}
httpretty.register_uri(
httpretty.GET, SOLRQUERY_URL,
content_type='application/json',
status=200,
body='%s'%json.dumps(solr_data))
r = self.client.post(
url_for('authornetwork'),
content_type='application/json',
data=json.dumps(query_params))
expected = {'Error Info': 'Cannot send both bibcodes and query', 'Error': 'there was a problem with your request'}
self.assertEqual(r.status_code, 403)
self.assertEqual(r.json, expected)
@httpretty.activate
def test_paper_network_200(self):
'''test query for generating a paper network'''
SOLRQUERY_URL = self.app.config.get("VIS_SERVICE_SOLR_PATH")
query_params = {'query': ['{"q": "author:\\"Henneken,E\\""}']}
httpretty.register_uri(
httpretty.GET, SOLRQUERY_URL,
content_type='application/json',
status=200,
body='%s'%json.dumps(solr_data))
r = self.client.post(
url_for('papernetwork'),
content_type='application/json',
data=json.dumps(query_params))
self.assertTrue(r.status_code == 200)
@httpretty.activate
def test_paper_network_solr_error(self):
'''test query for generating a paper network - Solr comes back with an HTTP error code'''
SOLRQUERY_URL = self.app.config.get("VIS_SERVICE_SOLR_PATH")
query_params = {'query': ['{"q": "author:\\"Henneken,E\\""}']}
httpretty.register_uri(
httpretty.GET, SOLRQUERY_URL,
content_type='application/json',
status=500,
body='Oops. Something went wrong!')
r = self.client.post(
url_for('papernetwork'),
content_type='application/json',
data=json.dumps(query_params))
expected = {'Error Info': 'Oops. Something went wrong!', 'Error': 'There was a connection error. Please try again later'}
self.assertEqual(r.status_code, 500)
self.assertEqual(r.json, expected)
@httpretty.activate
def test_paper_network_data_error(self):
'''test query for generating an author network'''
SOLRQUERY_URL = self.app.config.get("VIS_SERVICE_SOLR_PATH")
query_params = {'bibcodes': [], 'query': ['{"q": "author:\\"Henneken,E\\""}']}
httpretty.register_uri(
httpretty.GET, SOLRQUERY_URL,
content_type='application/json',
status=200,
body='%s'%json.dumps(solr_data))
r = self.client.post(
url_for('papernetwork'),
content_type='application/json',
data=json.dumps(query_params))
expected = {'Error Info': 'Cannot send both bibcodes and query', 'Error': 'there was a problem with your request'}
self.assertEqual(r.status_code, 403)
self.assertEqual(r.json, expected)
| 44.728155
| 173
| 0.588344
| 1,030
| 9,214
| 5.07767
| 0.129126
| 0.045889
| 0.08413
| 0.099426
| 0.874379
| 0.874379
| 0.845889
| 0.844168
| 0.797514
| 0.790057
| 0
| 0.011533
| 0.294226
| 9,214
| 205
| 174
| 44.946341
| 0.792711
| 0.074886
| 0
| 0.803468
| 0
| 0
| 0.21789
| 0.062389
| 0
| 0
| 0
| 0
| 0.092486
| 1
| 0.063584
| false
| 0
| 0.063584
| 0
| 0.138728
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
96aaa4e35297b33ea17bd7823e3ba277177a838a
| 23,853
|
py
|
Python
|
colicycle/colicycle/coli_simulation.py
|
guiwitz/DoubleAdderArticle
|
d3f68ef22186ee096aaca554c346e814ebc35b1b
|
[
"BSD-3-Clause"
] | null | null | null |
colicycle/colicycle/coli_simulation.py
|
guiwitz/DoubleAdderArticle
|
d3f68ef22186ee096aaca554c346e814ebc35b1b
|
[
"BSD-3-Clause"
] | null | null | null |
colicycle/colicycle/coli_simulation.py
|
guiwitz/DoubleAdderArticle
|
d3f68ef22186ee096aaca554c346e814ebc35b1b
|
[
"BSD-3-Clause"
] | 2
|
2019-12-02T16:34:37.000Z
|
2020-04-21T18:25:50.000Z
|
"""
This module allows to simulate E.coli cell cycles following
different models.
"""
# Author: Guillaume Witz, Biozentrum Basel, 2019
# License: MIT License
import numpy as np
import pandas as pd
import copy
def simul_doubleadder(nbstart, run_time, params, name):
"""Simulate double-adder model
Parameters
----------
nbstart : int
number of cells to simulate
run_time: int
number of iterations
params: dict
experimental parameters
name: str
name of runs
Returns
-------
cells : list of dict
Each element of the list is a cell cycle defined by a
dictionary of features (Lb, Ld etc.)
"""
#initialize birth length and growth rate
L0 = np.exp(np.random.normal(params['Lb_logn_mu'],params['Lb_logn_sigma'],size=nbstart))
tau = np.exp(np.random.normal(params['tau_logn_mu'], params['tau_logn_sigma'], size=nbstart))
#standard value of growth rate. Used to scale the noise appropriately
normval = np.exp(params['tau_logn_mu'])
#initialize the inter-initiation adder (exact procedure doesn't really matter here)
#as all cells start with n_ori = 1, there's no initiation to division adder running
DLi = np.random.normal(params['DLi_mu'], params['DLi_sigma'], size=nbstart)
#initialize cell infos as a list of dictionaries. All cells start with n_ori = 1
cells = {}
for x in range(nbstart):
dict1 = {'Lb': L0[x],'L':L0[x], 'gen': str(x), 'tau':tau[x], 'Lt': [[0,L0[x],1]], 'finish': False,
'born':0, 'DLi': [[0,DLi[x]]],'DLdLi': [],'Li':[],'Ti':[],
'numori':1,'Ld':np.nan, 'numori_born':1,'name': name,'mLi':np.nan, 'mLd':np.nan, 'rfact':0.5}
cells[str(x)] = dict1
for t in range(run_time):
divide_cell = []
for x in cells:
if cells[x]['finish']==False:
#update cell size
cells[x]['L'] = cells[x]['L']*(2**(1/cells[x]['tau']))
cells[x]['Lt'].append([t,cells[x]['L'],cells[x]['numori']])
#increment the most recent inter-initiation adder
cells[x]['DLi'][-1][0] = cells[x]['DLi'][-1][0]+(cells[x]['Lt'][-1][1]-cells[x]['Lt'][-2][1])
#if at least one volume counter since RI is running, increment all of them
if len(cells[x]['DLdLi'])>0:
cells[x]['DLdLi'] = [[k[0]+(cells[x]['Lt'][-1][1]-cells[x]['Lt'][-2][1]),k[1]] for k in cells[x]['DLdLi']]
#if a volume counter has reached its limit divide
if len(cells[x]['DLdLi'])>0:
if (cells[x]['numori']>1) and (cells[x]['DLdLi'][0][0]>cells[x]['DLdLi'][0][1]):
cells[x]['finish'] = True#tag cell as finished
cells[x]['Ld'] = cells[x]['L']
cells[x]['Td'] = len(cells[x]['Lt'])
cells[x]['Td_abs'] = t
cells[x]['d_Ld_Lb'] = cells[x]['L']-cells[x]['Lb']
#assign the correct adders (the oldest ones) to the cell that just divided
cells[x]['final_DLdLi'] = cells[x]['DLdLi'][0][1]
cells[x]['final_DLi'] = cells[x]['DLi'][0][1]
cells[x]['final_Li'] = cells[x]['Li'][0]
#for each accumulated variable suppress the oldest one
if len(cells[x]['DLdLi'])==1:
cells[x]['DLdLi'] = []
else:
cells[x]['DLdLi'].pop(0)
if len(cells[x]['DLi'])==1:
cells[x]['DLi'] = []
else:
cells[x]['DLi'].pop(0)
if len(cells[x]['Li'])==1:
cells[x]['Li'] = []
else:
cells[x]['Li'].pop(0)
divide_cell.append(x)
#if the added volume has reached its limit make new RI
if cells[x]['DLi'][-1][0]>cells[x]['DLi'][-1][1]:
#duplicate origin
cells[x]['numori'] = cells[x]['numori']*2
#define new adder
newdli = cells[x]['numori']*np.random.normal(params['DLi_mu'], params['DLi_sigma'])
cells[x]['DLi'].append([0,newdli])
cells[x]['Li'].append(cells[x]['L'])
#temporarilly store Ti as absolute time
cells[x]['Ti'].append(t)
#define new adder
new_dv = cells[x]['numori']*np.exp(np.random.normal(params['DLdLi_logn_mu'], params['DLdLi_logn_sigma']))
cells[x]['DLdLi'].append([0,new_dv])
for x in divide_cell:
#Draw division ratio
rfact = 1/(1+np.random.normal(1,params['div_ratio']))
#Create new cell using mother information
new_tau = np.exp(correlated_normal(np.log(cells[x]['tau']), params['tau_logn_mu'], params['tau_logn_sigma'], params['tau_corr']))
new_Lb = copy.deepcopy(rfact*cells[x]['L'])
new_L = copy.deepcopy(rfact*cells[x]['L'])
new_Lt = [[t,copy.deepcopy(rfact*cells[x]['L']),copy.deepcopy(cells[x]['numori'])/2]]
new_DLi = copy.deepcopy([[rfact*y[0],rfact*y[1]] for y in cells[x]['DLi']])
new_DLdLi = copy.deepcopy([[rfact*y[0],rfact*y[1]] for y in cells[x]['DLdLi']])
new_Li = copy.deepcopy([rfact*y for y in cells[x]['Li']])
new_numori = copy.deepcopy(cells[x]['numori'])/2
mother_initL = copy.deepcopy(cells[x]['final_Li'])/2
mother_Ld = copy.deepcopy(cells[x]['Ld'])
dict1 = {'Lb': new_Lb,'L': new_L, 'gen': str(x)+'B', 'tau': new_tau,'Lt': new_Lt, 'finish': False,
'born':t, 'DLi': new_DLi,'DLdLi': new_DLdLi,'Li':new_Li,'Ti':[], 'numori':new_numori,
'numori_born':copy.deepcopy(new_numori),'Ld':np.nan, 'name': name,'mLi': mother_initL, 'mLd':mother_Ld,
'rfact':rfact}
cells[x+'B'] = copy.deepcopy(dict1)
#keep oldest timer as final timer and give daughter remaining ones. Caclulate initiation time based on cell birth.
TL_S_val = copy.deepcopy(cells[x]['Ti'].pop(0))
cells[x+'B']['Ti'] = copy.deepcopy(cells[x]['Ti'])
cells[x]['Ti'] = TL_S_val-copy.deepcopy(cells[x]['born'])
for x in cells:
if len(cells[x]['Li'])>0:
cells[x]['Li'] = np.nan
return cells
def simul_growth_dinter_classicadder(nbstart, run_time, params, name):
"""Simulate a model with inter-initiation per origin adder and
classic division adder (Ld = Lb+dL)
Parameters
----------
nbstart : int
number of cells to simulate
run_time: int
number of iterations
params: dict
experimental parameters
name: str
name of runs
Returns
-------
cells : list of dict
Each element of the list is a cell cycle defined by a
dictionary of features (Lb, Ld etc.)
"""
#initialize birth length and growth rate
L0 = np.exp(np.random.normal(params['Lb_logn_mu'],params['Lb_logn_sigma'],size=nbstart))
tau = np.exp(np.random.normal(params['tau_logn_mu'], params['tau_logn_sigma'], size=nbstart))
#standard value of growth rate. Used to scale the noise appropriately
normval = np.exp(params['tau_logn_mu'])
#initialize the inter-initiation adder (exact procedure doesn't really matter here)
#as all cells start with n_ori = 1, there's no initiation to division adder running
DLi = np.random.normal(params['DLi_mu'], params['DLi_sigma'], size=nbstart)
#initialize classic adder
dL = np.random.normal(params['dL_mu'], params['dL_sigma'], size=nbstart)
#initialize cell infos as a list of dictionaries. All cells start with n_ori = 1
cells = {}
for x in range(nbstart):
dict1 = {'Lb': L0[x],'L':L0[x], 'gen': str(x), 'tau':tau[x], 'Lt': [[0,L0[x],1]], 'finish': False,
'born':0, 'DLi': [[0,DLi[x]],[0,DLi[x]]],'DLdLi': [[0,1]],'Li':[0],'Ti':[0], 'dL': [0,dL[x]],
'numori':2,'Ld':np.nan, 'numori_born':1,'name': name,'mLi':np.nan, 'mLd':np.nan, 'rfact':0.5}
cells[str(x)] = dict1
for t in range(run_time):
divide_cell = []
for x in cells:
if cells[x]['finish']==False:
#update cell size
cells[x]['L'] = cells[x]['L']*(2**(1/cells[x]['tau']))
cells[x]['Lt'].append([t,cells[x]['L'],cells[x]['numori']])
#increment the most recent inter-initiation adder
cells[x]['DLi'][-1][0] = cells[x]['DLi'][-1][0]+(cells[x]['Lt'][-1][1]-cells[x]['Lt'][-2][1])
#increment adder
cells[x]['dL'][0] = cells[x]['dL'][0]+(cells[x]['Lt'][-1][1]-cells[x]['Lt'][-2][1])
#if at least one volume counter since RI is running, increment all of them
if len(cells[x]['DLdLi'])>0:
cells[x]['DLdLi'] = [[k[0]+(cells[x]['Lt'][-1][1]-cells[x]['Lt'][-2][1]),k[1]] for k in cells[x]['DLdLi']]
if (cells[x]['numori']>1) and (cells[x]['dL'][0]>cells[x]['dL'][1]):
cells[x]['finish'] = True#tag cell as finished
cells[x]['Ld'] = cells[x]['L']
cells[x]['Td'] = len(cells[x]['Lt'])
cells[x]['Td_abs'] = t
cells[x]['d_Ld_Lb'] = cells[x]['L']-cells[x]['Lb']
#assign the correct adders (the oldest ones) to the cell that just divided
cells[x]['final_DLi'] = cells[x]['DLi'][0][1]
cells[x]['final_Li'] = cells[x]['Li'][0]
cells[x]['final_DLdLi'] = cells[x]['DLdLi'][0][0]
#for each accumulated variable suppress the oldest one
if len(cells[x]['DLi'])==1:
cells[x]['DLi'] = []
else:
cells[x]['DLi'].pop(0)
if len(cells[x]['Li'])==1:
cells[x]['Li'] = []
else:
cells[x]['Li'].pop(0)
if len(cells[x]['DLdLi'])==1:
cells[x]['DLdLi'] = []
else:
cells[x]['DLdLi'].pop(0)
divide_cell.append(x)
#if the added volume has reached its limit make new RI
if cells[x]['DLi'][-1][0]>cells[x]['DLi'][-1][1]:
#duplicate origin
cells[x]['numori'] = cells[x]['numori']*2
#Version where adder is noisy itself
newdli = cells[x]['numori']*np.random.normal(params['DLi_mu'], params['DLi_sigma'])
cells[x]['DLi'].append([0,newdli])
cells[x]['Li'].append(cells[x]['L'])
#temporarilly store TL_S as absolute time
cells[x]['Ti'].append(t)
cells[x]['DLdLi'].append([0,0])
for x in divide_cell:
#Draw division ratio
rfact = 1/(1+np.random.normal(1,params['div_ratio']))
#Create new cell using mother information
new_tau = np.exp(correlated_normal(np.log(cells[x]['tau']), params['tau_logn_mu'], params['tau_logn_sigma'], params['tau_corr']))
new_Lb = copy.deepcopy(rfact*cells[x]['L'])
new_L = copy.deepcopy(rfact*cells[x]['L'])
new_Lt = [[t,copy.deepcopy(rfact*cells[x]['L']),copy.deepcopy(cells[x]['numori'])/2]]
new_DLi = copy.deepcopy([[rfact*y[0],rfact*y[1]] for y in cells[x]['DLi']])
new_Li = copy.deepcopy([rfact*y for y in cells[x]['Li']])
new_numori = copy.deepcopy(cells[x]['numori'])/2
mother_initL = rfact*copy.deepcopy(cells[x]['final_Li'])
mother_Ld = copy.deepcopy(cells[x]['Ld'])
new_DLdLi = copy.deepcopy([[rfact*y[0],rfact*y[1]] for y in cells[x]['DLdLi']])
new_dL = np.random.normal(params['dL_mu'], params['dL_sigma'])
dict1 = {'Lb': new_Lb,'L': new_L, 'gen': str(x)+'B', 'tau': new_tau,'Lt': new_Lt, 'finish': False,
'born':t, 'DLi': new_DLi,'DLdLi': new_DLdLi,'Li':new_Li,'Ti':[], 'numori':new_numori,
'numori_born':copy.deepcopy(new_numori),'Ld':np.nan, 'name': name,'mLi': mother_initL, 'mLd':mother_Ld,
'rfact':rfact, 'dL': [0,new_dL]}
cells[x+'B'] = copy.deepcopy(dict1)
#keep oldest timer as final timer and give daughter remaining ones. Caclulate initiation time based on cell birth.
TL_S_val = copy.deepcopy(cells[x]['Ti'].pop(0))
cells[x+'B']['Ti'] = copy.deepcopy(cells[x]['Ti'])
cells[x]['Ti'] = TL_S_val-copy.deepcopy(cells[x]['born'])
for x in cells:
if len(cells[x]['Li'])>0:
cells[x]['Li'] = np.nan
return cells
def simul_growth_ho_amir(nbstart, run_time, params, name):
    """Simulate the Ho and Amir model (Front. in Microbiol. 2015) with inter-initiation per origin adder and
    timer from initiation to division
    Parameters
    ----------
    nbstart : int
        number of cells to simulate
    run_time: int
        number of iterations
    params: dict
        experimental parameters
    name: str
        name of runs
    Returns
    -------
    cells : list of dict
        Each element of the list is a cell cycle defined by a
        dictionary of features (Lb, Ld etc.)
    """
    #initialize birth length and growth rate
    # both are log-normal across the initial population
    L0 = np.exp(np.random.normal(params['Lb_logn_mu'],params['Lb_logn_sigma'],size=nbstart))
    tau = np.exp(np.random.normal(params['tau_logn_mu'], params['tau_logn_sigma'], size=nbstart))
    #standard value of growth rate. Used to scale the noise appropriately
    # NOTE(review): normval is never read below in this function — confirm whether it can be removed
    normval = np.exp(params['tau_logn_mu'])
    #initialize the inter-initiation adder (exact procedure doesn't really matter here)
    #as all cells start with n_ori = 1, there's no initiation to division adder running
    DLi = np.random.normal(params['DLi_mu'], params['DLi_sigma'], size=nbstart)
    #time from initiation to division
    # NOTE(review): timer mean/spread are hard-coded (in iteration units) while every
    # other model constant comes from params — confirm this is intentional
    tid_mu = 90
    tid_var = 5
    Tid = np.random.normal(tid_mu, tid_var, size=nbstart)
    #initialize cell infos as a list of dictionaries. All cells start with n_ori = 1
    cells = {}
    for x in range(nbstart):
        dict1 = {'Lb': L0[x],'L':L0[x], 'gen': str(x), 'tau':tau[x], 'Lt': [[0,L0[x],1]], 'finish': False,
                 'born':0, 'DLi': [[0,DLi[x]]],'DLdLi': [],'Li':[],'Ti':[],
                 'numori':1,'Ld':np.nan, 'numori_born':1,'name': name,'mLi':np.nan,
                 'mLd':np.nan, 'rfact':0.5, 'Tid': [[0,Tid[x]]]}
        cells[str(x)] = dict1
    for t in range(run_time):
        # cells that divide this iteration are collected and processed after the growth pass
        divide_cell = []
        for x in cells:
            if cells[x]['finish']==False:
                #update cell size
                # exponential growth: doubling time is tau iterations
                cells[x]['L'] = cells[x]['L']*(2**(1/cells[x]['tau']))
                cells[x]['Lt'].append([t,cells[x]['L'],cells[x]['numori']])
                #increment the most recent inter-initiation adder
                # each adder entry is [accumulated added length, threshold]
                cells[x]['DLi'][-1][0] = cells[x]['DLi'][-1][0]+(cells[x]['Lt'][-1][1]-cells[x]['Lt'][-2][1])
                #if at least one volume counter since RI is running, increment all of them
                if len(cells[x]['DLdLi'])>0:
                    cells[x]['DLdLi'] = [[k[0]+(cells[x]['Lt'][-1][1]-cells[x]['Lt'][-2][1]),k[1]] for k in cells[x]['DLdLi']]
                    # Tid entries are [elapsed iterations, timer threshold]
                    cells[x]['Tid'] = [[k[0]+1,k[1]] for k in cells[x]['Tid']]
                #if a volume counter has reached its limit divide
                if len(cells[x]['DLdLi'])>0:
                    # division requires more than one origin and the oldest initiation timer elapsed
                    if (cells[x]['numori']>1) and (cells[x]['Tid'][0][0]>cells[x]['Tid'][0][1]):
                        cells[x]['finish'] = True#tag cell as finished
                        cells[x]['Ld'] = cells[x]['L']
                        cells[x]['Td'] = len(cells[x]['Lt'])
                        cells[x]['Td_abs'] = t
                        cells[x]['d_Ld_Lb'] = cells[x]['L']-cells[x]['Lb']
                        #assign the correct adders (the oldest ones) to the cell that just divided
                        cells[x]['final_DLdLi'] = cells[x]['DLdLi'][0][0]
                        cells[x]['final_DLi'] = cells[x]['DLi'][0][1]
                        cells[x]['final_Li'] = cells[x]['Li'][0]
                        cells[x]['final_Tid'] = cells[x]['Tid'][0][1]
                        #for each accumulated variable suppress the oldest one
                        if len(cells[x]['DLdLi'])==1:
                            cells[x]['DLdLi'] = []
                        else:
                            cells[x]['DLdLi'].pop(0)
                        if len(cells[x]['Tid'])==1:
                            cells[x]['Tid'] = []
                        else:
                            cells[x]['Tid'].pop(0)
                        if len(cells[x]['DLi'])==1:
                            cells[x]['DLi'] = []
                        else:
                            cells[x]['DLi'].pop(0)
                        if len(cells[x]['Li'])==1:
                            cells[x]['Li'] = []
                        else:
                            cells[x]['Li'].pop(0)
                        divide_cell.append(x)
                #if the added volume has reached its limit make new RI
                if cells[x]['DLi'][-1][0]>cells[x]['DLi'][-1][1]:
                    #duplicate origin
                    cells[x]['numori'] = cells[x]['numori']*2
                    #Version where adder is noisy itself
                    # next adder threshold scales with the (doubled) number of origins
                    newdli = cells[x]['numori']*np.random.normal(params['DLi_mu'], params['DLi_sigma'])
                    cells[x]['DLi'].append([0,newdli])
                    cells[x]['Li'].append(cells[x]['L'])
                    #temporarilly store TL_S as absolute time
                    cells[x]['Ti'].append(t)
                    #Version where adder itself is noisy
                    new_dv = cells[x]['numori']*np.exp(np.random.normal(params['DLdLi_logn_mu'], params['DLdLi_logn_sigma']))
                    cells[x]['DLdLi'].append([0,new_dv])
                    # NOTE(review): size=1 makes the threshold a 1-element array, not a scalar
                    # (the comparison above still works elementwise) — confirm intended
                    cells[x]['Tid'].append([0,np.random.normal(tid_mu, tid_var, size=1)])
        for x in divide_cell:
            #Draw division ratio
            rfact = 1/(1+np.random.normal(1,params['div_ratio']))
            #Create new cell using mother information
            # growth rate is drawn correlated (in log space) with the mother's
            new_tau = np.exp(correlated_normal(np.log(cells[x]['tau']), params['tau_logn_mu'], params['tau_logn_sigma'], params['tau_corr']))
            # lengths and running adders are scaled by the division ratio
            new_Lb = copy.deepcopy(rfact*cells[x]['L'])
            new_L = copy.deepcopy(rfact*cells[x]['L'])
            new_Lt = [[t,copy.deepcopy(rfact*cells[x]['L']),copy.deepcopy(cells[x]['numori'])/2]]
            new_DLi = copy.deepcopy([[rfact*y[0],rfact*y[1]] for y in cells[x]['DLi']])
            new_DLdLi = copy.deepcopy([[rfact*y[0],rfact*y[1]] for y in cells[x]['DLdLi']])
            new_Tid = copy.deepcopy(cells[x]['Tid'])
            new_Li = copy.deepcopy([rfact*y for y in cells[x]['Li']])
            new_numori = copy.deepcopy(cells[x]['numori'])/2
            mother_initL = copy.deepcopy(cells[x]['final_Li'])/2
            mother_Ld = copy.deepcopy(cells[x]['Ld'])
            dict1 = {'Lb': new_Lb,'L': new_L, 'gen': str(x)+'B', 'tau': new_tau,'Lt': new_Lt, 'finish': False,
                    'born':t, 'DLi': new_DLi,'DLdLi': new_DLdLi,'Tid': new_Tid, 'Li':new_Li,'Ti':[], 'numori':new_numori,
                    'numori_born':copy.deepcopy(new_numori),'Ld':np.nan, 'name': name,'mLi': mother_initL, 'mLd':mother_Ld,
                    'rfact':rfact}
            # daughter inherits the mother's genealogy string with a 'B' suffix
            cells[x+'B'] = copy.deepcopy(dict1)
            #keep oldest timer as final timer and give daughter remaining ones. Caclulate initiation time based on cell birth.
            # note: Ti becomes a scalar (relative initiation time) for the finished mother
            TL_S_val = copy.deepcopy(cells[x]['Ti'].pop(0))
            cells[x+'B']['Ti'] = copy.deepcopy(cells[x]['Ti'])
            cells[x]['Ti'] = TL_S_val-copy.deepcopy(cells[x]['born'])
    # cells still carrying unfinished initiation lengths never divided on that
    # initiation; discard the partial list by overwriting it with NaN
    for x in cells:
        if len(cells[x]['Li'])>0:
            cells[x]['Li'] = np.nan
    return cells
def correlated_normal(old_val, mu, sigma, rho):
    """Draw a gaussian value correlated with a previously drawn one.

    The previous value is standardised, mixed with an independent
    standard-normal draw according to the correlation coefficient,
    then mapped back onto the (mu, sigma) scale.

    Parameters
    ----------
    old_val : float
        previous drawn value
    mu: float
        normal mean
    sigma: float
        normal standard dev.
    rho: float
        correlation (0-1)
    Returns
    -------
    correlated : float
        new correlated value from gaussian
    """
    standardised = (old_val - mu) / sigma
    fresh_draw = np.random.normal(0, 1)
    mixed = rho * standardised + np.sqrt(1 - rho**2) * fresh_draw
    return mixed * sigma + mu
def standardise_dataframe(simul):
    """Turns simulation output in structure similar to experiments
    Parameters
    ----------
    simul : dict of dicts (or list of dicts)
        output of simulation function, keyed by genealogy string
    Returns
    -------
    simul_pd_exp : Pandas dataframe
        dataframe with same structure as experimental data
    """
    #transform list into dataframe (.T puts one cell per row, genealogy as index)
    simul_pd_or = pd.DataFrame(simul).T
    simul_pd = copy.deepcopy(simul_pd_or)
    #remove bad formatting: list-valued entries (Lt, DLi, ...) coerce to NaN
    simul_pd = simul_pd.apply(pd.to_numeric, errors='coerce')
    #add column with cell length over time (Lt rows are [time, length, numori])
    simul_pd['length'] = simul_pd_or.Lt.apply(lambda x: np.array(x)[:,1])
    #change the genealogy-based index into a numerical index and create a mother_id column
    #similar to the one of the experimental data
    simul_pd['genealogy'] = simul_pd.index
    simul_pd.index = range(len(simul_pd.index))
    # Daughters are named mother_genealogy + 'B' by the simulators, so a cell has a
    # mother exactly when its genealogy ends in 'B'. Bugfix: the previous condition
    # (len(genealogy[:-1]) > 0) wrongly matched multi-digit founder labels such as
    # "10" (nbstart > 10) to another founder ("1"); founders must get mother_id = -1.
    simul_pd['mother_id'] = simul_pd.apply(lambda row:
                                   int(simul_pd.index[simul_pd.genealogy == row.genealogy[0:-1]][0])
                                   if row.genealogy.endswith('B') else -1,axis = 1)
    simul_pd = simul_pd.astype({"mother_id": int})
    #rename fields to match experimental formatting
    simul_pd_exp = copy.deepcopy(simul_pd)
    # models without an initiation-to-division adder don't produce final_DLdLi
    if 'final_DLdLi' not in simul_pd_exp.keys():
        simul_pd_exp['final_DLdLi'] = -1.0
    simul_pd_exp = simul_pd_exp[['rfact','born','Lb','Ld','final_Li','tau','final_DLi','final_DLdLi','Td','Ti','mLi','mLd','numori_born','mother_id','length']]
    simul_pd_exp = simul_pd_exp.rename(columns = {'Lb':'Lb_fit','Ld':'Ld_fit','final_Li':'Li_fit',
                                              'tau':'tau_fit','final_DLi': 'DLi','mLi': 'mLi_fit','mLd': 'mLd_fit'})
    simul_pd_exp = simul_pd_exp[['rfact','born','Lb_fit','Ld_fit','Li_fit','tau_fit','DLi','Td','Ti','mLi_fit','mLd_fit','final_DLdLi','numori_born','mother_id','length']]
    return simul_pd_exp
| 42.978378
| 171
| 0.501321
| 3,147
| 23,853
| 3.704163
| 0.089609
| 0.119928
| 0.029253
| 0.033971
| 0.839753
| 0.824912
| 0.812816
| 0.800206
| 0.789912
| 0.783049
| 0
| 0.016719
| 0.330483
| 23,853
| 555
| 172
| 42.978378
| 0.713212
| 0
| 0
| 0.785714
| 0
| 0
| 0.110575
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.011278
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
73739c32aa3369ff1f4c8521ca66c2fd27580684
| 136
|
py
|
Python
|
src/loralay/modeling/layout_pegasus/__init__.py
|
laudao/loralay-modeling
|
a7c89717bac4f0ef9ed820544c4d27e2fe2e4228
|
[
"Apache-2.0"
] | null | null | null |
src/loralay/modeling/layout_pegasus/__init__.py
|
laudao/loralay-modeling
|
a7c89717bac4f0ef9ed820544c4d27e2fe2e4228
|
[
"Apache-2.0"
] | null | null | null |
src/loralay/modeling/layout_pegasus/__init__.py
|
laudao/loralay-modeling
|
a7c89717bac4f0ef9ed820544c4d27e2fe2e4228
|
[
"Apache-2.0"
] | null | null | null |
from .modeling_layout_pegasus import LayoutPegasusForConditionalGeneration
from .configuration_layout_pegasus import LayoutPegasusConfig
| 68
| 74
| 0.933824
| 12
| 136
| 10.25
| 0.666667
| 0.211382
| 0.308943
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051471
| 136
| 2
| 75
| 68
| 0.953488
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
73bbbd7da6cf6d929772389e42fb84534652984f
| 4,594
|
py
|
Python
|
nn/Units/resnet_atrous_units.py
|
meule/resuneta
|
86ec511603b82cef5536b1104fe511f3b0acf121
|
[
"MIT",
"BSD-3-Clause"
] | 89
|
2019-07-05T07:14:51.000Z
|
2022-03-22T16:30:31.000Z
|
nn/Units/resnet_atrous_units.py
|
Dabao55/resuneta
|
49d26563f84c737e07d34edfe30b56c59cbb4203
|
[
"MIT",
"BSD-3-Clause"
] | 14
|
2019-09-05T14:14:00.000Z
|
2021-09-06T02:35:41.000Z
|
nn/Units/resnet_atrous_units.py
|
Dabao55/resuneta
|
49d26563f84c737e07d34edfe30b56c59cbb4203
|
[
"MIT",
"BSD-3-Clause"
] | 32
|
2019-07-12T17:04:59.000Z
|
2022-03-24T07:36:32.000Z
|
from resuneta.nn.BBlocks import resnet_blocks
from mxnet.gluon import HybridBlock
# TODO: write a more sofisticated version, using HybridBlock as a container
class ResNet_atrous_unit(HybridBlock):
    """Residual unit summing four parallel ResNet-v2 branches.

    One branch runs at dilation rate 1 and the other three at the rates
    given in ``_dilation_rates``; every branch output is added onto the
    input, yielding a multi-scale residual connection.
    """
    def __init__(self, _nfilters, _kernel_size=(3,3), _dilation_rates=[3,15,31], _norm_type = 'BatchNorm', **kwards):
        super(ResNet_atrous_unit,self).__init__(**kwards)
        # Blocks stay as individual attributes: wrapping HybridBlocks in a
        # plain Python list hides them from mxnet's parameter collection.
        with self.name_scope():
            self.ResBlock1 = resnet_blocks.ResNet_v2_block(_nfilters, _kernel_size, _dilation_rate=(1, 1), _norm_type=_norm_type, prefix="_ResNetv2block_1_")
            self.ResBlock2 = resnet_blocks.ResNet_v2_block(_nfilters, _kernel_size, _dilation_rate=(_dilation_rates[0], _dilation_rates[0]), _norm_type=_norm_type, prefix="_ResNetv2block_2_")
            self.ResBlock3 = resnet_blocks.ResNet_v2_block(_nfilters, _kernel_size, _dilation_rate=(_dilation_rates[1], _dilation_rates[1]), _norm_type=_norm_type, prefix="_ResNetv2block_3_")
            self.ResBlock4 = resnet_blocks.ResNet_v2_block(_nfilters, _kernel_size, _dilation_rate=(_dilation_rates[2], _dilation_rates[2]), _norm_type=_norm_type, prefix="_ResNetv2block_4_")

    def hybrid_forward(self, F, _xl):
        """Add every branch's output to the input and return the sum."""
        # F.broadcast_add works for both Symbol and NDArray backends;
        # a bare '+' would only be valid in imperative (NDArray) mode.
        out = _xl
        for branch in (self.ResBlock1, self.ResBlock2, self.ResBlock3, self.ResBlock4):
            out = F.broadcast_add(out, branch(_xl))
        return out
# Two atrous in parallel
class ResNet_atrous_2_unit(HybridBlock):
    """Residual unit summing three parallel ResNet-v2 branches.

    One branch runs at dilation rate 1 and the other two at the rates
    given in ``_dilation_rates``; each branch output is added onto the
    input (residual connection).
    """
    def __init__(self, _nfilters, _kernel_size=(3,3), _dilation_rates=[3,15], _norm_type = 'BatchNorm', **kwards):
        super(ResNet_atrous_2_unit,self).__init__(**kwards)
        # Blocks stay as individual attributes: wrapping HybridBlocks in a
        # plain Python list hides them from mxnet's parameter collection.
        with self.name_scope():
            self.ResBlock1 = resnet_blocks.ResNet_v2_block(_nfilters, _kernel_size, _dilation_rate=(1, 1), _norm_type=_norm_type, prefix="_ResNetv2block_1_")
            self.ResBlock2 = resnet_blocks.ResNet_v2_block(_nfilters, _kernel_size, _dilation_rate=(_dilation_rates[0], _dilation_rates[0]), _norm_type=_norm_type, prefix="_ResNetv2block_2_")
            self.ResBlock3 = resnet_blocks.ResNet_v2_block(_nfilters, _kernel_size, _dilation_rate=(_dilation_rates[1], _dilation_rates[1]), _norm_type=_norm_type, prefix="_ResNetv2block_3_")

    def hybrid_forward(self, F, _xl):
        """Add every branch's output to the input and return the sum."""
        # F.broadcast_add works for both Symbol and NDArray backends;
        # a bare '+' would only be valid in imperative (NDArray) mode.
        out = _xl
        for branch in (self.ResBlock1, self.ResBlock2, self.ResBlock3):
            out = F.broadcast_add(out, branch(_xl))
        return out
# One atrous in parallel
class ResNet_atrous_1_unit(HybridBlock):
    """Residual unit summing two parallel ResNet-v2 branches.

    One branch runs at dilation rate 1 and the other at the single rate
    given in ``_dilation_rates``; both branch outputs are added onto the
    input (residual connection).
    """
    def __init__(self, _nfilters, _kernel_size=(3,3), _dilation_rates=[3], _norm_type = 'BatchNorm', **kwards):
        super(ResNet_atrous_1_unit,self).__init__(**kwards)
        # Blocks stay as individual attributes: wrapping HybridBlocks in a
        # plain Python list hides them from mxnet's parameter collection.
        with self.name_scope():
            self.ResBlock1 = resnet_blocks.ResNet_v2_block(_nfilters, _kernel_size, _dilation_rate=(1, 1), _norm_type=_norm_type, prefix="_ResNetv2block_1_")
            self.ResBlock2 = resnet_blocks.ResNet_v2_block(_nfilters, _kernel_size, _dilation_rate=(_dilation_rates[0], _dilation_rates[0]), _norm_type=_norm_type, prefix="_ResNetv2block_2_")

    def hybrid_forward(self, F, _xl):
        """Add both branches' outputs to the input and return the sum."""
        # F.broadcast_add works for both Symbol and NDArray backends;
        # a bare '+' would only be valid in imperative (NDArray) mode.
        out = _xl
        for branch in (self.ResBlock1, self.ResBlock2):
            out = F.broadcast_add(out, branch(_xl))
        return out
| 35.338462
| 156
| 0.649325
| 593
| 4,594
| 4.607083
| 0.160202
| 0.061493
| 0.079063
| 0.065886
| 0.900439
| 0.900073
| 0.875915
| 0.825037
| 0.825037
| 0.825037
| 0
| 0.025507
| 0.24902
| 4,594
| 129
| 157
| 35.612403
| 0.766377
| 0.146713
| 0
| 0.702128
| 0
| 0
| 0.052972
| 0
| 0
| 0
| 0
| 0.007752
| 0
| 1
| 0.12766
| false
| 0
| 0.042553
| 0
| 0.297872
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
73c510c0566a651af8fcbcae9edd696d83c5a218
| 142,257
|
py
|
Python
|
tests/unit/test_wkt.py
|
digital-land/pipeline
|
7ecd3fdaa201227a28463a6c5b5870c1152c0e73
|
[
"MIT"
] | 3
|
2020-03-05T09:00:27.000Z
|
2020-11-13T12:48:32.000Z
|
tests/unit/test_wkt.py
|
digital-land/pipeline
|
7ecd3fdaa201227a28463a6c5b5870c1152c0e73
|
[
"MIT"
] | 2
|
2020-10-14T14:50:49.000Z
|
2020-10-27T13:39:08.000Z
|
tests/unit/test_wkt.py
|
digital-land/pipeline
|
7ecd3fdaa201227a28463a6c5b5870c1152c0e73
|
[
"MIT"
] | 2
|
2020-05-18T14:36:14.000Z
|
2020-11-13T12:48:35.000Z
|
#!/usr/bin/env pytest
from digital_land.datatype.wkt import WktDataType
from digital_land.log import IssueLog
def issue_type(issues):
    """Pop and return the type of the single logged issue, or None.

    Also verifies the log held at most one issue, leaving it empty.
    """
    rows = issues.rows
    if rows == []:
        return None
    entry = rows.pop()
    assert rows == []
    return entry["issue-type"]
def test_wkt_point_wgs84():
    """A WGS84 point is reformatted to canonical 6-decimal form, no issue logged."""
    wkt = WktDataType()
    issues = IssueLog()
    # Nelson's column
    assert (
        wkt.normalise("POINT( -0.127972 51.507722 )", issues=issues)
        == "POINT (-0.127972 51.507722)"
    )
    assert issue_type(issues) is None
def test_wkt_point_wgs84_south_west():
    """A point near the south-west extreme of the UK passes through unchanged."""
    wkt = WktDataType()
    issues = IssueLog()
    # Scilly Isles
    assert (
        wkt.normalise("POINT (-6.322778 49.936111)", issues=issues)
        == "POINT (-6.322778 49.936111)"
    )
    assert issue_type(issues) is None
def test_wkt_point_wgs84_north_east():
    """A north-east UK point is padded to 6 decimal places, no issue logged."""
    wkt = WktDataType()
    issues = IssueLog()
    # Berwick-upon-Tweed
    assert (
        wkt.normalise("POINT (-2.007 55.771)", issues=issues)
        == "POINT (-2.007000 55.771000)"
    )
    assert issue_type(issues) is None
def test_wkt_point_wgs84_flipped():
    """Lat/lon given in the wrong order is corrected and a 'WGS84 flipped' issue logged."""
    wkt = WktDataType()
    issues = IssueLog()
    # Nelson's column
    assert (
        wkt.normalise("POINT (51.507722 -0.127972)", issues=issues)
        == "POINT (-0.127972 51.507722)"
    )
    assert issue_type(issues) == "WGS84 flipped"
def test_wkt_point_wgs84_out_of_range():
    """WGS84-looking points outside the expected bounding box are dropped with an issue."""
    wkt = WktDataType()
    issues = IssueLog()
    assert wkt.normalise("POINT (0.0 0.0)", issues=issues) == ""
    assert issue_type(issues) == "WGS84 out of bounds"
    assert wkt.normalise("POINT (0.0 48.1)", issues=issues) == ""
    assert issue_type(issues) == "WGS84 out of bounds"
def test_wkt_point_northings_eastings():
    """OSGB eastings/northings are reprojected to WGS84 and an 'OSGB' issue logged."""
    wkt = WktDataType()
    issues = IssueLog()
    # Nelson's column TQ 30015 80415
    assert (
        wkt.normalise("POINT (530015 180415)", issues=issues)
        == "POINT (-0.127960 51.507718)"
    )
    assert issue_type(issues) == "OSGB"
def test_parse_wkt_ogbd_to_wgs84_accuracy():
    """An OSGB multipolygon reprojects to WGS84 with 6-decimal precision and point thinning."""
    # NOTE(review): "ogbd" in the test name looks like a typo for "osgb" — renaming
    # would change the collected test id, so leaving as-is.
    wkt = WktDataType()
    issues = IssueLog()
    output = wkt.normalise(
        "MULTIPOLYGON (((177556.8532 38417.5297999997,177554.4128 38418.3279999997,177549.4009 38419.9708999991,177547.6712 38420.5159000009,177548.2506 38422.5782999992,177549.7508 38422.1910999995,177553.3995 38420.8543999996,177555.9324 38419.9307000004,177557.5129 38419.3800000008,177556.8532 38417.5297999997)))",  # noqa: E501
        issues=issues,
    )
    assert (
        output
        == "MULTIPOLYGON (((-5.118622 50.203865,-5.118614 50.203882,-5.118724 50.203904,-5.118745 50.203907,-5.118752 50.203888,-5.118728 50.203884,-5.118622 50.203865)))"  # noqa: E501
    )
def test_wkt_point_flipped_northings_eastings():
    """Swapped OSGB northings/eastings are corrected; an 'OSGB flipped' issue is logged."""
    wkt = WktDataType()
    issues = IssueLog()
    # Nelson's column TQ 30015 80415
    assert (
        wkt.normalise("POINT (180415 530015)", issues=issues)
        == "POINT (-0.127960 51.507718)"
    )
    assert issue_type(issues) == "OSGB flipped"
def test_wkt_point_mercator():
    """Web-Mercator (EPSG:3857) coordinates are reprojected; a 'Mercator' issue is logged."""
    wkt = WktDataType()
    issues = IssueLog()
    # Nelson's Column
    # https://epsg.io/map#srs=3857&x=-14245.780102&y=6711600.069496&z=17&layer=streets
    assert (
        wkt.normalise("POINT (-14245.780102 6711600.069496)", issues=issues)
        == "POINT (-0.127972 51.507722)"
    )
    assert issue_type(issues) == "Mercator"
def test_wkt_point_mercator_flipped():
    """Swapped Web-Mercator coordinates are detected and corrected."""
    # NOTE(review): unlike the sibling tests, the logged issue type is not asserted
    # here — presumably a 'Mercator flipped'-style issue; confirm the expected
    # string before adding the assertion.
    wkt = WktDataType()
    issues = IssueLog()
    # Nelson's Column
    # https://epsg.io/map#srs=3857&x=-14245.780102&y=6711600.069496&z=17&layer=streets
    assert (
        wkt.normalise("POINT (6711600.069496 -14245.780102)", issues=issues)
        == "POINT (-0.127972 51.507722)"
    )
def test_wkt_point_missing_values():
    """Points with missing coordinates are rejected with an 'invalid WKT' issue."""
    wkt = WktDataType()
    issues = IssueLog()
    assert wkt.normalise("POINT", issues=issues) == ""
    assert issue_type(issues) == "invalid WKT"
    assert wkt.normalise("POINT ()", issues=issues) == ""
    assert issue_type(issues) == "invalid WKT"
    assert wkt.normalise("POINT (-0.127972 )", issues=issues) == ""
    assert issue_type(issues) == "invalid WKT"
def test_wkt_point_out_of_range_values():
    """Coordinates too large for any supported CRS raise an 'invalid coordinates' issue."""
    wkt = WktDataType()
    issues = IssueLog()
    assert wkt.normalise("POINT (1000 100000000)", issues=issues) == ""
    assert issue_type(issues) == "invalid coordinates"
    assert wkt.normalise("POINT (100000000 10000)", issues=issues) == ""
    assert issue_type(issues) == "invalid coordinates"
def test_wkt_multipolygon_wgs84():
    """A WGS84 multipolygon is simplified (points thinned) with no issue logged."""
    wkt = WktDataType()
    issues = IssueLog()
    value = "MULTIPOLYGON (((-0.1434494279 51.46626361,-0.1434646353 51.46627914,-0.143515539 51.4663375,-0.1435648475 51.4663926,-0.1435988703 51.46643054,-0.1436227923 51.46646195,-0.1436840978 51.46644134,-0.1436913831 51.4664392,-0.1437519691 51.46641858,-0.1437548832 51.46641773,-0.1436953554 51.46634835,-0.1435837312 51.46621808,-0.1435209507 51.46623957,-0.1434494279 51.46626361)))"  # noqa: E501
    expected = "MULTIPOLYGON (((-0.143449 51.466264,-0.143623 51.466462,-0.143755 51.466418,-0.143584 51.466218,-0.143449 51.466264)))"
    assert wkt.normalise(value, issues=issues) == expected
    assert issue_type(issues) is None
def test_wkt_multipolygon_flipped_northings_and_eastings():
    """A multipolygon with swapped OSGB axes is reprojected; 'OSGB flipped' is logged."""
    wkt = WktDataType()
    issues = IssueLog()
    value = "MULTIPOLYGON (((203500.0 494297.28,203499.8 494297.07,203495.1 494292.05,203491.2 494287.55,203487.2 494284.05,203482.45 494280.05,203478.4 494276.3,203479.85 494274.9,203486.95 494265.96,203500.0 494249.55,203503.6 494244.7,203514.5 494230.45,203532.6 494206.8,203554.0 494178.8,203566.2 494162.9,203601.8 494116.4,203626.0 494136.2,203628.4 494138.7,203628.9 494141.1,203626.04 494151.35,203639.0 494160.6,203645.3 494165.9,203650.7 494170.2,203651.8 494170.9,203659.0 494175.7,203670.3 494183.0,203674.8 494186.0,203677.7 494188.6,203619.4 494261.3,203612.3 494270.3,203607.2 494276.8,203602.7 494282.9,203590.9 494299.4,203578.6 494316.9,203578.36 494317.24,203562.9 494338.9,203550.8 494332.5,203546.6 494330.4,203540.7 494326.7,203533.8 494322.3,203522.1 494315.4,203519.1 494313.4,203516.8 494311.9,203514.3 494309.8,203500.0 494297.28)))"  # noqa: E501
    expected = "MULTIPOLYGON (((-0.636234 51.722310,-0.636018 51.722459,-0.635742 51.722723,-0.635614 51.722868,-0.636501 51.723277,-0.637759 51.723926,-0.637797 51.723900,-0.638032 51.723686,-0.638174 51.723582,-0.638312 51.723467,-0.638459 51.723495,-0.638494 51.723491,-0.638824 51.723255,-0.636925 51.722318,-0.636543 51.722119,-0.636377 51.722233,-0.636234 51.722310)))"  # noqa: E501
    assert wkt.normalise(value, issues=issues) == expected
    assert issue_type(issues) == "OSGB flipped"
def test_wkt_multipolygon_mercator():
    """A Web-Mercator multipolygon is reprojected to WGS84; 'Mercator' is logged."""
    wkt = WktDataType()
    issues = IssueLog()
    value = "MULTIPOLYGON (((-7946.4687 6701859.138,-7925.9829 6701856.258,-7926.5076 6701852.447,-7946.5692 6701855.314,-7946.4687 6701859.138)))"  # noqa: E501
    expected = "MULTIPOLYGON (((-0.071384 51.453226,-0.071385 51.453205,-0.071205 51.453189,-0.071200 51.453210,-0.071384 51.453226)))"  # noqa: E501
    assert wkt.normalise(value, issues=issues) == expected
    assert issue_type(issues) == "Mercator"
def test_wkt_multipolygon_wgs84_duplicate_polygon():
    """A degenerate duplicate polygon within a multipolygon is dropped."""
    wkt = WktDataType()
    issues = IssueLog()
    # Buckinghamshire tree preservation zone
    value = "MULTIPOLYGON (((-0.111483 51.472552,-0.111481 51.472541,-0.111373 51.472542,-0.111372 51.472551,-0.111483 51.472552)),((-0.111483 51.472541,-0.111481 51.472541,-0.111481 51.472541,-0.111483 51.472541)))"  # noqa: E501
    expected = "MULTIPOLYGON (((-0.111483 51.472552,-0.111481 51.472541,-0.111373 51.472542,-0.111372 51.472551,-0.111483 51.472552)))"
    assert wkt.normalise(value, issues=issues) == expected
def test_wkt_invalid_multipolygon_too_few_points():
    """A degenerate interior ring with too few points is removed during normalisation."""
    wkt = WktDataType()
    issues = IssueLog()
    # 465879 "Teesmouth and Cleveland Coast"
    # Too few points in geometry component[-1.240621 54.600571]
    value = "MULTIPOLYGON(((-1.240621 54.600571,-1.240618 54.600566,-1.24038 54.600608,-1.240094 54.600681,-1.239689 54.6008,-1.239365 54.60088,-1.239192 54.600903,-1.239082 54.600905,-1.23893 54.600894,-1.238186 54.600798,-1.237731 54.600706,-1.237607 54.600704,-1.237373 54.600645,-1.237228 54.600627,-1.237181 54.600612,-1.237163 54.60059,-1.237159 54.6005,-1.237124 54.60044,-1.237062 54.60039,-1.236918 54.600298,-1.236722 54.600197,-1.236307 54.600009,-1.236144 54.599908,-1.23606 54.599837,-1.235639 54.599828,-1.235434 54.599864,-1.235202 54.599854,-1.235063 54.599822,-1.234897 54.599755,-1.234799 54.599734,-1.234647 54.59972,-1.234565 54.59969,-1.234484 54.599638,-1.234447 54.599608,-1.234405 54.599557,-1.234306 54.599471,-1.234267 54.599451,-1.234022 54.599388,-1.233858 54.599361,-1.233446 54.599375,-1.233442 54.599417,-1.233678 54.601451,-1.233948 54.603642,-1.236817 54.603639,-1.246748 54.603569,-1.250048 54.601986,-1.250198 54.601952,-1.249817 54.601682,-1.248889 54.601045,-1.248357 54.600664,-1.247944 54.600349,-1.246529 54.600752,-1.245915 54.599941,-1.245832 54.599909,-1.245807 54.599911,-1.245686 54.599962,-1.245584 54.599994,-1.245013 54.600219,-1.244672 54.600364,-1.244425 54.600494,-1.244167 54.600571,-1.244048 54.600593,-1.244054 54.600605,-1.243709 54.600674,-1.243382 54.60082,-1.242351 54.601124,-1.242329 54.601101,-1.241955 54.600894,-1.241885 54.600845,-1.241862 54.600805,-1.241837 54.600792,-1.241697 54.600757,-1.241631 54.600749,-1.24158 54.600731,-1.241436 54.600726,-1.241406 54.600709,-1.241349 54.600658,-1.241331 54.600653,-1.241279 54.600595,-1.241261 54.600557,-1.241205 54.600525,-1.241081 54.600521,-1.240771 54.600548,-1.240621 54.600571),(-1.240621 54.600571,-1.240621 54.600571,-1.240592 54.600575,-1.240621 54.600571)))"  # noqa: E501
    expected = "MULTIPOLYGON (((-1.240771 54.600548,-1.240621 54.600571,-1.240618 54.600566,-1.240380 54.600608,-1.240094 54.600681,-1.239689 54.600800,-1.239365 54.600880,-1.239192 54.600903,-1.239082 54.600905,-1.238930 54.600894,-1.238186 54.600798,-1.237731 54.600706,-1.237607 54.600704,-1.237373 54.600645,-1.237228 54.600627,-1.237181 54.600612,-1.237163 54.600590,-1.237159 54.600500,-1.237124 54.600440,-1.237062 54.600390,-1.236918 54.600298,-1.236722 54.600197,-1.236307 54.600009,-1.236144 54.599908,-1.236060 54.599837,-1.235639 54.599828,-1.235434 54.599864,-1.235202 54.599854,-1.235063 54.599822,-1.234897 54.599755,-1.234799 54.599734,-1.234647 54.599720,-1.234565 54.599690,-1.234484 54.599638,-1.234447 54.599608,-1.234405 54.599557,-1.234306 54.599471,-1.234267 54.599451,-1.234022 54.599388,-1.233858 54.599361,-1.233446 54.599375,-1.233442 54.599417,-1.233678 54.601451,-1.233948 54.603642,-1.236817 54.603639,-1.246748 54.603569,-1.250048 54.601986,-1.250198 54.601952,-1.248889 54.601045,-1.248357 54.600664,-1.247944 54.600349,-1.246529 54.600752,-1.245915 54.599941,-1.245832 54.599909,-1.245807 54.599911,-1.245686 54.599962,-1.245584 54.599994,-1.245013 54.600219,-1.244672 54.600364,-1.244425 54.600494,-1.244167 54.600571,-1.244048 54.600593,-1.244054 54.600605,-1.243709 54.600674,-1.243382 54.600820,-1.242351 54.601124,-1.242329 54.601101,-1.241955 54.600894,-1.241885 54.600845,-1.241862 54.600805,-1.241837 54.600792,-1.241697 54.600757,-1.241631 54.600749,-1.241580 54.600731,-1.241436 54.600726,-1.241406 54.600709,-1.241349 54.600658,-1.241331 54.600653,-1.241279 54.600595,-1.241261 54.600557,-1.241205 54.600525,-1.241081 54.600521,-1.240771 54.600548)))"  # noqa: E501
    assert wkt.normalise(value, issues=issues) == expected
def test_wkt_invalid_multipolygon_self_intersection_with_holes():
wkt = WktDataType()
issues = IssueLog()
# 56948 Wychavon District
# Self-intersection[-2.23750992455993 52.3608380047779]
value = "MULTIPOLYGON(((-2.219219 52.359138,-2.220211 52.359378,-2.220471 52.359422,-2.221685 52.359671,-2.223059 52.359918,-2.223321 52.359918,-2.224476 52.360181,-2.224934 52.360309,-2.225276 52.360421,-2.225417 52.360449,-2.226117 52.360489,-2.226391 52.360522,-2.230048 52.361069,-2.23038 52.361108,-2.230882 52.361148,-2.231879 52.361187,-2.232138 52.361205,-2.23311 52.361126,-2.233739 52.361018,-2.235523 52.360881,-2.23721 52.360857,-2.23751 52.360838,-2.237432 52.360688,-2.237512 52.360842,-2.237998 52.360783,-2.239279 52.360553,-2.240296 52.360428,-2.241611 52.360336,-2.241935 52.360368,-2.242738 52.360753,-2.242828 52.360787,-2.242966 52.360819,-2.244501 52.360918,-2.245688 52.361022,-2.246176 52.360063,-2.246586 52.35937,-2.246946 52.358795,-2.247101 52.358432,-2.246931 52.358449,-2.247499 52.357102,-2.247668 52.356678,-2.247808 52.35643,-2.247931 52.356144,-2.24825 52.355477,-2.248235 52.355473,-2.248248 52.355445,-2.247603 52.355271,-2.247506 52.355255,-2.247419 52.355261,-2.247287 52.355305,-2.245728 52.354927,-2.245735 52.354917,-2.245707 52.35491,-2.246763 52.353241,-2.24582 52.353017,-2.246116 52.352627,-2.247583 52.350831,-2.24769 52.350784,-2.249376 52.348833,-2.249439 52.348856,-2.249448 52.348846,-2.249802 52.348973,-2.249878 52.348847,-2.25007 52.348582,-2.250434 52.348079,-2.250952 52.3474,-2.251079 52.347203,-2.251191 52.347056,-2.251216 52.34701,-2.251659 52.345845,-2.251429 52.345849,-2.251097 52.34584,-2.250744 52.345802,-2.249882 52.345656,-2.24975 52.345653,-2.249651 52.345663,-2.249516 52.345665,-2.249537 52.34564,-2.249415 52.345652,-2.249481 52.345606,-2.249765 52.345473,-2.250106 52.345334,-2.251117 52.344948,-2.251371 52.344825,-2.251439 52.344783,-2.25175 52.34455,-2.252118 52.344229,-2.252137 52.344207,-2.252234 52.344028,-2.252422 52.34363,-2.252596 52.343282,-2.252628 52.343203,-2.252754 52.342968,-2.252872 52.342615,-2.253145 52.342118,-2.253206 52.342017,-2.253333 52.341845,-2.253542 52.341586,-2.253932 52.341125,-2.253981 
52.341142,-2.254006 52.341115,-2.254735 52.341322,-2.255745 52.341556,-2.256257 52.341633,-2.256417 52.341647,-2.256684 52.341653,-2.256809 52.341679,-2.257019 52.341683,-2.257338 52.341658,-2.257543 52.341623,-2.257789 52.341559,-2.257946 52.341506,-2.260411 52.341036,-2.261065 52.340953,-2.261594 52.340866,-2.261983 52.340815,-2.262009 52.340743,-2.261232 52.340654,-2.261227 52.340648,-2.261243 52.340535,-2.261266 52.340461,-2.261301 52.340388,-2.261386 52.34026,-2.2617 52.339902,-2.26173 52.33983,-2.261737 52.339784,-2.261737 52.339728,-2.261725 52.33967,-2.261676 52.339575,-2.261611 52.339504,-2.261219 52.339217,-2.261089 52.339141,-2.260976 52.339101,-2.260919 52.33907,-2.260848 52.339005,-2.260709 52.338646,-2.260736 52.338406,-2.260872 52.338325,-2.261047 52.338248,-2.261181 52.338204,-2.261798 52.338058,-2.2631 52.338127,-2.263077 52.338016,-2.263068 52.337922,-2.263072 52.33783,-2.263285 52.336903,-2.263308 52.336841,-2.26337 52.336718,-2.263858 52.336039,-2.264079 52.335934,-2.26495 52.335971,-2.265184 52.335971,-2.265538 52.335952,-2.265868 52.335904,-2.265982 52.335899,-2.266097 52.335896,-2.266333 52.335924,-2.266826 52.33615,-2.267131 52.336275,-2.268065 52.336632,-2.268295 52.336425,-2.268478 52.336282,-2.26834 52.336212,-2.268366 52.336176,-2.268436 52.336108,-2.268478 52.336077,-2.268753 52.336065,-2.268876 52.336092,-2.268998 52.336126,-2.269216 52.336269,-2.26916 52.336135,-2.269161 52.336057,-2.269175 52.336,-2.269208 52.335931,-2.269235 52.335896,-2.269305 52.335841,-2.269229 52.335752,-2.269216 52.33571,-2.269213 52.335656,-2.26922 52.335626,-2.269269 52.33555,-2.269719 52.335152,-2.267975 52.334078,-2.267306 52.33416,-2.266837 52.333635,-2.267003 52.333569,-2.267208 52.333507,-2.267342 52.333512,-2.267833 52.333485,-2.267968 52.33374,-2.270334 52.333121,-2.271779 52.332618,-2.271123 52.331963,-2.270858 52.331634,-2.270783 52.331658,-2.270828 52.331784,-2.27082 52.331804,-2.270767 52.331862,-2.270514 52.332056,-2.270431 52.332094,-2.270346 
52.332115,-2.27017 52.33208,-2.269903 52.331995,-2.269734 52.331928,-2.269379 52.331838,-2.268946 52.331646,-2.268627 52.331472,-2.268517 52.33138,-2.268476 52.331326,-2.268468 52.331296,-2.268376 52.331228,-2.267261 52.330812,-2.266464 52.330537,-2.266292 52.330461,-2.266156 52.33039,-2.265971 52.330276,-2.265553 52.330117,-2.265334 52.330051,-2.264944 52.329959,-2.264135 52.329802,-2.263841 52.329757,-2.263447 52.329637,-2.262763 52.329639,-2.262622 52.329625,-2.262261 52.329524,-2.261951 52.329446,-2.261659 52.329297,-2.261422 52.329153,-2.261161 52.328932,-2.260899 52.328771,-2.26025 52.328586,-2.259985 52.328528,-2.258908 52.328352,-2.258338 52.328205,-2.258204 52.328165,-2.257965 52.328072,-2.257672 52.327935,-2.257395 52.327784,-2.257207 52.327676,-2.256751 52.327382,-2.256239 52.327071,-2.255899 52.326878,-2.255135 52.3265,-2.254356 52.326155,-2.253899 52.325995,-2.251895 52.325501,-2.25161 52.325417,-2.251494 52.325375,-2.251266 52.325266,-2.251065 52.325164,-2.249734 52.324428,-2.249612 52.324343,-2.249415 52.324182,-2.249009 52.323791,-2.2485 52.323317,-2.24847 52.323311,-2.248384 52.323328,-2.248302 52.323356,-2.247721 52.323621,-2.246956 52.323937,-2.24681 52.32398,-2.246701 52.323996,-2.246417 52.324011,-2.246082 52.324003,-2.244877 52.323844,-2.244729 52.323833,-2.244618 52.323833,-2.24399 52.323888,-2.243637 52.323931,-2.242906 52.324036,-2.242683 52.324085,-2.242216 52.324138,-2.242029 52.324137,-2.241791 52.324096,-2.241159 52.324009,-2.240891 52.323975,-2.240744 52.32397,-2.240629 52.32399,-2.240582 52.324014,-2.240491 52.324088,-2.240427 52.324129,-2.240371 52.32415,-2.240193 52.324187,-2.240086 52.324194,-2.238681 52.324191,-2.238192 52.324197,-2.238066 52.324189,-2.23796 52.324173,-2.237192 52.324008,-2.237048 52.323984,-2.236532 52.32393,-2.236276 52.323914,-2.235472 52.323893,-2.235268 52.323905,-2.234935 52.323939,-2.233882 52.324114,-2.23339 52.324157,-2.233299 52.324152,-2.233133 52.324127,-2.232933 52.324082,-2.23268 52.32401,-2.232561 
52.324002,-2.232418 52.324018,-2.231378 52.324023,-2.230386 52.32391,-2.229426 52.32386,-2.229168 52.323839,-2.228476 52.32378,-2.227666 52.323688,-2.226645 52.323599,-2.226303 52.323581,-2.22551 52.323555,-2.224674 52.323548,-2.222433 52.323576,-2.22177 52.323593,-2.219554 52.323676,-2.219186 52.323678,-2.218596 52.323663,-2.217935 52.323615,-2.217612 52.323579,-2.215772 52.32331,-2.215379 52.32326,-2.214933 52.323219,-2.214828 52.323091,-2.214618 52.322731,-2.214566 52.322655,-2.214195 52.322138,-2.213858 52.321599,-2.212909 52.320136,-2.212881 52.320139,-2.21287 52.320133,-2.212865 52.320115,-2.212891 52.320104,-2.212668 52.31976,-2.211414 52.318001,-2.211164 52.317585,-2.210969 52.317285,-2.210555 52.316763,-2.210414 52.316539,-2.21015 52.316157,-2.2097 52.315556,-2.209525 52.315315,-2.209492 52.31525,-2.209155 52.314907,-2.208656 52.314437,-2.208045 52.313948,-2.207645 52.313607,-2.207454 52.313477,-2.207151 52.313302,-2.206783 52.31311,-2.205236 52.312387,-2.204279 52.312002,-2.204041 52.311918,-2.204048 52.311909,-2.203804 52.311811,-2.20298 52.311505,-2.200164 52.310598,-2.199682 52.310487,-2.199274 52.310432,-2.198991 52.310363,-2.198039 52.310094,-2.197348 52.309915,-2.197041 52.309843,-2.195562 52.309458,-2.194698 52.309179,-2.194607 52.309171,-2.194566 52.309192,-2.194059 52.308965,-2.193892 52.308899,-2.193484 52.308773,-2.19273 52.308559,-2.192514 52.308476,-2.191466 52.308154,-2.190573 52.307851,-2.19003 52.307644,-2.189245 52.307362,-2.187706 52.306743,-2.187383 52.30663,-2.186503 52.306229,-2.186078 52.306025,-2.185728 52.305831,-2.185045 52.305505,-2.18475 52.305374,-2.184313 52.305215,-2.183867 52.305034,-2.183517 52.304835,-2.183496 52.304815,-2.183484 52.304788,-2.183208 52.304683,-2.182763 52.304487,-2.182488 52.30435,-2.18234 52.304298,-2.18224 52.304245,-2.182218 52.304224,-2.181076 52.303659,-2.18049 52.303383,-2.180005 52.303049,-2.179765 52.302866,-2.179494 52.302703,-2.17898 52.302352,-2.178363 52.301873,-2.177925 52.301577,-2.177816 
52.301495,-2.177507 52.301243,-2.177305 52.301043,-2.177306 52.30103,-2.177284 52.30098,-2.177306 52.300952,-2.177323 52.300943,-2.177309 52.300924,-2.177062 52.300721,-2.176974 52.30067,-2.176712 52.300431,-2.176301 52.300013,-2.176175 52.299895,-2.175687 52.299377,-2.175661 52.299366,-2.175549 52.299267,-2.175285 52.299094,-2.173739 52.297686,-2.172665 52.296728,-2.172306 52.296391,-2.172212 52.296285,-2.172144 52.296189,-2.172054 52.296084,-2.171884 52.295898,-2.170878 52.294971,-2.17081 52.294911,-2.170793 52.294916,-2.169356 52.293535,-2.16871 52.292801,-2.168576 52.292688,-2.167765 52.291862,-2.167285 52.291402,-2.16691 52.29107,-2.166472 52.290739,-2.166227 52.290542,-2.165783 52.290218,-2.165184 52.289735,-2.164453 52.288998,-2.164423 52.288935,-2.164337 52.288937,-2.164137 52.288771,-2.163589 52.288274,-2.163177 52.287867,-2.162777 52.287424,-2.162721 52.287374,-2.162832 52.287344,-2.16284 52.287323,-2.162815 52.287287,-2.16256 52.287051,-2.16249 52.286994,-2.162381 52.286936,-2.162309 52.286888,-2.16177 52.286354,-2.161519 52.286127,-2.161303 52.285947,-2.161207 52.285869,-2.161132 52.28589,-2.161071 52.285863,-2.160828 52.285572,-2.16058 52.285254,-2.160223 52.284829,-2.160187 52.284799,-2.160082 52.28481,-2.160053 52.284772,-2.160069 52.284767,-2.160057 52.284745,-2.160098 52.284696,-2.159613 52.284357,-2.159406 52.284252,-2.159208 52.284046,-2.159238 52.28403,-2.158606 52.28354,-2.158363 52.283287,-2.15809 52.283076,-2.157663 52.282713,-2.157522 52.28258,-2.156648 52.281699,-2.156009 52.280972,-2.155863 52.280815,-2.155793 52.280761,-2.155724 52.280694,-2.155056 52.280027,-2.154887 52.279811,-2.154507 52.279234,-2.154377 52.279017,-2.15426 52.278803,-2.153952 52.278162,-2.153788 52.277774,-2.15357 52.277205,-2.153479 52.27674,-2.153344 52.275658,-2.153337 52.275562,-2.153342 52.275076,-2.153362 52.274834,-2.15336 52.274723,-2.153328 52.274689,-2.15334 52.274618,-2.153118 52.274561,-2.15304 52.274629,-2.152931 52.274662,-2.152261 52.274542,-2.15199 
52.274509,-2.151777 52.274472,-2.151575 52.274421,-2.150223 52.274027,-2.150059 52.273941,-2.149812 52.27385,-2.149374 52.273737,-2.148982 52.273623,-2.148098 52.273385,-2.147926 52.273365,-2.147052 52.273224,-2.146156 52.273065,-2.145637 52.273027,-2.145325 52.273023,-2.145044 52.273049,-2.144518 52.273107,-2.144083 52.273197,-2.143846 52.273233,-2.142873 52.273311,-2.142723 52.27333,-2.140601 52.273627,-2.140123 52.273737,-2.139879 52.273321,-2.139849 52.273288,-2.139793 52.273089,-2.139801 52.273055,-2.139758 52.272902,-2.139874 52.272858,-2.139885 52.272842,-2.140452 52.272424,-2.140569 52.272322,-2.140819 52.272123,-2.141517 52.271585,-2.140963 52.271307,-2.140668 52.271093,-2.140684 52.270594,-2.140745 52.27059,-2.140742 52.270541,-2.140825 52.269835,-2.141008 52.269702,-2.141013 52.269601,-2.140619 52.269575,-2.140299 52.269534,-2.139733 52.269483,-2.139028 52.269498,-2.138386 52.269487,-2.138102 52.269489,-2.137727 52.269507,-2.137247 52.269544,-2.137088 52.269546,-2.136636 52.269532,-2.136455 52.269537,-2.135216 52.269605,-2.135134 52.26963,-2.135093 52.269673,-2.135086 52.269712,-2.135095 52.269774,-2.135035 52.269782,-2.135001 52.269717,-2.134906 52.269681,-2.13479 52.269683,-2.134423 52.269743,-2.134042 52.269837,-2.133267 52.269965,-2.132411 52.270121,-2.13191 52.270197,-2.131867 52.270103,-2.131861 52.270066,-2.131868 52.270052,-2.131906 52.270011,-2.131973 52.269966,-2.132107 52.269919,-2.132177 52.269857,-2.132205 52.269808,-2.13221 52.269778,-2.132204 52.269692,-2.13221 52.269664,-2.132194 52.269627,-2.132144 52.269583,-2.132144 52.26949,-2.132124 52.269451,-2.132132 52.269426,-2.132174 52.269395,-2.132297 52.26937,-2.132344 52.269353,-2.132385 52.269296,-2.132458 52.269243,-2.132819 52.269195,-2.132635 52.268913,-2.132521 52.268863,-2.131987 52.268576,-2.131654 52.268386,-2.132247 52.268106,-2.132677 52.267881,-2.133159 52.267691,-2.133525 52.267524,-2.133697 52.267436,-2.133681 52.267397,-2.133704 52.26737,-2.133801 52.267356,-2.13408 
52.26721,-2.134263 52.267096,-2.134357 52.267027,-2.134536 52.266872,-2.134395 52.266809,-2.133882 52.266634,-2.133596 52.266567,-2.133274 52.266519,-2.132821 52.266488,-2.131841 52.26648,-2.131317 52.266456,-2.130986 52.266432,-2.13027 52.266281,-2.129602 52.266161,-2.129198 52.266117,-2.128694 52.266075,-2.128091 52.266053,-2.127364 52.266075,-2.126837 52.26612,-2.126522 52.266128,-2.126212 52.266119,-2.119952 52.265298,-2.115125 52.264694,-2.11148 52.264109,-2.111087 52.264058,-2.110021 52.2639,-2.109683 52.263867,-2.108934 52.263822,-2.107455 52.263701,-2.106395 52.263598,-2.105909 52.263562,-2.10332 52.263314,-2.101655 52.263172,-2.100815 52.26309,-2.09765 52.262809,-2.096722 52.262699,-2.093677 52.262226,-2.091837 52.262005,-2.090728 52.261882,-2.089397 52.261757,-2.086862 52.261558,-2.086106 52.261448,-2.083137 52.261193,-2.080816 52.260884,-2.07721 52.260601,-2.072849 52.260144,-2.071848 52.260094,-2.071465 52.260083,-2.070513 52.260085,-2.07004 52.260097,-2.069706 52.26011,-2.068625 52.260191,-2.067797 52.26029,-2.067344 52.260359,-2.066804 52.260453,-2.065932 52.260633,-2.061021 52.261689,-2.060301 52.261871,-2.059602 52.262071,-2.05719 52.262856,-2.05588 52.263301,-2.055105 52.26362,-2.054654 52.263835,-2.054334 52.264,-2.05265 52.264933,-2.054113 52.265903,-2.053168 52.266439,-2.053286 52.266516,-2.053487 52.266625,-2.053178 52.266876,-2.053024 52.266979,-2.052999 52.267033,-2.053277 52.267229,-2.053316 52.267264,-2.05305 52.267393,-2.053179 52.267468,-2.052755 52.267682,-2.053013 52.268006,-2.052842 52.268069,-2.052624 52.268163,-2.051959 52.268357,-2.051412 52.268538,-2.051342 52.268633,-2.051278 52.268704,-2.051134 52.268926,-2.050976 52.268815,-2.05082 52.268675,-2.050594 52.268437,-2.050524 52.268344,-2.050376 52.268077,-2.050344 52.26799,-2.050344 52.267943,-2.050273 52.267892,-2.050228 52.267839,-2.050292 52.267799,-2.050331 52.267732,-2.050466 52.267552,-2.0508 52.267331,-2.050845 52.267284,-2.050925 52.266893,-2.050903 52.266893,-2.051031 
52.266529,-2.051008 52.266523,-2.051053 52.266391,-2.051079 52.266281,-2.051012 52.266034,-2.050335 52.265812,-2.049705 52.265702,-2.049912 52.2653,-2.049183 52.265186,-2.048167 52.264985,-2.048042 52.264947,-2.047824 52.264865,-2.047609 52.264775,-2.04704 52.264509,-2.046443 52.26428,-2.046032 52.264164,-2.04578 52.264114,-2.045098 52.263958,-2.044804 52.263875,-2.044171 52.263675,-2.043079 52.263288,-2.041948 52.262863,-2.040945 52.262503,-2.040575 52.262352,-2.040183 52.262177,-2.040023 52.262067,-2.039769 52.261928,-2.039461 52.261783,-2.038694 52.26147,-2.036469 52.260611,-2.035455 52.260186,-2.032303 52.258986,-2.030577 52.258311,-2.027489 52.257155,-2.026896 52.256947,-2.02659 52.256844,-2.026198 52.256724,-2.02562 52.256556,-2.025304 52.256473,-2.024781 52.256343,-2.022064 52.255722,-2.021195 52.255506,-2.02057 52.255342,-2.020223 52.255241,-2.01952 52.254997,-2.019093 52.254829,-2.018639 52.254628,-2.017926 52.254306,-2.017139 52.253933,-2.01591 52.253315,-2.014882 52.252767,-2.014409 52.253113,-2.014356 52.253125,-2.014104 52.253149,-2.013732 52.253224,-2.013712 52.253322,-2.013741 52.253391,-2.013744 52.253416,-2.013713 52.253546,-2.013652 52.25369,-2.013634 52.253755,-2.013501 52.25405,-2.013463 52.254048,-2.013403 52.254201,-2.013282 52.254588,-2.013248 52.254836,-2.013153 52.25516,-2.013133 52.255403,-2.013234 52.255501,-2.013272 52.255502,-2.013348 52.255523,-2.013531 52.255657,-2.013751 52.256016,-2.013817 52.25617,-2.013782 52.25648,-2.013789 52.256555,-2.013582 52.257011,-2.013461 52.25715,-2.013394 52.257278,-2.013359 52.25733,-2.013034 52.257735,-2.01334 52.257619,-2.013313 52.257656,-2.013385 52.257629,-2.013338 52.25768,-2.013151 52.257941,-2.013092 52.258006,-2.013095 52.258002,-2.013036 52.258,-2.01289 52.257954,-2.012852 52.257965,-2.012765 52.258104,-2.012738 52.258224,-2.012724 52.258248,-2.012591 52.258394,-2.0126 52.258479,-2.012552 52.258562,-2.012559 52.258645,-2.01241 52.258817,-2.012306 52.258914,-2.012231 52.258966,-2.012228 
52.258983,-2.012253 52.259027,-2.012251 52.259043,-2.012234 52.259088,-2.012133 52.259274,-2.011988 52.259383,-2.011896 52.259463,-2.011451 52.259926,-2.011405 52.259997,-2.011409 52.260046,-2.011453 52.260145,-2.011436 52.260186,-2.011425 52.260246,-2.011444 52.260498,-2.01148 52.260613,-2.011548 52.26077,-2.01151 52.260964,-2.011516 52.261016,-2.011506 52.261126,-2.011521 52.261154,-2.011553 52.261272,-2.011532 52.261312,-2.011534 52.261374,-2.011552 52.261594,-2.011579 52.261796,-2.011581 52.261883,-2.011565 52.262038,-2.011526 52.262265,-2.011487 52.262351,-2.011412 52.262482,-2.01128 52.262589,-2.011262 52.262614,-2.011249 52.262674,-2.011248 52.262794,-2.011276 52.262881,-2.011426 52.262932,-2.011869 52.262983,-2.0121 52.263027,-2.012183 52.263056,-2.012175 52.263235,-2.012164 52.263313,-2.012164 52.263431,-2.012205 52.263667,-2.012152 52.263722,-2.012147 52.263746,-2.012163 52.263892,-2.012217 52.26404,-2.012227 52.264166,-2.01226 52.264297,-2.012264 52.264371,-2.012275 52.264403,-2.012591 52.264325,-2.013001 52.264177,-2.013054 52.264169,-2.013158 52.264179,-2.013187 52.264193,-2.01322 52.264214,-2.013286 52.2643,-2.013296 52.264519,-2.013334 52.264649,-2.013388 52.26472,-2.013477 52.264903,-2.013668 52.265134,-2.01372 52.265216,-2.013916 52.26542,-2.013923 52.265454,-2.013918 52.265566,-2.013993 52.265666,-2.01398 52.265676,-2.014 52.2657,-2.013966 52.265717,-2.013893 52.265737,-2.013819 52.265789,-2.013783 52.265824,-2.013754 52.265836,-2.013514 52.26591,-2.013339 52.265986,-2.013267 52.266036,-2.013246 52.266113,-2.013482 52.266265,-2.014269 52.266514,-2.014445 52.266553,-2.014498 52.266558,-2.014603 52.266586,-2.014808 52.266687,-2.014899 52.266748,-2.015227 52.267016,-2.015368 52.267141,-2.015425 52.267207,-2.015453 52.267277,-2.015451 52.26732,-2.015388 52.267461,-2.015382 52.26766,-2.015391 52.267671,-2.015378 52.267725,-2.015383 52.267764,-2.015418 52.267838,-2.015395 52.267873,-2.01541 52.268023,-2.015456 52.268177,-2.015443 52.268264,-2.015526 
52.268443,-2.015562 52.268655,-2.015519 52.268767,-2.015499 52.269305,-2.015553 52.269538,-2.015538 52.269609,-2.015556 52.269713,-2.01555 52.269962,-2.015538 52.27001,-2.0153 52.270031,-2.014998 52.270037,-2.014797 52.270031,-2.014631 52.270046,-2.014363 52.270089,-2.014171 52.270104,-2.014101 52.270119,-2.013838 52.270183,-2.013746 52.270228,-2.013726 52.270357,-2.013701 52.270446,-2.013656 52.270462,-2.013412 52.27048,-2.013175 52.270473,-2.012945 52.270455,-2.01268 52.27042,-2.012137 52.270535,-2.012061 52.270569,-2.011535 52.270887,-2.011513 52.270908,-2.011293 52.2709,-2.011173 52.270907,-2.010471 52.270896,-2.009828 52.270896,-2.009388 52.270761,-2.008923 52.270602,-2.008777 52.270568,-2.008708 52.270523,-2.008582 52.270486,-2.008516 52.270485,-2.008015 52.270407,-2.007955 52.270408,-2.007854 52.270424,-2.007763 52.270422,-2.007177 52.270265,-2.007111 52.270269,-2.006789 52.270324,-2.006608 52.270378,-2.006566 52.270406,-2.006532 52.27045,-2.006443 52.270494,-2.006359 52.27055,-2.006148 52.270706,-2.006078 52.270737,-2.005848 52.270595,-2.005266 52.27109,-2.004388 52.271857,-2.004221 52.272012,-2.003228 52.273624,-2.003955 52.273842,-2.004791 52.274226,-2.00569 52.274575,-2.005878 52.27466,-2.005977 52.274711,-2.006112 52.274797,-2.006391 52.275022,-2.006553 52.275136,-2.007052 52.275373,-2.007026 52.275451,-2.006992 52.275478,-2.006867 52.275564,-2.006796 52.275602,-2.006716 52.275662,-2.006684 52.275663,-2.006665 52.275673,-2.006399 52.27567,-2.006143 52.275652,-2.00583 52.275642,-2.005412 52.275662,-2.004867 52.275877,-2.003809 52.276317,-2.003539 52.276423,-2.003186 52.276559,-2.0025 52.276792,-2.002582 52.276892,-2.002702 52.277066,-2.002837 52.277292,-2.002938 52.277488,-2.00311 52.277767,-2.003336 52.27801,-2.003635 52.278297,-2.003917 52.278591,-2.003909 52.278597,-2.003919 52.278608,-2.003466 52.278914,-2.002685 52.279416,-2.002505 52.279545,-2.002665 52.279522,-2.002815 52.279485,-2.003018 52.279448,-2.003227 52.279393,-2.003568 52.27932,-2.003733 
52.279257,-2.003921 52.279229,-2.003987 52.279247,-2.004053 52.279305,-2.004126 52.279349,-2.004596 52.2795,-2.004716 52.27951,-2.004758 52.279494,-2.004825 52.279436,-2.005009 52.279218,-2.005023 52.279182,-2.005025 52.279114,-2.005007 52.279066,-2.005708 52.278795,-2.006203 52.278545,-2.006307 52.278485,-2.006395 52.27849,-2.006579 52.278526,-2.006643 52.278527,-2.006733 52.278553,-2.007261 52.278589,-2.007947 52.278518,-2.007983 52.278578,-2.007989 52.278618,-2.008017 52.278654,-2.008122 52.2786,-2.008213 52.278569,-2.00837 52.278477,-2.008906 52.278106,-2.009038 52.278075,-2.009076 52.278056,-2.009161 52.278046,-2.009232 52.278059,-2.009298 52.278084,-2.009384 52.278138,-2.009466 52.278171,-2.009536 52.278215,-2.00956 52.278221,-2.009602 52.278257,-2.009929 52.27846,-2.010008 52.278489,-2.010053 52.278517,-2.010259 52.278665,-2.010362 52.278729,-2.01093 52.27854,-2.011356 52.278422,-2.011701 52.278353,-2.011758 52.278355,-2.011893 52.278383,-2.012173 52.278412,-2.012446 52.278425,-2.01361 52.278473,-2.01438 52.278494,-2.014757 52.278519,-2.014887 52.278492,-2.015058 52.278502,-2.015488 52.278567,-2.015719 52.278582,-2.016674 52.27877,-2.016983 52.27882,-2.017044 52.278812,-2.017095 52.278816,-2.017218 52.278855,-2.017597 52.278908,-2.017876 52.278989,-2.018143 52.279011,-2.018843 52.279148,-2.018915 52.279164,-2.019167 52.279254,-2.019332 52.279303,-2.019723 52.279407,-2.020041 52.279527,-2.02023 52.279396,-2.020339 52.279157,-2.020442 52.279098,-2.020473 52.279097,-2.020564 52.279035,-2.020644 52.278855,-2.020841 52.278755,-2.020919 52.278664,-2.020937 52.278666,-2.020994 52.278719,-2.021047 52.278732,-2.021128 52.2787,-2.021161 52.27866,-2.021196 52.278644,-2.021301 52.278621,-2.021382 52.278565,-2.021611 52.278498,-2.021806 52.27849,-2.022201 52.278402,-2.022338 52.278355,-2.022439 52.278336,-2.02247 52.278316,-2.022581 52.278325,-2.022653 52.278307,-2.022738 52.278363,-2.022885 52.278411,-2.02291 52.278408,-2.023106 52.278325,-2.023263 52.278086,-2.023281 
52.278113,-2.023313 52.278054,-2.023329 52.278047,-2.023387 52.278197,-2.023396 52.278284,-2.023461 52.278389,-2.023499 52.2784,-2.023599 52.27839,-2.023628 52.278408,-2.023625 52.278473,-2.023613 52.278497,-2.023587 52.278521,-2.023531 52.278549,-2.023433 52.278577,-2.023421 52.278642,-2.023618 52.278658,-2.02366 52.278656,-2.023742 52.278634,-2.023813 52.278631,-2.023826 52.278634,-2.023836 52.278655,-2.023825 52.278679,-2.023826 52.278699,-2.023918 52.27873,-2.023985 52.27872,-2.024014 52.278733,-2.024014 52.278762,-2.02397 52.278819,-2.023951 52.278832,-2.023897 52.278834,-2.023885 52.278846,-2.023917 52.278925,-2.023964 52.278989,-2.023957 52.27903,-2.023965 52.279057,-2.024024 52.279158,-2.024026 52.279195,-2.024008 52.279231,-2.024014 52.279279,-2.023961 52.279298,-2.023959 52.279319,-2.023992 52.279443,-2.024086 52.279561,-2.024209 52.279544,-2.024224 52.279567,-2.024263 52.279563,-2.024282 52.279579,-2.024296 52.279612,-2.024296 52.279671,-2.0244 52.279734,-2.024476 52.279822,-2.024447 52.27986,-2.024444 52.279878,-2.024457 52.279889,-2.024497 52.279896,-2.024521 52.279909,-2.024516 52.279989,-2.02456 52.27998,-2.024615 52.279987,-2.024705 52.280015,-2.024705 52.280035,-2.024664 52.280052,-2.02464 52.280078,-2.024678 52.280113,-2.02468 52.280126,-2.024725 52.280153,-2.024743 52.280172,-2.024799 52.280183,-2.024835 52.280202,-2.02484 52.280218,-2.024827 52.280298,-2.024832 52.280368,-2.024856 52.280376,-2.025076 52.280357,-2.025243 52.28033,-2.02595 52.280616,-2.026153 52.280647,-2.026219 52.280641,-2.02631 52.280712,-2.026505 52.280822,-2.026628 52.28096,-2.026869 52.280978,-2.027077 52.280958,-2.0273 52.280952,-2.02748 52.28096,-2.027622 52.280994,-2.027729 52.281031,-2.027948 52.281142,-2.028159 52.281188,-2.02822 52.281275,-2.028297 52.281338,-2.028338 52.281356,-2.028407 52.281356,-2.028442 52.281365,-2.02852 52.281444,-2.028578 52.281491,-2.028646 52.28153,-2.028874 52.281633,-2.029714 52.282096,-2.030078 52.282355,-2.030281 52.282461,-2.030501 
52.282534,-2.030618 52.282552,-2.030967 52.282554,-2.031084 52.282538,-2.031442 52.282521,-2.031706 52.282521,-2.031773 52.282526,-2.03184 52.282549,-2.032201 52.282622,-2.032623 52.282664,-2.032788 52.282637,-2.033123 52.282637,-2.033135 52.282663,-2.033159 52.282664,-2.033206 52.282747,-2.033229 52.282837,-2.033331 52.282878,-2.033503 52.282831,-2.033858 52.282644,-2.033854 52.282634,-2.033868 52.282638,-2.033895 52.282624,-2.034407 52.282818,-2.034377 52.282837,-2.034396 52.28285,-2.034087 52.283053,-2.033945 52.283161,-2.034116 52.283238,-2.034345 52.283292,-2.034774 52.283439,-2.034812 52.283467,-2.035173 52.283597,-2.035322 52.283676,-2.035409 52.283757,-2.035388 52.283766,-2.035461 52.283847,-2.035463 52.283868,-2.035482 52.283898,-2.035579 52.284148,-2.035655 52.284296,-2.035982 52.284204,-2.035995 52.284212,-2.036023 52.284203,-2.036092 52.284259,-2.03723 52.28491,-2.037793 52.285224,-2.037847 52.28523,-2.037851 52.285241,-2.038491 52.285625,-2.039867 52.286008,-2.040465 52.286181,-2.040725 52.286264,-2.041175 52.286364,-2.041472 52.286455,-2.041636 52.28652,-2.042654 52.28707,-2.043018 52.287244,-2.043205 52.287312,-2.043435 52.287379,-2.044189 52.28758,-2.044492 52.287647,-2.045021 52.287711,-2.045283 52.287734,-2.045534 52.287771,-2.046079 52.287795,-2.046641 52.287796,-2.046863 52.287806,-2.047014 52.287823,-2.047079 52.287838,-2.047852 52.2877,-2.047892 52.28764,-2.048324 52.287533,-2.04886 52.287309,-2.048904 52.287313,-2.04895 52.287295,-2.048958 52.287316,-2.048966 52.287316,-2.049629 52.287195,-2.050866 52.287057,-2.051209 52.286957,-2.052545 52.286378,-2.052929 52.286177,-2.053228 52.28597,-2.053522 52.285785,-2.053954 52.285492,-2.054364 52.285265,-2.054472 52.285171,-2.054576 52.285133,-2.054587 52.285142,-2.055054 52.284908,-2.05549 52.284742,-2.055516 52.284719,-2.055515 52.284704,-2.05555 52.28469,-2.056196 52.284552,-2.056796 52.28444,-2.05751 52.284337,-2.057668 52.284298,-2.05782 52.284279,-2.058061 52.284275,-2.058129 52.284282,-2.058266 
52.284314,-2.058447 52.284384,-2.058609 52.284383,-2.059424 52.284297,-2.059726 52.284239,-2.059722 52.284232,-2.060016 52.284168,-2.060405 52.284103,-2.060679 52.284171,-2.06085 52.284176,-2.060935 52.284155,-2.061002 52.284219,-2.061398 52.284438,-2.061905 52.284742,-2.062286 52.284864,-2.062883 52.284939,-2.063442 52.285044,-2.063825 52.285177,-2.064109 52.28526,-2.067122 52.286288,-2.06727 52.286325,-2.06777 52.286416,-2.068365 52.286557,-2.070442 52.287148,-2.070643 52.287224,-2.070829 52.287278,-2.071135 52.287398,-2.071137 52.287417,-2.071155 52.287426,-2.071139 52.287446,-2.071147 52.287524,-2.071307 52.287637,-2.071509 52.287721,-2.071695 52.287777,-2.071703 52.287769,-2.073369 52.288352,-2.074205 52.288621,-2.074508 52.288679,-2.075064 52.288727,-2.075535 52.288834,-2.075774 52.288904,-2.076256 52.289002,-2.076744 52.289064,-2.077055 52.289078,-2.078275 52.289072,-2.078724 52.289081,-2.079201 52.28905,-2.079657 52.288989,-2.080043 52.288919,-2.080861 52.288811,-2.081174 52.288777,-2.08146 52.28878,-2.081625 52.288793,-2.081854 52.288827,-2.082291 52.288854,-2.082956 52.288852,-2.083308 52.288815,-2.0843 52.288846,-2.085219 52.288895,-2.085807 52.288953,-2.086436 52.288991,-2.087592 52.289034,-2.088 52.289104,-2.088219 52.289151,-2.088924 52.28933,-2.089143 52.289416,-2.089221 52.289467,-2.08934 52.289473,-2.089449 52.289454,-2.089574 52.289408,-2.089747 52.28938,-2.089879 52.289347,-2.090188 52.289235,-2.090398 52.289134,-2.090905 52.288765,-2.091065 52.288682,-2.09121 52.288634,-2.091697 52.288546,-2.09169 52.288555,-2.091761 52.288544,-2.09167 52.288663,-2.091704 52.288665,-2.091599 52.288817,-2.091037 52.289604,-2.090062 52.29094,-2.08985 52.291267,-2.089621 52.291537,-2.089496 52.291702,-2.089538 52.29171,-2.089489 52.29175,-2.089442 52.291806,-2.089307 52.291815,-2.088881 52.292379,-2.087762 52.293936,-2.087498 52.294289,-2.086921 52.294974,-2.086501 52.29543,-2.086097 52.295818,-2.085506 52.296304,-2.085518 52.296388,-2.085349 52.296586,-2.084887 
52.296939,-2.084896 52.296977,-2.084922 52.296957,-2.08505 52.297304,-2.085253 52.29775,-2.085339 52.297897,-2.085471 52.298248,-2.08561 52.298529,-2.085647 52.298691,-2.086319 52.298609,-2.086429 52.29893,-2.086468 52.298927,-2.086526 52.299067,-2.086638 52.299185,-2.087165 52.300618,-2.087247 52.300612,-2.087466 52.301197,-2.087445 52.301203,-2.087646 52.301548,-2.087754 52.301717,-2.087778 52.301713,-2.087846 52.301842,-2.087453 52.301933,-2.087575 52.302147,-2.08768 52.302359,-2.088059 52.302315,-2.088586 52.303393,-2.0886 52.303445,-2.088639 52.30349,-2.088184 52.303642,-2.088 52.303716,-2.087934 52.303726,-2.087872 52.303718,-2.087965 52.303825,-2.088306 52.303689,-2.088368 52.303689,-2.088613 52.303651,-2.089002 52.303535,-2.089168 52.303474,-2.089415 52.303412,-2.089627 52.303376,-2.08968 52.303381,-2.089731 52.303398,-2.089808 52.30346,-2.090061 52.303576,-2.090177 52.303638,-2.09024 52.303682,-2.090264 52.303716,-2.090305 52.303817,-2.090353 52.303896,-2.090424 52.304772,-2.090411 52.304924,-2.090368 52.304919,-2.090054 52.305007,-2.089957 52.305056,-2.089914 52.305133,-2.089795 52.305256,-2.089661 52.305412,-2.08957 52.30544,-2.089398 52.305612,-2.089353 52.305671,-2.089157 52.305807,-2.089142 52.305801,-2.08903 52.305938,-2.088884 52.306162,-2.088746 52.306409,-2.088712 52.306486,-2.088691 52.306593,-2.088621 52.306803,-2.088535 52.306999,-2.088504 52.307051,-2.088412 52.307428,-2.088075 52.308255,-2.088001 52.30846,-2.08795 52.308631,-2.087737 52.308893,-2.087422 52.3094,-2.087301 52.309685,-2.087312 52.309752,-2.087305 52.309787,-2.087246 52.309884,-2.087184 52.309943,-2.0871 52.310041,-2.08704 52.310142,-2.086968 52.31034,-2.086917 52.310438,-2.086682 52.310766,-2.086573 52.310978,-2.086508 52.311069,-2.086327 52.311251,-2.086261 52.311301,-2.085715 52.311598,-2.085498 52.311817,-2.08541 52.311928,-2.085356 52.312024,-2.085283 52.312273,-2.08525 52.312691,-2.085369 52.312772,-2.085511 52.312892,-2.085635 52.312967,-2.085903 52.313191,-2.085792 
52.313234,-2.085779 52.313248,-2.085634 52.313303,-2.085483 52.313373,-2.085421 52.313383,-2.085171 52.313484,-2.085278 52.313556,-2.085376 52.313642,-2.085635 52.313981,-2.085685 52.314011,-2.085748 52.314078,-2.085976 52.314252,-2.086179 52.314389,-2.086336 52.314511,-2.087168 52.314448,-2.088603 52.314282,-2.090353 52.314114,-2.091637 52.313925,-2.09385 52.313566,-2.09511 52.313352,-2.095063 52.313396,-2.095114 52.31339,-2.095021 52.313455,-2.09498 52.313493,-2.094943 52.313623,-2.094876 52.313814,-2.094887 52.313851,-2.09486 52.313915,-2.094872 52.313916,-2.094711 52.314213,-2.0946 52.314373,-2.094547 52.314502,-2.094506 52.314562,-2.094396 52.314662,-2.094283 52.314789,-2.094096 52.315121,-2.094117 52.315247,-2.094077 52.315289,-2.09411 52.315364,-2.094138 52.3154,-2.094279 52.315329,-2.095189 52.314764,-2.095501 52.314583,-2.095649 52.314529,-2.096035 52.314452,-2.096696 52.314342,-2.09697 52.314221,-2.097872 52.313866,-2.098594 52.313625,-2.099012 52.313523,-2.099169 52.313505,-2.099232 52.313487,-2.099232 52.313467,-2.099424 52.313476,-2.100191 52.31342,-2.100623 52.313402,-2.101965 52.313385,-2.102478 52.313468,-2.102733 52.313542,-2.103398 52.313667,-2.105261 52.313981,-2.105966 52.314135,-2.106211 52.314167,-2.106413 52.314209,-2.107741 52.314432,-2.108032 52.314465,-2.10861 52.314513,-2.109551 52.314558,-2.110031 52.314569,-2.111369 52.314531,-2.112306 52.314414,-2.113812 52.313707,-2.114038 52.313612,-2.114489 52.313444,-2.114296 52.313163,-2.114314 52.313158,-2.114308 52.31315,-2.115067 52.313092,-2.115593 52.313089,-2.117168 52.313107,-2.11744 52.313136,-2.118038 52.31324,-2.119347 52.313587,-2.119356 52.313577,-2.119987 52.313738,-2.120187 52.313565,-2.120199 52.313568,-2.120218 52.313551,-2.120894 52.313779,-2.121083 52.313832,-2.121394 52.313953,-2.122043 52.314135,-2.122291 52.314189,-2.123042 52.314326,-2.12384 52.314433,-2.124091 52.314483,-2.123785 52.314935,-2.123592 52.315188,-2.123448 52.31532,-2.123084 52.315622,-2.122828 
52.315855,-2.122568 52.316112,-2.122506 52.316178,-2.122463 52.316241,-2.122313 52.316491,-2.122203 52.316615,-2.122052 52.316746,-2.1219 52.316853,-2.121795 52.316916,-2.121592 52.317011,-2.121536 52.317025,-2.121396 52.317086,-2.121146 52.317227,-2.120886 52.317362,-2.120798 52.317433,-2.120798 52.317469,-2.120749 52.317496,-2.120662 52.317512,-2.120535 52.317578,-2.120101 52.317836,-2.119744 52.318092,-2.11965 52.318187,-2.119598 52.318265,-2.119505 52.318373,-2.118967 52.318735,-2.118809 52.31886,-2.118619 52.319043,-2.1185 52.31918,-2.11839 52.319273,-2.118112 52.319549,-2.118039 52.319611,-2.117644 52.320018,-2.117261 52.320476,-2.117036 52.320778,-2.116937 52.320929,-2.116827 52.321063,-2.116742 52.321188,-2.116534 52.321428,-2.116464 52.32152,-2.116384 52.321592,-2.116 52.321856,-2.115902 52.321942,-2.115765 52.322085,-2.116078 52.322157,-2.116434 52.322224,-2.116631 52.322271,-2.117004 52.322374,-2.117281 52.322427,-2.117917 52.322589,-2.118185 52.322664,-2.118394 52.322734,-2.117899 52.323001,-2.117947 52.323041,-2.118041 52.323157,-2.118106 52.323314,-2.118155 52.323591,-2.118218 52.323777,-2.11824 52.323921,-2.118233 52.324028,-2.118283 52.324157,-2.118291 52.324285,-2.118346 52.3244,-2.118501 52.32444,-2.118493 52.324518,-2.118501 52.324587,-2.118564 52.324878,-2.118597 52.325156,-2.118346 52.325172,-2.117312 52.325162,-2.117316 52.325252,-2.117441 52.325744,-2.117491 52.326015,-2.117604 52.326394,-2.117641 52.326557,-2.11758 52.326613,-2.11748 52.326651,-2.117369 52.326711,-2.117307 52.32677,-2.117239 52.32686,-2.117114 52.327112,-2.117055 52.327253,-2.11662 52.3271,-2.116181 52.327087,-2.116224 52.3273,-2.116294 52.327498,-2.116278 52.327558,-2.116291 52.327653,-2.116331 52.327785,-2.116384 52.327897,-2.116542 52.328152,-2.116882 52.32857,-2.117014 52.328778,-2.117264 52.329278,-2.117316 52.329461,-2.117354 52.329645,-2.117384 52.329733,-2.117488 52.329972,-2.117758 52.330416,-2.11811 52.330938,-2.118206 52.331134,-2.118226 52.331141,-2.118225 
52.331165,-2.118358 52.331372,-2.118363 52.331398,-2.118379 52.331397,-2.118375 52.331488,-2.118385 52.331538,-2.118472 52.331748,-2.118489 52.331769,-2.118546 52.331778,-2.118562 52.332037,-2.118548 52.332168,-2.11853 52.332208,-2.118422 52.332358,-2.11833 52.33252,-2.118086 52.332969,-2.11798 52.333191,-2.117965 52.333239,-2.117942 52.333486,-2.117849 52.333487,-2.117661 52.334663,-2.117608 52.335096,-2.117569 52.335274,-2.117546 52.335273,-2.11754 52.335316,-2.117373 52.335171,-2.11724 52.335168,-2.117218 52.335205,-2.117228 52.335228,-2.117228 52.335275,-2.117221 52.335299,-2.117201 52.335321,-2.117154 52.335325,-2.117098 52.335318,-2.117017 52.335328,-2.116976 52.335343,-2.11694 52.335381,-2.116868 52.335416,-2.11683 52.335413,-2.116804 52.335399,-2.116698 52.335407,-2.116693 52.335414,-2.116703 52.335473,-2.116696 52.335492,-2.116664 52.33551,-2.116478 52.335524,-2.116427 52.335519,-2.116249 52.335525,-2.11623 52.335505,-2.116232 52.335489,-2.11611 52.335417,-2.116068 52.335412,-2.116055 52.335402,-2.115825 52.335409,-2.115742 52.335429,-2.115672 52.335421,-2.115665 52.33528,-2.11546 52.335294,-2.115409 52.335285,-2.115186 52.335294,-2.115175 52.33529,-2.115169 52.335271,-2.115181 52.335243,-2.115239 52.335213,-2.115256 52.335194,-2.115236 52.335114,-2.115098 52.335147,-2.114971 52.335258,-2.114736 52.335307,-2.114681 52.335345,-2.11464 52.335391,-2.114588 52.335417,-2.114543 52.335468,-2.114559 52.335485,-2.114614 52.335516,-2.114636 52.335543,-2.114639 52.335576,-2.114615 52.335609,-2.114564 52.33562,-2.114479 52.335593,-2.114428 52.335593,-2.114381 52.335646,-2.114379 52.335662,-2.11436 52.335692,-2.114278 52.335773,-2.114223 52.33585,-2.11419 52.335861,-2.11412 52.335866,-2.114055 52.335902,-2.114044 52.335938,-2.113947 52.335956,-2.113757 52.336072,-2.113726 52.336142,-2.113689 52.336195,-2.113704 52.336237,-2.113698 52.336268,-2.113685 52.33628,-2.113647 52.336287,-2.113596 52.33628,-2.113521 52.336304,-2.113583 52.336368,-2.113633 52.336388,-2.113644 
52.336412,-2.113481 52.33659,-2.113475 52.336669,-2.113491 52.336691,-2.113487 52.336715,-2.113456 52.336739,-2.11339 52.336745,-2.113322 52.336783,-2.113356 52.336805,-2.113431 52.336821,-2.113468 52.336837,-2.11348 52.336849,-2.11348 52.336868,-2.113464 52.336896,-2.113414 52.336903,-2.11334 52.336901,-2.113275 52.336939,-2.113295 52.336954,-2.113388 52.33697,-2.113404 52.336989,-2.113401 52.337003,-2.113347 52.337045,-2.11329 52.337147,-2.113485 52.337307,-2.113653 52.337381,-2.113698 52.337428,-2.113746 52.337493,-2.113884 52.337566,-2.113909 52.337594,-2.113941 52.337653,-2.113966 52.337673,-2.114067 52.337718,-2.114145 52.337727,-2.114178 52.337746,-2.11419 52.337832,-2.114212 52.337898,-2.114217 52.337987,-2.114233 52.338019,-2.114222 52.338057,-2.114226 52.338119,-2.114251 52.338195,-2.114291 52.338236,-2.114311 52.338326,-2.114311 52.338398,-2.114303 52.338432,-2.114275 52.338475,-2.114226 52.338511,-2.114293 52.338524,-2.114817 52.338569,-2.115906 52.338688,-2.116485 52.338732,-2.116374 52.338905,-2.116333 52.339069,-2.116337 52.339161,-2.116373 52.339164,-2.116371 52.339209,-2.116426 52.339287,-2.11638 52.339683,-2.116341 52.339791,-2.116303 52.339857,-2.116229 52.339953,-2.116096 52.340166,-2.11678 52.340098,-2.116871 52.340364,-2.116961 52.340546,-2.117126 52.340935,-2.117432 52.341606,-2.117443 52.341691,-2.117415 52.341729,-2.117385 52.341744,-2.117268 52.341752,-2.117037 52.341785,-2.117002 52.341924,-2.117105 52.341972,-2.117333 52.342054,-2.118257 52.342166,-2.118881 52.342171,-2.118937 52.342077,-2.118984 52.342032,-2.119057 52.341923,-2.119173 52.341702,-2.119199 52.341644,-2.119268 52.341429,-2.119387 52.34142,-2.119643 52.34134,-2.119882 52.341209,-2.120199 52.341051,-2.120634 52.340809,-2.121048 52.340624,-2.120996 52.340551,-2.120897 52.34045,-2.121397 52.340281,-2.121708 52.340214,-2.121866 52.340194,-2.12312 52.340106,-2.123918 52.34006,-2.123903 52.340207,-2.124721 52.340227,-2.124719 52.341006,-2.124758 52.341979,-2.123875 
52.342144,-2.122818 52.342358,-2.122811 52.342383,-2.122819 52.342411,-2.122897 52.342504,-2.123337 52.342965,-2.123454 52.343075,-2.12361 52.343154,-2.123718 52.34324,-2.123991 52.343389,-2.124168 52.343458,-2.124411 52.34352,-2.124646 52.343606,-2.124852 52.343706,-2.125255 52.343882,-2.125366 52.343948,-2.125388 52.343981,-2.125391 52.344009,-2.125449 52.344109,-2.125655 52.344338,-2.125871 52.34456,-2.126069 52.344858,-2.126167 52.344971,-2.126248 52.345095,-2.126334 52.345199,-2.126366 52.345279,-2.126384 52.345361,-2.126381 52.345506,-2.126389 52.345539,-2.126779 52.345507,-2.127042 52.345475,-2.127229 52.345471,-2.127828 52.345479,-2.128443 52.345499,-2.128745 52.345543,-2.128808 52.345568,-2.129027 52.345606,-2.129128 52.345656,-2.129192 52.345675,-2.129368 52.345771,-2.130014 52.346279,-2.130254 52.346498,-2.130533 52.34668,-2.130844 52.346852,-2.131172 52.34695,-2.131444 52.347043,-2.13144 52.347055,-2.131477 52.347065,-2.131341 52.347236,-2.131021 52.347695,-2.130887 52.347865,-2.13086 52.347931,-2.130776 52.348005,-2.130291 52.348599,-2.129911 52.349107,-2.129556 52.34962,-2.129539 52.34968,-2.129483 52.349796,-2.129311 52.350279,-2.129222 52.350434,-2.129199 52.350498,-2.129157 52.350548,-2.129092 52.350662,-2.129052 52.350669,-2.12902 52.350716,-2.128995 52.350709,-2.128874 52.350863,-2.128699 52.351036,-2.128599 52.351115,-2.128469 52.351209,-2.128008 52.351516,-2.127638 52.351729,-2.127536 52.351798,-2.127331 52.35196,-2.127747 52.35223,-2.128179 52.352209,-2.128336 52.352217,-2.12859 52.352213,-2.128747 52.352224,-2.129133 52.352198,-2.129363 52.352099,-2.129477 52.352079,-2.12979 52.351987,-2.130048 52.351787,-2.130735 52.351314,-2.131491 52.350616,-2.131605 52.350491,-2.131622 52.350496,-2.131775 52.350338,-2.131806 52.350284,-2.132012 52.350102,-2.132009 52.350091,-2.132111 52.349876,-2.132377 52.349221,-2.132424 52.349067,-2.132484 52.348928,-2.132765 52.348159,-2.133023 52.347998,-2.133131 52.348034,-2.133156 52.348025,-2.133893 
52.348442,-2.13469 52.348882,-2.135149 52.349076,-2.135641 52.349317,-2.135723 52.349369,-2.136057 52.349531,-2.136129 52.349545,-2.136206 52.349579,-2.136264 52.349595,-2.136565 52.349735,-2.137112 52.349942,-2.137173 52.349921,-2.137261 52.349935,-2.137514 52.349922,-2.137672 52.349843,-2.138014 52.349513,-2.138083 52.349432,-2.138199 52.349255,-2.138278 52.349111,-2.138421 52.348883,-2.138595 52.3487,-2.138891 52.348503,-2.139245 52.348351,-2.139602 52.348288,-2.139885 52.348127,-2.140037 52.3481,-2.140074 52.34808,-2.140119 52.348041,-2.140146 52.348031,-2.140204 52.348031,-2.140268 52.348016,-2.140483 52.34802,-2.141926 52.347937,-2.142056 52.347918,-2.142103 52.347669,-2.142113 52.347657,-2.142114 52.347581,-2.142129 52.347542,-2.142187 52.347471,-2.142315 52.347366,-2.142348 52.347324,-2.142421 52.347199,-2.142626 52.346985,-2.142704 52.346914,-2.142816 52.346874,-2.143331 52.346642,-2.143822 52.346436,-2.144303 52.346293,-2.144772 52.346065,-2.145786 52.345503,-2.146264 52.345228,-2.146698 52.345411,-2.146962 52.345505,-2.147218 52.345624,-2.148219 52.346181,-2.148509 52.34638,-2.148608 52.346416,-2.148864 52.346294,-2.148891 52.34627,-2.148974 52.346107,-2.148978 52.346081,-2.149007 52.346025,-2.14903 52.346006,-2.149039 52.345978,-2.149108 52.34594,-2.14916 52.345896,-2.149181 52.345844,-2.149203 52.345818,-2.149257 52.345777,-2.149298 52.345645,-2.149317 52.345612,-2.149374 52.345575,-2.149548 52.3454,-2.149759 52.345253,-2.149709 52.3452,-2.149683 52.345156,-2.149675 52.345096,-2.149764 52.345011,-2.14976 52.344998,-2.149779 52.344975,-2.149772 52.344967,-2.149701 52.344967,-2.149799 52.34473,-2.149823 52.344689,-2.149891 52.344633,-2.149984 52.344584,-2.150003 52.34454,-2.150047 52.344485,-2.150144 52.344423,-2.150375 52.344195,-2.150561 52.344025,-2.150953 52.343686,-2.151118 52.343234,-2.15117 52.342818,-2.151196 52.342705,-2.15123 52.34261,-2.151266 52.342542,-2.151337 52.342437,-2.151588 52.342507,-2.151707 52.342175,-2.151792 52.341803,-2.151815 
52.341739,-2.151839 52.341705,-2.151912 52.341632,-2.151961 52.341521,-2.151947 52.341479,-2.151954 52.341336,-2.151931 52.341175,-2.151925 52.341055,-2.151935 52.34101,-2.15201 52.34082,-2.151978 52.340706,-2.151973 52.340636,-2.15199 52.340528,-2.151986 52.340446,-2.152005 52.340384,-2.152052 52.340328,-2.152141 52.340091,-2.152411 52.339611,-2.152532 52.33923,-2.152599 52.338781,-2.152571 52.338687,-2.152361 52.338695,-2.152306 52.338704,-2.152303 52.338697,-2.152141 52.338125,-2.152111 52.337983,-2.152015 52.337841,-2.152023 52.337755,-2.151975 52.337663,-2.151921 52.337607,-2.151889 52.337587,-2.151896 52.337568,-2.151882 52.337481,-2.151865 52.337455,-2.15183 52.337423,-2.151824 52.337399,-2.151841 52.337305,-2.151995 52.337192,-2.151927 52.337036,-2.151925 52.33699,-2.151919 52.336985,-2.152022 52.336898,-2.152214 52.336713,-2.152252 52.336693,-2.152416 52.336642,-2.152624 52.336476,-2.152681 52.336407,-2.152807 52.336293,-2.15285 52.336239,-2.152925 52.336192,-2.153408 52.335725,-2.153758 52.335705,-2.153848 52.335684,-2.153882 52.335665,-2.154056 52.335496,-2.154262 52.335348,-2.154398 52.335207,-2.154458 52.335126,-2.154538 52.335043,-2.154581 52.334941,-2.154721 52.334768,-2.154731 52.334667,-2.154844 52.334455,-2.15482 52.334451,-2.154831 52.334421,-2.15482 52.334377,-2.154787 52.334351,-2.154813 52.334327,-2.154816 52.334309,-2.154831 52.33431,-2.154856 52.334279,-2.154961 52.334211,-2.155008 52.334142,-2.155018 52.334117,-2.155022 52.334047,-2.154978 52.333982,-2.154989 52.333945,-2.15501 52.333923,-2.155272 52.333816,-2.155285 52.333801,-2.155303 52.333735,-2.155366 52.333674,-2.155509 52.333581,-2.155527 52.333581,-2.155645 52.333636,-2.155684 52.333644,-2.155755 52.33364,-2.15583 52.333625,-2.155951 52.333567,-2.156103 52.333468,-2.156158 52.33344,-2.1562 52.33339,-2.156225 52.333349,-2.156251 52.333274,-2.15625 52.333219,-2.156205 52.333147,-2.156215 52.333124,-2.156364 52.332974,-2.156488 52.332837,-2.156512 52.332801,-2.156415 
52.332365,-2.156256 52.331969,-2.156245 52.331759,-2.156167 52.331527,-2.156074 52.331323,-2.156055 52.331098,-2.156072 52.331021,-2.156137 52.330972,-2.156224 52.331001,-2.15652 52.331045,-2.156667 52.331058,-2.156922 52.331058,-2.15715 52.331034,-2.157615 52.330952,-2.158323 52.330782,-2.159536 52.330378,-2.159914 52.330213,-2.160154 52.329931,-2.160362 52.329721,-2.160482 52.329617,-2.160716 52.329441,-2.161531 52.32878,-2.161775 52.32857,-2.161811 52.3285,-2.161991 52.328437,-2.162078 52.328382,-2.162136 52.32833,-2.16223 52.328273,-2.162497 52.328146,-2.162617 52.328049,-2.162683 52.327981,-2.162832 52.327858,-2.163012 52.327683,-2.163238 52.327495,-2.163504 52.327221,-2.163642 52.327119,-2.163685 52.327063,-2.163736 52.326959,-2.163755 52.326934,-2.163814 52.326878,-2.163904 52.326818,-2.163969 52.326719,-2.164095 52.32658,-2.164155 52.326499,-2.164205 52.326405,-2.164244 52.326308,-2.164321 52.326195,-2.164351 52.326137,-2.164361 52.326091,-2.164382 52.326064,-2.164382 52.326039,-2.16434 52.326017,-2.164461 52.32594,-2.164749 52.325733,-2.165003 52.325616,-2.1652 52.325552,-2.165764 52.325421,-2.165973 52.325348,-2.165961 52.325286,-2.166443 52.325076,-2.166552 52.325007,-2.166641 52.324964,-2.16693 52.324774,-2.16705 52.324727,-2.167042 52.324712,-2.167168 52.324656,-2.167251 52.32463,-2.167556 52.324508,-2.167583 52.324483,-2.167617 52.324433,-2.167637 52.324384,-2.167635 52.324333,-2.167643 52.324309,-2.167688 52.324207,-2.167747 52.324111,-2.167768 52.323804,-2.167766 52.323644,-2.167805 52.323279,-2.167736 52.323146,-2.167752 52.322988,-2.167681 52.322773,-2.167652 52.322712,-2.167619 52.322562,-2.167602 52.322261,-2.167597 52.321704,-2.167563 52.321542,-2.167577 52.321386,-2.167565 52.321306,-2.167577 52.321226,-2.167554 52.321062,-2.167527 52.320962,-2.167557 52.320904,-2.16755 52.320719,-2.167564 52.320559,-2.167516 52.320391,-2.167507 52.320309,-2.167524 52.320227,-2.167551 52.320153,-2.167613 52.320029,-2.167707 52.319887,-2.167871 
52.319691,-2.167837 52.319551,-2.167869 52.319431,-2.16787 52.319341,-2.167855 52.319293,-2.167868 52.319207,-2.167815 52.319076,-2.167829 52.318983,-2.16782 52.318983,-2.167827 52.318943,-2.167818 52.318923,-2.167807 52.318911,-2.167724 52.318873,-2.167593 52.31883,-2.167565 52.318812,-2.167534 52.31877,-2.167461 52.318567,-2.16719 52.318458,-2.167019 52.318429,-2.166805 52.318374,-2.166845 52.318353,-2.166812 52.318346,-2.166816 52.318337,-2.166873 52.31828,-2.167197 52.318118,-2.167337 52.318059,-2.167626 52.317981,-2.167828 52.317841,-2.167966 52.317805,-2.168157 52.31772,-2.168325 52.317613,-2.168364 52.317579,-2.168428 52.317485,-2.168522 52.317403,-2.168578 52.317366,-2.168703 52.317298,-2.168802 52.317259,-2.169007 52.317203,-2.16908 52.317187,-2.169202 52.317176,-2.169368 52.317183,-2.169461 52.31717,-2.169515 52.317156,-2.169651 52.317066,-2.169737 52.317055,-2.170013 52.317096,-2.170343 52.317102,-2.170484 52.317114,-2.170837 52.317174,-2.170985 52.31719,-2.171361 52.31727,-2.171475 52.317279,-2.171796 52.31728,-2.172001 52.317267,-2.172172 52.317218,-2.172637 52.317211,-2.172726 52.317221,-2.172919 52.317223,-2.17317 52.317205,-2.173231 52.317191,-2.173338 52.31709,-2.173571 52.316962,-2.173787 52.316803,-2.173968 52.316718,-2.174129 52.316665,-2.174429 52.316603,-2.174585 52.3166,-2.174595 52.316639,-2.174623 52.31664,-2.174671 52.31682,-2.17469 52.316962,-2.174903 52.317702,-2.174976 52.317861,-2.175098 52.318394,-2.175111 52.318512,-2.175105 52.318723,-2.175137 52.318864,-2.175171 52.318955,-2.175217 52.319173,-2.175312 52.319823,-2.175281 52.320017,-2.175092 52.320602,-2.174953 52.320846,-2.175157 52.320862,-2.175128 52.320925,-2.175161 52.320934,-2.175087 52.321118,-2.175078 52.321188,-2.175088 52.321285,-2.175086 52.321344,-2.175104 52.321455,-2.175203 52.321838,-2.175208 52.322077,-2.175234 52.322194,-2.175235 52.322248,-2.175192 52.322461,-2.175148 52.322798,-2.175018 52.323163,-2.174992 52.323374,-2.174922 52.323605,-2.174831 
52.323865,-2.174762 52.324124,-2.174732 52.324263,-2.174716 52.324379,-2.174706 52.324609,-2.174627 52.324921,-2.174568 52.325072,-2.174475 52.325273,-2.174345 52.32565,-2.17424 52.325909,-2.174163 52.326047,-2.174062 52.326181,-2.174041 52.326226,-2.174034 52.326264,-2.173781 52.326794,-2.173714 52.326955,-2.173621 52.327099,-2.17344 52.327322,-2.173443 52.327399,-2.173305 52.327594,-2.173265 52.327665,-2.173226 52.327711,-2.173188 52.327735,-2.173136 52.327797,-2.173132 52.327808,-2.17316 52.327839,-2.17306 52.327966,-2.172849 52.328103,-2.172617 52.32822,-2.172411 52.328262,-2.172314 52.328291,-2.172239 52.328334,-2.17219 52.328381,-2.172082 52.328536,-2.172051 52.328651,-2.172027 52.328704,-2.171989 52.328767,-2.171897 52.328879,-2.171729 52.329352,-2.171692 52.32948,-2.17162 52.329608,-2.171626 52.32961,-2.171556 52.329714,-2.171551 52.329734,-2.171225 52.330148,-2.171069 52.330395,-2.170947 52.330645,-2.170744 52.331426,-2.170772 52.331516,-2.170925 52.331642,-2.17109 52.331704,-2.171152 52.331716,-2.171202 52.331718,-2.171623 52.331686,-2.172261 52.331684,-2.172524 52.331692,-2.172665 52.331706,-2.172735 52.331707,-2.172817 52.331696,-2.172994 52.331713,-2.17322 52.331718,-2.173343 52.331713,-2.17342 52.331723,-2.17366 52.331728,-2.173927 52.331666,-2.17404 52.331648,-2.174143 52.331452,-2.174194 52.331397,-2.174286 52.331356,-2.174374 52.331343,-2.175252 52.331355,-2.175561 52.331392,-2.175703 52.331399,-2.175772 52.331424,-2.175827 52.331471,-2.176049 52.331552,-2.176318 52.331621,-2.176565 52.33166,-2.176894 52.331696,-2.177422 52.3317,-2.177705 52.331757,-2.177882 52.331756,-2.178003 52.331702,-2.178084 52.331638,-2.178135 52.331583,-2.178189 52.331555,-2.178264 52.331495,-2.178471 52.331432,-2.17858 52.331417,-2.178718 52.331377,-2.178949 52.331337,-2.179223 52.331255,-2.179737 52.331181,-2.180123 52.33114,-2.180877 52.3311,-2.181001 52.331085,-2.181238 52.331034,-2.181453 52.330966,-2.181606 52.330892,-2.182079 52.330633,-2.182259 52.330575,-2.182558 
52.330432,-2.182666 52.330371,-2.182889 52.330308,-2.182955 52.330296,-2.183352 52.330261,-2.183495 52.330224,-2.183557 52.330171,-2.183584 52.330159,-2.184212 52.330135,-2.184803 52.3301,-2.184958 52.330076,-2.185085 52.330038,-2.185572 52.329844,-2.185688 52.329784,-2.185824 52.329743,-2.18606 52.329691,-2.186245 52.32961,-2.186349 52.329575,-2.186471 52.329541,-2.186729 52.329489,-2.187065 52.329331,-2.187199 52.329225,-2.18732 52.32918,-2.187527 52.329169,-2.187637 52.329201,-2.187696 52.329209,-2.188117 52.329218,-2.188283 52.329183,-2.188438 52.329176,-2.189051 52.329076,-2.189045 52.329056,-2.189104 52.329048,-2.189338 52.329087,-2.189494 52.329083,-2.189647 52.329019,-2.189885 52.328899,-2.190011 52.328871,-2.190265 52.328875,-2.190386 52.328899,-2.190497 52.328932,-2.190507 52.329013,-2.190498 52.329242,-2.190519 52.329528,-2.190581 52.329874,-2.190581 52.329917,-2.190553 52.330558,-2.190472 52.330868,-2.190469 52.330901,-2.190495 52.331033,-2.190495 52.331106,-2.190352 52.33154,-2.190358 52.331598,-2.190444 52.331875,-2.19048 52.33195,-2.190545 52.332041,-2.190718 52.332205,-2.190914 52.332367,-2.191054 52.33253,-2.191102 52.332575,-2.191222 52.332684,-2.191465 52.332881,-2.191563 52.333012,-2.191623 52.333049,-2.191964 52.33316,-2.192041 52.333194,-2.192121 52.333247,-2.192186 52.333301,-2.192457 52.33345,-2.192489 52.333476,-2.192519 52.33351,-2.192538 52.33355,-2.192577 52.333665,-2.19265 52.333936,-2.192656 52.333982,-2.192629 52.33414,-2.192633 52.334196,-2.192594 52.334518,-2.192606 52.334542,-2.192672 52.334617,-2.192913 52.33478,-2.19298 52.3348,-2.193103 52.334792,-2.193148 52.334798,-2.193198 52.334821,-2.193209 52.334838,-2.193197 52.334894,-2.193146 52.334988,-2.193161 52.335029,-2.193232 52.335111,-2.193332 52.335182,-2.193382 52.335231,-2.19348 52.335262,-2.193494 52.335283,-2.193492 52.335308,-2.193485 52.335314,-2.193375 52.33535,-2.193394 52.335391,-2.193481 52.335475,-2.193496 52.335502,-2.19349 52.335517,-2.193438 52.335561,-2.193321 
52.335581,-2.193233 52.335622,-2.193227 52.335645,-2.193245 52.335709,-2.19324 52.33577,-2.193258 52.335842,-2.193197 52.336003,-2.193185 52.336087,-2.193176 52.336102,-2.193133 52.336109,-2.193044 52.336084,-2.192998 52.336066,-2.192849 52.335968,-2.192791 52.335988,-2.192762 52.336014,-2.192753 52.336049,-2.192763 52.336073,-2.192833 52.336148,-2.192827 52.336163,-2.192758 52.336242,-2.19259 52.336367,-2.19252 52.336457,-2.192513 52.336479,-2.192519 52.336494,-2.19261 52.336561,-2.192661 52.33661,-2.192739 52.336623,-2.19281 52.336607,-2.192871 52.33661,-2.192877 52.336625,-2.192866 52.336642,-2.19274 52.336681,-2.192709 52.336711,-2.192727 52.33676,-2.192809 52.336855,-2.192832 52.336891,-2.192832 52.33691,-2.192821 52.336918,-2.19278 52.336921,-2.192772 52.336931,-2.192492 52.336888,-2.192412 52.33695,-2.192409 52.336973,-2.192212 52.336915,-2.192173 52.336927,-2.192116 52.337023,-2.1921 52.337094,-2.192034 52.337152,-2.192005 52.337191,-2.192008 52.337208,-2.192023 52.337221,-2.192082 52.337246,-2.192107 52.33727,-2.192104 52.337281,-2.192031 52.337349,-2.192023 52.337366,-2.192019 52.337412,-2.19204 52.337524,-2.192052 52.337543,-2.192075 52.337556,-2.192341 52.337458,-2.192405 52.337464,-2.192448 52.337476,-2.192485 52.337496,-2.192514 52.337529,-2.192535 52.337541,-2.192539 52.337555,-2.192529 52.337621,-2.192518 52.337639,-2.192477 52.337671,-2.192399 52.337708,-2.192383 52.337726,-2.192398 52.337765,-2.19246 52.337833,-2.192472 52.337843,-2.192574 52.337809,-2.192678 52.337817,-2.192722 52.33781,-2.192874 52.337903,-2.192996 52.337918,-2.193027 52.33793,-2.19304 52.337946,-2.193093 52.338026,-2.193088 52.338068,-2.193058 52.338091,-2.192932 52.338135,-2.192915 52.338158,-2.192921 52.338181,-2.192996 52.338232,-2.193039 52.338286,-2.193042 52.338301,-2.192993 52.338516,-2.193118 52.338536,-2.193241 52.33854,-2.193279 52.338558,-2.193298 52.338583,-2.193282 52.33861,-2.193197 52.338662,-2.193114 52.338732,-2.193091 52.33878,-2.193093 52.338866,-2.193113 
52.33889,-2.193146 52.338896,-2.193564 52.338869,-2.193659 52.338905,-2.193684 52.338926,-2.193687 52.338945,-2.19368 52.338959,-2.193649 52.338977,-2.193463 52.338987,-2.193412 52.339027,-2.193402 52.339068,-2.193431 52.339104,-2.19359 52.339167,-2.193627 52.339186,-2.193647 52.339209,-2.193649 52.339226,-2.193563 52.339374,-2.193567 52.339389,-2.193598 52.339433,-2.193759 52.339536,-2.193777 52.339573,-2.193777 52.339598,-2.19383 52.339621,-2.193902 52.339683,-2.193899 52.339871,-2.193917 52.339934,-2.193949 52.339997,-2.194064 52.340045,-2.19434 52.340085,-2.194694 52.34017,-2.195063 52.340284,-2.195192 52.340342,-2.195371 52.340475,-2.195538 52.340588,-2.195613 52.340654,-2.195651 52.340922,-2.195722 52.341019,-2.195756 52.341129,-2.195596 52.341258,-2.195703 52.341304,-2.196059 52.341489,-2.196331 52.34165,-2.196752 52.341869,-2.196998 52.342017,-2.197936 52.342553,-2.198575 52.342886,-2.198828 52.343041,-2.199158 52.343221,-2.200485 52.343984,-2.201049 52.344281,-2.201745 52.344605,-2.202135 52.344849,-2.202169 52.344856,-2.202238 52.344809,-2.202638 52.344986,-2.202921 52.345142,-2.203049 52.345178,-2.203315 52.345219,-2.203513 52.345199,-2.203597 52.345217,-2.203644 52.345179,-2.204124 52.345269,-2.204309 52.345315,-2.204569 52.345398,-2.20508 52.345612,-2.205099 52.345638,-2.205105 52.345683,-2.205088 52.345707,-2.20503 52.345754,-2.20502 52.345788,-2.205017 52.345892,-2.204873 52.346254,-2.204838 52.346389,-2.20481 52.346723,-2.204777 52.346866,-2.204728 52.347009,-2.204743 52.347142,-2.204744 52.347313,-2.204754 52.34744,-2.204727 52.347721,-2.204733 52.347763,-2.204728 52.348025,-2.204778 52.348823,-2.204796 52.348951,-2.204833 52.34911,-2.204961 52.349439,-2.205051 52.349579,-2.205256 52.349815,-2.205339 52.349925,-2.205385 52.350014,-2.205412 52.350099,-2.205434 52.350334,-2.205419 52.350407,-2.205373 52.350555,-2.205348 52.350594,-2.205321 52.350703,-2.205346 52.350721,-2.205909 52.350915,-2.206659 52.351241,-2.207171 52.351429,-2.20718 
52.351418,-2.207192 52.351422,-2.207208 52.351407,-2.207349 52.351465,-2.207417 52.351501,-2.207661 52.35169,-2.207641 52.351705,-2.207629 52.351739,-2.207621 52.351829,-2.207586 52.35195,-2.207672 52.352092,-2.207715 52.352183,-2.207741 52.352279,-2.207849 52.352506,-2.207908 52.352587,-2.208043 52.352724,-2.208069 52.352766,-2.207994 52.352947,-2.207894 52.353235,-2.208065 52.35329,-2.208416 52.353464,-2.208818 52.353708,-2.209078 52.353852,-2.209189 52.353897,-2.210227 52.354266,-2.210374 52.354342,-2.210403 52.354321,-2.210623 52.354425,-2.210486 52.354585,-2.210409 52.35466,-2.210212 52.354876,-2.210197 52.3549,-2.210192 52.354925,-2.210201 52.354952,-2.210358 52.355064,-2.210416 52.355088,-2.210535 52.35517,-2.210669 52.35523,-2.210857 52.355344,-2.211305 52.355563,-2.211635 52.355742,-2.211844 52.355898,-2.212201 52.356073,-2.212513 52.35621,-2.212843 52.356341,-2.213949 52.356835,-2.213975 52.356844,-2.214008 52.356825,-2.214311 52.356936,-2.215069 52.357296,-2.216381 52.357974,-2.217504 52.358453,-2.218162 52.358717,-2.219002 52.359069,-2.219219 52.359138),(-2.231458 52.328493,-2.232054 52.328423,-2.232023 52.32832,-2.231973 52.328052,-2.231954 52.327913,-2.231939 52.327639,-2.231899 52.327566,-2.231832 52.326595,-2.231818 52.326589,-2.231808 52.326473,-2.231802 52.326227,-2.231808 52.326105,-2.232088 52.326066,-2.232677 52.326051,-2.232668 52.32662,-2.232656 52.326895,-2.23382 52.32688,-2.234332 52.327213,-2.23502 52.32755,-2.235316 52.327703,-2.235853 52.32781,-2.236667 52.327998,-2.23756 52.328242,-2.237491 52.328316,-2.237531 52.328369,-2.237248 52.328628,-2.23706 52.328822,-2.236899 52.329048,-2.235375 52.330839,-2.234811 52.331537,-2.234778 52.33154,-2.234783 52.331588,-2.235739 52.331666,-2.235728 52.331899,-2.235948 52.331908,-2.235949 52.331966,-2.23618 52.332005,-2.236224 52.331986,-2.236311 52.331919,-2.236357 52.331908,-2.236415 52.331903,-2.236572 52.331913,-2.236515 52.332023,-2.236433 52.332244,-2.236399 52.332387,-2.236384 
52.33251,-2.236645 52.332567,-2.236622 52.332614,-2.236542 52.332722,-2.236501 52.332749,-2.236216 52.333127,-2.236017 52.33337,-2.235955 52.333468,-2.236222 52.333545,-2.235848 52.334071,-2.23581 52.334386,-2.235953 52.334372,-2.235981 52.334463,-2.236074 52.334488,-2.236107 52.334655,-2.236222 52.334657,-2.236232 52.334691,-2.23698 52.334662,-2.236809 52.33509,-2.236595 52.335514,-2.23703 52.335665,-2.236894 52.335754,-2.237107 52.335855,-2.236895 52.336012,-2.236663 52.336149,-2.236598 52.33621,-2.23677 52.336227,-2.236972 52.336268,-2.237194 52.336347,-2.237479 52.33648,-2.237425 52.336553,-2.237417 52.336636,-2.237403 52.33667,-2.23731 52.336806,-2.237095 52.337033,-2.237035 52.337013,-2.236893 52.337186,-2.236888 52.337268,-2.236822 52.337347,-2.236712 52.337538,-2.236657 52.337586,-2.236609 52.337589,-2.236503 52.337564,-2.236507 52.337541,-2.236476 52.337504,-2.235825 52.336926,-2.235453 52.336844,-2.235411 52.336882,-2.235365 52.336896,-2.235288 52.336898,-2.235195 52.336889,-2.235036 52.336861,-2.234962 52.33684,-2.234898 52.336811,-2.234796 52.336748,-2.234752 52.336709,-2.234647 52.336593,-2.234469 52.336362,-2.23438 52.336259,-2.234312 52.336192,-2.234271 52.336165,-2.234146 52.336115,-2.233681 52.336004,-2.233345 52.335865,-2.233239 52.335832,-2.233153 52.335817,-2.233175 52.335778,-2.23309 52.335749,-2.233044 52.335766,-2.232913 52.335718,-2.232751 52.335583,-2.232668 52.335498,-2.232294 52.335289,-2.2322 52.335215,-2.23205 52.33514,-2.231764 52.335029,-2.231695 52.335008,-2.231672 52.33504,-2.231469 52.334969,-2.231205 52.334899,-2.230951 52.335169,-2.230825 52.335349,-2.230803 52.335417,-2.230775 52.335453,-2.230493 52.335693,-2.230409 52.335774,-2.23023 52.335906,-2.229941 52.336199,-2.229736 52.336126,-2.229472 52.336045,-2.229283 52.336005,-2.229111 52.335979,-2.228468 52.335927,-2.227869 52.335895,-2.227471 52.335854,-2.227465 52.335808,-2.227496 52.335643,-2.227574 52.335405,-2.22768 52.335031,-2.227711 52.334799,-2.227628 52.334795,-2.227243 
52.33488,-2.227151 52.334886,-2.227093 52.334882,-2.226993 52.334852,-2.226841 52.334749,-2.226608 52.334628,-2.226664 52.334481,-2.226779 52.334495,-2.226762 52.334482,-2.226799 52.334405,-2.226833 52.334393,-2.226928 52.334163,-2.226969 52.334083,-2.227039 52.33398,-2.22767 52.333978,-2.227786 52.333362,-2.227841 52.333226,-2.22794 52.333054,-2.227989 52.332986,-2.228252 52.332731,-2.229113 52.332081,-2.229285 52.331965,-2.229355 52.331924,-2.229514 52.331952,-2.230034 52.331623,-2.230079 52.331523,-2.230124 52.331494,-2.230347 52.331313,-2.231313 52.330669,-2.231657 52.330381,-2.231729 52.33031,-2.231746 52.33031,-2.231726 52.33031,-2.232135 52.329733,-2.232164 52.329681,-2.232322 52.329311,-2.23231 52.329217,-2.232267 52.329073,-2.232237 52.329015,-2.231435 52.328961,-2.231458 52.328493),(-2.180426 52.31046,-2.180616 52.310473,-2.180768 52.310497,-2.180773 52.310529,-2.180688 52.310767,-2.180648 52.310848,-2.180618 52.310989,-2.180566 52.311085,-2.180518 52.31121,-2.180435 52.311381,-2.180429 52.311395,-2.180434 52.31141,-2.181208 52.311829,-2.181117 52.311886,-2.181047 52.311916,-2.180916 52.312004,-2.180683 52.312186,-2.180416 52.312426,-2.180273 52.312569,-2.180068 52.312795,-2.179758 52.313213,-2.179442 52.313514,-2.179172 52.313748,-2.178825 52.314023,-2.178976 52.314065,-2.179431 52.314149,-2.179788 52.314225,-2.179558 52.314728,-2.179657 52.314743,-2.179666 52.314809,-2.179509 52.315053,-2.179403 52.315272,-2.179487 52.315734,-2.179438 52.315846,-2.17948 52.315922,-2.17948 52.315977,-2.179514 52.316186,-2.179523 52.316377,-2.179494 52.31642,-2.1794 52.31648,-2.179267 52.316551,-2.179197 52.316578,-2.178861 52.316542,-2.177617 52.316485,-2.177489 52.316464,-2.177335 52.316406,-2.177065 52.316278,-2.17711 52.315985,-2.177195 52.315584,-2.177379 52.315002,-2.177119 52.31494,-2.177052 52.314944,-2.17689 52.31493,-2.176608 52.314885,-2.176318 52.314825,-2.175729 52.314775,-2.175381 52.314719,-2.175182 52.314709,-2.175251 52.314403,-2.175798 52.31444,-2.175839 
52.314349,-2.176204 52.314412,-2.176739 52.314487,-2.17676 52.314411,-2.176779 52.314179,-2.17658 52.314148,-2.176621 52.314077,-2.176621 52.31401,-2.176596 52.313832,-2.176882 52.313844,-2.177292 52.313811,-2.177248 52.313563,-2.177239 52.313396,-2.177257 52.313137,-2.177274 52.313034,-2.17731 52.312897,-2.177328 52.312846,-2.17734 52.312837,-2.177366 52.312744,-2.177605 52.312243,-2.177592 52.312206,-2.177617 52.312145,-2.177699 52.311988,-2.177904 52.31154,-2.177999 52.311303,-2.178016 52.311282,-2.178044 52.311271,-2.17807 52.311108,-2.178221 52.310726,-2.178243 52.31069,-2.178428 52.310117,-2.178445 52.309985,-2.178418 52.309931,-2.178419 52.309719,-2.178396 52.309566,-2.178368 52.309472,-2.178276 52.309294,-2.179224 52.309159,-2.179246 52.309399,-2.179258 52.309658,-2.179237 52.309768,-2.179213 52.309816,-2.1792 52.309976,-2.179165 52.310094,-2.179084 52.310184,-2.179022 52.310396,-2.179891 52.310464,-2.180426 52.31046),(-2.11888 52.294998,-2.118651 52.295207,-2.118479 52.295147,-2.11822 52.295079,-2.118144 52.295081,-2.118093 52.295096,-2.118001 52.295167,-2.117819 52.295079,-2.117694 52.294985,-2.117564 52.294866,-2.11732 52.294614,-2.117154 52.294467,-2.116923 52.294202,-2.116576 52.29387,-2.115397 52.292876,-2.115299 52.292939,-2.115354 52.292989,-2.114965 52.293248,-2.114625 52.293051,-2.114526 52.293092,-2.114357 52.293205,-2.113672 52.293703,-2.113582 52.293832,-2.113345 52.294,-2.11324 52.294103,-2.113055 52.294314,-2.112694 52.294206,-2.113142 52.293841,-2.113204 52.293782,-2.11293 52.293676,-2.112789 52.293609,-2.112455 52.293469,-2.112367 52.293448,-2.112255 52.293404,-2.112081 52.293324,-2.111216 52.292859,-2.110677 52.292517,-2.109852 52.292111,-2.10979 52.292062,-2.109674 52.291991,-2.109091 52.291728,-2.108843 52.291629,-2.108674 52.291574,-2.1085 52.291485,-2.107887 52.291202,-2.107546 52.29107,-2.107304 52.290998,-2.107161 52.290966,-2.107 52.290941,-2.106825 52.290924,-2.106447 52.290915,-2.106122 52.290895,-2.105633 52.290837,-2.105667 
52.29069,-2.105664 52.290592,-2.105717 52.290494,-2.105798 52.290413,-2.105916 52.290376,-2.10617 52.290273,-2.106526 52.290081,-2.106783 52.289967,-2.107428 52.289556,-2.107703 52.289405,-2.108111 52.289155,-2.10836 52.289018,-2.108549 52.288932,-2.108689 52.288859,-2.108749 52.288818,-2.109105 52.28864,-2.109436 52.28845,-2.109756 52.288248,-2.109926 52.288163,-2.110305 52.287948,-2.110537 52.287829,-2.110759 52.287729,-2.110976 52.28765,-2.11124 52.287546,-2.111354 52.287491,-2.111522 52.287432,-2.112074 52.28721,-2.11243 52.287044,-2.11309 52.28668,-2.11327 52.286602,-2.113539 52.286509,-2.113903 52.286439,-2.114339 52.286368,-2.114722 52.286346,-2.11512 52.286367,-2.115435 52.286399,-2.115567 52.286426,-2.115797 52.286491,-2.115909 52.286547,-2.116039 52.28667,-2.116077 52.286733,-2.116109 52.286827,-2.116136 52.286864,-2.116164 52.286946,-2.116186 52.286979,-2.116289 52.287201,-2.11642 52.287336,-2.116505 52.287398,-2.11663 52.28747,-2.116847 52.287556,-2.117023 52.287593,-2.117335 52.28764,-2.117442 52.287673,-2.117467 52.2877,-2.11753 52.28781,-2.117597 52.287896,-2.117855 52.288115,-2.118489 52.288625,-2.118613 52.288748,-2.119051 52.288733,-2.119252 52.288705,-2.119446 52.288667,-2.11957 52.288689,-2.119711 52.288737,-2.119817 52.288791,-2.119896 52.288845,-2.11994 52.288894,-2.120117 52.289131,-2.120188 52.289282,-2.120259 52.289465,-2.12041 52.289965,-2.12058 52.290662,-2.120615 52.290873,-2.120642 52.291148,-2.120635 52.291613,-2.120591 52.291826,-2.120576 52.291822,-2.120518 52.292196,-2.120445 52.292538,-2.120191 52.293109,-2.120018 52.29346,-2.119973 52.293531,-2.119588 52.294126,-2.119308 52.294513,-2.119156 52.294717,-2.11888 52.294998)))" # noqa: E501
expected = "MULTIPOLYGON (((-2.237210 52.360857,-2.237510 52.360838,-2.237512 52.360842,-2.237998 52.360783,-2.239279 52.360553,-2.240296 52.360428,-2.241611 52.360336,-2.241935 52.360368,-2.242738 52.360753,-2.242828 52.360787,-2.242966 52.360819,-2.244501 52.360918,-2.245688 52.361022,-2.246176 52.360063,-2.246586 52.359370,-2.246946 52.358795,-2.247101 52.358432,-2.246931 52.358449,-2.247499 52.357102,-2.247668 52.356678,-2.247808 52.356430,-2.247931 52.356144,-2.248250 52.355477,-2.248235 52.355473,-2.248248 52.355445,-2.247603 52.355271,-2.247506 52.355255,-2.247419 52.355261,-2.247287 52.355305,-2.245728 52.354927,-2.245735 52.354917,-2.245707 52.354910,-2.246763 52.353241,-2.245820 52.353017,-2.246116 52.352627,-2.247583 52.350831,-2.247690 52.350784,-2.249376 52.348833,-2.249439 52.348856,-2.249448 52.348846,-2.249802 52.348973,-2.249878 52.348847,-2.250434 52.348079,-2.250952 52.347400,-2.251079 52.347203,-2.251191 52.347056,-2.251216 52.347010,-2.251659 52.345845,-2.251429 52.345849,-2.251097 52.345840,-2.250744 52.345802,-2.249882 52.345656,-2.249750 52.345653,-2.249516 52.345665,-2.249537 52.345640,-2.249415 52.345652,-2.249481 52.345606,-2.249765 52.345473,-2.250106 52.345334,-2.251117 52.344948,-2.251371 52.344825,-2.251439 52.344783,-2.251750 52.344550,-2.252118 52.344229,-2.252137 52.344207,-2.252234 52.344028,-2.252596 52.343282,-2.252628 52.343203,-2.252754 52.342968,-2.252872 52.342615,-2.253145 52.342118,-2.253206 52.342017,-2.253333 52.341845,-2.253932 52.341125,-2.253981 52.341142,-2.254006 52.341115,-2.254735 52.341322,-2.255745 52.341556,-2.256257 52.341633,-2.256417 52.341647,-2.256684 52.341653,-2.256809 52.341679,-2.257019 52.341683,-2.257338 52.341658,-2.257543 52.341623,-2.257789 52.341559,-2.257946 52.341506,-2.260411 52.341036,-2.261065 52.340953,-2.261594 52.340866,-2.261983 52.340815,-2.262009 52.340743,-2.261232 52.340654,-2.261227 52.340648,-2.261243 52.340535,-2.261266 52.340461,-2.261301 52.340388,-2.261386 52.340260,-2.261700 
52.339902,-2.261730 52.339830,-2.261737 52.339784,-2.261737 52.339728,-2.261725 52.339670,-2.261676 52.339575,-2.261611 52.339504,-2.261219 52.339217,-2.261089 52.339141,-2.260976 52.339101,-2.260919 52.339070,-2.260848 52.339005,-2.260709 52.338646,-2.260736 52.338406,-2.260872 52.338325,-2.261047 52.338248,-2.261181 52.338204,-2.261798 52.338058,-2.263100 52.338127,-2.263077 52.338016,-2.263068 52.337922,-2.263072 52.337830,-2.263285 52.336903,-2.263308 52.336841,-2.263370 52.336718,-2.263858 52.336039,-2.264079 52.335934,-2.264950 52.335971,-2.265184 52.335971,-2.265538 52.335952,-2.265868 52.335904,-2.265982 52.335899,-2.266097 52.335896,-2.266333 52.335924,-2.266826 52.336150,-2.267131 52.336275,-2.268065 52.336632,-2.268295 52.336425,-2.268478 52.336282,-2.268340 52.336212,-2.268366 52.336176,-2.268478 52.336077,-2.268753 52.336065,-2.268876 52.336092,-2.268998 52.336126,-2.269216 52.336269,-2.269160 52.336135,-2.269161 52.336057,-2.269175 52.336000,-2.269208 52.335931,-2.269235 52.335896,-2.269305 52.335841,-2.269229 52.335752,-2.269216 52.335710,-2.269213 52.335656,-2.269220 52.335626,-2.269269 52.335550,-2.269719 52.335152,-2.267975 52.334078,-2.267306 52.334160,-2.266837 52.333635,-2.267003 52.333569,-2.267208 52.333507,-2.267342 52.333512,-2.267833 52.333485,-2.267968 52.333740,-2.270334 52.333121,-2.271779 52.332618,-2.271123 52.331963,-2.270858 52.331634,-2.270783 52.331658,-2.270828 52.331784,-2.270820 52.331804,-2.270767 52.331862,-2.270514 52.332056,-2.270431 52.332094,-2.270346 52.332115,-2.270170 52.332080,-2.269903 52.331995,-2.269734 52.331928,-2.269379 52.331838,-2.268946 52.331646,-2.268627 52.331472,-2.268517 52.331380,-2.268476 52.331326,-2.268468 52.331296,-2.268376 52.331228,-2.267261 52.330812,-2.266464 52.330537,-2.266292 52.330461,-2.266156 52.330390,-2.265971 52.330276,-2.265553 52.330117,-2.265334 52.330051,-2.264944 52.329959,-2.264135 52.329802,-2.263841 52.329757,-2.263447 52.329637,-2.262763 52.329639,-2.262622 52.329625,-2.261951 
52.329446,-2.261659 52.329297,-2.261422 52.329153,-2.261161 52.328932,-2.260899 52.328771,-2.260250 52.328586,-2.259985 52.328528,-2.258908 52.328352,-2.258338 52.328205,-2.258204 52.328165,-2.257965 52.328072,-2.257672 52.327935,-2.257207 52.327676,-2.256751 52.327382,-2.256239 52.327071,-2.255899 52.326878,-2.255135 52.326500,-2.254356 52.326155,-2.253899 52.325995,-2.251895 52.325501,-2.251610 52.325417,-2.251494 52.325375,-2.251266 52.325266,-2.251065 52.325164,-2.249734 52.324428,-2.249612 52.324343,-2.249415 52.324182,-2.249009 52.323791,-2.248500 52.323317,-2.248470 52.323311,-2.248384 52.323328,-2.248302 52.323356,-2.247721 52.323621,-2.246956 52.323937,-2.246810 52.323980,-2.246701 52.323996,-2.246417 52.324011,-2.246082 52.324003,-2.244877 52.323844,-2.244618 52.323833,-2.243990 52.323888,-2.243637 52.323931,-2.242906 52.324036,-2.242683 52.324085,-2.242216 52.324138,-2.242029 52.324137,-2.241791 52.324096,-2.241159 52.324009,-2.240891 52.323975,-2.240744 52.323970,-2.240629 52.323990,-2.240582 52.324014,-2.240491 52.324088,-2.240427 52.324129,-2.240371 52.324150,-2.240193 52.324187,-2.240086 52.324194,-2.238681 52.324191,-2.238192 52.324197,-2.238066 52.324189,-2.237960 52.324173,-2.237192 52.324008,-2.237048 52.323984,-2.236532 52.323930,-2.236276 52.323914,-2.235472 52.323893,-2.235268 52.323905,-2.234935 52.323939,-2.233882 52.324114,-2.233390 52.324157,-2.233299 52.324152,-2.233133 52.324127,-2.232933 52.324082,-2.232680 52.324010,-2.232561 52.324002,-2.232418 52.324018,-2.231378 52.324023,-2.230386 52.323910,-2.229426 52.323860,-2.229168 52.323839,-2.228476 52.323780,-2.227666 52.323688,-2.226645 52.323599,-2.225510 52.323555,-2.224674 52.323548,-2.222433 52.323576,-2.221770 52.323593,-2.219554 52.323676,-2.219186 52.323678,-2.218596 52.323663,-2.217935 52.323615,-2.217612 52.323579,-2.215772 52.323310,-2.215379 52.323260,-2.214933 52.323219,-2.214828 52.323091,-2.214618 52.322731,-2.214195 52.322138,-2.213858 52.321599,-2.212909 52.320136,-2.212881 
52.320139,-2.212870 52.320133,-2.212865 52.320115,-2.212891 52.320104,-2.212668 52.319760,-2.211414 52.318001,-2.211164 52.317585,-2.210969 52.317285,-2.210555 52.316763,-2.210414 52.316539,-2.210150 52.316157,-2.209525 52.315315,-2.209492 52.315250,-2.209155 52.314907,-2.208656 52.314437,-2.208045 52.313948,-2.207645 52.313607,-2.207454 52.313477,-2.207151 52.313302,-2.206783 52.313110,-2.205236 52.312387,-2.204279 52.312002,-2.204041 52.311918,-2.204048 52.311909,-2.203804 52.311811,-2.202980 52.311505,-2.200164 52.310598,-2.199682 52.310487,-2.199274 52.310432,-2.198991 52.310363,-2.198039 52.310094,-2.197348 52.309915,-2.197041 52.309843,-2.195562 52.309458,-2.194698 52.309179,-2.194607 52.309171,-2.194566 52.309192,-2.194059 52.308965,-2.193892 52.308899,-2.193484 52.308773,-2.192730 52.308559,-2.192514 52.308476,-2.191466 52.308154,-2.190573 52.307851,-2.190030 52.307644,-2.189245 52.307362,-2.187706 52.306743,-2.187383 52.306630,-2.186503 52.306229,-2.186078 52.306025,-2.185728 52.305831,-2.185045 52.305505,-2.184750 52.305374,-2.184313 52.305215,-2.183867 52.305034,-2.183517 52.304835,-2.183496 52.304815,-2.183484 52.304788,-2.183208 52.304683,-2.182763 52.304487,-2.182488 52.304350,-2.182340 52.304298,-2.182240 52.304245,-2.182218 52.304224,-2.181076 52.303659,-2.180490 52.303383,-2.180005 52.303049,-2.179765 52.302866,-2.179494 52.302703,-2.178980 52.302352,-2.178363 52.301873,-2.177925 52.301577,-2.177816 52.301495,-2.177507 52.301243,-2.177305 52.301043,-2.177306 52.301030,-2.177284 52.300980,-2.177306 52.300952,-2.177323 52.300943,-2.177309 52.300924,-2.177062 52.300721,-2.176974 52.300670,-2.176712 52.300431,-2.176301 52.300013,-2.176175 52.299895,-2.175687 52.299377,-2.175661 52.299366,-2.175549 52.299267,-2.175285 52.299094,-2.173739 52.297686,-2.172665 52.296728,-2.172306 52.296391,-2.172212 52.296285,-2.172144 52.296189,-2.171884 52.295898,-2.170878 52.294971,-2.170810 52.294911,-2.170793 52.294916,-2.169356 52.293535,-2.168710 52.292801,-2.168576 
52.292688,-2.167765 52.291862,-2.167285 52.291402,-2.166910 52.291070,-2.166472 52.290739,-2.166227 52.290542,-2.165783 52.290218,-2.165184 52.289735,-2.164453 52.288998,-2.164423 52.288935,-2.164337 52.288937,-2.164137 52.288771,-2.163589 52.288274,-2.163177 52.287867,-2.162777 52.287424,-2.162721 52.287374,-2.162832 52.287344,-2.162840 52.287323,-2.162815 52.287287,-2.162490 52.286994,-2.162381 52.286936,-2.162309 52.286888,-2.161770 52.286354,-2.161519 52.286127,-2.161303 52.285947,-2.161207 52.285869,-2.161132 52.285890,-2.161071 52.285863,-2.160828 52.285572,-2.160580 52.285254,-2.160223 52.284829,-2.160187 52.284799,-2.160082 52.284810,-2.160053 52.284772,-2.160069 52.284767,-2.160057 52.284745,-2.160098 52.284696,-2.159613 52.284357,-2.159406 52.284252,-2.159208 52.284046,-2.159238 52.284030,-2.158606 52.283540,-2.158363 52.283287,-2.158090 52.283076,-2.157663 52.282713,-2.157522 52.282580,-2.156648 52.281699,-2.155863 52.280815,-2.155793 52.280761,-2.155724 52.280694,-2.155056 52.280027,-2.154887 52.279811,-2.154507 52.279234,-2.154260 52.278803,-2.153952 52.278162,-2.153788 52.277774,-2.153570 52.277205,-2.153479 52.276740,-2.153344 52.275658,-2.153337 52.275562,-2.153342 52.275076,-2.153362 52.274834,-2.153360 52.274723,-2.153328 52.274689,-2.153340 52.274618,-2.153118 52.274561,-2.153040 52.274629,-2.152931 52.274662,-2.152261 52.274542,-2.151990 52.274509,-2.151777 52.274472,-2.151575 52.274421,-2.150223 52.274027,-2.150059 52.273941,-2.149812 52.273850,-2.149374 52.273737,-2.148982 52.273623,-2.148098 52.273385,-2.147926 52.273365,-2.147052 52.273224,-2.146156 52.273065,-2.145637 52.273027,-2.145325 52.273023,-2.145044 52.273049,-2.144518 52.273107,-2.144083 52.273197,-2.143846 52.273233,-2.142873 52.273311,-2.142723 52.273330,-2.140601 52.273627,-2.140123 52.273737,-2.139879 52.273321,-2.139849 52.273288,-2.139793 52.273089,-2.139801 52.273055,-2.139758 52.272902,-2.139874 52.272858,-2.139885 52.272842,-2.140452 52.272424,-2.140819 52.272123,-2.141517 
52.271585,-2.140963 52.271307,-2.140668 52.271093,-2.140684 52.270594,-2.140745 52.270590,-2.140742 52.270541,-2.140825 52.269835,-2.141008 52.269702,-2.141013 52.269601,-2.140619 52.269575,-2.140299 52.269534,-2.139733 52.269483,-2.139028 52.269498,-2.138102 52.269489,-2.137727 52.269507,-2.137247 52.269544,-2.137088 52.269546,-2.136636 52.269532,-2.136455 52.269537,-2.135216 52.269605,-2.135134 52.269630,-2.135093 52.269673,-2.135086 52.269712,-2.135095 52.269774,-2.135035 52.269782,-2.135001 52.269717,-2.134906 52.269681,-2.134790 52.269683,-2.134423 52.269743,-2.134042 52.269837,-2.133267 52.269965,-2.132411 52.270121,-2.131910 52.270197,-2.131867 52.270103,-2.131861 52.270066,-2.131868 52.270052,-2.131906 52.270011,-2.131973 52.269966,-2.132107 52.269919,-2.132177 52.269857,-2.132205 52.269808,-2.132210 52.269778,-2.132204 52.269692,-2.132210 52.269664,-2.132194 52.269627,-2.132144 52.269583,-2.132144 52.269490,-2.132124 52.269451,-2.132132 52.269426,-2.132174 52.269395,-2.132297 52.269370,-2.132344 52.269353,-2.132385 52.269296,-2.132458 52.269243,-2.132819 52.269195,-2.132635 52.268913,-2.132521 52.268863,-2.131987 52.268576,-2.131654 52.268386,-2.132247 52.268106,-2.132677 52.267881,-2.133159 52.267691,-2.133525 52.267524,-2.133697 52.267436,-2.133681 52.267397,-2.133704 52.267370,-2.133801 52.267356,-2.134080 52.267210,-2.134263 52.267096,-2.134357 52.267027,-2.134536 52.266872,-2.134395 52.266809,-2.133882 52.266634,-2.133596 52.266567,-2.133274 52.266519,-2.132821 52.266488,-2.131841 52.266480,-2.131317 52.266456,-2.130986 52.266432,-2.130270 52.266281,-2.129602 52.266161,-2.129198 52.266117,-2.128694 52.266075,-2.128091 52.266053,-2.127364 52.266075,-2.126837 52.266120,-2.126522 52.266128,-2.126212 52.266119,-2.119952 52.265298,-2.115125 52.264694,-2.111480 52.264109,-2.111087 52.264058,-2.110021 52.263900,-2.109683 52.263867,-2.108934 52.263822,-2.107455 52.263701,-2.106395 52.263598,-2.105909 52.263562,-2.103320 52.263314,-2.101655 52.263172,-2.100815 
52.263090,-2.097650 52.262809,-2.096722 52.262699,-2.093677 52.262226,-2.091837 52.262005,-2.090728 52.261882,-2.089397 52.261757,-2.086862 52.261558,-2.086106 52.261448,-2.083137 52.261193,-2.080816 52.260884,-2.077210 52.260601,-2.072849 52.260144,-2.071848 52.260094,-2.071465 52.260083,-2.070513 52.260085,-2.070040 52.260097,-2.069706 52.260110,-2.068625 52.260191,-2.067797 52.260290,-2.067344 52.260359,-2.066804 52.260453,-2.065932 52.260633,-2.061021 52.261689,-2.060301 52.261871,-2.059602 52.262071,-2.057190 52.262856,-2.055880 52.263301,-2.055105 52.263620,-2.054654 52.263835,-2.054334 52.264000,-2.052650 52.264933,-2.054113 52.265903,-2.053168 52.266439,-2.053286 52.266516,-2.053487 52.266625,-2.053178 52.266876,-2.053024 52.266979,-2.052999 52.267033,-2.053277 52.267229,-2.053316 52.267264,-2.053050 52.267393,-2.053179 52.267468,-2.052755 52.267682,-2.053013 52.268006,-2.052842 52.268069,-2.052624 52.268163,-2.051959 52.268357,-2.051412 52.268538,-2.051342 52.268633,-2.051278 52.268704,-2.051134 52.268926,-2.050976 52.268815,-2.050820 52.268675,-2.050594 52.268437,-2.050524 52.268344,-2.050376 52.268077,-2.050344 52.267990,-2.050344 52.267943,-2.050273 52.267892,-2.050228 52.267839,-2.050292 52.267799,-2.050331 52.267732,-2.050466 52.267552,-2.050800 52.267331,-2.050845 52.267284,-2.050925 52.266893,-2.050903 52.266893,-2.051031 52.266529,-2.051008 52.266523,-2.051053 52.266391,-2.051079 52.266281,-2.051012 52.266034,-2.050335 52.265812,-2.049705 52.265702,-2.049912 52.265300,-2.049183 52.265186,-2.048167 52.264985,-2.048042 52.264947,-2.047824 52.264865,-2.047609 52.264775,-2.047040 52.264509,-2.046443 52.264280,-2.046032 52.264164,-2.045780 52.264114,-2.045098 52.263958,-2.044804 52.263875,-2.044171 52.263675,-2.043079 52.263288,-2.041948 52.262863,-2.040945 52.262503,-2.040575 52.262352,-2.040183 52.262177,-2.040023 52.262067,-2.039769 52.261928,-2.039461 52.261783,-2.038694 52.261470,-2.036469 52.260611,-2.035455 52.260186,-2.032303 52.258986,-2.030577 
52.258311,-2.027489 52.257155,-2.026896 52.256947,-2.026198 52.256724,-2.025620 52.256556,-2.025304 52.256473,-2.024781 52.256343,-2.022064 52.255722,-2.020570 52.255342,-2.020223 52.255241,-2.019520 52.254997,-2.019093 52.254829,-2.018639 52.254628,-2.017926 52.254306,-2.017139 52.253933,-2.015910 52.253315,-2.014882 52.252767,-2.014409 52.253113,-2.014356 52.253125,-2.014104 52.253149,-2.013732 52.253224,-2.013712 52.253322,-2.013741 52.253391,-2.013744 52.253416,-2.013713 52.253546,-2.013652 52.253690,-2.013634 52.253755,-2.013501 52.254050,-2.013463 52.254048,-2.013403 52.254201,-2.013282 52.254588,-2.013248 52.254836,-2.013153 52.255160,-2.013133 52.255403,-2.013234 52.255501,-2.013272 52.255502,-2.013348 52.255523,-2.013531 52.255657,-2.013751 52.256016,-2.013817 52.256170,-2.013782 52.256480,-2.013789 52.256555,-2.013582 52.257011,-2.013461 52.257150,-2.013359 52.257330,-2.013034 52.257735,-2.013340 52.257619,-2.013313 52.257656,-2.013385 52.257629,-2.013338 52.257680,-2.013151 52.257941,-2.013092 52.258006,-2.013095 52.258002,-2.013036 52.258000,-2.012890 52.257954,-2.012852 52.257965,-2.012765 52.258104,-2.012738 52.258224,-2.012724 52.258248,-2.012591 52.258394,-2.012600 52.258479,-2.012552 52.258562,-2.012559 52.258645,-2.012410 52.258817,-2.012306 52.258914,-2.012231 52.258966,-2.012228 52.258983,-2.012253 52.259027,-2.012251 52.259043,-2.012234 52.259088,-2.012133 52.259274,-2.011988 52.259383,-2.011896 52.259463,-2.011451 52.259926,-2.011405 52.259997,-2.011409 52.260046,-2.011453 52.260145,-2.011436 52.260186,-2.011425 52.260246,-2.011444 52.260498,-2.011480 52.260613,-2.011548 52.260770,-2.011510 52.260964,-2.011516 52.261016,-2.011506 52.261126,-2.011521 52.261154,-2.011553 52.261272,-2.011532 52.261312,-2.011534 52.261374,-2.011552 52.261594,-2.011579 52.261796,-2.011581 52.261883,-2.011565 52.262038,-2.011526 52.262265,-2.011487 52.262351,-2.011412 52.262482,-2.011280 52.262589,-2.011262 52.262614,-2.011249 52.262674,-2.011248 52.262794,-2.011276 
52.262881,-2.011426 52.262932,-2.011869 52.262983,-2.012100 52.263027,-2.012183 52.263056,-2.012175 52.263235,-2.012164 52.263313,-2.012164 52.263431,-2.012205 52.263667,-2.012152 52.263722,-2.012147 52.263746,-2.012163 52.263892,-2.012217 52.264040,-2.012227 52.264166,-2.012260 52.264297,-2.012264 52.264371,-2.012275 52.264403,-2.012591 52.264325,-2.013001 52.264177,-2.013054 52.264169,-2.013158 52.264179,-2.013187 52.264193,-2.013220 52.264214,-2.013286 52.264300,-2.013296 52.264519,-2.013334 52.264649,-2.013388 52.264720,-2.013477 52.264903,-2.013668 52.265134,-2.013720 52.265216,-2.013916 52.265420,-2.013923 52.265454,-2.013918 52.265566,-2.013993 52.265666,-2.013980 52.265676,-2.014000 52.265700,-2.013893 52.265737,-2.013819 52.265789,-2.013783 52.265824,-2.013514 52.265910,-2.013339 52.265986,-2.013267 52.266036,-2.013246 52.266113,-2.013482 52.266265,-2.014269 52.266514,-2.014445 52.266553,-2.014498 52.266558,-2.014603 52.266586,-2.014808 52.266687,-2.014899 52.266748,-2.015227 52.267016,-2.015368 52.267141,-2.015425 52.267207,-2.015453 52.267277,-2.015451 52.267320,-2.015388 52.267461,-2.015382 52.267660,-2.015391 52.267671,-2.015378 52.267725,-2.015383 52.267764,-2.015418 52.267838,-2.015395 52.267873,-2.015410 52.268023,-2.015456 52.268177,-2.015443 52.268264,-2.015526 52.268443,-2.015562 52.268655,-2.015519 52.268767,-2.015499 52.269305,-2.015553 52.269538,-2.015538 52.269609,-2.015556 52.269713,-2.015550 52.269962,-2.015538 52.270010,-2.015300 52.270031,-2.014998 52.270037,-2.014797 52.270031,-2.014631 52.270046,-2.014363 52.270089,-2.014171 52.270104,-2.014101 52.270119,-2.013838 52.270183,-2.013746 52.270228,-2.013726 52.270357,-2.013701 52.270446,-2.013656 52.270462,-2.013412 52.270480,-2.013175 52.270473,-2.012945 52.270455,-2.012680 52.270420,-2.012137 52.270535,-2.012061 52.270569,-2.011535 52.270887,-2.011513 52.270908,-2.011293 52.270900,-2.011173 52.270907,-2.010471 52.270896,-2.009828 52.270896,-2.009388 52.270761,-2.008923 52.270602,-2.008777 
52.270568,-2.008708 52.270523,-2.008582 52.270486,-2.008516 52.270485,-2.008015 52.270407,-2.007955 52.270408,-2.007854 52.270424,-2.007763 52.270422,-2.007177 52.270265,-2.007111 52.270269,-2.006789 52.270324,-2.006608 52.270378,-2.006566 52.270406,-2.006532 52.270450,-2.006443 52.270494,-2.006148 52.270706,-2.006078 52.270737,-2.005848 52.270595,-2.005266 52.271090,-2.004388 52.271857,-2.004221 52.272012,-2.003228 52.273624,-2.003955 52.273842,-2.004791 52.274226,-2.005690 52.274575,-2.005878 52.274660,-2.005977 52.274711,-2.006112 52.274797,-2.006391 52.275022,-2.006553 52.275136,-2.007052 52.275373,-2.007026 52.275451,-2.006992 52.275478,-2.006867 52.275564,-2.006796 52.275602,-2.006716 52.275662,-2.006684 52.275663,-2.006665 52.275673,-2.006399 52.275670,-2.006143 52.275652,-2.005830 52.275642,-2.005412 52.275662,-2.004867 52.275877,-2.003539 52.276423,-2.003186 52.276559,-2.002500 52.276792,-2.002582 52.276892,-2.002702 52.277066,-2.002837 52.277292,-2.002938 52.277488,-2.003110 52.277767,-2.003336 52.278010,-2.003635 52.278297,-2.003917 52.278591,-2.003909 52.278597,-2.003919 52.278608,-2.003466 52.278914,-2.002685 52.279416,-2.002505 52.279545,-2.002665 52.279522,-2.002815 52.279485,-2.003018 52.279448,-2.003227 52.279393,-2.003568 52.279320,-2.003733 52.279257,-2.003921 52.279229,-2.003987 52.279247,-2.004053 52.279305,-2.004126 52.279349,-2.004596 52.279500,-2.004716 52.279510,-2.004758 52.279494,-2.004825 52.279436,-2.005009 52.279218,-2.005023 52.279182,-2.005025 52.279114,-2.005007 52.279066,-2.005708 52.278795,-2.006203 52.278545,-2.006307 52.278485,-2.006395 52.278490,-2.006579 52.278526,-2.006643 52.278527,-2.006733 52.278553,-2.007261 52.278589,-2.007947 52.278518,-2.007983 52.278578,-2.007989 52.278618,-2.008017 52.278654,-2.008122 52.278600,-2.008213 52.278569,-2.008370 52.278477,-2.008906 52.278106,-2.009038 52.278075,-2.009076 52.278056,-2.009161 52.278046,-2.009232 52.278059,-2.009298 52.278084,-2.009384 52.278138,-2.009466 52.278171,-2.009536 
52.278215,-2.009560 52.278221,-2.009602 52.278257,-2.009929 52.278460,-2.010008 52.278489,-2.010053 52.278517,-2.010259 52.278665,-2.010362 52.278729,-2.010930 52.278540,-2.011356 52.278422,-2.011701 52.278353,-2.011758 52.278355,-2.011893 52.278383,-2.012173 52.278412,-2.012446 52.278425,-2.013610 52.278473,-2.014380 52.278494,-2.014757 52.278519,-2.014887 52.278492,-2.015058 52.278502,-2.015488 52.278567,-2.015719 52.278582,-2.016674 52.278770,-2.016983 52.278820,-2.017044 52.278812,-2.017095 52.278816,-2.017218 52.278855,-2.017597 52.278908,-2.017876 52.278989,-2.018143 52.279011,-2.018843 52.279148,-2.018915 52.279164,-2.019167 52.279254,-2.019332 52.279303,-2.019723 52.279407,-2.020041 52.279527,-2.020230 52.279396,-2.020339 52.279157,-2.020442 52.279098,-2.020473 52.279097,-2.020564 52.279035,-2.020644 52.278855,-2.020841 52.278755,-2.020919 52.278664,-2.020937 52.278666,-2.020994 52.278719,-2.021047 52.278732,-2.021128 52.278700,-2.021161 52.278660,-2.021196 52.278644,-2.021301 52.278621,-2.021382 52.278565,-2.021611 52.278498,-2.021806 52.278490,-2.022201 52.278402,-2.022338 52.278355,-2.022439 52.278336,-2.022470 52.278316,-2.022581 52.278325,-2.022653 52.278307,-2.022738 52.278363,-2.022885 52.278411,-2.022910 52.278408,-2.023106 52.278325,-2.023263 52.278086,-2.023281 52.278113,-2.023313 52.278054,-2.023329 52.278047,-2.023387 52.278197,-2.023396 52.278284,-2.023461 52.278389,-2.023499 52.278400,-2.023599 52.278390,-2.023628 52.278408,-2.023625 52.278473,-2.023613 52.278497,-2.023587 52.278521,-2.023531 52.278549,-2.023433 52.278577,-2.023421 52.278642,-2.023618 52.278658,-2.023660 52.278656,-2.023742 52.278634,-2.023813 52.278631,-2.023826 52.278634,-2.023836 52.278655,-2.023825 52.278679,-2.023826 52.278699,-2.023918 52.278730,-2.023985 52.278720,-2.024014 52.278733,-2.024014 52.278762,-2.023970 52.278819,-2.023951 52.278832,-2.023897 52.278834,-2.023885 52.278846,-2.023917 52.278925,-2.023964 52.278989,-2.023957 52.279030,-2.023965 52.279057,-2.024024 
52.279158,-2.024026 52.279195,-2.024008 52.279231,-2.024014 52.279279,-2.023961 52.279298,-2.023959 52.279319,-2.023992 52.279443,-2.024086 52.279561,-2.024209 52.279544,-2.024224 52.279567,-2.024263 52.279563,-2.024282 52.279579,-2.024296 52.279612,-2.024296 52.279671,-2.024400 52.279734,-2.024476 52.279822,-2.024447 52.279860,-2.024444 52.279878,-2.024457 52.279889,-2.024497 52.279896,-2.024521 52.279909,-2.024516 52.279989,-2.024560 52.279980,-2.024615 52.279987,-2.024705 52.280015,-2.024705 52.280035,-2.024664 52.280052,-2.024640 52.280078,-2.024678 52.280113,-2.024680 52.280126,-2.024743 52.280172,-2.024799 52.280183,-2.024835 52.280202,-2.024840 52.280218,-2.024827 52.280298,-2.024832 52.280368,-2.024856 52.280376,-2.025076 52.280357,-2.025243 52.280330,-2.025950 52.280616,-2.026153 52.280647,-2.026219 52.280641,-2.026310 52.280712,-2.026505 52.280822,-2.026628 52.280960,-2.026869 52.280978,-2.027077 52.280958,-2.027300 52.280952,-2.027480 52.280960,-2.027622 52.280994,-2.027729 52.281031,-2.027948 52.281142,-2.028159 52.281188,-2.028220 52.281275,-2.028297 52.281338,-2.028338 52.281356,-2.028407 52.281356,-2.028442 52.281365,-2.028578 52.281491,-2.028646 52.281530,-2.028874 52.281633,-2.029714 52.282096,-2.030078 52.282355,-2.030281 52.282461,-2.030501 52.282534,-2.030618 52.282552,-2.030967 52.282554,-2.031084 52.282538,-2.031442 52.282521,-2.031706 52.282521,-2.031773 52.282526,-2.031840 52.282549,-2.032201 52.282622,-2.032623 52.282664,-2.032788 52.282637,-2.033123 52.282637,-2.033135 52.282663,-2.033159 52.282664,-2.033206 52.282747,-2.033229 52.282837,-2.033331 52.282878,-2.033503 52.282831,-2.033858 52.282644,-2.033854 52.282634,-2.033868 52.282638,-2.033895 52.282624,-2.034407 52.282818,-2.034377 52.282837,-2.034396 52.282850,-2.034087 52.283053,-2.033945 52.283161,-2.034116 52.283238,-2.034345 52.283292,-2.034774 52.283439,-2.034812 52.283467,-2.035173 52.283597,-2.035322 52.283676,-2.035409 52.283757,-2.035388 52.283766,-2.035461 52.283847,-2.035463 
52.283868,-2.035482 52.283898,-2.035579 52.284148,-2.035655 52.284296,-2.035982 52.284204,-2.035995 52.284212,-2.036023 52.284203,-2.036092 52.284259,-2.037230 52.284910,-2.037793 52.285224,-2.037847 52.285230,-2.037851 52.285241,-2.038491 52.285625,-2.039867 52.286008,-2.040465 52.286181,-2.040725 52.286264,-2.041175 52.286364,-2.041472 52.286455,-2.041636 52.286520,-2.042654 52.287070,-2.043018 52.287244,-2.043205 52.287312,-2.043435 52.287379,-2.044189 52.287580,-2.044492 52.287647,-2.045021 52.287711,-2.045283 52.287734,-2.045534 52.287771,-2.046079 52.287795,-2.046641 52.287796,-2.046863 52.287806,-2.047014 52.287823,-2.047079 52.287838,-2.047852 52.287700,-2.047892 52.287640,-2.048324 52.287533,-2.048860 52.287309,-2.048904 52.287313,-2.048950 52.287295,-2.048958 52.287316,-2.048966 52.287316,-2.049629 52.287195,-2.050866 52.287057,-2.051209 52.286957,-2.052545 52.286378,-2.052929 52.286177,-2.053228 52.285970,-2.053522 52.285785,-2.053954 52.285492,-2.054364 52.285265,-2.054472 52.285171,-2.054576 52.285133,-2.054587 52.285142,-2.055054 52.284908,-2.055490 52.284742,-2.055516 52.284719,-2.055515 52.284704,-2.055550 52.284690,-2.056196 52.284552,-2.056796 52.284440,-2.057510 52.284337,-2.057668 52.284298,-2.057820 52.284279,-2.058061 52.284275,-2.058129 52.284282,-2.058266 52.284314,-2.058447 52.284384,-2.058609 52.284383,-2.059424 52.284297,-2.059726 52.284239,-2.059722 52.284232,-2.060016 52.284168,-2.060405 52.284103,-2.060679 52.284171,-2.060850 52.284176,-2.060935 52.284155,-2.061002 52.284219,-2.061398 52.284438,-2.061905 52.284742,-2.062286 52.284864,-2.062883 52.284939,-2.063442 52.285044,-2.063825 52.285177,-2.064109 52.285260,-2.067122 52.286288,-2.067270 52.286325,-2.067770 52.286416,-2.068365 52.286557,-2.070442 52.287148,-2.070643 52.287224,-2.070829 52.287278,-2.071135 52.287398,-2.071137 52.287417,-2.071155 52.287426,-2.071139 52.287446,-2.071147 52.287524,-2.071307 52.287637,-2.071509 52.287721,-2.071695 52.287777,-2.071703 52.287769,-2.073369 
52.288352,-2.074205 52.288621,-2.074508 52.288679,-2.075064 52.288727,-2.075535 52.288834,-2.075774 52.288904,-2.076256 52.289002,-2.076744 52.289064,-2.077055 52.289078,-2.078275 52.289072,-2.078724 52.289081,-2.079201 52.289050,-2.079657 52.288989,-2.080043 52.288919,-2.080861 52.288811,-2.081174 52.288777,-2.081460 52.288780,-2.081625 52.288793,-2.081854 52.288827,-2.082291 52.288854,-2.082956 52.288852,-2.083308 52.288815,-2.084300 52.288846,-2.085219 52.288895,-2.085807 52.288953,-2.086436 52.288991,-2.087592 52.289034,-2.088000 52.289104,-2.088219 52.289151,-2.088924 52.289330,-2.089143 52.289416,-2.089221 52.289467,-2.089340 52.289473,-2.089449 52.289454,-2.089574 52.289408,-2.089747 52.289380,-2.089879 52.289347,-2.090188 52.289235,-2.090398 52.289134,-2.090905 52.288765,-2.091065 52.288682,-2.091210 52.288634,-2.091697 52.288546,-2.091690 52.288555,-2.091761 52.288544,-2.091670 52.288663,-2.091704 52.288665,-2.091599 52.288817,-2.091037 52.289604,-2.090062 52.290940,-2.089850 52.291267,-2.089621 52.291537,-2.089496 52.291702,-2.089538 52.291710,-2.089489 52.291750,-2.089442 52.291806,-2.089307 52.291815,-2.088881 52.292379,-2.087762 52.293936,-2.087498 52.294289,-2.086921 52.294974,-2.086501 52.295430,-2.086097 52.295818,-2.085506 52.296304,-2.085518 52.296388,-2.085349 52.296586,-2.084887 52.296939,-2.084896 52.296977,-2.084922 52.296957,-2.085050 52.297304,-2.085253 52.297750,-2.085339 52.297897,-2.085471 52.298248,-2.085610 52.298529,-2.085647 52.298691,-2.086319 52.298609,-2.086429 52.298930,-2.086468 52.298927,-2.086526 52.299067,-2.086638 52.299185,-2.087165 52.300618,-2.087247 52.300612,-2.087466 52.301197,-2.087445 52.301203,-2.087646 52.301548,-2.087754 52.301717,-2.087778 52.301713,-2.087846 52.301842,-2.087453 52.301933,-2.087575 52.302147,-2.087680 52.302359,-2.088059 52.302315,-2.088586 52.303393,-2.088600 52.303445,-2.088639 52.303490,-2.088184 52.303642,-2.088000 52.303716,-2.087934 52.303726,-2.087872 52.303718,-2.087965 52.303825,-2.088306 
52.303689,-2.088368 52.303689,-2.088613 52.303651,-2.089002 52.303535,-2.089168 52.303474,-2.089415 52.303412,-2.089627 52.303376,-2.089680 52.303381,-2.089731 52.303398,-2.089808 52.303460,-2.090061 52.303576,-2.090177 52.303638,-2.090240 52.303682,-2.090264 52.303716,-2.090305 52.303817,-2.090353 52.303896,-2.090424 52.304772,-2.090411 52.304924,-2.090368 52.304919,-2.090054 52.305007,-2.089957 52.305056,-2.089914 52.305133,-2.089795 52.305256,-2.089661 52.305412,-2.089570 52.305440,-2.089398 52.305612,-2.089353 52.305671,-2.089157 52.305807,-2.089142 52.305801,-2.089030 52.305938,-2.088884 52.306162,-2.088746 52.306409,-2.088712 52.306486,-2.088691 52.306593,-2.088621 52.306803,-2.088535 52.306999,-2.088504 52.307051,-2.088412 52.307428,-2.088075 52.308255,-2.088001 52.308460,-2.087950 52.308631,-2.087737 52.308893,-2.087422 52.309400,-2.087301 52.309685,-2.087312 52.309752,-2.087305 52.309787,-2.087246 52.309884,-2.087184 52.309943,-2.087100 52.310041,-2.087040 52.310142,-2.086968 52.310340,-2.086917 52.310438,-2.086682 52.310766,-2.086573 52.310978,-2.086508 52.311069,-2.086327 52.311251,-2.086261 52.311301,-2.085715 52.311598,-2.085498 52.311817,-2.085410 52.311928,-2.085356 52.312024,-2.085283 52.312273,-2.085250 52.312691,-2.085369 52.312772,-2.085511 52.312892,-2.085635 52.312967,-2.085903 52.313191,-2.085483 52.313373,-2.085421 52.313383,-2.085171 52.313484,-2.085278 52.313556,-2.085376 52.313642,-2.085635 52.313981,-2.085685 52.314011,-2.085748 52.314078,-2.085976 52.314252,-2.086179 52.314389,-2.086336 52.314511,-2.087168 52.314448,-2.088603 52.314282,-2.090353 52.314114,-2.091637 52.313925,-2.093850 52.313566,-2.095110 52.313352,-2.095063 52.313396,-2.095114 52.313390,-2.095021 52.313455,-2.094980 52.313493,-2.094876 52.313814,-2.094887 52.313851,-2.094860 52.313915,-2.094872 52.313916,-2.094711 52.314213,-2.094600 52.314373,-2.094547 52.314502,-2.094506 52.314562,-2.094396 52.314662,-2.094283 52.314789,-2.094096 52.315121,-2.094117 52.315247,-2.094077 
52.315289,-2.094110 52.315364,-2.094138 52.315400,-2.094279 52.315329,-2.095189 52.314764,-2.095501 52.314583,-2.095649 52.314529,-2.096035 52.314452,-2.096696 52.314342,-2.096970 52.314221,-2.097872 52.313866,-2.098594 52.313625,-2.099012 52.313523,-2.099169 52.313505,-2.099232 52.313487,-2.099232 52.313467,-2.099424 52.313476,-2.100191 52.313420,-2.100623 52.313402,-2.101965 52.313385,-2.102478 52.313468,-2.102733 52.313542,-2.103398 52.313667,-2.105261 52.313981,-2.105966 52.314135,-2.106211 52.314167,-2.106413 52.314209,-2.107741 52.314432,-2.108032 52.314465,-2.108610 52.314513,-2.109551 52.314558,-2.110031 52.314569,-2.111369 52.314531,-2.112306 52.314414,-2.113812 52.313707,-2.114038 52.313612,-2.114489 52.313444,-2.114296 52.313163,-2.114314 52.313158,-2.114308 52.313150,-2.115067 52.313092,-2.115593 52.313089,-2.117168 52.313107,-2.117440 52.313136,-2.118038 52.313240,-2.119347 52.313587,-2.119356 52.313577,-2.119987 52.313738,-2.120187 52.313565,-2.120199 52.313568,-2.120218 52.313551,-2.120894 52.313779,-2.121083 52.313832,-2.121394 52.313953,-2.122043 52.314135,-2.122291 52.314189,-2.123042 52.314326,-2.123840 52.314433,-2.124091 52.314483,-2.123785 52.314935,-2.123592 52.315188,-2.123448 52.315320,-2.123084 52.315622,-2.122828 52.315855,-2.122568 52.316112,-2.122506 52.316178,-2.122463 52.316241,-2.122313 52.316491,-2.122203 52.316615,-2.122052 52.316746,-2.121900 52.316853,-2.121795 52.316916,-2.121592 52.317011,-2.121536 52.317025,-2.121396 52.317086,-2.121146 52.317227,-2.120886 52.317362,-2.120798 52.317433,-2.120798 52.317469,-2.120749 52.317496,-2.120662 52.317512,-2.120535 52.317578,-2.120101 52.317836,-2.119744 52.318092,-2.119650 52.318187,-2.119598 52.318265,-2.119505 52.318373,-2.118967 52.318735,-2.118809 52.318860,-2.118619 52.319043,-2.118500 52.319180,-2.118390 52.319273,-2.118112 52.319549,-2.118039 52.319611,-2.117644 52.320018,-2.117261 52.320476,-2.117036 52.320778,-2.116937 52.320929,-2.116827 52.321063,-2.116742 52.321188,-2.116534 
52.321428,-2.116464 52.321520,-2.116384 52.321592,-2.116000 52.321856,-2.115902 52.321942,-2.115765 52.322085,-2.116078 52.322157,-2.116434 52.322224,-2.117004 52.322374,-2.117281 52.322427,-2.117917 52.322589,-2.118185 52.322664,-2.118394 52.322734,-2.117899 52.323001,-2.117947 52.323041,-2.118041 52.323157,-2.118106 52.323314,-2.118155 52.323591,-2.118218 52.323777,-2.118240 52.323921,-2.118233 52.324028,-2.118283 52.324157,-2.118291 52.324285,-2.118346 52.324400,-2.118501 52.324440,-2.118493 52.324518,-2.118501 52.324587,-2.118564 52.324878,-2.118597 52.325156,-2.118346 52.325172,-2.117312 52.325162,-2.117316 52.325252,-2.117441 52.325744,-2.117491 52.326015,-2.117604 52.326394,-2.117641 52.326557,-2.117580 52.326613,-2.117480 52.326651,-2.117369 52.326711,-2.117307 52.326770,-2.117239 52.326860,-2.117114 52.327112,-2.117055 52.327253,-2.116620 52.327100,-2.116181 52.327087,-2.116224 52.327300,-2.116294 52.327498,-2.116278 52.327558,-2.116291 52.327653,-2.116331 52.327785,-2.116384 52.327897,-2.116542 52.328152,-2.116882 52.328570,-2.117014 52.328778,-2.117264 52.329278,-2.117316 52.329461,-2.117354 52.329645,-2.117384 52.329733,-2.117488 52.329972,-2.117758 52.330416,-2.118110 52.330938,-2.118206 52.331134,-2.118226 52.331141,-2.118225 52.331165,-2.118358 52.331372,-2.118363 52.331398,-2.118379 52.331397,-2.118375 52.331488,-2.118385 52.331538,-2.118472 52.331748,-2.118489 52.331769,-2.118546 52.331778,-2.118562 52.332037,-2.118548 52.332168,-2.118530 52.332208,-2.118422 52.332358,-2.118330 52.332520,-2.118086 52.332969,-2.117980 52.333191,-2.117965 52.333239,-2.117942 52.333486,-2.117849 52.333487,-2.117661 52.334663,-2.117608 52.335096,-2.117569 52.335274,-2.117546 52.335273,-2.117540 52.335316,-2.117373 52.335171,-2.117240 52.335168,-2.117218 52.335205,-2.117228 52.335228,-2.117228 52.335275,-2.117221 52.335299,-2.117201 52.335321,-2.117154 52.335325,-2.117098 52.335318,-2.117017 52.335328,-2.116976 52.335343,-2.116940 52.335381,-2.116868 52.335416,-2.116830 
52.335413,-2.116804 52.335399,-2.116698 52.335407,-2.116693 52.335414,-2.116703 52.335473,-2.116696 52.335492,-2.116664 52.335510,-2.116478 52.335524,-2.116427 52.335519,-2.116249 52.335525,-2.116230 52.335505,-2.116232 52.335489,-2.116110 52.335417,-2.116068 52.335412,-2.116055 52.335402,-2.115825 52.335409,-2.115742 52.335429,-2.115672 52.335421,-2.115665 52.335280,-2.115460 52.335294,-2.115409 52.335285,-2.115186 52.335294,-2.115175 52.335290,-2.115169 52.335271,-2.115181 52.335243,-2.115239 52.335213,-2.115256 52.335194,-2.115236 52.335114,-2.115098 52.335147,-2.114971 52.335258,-2.114736 52.335307,-2.114681 52.335345,-2.114640 52.335391,-2.114588 52.335417,-2.114543 52.335468,-2.114559 52.335485,-2.114614 52.335516,-2.114636 52.335543,-2.114639 52.335576,-2.114615 52.335609,-2.114564 52.335620,-2.114479 52.335593,-2.114428 52.335593,-2.114381 52.335646,-2.114360 52.335692,-2.114278 52.335773,-2.114223 52.335850,-2.114190 52.335861,-2.114120 52.335866,-2.114055 52.335902,-2.114044 52.335938,-2.113947 52.335956,-2.113757 52.336072,-2.113726 52.336142,-2.113689 52.336195,-2.113704 52.336237,-2.113698 52.336268,-2.113685 52.336280,-2.113647 52.336287,-2.113596 52.336280,-2.113521 52.336304,-2.113583 52.336368,-2.113633 52.336388,-2.113644 52.336412,-2.113481 52.336590,-2.113475 52.336669,-2.113491 52.336691,-2.113487 52.336715,-2.113456 52.336739,-2.113390 52.336745,-2.113322 52.336783,-2.113356 52.336805,-2.113431 52.336821,-2.113468 52.336837,-2.113480 52.336849,-2.113480 52.336868,-2.113464 52.336896,-2.113340 52.336901,-2.113275 52.336939,-2.113295 52.336954,-2.113388 52.336970,-2.113404 52.336989,-2.113401 52.337003,-2.113347 52.337045,-2.113290 52.337147,-2.113485 52.337307,-2.113653 52.337381,-2.113746 52.337493,-2.113884 52.337566,-2.113909 52.337594,-2.113941 52.337653,-2.113966 52.337673,-2.114067 52.337718,-2.114145 52.337727,-2.114178 52.337746,-2.114190 52.337832,-2.114212 52.337898,-2.114217 52.337987,-2.114233 52.338019,-2.114222 52.338057,-2.114226 
52.338119,-2.114251 52.338195,-2.114291 52.338236,-2.114311 52.338326,-2.114311 52.338398,-2.114303 52.338432,-2.114275 52.338475,-2.114226 52.338511,-2.114293 52.338524,-2.114817 52.338569,-2.115906 52.338688,-2.116485 52.338732,-2.116374 52.338905,-2.116333 52.339069,-2.116337 52.339161,-2.116373 52.339164,-2.116371 52.339209,-2.116426 52.339287,-2.116380 52.339683,-2.116341 52.339791,-2.116303 52.339857,-2.116229 52.339953,-2.116096 52.340166,-2.116780 52.340098,-2.116871 52.340364,-2.116961 52.340546,-2.117126 52.340935,-2.117432 52.341606,-2.117443 52.341691,-2.117415 52.341729,-2.117385 52.341744,-2.117268 52.341752,-2.117037 52.341785,-2.117002 52.341924,-2.117105 52.341972,-2.117333 52.342054,-2.118257 52.342166,-2.118881 52.342171,-2.118937 52.342077,-2.118984 52.342032,-2.119057 52.341923,-2.119173 52.341702,-2.119199 52.341644,-2.119268 52.341429,-2.119387 52.341420,-2.119643 52.341340,-2.119882 52.341209,-2.120199 52.341051,-2.120634 52.340809,-2.121048 52.340624,-2.120996 52.340551,-2.120897 52.340450,-2.121397 52.340281,-2.121708 52.340214,-2.121866 52.340194,-2.123120 52.340106,-2.123918 52.340060,-2.123903 52.340207,-2.124721 52.340227,-2.124719 52.341006,-2.124758 52.341979,-2.123875 52.342144,-2.122818 52.342358,-2.122811 52.342383,-2.122819 52.342411,-2.122897 52.342504,-2.123337 52.342965,-2.123454 52.343075,-2.123610 52.343154,-2.123718 52.343240,-2.123991 52.343389,-2.124168 52.343458,-2.124411 52.343520,-2.124646 52.343606,-2.124852 52.343706,-2.125255 52.343882,-2.125366 52.343948,-2.125388 52.343981,-2.125391 52.344009,-2.125449 52.344109,-2.125655 52.344338,-2.125871 52.344560,-2.126069 52.344858,-2.126167 52.344971,-2.126248 52.345095,-2.126334 52.345199,-2.126366 52.345279,-2.126384 52.345361,-2.126381 52.345506,-2.126389 52.345539,-2.126779 52.345507,-2.127042 52.345475,-2.127828 52.345479,-2.128443 52.345499,-2.128745 52.345543,-2.128808 52.345568,-2.129027 52.345606,-2.129128 52.345656,-2.129192 52.345675,-2.129368 52.345771,-2.130014 
52.346279,-2.130254 52.346498,-2.130533 52.346680,-2.130844 52.346852,-2.131172 52.346950,-2.131444 52.347043,-2.131440 52.347055,-2.131477 52.347065,-2.131341 52.347236,-2.131021 52.347695,-2.130887 52.347865,-2.130860 52.347931,-2.130776 52.348005,-2.130291 52.348599,-2.129911 52.349107,-2.129556 52.349620,-2.129539 52.349680,-2.129483 52.349796,-2.129311 52.350279,-2.129222 52.350434,-2.129199 52.350498,-2.129157 52.350548,-2.129092 52.350662,-2.129052 52.350669,-2.129020 52.350716,-2.128995 52.350709,-2.128874 52.350863,-2.128699 52.351036,-2.128599 52.351115,-2.128008 52.351516,-2.127638 52.351729,-2.127536 52.351798,-2.127331 52.351960,-2.127747 52.352230,-2.128179 52.352209,-2.128336 52.352217,-2.128590 52.352213,-2.128747 52.352224,-2.129133 52.352198,-2.129363 52.352099,-2.129477 52.352079,-2.129790 52.351987,-2.130048 52.351787,-2.130735 52.351314,-2.131491 52.350616,-2.131605 52.350491,-2.131622 52.350496,-2.131775 52.350338,-2.131806 52.350284,-2.132012 52.350102,-2.132009 52.350091,-2.132111 52.349876,-2.132377 52.349221,-2.132424 52.349067,-2.132484 52.348928,-2.132765 52.348159,-2.133023 52.347998,-2.133131 52.348034,-2.133156 52.348025,-2.134690 52.348882,-2.135149 52.349076,-2.136057 52.349531,-2.136129 52.349545,-2.136206 52.349579,-2.136264 52.349595,-2.136565 52.349735,-2.137112 52.349942,-2.137173 52.349921,-2.137261 52.349935,-2.137514 52.349922,-2.137672 52.349843,-2.138014 52.349513,-2.138083 52.349432,-2.138199 52.349255,-2.138278 52.349111,-2.138421 52.348883,-2.138595 52.348700,-2.138891 52.348503,-2.139245 52.348351,-2.139602 52.348288,-2.139885 52.348127,-2.140037 52.348100,-2.140074 52.348080,-2.140119 52.348041,-2.140146 52.348031,-2.140204 52.348031,-2.140268 52.348016,-2.140483 52.348020,-2.141926 52.347937,-2.142056 52.347918,-2.142103 52.347669,-2.142113 52.347657,-2.142114 52.347581,-2.142129 52.347542,-2.142187 52.347471,-2.142315 52.347366,-2.142348 52.347324,-2.142421 52.347199,-2.142626 52.346985,-2.142704 52.346914,-2.142816 
52.346874,-2.143331 52.346642,-2.143822 52.346436,-2.144303 52.346293,-2.144772 52.346065,-2.145786 52.345503,-2.146264 52.345228,-2.146698 52.345411,-2.146962 52.345505,-2.147218 52.345624,-2.148219 52.346181,-2.148509 52.346380,-2.148608 52.346416,-2.148864 52.346294,-2.148891 52.346270,-2.148974 52.346107,-2.148978 52.346081,-2.149007 52.346025,-2.149030 52.346006,-2.149039 52.345978,-2.149108 52.345940,-2.149160 52.345896,-2.149181 52.345844,-2.149257 52.345777,-2.149298 52.345645,-2.149317 52.345612,-2.149374 52.345575,-2.149548 52.345400,-2.149759 52.345253,-2.149709 52.345200,-2.149683 52.345156,-2.149675 52.345096,-2.149764 52.345011,-2.149760 52.344998,-2.149779 52.344975,-2.149772 52.344967,-2.149701 52.344967,-2.149799 52.344730,-2.149823 52.344689,-2.149891 52.344633,-2.149984 52.344584,-2.150003 52.344540,-2.150047 52.344485,-2.150144 52.344423,-2.150375 52.344195,-2.150561 52.344025,-2.150953 52.343686,-2.151118 52.343234,-2.151170 52.342818,-2.151196 52.342705,-2.151230 52.342610,-2.151266 52.342542,-2.151337 52.342437,-2.151588 52.342507,-2.151707 52.342175,-2.151792 52.341803,-2.151815 52.341739,-2.151839 52.341705,-2.151912 52.341632,-2.151961 52.341521,-2.151947 52.341479,-2.151954 52.341336,-2.151931 52.341175,-2.151925 52.341055,-2.151935 52.341010,-2.152010 52.340820,-2.151978 52.340706,-2.151973 52.340636,-2.151990 52.340528,-2.151986 52.340446,-2.152005 52.340384,-2.152052 52.340328,-2.152141 52.340091,-2.152411 52.339611,-2.152532 52.339230,-2.152599 52.338781,-2.152571 52.338687,-2.152361 52.338695,-2.152306 52.338704,-2.152303 52.338697,-2.152141 52.338125,-2.152111 52.337983,-2.152015 52.337841,-2.152023 52.337755,-2.151975 52.337663,-2.151921 52.337607,-2.151889 52.337587,-2.151896 52.337568,-2.151882 52.337481,-2.151830 52.337423,-2.151824 52.337399,-2.151841 52.337305,-2.151995 52.337192,-2.151927 52.337036,-2.151925 52.336990,-2.151919 52.336985,-2.152022 52.336898,-2.152214 52.336713,-2.152252 52.336693,-2.152416 52.336642,-2.152624 
52.336476,-2.152681 52.336407,-2.152807 52.336293,-2.152850 52.336239,-2.152925 52.336192,-2.153408 52.335725,-2.153758 52.335705,-2.153848 52.335684,-2.153882 52.335665,-2.154056 52.335496,-2.154262 52.335348,-2.154398 52.335207,-2.154458 52.335126,-2.154538 52.335043,-2.154581 52.334941,-2.154721 52.334768,-2.154731 52.334667,-2.154844 52.334455,-2.154820 52.334451,-2.154831 52.334421,-2.154820 52.334377,-2.154787 52.334351,-2.154813 52.334327,-2.154816 52.334309,-2.154831 52.334310,-2.154856 52.334279,-2.154961 52.334211,-2.155008 52.334142,-2.155018 52.334117,-2.155022 52.334047,-2.154978 52.333982,-2.154989 52.333945,-2.155010 52.333923,-2.155272 52.333816,-2.155285 52.333801,-2.155303 52.333735,-2.155366 52.333674,-2.155509 52.333581,-2.155527 52.333581,-2.155645 52.333636,-2.155684 52.333644,-2.155755 52.333640,-2.155830 52.333625,-2.155951 52.333567,-2.156158 52.333440,-2.156200 52.333390,-2.156225 52.333349,-2.156251 52.333274,-2.156250 52.333219,-2.156205 52.333147,-2.156215 52.333124,-2.156364 52.332974,-2.156488 52.332837,-2.156512 52.332801,-2.156415 52.332365,-2.156256 52.331969,-2.156245 52.331759,-2.156167 52.331527,-2.156074 52.331323,-2.156055 52.331098,-2.156072 52.331021,-2.156137 52.330972,-2.156224 52.331001,-2.156520 52.331045,-2.156667 52.331058,-2.156922 52.331058,-2.157150 52.331034,-2.157615 52.330952,-2.158323 52.330782,-2.159536 52.330378,-2.159914 52.330213,-2.160154 52.329931,-2.160362 52.329721,-2.160482 52.329617,-2.160716 52.329441,-2.161531 52.328780,-2.161775 52.328570,-2.161811 52.328500,-2.161991 52.328437,-2.162078 52.328382,-2.162136 52.328330,-2.162230 52.328273,-2.162497 52.328146,-2.162832 52.327858,-2.163012 52.327683,-2.163238 52.327495,-2.163504 52.327221,-2.163642 52.327119,-2.163685 52.327063,-2.163755 52.326934,-2.163814 52.326878,-2.163904 52.326818,-2.163969 52.326719,-2.164095 52.326580,-2.164155 52.326499,-2.164205 52.326405,-2.164244 52.326308,-2.164321 52.326195,-2.164351 52.326137,-2.164361 52.326091,-2.164382 
52.326064,-2.164382 52.326039,-2.164340 52.326017,-2.164461 52.325940,-2.164749 52.325733,-2.165003 52.325616,-2.165200 52.325552,-2.165764 52.325421,-2.165973 52.325348,-2.165961 52.325286,-2.166443 52.325076,-2.166552 52.325007,-2.166641 52.324964,-2.166930 52.324774,-2.167050 52.324727,-2.167042 52.324712,-2.167168 52.324656,-2.167251 52.324630,-2.167556 52.324508,-2.167583 52.324483,-2.167617 52.324433,-2.167637 52.324384,-2.167635 52.324333,-2.167643 52.324309,-2.167688 52.324207,-2.167747 52.324111,-2.167768 52.323804,-2.167766 52.323644,-2.167805 52.323279,-2.167736 52.323146,-2.167752 52.322988,-2.167681 52.322773,-2.167652 52.322712,-2.167619 52.322562,-2.167602 52.322261,-2.167597 52.321704,-2.167563 52.321542,-2.167577 52.321386,-2.167565 52.321306,-2.167577 52.321226,-2.167554 52.321062,-2.167527 52.320962,-2.167557 52.320904,-2.167550 52.320719,-2.167564 52.320559,-2.167516 52.320391,-2.167507 52.320309,-2.167524 52.320227,-2.167551 52.320153,-2.167613 52.320029,-2.167707 52.319887,-2.167871 52.319691,-2.167837 52.319551,-2.167869 52.319431,-2.167870 52.319341,-2.167855 52.319293,-2.167868 52.319207,-2.167815 52.319076,-2.167829 52.318983,-2.167820 52.318983,-2.167827 52.318943,-2.167818 52.318923,-2.167807 52.318911,-2.167724 52.318873,-2.167593 52.318830,-2.167565 52.318812,-2.167534 52.318770,-2.167461 52.318567,-2.167190 52.318458,-2.167019 52.318429,-2.166805 52.318374,-2.166845 52.318353,-2.166812 52.318346,-2.166816 52.318337,-2.166873 52.318280,-2.167197 52.318118,-2.167337 52.318059,-2.167626 52.317981,-2.167828 52.317841,-2.167966 52.317805,-2.168157 52.317720,-2.168325 52.317613,-2.168364 52.317579,-2.168428 52.317485,-2.168522 52.317403,-2.168578 52.317366,-2.168703 52.317298,-2.168802 52.317259,-2.169007 52.317203,-2.169080 52.317187,-2.169202 52.317176,-2.169368 52.317183,-2.169461 52.317170,-2.169515 52.317156,-2.169651 52.317066,-2.169737 52.317055,-2.170013 52.317096,-2.170343 52.317102,-2.170484 52.317114,-2.170837 52.317174,-2.170985 
52.317190,-2.171361 52.317270,-2.171475 52.317279,-2.171796 52.317280,-2.172001 52.317267,-2.172172 52.317218,-2.172637 52.317211,-2.172726 52.317221,-2.172919 52.317223,-2.173170 52.317205,-2.173231 52.317191,-2.173338 52.317090,-2.173571 52.316962,-2.173787 52.316803,-2.173968 52.316718,-2.174129 52.316665,-2.174429 52.316603,-2.174585 52.316600,-2.174595 52.316639,-2.174623 52.316640,-2.174671 52.316820,-2.174690 52.316962,-2.174903 52.317702,-2.174976 52.317861,-2.175098 52.318394,-2.175111 52.318512,-2.175105 52.318723,-2.175137 52.318864,-2.175171 52.318955,-2.175217 52.319173,-2.175312 52.319823,-2.175281 52.320017,-2.175092 52.320602,-2.174953 52.320846,-2.175157 52.320862,-2.175128 52.320925,-2.175161 52.320934,-2.175087 52.321118,-2.175078 52.321188,-2.175088 52.321285,-2.175086 52.321344,-2.175104 52.321455,-2.175203 52.321838,-2.175208 52.322077,-2.175234 52.322194,-2.175235 52.322248,-2.175192 52.322461,-2.175148 52.322798,-2.175018 52.323163,-2.174992 52.323374,-2.174922 52.323605,-2.174831 52.323865,-2.174762 52.324124,-2.174716 52.324379,-2.174706 52.324609,-2.174627 52.324921,-2.174568 52.325072,-2.174475 52.325273,-2.174345 52.325650,-2.174240 52.325909,-2.174163 52.326047,-2.174062 52.326181,-2.174041 52.326226,-2.174034 52.326264,-2.173781 52.326794,-2.173714 52.326955,-2.173621 52.327099,-2.173440 52.327322,-2.173443 52.327399,-2.173305 52.327594,-2.173265 52.327665,-2.173226 52.327711,-2.173188 52.327735,-2.173136 52.327797,-2.173132 52.327808,-2.173160 52.327839,-2.173060 52.327966,-2.172849 52.328103,-2.172617 52.328220,-2.172411 52.328262,-2.172314 52.328291,-2.172239 52.328334,-2.172190 52.328381,-2.172082 52.328536,-2.172051 52.328651,-2.172027 52.328704,-2.171989 52.328767,-2.171897 52.328879,-2.171729 52.329352,-2.171692 52.329480,-2.171620 52.329608,-2.171626 52.329610,-2.171556 52.329714,-2.171551 52.329734,-2.171225 52.330148,-2.171069 52.330395,-2.170947 52.330645,-2.170744 52.331426,-2.170772 52.331516,-2.170925 52.331642,-2.171090 
52.331704,-2.171152 52.331716,-2.171202 52.331718,-2.171623 52.331686,-2.172261 52.331684,-2.172524 52.331692,-2.172665 52.331706,-2.172735 52.331707,-2.172817 52.331696,-2.172994 52.331713,-2.173220 52.331718,-2.173343 52.331713,-2.173420 52.331723,-2.173660 52.331728,-2.173927 52.331666,-2.174040 52.331648,-2.174143 52.331452,-2.174194 52.331397,-2.174286 52.331356,-2.174374 52.331343,-2.175252 52.331355,-2.175561 52.331392,-2.175703 52.331399,-2.175772 52.331424,-2.175827 52.331471,-2.176049 52.331552,-2.176318 52.331621,-2.176565 52.331660,-2.176894 52.331696,-2.177422 52.331700,-2.177705 52.331757,-2.177882 52.331756,-2.178003 52.331702,-2.178084 52.331638,-2.178135 52.331583,-2.178189 52.331555,-2.178264 52.331495,-2.178471 52.331432,-2.178580 52.331417,-2.178718 52.331377,-2.178949 52.331337,-2.179223 52.331255,-2.179737 52.331181,-2.180123 52.331140,-2.180877 52.331100,-2.181001 52.331085,-2.181238 52.331034,-2.181453 52.330966,-2.181606 52.330892,-2.182079 52.330633,-2.182259 52.330575,-2.182558 52.330432,-2.182666 52.330371,-2.182889 52.330308,-2.182955 52.330296,-2.183352 52.330261,-2.183495 52.330224,-2.183557 52.330171,-2.183584 52.330159,-2.184212 52.330135,-2.184803 52.330100,-2.184958 52.330076,-2.185085 52.330038,-2.185572 52.329844,-2.185688 52.329784,-2.185824 52.329743,-2.186060 52.329691,-2.186245 52.329610,-2.186349 52.329575,-2.186471 52.329541,-2.186729 52.329489,-2.187065 52.329331,-2.187199 52.329225,-2.187320 52.329180,-2.187527 52.329169,-2.187637 52.329201,-2.187696 52.329209,-2.188117 52.329218,-2.188283 52.329183,-2.188438 52.329176,-2.189051 52.329076,-2.189045 52.329056,-2.189104 52.329048,-2.189338 52.329087,-2.189494 52.329083,-2.189647 52.329019,-2.189885 52.328899,-2.190011 52.328871,-2.190265 52.328875,-2.190386 52.328899,-2.190497 52.328932,-2.190507 52.329013,-2.190498 52.329242,-2.190519 52.329528,-2.190581 52.329874,-2.190581 52.329917,-2.190553 52.330558,-2.190469 52.330901,-2.190495 52.331033,-2.190495 52.331106,-2.190352 
52.331540,-2.190358 52.331598,-2.190444 52.331875,-2.190480 52.331950,-2.190545 52.332041,-2.190718 52.332205,-2.190914 52.332367,-2.191054 52.332530,-2.191102 52.332575,-2.191222 52.332684,-2.191465 52.332881,-2.191563 52.333012,-2.191623 52.333049,-2.191964 52.333160,-2.192041 52.333194,-2.192186 52.333301,-2.192457 52.333450,-2.192489 52.333476,-2.192519 52.333510,-2.192538 52.333550,-2.192577 52.333665,-2.192650 52.333936,-2.192656 52.333982,-2.192629 52.334140,-2.192633 52.334196,-2.192594 52.334518,-2.192606 52.334542,-2.192672 52.334617,-2.192913 52.334780,-2.192980 52.334800,-2.193103 52.334792,-2.193148 52.334798,-2.193198 52.334821,-2.193209 52.334838,-2.193197 52.334894,-2.193146 52.334988,-2.193161 52.335029,-2.193232 52.335111,-2.193332 52.335182,-2.193382 52.335231,-2.193480 52.335262,-2.193494 52.335283,-2.193492 52.335308,-2.193485 52.335314,-2.193375 52.335350,-2.193394 52.335391,-2.193481 52.335475,-2.193496 52.335502,-2.193490 52.335517,-2.193438 52.335561,-2.193321 52.335581,-2.193233 52.335622,-2.193227 52.335645,-2.193245 52.335709,-2.193240 52.335770,-2.193258 52.335842,-2.193197 52.336003,-2.193185 52.336087,-2.193176 52.336102,-2.193133 52.336109,-2.192998 52.336066,-2.192849 52.335968,-2.192791 52.335988,-2.192762 52.336014,-2.192753 52.336049,-2.192763 52.336073,-2.192833 52.336148,-2.192827 52.336163,-2.192758 52.336242,-2.192590 52.336367,-2.192520 52.336457,-2.192513 52.336479,-2.192519 52.336494,-2.192610 52.336561,-2.192661 52.336610,-2.192739 52.336623,-2.192810 52.336607,-2.192871 52.336610,-2.192877 52.336625,-2.192866 52.336642,-2.192740 52.336681,-2.192709 52.336711,-2.192727 52.336760,-2.192809 52.336855,-2.192832 52.336891,-2.192832 52.336910,-2.192821 52.336918,-2.192780 52.336921,-2.192772 52.336931,-2.192492 52.336888,-2.192412 52.336950,-2.192409 52.336973,-2.192212 52.336915,-2.192173 52.336927,-2.192116 52.337023,-2.192100 52.337094,-2.192034 52.337152,-2.192005 52.337191,-2.192008 52.337208,-2.192023 52.337221,-2.192082 
52.337246,-2.192107 52.337270,-2.192104 52.337281,-2.192031 52.337349,-2.192023 52.337366,-2.192019 52.337412,-2.192040 52.337524,-2.192052 52.337543,-2.192075 52.337556,-2.192341 52.337458,-2.192448 52.337476,-2.192485 52.337496,-2.192514 52.337529,-2.192535 52.337541,-2.192539 52.337555,-2.192529 52.337621,-2.192518 52.337639,-2.192477 52.337671,-2.192399 52.337708,-2.192383 52.337726,-2.192398 52.337765,-2.192472 52.337843,-2.192574 52.337809,-2.192678 52.337817,-2.192722 52.337810,-2.192874 52.337903,-2.192996 52.337918,-2.193027 52.337930,-2.193040 52.337946,-2.193093 52.338026,-2.193088 52.338068,-2.193058 52.338091,-2.192932 52.338135,-2.192915 52.338158,-2.192921 52.338181,-2.192996 52.338232,-2.193039 52.338286,-2.193042 52.338301,-2.192993 52.338516,-2.193118 52.338536,-2.193241 52.338540,-2.193279 52.338558,-2.193298 52.338583,-2.193282 52.338610,-2.193197 52.338662,-2.193114 52.338732,-2.193091 52.338780,-2.193093 52.338866,-2.193113 52.338890,-2.193146 52.338896,-2.193564 52.338869,-2.193659 52.338905,-2.193684 52.338926,-2.193687 52.338945,-2.193680 52.338959,-2.193649 52.338977,-2.193463 52.338987,-2.193412 52.339027,-2.193402 52.339068,-2.193431 52.339104,-2.193590 52.339167,-2.193627 52.339186,-2.193647 52.339209,-2.193649 52.339226,-2.193563 52.339374,-2.193567 52.339389,-2.193598 52.339433,-2.193759 52.339536,-2.193777 52.339573,-2.193777 52.339598,-2.193830 52.339621,-2.193902 52.339683,-2.193899 52.339871,-2.193917 52.339934,-2.193949 52.339997,-2.194064 52.340045,-2.194340 52.340085,-2.194694 52.340170,-2.195063 52.340284,-2.195192 52.340342,-2.195538 52.340588,-2.195613 52.340654,-2.195651 52.340922,-2.195722 52.341019,-2.195756 52.341129,-2.195596 52.341258,-2.195703 52.341304,-2.196059 52.341489,-2.196331 52.341650,-2.196752 52.341869,-2.196998 52.342017,-2.197936 52.342553,-2.198575 52.342886,-2.198828 52.343041,-2.199158 52.343221,-2.200485 52.343984,-2.201049 52.344281,-2.201745 52.344605,-2.202135 52.344849,-2.202169 52.344856,-2.202238 
52.344809,-2.202638 52.344986,-2.202921 52.345142,-2.203049 52.345178,-2.203315 52.345219,-2.203513 52.345199,-2.203597 52.345217,-2.203644 52.345179,-2.204124 52.345269,-2.204309 52.345315,-2.204569 52.345398,-2.205080 52.345612,-2.205099 52.345638,-2.205105 52.345683,-2.205088 52.345707,-2.205030 52.345754,-2.205020 52.345788,-2.205017 52.345892,-2.204873 52.346254,-2.204838 52.346389,-2.204810 52.346723,-2.204777 52.346866,-2.204728 52.347009,-2.204743 52.347142,-2.204744 52.347313,-2.204754 52.347440,-2.204727 52.347721,-2.204733 52.347763,-2.204728 52.348025,-2.204778 52.348823,-2.204796 52.348951,-2.204833 52.349110,-2.204961 52.349439,-2.205051 52.349579,-2.205256 52.349815,-2.205339 52.349925,-2.205385 52.350014,-2.205412 52.350099,-2.205434 52.350334,-2.205419 52.350407,-2.205373 52.350555,-2.205348 52.350594,-2.205321 52.350703,-2.205346 52.350721,-2.205909 52.350915,-2.206659 52.351241,-2.207171 52.351429,-2.207208 52.351407,-2.207349 52.351465,-2.207417 52.351501,-2.207661 52.351690,-2.207641 52.351705,-2.207629 52.351739,-2.207621 52.351829,-2.207586 52.351950,-2.207672 52.352092,-2.207715 52.352183,-2.207741 52.352279,-2.207849 52.352506,-2.207908 52.352587,-2.208043 52.352724,-2.208069 52.352766,-2.207994 52.352947,-2.207894 52.353235,-2.208065 52.353290,-2.208416 52.353464,-2.208818 52.353708,-2.209078 52.353852,-2.210227 52.354266,-2.210374 52.354342,-2.210403 52.354321,-2.210623 52.354425,-2.210486 52.354585,-2.210212 52.354876,-2.210192 52.354925,-2.210201 52.354952,-2.210358 52.355064,-2.210416 52.355088,-2.210535 52.355170,-2.210669 52.355230,-2.210857 52.355344,-2.211305 52.355563,-2.211635 52.355742,-2.211844 52.355898,-2.212201 52.356073,-2.212513 52.356210,-2.212843 52.356341,-2.213949 52.356835,-2.213975 52.356844,-2.214008 52.356825,-2.214311 52.356936,-2.215069 52.357296,-2.216381 52.357974,-2.217504 52.358453,-2.218162 52.358717,-2.219002 52.359069,-2.219219 52.359138,-2.220211 52.359378,-2.220471 52.359422,-2.221685 52.359671,-2.223059 
52.359918,-2.223321 52.359918,-2.224476 52.360181,-2.224934 52.360309,-2.225276 52.360421,-2.225417 52.360449,-2.226117 52.360489,-2.226391 52.360522,-2.230048 52.361069,-2.230380 52.361108,-2.230882 52.361148,-2.231879 52.361187,-2.232138 52.361205,-2.233110 52.361126,-2.233739 52.361018,-2.235523 52.360881,-2.237210 52.360857),(-2.237510 52.360838,-2.237510 52.360838,-2.237432 52.360688,-2.237510 52.360838),(-2.231657 52.330381,-2.231729 52.330310,-2.231726 52.330310,-2.232135 52.329733,-2.232164 52.329681,-2.232322 52.329311,-2.232310 52.329217,-2.232267 52.329073,-2.232237 52.329015,-2.231435 52.328961,-2.231458 52.328493,-2.232054 52.328423,-2.232023 52.328320,-2.231973 52.328052,-2.231954 52.327913,-2.231939 52.327639,-2.231899 52.327566,-2.231832 52.326595,-2.231818 52.326589,-2.231808 52.326473,-2.231802 52.326227,-2.231808 52.326105,-2.232088 52.326066,-2.232677 52.326051,-2.232668 52.326620,-2.232656 52.326895,-2.233820 52.326880,-2.234332 52.327213,-2.235020 52.327550,-2.235316 52.327703,-2.235853 52.327810,-2.236667 52.327998,-2.237560 52.328242,-2.237491 52.328316,-2.237531 52.328369,-2.237248 52.328628,-2.237060 52.328822,-2.236899 52.329048,-2.235375 52.330839,-2.234811 52.331537,-2.234778 52.331540,-2.234783 52.331588,-2.235739 52.331666,-2.235728 52.331899,-2.235948 52.331908,-2.235949 52.331966,-2.236180 52.332005,-2.236224 52.331986,-2.236311 52.331919,-2.236357 52.331908,-2.236415 52.331903,-2.236572 52.331913,-2.236515 52.332023,-2.236433 52.332244,-2.236399 52.332387,-2.236384 52.332510,-2.236645 52.332567,-2.236622 52.332614,-2.236542 52.332722,-2.236501 52.332749,-2.236216 52.333127,-2.236017 52.333370,-2.235955 52.333468,-2.236222 52.333545,-2.235848 52.334071,-2.235810 52.334386,-2.235953 52.334372,-2.235981 52.334463,-2.236074 52.334488,-2.236107 52.334655,-2.236222 52.334657,-2.236232 52.334691,-2.236980 52.334662,-2.236809 52.335090,-2.236595 52.335514,-2.237030 52.335665,-2.236894 52.335754,-2.237107 52.335855,-2.236895 
52.336012,-2.236663 52.336149,-2.236598 52.336210,-2.236770 52.336227,-2.236972 52.336268,-2.237194 52.336347,-2.237479 52.336480,-2.237425 52.336553,-2.237417 52.336636,-2.237403 52.336670,-2.237310 52.336806,-2.237095 52.337033,-2.237035 52.337013,-2.236893 52.337186,-2.236888 52.337268,-2.236822 52.337347,-2.236712 52.337538,-2.236657 52.337586,-2.236609 52.337589,-2.236503 52.337564,-2.236507 52.337541,-2.236476 52.337504,-2.235825 52.336926,-2.235453 52.336844,-2.235411 52.336882,-2.235365 52.336896,-2.235288 52.336898,-2.235195 52.336889,-2.235036 52.336861,-2.234962 52.336840,-2.234898 52.336811,-2.234796 52.336748,-2.234752 52.336709,-2.234647 52.336593,-2.234469 52.336362,-2.234380 52.336259,-2.234312 52.336192,-2.234271 52.336165,-2.234146 52.336115,-2.233681 52.336004,-2.233345 52.335865,-2.233239 52.335832,-2.233153 52.335817,-2.233175 52.335778,-2.233090 52.335749,-2.233044 52.335766,-2.232913 52.335718,-2.232751 52.335583,-2.232668 52.335498,-2.232294 52.335289,-2.232200 52.335215,-2.232050 52.335140,-2.231695 52.335008,-2.231672 52.335040,-2.231469 52.334969,-2.231205 52.334899,-2.230951 52.335169,-2.230825 52.335349,-2.230803 52.335417,-2.230775 52.335453,-2.230493 52.335693,-2.230409 52.335774,-2.230230 52.335906,-2.229941 52.336199,-2.229736 52.336126,-2.229472 52.336045,-2.229283 52.336005,-2.229111 52.335979,-2.228468 52.335927,-2.227869 52.335895,-2.227471 52.335854,-2.227465 52.335808,-2.227496 52.335643,-2.227574 52.335405,-2.227680 52.335031,-2.227711 52.334799,-2.227628 52.334795,-2.227243 52.334880,-2.227093 52.334882,-2.226993 52.334852,-2.226841 52.334749,-2.226608 52.334628,-2.226664 52.334481,-2.226779 52.334495,-2.226762 52.334482,-2.226799 52.334405,-2.226833 52.334393,-2.226928 52.334163,-2.226969 52.334083,-2.227039 52.333980,-2.227670 52.333978,-2.227786 52.333362,-2.227841 52.333226,-2.227940 52.333054,-2.227989 52.332986,-2.228252 52.332731,-2.229113 52.332081,-2.229285 52.331965,-2.229355 52.331924,-2.229514 52.331952,-2.230034 
52.331623,-2.230079 52.331523,-2.230347 52.331313,-2.231313 52.330669,-2.231657 52.330381),(-2.179891 52.310464,-2.180426 52.310460,-2.180616 52.310473,-2.180768 52.310497,-2.180773 52.310529,-2.180688 52.310767,-2.180648 52.310848,-2.180618 52.310989,-2.180566 52.311085,-2.180518 52.311210,-2.180435 52.311381,-2.180429 52.311395,-2.180434 52.311410,-2.181208 52.311829,-2.181117 52.311886,-2.181047 52.311916,-2.180916 52.312004,-2.180683 52.312186,-2.180416 52.312426,-2.180273 52.312569,-2.180068 52.312795,-2.179758 52.313213,-2.179442 52.313514,-2.179172 52.313748,-2.178825 52.314023,-2.178976 52.314065,-2.179431 52.314149,-2.179788 52.314225,-2.179558 52.314728,-2.179657 52.314743,-2.179666 52.314809,-2.179509 52.315053,-2.179403 52.315272,-2.179487 52.315734,-2.179438 52.315846,-2.179480 52.315922,-2.179480 52.315977,-2.179514 52.316186,-2.179523 52.316377,-2.179494 52.316420,-2.179267 52.316551,-2.179197 52.316578,-2.178861 52.316542,-2.177617 52.316485,-2.177489 52.316464,-2.177335 52.316406,-2.177065 52.316278,-2.177110 52.315985,-2.177195 52.315584,-2.177379 52.315002,-2.177119 52.314940,-2.177052 52.314944,-2.176890 52.314930,-2.176608 52.314885,-2.176318 52.314825,-2.175729 52.314775,-2.175381 52.314719,-2.175182 52.314709,-2.175251 52.314403,-2.175798 52.314440,-2.175839 52.314349,-2.176204 52.314412,-2.176739 52.314487,-2.176760 52.314411,-2.176779 52.314179,-2.176580 52.314148,-2.176621 52.314077,-2.176621 52.314010,-2.176596 52.313832,-2.176882 52.313844,-2.177292 52.313811,-2.177248 52.313563,-2.177239 52.313396,-2.177257 52.313137,-2.177274 52.313034,-2.177310 52.312897,-2.177328 52.312846,-2.177340 52.312837,-2.177366 52.312744,-2.177605 52.312243,-2.177592 52.312206,-2.177617 52.312145,-2.177699 52.311988,-2.177904 52.311540,-2.177999 52.311303,-2.178016 52.311282,-2.178044 52.311271,-2.178070 52.311108,-2.178221 52.310726,-2.178243 52.310690,-2.178428 52.310117,-2.178445 52.309985,-2.178418 52.309931,-2.178419 52.309719,-2.178396 
52.309566,-2.178368 52.309472,-2.178276 52.309294,-2.179224 52.309159,-2.179246 52.309399,-2.179258 52.309658,-2.179237 52.309768,-2.179213 52.309816,-2.179200 52.309976,-2.179165 52.310094,-2.179084 52.310184,-2.179022 52.310396,-2.179891 52.310464),(-2.119156 52.294717,-2.118880 52.294998,-2.118651 52.295207,-2.118479 52.295147,-2.118220 52.295079,-2.118144 52.295081,-2.118093 52.295096,-2.118001 52.295167,-2.117819 52.295079,-2.117694 52.294985,-2.117564 52.294866,-2.117320 52.294614,-2.117154 52.294467,-2.116923 52.294202,-2.116576 52.293870,-2.115397 52.292876,-2.115299 52.292939,-2.115354 52.292989,-2.114965 52.293248,-2.114625 52.293051,-2.114526 52.293092,-2.114357 52.293205,-2.113672 52.293703,-2.113582 52.293832,-2.113345 52.294000,-2.113240 52.294103,-2.113055 52.294314,-2.112694 52.294206,-2.113142 52.293841,-2.113204 52.293782,-2.112930 52.293676,-2.112789 52.293609,-2.112455 52.293469,-2.112367 52.293448,-2.112255 52.293404,-2.112081 52.293324,-2.111216 52.292859,-2.110677 52.292517,-2.109852 52.292111,-2.109790 52.292062,-2.109674 52.291991,-2.109091 52.291728,-2.108843 52.291629,-2.108674 52.291574,-2.108500 52.291485,-2.107887 52.291202,-2.107546 52.291070,-2.107304 52.290998,-2.107161 52.290966,-2.106825 52.290924,-2.106447 52.290915,-2.106122 52.290895,-2.105633 52.290837,-2.105667 52.290690,-2.105664 52.290592,-2.105717 52.290494,-2.105798 52.290413,-2.105916 52.290376,-2.106170 52.290273,-2.106526 52.290081,-2.106783 52.289967,-2.107428 52.289556,-2.107703 52.289405,-2.108111 52.289155,-2.108360 52.289018,-2.108549 52.288932,-2.108689 52.288859,-2.108749 52.288818,-2.109105 52.288640,-2.109436 52.288450,-2.109756 52.288248,-2.109926 52.288163,-2.110305 52.287948,-2.110537 52.287829,-2.110759 52.287729,-2.111240 52.287546,-2.111354 52.287491,-2.111522 52.287432,-2.112074 52.287210,-2.112430 52.287044,-2.113090 52.286680,-2.113270 52.286602,-2.113539 52.286509,-2.113903 52.286439,-2.114339 52.286368,-2.114722 52.286346,-2.115120 
52.286367,-2.115435 52.286399,-2.115567 52.286426,-2.115797 52.286491,-2.115909 52.286547,-2.116039 52.286670,-2.116077 52.286733,-2.116109 52.286827,-2.116136 52.286864,-2.116164 52.286946,-2.116186 52.286979,-2.116289 52.287201,-2.116420 52.287336,-2.116505 52.287398,-2.116630 52.287470,-2.116847 52.287556,-2.117023 52.287593,-2.117335 52.287640,-2.117442 52.287673,-2.117467 52.287700,-2.117530 52.287810,-2.117597 52.287896,-2.117855 52.288115,-2.118489 52.288625,-2.118613 52.288748,-2.119051 52.288733,-2.119252 52.288705,-2.119446 52.288667,-2.119570 52.288689,-2.119711 52.288737,-2.119817 52.288791,-2.119896 52.288845,-2.119940 52.288894,-2.120117 52.289131,-2.120188 52.289282,-2.120259 52.289465,-2.120410 52.289965,-2.120580 52.290662,-2.120615 52.290873,-2.120642 52.291148,-2.120635 52.291613,-2.120591 52.291826,-2.120576 52.291822,-2.120518 52.292196,-2.120445 52.292538,-2.120191 52.293109,-2.120018 52.293460,-2.119973 52.293531,-2.119588 52.294126,-2.119308 52.294513,-2.119156 52.294717)))" # noqa: E501
assert wkt.normalise(value, issues=issues) == expected
| 658.597222
| 65,472
| 0.744933
| 28,077
| 142,257
| 3.770595
| 0.270684
| 0.002494
| 0.003571
| 0.004761
| 0.88393
| 0.862733
| 0.861071
| 0.835709
| 0.833518
| 0.801959
| 0
| 0.764957
| 0.057108
| 142,257
| 215
| 65,473
| 661.660465
| 0.024312
| 0.004794
| 0
| 0.5
| 0
| 0.097222
| 0.969911
| 0.00621
| 0
| 0
| 0
| 0
| 0.277778
| 1
| 0.131944
| false
| 0
| 0.013889
| 0
| 0.159722
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
73ddb3d507e881f9e64645626b790518fede01a8
| 116
|
py
|
Python
|
parallel_wavegan/models/__init__.py
|
Moon-sung-woo/Tacotron2_ParallelWaveGAN_korean
|
437c9748673f2e5cb84e99884e8d0d916f269c9e
|
[
"BSD-3-Clause"
] | 15
|
2019-11-27T05:47:16.000Z
|
2021-04-14T17:27:15.000Z
|
parallel_wavegan/models/__init__.py
|
Moon-sung-woo/Tacotron2_ParallelWaveGAN_korean
|
437c9748673f2e5cb84e99884e8d0d916f269c9e
|
[
"BSD-3-Clause"
] | 1
|
2020-03-17T10:55:52.000Z
|
2020-03-17T10:55:52.000Z
|
parallel_wavegan/models/__init__.py
|
Moon-sung-woo/Tacotron2_ParallelWaveGAN_korean
|
437c9748673f2e5cb84e99884e8d0d916f269c9e
|
[
"BSD-3-Clause"
] | 3
|
2019-12-22T02:53:46.000Z
|
2020-10-29T20:02:46.000Z
|
from parallel_wavegan.models.melgan import * # NOQA
from parallel_wavegan.models.parallel_wavegan import * # NOQA
| 38.666667
| 62
| 0.810345
| 15
| 116
| 6.066667
| 0.466667
| 0.494505
| 0.417582
| 0.549451
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12069
| 116
| 2
| 63
| 58
| 0.892157
| 0.077586
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
73ddf674d08bbc3496e4012a6d86f790b902e33b
| 22,687
|
py
|
Python
|
new_insane.py
|
edazizovv/financial_news_re
|
d7950da28d77ade29628f3b2fa266f5059527f52
|
[
"MIT"
] | null | null | null |
new_insane.py
|
edazizovv/financial_news_re
|
d7950da28d77ade29628f3b2fa266f5059527f52
|
[
"MIT"
] | null | null | null |
new_insane.py
|
edazizovv/financial_news_re
|
d7950da28d77ade29628f3b2fa266f5059527f52
|
[
"MIT"
] | null | null | null |
#
import numpy
import pandas
from sklearn.metrics import mean_absolute_error
from sklearn.feature_selection import RFECV
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor, GradientBoostingRegressor
from sklearn.svm import SVR as SVR_
from lightgbm import LGBMRegressor
from xgboost import XGBRegressor
import torch
#
from m_utils.measures import r2_adj
from m_utils.transformations import LogPctTransformer, Whitener, HypeTan # , Axe <-- coming soon
from neuro_new import WrappedNumericOnlyGene
#
def MAE(y_true, y_pred):
    """Mean absolute error that silently skips rows where either input is NaN or +/-inf.

    Returns numpy.nan when no finite rows remain; otherwise delegates to
    sklearn's mean_absolute_error on the filtered rows.
    """
    stacked = numpy.concatenate([y_true.reshape(-1, 1), y_pred.reshape(-1, 1)], axis=1)
    stacked = numpy.array(stacked, dtype=numpy.float32)
    # Map infinities onto NaN so a single isna() pass finds every bad row.
    stacked[stacked == numpy.inf] = numpy.nan
    stacked[stacked == -numpy.inf] = numpy.nan
    keep = ~pandas.isna(stacked).any(axis=1)
    kept_true, kept_pred = y_true[keep], y_pred[keep]
    if kept_true.shape[0] == 0:
        return numpy.nan
    if kept_true.shape[0] != y_true.shape[0]:
        print('MAE: the sample contains NaNs, they were dropped\tN of dropped NaNs: {0}'.format(y_true.shape[0] - kept_true.shape[0]))
    return mean_absolute_error(y_true=kept_true, y_pred=kept_pred)
def R2_adj(y_true, y_pred, dim1):
    """Adjusted R^2 that skips rows where either input is NaN or +/-inf.

    :param y_true: 1-D array of ground-truth values.
    :param y_pred: 1-D array of predictions.
    :param dim1: number of regressors, forwarded to m_utils r2_adj.
    :return: adjusted R^2, or numpy.nan when no finite rows remain.
    """
    Z = numpy.concatenate([y_true.reshape(-1, 1), y_pred.reshape(-1, 1)], axis=1)
    Z = numpy.array(Z, dtype=numpy.float32)
    # Map infinities onto NaN so a single isna() pass finds every bad row.
    Z[Z == numpy.inf] = numpy.nan
    Z[Z == -numpy.inf] = numpy.nan
    nan_mask = ~pandas.isna(Z).any(axis=1)
    y_true_, y_pred_ = y_true[nan_mask], y_pred[nan_mask]
    if y_true_.shape[0] == 0:
        return numpy.nan
    else:
        if y_true_.shape[0] != y_true.shape[0]:
            # BUG FIX: the message was copy-pasted from MAE() and mislabelled
            # this function's log line; report the correct function name.
            print('R2_adj: the sample contains NaNs, they were dropped\tN of dropped NaNs: {0}'.format(y_true.shape[0] - y_true_.shape[0]))
        return r2_adj(y_true=y_true_, y_pred=y_pred_, dim0=Z.shape[0], dim1=dim1)
class OLR:
    """NaN/inf-robust wrapper around sklearn's LinearRegression with optional RFECV selection."""
    def __init__(self, rfe_cv, *args, **kwargs):
        # rfe_cv: when truthy, fit() wraps the model in RFECV feature selection.
        self.rfe = None
        self.rfe_cv = rfe_cv
        self.model = LinearRegression(*args, **kwargs)
    def fit(self, X, y):
        """Fit on the rows of (X, y) that contain no NaN/inf values."""
        Z = numpy.concatenate([X, y.reshape(-1, 1)], axis=1)
        Z = numpy.array(Z, dtype=numpy.float32)
        Z[Z == numpy.inf] = numpy.nan
        Z[Z == -numpy.inf] = numpy.nan
        X_, y_ = X[~pandas.isna(Z).any(axis=1), :], y[~pandas.isna(Z).any(axis=1)]
        # BUG FIX: Z.shape[0] always equals X.shape[0] (axis=1 concatenation),
        # so the original warning never fired; compare the filtered row count.
        if X_.shape[0] != X.shape[0]:
            print('FIT: the sample contains NaNs, they were dropped\tN of dropped NaNs: {0}'.format(X.shape[0] - X_.shape[0]))
        if self.rfe_cv:
            self.rfe = RFECV(self.model)
            self.rfe.fit(X_, y_)
        else:
            self.model.fit(X_, y_)
    def predict(self, X):
        """Predict for X; rows containing NaN/inf yield NaN in the (n, 1) result."""
        Z = numpy.concatenate([X], axis=1)
        Z = numpy.array(Z, dtype=numpy.float32)
        Z[Z == numpy.inf] = numpy.nan
        Z[Z == -numpy.inf] = numpy.nan
        nan_mask = ~pandas.isna(Z).any(axis=1)
        X_ = X[nan_mask, :]
        # BUG FIX: same dead comparison as in fit(); use the filtered row count.
        if X_.shape[0] != X.shape[0]:
            print('PREDICT: the sample contains NaNs, they were dropped\tN of dropped NaNs: {0}'.format(X.shape[0] - X_.shape[0]))
        Z = numpy.full(shape=(X.shape[0], 1), fill_value=numpy.nan, dtype=numpy.float64)
        if self.rfe_cv:
            Z[nan_mask, :] = self.rfe.predict(X_).reshape(-1, 1)
        else:
            Z[nan_mask, :] = self.model.predict(X_).reshape(-1, 1)
        return Z
class KNR:
    """NaN/inf-robust wrapper around sklearn's KNeighborsRegressor with optional RFECV selection."""
    def __init__(self, rfe_cv, *args, **kwargs):
        # rfe_cv: when truthy, fit() wraps the model in RFECV feature selection.
        self.rfe = None
        self.rfe_cv = rfe_cv
        self.model = KNeighborsRegressor(*args, **kwargs)
    def fit(self, X, y):
        """Fit on the rows of (X, y) that contain no NaN/inf values."""
        Z = numpy.concatenate([X, y.reshape(-1, 1)], axis=1)
        Z = numpy.array(Z, dtype=numpy.float32)
        Z[Z == numpy.inf] = numpy.nan
        Z[Z == -numpy.inf] = numpy.nan
        X_, y_ = X[~pandas.isna(Z).any(axis=1), :], y[~pandas.isna(Z).any(axis=1)]
        # BUG FIX: Z.shape[0] always equals X.shape[0] (axis=1 concatenation),
        # so the original warning never fired; compare the filtered row count.
        if X_.shape[0] != X.shape[0]:
            print('FIT: the sample contains NaNs, they were dropped\tN of dropped NaNs: {0}'.format(X.shape[0] - X_.shape[0]))
        if self.rfe_cv:
            self.rfe = RFECV(self.model)
            self.rfe.fit(X_, y_)
        else:
            self.model.fit(X_, y_)
    def predict(self, X):
        """Predict for X; rows containing NaN/inf yield NaN in the (n, 1) result."""
        Z = numpy.concatenate([X], axis=1)
        Z = numpy.array(Z, dtype=numpy.float32)
        Z[Z == numpy.inf] = numpy.nan
        Z[Z == -numpy.inf] = numpy.nan
        nan_mask = ~pandas.isna(Z).any(axis=1)
        X_ = X[nan_mask, :]
        # BUG FIX: same dead comparison as in fit(); use the filtered row count.
        if X_.shape[0] != X.shape[0]:
            print('PREDICT: the sample contains NaNs, they were dropped\tN of dropped NaNs: {0}'.format(X.shape[0] - X_.shape[0]))
        Z = numpy.full(shape=(X.shape[0], 1), fill_value=numpy.nan, dtype=numpy.float64)
        if self.rfe_cv:
            Z[nan_mask, :] = self.rfe.predict(X_).reshape(-1, 1)
        else:
            Z[nan_mask, :] = self.model.predict(X_).reshape(-1, 1)
        return Z
class DTR:
    """NaN/inf-robust wrapper around sklearn's DecisionTreeRegressor with optional RFECV selection."""
    def __init__(self, rfe_cv, *args, **kwargs):
        # rfe_cv: when truthy, fit() wraps the model in RFECV feature selection.
        self.rfe = None
        self.rfe_cv = rfe_cv
        self.model = DecisionTreeRegressor(*args, **kwargs)
    def fit(self, X, y):
        """Fit on the rows of (X, y) that contain no NaN/inf values."""
        Z = numpy.concatenate([X, y.reshape(-1, 1)], axis=1)
        Z = numpy.array(Z, dtype=numpy.float32)
        Z[Z == numpy.inf] = numpy.nan
        Z[Z == -numpy.inf] = numpy.nan
        X_, y_ = X[~pandas.isna(Z).any(axis=1), :], y[~pandas.isna(Z).any(axis=1)]
        # BUG FIX: Z.shape[0] always equals X.shape[0] (axis=1 concatenation),
        # so the original warning never fired; compare the filtered row count.
        if X_.shape[0] != X.shape[0]:
            print('FIT: the sample contains NaNs, they were dropped\tN of dropped NaNs: {0}'.format(X.shape[0] - X_.shape[0]))
        if self.rfe_cv:
            self.rfe = RFECV(self.model)
            self.rfe.fit(X_, y_)
        else:
            self.model.fit(X_, y_)
    def predict(self, X):
        """Predict for X; rows containing NaN/inf yield NaN in the (n, 1) result."""
        Z = numpy.concatenate([X], axis=1)
        Z = numpy.array(Z, dtype=numpy.float32)
        Z[Z == numpy.inf] = numpy.nan
        Z[Z == -numpy.inf] = numpy.nan
        nan_mask = ~pandas.isna(Z).any(axis=1)
        X_ = X[nan_mask, :]
        # BUG FIX: same dead comparison as in fit(); use the filtered row count.
        if X_.shape[0] != X.shape[0]:
            print('PREDICT: the sample contains NaNs, they were dropped\tN of dropped NaNs: {0}'.format(X.shape[0] - X_.shape[0]))
        Z = numpy.full(shape=(X.shape[0], 1), fill_value=numpy.nan, dtype=numpy.float64)
        if self.rfe_cv:
            Z[nan_mask, :] = self.rfe.predict(X_).reshape(-1, 1)
        else:
            Z[nan_mask, :] = self.model.predict(X_).reshape(-1, 1)
        return Z
class ETR:
    """NaN/inf-robust wrapper around sklearn's ExtraTreesRegressor with optional RFECV selection."""
    def __init__(self, rfe_cv, *args, **kwargs):
        # rfe_cv: when truthy, fit() wraps the model in RFECV feature selection.
        self.rfe = None
        self.rfe_cv = rfe_cv
        self.model = ExtraTreesRegressor(*args, **kwargs)
    def fit(self, X, y):
        """Fit on the rows of (X, y) that contain no NaN/inf values."""
        Z = numpy.concatenate([X, y.reshape(-1, 1)], axis=1)
        Z = numpy.array(Z, dtype=numpy.float32)
        Z[Z == numpy.inf] = numpy.nan
        Z[Z == -numpy.inf] = numpy.nan
        X_, y_ = X[~pandas.isna(Z).any(axis=1), :], y[~pandas.isna(Z).any(axis=1)]
        # BUG FIX: Z.shape[0] always equals X.shape[0] (axis=1 concatenation),
        # so the original warning never fired; compare the filtered row count.
        if X_.shape[0] != X.shape[0]:
            print('FIT: the sample contains NaNs, they were dropped\tN of dropped NaNs: {0}'.format(X.shape[0] - X_.shape[0]))
        if self.rfe_cv:
            self.rfe = RFECV(self.model)
            self.rfe.fit(X_, y_)
        else:
            self.model.fit(X_, y_)
    def predict(self, X):
        """Predict for X; rows containing NaN/inf yield NaN in the (n, 1) result."""
        Z = numpy.concatenate([X], axis=1)
        Z = numpy.array(Z, dtype=numpy.float32)
        Z[Z == numpy.inf] = numpy.nan
        Z[Z == -numpy.inf] = numpy.nan
        nan_mask = ~pandas.isna(Z).any(axis=1)
        X_ = X[nan_mask, :]
        # BUG FIX: same dead comparison as in fit(); use the filtered row count.
        if X_.shape[0] != X.shape[0]:
            print('PREDICT: the sample contains NaNs, they were dropped\tN of dropped NaNs: {0}'.format(X.shape[0] - X_.shape[0]))
        Z = numpy.full(shape=(X.shape[0], 1), fill_value=numpy.nan, dtype=numpy.float64)
        if self.rfe_cv:
            Z[nan_mask, :] = self.rfe.predict(X_).reshape(-1, 1)
        else:
            Z[nan_mask, :] = self.model.predict(X_).reshape(-1, 1)
        return Z
class RFR:
    """NaN/inf-robust wrapper around sklearn's RandomForestRegressor with optional RFECV selection."""
    def __init__(self, rfe_cv, *args, **kwargs):
        # rfe_cv: when truthy, fit() wraps the model in RFECV feature selection.
        self.rfe = None
        self.rfe_cv = rfe_cv
        self.model = RandomForestRegressor(*args, **kwargs)
    def fit(self, X, y):
        """Fit on the rows of (X, y) that contain no NaN/inf values."""
        Z = numpy.concatenate([X, y.reshape(-1, 1)], axis=1)
        Z = numpy.array(Z, dtype=numpy.float32)
        Z[Z == numpy.inf] = numpy.nan
        Z[Z == -numpy.inf] = numpy.nan
        X_, y_ = X[~pandas.isna(Z).any(axis=1), :], y[~pandas.isna(Z).any(axis=1)]
        # BUG FIX: Z.shape[0] always equals X.shape[0] (axis=1 concatenation),
        # so the original warning never fired; compare the filtered row count.
        if X_.shape[0] != X.shape[0]:
            print('FIT: the sample contains NaNs, they were dropped\tN of dropped NaNs: {0}'.format(X.shape[0] - X_.shape[0]))
        if self.rfe_cv:
            self.rfe = RFECV(self.model)
            self.rfe.fit(X_, y_)
        else:
            self.model.fit(X_, y_)
    def predict(self, X):
        """Predict for X; rows containing NaN/inf yield NaN in the (n, 1) result."""
        Z = numpy.concatenate([X], axis=1)
        Z = numpy.array(Z, dtype=numpy.float32)
        Z[Z == numpy.inf] = numpy.nan
        Z[Z == -numpy.inf] = numpy.nan
        nan_mask = ~pandas.isna(Z).any(axis=1)
        X_ = X[nan_mask, :]
        # BUG FIX: same dead comparison as in fit(); use the filtered row count.
        if X_.shape[0] != X.shape[0]:
            print('PREDICT: the sample contains NaNs, they were dropped\tN of dropped NaNs: {0}'.format(X.shape[0] - X_.shape[0]))
        Z = numpy.full(shape=(X.shape[0], 1), fill_value=numpy.nan, dtype=numpy.float64)
        if self.rfe_cv:
            Z[nan_mask, :] = self.rfe.predict(X_).reshape(-1, 1)
        else:
            Z[nan_mask, :] = self.model.predict(X_).reshape(-1, 1)
        return Z
class SVR:
    """NaN/inf-robust wrapper around sklearn's SVR (imported as SVR_) with optional RFECV selection."""
    def __init__(self, rfe_cv, *args, **kwargs):
        # rfe_cv: when truthy, fit() wraps the model in RFECV feature selection.
        self.rfe = None
        self.rfe_cv = rfe_cv
        self.model = SVR_(*args, **kwargs)
    def fit(self, X, y):
        """Fit on the rows of (X, y) that contain no NaN/inf values."""
        Z = numpy.concatenate([X, y.reshape(-1, 1)], axis=1)
        Z = numpy.array(Z, dtype=numpy.float32)
        Z[Z == numpy.inf] = numpy.nan
        Z[Z == -numpy.inf] = numpy.nan
        X_, y_ = X[~pandas.isna(Z).any(axis=1), :], y[~pandas.isna(Z).any(axis=1)]
        # BUG FIX: Z.shape[0] always equals X.shape[0] (axis=1 concatenation),
        # so the original warning never fired; compare the filtered row count.
        if X_.shape[0] != X.shape[0]:
            print('FIT: the sample contains NaNs, they were dropped\tN of dropped NaNs: {0}'.format(X.shape[0] - X_.shape[0]))
        if self.rfe_cv:
            self.rfe = RFECV(self.model)
            self.rfe.fit(X_, y_)
        else:
            self.model.fit(X_, y_)
    def predict(self, X):
        """Predict for X; rows containing NaN/inf yield NaN in the (n, 1) result."""
        Z = numpy.concatenate([X], axis=1)
        Z = numpy.array(Z, dtype=numpy.float32)
        Z[Z == numpy.inf] = numpy.nan
        Z[Z == -numpy.inf] = numpy.nan
        nan_mask = ~pandas.isna(Z).any(axis=1)
        X_ = X[nan_mask, :]
        # BUG FIX: same dead comparison as in fit(); use the filtered row count.
        if X_.shape[0] != X.shape[0]:
            print('PREDICT: the sample contains NaNs, they were dropped\tN of dropped NaNs: {0}'.format(X.shape[0] - X_.shape[0]))
        Z = numpy.full(shape=(X.shape[0], 1), fill_value=numpy.nan, dtype=numpy.float64)
        if self.rfe_cv:
            Z[nan_mask, :] = self.rfe.predict(X_).reshape(-1, 1)
        else:
            Z[nan_mask, :] = self.model.predict(X_).reshape(-1, 1)
        return Z
class GBR:
    """NaN/inf-robust wrapper around sklearn's GradientBoostingRegressor with optional RFECV selection."""
    def __init__(self, rfe_cv, *args, **kwargs):
        # rfe_cv: when truthy, fit() wraps the model in RFECV feature selection.
        self.rfe = None
        self.rfe_cv = rfe_cv
        self.model = GradientBoostingRegressor(*args, **kwargs)
    def fit(self, X, y):
        """Fit on the rows of (X, y) that contain no NaN/inf values."""
        Z = numpy.concatenate([X, y.reshape(-1, 1)], axis=1)
        Z = numpy.array(Z, dtype=numpy.float32)
        Z[Z == numpy.inf] = numpy.nan
        Z[Z == -numpy.inf] = numpy.nan
        X_, y_ = X[~pandas.isna(Z).any(axis=1), :], y[~pandas.isna(Z).any(axis=1)]
        # BUG FIX: Z.shape[0] always equals X.shape[0] (axis=1 concatenation),
        # so the original warning never fired; compare the filtered row count.
        if X_.shape[0] != X.shape[0]:
            print('FIT: the sample contains NaNs, they were dropped\tN of dropped NaNs: {0}'.format(X.shape[0] - X_.shape[0]))
        if self.rfe_cv:
            self.rfe = RFECV(self.model)
            self.rfe.fit(X_, y_)
        else:
            self.model.fit(X_, y_)
    def predict(self, X):
        """Predict for X; rows containing NaN/inf yield NaN in the (n, 1) result."""
        Z = numpy.concatenate([X], axis=1)
        Z = numpy.array(Z, dtype=numpy.float32)
        Z[Z == numpy.inf] = numpy.nan
        Z[Z == -numpy.inf] = numpy.nan
        nan_mask = ~pandas.isna(Z).any(axis=1)
        X_ = X[nan_mask, :]
        # BUG FIX: same dead comparison as in fit(); use the filtered row count.
        if X_.shape[0] != X.shape[0]:
            print('PREDICT: the sample contains NaNs, they were dropped\tN of dropped NaNs: {0}'.format(X.shape[0] - X_.shape[0]))
        Z = numpy.full(shape=(X.shape[0], 1), fill_value=numpy.nan, dtype=numpy.float64)
        if self.rfe_cv:
            Z[nan_mask, :] = self.rfe.predict(X_).reshape(-1, 1)
        else:
            Z[nan_mask, :] = self.model.predict(X_).reshape(-1, 1)
        return Z
class LBR:
    """NaN/inf-robust wrapper around LightGBM's LGBMRegressor with optional RFECV selection."""
    def __init__(self, rfe_cv, *args, **kwargs):
        # rfe_cv: when truthy, fit() wraps the model in RFECV feature selection.
        self.rfe = None
        self.rfe_cv = rfe_cv
        self.model = LGBMRegressor(*args, **kwargs)
    def fit(self, X, y):
        """Fit on the rows of (X, y) that contain no NaN/inf values."""
        Z = numpy.concatenate([X, y.reshape(-1, 1)], axis=1)
        Z = numpy.array(Z, dtype=numpy.float32)
        Z[Z == numpy.inf] = numpy.nan
        Z[Z == -numpy.inf] = numpy.nan
        X_, y_ = X[~pandas.isna(Z).any(axis=1), :], y[~pandas.isna(Z).any(axis=1)]
        # BUG FIX: Z.shape[0] always equals X.shape[0] (axis=1 concatenation),
        # so the original warning never fired; compare the filtered row count.
        if X_.shape[0] != X.shape[0]:
            print('FIT: the sample contains NaNs, they were dropped\tN of dropped NaNs: {0}'.format(X.shape[0] - X_.shape[0]))
        if self.rfe_cv:
            self.rfe = RFECV(self.model)
            self.rfe.fit(X_, y_)
        else:
            self.model.fit(X_, y_)
    def predict(self, X):
        """Predict for X; rows containing NaN/inf yield NaN in the (n, 1) result."""
        Z = numpy.concatenate([X], axis=1)
        Z = numpy.array(Z, dtype=numpy.float32)
        Z[Z == numpy.inf] = numpy.nan
        Z[Z == -numpy.inf] = numpy.nan
        nan_mask = ~pandas.isna(Z).any(axis=1)
        X_ = X[nan_mask, :]
        # BUG FIX: same dead comparison as in fit(); use the filtered row count.
        if X_.shape[0] != X.shape[0]:
            print('PREDICT: the sample contains NaNs, they were dropped\tN of dropped NaNs: {0}'.format(X.shape[0] - X_.shape[0]))
        Z = numpy.full(shape=(X.shape[0], 1), fill_value=numpy.nan, dtype=numpy.float64)
        if self.rfe_cv:
            Z[nan_mask, :] = self.rfe.predict(X_).reshape(-1, 1)
        else:
            Z[nan_mask, :] = self.model.predict(X_).reshape(-1, 1)
        return Z
class XBR:
    """NaN/inf-robust wrapper around XGBoost's XGBRegressor with optional RFECV selection."""
    def __init__(self, rfe_cv, *args, **kwargs):
        # rfe_cv: when truthy, fit() wraps the model in RFECV feature selection.
        self.rfe = None
        self.rfe_cv = rfe_cv
        self.model = XGBRegressor(*args, **kwargs)
    def fit(self, X, y):
        """Fit on the rows of (X, y) that contain no NaN/inf values."""
        Z = numpy.concatenate([X, y.reshape(-1, 1)], axis=1)
        Z = numpy.array(Z, dtype=numpy.float32)
        Z[Z == numpy.inf] = numpy.nan
        Z[Z == -numpy.inf] = numpy.nan
        X_, y_ = X[~pandas.isna(Z).any(axis=1), :], y[~pandas.isna(Z).any(axis=1)]
        # BUG FIX: Z.shape[0] always equals X.shape[0] (axis=1 concatenation),
        # so the original warning never fired; compare the filtered row count.
        if X_.shape[0] != X.shape[0]:
            print('FIT: the sample contains NaNs, they were dropped\tN of dropped NaNs: {0}'.format(X.shape[0] - X_.shape[0]))
        if self.rfe_cv:
            self.rfe = RFECV(self.model)
            self.rfe.fit(X_, y_)
        else:
            self.model.fit(X_, y_)
    def predict(self, X):
        """Predict for X; rows containing NaN/inf yield NaN in the (n, 1) result."""
        Z = numpy.concatenate([X], axis=1)
        Z = numpy.array(Z, dtype=numpy.float32)
        Z[Z == numpy.inf] = numpy.nan
        Z[Z == -numpy.inf] = numpy.nan
        nan_mask = ~pandas.isna(Z).any(axis=1)
        X_ = X[nan_mask, :]
        # BUG FIX: same dead comparison as in fit(); use the filtered row count.
        if X_.shape[0] != X.shape[0]:
            print('PREDICT: the sample contains NaNs, they were dropped\tN of dropped NaNs: {0}'.format(X.shape[0] - X_.shape[0]))
        Z = numpy.full(shape=(X.shape[0], 1), fill_value=numpy.nan, dtype=numpy.float64)
        if self.rfe_cv:
            Z[nan_mask, :] = self.rfe.predict(X_).reshape(-1, 1)
        else:
            Z[nan_mask, :] = self.model.predict(X_).reshape(-1, 1)
        return Z
class Insane:
    """Name-keyed facade over the project's array transformers.

    fit() builds the transformer(s) selected by ``my_name``; forward()/backward()
    apply and invert them. 'Nothing' is a pass-through; the 'Axe*' variants are
    not implemented yet and always raise.
    """
    def __init__(self, my_name):
        self.my_name = my_name
        # Holds a single transformer, or a [first, second] chain for the Tanh* modes.
        self.store = None
    def say_my_name(self):
        """Return the configured transformer name."""
        return self.my_name
    def fit(self, array):
        """Construct and fit the transformer(s) selected by my_name on ``array``."""
        name = self.my_name
        if name == 'Nothing':
            pass
        elif name == 'LnPct':
            transformer = LogPctTransformer()
            transformer.fit(array)
            self.store = transformer
        elif name == 'TanhLnPct':
            first = LogPctTransformer()
            first.fit(array)
            second = HypeTan()
            second.fit(first.transform(array))
            self.store = [first, second]
        elif name == 'Whiten':
            transformer = Whitener()
            transformer.fit(array)
            self.store = transformer
        elif name == 'TanhWhiten':
            first = Whitener()
            first.fit(array)
            second = HypeTan()
            second.fit(first.transform(array))
            self.store = [first, second]
        elif name in ('AxeLnPct', 'AxeWOELnPct'):
            raise Exception("Axe is not ready!")
        else:
            raise Exception("Not Yet!")
    def forward(self, array):
        """Apply the fitted transformation to ``array``."""
        name = self.my_name
        if name == 'Nothing':
            return array
        if name in ('LnPct', 'Whiten'):
            return self.store.transform(array)
        if name in ('TanhLnPct', 'TanhWhiten'):
            return self.store[1].transform(self.store[0].transform(array))
        if name in ('AxeLnPct', 'AxeWOELnPct'):
            raise Exception("It is coming soon...")
        raise Exception("Not Yet!")
    def backward(self, array):
        """Invert the fitted transformation (reverse order for chained modes)."""
        name = self.my_name
        if name == 'Nothing':
            return array
        if name in ('LnPct', 'Whiten'):
            return self.store.inverse_transform(array)
        if name in ('TanhLnPct', 'TanhWhiten'):
            return self.store[0].inverse_transform(self.store[1].inverse_transform(array))
        if name in ('AxeLnPct', 'AxeWOELnPct'):
            raise Exception("It is coming soon...")
        raise Exception("Not Yet!")
class Neakt:
    """Applies a keyed set of transformers to masked column groups of a 2-D array.

    ``masked`` maps each transformer object (with fit/forward/backward methods)
    to the column mask it operates on; ``coded`` is an identifying label.
    """
    def __init__(self, masked, coded):
        self.masked = masked
        self.coded = coded
        # Freeze key order once so fit/predict/backward all agree on it.
        self.transformers = list(self.masked.keys())
        self.masks = [self.masked[key] for key in self.transformers]
        self.n = len(self.transformers)
    def say_my_name(self):
        """Return the identifying label."""
        return self.coded
    def fit(self, X, Y):
        """Fit each transformer on its masked columns of X (Y is unused)."""
        array = X.copy()
        for transformer, mask in zip(self.transformers, self.masks):
            transformer.fit(array[:, mask])
    def predict(self, X):
        """Return a copy of X with each masked column group transformed in place."""
        result = X.copy()
        for transformer, mask in zip(self.transformers, self.masks):
            result[:, mask] = transformer.forward(result[:, mask])
        return result
    def backward(self, array):
        """Invert predict(): apply each transformer's backward in reverse order."""
        result = array.copy()
        for transformer, mask in zip(reversed(self.transformers), reversed(self.masks)):
            result[:, mask] = transformer.backward(result[:, mask])
        return result
class SimpleNumericNN:
    """NaN/inf-robust wrapper around the project's WrappedNumericOnlyGene torch model.

    fit() drops non-finite rows, makes a 70/30 train/validation split, and trains
    the model on float tensors; predict() returns an (n, 1) float64 array with
    NaN for input rows that contained NaN/inf.
    """
    def __init__(self, rfe_cv, *args, **kwargs):
        # rfe_cv is accepted for interface parity with the sklearn wrappers;
        # RFECV is never applied to the torch model (self.rfe stays None).
        self.rfe = None
        self.rfe_cv = rfe_cv
        self.model = WrappedNumericOnlyGene(*args, **kwargs)
    def fit(self, X, y):
        """Drop NaN/inf rows, split into train/val tensors, and fit the model."""
        Z = numpy.concatenate([X, y.reshape(-1, 1)], axis=1)
        Z = numpy.array(Z, dtype=numpy.float32)  # !
        Z[Z == numpy.inf] = numpy.nan
        Z[Z == -numpy.inf] = numpy.nan
        X_, y_ = X[~pandas.isna(Z).any(axis=1), :], y[~pandas.isna(Z).any(axis=1)]
        # BUG FIX: Z.shape[0] always equals X.shape[0] (axis=1 concatenation),
        # so the original warning never fired; compare the filtered row count.
        if X_.shape[0] != X.shape[0]:
            print('FIT: the sample contains NaNs, they were dropped\tN of dropped NaNs: {0}'.format(X.shape[0] - X_.shape[0]))
        del Z, X, y
        X_train, X_val, y_train, y_val = train_test_split(X_, y_, test_size=0.3)
        y_train, y_val = y_train.reshape(-1, 1), y_val.reshape(-1, 1)
        del X_, y_
        X_train, X_val = torch.tensor(X_train, dtype=torch.float), torch.tensor(X_val, dtype=torch.float)
        y_train, y_val = torch.tensor(y_train, dtype=torch.float), torch.tensor(y_val, dtype=torch.float)
        self.model.fit(X_train=X_train, y_train=y_train, X_val=X_val, y_val=y_val)
    def predict(self, X):
        """Predict for X; rows containing NaN/inf yield NaN in the (n, 1) result."""
        Z = numpy.concatenate([X], axis=1)
        Z = numpy.array(Z, dtype=numpy.float32)
        Z[Z == numpy.inf] = numpy.nan
        Z[Z == -numpy.inf] = numpy.nan
        nan_mask = ~pandas.isna(Z).any(axis=1)
        X_ = X[nan_mask, :]
        # BUG FIX: same dead comparison as in fit(); use the filtered row count.
        if X_.shape[0] != X.shape[0]:
            print('PREDICT: the sample contains NaNs, they were dropped\tN of dropped NaNs: {0}'.format(X.shape[0] - X_.shape[0]))
        Z = numpy.full(shape=(X.shape[0], 1), fill_value=numpy.nan, dtype=numpy.float64)
        X_ = torch.tensor(X[nan_mask, :], dtype=torch.float)
        Z[nan_mask, :] = self.model.predict(X_).reshape(-1, 1)
        return Z
| 35.173643
| 136
| 0.557544
| 3,315
| 22,687
| 3.68537
| 0.050679
| 0.049603
| 0.040108
| 0.036015
| 0.860931
| 0.848244
| 0.834738
| 0.81493
| 0.801588
| 0.798969
| 0
| 0.023022
| 0.283951
| 22,687
| 645
| 137
| 35.173643
| 0.729024
| 0.025962
| 0
| 0.839744
| 0
| 0
| 0.090841
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089744
| false
| 0.002137
| 0.034188
| 0.004274
| 0.209402
| 0.047009
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fb8f45407e6ad09f74d2518df673775662474fa6
| 20,704
|
py
|
Python
|
core/python/infer_section.py
|
vkb1/openseismic
|
51f91e658a74e2c38910b686b2e96b73e77f5eb5
|
[
"Apache-2.0"
] | 7
|
2021-05-13T05:53:09.000Z
|
2022-03-03T21:44:10.000Z
|
core/python/infer_section.py
|
manaspathak89/openseismic
|
034de2abf00feddab6ae8151b46abc2fa7153091
|
[
"Apache-2.0"
] | null | null | null |
core/python/infer_section.py
|
manaspathak89/openseismic
|
034de2abf00feddab6ae8151b46abc2fa7153091
|
[
"Apache-2.0"
] | 4
|
2021-04-23T20:45:12.000Z
|
2021-07-03T23:58:08.000Z
|
#
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
#
import os
import math
import shutil
import warnings
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from utils.infer_util import InferRequestsQueue, loader
# Silence Python warnings and TensorFlow logging noise before inference starts.
warnings.filterwarnings("ignore")
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # 3 = suppress TF C++ INFO/WARNING/ERROR logs
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
# Custom level numbers used with the shared logger's setLevel()/output()/infer() calls.
OUTPUT, SWAP, INFER, ARG = 5, 6, 7, 8
def infer_section_sync(arg_obj, logger, get_functions):
    """
    Infer on section data synchronously. In order to use this in your
    configuration, specify ``infer_type`` as ``section_sync``. Section inference
    requires that additional parameters such as ``slice``, ``subsampl``, and
    ``slice_no`` be specified in the JSON configuration file. This function's
    specific arguments will be filled according to your configuration inputs.
    :param arg_obj: Arguments object that holds parameters needed for inference.
    :param logger: Common logger object for logging coherence.
    :param get_functions: Functions associated with parameters given.
    :return: None
    """
    logger.setLevel(OUTPUT)
    # Recreate the output folder from scratch so stale results never mix in.
    output_folder = arg_obj.output
    if not os.path.exists(output_folder):
        logger.output('Making new folder for output storage.')
        os.mkdir(output_folder)
    else:
        logger.output('Output folder already exists. Deleting...')
        shutil.rmtree(output_folder)
        logger.output('Making new folder for output storage.')
        os.mkdir(output_folder)
    logger.setLevel(INFER)
    logger.infer('Setting up inference...')
    preprocess, postprocess, model = get_functions(
        arg_obj.model, arg_obj.given_model_name)
    in_shape = model.get_input_shape()
    logger.infer('Using model: {}'.format(model.name))
    # Expects one input and one output layer
    assert (len(model.get_inputs()) <
            2), "[ERROR] Expects model with one input layer."
    assert (len(model.get_outputs()) <
            2), "[ERROR] Expects model with one output layer."
    slice_type = arg_obj.slice
    subsampl = 1  # arg_obj.subsampl
    # NOTE(review): im_size and return_full_size are read but never used in this
    # function — confirm before removing.
    im_size = arg_obj.im_size
    slice_no = arg_obj.slice_no
    return_full_size = arg_obj.return_to_fullsize
    sep = os.path.sep
    # Collect every input volume: each entry holds its name, the loaded array,
    # and the loader's metadata dict.
    data_arr = []
    assert (os.path.isdir(arg_obj.data) or os.path.isfile(
        arg_obj.data)), "[ERROR] Unexpected data input."
    if os.path.isdir(arg_obj.data):
        for data_file_name in os.listdir(arg_obj.data):
            path_to_file = arg_obj.data + sep + data_file_name
            data, data_info = loader(path_to_file)
            data_arr.append(
                {'name': data_file_name, 'data': data, 'data_info': data_info})
    if os.path.isfile(arg_obj.data):
        data, data_info = loader(arg_obj.data)
        data_arr.append({'name': arg_obj.data.replace(
            "/", "-"), 'data': data, 'data_info': data_info})
    logger.infer('Conducting inference...')

    # Integer index range helper: [0, 1, ..., N-1].
    def ls(N): return np.linspace(0, N - 1, N, dtype='int')
    for data_dict in data_arr:
        input_name = data_dict['name']
        data = data_dict['data']
        data_info = data_dict['data_info']
        logger.infer('Conducting inference on input: {}...'.format(input_name))
        logger.infer(
            'Inference Config - Slice Type {} on Slice No. {}...'.format(slice_type, slice_no))
        N0, N1, N2 = data.shape
        x0_range = ls(N0)
        x1_range = ls(N1)
        x2_range = ls(N2)
        check_slice_type = slice_type == 'inline' or slice_type == 'crossline' or slice_type == 'timeslice'
        assert check_slice_type, "[ERROR] Invalid slice_type: {}".format(
            slice_type)
        # Shift slice_no from the file's absolute numbering to a 0-based index,
        # and collapse the sliced axis of class_cube to length 1.
        # NOTE(review): slice_no is rebound here inside the per-file loop, so a
        # second input file would reuse the already-shifted value — confirm
        # intended when more than one input is supplied.
        if slice_type == 'inline':
            slice_no = slice_no - data_info['inline_start']
            class_cube = data[::subsampl, 0:1, ::subsampl] * 0
            x1_range = np.array([slice_no])
        elif slice_type == 'crossline':
            slice_no = slice_no - data_info['crossline_start']
            class_cube = data[::subsampl, ::subsampl, 0:1, ] * 0
            x2_range = np.array([slice_no])
        elif slice_type == 'timeslice':
            slice_no = slice_no - data_info['timeslice_start']
            class_cube = data[0:1, ::subsampl, ::subsampl] * 0
            x0_range = np.array([slice_no])
        assert slice_no > - \
            1, "[ERROR] Invalid slice_no. For {}, refer to: {}".format(
                input_name, data_info)
        n0, n1, n2 = class_cube.shape
        # x0_grid, x1_grid, x2_grid = np.meshgrid(ls(n0,), ls(n1), ls(n2), indexing='ij')
        X0_grid, X1_grid, X2_grid = np.meshgrid(
            x0_range, x1_range, x2_range, indexing='ij')
        X0_grid_sub = X0_grid[::subsampl, ::subsampl, ::subsampl]
        X1_grid_sub = X1_grid[::subsampl, ::subsampl, ::subsampl]
        X2_grid_sub = X2_grid[::subsampl, ::subsampl, ::subsampl]
        # w is half the model's window side; windows are tiled with stride 2*w.
        w = in_shape[2] // 2
        # NOTE(review): h is computed but never used below — confirm before removing.
        h = in_shape[3] // 2
        # Decide iterator axis
        # X0_grid_sub.size / w * w
        iter_axis = range(
            math.ceil(n1 / in_shape[2]) * math.ceil(n2 / in_shape[2]))
        if slice_type == 'inline':
            # X1_grid_sub.size / w * w
            iter_axis = range(
                math.ceil(n0 / in_shape[2]) * math.ceil(n2 / in_shape[2]))
        if slice_type == 'crossline':
            # X2_grid_sub.size / w * w
            iter_axis = range(
                math.ceil(n0 / in_shape[2]) * math.ceil(n1 / in_shape[2]))
        iter_axis = tqdm(iter_axis)
        # (next_Xk_1, next_Xk_2) track the start/end of the next window along
        # each axis; they advance only after a window is actually inferred.
        next_X0_1, next_X0_2 = 0, in_shape[2]
        next_X1_1, next_X1_2 = 0, in_shape[2]
        next_X2_1, next_X2_2 = 0, in_shape[2]
        for i in iter_axis:
            # Window centers: the sliced axis comes from the meshgrid; the two
            # in-plane axes come from the running window cursors.
            X0 = X0_grid_sub.ravel()[
                i] if slice_type == 'timeslice' else next_X0_1 + w
            X1 = X1_grid_sub.ravel()[
                i] if slice_type == 'inline' else next_X1_1 + w
            X2 = X2_grid_sub.ravel()[
                i] if slice_type == 'crossline' else next_X2_1 + w
            mini_sheet = np.zeros(in_shape)
            found_mini_sheet = False
            if slice_type == 'inline' and next_X0_1 == X0 - w and next_X2_1 == X2 - w:
                # X1 out
                end_X0 = min(next_X0_2, X0 + w + 1)
                end_X2 = min(next_X2_2, X2 + w + 1)
                mini_sheet = data[X0-w:end_X0, X1, X2-w:end_X2]
                mini_sheet = mini_sheet[np.newaxis, np.newaxis, :, :]
                found_mini_sheet = True
            if slice_type == 'crossline' and next_X0_1 == X0 - w and next_X1_1 == X1 - w:
                # X2 out
                end_X0 = min(next_X0_2, X0 + w + 1)
                end_X1 = min(next_X1_2, X1 + w + 1)
                mini_sheet = data[X0-w: end_X0, X1-w: end_X1, X2]
                mini_sheet = mini_sheet[np.newaxis, np.newaxis, :, :]
                found_mini_sheet = True
            if slice_type == 'timeslice' and next_X1_1 == X1 - w and next_X2_1 == X2 - w:
                # X0 out
                end_X1 = min(next_X1_2, X1 + w + 1)
                end_X2 = min(next_X2_2, X2 + w + 1)
                mini_sheet = data[X0, X1-w: end_X1, X2-w: end_X2]
                mini_sheet = mini_sheet[np.newaxis, np.newaxis, :, :]
                found_mini_sheet = True
            if found_mini_sheet:
                # Run preprocess -> model -> postprocess on the extracted window,
                # write the result into class_cube, then advance the cursors
                # (wrap the fast axis, step the slow axis at the row's end).
                orig_shape = mini_sheet.shape
                input_dict = preprocess(mini_sheet, model.get_inputs(), model)
                output_dict, latency = model.infer(input_dict)
                output_dict = postprocess(output_dict, orig_shape)
                out = output_dict[list(output_dict.keys())[0]]
                out = np.squeeze(out)
                if slice_type == 'inline':
                    # X1 out
                    class_cube[X0 - w: X0 + w + 1, 0, X2 - w: X2 + w + 1] = out
                    if next_X2_2 == n2:
                        next_X0_1, next_X0_2 = X0 + w, min(X0 + 3*w + 1, n0)
                        next_X2_1, next_X2_2 = 0, in_shape[2]
                    else:
                        next_X2_1, next_X2_2 = X2 + w, min(X2 + 3*w + 1, n2)
                if slice_type == 'crossline':
                    # X2 out
                    class_cube[X0 - w: X0 + w + 1, X1 - w: X1 + w + 1, 0] = out
                    if next_X1_2 == n1:
                        next_X0_1, next_X0_2 = X0 + w, min(X0 + 3*w + 1, n0)
                        next_X1_1, next_X1_2 = 0, in_shape[2]
                    else:
                        next_X1_1, next_X1_2 = X1 + w, min(X1 + 3*w + 1, n1)
                if slice_type == 'timeslice':
                    # X0 out
                    class_cube[0, X1 - w: X1 + w + 1, X2 - w: X2 + w + 1] = out
                    if next_X2_2 == n2:
                        next_X1_1, next_X1_2 = X1 + w, min(X1 + 3*w + 1, n1)
                        next_X2_1, next_X2_2 = 0, in_shape[2]
                    else:
                        next_X2_1, next_X2_2 = X2 + w, min(X2 + 3*w + 1, n2)
        # Persist this input's classified cube under <output>/<name>-input/out.npy.
        input_ref = input_name + "-input"
        save_path = output_folder + sep + input_ref
        logger.infer('Saving output to output path: {}'.format(
            save_path + sep + "out.npy"
        ))
        if not os.path.exists(save_path):
            os.mkdir(save_path)
        np.save(save_path + sep + "out", class_cube)
    logger.infer('Complete!')
def infer_section_async(arg_obj, logger, get_functions):
"""
Infer on section data asynchronously. In order to use this in your
configuration, specify ``infer_type`` as ``section_async``. Section inference
requires that additional parameters such as ``slice``, ``subsampl``, and
``slice_no`` be specified in the JSON configuration file. This function's
specific arguments will be filled according to your configuration inputs.
:param arg_obj: Arguments object that holds parameters needed for inference.
:param logger: Common logger object for logging coherence.
:param get_functions: Functions associated with parameters given.
:return: None
"""
logger.setLevel(OUTPUT)
output_folder = arg_obj.output
if not os.path.exists(output_folder):
logger.output('Making new folder for output storage.')
os.mkdir(output_folder)
else:
logger.output('Output folder already exists. Deleting...')
shutil.rmtree(output_folder)
logger.output('Making new folder for output storage.')
os.mkdir(output_folder)
logger.setLevel(INFER)
logger.infer('Setting up inference queues and requests...')
preprocess, postprocess, model = get_functions(
arg_obj.model, arg_obj.given_model_name, arg_obj.infer_type, arg_obj.streams)
in_shape = model.get_input_shape()
logger.infer('Using model: {}'.format(model.name))
# Expects one input and one output layer
assert (len(model.get_inputs()) <
2), "[ERROR] Expects model with one input layer."
assert (len(model.get_outputs()) <
2), "[ERROR] Expects model with one output layer."
slice_type = arg_obj.slice
subsampl = 1 # arg_obj.subsampl
im_size = arg_obj.im_size
slice_no = arg_obj.slice_no
return_full_size = arg_obj.return_to_fullsize
sep = os.path.sep
data_arr = []
assert (os.path.isdir(arg_obj.data) or os.path.isfile(
arg_obj.data)), "[ERROR] Unexpected data input."
if os.path.isdir(arg_obj.data):
for data_file_name in os.listdir(arg_obj.data):
path_to_file = arg_obj.data + sep + data_file_name
data, data_info = loader(path_to_file)
data_arr.append({'name': data_file_name, 'data': data})
if os.path.isfile(arg_obj.data):
data, data_info = loader(arg_obj.data)
data_arr.append({'name': arg_obj.data.replace("/", "-"), 'data': data})
def async_callback(param_dict):
"""
Params:
param_dict - dictionary which holds:
(1) request
(2) postprocess
(3) file_name
"""
request = param_dict['request']
postprocess = param_dict['postprocess']
order_dict = param_dict['order_dict']
orig_shape = param_dict['orig_shape']
slice_type = param_dict['slice_type']
i = param_dict['order']
output_blobs = request.output_blobs
out_layer = list(output_blobs.keys())[0]
output_dict = {out_layer: output_blobs[out_layer].buffer}
output_dict = postprocess(output_dict, orig_shape)
out = output_dict[list(output_dict.keys())[0]]
out = np.squeeze(out)
if slice_type == 'inline':
out = out[:, np.newaxis, :]
if slice_type == 'crossline':
out = out[:, :, np.newaxis]
if slice_type == 'timeslice':
out = out[np.newaxis, :, :]
order_dict[i] = {
'x0x1x2': param_dict['x0x1x2'], 'out': out
}
return out
requests = model.get_requests()
request_queue = InferRequestsQueue(requests, async_callback, postprocess)
logger.infer('Conducting inference...')
def ls(N): return np.linspace(0, N - 1, N, dtype='int')
for data_dict in data_arr:
input_name = data_dict['name']
data = data_dict['data']
logger.infer('Conducting inference on input: {}...'.format(input_name))
N0, N1, N2 = data.shape
x0_range = ls(N0)
x1_range = ls(N1)
x2_range = ls(N2)
pred_points = (x0_range[::subsampl],
x1_range[::subsampl], x2_range[::subsampl])
check_slice_type = slice_type == 'inline' or slice_type == 'crossline' or slice_type == 'timeslice'
assert check_slice_type, "[ERROR] Invalid slice_type: {}".format(
slice_type)
if slice_type == 'inline':
slice_no = slice_no - data_info['inline_start']
class_cube = data[::subsampl, 0:1, ::subsampl] * 0
x1_range = np.array([slice_no])
elif slice_type == 'crossline':
slice_no = slice_no - data_info['crossline_start']
class_cube = data[::subsampl, ::subsampl, 0:1, ] * 0
x2_range = np.array([slice_no])
elif slice_type == 'timeslice':
slice_no = slice_no - data_info['timeslice_start']
class_cube = data[0:1, ::subsampl, ::subsampl] * 0
x0_range = np.array([slice_no])
assert slice_no > - \
1, f"[ERROR] Invalid slice_no. For {input_name}, refer to: {data_info}"
n0, n1, n2 = class_cube.shape
x0_grid, x1_grid, x2_grid = np.meshgrid(
ls(n0,), ls(n1), ls(n2), indexing='ij')
X0_grid, X1_grid, X2_grid = np.meshgrid(
x0_range, x1_range, x2_range, indexing='ij')
X0_grid_sub = X0_grid[::subsampl, ::subsampl, ::subsampl]
X1_grid_sub = X1_grid[::subsampl, ::subsampl, ::subsampl]
X2_grid_sub = X2_grid[::subsampl, ::subsampl, ::subsampl]
w = in_shape[2] // 2
h = in_shape[3] // 2
order_dict = {}
# Decide iterator axis
# X0_grid_sub.size / w * w
iter_axis = range(
math.ceil(n1 / in_shape[2]) * math.ceil(n2 / in_shape[2]))
if slice_type == 'inline':
# X1_grid_sub.size / w * w
iter_axis = range(
math.ceil(n0 / in_shape[2]) * math.ceil(n2 / in_shape[2]))
if slice_type == 'crossline':
# X2_grid_sub.size / w * w
iter_axis = range(
math.ceil(n0 / in_shape[2]) * math.ceil(n1 / in_shape[2]))
iter_axis = tqdm(iter_axis)
next_X0_1, next_X0_2 = 0, in_shape[2]
next_X1_1, next_X1_2 = 0, in_shape[2]
next_X2_1, next_X2_2 = 0, in_shape[2]
for i in iter_axis:
X0 = X0_grid_sub.ravel()[
i] if slice_type == 'timeslice' else next_X0_1 + w
X1 = X1_grid_sub.ravel()[
i] if slice_type == 'inline' else next_X1_1 + w
X2 = X2_grid_sub.ravel()[
i] if slice_type == 'crossline' else next_X2_1 + w
mini_sheet = np.zeros(in_shape)
found_mini_sheet = False
end_X0 = end_X1 = end_X2 = 1
if slice_type == 'inline' and next_X0_1 == X0 - w and next_X2_1 == X2 - w:
# X1 out
end_X0 = min(next_X0_2, X0 + w + 1)
end_X2 = min(next_X2_2, X2 + w + 1)
mini_sheet = data[X0-w:end_X0, X1, X2-w:end_X2]
mini_sheet = mini_sheet[np.newaxis, np.newaxis, :, :]
found_mini_sheet = True
if slice_type == 'crossline' and next_X0_1 == X0 - w and next_X1_1 == X1 - w:
# X2 out
end_X0 = min(next_X0_2, X0 + w + 1)
end_X1 = min(next_X1_2, X1 + w + 1)
mini_sheet = data[X0-w:end_X0, X1-w:end_X1, X2]
mini_sheet = mini_sheet[np.newaxis, np.newaxis, :, :]
found_mini_sheet = True
if slice_type == 'timeslice' and next_X1_1 == X1 - w and next_X2_1 == X2 - w:
# X0 out
end_X1 = min(next_X1_2, X1 + w + 1)
end_X2 = min(next_X2_2, X2 + w + 1)
mini_sheet = data[X0, X1-w: end_X1, X2-w: end_X2]
mini_sheet = mini_sheet[np.newaxis, np.newaxis, :, :]
found_mini_sheet = True
if found_mini_sheet:
orig_shape = mini_sheet.shape
input_dict = preprocess(mini_sheet, model.get_inputs(), model)
# Inference! input_dict => {output_layer: output_data}, latency
infer_request = request_queue.get_idle_request()
infer_request.start_async(input_dict, input_name, {
'x0x1x2': [
(0, 1) if slice_type == 'timeslice' else (X0 - w, end_X0),
(0, 1) if slice_type == 'inline' else (X1 - w, end_X1),
(0, 1) if slice_type == 'crossline' else (X2 - w, end_X2)
], 'order': i, 'order_dict': order_dict, 'orig_shape': orig_shape,
'slice_type': slice_type
})
if slice_type == 'inline':
# X1 out
if next_X2_2 == n2:
next_X0_1, next_X0_2 = X0 + w, min(X0 + 3*w + 1, n0)
next_X2_1, next_X2_2 = 0, in_shape[2]
else:
next_X2_1, next_X2_2 = X2 + w, min(X2 + 3*w + 1, n2)
if slice_type == 'crossline':
# X2 out
if next_X1_2 == n1:
next_X0_1, next_X0_2 = X0 + w, min(X0 + 3*w + 1, n0)
next_X1_1, next_X1_2 = 0, in_shape[2]
else:
next_X1_1, next_X1_2 = X1 + w, min(X1 + 3*w + 1, n1)
if slice_type == 'timeslice':
# X0 out
if next_X2_2 == n2:
next_X1_1, next_X1_2 = X1 + w, min(X1 + 3*w + 1, n1)
next_X2_1, next_X2_2 = 0, in_shape[2]
else:
next_X2_1, next_X2_2 = X2 + w, min(X2 + 3*w + 1, n2)
logger.infer('Cleaning up requests...')
request_queue.wait_all()
logger.infer('Placing prediction in proper cube spot...')
available_keys = set(list(order_dict.keys()))
# Decide iterator axis
# X0_grid_sub.size / w * w
iter_axis = range(
math.ceil(n1 / in_shape[2]) * math.ceil(n2 / in_shape[2]))
if slice_type == 'inline':
# X1_grid_sub.size / w * w
iter_axis = range(
math.ceil(n0 / in_shape[2]) * math.ceil(n2 / in_shape[2]))
if slice_type == 'crossline':
# X2_grid_sub.size / w * w
iter_axis = range(
math.ceil(n0 / in_shape[2]) * math.ceil(n1 / in_shape[2]))
iter_axis = tqdm(iter_axis)
for i in iter_axis:
if i in available_keys:
out_w_param = order_dict[i]
out = out_w_param['out']
x0, x1, x2 = out_w_param['x0x1x2']
x0_1, x0_2 = x0
x1_1, x1_2 = x1
x2_1, x2_2 = x2
class_cube[x0_1:x0_2, x1_1:x1_2, x2_1:x2_2] = out
input_ref = input_name + "-input"
save_path = output_folder + sep + input_ref
logger.infer('Saving output to output path: {}'.format(
save_path + sep + "out.npy"))
if not os.path.exists(save_path):
os.mkdir(save_path)
np.save(save_path + sep + "out", class_cube)
logger.infer('Complete!')
| 39.586998
| 107
| 0.556463
| 2,842
| 20,704
| 3.791696
| 0.082336
| 0.048441
| 0.032665
| 0.020509
| 0.850037
| 0.83222
| 0.825724
| 0.820898
| 0.81765
| 0.800204
| 0
| 0.043822
| 0.327666
| 20,704
| 522
| 108
| 39.662835
| 0.730316
| 0.094764
| 0
| 0.775676
| 0
| 0
| 0.093275
| 0
| 0
| 0
| 0
| 0
| 0.027027
| 1
| 0.013514
| false
| 0
| 0.021622
| 0.005405
| 0.037838
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fb90d532a7d42f2794f73b0f1bdc14d9de82837e
| 3,094
|
py
|
Python
|
TextSearchEngine/test_parse_finder.py
|
LechMadeyski/PhD19MarekSosnicki
|
9ea8af436e451f47062a132abc02f21b00403876
|
[
"Apache-2.0"
] | 2
|
2021-03-23T18:23:27.000Z
|
2021-06-22T20:01:59.000Z
|
TextSearchEngine/test_parse_finder.py
|
LechMadeyski/AutomatedSearchHelper
|
9ea8af436e451f47062a132abc02f21b00403876
|
[
"Apache-2.0"
] | null | null | null |
TextSearchEngine/test_parse_finder.py
|
LechMadeyski/AutomatedSearchHelper
|
9ea8af436e451f47062a132abc02f21b00403876
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from .parse_finder import parse_finder
from .search_functions import *
def test_parse_throw_when_opening_not_found():
with pytest.raises(ValueError):
parse_finder("")
with pytest.raises(ValueError):
parse_finder("EXACT_WORD_")
def test_parse_throw_when_unknown_method():
with pytest.raises(ValueError):
parse_finder("UNKNOWN()")
def test_parse_EXACT_WORD_shall_throw_when_word_is_not_found():
with pytest.raises(ValueError):
parse_finder('EXACT_WORD()')
with pytest.raises(ValueError):
parse_finder('EXACT_WORD(")')
def test_parse_EXACT_WORD_shall_throw_when_method_end_is_not_found():
with pytest.raises(ValueError):
parse_finder('EXACT_WORD("aAA"')
def test_parse_EXACT_WORD():
text = 'EXACT_WORD("A")'
finder = parse_finder(text)
assert isinstance(finder, EXACT_WORD)
assert str(finder) == 'EXACT_WORD("A")'
def test_parse_EXACT_WORD_with_case_sensitive():
text = 'EXACT_WORD("A", case_sensitive)'
finder = parse_finder(text)
assert isinstance(finder, EXACT_WORD)
assert str(finder) == 'EXACT_WORD("A",case_sensitive)'
def test_parse_PARTIAL_WORD():
text = 'PARTIAL_WORD("A")'
finder = parse_finder(text)
assert isinstance(finder, PARTIAL_WORD)
assert str(finder) == 'PARTIAL_WORD("A")'
def test_parse_PARTIAL_WORD_with_case_sensitive():
text = 'PARTIAL_WORD("A", case_sensitive)'
finder = parse_finder(text)
assert isinstance(finder, PARTIAL_WORD)
assert str(finder) == 'PARTIAL_WORD("A",case_sensitive)'
def test_parse_PARTIAL_WORD_ignore_spaces():
text = ' PARTIAL_WORD ("A", case_sensitive ) '
finder = parse_finder(text)
assert isinstance(finder, PARTIAL_WORD)
assert str(finder) == 'PARTIAL_WORD("A",case_sensitive)'
def test_parse_OR_single_matcher():
text = 'OR(PARTIAL_WORD("A"))'
finder = parse_finder(text)
assert isinstance(finder, OR)
assert str(finder) == 'OR(PARTIAL_WORD("A"))'
def test_parse_OR_two_simple_matchers():
text = 'OR(PARTIAL_WORD("A"), EXACT_WORD("C"))'
finder = parse_finder(text)
assert isinstance(finder, OR)
assert str(finder) == 'OR(PARTIAL_WORD("A"), EXACT_WORD("C"))'
def test_parse_OR_inside_OR():
text = 'OR(PARTIAL_WORD("A"), OR(EXACT_WORD("B", case_sensitive), PARTIAL_WORD("D")))'
finder = parse_finder(text)
assert isinstance(finder, OR)
assert str(finder) == 'OR(PARTIAL_WORD("A"), OR(EXACT_WORD("B",case_sensitive), PARTIAL_WORD("D")))'
def test_parse_AND_two_simple_matchers():
text = 'AND(PARTIAL_WORD("A"), EXACT_WORD("C"))'
finder = parse_finder(text)
assert isinstance(finder, AND)
assert str(finder) == 'AND(PARTIAL_WORD("A"), EXACT_WORD("C"))'
def test_parse_OR_AND_mix():
text = 'OR(AND(PARTIAL_WORD("A"), EXACT_WORD("W")), AND(EXACT_WORD("B", case_sensitive), PARTIAL_WORD("D")))'
finder = parse_finder(text)
assert isinstance(finder, OR)
assert str(finder) == 'OR(AND(PARTIAL_WORD("A"), EXACT_WORD("W")), AND(EXACT_WORD("B",case_sensitive), PARTIAL_WORD("D")))'
| 31.252525
| 127
| 0.704266
| 426
| 3,094
| 4.762911
| 0.112676
| 0.140956
| 0.094628
| 0.103499
| 0.880237
| 0.811237
| 0.781173
| 0.77723
| 0.726466
| 0.700838
| 0
| 0
| 0.151907
| 3,094
| 98
| 128
| 31.571429
| 0.773247
| 0
| 0
| 0.42029
| 0
| 0.028986
| 0.280543
| 0.123788
| 0
| 0
| 0
| 0
| 0.289855
| 1
| 0.202899
| false
| 0
| 0.043478
| 0
| 0.246377
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fba3ce3bf9fb4f87a1f4d4b7006aeb32509584d4
| 34,536
|
py
|
Python
|
tests/juniper/juniper_base_protocol_test.py
|
fbouliane/fake-switches
|
b46a18352a69fd6d29ce16e5a2befcf5de6aac26
|
[
"Apache-2.0"
] | null | null | null |
tests/juniper/juniper_base_protocol_test.py
|
fbouliane/fake-switches
|
b46a18352a69fd6d29ce16e5a2befcf5de6aac26
|
[
"Apache-2.0"
] | null | null | null |
tests/juniper/juniper_base_protocol_test.py
|
fbouliane/fake-switches
|
b46a18352a69fd6d29ce16e5a2befcf5de6aac26
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from hamcrest import assert_that, has_length, has_items, equal_to, is_, is_not
from ncclient import manager
from ncclient.operations import RPCError
from tests import contains_regex
from tests.util.global_reactor import juniper_switch_ip, juniper_switch_netconf_port
from fake_switches.netconf import dict_2_etree, XML_ATTRIBUTES, XML_TEXT
class JuniperBaseProtocolTest(unittest.TestCase):
def setUp(self):
self.nc = self.create_client()
self.PORT_MODE_TAG = "port-mode"
def tearDown(self):
try:
self.nc.discard_changes()
finally:
self.nc.close_session()
def create_client(self):
return manager.connect(
host=juniper_switch_ip,
port=juniper_switch_netconf_port,
username="root",
password="root",
hostkey_verify=False,
device_params={'name': 'junos'}
)
def test_capabilities(self):
assert_that(self.nc.server_capabilities, has_items(
"urn:ietf:params:xml:ns:netconf:base:1.0",
"urn:ietf:params:xml:ns:netconf:capability:candidate:1.0",
"urn:ietf:params:xml:ns:netconf:capability:confirmed-commit:1.0",
"urn:ietf:params:xml:ns:netconf:capability:validate:1.0",
"urn:ietf:params:xml:ns:netconf:capability:url:1.0?protocol=http,ftp,file",
"http://xml.juniper.net/netconf/junos/1.0",
"http://xml.juniper.net/dmi/system/1.0",
))
def test_get_running_config(self):
result = self.nc.get_config(source="running")
conf = result._NCElement__result.xml
assert_that(conf, contains_regex(
'<configuration xmlns="http://xml.juniper.net/xnm/1.1/xnm" junos:commit-localtime="[^"]*" junos:commit-seconds="[^"]*" junos:commit-user="[^"]*">'))
assert_that(result.xpath("data/configuration/interfaces/interface/unit/family/ethernet-switching"),
has_length(4))
assert_that(result.xpath("data/configuration/vlans/vlan"), has_length(0))
def test_lock_edit_candidate_add_vlan_and_commit(self):
with self.nc.locked(target='candidate'):
result = self.nc.edit_config(target='candidate', config=dict_2_etree({
"config": {
"configuration": {
"vlans": {
"vlan": {
"name": "VLAN2999",
}
}
}
}}))
assert_that(result.xpath("//rpc-reply/ok"), has_length(1))
result = self.nc.commit()
assert_that(result.xpath("//rpc-reply/ok"), has_length(1))
result = self.nc.get_config(source="running")
assert_that(result.xpath("data/configuration/vlans/vlan"), has_length(1))
self.edit({
"vlans": {
"vlan": {
XML_ATTRIBUTES: {"operation": "delete"},
"name": "VLAN2999"
}
}
})
self.nc.commit()
result = self.nc.get_config(source="running")
assert_that(result.xpath("data/configuration/vlans/vlan"), has_length(0))
def test_locking_fails_if_changes_are_being_made(self):
nc2 = self.create_client()
try:
self.nc.edit_config(target='candidate', config=dict_2_etree({
"config": {
"configuration": {
"vlans": {
"vlan": [
{"name": "VLAN2999"},
{"description": "WHAAT"}
]
}
}
}}))
with self.assertRaises(RPCError):
with nc2.locked(target='candidate'):
self.fail('Should not be able to lock an edited configuration')
finally:
self.nc.discard_changes()
nc2.close_session()
def test_double_locking_with_two_sessions(self):
nc2 = self.create_client()
try:
with self.nc.locked(target='candidate'):
with self.assertRaises(RPCError):
with nc2.locked(target='candidate'):
self.fail("The second lock should not have worked.")
finally:
nc2.close_session()
def test_bad_configuration_element(self):
with self.assertRaises(RPCError):
self.nc.edit_config(target='candidate', config=dict_2_etree({
"config": {
"configuration": {
"vbleh": "shizzle"
}
}}))
def test_create_vlan(self):
self.nc.edit_config(target='candidate', config=dict_2_etree({"config": {"configuration": {
"vlans": {
"vlan": [
{"name": "VLAN2999"},
{"description": "WHAAT"},
{"vlan-id": "2995"}
]
}
}}}))
self.nc.commit()
result = self.nc.get_config(source="running", filter=dict_2_etree({"filter": {
"configuration": {"vlans": {}}}
}))
assert_that(result.xpath("data/*"), has_length(1))
assert_that(result.xpath("data/configuration/*"), has_length(1))
assert_that(result.xpath("data/configuration/vlans/*"), has_length(1))
assert_that(result.xpath("data/configuration/vlans/vlan/*"), has_length(3))
vlan2995 = result.xpath("data/configuration/vlans/vlan")[0]
assert_that(vlan2995.xpath("name")[0].text, equal_to("VLAN2999"))
assert_that(vlan2995.xpath("description")[0].text, equal_to("WHAAT"))
assert_that(vlan2995.xpath("vlan-id")[0].text, equal_to("2995"))
self.cleanup(vlan("VLAN2999"))
def test_vlan_configuration_merging(self):
self.edit({
"vlans": {
"vlan": [
{"name": "VLAN2999"},
{"vlan-id": "2995"}
]}})
self.edit({
"vlans": {
"vlan": [
{"name": "VLAN2999"},
{"description": "shizzle"}
]}})
self.nc.commit()
self.edit({
"vlans": {
"vlan": [
{"name": "VLAN2999"},
{"vlan-id": "2996"},
{"description": {XML_ATTRIBUTES: {"operation": "delete"}}}
]}})
self.nc.commit()
result = self.nc.get_config(source="running", filter=dict_2_etree({"filter": {
"configuration": {"vlans": {}}}
}))
assert_that(result.xpath("data/configuration/vlans/vlan"), has_length(1))
vlan2995 = result.xpath("data/configuration/vlans/vlan")[0]
assert_that(vlan2995.xpath("name")[0].text, equal_to("VLAN2999"))
assert_that(vlan2995.xpath("description"), has_length(0))
assert_that(vlan2995.xpath("vlan-id")[0].text, equal_to("2996"))
self.cleanup(vlan("VLAN2999"))
def test_deletion_errors(self):
self.edit({
"vlans": {
"vlan": [
{"name": "VLAN2999"},
{"vlan-id": "2995"}]}})
with self.assertRaises(RPCError):
self.edit({
"vlans": {
"vlan": {
"name": "VLAN3000",
XML_ATTRIBUTES: {"operation": "delete"}}}})
with self.assertRaises(RPCError):
self.edit({
"vlans": {
"vlan": [
{"name": "VLAN2999"},
{"description": {XML_ATTRIBUTES: {"operation": "delete"}}}
]}})
self.nc.commit()
with self.assertRaises(RPCError):
self.edit({
"vlans": {
"vlan": {
"name": "VLAN3000",
XML_ATTRIBUTES: {"operation": "delete"}}}})
with self.assertRaises(RPCError):
self.edit({
"vlans": {
"vlan": [
{"name": "VLAN2999"},
{"description": {XML_ATTRIBUTES: {"operation": "delete"}}}
]}})
self.cleanup(vlan("VLAN2999"))
def test_access_mode(self):
self.edit({
"vlans": {
"vlan": [
{"name": "VLAN2995"},
{"vlan-id": "2995"}]},
"interfaces": {
"interface": [
{"name": "ge-0/0/3"},
{"unit": [
{"name": "0"},
{"family": {
"ethernet-switching": {
self.PORT_MODE_TAG: "access",
"vlan": [
{"members": "2995"},
]}}}]}]}})
self.nc.commit()
result = self.nc.get_config(source="running", filter=dict_2_etree({"filter": {
"configuration": {"interfaces": {"interface": {"name": "ge-0/0/3"}}}}
}))
assert_that(result.xpath("data/configuration/interfaces/interface"), has_length(1))
int003 = result.xpath("data/configuration/interfaces/interface")[0]
assert_that(int003.xpath("name")[0].text, equal_to("ge-0/0/3"))
assert_that(int003.xpath("unit/family/ethernet-switching/*"), has_length(2))
assert_that(int003.xpath("unit/family/ethernet-switching/{}".format(self.PORT_MODE_TAG))[0].text,
equal_to("access"))
assert_that(int003.xpath("unit/family/ethernet-switching/vlan/members"), has_length(1))
assert_that(int003.xpath("unit/family/ethernet-switching/vlan/members")[0].text, equal_to("2995"))
self.cleanup(vlan("VLAN2995"), interface("ge-0/0/3", [self.PORT_MODE_TAG, "vlan"]))
def test_assigning_unknown_vlan_raises(self):
self.edit({
"interfaces": {
"interface": [
{"name": "ge-0/0/3"},
{"unit": [
{"name": "0"},
{"family": {
"ethernet-switching": {
"vlan": {"members": "2000"}}}}]}]}})
with self.assertRaises(RPCError):
self.nc.commit()
def test_assigning_unknown_vlan_in_a_range_raises(self):
self.edit({
"vlans": {
"vlan": [
{"name": "VLAN2995"},
{"vlan-id": "2995"}]},
"interfaces": {
"interface": [
{"name": "ge-0/0/3"},
{"unit": [
{"name": "0"},
{"family": {
"ethernet-switching": {
self.PORT_MODE_TAG: "trunk",
"vlan": {"members": "2995-2996"}}}}]}]}})
with self.assertRaises(RPCError):
self.nc.commit()
def test_assigning_unknown_native_vlan_raises(self):
self.edit({
"interfaces": {
"interface": [
{"name": "ge-0/0/3"},
{"unit": [
{"name": "0"},
{"family": {
"ethernet-switching": {
"native-vlan-id": "2000"}}}]}]}})
with self.assertRaises(RPCError):
self.nc.commit()
def test_trunk_mode(self):
self.edit({
"vlans": [
{"vlan": [
{"name": "VLAN2995"},
{"vlan-id": "2995"}]},
{"vlan": [
{"name": "VLAN2996"},
{"vlan-id": "2996"}]},
{"vlan": [
{"name": "VLAN2997"},
{"vlan-id": "2997"}]},
],
"interfaces": {
"interface": [
{"name": "ge-0/0/3"},
{"unit": [
{"name": "0"},
{"family": {
"ethernet-switching": {
self.PORT_MODE_TAG: "trunk",
"native-vlan-id": "2996",
"vlan": [
{"members": "2995"},
{"members": "2997"},
]}}}]}]}})
self.nc.commit()
result = self.nc.get_config(source="running", filter=dict_2_etree({"filter": {
"configuration": {"interfaces": {"interface": {"name": "ge-0/0/3"}}}}
}))
assert_that(result.xpath("data/configuration/interfaces/interface"), has_length(1))
int003 = result.xpath("data/configuration/interfaces/interface")[0]
assert_that(int003.xpath("name")[0].text, equal_to("ge-0/0/3"))
assert_that(int003.xpath("unit/family/ethernet-switching/*"), has_length(3))
assert_that(int003.xpath("unit/family/ethernet-switching/{}".format(self.PORT_MODE_TAG))[0].text,
equal_to("trunk"))
assert_that(int003.xpath("unit/family/ethernet-switching/native-vlan-id")[0].text, equal_to("2996"))
assert_that(int003.xpath("unit/family/ethernet-switching/vlan/members"), has_length(2))
assert_that(int003.xpath("unit/family/ethernet-switching/vlan/members")[0].text, equal_to("2995"))
assert_that(int003.xpath("unit/family/ethernet-switching/vlan/members")[1].text, equal_to("2997"))
self.edit({
"interfaces": {
"interface": [
{"name": "ge-0/0/3"},
{"unit": [
{"name": "0"},
{"family": {
"ethernet-switching": {
"vlan": [
{"members": {XML_TEXT: "2995", XML_ATTRIBUTES: {"operation": "delete"}}},
]}}}]}]}})
self.nc.commit()
result = self.nc.get_config(source="running", filter=dict_2_etree({"filter": {
"configuration": {"interfaces": {"interface": {"name": "ge-0/0/3"}}}}
}))
int003 = result.xpath("data/configuration/interfaces/interface")[0]
assert_that(int003.xpath("unit/family/ethernet-switching/vlan/members"), has_length(1))
assert_that(int003.xpath("unit/family/ethernet-switching/vlan/members")[0].text, equal_to("2997"))
self.cleanup(vlan("VLAN2995"), vlan("VLAN2996"), vlan("VLAN2997"),
interface("ge-0/0/3", [self.PORT_MODE_TAG, "native-vlan-id", "vlan"]))
result = self.nc.get_config(source="running", filter=dict_2_etree({"filter": {
"configuration": {"vlans": {}}}
}))
assert_that(result.xpath("data/configuration/vlans/vlan"), has_length(0))
def test_set_spanning_tree_options(self):
self.edit({
"protocols": {
"rstp": {
"interface": [
{"name": "ge-0/0/3"},
{"edge": ""},
{"no-root-port": ""}]}}})
self.nc.commit()
result = self.nc.get_config(source="running", filter=dict_2_etree({"filter": {
"configuration": {"protocols": {"rstp": {"interface": {"name": "ge-0/0/3"}}}}}
}))
assert_that(result.xpath("data/configuration/protocols/rstp/interface"), has_length(1))
interface = result.xpath("data/configuration/protocols/rstp/interface")[0]
assert_that(interface, has_length(3))
assert_that(interface.xpath("name")[0].text, equal_to("ge-0/0/3"))
assert_that(interface.xpath("edge"), has_length(1))
assert_that(interface.xpath("no-root-port"), has_length(1))
self.edit({
"protocols": {
"rstp": {
"interface": {
XML_ATTRIBUTES: {"operation": "delete"},
"name": "ge-0/0/3"}}}})
self.nc.commit()
result = self.nc.get_config(source="running", filter=dict_2_etree({"filter": {
"configuration": {"protocols": ""}}
}))
assert_that(result.xpath("data/configuration/protocols"), has_length(1))
def test_deleting_spanning_tree_options(self):
self.edit({
"protocols": {
"rstp": {
"interface": [
{"name": "ge-0/0/3"},
{"edge": ""},
{"no-root-port": ""}]}}})
self.nc.commit()
self.edit({
"protocols": {
"rstp": {
"interface": [
{"name": "ge-0/0/3"},
{"edge": {XML_ATTRIBUTES: {"operation": "delete"}}},
{"no-root-port": {XML_ATTRIBUTES: {"operation": "delete"}}}]}}})
self.nc.commit()
result = self.nc.get_config(source="running", filter=dict_2_etree({"filter": {
"configuration": {"protocols": {"rstp": {"interface": {"name": "ge-0/0/3"}}}}}
}))
assert_that(result.xpath("data/configuration/protocols/rstp/interface"), has_length(0))
def test_set_lldp(self):
self.edit({
"protocols": {
"lldp": {
"interface": [
{"name": "ge-0/0/3"},
{"disable": ""}]}}})
self.nc.commit()
result = self.nc.get_config(source="running", filter=dict_2_etree({"filter": {
"configuration": {"protocols": {"lldp": {"interface": {"name": "ge-0/0/3"}}}}}
}))
assert_that(result.xpath("data/configuration/protocols/lldp/interface"), has_length(1))
interface = result.xpath("data/configuration/protocols/lldp/interface")[0]
assert_that(interface, has_length(2))
assert_that(interface.xpath("name")[0].text, equal_to("ge-0/0/3"))
assert_that(len(interface.xpath("disable")), equal_to(1))
self.edit({
"protocols": {
"lldp": {
"interface": [
{"name": "ge-0/0/3"},
{"disable": {XML_ATTRIBUTES: {"operation": "delete"}}}]}}})
self.nc.commit()
result = self.nc.get_config(source="running", filter=dict_2_etree({"filter": {
"configuration": {"protocols": {"lldp": {"interface": {"name": "ge-0/0/3"}}}}}
}))
assert_that(result.xpath("data/configuration/protocols/lldp/interface")[0], has_length(1))
self.edit({
"protocols": {
"lldp": {
"interface": {
XML_ATTRIBUTES: {"operation": "delete"},
"name": "ge-0/0/3"}}}})
self.nc.commit()
result = self.nc.get_config(source="running", filter=dict_2_etree({"filter": {
"configuration": {"protocols": ""}}
}))
assert_that(result.xpath("data/configuration/protocols"), has_length(1))
def test_set_interface_description(self):
self.edit({
"interfaces": {
"interface": [
{"name": "ge-0/0/2"},
{"description": "Hey there beautiful"}]}})
self.nc.commit()
result = self.nc.get_config(source="running", filter=dict_2_etree({"filter": {
"configuration": {"interfaces": {"interface": {"name": "ge-0/0/2"}}}}
}))
assert_that(result.xpath("data/configuration/interfaces/interface"), has_length(1))
int002 = result.xpath("data/configuration/interfaces/interface")[0]
assert_that(int002.xpath("name")[0].text, equal_to("ge-0/0/2"))
assert_that(int002.xpath("description")[0].text, equal_to("Hey there beautiful"))
self.edit({
"interfaces": {
"interface": [
{"name": "ge-0/0/2"},
{"description": {XML_ATTRIBUTES: {"operation": "delete"}}}]}})
self.nc.commit()
result = self.nc.get_config(source="running", filter=dict_2_etree({"filter": {
"configuration": {"interfaces": {"interface": {"name": "ge-0/0/2"}}}}
}))
assert_that(result.xpath("data/configuration/interfaces/interface"), has_length(1))
int002 = result.xpath("data/configuration/interfaces/interface")[0]
assert_that(int002.xpath("description"), has_length(0))
def test_set_interface_disabling(self):
result = self.nc.get_config(source="running", filter=dict_2_etree({"filter": {
"configuration": {"interfaces": {"interface": {"name": "ge-0/0/2"}}}}}))
int002 = result.xpath("data/configuration/interfaces/interface")[0]
assert_that(int002.xpath("enable"), has_length(0))
assert_that(int002.xpath("disable"), has_length(0))
self.edit({"interfaces": {"interface": [{"name": "ge-0/0/2"}, {"disable": ""}]}})
self.nc.commit()
result = self.nc.get_config(source="running", filter=dict_2_etree({"filter": {
"configuration": {"interfaces": {"interface": {"name": "ge-0/0/2"}}}}}))
int002 = result.xpath("data/configuration/interfaces/interface")[0]
assert_that(int002.xpath("enable"), has_length(0))
assert_that(int002.xpath("disable"), has_length(1))
self.edit({"interfaces": {"interface": [{"name": "ge-0/0/2"}, {"enable": ""}]}})
self.nc.commit()
result = self.nc.get_config(source="running", filter=dict_2_etree({"filter": {
"configuration": {"interfaces": {"interface": {"name": "ge-0/0/2"}}}}}))
int002 = result.xpath("data/configuration/interfaces/interface")[0]
assert_that(int002.xpath("enable"), has_length(1))
assert_that(int002.xpath("disable"), has_length(0))
self.edit({"interfaces": {
"interface": [{"name": "ge-0/0/2"}, {"enable": {XML_ATTRIBUTES: {"operation": "delete"}}}]}})
self.nc.commit()
result = self.nc.get_config(source="running", filter=dict_2_etree({"filter": {
"configuration": {"interfaces": {"interface": {"name": "ge-0/0/2"}}}}}))
int002 = result.xpath("data/configuration/interfaces/interface")[0]
assert_that(int002.xpath("enable"), has_length(0))
assert_that(int002.xpath("disable"), has_length(0))
self.edit({"interfaces": {"interface": [{"name": "ge-0/0/2"}, {"disable": ""}]}})
self.nc.commit()
self.edit({"interfaces": {
"interface": [{"name": "ge-0/0/2"}, {"disable": {XML_ATTRIBUTES: {"operation": "delete"}}}]}})
self.nc.commit()
result = self.nc.get_config(source="running", filter=dict_2_etree({"filter": {
"configuration": {"interfaces": {"interface": {"name": "ge-0/0/2"}}}}}))
int002 = result.xpath("data/configuration/interfaces/interface")[0]
assert_that(int002.xpath("enable"), has_length(0))
assert_that(int002.xpath("disable"), has_length(0))
def test_create_aggregated_port(self):
self.edit({
"interfaces": {
"interface": [
{"name": "ae1"},
{"description": "This is a Greg hated"}]}})
self.nc.commit()
ae1 = self.get_interface("ae1")
assert_that(ae1.xpath("*"), has_length(2))
assert_that(ae1.xpath("description")[0].text, is_("This is a Greg hated"))
self.edit({
"interfaces": {
"interface": [
{"name": "ae1"},
{"description": {XML_ATTRIBUTES: {"operation": "delete"}}},
{"aggregated-ether-options": {
"link-speed": "10g",
"auto-negotiation": {},
"lacp": {
"active": {},
"periodic": "slow"}}}]}})
self.nc.commit()
ae1 = self.get_interface("ae1")
assert_that(ae1.xpath("*"), has_length(2))
assert_that(ae1.xpath("aggregated-ether-options/*"), has_length(3))
assert_that(ae1.xpath("aggregated-ether-options/link-speed")[0].text, is_("10g"))
assert_that(ae1.xpath("aggregated-ether-options/auto-negotiation"), has_length(1))
assert_that(ae1.xpath("aggregated-ether-options/lacp/*"), has_length(2))
assert_that(ae1.xpath("aggregated-ether-options/lacp/active"), has_length(1))
assert_that(ae1.xpath("aggregated-ether-options/lacp/periodic")[0].text, is_("slow"))
self.edit({
"vlans": [
{"vlan": [
{"name": "VLAN2995"},
{"vlan-id": "2995"}]},
{"vlan": [
{"name": "VLAN2997"},
{"vlan-id": "2997"}]},
],
"interfaces": {
"interface": [
{"name": "ae1"},
{"aggregated-ether-options": {
"link-speed": {XML_ATTRIBUTES: {"operation": "delete"}},
"auto-negotiation": {XML_ATTRIBUTES: {"operation": "delete"}},
"lacp": {
"active": {XML_ATTRIBUTES: {"operation": "delete"}},
"periodic": "slow"}}},
{"unit": [
{"name": "0"},
{"family": {
"ethernet-switching": {
self.PORT_MODE_TAG: "trunk",
"vlan": [
{"members": "2995"},
{"members": "2997"}]}}}]}]}})
self.nc.commit()
ae1 = self.get_interface("ae1")
assert_that(ae1.xpath("*"), has_length(3))
assert_that(ae1.xpath("aggregated-ether-options/*"), has_length(1))
assert_that(ae1.xpath("aggregated-ether-options/lacp/periodic")[0].text, is_("slow"))
assert_that(ae1.xpath("unit/family/ethernet-switching/vlan/members"), has_length(2))
self.cleanup(vlan("VLAN2995"), vlan("VLAN2997"), interface("ae1"))
result = self.nc.get_config(source="running", filter=dict_2_etree({"filter": {
"configuration": {"interfaces": {"interface": {"name": "ae1"}}}}}))
assert_that(result.xpath("configuration/interfaces"), has_length(0))
def test_assign_port_to_aggregated_interface(self):
self.edit({
"vlans": [
{"vlan": [
{"name": "VLAN2995"},
{"vlan-id": "2995"}]},
],
"interfaces": [
{"interface": [
{"name": "ge-0/0/1"},
{"unit": [
{"name": "0"},
{"family": {
"ethernet-switching": {
self.PORT_MODE_TAG: "access"}}}]}]},
{"interface": [
{"name": "ge-0/0/2"},
{"unit": [
{"name": "0"},
{"family": {
"ethernet-switching": {
self.PORT_MODE_TAG: "access"}}}]}]},
]})
self.nc.commit()
self.edit({
"vlans": [
{"vlan": [
{"name": "VLAN2995"},
{"vlan-id": "2995"}]},
],
"interfaces": [
{"interface": [
{"name": "ae1"},
{"aggregated-ether-options": {
"link-speed": "10g",
"lacp": {
"active": {},
"periodic": "slow"}}},
{"unit": [
{"name": "0"},
{"family": {
"ethernet-switching": {
self.PORT_MODE_TAG: "trunk",
"vlan": [
{"members": "2995"}]}}}]}]},
{"interface": [
{"name": "ge-0/0/1"},
{"ether-options": {
"auto-negotiation": {},
"speed": {"ethernet-10g": {}},
"ieee-802.3ad": {"bundle": "ae1"}}},
{"unit": {XML_ATTRIBUTES: {"operation": "delete"}}}]},
{"interface": [{XML_ATTRIBUTES: {"operation": "replace"}},
{"name": "ge-0/0/2"},
{"ether-options": {
"speed": {"ethernet-10g": {}},
"ieee-802.3ad": {"bundle": "ae1"}}}]},
]})
self.nc.commit()
ge001 = self.get_interface("ge-0/0/1")
assert_that(ge001.xpath("*"), has_length(2))
assert_that(ge001.xpath("unit"), has_length(0))
assert_that(ge001.xpath("ether-options/*"), has_length(3))
assert_that(ge001.xpath("ether-options/auto-negotiation"), has_length(1))
assert_that(ge001.xpath("ether-options/speed/ethernet-10g"), has_length(1))
assert_that(ge001.xpath("ether-options/ieee-802.3ad/bundle")[0].text, is_("ae1"))
ge002 = self.get_interface("ge-0/0/2")
assert_that(ge002.xpath("*"), has_length(2))
assert_that(ge002.xpath("unit"), has_length(0))
assert_that(ge002.xpath("ether-options/*"), has_length(2))
assert_that(ge002.xpath("ether-options/speed/ethernet-10g"), has_length(1))
assert_that(ge002.xpath("ether-options/ieee-802.3ad/bundle")[0].text, is_("ae1"))
self.edit({
"interfaces": [
{"interface": [
{"name": "ge-0/0/1"},
{"ether-options": {
"auto-negotiation": {XML_ATTRIBUTES: {"operation": "delete"}},
"speed": "10g",
"ieee-802.3ad": {XML_ATTRIBUTES: {"operation": "delete"}}}}]},
{"interface": [
{"name": "ge-0/0/2"},
{"ether-options": {XML_ATTRIBUTES: {"operation": "delete"}}}]},
]})
self.nc.commit()
ge001 = self.get_interface("ge-0/0/1")
assert_that(ge001.xpath("unit"), has_length(0))
assert_that(ge001.xpath("ether-options/*"), has_length(1))
assert_that(ge001.xpath("ether-options/speed/ethernet-10g"), has_length(1))
ge002 = self.get_interface("ge-0/0/2")
assert_that(ge002.xpath("*"), has_length(1))
assert_that(ge002.xpath("unit"), has_length(0))
assert_that(ge002.xpath("ether-options"), has_length(0))
self.cleanup(vlan("VLAN2995"), interface("ae1"), reset_interface("ge-0/0/1"), reset_interface("ge-0/0/2"))
def test_compare_configuration(self):
    """Diff is empty, becomes non-empty after an uncommitted edit, and is
    empty again once the candidate is committed."""

    def diff_text():
        # Fetch the candidate-vs-running diff and return its stripped text.
        reply = self.nc.compare_configuration()
        return reply.xpath("configuration-information/configuration-output")[0].text.strip()

    assert_that(diff_text(), is_(""))

    self.edit({
        "vlans": [
            {"vlan": [
                {"name": "VLAN2995"},
                {"vlan-id": "2995"}]},
        ]})

    assert_that(diff_text(), is_not(""))

    self.nc.commit()

    assert_that(diff_text(), is_(""))
def edit(self, config):
    """Push *config* into the candidate datastore and assert the RPC replied <ok/>."""
    payload = dict_2_etree({"config": {"configuration": config}})
    reply = self.nc.edit_config(target="candidate", config=payload)
    assert_that(reply.xpath("//rpc-reply/ok"), has_length(1))
def cleanup(self, *args):
    """Invoke each cleanup callable with self.edit, then commit the result."""
    for teardown in args:
        teardown(self.edit)
    self.nc.commit()
def get_interface(self, name):
    """Return the <interface> element named *name* from the running datastore."""
    query = dict_2_etree({"filter": {
        "configuration": {"interfaces": {"interface": {"name": name}}}}})
    reply = self.nc.get_config(source="running", filter=query)
    return reply.xpath("data/configuration/interfaces/interface")[0]
def vlan(vlan_name):
    """Return a cleanup callable that deletes the named vlan."""
    def delete_vlan(edit):
        edit({"vlans": {
            "vlan": {"name": vlan_name, XML_ATTRIBUTES: {"operation": "delete"}}
        }})
    return delete_vlan
def interface(interface_name, fields=None):
    """Return a cleanup callable for an interface.

    With *fields*, only those ethernet-switching fields on unit 0 are
    deleted; otherwise the whole interface element is removed.
    """
    if fields is None:
        def remover(edit):
            edit({"interfaces": {
                "interface": {
                    "name": interface_name,
                    XML_ATTRIBUTES: {"operation": "delete"}}}})
        return remover

    def remover(edit):
        deletions = {field: {XML_ATTRIBUTES: {"operation": "delete"}}
                     for field in fields}
        edit({"interfaces": {
            "interface": [
                {"name": interface_name},
                {"unit": [
                    {"name": "0"},
                    {"family": {
                        "ethernet-switching": deletions
                    }}]}]}})
    return remover
def reset_interface(interface_name):
    """Return a cleanup callable that replaces the interface with a bare
    ethernet-switching unit 0 configuration."""
    def resetter(edit):
        edit({"interfaces": {
            "interface": [{XML_ATTRIBUTES: {"operation": "replace"}},
                          {"name": interface_name},
                          {"unit": [
                              {"name": "0"},
                              {"family": {
                                  "ethernet-switching": {}}}]}]}})
    return resetter
| 38.979684
| 160
| 0.487636
| 3,287
| 34,536
| 4.980529
| 0.085488
| 0.061084
| 0.01295
| 0.019547
| 0.835257
| 0.80667
| 0.761652
| 0.736791
| 0.707959
| 0.665873
| 0
| 0.038914
| 0.342975
| 34,536
| 885
| 161
| 39.023729
| 0.682561
| 0.01581
| 0
| 0.719323
| 0
| 0.004231
| 0.241141
| 0.082735
| 0
| 0
| 0
| 0
| 0.155148
| 1
| 0.049365
| false
| 0.00141
| 0.009873
| 0.00141
| 0.067701
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
837911ce03bcddcdd0261a000e371411a0567ace
| 285
|
py
|
Python
|
tests/parser/wellfounded.11.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/wellfounded.11.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/wellfounded.11.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
# Parser test fixture: a program with negation over graph facts g/3,
# evaluated under well-founded semantics.
input = """
t(Z) :- t0(Z).
t(Z) :- g(X,Y,Z), t(X).
t(Z) :- g(X,Y,Z), not t(Y).
t0(1).
g(1,2,3).
g(2,5,4).
g(2,4,5).
g(5,3,6).
"""
# Expected parser output: the program echoed back unchanged.
output = """
t(Z) :- t0(Z).
t(Z) :- g(X,Y,Z), t(X).
t(Z) :- g(X,Y,Z), not t(Y).
t0(1).
g(1,2,3).
g(2,5,4).
g(2,4,5).
g(5,3,6).
"""
| 11.4
| 28
| 0.340351
| 80
| 285
| 1.2125
| 0.1875
| 0.123711
| 0.123711
| 0.164948
| 0.886598
| 0.886598
| 0.886598
| 0.886598
| 0.886598
| 0.886598
| 0
| 0.136986
| 0.231579
| 285
| 24
| 29
| 11.875
| 0.305936
| 0
| 0
| 0.9
| 0
| 0
| 0.883019
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
838bcadbde8f28775e0cebe3f0b866c396667213
| 44
|
py
|
Python
|
test/auditing/test_check_discovery/tasks/correct_second_module/patches/patch_more_stuff.py
|
uliana291/the-zoo
|
a15a4162c39553abe91224f4feff5d3b66f9413e
|
[
"MIT"
] | 90
|
2018-11-20T10:58:24.000Z
|
2022-02-19T16:12:46.000Z
|
test/auditing/test_check_discovery/tasks/correct_second_module/patches/patch_more_stuff.py
|
uliana291/the-zoo
|
a15a4162c39553abe91224f4feff5d3b66f9413e
|
[
"MIT"
] | 348
|
2018-11-21T09:22:31.000Z
|
2021-11-03T13:45:08.000Z
|
test/auditing/test_check_discovery/tasks/correct_second_module/patches/patch_more_stuff.py
|
aexvir/the-zoo
|
7816afb9a0a26c6058b030b4a987c73e952d92bd
|
[
"MIT"
] | 11
|
2018-12-08T18:42:07.000Z
|
2021-02-21T06:27:58.000Z
|
def patch_dummy_function():
    """Dummy patch fixture; always reports success."""
    return True
| 14.666667
| 27
| 0.75
| 6
| 44
| 5.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 44
| 2
| 28
| 22
| 0.861111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
83b24707d35feeed159a3ea54e86846d74b1c8ef
| 14
|
py
|
Python
|
pages/themes/stringsNumbersVariablesComments/HW/tmp.py
|
WWWCourses/ProgressBG-Python-UniCredit-Slides
|
87539aa2f73738370ac8df865cf3a1adac447391
|
[
"MIT"
] | null | null | null |
pages/themes/stringsNumbersVariablesComments/HW/tmp.py
|
WWWCourses/ProgressBG-Python-UniCredit-Slides
|
87539aa2f73738370ac8df865cf3a1adac447391
|
[
"MIT"
] | null | null | null |
pages/themes/stringsNumbersVariablesComments/HW/tmp.py
|
WWWCourses/ProgressBG-Python-UniCredit-Slides
|
87539aa2f73738370ac8df865cf3a1adac447391
|
[
"MIT"
] | null | null | null |
# Demonstrates binary floating-point rounding: neither 0.1 nor 0.2 has an
# exact IEEE-754 double representation, so this prints 0.30000000000000004.
print(0.1+0.2)
| 14
| 14
| 0.642857
| 5
| 14
| 1.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 0
| 14
| 1
| 14
| 14
| 0.357143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
83f07ed6ca592c7989b8ed00db7104594d31365d
| 2,442
|
py
|
Python
|
fewshot/data/preprocessors/random_box_occluder.py
|
sebamenabar/oc-fewshot-public
|
2dad8c9f24cb1bfe72d8b13b33d28f6788d86ca8
|
[
"MIT"
] | null | null | null |
fewshot/data/preprocessors/random_box_occluder.py
|
sebamenabar/oc-fewshot-public
|
2dad8c9f24cb1bfe72d8b13b33d28f6788d86ca8
|
[
"MIT"
] | null | null | null |
fewshot/data/preprocessors/random_box_occluder.py
|
sebamenabar/oc-fewshot-public
|
2dad8c9f24cb1bfe72d8b13b33d28f6788d86ca8
|
[
"MIT"
] | null | null | null |
"""Occluding the image with a random box.
Author: Mengye Ren (mren@cs.toronto.edu)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import tensorflow as tf
from fewshot.data.preprocessors.preprocessor import Preprocessor
class RandomBoxOccluder(Preprocessor):
  """Occludes each image with a gray box covering 30% x 30% of the frame
  at a uniformly random location."""

  @tf.function
  def preprocess(self, inputs):
    """NHWC float format.

    Args:
      inputs: [N, H, W, C] image batch; converted to float32.

    Returns:
      [N, H, W, C] float32 images, one random box per image filled with
      the constant 0.5.
    """
    image = tf.image.convert_image_dtype(inputs, tf.float32)
    N = inputs.shape[0]
    W = inputs.shape[2]
    H = inputs.shape[1]
    C = inputs.shape[3]
    BW = int(W * 0.3)
    BH = int(H * 0.3)
    # Top-left (row, col) of each box, sampled so the box stays in frame.
    box_loc = tf.cast(
        tf.floor(
            tf.random.uniform([N, 2]) *
            tf.constant([H - BH, W - BW], dtype=tf.float32)), tf.int32)
    w_range = tf.reshape(tf.range(BW), [1, 1, -1, 1])
    h_range = tf.reshape(tf.range(BH), [1, -1, 1, 1])
    # [1, BH, BW, 2] grid of (row, col) offsets within a box.
    box_idx = tf.concat(
        [tf.tile(h_range, [1, 1, BW, 1]),
         tf.tile(w_range, [1, BH, 1, 1])],
        axis=-1)
    box_idx += tf.reshape(box_loc, [N, 1, 1, 2])
    # BUGFIX: batch indices must match box_idx's [N, BH, BW, ...] layout.
    # The previous tile multiples [1, BW, BH, 1] only concatenated cleanly
    # when the box was square (BW == BH).
    Nidx = tf.tile(tf.reshape(tf.range(N), [-1, 1, 1, 1]), [1, BH, BW, 1])
    box_idx = tf.concat([Nidx, box_idx], axis=-1)
    box_idx = tf.reshape(box_idx, [N * BH * BW, 3])
    mask = tf.scatter_nd(box_idx, tf.ones([N * BH * BW, C]), tf.shape(inputs))
    # Blend: keep the image outside the box, constant gray 0.5 inside.
    image = image * (1.0 - mask) + 0.5 * mask
    return image
class RandomBoxOccluderNoF(Preprocessor):
  """Same as RandomBoxOccluder, but without tf.function tracing (eager),
  which makes the op debuggable step by step."""

  # @tf.function
  def preprocess(self, inputs):
    """NHWC float format.

    Args:
      inputs: [N, H, W, C] image batch; converted to float32.

    Returns:
      [N, H, W, C] float32 images, one random box per image filled with
      the constant 0.5.
    """
    image = tf.image.convert_image_dtype(inputs, tf.float32)
    N = inputs.shape[0]
    W = inputs.shape[2]
    H = inputs.shape[1]
    C = inputs.shape[3]
    BW = int(W * 0.3)
    BH = int(H * 0.3)
    # Top-left (row, col) of each box, sampled so the box stays in frame.
    box_loc = tf.cast(
        tf.floor(
            tf.random.uniform([N, 2]) *
            tf.constant([H - BH, W - BW], dtype=tf.float32)), tf.int32)
    w_range = tf.reshape(tf.range(BW), [1, 1, -1, 1])
    h_range = tf.reshape(tf.range(BH), [1, -1, 1, 1])
    # [1, BH, BW, 2] grid of (row, col) offsets within a box.
    box_idx = tf.concat(
        [tf.tile(h_range, [1, 1, BW, 1]),
         tf.tile(w_range, [1, BH, 1, 1])],
        axis=-1)
    box_idx += tf.reshape(box_loc, [N, 1, 1, 2])
    # BUGFIX: batch indices must match box_idx's [N, BH, BW, ...] layout.
    # The previous tile multiples [1, BW, BH, 1] only concatenated cleanly
    # when the box was square (BW == BH).
    Nidx = tf.tile(tf.reshape(tf.range(N), [-1, 1, 1, 1]), [1, BH, BW, 1])
    box_idx = tf.concat([Nidx, box_idx], axis=-1)
    box_idx = tf.reshape(box_idx, [N * BH * BW, 3])
    mask = tf.scatter_nd(box_idx, tf.ones([N * BH * BW, C]), tf.shape(inputs))
    # Blend: keep the image outside the box, constant gray 0.5 inside.
    image = image * (1.0 - mask) + 0.5 * mask
    return image
| 33.916667
| 78
| 0.570844
| 401
| 2,442
| 3.379052
| 0.184539
| 0.038376
| 0.030996
| 0.023616
| 0.813284
| 0.813284
| 0.813284
| 0.813284
| 0.813284
| 0.813284
| 0
| 0.04886
| 0.2457
| 2,442
| 71
| 79
| 34.394366
| 0.686754
| 0.054054
| 0
| 0.877193
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035088
| false
| 0
| 0.052632
| 0
| 0.157895
| 0.017544
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f71a3eaeece4ab1511448b596d52d6ce7165fb16
| 34
|
py
|
Python
|
06_01_name_conflict.py
|
simonmonk/prog_pico_ed1
|
36e70f88ea7dc73e75399cd390d1cc2023843971
|
[
"MIT"
] | 6
|
2021-05-08T13:19:33.000Z
|
2022-03-20T08:29:44.000Z
|
06_01_name_conflict.py
|
simonmonk/prog_pico_ed1
|
36e70f88ea7dc73e75399cd390d1cc2023843971
|
[
"MIT"
] | 1
|
2021-03-05T20:27:15.000Z
|
2021-11-17T09:07:43.000Z
|
06_01_name_conflict.py
|
simonmonk/prog_pico_ed1
|
36e70f88ea7dc73e75399cd390d1cc2023843971
|
[
"MIT"
] | 2
|
2021-07-02T15:19:37.000Z
|
2021-10-06T00:53:25.000Z
|
# Deliberate name-conflict demo: this def shadows the builtin print
# within this module.
def print():
    pass
# Calls the no-op above, not the builtin — so nothing is printed.
print()
| 8.5
| 12
| 0.5
| 4
| 34
| 4.25
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.352941
| 34
| 4
| 13
| 8.5
| 0.772727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0.333333
| 0
| 0
| 0.333333
| 0.666667
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 1
|
0
| 7
|
54184351d266adefecda88e66f94576a7a4d10ea
| 50,494
|
py
|
Python
|
codegen/google_proto/struct_map_pb2.py
|
QratorLabs/ritfest2016
|
cddaaa9e827f5315d2e426c083029124649d6f50
|
[
"MIT"
] | null | null | null |
codegen/google_proto/struct_map_pb2.py
|
QratorLabs/ritfest2016
|
cddaaa9e827f5315d2e426c083029124649d6f50
|
[
"MIT"
] | null | null | null |
codegen/google_proto/struct_map_pb2.py
|
QratorLabs/ritfest2016
|
cddaaa9e827f5315d2e426c083029124649d6f50
|
[
"MIT"
] | null | null | null |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: struct_map.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='struct_map.proto',
package='',
serialized_pb=_b('\n\x10struct_map.proto\"\x83\x19\n\tStructMap\x12\x19\n\x11long_name_foo_bar\x18\x01 \x02(\t\x12\x19\n\x11long_key_bar_name\x18\x02 \x02(\t\x12\x19\n\x11long_foo_key_name\x18\x03 \x02(\t\x12\x18\n\x10key_foo_bar_name\x18\x04 \x02(\t\x12\x19\n\x11name_bar_long_foo\x18\x05 \x02(\t\x12\x18\n\x10\x62\x61r_foo_key_long\x18\x06 \x02(\t\x12\x19\n\x11\x62\x61r_foo_long_name\x18\x07 \x02(\t\x12\x19\n\x11long_foo_name_bar\x18\x08 \x02(\t\x12\x19\n\x11key_foo_name_long\x18\t \x02(\t\x12\x19\n\x11name_long_foo_key\x18\n \x02(\t\x12\x19\n\x11\x66oo_bar_long_name\x18\x0b \x02(\t\x12\x19\n\x11key_bar_name_long\x18\x0c \x02(\t\x12\x19\n\x11\x62\x61r_name_long_foo\x18\r \x02(\t\x12\x19\n\x11key_name_bar_long\x18\x0e \x02(\t\x12\x18\n\x10\x66oo_name_bar_key\x18\x0f \x02(\t\x12\x19\n\x11long_key_foo_name\x18\x10 \x02(\t\x12\x19\n\x11name_foo_long_key\x18\x11 \x02(\t\x12\x18\n\x10key_name_bar_foo\x18\x12 \x02(\t\x12\x19\n\x11\x62\x61r_long_name_key\x18\x13 \x02(\t\x12\x18\n\x10long_bar_foo_key\x18\x14 \x02(\t\x12\x18\n\x10\x62\x61r_long_key_foo\x18\x15 \x02(\t\x12\x18\n\x10\x66oo_bar_long_key\x18\x16 \x02(\t\x12\x18\n\x10name_foo_key_bar\x18\x17 \x02(\t\x12\x19\n\x11name_key_long_bar\x18\x18 \x02(\t\x12\x18\n\x10name_key_bar_foo\x18\x19 \x02(\t\x12\x19\n\x11key_bar_long_name\x18\x1a \x02(\t\x12\x19\n\x11long_key_name_bar\x18\x1b \x02(\t\x12\x19\n\x11key_name_long_foo\x18\x1c \x02(\t\x12\x18\n\x10\x66oo_name_key_bar\x18\x1d \x02(\t\x12\x19\n\x11key_name_long_bar\x18\x1e \x02(\t\x12\x19\n\x11\x66oo_name_bar_long\x18\x1f \x02(\t\x12\x19\n\x11name_long_key_foo\x18 \x02(\t\x12\x19\n\x11name_long_key_bar\x18! 
\x02(\t\x12\x19\n\x11name_bar_foo_long\x18\" \x02(\t\x12\x19\n\x11long_bar_name_foo\x18# \x02(\t\x12\x18\n\x10\x62\x61r_key_foo_name\x18$ \x02(\t\x12\x19\n\x11name_foo_key_long\x18% \x02(\t\x12\x18\n\x10\x66oo_long_key_bar\x18& \x02(\t\x12\x18\n\x10key_foo_name_bar\x18\' \x02(\t\x12\x19\n\x11key_name_foo_long\x18( \x02(\t\x12\x19\n\x11long_foo_bar_name\x18) \x02(\t\x12\x19\n\x11\x66oo_name_key_long\x18* \x02(\t\x12\x18\n\x10\x62\x61r_key_long_foo\x18+ \x02(\t\x12\x18\n\x10\x62\x61r_foo_long_key\x18, \x02(\t\x12\x19\n\x11key_long_foo_name\x18- \x02(\t\x12\x19\n\x11name_foo_long_bar\x18. \x02(\t\x12\x19\n\x11\x66oo_key_name_long\x18/ \x02(\t\x12\x19\n\x11long_name_bar_foo\x18\x30 \x02(\t\x12\x19\n\x11\x66oo_bar_name_long\x18\x31 \x02(\t\x12\x19\n\x11\x66oo_long_key_name\x18\x32 \x02(\t\x12\x18\n\x10key_bar_foo_name\x18\x33 \x02(\t\x12\x18\n\x10\x66oo_bar_key_name\x18\x34 \x02(\t\x12\x19\n\x11\x62\x61r_name_foo_long\x18\x35 \x02(\t\x12\x19\n\x11name_bar_key_long\x18\x36 \x02(\t\x12\x18\n\x10key_bar_foo_long\x18\x37 \x02(\t\x12\x18\n\x10long_key_bar_foo\x18\x38 \x02(\t\x12\x18\n\x10key_foo_long_bar\x18\x39 \x02(\t\x12\x19\n\x11\x62\x61r_foo_name_long\x18: \x02(\t\x12\x18\n\x10key_long_bar_foo\x18; \x02(\t\x12\x19\n\x11long_bar_foo_name\x18< \x02(\t\x12\x19\n\x11\x66oo_long_name_bar\x18= \x02(\t\x12\x19\n\x11long_bar_key_name\x18> \x02(\t\x12\x19\n\x11long_name_foo_key\x18? 
\x02(\t\x12\x19\n\x11long_foo_name_key\x18@ \x02(\t\x12\x19\n\x11name_long_bar_key\x18\x41 \x02(\t\x12\x18\n\x10long_key_foo_bar\x18\x42 \x02(\t\x12\x19\n\x11name_key_foo_long\x18\x43 \x02(\t\x12\x19\n\x11name_key_long_foo\x18\x44 \x02(\t\x12\x18\n\x10key_name_foo_bar\x18\x45 \x02(\t\x12\x18\n\x10long_foo_key_bar\x18\x46 \x02(\t\x12\x19\n\x11\x66oo_name_long_bar\x18G \x02(\t\x12\x19\n\x11name_long_bar_foo\x18H \x02(\t\x12\x19\n\x11long_name_bar_key\x18I \x02(\t\x12\x19\n\x11\x62\x61r_long_foo_name\x18J \x02(\t\x12\x19\n\x11key_long_bar_name\x18K \x02(\t\x12\x19\n\x11name_key_bar_long\x18L \x02(\t\x12\x19\n\x11\x66oo_long_name_key\x18M \x02(\t\x12\x18\n\x10\x62\x61r_long_foo_key\x18N \x02(\t\x12\x19\n\x11\x62\x61r_key_name_long\x18O \x02(\t\x12\x18\n\x10\x62\x61r_foo_key_name\x18P \x02(\t\x12\x19\n\x11key_long_name_bar\x18Q \x02(\t\x12\x18\n\x10long_bar_key_foo\x18R \x02(\t\x12\x19\n\x11\x66oo_name_long_key\x18S \x02(\t\x12\x18\n\x10key_bar_long_foo\x18T \x02(\t\x12\x19\n\x11long_bar_name_key\x18U \x02(\t\x12\x19\n\x11name_bar_long_key\x18V \x02(\t\x12\x19\n\x11key_foo_long_name\x18W \x02(\t\x12\x18\n\x10\x66oo_key_bar_long\x18X \x02(\t\x12\x18\n\x10\x66oo_bar_key_long\x18Y \x02(\t\x12\x19\n\x11\x62\x61r_long_key_name\x18Z \x02(\t\x12\x18\n\x10name_bar_key_foo\x18[ \x02(\t\x12\x19\n\x11\x62\x61r_name_key_long\x18\\ \x02(\t\x12\x19\n\x11\x62\x61r_name_long_key\x18] \x02(\t\x12\x18\n\x10\x66oo_key_name_bar\x18^ \x02(\t\x12\x18\n\x10name_bar_foo_key\x18_ \x02(\t\x12\x19\n\x11long_name_key_foo\x18` \x02(\t\x12\x19\n\x11long_name_key_bar\x18\x61 \x02(\t\x12\x19\n\x11\x66oo_key_long_name\x18\x62 \x02(\t\x12\x18\n\x10\x66oo_key_long_bar\x18\x63 \x02(\t\x12\x19\n\x11\x66oo_long_bar_name\x18\x64 \x02(\t\x12\x19\n\x11key_long_name_foo\x18\x65 \x02(\t\x12\x19\n\x11\x62\x61r_key_long_name\x18\x66 \x02(\t\x12\x18\n\x10\x66oo_bar_name_key\x18g \x02(\t\x12\x18\n\x10key_bar_name_foo\x18h \x02(\t\x12\x18\n\x10\x66oo_long_bar_key\x18i \x02(\t\x12\x18\n\x10name_key_foo_bar\x18j 
\x02(\t\x12\x18\n\x10\x62\x61r_name_key_foo\x18k \x02(\t\x12\x19\n\x11long_key_name_foo\x18l \x02(\t\x12\x19\n\x11name_foo_bar_long\x18m \x02(\t\x12\x19\n\x11\x62\x61r_long_name_foo\x18n \x02(\t\x12\x18\n\x10\x66oo_key_bar_name\x18o \x02(\t\x12\x18\n\x10name_foo_bar_key\x18p \x02(\t\x12\x18\n\x10\x62\x61r_key_name_foo\x18q \x02(\t\x12\x19\n\x11name_long_foo_bar\x18r \x02(\t\x12\x18\n\x10long_foo_bar_key\x18s \x02(\t\x12\x18\n\x10key_foo_bar_long\x18t \x02(\t\x12\x18\n\x10\x62\x61r_name_foo_key\x18u \x02(\t\x12\x18\n\x10\x62\x61r_foo_name_key\x18v \x02(\t\x12\x18\n\x10key_long_foo_bar\x18w \x02(\t\x12\x18\n\x10\x62\x61r_key_foo_long\x18x \x02(\t')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_STRUCTMAP = _descriptor.Descriptor(
name='StructMap',
full_name='StructMap',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='long_name_foo_bar', full_name='StructMap.long_name_foo_bar', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='long_key_bar_name', full_name='StructMap.long_key_bar_name', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='long_foo_key_name', full_name='StructMap.long_foo_key_name', index=2,
number=3, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key_foo_bar_name', full_name='StructMap.key_foo_bar_name', index=3,
number=4, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name_bar_long_foo', full_name='StructMap.name_bar_long_foo', index=4,
number=5, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bar_foo_key_long', full_name='StructMap.bar_foo_key_long', index=5,
number=6, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bar_foo_long_name', full_name='StructMap.bar_foo_long_name', index=6,
number=7, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='long_foo_name_bar', full_name='StructMap.long_foo_name_bar', index=7,
number=8, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key_foo_name_long', full_name='StructMap.key_foo_name_long', index=8,
number=9, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name_long_foo_key', full_name='StructMap.name_long_foo_key', index=9,
number=10, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='foo_bar_long_name', full_name='StructMap.foo_bar_long_name', index=10,
number=11, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key_bar_name_long', full_name='StructMap.key_bar_name_long', index=11,
number=12, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bar_name_long_foo', full_name='StructMap.bar_name_long_foo', index=12,
number=13, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key_name_bar_long', full_name='StructMap.key_name_bar_long', index=13,
number=14, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='foo_name_bar_key', full_name='StructMap.foo_name_bar_key', index=14,
number=15, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='long_key_foo_name', full_name='StructMap.long_key_foo_name', index=15,
number=16, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name_foo_long_key', full_name='StructMap.name_foo_long_key', index=16,
number=17, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key_name_bar_foo', full_name='StructMap.key_name_bar_foo', index=17,
number=18, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bar_long_name_key', full_name='StructMap.bar_long_name_key', index=18,
number=19, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='long_bar_foo_key', full_name='StructMap.long_bar_foo_key', index=19,
number=20, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bar_long_key_foo', full_name='StructMap.bar_long_key_foo', index=20,
number=21, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='foo_bar_long_key', full_name='StructMap.foo_bar_long_key', index=21,
number=22, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name_foo_key_bar', full_name='StructMap.name_foo_key_bar', index=22,
number=23, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name_key_long_bar', full_name='StructMap.name_key_long_bar', index=23,
number=24, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name_key_bar_foo', full_name='StructMap.name_key_bar_foo', index=24,
number=25, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key_bar_long_name', full_name='StructMap.key_bar_long_name', index=25,
number=26, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='long_key_name_bar', full_name='StructMap.long_key_name_bar', index=26,
number=27, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key_name_long_foo', full_name='StructMap.key_name_long_foo', index=27,
number=28, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='foo_name_key_bar', full_name='StructMap.foo_name_key_bar', index=28,
number=29, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key_name_long_bar', full_name='StructMap.key_name_long_bar', index=29,
number=30, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='foo_name_bar_long', full_name='StructMap.foo_name_bar_long', index=30,
number=31, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name_long_key_foo', full_name='StructMap.name_long_key_foo', index=31,
number=32, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name_long_key_bar', full_name='StructMap.name_long_key_bar', index=32,
number=33, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name_bar_foo_long', full_name='StructMap.name_bar_foo_long', index=33,
number=34, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='long_bar_name_foo', full_name='StructMap.long_bar_name_foo', index=34,
number=35, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bar_key_foo_name', full_name='StructMap.bar_key_foo_name', index=35,
number=36, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name_foo_key_long', full_name='StructMap.name_foo_key_long', index=36,
number=37, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='foo_long_key_bar', full_name='StructMap.foo_long_key_bar', index=37,
number=38, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key_foo_name_bar', full_name='StructMap.key_foo_name_bar', index=38,
number=39, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key_name_foo_long', full_name='StructMap.key_name_foo_long', index=39,
number=40, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='long_foo_bar_name', full_name='StructMap.long_foo_bar_name', index=40,
number=41, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='foo_name_key_long', full_name='StructMap.foo_name_key_long', index=41,
number=42, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bar_key_long_foo', full_name='StructMap.bar_key_long_foo', index=42,
number=43, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bar_foo_long_key', full_name='StructMap.bar_foo_long_key', index=43,
number=44, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key_long_foo_name', full_name='StructMap.key_long_foo_name', index=44,
number=45, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name_foo_long_bar', full_name='StructMap.name_foo_long_bar', index=45,
number=46, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='foo_key_name_long', full_name='StructMap.foo_key_name_long', index=46,
number=47, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='long_name_bar_foo', full_name='StructMap.long_name_bar_foo', index=47,
number=48, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='foo_bar_name_long', full_name='StructMap.foo_bar_name_long', index=48,
number=49, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='foo_long_key_name', full_name='StructMap.foo_long_key_name', index=49,
number=50, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key_bar_foo_name', full_name='StructMap.key_bar_foo_name', index=50,
number=51, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='foo_bar_key_name', full_name='StructMap.foo_bar_key_name', index=51,
number=52, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bar_name_foo_long', full_name='StructMap.bar_name_foo_long', index=52,
number=53, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name_bar_key_long', full_name='StructMap.name_bar_key_long', index=53,
number=54, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key_bar_foo_long', full_name='StructMap.key_bar_foo_long', index=54,
number=55, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='long_key_bar_foo', full_name='StructMap.long_key_bar_foo', index=55,
number=56, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key_foo_long_bar', full_name='StructMap.key_foo_long_bar', index=56,
number=57, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bar_foo_name_long', full_name='StructMap.bar_foo_name_long', index=57,
number=58, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key_long_bar_foo', full_name='StructMap.key_long_bar_foo', index=58,
number=59, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='long_bar_foo_name', full_name='StructMap.long_bar_foo_name', index=59,
number=60, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='foo_long_name_bar', full_name='StructMap.foo_long_name_bar', index=60,
number=61, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='long_bar_key_name', full_name='StructMap.long_bar_key_name', index=61,
number=62, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='long_name_foo_key', full_name='StructMap.long_name_foo_key', index=62,
number=63, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='long_foo_name_key', full_name='StructMap.long_foo_name_key', index=63,
number=64, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name_long_bar_key', full_name='StructMap.name_long_bar_key', index=64,
number=65, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='long_key_foo_bar', full_name='StructMap.long_key_foo_bar', index=65,
number=66, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name_key_foo_long', full_name='StructMap.name_key_foo_long', index=66,
number=67, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name_key_long_foo', full_name='StructMap.name_key_long_foo', index=67,
number=68, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key_name_foo_bar', full_name='StructMap.key_name_foo_bar', index=68,
number=69, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='long_foo_key_bar', full_name='StructMap.long_foo_key_bar', index=69,
number=70, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='foo_name_long_bar', full_name='StructMap.foo_name_long_bar', index=70,
number=71, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name_long_bar_foo', full_name='StructMap.name_long_bar_foo', index=71,
number=72, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='long_name_bar_key', full_name='StructMap.long_name_bar_key', index=72,
number=73, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bar_long_foo_name', full_name='StructMap.bar_long_foo_name', index=73,
number=74, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key_long_bar_name', full_name='StructMap.key_long_bar_name', index=74,
number=75, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name_key_bar_long', full_name='StructMap.name_key_bar_long', index=75,
number=76, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='foo_long_name_key', full_name='StructMap.foo_long_name_key', index=76,
number=77, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bar_long_foo_key', full_name='StructMap.bar_long_foo_key', index=77,
number=78, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bar_key_name_long', full_name='StructMap.bar_key_name_long', index=78,
number=79, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bar_foo_key_name', full_name='StructMap.bar_foo_key_name', index=79,
number=80, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key_long_name_bar', full_name='StructMap.key_long_name_bar', index=80,
number=81, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='long_bar_key_foo', full_name='StructMap.long_bar_key_foo', index=81,
number=82, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='foo_name_long_key', full_name='StructMap.foo_name_long_key', index=82,
number=83, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key_bar_long_foo', full_name='StructMap.key_bar_long_foo', index=83,
number=84, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='long_bar_name_key', full_name='StructMap.long_bar_name_key', index=84,
number=85, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name_bar_long_key', full_name='StructMap.name_bar_long_key', index=85,
number=86, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key_foo_long_name', full_name='StructMap.key_foo_long_name', index=86,
number=87, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='foo_key_bar_long', full_name='StructMap.foo_key_bar_long', index=87,
number=88, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='foo_bar_key_long', full_name='StructMap.foo_bar_key_long', index=88,
number=89, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bar_long_key_name', full_name='StructMap.bar_long_key_name', index=89,
number=90, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name_bar_key_foo', full_name='StructMap.name_bar_key_foo', index=90,
number=91, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bar_name_key_long', full_name='StructMap.bar_name_key_long', index=91,
number=92, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bar_name_long_key', full_name='StructMap.bar_name_long_key', index=92,
number=93, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='foo_key_name_bar', full_name='StructMap.foo_key_name_bar', index=93,
number=94, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name_bar_foo_key', full_name='StructMap.name_bar_foo_key', index=94,
number=95, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='long_name_key_foo', full_name='StructMap.long_name_key_foo', index=95,
number=96, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='long_name_key_bar', full_name='StructMap.long_name_key_bar', index=96,
number=97, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='foo_key_long_name', full_name='StructMap.foo_key_long_name', index=97,
number=98, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='foo_key_long_bar', full_name='StructMap.foo_key_long_bar', index=98,
number=99, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='foo_long_bar_name', full_name='StructMap.foo_long_bar_name', index=99,
number=100, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key_long_name_foo', full_name='StructMap.key_long_name_foo', index=100,
number=101, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bar_key_long_name', full_name='StructMap.bar_key_long_name', index=101,
number=102, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='foo_bar_name_key', full_name='StructMap.foo_bar_name_key', index=102,
number=103, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key_bar_name_foo', full_name='StructMap.key_bar_name_foo', index=103,
number=104, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='foo_long_bar_key', full_name='StructMap.foo_long_bar_key', index=104,
number=105, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name_key_foo_bar', full_name='StructMap.name_key_foo_bar', index=105,
number=106, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bar_name_key_foo', full_name='StructMap.bar_name_key_foo', index=106,
number=107, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='long_key_name_foo', full_name='StructMap.long_key_name_foo', index=107,
number=108, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name_foo_bar_long', full_name='StructMap.name_foo_bar_long', index=108,
number=109, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bar_long_name_foo', full_name='StructMap.bar_long_name_foo', index=109,
number=110, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='foo_key_bar_name', full_name='StructMap.foo_key_bar_name', index=110,
number=111, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name_foo_bar_key', full_name='StructMap.name_foo_bar_key', index=111,
number=112, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bar_key_name_foo', full_name='StructMap.bar_key_name_foo', index=112,
number=113, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name_long_foo_bar', full_name='StructMap.name_long_foo_bar', index=113,
number=114, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='long_foo_bar_key', full_name='StructMap.long_foo_bar_key', index=114,
number=115, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key_foo_bar_long', full_name='StructMap.key_foo_bar_long', index=115,
number=116, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bar_name_foo_key', full_name='StructMap.bar_name_foo_key', index=116,
number=117, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bar_foo_name_key', full_name='StructMap.bar_foo_name_key', index=117,
number=118, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key_long_foo_bar', full_name='StructMap.key_long_foo_bar', index=118,
number=119, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bar_key_foo_long', full_name='StructMap.bar_key_foo_long', index=119,
number=120, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=21,
serialized_end=3224,
)
# Register the StructMap descriptor with the file descriptor, then build the
# concrete message class through the protobuf reflection machinery.  This is
# standard protoc-generated boilerplate (see @@protoc_insertion_point markers).
DESCRIPTOR.message_types_by_name['StructMap'] = _STRUCTMAP
StructMap = _reflection.GeneratedProtocolMessageType('StructMap', (_message.Message,), dict(
  DESCRIPTOR = _STRUCTMAP,
  __module__ = 'struct_map_pb2'
  # @@protoc_insertion_point(class_scope:StructMap)
  ))
# Make the generated class discoverable via the default symbol database.
_sym_db.RegisterMessage(StructMap)
# @@protoc_insertion_point(module_scope)
| 56.042175
| 5,671
| 0.721868
| 7,734
| 50,494
| 4.38001
| 0.037885
| 0.085255
| 0.060723
| 0.042509
| 0.880649
| 0.877402
| 0.765343
| 0.742258
| 0.7089
| 0.687085
| 0
| 0.055957
| 0.138908
| 50,494
| 900
| 5,672
| 56.104444
| 0.723137
| 0.004
| 0
| 0.685585
| 1
| 0.001135
| 0.15388
| 0.100127
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.00681
| 0
| 0.00681
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5811de6269babc2789d68b1fa6e043a3cb4f957c
| 23,395
|
py
|
Python
|
tests/device/cli/test_otp.py
|
gbdlin/yubikey-manager
|
7fb59f633ea1a782d401ff3f03b1f0a859f5fb16
|
[
"BSD-2-Clause"
] | null | null | null |
tests/device/cli/test_otp.py
|
gbdlin/yubikey-manager
|
7fb59f633ea1a782d401ff3f03b1f0a859f5fb16
|
[
"BSD-2-Clause"
] | 1
|
2021-08-10T20:03:13.000Z
|
2021-08-10T20:03:13.000Z
|
tests/device/cli/test_otp.py
|
gbdlin/yubikey-manager
|
7fb59f633ea1a782d401ff3f03b1f0a859f5fb16
|
[
"BSD-2-Clause"
] | null | null | null |
# vim: set fileencoding=utf-8 :
# Copyright (c) 2018 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from yubikit.management import CAPABILITY
from .. import condition
from time import sleep
import re
import pytest
@pytest.fixture(autouse=True)
@condition.capability(CAPABILITY.OTP)
def ensure_otp():
    """Module-wide guard fixture for the OTP application.

    Applied automatically to every test in this module; the
    ``condition.capability(CAPABILITY.OTP)`` decorator presumably skips the
    test when the connected key lacks the OTP capability — its exact
    behavior is defined in the project's ``condition`` helper, not here.
    The body is intentionally empty: only the decorators do work.
    """
    pass
class TestSlotStatus:
    """Basic checks for `ykman otp info` and `ykman otp swap`."""

    def test_ykman_otp_info(self, ykman_cli):
        # Both OTP slots must be listed in the info output.
        status = ykman_cli("otp", "info").output
        for expected in ("Slot 1:", "Slot 2:"):
            assert expected in status

    def test_ykman_swap_slots(self, ykman_cli):
        # Program slot 2 first if nothing is programmed, so the swap
        # actually moves a credential.
        status = ykman_cli("otp", "info").output
        if "programmed" not in status:
            ykman_cli("otp", "static", "2", "incredible")
        # Swap twice so the slots end up in their original order.
        for _ in range(2):
            result = ykman_cli("otp", "swap", "-f").output
            assert "Swapping slots..." in result

    @condition.fips(False)
    def test_ykman_otp_info_does_not_indicate_fips_mode_for_non_fips_key(
        self, ykman_cli
    ):  # noqa: E501
        # A non-FIPS key must not advertise a FIPS approved mode line.
        status = ykman_cli("otp", "info").output
        assert "FIPS Approved Mode:" not in status
class TestReclaimTimeout:
    """Slot writes before and after the USB interface reclaim timeout."""

    def test_update_after_reclaim(self, ykman_cli):
        # Make sure at least one slot is programmed before swapping.
        status = ykman_cli("otp", "info").output
        if "programmed" not in status:
            ykman_cli("otp", "static", "2", "incredible")
        # Two swaps back-to-back, then again after the reclaim window.
        for _ in range(2):
            ykman_cli("otp", "swap", "-f")
        sleep(4)  # Ensure reclaim
        for _ in range(2):
            ykman_cli("otp", "swap", "-f")
class TestSlotStaticPassword:
    """Tests for `ykman otp static` (static password in slot 2)."""

    # A fixed 32-character password, within the 38-character limit
    # (39 characters is rejected below; 38 is accepted).
    _VALID_PW = "higngdukgerjktbbikrhirngtlkkttkb"

    @pytest.fixture(autouse=True)
    def delete_slot(self, ykman_cli):
        """Ensure slot 2 is empty before and after each test in this class."""

        def wipe():
            # Deleting an already-empty slot exits non-zero; ignore that.
            try:
                ykman_cli("otp", "delete", "2", "-f")
            except SystemExit:
                pass

        wipe()
        yield None
        wipe()

    def _assert_slot_2_programmed(self, ykman_cli):
        # Helper: slot 2 must be reported as programmed by `otp info`.
        assert "Slot 2: programmed" in ykman_cli("otp", "info").output

    def test_too_long(self, ykman_cli):
        # 39 characters: one over the static password limit.
        with pytest.raises(SystemExit):
            ykman_cli("otp", "static", "2", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")

    def test_unsupported_chars(self, ykman_cli):
        # Characters not representable in the default keyboard layout.
        for bad_char in ("ö", "@"):
            with pytest.raises(ValueError):
                ykman_cli("otp", "static", "2", bad_char)

    def test_provide_valid_pw(self, ykman_cli):
        ykman_cli("otp", "static", "2", self._VALID_PW)
        self._assert_slot_2_programmed(ykman_cli)

    def test_provide_valid_pw_prompt(self, ykman_cli):
        # Password supplied on stdin, followed by a 'y' confirmation.
        ykman_cli("otp", "static", "2", input=self._VALID_PW + "\ny\n")
        self._assert_slot_2_programmed(ykman_cli)

    def test_generate_pw_too_long(self, ykman_cli):
        with pytest.raises(SystemExit):
            ykman_cli("otp", "static", "2", "--generate", "--length", "39")

    def test_generate_pw_blank_length(self, ykman_cli):
        # --length without a value is a usage error.
        with pytest.raises(SystemExit):
            ykman_cli("otp", "static", "2", "--generate", "--length")

    def test_generate_zero_length(self, ykman_cli):
        with pytest.raises(SystemExit):
            ykman_cli("otp", "static", "2", "--generate", "--length", "0")

    def test_generate_pw(self, ykman_cli):
        ykman_cli("otp", "static", "2", "--generate", "--length", "38")
        self._assert_slot_2_programmed(ykman_cli)

    def test_generate_pw_default_length(self, ykman_cli):
        ykman_cli("otp", "static", "2", "--generate")
        self._assert_slot_2_programmed(ykman_cli)

    def test_us_scancodes(self, ykman_cli):
        ykman_cli("otp", "static", "2", "abcABC123", "--keyboard-layout", "US")
        ykman_cli("otp", "static", "2", "@!)", "-f", "--keyboard-layout", "US")

    def test_de_scancodes(self, ykman_cli):
        ykman_cli("otp", "static", "2", "abcABC123", "--keyboard-layout", "DE")
        ykman_cli("otp", "static", "2", "Üßö", "-f", "--keyboard-layout", "DE")

    def test_overwrite_prompt(self, ykman_cli):
        # Overwriting a programmed slot without -f must abort...
        ykman_cli("otp", "static", "2", "bbb")
        with pytest.raises(SystemExit):
            ykman_cli("otp", "static", "2", "ccc")
        # ...but succeed when forced.
        ykman_cli("otp", "static", "2", "ddd", "-f")
        self._assert_slot_2_programmed(ykman_cli)
class TestSlotProgramming:
@pytest.fixture(autouse=True)
def delete_slot(self, ykman_cli):
try:
ykman_cli("otp", "delete", "2", "-f")
except SystemExit:
pass
yield None
try:
ykman_cli("otp", "delete", "2", "-f")
except SystemExit:
pass
    def test_ykman_program_otp_slot_2(self, ykman_cli):
        """Program a Yubico OTP credential into slot 2 with explicit
        public ID, private ID and AES key, forced with -f."""
        ykman_cli(
            "otp",
            "yubiotp",
            "2",
            "--public-id",
            "vvccccfiluij",
            "--private-id",
            "267e0a88949b",
            "--key",
            "b8e31ab90bb8830e3c1fe1b483a8e0d4",
            "-f",
        )
        self._check_slot_2_programmed(ykman_cli)
    def test_ykman_program_otp_slot_2_prompt(self, ykman_cli):
        """Program slot 2 interactively, feeding public ID, private ID and
        key on stdin, then 'n' and 'y' for the trailing prompts
        (presumably upload/confirm — verify against the CLI prompts)."""
        ykman_cli(
            "otp",
            "yubiotp",
            "2",
            input="vvccccfiluij\n"
            "267e0a88949b\n"
            "b8e31ab90bb8830e3c1fe1b483a8e0d4\n"
            "n\n"
            "y\n",
        )
        self._check_slot_2_programmed(ykman_cli)
    def test_ykman_program_otp_slot_2_options(self, ykman_cli):
        """With every value passed as an option and -f given, the command
        must print nothing at all."""
        output = ykman_cli(
            "otp",
            "yubiotp",
            "2",
            "--public-id",
            "vvccccfiluij",
            "--private-id",
            "267e0a88949b",
            "--key",
            "b8e31ab90bb8830e3c1fe1b483a8e0d4",
            "-f",
        ).output
        # No informational messages expected when nothing is generated.
        assert "" == output
        self._check_slot_2_programmed(ykman_cli)
    def test_ykman_program_otp_slot_2_generated_all(self, ykman_cli):
        """Derive/generate every value (serial public ID, random private ID,
        random key) and check each generation notice is printed."""
        output = ykman_cli(
            "otp",
            "yubiotp",
            "2",
            "-f",
            "--serial-public-id",
            "--generate-private-id",
            "--generate-key",
        ).output
        assert "Using YubiKey serial as public ID" in output
        assert "Using a randomly generated private ID" in output
        assert "Using a randomly generated secret key" in output
        self._check_slot_2_programmed(ykman_cli)
    def test_ykman_program_otp_slot_2_serial_public_id(self, ykman_cli):
        """Only the public ID is derived from the serial; the other two
        values are explicit, so no generation notices for them."""
        output = ykman_cli(
            "otp",
            "yubiotp",
            "2",
            "--serial-public-id",
            "--private-id",
            "267e0a88949b",
            "--key",
            "b8e31ab90bb8830e3c1fe1b483a8e0d4",
            "-f",
        ).output
        assert "Using YubiKey serial as public ID" in output
        assert "generated private ID" not in output
        assert "generated secret key" not in output
        self._check_slot_2_programmed(ykman_cli)
def test_invalid_public_id(self, ykman_cli):
with pytest.raises(SystemExit):
ykman_cli("otp", "yubiotp", "-P", "imnotmodhex!")
def test_ykman_program_otp_slot_2_generated_private_id(self, ykman_cli):
output = ykman_cli(
"otp",
"yubiotp",
"2",
"--public-id",
"vvccccfiluij",
"--generate-private-id",
"--key",
"b8e31ab90bb8830e3c1fe1b483a8e0d4",
"-f",
).output
assert "serial as public ID" not in output
assert "Using a randomly generated private ID" in output
assert "generated secret key" not in output
self._check_slot_2_programmed(ykman_cli)
def test_ykman_program_otp_slot_2_generated_secret_key(self, ykman_cli):
output = ykman_cli(
"otp",
"yubiotp",
"2",
"--public-id",
"vvccccfiluij",
"--private-id",
"267e0a88949b",
"--generate-key",
"-f",
).output
assert "serial as public ID" not in output
assert "generated private ID" not in output
assert "Using a randomly generated secret key" in output
self._check_slot_2_programmed(ykman_cli)
def test_ykman_program_otp_slot_2_serial_id_conflicts_public_id(self, ykman_cli):
with pytest.raises(SystemExit):
ykman_cli(
"otp",
"yubiotp",
"2",
"-f",
"--serial-public-id",
"--public-id",
"vvccccfiluij",
"--generate-private-id",
"--generate-key",
)
self._check_slot_2_not_programmed(ykman_cli)
def test_ykman_program_otp_slot_2_generate_id_conflicts_private_id(
self, ykman_cli
): # noqa: E501
with pytest.raises(SystemExit):
ykman_cli(
"otp",
"yubiotp",
"2",
"-f",
"--serial-public-id",
"--generate-private-id",
"--private-id",
"267e0a88949b",
"--generate-key",
)
self._check_slot_2_not_programmed(ykman_cli)
def test_ykman_program_otp_slot_2_generate_key_conflicts_key(self, ykman_cli):
with pytest.raises(SystemExit):
ykman_cli(
"otp",
"yubiotp",
"2",
"-f",
"--serial-public-id",
"--generate-private-id",
"--generate-key",
"--key",
"b8e31ab90bb8830e3c1fe1b483a8e0d4",
)
self._check_slot_2_not_programmed(ykman_cli)
def test_ykman_program_chalresp_slot_2(self, ykman_cli):
ykman_cli("otp", "chalresp", "2", "abba", "-f")
self._check_slot_2_programmed(ykman_cli)
ykman_cli("otp", "chalresp", "2", "--totp", "abba", "-f")
self._check_slot_2_programmed(ykman_cli)
ykman_cli("otp", "chalresp", "2", "--touch", "abba", "-f")
self._check_slot_2_programmed(ykman_cli)
def test_ykman_program_chalresp_slot_2_force_fails_without_key(self, ykman_cli):
with pytest.raises(SystemExit):
ykman_cli("otp", "chalresp", "2", "-f")
self._check_slot_2_not_programmed(ykman_cli)
def test_ykman_program_chalresp_slot_2_generated(self, ykman_cli):
output = ykman_cli("otp", "chalresp", "2", "-f", "-g").output
assert re.match("Using a randomly generated key: [0-9a-f]{40}$", output)
self._check_slot_2_programmed(ykman_cli)
def test_ykman_program_chalresp_slot_2_generated_fails_if_also_given(
self, ykman_cli
): # noqa: E501
with pytest.raises(SystemExit):
ykman_cli("otp", "chalresp", "2", "-f", "-g", "abababab")
def test_ykman_program_chalresp_slot_2_prompt(self, ykman_cli):
ykman_cli("otp", "chalresp", "2", input="abba\ny\n")
self._check_slot_2_programmed(ykman_cli)
def test_ykman_program_hotp_slot_2(self, ykman_cli):
ykman_cli("otp", "hotp", "2", "27KIZZE3SD7GE2FVJJBAXEI3I6RRTPGM", "-f")
self._check_slot_2_programmed(ykman_cli)
def test_ykman_program_hotp_slot_2_prompt(self, ykman_cli):
ykman_cli("otp", "hotp", "2", input="abba\ny\n")
self._check_slot_2_programmed(ykman_cli)
def test_update_settings_enter_slot_2(self, ykman_cli):
ykman_cli("otp", "static", "2", "-f", "-g", "-l", "20")
output = ykman_cli("otp", "settings", "2", "-f", "--no-enter").output
assert "Updating settings for slot" in output
def test_delete_slot_2(self, ykman_cli):
ykman_cli("otp", "static", "2", "-f", "-g", "-l", "20")
output = ykman_cli("otp", "delete", "2", "-f").output
assert "Deleting the configuration" in output
status = ykman_cli("otp", "info").output
assert "Slot 2: empty" in status
def test_access_code_slot_2(self, ykman_cli):
ykman_cli(
"otp",
"--access-code",
"111111111111",
"static",
"2",
"--generate",
"--length",
"10",
)
self._check_slot_2_programmed(ykman_cli)
self._check_slot_2_has_access_code(ykman_cli)
ykman_cli("otp", "--access-code", "111111111111", "delete", "2", "-f")
status = ykman_cli("otp", "info").output
assert "Slot 2: empty" in status
@condition.min_version(4, 3, 2)
@condition.max_version(4, 3, 5)
def test_update_access_code_fails_on_yk_432_to_435(self, ykman_cli):
ykman_cli("otp", "static", "2", "--generate", "--length", "10")
self._check_slot_2_programmed(ykman_cli)
with pytest.raises(SystemExit):
ykman_cli("otp", "settings", "--new-access-code", "111111111111", "2", "-f")
ykman_cli(
"otp",
"--access-code",
"111111111111",
"static",
"2",
"-f",
"--generate",
"--length",
"10",
)
with pytest.raises(SystemExit):
ykman_cli("otp", "delete", "2", "-f")
with pytest.raises(SystemExit):
ykman_cli(
"otp",
"--access-code",
"111111111111",
"settings",
"--new-access-code",
"222222222222",
"2",
"-f",
)
ykman_cli("otp", "--access-code", "111111111111", "delete", "2", "-f")
@condition.min_version(4, 3, 2)
@condition.max_version(4, 3, 5)
def test_delete_access_code_fails_on_yk_432_to_435(self, ykman_cli):
ykman_cli(
"otp",
"--access-code",
"111111111111",
"static",
"2",
"--generate",
"--length",
"10",
)
self._check_slot_2_programmed(ykman_cli)
with pytest.raises(SystemExit):
ykman_cli(
"otp",
"--access-code",
"111111111111",
"settings",
"--delete-access-code",
"2",
"-f",
)
with pytest.raises(SystemExit):
ykman_cli("otp", "delete", "2", "-f")
ykman_cli("otp", "--access-code", "111111111111", "delete", "2", "-f")
@condition.check(lambda version: not (4, 3, 2) <= version <= (4, 3, 5))
def test_update_access_code_slot_2(self, ykman_cli):
ykman_cli("otp", "static", "2", "--generate", "--length", "10")
self._check_slot_2_programmed(ykman_cli)
self._check_slot_2_does_not_have_access_code(ykman_cli)
ykman_cli("otp", "settings", "--new-access-code", "111111111111", "2", "-f")
self._check_slot_2_has_access_code(ykman_cli)
ykman_cli(
"otp",
"--access-code",
"111111111111",
"settings",
"--delete-access-code",
"2",
"-f",
)
self._check_slot_2_does_not_have_access_code(ykman_cli)
ykman_cli("otp", "delete", "2", "-f")
@condition.check(lambda version: not (4, 3, 2) <= version <= (4, 3, 5))
def test_update_access_code_prompt_slot_2(self, ykman_cli):
ykman_cli("otp", "static", "2", "--generate", "--length", "10")
self._check_slot_2_programmed(ykman_cli)
self._check_slot_2_does_not_have_access_code(ykman_cli)
ykman_cli(
"otp", "settings", "--new-access-code", "-", "2", "-f", input="111111111111"
)
self._check_slot_2_has_access_code(ykman_cli)
ykman_cli(
"otp",
"--access-code",
"",
"settings",
"--delete-access-code",
"2",
"-f",
input="111111111111",
)
self._check_slot_2_does_not_have_access_code(ykman_cli)
ykman_cli("otp", "delete", "2", "-f")
@condition.check(lambda version: not (4, 3, 2) <= version <= (4, 3, 5))
def test_new_access_code_conflicts_with_delete_access_code(self, ykman_cli):
ykman_cli("otp", "static", "2", "--generate", "--length", "10")
self._check_slot_2_programmed(ykman_cli)
self._check_slot_2_does_not_have_access_code(ykman_cli)
with pytest.raises(SystemExit):
ykman_cli(
"otp",
"settings",
"--delete-access-code",
"--new-access-code",
"111111111111",
"2",
"-f",
)
self._check_slot_2_does_not_have_access_code(ykman_cli)
ykman_cli("otp", "settings", "--new-access-code", "111111111111", "2", "-f")
with pytest.raises(SystemExit):
ykman_cli(
"otp",
"settings",
"--delete-access-code",
"--new-access-code",
"111111111111",
"2",
"-f",
)
self._check_slot_2_has_access_code(ykman_cli)
ykman_cli("otp", "--access-code", "111111111111", "delete", "2", "-f")
def _check_slot_2_programmed(self, ykman_cli):
status = ykman_cli("otp", "info").output
assert "Slot 2: programmed" in status
def _check_slot_2_not_programmed(self, ykman_cli):
status = ykman_cli("otp", "info").output
assert "Slot 2: empty" in status
def _check_slot_2_has_access_code(self, ykman_cli):
with pytest.raises(SystemExit):
ykman_cli("otp", "settings", "--pacing", "0", "2", "-f")
ykman_cli(
"otp",
"--access-code",
"111111111111",
"settings",
"--pacing",
"0",
"2",
"-f",
)
def _check_slot_2_does_not_have_access_code(self, ykman_cli):
ykman_cli("otp", "settings", "--pacing", "0", "2", "-f")
class TestSlotCalculate:
    """Challenge-response calculation tests for OTP slot 2."""

    @pytest.fixture(autouse=True)
    def delete_slot(self, ykman_cli):
        # Best-effort cleanup of slot 2 before and after each test.
        try:
            ykman_cli("otp", "delete", "2", "-f")
        except SystemExit:
            pass
        yield None
        try:
            ykman_cli("otp", "delete", "2", "-f")
        except SystemExit:
            pass

    def test_calculate_hex(self, ykman_cli):
        ykman_cli("otp", "chalresp", "2", "abba", "-f")
        output = ykman_cli("otp", "calculate", "2", "abba").output
        # Expected response for key "abba" with challenge "abba".
        assert "f8de2586056d89d8b961a072d1245a495d2155e1" in output

    def test_calculate_totp(self, ykman_cli):
        ykman_cli("otp", "chalresp", "2", "abba", "-f")
        output = ykman_cli("otp", "calculate", "2", "999", "-T").output
        assert "533486" == output.strip()
        output = ykman_cli("otp", "calculate", "2", "999", "-T", "-d", "8").output
        assert "04533486" == output.strip()
        # Without an explicit challenge only the digit count is deterministic.
        output = ykman_cli("otp", "calculate", "2", "-T").output
        assert 6 == len(output.strip())
        output = ykman_cli("otp", "calculate", "2", "-T", "-d", "8").output
        assert 8 == len(output.strip())
class TestFipsMode:
    """Tests of the "FIPS Approved Mode" status reported by `otp info`."""

    @pytest.fixture(autouse=True)
    @condition.fips(True)
    def delete_slots(self, ykman_cli):
        # Best-effort cleanup of both slots before each test.
        try:
            ykman_cli("otp", "delete", "1", "-f")
        except SystemExit:
            pass
        try:
            ykman_cli("otp", "delete", "2", "-f")
        except SystemExit:
            pass
        yield None

    def test_not_fips_mode_if_no_slot_programmed(self, ykman_cli):
        info = ykman_cli("otp", "info").output
        assert "FIPS Approved Mode: No" in info

    def test_not_fips_mode_if_slot_1_not_programmed(self, ykman_cli):
        ykman_cli("otp", "static", "2", "--generate", "--length", "10")
        info = ykman_cli("otp", "info").output
        assert "FIPS Approved Mode: No" in info

    def test_not_fips_mode_if_slot_2_not_programmed(self, ykman_cli):
        ykman_cli("otp", "static", "1", "--generate", "--length", "10")
        info = ykman_cli("otp", "info").output
        assert "FIPS Approved Mode: No" in info

    def test_not_fips_mode_if_no_slot_has_access_code(self, ykman_cli):
        ykman_cli("otp", "static", "1", "--generate", "--length", "10")
        ykman_cli("otp", "static", "2", "--generate", "--length", "10")
        info = ykman_cli("otp", "info").output
        assert "FIPS Approved Mode: No" in info

    def test_not_fips_mode_if_only_slot_1_has_access_code(self, ykman_cli):
        ykman_cli("otp", "static", "1", "--generate", "--length", "10")
        ykman_cli("otp", "static", "2", "--generate", "--length", "10")
        ykman_cli("otp", "settings", "--new-access-code", "111111111111", "1", "-f")
        info = ykman_cli("otp", "info").output
        assert "FIPS Approved Mode: No" in info
        ykman_cli("otp", "--access-code", "111111111111", "delete", "1", "-f")

    def test_not_fips_mode_if_only_slot_2_has_access_code(self, ykman_cli):
        ykman_cli("otp", "static", "1", "--generate", "--length", "10")
        ykman_cli("otp", "static", "2", "--generate", "--length", "10")
        ykman_cli("otp", "settings", "--new-access-code", "111111111111", "2", "-f")
        info = ykman_cli("otp", "info").output
        assert "FIPS Approved Mode: No" in info
        ykman_cli("otp", "--access-code", "111111111111", "delete", "2", "-f")

    def test_fips_mode_if_both_slots_have_access_code(self, ykman_cli):
        # Only when both slots are programmed AND protected does the key
        # report "Yes" (as asserted below).
        ykman_cli("otp", "static", "--generate", "--length", "10", "1", "-f")
        ykman_cli("otp", "static", "--generate", "--length", "10", "2", "-f")
        ykman_cli("otp", "settings", "--new-access-code", "111111111111", "1", "-f")
        ykman_cli("otp", "settings", "--new-access-code", "111111111111", "2", "-f")
        info = ykman_cli("otp", "info").output
        assert "FIPS Approved Mode: Yes" in info
        ykman_cli("otp", "--access-code", "111111111111", "delete", "1", "-f")
        ykman_cli("otp", "--access-code", "111111111111", "delete", "2", "-f")
| 35.022455
| 88
| 0.564651
| 2,720
| 23,395
| 4.591176
| 0.104412
| 0.142857
| 0.11451
| 0.05253
| 0.809817
| 0.779548
| 0.768097
| 0.755846
| 0.719731
| 0.672406
| 0
| 0.050409
| 0.288566
| 23,395
| 667
| 89
| 35.074963
| 0.699892
| 0.059671
| 0
| 0.720974
| 0
| 0
| 0.215424
| 0.021806
| 0
| 0
| 0
| 0
| 0.078652
| 1
| 0.11236
| false
| 0.018727
| 0.009363
| 0
| 0.132959
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5813c8a5243a5f4c0a9e22e3c123e0522bbcedd5
| 157
|
py
|
Python
|
python_modules/dagster/dagster_tests/cli_tests/workspace_tests/hello_world_file_in_directory/hello_world_repository.py
|
bitdotioinc/dagster
|
4fe395a37b206b1a48b956fa5dd72bf698104cca
|
[
"Apache-2.0"
] | 1
|
2021-04-27T19:49:59.000Z
|
2021-04-27T19:49:59.000Z
|
python_modules/dagster/dagster_tests/cli_tests/workspace_tests/hello_world_file_in_directory/hello_world_repository.py
|
bitdotioinc/dagster
|
4fe395a37b206b1a48b956fa5dd72bf698104cca
|
[
"Apache-2.0"
] | 7
|
2022-03-16T06:55:04.000Z
|
2022-03-18T07:03:25.000Z
|
python_modules/dagster/dagster_tests/cli_tests/workspace_tests/hello_world_file_in_directory/hello_world_repository.py
|
bitdotioinc/dagster
|
4fe395a37b206b1a48b956fa5dd72bf698104cca
|
[
"Apache-2.0"
] | 1
|
2020-08-20T14:20:31.000Z
|
2020-08-20T14:20:31.000Z
|
from src.pipelines import hello_world_pipeline
from dagster import repository
@repository
def hello_world_repository():
    """Expose the hello-world pipeline as a Dagster repository."""
    pipelines = [hello_world_pipeline]
    return pipelines
| 17.444444
| 46
| 0.828025
| 20
| 157
| 6.2
| 0.55
| 0.241935
| 0.290323
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.127389
| 157
| 8
| 47
| 19.625
| 0.905109
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
58284a042f5c5f059a25b55898bb7d03514f4d31
| 103
|
py
|
Python
|
appyter/ext/socketio/__init__.py
|
MaayanLab/jupyter-template
|
dd05bfcb95c9eafb1a9df845b5d8fecae1d6b9d5
|
[
"Apache-2.0"
] | null | null | null |
appyter/ext/socketio/__init__.py
|
MaayanLab/jupyter-template
|
dd05bfcb95c9eafb1a9df845b5d8fecae1d6b9d5
|
[
"Apache-2.0"
] | 24
|
2020-04-07T17:04:47.000Z
|
2020-05-27T00:51:25.000Z
|
appyter/ext/socketio/__init__.py
|
MaayanLab/jupyter-template
|
dd05bfcb95c9eafb1a9df845b5d8fecae1d6b9d5
|
[
"Apache-2.0"
] | null | null | null |
from appyter.ext.socketio.server import AsyncServer
from appyter.ext.socketio.client import AsyncClient
| 51.5
| 51
| 0.873786
| 14
| 103
| 6.428571
| 0.642857
| 0.244444
| 0.311111
| 0.488889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067961
| 103
| 2
| 52
| 51.5
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
58880d3b14aaa885ccd2bd4f635515e63425758d
| 4,041
|
py
|
Python
|
SimpleSolver/SimpleSolver.py
|
MunkyCode/CubeSolver
|
1d19f6666386e3e0c943342731e92ccc4f37e9d6
|
[
"MIT"
] | null | null | null |
SimpleSolver/SimpleSolver.py
|
MunkyCode/CubeSolver
|
1d19f6666386e3e0c943342731e92ccc4f37e9d6
|
[
"MIT"
] | null | null | null |
SimpleSolver/SimpleSolver.py
|
MunkyCode/CubeSolver
|
1d19f6666386e3e0c943342731e92ccc4f37e9d6
|
[
"MIT"
] | null | null | null |
import pycuber as pc
from SimpleSolver.util import Solved
class SimpleSolver:
    """Beginner-method solver for a pycuber ``Cube`` (bottom cross only so far)."""

    # Maps pycuber square reprs (e.g. "[r]") to pycuber colour names.
    ColorDict = {"[r]": "red", "[b]": "blue", "[o]": "orange", "[g]": "green", "[w]": "white", "[y]": "yellow"}

    def Solve(self, cube):
        """Solve the cube (currently only builds the bottom cross)."""
        assert type(cube) == pc.Cube, "Cannot solve a non cube"
        self.Cross(cube)

    def _find_cross_edge(self, cube, col):
        """Locate the edge piece carrying the bottom-face colour and *col*.

        Returns ``(white_face, other_face)``: the face names holding the
        white sticker and the coloured sticker respectively.  This replaces
        the identical lookup expression that was duplicated seven times in
        the original ``Cross`` implementation.
        """
        e = list(
            cube.has_colour(cube["D"].colour)
            & cube.has_colour(self.ColorDict[col])
            & cube.select_type("edge")
        )[0]
        faces = list(e.facings)
        if e[faces[0]].__str__() == "[w]":
            return faces[0], faces[1]
        return faces[1], faces[0]

    def Cross(self, cube):
        """Build the white cross on the bottom (D) face."""
        assert type(cube) == pc.Cube, "Cannot solve a non cube"
        for col in {"[r]", "[g]", "[b]", "[o]"}:
            white, other = self._find_cross_edge(cube, col)
            if white == 'D':  # white square on the bottom face
                if self.ColorDict[col] != cube[other].colour:
                    # Edge sits in the cross but under the wrong centre:
                    # pop it up, rotate U until aligned, then push it down.
                    cube(other + "2")
                    white, other = self._find_cross_edge(cube, col)
                    while self.ColorDict[col] != cube[other].colour:
                        cube("U")
                        white, other = self._find_cross_edge(cube, col)
                    cube(other + "2")
            elif white == 'U':  # white square on the top face
                while self.ColorDict[col] != cube[other].colour:
                    cube("U")
                    white, other = self._find_cross_edge(cube, col)
                cube(other + "2")
            elif other == 'U':  # white on a side face, colour on the top face
                while self.ColorDict[col] != cube[white].colour:
                    cube("U")
                    white, other = self._find_cross_edge(cube, col)
                rotation = "FLBR"
                ind = list(rotation).index(white)
                cube("U " + rotation[(ind + 1) % 4] + " " + rotation[ind] + "' " + rotation[(ind + 1) % 4])
            elif other == 'D':  # colour sticker on the bottom, white on a side
                rotation = "FLBR"
                ind = list(rotation).index(white)
                cube(rotation[ind] + " " + rotation[(ind + 1) % 4] + "' " + "U' " + rotation[(ind + 1) % 4])
                white, other = self._find_cross_edge(cube, col)
                while self.ColorDict[col] != cube[other].colour:
                    cube("U")
                    white, other = self._find_cross_edge(cube, col)
                cube(other + "2")
| 53.171053
| 137
| 0.446672
| 462
| 4,041
| 3.800866
| 0.158009
| 0.102506
| 0.103645
| 0.136674
| 0.809795
| 0.79385
| 0.748861
| 0.748861
| 0.702164
| 0.668565
| 0
| 0.017199
| 0.395694
| 4,041
| 75
| 138
| 53.88
| 0.701884
| 0.048503
| 0
| 0.771429
| 0
| 0
| 0.05101
| 0
| 0
| 0
| 0
| 0
| 0.028571
| 1
| 0.028571
| false
| 0
| 0.028571
| 0
| 0.085714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
589ce75ffda22305b5fa4968557c57e698bfc9e7
| 6,535
|
py
|
Python
|
Krogg/fire.py
|
wang0618/ascii-art
|
7ce6f152541716034bf0a22d341a898b17e2865f
|
[
"MIT"
] | 1
|
2021-08-29T09:52:06.000Z
|
2021-08-29T09:52:06.000Z
|
Krogg/fire.py
|
wang0618/ascii-art
|
7ce6f152541716034bf0a22d341a898b17e2865f
|
[
"MIT"
] | null | null | null |
Krogg/fire.py
|
wang0618/ascii-art
|
7ce6f152541716034bf0a22d341a898b17e2865f
|
[
"MIT"
] | null | null | null |
# The fire
# By:Krogg
# https://web.archive.org/web/20000623191725/http://gtcom.net/~krogg/ascii/FIRE.HTM
# Animation metadata: per-frame delay in milliseconds and the display name.
duration = 350
name = "The fire"
frames = [
" O / \n"+
" /|\\/ : \n"+
" |\\ : \n"+
" || \n"+
"##### : \n"+
" : (: \n"+
" ): )) ) \n"+
"#| (( ((((( |##\n"+
"##\\ ))))))))) /###\n"+
"###\\(((((((((((/####\r",
" O . \n"+
" /|\\__ . \n"+
" |\\ \n"+
" || . : \n"+
"##### \n"+
" : ( : \n"+
" : :(((: \n"+
"#| ) ))))) |##\n"+
"##\\ ((((((((( /###\n"+
"###\\)))))))))))/####\r",
" O \n"+
" /|\\__ \n"+
" |\\ . . \n"+
" || \n"+
"##### : : \n"+
" :( :( : \n"+
" )) )) \n"+
"#| (( ((( ( |##\n"+
"##\\ )))))) )) /###\n"+
"###\\(((((((((((/####\r",
" O . \n"+
" /|\\__ . \n"+
"- |\\ \n"+
" || : \n"+
"##### \n"+
" : ( : \n"+
" :(((: \n"+
"#| ) ))))) |##\n"+
"##\\ ((((((((( /###\n"+
"###\\)))))))))))/####\r",
" O \n"+
" /|\\__ \n"+
"-- |\\ . \n"+
" | \\. \n"+
"#####: : : \n"+
" ( : : \n"+
" )) ) ) \n"+
"#| ((( (( (( |##\n"+
"##\\ ))))))))) /###\n"+
"###\\(((((((((((/####\r",
" O \n"+
"\\ /|\\__ \n"+
" --|\\ \n"+
"\\ | \\ . . \n"+
"#####: : : \n"+
" ) \n"+
" (((: ( : \n"+
"#| )))) )) |##\n"+
"##\\ ((((((((( /###\n"+
"###\\)))))))))))/####\r",
" O \n"+
"\\ /|\\ \n"+
" -->\\\\ . \n"+
"\\ / | \\ . . \n"+
"#####: . \n"+
" . \n"+
" ) : \n"+
"#| ((: ( : |##\n"+
"##\\ )))) )) /###\n"+
"###\\(((((((((((/####\r",
"O \\O \n"+
"|\\ |\\ \n"+
"|\\--> \n"+
"|| /|\\ . \n"+
"##### \\ . \n"+
" : \n"+
" ( : : \n"+
"#| )) : :) |##\n"+
"##\\ (((( (( /###\n"+
"###\\)))))))))))/####\r",
"\\O __O \n"+
" |\\ /-- \n"+
"/| \\/\\ \n"+
"|| / | \n"+
"#####| : : \n"+
" | : \n"+
" ) \n"+
"#| ( (( |##\n"+
"##\\ ))) ))) /###\n"+
"###\\(((((((((((/####\r",
" O/ \\O \n"+
" |\\ /\\ \n"+
" || /\\ \n"+
" ||/ | . : \n"+
"##### : \n"+
" / : \n"+
" / (: (: \n"+
"#| )):) )) |##\n"+
"##\\ (((( ((( /###\n"+
"###\\)))))))))))/####\r",
" O __O \n"+
"/|\\ /-- \n"+
" || /\\ . \n"+
" ||/ / . \n"+
"##### . \n"+
" : : \n"+
" __ ): : \n"+
"#| (( (: |##\n"+
"##\\ )))) )) /###\n"+
"###\\(((((((((((/####\r",
" O \n"+
"/|\\ __|O \n"+
" || /\\ | \n"+
" ||/ \\ . \n"+
"##### : : \n"+
" : : \n"+
" :( : \n"+
"#| )) ::) |##\n"+
"##\\ __((( (( /###\n"+
"###\\)))))))))))/####\r",
" O \n"+
"/|\\ __O \n"+
" || /| . \n"+
" || __/| . \n"+
"##### |. . \n"+
" .: \n"+
" ) : \n"+
"#| (( ( |##\n"+
"##\\ ))) )) /###\n"+
"###\\__(((((((((/####\r",
" O \n"+
"/|\\ \n"+
" || . \n"+
" || _____ \n"+
"##### //\\O .: \n"+
" / : \n"+
" \n"+
"#| ) ) |##\n"+
"##\\ (( (( /###\n"+
"###\\)))))))))))/####\r",
" O \n"+
" /|\\__ \n"+
" |\\ \n"+
" || .. \n"+
"#####\\ : \n"+
" __\\ \n"+
" \\/ \n"+
"#| /O (: |##\n"+
"##\\ )) )) /###\n"+
"###\\(((((((((((/####\r",
" O \n"+
" /|\\__ \n"+
" |\\ \n"+
" | \\ . \n"+
"##### \n"+
" \\ | \n"+
" \\| : \n"+
"#| )/ ( |##\n"+
"##\\ (( )) /###\n"+
"###\\)))))))))))/####\r",
" O \n"+
" /|\\__ \n"+
" |\\ \n"+
" || \n"+
"##### ( : \n"+
" )) ) \n"+
" ((( (( \n"+
"#| )))))))) |##\n"+
"##\\ (((((((((( /###\n"+
"###\\)))))))))))/####\r",
" O \n"+
" /|\\__ \n"+
" |\\ \n"+
" || ( . \n"+
"##### )) ) \n"+
" ((( (( \n"+
" ))))))) \n"+
"#| (((((((( |##\n"+
"##\\ )))))))))) /###\n"+
"###\\(((((((((((/####\r",
" O \n"+
" /|\\__ \n"+
" |\\ ( \n"+
" || )) ) \n"+
"##### ((( (( \n"+
" ))) ))) \n"+
" ((((((( \n"+
"#| )))))))) |##\n"+
"##\\ ((((((((( /###\n"+
"###\\)))))))))))/####\r",
" O \n"+
" /|\\__ ( \n"+
" |\\ )) ) \n"+
" || : \n"+
"##### (: ) \n"+
" )( )): \n"+
" (( (( \n"+
"#| )) )))) |##\n"+
"##\\ ((((((((( /###\n"+
"###\\)))))))))))/####\r",
" O \n"+
" /|\\__ \n"+
" |\\ : . \n"+
" || :: \n"+
"##### ) \n"+
" (( \n"+
" :): ))) \n"+
"#| (( (((( |##\n"+
"##\\ )))) )))) /###\n"+
"###\\(((((((((((/####\r",
" O \n"+
" /|\\__. \n"+
" |\\ :. \n"+
" || \n"+
"##### \n"+
" : : ) \n"+
" ) (( \n"+
"#| (( :))) |##\n"+
"##\\ (((( (((( /###\n"+
"###\\)))))))))))/####\r",
" O \n"+
" /|\\__ . \n"+
" |\\ \n"+
" | \\ \n"+
"##### : : \n"+
" ( \n"+
" ) :)) \n"+
"#| ((( ((( |##\n"+
"##\\ )))) )))) /###\n"+
"###\\(((((((((((/####\r",
" O \n"+
" /|\\__ \n"+
" |\\ \n"+
" || . . \n"+
"##### \n"+
" : \n"+
" ( : ( \n"+
"#| )) )): |##\n"+
"##\\ (((( (((( /###\n"+
"###\\)))))))))))/####\r"
]
| 23.937729
| 83
| 0.065953
| 295
| 6,535
| 1.281356
| 0.071186
| 0.994709
| 1.285714
| 1.449735
| 0.719577
| 0.719577
| 0.719577
| 0.714286
| 0.714286
| 0.714286
| 0
| 0.005308
| 0.50987
| 6,535
| 272
| 84
| 24.025735
| 0.112707
| 0.015149
| 0
| 0.565574
| 0
| 0
| 0.840771
| 0.085834
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
54715555124aa370cd8bdf43f613445e92e18150
| 161
|
py
|
Python
|
bloggy/context_processors.py
|
ediziks/yaziyo.co
|
0badd7e6ee555d45dd2ba856640c725886a2e2a4
|
[
"MIT"
] | null | null | null |
bloggy/context_processors.py
|
ediziks/yaziyo.co
|
0badd7e6ee555d45dd2ba856640c725886a2e2a4
|
[
"MIT"
] | 5
|
2022-02-22T22:53:20.000Z
|
2022-02-22T22:53:21.000Z
|
bloggy/context_processors.py
|
ediziks/yaziyo.co
|
0badd7e6ee555d45dd2ba856640c725886a2e2a4
|
[
"MIT"
] | null | null | null |
from django.conf import settings
# for pushing Analytics ID key to base.html
def google_analytics(request):
    """Context processor exposing the Google Analytics ID to templates."""
    return dict(GA_KEY=settings.GOOGLE_ANALYTICS_KEY)
| 23
| 50
| 0.795031
| 24
| 161
| 5.166667
| 0.75
| 0.241935
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 161
| 6
| 51
| 26.833333
| 0.885714
| 0.254658
| 0
| 0
| 0
| 0
| 0.050847
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
547e04e8d20e0f5f9e3cafb6974e402c17f701f9
| 2,924
|
py
|
Python
|
mitre_attack/cli/command_groups/software.py
|
check-spelling/mitre-attack
|
f3be1ccff235593c4277f3b9ec2696757924894b
|
[
"MIT"
] | 1
|
2022-01-13T06:32:10.000Z
|
2022-01-13T06:32:10.000Z
|
mitre_attack/cli/command_groups/software.py
|
check-spelling/mitre-attack
|
f3be1ccff235593c4277f3b9ec2696757924894b
|
[
"MIT"
] | null | null | null |
mitre_attack/cli/command_groups/software.py
|
check-spelling/mitre-attack
|
f3be1ccff235593c4277f3b9ec2696757924894b
|
[
"MIT"
] | 1
|
2022-01-14T00:00:27.000Z
|
2022-01-14T00:00:27.000Z
|
from mitre_attack.api.client import MitreAttack
import mitre_attack.cli.click as click
import mitre_attack.cli.command_groups.malware as malware_command_group
import mitre_attack.cli.command_groups.tools as tools_command_group
@click.group()
@click.pass_context
def software(_):
    """
    Query or count malware and tools.
    """
    # Group container only; subcommands are registered below via
    # @software.command().
@software.command()
@click.option('--software-ids')
@click.option('--software-names')
@click.pass_context
def get_software(_: click.Context, software_ids: str, software_names: str):
    """Print each matching software entry as JSON, one per line."""
    client = MitreAttack()
    ids = click.str_to_strs(software_ids)
    names = click.str_to_strs(software_names)
    for entry in client.enterprise.iter_software(software_ids=ids, software_names=names):
        click.echo(entry.to_json())
@software.command()
@click.option('--software-ids')
@click.option('--software-names')
@click.pass_context
def count_software(_: click.Context, software_ids: str, software_names: str):
    """Print the number of matching software entries."""
    client = MitreAttack()
    ids = click.str_to_strs(software_ids)
    names = click.str_to_strs(software_names)
    total = client.enterprise.count_software(software_ids=ids, software_names=names)
    click.echo(total)
@software.command()
@click.option('--software-id')
@click.option('--software-name')
@click.pass_context
def get_malware_family(ctx: click.Context, software_id: str, software_name: str):
    """Delegate to the malware command group's get_malware_family command."""
    kwargs = {"software_id": software_id, "software_name": software_name}
    ctx.invoke(malware_command_group.get_malware_family, **kwargs)
@software.command()
@click.option('--software-ids')
@click.option('--software-names')
@click.pass_context
def get_malware_families(ctx: click.Context, software_ids: str, software_names: str):
    """Delegate to the malware command group's get_malware_families command."""
    kwargs = {"software_ids": software_ids, "software_names": software_names}
    ctx.invoke(malware_command_group.get_malware_families, **kwargs)
@software.command()
@click.option('--software-ids')
@click.option('--software-names')
@click.pass_context
def count_malware_families(ctx: click.Context, software_ids: str, software_names: str):
    """Delegate to the malware command group's count_malware_families command."""
    kwargs = {"software_ids": software_ids, "software_names": software_names}
    ctx.invoke(malware_command_group.count_malware_families, **kwargs)
@software.command()
@click.option('--software-ids')
@click.option('--software-names')
@click.pass_context
def get_tool(ctx: click.Context, software_ids: str, software_names: str):
    """Delegate to the tools command group's get_tool command."""
    kwargs = {"software_ids": software_ids, "software_names": software_names}
    ctx.invoke(tools_command_group.get_tool, **kwargs)
@software.command()
@click.option('--software-ids')
@click.option('--software-names')
@click.pass_context
def get_tools(ctx: click.Context, software_ids: str, software_names: str):
    """Delegate to the tools command group's get_tools command."""
    kwargs = {"software_ids": software_ids, "software_names": software_names}
    ctx.invoke(tools_command_group.get_tools, **kwargs)
@software.command()
@click.option('--software-ids')
@click.option('--software-names')
@click.pass_context
def count_tools(ctx: click.Context, software_ids: str, software_names: str):
    """Delegate to the tools command group's count_tools command."""
    kwargs = {"software_ids": software_ids, "software_names": software_names}
    ctx.invoke(tools_command_group.count_tools, **kwargs)
| 32.853933
| 118
| 0.77223
| 400
| 2,924
| 5.3625
| 0.1125
| 0.14359
| 0.141725
| 0.07972
| 0.820047
| 0.804196
| 0.759907
| 0.759907
| 0.739394
| 0.712354
| 0
| 0
| 0.096785
| 2,924
| 88
| 119
| 33.227273
| 0.812192
| 0.011286
| 0
| 0.560606
| 0
| 0
| 0.082783
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136364
| false
| 0.151515
| 0.060606
| 0
| 0.19697
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
54c259f7bf261d6f0ed8a0a176a7eee0e6a80d0a
| 27,090
|
py
|
Python
|
EyesLibrary/keywords/check.py
|
jward-magento/EyesLibrary
|
2d59046a8882bb92431a34edfa8790a5be9cce65
|
[
"Apache-2.0"
] | null | null | null |
EyesLibrary/keywords/check.py
|
jward-magento/EyesLibrary
|
2d59046a8882bb92431a34edfa8790a5be9cce65
|
[
"Apache-2.0"
] | null | null | null |
EyesLibrary/keywords/check.py
|
jward-magento/EyesLibrary
|
2d59046a8882bb92431a34edfa8790a5be9cce65
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from __future__ import absolute_import
import os
import six.moves.http_client
import base64
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.common.exceptions import InvalidElementStateException
from robot.libraries.BuiltIn import BuiltIn
from applitools.core import logger
from applitools.geometry import Region
from applitools.eyes import Eyes, BatchInfo
from applitools.selenium.webelement import EyesWebElement
from .session import SessionKeywords
from applitools.selenium.positioning import StitchMode
from robot.api import logger as loggerRobot
from EyesLibrary.resources import variables, utils
import six
class CheckKeywords:
def check_eyes_window(
self,
name,
force_full_page_screenshot=None,
enable_eyes_log=None,
enable_http_debug_log=None,
matchtimeout=-1,
target=None,
hidescrollbars=None,
wait_before_screenshots=None,
send_dom=None,
matchlevel=None,
isdisabled=None
):
"""
Takes a snapshot from the browser using the webdriver and matches
it with the expected output.
| =Arguments= | =Description= |
| Name (str) | *Mandatory* - Name that will be given to region in Eyes |
| Force Full Page Screenshot (bool) | Will force the browser to take a screenshot of whole page. Define "Stitch Mode" argument on `Open Eyes Session` if necessary |
| Enable Eyes Log (bool) | Determines if the trace logs of Applitools Eyes SDK are activated for this test. Overrides the argument set on `Open Eyes Session` |
| Enable HTTP Debug Log (bool) | The HTTP Debug logs will not be included by default. To activate, pass 'True' in the variable |
| Match Timeout (int) | Determines how much time in milliseconds Eyes continue to retry the matching before declaring a mismatch on this checkpoint |
| Target (Target) | The intended Target. See `Defining Ignore and Floating Regions` |
| Hide Scrollbars (bool) | Sets if the scrollbars are hidden in the checkpoint, by passing 'True' or 'False' in the variable |
| Wait Before Screenshots (int) | Determines the number of milliseconds that Eyes will wait before capturing the screenshot of this checkpoint. Overrides the argument set on `Open Eyes Session` |
| Send DOM (bool) | Sets if DOM information should be sent for this checkpoint |
| Match Level (str) | The match level for the comparison of this checkpoint - can be STRICT, LAYOUT, CONTENT or EXACT |
| Is Disabled (bool) | Determines whether or not interactions with Eyes will be silently ignored for this checkpoint |
*Example:*
| Check Eyes Window | Google Homepage | ${true} | ${true} | ${true} | 5000 |
*Note (Safari on mobile):*
When checking a window, provide osname=iOS and browsername=Safari on `Open Eyes Session`.
Due to an issue regarding the height of the address bar not being taken into account when the screenshot is taken, a temporary workaround is in place.
In order to screenshot the correct element, it is added the value of 71 to the y coordinate of the element.
"""
original_properties = utils.save_current_properties()
utils.update_properties(force_full_page_screenshot, enable_eyes_log, enable_http_debug_log, hidescrollbars, wait_before_screenshots, send_dom, matchlevel, None, isdisabled)
# Temporary workaround in order to capture the correct element on Safari
# Element coordinate y doesn't take the address bar height into consideration, so it has to be added
# Current address bar height: 71
if variables.eyes.host_app == "Safari" and variables.eyes.host_os == "iOS":
size = variables.driver.get_window_size("current")
variables.eyes.check_region(
Region(0, 71, size.__getitem__("width"), size.__getitem__("height")),
name,
matchtimeout,
target,
)
else:
variables.eyes.check_window(name, int(matchtimeout), target)
utils.update_properties(**original_properties)
def check_eyes_region(
    self,
    left,
    top,
    width,
    height,
    name,
    enable_eyes_log=None,
    enable_http_debug_log=None,
    matchtimeout=-1,
    target=None,
    hidescrollbars=None,
    wait_before_screenshots=None,
    send_dom=None,
    matchlevel=None,
    isdisabled=None
):
    """
    Captures a rectangular region of the browser viewport, described by its
    left/top corner plus width and height, and matches it against the
    expected baseline output.
    The width and the height cannot be greater than the width and the height specified on `Open Eyes Session`.
    | =Arguments= | =Description= |
    | Left (float) | *Mandatory* - The left coordinate of the region that is tested e.g. 100 |
    | Top (float) | *Mandatory* - The top coordinate of the region that is tested e.g. 150 |
    | Width (float) | *Mandatory* - The width of the region that is tested e.g. 500 |
    | Height (float) | *Mandatory* - The height of the region that is tested e.g. 120 |
    | Name (str) | *Mandatory* - Name that will be given to region in Eyes |
    | Enable Eyes Log (bool) | Determines if the trace logs of Applitools Eyes SDK are activated for this checkpoint. Overrides the argument set on `Open Eyes Session` |
    | Enable HTTP Debug Log (bool) | The HTTP Debug logs will not be included by default. To activate, pass 'True' in the variable |
    | Match Timeout (int) | Determines how much time in milliseconds Eyes continue to retry the matching before declaring a mismatch on this checkpoint |
    | Target (Target) | The intended Target. See `Defining Ignore and Floating Regions` |
    | Hide Scrollbars (bool) | Sets if the scrollbars are hidden in the checkpoint, by passing 'True' or 'False' in the variable |
    | Wait Before Screenshots (int) | Determines the number of milliseconds that Eyes will wait before capturing the screenshot of this checkpoint. Overrides the argument set on `Open Eyes Session` |
    | Send DOM (bool) | Sets if DOM information should be sent for this checkpoint |
    | Match Level (str) | The match level for the comparison of this checkpoint - can be STRICT, LAYOUT, CONTENT or EXACT |
    | Is Disabled (bool) | Determines whether or not interactions with Eyes will be silently ignored for this checkpoint |
    *Example:*
    | Check Eyes Region | 100 | 150 | 500 | 120 | Google Logo | ${true} | ${true} | 5000 |
    """
    # Remember the active configuration so it can be restored afterwards.
    saved_properties = utils.save_current_properties()
    utils.update_properties(
        None,
        enable_eyes_log,
        enable_http_debug_log,
        hidescrollbars,
        wait_before_screenshots,
        send_dom,
        matchlevel,
        None,
        isdisabled,
    )
    # The four mandatory coordinates arrive as Robot Framework strings;
    # convert them before building the Region.
    bounds = Region(float(left), float(top), float(width), float(height))
    variables.eyes.check_region(bounds, name, matchtimeout, target)
    # Restore whatever configuration was active before this checkpoint.
    utils.update_properties(**saved_properties)
def check_eyes_region_by_element(
    self,
    element,
    name,
    enable_eyes_log=None,
    enable_http_debug_log=False,
    matchtimeout=-1,
    target=None,
    hidescrollbars=None,
    wait_before_screenshots=None,
    send_dom=None,
    stitchcontent=None,
    matchlevel=None,
    isdisabled=None
):
    """
    Takes a snapshot of the region of the given element from the browser
    using the web driver. Not available to mobile native apps.
    | =Arguments= | =Description= |
    | Element (WebElement) | *Mandatory* - The Web Element to be checked |
    | Name (str) | *Mandatory* - Name that will be given to region in Eyes |
    | Enable Eyes Log (bool) | Determines if the trace logs of Applitools Eyes SDK are activated for this checkpoint. Overrides the argument set on `Open Eyes Session` |
    | Enable HTTP Debug Log (bool) | The HTTP Debug logs will not be included by default. To activate, pass 'True' in the variable |
    | Match Timeout (int) | Determines how much time in milliseconds Eyes continue to retry the matching before declaring a mismatch on this test |
    | Target (Target) | The intended Target. See `Defining Ignore and Floating Regions` |
    | Hide Scrollbars (bool) | Sets if the scrollbars are hidden in the checkpoint, by passing 'True' or 'False' in the variable |
    | Wait Before Screenshots (int) | Determines the number of milliseconds that Eyes will wait before capturing the screenshot of this checkpoint. Overrides the argument set on `Open Eyes Session` |
    | Send DOM (bool) | Sets if DOM information should be sent for this checkpoint |
    | Stitch Content (bool) | Determines if Eyes will scroll this element to take a full element screenshot, when the element is scrollable |
    | Match Level (str) | The match level for the comparison of this checkpoint - can be STRICT, LAYOUT, CONTENT or EXACT |
    | Is Disabled (bool) | Determines whether or not interactions with Eyes will be silently ignored for this checkpoint |
    *Example:*
    | ${element}= | Get Element | //*[@id="hplogo"] |
    | Check Eyes Region By Element | ${element} | ElementName | ${true} | ${true} | 5000 |
    *Note (Safari on mobile):*
    When checking an element, provide osname=iOS and browsername=Safari on `Open Eyes Session`.
    Due to an issue regarding the height of the address bar not being taken into account when the screenshot is taken, a temporary workaround is in place.
    In order to screenshot the correct element, it is added the value of 71 to the y coordinate of the element.
    """
    original_properties = utils.save_current_properties()
    # Bug fix: the previous call omitted `enable_http_debug_log`, which
    # shifted every following argument one position to the left (e.g.
    # `hidescrollbars` landed in the HTTP-debug slot and `isdisabled` was
    # never applied). Pass all nine positional arguments in the same order
    # used by the sibling keywords (`Check Eyes Region By Selector` etc.).
    utils.update_properties(None, enable_eyes_log, enable_http_debug_log, hidescrollbars, wait_before_screenshots, send_dom, matchlevel, stitchcontent, isdisabled)
    # NOTE(review): `enable_http_debug_log` defaults to False here while the
    # sibling keywords default to None — kept as-is for backward compatibility.
    # Temporary workaround in order to capture the correct element on Safari:
    # the element's y coordinate doesn't take the address bar height
    # (currently 71px) into consideration, so it has to be added manually.
    if variables.eyes.host_app == "Safari" and variables.eyes.host_os == "iOS":
        location = element.location
        size = element.size
        variables.eyes.check_region(
            Region(
                location["x"],
                location["y"] + 71,
                size["width"],
                size["height"],
            ),
            name,
            matchtimeout,
            target,
            variables.stitchcontent,
        )
    else:
        variables.eyes.check_region_by_element(
            element, name, matchtimeout, target, variables.stitchcontent
        )
    # Restore the configuration that was active before this checkpoint.
    utils.update_properties(**original_properties)
def check_eyes_region_by_selector(
    self,
    value,
    name,
    selector="id",
    enable_eyes_log=None,
    enable_http_debug_log=None,
    matchtimeout=-1,
    target=None,
    hidescrollbars=None,
    wait_before_screenshots=None,
    send_dom=None,
    stitchcontent=None,
    matchlevel=None,
    isdisabled=None
):
    """
    Locates an element via find_element(by, value), snapshots its region and
    matches it with the expected output. Any of the eight selector strategies
    described in `Using Selectors` may be used.
    Not available to mobile native apps.
    | =Arguments= | =Description= |
    | Value (str) | *Mandatory* - The specific value of the selector. e.g. a CSS SELECTOR value .first.expanded.dropdown |
    | Name (str) | *Mandatory* - Name that will be given to region in Eyes |
    | Selector (str) | *Mandatory* - The strategy to locate the element. The supported selectors are specified in `Using Selectors` |
    | Enable Eyes Log (bool) | Determines if the trace logs of Applitools Eyes SDK are activated for this checkpoint. Overrides the argument set on `Open Eyes Session` |
    | Enable HTTP Debug Log (bool) | The HTTP Debug logs will not be included by default. To activate, pass 'True' in the variable |
    | Match Timeout (int) | Determines how much time in milliseconds Eyes continue to retry the matching before declaring a mismatch on this checkpoint |
    | Target (Target) | The intended Target. See `Defining Ignore and Floating Regions` |
    | Hide Scrollbars (bool) | Sets if the scrollbars are hidden in the checkpoint, by passing 'True' or 'False' in the variable |
    | Wait Before Screenshots (int) | Determines the number of milliseconds that Eyes will wait before capturing the screenshot of this test. Overrides the argument set on `Open Eyes Session` |
    | Send DOM (bool) | Sets if DOM information should be sent for this checkpoint |
    | Stitch Content (bool) | Determines if Eyes will scroll this element to take a full element screenshot, when the element is scrollable |
    | Match Level (str) | The match level for the comparison of this checkpoint - can be STRICT, LAYOUT, CONTENT or EXACT |
    | Is Disabled (bool) | Determines whether or not interactions with Eyes will be silently ignored for this checkpoint |
    *Example:*
    | Check Eyes Region By Selector | .first.expanded.dropdown | Css Element | css selector | ${true} | ${true} | 5000 |
    *Note (Safari on mobile):*
    When checking an element, provide osname=iOS and browsername=Safari on `Open Eyes Session`.
    Due to an issue regarding the height of the address bar not being taken into account when the screenshot is taken, a temporary workaround is in place.
    In order to screenshot the correct element, it is added the value of 71 to the y coordinate of the element.
    """
    # Remember the active configuration so it can be restored afterwards.
    saved_properties = utils.save_current_properties()
    utils.update_properties(
        None,
        enable_eyes_log,
        enable_http_debug_log,
        hidescrollbars,
        wait_before_screenshots,
        send_dom,
        matchlevel,
        stitchcontent,
        isdisabled,
    )
    strategy = utils.get_selector_strategy(selector)
    running_safari_ios = (
        variables.eyes.host_app == "Safari" and variables.eyes.host_os == "iOS"
    )
    if running_safari_ios:
        # Temporary workaround: on iOS Safari the element's y coordinate
        # ignores the address bar height (currently 71px), so locate the
        # element manually and offset the region before checking it.
        target_element = variables.driver.find_element(strategy, value)
        location = target_element.location
        size = target_element.size
        offset_region = Region(
            location["x"],
            location["y"] + 71,
            size["width"],
            size["height"],
        )
        variables.eyes.check_region(
            offset_region, name, matchtimeout, target, variables.stitchcontent
        )
    else:
        variables.eyes.check_region_by_selector(
            strategy,
            value,
            name,
            matchtimeout,
            target,
            variables.stitchcontent,
        )
    # Restore whatever configuration was active before this checkpoint.
    utils.update_properties(**saved_properties)
def check_eyes_region_in_frame_by_selector(
    self,
    framereference,
    value,
    name,
    selector="id",
    enable_eyes_log=None,
    enable_http_debug_log=None,
    matchtimeout=-1,
    target=None,
    hidescrollbars=None,
    wait_before_screenshots=None,
    send_dom=None,
    stitchcontent=None,
    matchlevel=None,
    isdisabled=None
):
    """
    Takes a snapshot of the region of the element found by calling
    find_element(by, value) inside a specific frame,
    and matches it with the expected output. With a choice from eight
    selectors, to check by on `Using Selectors` section.
    | =Arguments= | =Description= |
    | Frame Reference (str, int or WebElement) | *Mandatory* - Defines the frame to be checked. See below what arguments must be used as frame reference |
    | Value (str) | *Mandatory* - The specific value of the selector. e.g. a CSS SELECTOR value .first.expanded.dropdown |
    | Name (str) | *Mandatory* - Name that will be given to region in Eyes |
    | Selector (str) | *Mandatory* - The strategy to locate the element. The supported selectors are specified in `Using Selectors` |
    | Enable Eyes Log (bool) | Determines if the trace logs of Applitools Eyes SDK are activated for this checkpoint. Overrides the argument set on `Open Eyes Session` |
    | Enable HTTP Debug Log (bool) | The HTTP Debug logs will not be included by default. To activate, pass 'True' in the variable |
    | Match Timeout (int) | Determines how much time in milliseconds Eyes continue to retry the matching before declaring a mismatch on this test |
    | Target (Target) | The intended Target. See `Defining Ignore and Floating Regions` |
    | Hide Scrollbars (bool) | Sets if the scrollbars are hidden in the checkpoint, by passing 'True' or 'False' in the variable |
    | Wait Before Screenshots (int) | Determines the number of milliseconds that Eyes will wait before capturing the screenshot of this checkpoint. Overrides the argument set on `Open Eyes Session` |
    | Send DOM (bool) | Sets if DOM information should be sent for this checkpoint |
    | Stitch Content (bool) | Determines if Eyes will scroll this element to take a full element screenshot, when the element is scrollable |
    | Match Level (str) | The match level for the comparison of this checkpoint - can be STRICT, LAYOUT, CONTENT or EXACT |
    | Is Disabled (bool) | Determines whether or not interactions with Eyes will be silently ignored for this checkpoint |
    *Example:*
    | Check Eyes Region In Frame By Selector | FrameName | .first.expanded.dropdown | Css Element | css selector | ${true} | ${true} | 5000 |
    *Frame Reference*
    In order to locate the correct frame, you must use one of the following references:
    - Str: Name of the frame
    - Int: Index of frame, relative to the list of frames on the page
    - EyesWebElement or WebElement: The frame element
    *Note (Safari on mobile):*
    When checking an element, provide osname=iOS and browsername=Safari on `Open Eyes Session`.
    Due to an issue regarding the height of the address bar not being taken into account when the screenshot is taken, a temporary workaround is in place.
    In order to screenshot the correct element, it is added the value of 71 to the y coordinate of the element.
    """
    original_properties = utils.save_current_properties()
    utils.update_properties(None, enable_eyes_log, enable_http_debug_log, hidescrollbars, wait_before_screenshots, send_dom, matchlevel, stitchcontent, isdisabled)
    # Robot Framework passes every argument as text; a numeric string is a
    # frame index, anything else stays a frame name.
    if isinstance(framereference, six.text_type):
        try:
            framereference = int(framereference)
        except ValueError:
            # Bug fix: was a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit. Only a failed int-conversion
            # means "treat the reference as a frame name".
            framereference = str(framereference)
    selector_strategy = utils.get_selector_strategy(selector)
    # Temporary workaround in order to capture the correct element on Safari:
    # the element's y coordinate doesn't take the address bar height
    # (currently 71px) into consideration, so it has to be added manually.
    if variables.eyes.host_app == "Safari" and variables.eyes.host_os == "iOS":
        with variables.driver.switch_to.frame_and_back(framereference):
            element = variables.driver.find_element(selector_strategy, value)
            location = element.location
            size = element.size
            variables.eyes.check_region(
                Region(
                    location["x"],
                    location["y"] + 71,
                    size["width"],
                    size["height"],
                ),
                name,
                matchtimeout,
                target,
                variables.stitchcontent,
            )
    else:
        variables.eyes.check_region_in_frame_by_selector(
            framereference,
            selector_strategy,
            value,
            name,
            matchtimeout,
            target,
            variables.stitchcontent,
        )
    # Restore the configuration that was active before this checkpoint.
    utils.update_properties(**original_properties)
| 69.107143
| 222
| 0.513031
| 2,618
| 27,090
| 5.217723
| 0.111154
| 0.026647
| 0.011713
| 0.019912
| 0.854832
| 0.841069
| 0.841069
| 0.829209
| 0.818228
| 0.806881
| 0
| 0.004973
| 0.43588
| 27,090
| 391
| 223
| 69.283887
| 0.888889
| 0.673274
| 0
| 0.727723
| 0
| 0
| 0.012409
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.024752
| false
| 0
| 0.084158
| 0
| 0.113861
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
49a57f9d37d553c26e5cb8a0de81dc5d0128dc74
| 51,056
|
py
|
Python
|
authapi/tests/test_teams.py
|
praekeltfoundation/seed-auth-api
|
2238f7ecde2f75143bea0ac36f875793a19dde9b
|
[
"BSD-3-Clause"
] | null | null | null |
authapi/tests/test_teams.py
|
praekeltfoundation/seed-auth-api
|
2238f7ecde2f75143bea0ac36f875793a19dde9b
|
[
"BSD-3-Clause"
] | 2
|
2019-08-06T08:30:42.000Z
|
2020-02-12T06:32:54.000Z
|
authapi/tests/test_teams.py
|
praekeltfoundation/seed-auth-api
|
2238f7ecde2f75143bea0ac36f875793a19dde9b
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib.auth.models import User
from django.urls import reverse
from rest_framework import status
from authapi.serializers import (
TeamSerializer, OrganizationSummarySerializer, TeamSummarySerializer,
PermissionSerializer, UserSummarySerializer)
from authapi.models import SeedTeam, SeedOrganization, SeedPermission
from authapi.tests.base import AuthAPITestCase
class TeamTests(AuthAPITestCase):
def setUp(self):
    # Ensure every request body sent by self.client in these tests is
    # serialized as JSON (helper provided by AuthAPITestCase).
    self.patch_client_data_json()
def test_get_team_list(self):
    '''GET on the teams endpoint returns the serialized list of teams.'''
    _, admin_token = self.create_admin_user()
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + admin_token.key)
    organization = SeedOrganization.objects.create()
    teams = [
        SeedOrganization.objects.none() or SeedTeam.objects.create(organization=organization)
        for _ in range(2)
    ]
    url = reverse('seedteam-list')
    context = self.get_context(url)
    expected = [
        TeamSerializer(instance=team, context=context).data for team in teams
    ]
    response = self.client.get(url)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    by_id = lambda item: item['id']
    self.assertEqual(
        sorted(expected, key=by_id), sorted(response.data, key=by_id))
def test_get_team_list_archived(self):
    '''Archived teams must be excluded from the default team list.'''
    _, admin_token = self.create_admin_user()
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + admin_token.key)
    organization = SeedOrganization.objects.create(title='test org')
    team = SeedTeam.objects.create(organization=organization, title='test team')
    # Visible while active...
    listing = self.client.get(reverse('seedteam-list'))
    self.assertEqual(len(listing.data), 1)
    # ...hidden once archived.
    team.archived = True
    team.save()
    listing = self.client.get(reverse('seedteam-list'))
    self.assertEqual(len(listing.data), 0)
def test_get_team_list_archived_queryparam_true(self):
    '''With ?archived=true only archived teams should be returned.'''
    _, admin_token = self.create_admin_user()
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + admin_token.key)
    organization = SeedOrganization.objects.create(title='test org')
    team = SeedTeam.objects.create(organization=organization, title='test team')
    archived_url = '%s?archived=true' % reverse('seedteam-list')
    # An active team is not part of the archived listing.
    self.assertEqual(len(self.client.get(archived_url).data), 0)
    team.archived = True
    team.save()
    # After archiving it shows up.
    self.assertEqual(len(self.client.get(archived_url).data), 1)
def test_get_team_list_archived_queryparam_both(self):
    '''With ?archived=both the listing includes archived and active teams.'''
    _, admin_token = self.create_admin_user()
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + admin_token.key)
    organization = SeedOrganization.objects.create(title='test org')
    archived_team = SeedTeam.objects.create(
        organization=organization, title='test team')
    archived_team.archived = True
    archived_team.save()
    SeedTeam.objects.create(organization=organization, title='test team')
    # Default listing only shows the active team.
    self.assertEqual(
        len(self.client.get(reverse('seedteam-list')).data), 1)
    # archived=both shows both of them.
    both_url = '%s?archived=both' % reverse('seedteam-list')
    self.assertEqual(len(self.client.get(both_url).data), 2)
def test_get_team_list_archived_invalid_queryparam(self):
    '''An archived value outside {true, false, both} yields a 400 error.'''
    _, admin_token = self.create_admin_user()
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + admin_token.key)
    bad_url = '%s?archived=foo' % reverse('seedteam-list')
    response = self.client.get(bad_url)
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    self.assertEqual(response.data, {
        'archived': ['Must be one of [both, false, true]'],
    })
def test_get_team_list_filter_permission_type(self):
    '''?permission_contains restricts the list to teams whose permission
    type contains the given substring.'''
    _, admin_token = self.create_admin_user()
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + admin_token.key)
    organization = SeedOrganization.objects.create(title='test org')
    matching_team = SeedTeam.objects.create(
        title='team 1', organization=organization)
    matching_perm = matching_team.permissions.create(
        type='bar:foo:bar', object_id='2', namespace='bar')
    other_team = SeedTeam.objects.create(
        title='team 2', organization=organization)
    other_team.permissions.create(
        type='bar:bar:bar', object_id='3', namespace='foo')
    filtered_url = '%s?permission_contains=foo' % reverse('seedteam-list')
    response = self.client.get(filtered_url)
    self.assertEqual(len(response.data), 1)
    self.assertEqual(
        response.data[0]['permissions'][0]['type'], matching_perm.type)
def test_get_team_list_filter_permission_type_multiple(self):
    '''A team with several matching permissions appears exactly once.'''
    _, admin_token = self.create_admin_user()
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + admin_token.key)
    organization = SeedOrganization.objects.create()
    team = SeedTeam.objects.create(organization=organization)
    # Two permissions that both match the filter below.
    for obj_id in ('2', '3'):
        team.permissions.create(
            type='bar:foo:bar', object_id=obj_id, namespace='bar')
    filtered_url = '%s?permission_contains=foo' % reverse('seedteam-list')
    self.assertEqual(len(self.client.get(filtered_url).data), 1)
def test_get_team_list_filter_object_id(self):
    '''?object_id restricts the list to teams that hold a permission with
    that object id.'''
    _, admin_token = self.create_admin_user()
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + admin_token.key)
    organization = SeedOrganization.objects.create(title='test org')
    matching_team = SeedTeam.objects.create(
        title='team 1', organization=organization)
    matching_perm = matching_team.permissions.create(
        type='bar:foo:bar', object_id='2', namespace='bar')
    other_team = SeedTeam.objects.create(
        title='team 2', organization=organization)
    other_team.permissions.create(
        type='bar:bar:bar', object_id='3', namespace='foo')
    filtered_url = '%s?object_id=2' % reverse('seedteam-list')
    response = self.client.get(filtered_url)
    self.assertEqual(len(response.data), 1)
    self.assertEqual(
        response.data[0]['permissions'][0]['object_id'],
        matching_perm.object_id)
def test_get_team_list_filter_object_id_multiple(self):
    '''A team with several permissions sharing an object id appears once.'''
    _, admin_token = self.create_admin_user()
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + admin_token.key)
    organization = SeedOrganization.objects.create()
    team = SeedTeam.objects.create(organization=organization)
    # Two different permission types, same object id.
    for perm_type in ('bar:foo:bar', 'bar:bar:bar'):
        team.permissions.create(
            type=perm_type, object_id='2', namespace='bar')
    filtered_url = '%s?object_id=2' % reverse('seedteam-list')
    self.assertEqual(len(self.client.get(filtered_url).data), 1)
def test_get_team_list_filter_namespace(self):
    '''?namespace restricts the list to teams that hold a permission in
    that namespace.'''
    _, admin_token = self.create_admin_user()
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + admin_token.key)
    organization = SeedOrganization.objects.create(title='test org')
    matching_team = SeedTeam.objects.create(
        title='team 1', organization=organization)
    matching_perm = matching_team.permissions.create(
        type='bar:foo:bar', object_id='2', namespace='bar')
    other_team = SeedTeam.objects.create(
        title='team 2', organization=organization)
    other_team.permissions.create(
        type='bar:bar:bar', object_id='3', namespace='foo')
    filtered_url = '%s?namespace=bar' % reverse('seedteam-list')
    response = self.client.get(filtered_url)
    self.assertEqual(len(response.data), 1)
    self.assertEqual(
        response.data[0]['permissions'][0]['namespace'],
        matching_perm.namespace)
def test_get_team_list_filter_namespace_multiple(self):
    '''A team with several permissions in one namespace appears once.'''
    _, admin_token = self.create_admin_user()
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + admin_token.key)
    organization = SeedOrganization.objects.create()
    team = SeedTeam.objects.create(organization=organization)
    # Two different permission types, same namespace.
    for perm_type in ('bar:foo:bar', 'bar:bar:bar'):
        team.permissions.create(
            type=perm_type, object_id='2', namespace='bar')
    filtered_url = '%s?namespace=bar' % reverse('seedteam-list')
    self.assertEqual(len(self.client.get(filtered_url).data), 1)
def test_get_team_list_archived_users(self):
    '''Inactive users must not appear in a listed team's user list.'''
    _, admin_token = self.create_admin_user()
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + admin_token.key)
    organization = SeedOrganization.objects.create(title='test org')
    team = SeedTeam.objects.create(
        title='test team', organization=organization)
    member = User.objects.create_user('test user')
    team.users.add(member)
    # Active member is listed...
    listing = self.client.get(reverse('seedteam-list'))
    self.assertEqual(len(listing.data[0]['users']), 1)
    # ...deactivated member is not.
    member.is_active = False
    member.save()
    listing = self.client.get(reverse('seedteam-list'))
    self.assertEqual(len(listing.data[0]['users']), 0)
def test_permissions_team_list_unauthorized(self):
    '''Anonymous requests to the team list must be rejected with 401.'''
    response = self.client.get(reverse('seedteam-list'))
    self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_permissions_team_list_member_of_team(self):
    '''A user sees the teams they are a member of in the list.'''
    user, token = self.create_user()
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
    organization = SeedOrganization.objects.create()
    own_team = SeedTeam.objects.create(organization=organization)
    SeedTeam.objects.create(organization=organization)  # not a member
    own_team.users.add(user)
    [listed_team] = self.client.get(reverse('seedteam-list')).data
    self.assertEqual(listed_team['id'], str(own_team.pk))
def test_permissions_team_list_admin_permission(self):
    '''Teams that a user has 'team:admin' permission for should be
    displayed on the list, and other teams should not.'''
    url = reverse('seedteam-list')
    user, token = self.create_user()
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
    org = SeedOrganization.objects.create()
    team1 = SeedTeam.objects.create(organization=org)
    team2 = SeedTeam.objects.create(organization=org)
    self.add_permission(user, 'team:admin', team1.pk)
    response = self.client.get(url)
    listed_ids = [t['id'] for t in response.data]
    # Bug fix: the original only asserted `team2.pk not in [...]`, which is
    # trivially true because serialized ids are strings while pk is not —
    # and it never checked that the permitted team IS listed (the stated
    # purpose of the test). Compare as strings and assert both directions.
    self.assertIn(str(team1.pk), listed_ids)
    self.assertNotIn(str(team2.pk), listed_ids)
def test_permissions_team_list_org_member(self):
    '''Membership of an organization makes its teams visible in the list.'''
    user, token = self.create_user()
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
    organization = SeedOrganization.objects.create()
    organization.users.add(user)
    team = SeedTeam.objects.create(organization=organization)
    [listed_team] = self.client.get(reverse('seedteam-list')).data
    self.assertEqual(str(team.pk), listed_team['id'])
def test_permissions_team_list_org_admin(self):
    '''org:admin permission on an organization makes its teams visible.'''
    user, token = self.create_user()
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
    organization = SeedOrganization.objects.create()
    team = SeedTeam.objects.create(organization=organization)
    self.add_permission(user, 'org:admin', organization.pk)
    listed_ids = [
        entry['id']
        for entry in self.client.get(reverse('seedteam-list')).data
    ]
    self.assertTrue(str(team.pk) in listed_ids)
def test_permissions_team_list_admin(self):
    '''Admin users should be able to see all teams.'''
    url = reverse('seedteam-list')
    _, token = self.create_admin_user()
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
    org = SeedOrganization.objects.create()
    team = SeedTeam.objects.create(organization=org)
    response = self.client.get(url)
    [resp_team] = response.data
    # Bug fix: the original used assertTrue(a, b), where the second argument
    # is just a failure message, so the assertion could never fail. The
    # intent was an equality check between the team's pk and the listed id.
    self.assertEqual(str(team.pk), resp_team['id'])
def test_create_team(self):
    '''POSTing to the team list endpoint is not an allowed method.'''
    _, admin_token = self.create_admin_user()
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + admin_token.key)
    response = self.client.post(reverse('seedteam-list'), data={})
    self.assertEqual(
        response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_delete_team(self):
    '''DELETE archives a team rather than removing the database row.'''
    _, admin_token = self.create_admin_user()
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + admin_token.key)
    organization = SeedOrganization.objects.create(title='test org')
    team = SeedTeam.objects.create(
        organization=organization, title='test team')
    self.assertEqual(team.archived, False)
    self.client.delete(reverse('seedteam-detail', args=[team.id]))
    # The row still exists; only the archived flag flipped.
    team.refresh_from_db()
    self.assertEqual(team.archived, True)
def test_permission_delete_team_unauthorized(self):
    '''Anonymous users must not be able to delete a team.'''
    organization = SeedOrganization.objects.create()
    team = SeedTeam.objects.create(organization=organization)
    response = self.client.delete(
        reverse('seedteam-detail', args=(team.pk,)))
    self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_permission_delete_team_admin_permission(self):
    '''team:admin permission allows deleting that team only.'''
    organization = SeedOrganization.objects.create()
    permitted_team = SeedTeam.objects.create(organization=organization)
    forbidden_team = SeedTeam.objects.create(organization=organization)
    user, token = self.create_user()
    self.add_permission(user, 'team:admin', permitted_team.pk)
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
    # Allowed on the permitted team...
    response = self.client.delete(
        reverse('seedteam-detail', args=(permitted_team.pk,)))
    self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
    # ...forbidden on any other team.
    response = self.client.delete(
        reverse('seedteam-detail', args=(forbidden_team.pk,)))
    self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_permission_delete_team_org_admin_permission(self):
    '''org:admin on an organization allows deleting that org's teams only.'''
    permitted_org = SeedOrganization.objects.create()
    other_org = SeedOrganization.objects.create()
    permitted_team = SeedTeam.objects.create(organization=permitted_org)
    forbidden_team = SeedTeam.objects.create(organization=other_org)
    user, token = self.create_user()
    self.add_permission(user, 'org:admin', permitted_org.pk)
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
    # Allowed inside the administered organization...
    response = self.client.delete(
        reverse('seedteam-detail', args=(permitted_team.pk,)))
    self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
    # ...forbidden for a team of another organization.
    response = self.client.delete(
        reverse('seedteam-detail', args=(forbidden_team.pk,)))
    self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_permission_delete_team_admin_user(self):
    '''A site admin can delete any team.'''
    organization = SeedOrganization.objects.create()
    team = SeedTeam.objects.create(organization=organization)
    _, token = self.create_admin_user()
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
    response = self.client.delete(
        reverse('seedteam-detail', args=(team.pk,)))
    self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_update_team(self):
    '''PUT on a team's detail endpoint updates the stored team.'''
    _, admin_token = self.create_admin_user()
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + admin_token.key)
    organization = SeedOrganization.objects.create(title='test org')
    team = SeedTeam.objects.create(
        organization=organization, title='test team')
    payload = {
        'title': 'new team',
    }
    response = self.client.put(
        reverse('seedteam-detail', args=[team.id]), data=payload)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    # The new title must have been persisted.
    team.refresh_from_db()
    self.assertEqual(team.title, payload['title'])
def test_permission_update_team_unauthorized(self):
    '''Anonymous users must not be able to update a team.'''
    organization = SeedOrganization.objects.create()
    team = SeedTeam.objects.create(organization=organization)
    payload = {'title': 'test team'}
    response = self.client.put(
        reverse('seedteam-detail', args=(team.pk,)), data=payload)
    self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_permission_update_team_admin_permission(self):
    '''team:admin permission allows modifying that team only.'''
    organization = SeedOrganization.objects.create()
    permitted_team = SeedTeam.objects.create(organization=organization)
    forbidden_team = SeedTeam.objects.create(organization=organization)
    payload = {'title': 'test team'}
    user, token = self.create_user()
    self.add_permission(user, 'team:admin', permitted_team.pk)
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
    # Allowed on the permitted team...
    response = self.client.put(
        reverse('seedteam-detail', args=(permitted_team.pk,)), data=payload)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    # ...forbidden on any other team.
    response = self.client.put(
        reverse('seedteam-detail', args=(forbidden_team.pk,)), data=payload)
    self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_permission_update_team_org_admin_permission(self):
    '''org:admin on an organization allows updating that org's teams only.'''
    permitted_org = SeedOrganization.objects.create()
    other_org = SeedOrganization.objects.create()
    permitted_team = SeedTeam.objects.create(organization=permitted_org)
    forbidden_team = SeedTeam.objects.create(organization=other_org)
    payload = {'title': 'test team'}
    user, token = self.create_user()
    self.add_permission(user, 'org:admin', permitted_org.pk)
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
    # Allowed inside the administered organization...
    response = self.client.put(
        reverse('seedteam-detail', args=(permitted_team.pk,)), data=payload)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    # ...forbidden for a team of another organization.
    response = self.client.put(
        reverse('seedteam-detail', args=(forbidden_team.pk,)), data=payload)
    self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_permission_update_team_admin_user(self):
'''Admin users should be able to update any team.'''
org = SeedOrganization.objects.create()
team = SeedTeam.objects.create(organization=org)
data = {'title': 'test team'}
user, token = self.create_admin_user()
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
url = reverse('seedteam-detail', args=(team.pk,))
resp = self.client.put(url, data=data)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
def test_update_team_organization(self):
'''You shouldn't be able to change a team's organization.'''
_, token = self.create_admin_user()
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
org1 = SeedOrganization.objects.create(title='test org')
org2 = SeedOrganization.objects.create(title='test org')
team = SeedTeam.objects.create(organization=org1, title='test team')
url = reverse('seedteam-detail', args=[team.id])
data = {
'title': 'new title',
'organization': org2.pk,
}
response = self.client.put(url, data=data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {
'organization': ['This field can only be set on creation.']
})
def test_get_team(self):
'''A GET request to a team's endpoint should return that team's
details.'''
_, token = self.create_admin_user()
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
organization = SeedOrganization.objects.create()
team = SeedTeam.objects.create(organization=organization)
url = reverse('seedteam-detail', args=[team.id])
context = self.get_context(url)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
expected = TeamSerializer(instance=team, context=context)
self.assertEqual(response.data, expected.data)
def test_permission_get_team_unauthorized(self):
'''Only authorized users should be able to access team details.'''
org = SeedOrganization.objects.create()
team = SeedTeam.objects.create(organization=org)
url = reverse('seedteam-detail', args=(team.pk,))
resp = self.client.get(url)
self.assertEqual(resp.status_code, status.HTTP_401_UNAUTHORIZED)
def test_permission_get_team_member_of_team(self):
'''Users that are a member of a team should be able to access that
team's details.'''
org = SeedOrganization.objects.create()
team = SeedTeam.objects.create(organization=org)
user, token = self.create_user()
team.users.add(user)
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
url = reverse('seedteam-detail', args=(team.pk,))
resp = self.client.get(url)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
def test_permission_get_team_admin_permission(self):
'''Users that have a team:admin permissions for the team should be able
to see the team details.'''
org = SeedOrganization.objects.create()
team1 = SeedTeam.objects.create(organization=org)
team2 = SeedTeam.objects.create(organization=org)
user, token = self.create_user()
self.add_permission(user, 'team:admin', team1.pk)
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
url = reverse('seedteam-detail', args=(team2.pk,))
resp = self.client.get(url)
self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
url = reverse('seedteam-detail', args=(team1.pk,))
resp = self.client.get(url)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
def test_permission_get_team_org_member(self):
'''Users that are members of a team's organization should be able to
see the team's details.'''
org = SeedOrganization.objects.create()
team = SeedTeam.objects.create(organization=org)
user, token = self.create_user()
org.users.add(user)
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
url = reverse('seedteam-detail', args=(team.pk,))
resp = self.client.get(url)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
def test_permission_get_team_org_admin(self):
'''Users that have an org:admin permission for a team's organization
should be able to see the team details.'''
org1 = SeedOrganization.objects.create()
org2 = SeedOrganization.objects.create()
team1 = SeedTeam.objects.create(organization=org1)
team2 = SeedTeam.objects.create(organization=org2)
user, token = self.create_user()
self.add_permission(user, 'org:admin', org1.pk)
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
url = reverse('seedteam-detail', args=(team2.pk,))
resp = self.client.get(url)
self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
url = reverse('seedteam-detail', args=(team1.pk,))
resp = self.client.get(url)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
def test_permission_get_team_admin(self):
'''Admin users should have read access to all teams.'''
org = SeedOrganization.objects.create()
team = SeedTeam.objects.create(organization=org)
_, token = self.create_admin_user()
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
url = reverse('seedteam-detail', args=(team.pk,))
resp = self.client.get(url)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
def test_serializer(self):
'''The TeamSerializer should return the correct information.'''
organization = SeedOrganization.objects.create()
team = SeedTeam.objects.create(
organization=organization, title='test team')
user = User.objects.create_user('foo@bar.org')
team.users.add(user)
permission = SeedPermission.objects.create()
team.permissions.add(permission)
url = self.get_full_url('seedteam-detail', args=[team.id])
context = self.get_context(url)
data = TeamSerializer(instance=team, context=context).data
self.assertEqual(data, {
'title': team.title,
'url': url,
'organization': OrganizationSummarySerializer(
instance=organization, context=context).data,
'permissions': [
PermissionSerializer(instance=permission, context=context).data
],
'id': str(team.id),
'users': [
UserSummarySerializer(instance=user, context=context).data],
'archived': team.archived,
})
def test_summary_serializer(self):
'''The TeamSummarySerializer should return the correct summary
information.'''
organization = SeedOrganization.objects.create()
team = SeedTeam.objects.create(organization=organization)
url = self.get_full_url('seedteam-detail', args=[team.id])
context = self.get_context(url)
data = TeamSummarySerializer(instance=team, context=context).data
self.assertEqual(data, {
'url': url,
'id': str(team.id)
})
def test_add_permission_to_team(self):
'''When adding a permission to a team, it should create a permission
and link it to that team.'''
_, token = self.create_admin_user()
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
org = SeedOrganization.objects.create(title='test org')
team = SeedTeam.objects.create(title='test team', organization=org)
self.assertEqual(len(team.permissions.all()), 0)
data = {
'type': 'foo:bar',
'object_id': '2',
'namespace': 'foo',
}
response = self.client.post(
reverse('seedteam-permissions-list', args=[team.id]), data=data)
[permission] = SeedPermission.objects.all()
self.assertEqual(response.data, {
'type': data['type'],
'object_id': data['object_id'],
'namespace': data['namespace'],
'id': str(permission.id)
})
self.assertEqual(len(team.permissions.all()), 1)
def test_permission_add_permission_unauthenticated(self):
'''Unauthenticated users should not be allowed to add permissions to
any teams.'''
org = SeedOrganization.objects.create(title='test org')
team = SeedTeam.objects.create(title='test team', organization=org)
data = {
'type': 'foo:bar',
'object_id': '2',
'namespace': 'foo',
}
response = self.client.post(
reverse('seedteam-permissions-list', args=[team.id]), data=data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_permission_add_permission_no_permission(self):
'''Users that don't have the correct permissions shouldn't be allowed
to add permissions to any teams.'''
_, token = self.create_user()
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
org = SeedOrganization.objects.create(title='test org')
team = SeedTeam.objects.create(title='test team', organization=org)
data = {
'type': 'foo:bar',
'object_id': '2',
'namespace': 'foo',
}
response = self.client.post(
reverse('seedteam-permissions-list', args=[team.id]), data=data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_permission_add_permission_read_access(self):
'''Users that have read access to a team can add permissions that
aren't org:admin or team:admin.'''
user, token = self.create_user()
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
org = SeedOrganization.objects.create(title='test org')
team = SeedTeam.objects.create(title='test team', organization=org)
team.users.add(user)
data = {
'type': 'foo:bar',
'object_id': '2',
'namespace': 'foo',
}
response = self.client.post(reverse(
'seedteam-permissions-list', args=[team.id]), data=data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_permission_add_permission_team_admin(self):
'''Users with team:admin for that team should be able to add any
permission except for org:admin to that team.'''
user, token = self.create_user()
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
org = SeedOrganization.objects.create(title='test org')
team = SeedTeam.objects.create(title='test team', organization=org)
team2, _ = self.add_permission(user, 'team:admin', team.pk)
# Correct team
response = self.client.post(
reverse('seedteam-permissions-list', args=[team.id]), data={
'type': 'team:admin',
'object_id': team.pk,
'namespace': '__auth__',
})
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
# Incorrect team
response = self.client.post(
reverse('seedteam-permissions-list', args=[team.id]), data={
'type': 'team:admin',
'object_id': team2.pk,
'namespace': '__auth__',
})
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
# org:admin
response = self.client.post(
reverse('seedteam-permissions-list', args=[team.id]), data={
'type': 'org:admin',
'object_id': org.pk,
'namespace': '__auth__',
})
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
# other
response = self.client.post(
reverse('seedteam-permissions-list', args=[team.id]), data={
'type': 'foo:bar',
'object_id': '7',
'namespace': '__auth__',
})
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_permission_add_permission_org_admin(self):
'''Users with org:admin should be able to add any permission to any of
that org's teams, except for org:admin where object_id is not the org
id that they are admin for.'''
user, token = self.create_user()
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
org = SeedOrganization.objects.create(title='test org')
org2 = SeedOrganization.objects.create(title='test org')
team = SeedTeam.objects.create(title='test team', organization=org)
team2, _ = self.add_permission(user, 'org:admin', org.pk)
# incorrect team object_id
response = self.client.post(
reverse('seedteam-permissions-list', args=[team.id]), data={
'type': 'team:admin',
'object_id': team2.pk,
'namespace': '__auth__',
})
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
# org:admin
response = self.client.post(
reverse('seedteam-permissions-list', args=[team.id]), data={
'type': 'org:admin',
'object_id': org.pk,
'namespace': '__auth__',
})
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
# org:admin incorrect org
response = self.client.post(
reverse('seedteam-permissions-list', args=[team.id]), data={
'type': 'org:admin',
'object_id': org2.pk,
'namespace': '__auth__',
})
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
# other
response = self.client.post(
reverse('seedteam-permissions-list', args=[team.id]), data={
'type': 'foo:bar',
'object_id': '7',
'namespace': 'foo',
})
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_remove_permission_from_team(self):
'''When removing a permission from a team, it should remove the
relation between the team and permission, and delete that
permission.'''
_, token = self.create_admin_user()
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
org = SeedOrganization.objects.create(title='test org')
team = SeedTeam.objects.create(title='test team', organization=org)
permission = team.permissions.create(
type='foo:bar', object_id='2', namespace='foo')
self.assertEqual(len(team.permissions.all()), 1)
response = self.client.delete(
reverse(
'seedteam-permissions-detail', args=[team.id, permission.id]))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(len(team.permissions.all()), 0)
self.assertEqual(len(SeedPermission.objects.all()), 0)
def test_permission_remove_permission_unauthenticated(self):
'''Unauthenticated users should not be allowed to remove permissions
from any teams.'''
org = SeedOrganization.objects.create(title='test org')
team = SeedTeam.objects.create(title='test team', organization=org)
data = {
'type': 'foo:bar',
'object_id': '2',
'namespace': 'foo',
}
permission = SeedPermission.objects.create(**data)
response = self.client.delete(
reverse('seedteam-permissions-detail',
args=[team.id, permission.id]))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_permission_remove_permission_no_permission(self):
'''Users that don't have the correct permissions shouldn't be allowed
to remove permissions from any teams.'''
_, token = self.create_user()
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
org = SeedOrganization.objects.create(title='test org')
team = SeedTeam.objects.create(title='test team', organization=org)
data = {
'type': 'foo:bar',
'object_id': '2',
'namespace': 'foo',
}
permission = SeedPermission.objects.create(**data)
response = self.client.delete(reverse(
'seedteam-permissions-detail', args=[team.id, permission.id]))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_permission_remove_permission_read_access(self):
'''Any user that has read access to a team can remove team permissions
that aren't org:admin or team:admin.'''
user, token = self.create_user()
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
org = SeedOrganization.objects.create(title='test org')
team = SeedTeam.objects.create(title='test team', organization=org)
team.users.add(user)
data = {
'type': 'foo:bar',
'object_id': '2',
'namespace': 'foo',
}
permission = SeedPermission.objects.create(**data)
team.permissions.add(permission)
response = self.client.delete(reverse(
'seedteam-permissions-detail', args=[team.id, permission.id]))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_permission_remove_permission_team_admin(self):
'''Users with team:admin for that team should be able to remove any
permission except for org:admin from that team.'''
user, token = self.create_user()
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
org = SeedOrganization.objects.create(title='test org')
team = SeedTeam.objects.create(title='test team', organization=org)
team2, _ = self.add_permission(user, 'team:admin', team.pk)
self.add_permission(user, 'team:admin', team.pk)
# org:admin
permission = SeedPermission.objects.create(
type='org:admin', object_id=org.pk, namespace='__auth__')
team.permissions.add(permission)
response = self.client.delete(
reverse('seedteam-permissions-detail',
args=[team.id, permission.id]))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
# Wrong team
wrong_team = SeedTeam.objects.create(
title='test team 2', organization=org)
permission = wrong_team.permissions.create(
type='team:admin', object_id=wrong_team.pk, namespace='__auth__')
response = self.client.delete(
reverse('seedteam-permissions-detail',
args=[wrong_team.id, permission.id]))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
# other
permission = SeedPermission.objects.create(
type='foo:bar', object_id='7', namespace='foo')
team.permissions.add(permission)
response = self.client.delete(
reverse('seedteam-permissions-detail',
args=[team.id, permission.id]))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_permission_remove_permission_org_admin(self):
'''Users with org:admin should be able to remove any permission from
any of that org's teams, except for org:admin where object_id is not
the org id that they are admin for.'''
user, token = self.create_user()
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
org = SeedOrganization.objects.create(title='test org')
org2 = SeedOrganization.objects.create(title='test org')
team = SeedTeam.objects.create(title='test team', organization=org)
team2, _ = self.add_permission(user, 'org:admin', org.pk)
# incorrect team object_id
permission = SeedPermission.objects.create(
type='team:admin', object_id=team2.pk, namespace='__auth__')
team2.permissions.add(permission)
response = self.client.delete(
reverse('seedteam-permissions-detail',
args=[team.id, permission.id]))
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
# org:admin
permission = SeedPermission.objects.create(
type='org:admin', object_id=org.pk, namespace='__auth__')
team.permissions.add(permission)
response = self.client.delete(
reverse('seedteam-permissions-detail',
args=[team.id, permission.id]))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
# org:admin incorrect org
permission = SeedPermission.objects.create(
type='org:admin', object_id=org2.pk, namespace='__auth__')
team2.permissions.add(permission)
response = self.client.delete(
reverse('seedteam-permissions-detail',
args=[team.id, permission.id]))
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
# other
permission = SeedPermission.objects.create(
type='foo:bar', object_id='7', namespace='foo')
team.permissions.add(permission)
response = self.client.delete(
reverse('seedteam-permissions-detail',
args=[team.id, permission.id]))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_add_user_to_team(self):
'''Adding a user to a team should create a relationship between the
two.'''
_, token = self.create_admin_user()
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
org = SeedOrganization.objects.create(title='test org')
team = SeedTeam.objects.create(title='test team', organization=org)
user = User.objects.create_user(username='test@example.org')
self.assertEqual(len(team.users.all()), 0)
response = self.client.put(
reverse('seedteam-users-detail', args=[team.id, user.pk]))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
team.refresh_from_db()
self.assertEqual(len(team.users.all()), 1)
def test_add_user_to_team_idempotent(self):
'''Adding a user to a team should be idempotent.'''
_, token = self.create_admin_user()
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
org = SeedOrganization.objects.create(title='test org')
team = SeedTeam.objects.create(title='test team', organization=org)
user = User.objects.create_user(username='test@example.org')
self.assertEqual(len(team.users.all()), 0)
response = self.client.put(
reverse('seedteam-users-detail', args=[team.id, user.pk]))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
response = self.client.put(
reverse('seedteam-users-detail', args=[team.id, user.pk]))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
team.refresh_from_db()
self.assertEqual(len(team.users.all()), 1)
def test_permission_add_user_to_team_unauthenticated(self):
'''Unauthenticated users should not be able to add users to teams.'''
org = SeedOrganization.objects.create()
team = SeedTeam.objects.create(organization=org)
user = User.objects.create_user('test user')
response = self.client.put(reverse(
'seedteam-users-detail', args=[team.pk, user.pk]))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_permission_add_user_to_team_no_permission(self):
'''Users without the correct permissions should not be able to add
users to teams.'''
org = SeedOrganization.objects.create()
team = SeedTeam.objects.create(organization=org)
user = User.objects.create_user('test user')
_, token = self.create_user()
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.put(reverse(
'seedteam-users-detail', args=[team.pk, user.pk]))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_permission_add_user_to_team_team_admin(self):
'''Users with team:admin permission should only be able to add users
to that team.'''
org = SeedOrganization.objects.create()
team1 = SeedTeam.objects.create(organization=org)
team2 = SeedTeam.objects.create(organization=org)
user = User.objects.create_user('test user')
authuser, token = self.create_user()
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
self.add_permission(authuser, 'team:admin', team1.pk)
# Correct team
response = self.client.put(reverse(
'seedteam-users-detail', args=[team1.pk, user.pk]))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
# Incorrect team
response = self.client.put(reverse(
'seedteam-users-detail', args=[team2.pk, user.pk]))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_permission_add_user_to_team_org_admin(self):
'''Users with org:admin permission should only be able to add users
to that organization's teams.'''
org1 = SeedOrganization.objects.create()
org2 = SeedOrganization.objects.create()
team1 = SeedTeam.objects.create(organization=org1)
team2 = SeedTeam.objects.create(organization=org2)
user = User.objects.create_user('test user')
authuser, token = self.create_user()
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
self.add_permission(authuser, 'org:admin', org1.pk)
# Correct org
response = self.client.put(reverse(
'seedteam-users-detail', args=[team1.pk, user.pk]))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
# Incorrect org
response = self.client.put(reverse(
'seedteam-users-detail', args=[team2.pk, user.pk]))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_remove_user_from_team(self):
'''Removing a user from a team should remove the relationship between
the two.'''
_, token = self.create_admin_user()
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
org = SeedOrganization.objects.create(title='test org')
team = SeedTeam.objects.create(title='test team', organization=org)
user = User.objects.create_user(username='test@example.org')
team.users.add(user)
self.assertEqual(len(team.users.all()), 1)
response = self.client.delete(
reverse('seedteam-users-detail', args=[team.id, user.id]))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
team.refresh_from_db()
self.assertEqual(len(team.users.all()), 0)
def test_permission_remove_user_from_team_unauthenticated(self):
'''Unauthenticated users should not be able to remove users from
teams.'''
org = SeedOrganization.objects.create()
team = SeedTeam.objects.create(organization=org)
user = User.objects.create_user('test user')
team.users.add(user)
response = self.client.delete(reverse(
'seedteam-users-detail', args=[team.pk, user.pk]))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_permission_remove_user_from_team_no_permission(self):
'''Users without the correct permissions should not be able to remove
users from teams.'''
org = SeedOrganization.objects.create()
team = SeedTeam.objects.create(organization=org)
user = User.objects.create_user('test user')
team.users.add(user)
_, token = self.create_user()
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.delete(reverse(
'seedteam-users-detail', args=[team.pk, user.pk]))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_permission_remove_user_from_team_team_admin(self):
'''Users with team:admin permission should only be able to remove users
from that team.'''
org = SeedOrganization.objects.create()
team1 = SeedTeam.objects.create(organization=org)
team2 = SeedTeam.objects.create(organization=org)
user = User.objects.create_user('test user')
team1.users.add(user)
team2.users.add(user)
authuser, token = self.create_user()
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
self.add_permission(authuser, 'team:admin', team1.pk)
# Correct team
response = self.client.delete(reverse(
'seedteam-users-detail', args=[team1.pk, user.pk]))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
# Incorrect team
response = self.client.delete(reverse(
'seedteam-users-detail', args=[team2.pk, user.pk]))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_permission_remove_user_from_team_org_admin(self):
'''Users with org:admin permission should only be able to remove users
from that organization's teams.'''
org1 = SeedOrganization.objects.create()
org2 = SeedOrganization.objects.create()
team1 = SeedTeam.objects.create(organization=org1)
team2 = SeedTeam.objects.create(organization=org2)
user = User.objects.create_user('test user')
team1.users.add(user)
team2.users.add(user)
authuser, token = self.create_user()
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
self.add_permission(authuser, 'org:admin', org1.pk)
# Correct org
response = self.client.delete(reverse(
'seedteam-users-detail', args=[team1.pk, user.pk]))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
# Incorrect org
response = self.client.delete(reverse(
'seedteam-users-detail', args=[team2.pk, user.pk]))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
| 44.473868
| 79
| 0.648817
| 6,007
| 51,056
| 5.375062
| 0.037623
| 0.067239
| 0.050081
| 0.040263
| 0.90529
| 0.879924
| 0.865244
| 0.843781
| 0.814111
| 0.803642
| 0
| 0.009578
| 0.233117
| 51,056
| 1,147
| 80
| 44.512642
| 0.815064
| 0.110193
| 0
| 0.803318
| 0
| 0
| 0.091863
| 0.022037
| 0
| 0
| 0
| 0
| 0.127962
| 1
| 0.074645
| false
| 0
| 0.007109
| 0
| 0.082938
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
49a805c40ee286370cde3ad188f7cc58e984ee8a
| 22,216
|
py
|
Python
|
ojuser/tests.py
|
BUPT-XJBGroup/BOJ-V4
|
31078ab998d0a786c6742b8f7c65f2e4d9642844
|
[
"MIT"
] | null | null | null |
ojuser/tests.py
|
BUPT-XJBGroup/BOJ-V4
|
31078ab998d0a786c6742b8f7c65f2e4d9642844
|
[
"MIT"
] | null | null | null |
ojuser/tests.py
|
BUPT-XJBGroup/BOJ-V4
|
31078ab998d0a786c6742b8f7c65f2e4d9642844
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import TestCase, override_settings
from rest_framework import status
from rest_framework.test import APITestCase
from django.core import mail
from .models import Language, GroupProfile
from django.contrib.auth.models import User
from account.models import EmailConfirmation
class SignupViewTestCase(TestCase):
    """Exercise the account signup view in various authentication states."""

    def _signup_data(self, **extra):
        """Return a fresh, valid signup form payload, with overrides."""
        payload = {
            "username": "foo",
            "password": "bar",
            "password_confirm": "bar",
            "email": "foobar@example.com",
            "nickname": "goo",
            "gender": "S",
        }
        payload.update(extra)
        return payload

    def test_get(self):
        """The signup page renders for anonymous visitors."""
        response = self.client.get(reverse("account_signup"))
        self.assertEqual(response.status_code, 200)

    def test_post(self):
        """A valid signup submission responds with a redirect."""
        response = self.client.post(
            reverse("account_signup"), self._signup_data())
        self.assertEqual(response.status_code, 302)

    def test_get_authenticated(self):
        """Logged-in visitors are redirected away from the signup page."""
        User.objects.create_user("foo", password="bar")
        self.client.login(username="foo", password="bar")
        with self.settings(ACCOUNT_LOGIN_REDIRECT_URL="/logged-in/"):
            response = self.client.get(reverse("account_signup"))
            self.assertRedirects(
                response, "/logged-in/", fetch_redirect_response=False)

    def test_post_authenticated(self):
        """Logged-in visitors POSTing to signup receive a 404."""
        User.objects.create_user("foo", password="bar")
        self.client.login(username="foo", password="bar")
        with self.settings(ACCOUNT_LOGIN_REDIRECT_URL="/logged-in/"):
            response = self.client.post(
                reverse("account_signup"),
                self._signup_data(code="abc123"))
            self.assertEqual(response.status_code, 404)

    def test_get_next_url(self):
        """A `next` querystring parameter controls the post-signup redirect.

        NOTE(review): despite the name, this submits via POST.
        """
        next_url = "/next-url/"
        target = "{}?next={}".format(reverse("account_signup"), next_url)
        response = self.client.post(target, self._signup_data())
        self.assertRedirects(
            response, next_url, fetch_redirect_response=False)

    def test_post_next_url(self):
        """A `next` form field controls the post-signup redirect."""
        next_url = "/next-url/"
        response = self.client.post(
            reverse("account_signup"), self._signup_data(next=next_url))
        self.assertRedirects(
            response, next_url, fetch_redirect_response=False)

    def test_session_next_url(self):
        """A session `redirect_to` value controls the post-signup redirect."""
        next_url = "/next-url/"
        session = self.client.session
        session["redirect_to"] = next_url
        session.save()
        response = self.client.post(
            reverse("account_signup"), self._signup_data())
        self.assertRedirects(
            response, next_url, fetch_redirect_response=False)
class LoginViewTestCase(TestCase):
    """Exercise the account login view."""

    def signup(self):
        """Register a throwaway account, then log it out again."""
        self.client.post(reverse("account_signup"), {
            "username": "foo",
            "password": "bar",
            "password_confirm": "bar",
            "email": "foobar@example.com",
            "nickname": "goo",
            "gender": "S",
        })
        self.client.logout()

    def test_get(self):
        """The login page renders with its template."""
        response = self.client.get(reverse("account_login"))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.template_name, ["account/login.html"])

    def test_post_wrong_password(self):
        """A bad password re-renders the form instead of redirecting."""
        self.signup()
        response = self.client.post(reverse("account_login"), {
            "username": "foo",
            "password": "1234",
        })
        self.assertEqual(response.status_code, 200)

    def test_post_empty(self):
        """Submitting no data leaves the form invalid."""
        response = self.client.post(reverse("account_login"), {})
        self.assertEqual(response.status_code, 200)
        self.assertFalse(response.context["form"].is_valid())

    @override_settings(
        AUTHENTICATION_BACKENDS=[
            "account.auth_backends.UsernameAuthenticationBackend",
        ]
    )
    def test_post_success(self):
        """Correct credentials redirect to the configured landing page."""
        self.signup()
        response = self.client.post(reverse("account_login"), {
            "username": "foo",
            "password": "bar",
        })
        self.assertRedirects(
            response,
            settings.ACCOUNT_LOGIN_REDIRECT_URL,
            fetch_redirect_response=False,
        )
class LogoutViewTestCase(TestCase):
    """Exercise the account logout view."""

    def signup(self):
        """Register (and remain logged in as) a throwaway account."""
        self.client.post(reverse("account_signup"), {
            "username": "foo",
            "password": "bar",
            "password_confirm": "bar",
            "email": "foobar@example.com",
            "nickname": "goo",
            "gender": "S",
        })

    def test_get_anonymous(self):
        """Anonymous GET is bounced straight to the logout redirect URL."""
        response = self.client.get(reverse("account_logout"))
        self.assertRedirects(
            response,
            settings.ACCOUNT_LOGOUT_REDIRECT_URL,
            fetch_redirect_response=False,
        )

    def test_get_authenticated(self):
        """Authenticated GET shows the logout confirmation page."""
        self.signup()
        response = self.client.get(reverse("account_logout"))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.template_name, ["account/logout.html"])

    def test_post_anonymous(self):
        """Anonymous POST redirects to the logout redirect URL."""
        response = self.client.post(reverse("account_logout"), {})
        self.assertRedirects(
            response,
            settings.ACCOUNT_LOGOUT_REDIRECT_URL,
            fetch_redirect_response=False,
        )

    def test_post_authenticated(self):
        """Authenticated POST logs out and redirects."""
        self.signup()
        response = self.client.post(reverse("account_logout"), {})
        self.assertRedirects(
            response,
            settings.ACCOUNT_LOGOUT_REDIRECT_URL,
            fetch_redirect_response=False,
        )
class ConfirmEmailViewTestCase(TestCase):
    """Exercise the email-confirmation view."""

    def signup(self):
        """Register an account and return its pending EmailConfirmation."""
        self.client.post(reverse("account_signup"), {
            "username": "foo",
            "password": "bar",
            "password_confirm": "bar",
            "email": "foobar@example.com",
            "nickname": "goo",
            "gender": "S",
        })
        return EmailConfirmation.objects.get()

    def test_get_good_key(self):
        """A valid confirmation key renders the confirmation page."""
        confirmation = self.signup()
        url = reverse(
            "account_confirm_email", kwargs={"key": confirmation.key})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            response.template_name, ["account/email_confirm.html"])

    def test_get_bad_key(self):
        """An unknown confirmation key yields a 404."""
        url = reverse("account_confirm_email", kwargs={"key": "badkey"})
        self.assertEqual(self.client.get(url).status_code, 404)

    @override_settings(ACCOUNT_EMAIL_CONFIRMATION_REQUIRED=True)
    def test_post_required(self):
        """With confirmation required, confirming redirects anonymously."""
        confirmation = self.signup()
        url = reverse(
            "account_confirm_email", kwargs={"key": confirmation.key})
        response = self.client.post(url, {})
        self.assertRedirects(
            response,
            reverse(
                settings.ACCOUNT_EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL),
            fetch_redirect_response=False,
        )

    @override_settings(ACCOUNT_EMAIL_CONFIRMATION_REQUIRED=False)
    def test_post_not_required(self):
        """Without confirmation required, confirming goes to login URL."""
        confirmation = self.signup()
        url = reverse(
            "account_confirm_email", kwargs={"key": confirmation.key})
        response = self.client.post(url, {})
        self.assertRedirects(
            response,
            settings.ACCOUNT_LOGIN_REDIRECT_URL,
            fetch_redirect_response=False,
        )
class ChangePasswordViewTestCase(TestCase):
    """Exercise the password-change view for anonymous and logged-in users."""

    def signup(self):
        """Register a user, clear the signup mail, and return the User row."""
        self.client.post(reverse("account_signup"), {
            "username": "foo",
            "password": "bar",
            "password_confirm": "bar",
            "email": "foobar@example.com",
            "nickname": "goo",
            "gender": "S",
        })
        # Drop the signup confirmation mail so later outbox counts are clean.
        mail.outbox = []
        return User.objects.get(username="foo")

    def test_get_anonymous(self):
        # Anonymous visitors are pointed at the password-reset flow instead.
        response = self.client.get(reverse("account_password"))
        self.assertRedirects(
            response,
            reverse("account_password_reset"),
            fetch_redirect_response=False,
        )

    def test_get_authenticated(self):
        self.signup()
        response = self.client.get(reverse("account_password"))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.template_name, ["account/password_change.html"])

    def test_post_anonymous(self):
        # POSTing without a session is rejected outright.
        response = self.client.post(reverse("account_password"), {
            "password_current": "password",
            "password_new": "new-password",
            "password_new_confirm": "new-password",
        })
        self.assertEqual(response.status_code, 403)

    def test_post_authenticated_success(self):
        user = self.signup()
        response = self.client.post(reverse("account_password"), {
            "password_current": "bar",
            "password_new": "new-bar",
            "password_new_confirm": "new-bar",
        })
        self.assertRedirects(
            response,
            reverse(settings.ACCOUNT_PASSWORD_CHANGE_REDIRECT_URL),
            fetch_redirect_response=False,
        )
        # The stored hash must have changed and one notification mail gone out.
        updated_user = User.objects.get(username=user.username)
        self.assertNotEqual(user.password, updated_user.password)
        self.assertEqual(len(mail.outbox), 1)

    @override_settings(ACCOUNT_NOTIFY_ON_PASSWORD_CHANGE=False)
    def test_post_authenticated_success_no_mail(self):
        self.signup()
        response = self.client.post(reverse("account_password"), {
            "password_current": "bar",
            "password_new": "new-bar",
            "password_new_confirm": "new-bar",
        })
        self.assertRedirects(
            response,
            reverse(settings.ACCOUNT_PASSWORD_CHANGE_REDIRECT_URL),
            fetch_redirect_response=False,
        )
        # Notification mail suppressed by the setting.
        self.assertEqual(len(mail.outbox), 0)
class ProfilesTestCase(TestCase):
    """Exercise the profile page rendering and update flow."""

    def setUp(self):
        # Register a user, create one language fixture, then log in.
        self.client.post(reverse("account_signup"), {
            "username": "foo",
            "password": "bar",
            "password_confirm": "bar",
            "email": "foobar@example.com",
            "nickname": "foobar",
            "gender": "M",
        })
        Language.objects.create(key="gcc", name='GUN C', desc='gcc 11')
        self.client.login(username='foo', password='bar')

    def test_get(self):
        response = self.client.get(reverse("account_profiles"))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.template_name, ["account/profiles.html"])
        # The form must be pre-populated from the signup data above.
        self.assertContains(response, 'foo')
        expected_fragments = (
            '''name="nickname" type="text" value="foobar"''',
            '''<option value="M" selected="selected">Male</option>''',
            '''<option value="1" selected="selected">GUN C</option>''',
        )
        for fragment in expected_fragments:
            self.assertContains(response, fragment)

    def test_post_success(self):
        # A valid update POST redirects back to the profile page.
        payload = {
            "nickname": "google",
            "gender": "F",
            "prefer_lang": 1,
        }
        response = self.client.post(reverse("account_profiles"), payload)
        self.assertRedirects(
            response,
            reverse("account_profiles"),
            fetch_redirect_response=False,
        )
class MyGroupsCreateTestCase(TestCase):
    """Permission checks on the "mygroup-create" view.

    setUp builds a global superuser plus, for each group letter, a staff
    creator (admin_<x>0), a staff admin (admin_<x>1), a non-staff admin
    (admin_<x>2) and two plain users (user_<x>0/1).  Every account uses
    username == email == password for login convenience.
    """

    def setUp(self):
        # Global superuser.
        name = 'admin_A0'
        user = User.objects.create_user(name, name, name)
        user.is_superuser = True
        user.is_staff = True
        user.save()
        for ch in range(ord('a'), ord('b') + 1):
            # Staff user who becomes the group's superadmin.
            name = 'admin_' + chr(ch) + '0'
            user = User.objects.create_user(name, name, name)
            user.is_staff = True
            user.save()
            group_name = 'group_' + chr(ch)
            if ch == ord('a'):
                gp = GroupProfile.objects.create(
                    name=group_name, nickname=group_name, superadmin=user)
            else:
                # BUGFIX: use floor division -- plain "/" is float division on
                # Python 3 and the pk lookup wants the integer parent pk.
                parent = GroupProfile.objects.get(pk=(ch - ord('a') + 1) // 2)
                gp = GroupProfile.objects.create(
                    name=group_name, nickname=group_name, superadmin=user,
                    parent=parent)
            # Staff admin of the group.
            name = 'admin_' + chr(ch) + '1'
            user = User.objects.create_user(name, name, name)
            user.is_staff = True
            user.save()
            gp.admin_group.user_set.add(user)
            # Non-staff admin of the group.
            name = 'admin_' + chr(ch) + '2'
            user = User.objects.create_user(name, name, name)
            gp.admin_group.user_set.add(user)
            # Plain users.  NOTE(review): these are added to admin_group here,
            # unlike the sibling list test which puts them in user_group --
            # confirm whether that is intentional.
            name = 'user_' + chr(ch) + '0'
            user = User.objects.create_user(name, name, name)
            gp.admin_group.user_set.add(user)
            name = 'user_' + chr(ch) + '1'
            user = User.objects.create_user(name, name, name)
            gp.admin_group.user_set.add(user)

    def test_admin_create_group(self):
        # Superusers may open the creation form.
        self.client.login(username='admin_A0', password='admin_A0')
        response = self.client.get(reverse("mygroup-create"))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.template_name, ["ojuser/group_create_form.html"])

    def test_staff_create_group(self):
        # Staff users may open the creation form.
        self.client.login(username='admin_a0', password='admin_a0')
        response = self.client.get(reverse("mygroup-create"))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.template_name, ["ojuser/group_create_form.html"])

    def test_user_create_group(self):
        # Plain (non-staff) users are redirected away.
        self.client.login(username='user_a0', password='user_a0')
        response = self.client.get(reverse("mygroup-create"))
        self.assertEqual(response.status_code, 302)

    def test_post_success(self):
        # A valid creation POST redirects to the new group's detail page;
        # the new group's pk equals the total row count after creation.
        self.client.login(username='admin_a0', password='admin_a0')
        data = {
            "name": "gc1",
            "nickname": "gc2",
            "parent": 1,
            "admins": 2,
        }
        response = self.client.post(reverse("mygroup-create"), data)
        self.assertRedirects(
            response,
            reverse("mygroup-detail", kwargs={"pk": GroupProfile.objects.count()}),
            fetch_redirect_response=False,
        )
class MyGroupsListTestCase(TestCase):
    """Visibility and permission checks on the "mygroup-list" view.

    setUp builds seven groups 'a'..'g' arranged as a binary tree (group i's
    parent is group i // 2), each with a staff creator (admin_<x>0), a staff
    admin (admin_<x>1), a non-staff admin (admin_<x>2) and two plain members
    (user_<x>0/1).  Every account uses username == email == password.
    """

    def setUp(self):
        # Global superuser.
        name = 'admin_A0'
        user = User.objects.create_user(name, name, name)
        user.is_superuser = True
        user.is_staff = True
        user.save()
        for ch in range(ord('a'), ord('g') + 1):
            # Staff user who becomes the group's superadmin.
            name = 'admin_' + chr(ch) + '0'
            user = User.objects.create_user(name, name, name)
            user.is_staff = True
            user.save()
            group_name = 'group_' + chr(ch)
            if ch == ord('a'):
                gp = GroupProfile.objects.create(
                    name=group_name, nickname=group_name, superadmin=user)
            else:
                # BUGFIX: use floor division -- plain "/" is float division on
                # Python 3 and the pk lookup wants the integer parent pk.
                parent = GroupProfile.objects.get(pk=(ch - ord('a') + 1) // 2)
                gp = GroupProfile.objects.create(
                    name=group_name, nickname=group_name, superadmin=user,
                    parent=parent)
            # Staff admin of the group.
            name = 'admin_' + chr(ch) + '1'
            user = User.objects.create_user(name, name, name)
            user.is_staff = True
            user.save()
            gp.admin_group.user_set.add(user)
            # Non-staff admin of the group.
            name = 'admin_' + chr(ch) + '2'
            user = User.objects.create_user(name, name, name)
            gp.admin_group.user_set.add(user)
            # Plain members.
            name = 'user_' + chr(ch) + '0'
            user = User.objects.create_user(name, name, name)
            gp.user_group.user_set.add(user)
            name = 'user_' + chr(ch) + '1'
            user = User.objects.create_user(name, name, name)
            gp.user_group.user_set.add(user)
        Language.objects.create(key="gcc", name='GUN C', desc='gcc 11')

    def _assert_perms(self, response, view_pks, change_pks, delete_pks):
        """Assert the three permission querysets exposed in the view context."""
        self.assertSequenceEqual(
            list(response.context['group_can_view']),
            list(GroupProfile.objects.filter(pk__in=view_pks)),
        )
        self.assertSequenceEqual(
            list(response.context['group_can_change']),
            list(GroupProfile.objects.filter(pk__in=change_pks)),
        )
        self.assertSequenceEqual(
            list(response.context['group_can_delete']),
            list(GroupProfile.objects.filter(pk__in=delete_pks)),
        )

    def test_get(self):
        self.client.login(username='admin_c0', password='admin_c0')
        response = self.client.get(reverse("mygroup-list"))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            response.template_name,
            ["ojuser/group_list.html", "ojuser/groupprofile_list.html"]
        )

    def test_superadmin_group(self):
        # The superuser sees and controls every group, and gets the New button.
        self.client.login(username='admin_A0', password='admin_A0')
        response = self.client.get(reverse("mygroup-list"))
        self._assert_perms(
            response,
            [1, 2, 3, 4, 5, 6, 7],
            [1, 2, 3, 4, 5, 6, 7],
            [1, 2, 3, 4, 5, 6, 7],
        )
        self.assertContains(
            response,
            '''<a href="/accounts/mygroups/add" class="btn btn-large btn-primary">New </a>'''
        )

    def test_creater_group(self):
        # Group c's creator: can view ancestors + subtree, change the subtree,
        # delete only his own group.  (Method name typo kept for stability.)
        self.client.login(username='admin_c0', password='admin_c0')
        response = self.client.get(reverse("mygroup-list"))
        self._assert_perms(response, [1, 3, 6, 7], [3, 6, 7], [3])
        self.assertContains(
            response,
            '''<a href="/accounts/mygroups/add" class="btn btn-large btn-primary">New </a>'''
        )

    def test_staff_group(self):
        # Any staff user gets the New button.
        self.client.login(username='admin_c1', password='admin_c1')
        response = self.client.get(reverse("mygroup-list"))
        self.assertContains(
            response,
            '''<a href="/accounts/mygroups/add" class="btn btn-large btn-primary">New </a>'''
        )

    def test_admin_group(self):
        # Non-staff admins may view/change the subtree but delete nothing.
        self.client.login(username='admin_c2', password='admin_c2')
        response = self.client.get(reverse("mygroup-list"))
        self._assert_perms(response, [1, 3, 6, 7], [3, 6, 7], [])

    def test_user_group(self):
        # Plain members only view their group and its ancestors.
        self.client.login(username='user_c0', password='user_c0')
        response = self.client.get(reverse("mygroup-list"))
        self._assert_perms(response, [1, 3], [], [])

    def test_group_link(self):
        # The list page must render the per-group action links.
        self.client.login(username='admin_c0', password='admin_c0')
        response = self.client.get(reverse("mygroup-list"))
        expected_links = (
            '''<a href="/accounts/mygroups/3/" title="查看组资源">''',
            '''<a href="/accounts/mygroups/3/members/" title="成员管理">''',
            '''<a href="/accounts/mygroups/3/update/" title="修改组信息">''',
            '''<a href="/accounts/mygroups/3/delete/" title="删除组">''',
        )
        for link in expected_links:
            self.assertContains(response, link)
class AccountTests(APITestCase):
    """REST API permission checks for the language-list endpoint."""

    def setUp(self):
        # A superuser, a staff user, and a plain user -- each with
        # username == email == password for login convenience.
        name = 'admin_A0'
        user = User.objects.create_user(name, name, name)
        user.is_superuser = True
        user.is_staff = True
        user.save()
        name = 'admin_a0'
        user = User.objects.create_user(name, name, name)
        user.is_staff = True
        user.save()
        User.objects.create_user('user_a0', 'user_a0', 'user_a0')

    def test_admin_create_account(self):
        # Staff users may create languages through the API.
        self.client.login(username='admin_a0', password='admin_a0')
        response = self.client.post(
            reverse('language-list'),
            {"key": "gcc", "name": "GUN C", "desc": "gcc -o a a.c"},
            format='json',
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(Language.objects.count(), 1)
        self.assertEqual(Language.objects.get().key, 'gcc')

    def test_user_create_account(self):
        # Plain users are forbidden from creating languages.
        self.client.login(username='user_a0', password='user_a0')
        response = self.client.post(
            reverse('language-list'),
            {"key": "gcc", "name": "GUN C", "desc": "gcc -o a a.c"},
            format='json',
        )
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
| 34.604361
| 99
| 0.577647
| 2,362
| 22,216
| 5.253175
| 0.087638
| 0.049968
| 0.056576
| 0.035542
| 0.836718
| 0.801821
| 0.773694
| 0.747663
| 0.72421
| 0.704384
| 0
| 0.010505
| 0.288711
| 22,216
| 641
| 100
| 34.658346
| 0.774712
| 0.000945
| 0
| 0.685083
| 0
| 0
| 0.133216
| 0.015817
| 0
| 0
| 0
| 0
| 0.128913
| 1
| 0.086556
| false
| 0.11418
| 0.016575
| 0
| 0.123389
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.