hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3dda0058f224bd21325e698701be29d017ab6010
| 80
|
py
|
Python
|
nmt/utils/__init__.py
|
jojkos/neural-machine-translation
|
c123dbfb66314050dfbe0b0e46899ef2f44500da
|
[
"MIT"
] | null | null | null |
nmt/utils/__init__.py
|
jojkos/neural-machine-translation
|
c123dbfb66314050dfbe0b0e46899ef2f44500da
|
[
"MIT"
] | null | null | null |
nmt/utils/__init__.py
|
jojkos/neural-machine-translation
|
c123dbfb66314050dfbe0b0e46899ef2f44500da
|
[
"MIT"
] | 1
|
2019-07-17T07:51:06.000Z
|
2019-07-17T07:51:06.000Z
|
from .data_utils import *
from .script_utils import *
from .math_utils import *
| 20
| 27
| 0.775
| 12
| 80
| 4.916667
| 0.5
| 0.559322
| 0.508475
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 80
| 3
| 28
| 26.666667
| 0.867647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
9a7c137d641c0900ffed3cdc819ad3689c661c84
| 40
|
py
|
Python
|
fancyprint/__init__.py
|
anthony-aylward/fancyprint
|
c4cf7f76260921a1dfb145bf4dd4f5272b203115
|
[
"MIT"
] | null | null | null |
fancyprint/__init__.py
|
anthony-aylward/fancyprint
|
c4cf7f76260921a1dfb145bf4dd4f5272b203115
|
[
"MIT"
] | null | null | null |
fancyprint/__init__.py
|
anthony-aylward/fancyprint
|
c4cf7f76260921a1dfb145bf4dd4f5272b203115
|
[
"MIT"
] | null | null | null |
from fancyprint.fancyprint import fprint
| 40
| 40
| 0.9
| 5
| 40
| 7.2
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075
| 40
| 1
| 40
| 40
| 0.972973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 7
|
9adc6f444dd374bd95ca13187c30084e3e307c5e
| 51,651
|
py
|
Python
|
ros2_plan_t1/plan_t1/cp.py
|
rapa5Gclass/ddrawaddaraga
|
285653cfd5c4980296477710bec7f8acf404aa2d
|
[
"Apache-2.0"
] | null | null | null |
ros2_plan_t1/plan_t1/cp.py
|
rapa5Gclass/ddrawaddaraga
|
285653cfd5c4980296477710bec7f8acf404aa2d
|
[
"Apache-2.0"
] | null | null | null |
ros2_plan_t1/plan_t1/cp.py
|
rapa5Gclass/ddrawaddaraga
|
285653cfd5c4980296477710bec7f8acf404aa2d
|
[
"Apache-2.0"
] | null | null | null |
# generated from rosidl_generator_py/resource/_idl.py.em
# with input from nav2_msgs:action/ComputePathThroughPoses.idl
# generated code does not contain a copyright notice
# Import statements for member types
import rosidl_parser.definition # noqa: E402, I100
class Metaclass_ComputePathThroughPoses_Goal(type):
"""Metaclass of message 'ComputePathThroughPoses_Goal'."""
_CREATE_ROS_MESSAGE = None
_CONVERT_FROM_PY = None
_CONVERT_TO_PY = None
_DESTROY_ROS_MESSAGE = None
_TYPE_SUPPORT = None
__constants = {
}
@classmethod
def __import_type_support__(cls):
try:
from rosidl_generator_py import import_type_support
module = import_type_support('nav2_msgs')
except ImportError:
import logging
import traceback
logger = logging.getLogger(
'nav2_msgs.action.ComputePathThroughPoses_Goal')
logger.debug(
'Failed to import needed modules for type support:\n' +
traceback.format_exc())
else:
cls._CREATE_ROS_MESSAGE = module.create_ros_message_msg__action__compute_path_through_poses__goal
cls._CONVERT_FROM_PY = module.convert_from_py_msg__action__compute_path_through_poses__goal
cls._CONVERT_TO_PY = module.convert_to_py_msg__action__compute_path_through_poses__goal
cls._TYPE_SUPPORT = module.type_support_msg__action__compute_path_through_poses__goal
cls._DESTROY_ROS_MESSAGE = module.destroy_ros_message_msg__action__compute_path_through_poses__goal
from geometry_msgs.msg import PoseStamped
if PoseStamped.__class__._TYPE_SUPPORT is None:
PoseStamped.__class__.__import_type_support__()
@classmethod
def __prepare__(cls, name, bases, **kwargs):
# list constant names here so that they appear in the help text of
# the message class under "Data and other attributes defined here:"
# as well as populate each message instance
return {
}
class ComputePathThroughPoses_Goal(metaclass=Metaclass_ComputePathThroughPoses_Goal):
"""Message class 'ComputePathThroughPoses_Goal'."""
__slots__ = [
'_goals',
'_start',
'_planner_id',
'_use_start',
]
_fields_and_field_types = {
'goals': 'sequence<geometry_msgs/PoseStamped>',
'start': 'geometry_msgs/PoseStamped',
'planner_id': 'string',
'use_start': 'boolean',
}
SLOT_TYPES = (
rosidl_parser.definition.UnboundedSequence(rosidl_parser.definition.NamespacedType(['geometry_msgs', 'msg'], 'PoseStamped')), # noqa: E501
rosidl_parser.definition.NamespacedType(['geometry_msgs', 'msg'], 'PoseStamped'), # noqa: E501
rosidl_parser.definition.UnboundedString(), # noqa: E501
rosidl_parser.definition.BasicType('boolean'), # noqa: E501
)
def __init__(self, **kwargs):
assert all('_' + key in self.__slots__ for key in kwargs.keys()), \
'Invalid arguments passed to constructor: %s' % \
', '.join(sorted(k for k in kwargs.keys() if '_' + k not in self.__slots__))
self.goals = kwargs.get('goals', [])
from geometry_msgs.msg import PoseStamped
self.start = kwargs.get('start', PoseStamped())
self.planner_id = kwargs.get('planner_id', str())
self.use_start = kwargs.get('use_start', bool())
def __repr__(self):
typename = self.__class__.__module__.split('.')
typename.pop()
typename.append(self.__class__.__name__)
args = []
for s, t in zip(self.__slots__, self.SLOT_TYPES):
field = getattr(self, s)
fieldstr = repr(field)
# We use Python array type for fields that can be directly stored
# in them, and "normal" sequences for everything else. If it is
# a type that we store in an array, strip off the 'array' portion.
if (
isinstance(t, rosidl_parser.definition.AbstractSequence) and
isinstance(t.value_type, rosidl_parser.definition.BasicType) and
t.value_type.typename in ['float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64']
):
if len(field) == 0:
fieldstr = '[]'
else:
assert fieldstr.startswith('array(')
prefix = "array('X', "
suffix = ')'
fieldstr = fieldstr[len(prefix):-len(suffix)]
args.append(s[1:] + '=' + fieldstr)
return '%s(%s)' % ('.'.join(typename), ', '.join(args))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
if self.goals != other.goals:
return False
if self.start != other.start:
return False
if self.planner_id != other.planner_id:
return False
if self.use_start != other.use_start:
return False
return True
@classmethod
def get_fields_and_field_types(cls):
from copy import copy
return copy(cls._fields_and_field_types)
@property
def goals(self):
"""Message field 'goals'."""
return self._goals
@goals.setter
def goals(self, value):
if __debug__:
from geometry_msgs.msg import PoseStamped
from collections.abc import Sequence
from collections.abc import Set
from collections import UserList
from collections import UserString
assert \
((isinstance(value, Sequence) or
isinstance(value, Set) or
isinstance(value, UserList)) and
not isinstance(value, str) and
not isinstance(value, UserString) and
all(isinstance(v, PoseStamped) for v in value) and
True), \
"The 'goals' field must be a set or sequence and each value of type 'PoseStamped'"
self._goals = value
@property
def start(self):
"""Message field 'start'."""
return self._start
@start.setter
def start(self, value):
if __debug__:
from geometry_msgs.msg import PoseStamped
assert \
isinstance(value, PoseStamped), \
"The 'start' field must be a sub message of type 'PoseStamped'"
self._start = value
@property
def planner_id(self):
"""Message field 'planner_id'."""
return self._planner_id
@planner_id.setter
def planner_id(self, value):
if __debug__:
assert \
isinstance(value, str), \
"The 'planner_id' field must be of type 'str'"
self._planner_id = value
@property
def use_start(self):
"""Message field 'use_start'."""
return self._use_start
@use_start.setter
def use_start(self, value):
if __debug__:
assert \
isinstance(value, bool), \
"The 'use_start' field must be of type 'bool'"
self._use_start = value
# Import statements for member types
# already imported above
# import rosidl_parser.definition
class Metaclass_ComputePathThroughPoses_Result(type):
"""Metaclass of message 'ComputePathThroughPoses_Result'."""
_CREATE_ROS_MESSAGE = None
_CONVERT_FROM_PY = None
_CONVERT_TO_PY = None
_DESTROY_ROS_MESSAGE = None
_TYPE_SUPPORT = None
__constants = {
}
@classmethod
def __import_type_support__(cls):
try:
from rosidl_generator_py import import_type_support
module = import_type_support('nav2_msgs')
except ImportError:
import logging
import traceback
logger = logging.getLogger(
'nav2_msgs.action.ComputePathThroughPoses_Result')
logger.debug(
'Failed to import needed modules for type support:\n' +
traceback.format_exc())
else:
cls._CREATE_ROS_MESSAGE = module.create_ros_message_msg__action__compute_path_through_poses__result
cls._CONVERT_FROM_PY = module.convert_from_py_msg__action__compute_path_through_poses__result
cls._CONVERT_TO_PY = module.convert_to_py_msg__action__compute_path_through_poses__result
cls._TYPE_SUPPORT = module.type_support_msg__action__compute_path_through_poses__result
cls._DESTROY_ROS_MESSAGE = module.destroy_ros_message_msg__action__compute_path_through_poses__result
from builtin_interfaces.msg import Duration
if Duration.__class__._TYPE_SUPPORT is None:
Duration.__class__.__import_type_support__()
from nav_msgs.msg import Path
if Path.__class__._TYPE_SUPPORT is None:
Path.__class__.__import_type_support__()
@classmethod
def __prepare__(cls, name, bases, **kwargs):
# list constant names here so that they appear in the help text of
# the message class under "Data and other attributes defined here:"
# as well as populate each message instance
return {
}
class ComputePathThroughPoses_Result(metaclass=Metaclass_ComputePathThroughPoses_Result):
"""Message class 'ComputePathThroughPoses_Result'."""
__slots__ = [
'_path',
'_planning_time',
]
_fields_and_field_types = {
'path': 'nav_msgs/Path',
'planning_time': 'builtin_interfaces/Duration',
}
SLOT_TYPES = (
rosidl_parser.definition.NamespacedType(['nav_msgs', 'msg'], 'Path'), # noqa: E501
rosidl_parser.definition.NamespacedType(['builtin_interfaces', 'msg'], 'Duration'), # noqa: E501
)
def __init__(self, **kwargs):
assert all('_' + key in self.__slots__ for key in kwargs.keys()), \
'Invalid arguments passed to constructor: %s' % \
', '.join(sorted(k for k in kwargs.keys() if '_' + k not in self.__slots__))
from nav_msgs.msg import Path
self.path = kwargs.get('path', Path())
from builtin_interfaces.msg import Duration
self.planning_time = kwargs.get('planning_time', Duration())
def __repr__(self):
typename = self.__class__.__module__.split('.')
typename.pop()
typename.append(self.__class__.__name__)
args = []
for s, t in zip(self.__slots__, self.SLOT_TYPES):
field = getattr(self, s)
fieldstr = repr(field)
# We use Python array type for fields that can be directly stored
# in them, and "normal" sequences for everything else. If it is
# a type that we store in an array, strip off the 'array' portion.
if (
isinstance(t, rosidl_parser.definition.AbstractSequence) and
isinstance(t.value_type, rosidl_parser.definition.BasicType) and
t.value_type.typename in ['float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64']
):
if len(field) == 0:
fieldstr = '[]'
else:
assert fieldstr.startswith('array(')
prefix = "array('X', "
suffix = ')'
fieldstr = fieldstr[len(prefix):-len(suffix)]
args.append(s[1:] + '=' + fieldstr)
return '%s(%s)' % ('.'.join(typename), ', '.join(args))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
if self.path != other.path:
return False
if self.planning_time != other.planning_time:
return False
return True
@classmethod
def get_fields_and_field_types(cls):
from copy import copy
return copy(cls._fields_and_field_types)
@property
def path(self):
"""Message field 'path'."""
return self._path
@path.setter
def path(self, value):
if __debug__:
from nav_msgs.msg import Path
assert \
isinstance(value, Path), \
"The 'path' field must be a sub message of type 'Path'"
self._path = value
@property
def planning_time(self):
"""Message field 'planning_time'."""
return self._planning_time
@planning_time.setter
def planning_time(self, value):
if __debug__:
from builtin_interfaces.msg import Duration
assert \
isinstance(value, Duration), \
"The 'planning_time' field must be a sub message of type 'Duration'"
self._planning_time = value
# Import statements for member types
# already imported above
# import rosidl_parser.definition
class Metaclass_ComputePathThroughPoses_Feedback(type):
"""Metaclass of message 'ComputePathThroughPoses_Feedback'."""
_CREATE_ROS_MESSAGE = None
_CONVERT_FROM_PY = None
_CONVERT_TO_PY = None
_DESTROY_ROS_MESSAGE = None
_TYPE_SUPPORT = None
__constants = {
}
@classmethod
def __import_type_support__(cls):
try:
from rosidl_generator_py import import_type_support
module = import_type_support('nav2_msgs')
except ImportError:
import logging
import traceback
logger = logging.getLogger(
'nav2_msgs.action.ComputePathThroughPoses_Feedback')
logger.debug(
'Failed to import needed modules for type support:\n' +
traceback.format_exc())
else:
cls._CREATE_ROS_MESSAGE = module.create_ros_message_msg__action__compute_path_through_poses__feedback
cls._CONVERT_FROM_PY = module.convert_from_py_msg__action__compute_path_through_poses__feedback
cls._CONVERT_TO_PY = module.convert_to_py_msg__action__compute_path_through_poses__feedback
cls._TYPE_SUPPORT = module.type_support_msg__action__compute_path_through_poses__feedback
cls._DESTROY_ROS_MESSAGE = module.destroy_ros_message_msg__action__compute_path_through_poses__feedback
@classmethod
def __prepare__(cls, name, bases, **kwargs):
# list constant names here so that they appear in the help text of
# the message class under "Data and other attributes defined here:"
# as well as populate each message instance
return {
}
class ComputePathThroughPoses_Feedback(metaclass=Metaclass_ComputePathThroughPoses_Feedback):
"""Message class 'ComputePathThroughPoses_Feedback'."""
__slots__ = [
]
_fields_and_field_types = {
}
SLOT_TYPES = (
)
def __init__(self, **kwargs):
assert all('_' + key in self.__slots__ for key in kwargs.keys()), \
'Invalid arguments passed to constructor: %s' % \
', '.join(sorted(k for k in kwargs.keys() if '_' + k not in self.__slots__))
def __repr__(self):
typename = self.__class__.__module__.split('.')
typename.pop()
typename.append(self.__class__.__name__)
args = []
for s, t in zip(self.__slots__, self.SLOT_TYPES):
field = getattr(self, s)
fieldstr = repr(field)
# We use Python array type for fields that can be directly stored
# in them, and "normal" sequences for everything else. If it is
# a type that we store in an array, strip off the 'array' portion.
if (
isinstance(t, rosidl_parser.definition.AbstractSequence) and
isinstance(t.value_type, rosidl_parser.definition.BasicType) and
t.value_type.typename in ['float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64']
):
if len(field) == 0:
fieldstr = '[]'
else:
assert fieldstr.startswith('array(')
prefix = "array('X', "
suffix = ')'
fieldstr = fieldstr[len(prefix):-len(suffix)]
args.append(s[1:] + '=' + fieldstr)
return '%s(%s)' % ('.'.join(typename), ', '.join(args))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return True
@classmethod
def get_fields_and_field_types(cls):
from copy import copy
return copy(cls._fields_and_field_types)
# Import statements for member types
# already imported above
# import rosidl_parser.definition
class Metaclass_ComputePathThroughPoses_SendGoal_Request(type):
"""Metaclass of message 'ComputePathThroughPoses_SendGoal_Request'."""
_CREATE_ROS_MESSAGE = None
_CONVERT_FROM_PY = None
_CONVERT_TO_PY = None
_DESTROY_ROS_MESSAGE = None
_TYPE_SUPPORT = None
__constants = {
}
@classmethod
def __import_type_support__(cls):
try:
from rosidl_generator_py import import_type_support
module = import_type_support('nav2_msgs')
except ImportError:
import logging
import traceback
logger = logging.getLogger(
'nav2_msgs.action.ComputePathThroughPoses_SendGoal_Request')
logger.debug(
'Failed to import needed modules for type support:\n' +
traceback.format_exc())
else:
cls._CREATE_ROS_MESSAGE = module.create_ros_message_msg__action__compute_path_through_poses__send_goal__request
cls._CONVERT_FROM_PY = module.convert_from_py_msg__action__compute_path_through_poses__send_goal__request
cls._CONVERT_TO_PY = module.convert_to_py_msg__action__compute_path_through_poses__send_goal__request
cls._TYPE_SUPPORT = module.type_support_msg__action__compute_path_through_poses__send_goal__request
cls._DESTROY_ROS_MESSAGE = module.destroy_ros_message_msg__action__compute_path_through_poses__send_goal__request
from nav2_msgs.action import ComputePathThroughPoses
if ComputePathThroughPoses.Goal.__class__._TYPE_SUPPORT is None:
ComputePathThroughPoses.Goal.__class__.__import_type_support__()
from unique_identifier_msgs.msg import UUID
if UUID.__class__._TYPE_SUPPORT is None:
UUID.__class__.__import_type_support__()
@classmethod
def __prepare__(cls, name, bases, **kwargs):
# list constant names here so that they appear in the help text of
# the message class under "Data and other attributes defined here:"
# as well as populate each message instance
return {
}
class ComputePathThroughPoses_SendGoal_Request(metaclass=Metaclass_ComputePathThroughPoses_SendGoal_Request):
"""Message class 'ComputePathThroughPoses_SendGoal_Request'."""
__slots__ = [
'_goal_id',
'_goal',
]
_fields_and_field_types = {
'goal_id': 'unique_identifier_msgs/UUID',
'goal': 'nav2_msgs/ComputePathThroughPoses_Goal',
}
SLOT_TYPES = (
rosidl_parser.definition.NamespacedType(['unique_identifier_msgs', 'msg'], 'UUID'), # noqa: E501
rosidl_parser.definition.NamespacedType(['nav2_msgs', 'action'], 'ComputePathThroughPoses_Goal'), # noqa: E501
)
def __init__(self, **kwargs):
assert all('_' + key in self.__slots__ for key in kwargs.keys()), \
'Invalid arguments passed to constructor: %s' % \
', '.join(sorted(k for k in kwargs.keys() if '_' + k not in self.__slots__))
from unique_identifier_msgs.msg import UUID
self.goal_id = kwargs.get('goal_id', UUID())
from nav2_msgs.action._compute_path_through_poses import ComputePathThroughPoses_Goal
self.goal = kwargs.get('goal', ComputePathThroughPoses_Goal())
def __repr__(self):
typename = self.__class__.__module__.split('.')
typename.pop()
typename.append(self.__class__.__name__)
args = []
for s, t in zip(self.__slots__, self.SLOT_TYPES):
field = getattr(self, s)
fieldstr = repr(field)
# We use Python array type for fields that can be directly stored
# in them, and "normal" sequences for everything else. If it is
# a type that we store in an array, strip off the 'array' portion.
if (
isinstance(t, rosidl_parser.definition.AbstractSequence) and
isinstance(t.value_type, rosidl_parser.definition.BasicType) and
t.value_type.typename in ['float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64']
):
if len(field) == 0:
fieldstr = '[]'
else:
assert fieldstr.startswith('array(')
prefix = "array('X', "
suffix = ')'
fieldstr = fieldstr[len(prefix):-len(suffix)]
args.append(s[1:] + '=' + fieldstr)
return '%s(%s)' % ('.'.join(typename), ', '.join(args))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
if self.goal_id != other.goal_id:
return False
if self.goal != other.goal:
return False
return True
@classmethod
def get_fields_and_field_types(cls):
from copy import copy
return copy(cls._fields_and_field_types)
@property
def goal_id(self):
"""Message field 'goal_id'."""
return self._goal_id
@goal_id.setter
def goal_id(self, value):
if __debug__:
from unique_identifier_msgs.msg import UUID
assert \
isinstance(value, UUID), \
"The 'goal_id' field must be a sub message of type 'UUID'"
self._goal_id = value
@property
def goal(self):
"""Message field 'goal'."""
return self._goal
@goal.setter
def goal(self, value):
if __debug__:
from nav2_msgs.action._compute_path_through_poses import ComputePathThroughPoses_Goal
assert \
isinstance(value, ComputePathThroughPoses_Goal), \
"The 'goal' field must be a sub message of type 'ComputePathThroughPoses_Goal'"
self._goal = value
# Import statements for member types
# already imported above
# import rosidl_parser.definition
class Metaclass_ComputePathThroughPoses_SendGoal_Response(type):
"""Metaclass of message 'ComputePathThroughPoses_SendGoal_Response'."""
_CREATE_ROS_MESSAGE = None
_CONVERT_FROM_PY = None
_CONVERT_TO_PY = None
_DESTROY_ROS_MESSAGE = None
_TYPE_SUPPORT = None
__constants = {
}
@classmethod
def __import_type_support__(cls):
try:
from rosidl_generator_py import import_type_support
module = import_type_support('nav2_msgs')
except ImportError:
import logging
import traceback
logger = logging.getLogger(
'nav2_msgs.action.ComputePathThroughPoses_SendGoal_Response')
logger.debug(
'Failed to import needed modules for type support:\n' +
traceback.format_exc())
else:
cls._CREATE_ROS_MESSAGE = module.create_ros_message_msg__action__compute_path_through_poses__send_goal__response
cls._CONVERT_FROM_PY = module.convert_from_py_msg__action__compute_path_through_poses__send_goal__response
cls._CONVERT_TO_PY = module.convert_to_py_msg__action__compute_path_through_poses__send_goal__response
cls._TYPE_SUPPORT = module.type_support_msg__action__compute_path_through_poses__send_goal__response
cls._DESTROY_ROS_MESSAGE = module.destroy_ros_message_msg__action__compute_path_through_poses__send_goal__response
from builtin_interfaces.msg import Time
if Time.__class__._TYPE_SUPPORT is None:
Time.__class__.__import_type_support__()
@classmethod
def __prepare__(cls, name, bases, **kwargs):
# list constant names here so that they appear in the help text of
# the message class under "Data and other attributes defined here:"
# as well as populate each message instance
return {
}
class ComputePathThroughPoses_SendGoal_Response(metaclass=Metaclass_ComputePathThroughPoses_SendGoal_Response):
"""Message class 'ComputePathThroughPoses_SendGoal_Response'."""
__slots__ = [
'_accepted',
'_stamp',
]
_fields_and_field_types = {
'accepted': 'boolean',
'stamp': 'builtin_interfaces/Time',
}
SLOT_TYPES = (
rosidl_parser.definition.BasicType('boolean'), # noqa: E501
rosidl_parser.definition.NamespacedType(['builtin_interfaces', 'msg'], 'Time'), # noqa: E501
)
def __init__(self, **kwargs):
assert all('_' + key in self.__slots__ for key in kwargs.keys()), \
'Invalid arguments passed to constructor: %s' % \
', '.join(sorted(k for k in kwargs.keys() if '_' + k not in self.__slots__))
self.accepted = kwargs.get('accepted', bool())
from builtin_interfaces.msg import Time
self.stamp = kwargs.get('stamp', Time())
def __repr__(self):
typename = self.__class__.__module__.split('.')
typename.pop()
typename.append(self.__class__.__name__)
args = []
for s, t in zip(self.__slots__, self.SLOT_TYPES):
field = getattr(self, s)
fieldstr = repr(field)
# We use Python array type for fields that can be directly stored
# in them, and "normal" sequences for everything else. If it is
# a type that we store in an array, strip off the 'array' portion.
if (
isinstance(t, rosidl_parser.definition.AbstractSequence) and
isinstance(t.value_type, rosidl_parser.definition.BasicType) and
t.value_type.typename in ['float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64']
):
if len(field) == 0:
fieldstr = '[]'
else:
assert fieldstr.startswith('array(')
prefix = "array('X', "
suffix = ')'
fieldstr = fieldstr[len(prefix):-len(suffix)]
args.append(s[1:] + '=' + fieldstr)
return '%s(%s)' % ('.'.join(typename), ', '.join(args))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
if self.accepted != other.accepted:
return False
if self.stamp != other.stamp:
return False
return True
@classmethod
def get_fields_and_field_types(cls):
from copy import copy
return copy(cls._fields_and_field_types)
@property
def accepted(self):
"""Message field 'accepted'."""
return self._accepted
@accepted.setter
def accepted(self, value):
if __debug__:
assert \
isinstance(value, bool), \
"The 'accepted' field must be of type 'bool'"
self._accepted = value
@property
def stamp(self):
"""Message field 'stamp'."""
return self._stamp
@stamp.setter
def stamp(self, value):
if __debug__:
from builtin_interfaces.msg import Time
assert \
isinstance(value, Time), \
"The 'stamp' field must be a sub message of type 'Time'"
self._stamp = value
class Metaclass_ComputePathThroughPoses_SendGoal(type):
"""Metaclass of service 'ComputePathThroughPoses_SendGoal'."""
_TYPE_SUPPORT = None
@classmethod
def __import_type_support__(cls):
try:
from rosidl_generator_py import import_type_support
module = import_type_support('nav2_msgs')
except ImportError:
import logging
import traceback
logger = logging.getLogger(
'nav2_msgs.action.ComputePathThroughPoses_SendGoal')
logger.debug(
'Failed to import needed modules for type support:\n' +
traceback.format_exc())
else:
cls._TYPE_SUPPORT = module.type_support_srv__action__compute_path_through_poses__send_goal
from nav2_msgs.action import _compute_path_through_poses
if _compute_path_through_poses.Metaclass_ComputePathThroughPoses_SendGoal_Request._TYPE_SUPPORT is None:
_compute_path_through_poses.Metaclass_ComputePathThroughPoses_SendGoal_Request.__import_type_support__()
if _compute_path_through_poses.Metaclass_ComputePathThroughPoses_SendGoal_Response._TYPE_SUPPORT is None:
_compute_path_through_poses.Metaclass_ComputePathThroughPoses_SendGoal_Response.__import_type_support__()
class ComputePathThroughPoses_SendGoal(metaclass=Metaclass_ComputePathThroughPoses_SendGoal):
from nav2_msgs.action._compute_path_through_poses import ComputePathThroughPoses_SendGoal_Request as Request
from nav2_msgs.action._compute_path_through_poses import ComputePathThroughPoses_SendGoal_Response as Response
def __init__(self):
raise NotImplementedError('Service classes can not be instantiated')
# Import statements for member types
# already imported above
# import rosidl_parser.definition
class Metaclass_ComputePathThroughPoses_GetResult_Request(type):
    """Metaclass of message 'ComputePathThroughPoses_GetResult_Request'."""

    # C-level conversion/type-support callables; resolved lazily by
    # __import_type_support__() from the compiled nav2_msgs extension.
    _CREATE_ROS_MESSAGE = None
    _CONVERT_FROM_PY = None
    _CONVERT_TO_PY = None
    _DESTROY_ROS_MESSAGE = None
    _TYPE_SUPPORT = None

    __constants = {
    }

    @classmethod
    def __import_type_support__(cls):
        # Bind the generated C type-support functions.  An ImportError is
        # only logged at debug level so pure-Python use keeps working.
        try:
            from rosidl_generator_py import import_type_support
            module = import_type_support('nav2_msgs')
        except ImportError:
            import logging
            import traceback
            logger = logging.getLogger(
                'nav2_msgs.action.ComputePathThroughPoses_GetResult_Request')
            logger.debug(
                'Failed to import needed modules for type support:\n' +
                traceback.format_exc())
        else:
            cls._CREATE_ROS_MESSAGE = module.create_ros_message_msg__action__compute_path_through_poses__get_result__request
            cls._CONVERT_FROM_PY = module.convert_from_py_msg__action__compute_path_through_poses__get_result__request
            cls._CONVERT_TO_PY = module.convert_to_py_msg__action__compute_path_through_poses__get_result__request
            cls._TYPE_SUPPORT = module.type_support_msg__action__compute_path_through_poses__get_result__request
            cls._DESTROY_ROS_MESSAGE = module.destroy_ros_message_msg__action__compute_path_through_poses__get_result__request

            # Also pull in type support for the nested field type.
            from unique_identifier_msgs.msg import UUID
            if UUID.__class__._TYPE_SUPPORT is None:
                UUID.__class__.__import_type_support__()

    @classmethod
    def __prepare__(cls, name, bases, **kwargs):
        # list constant names here so that they appear in the help text of
        # the message class under "Data and other attributes defined here:"
        # as well as populate each message instance
        return {
        }
class ComputePathThroughPoses_GetResult_Request(metaclass=Metaclass_ComputePathThroughPoses_GetResult_Request):
    """Message class 'ComputePathThroughPoses_GetResult_Request'."""

    # One slot per message field, each prefixed with '_'.
    __slots__ = [
        '_goal_id',
    ]

    _fields_and_field_types = {
        'goal_id': 'unique_identifier_msgs/UUID',
    }

    SLOT_TYPES = (
        rosidl_parser.definition.NamespacedType(['unique_identifier_msgs', 'msg'], 'UUID'),  # noqa: E501
    )

    def __init__(self, **kwargs):
        # Only keyword arguments matching declared fields are accepted;
        # unset fields get default-constructed values.
        assert all('_' + key in self.__slots__ for key in kwargs.keys()), \
            'Invalid arguments passed to constructor: %s' % \
            ', '.join(sorted(k for k in kwargs.keys() if '_' + k not in self.__slots__))
        from unique_identifier_msgs.msg import UUID
        self.goal_id = kwargs.get('goal_id', UUID())

    def __repr__(self):
        typename = self.__class__.__module__.split('.')
        typename.pop()
        typename.append(self.__class__.__name__)
        args = []
        for s, t in zip(self.__slots__, self.SLOT_TYPES):
            field = getattr(self, s)
            fieldstr = repr(field)
            # We use Python array type for fields that can be directly stored
            # in them, and "normal" sequences for everything else.  If it is
            # a type that we store in an array, strip off the 'array' portion.
            if (
                isinstance(t, rosidl_parser.definition.AbstractSequence) and
                isinstance(t.value_type, rosidl_parser.definition.BasicType) and
                t.value_type.typename in ['float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64']
            ):
                if len(field) == 0:
                    fieldstr = '[]'
                else:
                    assert fieldstr.startswith('array(')
                    prefix = "array('X', "
                    suffix = ')'
                    fieldstr = fieldstr[len(prefix):-len(suffix)]
            args.append(s[1:] + '=' + fieldstr)
        return '%s(%s)' % ('.'.join(typename), ', '.join(args))

    def __eq__(self, other):
        # Field-wise equality; other must be the exact same message class.
        if not isinstance(other, self.__class__):
            return False
        if self.goal_id != other.goal_id:
            return False
        return True

    @classmethod
    def get_fields_and_field_types(cls):
        from copy import copy
        return copy(cls._fields_and_field_types)

    @property
    def goal_id(self):
        """Message field 'goal_id'."""
        return self._goal_id

    @goal_id.setter
    def goal_id(self, value):
        # Type check is skipped when Python runs with -O (__debug__ False).
        if __debug__:
            from unique_identifier_msgs.msg import UUID
            assert \
                isinstance(value, UUID), \
                "The 'goal_id' field must be a sub message of type 'UUID'"
        self._goal_id = value
# Import statements for member types
# already imported above
# import rosidl_parser.definition
class Metaclass_ComputePathThroughPoses_GetResult_Response(type):
    """Metaclass of message 'ComputePathThroughPoses_GetResult_Response'."""

    # C-level conversion/type-support callables; resolved lazily by
    # __import_type_support__() from the compiled nav2_msgs extension.
    _CREATE_ROS_MESSAGE = None
    _CONVERT_FROM_PY = None
    _CONVERT_TO_PY = None
    _DESTROY_ROS_MESSAGE = None
    _TYPE_SUPPORT = None

    __constants = {
    }

    @classmethod
    def __import_type_support__(cls):
        # Bind the generated C type-support functions; ImportError is only
        # logged at debug level so pure-Python use keeps working.
        try:
            from rosidl_generator_py import import_type_support
            module = import_type_support('nav2_msgs')
        except ImportError:
            import logging
            import traceback
            logger = logging.getLogger(
                'nav2_msgs.action.ComputePathThroughPoses_GetResult_Response')
            logger.debug(
                'Failed to import needed modules for type support:\n' +
                traceback.format_exc())
        else:
            cls._CREATE_ROS_MESSAGE = module.create_ros_message_msg__action__compute_path_through_poses__get_result__response
            cls._CONVERT_FROM_PY = module.convert_from_py_msg__action__compute_path_through_poses__get_result__response
            cls._CONVERT_TO_PY = module.convert_to_py_msg__action__compute_path_through_poses__get_result__response
            cls._TYPE_SUPPORT = module.type_support_msg__action__compute_path_through_poses__get_result__response
            cls._DESTROY_ROS_MESSAGE = module.destroy_ros_message_msg__action__compute_path_through_poses__get_result__response

            # Also pull in type support for the nested result field type.
            from nav2_msgs.action import ComputePathThroughPoses
            if ComputePathThroughPoses.Result.__class__._TYPE_SUPPORT is None:
                ComputePathThroughPoses.Result.__class__.__import_type_support__()

    @classmethod
    def __prepare__(cls, name, bases, **kwargs):
        # list constant names here so that they appear in the help text of
        # the message class under "Data and other attributes defined here:"
        # as well as populate each message instance
        return {
        }
class ComputePathThroughPoses_GetResult_Response(metaclass=Metaclass_ComputePathThroughPoses_GetResult_Response):
    """Message class 'ComputePathThroughPoses_GetResult_Response'."""

    # One slot per message field, each prefixed with '_'.
    __slots__ = [
        '_status',
        '_result',
    ]

    _fields_and_field_types = {
        'status': 'int8',
        'result': 'nav2_msgs/ComputePathThroughPoses_Result',
    }

    SLOT_TYPES = (
        rosidl_parser.definition.BasicType('int8'),  # noqa: E501
        rosidl_parser.definition.NamespacedType(['nav2_msgs', 'action'], 'ComputePathThroughPoses_Result'),  # noqa: E501
    )

    def __init__(self, **kwargs):
        # Only keyword arguments matching declared fields are accepted;
        # unset fields get default-constructed values.
        assert all('_' + key in self.__slots__ for key in kwargs.keys()), \
            'Invalid arguments passed to constructor: %s' % \
            ', '.join(sorted(k for k in kwargs.keys() if '_' + k not in self.__slots__))
        self.status = kwargs.get('status', int())
        from nav2_msgs.action._compute_path_through_poses import ComputePathThroughPoses_Result
        self.result = kwargs.get('result', ComputePathThroughPoses_Result())

    def __repr__(self):
        typename = self.__class__.__module__.split('.')
        typename.pop()
        typename.append(self.__class__.__name__)
        args = []
        for s, t in zip(self.__slots__, self.SLOT_TYPES):
            field = getattr(self, s)
            fieldstr = repr(field)
            # We use Python array type for fields that can be directly stored
            # in them, and "normal" sequences for everything else.  If it is
            # a type that we store in an array, strip off the 'array' portion.
            if (
                isinstance(t, rosidl_parser.definition.AbstractSequence) and
                isinstance(t.value_type, rosidl_parser.definition.BasicType) and
                t.value_type.typename in ['float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64']
            ):
                if len(field) == 0:
                    fieldstr = '[]'
                else:
                    assert fieldstr.startswith('array(')
                    prefix = "array('X', "
                    suffix = ')'
                    fieldstr = fieldstr[len(prefix):-len(suffix)]
            args.append(s[1:] + '=' + fieldstr)
        return '%s(%s)' % ('.'.join(typename), ', '.join(args))

    def __eq__(self, other):
        # Field-wise equality; other must be the exact same message class.
        if not isinstance(other, self.__class__):
            return False
        if self.status != other.status:
            return False
        if self.result != other.result:
            return False
        return True

    @classmethod
    def get_fields_and_field_types(cls):
        from copy import copy
        return copy(cls._fields_and_field_types)

    @property
    def status(self):
        """Message field 'status'."""
        return self._status

    @status.setter
    def status(self, value):
        # Type/range checks are skipped when Python runs with -O.
        if __debug__:
            assert \
                isinstance(value, int), \
                "The 'status' field must be of type 'int'"
            # int8 wire type: must fit in a signed byte.
            assert value >= -128 and value < 128, \
                "The 'status' field must be an integer in [-128, 127]"
        self._status = value

    @property
    def result(self):
        """Message field 'result'."""
        return self._result

    @result.setter
    def result(self, value):
        if __debug__:
            from nav2_msgs.action._compute_path_through_poses import ComputePathThroughPoses_Result
            assert \
                isinstance(value, ComputePathThroughPoses_Result), \
                "The 'result' field must be a sub message of type 'ComputePathThroughPoses_Result'"
        self._result = value
class Metaclass_ComputePathThroughPoses_GetResult(type):
    """Metaclass of service 'ComputePathThroughPoses_GetResult'."""

    # C-level service type support; resolved lazily below.
    _TYPE_SUPPORT = None

    @classmethod
    def __import_type_support__(cls):
        # Bind the generated C type-support function; ImportError is only
        # logged at debug level so pure-Python use keeps working.
        try:
            from rosidl_generator_py import import_type_support
            module = import_type_support('nav2_msgs')
        except ImportError:
            import logging
            import traceback
            logger = logging.getLogger(
                'nav2_msgs.action.ComputePathThroughPoses_GetResult')
            logger.debug(
                'Failed to import needed modules for type support:\n' +
                traceback.format_exc())
        else:
            cls._TYPE_SUPPORT = module.type_support_srv__action__compute_path_through_poses__get_result

            # Also pull in type support for the request/response messages.
            from nav2_msgs.action import _compute_path_through_poses
            if _compute_path_through_poses.Metaclass_ComputePathThroughPoses_GetResult_Request._TYPE_SUPPORT is None:
                _compute_path_through_poses.Metaclass_ComputePathThroughPoses_GetResult_Request.__import_type_support__()
            if _compute_path_through_poses.Metaclass_ComputePathThroughPoses_GetResult_Response._TYPE_SUPPORT is None:
                _compute_path_through_poses.Metaclass_ComputePathThroughPoses_GetResult_Response.__import_type_support__()
class ComputePathThroughPoses_GetResult(metaclass=Metaclass_ComputePathThroughPoses_GetResult):
    """Service class for the ComputePathThroughPoses action's get_result service.

    Only groups the Request/Response message types; never instantiated.
    """
    from nav2_msgs.action._compute_path_through_poses import ComputePathThroughPoses_GetResult_Request as Request
    from nav2_msgs.action._compute_path_through_poses import ComputePathThroughPoses_GetResult_Response as Response

    def __init__(self):
        raise NotImplementedError('Service classes can not be instantiated')
# Import statements for member types
# already imported above
# import rosidl_parser.definition
class Metaclass_ComputePathThroughPoses_FeedbackMessage(type):
    """Metaclass of message 'ComputePathThroughPoses_FeedbackMessage'."""

    # C-level conversion/type-support callables; resolved lazily by
    # __import_type_support__() from the compiled nav2_msgs extension.
    _CREATE_ROS_MESSAGE = None
    _CONVERT_FROM_PY = None
    _CONVERT_TO_PY = None
    _DESTROY_ROS_MESSAGE = None
    _TYPE_SUPPORT = None

    __constants = {
    }

    @classmethod
    def __import_type_support__(cls):
        # Bind the generated C type-support functions; ImportError is only
        # logged at debug level so pure-Python use keeps working.
        try:
            from rosidl_generator_py import import_type_support
            module = import_type_support('nav2_msgs')
        except ImportError:
            import logging
            import traceback
            logger = logging.getLogger(
                'nav2_msgs.action.ComputePathThroughPoses_FeedbackMessage')
            logger.debug(
                'Failed to import needed modules for type support:\n' +
                traceback.format_exc())
        else:
            cls._CREATE_ROS_MESSAGE = module.create_ros_message_msg__action__compute_path_through_poses__feedback_message
            cls._CONVERT_FROM_PY = module.convert_from_py_msg__action__compute_path_through_poses__feedback_message
            cls._CONVERT_TO_PY = module.convert_to_py_msg__action__compute_path_through_poses__feedback_message
            cls._TYPE_SUPPORT = module.type_support_msg__action__compute_path_through_poses__feedback_message
            cls._DESTROY_ROS_MESSAGE = module.destroy_ros_message_msg__action__compute_path_through_poses__feedback_message

            # Also pull in type support for both nested field types.
            from nav2_msgs.action import ComputePathThroughPoses
            if ComputePathThroughPoses.Feedback.__class__._TYPE_SUPPORT is None:
                ComputePathThroughPoses.Feedback.__class__.__import_type_support__()

            from unique_identifier_msgs.msg import UUID
            if UUID.__class__._TYPE_SUPPORT is None:
                UUID.__class__.__import_type_support__()

    @classmethod
    def __prepare__(cls, name, bases, **kwargs):
        # list constant names here so that they appear in the help text of
        # the message class under "Data and other attributes defined here:"
        # as well as populate each message instance
        return {
        }
class ComputePathThroughPoses_FeedbackMessage(metaclass=Metaclass_ComputePathThroughPoses_FeedbackMessage):
    """Message class 'ComputePathThroughPoses_FeedbackMessage'."""

    # One slot per message field, each prefixed with '_'.
    __slots__ = [
        '_goal_id',
        '_feedback',
    ]

    _fields_and_field_types = {
        'goal_id': 'unique_identifier_msgs/UUID',
        'feedback': 'nav2_msgs/ComputePathThroughPoses_Feedback',
    }

    SLOT_TYPES = (
        rosidl_parser.definition.NamespacedType(['unique_identifier_msgs', 'msg'], 'UUID'),  # noqa: E501
        rosidl_parser.definition.NamespacedType(['nav2_msgs', 'action'], 'ComputePathThroughPoses_Feedback'),  # noqa: E501
    )

    def __init__(self, **kwargs):
        # Only keyword arguments matching declared fields are accepted;
        # unset fields get default-constructed values.
        assert all('_' + key in self.__slots__ for key in kwargs.keys()), \
            'Invalid arguments passed to constructor: %s' % \
            ', '.join(sorted(k for k in kwargs.keys() if '_' + k not in self.__slots__))
        from unique_identifier_msgs.msg import UUID
        self.goal_id = kwargs.get('goal_id', UUID())
        from nav2_msgs.action._compute_path_through_poses import ComputePathThroughPoses_Feedback
        self.feedback = kwargs.get('feedback', ComputePathThroughPoses_Feedback())

    def __repr__(self):
        typename = self.__class__.__module__.split('.')
        typename.pop()
        typename.append(self.__class__.__name__)
        args = []
        for s, t in zip(self.__slots__, self.SLOT_TYPES):
            field = getattr(self, s)
            fieldstr = repr(field)
            # We use Python array type for fields that can be directly stored
            # in them, and "normal" sequences for everything else.  If it is
            # a type that we store in an array, strip off the 'array' portion.
            if (
                isinstance(t, rosidl_parser.definition.AbstractSequence) and
                isinstance(t.value_type, rosidl_parser.definition.BasicType) and
                t.value_type.typename in ['float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64']
            ):
                if len(field) == 0:
                    fieldstr = '[]'
                else:
                    assert fieldstr.startswith('array(')
                    prefix = "array('X', "
                    suffix = ')'
                    fieldstr = fieldstr[len(prefix):-len(suffix)]
            args.append(s[1:] + '=' + fieldstr)
        return '%s(%s)' % ('.'.join(typename), ', '.join(args))

    def __eq__(self, other):
        # Field-wise equality; other must be the exact same message class.
        if not isinstance(other, self.__class__):
            return False
        if self.goal_id != other.goal_id:
            return False
        if self.feedback != other.feedback:
            return False
        return True

    @classmethod
    def get_fields_and_field_types(cls):
        from copy import copy
        return copy(cls._fields_and_field_types)

    @property
    def goal_id(self):
        """Message field 'goal_id'."""
        return self._goal_id

    @goal_id.setter
    def goal_id(self, value):
        # Type check is skipped when Python runs with -O (__debug__ False).
        if __debug__:
            from unique_identifier_msgs.msg import UUID
            assert \
                isinstance(value, UUID), \
                "The 'goal_id' field must be a sub message of type 'UUID'"
        self._goal_id = value

    @property
    def feedback(self):
        """Message field 'feedback'."""
        return self._feedback

    @feedback.setter
    def feedback(self, value):
        if __debug__:
            from nav2_msgs.action._compute_path_through_poses import ComputePathThroughPoses_Feedback
            assert \
                isinstance(value, ComputePathThroughPoses_Feedback), \
                "The 'feedback' field must be a sub message of type 'ComputePathThroughPoses_Feedback'"
        self._feedback = value
class Metaclass_ComputePathThroughPoses(type):
    """Metaclass of action 'ComputePathThroughPoses'."""

    # C-level action type support; resolved lazily below.
    _TYPE_SUPPORT = None

    @classmethod
    def __import_type_support__(cls):
        # Bind the generated C type-support function; ImportError is only
        # logged at debug level so pure-Python use keeps working.
        try:
            from rosidl_generator_py import import_type_support
            module = import_type_support('nav2_msgs')
        except ImportError:
            import logging
            import traceback
            logger = logging.getLogger(
                'nav2_msgs.action.ComputePathThroughPoses')
            logger.debug(
                'Failed to import needed modules for type support:\n' +
                traceback.format_exc())
        else:
            cls._TYPE_SUPPORT = module.type_support_action__action__compute_path_through_poses

            # Also pull in type support for every message/service the
            # action machinery uses (status, cancel, goal, result, feedback).
            from action_msgs.msg import _goal_status_array
            if _goal_status_array.Metaclass_GoalStatusArray._TYPE_SUPPORT is None:
                _goal_status_array.Metaclass_GoalStatusArray.__import_type_support__()

            from action_msgs.srv import _cancel_goal
            if _cancel_goal.Metaclass_CancelGoal._TYPE_SUPPORT is None:
                _cancel_goal.Metaclass_CancelGoal.__import_type_support__()

            from nav2_msgs.action import _compute_path_through_poses
            if _compute_path_through_poses.Metaclass_ComputePathThroughPoses_SendGoal._TYPE_SUPPORT is None:
                _compute_path_through_poses.Metaclass_ComputePathThroughPoses_SendGoal.__import_type_support__()
            if _compute_path_through_poses.Metaclass_ComputePathThroughPoses_GetResult._TYPE_SUPPORT is None:
                _compute_path_through_poses.Metaclass_ComputePathThroughPoses_GetResult.__import_type_support__()
            if _compute_path_through_poses.Metaclass_ComputePathThroughPoses_FeedbackMessage._TYPE_SUPPORT is None:
                _compute_path_through_poses.Metaclass_ComputePathThroughPoses_FeedbackMessage.__import_type_support__()
class ComputePathThroughPoses(metaclass=Metaclass_ComputePathThroughPoses):
    """Action class 'ComputePathThroughPoses'.

    Groups the action's Goal/Result/Feedback message types plus the
    internal wrapper services in Impl; never instantiated.
    """

    # The goal message defined in the action definition.
    from nav2_msgs.action._compute_path_through_poses import ComputePathThroughPoses_Goal as Goal
    # The result message defined in the action definition.
    from nav2_msgs.action._compute_path_through_poses import ComputePathThroughPoses_Result as Result
    # The feedback message defined in the action definition.
    from nav2_msgs.action._compute_path_through_poses import ComputePathThroughPoses_Feedback as Feedback

    class Impl:
        """Wrapper services/messages used internally by the action machinery."""

        # The send_goal service using a wrapped version of the goal message as a request.
        from nav2_msgs.action._compute_path_through_poses import ComputePathThroughPoses_SendGoal as SendGoalService
        # The get_result service using a wrapped version of the result message as a response.
        from nav2_msgs.action._compute_path_through_poses import ComputePathThroughPoses_GetResult as GetResultService
        # The feedback message with generic fields which wraps the feedback message.
        from nav2_msgs.action._compute_path_through_poses import ComputePathThroughPoses_FeedbackMessage as FeedbackMessage
        # The generic service to cancel a goal.
        from action_msgs.srv._cancel_goal import CancelGoal as CancelGoalService
        # The generic message for get the status of a goal.
        from action_msgs.msg._goal_status_array import GoalStatusArray as GoalStatusMessage

    def __init__(self):
        raise NotImplementedError('Action classes can not be instantiated')
| 39.57931
| 147
| 0.64748
| 5,673
| 51,651
| 5.465715
| 0.043011
| 0.040797
| 0.044119
| 0.056374
| 0.825104
| 0.797401
| 0.769213
| 0.759183
| 0.747218
| 0.730835
| 0
| 0.006484
| 0.271379
| 51,651
| 1,304
| 148
| 39.609663
| 0.817426
| 0.113957
| 0
| 0.675205
| 1
| 0
| 0.096224
| 0.024858
| 0
| 0
| 0
| 0
| 0.032787
| 1
| 0.086066
| false
| 0.008197
| 0.160861
| 0.008197
| 0.419057
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9ae9d9e56180bb81a6c048f38f62bb0ab888f315
| 149
|
py
|
Python
|
nnatest_api/user/tests/test_user_view.py
|
marcelotrevisani/nnatest_api
|
e4858e2348bf041ad2bc0674a6316bf046305701
|
[
"MIT"
] | null | null | null |
nnatest_api/user/tests/test_user_view.py
|
marcelotrevisani/nnatest_api
|
e4858e2348bf041ad2bc0674a6316bf046305701
|
[
"MIT"
] | null | null | null |
nnatest_api/user/tests/test_user_view.py
|
marcelotrevisani/nnatest_api
|
e4858e2348bf041ad2bc0674a6316bf046305701
|
[
"MIT"
] | null | null | null |
import pytest
from django.contrib.auth import get_user_model
def create_user(**params):
    """Create and return a user via the project's active Django user model."""
    user_model = get_user_model()
    return user_model.objects.create_user(**params)
| 18.625
| 57
| 0.785235
| 22
| 149
| 5.045455
| 0.636364
| 0.126126
| 0.216216
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114094
| 149
| 7
| 58
| 21.285714
| 0.840909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
9af6c9cc1715b3a56859120487eebc9dcb6ff631
| 2,231
|
py
|
Python
|
pynes/tests/dec_test.py
|
timgates42/pyNES
|
e385c7189eca44b9a9e0e781b28c8562e0647b0b
|
[
"BSD-3-Clause"
] | 1,046
|
2015-02-10T02:23:58.000Z
|
2022-03-16T02:42:02.000Z
|
pynes/tests/dec_test.py
|
mcanthony/pyNES
|
5f6078c02ae1fe9c6fecb4a8490f82f8c721cf3b
|
[
"BSD-3-Clause"
] | 30
|
2015-02-11T15:21:10.000Z
|
2022-03-11T23:12:26.000Z
|
pynes/tests/dec_test.py
|
mcanthony/pyNES
|
5f6078c02ae1fe9c6fecb4a8490f82f8c721cf3b
|
[
"BSD-3-Clause"
] | 132
|
2015-05-28T14:55:04.000Z
|
2021-12-09T18:58:45.000Z
|
# -*- coding: utf-8 -*-
import unittest
from pynes.compiler import lexical, syntax, semantic
class DecTest(unittest.TestCase):
    """Compiler tests for the 6502 DEC instruction's addressing modes.

    Each test tokenizes one assembly line, verifies the token stream,
    parses it into an AST, and checks the emitted opcode bytes.

    Fix: ``assertEquals`` (a long-deprecated alias removed in
    Python 3.12) replaced with ``assertEqual`` throughout.
    """

    def test_dec_zp(self):
        """DEC $00 (zero page) assembles to [0xc6, 0x00]."""
        tokens = list(lexical('DEC $00'))
        self.assertEqual(2, len(tokens))
        self.assertEqual('T_INSTRUCTION', tokens[0]['type'])
        self.assertEqual('T_ADDRESS', tokens[1]['type'])
        ast = syntax(tokens)
        self.assertEqual(1, len(ast))
        self.assertEqual('S_ZEROPAGE', ast[0]['type'])
        code = semantic(ast)
        self.assertEqual(code, [0xc6, 0x00])

    def test_dec_zpx(self):
        """DEC $10,X (zero page, X-indexed) assembles to [0xd6, 0x10]."""
        tokens = list(lexical('DEC $10,X'))
        self.assertEqual(4, len(tokens))
        self.assertEqual('T_INSTRUCTION', tokens[0]['type'])
        self.assertEqual('T_ADDRESS', tokens[1]['type'])
        self.assertEqual('T_SEPARATOR', tokens[2]['type'])
        self.assertEqual('T_REGISTER', tokens[3]['type'])
        ast = syntax(tokens)
        self.assertEqual(1, len(ast))
        self.assertEqual('S_ZEROPAGE_X', ast[0]['type'])
        code = semantic(ast)
        self.assertEqual(code, [0xd6, 0x10])

    def test_dec_abs(self):
        """DEC $1234 (absolute) assembles little-endian to [0xce, 0x34, 0x12]."""
        tokens = list(lexical('DEC $1234'))
        self.assertEqual(2, len(tokens))
        self.assertEqual('T_INSTRUCTION', tokens[0]['type'])
        self.assertEqual('T_ADDRESS', tokens[1]['type'])
        self.assertEqual('$1234', tokens[1]['value'])
        ast = syntax(tokens)
        self.assertEqual(1, len(ast))
        self.assertEqual('S_ABSOLUTE', ast[0]['type'])
        code = semantic(ast)
        self.assertEqual(code, [0xce, 0x34, 0x12])

    def test_dec_absx(self):
        """DEC $1234,X (absolute, X-indexed) assembles to [0xde, 0x34, 0x12]."""
        tokens = list(lexical('DEC $1234,X'))
        self.assertEqual(4, len(tokens))
        self.assertEqual('T_INSTRUCTION', tokens[0]['type'])
        self.assertEqual('T_ADDRESS', tokens[1]['type'])
        self.assertEqual('$1234', tokens[1]['value'])
        self.assertEqual('T_SEPARATOR', tokens[2]['type'])
        self.assertEqual('T_REGISTER', tokens[3]['type'])
        ast = syntax(tokens)
        self.assertEqual(1, len(ast))
        self.assertEqual('S_ABSOLUTE_X', ast[0]['type'])
        code = semantic(ast)
        self.assertEqual(code, [0xde, 0x34, 0x12])
| 37.813559
| 61
| 0.609592
| 270
| 2,231
| 4.940741
| 0.203704
| 0.35982
| 0.152924
| 0.110195
| 0.856072
| 0.82009
| 0.778111
| 0.778111
| 0.778111
| 0.713643
| 0
| 0.040758
| 0.219184
| 2,231
| 58
| 62
| 38.465517
| 0.725029
| 0.009413
| 0
| 0.612245
| 0
| 0
| 0.133152
| 0
| 0
| 0
| 0.018116
| 0
| 0.612245
| 1
| 0.081633
| false
| 0
| 0.040816
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b1177d5b97e55775ba28fb65f6792f4b5cfa2aca
| 3,396
|
py
|
Python
|
Views/TrackingPanel.py
|
fefelson/FelsonSports
|
bc0c16d63b19ffe4d468dcda5ab224013abe23fa
|
[
"MIT"
] | null | null | null |
Views/TrackingPanel.py
|
fefelson/FelsonSports
|
bc0c16d63b19ffe4d468dcda5ab224013abe23fa
|
[
"MIT"
] | null | null | null |
Views/TrackingPanel.py
|
fefelson/FelsonSports
|
bc0c16d63b19ffe4d468dcda5ab224013abe23fa
|
[
"MIT"
] | null | null | null |
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wxagg import NavigationToolbar2WxAgg as NavigationToolbar2Wx
import wx
from pprint import pprint
class NCAABTrackingPanel(wx.Panel):
    """wx panel with two stacked matplotlib axes tracking NCAAB results.

    Fixes: removed the unused ``name`` local (dead store), replaced the
    manual color-append loop with a comprehension, and renamed the inner
    comprehension variable so it no longer shadows the axis index ``i``.
    """

    def __init__(self, parent, *args, **kwargs):
        """Build the figure, canvas, and navigation toolbar inside a sizer."""
        super().__init__(parent, *args, **kwargs)
        self.figure = Figure()
        self.axes = self.figure.subplots(2, 1)
        self.canvas = FigureCanvas(self, -1, self.figure)
        self.toolbar = NavigationToolbar2Wx(self.canvas)
        self.toolbar.Realize()
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
        sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)
        self.SetSizer(sizer)
        self.Fit()

    def setPanel(self, info):
        """Redraw both axes from ``info``.

        ``info`` maps "away"/"home" to dicts with "data" (per-game values)
        and "money" (1 marks a green bar, anything else red).  Overlays
        5-game and 13-game rolling averages.
        """
        for i, value in enumerate([info[hA] for hA in ("away", "home")]):
            data = value["data"]
            num = [x for x in range(len(data))]
            # Green bar for money == 1, red otherwise.
            colors = ["green" if mO == 1 else "red" for mO in value["money"]]
            # Rolling means over the last 5 and 13 games.
            twoWeek = [sum(data[(j - 5):j]) / 5 for j in range(5, len(data) + 1)]
            oneMonth = [sum(data[(j - 13):j]) / 13 for j in range(13, len(data) + 1)]
            self.axes[i].clear()
            self.axes[i].bar(num, data, color=colors)
            self.axes[i].grid(True)
            self.axes[i].axis([0, 40, -20, 20])
            self.axes[i].plot([j for j in range(5, len(data) + 1)], twoWeek, color="blue")
            self.axes[i].plot([j for j in range(13, len(data) + 1)], oneMonth, color="orange")
        self.canvas.draw()
        self.canvas.Refresh()
class NBATrackingPanel(wx.Panel):
    """wx panel with two stacked matplotlib axes tracking NBA results.

    Fixes (matching NCAABTrackingPanel): removed the unused ``name``
    local, replaced the manual color-append loop with a comprehension,
    and renamed the inner comprehension variable away from ``i``.
    """

    def __init__(self, parent, *args, **kwargs):
        """Build the figure, canvas, and navigation toolbar inside a sizer."""
        super().__init__(parent, *args, **kwargs)
        self.figure = Figure()
        self.axes = self.figure.subplots(2, 1)
        self.canvas = FigureCanvas(self, -1, self.figure)
        self.toolbar = NavigationToolbar2Wx(self.canvas)
        self.toolbar.Realize()
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
        sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)
        self.SetSizer(sizer)
        self.Fit()

    def setPanel(self, info):
        """Redraw both axes from ``info``.

        Same contract as NCAABTrackingPanel.setPanel, with NBA-sized
        windows: 7-game and 26-game rolling averages, x-range 40-82.
        """
        for i, value in enumerate([info[hA] for hA in ("away", "home")]):
            data = value["data"]
            num = [x for x in range(len(data))]
            # Green bar for money == 1, red otherwise.
            colors = ["green" if mO == 1 else "red" for mO in value["money"]]
            # Rolling means over the last 7 and 26 games.
            twoWeek = [sum(data[(j - 7):j]) / 7 for j in range(7, len(data) + 1)]
            oneMonth = [sum(data[(j - 26):j]) / 26 for j in range(26, len(data) + 1)]
            self.axes[i].clear()
            self.axes[i].bar(num, data, color=colors)
            self.axes[i].grid(True)
            self.axes[i].axis([40, 82, -20, 20])
            self.axes[i].plot([j for j in range(7, len(data) + 1)], twoWeek, color="blue")
            self.axes[i].plot([j for j in range(26, len(data) + 1)], oneMonth, color="orange")
        self.canvas.draw()
        self.canvas.Refresh()
| 33.294118
| 93
| 0.555654
| 452
| 3,396
| 4.134956
| 0.196903
| 0.059925
| 0.057785
| 0.047084
| 0.872124
| 0.872124
| 0.82932
| 0.812199
| 0.773676
| 0.773676
| 0
| 0.026174
| 0.291225
| 3,396
| 101
| 94
| 33.623762
| 0.750312
| 0
| 0
| 0.746667
| 0
| 0
| 0.024441
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053333
| false
| 0
| 0.08
| 0
| 0.16
| 0.013333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b172e7642739bc4669e10c1336c61346215a68de
| 90
|
py
|
Python
|
suitcase/mongo_embedded/conftest.py
|
mrakitin/suitcase-mongo
|
5f150dde5f3ec5f11aa0b2076113211f941dbb3f
|
[
"BSD-3-Clause"
] | 1
|
2021-03-26T14:17:16.000Z
|
2021-03-26T14:17:16.000Z
|
suitcase/mongo_embedded/conftest.py
|
mrakitin/suitcase-mongo
|
5f150dde5f3ec5f11aa0b2076113211f941dbb3f
|
[
"BSD-3-Clause"
] | 15
|
2019-01-30T15:04:07.000Z
|
2019-04-29T14:23:20.000Z
|
suitcase/mongo_embedded/conftest.py
|
mrakitin/suitcase-mongo
|
5f150dde5f3ec5f11aa0b2076113211f941dbb3f
|
[
"BSD-3-Clause"
] | 3
|
2019-06-04T16:38:12.000Z
|
2020-12-04T22:29:09.000Z
|
from bluesky.tests.conftest import RE # noqa
from ophyd.tests.conftest import hw # noqa
| 30
| 45
| 0.777778
| 14
| 90
| 5
| 0.642857
| 0.371429
| 0.542857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155556
| 90
| 2
| 46
| 45
| 0.921053
| 0.1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b1ace9a1ad81c4984c3d280dae564bd7878143f5
| 1,681
|
py
|
Python
|
03.26.21 - Calculadora.py
|
AdamastorLinsFrancaNetto/python-iniciante
|
1dab7f824559e5d3db3f28b3e408d3899a5209b4
|
[
"MIT"
] | null | null | null |
03.26.21 - Calculadora.py
|
AdamastorLinsFrancaNetto/python-iniciante
|
1dab7f824559e5d3db3f28b3e408d3899a5209b4
|
[
"MIT"
] | null | null | null |
03.26.21 - Calculadora.py
|
AdamastorLinsFrancaNetto/python-iniciante
|
1dab7f824559e5d3db3f28b3e408d3899a5209b4
|
[
"MIT"
] | null | null | null |
# Basic four-operation calculator (sum, subtraction, multiplication and
# division), implemented twice as an exercise: once with a while loop and
# once with a for-based infinite loop (iter(int, 1) never yields 1).
#
# Fixes: dividing by zero raised an uncaught ZeroDivisionError; the first
# loop's condition (sn == 's') ended the loop on 'S' even though the
# validation prompt accepts it; a redundant `pass` was removed; the
# operation dispatch is factored into apply_op(); the script body is
# wrapped in main() behind a __main__ guard so importing it is safe.


def apply_op(op, n1, n2):
    """Return n1 <op> n2 for op in '+', '-', '*', '/'.

    Callers must guard op == '/' against n2 == 0 before calling.
    """
    if op == '+':
        return n1 + n2
    if op == '-':
        return n1 - n2
    if op == '*':
        return n1 * n2
    return n1 / n2


def main():
    """Run both interactive calculator variants, mirroring the original script."""
    # Variant 1: while-driven repeat loop.
    sn = 's'
    while sn in ('s', 'S'):  # fix: 'S' is a valid "yes" answer too
        n1 = float(input('\nNúmero: '))
        op = str(input('+ - * /: '))
        while op != '+' and op != '-' and op != '*' and op != '/':
            op = str(input('Informe uma das alternativas + - * /: '))
        n2 = float(input('Número: '))
        if op == '/' and n2 == 0:
            # fix: avoid the ZeroDivisionError the original raised
            print('RESULTADO: divisão por zero não é permitida')
        else:
            print(f'RESULTADO: {n1} {op} {n2} = {apply_op(op, n1, n2)}')
        sn = str(input('\nDeseja fazer outra operação [s/n]? '))
        while sn != 's' and sn != 'S' and sn != 'n' and sn != 'N':
            sn = str(input('Deseja fazer outra operação [s/n]? '))
    print('\nVOLTE SEMPRE!!!')

    # Variant 2: for-driven "infinite" loop, exited with break.
    for x in iter(int, 1):
        n1 = float(input('\nNúmero: '))
        op = str(input('+ - * / : '))
        while op != '+' and op != '-' and op != '*' and op != '/':
            op = str(input('Informe uma das alternativas + - * / : '))
        n2 = float(input('Número: '))
        if op == '/' and n2 == 0:
            print('RESULTADO: divisão por zero não é permitida')
        else:
            print(f'RESULTADO: {n1} {op} {n2} = {apply_op(op, n1, n2)}')
        sn = str(input('\nDeseja fazer outra operação [S/N]? '))
        while sn != 's' and sn != 'S' and sn != 'n' and sn != 'N':
            sn = str(input('Deseja fazer outra operação [S/N]? '))
        if sn != 's' and sn != 'S':
            break
    print('\nVOLTE SEMPRE !!!')


if __name__ == '__main__':
    main()
| 32.960784
| 130
| 0.490184
| 228
| 1,681
| 3.614035
| 0.236842
| 0.07767
| 0.07767
| 0.165049
| 0.78034
| 0.769417
| 0.769417
| 0.769417
| 0.769417
| 0.769417
| 0
| 0.030057
| 0.267698
| 1,681
| 50
| 131
| 33.62
| 0.639318
| 0.078525
| 0
| 0.75
| 0
| 0
| 0.385612
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.025
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
49655b7aa01893c16f0de6f15d47ed273d05d267
| 40
|
py
|
Python
|
gsa_pytorch/__init__.py
|
lucidrains/global-self-attention-network
|
c31594a40d9d10bad9d60dc3a87fb76bbb7b0de9
|
[
"MIT"
] | 82
|
2020-10-03T00:35:22.000Z
|
2022-03-29T12:43:31.000Z
|
gsa_pytorch/__init__.py
|
Forsaken-core/global-self-attention-network
|
c31594a40d9d10bad9d60dc3a87fb76bbb7b0de9
|
[
"MIT"
] | 3
|
2020-10-20T14:21:14.000Z
|
2020-12-31T03:57:26.000Z
|
gsa_pytorch/__init__.py
|
Forsaken-core/global-self-attention-network
|
c31594a40d9d10bad9d60dc3a87fb76bbb7b0de9
|
[
"MIT"
] | 11
|
2020-10-07T02:34:36.000Z
|
2021-12-14T11:26:25.000Z
|
from gsa_pytorch.gsa_pytorch import GSA
| 20
| 39
| 0.875
| 7
| 40
| 4.714286
| 0.571429
| 0.606061
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 40
| 1
| 40
| 40
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
499fd4658dcc008654676e21c6245144a127b2fd
| 24,912
|
py
|
Python
|
tests/tests_dataprovider_integration.py
|
Open-CMMS/openCMMS_backend
|
56511ebac83a5dc1fb8768a98bc675e88530a447
|
[
"BSD-3-Clause"
] | 3
|
2021-03-08T19:14:38.000Z
|
2022-02-01T17:57:31.000Z
|
tests/tests_dataprovider_integration.py
|
Open-CMMS/openCMMS_backend
|
56511ebac83a5dc1fb8768a98bc675e88530a447
|
[
"BSD-3-Clause"
] | null | null | null |
tests/tests_dataprovider_integration.py
|
Open-CMMS/openCMMS_backend
|
56511ebac83a5dc1fb8768a98bc675e88530a447
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import pytest
from init_db_tests import init_db
from django.contrib.auth.models import Permission
from django.test import TestCase
from maintenancemanagement.models import Equipment, Field
from openCMMS.settings import BASE_DIR
from rest_framework.test import APIClient
from usersmanagement.models import UserProfile
from utils.data_provider import add_job, scheduler
from utils.models import DataProvider
from utils.serializers import (
DataProviderRequirementsSerializer,
DataProviderSerializer,
)
class DataProviderTest(TestCase):
    """Integration tests (US23) for the data provider API endpoints.

    Covers list/detail GET, POST, PUT, DELETE and the /test/ endpoint,
    each with and without the relevant Django permission.
    """

    @pytest.fixture(scope="class", autouse=True)
    def init_database(django_db_setup, django_db_blocker):
        """Populate the test database once for the whole test class."""
        with django_db_blocker.unblock():
            init_db()

    def _create_user(self):
        """Create and return a fresh test user (no permissions attached)."""
        return UserProfile.objects.create(username="user", password="p4ssword")

    def add_view_perm(self, user):
        """
        Add view permission to user
        """
        perm_view = Permission.objects.get(codename="view_dataprovider")
        user.user_permissions.set([perm_view])

    def add_add_perm(self, user):
        """
        Add add permission to user
        """
        perm_add = Permission.objects.get(codename="add_dataprovider")
        # .add() (not .set()) so this can be combined with another permission.
        user.user_permissions.add(perm_add)

    def add_change_perm(self, user):
        """
        Add change permission to user
        """
        perm_change = Permission.objects.get(codename="change_dataprovider")
        user.user_permissions.set([perm_change])

    def add_delete_perm(self, user):
        """
        Add delete permission to user
        """
        perm_delete = Permission.objects.get(codename="delete_dataprovider")
        user.user_permissions.set([perm_delete])

    def test_US23_I1_dataproviderlist_get_with_perm(self):
        """
        Test if a user with perm receives the dataproviders' list

        Inputs:
            user (UserProfile): a UserProfile with permissions to view data providers.
            serializer (DataProviderRequirementsSerializer): a serializer containing all data providers of the database.

        Expected Output:
            We expect a 200 status code in the response.
            We expect to get in the response the same data as in serializer.
        """
        user = self._create_user()
        self.add_view_perm(user)
        # The endpoint also lists the importable provider scripts, minus
        # package internals — mirror that filtering here.
        python_files = os.listdir(os.path.join(BASE_DIR, 'utils/data_providers'))
        python_files.pop(python_files.index('__init__.py'))
        if '__pycache__' in python_files:
            python_files.pop(python_files.index('__pycache__'))
        equipments = Equipment.objects.all()
        data_providers = DataProvider.objects.all()
        serializer = DataProviderRequirementsSerializer({'equipments': equipments, 'data_providers': data_providers})
        dict_res = serializer.data.copy()
        dict_res['python_files'] = python_files
        c = APIClient()
        c.force_authenticate(user=user)
        response = c.get("/api/dataproviders/")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(dict_res, response.json())

    def test_US23_I1_dataproviderlist_get_without_perm(self):
        """
        Test if a user without perm doesn't receive the dataproviders' list

        Inputs:
            user (UserProfile): a UserProfile without permissions to view data providers.

        Expected Output:
            We expect a 401 status code in the response.
        """
        user = self._create_user()
        c = APIClient()
        c.force_authenticate(user=user)
        response = c.get("/api/dataproviders/")
        self.assertEqual(response.status_code, 401)

    def test_US23_I2_dataproviderlist_post_with_perm(self):
        """
        Test if a user with perm can add a dataprovider

        Inputs:
            user (UserProfile): a UserProfile with permissions to add data providers.
            serializer (DataProviderSerializer): a serializer containing the posted data provider data.
            post data (JSON): a mock-up of a data provider.

        Expected Output:
            We expect a 201 status code in the response.
            We expect to get in the response the same data as in serializer.
        """
        user = self._create_user()
        self.add_add_perm(user)
        client = APIClient()
        client.force_authenticate(user=user)
        response = client.post(
            '/api/dataproviders/', {
                'file_name': 'python_file.py',
                'name': 'dataprovider de test',
                'recurrence': '10d',
                'ip_address': '127.0.0.1',
                'port': 5002,
                'equipment': Equipment.objects.get(name='Embouteilleuse AXB1').id,
                'field_object': Field.objects.get(name="Nb bouteilles").object_set.get().id,
                'is_activated': True
            },
            format='json'
        )
        dataprovider = DataProvider.objects.get(file_name='python_file.py')
        serializer = DataProviderSerializer(dataprovider)
        self.assertEqual(response.status_code, 201)
        self.assertEqual(response.data, serializer.data)

    def test_US23_I2_dataproviderlist_post_without_perm(self):
        """
        Test if a user without perm can't add a dataprovider

        Inputs:
            user (UserProfile): a UserProfile without permissions to add data providers.
            post data (JSON): a mock-up of a data provider.

        Expected Output:
            We expect a 401 status code in the response.
        """
        user = self._create_user()
        client = APIClient()
        client.force_authenticate(user=user)
        response = client.post(
            '/api/dataproviders/', {
                'file_name': 'script.py',
                'name': 'dataprovider de test',
                'recurrence': '10d',
                'ip_address': '127.0.0.1',
                'port': 5002,
                'equipment': Equipment.objects.get(name='Embouteilleuse AXB1').id,
                'field_object': Field.objects.get(name="Nb bouteilles").object_set.get().id,
                'is_activated': True
            },
            format='json'
        )
        self.assertEqual(response.status_code, 401)

    def test_US23_I2_dataproviderlist_post_with_perm_and_missing_parms(self):
        """
        Test if a user with perm can't add a dataprovider with missing params

        Inputs:
            user (UserProfile): a UserProfile with permissions to add data providers.
            post data (JSON): a mock-up of a data provider with missing params.

        Expected Output:
            We expect a 400 status code in the response.
        """
        user = self._create_user()
        self.add_add_perm(user)
        client = APIClient()
        client.force_authenticate(user=user)
        response = client.post(
            '/api/dataproviders/', {
                'file_name': 'script.py',
                'equipment': Equipment.objects.get(name='Embouteilleuse AXB1').id,
            },
            format='json'
        )
        self.assertEqual(response.status_code, 400)

    def test_US23_I2_dataproviderlist_post_with_perm_and_too_much_parms(self):
        """
        Test if a user with perm can add a dataprovider with extra (unknown) params

        Inputs:
            user (UserProfile): a UserProfile with permissions to add data providers.
            serializer (DataProviderSerializer): a serializer containing the posted data provider data.
            post data (JSON): a mock-up of a data provider with extra params.

        Expected Output:
            We expect a 201 status code in the response (unknown fields are ignored).
            We expect to get in the response the same data as in serializer.
        """
        user = self._create_user()
        self.add_add_perm(user)
        client = APIClient()
        client.force_authenticate(user=user)
        response = client.post(
            '/api/dataproviders/', {
                'file_name': 'script.py',
                'name': 'dataprovider de test',
                'recurrence': '10d',
                'ip_address': '127.0.0.1',
                'port': 5002,
                'equipment': Equipment.objects.get(name='Embouteilleuse AXB1').id,
                'field_object': Field.objects.get(name="Nb bouteilles").object_set.get().id,
                'fake field': 'useless data',
                'is_activated': True
            },
            format='json'
        )
        self.assertEqual(response.status_code, 201)
        dataprovider = DataProvider.objects.get(file_name='script.py')
        serializer = DataProviderSerializer(dataprovider)
        self.assertEqual(response.data, serializer.data)

    def test_US23_I3_dataproviderdetail_get_with_perm(self):
        """
        Test if a user with perm can get a dataprovider.

        Inputs:
            user (UserProfile): a UserProfile with permissions to view data providers.
            dataprovider (DataProvider): the data provider for which we want details.
            serializer (DataProviderSerializer): a serializer containing the data of dataprovider.

        Expected Output:
            We expect a 200 status code in the response.
            We expect to get in the response the same data as in serializer.
        """
        user = self._create_user()
        self.add_view_perm(user)
        client = APIClient()
        client.force_authenticate(user=user)
        dataprovider = DataProvider.objects.get(file_name="fichier_test_dataprovider.py")
        serializer = DataProviderSerializer(dataprovider)
        response = client.get(f'/api/dataproviders/{dataprovider.id}/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data, serializer.data)

    def test_US23_I3_dataproviderdetail_get_without_perm(self):
        """
        Test if a user without perm can't get a dataprovider.

        Inputs:
            user (UserProfile): a UserProfile without permissions to view data providers.
            dataprovider (DataProvider): the data provider for which we want details.

        Expected Output:
            We expect a 401 status code in the response.
        """
        user = self._create_user()
        client = APIClient()
        client.force_authenticate(user=user)
        dataprovider = DataProvider.objects.get(file_name="fichier_test_dataprovider.py")
        response = client.get(f'/api/dataproviders/{dataprovider.id}/')
        self.assertEqual(response.status_code, 401)

    def test_US23_I4_dataproviderdetail_put_with_perm(self):
        """
        Test if a user with perm can update a dataprovider.

        Inputs:
            user (UserProfile): a UserProfile with permissions to add and change data providers.
            serializer (DataProviderSerializer): a serializer containing the data of the updated data provider.
            post data (JSON): a mock-up of a data provider.
            put data (JSON): a mock-up of an updated data provider.

        Expected Output:
            We expect a 200 status code in the response.
            We expect to get in the response the same data as in serializer.
        """
        user = self._create_user()
        self.add_change_perm(user)
        self.add_add_perm(user)
        client = APIClient()
        client.force_authenticate(user=user)
        client.post(
            '/api/dataproviders/', {
                'file_name': 'python_file.py',
                'name': 'dataprovider de test pour put',
                'recurrence': '10d',
                'ip_address': '127.0.0.1',
                'port': 5002,
                'equipment': Equipment.objects.get(name='Embouteilleuse AXB1').id,
                'field_object': Field.objects.get(name="Nb bouteilles").object_set.get().id,
                'is_activated': True
            },
            format='json'
        )
        dataprovider = DataProvider.objects.get(name='dataprovider de test pour put')
        response = client.put(
            f'/api/dataproviders/{dataprovider.id}/', {
                'file_name': 'fichier_test_dataprovider.py',
                'name': 'dataprovider mis à jour',
                'recurrence': '5d',
                'ip_address': '192.168.0.1',
                'port': 5002,
                'equipment': Equipment.objects.get(name='Embouteilleuse AXB1').id,
                'field_object': Field.objects.get(name="Nb bouteilles").object_set.get().id,
                'is_activated': True
            },
            format='json'
        )
        dataprovider = DataProvider.objects.get(name='dataprovider mis à jour')
        serializer = DataProviderSerializer(dataprovider)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data, serializer.data)

    def test_US23_I4_dataproviderdetail_put_with_perm_and_missing_parms(self):
        """
        Test if a user with perm can update a dataprovider with missing params.

        Inputs:
            user (UserProfile): a UserProfile with permissions to add and change data providers.
            serializer (DataProviderSerializer): a serializer containing the data of the updated data provider.
            post data (JSON): a mock-up of a data provider.
            put data (JSON): a mock-up of an updated data provider with missing params.

        Expected Output:
            We expect a 200 status code in the response.
            We expect to get in the response the same data as in serializer.
        """
        user = self._create_user()
        self.add_change_perm(user)
        self.add_add_perm(user)
        client = APIClient()
        client.force_authenticate(user=user)
        client.post(
            '/api/dataproviders/', {
                'file_name': 'python_file.py',
                'name': 'dataprovider de test pour put',
                'recurrence': '10d',
                'ip_address': '127.0.0.1',
                'port': 5002,
                'equipment': Equipment.objects.get(name='Embouteilleuse AXB1').id,
                'field_object': Field.objects.get(name="Nb bouteilles").object_set.get().id,
                'is_activated': True
            },
            format='json'
        )
        dataprovider = DataProvider.objects.get(name='dataprovider de test pour put')
        response = client.put(
            f'/api/dataproviders/{dataprovider.id}/', {
                'name': 'dataprovider mis à jour 2',
                'ip_address': '192.168.1.2',
            },
            format='json'
        )
        dataprovider = DataProvider.objects.get(name='dataprovider mis à jour 2')
        serializer = DataProviderSerializer(dataprovider)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data, serializer.data)

    def test_US23_I5_dataproviderdetail_delete_with_perm(self):
        """
        Test if a user with perm can delete a dataprovider.

        Inputs:
            user (UserProfile): a UserProfile with permissions to delete data providers.

        Expected Output:
            We expect a 204 status code in the response.
            We expect to not find in database the deleted data provider.
            We expect to have one job less after deleting the data provider.
        """
        user = self._create_user()
        self.add_delete_perm(user)
        client = APIClient()
        client.force_authenticate(user=user)
        dataprovider = DataProvider.objects.get(file_name="fichier_test_dataprovider.py")
        # Register a scheduler job for this provider so the delete endpoint
        # has a job to remove; compare job counts before and after.
        add_job(dataprovider)
        n_jobs_before = scheduler.get_jobs()
        response = client.delete(f'/api/dataproviders/{dataprovider.id}/')
        n_jobs_after = scheduler.get_jobs()
        self.assertEqual(response.status_code, 204)
        self.assertFalse(DataProvider.objects.filter(id=dataprovider.id).exists())
        self.assertEqual(len(n_jobs_before), len(n_jobs_after) + 1)

    def test_US23_I5_dataproviderdetail_delete_without_perm(self):
        """
        Test if a user without perm can't delete a dataprovider.

        Inputs:
            user (UserProfile): a UserProfile without permissions to delete data providers.

        Expected Output:
            We expect a 401 status code in the response.
        """
        user = self._create_user()
        client = APIClient()
        client.force_authenticate(user=user)
        dataprovider = DataProvider.objects.get(file_name="fichier_test_dataprovider.py")
        response = client.delete(f'/api/dataproviders/{dataprovider.id}/')
        self.assertEqual(response.status_code, 401)

    def test_US23_I6_testdataprovider_post_with_perm(self):
        """
        Test if a user with perm can test a data provider.

        Inputs:
            user (UserProfile): a UserProfile with permissions to add data providers.
            file (File): a temporary file which will return a value for the data provider test.
            post data (JSON): a mock-up of a data provider.

        Expected Output:
            We expect a 200 status code in the response.
            We expect to get in the response the value returned by the get_data function in the file.
        """
        # Write a throwaway provider script whose get_data always returns 2.
        with open(os.path.join(BASE_DIR, 'utils/data_providers/temp_test_data_providers.py'), "w+") as file:
            file.write('def get_data(ip_address, port):\n')
            file.write('    return 2')
        user = self._create_user()
        self.add_add_perm(user)
        client = APIClient()
        client.force_authenticate(user=user)
        response = client.post(
            '/api/dataproviders/test/', {
                'file_name': 'temp_test_data_providers.py',
                'name': 'dataprovider de test',
                'recurrence': '10d',
                'ip_address': '127.0.0.1',
                'port': 5002,
                'equipment': Equipment.objects.get(name='Embouteilleuse AXB1').id,
                'field_object': Field.objects.get(name="Nb bouteilles").object_set.get().id,
                'is_activated': True
            },
            format='json'
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data["data"], 2)
        os.remove(os.path.join(BASE_DIR, 'utils/data_providers/temp_test_data_providers.py'))

    def test_US23_I6_testdataprovider_post_without_perm(self):
        """
        Test if a user without perm can't test a data provider.

        Inputs:
            user (UserProfile): a UserProfile without permissions to add data providers.

        Expected Output:
            We expect a 401 status code in the response.
        """
        user = self._create_user()
        client = APIClient()
        client.force_authenticate(user=user)
        response = client.post('/api/dataproviders/test/', format='json')
        self.assertEqual(response.status_code, 401)

    def test_US23_I6_testdataprovider_post_with_perm_and_not_well_formated_file(self):
        """
        Test if a user with perm can test a data provider with a not well formatted file.

        Inputs:
            user (UserProfile): a UserProfile with permissions to add data providers.
            file (File): a temporary file which is not well formatted.
            post data (JSON): a mock-up of a data provider.

        Expected Output:
            We expect to find the pair {'error': 'Python file is not well formated, please follow the example'} in the error of the response's data.
        """
        # The script deliberately lacks the required get_data entry point.
        with open(os.path.join(BASE_DIR, 'utils/data_providers/temp_test_data_providers_error.py'), "w+") as file:
            file.write('def wrong_get_data(ip_address, port):\n')
            file.write('    return 2')
        user = self._create_user()
        self.add_add_perm(user)
        client = APIClient()
        client.force_authenticate(user=user)
        response = client.post(
            '/api/dataproviders/test/', {
                'file_name': 'temp_test_data_providers_error.py',
                'name': 'dataprovider de test',
                'recurrence': '10d',
                'ip_address': '127.0.0.1',
                'port': 5002,
                'equipment': Equipment.objects.get(name='Embouteilleuse AXB1').id,
                'field_object': Field.objects.get(name="Nb bouteilles").object_set.get().id,
                'is_activated': True
            },
            format='json'
        )
        self.assertEqual(response.data["error"], 'Python file is not well formated, please follow the example')
        os.remove(os.path.join(BASE_DIR, 'utils/data_providers/temp_test_data_providers_error.py'))

    def test_US23_I6_testdataprovider_post_with_perm_but_no_file(self):
        """
        Test if a user with perm can test a data provider with a missing file.

        Inputs:
            user (UserProfile): a UserProfile with permissions to add data providers.
            post data (JSON): a mock-up of a data provider.

        Expected Output:
            We expect to find the pair {'error': "Python file not found, please enter 'name_of_your_file.py'"} in the error of the response's data.
        """
        user = self._create_user()
        self.add_add_perm(user)
        client = APIClient()
        client.force_authenticate(user=user)
        response = client.post(
            '/api/dataproviders/test/', {
                'file_name': 'toto.py',
                'name': 'dataprovider de test',
                'recurrence': '10d',
                'ip_address': '127.0.0.1',
                'port': 5002,
                'equipment': Equipment.objects.get(name='Embouteilleuse AXB1').id,
                'field_object': Field.objects.get(name="Nb bouteilles").object_set.get().id,
                'is_activated': True
            },
            format='json'
        )
        self.assertEqual(response.data["error"], "Python file not found, please enter 'name_of_your_file.py'")

    def test_US23_I6_testdataprovider_post_with_perm_and_not_working_get_data(self):
        """
        Test if a user with perm can test a data provider whose get_data function doesn't work.

        Inputs:
            user (UserProfile): a UserProfile with permissions to add data providers.
            file (File): a temporary file whose get_data raises an exception.
            post data (JSON): a mock-up of a data provider.

        Expected Output:
            We expect to find the pair {'error': 'IP not found or python file not working'} in the error of the response's data.
        """
        # The script's get_data raises GetDataException to simulate a failure.
        with open(
            os.path.join(BASE_DIR, 'utils/data_providers/temp_test_data_providers_error_in_getdata.py'), "w+"
        ) as file:
            file.write('from utils.data_provider import GetDataException\n')
            file.write('def get_data(ip_address, port):\n')
            file.write('    raise GetDataException()')
        user = self._create_user()
        self.add_add_perm(user)
        client = APIClient()
        client.force_authenticate(user=user)
        response = client.post(
            '/api/dataproviders/test/', {
                'file_name': 'temp_test_data_providers_error_in_getdata.py',
                'name': 'dataprovider de test',
                'recurrence': '10d',
                'ip_address': '127.0.0.1',
                'port': 5002,
                'equipment': Equipment.objects.get(name='Embouteilleuse AXB1').id,
                'field_object': Field.objects.get(name="Nb bouteilles").object_set.get().id,
                'is_activated': True
            },
            format='json'
        )
        self.assertEqual(response.data["error"], 'IP not found or python file not working')
        os.remove(os.path.join(BASE_DIR, 'utils/data_providers/temp_test_data_providers_error_in_getdata.py'))
| 44.645161
| 152
| 0.609626
| 2,833
| 24,912
| 5.219202
| 0.080127
| 0.032531
| 0.023671
| 0.012647
| 0.863993
| 0.855336
| 0.823752
| 0.813878
| 0.784255
| 0.765521
| 0
| 0.016962
| 0.294758
| 24,912
| 557
| 153
| 44.725314
| 0.824634
| 0.256382
| 0
| 0.634731
| 0
| 0
| 0.212727
| 0.057891
| 0
| 0
| 0
| 0
| 0.077844
| 1
| 0.065868
| false
| 0.050898
| 0.038922
| 0
| 0.107784
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
77299809b26ce62dc1fe864f8dd4e5686e21f77b
| 76,651
|
py
|
Python
|
objectModel/Python/tests/cdm/resolution_guidance/test_resolution_guidance_cardinality.py
|
MiguelSHS/microsoftCDM
|
d8df31fa455fcc6afd698e3ca7ec0f8c4a6716fd
|
[
"CC-BY-4.0",
"MIT"
] | 1
|
2021-03-05T03:35:58.000Z
|
2021-03-05T03:35:58.000Z
|
objectModel/Python/tests/cdm/resolution_guidance/test_resolution_guidance_cardinality.py
|
spbast/CDM
|
bf97a3720c97ee4c9df3625084cf8b3bc65ff9c7
|
[
"CC-BY-4.0",
"MIT"
] | 38
|
2021-09-07T21:23:21.000Z
|
2022-03-14T01:36:58.000Z
|
objectModel/Python/tests/cdm/resolution_guidance/test_resolution_guidance_cardinality.py
|
spbast/CDM
|
bf97a3720c97ee4c9df3625084cf8b3bc65ff9c7
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
from tests.cdm.resolution_guidance import common_test
from tests.common import async_test
from tests.utilities.object_validator import AttributeContextExpectedValue, AttributeExpectedValue
class ResolutionGuidanceCardinalityTest(common_test.CommonTest):
@async_test
async def test_foreign_key_one_to_one_cardinality(self):
"""Resolution Guidance Test - One:One Cardinality"""
test_name = 'test_foreign_key_one_to_one_cardinality'
entity_name = 'Person'
expectedContext_default = AttributeContextExpectedValue()
expectedContext_normalized = AttributeContextExpectedValue()
expectedContext_referenceOnly = AttributeContextExpectedValue()
expectedContext_structured = AttributeContextExpectedValue()
expectedContext_normalized_structured = AttributeContextExpectedValue()
expectedContext_referenceOnly_normalized = AttributeContextExpectedValue()
expectedContext_referenceOnly_structured = AttributeContextExpectedValue()
expectedContext_referenceOnly_normalized_structured = AttributeContextExpectedValue()
expected_default = []
expected_normalized = []
expected_referenceOnly = []
expected_structured = []
expected_normalized_structured = []
expected_referenceOnly_normalized = []
expected_referenceOnly_structured = []
expected_referenceOnly_normalized_structured = []
await self.run_test_with_values(
test_name,
entity_name,
expectedContext_default,
expectedContext_normalized,
expectedContext_referenceOnly,
expectedContext_structured,
expectedContext_normalized_structured,
expectedContext_referenceOnly_normalized,
expectedContext_referenceOnly_structured,
expectedContext_referenceOnly_normalized_structured,
expected_default,
expected_normalized,
expected_referenceOnly,
expected_structured,
expected_normalized_structured,
expected_referenceOnly_normalized,
expected_referenceOnly_structured,
expected_referenceOnly_normalized_structured
)
entity_name = 'PersonContact'
expectedContext_default = AttributeContextExpectedValue()
expectedContext_normalized = AttributeContextExpectedValue()
expectedContext_referenceOnly = AttributeContextExpectedValue()
expectedContext_structured = AttributeContextExpectedValue()
expectedContext_normalized_structured = AttributeContextExpectedValue()
expectedContext_referenceOnly_normalized = AttributeContextExpectedValue()
expectedContext_referenceOnly_structured = AttributeContextExpectedValue()
expectedContext_referenceOnly_normalized_structured = AttributeContextExpectedValue()
expected_default = []
expected_normalized = []
expected_referenceOnly = []
expected_structured = []
expected_normalized_structured = []
expected_referenceOnly_normalized = []
expected_referenceOnly_structured = []
expected_referenceOnly_normalized_structured = []
await self.run_test_with_values(
test_name,
entity_name,
expectedContext_default,
expectedContext_normalized,
expectedContext_referenceOnly,
expectedContext_structured,
expectedContext_normalized_structured,
expectedContext_referenceOnly_normalized,
expectedContext_referenceOnly_structured,
expectedContext_referenceOnly_normalized_structured,
expected_default,
expected_normalized,
expected_referenceOnly,
expected_structured,
expected_normalized_structured,
expected_referenceOnly_normalized,
expected_referenceOnly_structured,
expected_referenceOnly_normalized_structured
)
@async_test
async def test_foreign_key_many_to_many_cardinality(self):
"""Resolution Guidance Test - Many:Many Cardinality"""
test_name = 'test_foreign_key_many_to_many_cardinality'
entity_name = 'Customer'
expectedContext_default = AttributeContextExpectedValue()
expectedContext_normalized = AttributeContextExpectedValue()
expectedContext_referenceOnly = AttributeContextExpectedValue()
expectedContext_structured = AttributeContextExpectedValue()
expectedContext_normalized_structured = AttributeContextExpectedValue()
expectedContext_referenceOnly_normalized = AttributeContextExpectedValue()
expectedContext_referenceOnly_structured = AttributeContextExpectedValue()
expectedContext_referenceOnly_normalized_structured = AttributeContextExpectedValue()
expected_default = []
expected_normalized = []
expected_referenceOnly = []
expected_structured = []
expected_normalized_structured = []
expected_referenceOnly_normalized = []
expected_referenceOnly_structured = []
expected_referenceOnly_normalized_structured = []
await self.run_test_with_values(
test_name,
entity_name,
expectedContext_default,
expectedContext_normalized,
expectedContext_referenceOnly,
expectedContext_structured,
expectedContext_normalized_structured,
expectedContext_referenceOnly_normalized,
expectedContext_referenceOnly_structured,
expectedContext_referenceOnly_normalized_structured,
expected_default,
expected_normalized,
expected_referenceOnly,
expected_structured,
expected_normalized_structured,
expected_referenceOnly_normalized,
expected_referenceOnly_structured,
expected_referenceOnly_normalized_structured
)
entity_name = 'Product'
expectedContext_default = AttributeContextExpectedValue()
expectedContext_normalized = AttributeContextExpectedValue()
expectedContext_referenceOnly = AttributeContextExpectedValue()
expectedContext_structured = AttributeContextExpectedValue()
expectedContext_normalized_structured = AttributeContextExpectedValue()
expectedContext_referenceOnly_normalized = AttributeContextExpectedValue()
expectedContext_referenceOnly_structured = AttributeContextExpectedValue()
expectedContext_referenceOnly_normalized_structured = AttributeContextExpectedValue()
expected_default = []
expected_normalized = []
expected_referenceOnly = []
expected_structured = []
expected_normalized_structured = []
expected_referenceOnly_normalized = []
expected_referenceOnly_structured = []
expected_referenceOnly_normalized_structured = []
await self.run_test_with_values(
test_name,
entity_name,
expectedContext_default,
expectedContext_normalized,
expectedContext_referenceOnly,
expectedContext_structured,
expectedContext_normalized_structured,
expectedContext_referenceOnly_normalized,
expectedContext_referenceOnly_structured,
expectedContext_referenceOnly_normalized_structured,
expected_default,
expected_normalized,
expected_referenceOnly,
expected_structured,
expected_normalized_structured,
expected_referenceOnly_normalized,
expected_referenceOnly_structured,
expected_referenceOnly_normalized_structured
)
entity_name = 'Sales'
expectedContext_default = AttributeContextExpectedValue()
expectedContext_normalized = AttributeContextExpectedValue()
expectedContext_referenceOnly = AttributeContextExpectedValue()
expectedContext_structured = AttributeContextExpectedValue()
expectedContext_normalized_structured = AttributeContextExpectedValue()
expectedContext_referenceOnly_normalized = AttributeContextExpectedValue()
expectedContext_referenceOnly_structured = AttributeContextExpectedValue()
expectedContext_referenceOnly_normalized_structured = AttributeContextExpectedValue()
expected_default = []
expected_normalized = []
expected_referenceOnly = []
expected_structured = []
expected_normalized_structured = []
expected_referenceOnly_normalized = []
expected_referenceOnly_structured = []
expected_referenceOnly_normalized_structured = []
await self.run_test_with_values(
test_name,
entity_name,
expectedContext_default,
expectedContext_normalized,
expectedContext_referenceOnly,
expectedContext_structured,
expectedContext_normalized_structured,
expectedContext_referenceOnly_normalized,
expectedContext_referenceOnly_structured,
expectedContext_referenceOnly_normalized_structured,
expected_default,
expected_normalized,
expected_referenceOnly,
expected_structured,
expected_normalized_structured,
expected_referenceOnly_normalized,
expected_referenceOnly_structured,
expected_referenceOnly_normalized_structured
)
@async_test
async def test_foreign_key_one_to_many_cardinality(self):
"""Resolution Guidance Test - One:Many Cardinality"""
test_name = 'test_foreign_key_one_to_many_cardinality'
entity_name = 'Team'
expectedContext_default = AttributeContextExpectedValue()
expectedContext_normalized = AttributeContextExpectedValue()
expectedContext_referenceOnly = AttributeContextExpectedValue()
expectedContext_structured = AttributeContextExpectedValue()
expectedContext_normalized_structured = AttributeContextExpectedValue()
expectedContext_referenceOnly_normalized = AttributeContextExpectedValue()
expectedContext_referenceOnly_structured = AttributeContextExpectedValue()
expectedContext_referenceOnly_normalized_structured = AttributeContextExpectedValue()
expected_default = []
expected_normalized = []
expected_referenceOnly = []
expected_structured = []
expected_normalized_structured = []
expected_referenceOnly_normalized = []
expected_referenceOnly_structured = []
expected_referenceOnly_normalized_structured = []
await self.run_test_with_values(
test_name,
entity_name,
expectedContext_default,
expectedContext_normalized,
expectedContext_referenceOnly,
expectedContext_structured,
expectedContext_normalized_structured,
expectedContext_referenceOnly_normalized,
expectedContext_referenceOnly_structured,
expectedContext_referenceOnly_normalized_structured,
expected_default,
expected_normalized,
expected_referenceOnly,
expected_structured,
expected_normalized_structured,
expected_referenceOnly_normalized,
expected_referenceOnly_structured,
expected_referenceOnly_normalized_structured
)
entity_name = 'Employee'
expectedContext_default = AttributeContextExpectedValue()
expectedContext_default.type = 'entity'
expectedContext_default.name = 'Employee_Resolved_default'
expectedContext_default.definition = 'resolvedFrom/Employee'
expectedContext_default.contexts = []
attrCtx_LVL0_IND0 = AttributeContextExpectedValue()
attrCtx_LVL0_IND0.type = 'entityReferenceExtends'
attrCtx_LVL0_IND0.name = 'extends'
attrCtx_LVL0_IND0.parent = 'Employee_Resolved_default/attributeContext/Employee_Resolved_default'
attrCtx_LVL0_IND0.contexts = []
attrCtx_LVL1_IND0 = AttributeContextExpectedValue()
attrCtx_LVL1_IND0.type = 'entity'
attrCtx_LVL1_IND0.name = 'CdmEntity'
attrCtx_LVL1_IND0.parent = 'Employee_Resolved_default/attributeContext/Employee_Resolved_default/extends'
attrCtx_LVL1_IND0.definition = 'resolvedFrom/CdmEntity'
attrCtx_LVL0_IND0.contexts.append(attrCtx_LVL1_IND0)
expectedContext_default.contexts.append(attrCtx_LVL0_IND0)
attrCtx_LVL0_IND1 = AttributeContextExpectedValue()
attrCtx_LVL0_IND1.type = 'attributeDefinition'
attrCtx_LVL0_IND1.name = 'attributesAddedAtThisScope'
attrCtx_LVL0_IND1.parent = 'Employee_Resolved_default/attributeContext/Employee_Resolved_default'
attrCtx_LVL0_IND1.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope'
attrCtx_LVL0_IND1.contexts = []
attrCtx_LVL1_IND0 = AttributeContextExpectedValue()
attrCtx_LVL1_IND0.type = 'attributeGroup'
attrCtx_LVL1_IND0.name = 'attributesAddedAtThisScope'
attrCtx_LVL1_IND0.parent = 'Employee_Resolved_default/attributeContext/Employee_Resolved_default/attributesAddedAtThisScope'
attrCtx_LVL1_IND0.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope'
attrCtx_LVL1_IND0.contexts = []
attrCtx_LVL2_IND0 = AttributeContextExpectedValue()
attrCtx_LVL2_IND0.type = 'attributeDefinition'
attrCtx_LVL2_IND0.name = 'ID'
attrCtx_LVL2_IND0.parent = 'Employee_Resolved_default/attributeContext/Employee_Resolved_default/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL2_IND0.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope/members/ID'
attrCtx_LVL2_IND0.context_strings = []
attrCtx_LVL2_IND0.context_strings.append('Employee_Resolved_default/hasAttributes/ID')
attrCtx_LVL1_IND0.contexts.append(attrCtx_LVL2_IND0)
attrCtx_LVL2_IND1 = AttributeContextExpectedValue()
attrCtx_LVL2_IND1.type = 'attributeDefinition'
attrCtx_LVL2_IND1.name = 'FullName'
attrCtx_LVL2_IND1.parent = 'Employee_Resolved_default/attributeContext/Employee_Resolved_default/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL2_IND1.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope/members/FullName'
attrCtx_LVL2_IND1.context_strings = []
attrCtx_LVL2_IND1.context_strings.append('Employee_Resolved_default/hasAttributes/FullName')
attrCtx_LVL1_IND0.contexts.append(attrCtx_LVL2_IND1)
attrCtx_LVL2_IND2 = AttributeContextExpectedValue()
attrCtx_LVL2_IND2.type = 'attributeDefinition'
attrCtx_LVL2_IND2.name = 'TeamID'
attrCtx_LVL2_IND2.parent = 'Employee_Resolved_default/attributeContext/Employee_Resolved_default/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL2_IND2.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope/members/TeamID'
attrCtx_LVL2_IND2.contexts = []
attrCtx_LVL3_IND0 = AttributeContextExpectedValue()
attrCtx_LVL3_IND0.type = 'entity'
attrCtx_LVL3_IND0.name = 'Team'
attrCtx_LVL3_IND0.parent = 'Employee_Resolved_default/attributeContext/Employee_Resolved_default/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID'
attrCtx_LVL3_IND0.definition = 'resolvedFrom/Team'
attrCtx_LVL3_IND0.contexts = []
attrCtx_LVL4_IND0 = AttributeContextExpectedValue()
attrCtx_LVL4_IND0.type = 'entityReferenceExtends'
attrCtx_LVL4_IND0.name = 'extends'
attrCtx_LVL4_IND0.parent = 'Employee_Resolved_default/attributeContext/Employee_Resolved_default/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/Team'
attrCtx_LVL4_IND0.contexts = []
attrCtx_LVL5_IND0 = AttributeContextExpectedValue()
attrCtx_LVL5_IND0.type = 'entity'
attrCtx_LVL5_IND0.name = 'CdmEntity'
attrCtx_LVL5_IND0.parent = 'Employee_Resolved_default/attributeContext/Employee_Resolved_default/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/Team/extends'
attrCtx_LVL5_IND0.definition = 'resolvedFrom/CdmEntity'
attrCtx_LVL4_IND0.contexts.append(attrCtx_LVL5_IND0)
attrCtx_LVL3_IND0.contexts.append(attrCtx_LVL4_IND0)
attrCtx_LVL4_IND1 = AttributeContextExpectedValue()
attrCtx_LVL4_IND1.type = 'attributeDefinition'
attrCtx_LVL4_IND1.name = 'attributesAddedAtThisScope'
attrCtx_LVL4_IND1.parent = 'Employee_Resolved_default/attributeContext/Employee_Resolved_default/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/Team'
attrCtx_LVL4_IND1.definition = 'resolvedFrom/Team/hasAttributes/attributesAddedAtThisScope'
attrCtx_LVL4_IND1.contexts = []
attrCtx_LVL5_IND0 = AttributeContextExpectedValue()
attrCtx_LVL5_IND0.type = 'attributeGroup'
attrCtx_LVL5_IND0.name = 'attributesAddedAtThisScope'
attrCtx_LVL5_IND0.parent = 'Employee_Resolved_default/attributeContext/Employee_Resolved_default/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/Team/attributesAddedAtThisScope'
attrCtx_LVL5_IND0.definition = 'resolvedFrom/Team/hasAttributes/attributesAddedAtThisScope'
attrCtx_LVL5_IND0.contexts = []
attrCtx_LVL6_IND0 = AttributeContextExpectedValue()
attrCtx_LVL6_IND0.type = 'attributeDefinition'
attrCtx_LVL6_IND0.name = 'ID'
attrCtx_LVL6_IND0.parent = 'Employee_Resolved_default/attributeContext/Employee_Resolved_default/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/Team/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL6_IND0.definition = 'resolvedFrom/Team/hasAttributes/attributesAddedAtThisScope/members/ID'
attrCtx_LVL5_IND0.contexts.append(attrCtx_LVL6_IND0)
attrCtx_LVL6_IND1 = AttributeContextExpectedValue()
attrCtx_LVL6_IND1.type = 'attributeDefinition'
attrCtx_LVL6_IND1.name = 'Name'
attrCtx_LVL6_IND1.parent = 'Employee_Resolved_default/attributeContext/Employee_Resolved_default/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/Team/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL6_IND1.definition = 'resolvedFrom/Team/hasAttributes/attributesAddedAtThisScope/members/Name'
attrCtx_LVL5_IND0.contexts.append(attrCtx_LVL6_IND1)
attrCtx_LVL4_IND1.contexts.append(attrCtx_LVL5_IND0)
attrCtx_LVL3_IND0.contexts.append(attrCtx_LVL4_IND1)
attrCtx_LVL2_IND2.contexts.append(attrCtx_LVL3_IND0)
attrCtx_LVL3_IND1 = AttributeContextExpectedValue()
attrCtx_LVL3_IND1.type = 'generatedSet'
attrCtx_LVL3_IND1.name = '_generatedAttributeSet'
attrCtx_LVL3_IND1.parent = 'Employee_Resolved_default/attributeContext/Employee_Resolved_default/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID'
attrCtx_LVL3_IND1.contexts = []
attrCtx_LVL4_IND0 = AttributeContextExpectedValue()
attrCtx_LVL4_IND0.type = 'addedAttributeExpansionTotal'
attrCtx_LVL4_IND0.name = 'TeamIDTeamCount'
attrCtx_LVL4_IND0.parent = 'Employee_Resolved_default/attributeContext/Employee_Resolved_default/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/_generatedAttributeSet'
attrCtx_LVL4_IND0.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope/members/TeamID/resolutionGuidance/countAttribute/TeamCount'
attrCtx_LVL4_IND0.context_strings = []
attrCtx_LVL4_IND0.context_strings.append('Employee_Resolved_default/hasAttributes/TeamIDTeamCount')
attrCtx_LVL3_IND1.contexts.append(attrCtx_LVL4_IND0)
attrCtx_LVL2_IND2.contexts.append(attrCtx_LVL3_IND1)
attrCtx_LVL1_IND0.contexts.append(attrCtx_LVL2_IND2)
attrCtx_LVL0_IND1.contexts.append(attrCtx_LVL1_IND0)
expectedContext_default.contexts.append(attrCtx_LVL0_IND1)
expectedContext_normalized = AttributeContextExpectedValue()
expectedContext_normalized.type = 'entity'
expectedContext_normalized.name = 'Employee_Resolved_normalized'
expectedContext_normalized.definition = 'resolvedFrom/Employee'
expectedContext_normalized.contexts = []
attrCtx_LVL0_IND0 = AttributeContextExpectedValue()
attrCtx_LVL0_IND0.type = 'entityReferenceExtends'
attrCtx_LVL0_IND0.name = 'extends'
attrCtx_LVL0_IND0.parent = 'Employee_Resolved_normalized/attributeContext/Employee_Resolved_normalized'
attrCtx_LVL0_IND0.contexts = []
attrCtx_LVL1_IND0 = AttributeContextExpectedValue()
attrCtx_LVL1_IND0.type = 'entity'
attrCtx_LVL1_IND0.name = 'CdmEntity'
attrCtx_LVL1_IND0.parent = 'Employee_Resolved_normalized/attributeContext/Employee_Resolved_normalized/extends'
attrCtx_LVL1_IND0.definition = 'resolvedFrom/CdmEntity'
attrCtx_LVL0_IND0.contexts.append(attrCtx_LVL1_IND0)
expectedContext_normalized.contexts.append(attrCtx_LVL0_IND0)
attrCtx_LVL0_IND1 = AttributeContextExpectedValue()
attrCtx_LVL0_IND1.type = 'attributeDefinition'
attrCtx_LVL0_IND1.name = 'attributesAddedAtThisScope'
attrCtx_LVL0_IND1.parent = 'Employee_Resolved_normalized/attributeContext/Employee_Resolved_normalized'
attrCtx_LVL0_IND1.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope'
attrCtx_LVL0_IND1.contexts = []
attrCtx_LVL1_IND0 = AttributeContextExpectedValue()
attrCtx_LVL1_IND0.type = 'attributeGroup'
attrCtx_LVL1_IND0.name = 'attributesAddedAtThisScope'
attrCtx_LVL1_IND0.parent = 'Employee_Resolved_normalized/attributeContext/Employee_Resolved_normalized/attributesAddedAtThisScope'
attrCtx_LVL1_IND0.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope'
attrCtx_LVL1_IND0.contexts = []
attrCtx_LVL2_IND0 = AttributeContextExpectedValue()
attrCtx_LVL2_IND0.type = 'attributeDefinition'
attrCtx_LVL2_IND0.name = 'TeamID'
attrCtx_LVL2_IND0.parent = 'Employee_Resolved_normalized/attributeContext/Employee_Resolved_normalized/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL2_IND0.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope/members/TeamID'
attrCtx_LVL2_IND0.contexts = []
attrCtx_LVL3_IND0 = AttributeContextExpectedValue()
attrCtx_LVL3_IND0.type = 'entity'
attrCtx_LVL3_IND0.name = 'Team'
attrCtx_LVL3_IND0.parent = 'Employee_Resolved_normalized/attributeContext/Employee_Resolved_normalized/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID'
attrCtx_LVL3_IND0.definition = 'resolvedFrom/Team'
attrCtx_LVL3_IND0.contexts = []
attrCtx_LVL4_IND0 = AttributeContextExpectedValue()
attrCtx_LVL4_IND0.type = 'entityReferenceExtends'
attrCtx_LVL4_IND0.name = 'extends'
attrCtx_LVL4_IND0.parent = 'Employee_Resolved_normalized/attributeContext/Employee_Resolved_normalized/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/Team'
attrCtx_LVL4_IND0.contexts = []
attrCtx_LVL5_IND0 = AttributeContextExpectedValue()
attrCtx_LVL5_IND0.type = 'entity'
attrCtx_LVL5_IND0.name = 'CdmEntity'
attrCtx_LVL5_IND0.parent = 'Employee_Resolved_normalized/attributeContext/Employee_Resolved_normalized/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/Team/extends'
attrCtx_LVL5_IND0.definition = 'resolvedFrom/CdmEntity'
attrCtx_LVL4_IND0.contexts.append(attrCtx_LVL5_IND0)
attrCtx_LVL3_IND0.contexts.append(attrCtx_LVL4_IND0)
attrCtx_LVL4_IND1 = AttributeContextExpectedValue()
attrCtx_LVL4_IND1.type = 'attributeDefinition'
attrCtx_LVL4_IND1.name = 'attributesAddedAtThisScope'
attrCtx_LVL4_IND1.parent = 'Employee_Resolved_normalized/attributeContext/Employee_Resolved_normalized/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/Team'
attrCtx_LVL4_IND1.definition = 'resolvedFrom/Team/hasAttributes/attributesAddedAtThisScope'
attrCtx_LVL4_IND1.contexts = []
attrCtx_LVL5_IND0 = AttributeContextExpectedValue()
attrCtx_LVL5_IND0.type = 'attributeGroup'
attrCtx_LVL5_IND0.name = 'attributesAddedAtThisScope'
attrCtx_LVL5_IND0.parent = 'Employee_Resolved_normalized/attributeContext/Employee_Resolved_normalized/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/Team/attributesAddedAtThisScope'
attrCtx_LVL5_IND0.definition = 'resolvedFrom/Team/hasAttributes/attributesAddedAtThisScope'
attrCtx_LVL5_IND0.contexts = []
attrCtx_LVL6_IND0 = AttributeContextExpectedValue()
attrCtx_LVL6_IND0.type = 'attributeDefinition'
attrCtx_LVL6_IND0.name = 'ID'
attrCtx_LVL6_IND0.parent = 'Employee_Resolved_normalized/attributeContext/Employee_Resolved_normalized/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/Team/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL6_IND0.definition = 'resolvedFrom/Team/hasAttributes/attributesAddedAtThisScope/members/ID'
attrCtx_LVL5_IND0.contexts.append(attrCtx_LVL6_IND0)
attrCtx_LVL6_IND1 = AttributeContextExpectedValue()
attrCtx_LVL6_IND1.type = 'attributeDefinition'
attrCtx_LVL6_IND1.name = 'Name'
attrCtx_LVL6_IND1.parent = 'Employee_Resolved_normalized/attributeContext/Employee_Resolved_normalized/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/Team/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL6_IND1.definition = 'resolvedFrom/Team/hasAttributes/attributesAddedAtThisScope/members/Name'
attrCtx_LVL5_IND0.contexts.append(attrCtx_LVL6_IND1)
attrCtx_LVL4_IND1.contexts.append(attrCtx_LVL5_IND0)
attrCtx_LVL3_IND0.contexts.append(attrCtx_LVL4_IND1)
attrCtx_LVL2_IND0.contexts.append(attrCtx_LVL3_IND0)
attrCtx_LVL1_IND0.contexts.append(attrCtx_LVL2_IND0)
attrCtx_LVL2_IND1 = AttributeContextExpectedValue()
attrCtx_LVL2_IND1.type = 'attributeDefinition'
attrCtx_LVL2_IND1.name = 'ID'
attrCtx_LVL2_IND1.parent = 'Employee_Resolved_normalized/attributeContext/Employee_Resolved_normalized/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL2_IND1.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope/members/ID'
attrCtx_LVL2_IND1.context_strings = []
attrCtx_LVL2_IND1.context_strings.append('Employee_Resolved_normalized/hasAttributes/ID')
attrCtx_LVL1_IND0.contexts.append(attrCtx_LVL2_IND1)
attrCtx_LVL2_IND2 = AttributeContextExpectedValue()
attrCtx_LVL2_IND2.type = 'attributeDefinition'
attrCtx_LVL2_IND2.name = 'FullName'
attrCtx_LVL2_IND2.parent = 'Employee_Resolved_normalized/attributeContext/Employee_Resolved_normalized/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL2_IND2.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope/members/FullName'
attrCtx_LVL2_IND2.context_strings = []
attrCtx_LVL2_IND2.context_strings.append('Employee_Resolved_normalized/hasAttributes/FullName')
attrCtx_LVL1_IND0.contexts.append(attrCtx_LVL2_IND2)
attrCtx_LVL0_IND1.contexts.append(attrCtx_LVL1_IND0)
expectedContext_normalized.contexts.append(attrCtx_LVL0_IND1)
expectedContext_referenceOnly = AttributeContextExpectedValue()
expectedContext_referenceOnly.type = 'entity'
expectedContext_referenceOnly.name = 'Employee_Resolved_referenceOnly'
expectedContext_referenceOnly.definition = 'resolvedFrom/Employee'
expectedContext_referenceOnly.contexts = []
attrCtx_LVL0_IND0 = AttributeContextExpectedValue()
attrCtx_LVL0_IND0.type = 'entityReferenceExtends'
attrCtx_LVL0_IND0.name = 'extends'
attrCtx_LVL0_IND0.parent = 'Employee_Resolved_referenceOnly/attributeContext/Employee_Resolved_referenceOnly'
attrCtx_LVL0_IND0.contexts = []
attrCtx_LVL1_IND0 = AttributeContextExpectedValue()
attrCtx_LVL1_IND0.type = 'entity'
attrCtx_LVL1_IND0.name = 'CdmEntity'
attrCtx_LVL1_IND0.parent = 'Employee_Resolved_referenceOnly/attributeContext/Employee_Resolved_referenceOnly/extends'
attrCtx_LVL1_IND0.definition = 'resolvedFrom/CdmEntity'
attrCtx_LVL0_IND0.contexts.append(attrCtx_LVL1_IND0)
expectedContext_referenceOnly.contexts.append(attrCtx_LVL0_IND0)
attrCtx_LVL0_IND1 = AttributeContextExpectedValue()
attrCtx_LVL0_IND1.type = 'attributeDefinition'
attrCtx_LVL0_IND1.name = 'attributesAddedAtThisScope'
attrCtx_LVL0_IND1.parent = 'Employee_Resolved_referenceOnly/attributeContext/Employee_Resolved_referenceOnly'
attrCtx_LVL0_IND1.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope'
attrCtx_LVL0_IND1.contexts = []
attrCtx_LVL1_IND0 = AttributeContextExpectedValue()
attrCtx_LVL1_IND0.type = 'attributeGroup'
attrCtx_LVL1_IND0.name = 'attributesAddedAtThisScope'
attrCtx_LVL1_IND0.parent = 'Employee_Resolved_referenceOnly/attributeContext/Employee_Resolved_referenceOnly/attributesAddedAtThisScope'
attrCtx_LVL1_IND0.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope'
attrCtx_LVL1_IND0.contexts = []
attrCtx_LVL2_IND0 = AttributeContextExpectedValue()
attrCtx_LVL2_IND0.type = 'attributeDefinition'
attrCtx_LVL2_IND0.name = 'ID'
attrCtx_LVL2_IND0.parent = 'Employee_Resolved_referenceOnly/attributeContext/Employee_Resolved_referenceOnly/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL2_IND0.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope/members/ID'
attrCtx_LVL2_IND0.context_strings = []
attrCtx_LVL2_IND0.context_strings.append('Employee_Resolved_referenceOnly/hasAttributes/ID')
attrCtx_LVL1_IND0.contexts.append(attrCtx_LVL2_IND0)
attrCtx_LVL2_IND1 = AttributeContextExpectedValue()
attrCtx_LVL2_IND1.type = 'attributeDefinition'
attrCtx_LVL2_IND1.name = 'FullName'
attrCtx_LVL2_IND1.parent = 'Employee_Resolved_referenceOnly/attributeContext/Employee_Resolved_referenceOnly/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL2_IND1.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope/members/FullName'
attrCtx_LVL2_IND1.context_strings = []
attrCtx_LVL2_IND1.context_strings.append('Employee_Resolved_referenceOnly/hasAttributes/FullName')
attrCtx_LVL1_IND0.contexts.append(attrCtx_LVL2_IND1)
attrCtx_LVL2_IND2 = AttributeContextExpectedValue()
attrCtx_LVL2_IND2.type = 'attributeDefinition'
attrCtx_LVL2_IND2.name = 'TeamID'
attrCtx_LVL2_IND2.parent = 'Employee_Resolved_referenceOnly/attributeContext/Employee_Resolved_referenceOnly/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL2_IND2.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope/members/TeamID'
attrCtx_LVL2_IND2.contexts = []
attrCtx_LVL3_IND0 = AttributeContextExpectedValue()
attrCtx_LVL3_IND0.type = 'entity'
attrCtx_LVL3_IND0.name = 'Team'
attrCtx_LVL3_IND0.parent = 'Employee_Resolved_referenceOnly/attributeContext/Employee_Resolved_referenceOnly/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID'
attrCtx_LVL3_IND0.definition = 'resolvedFrom/Team'
attrCtx_LVL2_IND2.contexts.append(attrCtx_LVL3_IND0)
attrCtx_LVL3_IND1 = AttributeContextExpectedValue()
attrCtx_LVL3_IND1.type = 'generatedSet'
attrCtx_LVL3_IND1.name = '_generatedAttributeSet'
attrCtx_LVL3_IND1.parent = 'Employee_Resolved_referenceOnly/attributeContext/Employee_Resolved_referenceOnly/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID'
attrCtx_LVL3_IND1.contexts = []
attrCtx_LVL4_IND0 = AttributeContextExpectedValue()
attrCtx_LVL4_IND0.type = 'addedAttributeExpansionTotal'
attrCtx_LVL4_IND0.name = 'TeamIDTeamCount'
attrCtx_LVL4_IND0.parent = 'Employee_Resolved_referenceOnly/attributeContext/Employee_Resolved_referenceOnly/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/_generatedAttributeSet'
attrCtx_LVL4_IND0.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope/members/TeamID/resolutionGuidance/countAttribute/TeamCount'
attrCtx_LVL4_IND0.context_strings = []
attrCtx_LVL4_IND0.context_strings.append('Employee_Resolved_referenceOnly/hasAttributes/TeamIDTeamCount')
attrCtx_LVL3_IND1.contexts.append(attrCtx_LVL4_IND0)
attrCtx_LVL2_IND2.contexts.append(attrCtx_LVL3_IND1)
attrCtx_LVL1_IND0.contexts.append(attrCtx_LVL2_IND2)
attrCtx_LVL0_IND1.contexts.append(attrCtx_LVL1_IND0)
expectedContext_referenceOnly.contexts.append(attrCtx_LVL0_IND1)
expectedContext_structured = AttributeContextExpectedValue()
expectedContext_structured.type = 'entity'
expectedContext_structured.name = 'Employee_Resolved_structured'
expectedContext_structured.definition = 'resolvedFrom/Employee'
expectedContext_structured.contexts = []
attrCtx_LVL0_IND0 = AttributeContextExpectedValue()
attrCtx_LVL0_IND0.type = 'entityReferenceExtends'
attrCtx_LVL0_IND0.name = 'extends'
attrCtx_LVL0_IND0.parent = 'Employee_Resolved_structured/attributeContext/Employee_Resolved_structured'
attrCtx_LVL0_IND0.contexts = []
attrCtx_LVL1_IND0 = AttributeContextExpectedValue()
attrCtx_LVL1_IND0.type = 'entity'
attrCtx_LVL1_IND0.name = 'CdmEntity'
attrCtx_LVL1_IND0.parent = 'Employee_Resolved_structured/attributeContext/Employee_Resolved_structured/extends'
attrCtx_LVL1_IND0.definition = 'resolvedFrom/CdmEntity'
attrCtx_LVL0_IND0.contexts.append(attrCtx_LVL1_IND0)
expectedContext_structured.contexts.append(attrCtx_LVL0_IND0)
attrCtx_LVL0_IND1 = AttributeContextExpectedValue()
attrCtx_LVL0_IND1.type = 'attributeDefinition'
attrCtx_LVL0_IND1.name = 'attributesAddedAtThisScope'
attrCtx_LVL0_IND1.parent = 'Employee_Resolved_structured/attributeContext/Employee_Resolved_structured'
attrCtx_LVL0_IND1.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope'
attrCtx_LVL0_IND1.contexts = []
attrCtx_LVL1_IND0 = AttributeContextExpectedValue()
attrCtx_LVL1_IND0.type = 'attributeGroup'
attrCtx_LVL1_IND0.name = 'attributesAddedAtThisScope'
attrCtx_LVL1_IND0.parent = 'Employee_Resolved_structured/attributeContext/Employee_Resolved_structured/attributesAddedAtThisScope'
attrCtx_LVL1_IND0.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope'
attrCtx_LVL1_IND0.contexts = []
attrCtx_LVL2_IND0 = AttributeContextExpectedValue()
attrCtx_LVL2_IND0.type = 'attributeDefinition'
attrCtx_LVL2_IND0.name = 'ID'
attrCtx_LVL2_IND0.parent = 'Employee_Resolved_structured/attributeContext/Employee_Resolved_structured/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL2_IND0.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope/members/ID'
attrCtx_LVL2_IND0.context_strings = []
attrCtx_LVL2_IND0.context_strings.append('Employee_Resolved_structured/hasAttributes/ID')
attrCtx_LVL1_IND0.contexts.append(attrCtx_LVL2_IND0)
attrCtx_LVL2_IND1 = AttributeContextExpectedValue()
attrCtx_LVL2_IND1.type = 'attributeDefinition'
attrCtx_LVL2_IND1.name = 'FullName'
attrCtx_LVL2_IND1.parent = 'Employee_Resolved_structured/attributeContext/Employee_Resolved_structured/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL2_IND1.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope/members/FullName'
attrCtx_LVL2_IND1.context_strings = []
attrCtx_LVL2_IND1.context_strings.append('Employee_Resolved_structured/hasAttributes/FullName')
attrCtx_LVL1_IND0.contexts.append(attrCtx_LVL2_IND1)
attrCtx_LVL2_IND2 = AttributeContextExpectedValue()
attrCtx_LVL2_IND2.type = 'attributeDefinition'
attrCtx_LVL2_IND2.name = 'TeamID'
attrCtx_LVL2_IND2.parent = 'Employee_Resolved_structured/attributeContext/Employee_Resolved_structured/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL2_IND2.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope/members/TeamID'
attrCtx_LVL2_IND2.contexts = []
attrCtx_LVL3_IND0 = AttributeContextExpectedValue()
attrCtx_LVL3_IND0.type = 'entity'
attrCtx_LVL3_IND0.name = 'Team'
attrCtx_LVL3_IND0.parent = 'Employee_Resolved_structured/attributeContext/Employee_Resolved_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID'
attrCtx_LVL3_IND0.definition = 'resolvedFrom/Team'
attrCtx_LVL3_IND0.contexts = []
attrCtx_LVL4_IND0 = AttributeContextExpectedValue()
attrCtx_LVL4_IND0.type = 'entityReferenceExtends'
attrCtx_LVL4_IND0.name = 'extends'
attrCtx_LVL4_IND0.parent = 'Employee_Resolved_structured/attributeContext/Employee_Resolved_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/Team'
attrCtx_LVL4_IND0.contexts = []
attrCtx_LVL5_IND0 = AttributeContextExpectedValue()
attrCtx_LVL5_IND0.type = 'entity'
attrCtx_LVL5_IND0.name = 'CdmEntity'
attrCtx_LVL5_IND0.parent = 'Employee_Resolved_structured/attributeContext/Employee_Resolved_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/Team/extends'
attrCtx_LVL5_IND0.definition = 'resolvedFrom/CdmEntity'
attrCtx_LVL4_IND0.contexts.append(attrCtx_LVL5_IND0)
attrCtx_LVL3_IND0.contexts.append(attrCtx_LVL4_IND0)
attrCtx_LVL4_IND1 = AttributeContextExpectedValue()
attrCtx_LVL4_IND1.type = 'attributeDefinition'
attrCtx_LVL4_IND1.name = 'attributesAddedAtThisScope'
attrCtx_LVL4_IND1.parent = 'Employee_Resolved_structured/attributeContext/Employee_Resolved_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/Team'
attrCtx_LVL4_IND1.definition = 'resolvedFrom/Team/hasAttributes/attributesAddedAtThisScope'
attrCtx_LVL4_IND1.contexts = []
attrCtx_LVL5_IND0 = AttributeContextExpectedValue()
attrCtx_LVL5_IND0.type = 'attributeGroup'
attrCtx_LVL5_IND0.name = 'attributesAddedAtThisScope'
attrCtx_LVL5_IND0.parent = 'Employee_Resolved_structured/attributeContext/Employee_Resolved_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/Team/attributesAddedAtThisScope'
attrCtx_LVL5_IND0.definition = 'resolvedFrom/Team/hasAttributes/attributesAddedAtThisScope'
attrCtx_LVL5_IND0.contexts = []
attrCtx_LVL6_IND0 = AttributeContextExpectedValue()
attrCtx_LVL6_IND0.type = 'attributeDefinition'
attrCtx_LVL6_IND0.name = 'ID'
attrCtx_LVL6_IND0.parent = 'Employee_Resolved_structured/attributeContext/Employee_Resolved_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/Team/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL6_IND0.definition = 'resolvedFrom/Team/hasAttributes/attributesAddedAtThisScope/members/ID'
attrCtx_LVL6_IND0.context_strings = []
attrCtx_LVL6_IND0.context_strings.append('Employee_Resolved_structured/hasAttributes/TeamID/members/ID')
attrCtx_LVL5_IND0.contexts.append(attrCtx_LVL6_IND0)
attrCtx_LVL6_IND1 = AttributeContextExpectedValue()
attrCtx_LVL6_IND1.type = 'attributeDefinition'
attrCtx_LVL6_IND1.name = 'Name'
attrCtx_LVL6_IND1.parent = 'Employee_Resolved_structured/attributeContext/Employee_Resolved_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/Team/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL6_IND1.definition = 'resolvedFrom/Team/hasAttributes/attributesAddedAtThisScope/members/Name'
attrCtx_LVL6_IND1.context_strings = []
attrCtx_LVL6_IND1.context_strings.append('Employee_Resolved_structured/hasAttributes/TeamID/members/Name')
attrCtx_LVL5_IND0.contexts.append(attrCtx_LVL6_IND1)
attrCtx_LVL4_IND1.contexts.append(attrCtx_LVL5_IND0)
attrCtx_LVL3_IND0.contexts.append(attrCtx_LVL4_IND1)
attrCtx_LVL2_IND2.contexts.append(attrCtx_LVL3_IND0)
attrCtx_LVL1_IND0.contexts.append(attrCtx_LVL2_IND2)
attrCtx_LVL0_IND1.contexts.append(attrCtx_LVL1_IND0)
expectedContext_structured.contexts.append(attrCtx_LVL0_IND1)
expectedContext_normalized_structured = AttributeContextExpectedValue()
expectedContext_normalized_structured.type = 'entity'
expectedContext_normalized_structured.name = 'Employee_Resolved_normalized_structured'
expectedContext_normalized_structured.definition = 'resolvedFrom/Employee'
expectedContext_normalized_structured.contexts = []
attrCtx_LVL0_IND0 = AttributeContextExpectedValue()
attrCtx_LVL0_IND0.type = 'entityReferenceExtends'
attrCtx_LVL0_IND0.name = 'extends'
attrCtx_LVL0_IND0.parent = 'Employee_Resolved_normalized_structured/attributeContext/Employee_Resolved_normalized_structured'
attrCtx_LVL0_IND0.contexts = []
attrCtx_LVL1_IND0 = AttributeContextExpectedValue()
attrCtx_LVL1_IND0.type = 'entity'
attrCtx_LVL1_IND0.name = 'CdmEntity'
attrCtx_LVL1_IND0.parent = 'Employee_Resolved_normalized_structured/attributeContext/Employee_Resolved_normalized_structured/extends'
attrCtx_LVL1_IND0.definition = 'resolvedFrom/CdmEntity'
attrCtx_LVL0_IND0.contexts.append(attrCtx_LVL1_IND0)
expectedContext_normalized_structured.contexts.append(attrCtx_LVL0_IND0)
attrCtx_LVL0_IND1 = AttributeContextExpectedValue()
attrCtx_LVL0_IND1.type = 'attributeDefinition'
attrCtx_LVL0_IND1.name = 'attributesAddedAtThisScope'
attrCtx_LVL0_IND1.parent = 'Employee_Resolved_normalized_structured/attributeContext/Employee_Resolved_normalized_structured'
attrCtx_LVL0_IND1.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope'
attrCtx_LVL0_IND1.contexts = []
attrCtx_LVL1_IND0 = AttributeContextExpectedValue()
attrCtx_LVL1_IND0.type = 'attributeGroup'
attrCtx_LVL1_IND0.name = 'attributesAddedAtThisScope'
attrCtx_LVL1_IND0.parent = 'Employee_Resolved_normalized_structured/attributeContext/Employee_Resolved_normalized_structured/attributesAddedAtThisScope'
attrCtx_LVL1_IND0.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope'
attrCtx_LVL1_IND0.contexts = []
attrCtx_LVL2_IND0 = AttributeContextExpectedValue()
attrCtx_LVL2_IND0.type = 'attributeDefinition'
attrCtx_LVL2_IND0.name = 'TeamID'
attrCtx_LVL2_IND0.parent = 'Employee_Resolved_normalized_structured/attributeContext/Employee_Resolved_normalized_structured/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL2_IND0.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope/members/TeamID'
attrCtx_LVL2_IND0.contexts = []
attrCtx_LVL3_IND0 = AttributeContextExpectedValue()
attrCtx_LVL3_IND0.type = 'entity'
attrCtx_LVL3_IND0.name = 'Team'
attrCtx_LVL3_IND0.parent = 'Employee_Resolved_normalized_structured/attributeContext/Employee_Resolved_normalized_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID'
attrCtx_LVL3_IND0.definition = 'resolvedFrom/Team'
attrCtx_LVL3_IND0.contexts = []
attrCtx_LVL4_IND0 = AttributeContextExpectedValue()
attrCtx_LVL4_IND0.type = 'entityReferenceExtends'
attrCtx_LVL4_IND0.name = 'extends'
attrCtx_LVL4_IND0.parent = 'Employee_Resolved_normalized_structured/attributeContext/Employee_Resolved_normalized_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/Team'
attrCtx_LVL4_IND0.contexts = []
attrCtx_LVL5_IND0 = AttributeContextExpectedValue()
attrCtx_LVL5_IND0.type = 'entity'
attrCtx_LVL5_IND0.name = 'CdmEntity'
attrCtx_LVL5_IND0.parent = 'Employee_Resolved_normalized_structured/attributeContext/Employee_Resolved_normalized_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/Team/extends'
attrCtx_LVL5_IND0.definition = 'resolvedFrom/CdmEntity'
attrCtx_LVL4_IND0.contexts.append(attrCtx_LVL5_IND0)
attrCtx_LVL3_IND0.contexts.append(attrCtx_LVL4_IND0)
attrCtx_LVL4_IND1 = AttributeContextExpectedValue()
attrCtx_LVL4_IND1.type = 'attributeDefinition'
attrCtx_LVL4_IND1.name = 'attributesAddedAtThisScope'
attrCtx_LVL4_IND1.parent = 'Employee_Resolved_normalized_structured/attributeContext/Employee_Resolved_normalized_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/Team'
attrCtx_LVL4_IND1.definition = 'resolvedFrom/Team/hasAttributes/attributesAddedAtThisScope'
attrCtx_LVL4_IND1.contexts = []
attrCtx_LVL5_IND0 = AttributeContextExpectedValue()
attrCtx_LVL5_IND0.type = 'attributeGroup'
attrCtx_LVL5_IND0.name = 'attributesAddedAtThisScope'
attrCtx_LVL5_IND0.parent = 'Employee_Resolved_normalized_structured/attributeContext/Employee_Resolved_normalized_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/Team/attributesAddedAtThisScope'
attrCtx_LVL5_IND0.definition = 'resolvedFrom/Team/hasAttributes/attributesAddedAtThisScope'
attrCtx_LVL5_IND0.contexts = []
attrCtx_LVL6_IND0 = AttributeContextExpectedValue()
attrCtx_LVL6_IND0.type = 'attributeDefinition'
attrCtx_LVL6_IND0.name = 'ID'
attrCtx_LVL6_IND0.parent = 'Employee_Resolved_normalized_structured/attributeContext/Employee_Resolved_normalized_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/Team/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL6_IND0.definition = 'resolvedFrom/Team/hasAttributes/attributesAddedAtThisScope/members/ID'
attrCtx_LVL5_IND0.contexts.append(attrCtx_LVL6_IND0)
attrCtx_LVL6_IND1 = AttributeContextExpectedValue()
attrCtx_LVL6_IND1.type = 'attributeDefinition'
attrCtx_LVL6_IND1.name = 'Name'
attrCtx_LVL6_IND1.parent = 'Employee_Resolved_normalized_structured/attributeContext/Employee_Resolved_normalized_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/Team/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL6_IND1.definition = 'resolvedFrom/Team/hasAttributes/attributesAddedAtThisScope/members/Name'
attrCtx_LVL5_IND0.contexts.append(attrCtx_LVL6_IND1)
attrCtx_LVL4_IND1.contexts.append(attrCtx_LVL5_IND0)
attrCtx_LVL3_IND0.contexts.append(attrCtx_LVL4_IND1)
attrCtx_LVL2_IND0.contexts.append(attrCtx_LVL3_IND0)
attrCtx_LVL1_IND0.contexts.append(attrCtx_LVL2_IND0)
attrCtx_LVL2_IND1 = AttributeContextExpectedValue()
attrCtx_LVL2_IND1.type = 'attributeDefinition'
attrCtx_LVL2_IND1.name = 'ID'
attrCtx_LVL2_IND1.parent = 'Employee_Resolved_normalized_structured/attributeContext/Employee_Resolved_normalized_structured/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL2_IND1.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope/members/ID'
attrCtx_LVL2_IND1.context_strings = []
attrCtx_LVL2_IND1.context_strings.append('Employee_Resolved_normalized_structured/hasAttributes/ID')
attrCtx_LVL1_IND0.contexts.append(attrCtx_LVL2_IND1)
attrCtx_LVL2_IND2 = AttributeContextExpectedValue()
attrCtx_LVL2_IND2.type = 'attributeDefinition'
attrCtx_LVL2_IND2.name = 'FullName'
attrCtx_LVL2_IND2.parent = 'Employee_Resolved_normalized_structured/attributeContext/Employee_Resolved_normalized_structured/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL2_IND2.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope/members/FullName'
attrCtx_LVL2_IND2.context_strings = []
attrCtx_LVL2_IND2.context_strings.append('Employee_Resolved_normalized_structured/hasAttributes/FullName')
attrCtx_LVL1_IND0.contexts.append(attrCtx_LVL2_IND2)
attrCtx_LVL0_IND1.contexts.append(attrCtx_LVL1_IND0)
expectedContext_normalized_structured.contexts.append(attrCtx_LVL0_IND1)
expectedContext_referenceOnly_normalized = AttributeContextExpectedValue()
expectedContext_referenceOnly_normalized.type = 'entity'
expectedContext_referenceOnly_normalized.name = 'Employee_Resolved_referenceOnly_normalized'
expectedContext_referenceOnly_normalized.definition = 'resolvedFrom/Employee'
expectedContext_referenceOnly_normalized.contexts = []
attrCtx_LVL0_IND0 = AttributeContextExpectedValue()
attrCtx_LVL0_IND0.type = 'entityReferenceExtends'
attrCtx_LVL0_IND0.name = 'extends'
attrCtx_LVL0_IND0.parent = 'Employee_Resolved_referenceOnly_normalized/attributeContext/Employee_Resolved_referenceOnly_normalized'
attrCtx_LVL0_IND0.contexts = []
attrCtx_LVL1_IND0 = AttributeContextExpectedValue()
attrCtx_LVL1_IND0.type = 'entity'
attrCtx_LVL1_IND0.name = 'CdmEntity'
attrCtx_LVL1_IND0.parent = 'Employee_Resolved_referenceOnly_normalized/attributeContext/Employee_Resolved_referenceOnly_normalized/extends'
attrCtx_LVL1_IND0.definition = 'resolvedFrom/CdmEntity'
attrCtx_LVL0_IND0.contexts.append(attrCtx_LVL1_IND0)
expectedContext_referenceOnly_normalized.contexts.append(attrCtx_LVL0_IND0)
attrCtx_LVL0_IND1 = AttributeContextExpectedValue()
attrCtx_LVL0_IND1.type = 'attributeDefinition'
attrCtx_LVL0_IND1.name = 'attributesAddedAtThisScope'
attrCtx_LVL0_IND1.parent = 'Employee_Resolved_referenceOnly_normalized/attributeContext/Employee_Resolved_referenceOnly_normalized'
attrCtx_LVL0_IND1.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope'
attrCtx_LVL0_IND1.contexts = []
attrCtx_LVL1_IND0 = AttributeContextExpectedValue()
attrCtx_LVL1_IND0.type = 'attributeGroup'
attrCtx_LVL1_IND0.name = 'attributesAddedAtThisScope'
attrCtx_LVL1_IND0.parent = 'Employee_Resolved_referenceOnly_normalized/attributeContext/Employee_Resolved_referenceOnly_normalized/attributesAddedAtThisScope'
attrCtx_LVL1_IND0.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope'
attrCtx_LVL1_IND0.contexts = []
attrCtx_LVL2_IND0 = AttributeContextExpectedValue()
attrCtx_LVL2_IND0.type = 'attributeDefinition'
attrCtx_LVL2_IND0.name = 'TeamID'
attrCtx_LVL2_IND0.parent = 'Employee_Resolved_referenceOnly_normalized/attributeContext/Employee_Resolved_referenceOnly_normalized/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL2_IND0.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope/members/TeamID'
attrCtx_LVL2_IND0.contexts = []
attrCtx_LVL3_IND0 = AttributeContextExpectedValue()
attrCtx_LVL3_IND0.type = 'entity'
attrCtx_LVL3_IND0.name = 'Team'
attrCtx_LVL3_IND0.parent = 'Employee_Resolved_referenceOnly_normalized/attributeContext/Employee_Resolved_referenceOnly_normalized/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID'
attrCtx_LVL3_IND0.definition = 'resolvedFrom/Team'
attrCtx_LVL2_IND0.contexts.append(attrCtx_LVL3_IND0)
attrCtx_LVL1_IND0.contexts.append(attrCtx_LVL2_IND0)
attrCtx_LVL2_IND1 = AttributeContextExpectedValue()
attrCtx_LVL2_IND1.type = 'attributeDefinition'
attrCtx_LVL2_IND1.name = 'ID'
attrCtx_LVL2_IND1.parent = 'Employee_Resolved_referenceOnly_normalized/attributeContext/Employee_Resolved_referenceOnly_normalized/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL2_IND1.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope/members/ID'
attrCtx_LVL2_IND1.context_strings = []
attrCtx_LVL2_IND1.context_strings.append('Employee_Resolved_referenceOnly_normalized/hasAttributes/ID')
attrCtx_LVL1_IND0.contexts.append(attrCtx_LVL2_IND1)
attrCtx_LVL2_IND2 = AttributeContextExpectedValue()
attrCtx_LVL2_IND2.type = 'attributeDefinition'
attrCtx_LVL2_IND2.name = 'FullName'
attrCtx_LVL2_IND2.parent = 'Employee_Resolved_referenceOnly_normalized/attributeContext/Employee_Resolved_referenceOnly_normalized/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL2_IND2.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope/members/FullName'
attrCtx_LVL2_IND2.context_strings = []
attrCtx_LVL2_IND2.context_strings.append('Employee_Resolved_referenceOnly_normalized/hasAttributes/FullName')
attrCtx_LVL1_IND0.contexts.append(attrCtx_LVL2_IND2)
attrCtx_LVL0_IND1.contexts.append(attrCtx_LVL1_IND0)
expectedContext_referenceOnly_normalized.contexts.append(attrCtx_LVL0_IND1)
expectedContext_referenceOnly_structured = AttributeContextExpectedValue()
expectedContext_referenceOnly_structured.type = 'entity'
expectedContext_referenceOnly_structured.name = 'Employee_Resolved_referenceOnly_structured'
expectedContext_referenceOnly_structured.definition = 'resolvedFrom/Employee'
expectedContext_referenceOnly_structured.contexts = []
attrCtx_LVL0_IND0 = AttributeContextExpectedValue()
attrCtx_LVL0_IND0.type = 'entityReferenceExtends'
attrCtx_LVL0_IND0.name = 'extends'
attrCtx_LVL0_IND0.parent = 'Employee_Resolved_referenceOnly_structured/attributeContext/Employee_Resolved_referenceOnly_structured'
attrCtx_LVL0_IND0.contexts = []
attrCtx_LVL1_IND0 = AttributeContextExpectedValue()
attrCtx_LVL1_IND0.type = 'entity'
attrCtx_LVL1_IND0.name = 'CdmEntity'
attrCtx_LVL1_IND0.parent = 'Employee_Resolved_referenceOnly_structured/attributeContext/Employee_Resolved_referenceOnly_structured/extends'
attrCtx_LVL1_IND0.definition = 'resolvedFrom/CdmEntity'
attrCtx_LVL0_IND0.contexts.append(attrCtx_LVL1_IND0)
expectedContext_referenceOnly_structured.contexts.append(attrCtx_LVL0_IND0)
attrCtx_LVL0_IND1 = AttributeContextExpectedValue()
attrCtx_LVL0_IND1.type = 'attributeDefinition'
attrCtx_LVL0_IND1.name = 'attributesAddedAtThisScope'
attrCtx_LVL0_IND1.parent = 'Employee_Resolved_referenceOnly_structured/attributeContext/Employee_Resolved_referenceOnly_structured'
attrCtx_LVL0_IND1.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope'
attrCtx_LVL0_IND1.contexts = []
attrCtx_LVL1_IND0 = AttributeContextExpectedValue()
attrCtx_LVL1_IND0.type = 'attributeGroup'
attrCtx_LVL1_IND0.name = 'attributesAddedAtThisScope'
attrCtx_LVL1_IND0.parent = 'Employee_Resolved_referenceOnly_structured/attributeContext/Employee_Resolved_referenceOnly_structured/attributesAddedAtThisScope'
attrCtx_LVL1_IND0.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope'
attrCtx_LVL1_IND0.contexts = []
attrCtx_LVL2_IND0 = AttributeContextExpectedValue()
attrCtx_LVL2_IND0.type = 'attributeDefinition'
attrCtx_LVL2_IND0.name = 'ID'
attrCtx_LVL2_IND0.parent = 'Employee_Resolved_referenceOnly_structured/attributeContext/Employee_Resolved_referenceOnly_structured/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL2_IND0.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope/members/ID'
attrCtx_LVL2_IND0.context_strings = []
attrCtx_LVL2_IND0.context_strings.append('Employee_Resolved_referenceOnly_structured/hasAttributes/ID')
attrCtx_LVL1_IND0.contexts.append(attrCtx_LVL2_IND0)
attrCtx_LVL2_IND1 = AttributeContextExpectedValue()
attrCtx_LVL2_IND1.type = 'attributeDefinition'
attrCtx_LVL2_IND1.name = 'FullName'
attrCtx_LVL2_IND1.parent = 'Employee_Resolved_referenceOnly_structured/attributeContext/Employee_Resolved_referenceOnly_structured/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL2_IND1.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope/members/FullName'
attrCtx_LVL2_IND1.context_strings = []
attrCtx_LVL2_IND1.context_strings.append('Employee_Resolved_referenceOnly_structured/hasAttributes/FullName')
attrCtx_LVL1_IND0.contexts.append(attrCtx_LVL2_IND1)
attrCtx_LVL2_IND2 = AttributeContextExpectedValue()
attrCtx_LVL2_IND2.type = 'attributeDefinition'
attrCtx_LVL2_IND2.name = 'TeamID'
attrCtx_LVL2_IND2.parent = 'Employee_Resolved_referenceOnly_structured/attributeContext/Employee_Resolved_referenceOnly_structured/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL2_IND2.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope/members/TeamID'
attrCtx_LVL2_IND2.contexts = []
attrCtx_LVL3_IND0 = AttributeContextExpectedValue()
attrCtx_LVL3_IND0.type = 'entity'
attrCtx_LVL3_IND0.name = 'Team'
attrCtx_LVL3_IND0.parent = 'Employee_Resolved_referenceOnly_structured/attributeContext/Employee_Resolved_referenceOnly_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID'
attrCtx_LVL3_IND0.definition = 'resolvedFrom/Team'
attrCtx_LVL2_IND2.contexts.append(attrCtx_LVL3_IND0)
attrCtx_LVL3_IND1 = AttributeContextExpectedValue()
attrCtx_LVL3_IND1.type = 'generatedSet'
attrCtx_LVL3_IND1.name = '_generatedAttributeSet'
attrCtx_LVL3_IND1.parent = 'Employee_Resolved_referenceOnly_structured/attributeContext/Employee_Resolved_referenceOnly_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID'
attrCtx_LVL3_IND1.contexts = []
attrCtx_LVL4_IND0 = AttributeContextExpectedValue()
attrCtx_LVL4_IND0.type = 'generatedRound'
attrCtx_LVL4_IND0.name = '_generatedAttributeRound0'
attrCtx_LVL4_IND0.parent = 'Employee_Resolved_referenceOnly_structured/attributeContext/Employee_Resolved_referenceOnly_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/_generatedAttributeSet'
attrCtx_LVL4_IND0.contexts = []
attrCtx_LVL5_IND0 = AttributeContextExpectedValue()
attrCtx_LVL5_IND0.type = 'addedAttributeIdentity'
attrCtx_LVL5_IND0.name = '_foreignKey'
attrCtx_LVL5_IND0.parent = 'Employee_Resolved_referenceOnly_structured/attributeContext/Employee_Resolved_referenceOnly_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/_generatedAttributeSet/_generatedAttributeRound0'
attrCtx_LVL5_IND0.context_strings = []
attrCtx_LVL5_IND0.context_strings.append(
'Employee_Resolved_referenceOnly_structured/hasAttributes/TeamID/members/TeamID')
attrCtx_LVL4_IND0.contexts.append(attrCtx_LVL5_IND0)
attrCtx_LVL3_IND1.contexts.append(attrCtx_LVL4_IND0)
attrCtx_LVL2_IND2.contexts.append(attrCtx_LVL3_IND1)
attrCtx_LVL1_IND0.contexts.append(attrCtx_LVL2_IND2)
attrCtx_LVL0_IND1.contexts.append(attrCtx_LVL1_IND0)
expectedContext_referenceOnly_structured.contexts.append(attrCtx_LVL0_IND1)
expectedContext_referenceOnly_normalized_structured = AttributeContextExpectedValue()
expectedContext_referenceOnly_normalized_structured.type = 'entity'
expectedContext_referenceOnly_normalized_structured.name = 'Employee_Resolved_referenceOnly_normalized_structured'
expectedContext_referenceOnly_normalized_structured.definition = 'resolvedFrom/Employee'
expectedContext_referenceOnly_normalized_structured.contexts = []
attrCtx_LVL0_IND0 = AttributeContextExpectedValue()
attrCtx_LVL0_IND0.type = 'entityReferenceExtends'
attrCtx_LVL0_IND0.name = 'extends'
attrCtx_LVL0_IND0.parent = 'Employee_Resolved_referenceOnly_normalized_structured/attributeContext/Employee_Resolved_referenceOnly_normalized_structured'
attrCtx_LVL0_IND0.contexts = []
attrCtx_LVL1_IND0 = AttributeContextExpectedValue()
attrCtx_LVL1_IND0.type = 'entity'
attrCtx_LVL1_IND0.name = 'CdmEntity'
attrCtx_LVL1_IND0.parent = 'Employee_Resolved_referenceOnly_normalized_structured/attributeContext/Employee_Resolved_referenceOnly_normalized_structured/extends'
attrCtx_LVL1_IND0.definition = 'resolvedFrom/CdmEntity'
attrCtx_LVL0_IND0.contexts.append(attrCtx_LVL1_IND0)
expectedContext_referenceOnly_normalized_structured.contexts.append(attrCtx_LVL0_IND0)
attrCtx_LVL0_IND1 = AttributeContextExpectedValue()
attrCtx_LVL0_IND1.type = 'attributeDefinition'
attrCtx_LVL0_IND1.name = 'attributesAddedAtThisScope'
attrCtx_LVL0_IND1.parent = 'Employee_Resolved_referenceOnly_normalized_structured/attributeContext/Employee_Resolved_referenceOnly_normalized_structured'
attrCtx_LVL0_IND1.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope'
attrCtx_LVL0_IND1.contexts = []
attrCtx_LVL1_IND0 = AttributeContextExpectedValue()
attrCtx_LVL1_IND0.type = 'attributeGroup'
attrCtx_LVL1_IND0.name = 'attributesAddedAtThisScope'
attrCtx_LVL1_IND0.parent = 'Employee_Resolved_referenceOnly_normalized_structured/attributeContext/Employee_Resolved_referenceOnly_normalized_structured/attributesAddedAtThisScope'
attrCtx_LVL1_IND0.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope'
attrCtx_LVL1_IND0.contexts = []
attrCtx_LVL2_IND0 = AttributeContextExpectedValue()
attrCtx_LVL2_IND0.type = 'attributeDefinition'
attrCtx_LVL2_IND0.name = 'TeamID'
attrCtx_LVL2_IND0.parent = 'Employee_Resolved_referenceOnly_normalized_structured/attributeContext/Employee_Resolved_referenceOnly_normalized_structured/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL2_IND0.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope/members/TeamID'
attrCtx_LVL2_IND0.contexts = []
attrCtx_LVL3_IND0 = AttributeContextExpectedValue()
attrCtx_LVL3_IND0.type = 'entity'
attrCtx_LVL3_IND0.name = 'Team'
attrCtx_LVL3_IND0.parent = 'Employee_Resolved_referenceOnly_normalized_structured/attributeContext/Employee_Resolved_referenceOnly_normalized_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID'
attrCtx_LVL3_IND0.definition = 'resolvedFrom/Team'
attrCtx_LVL2_IND0.contexts.append(attrCtx_LVL3_IND0)
attrCtx_LVL1_IND0.contexts.append(attrCtx_LVL2_IND0)
attrCtx_LVL2_IND1 = AttributeContextExpectedValue()
attrCtx_LVL2_IND1.type = 'attributeDefinition'
attrCtx_LVL2_IND1.name = 'ID'
attrCtx_LVL2_IND1.parent = 'Employee_Resolved_referenceOnly_normalized_structured/attributeContext/Employee_Resolved_referenceOnly_normalized_structured/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL2_IND1.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope/members/ID'
attrCtx_LVL2_IND1.context_strings = []
attrCtx_LVL2_IND1.context_strings.append(
'Employee_Resolved_referenceOnly_normalized_structured/hasAttributes/ID')
attrCtx_LVL1_IND0.contexts.append(attrCtx_LVL2_IND1)
attrCtx_LVL2_IND2 = AttributeContextExpectedValue()
attrCtx_LVL2_IND2.type = 'attributeDefinition'
attrCtx_LVL2_IND2.name = 'FullName'
attrCtx_LVL2_IND2.parent = 'Employee_Resolved_referenceOnly_normalized_structured/attributeContext/Employee_Resolved_referenceOnly_normalized_structured/attributesAddedAtThisScope/attributesAddedAtThisScope'
attrCtx_LVL2_IND2.definition = 'resolvedFrom/Employee/hasAttributes/attributesAddedAtThisScope/members/FullName'
attrCtx_LVL2_IND2.context_strings = []
attrCtx_LVL2_IND2.context_strings.append(
'Employee_Resolved_referenceOnly_normalized_structured/hasAttributes/FullName')
attrCtx_LVL1_IND0.contexts.append(attrCtx_LVL2_IND2)
attrCtx_LVL0_IND1.contexts.append(attrCtx_LVL1_IND0)
expectedContext_referenceOnly_normalized_structured.contexts.append(attrCtx_LVL0_IND1)
expected_default = []
att = AttributeExpectedValue()
att.attribute_context = 'Employee_Resolved_default/attributeContext/Employee_Resolved_default/attributesAddedAtThisScope/attributesAddedAtThisScope/ID'
att.data_format = 'Guid'
att.display_name = 'ID'
att.is_primary_key = True
att.name = 'ID'
att.source_name = 'ID'
expected_default.append(att)
att = AttributeExpectedValue()
att.attribute_context = 'Employee_Resolved_default/attributeContext/Employee_Resolved_default/attributesAddedAtThisScope/attributesAddedAtThisScope/FullName'
att.data_format = 'String'
att.display_name = 'FullName'
att.name = 'FullName'
att.source_name = 'FullName'
expected_default.append(att)
att = AttributeExpectedValue()
att.attribute_context = 'Employee_Resolved_default/attributeContext/Employee_Resolved_default/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/_generatedAttributeSet/TeamIDTeamCount'
att.data_format = 'Int32'
att.name = 'TeamIDTeamCount'
expected_default.append(att)
expected_normalized = []
att = AttributeExpectedValue()
att.attribute_context = 'Employee_Resolved_normalized/attributeContext/Employee_Resolved_normalized/attributesAddedAtThisScope/attributesAddedAtThisScope/ID'
att.data_format = 'Guid'
att.display_name = 'ID'
att.is_primary_key = True
att.name = 'ID'
att.source_name = 'ID'
expected_normalized.append(att)
att = AttributeExpectedValue()
att.attribute_context = 'Employee_Resolved_normalized/attributeContext/Employee_Resolved_normalized/attributesAddedAtThisScope/attributesAddedAtThisScope/FullName'
att.data_format = 'String'
att.display_name = 'FullName'
att.name = 'FullName'
att.source_name = 'FullName'
expected_normalized.append(att)
expected_referenceOnly = []
att = AttributeExpectedValue()
att.attribute_context = 'Employee_Resolved_referenceOnly/attributeContext/Employee_Resolved_referenceOnly/attributesAddedAtThisScope/attributesAddedAtThisScope/ID'
att.data_format = 'Guid'
att.display_name = 'ID'
att.is_primary_key = True
att.name = 'ID'
att.source_name = 'ID'
expected_referenceOnly.append(att)
att = AttributeExpectedValue()
att.attribute_context = 'Employee_Resolved_referenceOnly/attributeContext/Employee_Resolved_referenceOnly/attributesAddedAtThisScope/attributesAddedAtThisScope/FullName'
att.data_format = 'String'
att.display_name = 'FullName'
att.name = 'FullName'
att.source_name = 'FullName'
expected_referenceOnly.append(att)
att = AttributeExpectedValue()
att.attribute_context = 'Employee_Resolved_referenceOnly/attributeContext/Employee_Resolved_referenceOnly/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/_generatedAttributeSet/TeamIDTeamCount'
att.data_format = 'Int32'
att.name = 'TeamIDTeamCount'
expected_referenceOnly.append(att)
expected_structured = []
att = AttributeExpectedValue()
att.attribute_context = 'Employee_Resolved_structured/attributeContext/Employee_Resolved_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/ID'
att.data_format = 'Guid'
att.display_name = 'ID'
att.is_primary_key = True
att.name = 'ID'
att.source_name = 'ID'
expected_structured.append(att)
att = AttributeExpectedValue()
att.attribute_context = 'Employee_Resolved_structured/attributeContext/Employee_Resolved_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/FullName'
att.data_format = 'String'
att.display_name = 'FullName'
att.name = 'FullName'
att.source_name = 'FullName'
expected_structured.append(att)
attrib_group_ref = AttributeExpectedValue()
attrib_group_ref.attribute_group_name = 'TeamID'
attrib_group_ref.attribute_context = 'Employee_Resolved_structured/attributeContext/Employee_Resolved_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID'
attrib_group_ref.members = []
att = AttributeExpectedValue()
att.attribute_context = 'Employee_Resolved_structured/attributeContext/Employee_Resolved_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/Team/attributesAddedAtThisScope/attributesAddedAtThisScope/ID'
att.data_format = 'Guid'
att.name = 'ID'
attrib_group_ref.members.append(att)
att = AttributeExpectedValue()
att.attribute_context = 'Employee_Resolved_structured/attributeContext/Employee_Resolved_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/Team/attributesAddedAtThisScope/attributesAddedAtThisScope/Name'
att.data_format = 'String'
att.name = 'Name'
attrib_group_ref.members.append(att)
expected_structured.append(attrib_group_ref)
expected_normalized_structured = []
att = AttributeExpectedValue()
att.attribute_context = 'Employee_Resolved_normalized_structured/attributeContext/Employee_Resolved_normalized_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/ID'
att.data_format = 'Guid'
att.display_name = 'ID'
att.is_primary_key = True
att.name = 'ID'
att.source_name = 'ID'
expected_normalized_structured.append(att)
att = AttributeExpectedValue()
att.attribute_context = 'Employee_Resolved_normalized_structured/attributeContext/Employee_Resolved_normalized_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/FullName'
att.data_format = 'String'
att.display_name = 'FullName'
att.name = 'FullName'
att.source_name = 'FullName'
expected_normalized_structured.append(att)
expected_referenceOnly_normalized = []
att = AttributeExpectedValue()
att.attribute_context = 'Employee_Resolved_referenceOnly_normalized/attributeContext/Employee_Resolved_referenceOnly_normalized/attributesAddedAtThisScope/attributesAddedAtThisScope/ID'
att.data_format = 'Guid'
att.display_name = 'ID'
att.is_primary_key = True
att.name = 'ID'
att.source_name = 'ID'
expected_referenceOnly_normalized.append(att)
att = AttributeExpectedValue()
att.attribute_context = 'Employee_Resolved_referenceOnly_normalized/attributeContext/Employee_Resolved_referenceOnly_normalized/attributesAddedAtThisScope/attributesAddedAtThisScope/FullName'
att.data_format = 'String'
att.display_name = 'FullName'
att.name = 'FullName'
att.source_name = 'FullName'
expected_referenceOnly_normalized.append(att)
expected_referenceOnly_structured = []
att = AttributeExpectedValue()
att.attribute_context = 'Employee_Resolved_referenceOnly_structured/attributeContext/Employee_Resolved_referenceOnly_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/ID'
att.data_format = 'Guid'
att.display_name = 'ID'
att.is_primary_key = True
att.name = 'ID'
att.source_name = 'ID'
expected_referenceOnly_structured.append(att)
att = AttributeExpectedValue()
att.attribute_context = 'Employee_Resolved_referenceOnly_structured/attributeContext/Employee_Resolved_referenceOnly_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/FullName'
att.data_format = 'String'
att.display_name = 'FullName'
att.name = 'FullName'
att.source_name = 'FullName'
expected_referenceOnly_structured.append(att)
attrib_group_ref = AttributeExpectedValue()
attrib_group_ref.attribute_group_name = 'TeamID'
attrib_group_ref.attribute_context = 'Employee_Resolved_referenceOnly_structured/attributeContext/Employee_Resolved_referenceOnly_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID'
attrib_group_ref.members = []
att = AttributeExpectedValue()
att.attribute_context = 'Employee_Resolved_referenceOnly_structured/attributeContext/Employee_Resolved_referenceOnly_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/TeamID/_generatedAttributeSet/_generatedAttributeRound0/_foreignKey'
att.data_format = 'Guid'
att.description = ''
att.display_name = 'TeamID'
att.name = 'TeamID'
att.source_name = 'TeamID'
attrib_group_ref.members.append(att)
expected_referenceOnly_structured.append(attrib_group_ref)
expected_referenceOnly_normalized_structured = []
att = AttributeExpectedValue()
att.attribute_context = 'Employee_Resolved_referenceOnly_normalized_structured/attributeContext/Employee_Resolved_referenceOnly_normalized_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/ID'
att.data_format = 'Guid'
att.display_name = 'ID'
att.is_primary_key = True
att.name = 'ID'
att.source_name = 'ID'
expected_referenceOnly_normalized_structured.append(att)
att = AttributeExpectedValue()
att.attribute_context = 'Employee_Resolved_referenceOnly_normalized_structured/attributeContext/Employee_Resolved_referenceOnly_normalized_structured/attributesAddedAtThisScope/attributesAddedAtThisScope/FullName'
att.data_format = 'String'
att.display_name = 'FullName'
att.name = 'FullName'
att.source_name = 'FullName'
expected_referenceOnly_normalized_structured.append(att)
await self.run_test_with_values(
test_name,
entity_name,
expectedContext_default,
expectedContext_normalized,
expectedContext_referenceOnly,
expectedContext_structured,
expectedContext_normalized_structured,
expectedContext_referenceOnly_normalized,
expectedContext_referenceOnly_structured,
expectedContext_referenceOnly_normalized_structured,
expected_default,
expected_normalized,
expected_referenceOnly,
expected_structured,
expected_normalized_structured,
expected_referenceOnly_normalized,
expected_referenceOnly_structured,
expected_referenceOnly_normalized_structured
)
| 53.979577
| 258
| 0.775946
| 6,566
| 76,651
| 8.622297
| 0.018581
| 0.074893
| 0.033914
| 0.027555
| 0.973911
| 0.957873
| 0.94625
| 0.940085
| 0.936076
| 0.929346
| 0
| 0.022743
| 0.163064
| 76,651
| 1,419
| 259
| 54.017618
| 0.859755
| 0.001957
| 0
| 0.794964
| 0
| 0
| 0.341896
| 0.312606
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.002698
| 0
| 0.003597
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
77455218233752512da621b81d7a4534b1e3f03e
| 3,141
|
py
|
Python
|
lib/models/map_modules/map_conv.py
|
CFM-MSG/Code_LEORN
|
fabea1e1ded973a4db692e51e2df442bde55f626
|
[
"MIT"
] | 1
|
2022-01-31T03:23:37.000Z
|
2022-01-31T03:23:37.000Z
|
lib/models/map_modules/map_conv.py
|
CFM-MSG/Code_LEORN
|
fabea1e1ded973a4db692e51e2df442bde55f626
|
[
"MIT"
] | null | null | null |
lib/models/map_modules/map_conv.py
|
CFM-MSG/Code_LEORN
|
fabea1e1ded973a4db692e51e2df442bde55f626
|
[
"MIT"
] | null | null | null |
from torch import nn
import torch.nn.functional as F
from models.map_modules import get_padded_mask_and_weight
class MapConv(nn.Module):
    """Stack of 2D convolutions over a score map.

    After each conv + ReLU, the validity mask is propagated through the
    layer and the activations are re-weighted so padded positions do not
    contaminate the map (via ``get_padded_mask_and_weight``).
    """

    def __init__(self, cfg):
        super(MapConv, self).__init__()
        in_dim = cfg.INPUT_SIZE          # e.g. 512
        hidden = cfg.HIDDEN_SIZES        # per-layer output channels
        kernels = cfg.KERNEL_SIZES
        strides = cfg.STRIDES
        pads = cfg.PADDINGS
        dils = cfg.DILATIONS
        # All per-layer hyperparameter lists must have the same length.
        assert len(hidden) == len(kernels) == len(strides) == len(pads) == len(dils)
        dims = [in_dim] + hidden
        self.convs = nn.ModuleList(
            nn.Conv2d(c_in, c_out, k, s, p, d)
            for c_in, c_out, k, s, p, d
            in zip(dims[:-1], dims[1:], kernels, strides, pads, dils)
        )

    def forward(self, x, mask):
        padded_mask = mask
        for conv in self.convs:
            x = F.relu(conv(x))
            # Track the conv's receptive field in the mask and re-normalize.
            padded_mask, masked_weight = get_padded_mask_and_weight(padded_mask, conv)
            x = x * masked_weight
        return x  # presumably batchsize * 512 * 16 * 16 — depends on cfg
class ResMapConv(nn.Module):
    """Residual variant of MapConv.

    Each layer computes ``x = conv(x) + x`` (skip connection), optionally
    wrapped in BatchNorm, followed by ReLU and the same mask re-weighting
    as MapConv.
    """

    def __init__(self, cfg):
        super(ResMapConv, self).__init__()
        input_size = cfg.INPUT_SIZE  # 512
        hidden_sizes = cfg.HIDDEN_SIZES  # [512, 512, 512, 512, 512, 512, 512, 512]
        kernel_sizes = cfg.KERNEL_SIZES  # [5, 5, 5, 5, 5, 5, 5, 5]
        strides = cfg.STRIDES  # [1, 1, 1, 1, 1, 1, 1, 1]
        paddings = cfg.PADDINGS  # [16, 0, 0, 0, 0, 0, 0, 0]
        dilations = cfg.DILATIONS  # [1, 1, 1, 1, 1, 1, 1, 1]
        self.convs = nn.ModuleList()
        # All per-layer hyperparameter lists must have the same length.
        assert len(hidden_sizes) == len(kernel_sizes) \
            and len(hidden_sizes) == len(strides) \
            and len(hidden_sizes) == len(paddings) \
            and len(hidden_sizes) == len(dilations)
        channel_sizes = [input_size] + hidden_sizes
        for i, (k, s, p, d) in enumerate(zip(kernel_sizes, strides, paddings, dilations)):
            # NOTE(review): the residual add in forward() requires
            # channel_sizes[i] == channel_sizes[i + 1] for every layer.
            self.convs.append(nn.Conv2d(channel_sizes[i], channel_sizes[i + 1], k, s, p, d))
        # BatchNorm is on by default; disabled only when cfg.NORM is falsy.
        # ('NORM' not in cfg assumes cfg supports membership tests, e.g. a
        # yacs CfgNode — TODO confirm.)
        if 'NORM' not in cfg or cfg.NORM:
            # len(hidden_sizes) + 1 BN layers in total: bn_layers[0] is
            # applied to the raw input before the first conv (assumes
            # input_size == hidden_sizes[0] — NOTE(review): verify), and
            # bn_layers[i + 1] follows conv i in forward().
            self.bn_layers = nn.ModuleList(
                [nn.BatchNorm2d(hidden_sizes[i]) for i in range(0, len(hidden_sizes))])
            self.bn_layers.append(nn.BatchNorm2d(hidden_sizes[-1]))
        else:
            self.bn_layers = None

    def forward(self, x, mask):
        padded_mask = mask
        if self.bn_layers is not None:
            # Normalize the input itself before the first residual block.
            x = self.bn_layers[0](x)
        for i, pred in enumerate(self.convs):
            # Residual connection: conv output plus its own input.
            x = pred(x) + x
            if self.bn_layers is not None:
                x = self.bn_layers[i + 1](x)
            x = F.relu(x)
            # Propagate the validity mask and re-weight activations so
            # padded positions do not contaminate the map.
            padded_mask, masked_weight = get_padded_mask_and_weight(padded_mask, pred)
            x = x * masked_weight
        return x  # batchsize * 512 * 16 * 16
| 44.239437
| 92
| 0.574021
| 458
| 3,141
| 3.755459
| 0.155022
| 0.032558
| 0.04186
| 0.046512
| 0.825581
| 0.812791
| 0.812791
| 0.781395
| 0.709302
| 0.709302
| 0
| 0.065365
| 0.298631
| 3,141
| 71
| 93
| 44.239437
| 0.715388
| 0.109201
| 0
| 0.709677
| 0
| 0
| 0.001437
| 0
| 0
| 0
| 0
| 0
| 0.032258
| 1
| 0.064516
| false
| 0
| 0.048387
| 0
| 0.177419
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
620d6da422257473a3e0b00dfa88aa83620e22c0
| 18,933
|
py
|
Python
|
platform/core/tests/test_auditor/test_auditor_build_job.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
platform/core/tests/test_auditor/test_auditor_build_job.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
platform/core/tests/test_auditor/test_auditor_build_job.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
# pylint:disable=ungrouped-imports
from unittest.mock import patch
import pytest
import auditor
from events.registry import build_job as build_job_events
from factories.factory_build_jobs import BuildJobFactory
from factories.factory_projects import ProjectFactory
from tests.test_auditor.utils import AuditorBaseTest
@pytest.mark.auditor_mark
class AuditorBuildJobTest(AuditorBaseTest):
    """Testing subscribed events.

    Each test records one build-job event through `auditor` and checks how
    many times every subscribed backend service handled it.  The patching
    and counting boilerplate, previously copy-pasted across 20 tests, lives
    in `_record_and_check`.
    """
    EVENTS = build_job_events.EVENTS

    def setUp(self):
        super().setUp()
        self.build_job = BuildJobFactory(project=ProjectFactory())
        self.tested_events = {
            build_job_events.BUILD_JOB_CREATED,
            build_job_events.BUILD_JOB_UPDATED,
            build_job_events.BUILD_JOB_STARTED,
            build_job_events.BUILD_JOB_STARTED_TRIGGERED,
            build_job_events.BUILD_JOB_DELETED,
            build_job_events.BUILD_JOB_DELETED_TRIGGERED,
            build_job_events.BUILD_JOB_STOPPED,
            build_job_events.BUILD_JOB_STOPPED_TRIGGERED,
            build_job_events.BUILD_JOB_CLEANED_TRIGGERED,
            build_job_events.BUILD_JOB_VIEWED,
            build_job_events.BUILD_JOB_ARCHIVED,
            build_job_events.BUILD_JOB_RESTORED,
            build_job_events.BUILD_JOB_BOOKMARKED,
            build_job_events.BUILD_JOB_UNBOOKMARKED,
            build_job_events.BUILD_JOB_NEW_STATUS,
            build_job_events.BUILD_JOB_FAILED,
            build_job_events.BUILD_JOB_SUCCEEDED,
            build_job_events.BUILD_JOB_DONE,
            build_job_events.BUILD_JOB_LOGS_VIEWED,
            build_job_events.BUILD_JOB_STATUSES_VIEWED,
        }

    def _record_and_check(self, event_type, tracker=0, activitylogs=0,
                          notifier=0, executor=0, **kwargs):
        """Record `event_type` on `self.build_job` and assert call counts.

        `tracker`/`activitylogs`/`notifier`/`executor` are the expected
        `record_event` call counts for each service; extra keyword args
        (e.g. `actor_id`, `actor_name`) are forwarded to `auditor.record`.
        """
        with patch('tracker.service.TrackerService.record_event') as tracker_record, \
                patch('activitylogs.service.ActivityLogService.record_event') as activitylogs_record, \
                patch('notifier.service.NotifierService.record_event') as notifier_record, \
                patch('executor.executor_service.ExecutorService.record_event') as executor_record:
            auditor.record(event_type=event_type,
                           instance=self.build_job,
                           **kwargs)
        assert tracker_record.call_count == tracker
        assert activitylogs_record.call_count == activitylogs
        assert notifier_record.call_count == notifier
        assert executor_record.call_count == executor

    def test_build_job_created(self):
        self._record_and_check(build_job_events.BUILD_JOB_CREATED,
                               tracker=1, activitylogs=1, executor=1)

    def test_build_job_updated(self):
        self._record_and_check(build_job_events.BUILD_JOB_UPDATED,
                               tracker=1, activitylogs=1,
                               actor_name='foo', actor_id=1)

    def test_build_job_started(self):
        self._record_and_check(build_job_events.BUILD_JOB_STARTED,
                               tracker=1)

    def test_build_job_started_triggered(self):
        self._record_and_check(build_job_events.BUILD_JOB_STARTED_TRIGGERED,
                               tracker=1, activitylogs=1,
                               actor_id=1, actor_name='foo')

    def test_build_job_deleted(self):
        self._record_and_check(build_job_events.BUILD_JOB_DELETED,
                               tracker=1)

    def test_build_job_triggered_deleted(self):
        self._record_and_check(build_job_events.BUILD_JOB_DELETED_TRIGGERED,
                               tracker=1, activitylogs=1,
                               actor_name='foo', actor_id=1)

    def test_build_job_stopped(self):
        self._record_and_check(build_job_events.BUILD_JOB_STOPPED,
                               tracker=1, notifier=1, executor=1)

    def test_build_job_stopped_triggered(self):
        self._record_and_check(build_job_events.BUILD_JOB_STOPPED_TRIGGERED,
                               tracker=1, activitylogs=1,
                               actor_name='foo', actor_id=1)

    def test_build_job_cleaned_triggered(self):
        self._record_and_check(build_job_events.BUILD_JOB_CLEANED_TRIGGERED,
                               executor=1)

    def test_build_job_viewed(self):
        self._record_and_check(build_job_events.BUILD_JOB_VIEWED,
                               tracker=1, activitylogs=1,
                               actor_name='foo', actor_id=1)

    def test_build_job_archived(self):
        self._record_and_check(build_job_events.BUILD_JOB_ARCHIVED,
                               tracker=1, activitylogs=1,
                               actor_name='foo', actor_id=1)

    def test_build_job_restored(self):
        self._record_and_check(build_job_events.BUILD_JOB_RESTORED,
                               tracker=1, activitylogs=1,
                               actor_name='foo', actor_id=1)

    def test_build_job_bookmarked(self):
        self._record_and_check(build_job_events.BUILD_JOB_BOOKMARKED,
                               tracker=1, activitylogs=1,
                               actor_name='foo', actor_id=1)

    def test_build_job_unbookmarked(self):
        self._record_and_check(build_job_events.BUILD_JOB_UNBOOKMARKED,
                               tracker=1, activitylogs=1,
                               actor_name='foo', actor_id=1)

    def test_build_job_new_status(self):
        self._record_and_check(build_job_events.BUILD_JOB_NEW_STATUS,
                               tracker=1, executor=1)

    def test_build_job_failed(self):
        self._record_and_check(build_job_events.BUILD_JOB_FAILED,
                               tracker=1, notifier=1, executor=1)

    def test_build_job_succeeded(self):
        self._record_and_check(build_job_events.BUILD_JOB_SUCCEEDED,
                               tracker=1, notifier=1, executor=1)

    def test_build_job_done(self):
        self._record_and_check(build_job_events.BUILD_JOB_DONE,
                               tracker=1, executor=1)

    def test_build_job_logs_viewed_triggered(self):
        self._record_and_check(build_job_events.BUILD_JOB_LOGS_VIEWED,
                               tracker=1, activitylogs=1,
                               actor_name='foo', actor_id=1)

    def test_build_job_statuses_viewed_triggered(self):
        self._record_and_check(build_job_events.BUILD_JOB_STATUSES_VIEWED,
                               tracker=1, activitylogs=1,
                               actor_name='foo', actor_id=1)


# Remove the imported base class so the test runner does not collect and
# run its tests a second time from this module.
del AuditorBaseTest
| 46.178049
| 79
| 0.615116
| 1,823
| 18,933
| 6.027976
| 0.043335
| 0.090272
| 0.1092
| 0.059696
| 0.944035
| 0.943944
| 0.900264
| 0.873237
| 0.873237
| 0.868869
| 0
| 0.007013
| 0.314689
| 18,933
| 409
| 80
| 46.290954
| 0.839923
| 0.003116
| 0
| 0.77933
| 0
| 0
| 0.207388
| 0.205639
| 0
| 0
| 0
| 0
| 0.223464
| 1
| 0.058659
| false
| 0
| 0.019553
| 0
| 0.083799
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6560aaad30544d619056a21227a12d1b8fb2982a
| 5,552
|
py
|
Python
|
xwp/spectral_1d.py
|
s-sajid-ali/xray_wave_propagators
|
2540f0d4009f202d7dcd2b994b598fa54c151359
|
[
"MIT"
] | 4
|
2019-01-12T22:43:03.000Z
|
2021-05-09T17:32:45.000Z
|
xwp/spectral_1d.py
|
s-sajid-ali/xray_wave_propagators
|
2540f0d4009f202d7dcd2b994b598fa54c151359
|
[
"MIT"
] | null | null | null |
xwp/spectral_1d.py
|
s-sajid-ali/xray_wave_propagators
|
2540f0d4009f202d7dcd2b994b598fa54c151359
|
[
"MIT"
] | null | null | null |
#1D versions of propagators
import numpy as np
__all__ = ['propTF',
'prop1FT',
'propFF',
'propIR']
'''
Propagation using the transfer-function method.
Inputs -
u : profile of the beam at the input plane.
step : the sampling step size at the input plane.
L1 : side length of the support.
wavel : the wavelength of the light.
z : the propagation distance.
fft_object : (not implemented) to pass an FFTW object for
evaluation of the FFT.
Outputs -
u : beam profile at the output plane.
L1 : the side length of the support at the output plane.
'''
try:
    import numexpr as ne

    def propTF(u, step, L1, wavel, z, fft_object=None):
        """Transfer-function (angular-spectrum) propagation, numexpr path.

        Args:
            u: complex 1D field profile at the input plane.
            step: sampling step size at the input plane.
            L1: side length of the support (unchanged by this method).
            wavel: wavelength of the light.
            z: propagation distance.
            fft_object: accepted for API compatibility; not implemented.
        Returns:
            (u, L1): propagated field and the (unchanged) support length.
        """
        N = np.shape(u)[0]
        pi = np.pi  # local name so the numexpr expression can see it
        F = np.fft.fftfreq(N, step)
        u = np.fft.fft(u)
        # Multiply by the exact (non-paraxial) free-space transfer function.
        u = ne.evaluate('exp(-1j*(2*pi*z/wavel)*sqrt(1-wavel**2*(F**2)))*u')
        u = np.fft.ifft(u)
        return u, L1
except ImportError:
    # numexpr is an optional accelerator; fall back to pure numpy.
    # Catching only ImportError (instead of the previous bare `except:`)
    # keeps real errors in the accelerated definition from being masked.
    def propTF(u, step, L1, wavel, z, fft_object=None):
        """Transfer-function propagation, pure-numpy fallback (same contract)."""
        N = np.shape(u)[0]
        pi = np.pi
        F = np.fft.fftfreq(N, step)
        u = np.fft.fft(u)
        u = np.exp(-1j*(2*pi*z/wavel)*np.sqrt(1-wavel**2*(F**2)))*u
        u = np.fft.ifft(u)
        return u, L1
'''
Propagation using the single-Fourier-transform approach.
Input convention as above.
Inputs -
u : profile of the beam at the input plane.
step : the sampling step size at the input plane.
L1 : side length of the support.
wavel : the wavelength of the light.
z : the propagation distance.
fft_object : (not implemented) to pass an FFTW object
for evaluation of the FFT.
Outputs -
u : beam profile at the output plane.
L_out : the side length of the support at the output plane.
'''
try:
    import numexpr as ne

    def prop1FT(u, step, L1, wavel, z, fft_object=None):
        """Single-Fourier-transform Fresnel propagation, numexpr path.

        Args:
            u: complex 1D field profile at the input plane.
            step: sampling step size at the input plane.
            L1: side length of the input support.
            wavel: wavelength of the light.
            z: propagation distance (must be nonzero).
            fft_object: accepted for API compatibility; not implemented.
        Returns:
            (u, L_out): propagated field and the output support side
            length L_out = wavel*z/step.
        """
        N = np.shape(u)[0]
        x = np.linspace(-L1/2.0, L1/2.0, N)
        L_out = wavel*z/step
        pi = np.pi  # local name for the numexpr expression
        # Quadratic (chirp) phase in the input plane, then a scaled FFT.
        u = ne.evaluate('exp(1j*pi/(wavel*z)*(x**2))*u')
        u = np.fft.fft(u)*step
        u = np.fft.fftshift(u)
        # Normalization factor of the 1FT Fresnel integral.
        u = ne.evaluate('u*(sqrt(1/(1j*wavel*z)))')
        return u, L_out
except ImportError:
    # numexpr is optional; pure-numpy fallback with the same contract.
    # (Previously a bare `except:`, which would also have hidden genuine
    # errors raised while defining the accelerated version.)
    def prop1FT(u, step, L1, wavel, z, fft_object=None):
        """Single-Fourier-transform Fresnel propagation, numpy fallback."""
        N = np.shape(u)[0]
        x = np.linspace(-L1/2.0, L1/2.0, N)
        L_out = wavel*z/step
        pi = np.pi
        u = np.exp(1j*pi/(wavel*z)*(x**2))*u
        u = np.fft.fft(u)*step
        u = np.fft.fftshift(u)
        u = u*(np.sqrt(1/(1j*wavel*z)))
        return u, L_out
'''
Fraunhofer propagation.
Inputs -
u : profile of the beam at the input plane.
step : the sampling step size at the input plane.
L1 : side length of the support.
wavel : the wavelength of the light.
z : the propagation distance.
fft_object : (not implemented) to pass an FFTW object
for evaluation of the FFT.
Outputs -
u : beam profile at the output plane.
L_out : the side length of the support at the output plane.
'''
def propFF(u, step, L1, wavel, z, fft_object=None):
    """Fraunhofer (far-field) propagation.

    Args:
        u: complex 1D field profile at the input plane.
        step: sampling step size at the input plane.
        L1: side length of the input support (kept for API compatibility;
            not used by the Fraunhofer integral itself).
        wavel: wavelength of the light.
        z: propagation distance (must be nonzero).
        fft_object: accepted for API compatibility; not implemented.
    Returns:
        (u, L_out): propagated field and the output support side length
        L_out = wavel*z/step.
    """
    L_out = wavel*z/step
    # Scaled FFT followed by the Fraunhofer normalization factor.
    u = np.fft.fft(u)*step
    u = np.fft.fftshift(u)
    u = u*np.sqrt(1/(1j*wavel*z))
    return u, L_out
'''
Warning : use is now deprecated!
Propagation using the impulse-response function. The convention of shifting
a function in real space before performing the Fourier transform, which is
used in the reference, is followed here. Input convention as above.
Use is deprecated since the implementation of 1FT covers ranges that are too
large for TF but too small for FF.
'''
try:
    import numexpr as ne

    def propIR(u, step, L, wavel, z, fft_object=None):
        """Deprecated impulse-response propagation, numexpr path.

        Convolves the input field with the sampled free-space impulse
        response via FFT.  Prefer prop1FT for intermediate distances.

        Args:
            u: complex 1D field profile at the input plane.
            step: sampling step size at the input plane.
            L: side length of the support (unchanged by this method).
            wavel: wavelength of the light.
            z: propagation distance (must be nonzero).
            fft_object: accepted for API compatibility; not implemented.
        Returns:
            (u, L): propagated field and the (unchanged) support length.
        """
        N = np.shape(u)[0]
        k = 2*np.pi/wavel
        x = np.linspace(-L/2.0, L/2.0, N)
        # Sampled impulse response, shifted before the FFT per the
        # reference's convention.
        h = ne.evaluate('1/sqrt(1j*wavel*z)*exp(((1j*k)/(2*z))*(x**2))')
        h = np.fft.fft(np.fft.fftshift(h))*step
        u = np.fft.fft(u)
        u = ne.evaluate('h * u')
        u = np.fft.ifft(u)
        return u, L
except ImportError:
    # numexpr is optional; pure-numpy fallback with the same contract.
    # (Previously a bare `except:` -- narrowed so unrelated errors in the
    # accelerated branch are no longer swallowed.)
    def propIR(u, step, L, wavel, z, fft_object=None):
        """Deprecated impulse-response propagation, numpy fallback."""
        N = np.shape(u)[0]
        k = 2*np.pi/wavel
        x = np.linspace(-L/2.0, L/2.0, N)
        h = np.sqrt(1/(1j*wavel*z))*np.exp(((1j*k)/(2*z))*(x**2))
        h = np.fft.fft(np.fft.fftshift(h))*step
        u = np.fft.fft(u)
        u *= h
        u = np.fft.ifft(u)
        return u, L
| 27.349754
| 81
| 0.559978
| 986
| 5,552
| 3.124746
| 0.123732
| 0.038948
| 0.027264
| 0.029211
| 0.8111
| 0.800065
| 0.78481
| 0.778968
| 0.776696
| 0.760143
| 0
| 0.034835
| 0.276117
| 5,552
| 202
| 82
| 27.485149
| 0.731774
| 0.130944
| 0
| 0.786517
| 0
| 0.022472
| 0.059797
| 0.049662
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078652
| false
| 0
| 0.044944
| 0
| 0.202247
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
656e1a531742b980122139e358de7f967d633c43
| 2,833
|
py
|
Python
|
tests/data/nextbus/multi_predict_two.py
|
tylernorth/public-transit
|
e2430078557adf9d2ad03d794ea551a7b06ce145
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
tests/data/nextbus/multi_predict_two.py
|
tylernorth/public-transit
|
e2430078557adf9d2ad03d794ea551a7b06ce145
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
tests/data/nextbus/multi_predict_two.py
|
tylernorth/public-transit
|
e2430078557adf9d2ad03d794ea551a7b06ce145
|
[
"BSD-2-Clause-FreeBSD"
] | 3
|
2017-03-17T11:54:09.000Z
|
2022-01-21T05:07:16.000Z
|
# Canned NextBus multi-stop `predictions` API response used as a test
# fixture: two stops on San Francisco Muni route 38-Geary, five inbound
# predictions each, plus rider-alert messages.
# NOTE: the XML below is fixture data consumed by the tests -- do not
# reformat or edit its contents.
text = '''<?xml version="1.0" encoding="utf-8" ?>
<body copyright="All data copyright San Francisco Muni 2015.">
<predictions agencyTitle="San Francisco Muni" routeTitle="38-Geary" routeTag="38" stopTitle="43rd Ave & Point Lobos Ave" stopTag="13568">
<direction title="Inbound to Downtown">
<prediction epochTime="1434139707528" seconds="1625" minutes="27" isDeparture="false" affectedByLayover="true" dirTag="38___I_F10" vehicle="6241" block="3806" tripTag="6629317" />
<prediction epochTime="1434140667528" seconds="2585" minutes="43" isDeparture="false" affectedByLayover="true" dirTag="38___I_F10" vehicle="6406" block="3808" tripTag="6629319" />
<prediction epochTime="1434141627528" seconds="3545" minutes="59" isDeparture="false" affectedByLayover="true" dirTag="38___I_F10" vehicle="6420" block="3811" tripTag="6629321" />
<prediction epochTime="1434142587528" seconds="4505" minutes="75" isDeparture="false" affectedByLayover="true" dirTag="38___I_F10" vehicle="6292" block="3843" tripTag="6629323" />
<prediction epochTime="1434143487528" seconds="5405" minutes="90" isDeparture="false" affectedByLayover="true" dirTag="38___I_F10" vehicle="6283" block="3813" tripTag="6629325" />
</direction>
<message text="Go to sfmta.com 4 Email/Text Alerts." priority="Low"/>
<message text="Discount cash fare increase 7/1. Info at sfmta.com or 3-1-1." priority="Low"/>
<message text="We're on Twitter: @sfmta_muni" priority="Low"/>
</predictions>
<predictions agencyTitle="San Francisco Muni" routeTitle="38-Geary" routeTag="38" stopTitle="43rd Ave & Clement St" stopTag="13567">
<direction title="Inbound to Downtown">
<prediction epochTime="1434139691349" seconds="1608" minutes="26" isDeparture="false" affectedByLayover="true" dirTag="38___I_F10" vehicle="6241" block="3806" tripTag="6629317" />
<prediction epochTime="1434140651349" seconds="2568" minutes="42" isDeparture="false" affectedByLayover="true" dirTag="38___I_F10" vehicle="6406" block="3808" tripTag="6629319" />
<prediction epochTime="1434141611349" seconds="3528" minutes="58" isDeparture="false" affectedByLayover="true" dirTag="38___I_F10" vehicle="6420" block="3811" tripTag="6629321" />
<prediction epochTime="1434142571349" seconds="4488" minutes="74" isDeparture="false" affectedByLayover="true" dirTag="38___I_F10" vehicle="6292" block="3843" tripTag="6629323" />
<prediction epochTime="1434143471349" seconds="5388" minutes="89" isDeparture="false" affectedByLayover="true" dirTag="38___I_F10" vehicle="6283" block="3813" tripTag="6629325" />
</direction>
<message text="Go to sfmta.com 4 Email/Text Alerts." priority="Low"/>
<message text="Discount cash fare increase 7/1. Info at sfmta.com or 3-1-1." priority="Low"/>
<message text="We're on Twitter: @sfmta_muni" priority="Low"/>
</predictions>
</body>
'''
| 97.689655
| 181
| 0.74797
| 361
| 2,833
| 5.753463
| 0.33241
| 0.091478
| 0.158883
| 0.178142
| 0.779971
| 0.779971
| 0.779971
| 0.731825
| 0.731825
| 0.731825
| 0
| 0.162611
| 0.086128
| 2,833
| 28
| 182
| 101.178571
| 0.639629
| 0
| 1
| 0.428571
| 0
| 0.5
| 0.995058
| 0.187787
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
65a2f412793ec040dc1157cf255558bf6e1956fe
| 2,921
|
py
|
Python
|
twitter_api_handler/MapStatusToList.py
|
DanielFrc/twitter-stream
|
28cb239742d851cb1ca8675f386ce206412c32ab
|
[
"MIT"
] | null | null | null |
twitter_api_handler/MapStatusToList.py
|
DanielFrc/twitter-stream
|
28cb239742d851cb1ca8675f386ce206412c32ab
|
[
"MIT"
] | null | null | null |
twitter_api_handler/MapStatusToList.py
|
DanielFrc/twitter-stream
|
28cb239742d851cb1ca8675f386ce206412c32ab
|
[
"MIT"
] | null | null | null |
from util import constants as constants
class MapStatusToList:
    """Map tweepy status objects to flat lists (CSV-row compatible)."""

    def _status_to_row(self, tweet):
        """Build the CSV row for one tweet.

        Single source of truth for the column order -- previously this
        25-field list was duplicated verbatim in both public methods.
        """
        return [tweet.created_at,
                tweet.id,
                tweet.id_str,
                tweet.truncated,
                tweet.text,
                str(constants.TRACKS),
                tweet.source,
                tweet.source_url,
                tweet.in_reply_to_status_id,
                tweet.in_reply_to_status_id_str,
                tweet.in_reply_to_user_id,
                tweet.in_reply_to_user_id_str,
                tweet.in_reply_to_screen_name,
                tweet.user.screen_name,
                tweet.user.location,
                tweet.geo,
                tweet.coordinates,
                tweet.place,
                tweet.contributors,
                tweet.is_quote_status,
                tweet.retweet_count,
                tweet.favorite_count,
                tweet.favorited,
                tweet.retweeted,
                tweet.lang]

    def map_tweepy_list(self, tweets):
        """
        Function to map status objects to simple lists (csv compatible).
        Params:
            tweets(obj): List of tweets in raw format
        Return:
            tweets_list(Array): Array of tweets in a clean format.
        """
        return [self._status_to_row(tweet) for tweet in tweets]

    def map_tweepy_array(self, tweet):
        """
        Function to map one status object to a simple list (csv compatible).
        Params:
            tweet(obj): One tweet in raw format
        Return:
            new_tweet(Array): The tweet's fields in a clean format.
        """
        return self._status_to_row(tweet)
| 39.472973
| 71
| 0.441972
| 262
| 2,921
| 4.660305
| 0.229008
| 0.063063
| 0.09828
| 0.11466
| 0.845209
| 0.845209
| 0.845209
| 0.845209
| 0.845209
| 0.845209
| 0
| 0
| 0.504279
| 2,921
| 74
| 72
| 39.472973
| 0.843232
| 0.137624
| 0
| 0.821429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.017857
| 0
| 0.107143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
02b468f51a88bb9d1a55bb8fee9114244dc05a33
| 2,270
|
py
|
Python
|
tests/e2e/test_requests_custom.py
|
CarlosAMolina/requests
|
47972fc7c0a1f786a90900222fdadae24c0c0d51
|
[
"MIT"
] | 1
|
2020-11-11T11:17:48.000Z
|
2020-11-11T11:17:48.000Z
|
tests/e2e/test_requests_custom.py
|
CarlosAMolina/requests
|
47972fc7c0a1f786a90900222fdadae24c0c0d51
|
[
"MIT"
] | 3
|
2021-04-27T20:13:41.000Z
|
2021-04-27T20:13:46.000Z
|
tests/e2e/test_requests_custom.py
|
CarlosAMolina/requests
|
47972fc7c0a1f786a90900222fdadae24c0c0d51
|
[
"MIT"
] | null | null | null |
import unittest
from requests import exceptions
from requests_custom.requests_custom import RequestsCustom
class TestRequestsCustom(unittest.TestCase):
    """End-to-end tests for RequestsCustom against live endpoints.

    .. _URLs information
        https://httpstat.us/.
    """

    def test_get_url_with_a_correct_response_works(self):
        requests_custom = RequestsCustom(debug_simple=True).get_requests()
        URL = "https://duckduckgo.com"
        response = requests_custom.get(URL)
        self.assertEqual(200, response.status_code)

    def test_get_url_with_a_timeout_response_raises_an_exception(self):
        requests_custom = RequestsCustom(debug_simple=True)
        requests_custom.RETRY_ATTEMPTS = 1
        requests_custom.BACKOFF_FACTOR = 0
        requests_custom._log_backoff_factor()
        requests_custom = requests_custom.get_requests()
        URL = "https://httpstat.us/408"
        # assertRaises replaces the manual try/raise/except pattern and
        # fails with an accurate message when no exception is raised.
        with self.assertRaises(exceptions.RetryError):
            requests_custom.get(URL)

    def test_get_url_with_a_delayed_response_fails(self):
        requests_custom = RequestsCustom(debug_simple=True)
        requests_custom.RETRY_ATTEMPTS = 0
        requests_custom.BACKOFF_FACTOR = 0
        requests_custom.TIMEOUT_DEFAULT = 0.1
        requests_custom._log_backoff_factor()
        requests_custom = requests_custom.get_requests()
        URL = "https://httpstat.us/200?sleep={miliseconds}".format(miliseconds=200)
        # The original failure message wrongly said "Expected RetryError"
        # although this test expects a ConnectionError; assertRaises
        # reports the right expectation automatically.
        with self.assertRaises(exceptions.ConnectionError):
            requests_custom.get(URL)

    def test_get_url_with_a_delayed_response_works(self):
        requests_custom = RequestsCustom(debug_simple=True)
        requests_custom.RETRY_ATTEMPTS = 0
        requests_custom.BACKOFF_FACTOR = 0
        requests_custom.TIMEOUT_DEFAULT = 1
        requests_custom._log_backoff_factor()
        requests_custom = requests_custom.get_requests()
        URL = "https://httpstat.us/200?sleep={miliseconds}".format(miliseconds=100)
        response = requests_custom.get(URL)
        self.assertEqual(200, response.status_code)


if __name__ == "__main__":
    unittest.main()
| 36.612903
| 83
| 0.703965
| 256
| 2,270
| 5.878906
| 0.253906
| 0.251163
| 0.07907
| 0.074419
| 0.802658
| 0.802658
| 0.778738
| 0.760133
| 0.760133
| 0.716944
| 0
| 0.016807
| 0.213656
| 2,270
| 61
| 84
| 37.213115
| 0.826331
| 0.019824
| 0
| 0.565217
| 0
| 0
| 0.09941
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 1
| 0.086957
| false
| 0
| 0.065217
| 0
| 0.173913
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
02d04b4d0db5df29cdba9b5b024159dbc904300f
| 107
|
py
|
Python
|
examples/Movie-Lens/best_params/__init__.py
|
faizanahemad/Hybrid-Weighted-Embedding-Recommender
|
904a27c4b0126935735aee689408b2b6acf4af9a
|
[
"MIT"
] | 12
|
2019-11-29T00:06:01.000Z
|
2021-07-01T10:43:58.000Z
|
examples/Movie-Lens/best_params/__init__.py
|
kiminh/Hybrid-Weighted-Embedding-Recommender
|
457c4f13521aefa70476947c5849e85482abc3d4
|
[
"MIT"
] | 10
|
2020-03-31T09:54:00.000Z
|
2022-03-12T00:05:21.000Z
|
examples/Movie-Lens/best_params/__init__.py
|
kiminh/Hybrid-Weighted-Embedding-Recommender
|
457c4f13521aefa70476947c5849e85482abc3d4
|
[
"MIT"
] | 2
|
2019-12-10T04:11:32.000Z
|
2020-10-29T02:57:01.000Z
|
from .gcn_ncf_100K import params as params_gcn_ncf_100K
from .gcn_ncf_1M import params as params_gcn_ncf_1M
| 53.5
| 55
| 0.878505
| 22
| 107
| 3.818182
| 0.363636
| 0.285714
| 0.238095
| 0.47619
| 0.619048
| 0.619048
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 0.102804
| 107
| 2
| 56
| 53.5
| 0.791667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
f30551b6ca496bbf75fe2f996b72a43e3be33181
| 39,790
|
bzl
|
Python
|
3rdparty/target_file.bzl
|
Jonathan-2287/bazel-deps
|
514c479c1afd3c4c73c6181f977e1066d65ceb8f
|
[
"MIT"
] | 235
|
2016-07-26T02:10:16.000Z
|
2022-03-31T06:23:15.000Z
|
3rdparty/target_file.bzl
|
Jonathan-2287/bazel-deps
|
514c479c1afd3c4c73c6181f977e1066d65ceb8f
|
[
"MIT"
] | 251
|
2016-06-23T03:53:26.000Z
|
2022-03-24T18:18:19.000Z
|
3rdparty/target_file.bzl
|
Jonathan-2287/bazel-deps
|
514c479c1afd3c4c73c6181f977e1066d65ceb8f
|
[
"MIT"
] | 107
|
2016-08-22T06:12:57.000Z
|
2022-02-01T19:18:25.000Z
|
# Do not edit. bazel-deps autogenerates this file from.

# BUILD-file rule templates filled in by
# _build_external_workspace_from_opts_impl via str.format; the
# {name}/{exports}/{jars}/{runtime_deps}/{visibility} placeholders are
# substituted with pre-rendered strings.  The template text itself is
# emitted verbatim into generated BUILD files -- do not reformat it.

# Template for a JVM target with lang == "java".
_JAVA_LIBRARY_TEMPLATE = """
java_library(
name = "{name}",
exports = [
{exports}
],
runtime_deps = [
{runtime_deps}
],
visibility = [
"{visibility}"
]
)\n"""

# Template for a scala target with kind == "import" (has jar files).
_SCALA_IMPORT_TEMPLATE = """
scala_import(
name = "{name}",
exports = [
{exports}
],
jars = [
{jars}
],
runtime_deps = [
{runtime_deps}
],
visibility = [
"{visibility}"
]
)
"""

# Template for a scala target with kind == "library".
_SCALA_LIBRARY_TEMPLATE = """
scala_library(
name = "{name}",
exports = [
{exports}
],
runtime_deps = [
{runtime_deps}
],
visibility = [
"{visibility}"
]
)
"""
def _build_external_workspace_from_opts_impl(ctx):
    # Repository rule implementation: materializes one BUILD file per package
    # directory from the serialized target configs carried in ctx.attr.
    header = ctx.attr.build_header
    sep = ctx.attr.separator

    # Group each target's config list under the BUILD-file path portion of
    # its "path:target" key.
    by_build_file = {}
    for target_key, config in ctx.attr.target_configs.items():
        by_build_file.setdefault(target_key.split(":")[0], []).append(config)

    for build_file, targets in by_build_file.items():
        contents = header + '\n\n'
        for target in targets:
            # Decode the serialized "key<sep>TYPE<sep>value..." entries:
            # "L" marks a list (empty segments dropped), "B" a boolean
            # (accepting "true" or "True"), anything else a plain string.
            attrs = {}
            for raw_entry in target:
                parts = raw_entry.split(sep)
                attr_key = parts[0]
                type_tag = parts[1]
                if type_tag == "L":
                    attrs[attr_key] = [p for p in parts[2:] if p]
                elif type_tag == "B":
                    attrs[attr_key] = parts[2] in ("true", "True")
                else:
                    attrs[attr_key] = parts[2]

            # Pre-render the quoted label lists the templates splice in.
            exports_str = "".join(["\"" + e + "\",\n" for e in attrs.get("exports", [])])
            jars_str = "".join(["\"" + j + "\",\n" for j in attrs.get("jars", [])])
            runtime_deps_str = "".join(["\"" + d + "\",\n" for d in attrs.get("runtimeDeps", [])])

            short_name = attrs["name"].split(":")[1]
            lang = attrs["lang"]
            if lang == "java":
                contents += _JAVA_LIBRARY_TEMPLATE.format(name = short_name, exports=exports_str, runtime_deps=runtime_deps_str, visibility=attrs["visibility"])
            elif lang.startswith("scala") and attrs["kind"] == "import":
                contents += _SCALA_IMPORT_TEMPLATE.format(name = short_name, exports=exports_str, jars=jars_str, runtime_deps=runtime_deps_str, visibility=attrs["visibility"])
            elif lang.startswith("scala") and attrs["kind"] == "library":
                contents += _SCALA_LIBRARY_TEMPLATE.format(name = short_name, exports=exports_str, runtime_deps=runtime_deps_str, visibility=attrs["visibility"])
            else:
                # Unrecognized lang/kind combination: surface it for debugging
                # rather than failing the whole repository rule.
                print(attrs)
        ctx.file(ctx.path(build_file + "/BUILD"), contents, False)
    return None
# Repository rule that writes the generated 3rdparty BUILD files into an
# external workspace. All three attributes are mandatory:
#   target_configs: dict of "path:target" -> serialized config entries
#   separator:      field separator used inside each serialized entry
#   build_header:   text prepended to every generated BUILD file
build_external_workspace_from_opts = repository_rule(
attrs = {
"target_configs": attr.string_list_dict(mandatory = True),
"separator": attr.string(mandatory = True),
"build_header": attr.string(mandatory = True),
},
implementation = _build_external_workspace_from_opts_impl
)
def build_header():
    # Header prepended to every generated BUILD file: load statements for the
    # scala_import and scala_library rules referenced by the templates.
    load_lines = [
        'load("@io_bazel_rules_scala//scala:scala_import.bzl", "scala_import")',
        'load("@io_bazel_rules_scala//scala:scala.bzl", "scala_library")',
    ]
    return "\n".join(load_lines)
def list_target_data_separator():
    # Field separator used when (de)serializing the target config entries.
    return "|" * 3
def list_target_data():
return {
"3rdparty/jvm/com/fasterxml/jackson/core:jackson_annotations": ["lang||||||java","name||||||//3rdparty/jvm/com/fasterxml/jackson/core:jackson_annotations","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/com/fasterxml/jackson/core/jackson_annotations","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/com/fasterxml/jackson/core:jackson_core": ["lang||||||java","name||||||//3rdparty/jvm/com/fasterxml/jackson/core:jackson_core","visibility||||||//visibility:public","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/com/fasterxml/jackson/core/jackson_core","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/com/fasterxml/jackson/core:jackson_databind": ["lang||||||java","name||||||//3rdparty/jvm/com/fasterxml/jackson/core:jackson_databind","visibility||||||//visibility:public","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/com/fasterxml/jackson/core/jackson_databind","runtimeDeps|||L|||//3rdparty/jvm/com/fasterxml/jackson/core:jackson_annotations|||//3rdparty/jvm/com/fasterxml/jackson/core:jackson_core","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/com/fasterxml/jackson/dataformat:jackson_dataformat_yaml": ["lang||||||java","name||||||//3rdparty/jvm/com/fasterxml/jackson/dataformat:jackson_dataformat_yaml","visibility||||||//visibility:public","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/com/fasterxml/jackson/dataformat/jackson_dataformat_yaml","runtimeDeps|||L|||//3rdparty/jvm/com/fasterxml/jackson/core:jackson_core|||//3rdparty/jvm/com/fasterxml/jackson/core:jackson_databind|||//3rdparty/jvm/org/yaml:snakeyaml","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/com/google/guava:guava": ["lang||||||java","name||||||//3rdparty/jvm/com/google/guava:guava","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/com/google/guava/guava","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/commons_codec:commons_codec": ["lang||||||java","name||||||//3rdparty/jvm/commons_codec:commons_codec","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/commons_codec/commons_codec","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/javax/annotation:jsr250_api": ["lang||||||java","name||||||//3rdparty/jvm/javax/annotation:jsr250_api","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/javax/annotation/jsr250_api","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/javax/enterprise:cdi_api": ["lang||||||java","name||||||//3rdparty/jvm/javax/enterprise:cdi_api","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/javax/enterprise/cdi_api","runtimeDeps|||L|||//3rdparty/jvm/javax/annotation:jsr250_api|||//3rdparty/jvm/javax/inject:javax_inject","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/javax/inject:javax_inject": ["lang||||||java","name||||||//3rdparty/jvm/javax/inject:javax_inject","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/javax/inject/javax_inject","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/apache/commons:commons_lang3": ["lang||||||java","name||||||//3rdparty/jvm/org/apache/commons:commons_lang3","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/apache/commons/commons_lang3","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/apache/httpcomponents:httpclient": ["lang||||||java","name||||||//3rdparty/jvm/org/apache/httpcomponents:httpclient","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/apache/httpcomponents/httpclient","runtimeDeps|||L|||//3rdparty/jvm/org/apache/httpcomponents:httpcore|||//3rdparty/jvm/commons_codec:commons_codec","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/apache/httpcomponents:httpcore": ["lang||||||java","name||||||//3rdparty/jvm/org/apache/httpcomponents:httpcore","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/apache/httpcomponents/httpcore","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/apache/maven:maven_aether_provider": ["lang||||||java","name||||||//3rdparty/jvm/org/apache/maven:maven_aether_provider","visibility||||||//visibility:public","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/apache/maven/maven_aether_provider","runtimeDeps|||L|||//3rdparty/jvm/org/apache/maven:maven_model|||//3rdparty/jvm/org/apache/commons:commons_lang3|||//3rdparty/jvm/org/codehaus/plexus:plexus_component_annotations|||//3rdparty/jvm/org/eclipse/aether:aether_spi|||//3rdparty/jvm/org/apache/maven:maven_model_builder|||//3rdparty/jvm/org/codehaus/plexus:plexus_utils|||//3rdparty/jvm/org/eclipse/aether:aether_util|||//3rdparty/jvm/org/eclipse/aether:aether_api|||//3rdparty/jvm/org/eclipse/aether:aether_impl|||//3rdparty/jvm/org/apache/maven:maven_repository_metadata","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/apache/maven:maven_artifact": ["lang||||||java","name||||||//3rdparty/jvm/org/apache/maven:maven_artifact","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/apache/maven/maven_artifact","runtimeDeps|||L|||//3rdparty/jvm/org/codehaus/plexus:plexus_utils|||//3rdparty/jvm/org/apache/commons:commons_lang3","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/apache/maven:maven_builder_support": ["lang||||||java","name||||||//3rdparty/jvm/org/apache/maven:maven_builder_support","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/apache/maven/maven_builder_support","runtimeDeps|||L|||//3rdparty/jvm/org/codehaus/plexus:plexus_utils|||//3rdparty/jvm/org/apache/commons:commons_lang3","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/apache/maven:maven_model": ["lang||||||java","name||||||//3rdparty/jvm/org/apache/maven:maven_model","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/apache/maven/maven_model","runtimeDeps|||L|||//3rdparty/jvm/org/codehaus/plexus:plexus_utils|||//3rdparty/jvm/org/apache/commons:commons_lang3","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/apache/maven:maven_model_builder": ["lang||||||java","name||||||//3rdparty/jvm/org/apache/maven:maven_model_builder","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/apache/maven/maven_model_builder","runtimeDeps|||L|||//3rdparty/jvm/org/apache/maven:maven_model|||//3rdparty/jvm/org/apache/commons:commons_lang3|||//3rdparty/jvm/org/apache/maven:maven_builder_support|||//3rdparty/jvm/com/google/guava:guava|||//3rdparty/jvm/org/codehaus/plexus:plexus_component_annotations|||//3rdparty/jvm/org/codehaus/plexus:plexus_utils|||//3rdparty/jvm/org/apache/maven:maven_artifact|||//3rdparty/jvm/org/codehaus/plexus:plexus_interpolation","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/apache/maven:maven_repository_metadata": ["lang||||||java","name||||||//3rdparty/jvm/org/apache/maven:maven_repository_metadata","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/apache/maven/maven_repository_metadata","runtimeDeps|||L|||//3rdparty/jvm/org/codehaus/plexus:plexus_utils","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/apache/maven:maven_settings": ["lang||||||java","name||||||//3rdparty/jvm/org/apache/maven:maven_settings","visibility||||||//visibility:public","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/apache/maven/maven_settings","runtimeDeps|||L|||//3rdparty/jvm/org/codehaus/plexus:plexus_utils","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/apache/maven:maven_settings_builder": ["lang||||||java","name||||||//3rdparty/jvm/org/apache/maven:maven_settings_builder","visibility||||||//visibility:public","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/apache/maven/maven_settings_builder","runtimeDeps|||L|||//3rdparty/jvm/org/apache/commons:commons_lang3|||//3rdparty/jvm/org/apache/maven:maven_builder_support|||//3rdparty/jvm/org/sonatype/plexus:plexus_sec_dispatcher|||//3rdparty/jvm/org/codehaus/plexus:plexus_component_annotations|||//3rdparty/jvm/org/codehaus/plexus:plexus_utils|||//3rdparty/jvm/org/codehaus/plexus:plexus_interpolation|||//3rdparty/jvm/org/apache/maven:maven_settings","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/codehaus/plexus:plexus_classworlds": ["lang||||||java","name||||||//3rdparty/jvm/org/codehaus/plexus:plexus_classworlds","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/codehaus/plexus/plexus_classworlds","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/codehaus/plexus:plexus_component_annotations": ["lang||||||java","name||||||//3rdparty/jvm/org/codehaus/plexus:plexus_component_annotations","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/codehaus/plexus/plexus_component_annotations","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/codehaus/plexus:plexus_interpolation": ["lang||||||java","name||||||//3rdparty/jvm/org/codehaus/plexus:plexus_interpolation","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/codehaus/plexus/plexus_interpolation","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/codehaus/plexus:plexus_utils": ["lang||||||java","name||||||//3rdparty/jvm/org/codehaus/plexus:plexus_utils","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/codehaus/plexus/plexus_utils","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/eclipse/aether:aether_api": ["lang||||||java","name||||||//3rdparty/jvm/org/eclipse/aether:aether_api","visibility||||||//visibility:public","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/eclipse/aether/aether_api","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/eclipse/aether:aether_connector_basic": ["lang||||||java","name||||||//3rdparty/jvm/org/eclipse/aether:aether_connector_basic","visibility||||||//visibility:public","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/eclipse/aether/aether_connector_basic","runtimeDeps|||L|||//3rdparty/jvm/org/eclipse/aether:aether_api|||//3rdparty/jvm/org/eclipse/aether:aether_spi|||//3rdparty/jvm/org/eclipse/aether:aether_util","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/eclipse/aether:aether_impl": ["lang||||||java","name||||||//3rdparty/jvm/org/eclipse/aether:aether_impl","visibility||||||//visibility:public","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/eclipse/aether/aether_impl","runtimeDeps|||L|||//3rdparty/jvm/org/eclipse/aether:aether_api|||//3rdparty/jvm/org/eclipse/aether:aether_spi|||//3rdparty/jvm/org/eclipse/aether:aether_util","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/eclipse/aether:aether_spi": ["lang||||||java","name||||||//3rdparty/jvm/org/eclipse/aether:aether_spi","visibility||||||//visibility:public","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/eclipse/aether/aether_spi","runtimeDeps|||L|||//3rdparty/jvm/org/eclipse/aether:aether_api","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/eclipse/aether:aether_transport_file": ["lang||||||java","name||||||//3rdparty/jvm/org/eclipse/aether:aether_transport_file","visibility||||||//visibility:public","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/eclipse/aether/aether_transport_file","runtimeDeps|||L|||//3rdparty/jvm/org/eclipse/aether:aether_api|||//3rdparty/jvm/org/eclipse/aether:aether_spi|||//3rdparty/jvm/org/eclipse/aether:aether_util","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/eclipse/aether:aether_transport_http": ["lang||||||java","name||||||//3rdparty/jvm/org/eclipse/aether:aether_transport_http","visibility||||||//visibility:public","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/eclipse/aether/aether_transport_http","runtimeDeps|||L|||//3rdparty/jvm/org/eclipse/aether:aether_spi|||//3rdparty/jvm/org/eclipse/aether:aether_util|||//3rdparty/jvm/org/eclipse/aether:aether_api|||//3rdparty/jvm/org/apache/httpcomponents:httpclient|||//3rdparty/jvm/org/slf4j:jcl_over_slf4j","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/eclipse/aether:aether_util": ["lang||||||java","name||||||//3rdparty/jvm/org/eclipse/aether:aether_util","visibility||||||//visibility:public","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/eclipse/aether/aether_util","runtimeDeps|||L|||//3rdparty/jvm/org/eclipse/aether:aether_api","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/eclipse/sisu:org_eclipse_sisu_inject": ["lang||||||java","name||||||//3rdparty/jvm/org/eclipse/sisu:org_eclipse_sisu_inject","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/eclipse/sisu/org_eclipse_sisu_inject","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/eclipse/sisu:org_eclipse_sisu_plexus": ["lang||||||java","name||||||//3rdparty/jvm/org/eclipse/sisu:org_eclipse_sisu_plexus","visibility||||||//visibility:public","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/eclipse/sisu/org_eclipse_sisu_plexus","runtimeDeps|||L|||//3rdparty/jvm/org/codehaus/plexus:plexus_component_annotations|||//3rdparty/jvm/org/codehaus/plexus:plexus_utils|||//3rdparty/jvm/org/eclipse/sisu:org_eclipse_sisu_inject|||//3rdparty/jvm/org/codehaus/plexus:plexus_classworlds|||//3rdparty/jvm/javax/enterprise:cdi_api","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/scala_sbt:test_interface": ["lang||||||java","name||||||//3rdparty/jvm/org/scala_sbt:test_interface","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/scala_sbt/test_interface","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/slf4j:jcl_over_slf4j": ["lang||||||java","name||||||//3rdparty/jvm/org/slf4j:jcl_over_slf4j","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/slf4j/jcl_over_slf4j","runtimeDeps|||L|||//3rdparty/jvm/org/slf4j:slf4j_api","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/slf4j:slf4j_api": ["lang||||||java","name||||||//3rdparty/jvm/org/slf4j:slf4j_api","visibility||||||//visibility:public","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/slf4j/slf4j_api","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/slf4j:slf4j_simple": ["lang||||||java","name||||||//3rdparty/jvm/org/slf4j:slf4j_simple","visibility||||||//visibility:public","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/slf4j/slf4j_simple","runtimeDeps|||L|||//3rdparty/jvm/org/slf4j:slf4j_api","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/sonatype/plexus:plexus_cipher": ["lang||||||java","name||||||//3rdparty/jvm/org/sonatype/plexus:plexus_cipher","visibility||||||//visibility:public","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/sonatype/plexus/plexus_cipher","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/sonatype/plexus:plexus_sec_dispatcher": ["lang||||||java","name||||||//3rdparty/jvm/org/sonatype/plexus:plexus_sec_dispatcher","visibility||||||//visibility:public","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/sonatype/plexus/plexus_sec_dispatcher","runtimeDeps|||L|||//3rdparty/jvm/org/codehaus/plexus:plexus_utils|||//3rdparty/jvm/org/sonatype/plexus:plexus_cipher","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/yaml:snakeyaml": ["lang||||||java","name||||||//3rdparty/jvm/org/yaml:snakeyaml","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/yaml/snakeyaml","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/scala_lang:scala_compiler": ["lang||||||scala/unmangled:2.11.8","name||||||//3rdparty/jvm/org/scala_lang:scala_compiler","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||@io_bazel_rules_scala_scala_compiler//:io_bazel_rules_scala_scala_compiler","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/scala_lang:scala_library": ["lang||||||scala/unmangled:2.11.8","name||||||//3rdparty/jvm/org/scala_lang:scala_library","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||@io_bazel_rules_scala_scala_library//:io_bazel_rules_scala_scala_library","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/scala_lang:scala_reflect": ["lang||||||scala/unmangled:2.11.8","name||||||//3rdparty/jvm/org/scala_lang:scala_reflect","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||@io_bazel_rules_scala_scala_reflect//:io_bazel_rules_scala_scala_reflect","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/com/chuusai:shapeless": ["lang||||||scala:2.11.8","name||||||//3rdparty/jvm/com/chuusai:shapeless","visibility||||||//visibility:public","kind||||||import","deps|||L|||","jars|||L|||//external:jar/com/chuusai/shapeless_2_11","sources|||L|||","exports|||L|||","runtimeDeps|||L|||//3rdparty/jvm/org/scala_lang:scala_library|||//3rdparty/jvm/org/typelevel:macro_compat","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/com/github/alexarchambault:argonaut_shapeless_6_2": ["lang||||||scala:2.11.8","name||||||//3rdparty/jvm/com/github/alexarchambault:argonaut_shapeless_6_2","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||import","deps|||L|||","jars|||L|||//external:jar/com/github/alexarchambault/argonaut_shapeless_6_2_2_11","sources|||L|||","exports|||L|||","runtimeDeps|||L|||//3rdparty/jvm/org/scala_lang:scala_library|||//3rdparty/jvm/io/argonaut:argonaut|||//3rdparty/jvm/com/chuusai:shapeless","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/com/monovore:decline": ["lang||||||scala:2.11.8","name||||||//3rdparty/jvm/com/monovore:decline","visibility||||||//visibility:public","kind||||||import","deps|||L|||","jars|||L|||//external:jar/com/monovore/decline_2_11","sources|||L|||","exports|||L|||","runtimeDeps|||L|||//3rdparty/jvm/org/scala_lang:scala_library|||//3rdparty/jvm/org/typelevel:cats_core","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/io/argonaut:argonaut": ["lang||||||scala:2.11.8","name||||||//3rdparty/jvm/io/argonaut:argonaut","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||import","deps|||L|||","jars|||L|||//external:jar/io/argonaut/argonaut_2_11","sources|||L|||","exports|||L|||","runtimeDeps|||L|||//3rdparty/jvm/org/scala_lang:scala_reflect","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/io/circe:circe_core": ["lang||||||scala:2.11.8","name||||||//3rdparty/jvm/io/circe:circe_core","visibility||||||//visibility:public","kind||||||import","deps|||L|||","jars|||L|||//external:jar/io/circe/circe_core_2_11","sources|||L|||","exports|||L|||","runtimeDeps|||L|||//3rdparty/jvm/org/scala_lang:scala_library|||//3rdparty/jvm/io/circe:circe_numbers|||//3rdparty/jvm/org/typelevel:cats_core","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/io/circe:circe_generic": ["lang||||||scala:2.11.8","name||||||//3rdparty/jvm/io/circe:circe_generic","visibility||||||//visibility:public","kind||||||import","deps|||L|||","jars|||L|||//external:jar/io/circe/circe_generic_2_11","sources|||L|||","exports|||L|||//3rdparty/jvm/com/chuusai:shapeless|||//3rdparty/jvm/org/typelevel:cats_core|||//3rdparty/jvm/org/typelevel:cats_kernel|||//3rdparty/jvm/org/typelevel:macro_compat","runtimeDeps|||L|||//3rdparty/jvm/org/scala_lang:scala_library|||//3rdparty/jvm/io/circe:circe_core","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/io/circe:circe_jackson25": ["lang||||||scala:2.11.8","name||||||//3rdparty/jvm/io/circe:circe_jackson25","visibility||||||//visibility:public","kind||||||import","deps|||L|||","jars|||L|||//external:jar/io/circe/circe_jackson25_2_11","sources|||L|||","exports|||L|||","runtimeDeps|||L|||//3rdparty/jvm/org/scala_lang:scala_library|||//3rdparty/jvm/io/circe:circe_core|||//3rdparty/jvm/com/fasterxml/jackson/core:jackson_core|||//3rdparty/jvm/com/fasterxml/jackson/core:jackson_databind","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/io/circe:circe_jawn": ["lang||||||scala:2.11.8","name||||||//3rdparty/jvm/io/circe:circe_jawn","visibility||||||//visibility:public","kind||||||import","deps|||L|||","jars|||L|||//external:jar/io/circe/circe_jawn_2_11","sources|||L|||","exports|||L|||","runtimeDeps|||L|||//3rdparty/jvm/org/scala_lang:scala_library|||//3rdparty/jvm/io/circe:circe_core|||//3rdparty/jvm/org/spire_math:jawn_parser","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/io/circe:circe_numbers": ["lang||||||scala:2.11.8","name||||||//3rdparty/jvm/io/circe:circe_numbers","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||import","deps|||L|||","jars|||L|||//external:jar/io/circe/circe_numbers_2_11","sources|||L|||","exports|||L|||","runtimeDeps|||L|||//3rdparty/jvm/org/scala_lang:scala_library","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/io/get_coursier:coursier": ["lang||||||scala:2.11.8","name||||||//3rdparty/jvm/io/get_coursier:coursier","visibility||||||//visibility:public","kind||||||import","deps|||L|||","jars|||L|||//external:jar/io/get_coursier/coursier_2_11","sources|||L|||","exports|||L|||","runtimeDeps|||L|||//3rdparty/jvm/io/get_coursier:coursier_core|||//3rdparty/jvm/io/get_coursier:coursier_cache|||//3rdparty/jvm/org/scala_lang:scala_library|||//3rdparty/jvm/com/github/alexarchambault:argonaut_shapeless_6_2","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/io/get_coursier:coursier_cache": ["lang||||||scala:2.11.8","name||||||//3rdparty/jvm/io/get_coursier:coursier_cache","visibility||||||//visibility:public","kind||||||import","deps|||L|||","jars|||L|||//external:jar/io/get_coursier/coursier_cache_2_11","sources|||L|||","exports|||L|||","runtimeDeps|||L|||//3rdparty/jvm/io/get_coursier:coursier_util|||//3rdparty/jvm/org/scala_lang:scala_library","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/io/get_coursier:coursier_core": ["lang||||||scala:2.11.8","name||||||//3rdparty/jvm/io/get_coursier:coursier_core","visibility||||||//visibility:public","kind||||||import","deps|||L|||","jars|||L|||//external:jar/io/get_coursier/coursier_core_2_11","sources|||L|||","exports|||L|||","runtimeDeps|||L|||//3rdparty/jvm/io/get_coursier:coursier_util|||//3rdparty/jvm/org/scala_lang:scala_library|||//3rdparty/jvm/org/scala_lang/modules:scala_xml","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/io/get_coursier:coursier_util": ["lang||||||scala:2.11.8","name||||||//3rdparty/jvm/io/get_coursier:coursier_util","visibility||||||//visibility:public","kind||||||import","deps|||L|||","jars|||L|||//external:jar/io/get_coursier/coursier_util_2_11","sources|||L|||","exports|||L|||","runtimeDeps|||L|||//3rdparty/jvm/org/scala_lang:scala_library","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/scala_lang/modules:scala_parser_combinators": ["lang||||||scala:2.11.8","name||||||//3rdparty/jvm/org/scala_lang/modules:scala_parser_combinators","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||@io_bazel_rules_scala_scala_parser_combinators//:io_bazel_rules_scala_scala_parser_combinators","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/scala_lang/modules:scala_xml": ["lang||||||scala:2.11.8","name||||||//3rdparty/jvm/org/scala_lang/modules:scala_xml","visibility||||||//visibility:public","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||@io_bazel_rules_scala_scala_xml//:io_bazel_rules_scala_scala_xml","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/scalacheck:scalacheck": ["lang||||||scala:2.11.8","name||||||//3rdparty/jvm/org/scalacheck:scalacheck","visibility||||||//visibility:public","kind||||||import","deps|||L|||","jars|||L|||//external:jar/org/scalacheck/scalacheck_2_11","sources|||L|||","exports|||L|||","runtimeDeps|||L|||//3rdparty/jvm/org/scala_lang:scala_library|||//3rdparty/jvm/org/scala_sbt:test_interface","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/scalactic:scalactic": ["lang||||||scala:2.11.8","name||||||//3rdparty/jvm/org/scalactic:scalactic","visibility||||||//visibility:public","kind||||||import","deps|||L|||","jars|||L|||//external:jar/org/scalactic/scalactic_2_11","sources|||L|||","exports|||L|||","runtimeDeps|||L|||//3rdparty/jvm/org/scala_lang:scala_library|||//3rdparty/jvm/org/scala_lang:scala_reflect","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/scalatest:scalatest": ["lang||||||scala:2.11.8","name||||||//3rdparty/jvm/org/scalatest:scalatest","visibility||||||//visibility:public","kind||||||import","deps|||L|||","jars|||L|||//external:jar/org/scalatest/scalatest_2_11","sources|||L|||","exports|||L|||//3rdparty/jvm/org/scalactic:scalactic","runtimeDeps|||L|||//3rdparty/jvm/org/scala_lang:scala_library|||//3rdparty/jvm/org/scala_lang:scala_reflect|||//3rdparty/jvm/org/scala_lang/modules:scala_xml","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/spire_math:jawn_parser": ["lang||||||scala:2.11.8","name||||||//3rdparty/jvm/org/spire_math:jawn_parser","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||import","deps|||L|||","jars|||L|||//external:jar/org/spire_math/jawn_parser_2_11","sources|||L|||","exports|||L|||","runtimeDeps|||L|||//3rdparty/jvm/org/scala_lang:scala_library","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/spire_math:kind_projector": ["lang||||||scala:2.11.8","name||||||//3rdparty/jvm/org/spire_math:kind_projector","visibility||||||//visibility:public","kind||||||import","deps|||L|||","jars|||L|||//external:jar/org/spire_math/kind_projector_2_11","sources|||L|||","exports|||L|||","runtimeDeps|||L|||//3rdparty/jvm/org/scala_lang:scala_compiler|||//3rdparty/jvm/org/scala_lang:scala_library","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/typelevel:cats_core": ["lang||||||scala:2.11.8","name||||||//3rdparty/jvm/org/typelevel:cats_core","visibility||||||//visibility:public","kind||||||import","deps|||L|||","jars|||L|||//external:jar/org/typelevel/cats_core_2_11","sources|||L|||","exports|||L|||//3rdparty/jvm/org/typelevel:cats_kernel","runtimeDeps|||L|||//3rdparty/jvm/org/scala_lang:scala_library|||//3rdparty/jvm/org/typelevel:cats_macros|||//3rdparty/jvm/org/typelevel:machinist","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/typelevel:cats_free": ["lang||||||scala:2.11.8","name||||||//3rdparty/jvm/org/typelevel:cats_free","visibility||||||//visibility:public","kind||||||import","deps|||L|||","jars|||L|||//external:jar/org/typelevel/cats_free_2_11","sources|||L|||","exports|||L|||","runtimeDeps|||L|||//3rdparty/jvm/org/scala_lang:scala_library|||//3rdparty/jvm/org/typelevel:cats_macros|||//3rdparty/jvm/org/typelevel:cats_core|||//3rdparty/jvm/org/typelevel:machinist","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/typelevel:cats_kernel": ["lang||||||scala:2.11.8","name||||||//3rdparty/jvm/org/typelevel:cats_kernel","visibility||||||//visibility:public","kind||||||import","deps|||L|||","jars|||L|||//external:jar/org/typelevel/cats_kernel_2_11","sources|||L|||","exports|||L|||","runtimeDeps|||L|||//3rdparty/jvm/org/scala_lang:scala_library","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/typelevel:cats_macros": ["lang||||||scala:2.11.8","name||||||//3rdparty/jvm/org/typelevel:cats_macros","visibility||||||//visibility:public","kind||||||import","deps|||L|||","jars|||L|||//external:jar/org/typelevel/cats_macros_2_11","sources|||L|||","exports|||L|||","runtimeDeps|||L|||//3rdparty/jvm/org/scala_lang:scala_library|||//3rdparty/jvm/org/typelevel:machinist","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/typelevel:machinist": ["lang||||||scala:2.11.8","name||||||//3rdparty/jvm/org/typelevel:machinist","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||import","deps|||L|||","jars|||L|||//external:jar/org/typelevel/machinist_2_11","sources|||L|||","exports|||L|||","runtimeDeps|||L|||//3rdparty/jvm/org/scala_lang:scala_reflect|||//3rdparty/jvm/org/scala_lang:scala_library","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/typelevel:macro_compat": ["lang||||||scala:2.11.8","name||||||//3rdparty/jvm/org/typelevel:macro_compat","visibility||||||//visibility:public","kind||||||import","deps|||L|||","jars|||L|||//external:jar/org/typelevel/macro_compat_2_11","sources|||L|||","exports|||L|||","runtimeDeps|||L|||//3rdparty/jvm/org/scala_lang:scala_library","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
"3rdparty/jvm/org/typelevel:paiges_core": ["lang||||||scala:2.11.8","name||||||//3rdparty/jvm/org/typelevel:paiges_core","visibility||||||//visibility:public","kind||||||import","deps|||L|||","jars|||L|||//external:jar/org/typelevel/paiges_core_2_11","sources|||L|||","exports|||L|||","runtimeDeps|||L|||//3rdparty/jvm/org/scala_lang:scala_library","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"]
}
def build_external_workspace(name):
return build_external_workspace_from_opts(name = name, target_configs = list_target_data(), separator = list_target_data_separator(), build_header = build_header())
| 198.95
| 950
| 0.68007
| 4,930
| 39,790
| 5.322718
| 0.039148
| 0.125338
| 0.107237
| 0.026676
| 0.939332
| 0.914409
| 0.872375
| 0.827674
| 0.801723
| 0.750048
| 0
| 0.014555
| 0.026162
| 39,790
| 199
| 951
| 199.949749
| 0.662649
| 0.001332
| 0
| 0.180791
| 1
| 0.19774
| 0.845774
| 0.681658
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028249
| false
| 0
| 0.169492
| 0.022599
| 0.225989
| 0.00565
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
f30d1c1344197ed61ecfb7de4c673a791a992e5f
| 66
|
py
|
Python
|
custom_addons/sales_practice/models/__init__.py
|
MonwarAdeeb/Bista_Solutions
|
d261e31f21ff03b2cc82b0c26d680036dca6d799
|
[
"MIT"
] | null | null | null |
custom_addons/sales_practice/models/__init__.py
|
MonwarAdeeb/Bista_Solutions
|
d261e31f21ff03b2cc82b0c26d680036dca6d799
|
[
"MIT"
] | null | null | null |
custom_addons/sales_practice/models/__init__.py
|
MonwarAdeeb/Bista_Solutions
|
d261e31f21ff03b2cc82b0c26d680036dca6d799
|
[
"MIT"
] | null | null | null |
from . import sales_practice
from . import sales_practice_inherit
| 22
| 36
| 0.848485
| 9
| 66
| 5.888889
| 0.555556
| 0.377358
| 0.566038
| 0.867925
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 66
| 2
| 37
| 33
| 0.913793
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
b83b6af738618f96f45710588f3cb85d0c2e4fef
| 92
|
py
|
Python
|
nnutil/visual/__init__.py
|
aroig/nnutil
|
88df41ee89f592a28c1661ee8837dd8e8ca42cf3
|
[
"BSD-3-Clause"
] | null | null | null |
nnutil/visual/__init__.py
|
aroig/nnutil
|
88df41ee89f592a28c1661ee8837dd8e8ca42cf3
|
[
"BSD-3-Clause"
] | null | null | null |
nnutil/visual/__init__.py
|
aroig/nnutil
|
88df41ee89f592a28c1661ee8837dd8e8ca42cf3
|
[
"BSD-3-Clause"
] | null | null | null |
from .plot_sample import *
# For backwards compatibility
from ..util.print_sample import *
| 18.4
| 33
| 0.782609
| 12
| 92
| 5.833333
| 0.75
| 0.342857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141304
| 92
| 4
| 34
| 23
| 0.886076
| 0.293478
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 7
|
b867d31001a9a733b1572df3f9738ca8ee119eae
| 27,657
|
py
|
Python
|
tests/test_models/test_heads.py
|
kartikwar/Swin-Transformer-Semantic-Segmentation
|
d9b33fbd30d8572a8806754a86c785b6342c0b2a
|
[
"Apache-2.0"
] | 7
|
2021-05-22T09:02:06.000Z
|
2021-09-25T16:48:00.000Z
|
tests/test_models/test_heads.py
|
kartikwar/Swin-Transformer-Semantic-Segmentation
|
d9b33fbd30d8572a8806754a86c785b6342c0b2a
|
[
"Apache-2.0"
] | null | null | null |
tests/test_models/test_heads.py
|
kartikwar/Swin-Transformer-Semantic-Segmentation
|
d9b33fbd30d8572a8806754a86c785b6342c0b2a
|
[
"Apache-2.0"
] | null | null | null |
from unittest.mock import patch
import pytest
import torch
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmcv.utils import ConfigDict
from mmcv.utils.parrots_wrapper import SyncBatchNorm
from mmseg.models.decode_heads import (ANNHead, APCHead, ASPPHead, CCHead,
DAHead, DepthwiseSeparableASPPHead,
DepthwiseSeparableFCNHead, DMHead,
DNLHead, EMAHead, EncHead, FCNHead,
GCHead, LRASPPHead, NLHead, OCRHead,
PointHead, PSAHead, PSPHead, UPerHead)
from mmseg.models.decode_heads.decode_head import BaseDecodeHead
def _conv_has_norm(module, sync_bn):
for m in module.modules():
if isinstance(m, ConvModule):
if not m.with_norm:
return False
if sync_bn:
if not isinstance(m.bn, SyncBatchNorm):
return False
return True
def to_cuda(module, data):
module = module.cuda()
if isinstance(data, list):
for i in range(len(data)):
data[i] = data[i].cuda()
return module, data
@patch.multiple(BaseDecodeHead, __abstractmethods__=set())
def test_decode_head():
with pytest.raises(AssertionError):
# default input_transform doesn't accept multiple inputs
BaseDecodeHead([32, 16], 16, num_classes=19)
with pytest.raises(AssertionError):
# default input_transform doesn't accept multiple inputs
BaseDecodeHead(32, 16, num_classes=19, in_index=[-1, -2])
with pytest.raises(AssertionError):
# supported mode is resize_concat only
BaseDecodeHead(32, 16, num_classes=19, input_transform='concat')
with pytest.raises(AssertionError):
# in_channels should be list|tuple
BaseDecodeHead(32, 16, num_classes=19, input_transform='resize_concat')
with pytest.raises(AssertionError):
# in_index should be list|tuple
BaseDecodeHead([32],
16,
in_index=-1,
num_classes=19,
input_transform='resize_concat')
with pytest.raises(AssertionError):
# len(in_index) should equal len(in_channels)
BaseDecodeHead([32, 16],
16,
num_classes=19,
in_index=[-1],
input_transform='resize_concat')
# test default dropout
head = BaseDecodeHead(32, 16, num_classes=19)
assert hasattr(head, 'dropout') and head.dropout.p == 0.1
# test set dropout
head = BaseDecodeHead(32, 16, num_classes=19, dropout_ratio=0.2)
assert hasattr(head, 'dropout') and head.dropout.p == 0.2
# test no input_transform
inputs = [torch.randn(1, 32, 45, 45)]
head = BaseDecodeHead(32, 16, num_classes=19)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
assert head.in_channels == 32
assert head.input_transform is None
transformed_inputs = head._transform_inputs(inputs)
assert transformed_inputs.shape == (1, 32, 45, 45)
# test input_transform = resize_concat
inputs = [torch.randn(1, 32, 45, 45), torch.randn(1, 16, 21, 21)]
head = BaseDecodeHead([32, 16],
16,
num_classes=19,
in_index=[0, 1],
input_transform='resize_concat')
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
assert head.in_channels == 48
assert head.input_transform == 'resize_concat'
transformed_inputs = head._transform_inputs(inputs)
assert transformed_inputs.shape == (1, 48, 45, 45)
def test_fcn_head():
with pytest.raises(AssertionError):
# num_convs must be not less than 0
FCNHead(num_classes=19, num_convs=-1)
# test no norm_cfg
head = FCNHead(in_channels=32, channels=16, num_classes=19)
for m in head.modules():
if isinstance(m, ConvModule):
assert not m.with_norm
# test with norm_cfg
head = FCNHead(
in_channels=32,
channels=16,
num_classes=19,
norm_cfg=dict(type='BN'))
for m in head.modules():
if isinstance(m, ConvModule):
assert m.with_norm and isinstance(m.bn, SyncBatchNorm)
# test concat_input=False
inputs = [torch.randn(1, 32, 45, 45)]
head = FCNHead(
in_channels=32, channels=16, num_classes=19, concat_input=False)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
assert len(head.convs) == 2
assert not head.concat_input and not hasattr(head, 'conv_cat')
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 45, 45)
# test concat_input=True
inputs = [torch.randn(1, 32, 45, 45)]
head = FCNHead(
in_channels=32, channels=16, num_classes=19, concat_input=True)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
assert len(head.convs) == 2
assert head.concat_input
assert head.conv_cat.in_channels == 48
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 45, 45)
# test kernel_size=3
inputs = [torch.randn(1, 32, 45, 45)]
head = FCNHead(in_channels=32, channels=16, num_classes=19)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
for i in range(len(head.convs)):
assert head.convs[i].kernel_size == (3, 3)
assert head.convs[i].padding == 1
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 45, 45)
# test kernel_size=1
inputs = [torch.randn(1, 32, 45, 45)]
head = FCNHead(in_channels=32, channels=16, num_classes=19, kernel_size=1)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
for i in range(len(head.convs)):
assert head.convs[i].kernel_size == (1, 1)
assert head.convs[i].padding == 0
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 45, 45)
# test num_conv
inputs = [torch.randn(1, 32, 45, 45)]
head = FCNHead(in_channels=32, channels=16, num_classes=19, num_convs=1)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
assert len(head.convs) == 1
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 45, 45)
# test num_conv = 0
inputs = [torch.randn(1, 32, 45, 45)]
head = FCNHead(
in_channels=32,
channels=32,
num_classes=19,
num_convs=0,
concat_input=False)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
assert isinstance(head.convs, torch.nn.Identity)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 45, 45)
def test_psp_head():
with pytest.raises(AssertionError):
# pool_scales must be list|tuple
PSPHead(in_channels=32, channels=16, num_classes=19, pool_scales=1)
# test no norm_cfg
head = PSPHead(in_channels=32, channels=16, num_classes=19)
assert not _conv_has_norm(head, sync_bn=False)
# test with norm_cfg
head = PSPHead(
in_channels=32,
channels=16,
num_classes=19,
norm_cfg=dict(type='BN'))
assert _conv_has_norm(head, sync_bn=True)
inputs = [torch.randn(1, 32, 45, 45)]
head = PSPHead(
in_channels=32, channels=16, num_classes=19, pool_scales=(1, 2, 3))
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
assert head.psp_modules[0][0].output_size == 1
assert head.psp_modules[1][0].output_size == 2
assert head.psp_modules[2][0].output_size == 3
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 45, 45)
def test_apc_head():
with pytest.raises(AssertionError):
# pool_scales must be list|tuple
APCHead(in_channels=32, channels=16, num_classes=19, pool_scales=1)
# test no norm_cfg
head = APCHead(in_channels=32, channels=16, num_classes=19)
assert not _conv_has_norm(head, sync_bn=False)
# test with norm_cfg
head = APCHead(
in_channels=32,
channels=16,
num_classes=19,
norm_cfg=dict(type='BN'))
assert _conv_has_norm(head, sync_bn=True)
# fusion=True
inputs = [torch.randn(1, 32, 45, 45)]
head = APCHead(
in_channels=32,
channels=16,
num_classes=19,
pool_scales=(1, 2, 3),
fusion=True)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
assert head.fusion is True
assert head.acm_modules[0].pool_scale == 1
assert head.acm_modules[1].pool_scale == 2
assert head.acm_modules[2].pool_scale == 3
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 45, 45)
# fusion=False
inputs = [torch.randn(1, 32, 45, 45)]
head = APCHead(
in_channels=32,
channels=16,
num_classes=19,
pool_scales=(1, 2, 3),
fusion=False)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
assert head.fusion is False
assert head.acm_modules[0].pool_scale == 1
assert head.acm_modules[1].pool_scale == 2
assert head.acm_modules[2].pool_scale == 3
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 45, 45)
def test_dm_head():
with pytest.raises(AssertionError):
# filter_sizes must be list|tuple
DMHead(in_channels=32, channels=16, num_classes=19, filter_sizes=1)
# test no norm_cfg
head = DMHead(in_channels=32, channels=16, num_classes=19)
assert not _conv_has_norm(head, sync_bn=False)
# test with norm_cfg
head = DMHead(
in_channels=32,
channels=16,
num_classes=19,
norm_cfg=dict(type='BN'))
assert _conv_has_norm(head, sync_bn=True)
# fusion=True
inputs = [torch.randn(1, 32, 45, 45)]
head = DMHead(
in_channels=32,
channels=16,
num_classes=19,
filter_sizes=(1, 3, 5),
fusion=True)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
assert head.fusion is True
assert head.dcm_modules[0].filter_size == 1
assert head.dcm_modules[1].filter_size == 3
assert head.dcm_modules[2].filter_size == 5
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 45, 45)
# fusion=False
inputs = [torch.randn(1, 32, 45, 45)]
head = DMHead(
in_channels=32,
channels=16,
num_classes=19,
filter_sizes=(1, 3, 5),
fusion=False)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
assert head.fusion is False
assert head.dcm_modules[0].filter_size == 1
assert head.dcm_modules[1].filter_size == 3
assert head.dcm_modules[2].filter_size == 5
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 45, 45)
def test_aspp_head():
with pytest.raises(AssertionError):
# pool_scales must be list|tuple
ASPPHead(in_channels=32, channels=16, num_classes=19, dilations=1)
# test no norm_cfg
head = ASPPHead(in_channels=32, channels=16, num_classes=19)
assert not _conv_has_norm(head, sync_bn=False)
# test with norm_cfg
head = ASPPHead(
in_channels=32,
channels=16,
num_classes=19,
norm_cfg=dict(type='BN'))
assert _conv_has_norm(head, sync_bn=True)
inputs = [torch.randn(1, 32, 45, 45)]
head = ASPPHead(
in_channels=32, channels=16, num_classes=19, dilations=(1, 12, 24))
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
assert head.aspp_modules[0].conv.dilation == (1, 1)
assert head.aspp_modules[1].conv.dilation == (12, 12)
assert head.aspp_modules[2].conv.dilation == (24, 24)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 45, 45)
def test_psa_head():
with pytest.raises(AssertionError):
# psa_type must be in 'bi-direction', 'collect', 'distribute'
PSAHead(
in_channels=32,
channels=16,
num_classes=19,
mask_size=(39, 39),
psa_type='gather')
# test no norm_cfg
head = PSAHead(
in_channels=32, channels=16, num_classes=19, mask_size=(39, 39))
assert not _conv_has_norm(head, sync_bn=False)
# test with norm_cfg
head = PSAHead(
in_channels=32,
channels=16,
num_classes=19,
mask_size=(39, 39),
norm_cfg=dict(type='BN'))
assert _conv_has_norm(head, sync_bn=True)
# test 'bi-direction' psa_type
inputs = [torch.randn(1, 32, 39, 39)]
head = PSAHead(
in_channels=32, channels=16, num_classes=19, mask_size=(39, 39))
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 39, 39)
# test 'bi-direction' psa_type, shrink_factor=1
inputs = [torch.randn(1, 32, 39, 39)]
head = PSAHead(
in_channels=32,
channels=16,
num_classes=19,
mask_size=(39, 39),
shrink_factor=1)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 39, 39)
# test 'bi-direction' psa_type with soft_max
inputs = [torch.randn(1, 32, 39, 39)]
head = PSAHead(
in_channels=32,
channels=16,
num_classes=19,
mask_size=(39, 39),
psa_softmax=True)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 39, 39)
# test 'collect' psa_type
inputs = [torch.randn(1, 32, 39, 39)]
head = PSAHead(
in_channels=32,
channels=16,
num_classes=19,
mask_size=(39, 39),
psa_type='collect')
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 39, 39)
# test 'collect' psa_type, shrink_factor=1
inputs = [torch.randn(1, 32, 39, 39)]
head = PSAHead(
in_channels=32,
channels=16,
num_classes=19,
mask_size=(39, 39),
shrink_factor=1,
psa_type='collect')
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 39, 39)
# test 'collect' psa_type, shrink_factor=1, compact=True
inputs = [torch.randn(1, 32, 39, 39)]
head = PSAHead(
in_channels=32,
channels=16,
num_classes=19,
mask_size=(39, 39),
psa_type='collect',
shrink_factor=1,
compact=True)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 39, 39)
# test 'distribute' psa_type
inputs = [torch.randn(1, 32, 39, 39)]
head = PSAHead(
in_channels=32,
channels=16,
num_classes=19,
mask_size=(39, 39),
psa_type='distribute')
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 39, 39)
def test_gc_head():
head = GCHead(in_channels=32, channels=16, num_classes=19)
assert len(head.convs) == 2
assert hasattr(head, 'gc_block')
inputs = [torch.randn(1, 32, 45, 45)]
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 45, 45)
def test_nl_head():
head = NLHead(in_channels=32, channels=16, num_classes=19)
assert len(head.convs) == 2
assert hasattr(head, 'nl_block')
inputs = [torch.randn(1, 32, 45, 45)]
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 45, 45)
def test_cc_head():
head = CCHead(in_channels=32, channels=16, num_classes=19)
assert len(head.convs) == 2
assert hasattr(head, 'cca')
if not torch.cuda.is_available():
pytest.skip('CCHead requires CUDA')
inputs = [torch.randn(1, 32, 45, 45)]
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 45, 45)
def test_uper_head():
with pytest.raises(AssertionError):
# fpn_in_channels must be list|tuple
UPerHead(in_channels=32, channels=16, num_classes=19)
# test no norm_cfg
head = UPerHead(
in_channels=[32, 16], channels=16, num_classes=19, in_index=[-2, -1])
assert not _conv_has_norm(head, sync_bn=False)
# test with norm_cfg
head = UPerHead(
in_channels=[32, 16],
channels=16,
num_classes=19,
norm_cfg=dict(type='BN'),
in_index=[-2, -1])
assert _conv_has_norm(head, sync_bn=True)
inputs = [torch.randn(1, 32, 45, 45), torch.randn(1, 16, 21, 21)]
head = UPerHead(
in_channels=[32, 16], channels=16, num_classes=19, in_index=[-2, -1])
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 45, 45)
def test_ann_head():
inputs = [torch.randn(1, 16, 45, 45), torch.randn(1, 32, 21, 21)]
head = ANNHead(
in_channels=[16, 32],
channels=16,
num_classes=19,
in_index=[-2, -1],
project_channels=8)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 21, 21)
def test_da_head():
inputs = [torch.randn(1, 32, 45, 45)]
head = DAHead(in_channels=32, channels=16, num_classes=19, pam_channels=8)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert isinstance(outputs, tuple) and len(outputs) == 3
for output in outputs:
assert output.shape == (1, head.num_classes, 45, 45)
test_output = head.forward_test(inputs, None, None)
assert test_output.shape == (1, head.num_classes, 45, 45)
def test_ocr_head():
inputs = [torch.randn(1, 32, 45, 45)]
ocr_head = OCRHead(
in_channels=32, channels=16, num_classes=19, ocr_channels=8)
fcn_head = FCNHead(in_channels=32, channels=16, num_classes=19)
if torch.cuda.is_available():
head, inputs = to_cuda(ocr_head, inputs)
head, inputs = to_cuda(fcn_head, inputs)
prev_output = fcn_head(inputs)
output = ocr_head(inputs, prev_output)
assert output.shape == (1, ocr_head.num_classes, 45, 45)
def test_enc_head():
# with se_loss, w.o. lateral
inputs = [torch.randn(1, 32, 21, 21)]
head = EncHead(
in_channels=[32], channels=16, num_classes=19, in_index=[-1])
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert isinstance(outputs, tuple) and len(outputs) == 2
assert outputs[0].shape == (1, head.num_classes, 21, 21)
assert outputs[1].shape == (1, head.num_classes)
# w.o se_loss, w.o. lateral
inputs = [torch.randn(1, 32, 21, 21)]
head = EncHead(
in_channels=[32],
channels=16,
use_se_loss=False,
num_classes=19,
in_index=[-1])
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 21, 21)
# with se_loss, with lateral
inputs = [torch.randn(1, 16, 45, 45), torch.randn(1, 32, 21, 21)]
head = EncHead(
in_channels=[16, 32],
channels=16,
add_lateral=True,
num_classes=19,
in_index=[-2, -1])
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert isinstance(outputs, tuple) and len(outputs) == 2
assert outputs[0].shape == (1, head.num_classes, 21, 21)
assert outputs[1].shape == (1, head.num_classes)
test_output = head.forward_test(inputs, None, None)
assert test_output.shape == (1, head.num_classes, 21, 21)
def test_dw_aspp_head():
# test w.o. c1
inputs = [torch.randn(1, 32, 45, 45)]
head = DepthwiseSeparableASPPHead(
c1_in_channels=0,
c1_channels=0,
in_channels=32,
channels=16,
num_classes=19,
dilations=(1, 12, 24))
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
assert head.c1_bottleneck is None
assert head.aspp_modules[0].conv.dilation == (1, 1)
assert head.aspp_modules[1].depthwise_conv.dilation == (12, 12)
assert head.aspp_modules[2].depthwise_conv.dilation == (24, 24)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 45, 45)
# test with c1
inputs = [torch.randn(1, 8, 45, 45), torch.randn(1, 32, 21, 21)]
head = DepthwiseSeparableASPPHead(
c1_in_channels=8,
c1_channels=4,
in_channels=32,
channels=16,
num_classes=19,
dilations=(1, 12, 24))
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
assert head.c1_bottleneck.in_channels == 8
assert head.c1_bottleneck.out_channels == 4
assert head.aspp_modules[0].conv.dilation == (1, 1)
assert head.aspp_modules[1].depthwise_conv.dilation == (12, 12)
assert head.aspp_modules[2].depthwise_conv.dilation == (24, 24)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 45, 45)
def test_sep_fcn_head():
# test sep_fcn_head with concat_input=False
head = DepthwiseSeparableFCNHead(
in_channels=128,
channels=128,
concat_input=False,
num_classes=19,
in_index=-1,
norm_cfg=dict(type='BN', requires_grad=True, momentum=0.01))
x = [torch.rand(2, 128, 32, 32)]
output = head(x)
assert output.shape == (2, head.num_classes, 32, 32)
assert not head.concat_input
assert isinstance(head.convs[0], DepthwiseSeparableConvModule)
assert isinstance(head.convs[1], DepthwiseSeparableConvModule)
assert head.conv_seg.kernel_size == (1, 1)
head = DepthwiseSeparableFCNHead(
in_channels=64,
channels=64,
concat_input=True,
num_classes=19,
in_index=-1,
norm_cfg=dict(type='BN', requires_grad=True, momentum=0.01))
x = [torch.rand(3, 64, 32, 32)]
output = head(x)
assert output.shape == (3, head.num_classes, 32, 32)
assert head.concat_input
assert isinstance(head.convs[0], DepthwiseSeparableConvModule)
assert isinstance(head.convs[1], DepthwiseSeparableConvModule)
def test_dnl_head():
# DNL with 'embedded_gaussian' mode
head = DNLHead(in_channels=32, channels=16, num_classes=19)
assert len(head.convs) == 2
assert hasattr(head, 'dnl_block')
assert head.dnl_block.temperature == 0.05
inputs = [torch.randn(1, 32, 45, 45)]
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 45, 45)
# NonLocal2d with 'dot_product' mode
head = DNLHead(
in_channels=32, channels=16, num_classes=19, mode='dot_product')
inputs = [torch.randn(1, 32, 45, 45)]
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 45, 45)
# NonLocal2d with 'gaussian' mode
head = DNLHead(
in_channels=32, channels=16, num_classes=19, mode='gaussian')
inputs = [torch.randn(1, 32, 45, 45)]
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 45, 45)
# NonLocal2d with 'concatenation' mode
head = DNLHead(
in_channels=32, channels=16, num_classes=19, mode='concatenation')
inputs = [torch.randn(1, 32, 45, 45)]
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 45, 45)
def test_emanet_head():
head = EMAHead(
in_channels=32,
ema_channels=24,
channels=16,
num_stages=3,
num_bases=16,
num_classes=19)
for param in head.ema_mid_conv.parameters():
assert not param.requires_grad
assert hasattr(head, 'ema_module')
inputs = [torch.randn(1, 32, 45, 45)]
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 45, 45)
def test_point_head():
inputs = [torch.randn(1, 32, 45, 45)]
point_head = PointHead(
in_channels=[32], in_index=[0], channels=16, num_classes=19)
assert len(point_head.fcs) == 3
fcn_head = FCNHead(in_channels=32, channels=16, num_classes=19)
if torch.cuda.is_available():
head, inputs = to_cuda(point_head, inputs)
head, inputs = to_cuda(fcn_head, inputs)
prev_output = fcn_head(inputs)
test_cfg = ConfigDict(
subdivision_steps=2, subdivision_num_points=8196, scale_factor=2)
output = point_head.forward_test(inputs, prev_output, None, test_cfg)
assert output.shape == (1, point_head.num_classes, 180, 180)
def test_lraspp_head():
    """LRASPPHead: invalid configs raise; valid multi-scale input is segmented."""
    # Keyword arguments shared by every construction below.
    common_cfg = dict(
        in_channels=(16, 16, 576),
        in_index=(0, 1, 2),
        channels=128,
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=dict(type='BN'),
        act_cfg=dict(type='ReLU'),
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
    # An unsupported input_transform must be rejected.
    with pytest.raises(ValueError):
        LRASPPHead(input_transform='resize_concat', **common_cfg)
    # An invalid branch_channels setting must be rejected.
    with pytest.raises(AssertionError):
        LRASPPHead(
            branch_channels=64,
            input_transform='multiple_select',
            **common_cfg)
    # Default settings construct fine.
    lraspp_head = LRASPPHead(input_transform='multiple_select', **common_cfg)
    # Spatial sizes incompatible with the head's internal resizing fail.
    bad_inputs = [
        torch.randn(2, 16, 45, 45),
        torch.randn(2, 16, 28, 28),
        torch.randn(2, 576, 14, 14)
    ]
    with pytest.raises(RuntimeError):
        lraspp_head(bad_inputs)
    good_inputs = [
        torch.randn(2, 16, 111, 111),
        torch.randn(2, 16, 77, 77),
        torch.randn(2, 576, 55, 55)
    ]
    assert lraspp_head(good_inputs).shape == (2, 19, 111, 111)
| 33.122156
| 79
| 0.623857
| 3,801
| 27,657
| 4.364904
| 0.064983
| 0.075945
| 0.054246
| 0.054849
| 0.825206
| 0.78633
| 0.775842
| 0.765656
| 0.735278
| 0.717799
| 0
| 0.066221
| 0.254149
| 27,657
| 834
| 80
| 33.161871
| 0.738074
| 0.060889
| 0
| 0.712766
| 0
| 0
| 0.013393
| 0
| 0
| 0
| 0
| 0
| 0.218845
| 1
| 0.034954
| false
| 0
| 0.012158
| 0
| 0.053191
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b8717ebea5f9834d100cc2c50c69c83219dd6a5e
| 142
|
py
|
Python
|
mercadopago/config/__init__.py
|
nlgonzalez/sdk-python
|
972082b1a1c0015fce376a42f53e4696b163bc0b
|
[
"MIT"
] | 100
|
2015-02-17T03:16:15.000Z
|
2022-03-28T17:22:14.000Z
|
mercadopago/config/__init__.py
|
nlgonzalez/sdk-python
|
972082b1a1c0015fce376a42f53e4696b163bc0b
|
[
"MIT"
] | 29
|
2015-06-15T18:40:09.000Z
|
2022-02-24T15:36:03.000Z
|
mercadopago/config/__init__.py
|
nlgonzalez/sdk-python
|
972082b1a1c0015fce376a42f53e4696b163bc0b
|
[
"MIT"
] | 58
|
2015-01-16T21:46:45.000Z
|
2022-02-25T21:26:22.000Z
|
"""
Module: config/__init__.py
"""
from mercadopago.config.request_options import RequestOptions
from mercadopago.config.config import Config
| 23.666667
| 61
| 0.823944
| 17
| 142
| 6.588235
| 0.588235
| 0.267857
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084507
| 142
| 5
| 62
| 28.4
| 0.861538
| 0.183099
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b884b116fe819cd2f56e0b66a7932214b43db076
| 32,125
|
py
|
Python
|
nlosExclusion/src/puGNSSPosCal.py
|
xiaoshitou4/GNSS-INS
|
6ea16568d85eb1ed6b5cc49fb192dcba0e0f7491
|
[
"Unlicense"
] | 3
|
2019-07-27T05:31:15.000Z
|
2021-06-10T02:16:46.000Z
|
nlosExclusion/src/puGNSSPosCal.py
|
yxw027/GNSS-INS
|
e5c5b7901b270a9c4d3a0ffd5555843d969f4018
|
[
"Unlicense"
] | null | null | null |
nlosExclusion/src/puGNSSPosCal.py
|
yxw027/GNSS-INS
|
e5c5b7901b270a9c4d3a0ffd5555843d969f4018
|
[
"Unlicense"
] | 3
|
2019-12-25T07:47:22.000Z
|
2021-02-03T03:24:46.000Z
|
#!/usr/bin/env python
# license removed for brevity
"""
GNSS positioning calculation
Welson Wen, Ph.D.
https://sites.google.com/view/weisongwen/about-me
"""
from numpy import * # numpy needed
import matplotlib as mpl #plot needed
mpl.use("TkAgg") # Use TKAgg to show figures:set this to show plot
import matplotlib.pyplot as plt #plotting
import pandas as pd # pandas needed renamed as pd
import numpy as np #numpy needed renamed as np
import geometry_msgs.msg as gm #ros geometry message
from geometry_msgs.msg import Quaternion, Point, Pose, Twist,PoseArray # commonly used message type
from sensor_msgs.msg import NavSatFix # standard message type for GNSSs
from nlosExclusion.msg import GNSS_Raw_Array,GNSS_Raw # customerized ros message type
from matplotlib.patches import Ellipse, Circle # draw circle needs library
import csv # csv reading needed library
import datetime #time format (datetime)
import time #time format (time)
import llh2ecef # llh to ecef
import ecef2llh #ecef coordinate to llh coordinate
from nlosExclusion.msg import Satellite_Info # customized ros message type Satellite_Info containing satellites exclusion numbers
import rospy
from novatel_msgs.msg import BESTPOS
class GNSSPosCal():
def __init__(self):
self.calMode = 'LSGPS' # 'LSGPS' 'LSGNSS'
self.GNSSTim = 0
self.dop = 0 # dop
self.toSv = 0
self.iterations_=0
self.appInfo = 0
self.prn = [] # prn
self.snr = [] # snr
self.pseudResid = {} # pseudorange residual
self.visi = [] # visibility
self.azimuth = []
self.elevation =[]
self.ecef_=[] # calculation result
self.llh_ =[]
# self.GroTruth = [22.303953,114.181925,14.0] # for experiment1: small NLOS reception
self.GroTruth = [22.303756,114.18215,14.0] # for experiment2: Big NLOS reception
self.error_ = 0
self.GPSNum = [] # create a list to save GPS satellites numbers
self.BeiNum = [] # create a list to save Beidou satellites numbers
def LSGPSPosCalStep(self,GNSS_one_epoch_, init_x, init_y, init_z, init_b): # least square
GNSS_one_epoch = GNSS_Raw_Array()
GNSS_one_epoch = GNSS_one_epoch_
rec_ini_pos_x = float(init_x) # initial position x (m)
rec_ini_pos_y = float(init_y) # initial position y (m)
rec_ini_pos_z = float(init_z) # initial position z (m)
rec_clo_bia_b = float(init_b) # initial distance bias caused by clock bias (m)
devia_xyz = 1.0 # initial set receiver position estimation error (m)
gues_pseud = [] # guessed pseudorange
pseud_error = [] # pseudorange error
G_matrix_ = array([2, 2, 2, 1], dtype='float') # creat G matrix to save transform parameters
self.dop = self.DopCalculation(GNSS_one_epoch)
for index_1 in range(len(GNSS_one_epoch.GNSS_Raws)): # index all the satellite information in one epoch
if((GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index >= 88) and (
GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index <= (87 + 37))):
if(self.iterations_<1): # save one time only
self.GNSSTim = GNSS_one_epoch.GNSS_Raws[index_1].GNSS_time
self.toSv = GNSS_one_epoch.GNSS_Raws[index_1].total_sv
self.prn.append(GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index)
self.snr.append(GNSS_one_epoch.GNSS_Raws[index_1].snr)
self.visi.append(GNSS_one_epoch.GNSS_Raws[index_1].visable)
self.azimuth.append(GNSS_one_epoch.GNSS_Raws[index_1].azimuth)
self.elevation.append(GNSS_one_epoch.GNSS_Raws[index_1].elevation)
sx_1 = float(GNSS_one_epoch.GNSS_Raws[index_1].sat_pos_x) # satellite position x
sy_1 = float(GNSS_one_epoch.GNSS_Raws[index_1].sat_pos_y) # satellite position y
sz_1 = float(GNSS_one_epoch.GNSS_Raws[index_1].sat_pos_z) # satellite position z
sx_1 = (sx_1 - rec_ini_pos_x) * (sx_1 - rec_ini_pos_x) # satellite to receiver distance in x idrection
sy_1 = (sy_1 - rec_ini_pos_y) * (sy_1 - rec_ini_pos_y) # satellite to receiver distance in y idrection
sz_1 = (sz_1 - rec_ini_pos_z) * (sz_1 - rec_ini_pos_z) # satellite to receiver distance in z idrection
sat2rec_dis = sqrt(sx_1 + sy_1 + sz_1) # guessed pseudorange
gues_pseud.append(sat2rec_dis) # save guessed pseudorange
pseud_error_element = float(GNSS_one_epoch.GNSS_Raws[index_1].pseudorange) - float(
sat2rec_dis) + float(rec_clo_bia_b) # pseudorange error
pseud_error.append(pseud_error_element) # save pseudorange error
G_row = [] # G matrix row
G_row.append(float(GNSS_one_epoch.GNSS_Raws[index_1].sat_pos_x - rec_ini_pos_x) / float(
sat2rec_dis) * -1) # x for G matrix row
G_row.append(float(GNSS_one_epoch.GNSS_Raws[index_1].sat_pos_y - rec_ini_pos_y) / float(
sat2rec_dis) * -1) # y for G matrix row
G_row.append(float(GNSS_one_epoch.GNSS_Raws[index_1].sat_pos_z - rec_ini_pos_z) / float(
sat2rec_dis) * -1) # z for G matrix row
element_float = 1.0 # last element for each row
G_row.append(element_float) # save last element for each row
G_matrix_ = np.row_stack((G_matrix_, G_row)) # add each row to G_matrix
del G_row[:] # relief G_row
# get pseudorange error
pseud_error_mat = np.array(pseud_error) # from list to array
pseud_error_mat = pseud_error_mat.transpose() # transpose
# get G matrix
G_matrix_ = np.delete(G_matrix_, [0], axis=0) # delete the first row of G matrix
delta_p = np.dot((G_matrix_.transpose()), G_matrix_) # G(T) * G
delta_p_2 = np.linalg.inv(delta_p) # inverse matrix of G(T) * G
delta_p = np.dot(delta_p_2, (G_matrix_.transpose())) # multiply (inverse matrix of G(T) * G) and G(T)
delta_p = np.dot(delta_p, pseud_error_mat) # multiply with pseud_error_mat
rec_ini_pos_x = rec_ini_pos_x + float(delta_p[0]) # update receiver position in x direction
rec_ini_pos_y = rec_ini_pos_y + float(delta_p[1]) # update receiver position in y idrection
rec_ini_pos_z = rec_ini_pos_z + float(delta_p[2]) # update receiver position in z idrection
rec_clo_bia_b = rec_clo_bia_b + float(delta_p[3]) # update receiver clock bias in meters
devia_x = float(delta_p[0]) # save delta x
devia_y = float(delta_p[1]) # save delta y
devia_z = float(delta_p[2]) # save delta z
devia_b = float(delta_p[3]) # save delta bias
devia_xyz = sqrt(devia_x * devia_x + devia_y * devia_y + devia_z * devia_z) # get total bias
# print 'delta_p',delta_p
# print 'position estimation x=',rec_ini_pos_x
# print 'position estimation y=', rec_ini_pos_y
# print 'position estimation Z=', rec_ini_pos_z
# print 'position estimation b=', rec_clo_bia_b
# print 'position estimation devia_xyz=', devia_xyz
del gues_pseud[:] # relief gues_pseud[] list
del pseud_error[:] # relief pseud_error[] list
return float(rec_ini_pos_x), float(rec_ini_pos_y), float(rec_ini_pos_z), float(rec_clo_bia_b), float(devia_xyz)
'''
GPS: 1:32
GLONASS: 32 + 1:24
Galileo: 57 + 1:30
Beidou: 87 + 1:37
QZSS: 124 + 1:4
'''
def LSGNSSPosCalStep(self,GNSS_one_epoch_, init_x, init_y, init_z, init_b_GPS,
init_b_Beidou): # least square for hybrid GNSS positioning (GPS + Beidou)
GNSS_one_epoch = GNSS_Raw_Array()
GNSS_one_epoch = GNSS_one_epoch_
rec_ini_pos_x = float(init_x) # initial position x (m)
rec_ini_pos_y = float(init_y) # initial position y (m)
rec_ini_pos_z = float(init_z) # initial position z (m)
rec_clo_bia_b_GPS = float(init_b_GPS) # initial distance bias caused by clock bias of GPS (m)
rec_clo_bia_b_Beidou = float(init_b_Beidou) # initial distance bias caused by clock bias of Beidou (m)
devia_xyz = 1.0 # initial set receiver position estimation error (m)
gues_pseud = [] # guessed pseudorange
pseud_error = [] # pseudorange error
G_matrix_ = array([2, 2, 2, 1, 1], dtype='float') # creat G matrix to save transform parameters
self.dop = self.DopCalculation(GNSS_one_epoch)
# get guessed pseudorange and pseudorange error
for index_1 in range(len(GNSS_one_epoch.GNSS_Raws)): # index all the satellite information in one epoch
if ((GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index >= 88) and (
GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index <= (87 + 37)) or
((GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index >= 1) and (
GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index <= 32))):
if (self.iterations_ < 1): # save one time only
self.GNSSTim = GNSS_one_epoch.GNSS_Raws[index_1].GNSS_time
self.toSv = GNSS_one_epoch.GNSS_Raws[index_1].total_sv
self.prn.append(GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index)
self.snr.append(GNSS_one_epoch.GNSS_Raws[index_1].snr)
self.visi.append(GNSS_one_epoch.GNSS_Raws[index_1].visable)
self.azimuth.append(GNSS_one_epoch.GNSS_Raws[index_1].azimuth)
self.elevation.append(GNSS_one_epoch.GNSS_Raws[index_1].elevation)
sx_1 = float(GNSS_one_epoch.GNSS_Raws[index_1].sat_pos_x) # satellite position x
sy_1 = float(GNSS_one_epoch.GNSS_Raws[index_1].sat_pos_y) # satellite position y
sz_1 = float(GNSS_one_epoch.GNSS_Raws[index_1].sat_pos_z) # satellite position z
# print 'satellite index',GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index
sx_1 = (sx_1 - rec_ini_pos_x) * (
sx_1 - rec_ini_pos_x) # satellite to receiver distance in x idrection
sy_1 = (sy_1 - rec_ini_pos_y) * (
sy_1 - rec_ini_pos_y) # satellite to receiver distance in y idrection
sz_1 = (sz_1 - rec_ini_pos_z) * (
sz_1 - rec_ini_pos_z) # satellite to receiver distance in z idrection
sat2rec_dis = 0.0 # initialize variable
sat2rec_dis = sqrt(sx_1 + sy_1 + sz_1) # guessed pseudorange
gues_pseud.append(sat2rec_dis) # save guessed pseudorange
G_row = [] # G matrix row
G_row.append(float(GNSS_one_epoch.GNSS_Raws[index_1].sat_pos_x - rec_ini_pos_x) / float(
sat2rec_dis) * -1) # x for G matrix row
G_row.append(float(GNSS_one_epoch.GNSS_Raws[index_1].sat_pos_y - rec_ini_pos_y) / float(
sat2rec_dis) * -1) # y for G matrix row
G_row.append(float(GNSS_one_epoch.GNSS_Raws[index_1].sat_pos_z - rec_ini_pos_z) / float(
sat2rec_dis) * -1) # z for G matrix row
if ((GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index >= 88) and (
GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index <= (87 + 37))):
element_float_GPS = 0.0 # GPS element for each row
element_float_Beidou = 1.0 # Beidou element for each row
G_row.append(element_float_GPS) # save last two element for each row
G_row.append(element_float_Beidou) # save last two element for each row
pseud_error_element = float(GNSS_one_epoch.GNSS_Raws[index_1].pseudorange) - float(
sat2rec_dis) + float(rec_clo_bia_b_Beidou) # Beidou pseudorange error
pseud_error.append(pseud_error_element) # save pseudorange error
if ((GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index >= 1) and (
GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index <= 32)):
element_float_GPS = 1.0 # GPS element for each row
element_float_Beidou = 0.0 # Beidou element for each row
G_row.append(element_float_GPS) # save last two element for each row
G_row.append(element_float_Beidou) # save last two element for each row
pseud_error_element = float(GNSS_one_epoch.GNSS_Raws[index_1].pseudorange) - float(
sat2rec_dis) + float(rec_clo_bia_b_GPS) # GPS pseudorange error
pseud_error.append(pseud_error_element) # save pseudorange error
# print 'length of G_row',len(G_row),'GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index',GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index
G_matrix_ = np.row_stack((G_matrix_, G_row)) # add each row to G_matrix
del G_row[:] # relief G_row
# get pseudorange error
pseud_error_mat = np.array(pseud_error) # from list to array
pseud_error_mat = pseud_error_mat.transpose() # transpose
# get G matrix
G_matrix_ = np.delete(G_matrix_, [0], axis=0) # delete the first row of G matrix
# get cofactor matrix
# cofactorMat_ = np.array(self.cofactorMatrixCal(GNSS_one_epoch_))
# cofactorMat_ = np.diag(cofactorMat_) # diag matrix
# print 'cofactors', self.cofactorMatrixCal(GNSS_one_epoch_), cofactorMat_
delta_p = np.dot((G_matrix_.transpose()), G_matrix_) # G(T) * G
delta_p_2 = np.linalg.inv(delta_p) # inverse matrix of G(T) * G
delta_p = np.dot(delta_p_2, (G_matrix_.transpose())) # multiply (inverse matrix of G(T) * G) and G(T)
delta_p = np.dot(delta_p, pseud_error_mat) # multiply with pseud_error_mat
rec_ini_pos_x = rec_ini_pos_x + float(delta_p[0]) # update receiver position in x direction
rec_ini_pos_y = rec_ini_pos_y + float(delta_p[1]) # update receiver position in y idrection
rec_ini_pos_z = rec_ini_pos_z + float(delta_p[2]) # update receiver position in z idrection
rec_clo_bia_b_GPS = rec_clo_bia_b_GPS + float(delta_p[3]) # update receiver clock bias of GPS in meters
rec_clo_bia_b_Beidou = rec_clo_bia_b_Beidou + float(
delta_p[4]) # update receiver clock bias of Beidou in meters
devia_x = float(delta_p[0]) # save delta x
devia_y = float(delta_p[1]) # save delta y
devia_z = float(delta_p[2]) # save delta z
devia_b_GPS = float(delta_p[3]) # save delta bias of GPS
devia_b_Beidou = float(delta_p[4]) # save delta bias of Beidou
devia_xyz = sqrt(devia_x * devia_x + devia_y * devia_y + devia_z * devia_z) # get total bias
# print 'delta_p',delta_p
# print 'position estimation x=',rec_ini_pos_x
# print 'position estimation y=', rec_ini_pos_y
# print 'position estimation Z=', rec_ini_pos_z
# print 'position estimation b=', rec_clo_bia_b
# print 'position estimation devia_xyz=', devia_xyz
del gues_pseud[:] # relief gues_pseud[] list
del pseud_error[:] # relief pseud_error[] list
return float(rec_ini_pos_x), float(rec_ini_pos_y), float(rec_ini_pos_z), float(rec_clo_bia_b_GPS), float(
rec_clo_bia_b_Beidou), float(devia_xyz)
def WLSGNSSPosCalStep(self,GNSS_one_epoch_, init_x, init_y, init_z, init_b_GPS,
init_b_Beidou): # least square for hybrid GNSS positioning (GPS + Beidou)
GNSS_one_epoch = GNSS_Raw_Array()
GNSS_one_epoch = GNSS_one_epoch_
rec_ini_pos_x = float(init_x) # initial position x (m)
rec_ini_pos_y = float(init_y) # initial position y (m)
rec_ini_pos_z = float(init_z) # initial position z (m)
rec_clo_bia_b_GPS = float(init_b_GPS) # initial distance bias caused by clock bias of GPS (m)
rec_clo_bia_b_Beidou = float(init_b_Beidou) # initial distance bias caused by clock bias of Beidou (m)
devia_xyz = 1.0 # initial set receiver position estimation error (m)
gues_pseud = [] # guessed pseudorange
pseud_error = [] # pseudorange error
G_matrix_ = array([2, 2, 2, 1, 1], dtype='float') # creat G matrix to save transform parameters
self.dop = self.DopCalculation(GNSS_one_epoch)
# get guessed pseudorange and pseudorange error
for index_1 in range(len(GNSS_one_epoch.GNSS_Raws)): # index all the satellite information in one epoch
if ((GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index >= 88) and (
GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index <= (87 + 37)) or
((GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index >= 1) and (
GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index <= 32))):
if (self.iterations_ < 1): # save one time only
self.GNSSTim = GNSS_one_epoch.GNSS_Raws[index_1].GNSS_time
self.toSv = GNSS_one_epoch.GNSS_Raws[index_1].total_sv
self.prn.append(GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index)
self.snr.append(GNSS_one_epoch.GNSS_Raws[index_1].snr)
self.visi.append(GNSS_one_epoch.GNSS_Raws[index_1].visable)
self.azimuth.append(GNSS_one_epoch.GNSS_Raws[index_1].azimuth)
self.elevation.append(GNSS_one_epoch.GNSS_Raws[index_1].elevation)
sx_1 = float(GNSS_one_epoch.GNSS_Raws[index_1].sat_pos_x) # satellite position x
sy_1 = float(GNSS_one_epoch.GNSS_Raws[index_1].sat_pos_y) # satellite position y
sz_1 = float(GNSS_one_epoch.GNSS_Raws[index_1].sat_pos_z) # satellite position z
# print 'satellite index',GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index
sx_1 = (sx_1 - rec_ini_pos_x) * (
sx_1 - rec_ini_pos_x) # satellite to receiver distance in x idrection
sy_1 = (sy_1 - rec_ini_pos_y) * (
sy_1 - rec_ini_pos_y) # satellite to receiver distance in y idrection
sz_1 = (sz_1 - rec_ini_pos_z) * (
sz_1 - rec_ini_pos_z) # satellite to receiver distance in z idrection
sat2rec_dis = 0.0 # initialize variable
sat2rec_dis = sqrt(sx_1 + sy_1 + sz_1) # guessed pseudorange
gues_pseud.append(sat2rec_dis) # save guessed pseudorange
G_row = [] # G matrix row
G_row.append(float(GNSS_one_epoch.GNSS_Raws[index_1].sat_pos_x - rec_ini_pos_x) / float(
sat2rec_dis) * -1) # x for G matrix row
G_row.append(float(GNSS_one_epoch.GNSS_Raws[index_1].sat_pos_y - rec_ini_pos_y) / float(
sat2rec_dis) * -1) # y for G matrix row
G_row.append(float(GNSS_one_epoch.GNSS_Raws[index_1].sat_pos_z - rec_ini_pos_z) / float(
sat2rec_dis) * -1) # z for G matrix row
if ((GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index >= 88) and (
GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index <= (87 + 37))):
element_float_GPS = 0.0 # GPS element for each row
element_float_Beidou = 1.0 # Beidou element for each row
G_row.append(element_float_GPS) # save last two element for each row
G_row.append(element_float_Beidou) # save last two element for each row
pseud_error_element = float(GNSS_one_epoch.GNSS_Raws[index_1].pseudorange) - float(
sat2rec_dis) + float(rec_clo_bia_b_Beidou) # Beidou pseudorange error
pseud_error.append(pseud_error_element) # save pseudorange error
if ((GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index >= 1) and (
GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index <= 32)):
element_float_GPS = 1.0 # GPS element for each row
element_float_Beidou = 0.0 # Beidou element for each row
G_row.append(element_float_GPS) # save last two element for each row
G_row.append(element_float_Beidou) # save last two element for each row
pseud_error_element = float(GNSS_one_epoch.GNSS_Raws[index_1].pseudorange) - float(
sat2rec_dis) + float(rec_clo_bia_b_GPS) # GPS pseudorange error
pseud_error.append(pseud_error_element) # save pseudorange error
# print 'length of G_row',len(G_row),'GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index',GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index
G_matrix_ = np.row_stack((G_matrix_, G_row)) # add each row to G_matrix
del G_row[:] # relief G_row
# get pseudorange error
pseud_error_mat = np.array(pseud_error) # from list to array
pseud_error_mat = pseud_error_mat.transpose() # transpose
# get G matrix
G_matrix_ = np.delete(G_matrix_, [0], axis=0) # delete the first row of G matrix
# get cofactor matrix
cofactorMat_ = np.array(self.cofactorMatrixCal(GNSS_one_epoch))
cofactorMat_ = np.diag(cofactorMat_) # diag matrix
# print 'cofactors',self.cofactorMatrixCal(GNSS_one_epoch_),cofactorMat_,len(GNSS_one_epoch.GNSS_Raws)
delta_p = np.dot((G_matrix_.transpose()), cofactorMat_) # G(T) * G
delta_p = np.dot(delta_p, G_matrix_) # G(T) * G
delta_p_2 = np.linalg.inv(delta_p) # inverse matrix of G(T) * G
delta_p = np.dot(delta_p_2, (G_matrix_.transpose())) # multiply (inverse matrix of G(T) * G) and G(T)
delta_p = np.dot(delta_p, cofactorMat_) # multiply (inverse matrix of G(T) * G) and G(T)
delta_p = np.dot(delta_p, pseud_error_mat) # multiply with pseud_error_mat
rec_ini_pos_x = rec_ini_pos_x + float(delta_p[0]) # update receiver position in x direction
rec_ini_pos_y = rec_ini_pos_y + float(delta_p[1]) # update receiver position in y idrection
rec_ini_pos_z = rec_ini_pos_z + float(delta_p[2]) # update receiver position in z idrection
rec_clo_bia_b_GPS = rec_clo_bia_b_GPS + float(delta_p[3]) # update receiver clock bias of GPS in meters
rec_clo_bia_b_Beidou = rec_clo_bia_b_Beidou + float(
delta_p[4]) # update receiver clock bias of Beidou in meters
devia_x = float(delta_p[0]) # save delta x
devia_y = float(delta_p[1]) # save delta y
devia_z = float(delta_p[2]) # save delta z
devia_b_GPS = float(delta_p[3]) # save delta bias of GPS
devia_b_Beidou = float(delta_p[4]) # save delta bias of Beidou
devia_xyz = sqrt(devia_x * devia_x + devia_y * devia_y + devia_z * devia_z) # get total bias
# print 'delta_p',delta_p
# print 'position estimation x=',rec_ini_pos_x
# print 'position estimation y=', rec_ini_pos_y
# print 'position estimation Z=', rec_ini_pos_z
# print 'position estimation b=', rec_clo_bia_b
# print 'position estimation devia_xyz=', devia_xyz
del gues_pseud[:] # relief gues_pseud[] list
del pseud_error[:] # relief pseud_error[] list
return float(rec_ini_pos_x), float(rec_ini_pos_y), float(rec_ini_pos_z), float(rec_clo_bia_b_GPS), float(
rec_clo_bia_b_Beidou), float(devia_xyz)
def iterPosCal(self,GNSS_one_epoch_,calMode):
iterations = 0
itera_x = 0
itera_y = 0
itera_z = 0
self.calMode = calMode
self.getSatNum(GNSS_one_epoch_) # get satellites numbers
if(self.calMode=='LSGNSS') and (self.GPSNum>=4) and (self.BeiNum>1):
itera_x, itera_y, itera_z, itera_b_GPS, itera_b_Beidou, itera_bias_ = self.LSGNSSPosCalStep(
GNSS_one_epoch_, 0.1, 0.1, 0.1, 1.0, 1.0) # first iteration from (0.1, 0.1, 0.1, 1.0)
self.iterations_ = self.iterations_+1
while (itera_bias_ > 1e-4) and iterations < 10: # threshold for iterations:value and times
itera_x, itera_y, itera_z, itera_b_GPS, itera_b_Beidou, itera_bias_ = self.LSGNSSPosCalStep(
GNSS_one_epoch_, itera_x, itera_y, itera_z, itera_b_GPS, itera_b_Beidou) # iteration
self.iterations_ = self.iterations_ + 1
iterations = iterations + 1 # add one iteration
elif (self.calMode == 'LSGPS') and (self.GPSNum>=4):
itera_x, itera_y, itera_z, itera_b, itera_bias_ = self.LSGPSPosCalStep(
GNSS_one_epoch_, 0.1,
0.1, 0.1,
1.0) # first iteration from (0.1, 0.1, 0.1, 1.0)
self.iterations_ = self.iterations_ + 1
while (itera_bias_ > 1e-4) and iterations < 10: # threshold for iterations:value and times
itera_x, itera_y, itera_z, itera_b, itera_bias_ = self.LSGPSPosCalStep(
GNSS_one_epoch_,itera_x, itera_y,itera_z,itera_b)
self.iterations_ = self.iterations_ + 1
iterations = iterations + 1 # add one iteration
elif (self.calMode == 'WLSGNSS') and (self.GPSNum >= 4) and (self.BeiNum > 1):
itera_x, itera_y, itera_z, itera_b_GPS, itera_b_Beidou, itera_bias_ = self.WLSGNSSPosCalStep(
GNSS_one_epoch_, 0.1, 0.1, 0.1, 1.0, 1.0) # first iteration from (0.1, 0.1, 0.1, 1.0)
self.iterations_ = self.iterations_ + 1
while (itera_bias_ > 1e-4) and iterations < 10: # threshold for iterations:value and times
itera_x, itera_y, itera_z, itera_b_GPS, itera_b_Beidou, itera_bias_ = self.WLSGNSSPosCalStep(
GNSS_one_epoch_, itera_x, itera_y, itera_z, itera_b_GPS, itera_b_Beidou) # iteration
self.iterations_ = self.iterations_ + 1
iterations = iterations + 1 # add one iteration
print 'itera_b_GPS',itera_b_GPS,'itera_b_Beidou-----------------------------------------',itera_b_Beidou
self.ecef_.append(float(itera_x))
self.ecef_.append(float(itera_y))
self.ecef_.append(float(itera_z))
self.pseudoResCal(GNSS_one_epoch_)
self.PosError()
self.llh_ = ecef2llh.xyz2llh(self.ecef_) # ecef to llh coordinates
iterations = 0.0 # initialize iterations variable
# GNSSPosCal_ = puGNSSPosCal.GNSSPosCal()
# GNSSPosCal_.iterGNSSPosCal(self.GNSSArr)
def DopCalculation(self,GNSS_one_epoch_): # get Dop in one epoch
GNSS_one_epoch = GNSS_Raw_Array()
GNSS_one_epoch = GNSS_one_epoch_
H_matrix_ = array([2, 2, 2, 1], dtype='float') # creat H matrix to save transform parameters
Hdop_ = 0.0 # create an variable to save Hdop
elemin = 15.0 # minimun elevation angle
for index_1 in range(len(GNSS_one_epoch.GNSS_Raws)): # index all the satellite information in one epoch
if (GNSS_one_epoch.GNSS_Raws[index_1].elevation <= elemin):
# print 'satellite elevation less than 15 degree=',GNSS_one_epoch.GNSS_Raws[index_1].elevation
continue
cosel = float(cos(GNSS_one_epoch.GNSS_Raws[index_1].elevation))
sinel = float(sin(GNSS_one_epoch.GNSS_Raws[index_1].elevation))
H_row = [] # H matrix row
H_row.append(float(cosel * sin(GNSS_one_epoch.GNSS_Raws[index_1].azimuth)))
H_row.append(float(cosel * cos(GNSS_one_epoch.GNSS_Raws[index_1].azimuth)))
H_row.append(float(sinel))
H_row.append(1.0)
H_matrix_ = np.row_stack((H_matrix_, H_row)) # add each row to H_matrix
del H_row[:] # relief H_row
# get H matrix
H_matrix_ = np.delete(H_matrix_, [0], axis=0) # delete the first row of H matrix
# print 'H_matrix_',H_matrix_
Q_matrix_ = np.dot((H_matrix_.transpose()), H_matrix_) # H(T) * G
Q_matrix_ = np.linalg.inv(Q_matrix_) # inverse matrix of H(T) * G
Hdop = float(sqrt(Q_matrix_[0, 0] + Q_matrix_[1, 1]))
# print 'Q_matrix_', Q_matrix_, 'Hdop', Hdop
return float(Hdop) # return result
def getSatNum(self,GNSS_one_epoch): # get number of GPS and Beidou satellites in one epoch and save all the satellite number in list
for index_1 in range(len(GNSS_one_epoch.GNSS_Raws)): # index all the satellite information in one epoch
if ((GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index >= 1) and (
GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index <= 32)): # GPS satellites index range
self.GPSNum.append(float(GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index))
if ((GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index >= 88) and (
GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index <= (87 + 37))): # Beidou satellites index range
self.BeiNum.append(float(GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index))
def PosError(self):
xyzTru_ = llh2ecef.llh2xyz(self.GroTruth)
self.error_ = math.sqrt((self.ecef_[0]-xyzTru_[0]) * (self.ecef_[0]-xyzTru_[0]) + (self.ecef_[1]-xyzTru_[1]) * (self.ecef_[1]-xyzTru_[1]) + (self.ecef_[2]-xyzTru_[2]) * (self.ecef_[2]-xyzTru_[2]))
if(self.error_ > 100):
self.error_ = 100
def pseudoResCal(self,GNSS_one_epoch):
for index_1 in range(len(GNSS_one_epoch.GNSS_Raws)): # index all the satellite information in one epoch
pseudoRes_ = 0.0
res_x = GNSS_one_epoch.GNSS_Raws[index_1].sat_pos_x - self.ecef_[0]
res_y = GNSS_one_epoch.GNSS_Raws[index_1].sat_pos_y - self.ecef_[1]
res_z = GNSS_one_epoch.GNSS_Raws[index_1].sat_pos_z - self.ecef_[2]
pseudoRes_ = int ((math.sqrt(res_x * res_x + res_y * res_y + res_z * res_z)) - GNSS_one_epoch.GNSS_Raws[index_1].pseudorange)
satIdx_ = float(GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index)
self.pseudResid[str(satIdx_)] = pseudoRes_
# print 'self.pseudResid=',self.pseudResid
def cofactorMatrixCal(self,GNSS_one_epoch):
snr_1 = 50.0 # T = 50
snr_A = 30.0 # A = 30
snr_a = 30.0 # a = 30
snr_0 = 10.0 # F = 10
cofactor_ = [] # cofactor of satellite
for index_1 in range(len(GNSS_one_epoch.GNSS_Raws)):
if ((GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index >= 88) and (
GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index <= (87 + 37)) or
((GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index >= 1) and (
GNSS_one_epoch.GNSS_Raws[index_1].prn_satellites_index <= 32))):
snr_R = GNSS_one_epoch.GNSS_Raws[index_1].snr
elR = GNSS_one_epoch.GNSS_Raws[index_1].elevation
q_R_1 = 1 / (( sin(elR * pi/180.0 )) ** 2)
# q_R_1 = 1/q_R_1
q_R_2 = 10 ** (-(snr_R - snr_1) / snr_a)
q_R_3 = (((snr_A / (10 ** (-(snr_0 - snr_1) / snr_a)) - 1) / (snr_0 - snr_1)) * (snr_R - snr_1) + 1)
# q_R = float(1 / (( sin(elR * pi/180.0 )) ** 2) * (10 ** (-(snr_R - snr_1) / snr_a) * (
# (snr_A / (10 ** (-(snr_0 - snr_1) / snr_a)) - 1) / (snr_0 - snr_1) * (snr_R - snr_1) + 1)))
q_R = q_R_1* (q_R_2 * q_R_3)
cofactor_.append(float(1.0/q_R))
return cofactor_
| 68.061441
| 204
| 0.634895
| 4,732
| 32,125
| 3.968512
| 0.065089
| 0.060493
| 0.085628
| 0.091166
| 0.842111
| 0.827414
| 0.824272
| 0.811705
| 0.79754
| 0.789339
| 0
| 0.025238
| 0.272311
| 32,125
| 472
| 205
| 68.061441
| 0.778072
| 0.272311
| 0
| 0.669211
| 0
| 0
| 0.004996
| 0.00241
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.045802
| null | null | 0.002545
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b8934221f2ec1ecc673a063e8bc045bbb7e9c59c
| 87
|
py
|
Python
|
mapping/data_loader/__init__.py
|
syanga/model-augmented-mutual-information
|
a7c0ccb3b32320e9c45c266d668a879e240d39e3
|
[
"MIT"
] | 2
|
2021-06-10T05:45:16.000Z
|
2021-11-06T11:44:42.000Z
|
mapping/data_loader/__init__.py
|
syanga/model-augmented-mutual-information
|
a7c0ccb3b32320e9c45c266d668a879e240d39e3
|
[
"MIT"
] | null | null | null |
mapping/data_loader/__init__.py
|
syanga/model-augmented-mutual-information
|
a7c0ccb3b32320e9c45c266d668a879e240d39e3
|
[
"MIT"
] | null | null | null |
from .np_supervised import *
from .h5_supervised import *
from .h5_timeseries import *
| 21.75
| 28
| 0.793103
| 12
| 87
| 5.5
| 0.5
| 0.484848
| 0.606061
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026667
| 0.137931
| 87
| 3
| 29
| 29
| 0.853333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
b8b1634f8e5ab1dc07d9ecf9c63754f10fa3fc7b
| 52,400
|
py
|
Python
|
flat_api/api/collection_api.py
|
FlatIO/api-client-python
|
898d1da77989b3e9075f0311b6a4d342a72e95ef
|
[
"Apache-2.0"
] | 8
|
2017-04-09T15:54:12.000Z
|
2021-07-14T13:38:43.000Z
|
flat_api/api/collection_api.py
|
FlatIO/api-client-python
|
898d1da77989b3e9075f0311b6a4d342a72e95ef
|
[
"Apache-2.0"
] | 4
|
2018-07-20T13:22:40.000Z
|
2022-03-23T20:03:21.000Z
|
flat_api/api/collection_api.py
|
FlatIO/api-client-python
|
898d1da77989b3e9075f0311b6a4d342a72e95ef
|
[
"Apache-2.0"
] | 2
|
2018-05-29T08:29:59.000Z
|
2018-07-23T07:16:13.000Z
|
# coding: utf-8
"""
Flat API
The Flat API allows you to easily extend the abilities of the [Flat Platform](https://flat.io), with a wide range of use cases including the following: * Creating and importing new music scores using MusicXML, MIDI, Guitar Pro (GP3, GP4, GP5, GPX, GP), PowerTab, TuxGuitar and MuseScore files * Browsing, updating, copying, exporting the user's scores (for example in MP3, WAV or MIDI) * Managing educational resources with Flat for Education: creating & updating the organization accounts, the classes, rosters and assignments. The Flat API is built on HTTP. Our API is RESTful It has predictable resource URLs. It returns HTTP response codes to indicate errors. It also accepts and returns JSON in the HTTP body. The [schema](/swagger.yaml) of this API follows the [OpenAPI Initiative (OAI) specification](https://www.openapis.org/), you can use and work with [compatible Swagger tools](http://swagger.io/open-source-integrations/). This API features Cross-Origin Resource Sharing (CORS) implemented in compliance with [W3C spec](https://www.w3.org/TR/cors/). You can use your favorite HTTP/REST library for your programming language to use Flat's API. This specification and reference is [available on Github](https://github.com/FlatIO/api-reference). Getting Started and learn more: * [API Overview and interoduction](https://flat.io/developers/docs/api/) * [Authentication (Personal Access Tokens or OAuth2)](https://flat.io/developers/docs/api/authentication.html) * [SDKs](https://flat.io/developers/docs/api/sdks.html) * [Rate Limits](https://flat.io/developers/docs/api/rate-limits.html) * [Changelog](https://flat.io/developers/docs/api/changelog.html) # noqa: E501
OpenAPI spec version: 2.7.0
Contact: developers@flat.io
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from flat_api.api_client import ApiClient
class CollectionApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
    """Build the API wrapper around an ``ApiClient``.

    :param api_client: a configured ``ApiClient`` instance; when omitted a
        default-configured client is created.
    """
    self.api_client = api_client if api_client is not None else ApiClient()
def add_score_to_collection(self, collection, score, **kwargs):  # noqa: E501
    """Add a score to the collection.  # noqa: E501

    Adds a score to a collection; by default the score stays available in
    its other collections. Requires the ``canAddScores`` capability on the
    provided ``collection``.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread instead and call ``thread.get()`` for the result.

    :param async_req bool
    :param str collection: Unique identifier of the collection, or one of
        the aliases ``root``, ``sharedWithMe``, ``trash``. (required)
    :param str score: Unique identifier of the score document, or a Google
        Drive file id prefixed with ``drive-``. (required)
    :param str sharing_key: Sharing key required for documents whose
        ``privacy`` mode is ``privateLink`` when the user is not a
        collaborator.
    :return: ScoreDetails (or the request thread when ``async_req=True``)
    """
    # This convenience wrapper always asks for the parsed body only.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both delegate to the same implementation; when
    # async_req is set the underlying call already returns the thread.
    return self.add_score_to_collection_with_http_info(collection, score, **kwargs)  # noqa: E501
def add_score_to_collection_with_http_info(self, collection, score, **kwargs):  # noqa: E501
    """Add a score to the collection # noqa: E501

    This operation will add a score to a collection. The default behavior will make the score available across multiple collections. You must have the capability `canAddScores` on the provided `collection` to perform the action. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.add_score_to_collection_with_http_info(collection, score, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str collection: Unique identifier of the collection. The following aliases are supported: - `root`: The root collection of the account - `sharedWithMe`: Automatically contains new resources that have been shared individually - `trash`: Automatically contains resources that have been deleted (required)
    :param str score: Unique identifier of the score document. This can be a Flat Score unique identifier (i.e. `ScoreDetails.id`) or, if the score is also a Google Drive file, the Drive file unique identifier prefixed with `drive-` (e.g. `drive-0B000000000`). (required)
    :param str sharing_key: This sharing key must be specified to access to a score or collection with a `privacy` mode set to `privateLink` and the current user is not a collaborator of the document.
    :return: ScoreDetails
        If the method is called asynchronously,
        returns the request thread.
    """
    # Snapshot of the call's arguments. Taken before any other local is
    # created so it contains exactly `self`, the named parameters and the
    # raw `kwargs` dict.
    local_var_params = locals()
    # Parameters this endpoint accepts, plus the generic request options.
    all_params = ['collection', 'score', 'sharing_key']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Reject unknown keyword arguments, folding accepted ones into the
    # flat parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method add_score_to_collection" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'collection' is set
    if ('collection' not in local_var_params or
            local_var_params['collection'] is None):
        raise ValueError("Missing the required parameter `collection` when calling `add_score_to_collection`")  # noqa: E501
    # verify the required parameter 'score' is set
    if ('score' not in local_var_params or
            local_var_params['score'] is None):
        raise ValueError("Missing the required parameter `score` when calling `add_score_to_collection`")  # noqa: E501
    collection_formats = {}
    # Both identifiers are substituted into the URL path template below.
    path_params = {}
    if 'collection' in local_var_params:
        path_params['collection'] = local_var_params['collection']  # noqa: E501
    if 'score' in local_var_params:
        path_params['score'] = local_var_params['score']  # noqa: E501
    query_params = []
    # `sharing_key` is sent as the `sharingKey` query-string parameter.
    if 'sharing_key' in local_var_params:
        query_params.append(('sharingKey', local_var_params['sharing_key']))  # noqa: E501
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['OAuth2']  # noqa: E501
    # Delegate the HTTP PUT to the shared ApiClient.
    return self.api_client.call_api(
        '/collections/{collection}/scores/{score}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ScoreDetails',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def create_collection(self, collection_creation, **kwargs):  # noqa: E501
    """Create a new collection.  # noqa: E501

    Creates a new collection and adds it to your ``root`` collection.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread instead and call ``thread.get()`` for the result.

    :param async_req bool
    :param CollectionCreation collection_creation: (required)
    :return: Collection (or the request thread when ``async_req=True``)
    """
    # This convenience wrapper always asks for the parsed body only.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both delegate to the same implementation; when
    # async_req is set the underlying call already returns the thread.
    return self.create_collection_with_http_info(collection_creation, **kwargs)  # noqa: E501
def create_collection_with_http_info(self, collection_creation, **kwargs):  # noqa: E501
    """Create a new collection # noqa: E501

    This method will create a new collection and add it to your `root` collection. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_collection_with_http_info(collection_creation, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param CollectionCreation collection_creation: (required)
    :return: Collection
        If the method is called asynchronously,
        returns the request thread.
    """
    # Snapshot of the call's arguments. Taken before any other local is
    # created so it contains exactly `self`, the named parameters and the
    # raw `kwargs` dict.
    local_var_params = locals()
    # Parameters this endpoint accepts, plus the generic request options.
    all_params = ['collection_creation']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Reject unknown keyword arguments, folding accepted ones into the
    # flat parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_collection" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'collection_creation' is set
    if ('collection_creation' not in local_var_params or
            local_var_params['collection_creation'] is None):
        raise ValueError("Missing the required parameter `collection_creation` when calling `create_collection`")  # noqa: E501
    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    # The creation payload is sent as the JSON request body.
    body_params = None
    if 'collection_creation' in local_var_params:
        body_params = local_var_params['collection_creation']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['OAuth2']  # noqa: E501
    # Delegate the HTTP POST to the shared ApiClient.
    return self.api_client.call_api(
        '/collections', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Collection',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_collection(self, collection, **kwargs):  # noqa: E501
    """Delete the collection.  # noqa: E501

    Schedules the deletion of the collection; until deleted it remains
    available in the ``trash``.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread instead and call ``thread.get()`` for the result.

    :param async_req bool
    :param str collection: Unique identifier of the collection, or one of
        the aliases ``root``, ``sharedWithMe``, ``trash``. (required)
    :return: None (or the request thread when ``async_req=True``)
    """
    # This convenience wrapper always asks for the parsed body only.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both delegate to the same implementation; when
    # async_req is set the underlying call already returns the thread.
    return self.delete_collection_with_http_info(collection, **kwargs)  # noqa: E501
def delete_collection_with_http_info(self, collection, **kwargs):  # noqa: E501
    """Delete the collection # noqa: E501

    This method will schedule the deletion of the collection. Until deleted, the collection will be available in the `trash`. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_collection_with_http_info(collection, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str collection: Unique identifier of the collection. The following aliases are supported: - `root`: The root collection of the account - `sharedWithMe`: Automatically contains new resources that have been shared individually - `trash`: Automatically contains resources that have been deleted (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Snapshot of the call's arguments. Taken before any other local is
    # created so it contains exactly `self`, the named parameters and the
    # raw `kwargs` dict.
    local_var_params = locals()
    # Parameters this endpoint accepts, plus the generic request options.
    all_params = ['collection']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Reject unknown keyword arguments, folding accepted ones into the
    # flat parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_collection" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'collection' is set
    if ('collection' not in local_var_params or
            local_var_params['collection'] is None):
        raise ValueError("Missing the required parameter `collection` when calling `delete_collection`")  # noqa: E501
    collection_formats = {}
    # The identifier is substituted into the URL path template below.
    path_params = {}
    if 'collection' in local_var_params:
        path_params['collection'] = local_var_params['collection']  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['OAuth2']  # noqa: E501
    # Delegate the HTTP DELETE to the shared ApiClient; no body is decoded.
    return self.api_client.call_api(
        '/collections/{collection}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_score_from_collection(self, collection, score, **kwargs):  # noqa: E501
    """Delete a score from the collection.  # noqa: E501

    Removes the score from this collection only — unlike
    ``DELETE /scores/{score}`` the score stays in your account. Useful to
    *move* a score between collections or drop it from one of several
    collections containing it.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread instead and call ``thread.get()`` for the result.

    :param async_req bool
    :param str collection: Unique identifier of the collection, or one of
        the aliases ``root``, ``sharedWithMe``, ``trash``. (required)
    :param str score: Unique identifier of the score document, or a Google
        Drive file id prefixed with ``drive-``. (required)
    :return: None (or the request thread when ``async_req=True``)
    """
    # This convenience wrapper always asks for the parsed body only.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both delegate to the same implementation; when
    # async_req is set the underlying call already returns the thread.
    return self.delete_score_from_collection_with_http_info(collection, score, **kwargs)  # noqa: E501
def delete_score_from_collection_with_http_info(self, collection, score, **kwargs):  # noqa: E501
    """Delete a score from the collection # noqa: E501

    This method will delete a score from the collection. Unlike [`DELETE /scores/{score}`](#operation/deleteScore), this score will not remove the score from your account, but only from the collection. This can be used to *move* a score from one collection to another, or simply remove a score from one collection when this one is contained in multiple collections. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_score_from_collection_with_http_info(collection, score, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str collection: Unique identifier of the collection. The following aliases are supported: - `root`: The root collection of the account - `sharedWithMe`: Automatically contains new resources that have been shared individually - `trash`: Automatically contains resources that have been deleted (required)
    :param str score: Unique identifier of the score document. This can be a Flat Score unique identifier (i.e. `ScoreDetails.id`) or, if the score is also a Google Drive file, the Drive file unique identifier prefixed with `drive-` (e.g. `drive-0B000000000`). (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Snapshot of the call's arguments. Taken before any other local is
    # created so it contains exactly `self`, the named parameters and the
    # raw `kwargs` dict.
    local_var_params = locals()
    # Parameters this endpoint accepts, plus the generic request options.
    all_params = ['collection', 'score']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Reject unknown keyword arguments, folding accepted ones into the
    # flat parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_score_from_collection" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'collection' is set
    if ('collection' not in local_var_params or
            local_var_params['collection'] is None):
        raise ValueError("Missing the required parameter `collection` when calling `delete_score_from_collection`")  # noqa: E501
    # verify the required parameter 'score' is set
    if ('score' not in local_var_params or
            local_var_params['score'] is None):
        raise ValueError("Missing the required parameter `score` when calling `delete_score_from_collection`")  # noqa: E501
    collection_formats = {}
    # Both identifiers are substituted into the URL path template below.
    path_params = {}
    if 'collection' in local_var_params:
        path_params['collection'] = local_var_params['collection']  # noqa: E501
    if 'score' in local_var_params:
        path_params['score'] = local_var_params['score']  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['OAuth2']  # noqa: E501
    # Delegate the HTTP DELETE to the shared ApiClient; no body is decoded.
    return self.api_client.call_api(
        '/collections/{collection}/scores/{score}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def edit_collection(self, collection, **kwargs):  # noqa: E501
    """Update a collection's metadata.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread instead and call ``thread.get()`` for the result.

    :param async_req bool
    :param str collection: Unique identifier of the collection, or one of
        the aliases ``root``, ``sharedWithMe``, ``trash``. (required)
    :param CollectionModification collection_modification:
    :return: Collection (or the request thread when ``async_req=True``)
    """
    # This convenience wrapper always asks for the parsed body only.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both delegate to the same implementation; when
    # async_req is set the underlying call already returns the thread.
    return self.edit_collection_with_http_info(collection, **kwargs)  # noqa: E501
def edit_collection_with_http_info(self, collection, **kwargs):  # noqa: E501
    """Update a collection's metadata # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.edit_collection_with_http_info(collection, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str collection: Unique identifier of the collection. The following aliases are supported: - `root`: The root collection of the account - `sharedWithMe`: Automatically contains new resources that have been shared individually - `trash`: Automatically contains resources that have been deleted (required)
    :param CollectionModification collection_modification:
    :return: Collection
        If the method is called asynchronously,
        returns the request thread.
    """
    # Snapshot of the call's arguments. Taken before any other local is
    # created so it contains exactly `self`, the named parameters and the
    # raw `kwargs` dict.
    local_var_params = locals()
    # Parameters this endpoint accepts, plus the generic request options.
    all_params = ['collection', 'collection_modification']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Reject unknown keyword arguments, folding accepted ones into the
    # flat parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method edit_collection" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'collection' is set
    if ('collection' not in local_var_params or
            local_var_params['collection'] is None):
        raise ValueError("Missing the required parameter `collection` when calling `edit_collection`")  # noqa: E501
    collection_formats = {}
    # The identifier is substituted into the URL path template below.
    path_params = {}
    if 'collection' in local_var_params:
        path_params['collection'] = local_var_params['collection']  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    # The optional modification payload is sent as the JSON request body.
    body_params = None
    if 'collection_modification' in local_var_params:
        body_params = local_var_params['collection_modification']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['OAuth2']  # noqa: E501
    # Delegate the HTTP PUT to the shared ApiClient.
    return self.api_client.call_api(
        '/collections/{collection}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Collection',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_collection(self, collection, **kwargs):  # noqa: E501
    """Get collection details.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread instead and call ``thread.get()`` for the result.

    :param async_req bool
    :param str collection: Unique identifier of the collection, or one of
        the aliases ``root``, ``sharedWithMe``, ``trash``. (required)
    :param str sharing_key: Sharing key required for documents whose
        ``privacy`` mode is ``privateLink`` when the user is not a
        collaborator.
    :return: Collection (or the request thread when ``async_req=True``)
    """
    # This convenience wrapper always asks for the parsed body only.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both delegate to the same implementation; when
    # async_req is set the underlying call already returns the thread.
    return self.get_collection_with_http_info(collection, **kwargs)  # noqa: E501
def get_collection_with_http_info(self, collection, **kwargs):  # noqa: E501
    """Get collection details # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_collection_with_http_info(collection, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str collection: Unique identifier of the collection. The following aliases are supported: - `root`: The root collection of the account - `sharedWithMe`: Automatically contains new resources that have been shared individually - `trash`: Automatically contains resources that have been deleted (required)
    :param str sharing_key: This sharing key must be specified to access to a score or collection with a `privacy` mode set to `privateLink` and the current user is not a collaborator of the document.
    :return: Collection
        If the method is called asynchronously,
        returns the request thread.
    """
    # Snapshot of the call's arguments. Taken before any other local is
    # created so it contains exactly `self`, the named parameters and the
    # raw `kwargs` dict.
    local_var_params = locals()
    # Parameters this endpoint accepts, plus the generic request options.
    all_params = ['collection', 'sharing_key']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Reject unknown keyword arguments, folding accepted ones into the
    # flat parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_collection" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'collection' is set
    if ('collection' not in local_var_params or
            local_var_params['collection'] is None):
        raise ValueError("Missing the required parameter `collection` when calling `get_collection`")  # noqa: E501
    collection_formats = {}
    # The identifier is substituted into the URL path template below.
    path_params = {}
    if 'collection' in local_var_params:
        path_params['collection'] = local_var_params['collection']  # noqa: E501
    query_params = []
    # `sharing_key` is sent as the `sharingKey` query-string parameter.
    if 'sharing_key' in local_var_params:
        query_params.append(('sharingKey', local_var_params['sharing_key']))  # noqa: E501
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['OAuth2']  # noqa: E501
    # Delegate the HTTP GET to the shared ApiClient.
    return self.api_client.call_api(
        '/collections/{collection}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Collection',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def list_collection_scores(self, collection, **kwargs):  # noqa: E501
    """List the scores contained in a collection.  # noqa: E501

    Without a sort option the scores are sorted by ``modificationDate``
    ``desc``.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread instead and call ``thread.get()`` for the result.

    :param async_req bool
    :param str collection: Unique identifier of the collection, or one of
        the aliases ``root``, ``sharedWithMe``, ``trash``. (required)
    :param str sharing_key: Sharing key required for documents whose
        ``privacy`` mode is ``privateLink`` when the user is not a
        collaborator.
    :param str sort: Sort
    :param str direction: Sort direction
    :param int limit: Maximum number of objects that may be returned
    :param str next: Opaque cursor for the next page (from the ``Link``
        response header)
    :param str previous: Opaque cursor for the previous page (from the
        ``Link`` response header)
    :return: list[ScoreDetails] (or the request thread when
        ``async_req=True``)
    """
    # This convenience wrapper always asks for the parsed body only.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both delegate to the same implementation; when
    # async_req is set the underlying call already returns the thread.
    return self.list_collection_scores_with_http_info(collection, **kwargs)  # noqa: E501
def list_collection_scores_with_http_info(self, collection, **kwargs):  # noqa: E501
    """List the scores contained in a collection # noqa: E501

    Use this method to list the scores contained in a collection. If no sort option is provided, the scores are sorted by `modificationDate` `desc`. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.list_collection_scores_with_http_info(collection, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str collection: Unique identifier of the collection. The following aliases are supported: - `root`: The root collection of the account - `sharedWithMe`: Automatically contains new resources that have been shared individually - `trash`: Automatically contains resources that have been deleted (required)
    :param str sharing_key: This sharing key must be specified to access to a score or collection with a `privacy` mode set to `privateLink` and the current user is not a collaborator of the document.
    :param str sort: Sort
    :param str direction: Sort direction
    :param int limit: This is the maximum number of objects that may be returned
    :param str next: An opaque string cursor to fetch the next page of data. The paginated API URLs are returned in the `Link` header when requesting the API. These URLs will contain a `next` and `previous` cursor based on the available data.
    :param str previous: An opaque string cursor to fetch the previous page of data. The paginated API URLs are returned in the `Link` header when requesting the API. These URLs will contain a `next` and `previous` cursor based on the available data.
    :return: list[ScoreDetails]
        If the method is called asynchronously,
        returns the request thread.
    """
    # Snapshot of the call's arguments. Taken before any other local is
    # created so it contains exactly `self`, the named parameters and the
    # raw `kwargs` dict.
    local_var_params = locals()
    # Parameters this endpoint accepts, plus the generic request options.
    all_params = ['collection', 'sharing_key', 'sort', 'direction', 'limit', 'next', 'previous']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Reject unknown keyword arguments, folding accepted ones into the
    # flat parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_collection_scores" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'collection' is set
    if ('collection' not in local_var_params or
            local_var_params['collection'] is None):
        raise ValueError("Missing the required parameter `collection` when calling `list_collection_scores`")  # noqa: E501
    # Client-side range check: `limit` must be within 1..100 inclusive.
    if 'limit' in local_var_params and local_var_params['limit'] > 100:  # noqa: E501
        raise ValueError("Invalid value for parameter `limit` when calling `list_collection_scores`, must be a value less than or equal to `100`")  # noqa: E501
    if 'limit' in local_var_params and local_var_params['limit'] < 1:  # noqa: E501
        raise ValueError("Invalid value for parameter `limit` when calling `list_collection_scores`, must be a value greater than or equal to `1`")  # noqa: E501
    collection_formats = {}
    # The identifier is substituted into the URL path template below.
    path_params = {}
    if 'collection' in local_var_params:
        path_params['collection'] = local_var_params['collection']  # noqa: E501
    # Optional filters/pagination options become query-string parameters
    # (`sharing_key` maps to `sharingKey`).
    query_params = []
    if 'sharing_key' in local_var_params:
        query_params.append(('sharingKey', local_var_params['sharing_key']))  # noqa: E501
    if 'sort' in local_var_params:
        query_params.append(('sort', local_var_params['sort']))  # noqa: E501
    if 'direction' in local_var_params:
        query_params.append(('direction', local_var_params['direction']))  # noqa: E501
    if 'limit' in local_var_params:
        query_params.append(('limit', local_var_params['limit']))  # noqa: E501
    if 'next' in local_var_params:
        query_params.append(('next', local_var_params['next']))  # noqa: E501
    if 'previous' in local_var_params:
        query_params.append(('previous', local_var_params['previous']))  # noqa: E501
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['OAuth2']  # noqa: E501
    # Delegate the HTTP GET to the shared ApiClient.
    return self.api_client.call_api(
        '/collections/{collection}/scores', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[ScoreDetails]',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def list_collections(self, **kwargs):  # noqa: E501
    """List the collections  # noqa: E501

    Convenience wrapper around ``list_collections_with_http_info`` that
    returns the response payload only. Lists the user's collections
    contained in `parent` (the `root` collection by default); without a
    sort option the collections are sorted by `creationDate` `desc`.
    The `parent` collection itself is not included in the listing.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.list_collections(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str parent: List the collection contained in this `parent` collection.
    :param str sort: Sort
    :param str direction: Sort direction
    :param int limit: This is the maximum number of objects that may be returned
    :param str next: An opaque string cursor to fetch the next page of data.
    :param str previous: An opaque string cursor to fetch the previous page of data.
    :return: list[Collection]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Ask the helper for the payload only, not the (data, status, headers)
    # triple. Both the sync and async paths return the helper's result
    # directly (in the async case that result is the request thread).
    kwargs['_return_http_data_only'] = True
    return self.list_collections_with_http_info(**kwargs)  # noqa: E501
def list_collections_with_http_info(self, **kwargs):  # noqa: E501
    """List the collections  # noqa: E501

    Use this method to list the user's collections contained in `parent`
    (by default in the `root` collection). If no sort option is provided,
    the collections are sorted by `creationDate` `desc`. Note that this
    method will not include the `parent` collection in the listing.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.list_collections_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str parent: List the collection contained in this `parent` collection.
    :param str sort: Sort
    :param str direction: Sort direction
    :param int limit: This is the maximum number of objects that may be returned
    :param str next: An opaque string cursor to fetch the next page of data.
    :param str previous: An opaque string cursor to fetch the previous page of data.
    :return: list[Collection]
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: on an unexpected keyword argument.
    :raises ValueError: if `limit` lies outside [1, 100].
    """
    # Query parameters accepted by this endpoint, in wire order.
    endpoint_params = ('parent', 'sort', 'direction', 'limit', 'next', 'previous')
    permitted = list(endpoint_params) + [
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout']
    # Generated-client convention: fold every argument (plus `self`) into
    # a single dict keyed by parameter name.
    local_var_params = {'self': self}
    for key, val in six.iteritems(kwargs):
        if key not in permitted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_collections" % key
            )
        local_var_params[key] = val
    # `limit`, when supplied, must lie in [1, 100].
    if 'limit' in local_var_params and local_var_params['limit'] > 100:  # noqa: E501
        raise ValueError("Invalid value for parameter `limit` when calling `list_collections`, must be a value less than or equal to `100`")  # noqa: E501
    if 'limit' in local_var_params and local_var_params['limit'] < 1:  # noqa: E501
        raise ValueError("Invalid value for parameter `limit` when calling `list_collections`, must be a value greater than or equal to `1`")  # noqa: E501
    # Forward only the parameters that were actually supplied.
    query_params = []
    for name in endpoint_params:
        if name in local_var_params:
            query_params.append((name, local_var_params[name]))
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
    }
    return self.api_client.call_api(
        '/collections', 'GET',
        {},  # no path parameters on this endpoint
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='list[Collection]',  # noqa: E501
        auth_settings=['OAuth2'],  # noqa: E501
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
def untrash_collection(self, collection, **kwargs):  # noqa: E501
    """Untrash a collection  # noqa: E501

    Convenience wrapper around ``untrash_collection_with_http_info`` that
    returns the response payload only. Restores the collection by removing
    it from the `trash` and adding it back to the `root` collection.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.untrash_collection(collection, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str collection: Unique identifier of the collection. The aliases
        `root`, `sharedWithMe` and `trash` are supported. (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Ask the helper for the payload only. Both the sync and async paths
    # return the helper's result directly (async returns the thread).
    kwargs['_return_http_data_only'] = True
    return self.untrash_collection_with_http_info(collection, **kwargs)  # noqa: E501
def untrash_collection_with_http_info(self, collection, **kwargs):  # noqa: E501
    """Untrash a collection  # noqa: E501

    This method will restore the collection by removing it from the `trash`
    and add it back to the `root` collection.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.untrash_collection_with_http_info(collection, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str collection: Unique identifier of the collection. The aliases
        `root`, `sharedWithMe` and `trash` are supported. (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: on an unexpected keyword argument.
    :raises ValueError: if `collection` is None.
    """
    permitted = ['collection', 'async_req', '_return_http_data_only',
                 '_preload_content', '_request_timeout']
    # Generated-client convention: fold every argument (plus `self`) into
    # a single dict keyed by parameter name.
    local_var_params = {'self': self, 'collection': collection}
    for key, val in six.iteritems(kwargs):
        if key not in permitted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method untrash_collection" % key
            )
        local_var_params[key] = val
    # verify the required parameter 'collection' is set
    if local_var_params.get('collection') is None:
        raise ValueError("Missing the required parameter `collection` when calling `untrash_collection`")  # noqa: E501
    path_params = {'collection': local_var_params['collection']}  # noqa: E501
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
    }
    return self.api_client.call_api(
        '/collections/{collection}/untrash', 'POST',
        path_params,
        [],  # no query parameters on this endpoint
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=['OAuth2'],  # noqa: E501
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
| 53.524004
| 1,686
| 0.661641
| 6,470
| 52,400
| 5.16306
| 0.058733
| 0.040234
| 0.062865
| 0.01868
| 0.944619
| 0.940757
| 0.935279
| 0.928723
| 0.92567
| 0.918844
| 0
| 0.014483
| 0.256851
| 52,400
| 978
| 1,687
| 53.578732
| 0.843353
| 0.463302
| 0
| 0.801527
| 0
| 0.007634
| 0.197554
| 0.044649
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03626
| false
| 0
| 0.007634
| 0
| 0.097328
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b26464838281bee912d2363619722a03f8f03b88
| 64
|
py
|
Python
|
go/dlgo/ttt/__init__.py
|
huynq55/alpha-zero-general
|
7c7b8a9a09b79178157ec6b6d379a071c9f0994a
|
[
"MIT"
] | 1
|
2021-04-20T23:01:22.000Z
|
2021-04-20T23:01:22.000Z
|
go/dlgo/ttt/__init__.py
|
huynq55/alpha-zero-general
|
7c7b8a9a09b79178157ec6b6d379a071c9f0994a
|
[
"MIT"
] | null | null | null |
go/dlgo/ttt/__init__.py
|
huynq55/alpha-zero-general
|
7c7b8a9a09b79178157ec6b6d379a071c9f0994a
|
[
"MIT"
] | null | null | null |
from dlgo.ttt.tttboard import *
from dlgo.ttt.ttttypes import *
| 21.333333
| 31
| 0.78125
| 10
| 64
| 5
| 0.6
| 0.32
| 0.44
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 64
| 2
| 32
| 32
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b2a008af79069cf3277d72c67f254213a558af97
| 12,755
|
py
|
Python
|
model/model_RelationalReasoning.py
|
haoyfan/SelfTime
|
28cafe1171bcd2f300242fe0703840478ae53bf0
|
[
"MIT"
] | 41
|
2020-10-08T12:27:02.000Z
|
2022-02-07T03:28:56.000Z
|
model/model_RelationalReasoning.py
|
haoyfan/SelfTime
|
28cafe1171bcd2f300242fe0703840478ae53bf0
|
[
"MIT"
] | null | null | null |
model/model_RelationalReasoning.py
|
haoyfan/SelfTime
|
28cafe1171bcd2f300242fe0703840478ae53bf0
|
[
"MIT"
] | 13
|
2021-01-15T03:17:55.000Z
|
2022-03-23T19:04:29.000Z
|
# -*- coding: utf-8 -*-
import torch
from optim.pytorchtools import EarlyStopping
import torch.nn as nn
class RelationalReasoning(torch.nn.Module):
    """Inter-sample relational reasoning module for self-supervised training.

    A binary ``relation_head`` is trained jointly with ``backbone`` to decide
    whether a concatenated pair of feature vectors comes from two augmented
    views of the same sample (positive) or from different samples (negative).
    """

    def __init__(self, backbone, feature_size=64):
        # backbone: feature extractor producing `feature_size`-dim embeddings
        #   (assumed shape (batch, feature_size) — TODO confirm against caller).
        # feature_size: dimension of one embedding; the relation head consumes
        #   a concatenated pair, hence feature_size*2 inputs.
        super(RelationalReasoning, self).__init__()
        self.backbone = backbone
        # Binary relation head: outputs a single logit per feature pair.
        self.relation_head = torch.nn.Sequential(
            torch.nn.Linear(feature_size*2, 256),
            torch.nn.BatchNorm1d(256),
            torch.nn.LeakyReLU(),
            torch.nn.Linear(256, 1))

    def aggregate(self, features, K):
        """Build positive/negative feature pairs and their binary targets.

        ``features`` stacks K augmented views of the mini-batch along dim 0
        (K blocks of ``size`` rows each). For every ordered pair of blocks a
        positive pair (row-aligned) and a negative pair (second block rolled
        by ``shifts_counter`` rows) are produced.

        Returns ``(relation_pairs, targets)`` on CUDA: K(K-1)*size rows of
        concatenated pairs and 1.0/0.0 targets for positives/negatives.
        """
        relation_pairs_list = list()
        targets_list = list()
        size = int(features.shape[0] / K)  # rows per augmentation block
        shifts_counter=1
        for index_1 in range(0, size*K, size):
            for index_2 in range(index_1+size, size*K, size):
                # Using the 'cat' aggregation function by default
                pos1 = features[index_1:index_1 + size]
                pos2 = features[index_2:index_2+size]
                pos_pair = torch.cat([pos1,
                                      pos2], 1)  # (batch_size, fz*2)
                # Shuffle without collisions by rolling the mini-batch (negatives)
                neg1 = torch.roll(features[index_2:index_2 + size],
                                  shifts=shifts_counter, dims=0)
                neg_pair1 = torch.cat([pos1, neg1], 1)  # (batch_size, fz*2)
                relation_pairs_list.append(pos_pair)
                relation_pairs_list.append(neg_pair1)
                targets_list.append(torch.ones(size, dtype=torch.float32).cuda())
                targets_list.append(torch.zeros(size, dtype=torch.float32).cuda())
                shifts_counter+=1
                if(shifts_counter>=size):
                    shifts_counter=1  # avoid identity pairs
        relation_pairs = torch.cat(relation_pairs_list, 0).cuda()  # K(K-1) * (batch_size, fz*2)
        targets = torch.cat(targets_list, 0).cuda()
        return relation_pairs, targets

    def train(self, tot_epochs, train_loader, opt):
        """Run the self-supervised training loop.

        NOTE(review): shadows ``nn.Module.train(mode=True)`` with an
        incompatible signature, so the usual ``model.train()`` call will
        fail on this module — confirm this is intended.

        Saves the best backbone via EarlyStopping (which here tracks the
        higher-is-better epoch accuracy) and periodic checkpoints every
        ``opt.save_freq`` epochs. Returns ``(acc_max, epoch_max)``: the best
        epoch accuracy and the epoch index where it occurred.
        """
        patience = opt.patience
        early_stopping = EarlyStopping(patience, verbose=True,
                                       checkpoint_pth='{}/backbone_best.tar'.format(opt.ckpt_dir))
        optimizer = torch.optim.Adam([
            {'params': self.backbone.parameters()},
            {'params': self.relation_head.parameters()}], lr=opt.learning_rate)
        BCE = torch.nn.BCEWithLogitsLoss()
        self.backbone.train()
        self.relation_head.train()
        epoch_max = 0
        acc_max=0
        for epoch in range(tot_epochs):
            acc_epoch=0
            loss_epoch=0
            # the real target is discarded (unsupervised)
            for i, (data_augmented, _) in enumerate(train_loader):
                K = len(data_augmented)  # tot augmentations
                x = torch.cat(data_augmented, 0).cuda()
                optimizer.zero_grad()
                # forward pass (backbone)
                features = self.backbone(x)
                # aggregation function
                relation_pairs, targets = self.aggregate(features, K)
                # forward pass (relation head)
                score = self.relation_head(relation_pairs).squeeze()
                # cross-entropy loss and backward
                loss = BCE(score, targets)
                loss.backward()
                optimizer.step()
                # estimate the accuracy
                predicted = torch.round(torch.sigmoid(score))
                correct = predicted.eq(targets.view_as(predicted)).sum()
                accuracy = (100.0 * correct / float(len(targets)))
                acc_epoch += accuracy.item()
                loss_epoch += loss.item()
            acc_epoch /= len(train_loader)
            loss_epoch /= len(train_loader)
            if acc_epoch>acc_max:
                acc_max = acc_epoch
                epoch_max = epoch
            early_stopping(acc_epoch, self.backbone)
            if early_stopping.early_stop:
                print("Early stopping")
                break
            if (epoch+1)%opt.save_freq==0:
                print("[INFO] save backbone at epoch {}!".format(epoch))
                torch.save(self.backbone.state_dict(), '{}/backbone_{}.tar'.format(opt.ckpt_dir, epoch))
            print('Epoch [{}][{}][{}] loss= {:.5f}; Epoch ACC.= {:.2f}%, Max ACC.= {:.1f}%, Max Epoch={}' \
                  .format(epoch + 1, opt.model_name, opt.dataset_name,
                          loss_epoch, acc_epoch, acc_max, epoch_max))
        return acc_max, epoch_max
class RelationalReasoning_Intra(torch.nn.Module):
    """Intra-sample relational reasoning module.

    Trains ``backbone`` through an ``nb_class``-way classification head that
    receives the concatenated features of two cuts of the same sample and
    predicts their relation label.
    """

    def __init__(self, backbone, feature_size=64, nb_class=3):
        # backbone: feature extractor; feature_size: dimension of one
        #   embedding (the head consumes a concatenated pair, feature_size*2);
        # nb_class: number of intra-sample relation classes.
        super(RelationalReasoning_Intra, self).__init__()
        self.backbone = backbone
        # NOTE(review): the head ends in Softmax although train() scores it
        # with CrossEntropyLoss, which applies log-softmax internally —
        # confirm the double softmax is intended.
        self.cls_head = torch.nn.Sequential(
            torch.nn.Linear(feature_size*2, 256),
            torch.nn.BatchNorm1d(256),
            torch.nn.LeakyReLU(),
            torch.nn.Linear(256, nb_class),
            torch.nn.Softmax(),
        )

    def run_test(self, predict, labels):
        """Return (number of argmax-correct predictions, number of labels)."""
        correct = 0
        pred = predict.data.max(1)[1]  # argmax over the class dimension
        correct = pred.eq(labels.data).cpu().sum()
        return correct, len(labels.data)

    def train(self, tot_epochs, train_loader, opt):
        """Train backbone + cls_head on the intra-sample relation task.

        NOTE(review): shadows ``nn.Module.train(mode=True)`` with an
        incompatible signature — confirm this is intended.

        Returns ``(acc_max, epoch_max)``: the best epoch classification
        accuracy and the epoch index where it occurred.
        """
        patience = opt.patience
        early_stopping = EarlyStopping(patience, verbose=True,
                                       checkpoint_pth='{}/backbone_best.tar'.format(opt.ckpt_dir))
        optimizer = torch.optim.Adam([
            {'params': self.backbone.parameters()},
            {'params': self.cls_head.parameters()},
        ], lr=opt.learning_rate)
        c_criterion = nn.CrossEntropyLoss()
        self.backbone.train()
        self.cls_head.train()
        epoch_max = 0
        acc_max=0
        for epoch in range(tot_epochs):
            # acc_epoch is never updated in this variant and always prints
            # as 0.00 — NOTE(review): apparently vestigial.
            acc_epoch=0
            acc_epoch_cls=0
            loss_epoch=0
            # the real target is discarded (unsupervised)
            for i, (data_augmented0, data_augmented1, data_label, _) in enumerate(train_loader):
                K = len(data_augmented0)  # tot augmentations
                x_cut0 = torch.cat(data_augmented0, 0).cuda()
                x_cut1 = torch.cat(data_augmented1, 0).cuda()
                c_label = torch.cat(data_label, 0).cuda()
                optimizer.zero_grad()
                # forward pass (backbone)
                features_cut0 = self.backbone(x_cut0)
                features_cut1 = self.backbone(x_cut1)
                features_cls = torch.cat([features_cut0, features_cut1], 1)
                # score_intra = self.relation_head(relation_pairs_intra).squeeze()
                c_output = self.cls_head(features_cls)
                correct_cls, length_cls = self.run_test(c_output, c_label)
                loss_c = c_criterion(c_output, c_label)
                loss=loss_c
                loss.backward()
                optimizer.step()
                # estimate the accuracy
                loss_epoch += loss.item()
                accuracy_cls = 100. * correct_cls / length_cls
                acc_epoch_cls += accuracy_cls.item()
            acc_epoch_cls /= len(train_loader)
            loss_epoch /= len(train_loader)
            if acc_epoch_cls>acc_max:
                acc_max = acc_epoch_cls
                epoch_max = epoch
            early_stopping(acc_epoch_cls, self.backbone)
            if early_stopping.early_stop:
                print("Early stopping")
                break
            if (epoch+1)%opt.save_freq==0:
                print("[INFO] save backbone at epoch {}!".format(epoch))
                torch.save(self.backbone.state_dict(), '{}/backbone_{}.tar'.format(opt.ckpt_dir, epoch))
            print('Epoch [{}][{}][{}] loss= {:.5f}; Epoch ACC.= {:.2f}%, CLS.= {:.2f}%, '
                  'Max ACC.= {:.1f}%, Max Epoch={}' \
                  .format(epoch + 1, opt.model_name, opt.dataset_name,
                          loss_epoch, acc_epoch,acc_epoch_cls, acc_max, epoch_max))
        return acc_max, epoch_max
class RelationalReasoning_InterIntra(torch.nn.Module):
    """Joint inter-sample and intra-sample relational reasoning.

    Combines the binary inter-sample relation objective (``relation_head``
    scored with BCEWithLogitsLoss) with an ``nb_class``-way intra-sample
    classification objective (``cls_head`` scored with CrossEntropyLoss);
    the two losses are summed into a single backward pass.
    """

    def __init__(self, backbone, feature_size=64, nb_class=3):
        super(RelationalReasoning_InterIntra, self).__init__()
        self.backbone = backbone
        # Binary head: scores a concatenated feature pair as same-sample
        # (positive) vs different-sample (negative).
        self.relation_head = torch.nn.Sequential(
            torch.nn.Linear(feature_size*2, 256),
            torch.nn.BatchNorm1d(256),
            torch.nn.LeakyReLU(),
            torch.nn.Linear(256, 1))
        # Classification head over concatenated intra-sample cut features.
        # NOTE(review): ends in Softmax although train() uses
        # CrossEntropyLoss, which applies log-softmax internally — confirm
        # the double softmax is intended.
        self.cls_head = torch.nn.Sequential(
            torch.nn.Linear(feature_size*2, 256),
            torch.nn.BatchNorm1d(256),
            torch.nn.LeakyReLU(),
            torch.nn.Linear(256, nb_class),
            torch.nn.Softmax(),
        )
        # self.softmax = nn.Softmax()

    def aggregate(self, features, K):
        """Build inter-sample positive/negative pairs and binary targets.

        ``features`` stacks K augmented views of the mini-batch along dim 0
        (K blocks of ``size`` rows). For every ordered pair of blocks a
        positive pair (row-aligned) and a negative pair (second block rolled
        by ``shifts_counter`` rows) are emitted. Returns
        ``(relation_pairs, targets)`` on CUDA with 1.0/0.0 targets.
        """
        relation_pairs_list = list()
        targets_list = list()
        size = int(features.shape[0] / K)  # rows per augmentation block
        shifts_counter=1
        for index_1 in range(0, size*K, size):
            for index_2 in range(index_1+size, size*K, size):
                # Using the 'cat' aggregation function by default
                pos1 = features[index_1:index_1 + size]
                pos2 = features[index_2:index_2+size]
                pos_pair = torch.cat([pos1,
                                      pos2], 1)  # (batch_size, fz*2)
                # Shuffle without collisions by rolling the mini-batch (negatives)
                neg1 = torch.roll(features[index_2:index_2 + size],
                                  shifts=shifts_counter, dims=0)
                neg_pair1 = torch.cat([pos1, neg1], 1)  # (batch_size, fz*2)
                relation_pairs_list.append(pos_pair)
                relation_pairs_list.append(neg_pair1)
                targets_list.append(torch.ones(size, dtype=torch.float32).cuda())
                targets_list.append(torch.zeros(size, dtype=torch.float32).cuda())
                shifts_counter+=1
                if(shifts_counter>=size):
                    shifts_counter=1  # avoid identity pairs
        relation_pairs = torch.cat(relation_pairs_list, 0).cuda()  # K(K-1) * (batch_size, fz*2)
        targets = torch.cat(targets_list, 0).cuda()
        return relation_pairs, targets

    def run_test(self, predict, labels):
        """Return (number of argmax-correct predictions, number of labels)."""
        correct = 0
        pred = predict.data.max(1)[1]  # argmax over the class dimension
        correct = pred.eq(labels.data).cpu().sum()
        return correct, len(labels.data)

    def train(self, tot_epochs, train_loader, opt):
        """Train backbone + both heads on the combined objective.

        NOTE(review): shadows ``nn.Module.train(mode=True)`` with an
        incompatible signature — confirm this is intended.

        Early stopping and the best-epoch bookkeeping use the SUM of the
        relation accuracy and the classification accuracy. Returns
        ``(acc_max, epoch_max)``.
        """
        patience = opt.patience
        early_stopping = EarlyStopping(patience, verbose=True,
                                       checkpoint_pth='{}/backbone_best.tar'.format(opt.ckpt_dir))
        optimizer = torch.optim.Adam([
            {'params': self.backbone.parameters()},
            {'params': self.relation_head.parameters()},
            {'params': self.cls_head.parameters()},
        ], lr=opt.learning_rate)
        BCE = torch.nn.BCEWithLogitsLoss()
        c_criterion = nn.CrossEntropyLoss()
        self.backbone.train()
        self.relation_head.train()
        self.cls_head.train()
        epoch_max = 0
        acc_max=0
        for epoch in range(tot_epochs):
            acc_epoch=0
            acc_epoch_cls=0
            loss_epoch=0
            # the real target is discarded (unsupervised)
            for i, (data, data_augmented0, data_augmented1, data_label, _) in enumerate(train_loader):
                K = len(data)  # tot augmentations
                x = torch.cat(data, 0).cuda()
                x_cut0 = torch.cat(data_augmented0, 0).cuda()
                x_cut1 = torch.cat(data_augmented1, 0).cuda()
                c_label = torch.cat(data_label, 0).cuda()
                optimizer.zero_grad()
                # forward pass (backbone)
                features = self.backbone(x)
                features_cut0 = self.backbone(x_cut0)
                features_cut1 = self.backbone(x_cut1)
                features_cls = torch.cat([features_cut0, features_cut1], 1)
                # aggregation function
                relation_pairs, targets = self.aggregate(features, K)
                # relation_pairs_intra, targets_intra = self.aggregate_intra(features_cut0, features_cut1, K)
                # forward pass (relation head)
                score = self.relation_head(relation_pairs).squeeze()
                c_output = self.cls_head(features_cls)
                correct_cls, length_cls = self.run_test(c_output, c_label)
                # cross-entropy loss and backward
                loss = BCE(score, targets)
                loss_c = c_criterion(c_output, c_label)
                loss+=loss_c
                loss.backward()
                optimizer.step()
                # estimate the accuracy
                predicted = torch.round(torch.sigmoid(score))
                correct = predicted.eq(targets.view_as(predicted)).sum()
                accuracy = (100.0 * correct / float(len(targets)))
                acc_epoch += accuracy.item()
                loss_epoch += loss.item()
                accuracy_cls = 100. * correct_cls / length_cls
                acc_epoch_cls += accuracy_cls.item()
            acc_epoch /= len(train_loader)
            acc_epoch_cls /= len(train_loader)
            loss_epoch /= len(train_loader)
            if (acc_epoch+acc_epoch_cls)>acc_max:
                acc_max = (acc_epoch+acc_epoch_cls)
                epoch_max = epoch
            early_stopping((acc_epoch+acc_epoch_cls), self.backbone)
            if early_stopping.early_stop:
                print("Early stopping")
                break
            if (epoch+1)%opt.save_freq==0:
                print("[INFO] save backbone at epoch {}!".format(epoch))
                torch.save(self.backbone.state_dict(), '{}/backbone_{}.tar'.format(opt.ckpt_dir, epoch))
            print('Epoch [{}][{}][{}] loss= {:.5f}; Epoch ACC.= {:.2f}%, CLS.= {:.2f}%, '
                  'Max ACC.= {:.1f}%, Max Epoch={}' \
                  .format(epoch + 1, opt.model_name, opt.dataset_name,
                          loss_epoch, acc_epoch,acc_epoch_cls, acc_max, epoch_max))
        return acc_max, epoch_max
| 36.235795
| 101
| 0.619443
| 1,611
| 12,755
| 4.679081
| 0.112973
| 0.031839
| 0.02043
| 0.009552
| 0.957416
| 0.953569
| 0.936986
| 0.923057
| 0.91125
| 0.889493
| 0
| 0.023008
| 0.257154
| 12,755
| 351
| 102
| 36.339031
| 0.772559
| 0.08577
| 0
| 0.865385
| 0
| 0.003846
| 0.050077
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.011538
| 0
| 0.088462
| 0.034615
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a2a4519512eac204e954f202e7af1ccff6bb708b
| 2,053
|
py
|
Python
|
python/tvm/relay/op/reduce.py
|
gaoxiong1/tvm
|
6770f6b77252cf17a89ea7aeb292a2a54190cfff
|
[
"Apache-2.0"
] | null | null | null |
python/tvm/relay/op/reduce.py
|
gaoxiong1/tvm
|
6770f6b77252cf17a89ea7aeb292a2a54190cfff
|
[
"Apache-2.0"
] | null | null | null |
python/tvm/relay/op/reduce.py
|
gaoxiong1/tvm
|
6770f6b77252cf17a89ea7aeb292a2a54190cfff
|
[
"Apache-2.0"
] | null | null | null |
"""Reduce operators."""
# pylint: disable=redefined-builtin
from . import _make
def argmax(data, axis=None, keepdims=False, exclude=False):
    """Returns the indices of the maximum values along an axis.

    Parameters
    ----------
    data : relay.Expr
        The input data

    axis : None or int or tuple of int
        Axis or axes along which an argmax operation is performed.
        The default, axis=None, will find the indices of the maximum element
        over all of the elements of the input array. If axis is negative it
        counts from the last to the first axis.

    keepdims : bool
        If this is set to True, the axes which are reduced are left in the
        result as dimensions with size one. With this option, the result
        will broadcast correctly against the input array.

    exclude : bool
        If `exclude` is true, reduction will be performed on the axes that are
        NOT in axis instead.

    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    return _make.argmax(data, axis, keepdims, exclude)
def argmin(data, axis=None, keepdims=False, exclude=False):
    """Returns the indices of the minimum values along an axis.

    Parameters
    ----------
    data : relay.Expr
        The input data

    axis : None or int or tuple of int
        Axis or axes along which an argmin operation is performed.
        The default, axis=None, will find the indices of the minimum element
        over all of the elements of the input array. If axis is negative it
        counts from the last to the first axis.

    keepdims : bool
        If this is set to True, the axes which are reduced are left in the
        result as dimensions with size one. With this option, the result
        will broadcast correctly against the input array.

    exclude : bool
        If `exclude` is true, reduction will be performed on the axes that are
        NOT in axis instead.

    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    return _make.argmin(data, axis, keepdims, exclude)
| 31.584615
| 95
| 0.660984
| 297
| 2,053
| 4.558923
| 0.26936
| 0.035451
| 0.035451
| 0.029542
| 0.880355
| 0.880355
| 0.880355
| 0.880355
| 0.880355
| 0.880355
| 0
| 0
| 0.273746
| 2,053
| 64
| 96
| 32.078125
| 0.908115
| 0.783244
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
a2ca122fdf88b843e6d3c5f4128d47846d4a9d74
| 11,455
|
py
|
Python
|
api/response_modifiers.py
|
clearspending/api.clearspending.ru
|
ece8f9f5f1b37598cf12ad67e2a6da6214afc27c
|
[
"MIT"
] | 1
|
2019-01-15T16:52:58.000Z
|
2019-01-15T16:52:58.000Z
|
api/response_modifiers.py
|
clearspending/api.clearspending.ru
|
ece8f9f5f1b37598cf12ad67e2a6da6214afc27c
|
[
"MIT"
] | null | null | null |
api/response_modifiers.py
|
clearspending/api.clearspending.ru
|
ece8f9f5f1b37598cf12ad67e2a6da6214afc27c
|
[
"MIT"
] | null | null | null |
# coding=utf-8
def modifier_select_rsp_contracts(parametersDict):
    '''
    API input-parameter modifier for the contracts collection.

    - Clamps ``perpage`` to at most 50, substituting 50 when the value is
      missing, non-numeric or zero.
    - Decodes ``%20`` escapes in the ``supplierinn``/``supplierkpp`` filters.
    - Installs the default field projection when the caller did not supply
      ``returnfields``.

    Mutates ``parametersDict`` in place and returns it.
    '''
    maxResultsPerQuery = 50
    try:
        perpage = int(parametersDict.get("perpage", maxResultsPerQuery))
    except (TypeError, ValueError):
        # non-numeric perpage -> fall back to the maximum
        perpage = maxResultsPerQuery
    if perpage > maxResultsPerQuery or perpage == 0:
        perpage = maxResultsPerQuery
    parametersDict["perpage"] = perpage
    # Default projection returned for each contract document.
    returnfields = {
        "_id": 1,
        "id": 1,
        "regNum": 1,
        "price": 1,
        "signDate": 1,
        "customer.fullName": 1,
        "customer.inn": 1,
        "customer.kpp": 1,
        "customer.regNum": 1,
        "products": 1,
        "fz": 1,
        "regionCode": 1,
        "suppliers": 1,
        "misuses": 1,
        "finances.budgetary": 1,
        'name': 1,
        "publishDate": 1,
        'economic_sectors': 1
    }
    for field in ("supplierinn", "supplierkpp"):
        try:
            parametersDict[field] = parametersDict[field].replace("%20", " ")
        except (KeyError, AttributeError):
            # filter absent or not a string -> leave it untouched
            pass
    if parametersDict.get("returnfields") is None:
        parametersDict["returnfields"] = returnfields
    return parametersDict
def modifier_select_rsp_notifications(parametersDict):
    '''
    API input-parameter modifier for the notifications collection.

    Clamps ``perpage`` to at most 50 (substituting 50 for a missing,
    non-numeric or zero value) and installs the default ``returnfields``
    projection when the caller did not supply one.  Mutates and returns
    ``parametersDict``.
    '''
    maxResultsPerQuery = 50
    try:
        perpage = int(parametersDict.get("perpage", maxResultsPerQuery))
    except (TypeError, ValueError):
        # non-numeric perpage -> fall back to the maximum
        perpage = maxResultsPerQuery
    if perpage > maxResultsPerQuery or perpage == 0:
        perpage = maxResultsPerQuery
    parametersDict["perpage"] = perpage
    # BUG FIX: the condition previously read `if parametersDict.get(...)`,
    # which clobbered a caller-supplied projection and never installed the
    # default one. Inverted to match modifier_get_notifications_rsp.
    if not parametersDict.get("returnfields", None):
        parametersDict["returnfields"] = {
            "number": 1,
            "placingWay": 1,
            "orderName": 1,
            "lots": 1,
            'lot': 1,
            'regionCode': 1,
            "publishDate": 1,
            "notificationCommission": 1,
            "contactInfo": 1,
            "href": 1,
            "documentMetas": 1
        }
    return parametersDict
def modifier_select_rsp_grants(parametersDict):
    '''
    API input-parameter modifier for the grants collection.

    Clamps ``perpage`` to at most 50 (substituting 50 for a missing,
    non-numeric or zero value) and installs the default ``returnfields``
    projection when the caller did not supply one.  Mutates and returns
    ``parametersDict``.
    '''
    maxResultsPerQuery = 50
    try:
        perpage = int(parametersDict.get("perpage", maxResultsPerQuery))
    except (TypeError, ValueError):
        # non-numeric perpage -> fall back to the maximum
        perpage = maxResultsPerQuery
    if perpage > maxResultsPerQuery or perpage == 0:
        perpage = maxResultsPerQuery
    parametersDict["perpage"] = perpage
    # BUG FIX: the condition previously read `if parametersDict.get(...)`,
    # which clobbered a caller-supplied projection and never installed the
    # default one. Inverted to match modifier_get_grants_rsp.
    if not parametersDict.get("returnfields", None):
        parametersDict["returnfields"] = {
            "name_organization": 1,
            "status": 1,
            "grant_status": 1,
            "description": 1,
            "grant": 1,
            "price": 1,
            "site": 1,
            "OGRN": 1,
            "filing_date": 1,
            "form_number": 1,
            "address": 1,
            "operator": 1,
            "name_project": 1,
        }
    return parametersDict
def modifier_select_rsp_invalidcontracts(parametersDict):
    '''
    API input-parameter modifier for the collection of contracts with
    problematic INN/KPP values.

    Clamps ``perpage`` to at most 50 (substituting 50 for a missing,
    non-numeric or zero value) and decodes ``%20`` escapes in the
    ``supplierinn``/``supplierkpp`` filters.  Mutates and returns
    ``parametersDict``.
    '''
    maxResultsPerQuery = 50
    try:
        perpage = int(parametersDict.get("perpage", maxResultsPerQuery))
    except (TypeError, ValueError):
        # non-numeric perpage -> fall back to the maximum
        perpage = maxResultsPerQuery
    if perpage > maxResultsPerQuery or perpage == 0:
        perpage = maxResultsPerQuery
    parametersDict["perpage"] = perpage
    for field in ("supplierinn", "supplierkpp"):
        try:
            parametersDict[field] = parametersDict[field].replace("%20", " ")
        except (KeyError, AttributeError):
            # filter absent or not a string -> leave it untouched
            pass
    return parametersDict
def modifier_select_rsp_customers(parametersDict):
    '''
    API input-parameter modifier for the customers collection.

    Clamps ``perpage`` to at most 50 (substituting 50 for a missing,
    non-numeric or zero value) and installs the default field projection
    when the caller did not supply ``returnfields``.  Mutates and returns
    ``parametersDict``.
    '''
    maxResultsPerQuery = 50
    try:
        perpage = int(parametersDict.get("perpage", maxResultsPerQuery))
    except (TypeError, ValueError):
        # non-numeric perpage -> fall back to the maximum
        perpage = maxResultsPerQuery
    if perpage > maxResultsPerQuery or perpage == 0:
        perpage = maxResultsPerQuery
    parametersDict["perpage"] = perpage
    # Default projection returned for each customer document.
    returnfields = {
        "_id": 1,
        "id": 1,
        "regNumber": 1,
        "fullName": 1,
        "inn": 1,
        "kpp": 1,
        "contractsSum": 1,
        "contractsCount": 1,
        "_orgClass": 1
    }
    if parametersDict.get("returnfields") is None:
        parametersDict["returnfields"] = returnfields
    return parametersDict
def modifier_select_rsp_suppliers(parametersDict):
    '''
    API input-parameter modifier for the suppliers collection.

    Clamps ``perpage`` to at most 50 (substituting 50 for a missing,
    non-numeric or zero value) and decodes ``%20`` escapes in the
    ``inn``/``kpp`` filters.  Mutates and returns ``parametersDict``.
    '''
    maxResultsPerQuery = 50
    try:
        perpage = int(parametersDict.get("perpage", maxResultsPerQuery))
    except (TypeError, ValueError):
        # non-numeric perpage -> fall back to the maximum
        perpage = maxResultsPerQuery
    if perpage > maxResultsPerQuery or perpage == 0:
        perpage = maxResultsPerQuery
    parametersDict["perpage"] = perpage
    for field in ("inn", "kpp"):
        try:
            parametersDict[field] = parametersDict[field].replace("%20", " ")
        except (KeyError, AttributeError):
            # filter absent or not a string -> leave it untouched
            pass
    return parametersDict
def modifier_select_rsp_dictionaries(parametersDict):
    '''
    API input-parameter modifier for the dictionary (reference-data)
    collections.

    Clamps ``perpage`` to at most 1000, substituting 1000 when the value is
    missing, non-numeric or zero.  Mutates and returns ``parametersDict``.
    '''
    maxResultsPerQuery = 1000
    try:
        perpage = int(parametersDict.get("perpage", maxResultsPerQuery))
    except (TypeError, ValueError):
        # non-numeric perpage -> fall back to the maximum
        perpage = maxResultsPerQuery
    if perpage > maxResultsPerQuery or perpage == 0:
        perpage = maxResultsPerQuery
    parametersDict["perpage"] = perpage
    return parametersDict
def modifier_get_notifications_rsp(parametersDict):
    '''
    Install the default field projection for single-notification GET
    requests when the caller did not provide a truthy ``returnfields``.
    Mutates and returns ``parametersDict``.
    '''
    if not parametersDict.get("returnfields", None):
        # Every projected field maps to 1 (include).
        parametersDict["returnfields"] = dict.fromkeys((
            "number", "placingWay", "orderName", "purchaseObjectInfo",
            "purchaseResponsible", "procedureInfo", "lots", "lot", "fz",
            "publishDate", "regionCode", "notificationCommission",
            "contactInfo", "href", "documentMetas", "customers"), 1)
    return parametersDict
def modifier_get_grants_rsp(parametersDict):
    '''
    Install the default field projection for single-grant GET requests when
    the caller did not provide a truthy ``returnfields``.  Mutates and
    returns ``parametersDict``.
    '''
    if not parametersDict.get("returnfields", None):
        # Every projected field maps to 1 (include).
        parametersDict["returnfields"] = dict.fromkeys((
            "name_organization", "status", "grant_status", "description",
            "grant", "price", "site", "OGRN", "filing_date", "form_number",
            "address", "operator", "name_project"), 1)
    return parametersDict
def modifier_get_rsp(parametersDict):
    '''
    API input-parameter modifier applied to every GET request.

    Forces ``perpage`` to 1 (a GET addresses a single document) and decodes
    ``%20`` escapes in the INN/KPP filter values.  Mutates and returns
    ``parametersDict``.
    '''
    parametersDict["perpage"] = 1
    for field in ("supplierinn", "supplierkpp", "inn", "kpp"):
        try:
            parametersDict[field] = parametersDict[field].replace("%20", " ")
        except (KeyError, AttributeError):
            # filter absent or not a string -> leave it untouched
            pass
    return parametersDict
def modifier_top_rsp_contracts(parametersDict):
    """Sanitize API input parameters for the top-contracts collection.

    Clamps "perpage" to 1..100 and installs a default "returnfields"
    projection when the caller did not supply one.

    Returns the same (mutated) parameters dict.
    """
    maxResultsPerQuery = 100
    try:
        perpage = int(parametersDict.get("perpage", maxResultsPerQuery))
    except (TypeError, ValueError):
        perpage = maxResultsPerQuery
    # Reject zero, negative, and oversized page sizes.
    if perpage > maxResultsPerQuery or perpage <= 0:
        perpage = maxResultsPerQuery
    parametersDict["perpage"] = perpage
    if parametersDict.get("returnfields") is None:
        parametersDict["returnfields"] = {
            "year": 1,
            "regNum": 1,
            "price": 1,
            "signDate": 1,
            "customer.fullName": 1,
            "customer.inn": 1,
            "customer.kpp": 1,
            "customer.regNum": 1,
            "regionCode": 1,
            "products": 1,
            "suppliers": 1,
        }
    return parametersDict
def modifier_top_rsp_notifications(parametersDict):
    """Sanitize API input parameters for the top-notifications collection.

    Clamps "perpage" to 1..100 and installs a default "returnfields"
    projection when the caller did not supply one.

    Returns the same (mutated) parameters dict.
    """
    maxResultsPerQuery = 100
    try:
        perpage = int(parametersDict.get("perpage", maxResultsPerQuery))
    except (TypeError, ValueError):
        perpage = maxResultsPerQuery
    if perpage > maxResultsPerQuery or perpage <= 0:
        perpage = maxResultsPerQuery
    parametersDict["perpage"] = perpage
    # BUG FIX: the original condition was inverted ("if returnfields:"),
    # which overwrote a caller-supplied projection and never installed the
    # default. Apply the default only when none was provided, matching
    # modifier_get_notifications_rsp.
    if not parametersDict.get("returnfields", None):
        parametersDict["returnfields"] = {
            "number": 1,
            "placingWay": 1,
            "orderName": 1,
            "lots": 1,
            "lot": 1,
            "regionCode": 1,
            "publishDate": 1,
            "notificationCommission": 1,
            "contactInfo": 1,
            "href": 1,
            "documentMetas": 1,
        }
    return parametersDict
def modifier_top_rsp_grants(parametersDict):
    """Sanitize API input parameters for the top-grants collection.

    Clamps "perpage" to 1..100 and installs a default "returnfields"
    projection when the caller did not supply one.

    Returns the same (mutated) parameters dict.
    """
    maxResultsPerQuery = 100
    try:
        perpage = int(parametersDict.get("perpage", maxResultsPerQuery))
    except (TypeError, ValueError):
        perpage = maxResultsPerQuery
    if perpage > maxResultsPerQuery or perpage <= 0:
        perpage = maxResultsPerQuery
    parametersDict["perpage"] = perpage
    # BUG FIX: the original condition was inverted ("if returnfields:"),
    # which overwrote a caller-supplied projection and never installed the
    # default. Apply the default only when none was provided, matching
    # modifier_get_grants_rsp.
    if not parametersDict.get("returnfields", None):
        parametersDict["returnfields"] = {
            "name_organization": 1,
            "status": 1,
            "grant_status": 1,
            "description": 1,
            "grant": 1,
            "price": 1,
            "site": 1,
            "OGRN": 1,
            "filing_date": 1,
            "form_number": 1,
            "address": 1,
            "operator": 1,
            "name_project": 1,
        }
    return parametersDict
def modifier_top_rsp_organizations(parametersDict):
    """Sanitize API input parameters for the top customers/suppliers collections.

    Clamps "perpage" to 1..100 and, when the caller did not supply a
    projection, suppresses the Mongo "_id" field by default.

    Returns the same (mutated) parameters dict.
    """
    maxResultsPerQuery = 100
    try:
        perpage = int(parametersDict.get("perpage", maxResultsPerQuery))
    except (TypeError, ValueError):
        perpage = maxResultsPerQuery
    if perpage > maxResultsPerQuery or perpage <= 0:
        perpage = maxResultsPerQuery
    parametersDict["perpage"] = perpage
    if parametersDict.get("returnfields") is None:
        parametersDict["returnfields"] = {"_id": 0}
    return parametersDict
def modifier_top_rsp_farma(parametersDict):
    """Sanitize API input parameters for the top-pharma collection.

    Clamps "perpage" to 1..100 and installs a default "returnfields"
    projection when the caller did not supply one.

    Returns the same (mutated) parameters dict.
    """
    maxResultsPerQuery = 100
    try:
        perpage = int(parametersDict.get("perpage", maxResultsPerQuery))
    except (TypeError, ValueError):
        perpage = maxResultsPerQuery
    if perpage > maxResultsPerQuery or perpage <= 0:
        perpage = maxResultsPerQuery
    parametersDict["perpage"] = perpage
    if not parametersDict.get("returnfields", None):
        parametersDict["returnfields"] = {
            "_id": 0,
            "name": 1,
            "share": 1,
            "summ": 1,
            "inn": 1,
            "num": 1,
        }
    return parametersDict
| 29.371795
| 89
| 0.590485
| 965
| 11,455
| 6.941969
| 0.115026
| 0.179131
| 0.048067
| 0.064786
| 0.917152
| 0.903866
| 0.891029
| 0.869981
| 0.850724
| 0.832363
| 0
| 0.023273
| 0.29856
| 11,455
| 389
| 90
| 29.447301
| 0.810454
| 0.074989
| 0
| 0.834375
| 0
| 0
| 0.16375
| 0.006346
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046875
| false
| 0.04375
| 0
| 0
| 0.09375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0c050caeea403f34df6fd62e8421208888e77566
| 503
|
py
|
Python
|
functions_make_shirt_anthro_club.py
|
julencosme/python-crash-course
|
6b37d7346e235273c266110932207cd67ce4eb0e
|
[
"MIT"
] | null | null | null |
functions_make_shirt_anthro_club.py
|
julencosme/python-crash-course
|
6b37d7346e235273c266110932207cd67ce4eb0e
|
[
"MIT"
] | null | null | null |
functions_make_shirt_anthro_club.py
|
julencosme/python-crash-course
|
6b37d7346e235273c266110932207cd67ce4eb0e
|
[
"MIT"
] | null | null | null |
def make_shirt(size, message):
    """Display information regarding the size and message of a shirt."""
    # f-string instead of dated "+" concatenation; output is identical.
    print(f"The shirt size is {size} and the message will read: {message}.")


make_shirt('large', 'Archaeology Club')
def make_shirt(size, message):
    """Display information regarding the size and message of a shirt."""
    # f-string instead of dated "+" concatenation; output is identical.
    print(f"The shirt size is {size} and the message will read: {message}.")


make_shirt(size='large', message='Archaeology Club')
| 29.588235
| 72
| 0.646123
| 66
| 503
| 4.863636
| 0.287879
| 0.140187
| 0.121495
| 0.099688
| 0.841122
| 0.841122
| 0.841122
| 0.841122
| 0.841122
| 0.841122
| 0
| 0
| 0.222664
| 503
| 16
| 73
| 31.4375
| 0.820972
| 0.248509
| 0
| 0.75
| 0
| 0
| 0.370572
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.25
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0c05ddf647aeb0272d2db1a00c2076be2092ee9b
| 19,412
|
py
|
Python
|
tests/test_managedblockchain/test_managedblockchain_nodes.py
|
orenmazor/moto
|
4778377e8ecaf729d26602a2c5202b72c1438503
|
[
"Apache-2.0"
] | null | null | null |
tests/test_managedblockchain/test_managedblockchain_nodes.py
|
orenmazor/moto
|
4778377e8ecaf729d26602a2c5202b72c1438503
|
[
"Apache-2.0"
] | 4
|
2017-09-30T07:52:52.000Z
|
2021-12-13T06:56:55.000Z
|
tests/test_managedblockchain/test_managedblockchain_nodes.py
|
orenmazor/moto
|
4778377e8ecaf729d26602a2c5202b72c1438503
|
[
"Apache-2.0"
] | 2
|
2021-11-24T08:05:43.000Z
|
2021-11-25T16:18:48.000Z
|
from __future__ import unicode_literals
import boto3
import pytest
import sure # noqa
from botocore.exceptions import ClientError
from moto import mock_managedblockchain
from . import helpers
@mock_managedblockchain
def test_create_node():
    """Happy path: create, list, get, update and delete a node on a network."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    # Create network
    response = conn.create_network(
        Name="testnetwork1",
        Description="Test Network 1",
        Framework="HYPERLEDGER_FABRIC",
        FrameworkVersion="1.2",
        FrameworkConfiguration=helpers.default_frameworkconfiguration,
        VotingPolicy=helpers.default_votingpolicy,
        MemberConfiguration=helpers.default_memberconfiguration,
    )
    network_id = response["NetworkId"]
    member_id = response["MemberId"]

    # Create a node
    response = conn.create_node(
        NetworkId=network_id,
        MemberId=member_id,
        NodeConfiguration=helpers.default_nodeconfiguration,
    )
    node_id = response["NodeId"]

    # Find node in full list
    response = conn.list_nodes(NetworkId=network_id, MemberId=member_id)
    nodes = response["Nodes"]
    nodes.should.have.length_of(1)
    helpers.node_id_exist_in_list(nodes, node_id).should.equal(True)

    # Get node details
    response = conn.get_node(NetworkId=network_id, MemberId=member_id, NodeId=node_id)
    response["Node"]["AvailabilityZone"].should.equal("us-east-1a")

    # Update node
    # Flip the Cloudwatch chaincode-log flag relative to the default config.
    logconfignewenabled = not helpers.default_nodeconfiguration[
        "LogPublishingConfiguration"
    ]["Fabric"]["ChaincodeLogs"]["Cloudwatch"]["Enabled"]
    logconfignew = {
        "Fabric": {"ChaincodeLogs": {"Cloudwatch": {"Enabled": logconfignewenabled}}}
    }
    conn.update_node(
        NetworkId=network_id,
        MemberId=member_id,
        NodeId=node_id,
        LogPublishingConfiguration=logconfignew,
    )

    # Delete node
    conn.delete_node(
        NetworkId=network_id, MemberId=member_id, NodeId=node_id,
    )

    # Find node in full list (deleted nodes are still listed by default)
    response = conn.list_nodes(NetworkId=network_id, MemberId=member_id)
    nodes = response["Nodes"]
    nodes.should.have.length_of(1)
    helpers.node_id_exist_in_list(nodes, node_id).should.equal(True)

    # Find node in full list - only DELETED
    response = conn.list_nodes(
        NetworkId=network_id, MemberId=member_id, Status="DELETED"
    )
    nodes = response["Nodes"]
    nodes.should.have.length_of(1)
    helpers.node_id_exist_in_list(nodes, node_id).should.equal(True)

    # But cannot get
    with pytest.raises(ClientError) as ex:
        conn.get_node(NetworkId=network_id, MemberId=member_id, NodeId=node_id)
    err = ex.value.response["Error"]
    err["Code"].should.equal("ResourceNotFoundException")
    err["Message"].should.contain("Node {0} not found".format(node_id))
@mock_managedblockchain
def test_create_node_standard_edition():
    """STANDARD-edition networks accept an explicit InstanceType; deleting a member removes its nodes."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    frameworkconfiguration = {"Fabric": {"Edition": "STANDARD"}}

    response = conn.create_network(
        Name="testnetwork1",
        Description="Test Network 1",
        Framework="HYPERLEDGER_FABRIC",
        FrameworkVersion="1.2",
        FrameworkConfiguration=frameworkconfiguration,
        VotingPolicy=helpers.default_votingpolicy,
        MemberConfiguration=helpers.default_memberconfiguration,
    )
    network_id = response["NetworkId"]
    member_id = response["MemberId"]

    # Instance type only allowed with standard edition
    logconfigbad = dict(helpers.default_nodeconfiguration)
    logconfigbad["InstanceType"] = "bc.t3.large"
    response = conn.create_node(
        NetworkId=network_id, MemberId=member_id, NodeConfiguration=logconfigbad,
    )
    node_id = response["NodeId"]

    # Get node details
    response = conn.get_node(NetworkId=network_id, MemberId=member_id, NodeId=node_id)
    response["Node"]["InstanceType"].should.equal("bc.t3.large")

    # Need another member so the network does not get deleted
    # Create proposal
    response = conn.create_proposal(
        NetworkId=network_id,
        MemberId=member_id,
        Actions=helpers.default_policy_actions,
    )
    proposal_id = response["ProposalId"]

    # Vote yes
    response = conn.vote_on_proposal(
        NetworkId=network_id,
        ProposalId=proposal_id,
        VoterMemberId=member_id,
        Vote="YES",
    )

    # Get the invitation
    response = conn.list_invitations()
    invitation_id = response["Invitations"][0]["InvitationId"]

    # Create the member
    response = conn.create_member(
        InvitationId=invitation_id,
        NetworkId=network_id,
        MemberConfiguration=helpers.create_member_configuration(
            "testmember2", "admin", "Admin12345", False, "Test Member 2"
        ),
    )

    # Remove member 1 - should remove nodes
    conn.delete_member(NetworkId=network_id, MemberId=member_id)

    # Should now be an exception
    with pytest.raises(ClientError) as ex:
        conn.list_nodes(NetworkId=network_id, MemberId=member_id)
    err = ex.value.response["Error"]
    err["Code"].should.equal("ResourceNotFoundException")
    err["Message"].should.contain("Member {0} not found".format(member_id))
@mock_managedblockchain
def test_create_too_many_nodes():
    """A STARTER-edition member may hold at most two nodes; the third create fails."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    # Create network
    response = conn.create_network(
        Name="testnetwork1",
        Description="Test Network 1",
        Framework="HYPERLEDGER_FABRIC",
        FrameworkVersion="1.2",
        FrameworkConfiguration=helpers.default_frameworkconfiguration,
        VotingPolicy=helpers.default_votingpolicy,
        MemberConfiguration=helpers.default_memberconfiguration,
    )
    network_id = response["NetworkId"]
    member_id = response["MemberId"]

    # Create a node
    response = conn.create_node(
        NetworkId=network_id,
        MemberId=member_id,
        NodeConfiguration=helpers.default_nodeconfiguration,
    )

    # Create another node
    response = conn.create_node(
        NetworkId=network_id,
        MemberId=member_id,
        NodeConfiguration=helpers.default_nodeconfiguration,
    )

    # Find node in full list
    response = conn.list_nodes(NetworkId=network_id, MemberId=member_id)
    nodes = response["Nodes"]
    nodes.should.have.length_of(2)

    # Try to create one too many nodes
    with pytest.raises(ClientError) as ex:
        conn.create_node(
            NetworkId=network_id,
            MemberId=member_id,
            NodeConfiguration=helpers.default_nodeconfiguration,
        )
    err = ex.value.response["Error"]
    err["Code"].should.equal("ResourceLimitExceededException")
    err["Message"].should.contain(
        "Maximum number of nodes exceeded in member {0}".format(member_id)
    )
@mock_managedblockchain
def test_create_node_badnetwork():
    """create_node against a nonexistent network raises ResourceNotFoundException."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    with pytest.raises(ClientError) as ex:
        conn.create_node(
            NetworkId="n-ABCDEFGHIJKLMNOP0123456789",
            MemberId="m-ABCDEFGHIJKLMNOP0123456789",
            NodeConfiguration=helpers.default_nodeconfiguration,
        )
    err = ex.value.response["Error"]
    err["Code"].should.equal("ResourceNotFoundException")
    err["Message"].should.contain("Network n-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_create_node_badmember():
    """create_node with an unknown member id raises ResourceNotFoundException."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    response = conn.create_network(
        Name="testnetwork1",
        Description="Test Network 1",
        Framework="HYPERLEDGER_FABRIC",
        FrameworkVersion="1.2",
        FrameworkConfiguration=helpers.default_frameworkconfiguration,
        VotingPolicy=helpers.default_votingpolicy,
        MemberConfiguration=helpers.default_memberconfiguration,
    )
    network_id = response["NetworkId"]

    with pytest.raises(ClientError) as ex:
        conn.create_node(
            NetworkId=network_id,
            MemberId="m-ABCDEFGHIJKLMNOP0123456789",
            NodeConfiguration=helpers.default_nodeconfiguration,
        )
    err = ex.value.response["Error"]
    err["Code"].should.equal("ResourceNotFoundException")
    err["Message"].should.contain("Member m-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_create_node_badnodeconfig():
    """Invalid node configurations (instance type, edition mismatch, AZ) raise InvalidRequestException."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    response = conn.create_network(
        Name="testnetwork1",
        Description="Test Network 1",
        Framework="HYPERLEDGER_FABRIC",
        FrameworkVersion="1.2",
        FrameworkConfiguration=helpers.default_frameworkconfiguration,
        VotingPolicy=helpers.default_votingpolicy,
        MemberConfiguration=helpers.default_memberconfiguration,
    )
    network_id = response["NetworkId"]
    member_id = response["MemberId"]

    # Incorrect instance type
    logconfigbad = dict(helpers.default_nodeconfiguration)
    logconfigbad["InstanceType"] = "foo"
    with pytest.raises(ClientError) as ex:
        conn.create_node(
            NetworkId=network_id, MemberId=member_id, NodeConfiguration=logconfigbad
        )
    err = ex.value.response["Error"]
    err["Code"].should.equal("InvalidRequestException")
    err["Message"].should.contain("Requested instance foo isn't supported.")

    # Incorrect instance type for edition (network was created as STARTER)
    logconfigbad = dict(helpers.default_nodeconfiguration)
    logconfigbad["InstanceType"] = "bc.t3.large"
    with pytest.raises(ClientError) as ex:
        conn.create_node(
            NetworkId=network_id, MemberId=member_id, NodeConfiguration=logconfigbad
        )
    err = ex.value.response["Error"]
    err["Code"].should.equal("InvalidRequestException")
    err["Message"].should.contain(
        "Instance type bc.t3.large is not supported with STARTER Edition networks."
    )

    # Incorrect availability zone
    logconfigbad = dict(helpers.default_nodeconfiguration)
    logconfigbad["AvailabilityZone"] = "us-east-11"
    with pytest.raises(ClientError) as ex:
        conn.create_node(
            NetworkId=network_id, MemberId=member_id, NodeConfiguration=logconfigbad
        )
    err = ex.value.response["Error"]
    err["Code"].should.equal("InvalidRequestException")
    err["Message"].should.contain("Availability Zone is not valid")
@mock_managedblockchain
def test_list_nodes_badnetwork():
    """list_nodes against a nonexistent network raises ResourceNotFoundException."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    with pytest.raises(ClientError) as ex:
        conn.list_nodes(
            NetworkId="n-ABCDEFGHIJKLMNOP0123456789",
            MemberId="m-ABCDEFGHIJKLMNOP0123456789",
        )
    err = ex.value.response["Error"]
    err["Code"].should.equal("ResourceNotFoundException")
    err["Message"].should.contain("Network n-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_list_nodes_badmember():
    """list_nodes with an unknown member id raises ResourceNotFoundException."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    response = conn.create_network(
        Name="testnetwork1",
        Description="Test Network 1",
        Framework="HYPERLEDGER_FABRIC",
        FrameworkVersion="1.2",
        FrameworkConfiguration=helpers.default_frameworkconfiguration,
        VotingPolicy=helpers.default_votingpolicy,
        MemberConfiguration=helpers.default_memberconfiguration,
    )
    network_id = response["NetworkId"]

    with pytest.raises(ClientError) as ex:
        conn.list_nodes(
            NetworkId=network_id, MemberId="m-ABCDEFGHIJKLMNOP0123456789",
        )
    err = ex.value.response["Error"]
    err["Code"].should.equal("ResourceNotFoundException")
    err["Message"].should.contain("Member m-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_get_node_badnetwork():
    """get_node against a nonexistent network raises ResourceNotFoundException."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    with pytest.raises(ClientError) as ex:
        conn.get_node(
            NetworkId="n-ABCDEFGHIJKLMNOP0123456789",
            MemberId="m-ABCDEFGHIJKLMNOP0123456789",
            NodeId="nd-ABCDEFGHIJKLMNOP0123456789",
        )
    err = ex.value.response["Error"]
    err["Code"].should.equal("ResourceNotFoundException")
    err["Message"].should.contain("Network n-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_get_node_badmember():
    """get_node with an unknown member id raises ResourceNotFoundException."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    response = conn.create_network(
        Name="testnetwork1",
        Description="Test Network 1",
        Framework="HYPERLEDGER_FABRIC",
        FrameworkVersion="1.2",
        FrameworkConfiguration=helpers.default_frameworkconfiguration,
        VotingPolicy=helpers.default_votingpolicy,
        MemberConfiguration=helpers.default_memberconfiguration,
    )
    network_id = response["NetworkId"]

    with pytest.raises(ClientError) as ex:
        conn.get_node(
            NetworkId=network_id,
            MemberId="m-ABCDEFGHIJKLMNOP0123456789",
            NodeId="nd-ABCDEFGHIJKLMNOP0123456789",
        )
    err = ex.value.response["Error"]
    err["Code"].should.equal("ResourceNotFoundException")
    err["Message"].should.contain("Member m-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_get_node_badnode():
    """get_node with an unknown node id raises ResourceNotFoundException."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    response = conn.create_network(
        Name="testnetwork1",
        Description="Test Network 1",
        Framework="HYPERLEDGER_FABRIC",
        FrameworkVersion="1.2",
        FrameworkConfiguration=helpers.default_frameworkconfiguration,
        VotingPolicy=helpers.default_votingpolicy,
        MemberConfiguration=helpers.default_memberconfiguration,
    )
    network_id = response["NetworkId"]
    member_id = response["MemberId"]

    with pytest.raises(ClientError) as ex:
        conn.get_node(
            NetworkId=network_id,
            MemberId=member_id,
            NodeId="nd-ABCDEFGHIJKLMNOP0123456789",
        )
    err = ex.value.response["Error"]
    err["Code"].should.equal("ResourceNotFoundException")
    err["Message"].should.contain("Node nd-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_delete_node_badnetwork():
    """delete_node against a nonexistent network raises ResourceNotFoundException."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    with pytest.raises(ClientError) as ex:
        conn.delete_node(
            NetworkId="n-ABCDEFGHIJKLMNOP0123456789",
            MemberId="m-ABCDEFGHIJKLMNOP0123456789",
            NodeId="nd-ABCDEFGHIJKLMNOP0123456789",
        )
    err = ex.value.response["Error"]
    err["Code"].should.equal("ResourceNotFoundException")
    err["Message"].should.contain("Network n-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_delete_node_badmember():
    """delete_node with an unknown member id raises ResourceNotFoundException."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    response = conn.create_network(
        Name="testnetwork1",
        Description="Test Network 1",
        Framework="HYPERLEDGER_FABRIC",
        FrameworkVersion="1.2",
        FrameworkConfiguration=helpers.default_frameworkconfiguration,
        VotingPolicy=helpers.default_votingpolicy,
        MemberConfiguration=helpers.default_memberconfiguration,
    )
    network_id = response["NetworkId"]

    with pytest.raises(ClientError) as ex:
        conn.delete_node(
            NetworkId=network_id,
            MemberId="m-ABCDEFGHIJKLMNOP0123456789",
            NodeId="nd-ABCDEFGHIJKLMNOP0123456789",
        )
    err = ex.value.response["Error"]
    err["Code"].should.equal("ResourceNotFoundException")
    err["Message"].should.contain("Member m-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_delete_node_badnode():
    """delete_node with an unknown node id raises ResourceNotFoundException."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    response = conn.create_network(
        Name="testnetwork1",
        Description="Test Network 1",
        Framework="HYPERLEDGER_FABRIC",
        FrameworkVersion="1.2",
        FrameworkConfiguration=helpers.default_frameworkconfiguration,
        VotingPolicy=helpers.default_votingpolicy,
        MemberConfiguration=helpers.default_memberconfiguration,
    )
    network_id = response["NetworkId"]
    member_id = response["MemberId"]

    with pytest.raises(ClientError) as ex:
        conn.delete_node(
            NetworkId=network_id,
            MemberId=member_id,
            NodeId="nd-ABCDEFGHIJKLMNOP0123456789",
        )
    err = ex.value.response["Error"]
    err["Code"].should.equal("ResourceNotFoundException")
    err["Message"].should.contain("Node nd-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_update_node_badnetwork():
    """update_node against a nonexistent network raises ResourceNotFoundException."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    with pytest.raises(ClientError) as ex:
        conn.update_node(
            NetworkId="n-ABCDEFGHIJKLMNOP0123456789",
            MemberId="m-ABCDEFGHIJKLMNOP0123456789",
            NodeId="nd-ABCDEFGHIJKLMNOP0123456789",
            LogPublishingConfiguration=helpers.default_nodeconfiguration[
                "LogPublishingConfiguration"
            ],
        )
    err = ex.value.response["Error"]
    err["Code"].should.equal("ResourceNotFoundException")
    err["Message"].should.contain("Network n-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_update_node_badmember():
    """update_node with an unknown member id raises ResourceNotFoundException."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    response = conn.create_network(
        Name="testnetwork1",
        Description="Test Network 1",
        Framework="HYPERLEDGER_FABRIC",
        FrameworkVersion="1.2",
        FrameworkConfiguration=helpers.default_frameworkconfiguration,
        VotingPolicy=helpers.default_votingpolicy,
        MemberConfiguration=helpers.default_memberconfiguration,
    )
    network_id = response["NetworkId"]

    with pytest.raises(ClientError) as ex:
        conn.update_node(
            NetworkId=network_id,
            MemberId="m-ABCDEFGHIJKLMNOP0123456789",
            NodeId="nd-ABCDEFGHIJKLMNOP0123456789",
            LogPublishingConfiguration=helpers.default_nodeconfiguration[
                "LogPublishingConfiguration"
            ],
        )
    err = ex.value.response["Error"]
    err["Code"].should.equal("ResourceNotFoundException")
    err["Message"].should.contain("Member m-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_update_node_badnode():
    """update_node with an unknown node id raises ResourceNotFoundException."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    response = conn.create_network(
        Name="testnetwork1",
        Description="Test Network 1",
        Framework="HYPERLEDGER_FABRIC",
        FrameworkVersion="1.2",
        FrameworkConfiguration=helpers.default_frameworkconfiguration,
        VotingPolicy=helpers.default_votingpolicy,
        MemberConfiguration=helpers.default_memberconfiguration,
    )
    network_id = response["NetworkId"]
    member_id = response["MemberId"]

    with pytest.raises(ClientError) as ex:
        conn.update_node(
            NetworkId=network_id,
            MemberId=member_id,
            NodeId="nd-ABCDEFGHIJKLMNOP0123456789",
            LogPublishingConfiguration=helpers.default_nodeconfiguration[
                "LogPublishingConfiguration"
            ],
        )
    err = ex.value.response["Error"]
    err["Code"].should.equal("ResourceNotFoundException")
    err["Message"].should.contain("Node nd-ABCDEFGHIJKLMNOP0123456789 not found")
| 35.23049
| 86
| 0.69797
| 1,917
| 19,412
| 6.90506
| 0.083464
| 0.052882
| 0.040795
| 0.054997
| 0.878296
| 0.876936
| 0.858956
| 0.848531
| 0.841278
| 0.832968
| 0
| 0.030542
| 0.197146
| 19,412
| 550
| 87
| 35.294545
| 0.8188
| 0.031733
| 0
| 0.753986
| 0
| 0
| 0.207641
| 0.086637
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038724
| false
| 0
| 0.015945
| 0
| 0.05467
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0c5b11eb1d09d9cb3dfddbdeede503e0411a5198
| 136
|
py
|
Python
|
slot_attention/__init__.py
|
ajabri/slot-attention
|
32acb6614f1bd511f2dc3c263f852ed2dbe9c213
|
[
"MIT"
] | 247
|
2020-06-29T19:08:50.000Z
|
2022-03-30T08:36:24.000Z
|
slot_attention/__init__.py
|
ajabri/slot-attention
|
32acb6614f1bd511f2dc3c263f852ed2dbe9c213
|
[
"MIT"
] | 7
|
2020-07-01T01:32:49.000Z
|
2021-02-01T20:13:49.000Z
|
slot_attention/__init__.py
|
ajabri/slot-attention
|
32acb6614f1bd511f2dc3c263f852ed2dbe9c213
|
[
"MIT"
] | 26
|
2020-07-01T00:55:45.000Z
|
2022-03-25T12:05:24.000Z
|
from slot_attention.slot_attention import SlotAttention
from slot_attention.slot_attention_experimental import SlotAttentionExperimental
| 68
| 80
| 0.933824
| 15
| 136
| 8.133333
| 0.466667
| 0.42623
| 0.278689
| 0.344262
| 0.491803
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051471
| 136
| 2
| 80
| 68
| 0.945736
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
a7828daa74d1c8d8c37d01305c8089247a3d54d4
| 13,717
|
py
|
Python
|
Visualizer/Source/Visualizer/ControlledDataSet.py
|
NB4444/BachelorProjectEnergyManager
|
d1fd93dcc83af6d6acd36b7efda364ac2aab90eb
|
[
"MIT"
] | null | null | null |
Visualizer/Source/Visualizer/ControlledDataSet.py
|
NB4444/BachelorProjectEnergyManager
|
d1fd93dcc83af6d6acd36b7efda364ac2aab90eb
|
[
"MIT"
] | null | null | null |
Visualizer/Source/Visualizer/ControlledDataSet.py
|
NB4444/BachelorProjectEnergyManager
|
d1fd93dcc83af6d6acd36b7efda364ac2aab90eb
|
[
"MIT"
] | null | null | null |
import collections
from enum import Enum
from matplotlib import pyplot
from typing import Any, OrderedDict
from Visualizer.DataSet import DataSet
from Visualizer.Plotting.Plot import Plot
from Visualizer.Plotting.ScatterPlot import ScatterPlot
class ControlComparison(Enum):
    """Strategy for choosing the control (baseline) value to compare runs against."""
    MEAN = 0
    MEDIAN = 1
    OPTIMAL = 2
class ControlledDataSet(object):
    def __init__(self, data_set: DataSet, control_data_set: DataSet):
        """Pair an experiment data set with its control (baseline) data set."""
        self.data_set = data_set
        self.control_data_set = control_data_set
    def energy_savings_vs_runtime_increase(self, control_comparison=ControlComparison.MEAN, normalized=True,
                                           use_ear=False):
        """Compute per-run energy savings keyed by runtime increase vs. the control set.

        Returns an ordered mapping {series name: {runtime increase: energy savings}}.
        When ``normalized`` is True both axes are percentages of the control values;
        otherwise runtime is converted to seconds and energy stays in raw units.
        ``use_ear`` is forwarded to the energy-consumption accessors.
        """
        # Pick the control baseline according to the comparison strategy.
        control_energy_consumption = None
        control_runtime = None
        if control_comparison == ControlComparison.MEAN:
            control_energy_consumption = self.control_data_set.mean_energy_consumption(use_ear)
            control_runtime = self.control_data_set.mean_runtime
        elif control_comparison == ControlComparison.MEDIAN:
            control_energy_consumption = self.control_data_set.median_energy_consumption(use_ear)
            control_runtime = self.control_data_set.median_runtime
        elif control_comparison == ControlComparison.OPTIMAL:
            # "Optimal" uses the single best control run for each metric.
            control_energy_consumption = self.control_data_set.minimum_energy_consumption_profiler_session.total_energy_consumption(
                use_ear)
            control_runtime = self.control_data_set.minimum_runtime_profiler_session.total_runtime

        data: OrderedDict[str, OrderedDict[Any, Any]] = collections.OrderedDict({})
        for profiler_session in self.data_set.data:
            # Positive savings = run used less energy than the control.
            energy_savings = control_energy_consumption - profiler_session.total_energy_consumption(use_ear)
            # Positive increase = run took longer than the control.
            runtime_increase = profiler_session.total_runtime - control_runtime

            profile = "Runs"
            if profile not in data:
                data[profile] = collections.OrderedDict()
            if normalized:
                data[profile][
                    runtime_increase / control_runtime * 100] = energy_savings / control_energy_consumption * 100
            else:
                data[profile][Plot.ns_to_s(runtime_increase)] = energy_savings

        return data
def energy_savings_vs_runtime_increase_plot(self, control_comparison=ControlComparison.MEAN, normalized=True,
use_ear=False):
plot_series = self.energy_savings_vs_runtime_increase(control_comparison, normalized)
values = []
for profiler_session in self.data_set.data:
if "maximumCPUClockRate" in profiler_session.profile:
values.append(
profiler_session.profile["maximumCPUClockRate"] + profiler_session.profile["maximumGPUClockRate"])
max_value = max(values, default=None)
min_value = min(values, default=None)
return ScatterPlot(
title="Energy Savings vs. Runtime Increase", plot_series=plot_series,
x_label="Runtime Increase (" + ("% of optimal" if normalized else "Seconds") + ")",
y_label="Energy Savings (" + ("% of optimal" if normalized else "Joules") + ")",
colors=[pyplot.get_cmap("gist_rainbow")((value - min_value) / (max_value - min_value)) for value in
values] if len(values) > 0 else None,
labels=[profiler_session.plot_label(use_ear) for profiler_session in self.data_set.data]
)
    def energy_savings_vs_flops_decrease(self, control_comparison=ControlComparison.MEAN, normalized=True,
                                         use_ear=False):
        """Compute per-run energy savings keyed by FLOPs decrease vs. the control set.

        Returns an ordered mapping {series name: {flops decrease: energy savings}}.
        When ``normalized`` is True both axes are percentages of the control values.
        ``use_ear`` is forwarded to the energy-consumption accessors.
        """
        # Pick the control baseline according to the comparison strategy.
        control_energy_consumption = None
        control_flops = None
        if control_comparison == ControlComparison.MEAN:
            control_energy_consumption = self.control_data_set.mean_energy_consumption(use_ear)
            control_flops = self.control_data_set.mean_flops
        elif control_comparison == ControlComparison.MEDIAN:
            control_energy_consumption = self.control_data_set.median_energy_consumption(use_ear)
            control_flops = self.control_data_set.median_flops
        elif control_comparison == ControlComparison.OPTIMAL:
            # "Optimal" uses the single best control run for each metric.
            control_energy_consumption = self.control_data_set.minimum_energy_consumption_profiler_session.total_energy_consumption(
                use_ear)
            control_flops = self.control_data_set.maximum_flops_profiler_session.total_flops

        data: OrderedDict[str, OrderedDict[Any, Any]] = collections.OrderedDict({})
        for profiler_session in self.data_set.data:
            # Positive savings = run used less energy than the control.
            energy_savings = control_energy_consumption - profiler_session.total_energy_consumption(use_ear)
            # Positive decrease = run performed fewer FLOPs than the control.
            flops_decrease = control_flops - profiler_session.total_flops

            profile = "Runs"
            if profile not in data:
                data[profile] = collections.OrderedDict()
            if normalized:
                data[profile][flops_decrease / control_flops * 100] = energy_savings / control_energy_consumption * 100
            else:
                data[profile][flops_decrease] = energy_savings

        return data
def energy_savings_vs_flops_decrease_plot(self, control_comparison=ControlComparison.MEAN, normalized=True,
use_ear=False):
plot_series = self.energy_savings_vs_flops_decrease(control_comparison, normalized)
values = []
for profiler_session in self.data_set.data:
if "maximumCPUClockRate" in profiler_session.profile:
values.append(
profiler_session.profile["maximumCPUClockRate"] + profiler_session.profile["maximumGPUClockRate"])
max_value = max(values, default=None)
min_value = min(values, default=None)
return ScatterPlot(
title="Energy Savings vs. FLOPs Decrease", plot_series=plot_series,
x_label="FLOPs Decrease (" + ("% of optimal" if normalized else "Operations") + ")",
y_label="Energy Savings (" + ("% of optimal" if normalized else "Joules") + ")",
colors=[pyplot.get_cmap("gist_rainbow")((value - min_value) / (max_value - min_value)) for value in
values] if len(values) > 0 else None,
labels=[profiler_session.plot_label(use_ear) for profiler_session in self.data_set.data]
)
    def core_clock_rate_vs_gpu_clock_rate_vs_energy_savings(self, control_comparison=ControlComparison.MEAN,
                                                            use_ear=False):
        """Map (CPU clock rate, GPU clock rate) to energy savings vs. the control set.

        Returns an ordered mapping
        {"Saves Energy"/"Costs Energy": {core rate: {gpu rate: savings}}}.
        ``use_ear`` is forwarded to the energy-consumption accessors.
        """
        # Pick the control baseline according to the comparison strategy.
        control_energy_consumption = None
        if control_comparison == ControlComparison.MEAN:
            control_energy_consumption = self.control_data_set.mean_energy_consumption(use_ear)
        elif control_comparison == ControlComparison.MEDIAN:
            control_energy_consumption = self.control_data_set.median_energy_consumption(use_ear)
        elif control_comparison == ControlComparison.OPTIMAL:
            control_energy_consumption = self.control_data_set.minimum_energy_consumption_profiler_session.total_energy_consumption(
                use_ear)

        data: OrderedDict[
            str, OrderedDict[int, OrderedDict[int, int]]] = collections.OrderedDict({})
        for profiler_session in self.data_set.data:
            savings = control_energy_consumption - profiler_session.total_energy_consumption(use_ear)

            # Split series by whether the run saved or cost energy.
            profile = "Saves Energy" if savings >= 0 else "Costs Energy"
            if profile not in data:
                data[profile] = collections.OrderedDict()

            core_clock_rate = profiler_session.profile["maximumCPUClockRate"]
            if core_clock_rate not in data[profile]:
                data[profile][core_clock_rate] = collections.OrderedDict()

            gpu_clock_rate = profiler_session.profile["maximumGPUClockRate"]
            data[profile][core_clock_rate][gpu_clock_rate] = savings

        return data
def core_clock_rate_vs_gpu_clock_rate_vs_energy_savings_scatter_plot(self,
control_comparison=ControlComparison.MEAN,
use_ear=False):
return ScatterPlot(
title="Core Frequency vs. GPU Frequency vs. Energy Savings",
plot_series=self.core_clock_rate_vs_gpu_clock_rate_vs_energy_savings(control_comparison),
x_label="Core Clock Rate (Hertz)",
y_label="GPU Clock Rate (Hertz)",
z_label="Energy Savings (Joules)",
labels=[profiler_session.plot_label(use_ear) for profiler_session in self.data_set.data]
)
def core_clock_rate_vs_gpu_clock_rate_vs_runtime_increase(self, control_comparison=ControlComparison.MEAN):
control_runtime = None
if control_comparison == ControlComparison.MEAN:
control_runtime = self.control_data_set.mean_runtime
elif control_comparison == ControlComparison.MEDIAN:
control_runtime = self.control_data_set.median_runtime
elif control_comparison == ControlComparison.OPTIMAL:
control_runtime = self.control_data_set.minimum_runtime_profiler_session.total_runtime
data: OrderedDict[
str, OrderedDict[int, OrderedDict[int, int]]] = collections.OrderedDict({})
for profiler_session in self.data_set.data:
increase = Plot.ns_to_s(profiler_session.total_runtime - control_runtime)
profile = "Saves Time" if increase <= 0 else "Costs Time"
if profile not in data:
data[profile] = collections.OrderedDict()
core_clock_rate = profiler_session.profile["maximumCPUClockRate"]
if core_clock_rate not in data[profile]:
data[profile][core_clock_rate] = collections.OrderedDict()
gpu_clock_rate = profiler_session.profile["maximumGPUClockRate"]
data[profile][core_clock_rate][gpu_clock_rate] = increase
return data
def core_clock_rate_vs_gpu_clock_rate_vs_runtime_increase_scatter_plot(self,
control_comparison=ControlComparison.MEAN,
use_ear=False):
return ScatterPlot(
title="Core Frequency vs. GPU Frequency vs. Runtime Increase",
plot_series=self.core_clock_rate_vs_gpu_clock_rate_vs_runtime_increase(control_comparison),
x_label="Core Clock Rate (Hertz)",
y_label="GPU Clock Rate (Hertz)",
z_label="Runtime Increase (Seconds)",
labels=[profiler_session.plot_label(use_ear) for profiler_session in self.data_set.data]
)
def core_clock_rate_vs_gpu_clock_rate_vs_energy_harvests(self, control_comparison=ControlComparison.MEAN,
use_ear=False):
control_energy_consumption = None
if control_comparison == ControlComparison.MEAN:
control_energy_consumption = self.control_data_set.mean_energy_consumption(use_ear)
elif control_comparison == ControlComparison.MEDIAN:
control_energy_consumption = self.control_data_set.median_energy_consumption(use_ear)
elif control_comparison == ControlComparison.OPTIMAL:
control_energy_consumption = self.control_data_set.minimum_energy_consumption_profiler_session.total_energy_consumption(
use_ear)
control_runtime = None
if control_comparison == ControlComparison.MEAN:
control_runtime = self.control_data_set.mean_runtime
elif control_comparison == ControlComparison.MEDIAN:
control_runtime = self.control_data_set.median_runtime
elif control_comparison == ControlComparison.OPTIMAL:
control_runtime = self.control_data_set.minimum_runtime_profiler_session.total_runtime
data: OrderedDict[
str, OrderedDict[int, OrderedDict[int, int]]] = collections.OrderedDict({})
for profiler_session in self.data_set.data:
energy_savings = control_energy_consumption - profiler_session.total_energy_consumption(use_ear)
runtime_increase = Plot.ns_to_s(profiler_session.total_runtime - control_runtime)
profile = "Harvests Energy" if energy_savings >= 0 and runtime_increase <= 0 else "Costs Energy"
if profile not in data:
data[profile] = collections.OrderedDict()
core_clock_rate = profiler_session.profile["maximumCPUClockRate"]
if core_clock_rate not in data[profile]:
data[profile][core_clock_rate] = collections.OrderedDict()
gpu_clock_rate = profiler_session.profile["maximumGPUClockRate"]
data[profile][core_clock_rate][gpu_clock_rate] = energy_savings
return data
def core_clock_rate_vs_gpu_clock_rate_vs_energy_harvests_scatter_plot(self,
control_comparison=ControlComparison.MEAN,
use_ear=False):
return ScatterPlot(
title="Core Frequency vs. GPU Frequency vs. Energy Harvests",
plot_series=self.core_clock_rate_vs_gpu_clock_rate_vs_energy_harvests(control_comparison),
x_label="Core Clock Rate (Hertz)",
y_label="GPU Clock Rate (Hertz)",
z_label="Energy Savings (Joules)",
labels=[profiler_session.plot_label(use_ear) for profiler_session in self.data_set.data]
)
| 53.582031
| 132
| 0.666764
| 1,479
| 13,717
| 5.82691
| 0.068966
| 0.078324
| 0.110466
| 0.052216
| 0.918543
| 0.897772
| 0.884892
| 0.875377
| 0.859016
| 0.858204
| 0
| 0.00208
| 0.263906
| 13,717
| 255
| 133
| 53.792157
| 0.851441
| 0
| 0
| 0.70283
| 0
| 0
| 0.066268
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051887
| false
| 0
| 0.033019
| 0.014151
| 0.15566
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a78e2c5f07cdbd4544044f31a7531a598c9e41e9
| 2,673
|
py
|
Python
|
runner/runs/doom_bots_sweep.py
|
neevparikh/hierarchical-doom
|
082f794b9c6101c4e94f15bf4f93c718ee219ea5
|
[
"MIT"
] | 1
|
2021-11-19T19:39:36.000Z
|
2021-11-19T19:39:36.000Z
|
runner/runs/doom_bots_sweep.py
|
neevparikh/hierarchical-doom
|
082f794b9c6101c4e94f15bf4f93c718ee219ea5
|
[
"MIT"
] | null | null | null |
runner/runs/doom_bots_sweep.py
|
neevparikh/hierarchical-doom
|
082f794b9c6101c4e94f15bf4f93c718ee219ea5
|
[
"MIT"
] | null | null | null |
from runner.run_description import RunDescription, Experiment, ParamGrid
_params = ParamGrid([
    ('seed', [42]),
])

# Common CLI shared by all four experiments; only the aspect-ratio and Adam
# flags (and the UDP port) differ per variant.
# NOTE: '--rollout=32' appears twice in the command line; preserved as-is.
_BASE_CLI = (
    'python -m algorithms.appo.train_appo --env=doom_dwango5_bots_experimental '
    '--train_for_seconds=3600000 --algo=APPO --use_rnn=True --gamma=0.995 --env_frameskip=2 '
    '--rollout=32 --reward_scale=0.5 --num_workers=18 --num_envs_per_worker=20 --num_policies=1 '
    '--ppo_epochs=1 --rollout=32 --recurrence=32 --macro_batch=2048 --batch_size=2048 '
    '--res_w=128 --res_h=72 --wide_aspect_ratio='
)

# (experiment name, suffix appended to _BASE_CLI, UDP port)
_VARIANTS = [
    ('bots_128_fs2_wide', 'True', 35300),
    ('bots_128_fs2_narrow', 'False', 40300),
    ('bots_128_fs2_wide_adam0.5', 'True --adam_beta1=0.5', 45300),
    ('bots_128_fs2_narrow_adam0.5', 'False --adam_beta1=0.5', 50300),
]

_experiments = [
    Experiment(
        name,
        _BASE_CLI + suffix,
        _params.generate_params(randomize=False),
        dict(DOOM_DEFAULT_UDP_PORT=port),
    )
    for name, suffix, port in _VARIANTS
]

RUN_DESCRIPTION = RunDescription('doom_bots_v60_sweep',
                                 experiments=_experiments,
                                 pause_between_experiments=120,
                                 use_gpus=4,
                                 experiments_per_gpu=1,
                                 max_parallel=4)
| 66.825
| 409
| 0.689487
| 376
| 2,673
| 4.545213
| 0.236702
| 0.04213
| 0.039789
| 0.046811
| 0.856641
| 0.798128
| 0.798128
| 0.798128
| 0.798128
| 0.798128
| 0
| 0.09787
| 0.174336
| 2,673
| 39
| 410
| 68.538462
| 0.676484
| 0
| 0
| 0.333333
| 0
| 0.111111
| 0.623644
| 0.225215
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.027778
| 0
| 0.027778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3bef4a453b2029d62313a888cfe679efe08c8aae
| 129
|
py
|
Python
|
examples/dwf/dwf.py
|
useblocks/dwf
|
debfb79cecfa57310627c78c4e5c68e21f7c7b6f
|
[
"CC-BY-4.0"
] | 6
|
2017-11-24T08:47:06.000Z
|
2021-06-25T12:02:06.000Z
|
examples/dwf/dwf.py
|
useblocks/dwf
|
debfb79cecfa57310627c78c4e5c68e21f7c7b6f
|
[
"CC-BY-4.0"
] | null | null | null |
examples/dwf/dwf.py
|
useblocks/dwf
|
debfb79cecfa57310627c78c4e5c68e21f7c7b6f
|
[
"CC-BY-4.0"
] | null | null | null |
class Tool:
    """Base marker class; carries no behavior."""


class Documentation(Tool):
    """Marker subclass of Tool; carries no behavior."""


class Frustration(Tool):
    """Marker subclass of Tool; carries no behavior."""


class Dwf(Documentation, Frustration, Tool):
    """Marker class combining Documentation, Frustration and Tool."""
| 25.8
| 49
| 0.79845
| 17
| 129
| 6.058824
| 0.352941
| 0.31068
| 0.378641
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108527
| 129
| 4
| 50
| 32.25
| 0.895652
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
ce09f4f51a51d6982cdc96bf07737b96c2700aef
| 4,858
|
py
|
Python
|
exkaldi/function_config.py
|
luvwinnie/exkaldi
|
c1149e3c88dfc66084e8a534fd8f4d4d92556d35
|
[
"Apache-2.0"
] | 1
|
2021-04-02T03:02:14.000Z
|
2021-04-02T03:02:14.000Z
|
exkaldi/function_config.py
|
luvwinnie/exkaldi
|
c1149e3c88dfc66084e8a534fd8f4d4d92556d35
|
[
"Apache-2.0"
] | null | null | null |
exkaldi/function_config.py
|
luvwinnie/exkaldi
|
c1149e3c88dfc66084e8a534fd8f4d4d92556d35
|
[
"Apache-2.0"
] | null | null | null |
def configure(name):
    """Return the default option table for a Kaldi command-line tool.

    Each entry maps an option flag to ``[default_value, type]``, where ``type``
    is the Python type used to interpret the value. Returns None when *name*
    is not one of the known tools.
    """
    if name == 'compute_mfcc':
        return {"--allow-downsample": ["false", str],
                "--allow-upsample": ["false", str],
                "--blackman-coeff": [0.42, float],
                "--cepstral-lifter": [22, int],
                "--channel": [-1, int],
                "--debug-mel": ["false", str],
                "--dither": [1, int],
                "--energy-floor": [0, int],
                "--frame-length": [25, int],
                "--frame-shift": [10, int],
                "--high-freq": [0, int],
                "--htk-compat": ["false", str],
                "--low-freq": [20, int],
                "--max-feature-vectors": [-1, int],
                "--min-duration": [0, int],
                "--num-ceps": [13, int],
                "--num-mel-bins": [23, int],
                "--output-format": ["kaldi", str],
                "--preemphasis-coefficient": [0.97, float],
                "--raw-energy": ["true", str],
                "--remove-dc-offset": ["true", str],
                "--round-to-power-of-two": ["true", str],
                "--sample-frequency": [16000, int],
                "--snip-edges": ["false", str],
                "--subtract-mean": ["false", str],
                "--use-energy": ["true", str],
                "--utt2spk": ["", str],
                "--vtln-high": [-500, int],
                "--vtln-low": [100, int],
                "--vtln-map": ["", str],
                "--vtln-warp": [1, int],
                "--window-type": ["povey", str],
                "--write-utt2dur": ["", str]
                }
    elif name == 'compute_fbank':
        return {"--allow-downsample": ["false", str],
                "--allow-upsample": ["false", str],
                "--blackman-coeff": [0.42, float],
                "--channel": [-1, int],
                "--debug-mel": ["false", str],
                "--dither": [1, int],
                "--energy-floor": [0, int],
                "--frame-length": [25, int],
                "--frame-shift": [10, int],
                "--high-freq": [0, int],
                "--htk-compat": ["false", str],
                "--low-freq": [20, int],
                "--max-feature-vectors": [-1, int],
                "--min-duration": [0, int],
                "--num-mel-bins": [23, int],
                "--output-format": ["kaldi", str],
                "--preemphasis-coefficient": [0.97, float],
                "--raw-energy": ["true", str],
                "--remove-dc-offset": ["true", str],
                "--round-to-power-of-two": ["true", str],
                "--sample-frequency": [16000, int],
                "--snip-edges": ["false", str],
                "--subtract-mean": ["false", str],
                "--use-energy": ["true", str],
                "--use-log-fbank": ["true", str],
                "--use-power": ["true", str],
                "--utt2spk": ["", str],
                "--vtln-high": [-500, int],
                "--vtln-low": [100, int],
                "--vtln-map": ["", str],
                "--vtln-warp": [1, int],
                "--window-type": ["povey", str],
                "--write-utt2dur": ["", str]
                }
    elif name == 'compute_plp':
        return {"--allow-downsample": ["false", str],
                "--allow-upsample": ["false", str],
                "--blackman-coeff": [0.42, float],
                "--cepstral-lifter": [22, int],
                "--cepstral-scale": [1, int],
                "--channel": [-1, int],
                "--compress-factor": [0.33333, float],
                # Fix: was ['false', float] — a 'false' string cannot be parsed
                # as a float; use ["false", str] as in compute_mfcc/compute_fbank.
                "--debug-mel": ["false", str],
                "--dither": [1, int],
                "--energy-floor": [0, int],
                "--frame-length": [25, int],
                "--frame-shift": [10, int],
                "--high-freq": [0, int],
                "--htk-compat": ["false", str],
                "--low-freq": [20, int],
                "--lpc-order": [12, int],
                "--max-feature-vectors": [-1, int],
                "--min-duration": [0, int],
                "--num-ceps": [13, int],
                "--num-mel-bins": [23, int],
                "--output-format": ["kaldi", str],
                "--preemphasis-coefficient": [0.97, float],
                "--raw-energy": ["true", str],
                "--remove-dc-offset": ["true", str],
                "--round-to-power-of-two": ["true", str],
                "--sample-frequency": [16000, int],
                "--snip-edges": ["false", str],
                "--subtract-mean": ["false", str],
                "--use-energy": ["true", str],
                "--utt2spk": ["", str],
                "--vtln-high": [-500, int],
                "--vtln-low": [100, int],
                "--vtln-map": ["", str],
                "--vtln-warp": [1, int],
                "--window-type": ["povey", str],
                "--write-utt2dur": ["", str]
                }
    elif name == 'compute_spectrogram':
        return {"--allow-downsample": ["false", str],
                "--allow-upsample": ["false", str],
                "--blackman-coeff": [0.42, float],
                "--channel": [-1, int],
                "--dither": [1, int],
                "--energy-floor": [0, int],
                "--frame-length": [25, int],
                "--frame-shift": [10, int],
                "--max-feature-vectors": [-1, int],
                "--min-duration": [0, int],
                "--output-format": ["kaldi", str],
                "--preemphasis-coefficient": [0.97, float],
                "--raw-energy": ["true", str],
                "--remove-dc-offset": ["true", str],
                "--round-to-power-of-two": ["true", str],
                "--sample-frequency": [16000, int],
                "--snip-edges": ["false", str],
                "--subtract-mean": ["false", str],
                "--window-type": ["povey", str],
                "--write-utt2dur": ["", str]
                }
    elif name == 'decode_lattice':
        return {"--acoustic-scale": [0.1, float],
                "--allow-partial": ["false", str],
                "--beam": [13, int],
                "--beam-delta": [0.5, float],
                "--delta": [0.000976562, float],
                "--determinize-lattice": ["true", str],
                "--hash-ratio": [2, int],
                "--lattice-beam": [8, int],
                "--max-active": [7000, int],
                "--max-mem": [50000000, int],
                "--min-active": [200, int],
                "--minimize": ["false", str],
                "--phone-determinize": ["true", str],
                "--prune-interval": [25, int],
                "--word-determinize": ["true", str],
                "--word-symbol-table": ["", str]
                }
    else:
        # Unknown tool name.
        return None
| 31.751634
| 45
| 0.520996
| 595
| 4,858
| 4.245378
| 0.206723
| 0.072842
| 0.036025
| 0.041172
| 0.811956
| 0.811956
| 0.811956
| 0.811956
| 0.811956
| 0.795724
| 0
| 0.042631
| 0.155002
| 4,858
| 152
| 46
| 31.960526
| 0.572716
| 0
| 0
| 0.768212
| 0
| 0
| 0.463249
| 0.061149
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006623
| false
| 0
| 0
| 0
| 0.046358
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ce33095d639f433a869a59cf8b5064b41dae7b53
| 159
|
py
|
Python
|
tests/test_p_7_satz_von_pick.py
|
techrabbit58/uebung_informatik_vorkurs
|
e99312ae66ccccd6bfe45bfd3c3f43c01690659c
|
[
"Unlicense"
] | null | null | null |
tests/test_p_7_satz_von_pick.py
|
techrabbit58/uebung_informatik_vorkurs
|
e99312ae66ccccd6bfe45bfd3c3f43c01690659c
|
[
"Unlicense"
] | null | null | null |
tests/test_p_7_satz_von_pick.py
|
techrabbit58/uebung_informatik_vorkurs
|
e99312ae66ccccd6bfe45bfd3c3f43c01690659c
|
[
"Unlicense"
] | null | null | null |
"""
Teste die 'pick()' funktion.
"""
from tag_2.p_7_satz_von_pick import pick
def test_satz_von_pick():
    """Pick's theorem: 37 interior and 42 boundary lattice points give area 57."""
    area = pick(innenpunkte=37, randpunkte=42)
    assert area == 57
| 17.666667
| 52
| 0.716981
| 26
| 159
| 4.076923
| 0.769231
| 0.132075
| 0.207547
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 0.144654
| 159
| 8
| 53
| 19.875
| 0.720588
| 0.176101
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
02303ee28bd0812092741fb2878896f54f4e827c
| 35,065
|
py
|
Python
|
stubs/wafregional.py
|
claytonbrown/troposphere
|
bf0f1e48b14f578de0221d50f711467ad716ca87
|
[
"BSD-2-Clause"
] | null | null | null |
stubs/wafregional.py
|
claytonbrown/troposphere
|
bf0f1e48b14f578de0221d50f711467ad716ca87
|
[
"BSD-2-Clause"
] | null | null | null |
stubs/wafregional.py
|
claytonbrown/troposphere
|
bf0f1e48b14f578de0221d50f711467ad716ca87
|
[
"BSD-2-Clause"
] | null | null | null |
from . import AWSObject, AWSProperty
from .validators import *
from .constants import *
# -------------------------------------------
class WAFRegionalByteMatchTuple(AWSProperty):
    """# ByteMatchTuple - CloudFormationResourceSpecification version: 1.4.0
    {
        "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-bytematchset-bytematchtuple.html",
        "Properties": {
            "FieldToMatch": {
                "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-bytematchset-bytematchtuple.html#cfn-wafregional-bytematchset-bytematchtuple-fieldtomatch",
                "Required": true,
                "Type": "FieldToMatch",
                "UpdateType": "Mutable"
            },
            "PositionalConstraint": {
                "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-bytematchset-bytematchtuple.html#cfn-wafregional-bytematchset-bytematchtuple-positionalconstraint",
                "PrimitiveType": "String",
                "Required": true,
                "UpdateType": "Mutable"
            },
            "TargetString": {
                "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-bytematchset-bytematchtuple.html#cfn-wafregional-bytematchset-bytematchtuple-targetstring",
                "PrimitiveType": "String",
                "Required": false,
                "UpdateType": "Mutable"
            },
            "TargetStringBase64": {
                "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-bytematchset-bytematchtuple.html#cfn-wafregional-bytematchset-bytematchtuple-targetstringbase64",
                "PrimitiveType": "String",
                "Required": false,
                "UpdateType": "Mutable"
            },
            "TextTransformation": {
                "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-bytematchset-bytematchtuple.html#cfn-wafregional-bytematchset-bytematchtuple-texttransformation",
                "PrimitiveType": "String",
                "Required": true,
                "UpdateType": "Mutable"
            }
        }
    }
    """
    # Each entry: property name -> (type/validator, required?, update type, spec doc URL).
    props = {
        'TargetString': (basestring, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-bytematchset-bytematchtuple.html#cfn-wafregional-bytematchset-bytematchtuple-targetstring'),
        'TargetStringBase64': (basestring, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-bytematchset-bytematchtuple.html#cfn-wafregional-bytematchset-bytematchtuple-targetstringbase64'),
        'PositionalConstraint': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-bytematchset-bytematchtuple.html#cfn-wafregional-bytematchset-bytematchtuple-positionalconstraint'),
        'TextTransformation': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-bytematchset-bytematchtuple.html#cfn-wafregional-bytematchset-bytematchtuple-texttransformation'),
        'FieldToMatch': (FieldToMatch, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-bytematchset-bytematchtuple.html#cfn-wafregional-bytematchset-bytematchtuple-fieldtomatch')
    }
# -------------------------------------------
class WAFRegionalFieldToMatch(AWSProperty):
    """# FieldToMatch - CloudFormationResourceSpecification version: 1.4.0
    {
        "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-bytematchset-fieldtomatch.html",
        "Properties": {
            "Data": {
                "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-bytematchset-fieldtomatch.html#cfn-wafregional-bytematchset-fieldtomatch-data",
                "PrimitiveType": "String",
                "Required": false,
                "UpdateType": "Mutable"
            },
            "Type": {
                "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-bytematchset-fieldtomatch.html#cfn-wafregional-bytematchset-fieldtomatch-type",
                "PrimitiveType": "String",
                "Required": true,
                "UpdateType": "Mutable"
            }
        }
    }
    """
    # NOTE(review): WAFRegionalFieldToMatch is redefined later in this module for
    # other match-set resources; the last definition shadows this one at import time.
    props = {
        'Type': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-bytematchset-fieldtomatch.html#cfn-wafregional-bytematchset-fieldtomatch-type'),
        'Data': (basestring, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-bytematchset-fieldtomatch.html#cfn-wafregional-bytematchset-fieldtomatch-data')
    }
# -------------------------------------------
class WAFRegionalXssMatchTuple(AWSProperty):
    """# XssMatchTuple - CloudFormationResourceSpecification version: 1.4.0
    {
        "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-xssmatchset-xssmatchtuple.html",
        "Properties": {
            "FieldToMatch": {
                "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-xssmatchset-xssmatchtuple.html#cfn-wafregional-xssmatchset-xssmatchtuple-fieldtomatch",
                "Required": true,
                "Type": "FieldToMatch",
                "UpdateType": "Mutable"
            },
            "TextTransformation": {
                "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-xssmatchset-xssmatchtuple.html#cfn-wafregional-xssmatchset-xssmatchtuple-texttransformation",
                "PrimitiveType": "String",
                "Required": true,
                "UpdateType": "Mutable"
            }
        }
    }
    """
    # Each entry: property name -> (type/validator, required?, update type, spec doc URL).
    props = {
        'TextTransformation': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-xssmatchset-xssmatchtuple.html#cfn-wafregional-xssmatchset-xssmatchtuple-texttransformation'),
        'FieldToMatch': (FieldToMatch, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-xssmatchset-xssmatchtuple.html#cfn-wafregional-xssmatchset-xssmatchtuple-fieldtomatch')
    }
# -------------------------------------------
class WAFRegionalRule(AWSProperty):
    """# Rule - CloudFormationResourceSpecification version: 1.4.0
    {
        "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-webacl-rule.html",
        "Properties": {
            "Action": {
                "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-webacl-rule.html#cfn-wafregional-webacl-rule-action",
                "Required": true,
                "Type": "Action",
                "UpdateType": "Mutable"
            },
            "Priority": {
                "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-webacl-rule.html#cfn-wafregional-webacl-rule-priority",
                "PrimitiveType": "Integer",
                "Required": true,
                "UpdateType": "Mutable"
            },
            "RuleId": {
                "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-webacl-rule.html#cfn-wafregional-webacl-rule-ruleid",
                "PrimitiveType": "String",
                "Required": true,
                "UpdateType": "Mutable"
            }
        }
    }
    """
    # Action / positive_integer are not defined in this file — presumably provided
    # by the wildcard imports from .validators / .constants; verify.
    props = {
        'Action': (Action, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-webacl-rule.html#cfn-wafregional-webacl-rule-action'),
        'Priority': (positive_integer, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-webacl-rule.html#cfn-wafregional-webacl-rule-priority'),
        'RuleId': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-webacl-rule.html#cfn-wafregional-webacl-rule-ruleid')
    }
# -------------------------------------------
class WAFRegionalIPSetDescriptor(AWSProperty):
    """# IPSetDescriptor - CloudFormationResourceSpecification version: 1.4.0
    {
        "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-ipset-ipsetdescriptor.html",
        "Properties": {
            "Type": {
                "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-ipset-ipsetdescriptor.html#cfn-wafregional-ipset-ipsetdescriptor-type",
                "PrimitiveType": "String",
                "Required": true,
                "UpdateType": "Mutable"
            },
            "Value": {
                "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-ipset-ipsetdescriptor.html#cfn-wafregional-ipset-ipsetdescriptor-value",
                "PrimitiveType": "String",
                "Required": true,
                "UpdateType": "Mutable"
            }
        }
    }
    """
    # Each entry: property name -> (type/validator, required?, update type, spec doc URL).
    props = {
        'Type': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-ipset-ipsetdescriptor.html#cfn-wafregional-ipset-ipsetdescriptor-type'),
        'Value': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-ipset-ipsetdescriptor.html#cfn-wafregional-ipset-ipsetdescriptor-value')
    }
# -------------------------------------------
class WAFRegionalFieldToMatch(AWSProperty):
    """# FieldToMatch - CloudFormationResourceSpecification version: 1.4.0
    {
        "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-xssmatchset-fieldtomatch.html",
        "Properties": {
            "Data": {
                "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-xssmatchset-fieldtomatch.html#cfn-wafregional-xssmatchset-fieldtomatch-data",
                "PrimitiveType": "String",
                "Required": false,
                "UpdateType": "Mutable"
            },
            "Type": {
                "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-xssmatchset-fieldtomatch.html#cfn-wafregional-xssmatchset-fieldtomatch-type",
                "PrimitiveType": "String",
                "Required": true,
                "UpdateType": "Mutable"
            }
        }
    }
    """
    # NOTE(review): this redefines WAFRegionalFieldToMatch (xssmatchset variant);
    # only the last definition of this name survives at import time.
    props = {
        'Type': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-xssmatchset-fieldtomatch.html#cfn-wafregional-xssmatchset-fieldtomatch-type'),
        'Data': (basestring, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-xssmatchset-fieldtomatch.html#cfn-wafregional-xssmatchset-fieldtomatch-data')
    }
# -------------------------------------------
class WAFRegionalSizeConstraint(AWSProperty):
    """# SizeConstraint - CloudFormationResourceSpecification version: 1.4.0
    {
        "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-sizeconstraintset-sizeconstraint.html",
        "Properties": {
            "ComparisonOperator": {
                "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-sizeconstraintset-sizeconstraint.html#cfn-wafregional-sizeconstraintset-sizeconstraint-comparisonoperator",
                "PrimitiveType": "String",
                "Required": true,
                "UpdateType": "Mutable"
            },
            "FieldToMatch": {
                "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-sizeconstraintset-sizeconstraint.html#cfn-wafregional-sizeconstraintset-sizeconstraint-fieldtomatch",
                "Required": true,
                "Type": "FieldToMatch",
                "UpdateType": "Mutable"
            },
            "Size": {
                "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-sizeconstraintset-sizeconstraint.html#cfn-wafregional-sizeconstraintset-sizeconstraint-size",
                "PrimitiveType": "Integer",
                "Required": true,
                "UpdateType": "Mutable"
            },
            "TextTransformation": {
                "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-sizeconstraintset-sizeconstraint.html#cfn-wafregional-sizeconstraintset-sizeconstraint-texttransformation",
                "PrimitiveType": "String",
                "Required": true,
                "UpdateType": "Mutable"
            }
        }
    }
    """
    # Each entry: property name -> (type/validator, required?, update type, spec doc URL).
    props = {
        'ComparisonOperator': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-sizeconstraintset-sizeconstraint.html#cfn-wafregional-sizeconstraintset-sizeconstraint-comparisonoperator'),
        'Size': (positive_integer, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-sizeconstraintset-sizeconstraint.html#cfn-wafregional-sizeconstraintset-sizeconstraint-size'),
        'TextTransformation': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-sizeconstraintset-sizeconstraint.html#cfn-wafregional-sizeconstraintset-sizeconstraint-texttransformation'),
        'FieldToMatch': (FieldToMatch, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-sizeconstraintset-sizeconstraint.html#cfn-wafregional-sizeconstraintset-sizeconstraint-fieldtomatch')
    }
# -------------------------------------------
class WAFRegionalAction(AWSProperty):
    """# Action - CloudFormationResourceSpecification version: 1.4.0
    {
        "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-webacl-action.html",
        "Properties": {
            "Type": {
                "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-webacl-action.html#cfn-wafregional-webacl-action-type",
                "PrimitiveType": "String",
                "Required": true,
                "UpdateType": "Mutable"
            }
        }
    }
    """
    # Each entry: property name -> (type/validator, required?, update type, spec doc URL).
    props = {
        'Type': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-webacl-action.html#cfn-wafregional-webacl-action-type')
    }
# -------------------------------------------
class WAFRegionalFieldToMatch(AWSProperty):
    """# FieldToMatch - CloudFormationResourceSpecification version: 1.4.0
    {
        "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-sizeconstraintset-fieldtomatch.html",
        "Properties": {
            "Data": {
                "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-sizeconstraintset-fieldtomatch.html#cfn-wafregional-sizeconstraintset-fieldtomatch-data",
                "PrimitiveType": "String",
                "Required": false,
                "UpdateType": "Mutable"
            },
            "Type": {
                "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-sizeconstraintset-fieldtomatch.html#cfn-wafregional-sizeconstraintset-fieldtomatch-type",
                "PrimitiveType": "String",
                "Required": true,
                "UpdateType": "Mutable"
            }
        }
    }
    """
    # NOTE(review): this redefines WAFRegionalFieldToMatch (sizeconstraintset
    # variant); only the last definition of this name survives at import time.
    props = {
        'Type': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-sizeconstraintset-fieldtomatch.html#cfn-wafregional-sizeconstraintset-fieldtomatch-type'),
        'Data': (basestring, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-sizeconstraintset-fieldtomatch.html#cfn-wafregional-sizeconstraintset-fieldtomatch-data')
    }
# -------------------------------------------
class WAFRegionalPredicate(AWSProperty):
    """# Predicate - CloudFormationResourceSpecification version: 1.4.0
    {
        "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-rule-predicate.html",
        "Properties": {
            "DataId": {
                "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-rule-predicate.html#cfn-wafregional-rule-predicate-dataid",
                "PrimitiveType": "String",
                "Required": true,
                "UpdateType": "Mutable"
            },
            "Negated": {
                "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-rule-predicate.html#cfn-wafregional-rule-predicate-negated",
                "PrimitiveType": "Boolean",
                "Required": true,
                "UpdateType": "Mutable"
            },
            "Type": {
                "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-rule-predicate.html#cfn-wafregional-rule-predicate-type",
                "PrimitiveType": "String",
                "Required": true,
                "UpdateType": "Mutable"
            }
        }
    }
    """
    # Each entry: property name -> (type/validator, required?, update type, spec doc URL).
    props = {
        'Type': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-rule-predicate.html#cfn-wafregional-rule-predicate-type'),
        'DataId': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-rule-predicate.html#cfn-wafregional-rule-predicate-dataid'),
        'Negated': (boolean, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-rule-predicate.html#cfn-wafregional-rule-predicate-negated')
    }
# -------------------------------------------
class WAFRegionalFieldToMatch(AWSProperty):
    """FieldToMatch subproperty of AWS::WAFRegional::SqlInjectionMatchSet.

    Generated from CloudFormationResourceSpecification version 1.4.0.
    Each ``props`` value is a tuple of (expected type, required flag,
    update type, spec documentation URL).

    NOTE(review): a sizeconstraintset FieldToMatch props block appears just
    above in this file -- if that earlier class carries this same name, this
    definition shadows it; confirm against the generator output.
    """

    props = {
        'Type': (
            basestring, True, 'Mutable',
            'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-sqlinjectionmatchset-fieldtomatch.html#cfn-wafregional-sqlinjectionmatchset-fieldtomatch-type'),
        'Data': (
            basestring, False, 'Mutable',
            'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-sqlinjectionmatchset-fieldtomatch.html#cfn-wafregional-sqlinjectionmatchset-fieldtomatch-data'),
    }
# -------------------------------------------
class WAFRegionalSqlInjectionMatchTuple(AWSProperty):
    """SqlInjectionMatchTuple subproperty of AWS::WAFRegional::SqlInjectionMatchSet.

    Generated from CloudFormationResourceSpecification version 1.4.0.
    Each ``props`` value is a tuple of (expected type, required flag,
    update type, spec documentation URL).

    NOTE(review): references module-level ``FieldToMatch`` (defined elsewhere
    in this file), not the WAFRegional-prefixed variant -- verify it resolves
    to the intended class.
    """

    props = {
        'TextTransformation': (
            basestring, True, 'Mutable',
            'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-sqlinjectionmatchset-sqlinjectionmatchtuple.html#cfn-wafregional-sqlinjectionmatchset-sqlinjectionmatchtuple-texttransformation'),
        'FieldToMatch': (
            FieldToMatch, True, 'Mutable',
            'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-wafregional-sqlinjectionmatchset-sqlinjectionmatchtuple.html#cfn-wafregional-sqlinjectionmatchset-sqlinjectionmatchtuple-fieldtomatch'),
    }
# -------------------------------------------
class WAFRegionalSizeConstraintSet(AWSObject):
    """AWS::WAFRegional::SizeConstraintSet resource.

    Generated from CloudFormationResourceSpecification version 1.4.0.
    Each ``props`` value is a tuple of (expected type, required flag,
    update type, spec documentation URL).
    """

    resource_type = "AWS::WAFRegional::SizeConstraintSet"

    props = {
        'SizeConstraints': (
            [SizeConstraint], False, 'Mutable',
            'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-wafregional-sizeconstraintset.html#cfn-wafregional-sizeconstraintset-sizeconstraints'),
        'Name': (
            basestring, True, 'Immutable',
            'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-wafregional-sizeconstraintset.html#cfn-wafregional-sizeconstraintset-name'),
    }
# -------------------------------------------
class WAFRegionalSqlInjectionMatchSet(AWSObject):
    """AWS::WAFRegional::SqlInjectionMatchSet resource.

    Generated from CloudFormationResourceSpecification version 1.4.0.
    Each ``props`` value is a tuple of (expected type, required flag,
    update type, spec documentation URL).
    """

    resource_type = "AWS::WAFRegional::SqlInjectionMatchSet"

    props = {
        'SqlInjectionMatchTuples': (
            [SqlInjectionMatchTuple], False, 'Mutable',
            'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-wafregional-sqlinjectionmatchset.html#cfn-wafregional-sqlinjectionmatchset-sqlinjectionmatchtuples'),
        'Name': (
            basestring, True, 'Immutable',
            'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-wafregional-sqlinjectionmatchset.html#cfn-wafregional-sqlinjectionmatchset-name'),
    }
# -------------------------------------------
class WAFRegionalXssMatchSet(AWSObject):
    """AWS::WAFRegional::XssMatchSet resource.

    Generated from CloudFormationResourceSpecification version 1.4.0.
    Each ``props`` value is a tuple of (expected type, required flag,
    update type, spec documentation URL).
    """

    resource_type = "AWS::WAFRegional::XssMatchSet"

    props = {
        'XssMatchTuples': (
            [XssMatchTuple], False, 'Mutable',
            'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-wafregional-xssmatchset.html#cfn-wafregional-xssmatchset-xssmatchtuples'),
        'Name': (
            basestring, True, 'Immutable',
            'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-wafregional-xssmatchset.html#cfn-wafregional-xssmatchset-name'),
    }
# -------------------------------------------
class WAFRegionalByteMatchSet(AWSObject):
    """AWS::WAFRegional::ByteMatchSet resource.

    Generated from CloudFormationResourceSpecification version 1.4.0.
    Each ``props`` value is a tuple of (expected type, required flag,
    update type, spec documentation URL).
    """

    resource_type = "AWS::WAFRegional::ByteMatchSet"

    props = {
        'ByteMatchTuples': (
            [ByteMatchTuple], False, 'Mutable',
            'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-wafregional-bytematchset.html#cfn-wafregional-bytematchset-bytematchtuples'),
        'Name': (
            basestring, True, 'Immutable',
            'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-wafregional-bytematchset.html#cfn-wafregional-bytematchset-name'),
    }
# -------------------------------------------
class WAFRegionalWebACLAssociation(AWSObject):
    """AWS::WAFRegional::WebACLAssociation resource.

    Generated from CloudFormationResourceSpecification version 1.4.0.
    Each ``props`` value is a tuple of (expected type, required flag,
    update type, spec documentation URL).
    """

    resource_type = "AWS::WAFRegional::WebACLAssociation"

    props = {
        'ResourceArn': (
            basestring, True, 'Immutable',
            'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-wafregional-webaclassociation.html#cfn-wafregional-webaclassociation-resourcearn'),
        'WebACLId': (
            basestring, True, 'Immutable',
            'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-wafregional-webaclassociation.html#cfn-wafregional-webaclassociation-webaclid'),
    }
# -------------------------------------------
class WAFRegionalWebACL(AWSObject):
    """AWS::WAFRegional::WebACL resource.

    Generated from CloudFormationResourceSpecification version 1.4.0.
    Each ``props`` value is a tuple of (expected type, required flag,
    update type, spec documentation URL).
    """

    resource_type = "AWS::WAFRegional::WebACL"

    props = {
        'MetricName': (
            basestring, True, 'Immutable',
            'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-wafregional-webacl.html#cfn-wafregional-webacl-metricname'),
        'DefaultAction': (
            Action, True, 'Mutable',
            'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-wafregional-webacl.html#cfn-wafregional-webacl-defaultaction'),
        'Rules': (
            [Rule], False, 'Mutable',
            'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-wafregional-webacl.html#cfn-wafregional-webacl-rules'),
        'Name': (
            basestring, True, 'Immutable',
            'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-wafregional-webacl.html#cfn-wafregional-webacl-name'),
    }
# -------------------------------------------
class WAFRegionalRule(AWSObject):
    """AWS::WAFRegional::Rule resource.

    Generated from CloudFormationResourceSpecification version 1.4.0.
    Each ``props`` value is a tuple of (expected type, required flag,
    update type, spec documentation URL).
    """

    resource_type = "AWS::WAFRegional::Rule"

    props = {
        'MetricName': (
            basestring, True, 'Immutable',
            'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-wafregional-rule.html#cfn-wafregional-rule-metricname'),
        'Predicates': (
            [Predicate], False, 'Mutable',
            'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-wafregional-rule.html#cfn-wafregional-rule-predicates'),
        'Name': (
            basestring, True, 'Immutable',
            'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-wafregional-rule.html#cfn-wafregional-rule-name'),
    }
# -------------------------------------------
class WAFRegionalIPSet(AWSObject):
    """AWS::WAFRegional::IPSet resource.

    Generated from CloudFormationResourceSpecification version 1.4.0.
    Each ``props`` value is a tuple of (expected type, required flag,
    update type, spec documentation URL).
    """

    resource_type = "AWS::WAFRegional::IPSet"

    props = {
        'IPSetDescriptors': (
            [IPSetDescriptor], False, 'Mutable',
            'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-wafregional-ipset.html#cfn-wafregional-ipset-ipsetdescriptors'),
        'Name': (
            basestring, True, 'Immutable',
            'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-wafregional-ipset.html#cfn-wafregional-ipset-name'),
    }
| 53.452744
| 280
| 0.676059
| 2,968
| 35,065
| 7.983827
| 0.033019
| 0.039838
| 0.054777
| 0.084656
| 0.91872
| 0.91872
| 0.900405
| 0.881794
| 0.877321
| 0.877321
| 0
| 0.00231
| 0.160645
| 35,065
| 655
| 281
| 53.534351
| 0.8028
| 0.638243
| 0
| 0.2
| 0
| 0.408333
| 0.705325
| 0.020616
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.025
| 0
| 0.425
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
024c542586c82802a98e3a5e5ca547ed394bd6e7
| 31,332
|
py
|
Python
|
tests/test_ccc.py
|
mfe/cdl_convert
|
1799ac2a80ccc8a5ad147c86cfa40f4de2ac266b
|
[
"MIT"
] | 42
|
2015-01-26T17:52:19.000Z
|
2021-07-21T08:38:30.000Z
|
tests/test_ccc.py
|
mfe/cdl_convert
|
1799ac2a80ccc8a5ad147c86cfa40f4de2ac266b
|
[
"MIT"
] | 26
|
2015-01-29T03:23:04.000Z
|
2021-05-27T02:14:16.000Z
|
tests/test_ccc.py
|
mfe/cdl_convert
|
1799ac2a80ccc8a5ad147c86cfa40f4de2ac266b
|
[
"MIT"
] | 17
|
2015-08-05T13:27:45.000Z
|
2022-02-19T20:52:22.000Z
|
#!/usr/bin/env python
"""
Tests the ccc related functions of cdl_convert
REQUIREMENTS:
mock
"""
#==============================================================================
# IMPORTS
#==============================================================================
# Standard Imports
from decimal import Decimal
try:
from unittest import mock
except ImportError:
import mock
import os
import sys
import tempfile
import unittest
from xml.etree import ElementTree
# Grab our test's path and append the cdL_convert root directory
# There has to be a better method than:
# 1) Getting our current directory
# 2) Splitting into list
# 3) Splicing out the last 3 entries (filepath, test dir, tools dir)
# 4) Joining
# 5) Appending to our Python path.
sys.path.append('/'.join(os.path.realpath(__file__).split('/')[:-2]))
import cdl_convert
#==============================================================================
# GLOBALS
#==============================================================================
# parse_ccc ===================================================================
# Fully-featured CCC fixture for parse_ccc. Raw string: the XML contains
# literal "\/" runs, which are invalid escape sequences in a non-raw
# literal (SyntaxWarning since Python 3.12). Value is unchanged.
CCC_FULL = r"""<?xml version="1.0" encoding="UTF-8"?>
<ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.01">
<Description>CCC description 1</Description>
<InputDescription>CCC Input Desc Text</InputDescription>
<Description>CCC description 2</Description>
<ColorCorrection id="014_xf_seqGrade_v01">
<Description>CC description 1</Description>
<InputDescription>Input Desc Text</InputDescription>
<Description>CC description 2</Description>
<SOPNode>
<Description>Sop description 1</Description>
<Description>Sop description 2</Description>
<Slope>1.014 1.0104 0.62</Slope>
<Offset>-0.00315 -0.00124 0.3103</Offset>
<Power>1.0 0.9983 1.0</Power>
<Description>Sop description 3</Description>
</SOPNode>
<Description>CC description 3</Description>
<SATNode>
<Description>Sat description 1</Description>
<Saturation>1.09</Saturation>
<Description>Sat description 2</Description>
</SATNode>
<Description>CC description 4</Description>
<ViewingDescription>Viewing Desc Text</ViewingDescription>
<Description>CC description 5</Description>
</ColorCorrection>
<ColorCorrection id="f51.200">
<SopNode>
<Slope>0.2331 0.678669 1.0758</Slope>
<Offset>0.031 0.128 -0.096</Offset>
<Power>1.8 0.97 0.961</Power>
</SopNode>
<SatNode>
<Saturation>1.01</Saturation>
</SatNode>
</ColorCorrection>
<ColorCorrection id="f55.100">
<Description>Raised saturation a little!?! ag... \/Offset</Description>
<Description>Raised saturation a little!?! ag... \/Offset</Description>
<InputDescription>METAL VIEWER!!! \/\/</InputDescription>
<ViewingDescription>WOOD VIEWER!? ////</ViewingDescription>
<SopNode>
<Description>Raised saturation a little!?! ag... \/Offset</Description>
<Slope>137829.329 4327890.9833 3489031.003</Slope>
<Offset>-3424.011 -342789423.013 -4238923.11</Offset>
<Power>3271893.993 .0000998 0.0000000000000000113</Power>
</SopNode>
<SatNode>
<Saturation>1798787.01</Saturation>
</SatNode>
</ColorCorrection>
<ColorCorrection id="f54.112">
<ASC_SAT>
<Saturation>1.01</Saturation>
</ASC_SAT>
<ASC_SOP>
<Slope>0.2331 0.678669 1.0758</Slope>
<Offset>0.031 0.128 -0.096</Offset>
<Power>1.8 0.97 0.961</Power>
</ASC_SOP>
</ColorCorrection>
<Description>CCC description 3</Description>
<ViewingDescription>CCC Viewing Desc Text</ViewingDescription>
<Description>CCC description 4</Description>
<ColorCorrection id="burp_100.x12">
<ViewingDescription></ViewingDescription>
<Description></Description>
<SOPNode>
<Description></Description>
<Slope>1.014 1.0104 0.62</Slope>
<Offset>-0.00315 -0.00124 0.3103</Offset>
<Power>1.0 0.9983 1.0</Power>
</SOPNode>
<Description></Description>
<InputDescription></InputDescription>
<SatNode>
<Saturation>1.09</Saturation>
<Description></Description>
</SatNode>
<Description></Description>
</ColorCorrection>
<ColorCorrection id="burp_200.x15">
<SatNode>
<Description>I am a lovely sat node</Description>
<Saturation>1.01</Saturation>
</SatNode>
</ColorCorrection>
<ColorCorrection id="burp_300.x35">
<SopNode>
<Slope>0.2331 0.678669 1.0758</Slope>
<Offset>0.031 0.128 -0.096</Offset>
<Power>1.8 0.97 0.961</Power>
</SopNode>
</ColorCorrection>
</ColorCorrectionCollection>
"""
# Oddball CCC fixture (no xmlns, empty elements, stray descriptions).
# Raw string: the XML contains literal "\/" runs, which are invalid escape
# sequences in a non-raw literal (SyntaxWarning since Python 3.12).
CCC_ODD = r"""<?xml version="1.0" encoding="UTF-8"?>
<ColorCorrectionCollection>
<Description></Description>
<InputDescription></InputDescription>
<Description>CCC description 1</Description>
<Description></Description>
<Description></Description>
<Description></Description>
<ColorCorrection id="014_xf_seqGrade_v01">
<SOPNode>
<Description>Sop description 1</Description>
<Description>Sop description 2</Description>
<Slope>1.014 1.0104 0.62</Slope>
<Offset>-0.00315 -0.00124 0.3103</Offset>
<Power>1.0 0.9983 1.0</Power>
<Description>Sop description 3</Description>
</SOPNode>
</ColorCorrection>
<ColorCorrection id="f51.200">
<SopNode>
<Slope>0.2331 0.678669 1.0758</Slope>
<Offset>0.031 0.128 -0.096</Offset>
<Power>1.8 0.97 0.961</Power>
</SopNode>
</ColorCorrection>
<Description>Raised1 saturation a little!?! ag... \/Offset</Description>
<Description>Raised2 saturation a little!?! ag... \/Offset</Description>
<ColorCorrection id="f55.100">
<SatNode>
<Saturation>1798787.01</Saturation>
</SatNode>
</ColorCorrection>
<ColorCorrection id="f54.112">
<ASC_SAT>
<Saturation>1.01</Saturation>
</ASC_SAT>
</ColorCorrection>
<ViewingDescription></ViewingDescription>
<ColorCorrection id="burp_200.x15">
<SatNode>
<Description>I am a lovely sat node</Description>
<Saturation>1.01</Saturation>
</SatNode>
</ColorCorrection>
</ColorCorrectionCollection>
"""
# write_ccc ===================================================================
# Expected output when writing the parsed CCC_FULL back out as a CCC.
# Raw string: the XML contains literal "\/" runs, which are invalid escape
# sequences in a non-raw literal (SyntaxWarning since Python 3.12).
CCC_FULL_WRITE = r"""<?xml version="1.0" encoding="UTF-8"?>
<ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.01">
<InputDescription>CCC Input Desc Text</InputDescription>
<ViewingDescription>CCC Viewing Desc Text</ViewingDescription>
<Description>CCC description 1</Description>
<Description>CCC description 2</Description>
<Description>CCC description 3</Description>
<Description>CCC description 4</Description>
<ColorCorrection id="014_xf_seqGrade_v01">
<InputDescription>Input Desc Text</InputDescription>
<ViewingDescription>Viewing Desc Text</ViewingDescription>
<Description>CC description 1</Description>
<Description>CC description 2</Description>
<Description>CC description 3</Description>
<Description>CC description 4</Description>
<Description>CC description 5</Description>
<SOPNode>
<Description>Sop description 1</Description>
<Description>Sop description 2</Description>
<Description>Sop description 3</Description>
<Slope>1.014 1.0104 0.62</Slope>
<Offset>-0.00315 -0.00124 0.3103</Offset>
<Power>1.0 0.9983 1.0</Power>
</SOPNode>
<SATNode>
<Description>Sat description 1</Description>
<Description>Sat description 2</Description>
<Saturation>1.09</Saturation>
</SATNode>
</ColorCorrection>
<ColorCorrection id="f51.200">
<SOPNode>
<Slope>0.2331 0.678669 1.0758</Slope>
<Offset>0.031 0.128 -0.096</Offset>
<Power>1.8 0.97 0.961</Power>
</SOPNode>
<SATNode>
<Saturation>1.01</Saturation>
</SATNode>
</ColorCorrection>
<ColorCorrection id="f55.100">
<InputDescription>METAL VIEWER!!! \/\/</InputDescription>
<ViewingDescription>WOOD VIEWER!? ////</ViewingDescription>
<Description>Raised saturation a little!?! ag... \/Offset</Description>
<Description>Raised saturation a little!?! ag... \/Offset</Description>
<SOPNode>
<Description>Raised saturation a little!?! ag... \/Offset</Description>
<Slope>137829.329 4327890.9833 3489031.003</Slope>
<Offset>-3424.011 -342789423.013 -4238923.11</Offset>
<Power>3271893.993 0.0000998 0.0000000000000000113</Power>
</SOPNode>
<SATNode>
<Saturation>1798787.01</Saturation>
</SATNode>
</ColorCorrection>
<ColorCorrection id="f54.112">
<SOPNode>
<Slope>0.2331 0.678669 1.0758</Slope>
<Offset>0.031 0.128 -0.096</Offset>
<Power>1.8 0.97 0.961</Power>
</SOPNode>
<SATNode>
<Saturation>1.01</Saturation>
</SATNode>
</ColorCorrection>
<ColorCorrection id="burp_100.x12">
<SOPNode>
<Slope>1.014 1.0104 0.62</Slope>
<Offset>-0.00315 -0.00124 0.3103</Offset>
<Power>1.0 0.9983 1.0</Power>
</SOPNode>
<SATNode>
<Saturation>1.09</Saturation>
</SATNode>
</ColorCorrection>
<ColorCorrection id="burp_200.x15">
<SATNode>
<Description>I am a lovely sat node</Description>
<Saturation>1.01</Saturation>
</SATNode>
</ColorCorrection>
<ColorCorrection id="burp_300.x35">
<SOPNode>
<Slope>0.2331 0.678669 1.0758</Slope>
<Offset>0.031 0.128 -0.096</Offset>
<Power>1.8 0.97 0.961</Power>
</SOPNode>
</ColorCorrection>
</ColorCorrectionCollection>
"""
# Expected output when writing the parsed CCC_FULL back out as a CDL.
# Raw string: the XML contains literal "\/" runs, which are invalid escape
# sequences in a non-raw literal (SyntaxWarning since Python 3.12).
CCC_FULL_WRITE_CDL = r"""<?xml version="1.0" encoding="UTF-8"?>
<ColorDecisionList xmlns="urn:ASC:CDL:v1.01">
<InputDescription>CCC Input Desc Text</InputDescription>
<ViewingDescription>CCC Viewing Desc Text</ViewingDescription>
<Description>CCC description 1</Description>
<Description>CCC description 2</Description>
<Description>CCC description 3</Description>
<Description>CCC description 4</Description>
<ColorDecision>
<ColorCorrection id="014_xf_seqGrade_v01">
<InputDescription>Input Desc Text</InputDescription>
<ViewingDescription>Viewing Desc Text</ViewingDescription>
<Description>CC description 1</Description>
<Description>CC description 2</Description>
<Description>CC description 3</Description>
<Description>CC description 4</Description>
<Description>CC description 5</Description>
<SOPNode>
<Description>Sop description 1</Description>
<Description>Sop description 2</Description>
<Description>Sop description 3</Description>
<Slope>1.014 1.0104 0.62</Slope>
<Offset>-0.00315 -0.00124 0.3103</Offset>
<Power>1.0 0.9983 1.0</Power>
</SOPNode>
<SATNode>
<Description>Sat description 1</Description>
<Description>Sat description 2</Description>
<Saturation>1.09</Saturation>
</SATNode>
</ColorCorrection>
</ColorDecision>
<ColorDecision>
<ColorCorrection id="f51.200">
<SOPNode>
<Slope>0.2331 0.678669 1.0758</Slope>
<Offset>0.031 0.128 -0.096</Offset>
<Power>1.8 0.97 0.961</Power>
</SOPNode>
<SATNode>
<Saturation>1.01</Saturation>
</SATNode>
</ColorCorrection>
</ColorDecision>
<ColorDecision>
<ColorCorrection id="f55.100">
<InputDescription>METAL VIEWER!!! \/\/</InputDescription>
<ViewingDescription>WOOD VIEWER!? ////</ViewingDescription>
<Description>Raised saturation a little!?! ag... \/Offset</Description>
<Description>Raised saturation a little!?! ag... \/Offset</Description>
<SOPNode>
<Description>Raised saturation a little!?! ag... \/Offset</Description>
<Slope>137829.329 4327890.9833 3489031.003</Slope>
<Offset>-3424.011 -342789423.013 -4238923.11</Offset>
<Power>3271893.993 0.0000998 0.0000000000000000113</Power>
</SOPNode>
<SATNode>
<Saturation>1798787.01</Saturation>
</SATNode>
</ColorCorrection>
</ColorDecision>
<ColorDecision>
<ColorCorrection id="f54.112">
<SOPNode>
<Slope>0.2331 0.678669 1.0758</Slope>
<Offset>0.031 0.128 -0.096</Offset>
<Power>1.8 0.97 0.961</Power>
</SOPNode>
<SATNode>
<Saturation>1.01</Saturation>
</SATNode>
</ColorCorrection>
</ColorDecision>
<ColorDecision>
<ColorCorrection id="burp_100.x12">
<SOPNode>
<Slope>1.014 1.0104 0.62</Slope>
<Offset>-0.00315 -0.00124 0.3103</Offset>
<Power>1.0 0.9983 1.0</Power>
</SOPNode>
<SATNode>
<Saturation>1.09</Saturation>
</SATNode>
</ColorCorrection>
</ColorDecision>
<ColorDecision>
<ColorCorrection id="burp_200.x15">
<SATNode>
<Description>I am a lovely sat node</Description>
<Saturation>1.01</Saturation>
</SATNode>
</ColorCorrection>
</ColorDecision>
<ColorDecision>
<ColorCorrection id="burp_300.x35">
<SOPNode>
<Slope>0.2331 0.678669 1.0758</Slope>
<Offset>0.031 0.128 -0.096</Offset>
<Power>1.8 0.97 0.961</Power>
</SOPNode>
</ColorCorrection>
</ColorDecision>
</ColorDecisionList>
"""
# Expected output when writing the parsed CCC_ODD back out as a CCC.
# Raw string: the XML contains literal "\/" runs, which are invalid escape
# sequences in a non-raw literal (SyntaxWarning since Python 3.12).
CCC_ODD_WRITE = r"""<?xml version="1.0" encoding="UTF-8"?>
<ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.01">
<Description>CCC description 1</Description>
<Description>Raised1 saturation a little!?! ag... \/Offset</Description>
<Description>Raised2 saturation a little!?! ag... \/Offset</Description>
<ColorCorrection id="014_xf_seqGrade_v01">
<SOPNode>
<Description>Sop description 1</Description>
<Description>Sop description 2</Description>
<Description>Sop description 3</Description>
<Slope>1.014 1.0104 0.62</Slope>
<Offset>-0.00315 -0.00124 0.3103</Offset>
<Power>1.0 0.9983 1.0</Power>
</SOPNode>
</ColorCorrection>
<ColorCorrection id="f51.200">
<SOPNode>
<Slope>0.2331 0.678669 1.0758</Slope>
<Offset>0.031 0.128 -0.096</Offset>
<Power>1.8 0.97 0.961</Power>
</SOPNode>
</ColorCorrection>
<ColorCorrection id="f55.100">
<SATNode>
<Saturation>1798787.01</Saturation>
</SATNode>
</ColorCorrection>
<ColorCorrection id="f54.112">
<SATNode>
<Saturation>1.01</Saturation>
</SATNode>
</ColorCorrection>
<ColorCorrection id="burp_200.x15">
<SATNode>
<Description>I am a lovely sat node</Description>
<Saturation>1.01</Saturation>
</SATNode>
</ColorCorrection>
</ColorCorrectionCollection>
"""
# Expected output when writing the parsed CCC_ODD back out as a CDL.
# Raw string: the XML contains literal "\/" runs, which are invalid escape
# sequences in a non-raw literal (SyntaxWarning since Python 3.12).
CCC_ODD_WRITE_CDL = r"""<?xml version="1.0" encoding="UTF-8"?>
<ColorDecisionList xmlns="urn:ASC:CDL:v1.01">
<Description>CCC description 1</Description>
<Description>Raised1 saturation a little!?! ag... \/Offset</Description>
<Description>Raised2 saturation a little!?! ag... \/Offset</Description>
<ColorDecision>
<ColorCorrection id="014_xf_seqGrade_v01">
<SOPNode>
<Description>Sop description 1</Description>
<Description>Sop description 2</Description>
<Description>Sop description 3</Description>
<Slope>1.014 1.0104 0.62</Slope>
<Offset>-0.00315 -0.00124 0.3103</Offset>
<Power>1.0 0.9983 1.0</Power>
</SOPNode>
</ColorCorrection>
</ColorDecision>
<ColorDecision>
<ColorCorrection id="f51.200">
<SOPNode>
<Slope>0.2331 0.678669 1.0758</Slope>
<Offset>0.031 0.128 -0.096</Offset>
<Power>1.8 0.97 0.961</Power>
</SOPNode>
</ColorCorrection>
</ColorDecision>
<ColorDecision>
<ColorCorrection id="f55.100">
<SATNode>
<Saturation>1798787.01</Saturation>
</SATNode>
</ColorCorrection>
</ColorDecision>
<ColorDecision>
<ColorCorrection id="f54.112">
<SATNode>
<Saturation>1.01</Saturation>
</SATNode>
</ColorCorrection>
</ColorDecision>
<ColorDecision>
<ColorCorrection id="burp_200.x15">
<SATNode>
<Description>I am a lovely sat node</Description>
<Saturation>1.01</Saturation>
</SATNode>
</ColorCorrection>
</ColorDecision>
</ColorDecisionList>
"""
# Fixture with a misspelled root tag ("Bollection") for error-path tests.
# Raw string: the XML contains literal "\/" runs, which are invalid escape
# sequences in a non-raw literal (SyntaxWarning since Python 3.12).
CCC_BAD_TAG = r"""<?xml version="1.0" encoding="UTF-8"?>
<ColorCorrectionBollection xmlns="urn:ASC:CDL:v1.01">
<Description>CCC description 1</Description>
<Description>Raised1 saturation a little!?! ag... \/Offset</Description>
<Description>Raised2 saturation a little!?! ag... \/Offset</Description>
<ColorCorrection id="014_xf_seqGrade_v01">
<SOPNode>
<Description>Sop description 1</Description>
<Description>Sop description 2</Description>
<Description>Sop description 3</Description>
<Slope>1.014 1.0104 0.62</Slope>
<Offset>-0.00315 -0.00124 0.3103</Offset>
<Power>1.0 0.9983 1.0</Power>
</SOPNode>
</ColorCorrection>
<ColorCorrection id="f51.200">
<SOPNode>
<Slope>0.2331 0.678669 1.0758</Slope>
<Offset>0.031 0.128 -0.096</Offset>
<Power>1.8 0.97 0.961</Power>
</SOPNode>
</ColorCorrection>
<ColorCorrection id="f55.100">
<SATNode>
<Saturation>1798787.01</Saturation>
</SATNode>
</ColorCorrection>
<ColorCorrection id="f54.112">
<SATNode>
<Saturation>1.01</Saturation>
</SATNode>
</ColorCorrection>
<ColorCorrection id="burp_200.x15">
<SATNode>
<Description>I am a lovely sat node</Description>
<Saturation>1.01</Saturation>
</SATNode>
</ColorCorrection>
</ColorCorrectionBollection>
"""
# misc ========================================================================
# Reference alphabets used by the tests.
UPPER = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
LOWER = 'abcdefghijklmnopqrstuvwxyz'

# Python 2/3 compatibility shims. The original code ran the same
# sys.version_info check twice; one check now sets both shims, and the
# PEP 8 E731 lambda assignments are replaced with proper defs.
if sys.version_info[0] >= 3:
    def enc(x):
        """Return *x* encoded as UTF-8 bytes (Python 3)."""
        return bytes(x, 'UTF-8')
    # Module name patched by mock-based tests.
    builtins = 'builtins'
else:
    def enc(x):
        """Return *x* unchanged (Python 2 str is already bytes)."""
        return x
    builtins = '__builtin__'
#==============================================================================
# TEST CLASSES
#==============================================================================
class TestParseCCCFull(unittest.TestCase):
    """Parses the full-featured CCC fixture and verifies every parsed field."""

    #==========================================================================
    # SETUP & TEARDOWN
    #==========================================================================

    def setUp(self):
        # Expected collection-level metadata found in the CCC_FULL fixture.
        self.desc = [
            'CCC description 1',
            'CCC description 2',
            'CCC description 3',
            'CCC description 4'
        ]
        self.input_desc = 'CCC Input Desc Text'
        self.viewing_desc = 'CCC Viewing Desc Text'
        self.color_correction_ids = [
            '014_xf_seqGrade_v01',
            'f51.200',
            'f55.100',
            'f54.112',
            'burp_100.x12',
            'burp_200.x15',
            'burp_300.x35'
        ]
        # Write the fixture to disk so parse_ccc can read a real file.
        with tempfile.NamedTemporaryFile(mode='wb', delete=False) as ccc_file:
            ccc_file.write(enc(CCC_FULL))
            self.filename = ccc_file.name
        self.node = cdl_convert.parse_ccc(self.filename)

    #==========================================================================

    def tearDown(self):
        # Remove the temp fixture ourselves rather than relying on the OS.
        os.remove(self.filename)
        # Clear the ColorCorrection member dictionary so later tests don't
        # collide on non-unique ids.
        cdl_convert.reset_all()

    #==========================================================================
    # TESTS
    #==========================================================================

    def test_file_in(self):
        """Checks that file_in records the path of the parsed file"""
        self.assertEqual(
            self.filename,
            self.node.file_in
        )

    #==========================================================================

    def test_type(self):
        """Checks that the parsed node's type is still 'ccc'"""
        self.assertEqual(
            'ccc',
            self.node.type
        )

    #==========================================================================

    def test_descs(self):
        """Checks the collection-level description list"""
        self.assertEqual(
            self.desc,
            self.node.desc
        )

    #==========================================================================

    def test_viewing_desc(self):
        """Checks the collection-level viewing description"""
        self.assertEqual(
            self.viewing_desc,
            self.node.viewing_desc
        )

    #==========================================================================

    def test_input_desc(self):
        """Checks the collection-level input description"""
        self.assertEqual(
            self.input_desc,
            self.node.input_desc
        )

    #==========================================================================

    def test_parse_results(self):
        """Checks that every ColorCorrection id was picked up, in order"""
        found_ids = [cc.id for cc in self.node.color_corrections]
        self.assertEqual(
            self.color_correction_ids,
            found_ids
        )
class TestParseCCCOdd(TestParseCCCFull):
    """Verifies an oddly-formed CCC file parses with the expected fields"""
    #==========================================================================
    # SETUP & TEARDOWN
    #==========================================================================
    def setUp(self):
        # The odd fixture has no input/viewing descriptions and fewer ids.
        self.desc = [
            'CCC description 1',
            'Raised1 saturation a little!?! ag... \/Offset',
            'Raised2 saturation a little!?! ag... \/Offset',
        ]
        self.input_desc = None
        self.viewing_desc = None
        self.color_correction_ids = [
            '014_xf_seqGrade_v01', 'f51.200', 'f55.100', 'f54.112',
            'burp_200.x15',
        ]
        # Write the fixture to disk, then parse it back in.
        with tempfile.NamedTemporaryFile(mode='wb', delete=False) as ccc_file:
            ccc_file.write(enc(CCC_ODD))
            self.filename = ccc_file.name
        self.node = cdl_convert.parse_ccc(self.filename)
class TestParseCCCExceptions(unittest.TestCase):
    """Checks that malformed CCC XML raises the expected exceptions"""
    #==========================================================================
    # SETUP & TEARDOWN
    #==========================================================================
    def setUp(self):
        self.filename = None
    def tearDown(self):
        # A temp file only exists if the test got far enough to write one.
        if self.filename:
            os.remove(self.filename)
        cdl_convert.reset_all()
    #==========================================================================
    # TESTS
    #==========================================================================
    def testBadTag(self):
        """Tests that a bad root tag raises a ValueError"""
        with tempfile.NamedTemporaryFile(mode='wb', delete=False) as bad_file:
            bad_file.write(enc(CCC_BAD_TAG))
            self.filename = bad_file.name
        self.assertRaises(ValueError, cdl_convert.parse_ccc, self.filename)
    #==========================================================================
    def testEmptyCCC(self):
        """Tests that an empty CCC file raises a ValueError"""
        emptyCCC = ('<?xml version="1.0" encoding="UTF-8"?>\n'
                    '<ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.01">\n'
                    '</ColorCorrectionCollection>')
        with tempfile.NamedTemporaryFile(mode='wb', delete=False) as ccc_file:
            ccc_file.write(enc(emptyCCC))
            self.filename = ccc_file.name
        self.assertRaises(ValueError, cdl_convert.parse_ccc, self.filename)
class TestWriteCCCFull(unittest.TestCase):
    """Tests a full write of the CCC file
    This is an integration style test. If parse_ccc stops working, this stops
    working.
    """
    #==========================================================================
    # SETUP & TEARDOWN
    #==========================================================================
    def setUp(self):
        # Round-trip: write the fixture to disk, parse it, compare writes.
        with tempfile.NamedTemporaryFile(mode='wb', delete=False) as ccc_file:
            ccc_file.write(enc(CCC_FULL))
            self.filename = ccc_file.name
        self.ccc = cdl_convert.parse_ccc(self.filename)
        # Full target includes the xml declaration line; the base target
        # is everything after it.
        self.target_xml_root = enc(CCC_FULL_WRITE)
        self.target_xml = enc('\n'.join(CCC_FULL_WRITE.split('\n')[1:]))
    #==========================================================================
    def tearDown(self):
        os.remove(self.filename)
        cdl_convert.reset_all()
    #==========================================================================
    # TESTS
    #==========================================================================
    def test_root_xml(self):
        """Tests that root_xml returns the full XML as expected"""
        self.assertEqual(self.target_xml_root, self.ccc.xml_root)
    #==========================================================================
    def test_base_xml(self):
        """Tests that the xml atrib returns the XML minus root as expected"""
        self.assertEqual(self.target_xml, self.ccc.xml)
    #==========================================================================
    def test_element(self):
        """Tests that the element returned is an etree type"""
        self.assertEqual('ColorCorrectionCollection', self.ccc.element.tag)
    #==========================================================================
    def test_write(self):
        """Tests writing the ccc itself"""
        mock_file = mock.mock_open()
        self.ccc._file_out = 'bobs_big_file.ccc'
        with mock.patch(builtins + '.open', mock_file, create=True):
            cdl_convert.write_ccc(self.ccc)
        mock_file.assert_called_once_with('bobs_big_file.ccc', 'wb')
        mock_file().write.assert_called_once_with(self.target_xml_root)
class TestWriteCCCFullAsCDL(TestWriteCCCFull):
    """Tests a full write of the CCC file as a CDL
    This is an integration style test. If parse_ccc stops working, this stops
    working.
    """
    #==========================================================================
    # SETUP & TEARDOWN
    #==========================================================================
    def setUp(self):
        # Same round-trip as the parent, but converted to a CDL first.
        with tempfile.NamedTemporaryFile(mode='wb', delete=False) as ccc_file:
            ccc_file.write(enc(CCC_FULL))
            self.filename = ccc_file.name
        self.ccc = cdl_convert.parse_ccc(self.filename)
        self.ccc.set_to_cdl()
        self.target_xml_root = enc(CCC_FULL_WRITE_CDL)
        self.target_xml = enc('\n'.join(CCC_FULL_WRITE_CDL.split('\n')[1:]))
    #==========================================================================
    def test_element(self):
        """Tests that the element returned is an etree type"""
        self.assertEqual('ColorDecisionList', self.ccc.element.tag)
    #==========================================================================
    def test_write(self):
        """Tests writing the ccc itself"""
        mock_file = mock.mock_open()
        self.ccc._file_out = 'bobs_big_file.cdl'
        with mock.patch(builtins + '.open', mock_file, create=True):
            cdl_convert.write_cdl(self.ccc)
        mock_file.assert_called_once_with('bobs_big_file.cdl', 'wb')
        mock_file().write.assert_called_once_with(self.target_xml_root)
class TestWriteCCCOdd(TestWriteCCCFull):
    """Tests an odd write of the CCC file
    This is an integration style test. If parse_ccc stops working, this stops
    working.
    """
    #==========================================================================
    # SETUP & TEARDOWN
    #==========================================================================
    def setUp(self):
        # Clear any leftover ColorCorrection state before parsing.
        cdl_convert.reset_all()
        with tempfile.NamedTemporaryFile(mode='wb', delete=False) as ccc_file:
            ccc_file.write(enc(CCC_ODD))
            self.filename = ccc_file.name
        self.ccc = cdl_convert.parse_ccc(self.filename)
        self.target_xml_root = enc(CCC_ODD_WRITE)
        self.target_xml = enc('\n'.join(CCC_ODD_WRITE.split('\n')[1:]))
class TestWriteCCCOddAsCDL(TestWriteCCCFullAsCDL):
    """Tests a full write of the CCC file as a CDL
    This is an integration style test. If parse_ccc stops working, this stops
    working.
    """
    #==========================================================================
    # SETUP & TEARDOWN
    #==========================================================================
    def setUp(self):
        # Parse the odd fixture, then convert it to a CDL before writing.
        with tempfile.NamedTemporaryFile(mode='wb', delete=False) as ccc_file:
            ccc_file.write(enc(CCC_ODD))
            self.filename = ccc_file.name
        self.ccc = cdl_convert.parse_ccc(self.filename)
        self.ccc.set_to_cdl()
        self.target_xml_root = enc(CCC_ODD_WRITE_CDL)
        self.target_xml = enc('\n'.join(CCC_ODD_WRITE_CDL.split('\n')[1:]))
#==============================================================================
# RUNNER
#==============================================================================
if __name__ == '__main__':
unittest.main()
| 34.774695
| 87
| 0.527448
| 3,045
| 31,332
| 5.350082
| 0.096223
| 0.067522
| 0.051071
| 0.045915
| 0.837334
| 0.807133
| 0.785771
| 0.767172
| 0.745258
| 0.731447
| 0
| 0.0725
| 0.255139
| 31,332
| 900
| 88
| 34.813333
| 0.625546
| 0.171965
| 0
| 0.826347
| 0
| 0
| 0.729112
| 0.131844
| 0
| 0
| 0
| 0
| 0.023952
| 1
| 0.035928
| false
| 0
| 0.01497
| 0
| 0.061377
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
02694fd1202c980b2d7a0d74d92e632c119fc0b2
| 63,793
|
py
|
Python
|
pscheduler-tool-iperf3/iperf3/iperf3_parser.py
|
igarny/pscheduler
|
0ab6e68bb3adb808e1116bab0eb7438bf4c31e2c
|
[
"Apache-2.0"
] | null | null | null |
pscheduler-tool-iperf3/iperf3/iperf3_parser.py
|
igarny/pscheduler
|
0ab6e68bb3adb808e1116bab0eb7438bf4c31e2c
|
[
"Apache-2.0"
] | null | null | null |
pscheduler-tool-iperf3/iperf3/iperf3_parser.py
|
igarny/pscheduler
|
0ab6e68bb3adb808e1116bab0eb7438bf4c31e2c
|
[
"Apache-2.0"
] | null | null | null |
import re
import pscheduler
import pprint
import json
logger = pscheduler.Log(quiet=True)
# A whole bunch of pattern matching against the output of the "iperf3" tool
# client output. Builds up an object of interesting bits from it.
def parse_output(lines):
    """Convert iperf3 JSON client output into a pscheduler throughput result.

    Arguments:
        lines -- list of strings making up the raw iperf3 JSON output

    Returns a dict with 'succeeded' (bool), plus either 'error' (str) on
    failure, or 'intervals' and 'summary' on success.
    """
    results = {}
    results['succeeded'] = True
    try:
        content = json.loads("".join(lines))
    except Exception as e:
        results['succeeded'] = False
        results['error'] = "Unable to parse iperf3 output as JSON: %s" % e
        return results
    # dict.has_key() was removed in Python 3; use the 'in' operator so this
    # parser works under both interpreters.
    if 'intervals' in content:
        intervals = content['intervals']
    else:
        results['succeeded'] = False
        results['error'] = "iperf3 output is missing required field 'intervals'"
        return results
    final_streams = []
    # Go through the JSON and convert to what we're expecting in throughput
    # tests. This is mostly a renaming since it's so similar.
    for interval in intervals:
        # These don't appear to be required by the JSON schema, so ignore
        # them if missing.
        streams = interval.get('streams', [])
        summary = interval.get('sum', {})
        renamed_streams = [rename_json(stream) for stream in streams]
        final_streams.append({
            "streams": renamed_streams,
            "summary": rename_json(summary)
        })
    if 'end' in content:
        sum_end = content['end']
    else:
        results['succeeded'] = False
        results['error'] = "iperf3 output is missing required field 'end'"
        return results
    # the "summary" keys are different for UDP/TCP here
    if "sum_sent" in sum_end:
        summary = sum_end["sum_sent"]
    elif "sum" in sum_end:
        summary = sum_end['sum']
    else:
        results['succeeded'] = False
        results['error'] = "iperf3 output has neither 'sum_sent' nor 'sum' field, and one of them is required"
        return results
    renamed_summary = rename_json(summary)
    # kind of like above, the streams summary is in a protocol-specific key;
    # json schema does not require it, so ignore if not provided
    renamed_sum_streams = []
    for sum_stream in sum_end.get('streams', []):
        if "udp" in sum_stream:
            renamed_sum_streams.append(rename_json(sum_stream['udp']))
        elif "sender" in sum_stream:
            renamed_sum_streams.append(rename_json(sum_stream['sender']))
    results["intervals"] = final_streams
    results["summary"] = {"streams": renamed_sum_streams, "summary": renamed_summary}
    return results
def rename_json(obj):
    """Translate iperf3 JSON keys into pscheduler throughput result keys.

    Keys not in the lookup table are silently dropped. Note that both
    "rtt" and "mean_rtt" map to "rtt", so if an object carries both, the
    one processed last wins.
    """
    lookup = {
        "socket": "stream-id",
        "start": "start",
        "end": "end",
        "bytes": "throughput-bytes",
        "bits_per_second": "throughput-bits",
        "omitted": "omitted",
        "jitter_ms": "jitter",
        # only for UDP
        "packets": "sent",
        "lost_packets": "lost",
        # only for TCP
        "retransmits": "retransmits",
        "snd_cwnd": "tcp-window-size",
        "rtt": "rtt",
        "mean_rtt": "rtt"
    }
    # dict.iteritems() and dict.has_key() were removed in Python 3; use
    # items() and the 'in' operator, which work on both interpreters.
    return dict((lookup[k], v) for k, v in obj.items() if k in lookup)
if __name__ == "__main__":
# Test "regular" output
test_output = """
{
"start": {
"connected": [{
"socket": 4,
"local_host": "10.0.2.15",
"local_port": 33600,
"remote_host": "10.0.2.4",
"remote_port": 5201
}],
"version": "iperf 3.1.3",
"system_info": "Linux ps-test1 2.6.32-642.3.1.el6.x86_64 #1 SMP Tue Jul 12 18:30:56 UTC 2016 x86_64",
"timestamp": {
"time": "Tue, 16 Aug 2016 03:39:47 GMT",
"timesecs": 1471318787
},
"connecting_to": {
"host": "10.0.2.4",
"port": 5201
},
"cookie": "ps-test1.1471318787.639126.54345cb13",
"tcp_mss_default": 1448,
"test_start": {
"protocol": "TCP",
"num_streams": 1,
"blksize": 131072,
"omit": 0,
"duration": 10,
"bytes": 0,
"blocks": 0,
"reverse": 0
}
},
"intervals": [{
"streams": [{
"socket": 4,
"start": 0,
"end": 1.000375,
"seconds": 1.000375,
"bytes": 1982312,
"bits_per_second": 15852550.779440,
"retransmits": 4,
"snd_cwnd": 53576,
"rtt": 7375,
"omitted": false
}],
"sum": {
"start": 0,
"end": 1.000375,
"seconds": 1.000375,
"bytes": 1982312,
"bits_per_second": 15852550.779440,
"retransmits": 4,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 1.000375,
"end": 2.004007,
"seconds": 1.003632,
"bytes": 301184,
"bits_per_second": 2400752.302863,
"retransmits": 2,
"snd_cwnd": 53576,
"rtt": 67000,
"omitted": false
}],
"sum": {
"start": 1.000375,
"end": 2.004007,
"seconds": 1.003632,
"bytes": 301184,
"bits_per_second": 2400752.302863,
"retransmits": 2,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 2.004007,
"end": 3.002219,
"seconds": 0.998212,
"bytes": 860864,
"bits_per_second": 6899248.818251,
"retransmits": 1,
"snd_cwnd": 72400,
"rtt": 3375,
"omitted": false
}],
"sum": {
"start": 2.004007,
"end": 3.002219,
"seconds": 0.998212,
"bytes": 860864,
"bits_per_second": 6899248.818251,
"retransmits": 1,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 3.002219,
"end": 4.003231,
"seconds": 1.001012,
"bytes": 2033744,
"bits_per_second": 16253502.044018,
"retransmits": 3,
"snd_cwnd": 99912,
"rtt": 10500,
"omitted": false
}],
"sum": {
"start": 3.002219,
"end": 4.003231,
"seconds": 1.001012,
"bytes": 2033744,
"bits_per_second": 16253502.044018,
"retransmits": 3,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 4.003231,
"end": 5.000839,
"seconds": 0.997608,
"bytes": 2805528,
"bits_per_second": 22498040.518909,
"retransmits": 3,
"snd_cwnd": 136112,
"rtt": 3750,
"omitted": false
}],
"sum": {
"start": 4.003231,
"end": 5.000839,
"seconds": 0.997608,
"bytes": 2805528,
"bits_per_second": 22498040.518909,
"retransmits": 3,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 5.000839,
"end": 6.002020,
"seconds": 1.001181,
"bytes": 23605296,
"bits_per_second": 188619584.572292,
"retransmits": 48,
"snd_cwnd": 36200,
"rtt": 1875,
"omitted": false
}],
"sum": {
"start": 5.000839,
"end": 6.002020,
"seconds": 1.001181,
"bytes": 23605296,
"bits_per_second": 188619584.572292,
"retransmits": 48,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 6.002020,
"end": 7.000188,
"seconds": 0.998168,
"bytes": 52243840,
"bits_per_second": 418717814.537474,
"retransmits": 48,
"snd_cwnd": 194032,
"rtt": 1875,
"omitted": false
}],
"sum": {
"start": 6.002020,
"end": 7.000188,
"seconds": 0.998168,
"bytes": 52243840,
"bits_per_second": 418717814.537474,
"retransmits": 48,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 7.000188,
"end": 8.000270,
"seconds": 1.000082,
"bytes": 179971920,
"bits_per_second": 1.439657e+09,
"retransmits": 1,
"snd_cwnd": 231680,
"rtt": 1875,
"omitted": false
}],
"sum": {
"start": 7.000188,
"end": 8.000270,
"seconds": 1.000082,
"bytes": 179971920,
"bits_per_second": 1.439657e+09,
"retransmits": 1,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 8.000270,
"end": 9.000164,
"seconds": 0.999894,
"bytes": 213855120,
"bits_per_second": 1.711022e+09,
"retransmits": 45,
"snd_cwnd": 204168,
"rtt": 1875,
"omitted": false
}],
"sum": {
"start": 8.000270,
"end": 9.000164,
"seconds": 0.999894,
"bytes": 213855120,
"bits_per_second": 1.711022e+09,
"retransmits": 45,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 9.000164,
"end": 10.001024,
"seconds": 1.000860,
"bytes": 8983392,
"bits_per_second": 71805385.105436,
"retransmits": 4,
"snd_cwnd": 60816,
"rtt": 2250,
"omitted": false
}],
"sum": {
"start": 9.000164,
"end": 10.001024,
"seconds": 1.000860,
"bytes": 8983392,
"bits_per_second": 71805385.105436,
"retransmits": 4,
"omitted": false
}
}],
"end": {
"streams": [{
"sender": {
"socket": 4,
"start": 0,
"end": 10.001024,
"seconds": 10.001024,
"bytes": 486643200,
"bits_per_second": 389274697.967401,
"retransmits": 159,
"max_snd_cwnd": 231680,
"max_rtt": 67000,
"min_rtt": 1875,
"mean_rtt": 10175
},
"receiver": {
"socket": 4,
"start": 0,
"end": 10.001024,
"seconds": 10.001024,
"bytes": 485969880,
"bits_per_second": 388736097.120548
}
}],
"sum_sent": {
"start": 0,
"end": 10.001024,
"seconds": 10.001024,
"bytes": 486643200,
"bits_per_second": 389274697.967401,
"retransmits": 159
},
"sum_received": {
"start": 0,
"end": 10.001024,
"seconds": 10.001024,
"bytes": 485969880,
"bits_per_second": 388736097.120548
},
"cpu_utilization_percent": {
"host_total": 2.181510,
"host_user": 0.148710,
"host_system": 2.101865,
"remote_total": 4.763802,
"remote_user": 0.015363,
"remote_system": 4.763079
}
}
}
"""
result = parse_output(test_output.split("\n"))
pprint.PrettyPrinter(indent=4).pprint(result)
test_output = """
{
"start": {
"connected": [{
"socket": 4,
"local_host": "10.0.2.15",
"local_port": 49036,
"remote_host": "10.0.2.4",
"remote_port": 5201
}],
"version": "iperf 3.1.3",
"system_info": "Linux ps-test1 2.6.32-642.3.1.el6.x86_64 #1 SMP Tue Jul 12 18:30:56 UTC 2016 x86_64",
"timestamp": {
"time": "Tue, 16 Aug 2016 04:48:35 GMT",
"timesecs": 1471322915
},
"connecting_to": {
"host": "10.0.2.4",
"port": 5201
},
"cookie": "ps-test1.1471322915.508871.24c661250",
"test_start": {
"protocol": "UDP",
"num_streams": 1,
"blksize": 8192,
"omit": 0,
"duration": 10,
"bytes": 0,
"blocks": 0,
"reverse": 0
}
},
"intervals": [{
"streams": [{
"socket": 4,
"start": 0,
"end": 1.001342,
"seconds": 1.001342,
"bytes": 131072,
"bits_per_second": 1047170.885411,
"packets": 16,
"omitted": false
}],
"sum": {
"start": 0,
"end": 1.001342,
"seconds": 1.001342,
"bytes": 131072,
"bits_per_second": 1047170.885411,
"packets": 16,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 1.001342,
"end": 2.001610,
"seconds": 1.000268,
"bytes": 131072,
"bits_per_second": 1048295.075283,
"packets": 16,
"omitted": false
}],
"sum": {
"start": 1.001342,
"end": 2.001610,
"seconds": 1.000268,
"bytes": 131072,
"bits_per_second": 1048295.075283,
"packets": 16,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 2.001610,
"end": 3.001298,
"seconds": 0.999688,
"bytes": 131072,
"bits_per_second": 1048903.102007,
"packets": 16,
"omitted": false
}],
"sum": {
"start": 2.001610,
"end": 3.001298,
"seconds": 0.999688,
"bytes": 131072,
"bits_per_second": 1048903.102007,
"packets": 16,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 3.001298,
"end": 4.001750,
"seconds": 1.000452,
"bytes": 131072,
"bits_per_second": 1048102.214171,
"packets": 16,
"omitted": false
}],
"sum": {
"start": 3.001298,
"end": 4.001750,
"seconds": 1.000452,
"bytes": 131072,
"bits_per_second": 1048102.214171,
"packets": 16,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 4.001750,
"end": 5.001297,
"seconds": 0.999547,
"bytes": 131072,
"bits_per_second": 1049051.215270,
"packets": 16,
"omitted": false
}],
"sum": {
"start": 4.001750,
"end": 5.001297,
"seconds": 0.999547,
"bytes": 131072,
"bits_per_second": 1049051.215270,
"packets": 16,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 5.001297,
"end": 6.000906,
"seconds": 0.999609,
"bytes": 131072,
"bits_per_second": 1048986.160375,
"packets": 16,
"omitted": false
}],
"sum": {
"start": 5.001297,
"end": 6.000906,
"seconds": 0.999609,
"bytes": 131072,
"bits_per_second": 1048986.160375,
"packets": 16,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 6.000906,
"end": 7.001737,
"seconds": 1.000831,
"bytes": 131072,
"bits_per_second": 1047705.473311,
"packets": 16,
"omitted": false
}],
"sum": {
"start": 6.000906,
"end": 7.001737,
"seconds": 1.000831,
"bytes": 131072,
"bits_per_second": 1047705.473311,
"packets": 16,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 7.001737,
"end": 8.001043,
"seconds": 0.999306,
"bytes": 131072,
"bits_per_second": 1049304.255436,
"packets": 16,
"omitted": false
}],
"sum": {
"start": 7.001737,
"end": 8.001043,
"seconds": 0.999306,
"bytes": 131072,
"bits_per_second": 1049304.255436,
"packets": 16,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 8.001043,
"end": 9.001251,
"seconds": 1.000208,
"bytes": 131072,
"bits_per_second": 1048357.795417,
"packets": 16,
"omitted": false
}],
"sum": {
"start": 8.001043,
"end": 9.001251,
"seconds": 1.000208,
"bytes": 131072,
"bits_per_second": 1048357.795417,
"packets": 16,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 9.001251,
"end": 10.000900,
"seconds": 0.999649,
"bytes": 131072,
"bits_per_second": 1048944.129196,
"packets": 16,
"omitted": false
}],
"sum": {
"start": 9.001251,
"end": 10.000900,
"seconds": 0.999649,
"bytes": 131072,
"bits_per_second": 1048944.129196,
"packets": 16,
"omitted": false
}
}],
"end": {
"streams": [{
"udp": {
"socket": 4,
"start": 0,
"end": 10.000900,
"seconds": 10.000900,
"bytes": 1310720,
"bits_per_second": 1048481.633493,
"jitter_ms": 2735.461000,
"lost_packets": 0,
"packets": 159,
"lost_percent": 0,
"out_of_order": 0
}
}],
"sum": {
"start": 0,
"end": 10.000900,
"seconds": 10.000900,
"bytes": 1310720,
"bits_per_second": 1048481.633493,
"jitter_ms": 2735.461000,
"lost_packets": 0,
"packets": 159,
"lost_percent": 0
},
"cpu_utilization_percent": {
"host_total": 0.892589,
"host_user": 0.019825,
"host_system": 0.971782,
"remote_total": 0.135247,
"remote_user": 0,
"remote_system": 0.135226
}
}
}
"""
result = parse_output(test_output.split("\n"))
pprint.PrettyPrinter(indent=4).pprint(result)
test_output = """
{
"start": {
"connected": [{
"socket": 4,
"local_host": "150.254.208.65",
"local_port": 35026,
"remote_host": "140.182.44.177",
"remote_port": 5201
}],
"version": "iperf 3.1.6",
"system_info": "Linux ps-4-0 4.4.0-59-generic #80~14.04.1-Ubuntu SMP Fri Jan 6 18:02:02 UTC 2017 x86_64",
"timestamp": {
"time": "Mon, 13 Feb 2017 19:14:16 GMT",
"timesecs": 1487013256
},
"connecting_to": {
"host": "140.182.44.177",
"port": 5201
},
"cookie": "ps-4-0.1487013255.986166.651993251aa",
"tcp_mss_default": 1448,
"test_start": {
"protocol": "TCP",
"num_streams": 1,
"blksize": 131072,
"omit": 0,
"duration": 20,
"bytes": 0,
"blocks": 0,
"reverse": 0
}
},
"intervals": [{
"streams": [{
"socket": 4,
"start": 0,
"end": 1.000095,
"seconds": 1.000095,
"bytes": 2255984,
"bits_per_second": 18046155.287058,
"retransmits": 0,
"snd_cwnd": 350416,
"rtt": 134320,
"omitted": false
}],
"sum": {
"start": 0,
"end": 1.000095,
"seconds": 1.000095,
"bytes": 2255984,
"bits_per_second": 18046155.287058,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 1.000095,
"end": 2.000278,
"seconds": 1.000183,
"bytes": 3598840,
"bits_per_second": 28785456.088557,
"retransmits": 137,
"snd_cwnd": 209960,
"rtt": 134534,
"omitted": false
}],
"sum": {
"start": 1.000095,
"end": 2.000278,
"seconds": 1.000183,
"bytes": 3598840,
"bits_per_second": 28785456.088557,
"retransmits": 137,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 2.000278,
"end": 3.000251,
"seconds": 0.999973,
"bytes": 1310720,
"bits_per_second": 10486042.507611,
"retransmits": 0,
"snd_cwnd": 221544,
"rtt": 134808,
"omitted": false
}],
"sum": {
"start": 2.000278,
"end": 3.000251,
"seconds": 0.999973,
"bytes": 1310720,
"bits_per_second": 10486042.507611,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 3.000251,
"end": 4.003264,
"seconds": 1.003013,
"bytes": 2621440,
"bits_per_second": 20908519.827961,
"retransmits": 0,
"snd_cwnd": 318560,
"rtt": 140105,
"omitted": false
}],
"sum": {
"start": 3.000251,
"end": 4.003264,
"seconds": 1.003013,
"bytes": 2621440,
"bits_per_second": 20908519.827961,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 4.003264,
"end": 5.000230,
"seconds": 0.996966,
"bytes": 2621440,
"bits_per_second": 21035343.648278,
"retransmits": 0,
"snd_cwnd": 543000,
"rtt": 134169,
"omitted": false
}],
"sum": {
"start": 4.003264,
"end": 5.000230,
"seconds": 0.996966,
"bytes": 2621440,
"bits_per_second": 21035343.648278,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 5.000230,
"end": 6.000112,
"seconds": 0.999882,
"bytes": 5242880,
"bits_per_second": 41947990.584254,
"retransmits": 0,
"snd_cwnd": 834048,
"rtt": 137125,
"omitted": false
}],
"sum": {
"start": 5.000230,
"end": 6.000112,
"seconds": 0.999882,
"bytes": 5242880,
"bits_per_second": 41947990.584254,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 6.000112,
"end": 7.000081,
"seconds": 0.999969,
"bytes": 9175040,
"bits_per_second": 73402595.070514,
"retransmits": 0,
"snd_cwnd": 1274240,
"rtt": 134496,
"omitted": false
}],
"sum": {
"start": 6.000112,
"end": 7.000081,
"seconds": 0.999969,
"bytes": 9175040,
"bits_per_second": 73402595.070514,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 7.000081,
"end": 8.000090,
"seconds": 1.000009,
"bytes": 13107200,
"bits_per_second": 104856650.008607,
"retransmits": 0,
"snd_cwnd": 1918600,
"rtt": 134377,
"omitted": false
}],
"sum": {
"start": 7.000081,
"end": 8.000090,
"seconds": 1.000009,
"bytes": 13107200,
"bits_per_second": 104856650.008607,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 8.000090,
"end": 9.000100,
"seconds": 1.000010,
"bytes": 19660800,
"bits_per_second": 157284825.015771,
"retransmits": 0,
"snd_cwnd": 2735272,
"rtt": 134310,
"omitted": false
}],
"sum": {
"start": 8.000090,
"end": 9.000100,
"seconds": 1.000010,
"bytes": 19660800,
"bits_per_second": 157284825.015771,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 9.000100,
"end": 10.000086,
"seconds": 0.999986,
"bytes": 27525120,
"bits_per_second": 220204057.543572,
"retransmits": 105,
"snd_cwnd": 3095824,
"rtt": 137908,
"omitted": false
}],
"sum": {
"start": 9.000100,
"end": 10.000086,
"seconds": 0.999986,
"bytes": 27525120,
"bits_per_second": 220204057.543572,
"retransmits": 105,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 10.000086,
"end": 11.000096,
"seconds": 1.000010,
"bytes": 14417920,
"bits_per_second": 115342205.011566,
"retransmits": 285,
"snd_cwnd": 1849096,
"rtt": 134890,
"omitted": false
}],
"sum": {
"start": 10.000086,
"end": 11.000096,
"seconds": 1.000010,
"bytes": 14417920,
"bits_per_second": 115342205.011566,
"retransmits": 285,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 11.000096,
"end": 12.000080,
"seconds": 0.999984,
"bytes": 14417920,
"bits_per_second": 115345202.529433,
"retransmits": 0,
"snd_cwnd": 1901224,
"rtt": 134602,
"omitted": false
}],
"sum": {
"start": 11.000096,
"end": 12.000080,
"seconds": 0.999984,
"bytes": 14417920,
"bits_per_second": 115345202.529433,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 12.000080,
"end": 13.000086,
"seconds": 1.000006,
"bytes": 14417920,
"bits_per_second": 115342672.504098,
"retransmits": 0,
"snd_cwnd": 2106840,
"rtt": 137683,
"omitted": false
}],
"sum": {
"start": 12.000080,
"end": 13.000086,
"seconds": 1.000006,
"bytes": 14417920,
"bits_per_second": 115342672.504098,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 13.000086,
"end": 14.000088,
"seconds": 1.000002,
"bytes": 15728640,
"bits_per_second": 125828880.000458,
"retransmits": 0,
"snd_cwnd": 2457256,
"rtt": 134995,
"omitted": false
}],
"sum": {
"start": 13.000086,
"end": 14.000088,
"seconds": 1.000002,
"bytes": 15728640,
"bits_per_second": 125828880.000458,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 14.000088,
"end": 15.000085,
"seconds": 0.999997,
"bytes": 19660800,
"bits_per_second": 157286850.001287,
"retransmits": 0,
"snd_cwnd": 2930752,
"rtt": 134586,
"omitted": false
}],
"sum": {
"start": 14.000088,
"end": 15.000085,
"seconds": 0.999997,
"bytes": 19660800,
"bits_per_second": 157286850.001287,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 15.000085,
"end": 16.000087,
"seconds": 1.000002,
"bytes": 19660800,
"bits_per_second": 157286100.000572,
"retransmits": 102,
"snd_cwnd": 1637688,
"rtt": 134678,
"omitted": false
}],
"sum": {
"start": 15.000085,
"end": 16.000087,
"seconds": 1.000002,
"bytes": 19660800,
"bits_per_second": 157286100.000572,
"retransmits": 102,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 16.000087,
"end": 17.000085,
"seconds": 0.999998,
"bytes": 11796480,
"bits_per_second": 94372020.000343,
"retransmits": 0,
"snd_cwnd": 1647824,
"rtt": 134401,
"omitted": false
}],
"sum": {
"start": 16.000087,
"end": 17.000085,
"seconds": 0.999998,
"bytes": 11796480,
"bits_per_second": 94372020.000343,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 17.000085,
"end": 18.000109,
"seconds": 1.000024,
"bytes": 11796480,
"bits_per_second": 94369567.554721,
"retransmits": 0,
"snd_cwnd": 1765112,
"rtt": 134668,
"omitted": false
}],
"sum": {
"start": 17.000085,
"end": 18.000109,
"seconds": 1.000024,
"bytes": 11796480,
"bits_per_second": 94369567.554721,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 18.000109,
"end": 19.000080,
"seconds": 0.999971,
"bytes": 14417920,
"bits_per_second": 115346715.097590,
"retransmits": 0,
"snd_cwnd": 2031544,
"rtt": 135142,
"omitted": false
}],
"sum": {
"start": 18.000109,
"end": 19.000080,
"seconds": 0.999971,
"bytes": 14417920,
"bits_per_second": 115346715.097590,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 19.000080,
"end": 20.000215,
"seconds": 1.000135,
"bytes": 11796480,
"bits_per_second": 94359106.718292,
"retransmits": 57,
"snd_cwnd": 1122200,
"rtt": 135457,
"omitted": false
}],
"sum": {
"start": 19.000080,
"end": 20.000215,
"seconds": 1.000135,
"bytes": 11796480,
"bits_per_second": 94359106.718292,
"retransmits": 57,
"omitted": false
}
}],
"end": {
"streams": [{
"sender": {
"socket": 4,
"start": 0,
"end": 20.000215,
"seconds": 20.000215,
"bytes": 235230824,
"bits_per_second": 94091317.866364,
"retransmits": 686,
"max_snd_cwnd": 3095824,
"max_rtt": 140105,
"min_rtt": 134169,
"mean_rtt": 135362
},
"receiver": {
"socket": 4,
"start": 0,
"end": 20.000215,
"seconds": 20.000215,
"bytes": 221415128,
"bits_per_second": 88565098.888017
}
}],
"sum_sent": {
"start": 0,
"end": 20.000215,
"seconds": 20.000215,
"bytes": 235230824,
"bits_per_second": 94091317.866364,
"retransmits": 686
},
"sum_received": {
"start": 0,
"end": 20.000215,
"seconds": 20.000215,
"bytes": 221415128,
"bits_per_second": 88565098.888017
},
"cpu_utilization_percent": {
"host_total": 0.957782,
"host_user": 0.038625,
"host_system": 0.907690,
"remote_total": 5.297520,
"remote_user": 0,
"remote_system": 5.307290
},
"sender_tcp_congestion": "htcp"
}
}
"""
result = parse_output(test_output.split("\n"))
pprint.PrettyPrinter(indent=4).pprint(result)
test_output = """
{
"start" : {
"connected": [{
"socket": 4,
"local_host": "150.254.208.65",
"local_port": 40574,
"remote_host": "140.182.44.177",
"remote_port": 5201
}],
"version": "iperf 3.1.6",
"system_info": "Linux ps-4-0 4.4.0-59-generic #80~14.04.1-Ubuntu SMP Fri Jan 6 18:02:02 UTC 2017 x86_64",
"timestamp": {
"time": "Mon, 13 Feb 2017 19:29:01 GMT",
"timesecs": 1487014141
},
"connecting_to": {
"host": "140.182.44.177",
"port": 5201
},
"cookie": "ps-4-0.1487014141.570141.2f10aa7723f",
"tcp_mss_default": 1448,
"test_start": {
"protocol": "TCP",
"num_streams": 1,
"blksize": 131072,
"omit": 0,
"duration": 20,
"bytes": 0,
"blocks": 0,
"reverse": 0
}
},
"intervals": [{
"streams": [{
"socket": 4,
"start": 0,
"end": 1.000098,
"seconds": 1.000098,
"bytes": 2114080,
"bits_per_second": 16910982.892177,
"retransmits": 0,
"snd_cwnd": 304080,
"rtt": 134421,
"omitted": false
}],
"sum": {
"start": 0,
"end": 1.000098,
"seconds": 1.000098,
"bytes": 2114080,
"bits_per_second": 16910982.892177,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 1.000098,
"end": 2.000288,
"seconds": 1.000190,
"bytes": 2150280,
"bits_per_second": 17198971.858117,
"retransmits": 35,
"snd_cwnd": 130320,
"rtt": 133798,
"omitted": false
}],
"sum": {
"start": 1.000098,
"end": 2.000288,
"seconds": 1.000190,
"bytes": 2150280,
"bits_per_second": 17198971.858117,
"retransmits": 35,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 2.000288,
"end": 3.000341,
"seconds": 1.000053,
"bytes": 1042560,
"bits_per_second": 8340038.570728,
"retransmits": 0,
"snd_cwnd": 144800,
"rtt": 133840,
"omitted": false
}],
"sum": {
"start": 2.000288,
"end": 3.000341,
"seconds": 1.000053,
"bytes": 1042560,
"bits_per_second": 8340038.570728,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 3.000341,
"end": 4.000411,
"seconds": 1.000070,
"bytes": 1042560,
"bits_per_second": 8339897.402759,
"retransmits": 0,
"snd_cwnd": 215752,
"rtt": 138897,
"omitted": false
}],
"sum": {
"start": 3.000341,
"end": 4.000411,
"seconds": 1.000070,
"bytes": 1042560,
"bits_per_second": 8339897.402759,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 4.000411,
"end": 5.000199,
"seconds": 0.999788,
"bytes": 2085120,
"bits_per_second": 16684496.347688,
"retransmits": 0,
"snd_cwnd": 354760,
"rtt": 133983,
"omitted": false
}],
"sum": {
"start": 4.000411,
"end": 5.000199,
"seconds": 0.999788,
"bytes": 2085120,
"bits_per_second": 16684496.347688,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 5.000199,
"end": 6.000137,
"seconds": 0.999938,
"bytes": 3258000,
"bits_per_second": 26065615.777040,
"retransmits": 0,
"snd_cwnd": 563272,
"rtt": 133985,
"omitted": false
}],
"sum": {
"start": 5.000199,
"end": 6.000137,
"seconds": 0.999938,
"bytes": 3258000,
"bits_per_second": 26065615.777040,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 6.000137,
"end": 7.000188,
"seconds": 1.000051,
"bytes": 5170200,
"bits_per_second": 41359489.773652,
"retransmits": 32,
"snd_cwnd": 380824,
"rtt": 134023,
"omitted": false
}],
"sum": {
"start": 6.000137,
"end": 7.000188,
"seconds": 1.000051,
"bytes": 5170200,
"bits_per_second": 41359489.773652,
"retransmits": 32,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 7.000188,
"end": 8.000114,
"seconds": 0.999926,
"bytes": 2621440,
"bits_per_second": 20973070.114569,
"retransmits": 0,
"snd_cwnd": 390960,
"rtt": 134016,
"omitted": false
}],
"sum": {
"start": 7.000188,
"end": 8.000114,
"seconds": 0.999926,
"bytes": 2621440,
"bits_per_second": 20973070.114569,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 8.000114,
"end": 9.000442,
"seconds": 1.000328,
"bytes": 2621440,
"bits_per_second": 20964647.253062,
"retransmits": 0,
"snd_cwnd": 480736,
"rtt": 133858,
"omitted": false
}],
"sum": {
"start": 8.000114,
"end": 9.000442,
"seconds": 1.000328,
"bytes": 2621440,
"bits_per_second": 20964647.253062,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 9.000442,
"end": 10.000102,
"seconds": 0.999660,
"bytes": 5242880,
"bits_per_second": 41957304.849833,
"retransmits": 0,
"snd_cwnd": 695040,
"rtt": 134062,
"omitted": false
}],
"sum": {
"start": 9.000442,
"end": 10.000102,
"seconds": 0.999660,
"bytes": 5242880,
"bits_per_second": 41957304.849833,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 10.000102,
"end": 11.000162,
"seconds": 1.000060,
"bytes": 6553600,
"bits_per_second": 52425650.189245,
"retransmits": 0,
"snd_cwnd": 986088,
"rtt": 134923,
"omitted": false
}],
"sum": {
"start": 10.000102,
"end": 11.000162,
"seconds": 1.000060,
"bytes": 6553600,
"bits_per_second": 52425650.189245,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 11.000162,
"end": 12.000334,
"seconds": 1.000172,
"bytes": 5242880,
"bits_per_second": 41935821.242624,
"retransmits": 199,
"snd_cwnd": 279464,
"rtt": 134194,
"omitted": false
}],
"sum": {
"start": 11.000162,
"end": 12.000334,
"seconds": 1.000172,
"bytes": 5242880,
"bits_per_second": 41935821.242624,
"retransmits": 199,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 12.000334,
"end": 13.000138,
"seconds": 0.999804,
"bytes": 2621440,
"bits_per_second": 20975635.807598,
"retransmits": 0,
"snd_cwnd": 293944,
"rtt": 133866,
"omitted": false
}],
"sum": {
"start": 12.000334,
"end": 13.000138,
"seconds": 0.999804,
"bytes": 2621440,
"bits_per_second": 20975635.807598,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 13.000138,
"end": 14.000082,
"seconds": 0.999944,
"bytes": 2621440,
"bits_per_second": 20972690.065278,
"retransmits": 0,
"snd_cwnd": 402544,
"rtt": 134400,
"omitted": false
}],
"sum": {
"start": 13.000138,
"end": 14.000082,
"seconds": 0.999944,
"bytes": 2621440,
"bits_per_second": 20972690.065278,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 14.000082,
"end": 15.000251,
"seconds": 1.000169,
"bytes": 2621440,
"bits_per_second": 20967980.597452,
"retransmits": 0,
"snd_cwnd": 577752,
"rtt": 137868,
"omitted": false
}],
"sum": {
"start": 14.000082,
"end": 15.000251,
"seconds": 1.000169,
"bytes": 2621440,
"bits_per_second": 20967980.597452,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 15.000251,
"end": 16.000279,
"seconds": 1.000028,
"bytes": 2621440,
"bits_per_second": 20970930.016598,
"retransmits": 15,
"snd_cwnd": 308424,
"rtt": 134372,
"omitted": false
}],
"sum": {
"start": 15.000251,
"end": 16.000279,
"seconds": 1.000028,
"bytes": 2621440,
"bits_per_second": 20970930.016598,
"retransmits": 15,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 16.000279,
"end": 17.000356,
"seconds": 1.000077,
"bytes": 2621440,
"bits_per_second": 20969905.124360,
"retransmits": 0,
"snd_cwnd": 360552,
"rtt": 138924,
"omitted": false
}],
"sum": {
"start": 16.000279,
"end": 17.000356,
"seconds": 1.000077,
"bytes": 2621440,
"bits_per_second": 20969905.124360,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 17.000356,
"end": 18.000418,
"seconds": 1.000062,
"bytes": 2621440,
"bits_per_second": 20970220.080580,
"retransmits": 0,
"snd_cwnd": 524176,
"rtt": 138088,
"omitted": false
}],
"sum": {
"start": 17.000356,
"end": 18.000418,
"seconds": 1.000062,
"bytes": 2621440,
"bits_per_second": 20970220.080580,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 18.000418,
"end": 19.000208,
"seconds": 0.999790,
"bytes": 5242880,
"bits_per_second": 41951851.850901,
"retransmits": 0,
"snd_cwnd": 836944,
"rtt": 133874,
"omitted": false
}],
"sum": {
"start": 18.000418,
"end": 19.000208,
"seconds": 0.999790,
"bytes": 5242880,
"bits_per_second": 41951851.850901,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 4,
"start": 19.000208,
"end": 20.000146,
"seconds": 0.999938,
"bytes": 7864320,
"bits_per_second": 62918460.241771,
"retransmits": 0,
"snd_cwnd": 1206184,
"rtt": 135744,
"omitted": false
}],
"sum": {
"start": 19.000208,
"end": 20.000146,
"seconds": 0.999938,
"bytes": 7864320,
"bits_per_second": 62918460.241771,
"retransmits": 0,
"omitted": false
}
}],
"end": {
"streams": [{
"sender": {
"socket": 4,
"start": 0,
"end": 20.000146,
"seconds": 20.000146,
"bytes": 67980880,
"bits_per_second": 27192153.616692,
"retransmits": 281,
"max_snd_cwnd": 1206184,
"max_rtt": 138924,
"min_rtt": 133798,
"mean_rtt": 135056
},
"receiver": {
"socket": 4,
"start": 0,
"end": 20.000146,
"seconds": 20.000146,
"bytes": 63723584,
"bits_per_second": 25489247.640428
}
}],
"sum_sent": {
"start": 0,
"end": 20.000146,
"seconds": 20.000146,
"bytes": 67980880,
"bits_per_second": 27192153.616692,
"retransmits": 281
},
"sum_received": {
"start": 0,
"end": 20.000146,
"seconds": 20.000146,
"bytes": 63723584,
"bits_per_second": 25489247.640428
},
"cpu_utilization_percent": {
"host_total": 0.863500,
"host_user": 0.115875,
"host_system": 0.753187,
"remote_total": 1.768670,
"remote_user": 0.083237,
"remote_system": 1.713290
},
"sender_tcp_congestion": "htcp"
}
}
"""
# Same check for the second iperf3 JSON fixture: split the captured report
# into lines for parse_output (defined earlier in this file — TODO confirm
# it takes an iterable of lines) and pretty-print the result.
result = parse_output(test_output.split("\n"))
pprint.PrettyPrinter(indent=4).pprint(result)
| 36.061617
| 122
| 0.32165
| 4,226
| 63,793
| 4.735447
| 0.160435
| 0.047222
| 0.087697
| 0.056966
| 0.827254
| 0.784829
| 0.772636
| 0.756446
| 0.730312
| 0.696532
| 0
| 0.262846
| 0.565893
| 63,793
| 1,768
| 123
| 36.082014
| 0.459791
| 0.008606
| 0
| 0.80758
| 0
| 0.002332
| 0.955749
| 0.005662
| 0
| 0
| 0
| 0
| 0
| 1
| 0.001166
| false
| 0
| 0.002332
| 0
| 0.006997
| 0.002915
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
026bd6425a9781d510891eb67dbd33f576ab28a3
| 507
|
py
|
Python
|
2020-04-13/test_obj.py
|
feieryouyiji/learningpy
|
e110ec72ec3e4246b5195028776cdbaa22d25678
|
[
"MIT"
] | null | null | null |
2020-04-13/test_obj.py
|
feieryouyiji/learningpy
|
e110ec72ec3e4246b5195028776cdbaa22d25678
|
[
"MIT"
] | null | null | null |
2020-04-13/test_obj.py
|
feieryouyiji/learningpy
|
e110ec72ec3e4246b5195028776cdbaa22d25678
|
[
"MIT"
] | null | null | null |
# class Student(object):
# def __init__(self, name, score):
# self.name = name
# self.score = score
# def print_score(self):
# print('%s: %s' % (self.name, self.score))
class Student(object):
    """A student record holding a name and a score.

    The underlying attributes use double-underscore name mangling
    (``__name`` / ``__score``) so they are effectively private.  Read-only
    ``name`` and ``score`` properties expose them to callers — without
    these, external attribute access raises AttributeError because the
    mangled names are ``_Student__name`` / ``_Student__score``.
    """

    def __init__(self, name, score):
        # Mangled attributes: private by convention and by mangling.
        self.__name = name
        self.__score = score

    @property
    def name(self):
        """Read-only view of the student's name."""
        return self.__name

    @property
    def score(self):
        """Read-only view of the student's score."""
        return self.__score

    def print_score(self):
        # Keep the original "name: score" output format byte-for-byte.
        print('%s: %s' % (self.__name, self.__score))
# Demo of the Student class.  The original code read fyp.name / fyp.score
# directly, which raises AttributeError: double-underscore attributes are
# name-mangled to _Student__name / _Student__score and are not reachable
# under their plain names.  Print via repr and the public print_score()
# method instead, so the script runs to completion.
fyp = Student('fyp', 59)
print("fyp===>", fyp)
fyp.print_score()
| 21.125
| 53
| 0.56213
| 65
| 507
| 4.107692
| 0.184615
| 0.179775
| 0.194757
| 0.157303
| 0.786517
| 0.786517
| 0.786517
| 0.786517
| 0.786517
| 0.786517
| 0
| 0.005236
| 0.246548
| 507
| 23
| 54
| 22.043478
| 0.693717
| 0.370809
| 0
| 0
| 0
| 0
| 0.096154
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0
| 0
| 0.3
| 0.5
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
5a2c175a5b2e6eb67909e91d420d3ee8763cbd8c
| 31,117
|
py
|
Python
|
packages/fetchai/protocols/state_update/state_update_pb2.py
|
marcofavorito/agents-aea
|
e520f2f5d076a193514e194d94aa76c6423ac5bc
|
[
"Apache-2.0"
] | 13
|
2020-11-13T11:29:46.000Z
|
2021-11-29T18:29:41.000Z
|
packages/fetchai/protocols/state_update/state_update_pb2.py
|
marcofavorito/agents-aea
|
e520f2f5d076a193514e194d94aa76c6423ac5bc
|
[
"Apache-2.0"
] | 7
|
2020-11-13T10:37:16.000Z
|
2020-12-21T08:08:12.000Z
|
packages/fetchai/protocols/state_update/state_update_pb2.py
|
marcofavorito/agents-aea
|
e520f2f5d076a193514e194d94aa76c6423ac5bc
|
[
"Apache-2.0"
] | 4
|
2020-11-27T20:50:09.000Z
|
2021-11-30T16:36:29.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: state_update.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="state_update.proto",
package="aea.fetchai.state_update",
syntax="proto3",
serialized_options=None,
serialized_pb=b'\n\x12state_update.proto\x12\x18\x61\x65\x61.fetchai.state_update"\xd2\x0b\n\x12StateUpdateMessage\x12P\n\x05\x61pply\x18\x05 \x01(\x0b\x32?.aea.fetchai.state_update.StateUpdateMessage.Apply_PerformativeH\x00\x12L\n\x03\x65nd\x18\x06 \x01(\x0b\x32=.aea.fetchai.state_update.StateUpdateMessage.End_PerformativeH\x00\x12Z\n\ninitialize\x18\x07 \x01(\x0b\x32\x44.aea.fetchai.state_update.StateUpdateMessage.Initialize_PerformativeH\x00\x1a\x9e\x06\n\x17Initialize_Performative\x12\x8c\x01\n\x1e\x65xchange_params_by_currency_id\x18\x01 \x03(\x0b\x32\x64.aea.fetchai.state_update.StateUpdateMessage.Initialize_Performative.ExchangeParamsByCurrencyIdEntry\x12\x82\x01\n\x19utility_params_by_good_id\x18\x02 \x03(\x0b\x32_.aea.fetchai.state_update.StateUpdateMessage.Initialize_Performative.UtilityParamsByGoodIdEntry\x12{\n\x15\x61mount_by_currency_id\x18\x03 \x03(\x0b\x32\\.aea.fetchai.state_update.StateUpdateMessage.Initialize_Performative.AmountByCurrencyIdEntry\x12{\n\x15quantities_by_good_id\x18\x04 \x03(\x0b\x32\\.aea.fetchai.state_update.StateUpdateMessage.Initialize_Performative.QuantitiesByGoodIdEntry\x1a\x41\n\x1f\x45xchangeParamsByCurrencyIdEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02:\x02\x38\x01\x1a<\n\x1aUtilityParamsByGoodIdEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02:\x02\x38\x01\x1a\x39\n\x17\x41mountByCurrencyIdEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x39\n\x17QuantitiesByGoodIdEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\xfa\x02\n\x12\x41pply_Performative\x12v\n\x15\x61mount_by_currency_id\x18\x01 \x03(\x0b\x32W.aea.fetchai.state_update.StateUpdateMessage.Apply_Performative.AmountByCurrencyIdEntry\x12v\n\x15quantities_by_good_id\x18\x02 
\x03(\x0b\x32W.aea.fetchai.state_update.StateUpdateMessage.Apply_Performative.QuantitiesByGoodIdEntry\x1a\x39\n\x17\x41mountByCurrencyIdEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x39\n\x17QuantitiesByGoodIdEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x12\n\x10\x45nd_PerformativeB\x0e\n\x0cperformativeb\x06proto3',
)
_STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE_EXCHANGEPARAMSBYCURRENCYIDENTRY = _descriptor.Descriptor(
name="ExchangeParamsByCurrencyIdEntry",
full_name="aea.fetchai.state_update.StateUpdateMessage.Initialize_Performative.ExchangeParamsByCurrencyIdEntry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="aea.fetchai.state_update.StateUpdateMessage.Initialize_Performative.ExchangeParamsByCurrencyIdEntry.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="value",
full_name="aea.fetchai.state_update.StateUpdateMessage.Initialize_Performative.ExchangeParamsByCurrencyIdEntry.value",
index=1,
number=2,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=b"8\001",
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=877,
serialized_end=942,
)
_STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE_UTILITYPARAMSBYGOODIDENTRY = _descriptor.Descriptor(
name="UtilityParamsByGoodIdEntry",
full_name="aea.fetchai.state_update.StateUpdateMessage.Initialize_Performative.UtilityParamsByGoodIdEntry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="aea.fetchai.state_update.StateUpdateMessage.Initialize_Performative.UtilityParamsByGoodIdEntry.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="value",
full_name="aea.fetchai.state_update.StateUpdateMessage.Initialize_Performative.UtilityParamsByGoodIdEntry.value",
index=1,
number=2,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=b"8\001",
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=944,
serialized_end=1004,
)
_STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE_AMOUNTBYCURRENCYIDENTRY = _descriptor.Descriptor(
name="AmountByCurrencyIdEntry",
full_name="aea.fetchai.state_update.StateUpdateMessage.Initialize_Performative.AmountByCurrencyIdEntry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="aea.fetchai.state_update.StateUpdateMessage.Initialize_Performative.AmountByCurrencyIdEntry.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="value",
full_name="aea.fetchai.state_update.StateUpdateMessage.Initialize_Performative.AmountByCurrencyIdEntry.value",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=b"8\001",
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1006,
serialized_end=1063,
)
_STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE_QUANTITIESBYGOODIDENTRY = _descriptor.Descriptor(
name="QuantitiesByGoodIdEntry",
full_name="aea.fetchai.state_update.StateUpdateMessage.Initialize_Performative.QuantitiesByGoodIdEntry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="aea.fetchai.state_update.StateUpdateMessage.Initialize_Performative.QuantitiesByGoodIdEntry.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="value",
full_name="aea.fetchai.state_update.StateUpdateMessage.Initialize_Performative.QuantitiesByGoodIdEntry.value",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=b"8\001",
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1065,
serialized_end=1122,
)
_STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE = _descriptor.Descriptor(
name="Initialize_Performative",
full_name="aea.fetchai.state_update.StateUpdateMessage.Initialize_Performative",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="exchange_params_by_currency_id",
full_name="aea.fetchai.state_update.StateUpdateMessage.Initialize_Performative.exchange_params_by_currency_id",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="utility_params_by_good_id",
full_name="aea.fetchai.state_update.StateUpdateMessage.Initialize_Performative.utility_params_by_good_id",
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="amount_by_currency_id",
full_name="aea.fetchai.state_update.StateUpdateMessage.Initialize_Performative.amount_by_currency_id",
index=2,
number=3,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="quantities_by_good_id",
full_name="aea.fetchai.state_update.StateUpdateMessage.Initialize_Performative.quantities_by_good_id",
index=3,
number=4,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[
_STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE_EXCHANGEPARAMSBYCURRENCYIDENTRY,
_STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE_UTILITYPARAMSBYGOODIDENTRY,
_STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE_AMOUNTBYCURRENCYIDENTRY,
_STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE_QUANTITIESBYGOODIDENTRY,
],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=324,
serialized_end=1122,
)
_STATEUPDATEMESSAGE_APPLY_PERFORMATIVE_AMOUNTBYCURRENCYIDENTRY = _descriptor.Descriptor(
name="AmountByCurrencyIdEntry",
full_name="aea.fetchai.state_update.StateUpdateMessage.Apply_Performative.AmountByCurrencyIdEntry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="aea.fetchai.state_update.StateUpdateMessage.Apply_Performative.AmountByCurrencyIdEntry.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="value",
full_name="aea.fetchai.state_update.StateUpdateMessage.Apply_Performative.AmountByCurrencyIdEntry.value",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=b"8\001",
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1006,
serialized_end=1063,
)
_STATEUPDATEMESSAGE_APPLY_PERFORMATIVE_QUANTITIESBYGOODIDENTRY = _descriptor.Descriptor(
name="QuantitiesByGoodIdEntry",
full_name="aea.fetchai.state_update.StateUpdateMessage.Apply_Performative.QuantitiesByGoodIdEntry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="aea.fetchai.state_update.StateUpdateMessage.Apply_Performative.QuantitiesByGoodIdEntry.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="value",
full_name="aea.fetchai.state_update.StateUpdateMessage.Apply_Performative.QuantitiesByGoodIdEntry.value",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=b"8\001",
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1065,
serialized_end=1122,
)
_STATEUPDATEMESSAGE_APPLY_PERFORMATIVE = _descriptor.Descriptor(
name="Apply_Performative",
full_name="aea.fetchai.state_update.StateUpdateMessage.Apply_Performative",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="amount_by_currency_id",
full_name="aea.fetchai.state_update.StateUpdateMessage.Apply_Performative.amount_by_currency_id",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="quantities_by_good_id",
full_name="aea.fetchai.state_update.StateUpdateMessage.Apply_Performative.quantities_by_good_id",
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[
_STATEUPDATEMESSAGE_APPLY_PERFORMATIVE_AMOUNTBYCURRENCYIDENTRY,
_STATEUPDATEMESSAGE_APPLY_PERFORMATIVE_QUANTITIESBYGOODIDENTRY,
],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1125,
serialized_end=1503,
)
_STATEUPDATEMESSAGE_END_PERFORMATIVE = _descriptor.Descriptor(
name="End_Performative",
full_name="aea.fetchai.state_update.StateUpdateMessage.End_Performative",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1505,
serialized_end=1523,
)
_STATEUPDATEMESSAGE = _descriptor.Descriptor(
name="StateUpdateMessage",
full_name="aea.fetchai.state_update.StateUpdateMessage",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="apply",
full_name="aea.fetchai.state_update.StateUpdateMessage.apply",
index=0,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="end",
full_name="aea.fetchai.state_update.StateUpdateMessage.end",
index=1,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="initialize",
full_name="aea.fetchai.state_update.StateUpdateMessage.initialize",
index=2,
number=7,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[
_STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE,
_STATEUPDATEMESSAGE_APPLY_PERFORMATIVE,
_STATEUPDATEMESSAGE_END_PERFORMATIVE,
],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="performative",
full_name="aea.fetchai.state_update.StateUpdateMessage.performative",
index=0,
containing_type=None,
fields=[],
),
],
serialized_start=49,
serialized_end=1539,
)
_STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE_EXCHANGEPARAMSBYCURRENCYIDENTRY.containing_type = (
_STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE
)
_STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE_UTILITYPARAMSBYGOODIDENTRY.containing_type = (
_STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE
)
_STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE_AMOUNTBYCURRENCYIDENTRY.containing_type = (
_STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE
)
_STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE_QUANTITIESBYGOODIDENTRY.containing_type = (
_STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE
)
_STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE.fields_by_name[
"exchange_params_by_currency_id"
].message_type = (
_STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE_EXCHANGEPARAMSBYCURRENCYIDENTRY
)
_STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE.fields_by_name[
"utility_params_by_good_id"
].message_type = _STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE_UTILITYPARAMSBYGOODIDENTRY
_STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE.fields_by_name[
"amount_by_currency_id"
].message_type = _STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE_AMOUNTBYCURRENCYIDENTRY
_STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE.fields_by_name[
"quantities_by_good_id"
].message_type = _STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE_QUANTITIESBYGOODIDENTRY
_STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE.containing_type = _STATEUPDATEMESSAGE
_STATEUPDATEMESSAGE_APPLY_PERFORMATIVE_AMOUNTBYCURRENCYIDENTRY.containing_type = (
_STATEUPDATEMESSAGE_APPLY_PERFORMATIVE
)
_STATEUPDATEMESSAGE_APPLY_PERFORMATIVE_QUANTITIESBYGOODIDENTRY.containing_type = (
_STATEUPDATEMESSAGE_APPLY_PERFORMATIVE
)
_STATEUPDATEMESSAGE_APPLY_PERFORMATIVE.fields_by_name[
"amount_by_currency_id"
].message_type = _STATEUPDATEMESSAGE_APPLY_PERFORMATIVE_AMOUNTBYCURRENCYIDENTRY
_STATEUPDATEMESSAGE_APPLY_PERFORMATIVE.fields_by_name[
"quantities_by_good_id"
].message_type = _STATEUPDATEMESSAGE_APPLY_PERFORMATIVE_QUANTITIESBYGOODIDENTRY
_STATEUPDATEMESSAGE_APPLY_PERFORMATIVE.containing_type = _STATEUPDATEMESSAGE
_STATEUPDATEMESSAGE_END_PERFORMATIVE.containing_type = _STATEUPDATEMESSAGE
_STATEUPDATEMESSAGE.fields_by_name[
"apply"
].message_type = _STATEUPDATEMESSAGE_APPLY_PERFORMATIVE
_STATEUPDATEMESSAGE.fields_by_name[
"end"
].message_type = _STATEUPDATEMESSAGE_END_PERFORMATIVE
_STATEUPDATEMESSAGE.fields_by_name[
"initialize"
].message_type = _STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE
_STATEUPDATEMESSAGE.oneofs_by_name["performative"].fields.append(
_STATEUPDATEMESSAGE.fields_by_name["apply"]
)
_STATEUPDATEMESSAGE.fields_by_name[
"apply"
].containing_oneof = _STATEUPDATEMESSAGE.oneofs_by_name["performative"]
_STATEUPDATEMESSAGE.oneofs_by_name["performative"].fields.append(
_STATEUPDATEMESSAGE.fields_by_name["end"]
)
_STATEUPDATEMESSAGE.fields_by_name[
"end"
].containing_oneof = _STATEUPDATEMESSAGE.oneofs_by_name["performative"]
_STATEUPDATEMESSAGE.oneofs_by_name["performative"].fields.append(
_STATEUPDATEMESSAGE.fields_by_name["initialize"]
)
_STATEUPDATEMESSAGE.fields_by_name[
"initialize"
].containing_oneof = _STATEUPDATEMESSAGE.oneofs_by_name["performative"]
DESCRIPTOR.message_types_by_name["StateUpdateMessage"] = _STATEUPDATEMESSAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
StateUpdateMessage = _reflection.GeneratedProtocolMessageType(
"StateUpdateMessage",
(_message.Message,),
{
"Initialize_Performative": _reflection.GeneratedProtocolMessageType(
"Initialize_Performative",
(_message.Message,),
{
"ExchangeParamsByCurrencyIdEntry": _reflection.GeneratedProtocolMessageType(
"ExchangeParamsByCurrencyIdEntry",
(_message.Message,),
{
"DESCRIPTOR": _STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE_EXCHANGEPARAMSBYCURRENCYIDENTRY,
"__module__": "state_update_pb2"
# @@protoc_insertion_point(class_scope:aea.fetchai.state_update.StateUpdateMessage.Initialize_Performative.ExchangeParamsByCurrencyIdEntry)
},
),
"UtilityParamsByGoodIdEntry": _reflection.GeneratedProtocolMessageType(
"UtilityParamsByGoodIdEntry",
(_message.Message,),
{
"DESCRIPTOR": _STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE_UTILITYPARAMSBYGOODIDENTRY,
"__module__": "state_update_pb2"
# @@protoc_insertion_point(class_scope:aea.fetchai.state_update.StateUpdateMessage.Initialize_Performative.UtilityParamsByGoodIdEntry)
},
),
"AmountByCurrencyIdEntry": _reflection.GeneratedProtocolMessageType(
"AmountByCurrencyIdEntry",
(_message.Message,),
{
"DESCRIPTOR": _STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE_AMOUNTBYCURRENCYIDENTRY,
"__module__": "state_update_pb2"
# @@protoc_insertion_point(class_scope:aea.fetchai.state_update.StateUpdateMessage.Initialize_Performative.AmountByCurrencyIdEntry)
},
),
"QuantitiesByGoodIdEntry": _reflection.GeneratedProtocolMessageType(
"QuantitiesByGoodIdEntry",
(_message.Message,),
{
"DESCRIPTOR": _STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE_QUANTITIESBYGOODIDENTRY,
"__module__": "state_update_pb2"
# @@protoc_insertion_point(class_scope:aea.fetchai.state_update.StateUpdateMessage.Initialize_Performative.QuantitiesByGoodIdEntry)
},
),
"DESCRIPTOR": _STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE,
"__module__": "state_update_pb2"
# @@protoc_insertion_point(class_scope:aea.fetchai.state_update.StateUpdateMessage.Initialize_Performative)
},
),
"Apply_Performative": _reflection.GeneratedProtocolMessageType(
"Apply_Performative",
(_message.Message,),
{
"AmountByCurrencyIdEntry": _reflection.GeneratedProtocolMessageType(
"AmountByCurrencyIdEntry",
(_message.Message,),
{
"DESCRIPTOR": _STATEUPDATEMESSAGE_APPLY_PERFORMATIVE_AMOUNTBYCURRENCYIDENTRY,
"__module__": "state_update_pb2"
# @@protoc_insertion_point(class_scope:aea.fetchai.state_update.StateUpdateMessage.Apply_Performative.AmountByCurrencyIdEntry)
},
),
"QuantitiesByGoodIdEntry": _reflection.GeneratedProtocolMessageType(
"QuantitiesByGoodIdEntry",
(_message.Message,),
{
"DESCRIPTOR": _STATEUPDATEMESSAGE_APPLY_PERFORMATIVE_QUANTITIESBYGOODIDENTRY,
"__module__": "state_update_pb2"
# @@protoc_insertion_point(class_scope:aea.fetchai.state_update.StateUpdateMessage.Apply_Performative.QuantitiesByGoodIdEntry)
},
),
"DESCRIPTOR": _STATEUPDATEMESSAGE_APPLY_PERFORMATIVE,
"__module__": "state_update_pb2"
# @@protoc_insertion_point(class_scope:aea.fetchai.state_update.StateUpdateMessage.Apply_Performative)
},
),
"End_Performative": _reflection.GeneratedProtocolMessageType(
"End_Performative",
(_message.Message,),
{
"DESCRIPTOR": _STATEUPDATEMESSAGE_END_PERFORMATIVE,
"__module__": "state_update_pb2"
# @@protoc_insertion_point(class_scope:aea.fetchai.state_update.StateUpdateMessage.End_Performative)
},
),
"DESCRIPTOR": _STATEUPDATEMESSAGE,
"__module__": "state_update_pb2"
# @@protoc_insertion_point(class_scope:aea.fetchai.state_update.StateUpdateMessage)
},
)
_sym_db.RegisterMessage(StateUpdateMessage)
_sym_db.RegisterMessage(StateUpdateMessage.Initialize_Performative)
_sym_db.RegisterMessage(
StateUpdateMessage.Initialize_Performative.ExchangeParamsByCurrencyIdEntry
)
_sym_db.RegisterMessage(
StateUpdateMessage.Initialize_Performative.UtilityParamsByGoodIdEntry
)
_sym_db.RegisterMessage(
StateUpdateMessage.Initialize_Performative.AmountByCurrencyIdEntry
)
_sym_db.RegisterMessage(
StateUpdateMessage.Initialize_Performative.QuantitiesByGoodIdEntry
)
_sym_db.RegisterMessage(StateUpdateMessage.Apply_Performative)
_sym_db.RegisterMessage(StateUpdateMessage.Apply_Performative.AmountByCurrencyIdEntry)
_sym_db.RegisterMessage(StateUpdateMessage.Apply_Performative.QuantitiesByGoodIdEntry)
_sym_db.RegisterMessage(StateUpdateMessage.End_Performative)
_STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE_EXCHANGEPARAMSBYCURRENCYIDENTRY._options = (
None
)
_STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE_UTILITYPARAMSBYGOODIDENTRY._options = None
_STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE_AMOUNTBYCURRENCYIDENTRY._options = None
_STATEUPDATEMESSAGE_INITIALIZE_PERFORMATIVE_QUANTITIESBYGOODIDENTRY._options = None
_STATEUPDATEMESSAGE_APPLY_PERFORMATIVE_AMOUNTBYCURRENCYIDENTRY._options = None
_STATEUPDATEMESSAGE_APPLY_PERFORMATIVE_QUANTITIESBYGOODIDENTRY._options = None
# @@protoc_insertion_point(module_scope)
| 38.368681
| 2,250
| 0.663239
| 2,777
| 31,117
| 7.062297
| 0.067339
| 0.030186
| 0.138691
| 0.05568
| 0.845758
| 0.795074
| 0.740873
| 0.703192
| 0.618397
| 0.609678
| 0
| 0.027626
| 0.25086
| 31,117
| 810
| 2,251
| 38.416049
| 0.813693
| 0.043288
| 0
| 0.738065
| 1
| 0.00129
| 0.221348
| 0.191604
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.005161
| 0
| 0.005161
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5a2d64ae674181120650cec41529448f519e7c64
| 45
|
py
|
Python
|
LogSystem_JE/venv/Lib/site-packages/JELogSystem/__init__.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | 3
|
2020-12-21T03:59:11.000Z
|
2020-12-30T07:27:47.000Z
|
LogSystem_JE/venv/Lib/site-packages/JELogSystem/__init__.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
LogSystem_JE/venv/Lib/site-packages/JELogSystem/__init__.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
from JELogSystem.Log_System import Log_System
| 45
| 45
| 0.911111
| 7
| 45
| 5.571429
| 0.714286
| 0.461538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 45
| 1
| 45
| 45
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5a38b7bad01c4cc0ca6d76787a3d503597e99deb
| 9,108
|
py
|
Python
|
appengine/chromium_build/tests/console_test.py
|
mithro/chromium-infra
|
d27ac0b230bedae4bc968515b02927cf9e17c2b7
|
[
"BSD-3-Clause"
] | 2
|
2021-04-13T21:22:18.000Z
|
2021-09-07T02:11:57.000Z
|
appengine/chromium_build/tests/console_test.py
|
mithro/chromium-infra
|
d27ac0b230bedae4bc968515b02927cf9e17c2b7
|
[
"BSD-3-Clause"
] | 21
|
2020-09-06T02:41:05.000Z
|
2022-03-02T04:40:01.000Z
|
appengine/chromium_build/tests/console_test.py
|
mithro/chromium-infra
|
d27ac0b230bedae4bc968515b02927cf9e17c2b7
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import app
from tests import cb
class ConsoleTestCase(cb.CbTestCase):
def test_console_handler(self):
self.save_page(localpath='chromium/sheriff.js',
content='document.write(\'sheriff1\')')
self.save_page(localpath='chromium/sheriff_webkit.js',
content='document.write(\'sheriff2\')')
self.save_page(localpath='chromium/sheriff_memory.js',
content='document.write(\'sheriff3\')')
self.save_page(localpath='chromium/sheriff_nacl.js',
content='document.write(\'sheriff4\')')
self.save_page(localpath='chromium/sheriff_perf.js',
content='document.write(\'sheriff5\')')
self.save_page(localpath='chromium/sheriff_cros_mtv.js',
content='document.write(\'sheriff6, sheriff7\')')
self.save_page(localpath='chromium/sheriff_cros_nonmtv.js',
content='document.write(\'sheriff8\')')
exp_console = self.read_file('exp_console.html')
in_console = {'content': self.read_file('in_console.html')}
act_console = app.console_handler(
unquoted_localpath='chromium/console',
remoteurl='http://build.chromium.org/p/chromium/console',
page_data=in_console)['content']
# Uncomment if deeper inspection is needed of the returned console.
# This is also useful if changing the site layout and you need to
# 'retrain' the test expectations.
# self.write_file('exp_console.html', act_console)
self.assertEquals(exp_console, act_console,
'Unexpected console output found')
def test_console_handler_utf8(self):
self.save_page(localpath='chromium/sheriff.js',
content='document.write(\'sheriff1\')')
self.save_page(localpath='chromium/sheriff_webkit.js',
content='document.write(\'sheriff2\')')
self.save_page(localpath='chromium/sheriff_memory.js',
content='document.write(\'sheriff3\')')
self.save_page(localpath='chromium/sheriff_nacl.js',
content='document.write(\'sheriff4\')')
self.save_page(localpath='chromium/sheriff_perf.js',
content='document.write(\'sheriff5\')')
self.save_page(localpath='chromium/sheriff_cros_mtv.js',
content='document.write(\'sheriff6, sheriff7\')')
self.save_page(localpath='chromium/sheriff_cros_nonmtv.js',
content='document.write(\'sheriff8\')')
exp_console = self.read_file('exp_console.html')
in_console = {'content': self.read_file('in_console.html')}
act_console = app.console_handler(
unquoted_localpath='chromium/console',
remoteurl='http://build.chromium.org/p/chromium/console',
page_data=in_console)['content']
# Uncomment if deeper inspection is needed of the returned console.
# This is also useful if changing the site layout and you need to
# 'retrain' the test expectations.
# self.write_file('exp_console.html', act_console)
self.assertEquals(exp_console, act_console,
'Unexpected console output found')
def test_parse_master(self):
in_console = {'content': self.read_file('in_console.html')}
app.parse_master(
localpath='chromium/console',
remoteurl='http://build.chromium.org/p/chromium/console',
page_data=in_console)
test_revision = '314671'
rowdata = app.get_and_cache_rowdata('chromium/console/' + test_revision)
summary = app.get_and_cache_pagedata('chromium/console/summary')['content']
act_row = {}
exp_row = {}
for item in ['rev', 'name', 'status', 'comment']:
# We only want to test specific values in rowdata, so we create a new
# hash that has just those values.
act_row[item] = rowdata[item]
# Uncomment if deeper inspection is needed of the returned console.
# This is also useful if changing the site layout and you need to
# 'retrain' the test expectations.
# self.write_file('exp_%s.html' % item,
# act_row[item].encode('utf-8'))
# self.write_file('exp_summary.html',
# summary.encode('utf-8'))
exp_row[item] = self.read_file('exp_%s.html' % item).decode('utf-8')
exp_summary = self.read_file('exp_summary.html').decode('utf-8')
self.assertEquals(exp_row, act_row, 'Unexpected row data found')
self.assertEquals(exp_summary, summary, 'Unexpected build summary found')
def test_parse_master_utf8(self):
in_console = {'content': self.read_file('in_console.html')}
app.parse_master(
localpath='chromium/console',
remoteurl='http://build.chromium.org/p/chromium/console',
page_data=in_console)
test_revision = '314921'
rowdata = app.get_and_cache_rowdata('chromium/console/' + test_revision)
summary = app.get_and_cache_pagedata('chromium/console/summary')['content']
act_row = {}
exp_row = {}
for item in ['rev', 'name', 'status', 'comment']:
# We only want to test specific values in rowdata, so we create a new
# hash that has just those values.
act_row[item] = rowdata[item]
# Uncomment if deeper inspection is needed of the returned console.
# This is also useful if changing the site layout and you need to
# 'retrain' the test expectations.
# self.write_file('exp_%s.html' % item,
# act_row[item].encode('utf-8'))
# self.write_file('exp_summary.html',
# summary.encode('utf-8'))
exp_row[item] = self.read_file('exp_%s.html' % item).decode('utf-8')
exp_summary = self.read_file('exp_summary.html').decode('utf-8')
self.assertEquals(exp_row, act_row, 'Unexpected row data found')
self.assertEquals(exp_summary, summary, 'Unexpected build summary found')
def test_console_merger(self):
for master in ['linux', 'mac']:
page_data = {'content': self.read_file('in_%s.html' % master)}
app.parse_master(
localpath='chromium.%s/console' % master,
remoteurl='http://build.chromium.org/p/chromium.%s/console' % master,
page_data=page_data)
# Get the expected and real output, compare.
app.console_merger(
'chromium/console', '', {},
masters_to_merge=[
'chromium.linux',
'chromium.mac',
],
num_rows_to_merge=20)
actual_console = app.get_and_cache_pagedata('chromium/console')['content']
# Uncomment if deeper inspection is needed of the returned console.
# import logging
# logging.debug('foo')
# self.write_file('exp_merged.html', actual_console)
# import code
# code.interact(local=locals())
self.assertEquals(
self.read_file('exp_merged.html').decode('utf-8'),
actual_console, 'Unexpected console output found')
def test_console_merger_splitrevs(self):
for master in ['linux', 'mac']:
page_data = {'content': self.read_file('in_%s.html' % master)}
app.parse_master(
localpath='chromium.%s/console' % master,
remoteurl='http://build.chromium.org/p/chromium.%s/console' % master,
page_data=page_data)
# Get the expected and real output, compare.
app.console_merger(
'chromium/console', '', {},
masters_to_merge=[
'chromium.linux',
'chromium.mac',
],
num_rows_to_merge=20)
act_merged = app.get_and_cache_pagedata('chromium/console')['content']
# Uncomment if deeper inspection is needed of the returned console.
# import logging
# logging.debug('foo')
# self.write_file('exp_merged.html', act_merged)
# import code
# code.interact(local=locals())
self.assertEquals(self.read_file('exp_merged.html'), act_merged,
'Unexpected console output found')
def test_console_utf8_devcomment(self):
"""Test that a console DevComment row with a UTF-8 character is retained."""
for master in ['mac']:
page_data = {'content': self.read_file('in_%s.html' % master)}
app.parse_master(
localpath='chromium.%s/console' % master,
remoteurl='http://build.chromium.org/p/chromium.%s/console' % master,
page_data=page_data)
# Get the expected and real output, compare.
app.console_merger(
'chromium/console', '', {},
masters_to_merge=[
'chromium.mac',
],
num_rows_to_merge=20)
act_merged = app.get_and_cache_pagedata('chromium/console')['content']
# Uncomment if deeper inspection is needed of the returned console.
# import logging
# logging.debug('foo')
# self.write_file('exp_merged.html', act_merged.encode('utf-8'))
# import code
# code.interact(local=locals())
self.assertEquals(self.read_file('exp_merged.html').decode('utf-8'),
act_merged, 'Unexpected console output found')
| 42.760563
| 80
| 0.653931
| 1,141
| 9,108
| 5.037686
| 0.137599
| 0.062109
| 0.033403
| 0.051148
| 0.933194
| 0.926061
| 0.918058
| 0.910752
| 0.901009
| 0.901009
| 0
| 0.007424
| 0.216184
| 9,108
| 212
| 81
| 42.962264
| 0.797731
| 0.246926
| 0
| 0.850746
| 0
| 0
| 0.287961
| 0.0538
| 0
| 0
| 0
| 0
| 0.067164
| 1
| 0.052239
| false
| 0
| 0.014925
| 0
| 0.074627
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ce6ae01da7798e7fc4817a6c05f5685a2879cdaa
| 3,925
|
py
|
Python
|
trello/lists.py
|
glitchdotcom/trello-py
|
3fea701fbac389306b6f36641fd8e39303df7e91
|
[
"BSD-2-Clause"
] | 1
|
2020-05-21T22:12:36.000Z
|
2020-05-21T22:12:36.000Z
|
lists.py
|
Caloba1/trello-api-py
|
03fede65b8bf4cd1b13ee685a1e5e21bcf2fbf57
|
[
"MIT"
] | null | null | null |
lists.py
|
Caloba1/trello-api-py
|
03fede65b8bf4cd1b13ee685a1e5e21bcf2fbf57
|
[
"MIT"
] | 2
|
2015-11-19T22:26:57.000Z
|
2019-10-22T19:34:15.000Z
|
import json
import requests
class Lists(object):
__module__ = 'trello'
def __init__(self, apikey, token=None):
self._apikey = apikey
self._token = token
def get(self, list_id, cards=None, card_fields=None, fields=None):
resp = requests.get("https://trello.com/1/lists/%s" % (list_id), params=dict(key=self._apikey, token=self._token, cards=cards, card_fields=card_fields, fields=fields), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def get_field(self, field, list_id):
resp = requests.get("https://trello.com/1/lists/%s/%s" % (list_id, field), params=dict(key=self._apikey, token=self._token), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def get_action(self, list_id, filter=None, fields=None, limit=None, page=None, idModels=None):
resp = requests.get("https://trello.com/1/lists/%s/actions" % (list_id), params=dict(key=self._apikey, token=self._token, filter=filter, fields=fields, limit=limit, page=page, idModels=idModels), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def get_board(self, list_id, fields=None):
resp = requests.get("https://trello.com/1/lists/%s/board" % (list_id), params=dict(key=self._apikey, token=self._token, fields=fields), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def get_board_field(self, field, list_id):
resp = requests.get("https://trello.com/1/lists/%s/board/%s" % (list_id, field), params=dict(key=self._apikey, token=self._token), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def get_card(self, list_id, actions=None, attachments=None, members=None, checkItemStates=None, checklists=None, filter=None, fields=None):
resp = requests.get("https://trello.com/1/lists/%s/cards" % (list_id), params=dict(key=self._apikey, token=self._token, actions=actions, attachments=attachments, members=members, checkItemStates=checkItemStates, checklists=checklists, filter=filter, fields=fields), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def get_card_filter(self, filter, list_id):
resp = requests.get("https://trello.com/1/lists/%s/cards/%s" % (list_id, filter), params=dict(key=self._apikey, token=self._token), data=None)
resp.raise_for_status()
return json.loads(resp.content)
def update(self, list_id, name=None, closed=None):
resp = requests.put("https://trello.com/1/lists/%s" % (list_id), params=dict(key=self._apikey, token=self._token), data=dict(name=name, closed=closed))
resp.raise_for_status()
return json.loads(resp.content)
def update_closed(self, list_id, value):
resp = requests.put("https://trello.com/1/lists/%s/closed" % (list_id), params=dict(key=self._apikey, token=self._token), data=dict(value=value))
resp.raise_for_status()
return json.loads(resp.content)
def update_name(self, list_id, value):
resp = requests.put("https://trello.com/1/lists/%s/name" % (list_id), params=dict(key=self._apikey, token=self._token), data=dict(value=value))
resp.raise_for_status()
return json.loads(resp.content)
def new(self, name, idBoard):
resp = requests.post("https://trello.com/1/lists" % (), params=dict(key=self._apikey, token=self._token), data=dict(name=name, idBoard=idBoard))
resp.raise_for_status()
return json.loads(resp.content)
def new_card(self, list_id, name, desc=None):
resp = requests.post("https://trello.com/1/lists/%s/cards" % (list_id), params=dict(key=self._apikey, token=self._token), data=dict(name=name, desc=desc))
resp.raise_for_status()
return json.loads(resp.content)
| 54.513889
| 285
| 0.673121
| 558
| 3,925
| 4.566308
| 0.100358
| 0.051805
| 0.076531
| 0.070644
| 0.741366
| 0.741366
| 0.741366
| 0.741366
| 0.720958
| 0.697802
| 0
| 0.003705
| 0.174777
| 3,925
| 71
| 286
| 55.28169
| 0.782958
| 0
| 0
| 0.436364
| 0
| 0
| 0.106383
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.236364
| false
| 0
| 0.036364
| 0
| 0.527273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
ce6b918c8145176731e624a4e63fc9b99437bde1
| 145
|
py
|
Python
|
geekshop/mainapp/admin.py
|
PorcupineVN/interner-store
|
c19a26d79933f41ba4f2a8df69bd386c4ba168a8
|
[
"MIT"
] | null | null | null |
geekshop/mainapp/admin.py
|
PorcupineVN/interner-store
|
c19a26d79933f41ba4f2a8df69bd386c4ba168a8
|
[
"MIT"
] | 1
|
2022-02-09T07:29:04.000Z
|
2022-02-09T07:29:04.000Z
|
geekshop/mainapp/admin.py
|
Deathless47/Internet-store
|
09b1fa631295bcc9e12fc2c8dfbb7b027cac73ed
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Product, ProductCategory
admin.site.register(ProductCategory)
admin.site.register(Product)
| 24.166667
| 44
| 0.841379
| 18
| 145
| 6.777778
| 0.555556
| 0.327869
| 0.393443
| 0.52459
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082759
| 145
| 5
| 45
| 29
| 0.917293
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
ceb93c335c07601f33ad16d0e61ea063167165bd
| 3,921
|
py
|
Python
|
tests/test_composition_measurement.py
|
IBPA/FoodAtlas
|
0a431f0a391adaa8984b380f3f6f7189f27b9311
|
[
"Apache-2.0"
] | 1
|
2022-02-07T10:04:35.000Z
|
2022-02-07T10:04:35.000Z
|
tests/test_composition_measurement.py
|
IBPA/FoodAtlas
|
0a431f0a391adaa8984b380f3f6f7189f27b9311
|
[
"Apache-2.0"
] | null | null | null |
tests/test_composition_measurement.py
|
IBPA/FoodAtlas
|
0a431f0a391adaa8984b380f3f6f7189f27b9311
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from food_ke.composition_measurement import CompositionMeasurement
class TestCompositionMeasurement:
@pytest.mark.parametrize(
"a,b",
[
(
CompositionMeasurement(
food_name="cocoa",
constituent_name="epicatechin",
central_tendency_measurement=1.0,
variance_measurement=0.1,
units="mg/kg",
),
CompositionMeasurement(
food_name="cocoa",
constituent_name="epicatechin",
central_tendency_measurement=1.0,
variance_measurement=0.1,
units="mg/kg",
),
),
(
CompositionMeasurement(
food_name="cocoa",
constituent_name="epicatechin",
central_tendency_measurement=1.0,
variance_measurement=0.1,
units=None,
),
CompositionMeasurement(
food_name="cocoa",
constituent_name="epicatechin",
central_tendency_measurement=1.0,
variance_measurement=0.1,
units=None,
),
),
(
CompositionMeasurement(
food_name="cocoa",
constituent_name="epicatechin",
central_tendency_measurement=1.0,
variance_measurement=None,
units=None,
),
CompositionMeasurement(
food_name="cocoa",
constituent_name="epicatechin",
central_tendency_measurement=1.0,
variance_measurement=None,
units=None,
),
),
(
CompositionMeasurement(
food_name="cocoa",
constituent_name="epicatechin",
central_tendency_measurement=1.0,
variance_measurement=0.1,
units=None,
),
CompositionMeasurement(
food_name="cocoa",
constituent_name="epicatechin",
central_tendency_measurement=1.0,
variance_measurement=0.1,
units=None,
),
),
(
CompositionMeasurement(
food_name=None,
constituent_name=None,
central_tendency_measurement=1.0,
variance_measurement=None,
units=None,
),
CompositionMeasurement(
food_name=None,
constituent_name=None,
central_tendency_measurement=1.0,
variance_measurement=None,
units=None,
),
),
],
)
def test_eq(self, a, b):
assert a == b
@pytest.mark.parametrize(
"a,b",
[
(
CompositionMeasurement(
food_name=None,
constituent_name=None,
central_tendency_measurement=47.0,
variance_measurement=None,
units=None,
),
CompositionMeasurement(
food_name=None,
constituent_name=None,
central_tendency_measurement=693.0,
variance_measurement=None,
units=None,
),
),
],
)
def test_neq(self, a, b):
assert not a == b
assert not b == a
| 32.94958
| 66
| 0.429737
| 256
| 3,921
| 6.332031
| 0.140625
| 0.192474
| 0.222085
| 0.166564
| 0.90438
| 0.90438
| 0.90438
| 0.90438
| 0.847008
| 0.847008
| 0
| 0.019928
| 0.500893
| 3,921
| 118
| 67
| 33.228814
| 0.80838
| 0
| 0
| 0.824561
| 0
| 0
| 0.036725
| 0
| 0
| 0
| 0
| 0
| 0.026316
| 1
| 0.017544
| false
| 0
| 0.017544
| 0
| 0.04386
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
0c686020ac65f88973ba1ab57c9c2885bee6327a
| 4,368
|
py
|
Python
|
algorithms/spot_finding/threshold.py
|
toastisme/dials
|
6bc8ababc33bfe334513677f8adb65c0e90003f3
|
[
"BSD-3-Clause"
] | 58
|
2015-10-15T09:28:20.000Z
|
2022-03-28T20:09:38.000Z
|
algorithms/spot_finding/threshold.py
|
toastisme/dials
|
6bc8ababc33bfe334513677f8adb65c0e90003f3
|
[
"BSD-3-Clause"
] | 1,741
|
2015-11-24T08:17:02.000Z
|
2022-03-31T15:46:42.000Z
|
algorithms/spot_finding/threshold.py
|
toastisme/dials
|
6bc8ababc33bfe334513677f8adb65c0e90003f3
|
[
"BSD-3-Clause"
] | 45
|
2015-10-14T13:44:16.000Z
|
2022-03-22T14:45:56.000Z
|
class ThresholdStrategy:
"""
Base class for spot finder threshold strategies.
"""
def __init__(self, **kwargs):
"""
Initialise with key word arguments.
"""
pass
def __call__(self, image):
"""
Threshold the image.
"""
raise RuntimeError("Overload Me!")
class DispersionThresholdStrategy(ThresholdStrategy):
"""
A class implementing a 'gain' threshold.
"""
def __init__(self, **kwargs):
"""
Set the threshold algorithm up
"""
# Initialise the base class
ThresholdStrategy.__init__(self, **kwargs)
# Get the parameters
self._kernel_size = kwargs.get("kernel_size", (3, 3))
self._gain = kwargs.get("gain")
self._n_sigma_b = kwargs.get("n_sigma_b", 6)
self._n_sigma_s = kwargs.get("n_sigma_s", 3)
self._min_count = kwargs.get("min_count", 2)
self._threshold = kwargs.get("global_threshold", 0)
# Save the constant gain
self._gain_map = None
# Create a buffer
self.algorithm = {}
def __call__(self, image, mask):
"""
Call the thresholding function
:param image: The image to process
:param mask: The mask to use
:return: The thresholded image
"""
from dials.algorithms.image import threshold
from dials.array_family import flex
# Initialise the algorithm
try:
algorithm = self.algorithm[image.all()]
except Exception:
algorithm = threshold.DispersionThreshold(
image.all(),
self._kernel_size,
self._n_sigma_b,
self._n_sigma_s,
self._threshold,
self._min_count,
)
self.algorithm[image.all()] = algorithm
# Set the gain
if self._gain is not None:
assert self._gain > 0
self._gain_map = flex.double(image.accessor(), self._gain)
self._gain = None
# Compute the threshold
result = flex.bool(flex.grid(image.all()))
if self._gain_map:
algorithm(image, mask, self._gain_map, result)
else:
algorithm(image, mask, result)
# Return the result
return result
class DispersionExtendedThresholdStrategy(ThresholdStrategy):
"""
A class implementing a 'gain' threshold.
"""
def __init__(self, **kwargs):
"""
Set the threshold algorithm up
"""
# Initialise the base class
ThresholdStrategy.__init__(self, **kwargs)
# Get the parameters
self._kernel_size = kwargs.get("kernel_size", (3, 3))
self._gain = kwargs.get("gain")
self._n_sigma_b = kwargs.get("n_sigma_b", 6)
self._n_sigma_s = kwargs.get("n_sigma_s", 3)
self._min_count = kwargs.get("min_count", 2)
self._threshold = kwargs.get("global_threshold", 0)
# Save the constant gain
self._gain_map = None
# Create a buffer
self.algorithm = {}
def __call__(self, image, mask):
"""
Call the thresholding function
:param image: The image to process
:param mask: The mask to use
:return: The thresholded image
"""
from dials.algorithms.image import threshold
from dials.array_family import flex
# Initialise the algorithm
try:
algorithm = self.algorithm[image.all()]
except Exception:
algorithm = threshold.DispersionExtendedThreshold(
image.all(),
self._kernel_size,
self._n_sigma_b,
self._n_sigma_s,
self._threshold,
self._min_count,
)
self.algorithm[image.all()] = algorithm
# Set the gain
if self._gain is not None:
assert self._gain > 0
self._gain_map = flex.double(image.accessor(), self._gain)
self._gain = None
# Compute the threshold
result = flex.bool(flex.grid(image.all()))
if self._gain_map:
algorithm(image, mask, self._gain_map, result)
else:
algorithm(image, mask, result)
# Return the result
return result
| 28.180645
| 70
| 0.564103
| 472
| 4,368
| 4.970339
| 0.184322
| 0.061381
| 0.034101
| 0.018755
| 0.875533
| 0.875533
| 0.875533
| 0.875533
| 0.875533
| 0.875533
| 0
| 0.00487
| 0.341804
| 4,368
| 154
| 71
| 28.363636
| 0.81113
| 0.18956
| 0
| 0.87013
| 0
| 0
| 0.03887
| 0
| 0
| 0
| 0
| 0
| 0.025974
| 1
| 0.077922
| false
| 0.012987
| 0.051948
| 0
| 0.194805
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0cb5ef6c2f84e55ff2885c685786ca15de416654
| 9,505
|
py
|
Python
|
pdx-extract/tests/test_main.py
|
michaelheyman/PSU-Code-Review
|
5a55d981425aaad69dc9ee06baaaef22bc426893
|
[
"MIT"
] | null | null | null |
pdx-extract/tests/test_main.py
|
michaelheyman/PSU-Code-Review
|
5a55d981425aaad69dc9ee06baaaef22bc426893
|
[
"MIT"
] | null | null | null |
pdx-extract/tests/test_main.py
|
michaelheyman/PSU-Code-Review
|
5a55d981425aaad69dc9ee06baaaef22bc426893
|
[
"MIT"
] | null | null | null |
import asyncio
import unittest.mock as mock
import asynctest
import pytest
from app import main
from tests import data
from tests import utils
@mock.patch("requests.post")
def test_authenticate_current_session_returns_ok(mock_requests):
import requests
term = {"code": "201904", "description": "Fall 2019 Quarter"}
unique_session_id = "abcdef1234567890"
cookies = {"JSESSIONID": "CF6813D3F9BFD1ABEEEF47E2FB094926"}
response = requests.Response()
response.status_code = requests.codes.ok
mock_requests.return_value = response
res = main.authenticate_current_session(term, unique_session_id, cookies)
assert res.ok is True
@mock.patch("requests.post")
def test_authenticate_current_session_returns_not_ok_if_response_fails(mock_requests):
import requests
term = {"code": "201904", "description": "Fall 2019 Quarter"}
unique_session_id = "abcdef1234567890"
cookies = {"JSESSIONID": "CF6813D3F9BFD1ABEEEF47E2FB094926"}
response = requests.Response()
response.status_code = requests.codes.bad_request
mock_requests.return_value = response
res = main.authenticate_current_session(term, unique_session_id, cookies)
assert res.ok is False
@mock.patch("requests.get")
def test_get_schedule_json_returns_ok_when_good_response(mock_requests):
import json
import requests
subject = {"code": "CS", "description": "Computer Science"}
term = {"code": "201904", "description": "Fall 2019 Quarter"}
unique_session_id = "abcdef1234567890"
cookies = {"JSESSIONID": "CF6813D3F9BFD1ABEEEF47E2FB094926"}
response = requests.Response()
response._content = bytearray(json.dumps(data.example_schedule), "utf-8")
response.status_code = requests.codes.ok
mock_requests.return_value = response
schedule_json = main.get_schedule_json(subject, term, unique_session_id, cookies)
assert schedule_json == data.example_schedule
@mock.patch("requests.get")
def test_get_schedule_json_returns_not_ok_when_bad_response(mock_requests):
import requests
subject = {"code": "CS", "description": "Computer Science"}
term = {"code": "201904", "description": "Fall 2019 Quarter"}
unique_session_id = "abcdef1234567890"
cookies = {"JSESSIONID": "CF6813D3F9BFD1ABEEEF47E2FB094926"}
response = requests.Response()
response.status_code = requests.codes.bad_request
mock_requests.return_value = response
schedule_json = main.get_schedule_json(subject, term, unique_session_id, cookies)
assert schedule_json is None
@mock.patch("requests.get")
def test_get_subjects_returns_json_response_when_response_ok(mock_requests):
import json
import requests
cookies = {"cookie": "jar"}
unique_session_id = "abcdef1234567890"
term_date = "201904"
subjects_response = [{"code": "ACTG", "description": "Accounting"}]
response = requests.Response()
response._content = bytearray(json.dumps(subjects_response), "utf-8")
response.status_code = requests.codes.ok
mock_requests.return_value = response
subjects = main.get_subjects(cookies, unique_session_id, term_date)
assert subjects == subjects_response
@mock.patch("requests.get")
def test_get_subjects_returns_none_when_response_ok(mock_requests):
import json
import requests
cookies = {"cookie": "jar"}
unique_session_id = "abcdef1234567890"
term_date = "201904"
subjects_response = [{"code": "ACTG", "description": "Accounting"}]
response = requests.Response()
response._content = bytearray(json.dumps(subjects_response), "utf-8")
response.status_code = requests.codes.bad_request
mock_requests.return_value = response
subjects = main.get_subjects(cookies, unique_session_id, term_date)
assert subjects is None
@pytest.mark.asyncio
@asynctest.patch("app.main.authenticate_current_session")
@asynctest.patch("app.main.get_schedule_json")
@asynctest.patch("app.pyppeteer.get_unique_session_id")
async def test_get_subjects_json_returns_data(
mock_get_unique_session_id, mock_get_schedule_json, mock_authenticate
):
subjects = [
{"code": "ACTG", "description": "Accounting"},
{"code": "ACTG", "description": "Accounting"},
]
term = {"code": "201904", "description": "Fall 2019 Quarter"}
cookies = {"cookie": "jar"}
unique_session_id = "abcdef1234567890"
mock_get_unique_session_id = asynctest.CoroutineMock()
mock_get_unique_session_id = utils.set_async_result(
mock_get_unique_session_id, unique_session_id
)
mock_get_schedule_json.return_value = {"data": "foo"}
subjects_json = await main.get_subjects_json(subjects, term, cookies, None)
assert "foo" in subjects_json
assert len(subjects_json) == 2
@pytest.mark.asyncio
@asynctest.patch("app.main.authenticate_current_session")
@asynctest.patch("app.main.get_schedule_json")
@asynctest.patch("app.pyppeteer.get_unique_session_id")
async def test_get_subjects_json_returns_none_when_no_data(
    mock_get_unique_session_id, mock_get_schedule_json, mock_authenticate
):
    """get_subjects_json should yield no entries when the schedule call returns no data."""
    # NOTE(review): the name says "returns_none" but the assertion checks for
    # an empty list — consider renaming to "..._returns_empty_list_...".
    subjects = [
        {"code": "ACTG", "description": "Accounting"},
        {"code": "ACTG", "description": "Accounting"},
    ]
    term = {"code": "201904", "description": "Fall 2019 Quarter"}
    cookies = {"cookie": "jar"}
    unique_session_id = "abcdef1234567890"
    # NOTE(review): rebinding the patched mock below replaces it with a new
    # CoroutineMock, so the decorator-installed patch is likely not the object
    # configured here — confirm intentional.
    mock_get_unique_session_id = asynctest.CoroutineMock()
    mock_get_unique_session_id = utils.set_async_result(
        mock_get_unique_session_id, unique_session_id
    )
    # An empty dict simulates "no schedule data" for every subject.
    mock_get_schedule_json.return_value = {}
    subjects_json = await main.get_subjects_json(subjects, term, cookies, None)
    assert subjects_json == []
@mock.patch("requests.get")
def test_get_terms_returns_json_response_when_response_ok(mock_requests):
    """get_terms should hand back the decoded JSON list on an HTTP 200."""
    import json
    import requests

    expected_terms = [{"code": "201904", "description": "Fall 2019 Quarter"}]
    fake_response = requests.Response()
    fake_response.status_code = requests.codes.ok
    # Encode the fixture as bytes, the form requests stores internally.
    fake_response._content = bytearray(json.dumps(expected_terms), "utf-8")
    mock_requests.return_value = fake_response
    assert main.get_terms({"cookie": "jar"}, "abcdef1234567890") == expected_terms
@mock.patch("requests.get")
def test_get_terms_returns_none_when_response_not_ok(mock_requests):
    """get_terms should return None when the terms endpoint rejects the call."""
    import json
    import requests

    decoy_terms = [{"code": "201904", "description": "Fall 2019 Quarter"}]
    fake_response = requests.Response()
    fake_response.status_code = requests.codes.bad_request
    # A decodable body is attached so the None can only come from the
    # status-code check, not from a JSON failure.
    fake_response._content = bytearray(json.dumps(decoy_terms), "utf-8")
    mock_requests.return_value = fake_response
    assert main.get_terms({"cookie": "jar"}, "abcdef1234567890") is None
@pytest.mark.asyncio
@asynctest.patch("app.storage.upload_to_bucket")
@asynctest.patch("app.main.get_terms")
@asynctest.patch("app.pyppeteer.get_tokens")
@asynctest.patch("app.pyppeteer.get_page")
@asynctest.patch("app.pyppeteer.initialize")
async def test_run_returns_empty_payload_when_no_results(
    mock_initialize,
    mock_get_page,
    mock_get_tokens,
    mock_get_terms,
    mock_upload_to_bucket,
):
    """run() should produce an empty payload when no terms are available."""
    browser = asynctest.CoroutineMock()
    # run() awaits browser.close(), so it needs a resolved awaitable.
    browser.close.return_value = asyncio.Future()
    browser.close.return_value.set_result(None)
    mock_initialize = utils.set_async_result(mock_initialize, browser)
    mock_get_page = utils.set_async_result(mock_get_page, None)
    # get_tokens is awaited; resolve it to a (token, session_id) pair.
    mock_get_tokens.return_value = asyncio.Future()
    mock_get_tokens.return_value.set_result(["foo", "abcdef1234567890"])
    # No terms at all — the scenario under test.
    mock_get_terms.return_value = []
    # NOTE(review): this CALLS the mock and sets return_value on the result;
    # `mock_upload_to_bucket.return_value = None` was probably intended — confirm.
    mock_upload_to_bucket().return_value = None
    payload = await main.run()
    # NOTE(review): `assert_called` without parentheses is a bare attribute
    # access — none of the five lines below actually assert anything.
    mock_initialize.assert_called
    mock_get_page.assert_called
    mock_get_tokens.assert_called
    mock_get_terms.assert_called
    mock_upload_to_bucket.assert_called
    assert payload == []
@pytest.mark.asyncio
@asynctest.patch("app.storage.upload_to_bucket")
@asynctest.patch("app.main.get_subjects_json")
@asynctest.patch("app.main.get_subjects")
@asynctest.patch("app.main.get_terms")
@asynctest.patch("app.pyppeteer.get_tokens")
@asynctest.patch("app.pyppeteer.get_page")
@asynctest.patch("app.pyppeteer.initialize")
async def test_run_returns_payload(
    mock_initialize,
    mock_get_page,
    mock_get_tokens,
    mock_get_terms,
    mock_get_subjects,
    mock_get_subjects_json,
    mock_upload_to_bucket,
):
    """run() should return the scraped payload when a term and subject data exist."""
    browser = asynctest.CoroutineMock()
    # run() awaits browser.close(), so give it a resolved awaitable.
    browser.close.return_value = asyncio.Future()
    browser.close.return_value.set_result(None)
    mock_initialize = utils.set_async_result(mock_initialize, browser)
    mock_get_page = utils.set_async_result(mock_get_page, None)
    mock_get_tokens.return_value = asyncio.Future()
    mock_get_tokens.return_value.set_result(["foo", "abcdef1234567890"])
    mock_get_terms.return_value = [
        {"code": "201904", "description": "Fall 2019 Quarter"}
    ]
    # NOTE(review): rebinding mock_get_subjects_json replaces the
    # decorator-installed patch with whatever utils.set_async_result returns —
    # confirm run() still receives data.subjects_json through the patch.
    mock_get_subjects_json = utils.set_async_result(
        mock_get_subjects_json, data.subjects_json
    )
    # NOTE(review): this CALLS the mock; `mock_upload_to_bucket.return_value`
    # was probably intended — confirm.
    mock_upload_to_bucket().return_value = None
    payload = await main.run()
    # NOTE(review): `assert_called` without parentheses is a no-op attribute
    # access — none of the six lines below assert anything.
    mock_initialize.assert_called
    mock_get_page.assert_called
    mock_get_tokens.assert_called
    mock_get_terms.assert_called
    mock_get_subjects_json.assert_called
    mock_upload_to_bucket.assert_called
    # First course of the first subject in the data.subjects_json fixture.
    assert payload[0][0]["crn"] == 10883
# NOTE: error on get_subjects in main where it makes a request
| 33.825623
| 86
| 0.745713
| 1,182
| 9,505
| 5.650592
| 0.093909
| 0.040874
| 0.067375
| 0.046414
| 0.92424
| 0.92424
| 0.902081
| 0.902081
| 0.894146
| 0.87019
| 0
| 0.036886
| 0.147186
| 9,505
| 280
| 87
| 33.946429
| 0.787071
| 0.005576
| 0
| 0.768182
| 0
| 0
| 0.171958
| 0.06
| 0
| 0
| 0
| 0
| 0.109091
| 1
| 0.036364
| false
| 0
| 0.090909
| 0
| 0.127273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0cd9cdcc395fffd1692882cee87701ebbce8d789
| 27,564
|
py
|
Python
|
zauberer_game/zauber_pak/act1.py
|
fwalterjames/zauberer
|
3bc4ad4401f27900e009d4d20efaa3a438fe8017
|
[
"MIT"
] | null | null | null |
zauberer_game/zauber_pak/act1.py
|
fwalterjames/zauberer
|
3bc4ad4401f27900e009d4d20efaa3a438fe8017
|
[
"MIT"
] | null | null | null |
zauberer_game/zauber_pak/act1.py
|
fwalterjames/zauberer
|
3bc4ad4401f27900e009d4d20efaa3a438fe8017
|
[
"MIT"
] | null | null | null |
from random import randint
from pygame import mixer
def act_one():
    """Play Act One of the adventure: a night-time banging at the door.

    Pure console I/O: prints the story, reads choices with input(), and hands
    off to segue() (stranger admitted) or rock_paper_scissor() (player stalls).
    Fixes two user-facing typos from the original text: "urgnt" -> "urgent"
    and "investiage" -> "investigate".
    """
    # Title banner.
    print("\n ========= ==== === === ======= ========= ======== ========= ======== ")
    print(" ==== == == === === == == == == == == == == ")
    print(" === === === === === ====== ======== == ==== ======== == ==== ")
    print(" === =========== === === == === == == == == == == ")
    print(" === === === === === == === == == === == == ===")
    print("========= === === ======== ====== ========== == == ========== == ===")
    print("\n💤You are dreaming, and in this dream you are many other people. You like how this feels.💤")
    print("\n💤 You are awakened by a harsh bang.\n")
    print("You are now yourself. The bang from within your bedroom.\n")
    input("Press enter to turn on your bedside lantern. 💡")
    print("\n------------💡------------")
    print("Your bedroom is now alight 🔥, and nothing looks out of the ordinary.\n")
    print("Your eyes drift to the window above your book shelf. Your brain finally registers a harsh storm out there. 🌩️ \n")
    night = input(
        "⏺️ Type SEARCH to get out of bed and look out the window, and SNOOZE to remain in bed.")
    if night in ("SNOOZE", "Snooze", "snooze"):
        # Branch 1: stay in bed, sleep through the knocking, meet the
        # stranger in the morning.
        print("😞 You really-really-really do not want to go over there.\n")
        print("You're NOT afraid, you just...you know, you're tired or whatever.\n")
        print("The wind howls ever madly as you attempt to drift off.\n")
        print("You dream of a man with skin made of iron, and of a stranger banging on your front door ⚰️")
        print("===============================================================\n")
        print("Nope! you think. Heart hammering a beat of anxiety in your chest, you climb back into bed.\n")
        print("The banging persists, the urgent knocking of someone who might keep at it all night and yet who cannot take a hint.\n")
        print("Despite this intrusion, you drift off to sleep with relative ease, tired from the months of training and harvest you've had.\n")
        print("Your heart is warm with your hopes for a relaxing summer vacation... your mind is nagging with a distant worry..... ✨ ")
        print("--------------------------------------------- ✨ \n")
        input("Press enter to wake up ☀️ \n")
        print("Cockledoodle doo, you think, as you stretch your arms and yawn, wipe the sleep from your eyes, wishing for a few more hours abed. \n")
        print("You don't love farming, but your mother loved it, so you stick by it to honor her💘\n")
        input("Press enter to go outside and feed the chickens. 🐔🐔🐔🐔🐔 \n")
        print("You enter the living room, and pad over to the cottage's front door. You grab the door handle.\n")
        input("Press enter to open the front door.... 🚪\n")
        print("You gasp in surprise and take a step back.")
        print("Your farm looks the same as it ever did--a bit soggy from the storm, and a bit rundown from your being a lousy farmer, but nothing to gasp about.")
        print("No, your gasp is from the man who stands before you. 👤 A stranger...at your door...")
        print("The man throws up his hands. 💬 FINALLY, he says. DO YOU SLEEP WITH COTTON IN YOUR EARS?\n")
        print("The man is disheveled, and clearly not from this village. He wears a tattered cloak, has misshapen eyes and a wiry mustache. 👤\n")
        print("And he gives you the distinct impression that your life is about to change... 🥺\n")
        return segue()
    else:
        # Branch 2: investigate the window, then get interrupted by banging
        # at the front door.
        print("You take a deep breath, swing your legs off the bed...😶🌫️\n")
        input("Press enter to walk over and peer out the window. 🛌 ")
        print(" ============================")
        print(" == 🌧️ == /- ==")
        print(" == 🌧️ == \/ | ==")
        print(" ============================")
        print(" == == \ | ==")
        print(" == == | ==")
        print(" == == \ ==")
        print(" ============================")
        print("You see that a crooked branch has fallen from the apple tree on your cottage-farmhouse lawn. 🌳 \n")
        print("It now leans against the window's glass. 💭 This must've been the crashing that roused me, you think.")
        input("\nCase Closed! (Press enter to go back to bed, satisfied by your successful investigation.) 😇")
        print("==================❌😩=====================")
        # Jump-scare sound effect; requires leere.mp3 next to the script and
        # an initialized pygame mixer.
        leere = mixer.Sound("leere.mp3")
        leere.play()
        print("\nYou start back toward the bed 🛌\n")
        print("❌BANG ❌ BANG ❌BANG ❌ BANG ❌BANG ❌ \n")
        print("😮💭 What the hecking eff! you think, your heart racing a whole new notch.\n")
        print("⚠️ This new sound has come from the living room.")
        print("By the deep, shadow-gray of the clouds above your village, it must be after midnight and far from dawn.🕛🌑\n")
        print("***Your choices are, go back to sleep and pretend you have heard nothing. OR: go to the living room and investigate the banging.***")
        print("Please note: you live alone.")
        print("----------------------------------------------------------")
        guest = input(
            "⏺️ Type and enter NOPE to go back to bed, or SURE to check the door. 💫")
        if guest in ("NOPE", "nope", "Nope"):
            # Branch 2a: hide in bed, then consult the mirror-thing, which
            # decides the matter with rock-paper-scissors.
            print("=============================😶🌫️===============================\n")
            print("Nope! you think. Heart hammering anxiety drums in your chest, you climb back into bed.\n")
            print("The banging persists at your door, the urgent knocking of someone who might keep at it all night and yet who cannot take a hint.\n")
            input("Press enter to get back out of bed and walk toward the closet 😰 \n")
            print(" 👁️🗨️ You're not going out there--you already made that choice--but you're scared, and need some guidance.\n")
            print("🏴 You open the closet door and look at the full-length mirror behind the door.🚪\n")
            print("You need to speak with the thing that lives inside your mirror 🌀 \n")
            input("Press enter to activate your mirror and speak to the thing 🙃\n")
            print("🌀 the thing materializes over your own reflection. It looks despondent.👤\n👤Yes, it sighs more than asks, as though speaking is too much effort for it.")
            print("💬 I need to play rock-paper-scissors, you say: If I win, I stop stressing and get some sleep. If you win, I go answer the door.")
            print("👤💬 Wait......the thing says, looking confused and alarmed. What's at the door?\n")
            input("Press enter to shout: IRRELEVANT!")
            return rock_paper_scissor()
        else:
            # Branch 2b: answer the door right away.
            print("=======================")
            print("You take a deep breath and grab the hammer you used to fix the chicken coop just hours earlier 🔨\n")
            print("🕴️ You creep out into the living room, and then you creep over to the cottage's front door.")
            print("You don't have a peephole--these are olden times, and no one in your village has thought of those yet--so you've got to open the door.\n")
            input("Press enter to open the front door... 🚪\n")
            print("----------------------------------- 🚪\n")
            print("⛈️ The world is a basin of torrential rain and thunder that happens to be shaped like your village, shaped like your farm ⛈️\n")
            print("Standing on your cottage porch is a strange little man. 👤\n")
            print("The man is disheveled, and clearly not from this county. He wears a tattered cloak, has misshapen eyes and a wiry mustache. 👤\n")
            print("He is soaking wet.\n")
            print("💬 IT'S ABOUT TIME, he wails with ornery grit. IT'S ONLY THE FATE OF THE ENTIRE CONTINENT AT STAKE!")
            danger = input(
                "⏺️ Type SAFE to slam the door in this loud, rude stranger's face, or SORRY to merely blink at him, puzzled\n")
            if danger in ("Sorry", "SORRY", "sorry"):
                print("You look at this stranger with groggy mind, and sleepy eyes\n")
                print("He speaks English, but his accent is unfamiliar to you. 🗺️\n")
                print("You say,😑💬 Sir, I wish to be sleeping. How may I help you, and how quickly can it be done?")
                print("He leans his face close to your face. You smell ale and unwashed horses.\n")
                input("Press enter to lean away... 🤢 \n")
                print("--------------------------------------")
                print("The stranger says, 💬 If you wish to keep this lovely farm, along with air in your lungs, you're gonna wanna let me in. Mate. 👤\n")
                print("And just like that, your life changes. 🥺")
                return segue()
            else:
                print("You step back and grab the door...")
                input(
                    "⏺️ Type and enter: A) to smile as the door slams 🙃 | B) to look apologetic as the door slams\n")
                print("💬 Yeah, sorry mate, you say, and prepare to close the door on this man and the storm he rode in on\n")
                print("But he shoves his muddy boot in the way 😐 \n")
                print("He leans his face close to your face. You smell ale and unwashed horses.\n")
                input("Press enter to lean away in disgust... 🤢 \n")
                print("The stranger says, 💬 If you wish to keep this lovely farm, along with air in your lungs, you're gonna wanna let me in. Mate. 👤\n")
                print("And just like that, your life changes. 🥺")
                return segue()
def _answer_the_door():
    """Shared 'lose' ending: the player gives in and opens the front door.

    Previously triplicated verbatim inside rock_paper_scissor for each losing
    gesture. Returns segue().
    """
    print("\n======== 😐 Ugh. ==========")
    print("You take a deep breath and grab the hammer you used to fix the chicken coop just hours earlier 🔨\n")
    print("You creep out into the living room, and then you creep over to the cottage's front door.")
    print("You don't have a peephole--these are olden times, and no one in your village has thought of those yet--so you've got to open the door.\n")
    input("Press enter to open the front door... 🚪\n")
    print("----------------------------------- 🚪\n")
    print("⛈️ The world is a basin of torrential rain and thunder that happens to be shaped like your village, shaped like your farm ⛈️\n")
    print("Standing on your cottage porch is a strange little man. 👤\n")
    print("The man is disheveled, and clearly not from this county. He wears a tattered cloak, has misshapen eyes and a wiry mustache. 👤\n")
    print("He is soaking wet.\n")
    print("💬 IT'S ABOUT TIME, he wails with ornery grit. IT'S ONLY THE FATE OF THE ENTIRE CONTINENT AT STAKE!")
    danger = input(
        "⏺️ Type SAFE to slam the door in this loud jerk's face, or SORRY to merely blink at him, puzzled\n")
    if danger in ("Sorry", "SORRY", "sorry"):
        print("You look at this stranger with groggy mind, and sleepy eyes\n")
        print("He speaks English, but his accent is unfamiliar to you. 🗺️\n")
        print("You say,😑💬 Sir, I wish to be sleeping. How may I help you, and how quickly can it be done?")
        print("He leans his face close to your face. You smell ale and unwashed horses.\n")
        input("Press enter to lean away... 🤢 \n")
        print("--------------------------------------")
    else:
        input("Press enter to smile and slam the door 🙃 \n")
        print("💬 Yeah, sorry mate, you say, and prepare to close the door on this man and the storm he rode in on\n")
        print("But he shoves his muddy boot in the way 😐 \n")
        print("He leans his face close to your face. You smell ale and unwashed horses.\n")
        input("Press enter to lean away... 🤢 \n")
    print("The stranger says, 💬 If you wish to keep this lovely farm, along with air in your lungs, you're gonna wanna let me in. Mate. 👤\n")
    print("And just like that, your life changes. 🥺")
    return segue()


def _sleep_through_the_night():
    """Shared 'win' ending: the player sleeps and meets the stranger at dawn.

    Previously triplicated verbatim inside rock_paper_scissor for each winning
    gesture. Returns segue().
    """
    print("\nYou sigh with relief, and you shut the closet door. You crawl into bed.")
    print("Despite this stress, you drift off to sleep with relative ease, tired from the months of training and harvest you've had.\n")
    print("Your heart is warm with your hopes for a relaxing summer vacation... your mind is nagging with a distant worry..... ✨ ")
    print("--------------------------------------------- ✨ \n")
    input("Press enter to wake up ☀️ \n")
    print("Cockledoodle doo, you think, as you stretch your arms and yawn, wipe the sleep from your eyes, wishing for a few more hours abed. \n")
    print("You don't love farming, but your mother loved it, so you stick with it to honor her💘\n")
    input("Press enter to go outside and feed the chickens. 🐔🐔🐔🐔🐔 \n")
    print("You enter the living room, and pad over to the cottage's front door. You grab the door handle.\n")
    input("Press enter to open the front door.... 🚪\n")
    print("You gasp in surprise and take a step back.")
    print("Your farm looks the same as it ever did--a bit soggy from the storm, and a bit rundown from your lack of care, but nothing to gasp about.")
    print("No, your gasp is from the man who stands before you. 👤")
    print("The man throws up his hands. 💬 FINALLY, he says. DO YOU SLEEP WITH COTTON IN YOUR EARS?\n")
    print("The man is disheveled, and clearly not from this village. He wears a tattered cloak, has misshapen eyes and a wiry mustache. 👤\n")
    print("And he gives you the distinct impression that your life is about to change... 🥺\n")
    return segue()


def rock_paper_scissor():
    """Play rock-paper-scissors against the mirror-thing.

    Win -> _sleep_through_the_night(); lose -> _answer_the_door(); tie ->
    recurse (which re-prints the intro, as the original did). Invalid input
    re-prompts with a fresh random pick and no intro.

    Accepted spellings per gesture are unchanged: exactly "Rock"/"rock"/"ROCK"
    and likewise for Paper and Scissors. An exact-case tie prints "Tie!",
    a different-case tie prints "That's a tie!" — both quirks preserved.
    """
    options = ["Rock", "Paper", "Scissors"]
    # What each gesture defeats.
    beats = {"Rock": "Scissors", "Paper": "Rock", "Scissors": "Paper"}
    # Opening word and verb for the losing announcement, keyed by the
    # player's gesture (the original used "You lose..." only for Scissors).
    lose_lines = {
        "Rock": ("You lose!", "smothers"),
        "Paper": ("You lose!", "sliced"),
        "Scissors": ("You lose...", "bashes"),
    }
    # Verb for the winning announcement, keyed by the player's gesture.
    win_verbs = {"Rock": "destroys", "Paper": "smothers", "Scissors": "diced"}
    Karminrot = options[randint(0, 2)]
    print("❤️ Welcome to ROCK-PAPER-SCISSORS! A game you play with a being cursed to live for all eternity in your family mirror! ❤️\n")
    print("💁♀️If you win, you go right back to bed, following your gut about the person at your front door.💁♂️\n")
    print("👤if you lose, you ignore your own feelings--like you always do, like your mother always did--and go answer the door.👤")
    print("What fun!-🥰-Let's play!")
    while True:
        player = input("Type: Rock, Paper, or Scissors?")
        choice = None
        for option in options:
            if player in (option, option.lower(), option.upper()):
                choice = option
                break
        if choice is None:
            print("\n 👤 I don't understand that gesture. Check your spelling maybe? Let's go again...\n")
            # Fresh pick for the next attempt, as before.
            Karminrot = options[randint(0, 2)]
            continue
        if player == Karminrot:
            # Exact case match with the mirror's pick: terse announcement.
            print("Tie!")
            return rock_paper_scissor()
        if choice == Karminrot:
            # Same gesture typed in a different case.
            print("That's a tie!")
            return rock_paper_scissor()
        if beats[Karminrot] == choice:
            opener, verb = lose_lines[choice]
            print(opener, Karminrot, verb, player)
            return _answer_the_door()
        print("You win!", player, win_verbs[choice], Karminrot)
        return _sleep_through_the_night()
def segue():
print("✨✨✨✨✨✨✨✨✨✨✨✨✨✨✨✨✨")
| 74.699187
| 172
| 0.528697
| 3,858
| 27,564
| 3.84422
| 0.132711
| 0.04612
| 0.026701
| 0.03668
| 0.800148
| 0.772234
| 0.764884
| 0.758883
| 0.758883
| 0.75713
| 0
| 0.000276
| 0.3436
| 27,564
| 368
| 173
| 74.902174
| 0.804455
| 0.002612
| 0
| 0.699164
| 0
| 0.267409
| 0.661751
| 0.030824
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008357
| false
| 0
| 0.005571
| 0
| 0.061281
| 0.568245
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 9
|
0b295e215286c5499ba92e826f69979c2b5e0a95
| 5,081
|
py
|
Python
|
turtlebot3_ws/src/global_path_planning/scripts/algorithms/neighbors.py
|
SakshayMahna/Robotics-Playground
|
2ad1d16b40126dde14b92d26beaa3cdb53a1b4d8
|
[
"MIT"
] | 2
|
2022-03-09T07:19:04.000Z
|
2022-03-30T07:32:48.000Z
|
turtlebot3_ws/src/global_path_planning/scripts/algorithms/neighbors.py
|
SakshayMahna/Robotics-Playground
|
2ad1d16b40126dde14b92d26beaa3cdb53a1b4d8
|
[
"MIT"
] | 1
|
2022-03-09T03:00:51.000Z
|
2022-03-12T02:53:09.000Z
|
turtlebot3_ws/src/global_path_planning/scripts/algorithms/neighbors.py
|
SakshayMahna/Robotics-Playground
|
2ad1d16b40126dde14b92d26beaa3cdb53a1b4d8
|
[
"MIT"
] | null | null | null |
def find_neighbors(index, width, height, costmap, orthogonal_step_cost):
    """
    Identifies neighbor nodes inspecting the 8 adjacent neighbors
    Checks if neighbor is inside the map boundaries and if is not an obstacle according to a threshold
    Returns a list with valid neighbour nodes as [index, step_cost] pairs

    Fixed boundary bugs from the original index arithmetic:
    - `upper > 0` rejected the valid index 0
    - the left check wrapped around row edges (column 0 picked up the previous
      row's last cell) and wrongly rejected column 1's left neighbor
    - `right % width != (width + 1)` was always true, so the right edge wrapped
      to the next row
    - `lower <= height * width` allowed an out-of-range costmap read
    """
    neighbors = []
    # length of diagonal = length of one side by the square root of 2 (1.41421)
    diagonal_step_cost = orthogonal_step_cost * 1.41421
    # threshold value used to reject neighbor nodes as they are considered as obstacles [1-254]
    lethal_cost = 150
    # Work in (column, row) coordinates so the edge checks are exact.
    x = index % width
    y = index // width

    def _append_if_valid(nx, ny, base_cost):
        # Accept the neighbor only if it lies on the map and is traversable.
        if 0 <= nx < width and 0 <= ny < height:
            n_index = ny * width + nx
            if costmap[n_index] < lethal_cost:
                # Cell cost scaled into [0, 1) is added as a traversal penalty.
                neighbors.append([n_index, base_cost + costmap[n_index] / 255])

    # Same visiting order as the original: upper, left, upper-left,
    # upper-right, right, lower-left, lower, lower-right.
    _append_if_valid(x, y - 1, orthogonal_step_cost)
    _append_if_valid(x - 1, y, orthogonal_step_cost)
    _append_if_valid(x - 1, y - 1, diagonal_step_cost)
    _append_if_valid(x + 1, y - 1, diagonal_step_cost)
    _append_if_valid(x + 1, y, orthogonal_step_cost)
    _append_if_valid(x - 1, y + 1, diagonal_step_cost)
    _append_if_valid(x, y + 1, orthogonal_step_cost)
    _append_if_valid(x + 1, y + 1, diagonal_step_cost)
    return neighbors
def find_weighted_neighbors(index, width, height, costmap, orthogonal_step_cost):
    """
    Identifies neighbor nodes inspecting the 8 adjacent neighbors.

    Checks that each neighbor lies inside the map boundaries; neighbors on
    obstacle cells (cost >= lethal threshold) are still returned but with an
    infinite step cost so a planner will never choose them.

    Returns a list of valid neighbour nodes as [index, step_cost] pairs, in
    the order: upper, left, upper-left, upper-right, right, lower-left,
    lower, lower-right.
    """
    neighbors = []
    # length of diagonal = length of one side by the square root of 2 (1.41421)
    diagonal_step_cost = orthogonal_step_cost * 1.41421
    # threshold value used to reject neighbor nodes as they are considered as obstacles [1-254]
    lethal_cost = 150
    # Work in (row, col) coordinates so the boundary checks are exact.
    # The previous flat-index tests wrapped around row edges (e.g. the
    # "left" of a leftmost cell resolved to the previous row's rightmost
    # cell) and `lower <= height * width` could read one element past the
    # end of the costmap.
    row, col = divmod(index, width)
    # 8-connected offsets in the original visiting order, paired with the
    # base step cost (orthogonal or diagonal).
    offsets = (
        (-1, 0, orthogonal_step_cost),   # upper
        (0, -1, orthogonal_step_cost),   # left
        (-1, -1, diagonal_step_cost),    # upper-left
        (-1, 1, diagonal_step_cost),     # upper-right
        (0, 1, orthogonal_step_cost),    # right
        (1, -1, diagonal_step_cost),     # lower-left
        (1, 0, orthogonal_step_cost),    # lower
        (1, 1, diagonal_step_cost),      # lower-right
    )
    for d_row, d_col, base_cost in offsets:
        n_row = row + d_row
        n_col = col + d_col
        if 0 <= n_row < height and 0 <= n_col < width:
            neighbor = n_row * width + n_col
            if costmap[neighbor] < lethal_cost:
                # weight the step by the normalized cell cost [0, 1)
                step_cost = base_cost + costmap[neighbor] / 255
            else:
                # obstacle: keep the neighbor but make it unreachable
                step_cost = float('inf')
            neighbors.append([neighbor, step_cost])
    return neighbors
| 36.292857
| 100
| 0.688644
| 718
| 5,081
| 4.658774
| 0.098886
| 0.155456
| 0.066966
| 0.086099
| 0.990732
| 0.990732
| 0.990732
| 0.990732
| 0.968012
| 0.9142
| 0
| 0.030812
| 0.214328
| 5,081
| 140
| 101
| 36.292857
| 0.807114
| 0.155481
| 0
| 0.971963
| 0
| 0
| 0.005636
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018692
| false
| 0
| 0
| 0
| 0.037383
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e7f0e5b24648094826b58a90ae11185e42caa554
| 135
|
py
|
Python
|
puma/registration/__init__.py
|
okryush/puma
|
fd3f21c5566ae64110420a26ef6c9d8da0e67dce
|
[
"MIT"
] | 239
|
2021-03-30T07:33:37.000Z
|
2022-03-15T07:14:06.000Z
|
puma/registration/__init__.py
|
alualu628628/puma
|
4a5980fcd302fc794f50e782e478a3bdd77f57b2
|
[
"MIT"
] | 12
|
2021-06-10T17:26:36.000Z
|
2022-03-29T16:23:52.000Z
|
puma/registration/__init__.py
|
alualu628628/puma
|
4a5980fcd302fc794f50e782e478a3bdd77f57b2
|
[
"MIT"
] | 46
|
2021-03-30T07:18:52.000Z
|
2022-03-30T04:49:34.000Z
|
from .method_selector import *
from .o3d_aliases import *
from .run_icp import *
from .scan2mesh import *
from .scan2mesh_icp import *
| 22.5
| 30
| 0.777778
| 19
| 135
| 5.315789
| 0.473684
| 0.39604
| 0.376238
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026087
| 0.148148
| 135
| 5
| 31
| 27
| 0.852174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e7f9c742c2753a0b8b846e8699a107dc0f452dd0
| 548
|
py
|
Python
|
Exercicios/mundo3-exercicios-72/ex109/moeda.py
|
rafaelbarretomg/Curso-Python-3
|
7e772cbaf4c1e1bf7f1a9fb2925ec2e0eecf2998
|
[
"MIT"
] | null | null | null |
Exercicios/mundo3-exercicios-72/ex109/moeda.py
|
rafaelbarretomg/Curso-Python-3
|
7e772cbaf4c1e1bf7f1a9fb2925ec2e0eecf2998
|
[
"MIT"
] | null | null | null |
Exercicios/mundo3-exercicios-72/ex109/moeda.py
|
rafaelbarretomg/Curso-Python-3
|
7e772cbaf4c1e1bf7f1a9fb2925ec2e0eecf2998
|
[
"MIT"
] | null | null | null |
def aumentar(preco=0, taxa=0, formato=False):
    """Increase ``preco`` by ``taxa`` percent.

    Returns the raw number, or a currency string when ``formato`` is truthy.
    """
    valor = preco + (preco * taxa / 100)
    if formato is False:
        return valor
    return moeda(valor)
def diminuir(preco=0, taxa=0, formato=False):
    """Decrease ``preco`` by ``taxa`` percent.

    Returns the raw number, or a currency string when ``formato`` is truthy.
    """
    valor = preco - (preco * taxa / 100)
    if formato is False:
        return valor
    return moeda(valor)
def dobro(preco=0, formato=False):
    """Double ``preco``; format as currency when ``formato`` is truthy."""
    valor = preco * 2
    if formato:
        return moeda(valor)
    return valor
def metade(preco=0, formato=False):
    """Halve ``preco``; format as currency when ``formato`` is truthy."""
    valor = preco / 2
    if formato:
        return moeda(valor)
    return valor
def moeda(p=0, m='R$'):
    """Render ``p`` as a currency string using a comma as decimal separator."""
    texto = f'{m}{p:>.2f}'
    return texto.replace('.', ',')
| 23.826087
| 50
| 0.636861
| 90
| 548
| 3.877778
| 0.277778
| 0.068768
| 0.148997
| 0.183381
| 0.836676
| 0.836676
| 0.836676
| 0.836676
| 0.836676
| 0.836676
| 0
| 0.037296
| 0.217153
| 548
| 22
| 51
| 24.909091
| 0.776224
| 0
| 0
| 0.285714
| 0
| 0
| 0.027372
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.357143
| false
| 0
| 0
| 0.071429
| 0.714286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
f025fbf9d9a45ece984ba4a2ef3b4052f7154917
| 124
|
py
|
Python
|
tkge/data/__init__.py
|
tkg-framework/TKG-framework
|
98586b7199bda0e96d74b2ea02c62226901822cc
|
[
"MIT",
"Unlicense"
] | null | null | null |
tkge/data/__init__.py
|
tkg-framework/TKG-framework
|
98586b7199bda0e96d74b2ea02c62226901822cc
|
[
"MIT",
"Unlicense"
] | null | null | null |
tkge/data/__init__.py
|
tkg-framework/TKG-framework
|
98586b7199bda0e96d74b2ea02c62226901822cc
|
[
"MIT",
"Unlicense"
] | null | null | null |
from .dataset import DatasetProcessor
from .custom_dataset import ICEWS14AtiseDatasetProcessor, TestICEWS14DatasetProcessor
| 41.333333
| 85
| 0.903226
| 10
| 124
| 11.1
| 0.7
| 0.234234
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034783
| 0.072581
| 124
| 2
| 86
| 62
| 0.930435
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f038db53ebef4880edcef75b812d2c0c06cde9d7
| 1,322
|
py
|
Python
|
lib/operators.py
|
briandeheus/nots
|
43d1427b95fafe09417ad84b4ef2419b8e47d31c
|
[
"MIT"
] | 4
|
2021-07-17T11:50:53.000Z
|
2022-02-10T16:43:33.000Z
|
lib/operators.py
|
briandeheus/nots
|
43d1427b95fafe09417ad84b4ef2419b8e47d31c
|
[
"MIT"
] | 1
|
2021-07-15T17:27:32.000Z
|
2021-07-15T17:27:32.000Z
|
lib/operators.py
|
briandeheus/nots
|
43d1427b95fafe09417ad84b4ef2419b8e47d31c
|
[
"MIT"
] | null | null | null |
from lib.types import CASTING_TABLE
def _cast_for_column(column, value):
    """Cast ``value`` to the column's Python type when a casting rule exists.

    Falls back to the raw ``value`` for column types absent from
    ``CASTING_TABLE``.
    """
    column_type = column.type.__visit_name__
    if column_type in CASTING_TABLE:
        return CASTING_TABLE[column_type].cast_to(value)
    return value


def eq(column, value):
    """Build an equality filter expression for ``column``."""
    return column == _cast_for_column(column, value)


def neq(column, value):
    """Build an inequality filter expression for ``column``."""
    return column != _cast_for_column(column, value)


def gte(column, value):
    """Build a greater-than-or-equal filter expression for ``column``."""
    return column >= _cast_for_column(column, value)


def gt(column, value):
    """Build a greater-than filter expression for ``column``."""
    return column > _cast_for_column(column, value)


def lte(column, value):
    """Build a less-than-or-equal filter expression for ``column``."""
    return column <= _cast_for_column(column, value)


def lt(column, value):
    """Build a less-than filter expression for ``column``."""
    return column < _cast_for_column(column, value)


# Name -> implementation dispatch table. Includes "lte", which the original
# mapping omitted even though the lte function exists.
OPERATORS = {"eq": eq, "neq": neq, "gte": gte, "gt": gt, "lte": lte, "lt": lt}
| 22.40678
| 66
| 0.704236
| 185
| 1,322
| 4.67027
| 0.12973
| 0.277778
| 0.118056
| 0.145833
| 0.90625
| 0.90625
| 0.90625
| 0.90625
| 0.90625
| 0.90625
| 0
| 0
| 0.204236
| 1,322
| 58
| 67
| 22.793103
| 0.821293
| 0
| 0
| 0.375
| 0
| 0
| 0.009077
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1875
| false
| 0
| 0.03125
| 0
| 0.59375
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
f0418e930ba72827ab51a284cc20ed7c3ef2f8b4
| 137,085
|
py
|
Python
|
sdk/python/pulumi_okta/app/saml.py
|
pulumi/pulumi-okta
|
83f7617a85b3d05213901773fa4e6a151ab6076b
|
[
"ECL-2.0",
"Apache-2.0"
] | 5
|
2019-10-29T21:59:22.000Z
|
2021-11-08T12:00:24.000Z
|
sdk/python/pulumi_okta/app/saml.py
|
pulumi/pulumi-okta
|
83f7617a85b3d05213901773fa4e6a151ab6076b
|
[
"ECL-2.0",
"Apache-2.0"
] | 109
|
2020-01-06T10:28:09.000Z
|
2022-03-25T19:52:40.000Z
|
sdk/python/pulumi_okta/app/saml.py
|
pulumi/pulumi-okta
|
83f7617a85b3d05213901773fa4e6a151ab6076b
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2020-09-11T16:31:04.000Z
|
2020-11-24T12:23:17.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['SamlArgs', 'Saml']
@pulumi.input_type
class SamlArgs:
def __init__(__self__, *,
label: pulumi.Input[str],
accessibility_error_redirect_url: Optional[pulumi.Input[str]] = None,
accessibility_login_redirect_url: Optional[pulumi.Input[str]] = None,
accessibility_self_service: Optional[pulumi.Input[bool]] = None,
acs_endpoints: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
admin_note: Optional[pulumi.Input[str]] = None,
app_links_json: Optional[pulumi.Input[str]] = None,
app_settings_json: Optional[pulumi.Input[str]] = None,
assertion_signed: Optional[pulumi.Input[bool]] = None,
attribute_statements: Optional[pulumi.Input[Sequence[pulumi.Input['SamlAttributeStatementArgs']]]] = None,
audience: Optional[pulumi.Input[str]] = None,
authn_context_class_ref: Optional[pulumi.Input[str]] = None,
auto_submit_toolbar: Optional[pulumi.Input[bool]] = None,
default_relay_state: Optional[pulumi.Input[str]] = None,
destination: Optional[pulumi.Input[str]] = None,
digest_algorithm: Optional[pulumi.Input[str]] = None,
enduser_note: Optional[pulumi.Input[str]] = None,
features: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
hide_ios: Optional[pulumi.Input[bool]] = None,
hide_web: Optional[pulumi.Input[bool]] = None,
honor_force_authn: Optional[pulumi.Input[bool]] = None,
idp_issuer: Optional[pulumi.Input[str]] = None,
inline_hook_id: Optional[pulumi.Input[str]] = None,
key_name: Optional[pulumi.Input[str]] = None,
key_years_valid: Optional[pulumi.Input[int]] = None,
logo: Optional[pulumi.Input[str]] = None,
preconfigured_app: Optional[pulumi.Input[str]] = None,
recipient: Optional[pulumi.Input[str]] = None,
request_compressed: Optional[pulumi.Input[bool]] = None,
response_signed: Optional[pulumi.Input[bool]] = None,
saml_version: Optional[pulumi.Input[str]] = None,
signature_algorithm: Optional[pulumi.Input[str]] = None,
single_logout_certificate: Optional[pulumi.Input[str]] = None,
single_logout_issuer: Optional[pulumi.Input[str]] = None,
single_logout_url: Optional[pulumi.Input[str]] = None,
skip_groups: Optional[pulumi.Input[bool]] = None,
skip_users: Optional[pulumi.Input[bool]] = None,
sp_issuer: Optional[pulumi.Input[str]] = None,
sso_url: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
subject_name_id_format: Optional[pulumi.Input[str]] = None,
subject_name_id_template: Optional[pulumi.Input[str]] = None,
user_name_template: Optional[pulumi.Input[str]] = None,
user_name_template_suffix: Optional[pulumi.Input[str]] = None,
user_name_template_type: Optional[pulumi.Input[str]] = None,
users: Optional[pulumi.Input[Sequence[pulumi.Input['SamlUserArgs']]]] = None):
"""
The set of arguments for constructing a Saml resource.
:param pulumi.Input[str] label: label of application.
:param pulumi.Input[str] accessibility_error_redirect_url: Custom error page URL.
:param pulumi.Input[str] accessibility_login_redirect_url: Custom login page for this application.
:param pulumi.Input[bool] accessibility_self_service: Enable self-service. By default, it is `false`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] acs_endpoints: An array of ACS endpoints. You can configure a maximum of 100 endpoints.
:param pulumi.Input[str] admin_note: Application notes for admins.
:param pulumi.Input[str] app_links_json: Displays specific appLinks for the app. The value for the link should be boolean.
:param pulumi.Input[str] app_settings_json: Application settings in JSON format.
:param pulumi.Input[bool] assertion_signed: Determines whether the SAML assertion is digitally signed.
:param pulumi.Input[Sequence[pulumi.Input['SamlAttributeStatementArgs']]] attribute_statements: List of SAML Attribute statements.
:param pulumi.Input[str] audience: Audience restriction.
:param pulumi.Input[str] authn_context_class_ref: Identifies the SAML authentication context class for the assertion’s authentication statement.
:param pulumi.Input[bool] auto_submit_toolbar: Display auto submit toolbar.
:param pulumi.Input[str] default_relay_state: Identifies a specific application resource in an IDP initiated SSO scenario.
:param pulumi.Input[str] destination: Identifies the location where the SAML response is intended to be sent inside the SAML assertion.
:param pulumi.Input[str] digest_algorithm: Determines the digest algorithm used to digitally sign the SAML assertion and response.
:param pulumi.Input[str] enduser_note: Application notes for end users.
:param pulumi.Input[Sequence[pulumi.Input[str]]] features: features enabled. Notice: you can't currently configure provisioning features via the API.
:param pulumi.Input[Sequence[pulumi.Input[str]]] groups: Groups associated with the application.
- `DEPRECATED`: Please replace usage with the `AppGroupAssignments` (or `app.GroupAssignment`) resource.
:param pulumi.Input[bool] hide_ios: Do not display application icon on mobile app.
:param pulumi.Input[bool] hide_web: Do not display application icon to users
:param pulumi.Input[bool] honor_force_authn: Prompt user to re-authenticate if SP asks for it.
:param pulumi.Input[str] idp_issuer: SAML issuer ID.
:param pulumi.Input[str] inline_hook_id: Saml Inline Hook associated with the application.
:param pulumi.Input[str] key_name: Certificate name. This modulates the rotation of keys. New name == new key. Required to be set with `key_years_valid`.
:param pulumi.Input[int] key_years_valid: Number of years the certificate is valid (2 - 10 years).
:param pulumi.Input[str] logo: Local file path to the logo. The file must be in PNG, JPG, or GIF format, and less than 1 MB in size.
:param pulumi.Input[str] preconfigured_app: name of application from the Okta Integration Network, if not included a custom app will be created.
:param pulumi.Input[str] recipient: The location where the app may present the SAML assertion.
:param pulumi.Input[bool] request_compressed: Denotes whether the request is compressed or not.
:param pulumi.Input[bool] response_signed: Determines whether the SAML auth response message is digitally signed.
:param pulumi.Input[str] saml_version: SAML version for the app's sign-on mode. Valid values are: `"2.0"` or `"1.1"`. Default is `"2.0"`.
:param pulumi.Input[str] signature_algorithm: Signature algorithm used ot digitally sign the assertion and response.
:param pulumi.Input[str] single_logout_certificate: x509 encoded certificate that the Service Provider uses to sign Single Logout requests.
Note: should be provided without `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----`, see [official documentation](https://developer.okta.com/docs/reference/api/apps/#service-provider-certificate).
:param pulumi.Input[str] single_logout_issuer: The issuer of the Service Provider that generates the Single Logout request.
:param pulumi.Input[str] single_logout_url: The location where the logout response is sent.
:param pulumi.Input[bool] skip_groups: Indicator that allows the app to skip `groups` sync (it's also can be provided during import). Default is `false`.
:param pulumi.Input[bool] skip_users: Indicator that allows the app to skip `users` sync (it's also can be provided during import). Default is `false`.
:param pulumi.Input[str] sp_issuer: SAML service provider issuer.
:param pulumi.Input[str] sso_url: Single Sign-on Url.
:param pulumi.Input[str] status: status of application.
:param pulumi.Input[str] subject_name_id_format: Identifies the SAML processing rules.
:param pulumi.Input[str] subject_name_id_template: Template for app user's username when a user is assigned to the app.
:param pulumi.Input[str] user_name_template: Username template.
:param pulumi.Input[str] user_name_template_suffix: Username template suffix.
:param pulumi.Input[str] user_name_template_type: Username template type.
:param pulumi.Input[Sequence[pulumi.Input['SamlUserArgs']]] users: Users associated with the application.
- `DEPRECATED`: Please replace usage with the `app.User` resource.
"""
pulumi.set(__self__, "label", label)
if accessibility_error_redirect_url is not None:
pulumi.set(__self__, "accessibility_error_redirect_url", accessibility_error_redirect_url)
if accessibility_login_redirect_url is not None:
pulumi.set(__self__, "accessibility_login_redirect_url", accessibility_login_redirect_url)
if accessibility_self_service is not None:
pulumi.set(__self__, "accessibility_self_service", accessibility_self_service)
if acs_endpoints is not None:
pulumi.set(__self__, "acs_endpoints", acs_endpoints)
if admin_note is not None:
pulumi.set(__self__, "admin_note", admin_note)
if app_links_json is not None:
pulumi.set(__self__, "app_links_json", app_links_json)
if app_settings_json is not None:
pulumi.set(__self__, "app_settings_json", app_settings_json)
if assertion_signed is not None:
pulumi.set(__self__, "assertion_signed", assertion_signed)
if attribute_statements is not None:
pulumi.set(__self__, "attribute_statements", attribute_statements)
if audience is not None:
pulumi.set(__self__, "audience", audience)
if authn_context_class_ref is not None:
pulumi.set(__self__, "authn_context_class_ref", authn_context_class_ref)
if auto_submit_toolbar is not None:
pulumi.set(__self__, "auto_submit_toolbar", auto_submit_toolbar)
if default_relay_state is not None:
pulumi.set(__self__, "default_relay_state", default_relay_state)
if destination is not None:
pulumi.set(__self__, "destination", destination)
if digest_algorithm is not None:
pulumi.set(__self__, "digest_algorithm", digest_algorithm)
if enduser_note is not None:
pulumi.set(__self__, "enduser_note", enduser_note)
if features is not None:
pulumi.set(__self__, "features", features)
if groups is not None:
warnings.warn("""The direct configuration of groups in this app resource is deprecated, please ensure you use the resource `okta_app_group_assignments` for this functionality.""", DeprecationWarning)
pulumi.log.warn("""groups is deprecated: The direct configuration of groups in this app resource is deprecated, please ensure you use the resource `okta_app_group_assignments` for this functionality.""")
if groups is not None:
pulumi.set(__self__, "groups", groups)
if hide_ios is not None:
pulumi.set(__self__, "hide_ios", hide_ios)
if hide_web is not None:
pulumi.set(__self__, "hide_web", hide_web)
if honor_force_authn is not None:
pulumi.set(__self__, "honor_force_authn", honor_force_authn)
if idp_issuer is not None:
pulumi.set(__self__, "idp_issuer", idp_issuer)
if inline_hook_id is not None:
pulumi.set(__self__, "inline_hook_id", inline_hook_id)
if key_name is not None:
pulumi.set(__self__, "key_name", key_name)
if key_years_valid is not None:
pulumi.set(__self__, "key_years_valid", key_years_valid)
if logo is not None:
pulumi.set(__self__, "logo", logo)
if preconfigured_app is not None:
pulumi.set(__self__, "preconfigured_app", preconfigured_app)
if recipient is not None:
pulumi.set(__self__, "recipient", recipient)
if request_compressed is not None:
pulumi.set(__self__, "request_compressed", request_compressed)
if response_signed is not None:
pulumi.set(__self__, "response_signed", response_signed)
if saml_version is not None:
pulumi.set(__self__, "saml_version", saml_version)
if signature_algorithm is not None:
pulumi.set(__self__, "signature_algorithm", signature_algorithm)
if single_logout_certificate is not None:
pulumi.set(__self__, "single_logout_certificate", single_logout_certificate)
if single_logout_issuer is not None:
pulumi.set(__self__, "single_logout_issuer", single_logout_issuer)
if single_logout_url is not None:
pulumi.set(__self__, "single_logout_url", single_logout_url)
if skip_groups is not None:
pulumi.set(__self__, "skip_groups", skip_groups)
if skip_users is not None:
pulumi.set(__self__, "skip_users", skip_users)
if sp_issuer is not None:
pulumi.set(__self__, "sp_issuer", sp_issuer)
if sso_url is not None:
pulumi.set(__self__, "sso_url", sso_url)
if status is not None:
pulumi.set(__self__, "status", status)
if subject_name_id_format is not None:
pulumi.set(__self__, "subject_name_id_format", subject_name_id_format)
if subject_name_id_template is not None:
pulumi.set(__self__, "subject_name_id_template", subject_name_id_template)
if user_name_template is not None:
pulumi.set(__self__, "user_name_template", user_name_template)
if user_name_template_suffix is not None:
pulumi.set(__self__, "user_name_template_suffix", user_name_template_suffix)
if user_name_template_type is not None:
pulumi.set(__self__, "user_name_template_type", user_name_template_type)
if users is not None:
warnings.warn("""The direct configuration of users in this app resource is deprecated, please ensure you use the resource `okta_app_user` for this functionality.""", DeprecationWarning)
pulumi.log.warn("""users is deprecated: The direct configuration of users in this app resource is deprecated, please ensure you use the resource `okta_app_user` for this functionality.""")
if users is not None:
pulumi.set(__self__, "users", users)
@property
@pulumi.getter
def label(self) -> pulumi.Input[str]:
"""
label of application.
"""
return pulumi.get(self, "label")
@label.setter
def label(self, value: pulumi.Input[str]):
pulumi.set(self, "label", value)
@property
@pulumi.getter(name="accessibilityErrorRedirectUrl")
def accessibility_error_redirect_url(self) -> Optional[pulumi.Input[str]]:
"""
Custom error page URL.
"""
return pulumi.get(self, "accessibility_error_redirect_url")
@accessibility_error_redirect_url.setter
def accessibility_error_redirect_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "accessibility_error_redirect_url", value)
@property
@pulumi.getter(name="accessibilityLoginRedirectUrl")
def accessibility_login_redirect_url(self) -> Optional[pulumi.Input[str]]:
"""
Custom login page for this application.
"""
return pulumi.get(self, "accessibility_login_redirect_url")
@accessibility_login_redirect_url.setter
def accessibility_login_redirect_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "accessibility_login_redirect_url", value)
@property
@pulumi.getter(name="accessibilitySelfService")
def accessibility_self_service(self) -> Optional[pulumi.Input[bool]]:
"""
Enable self-service. By default, it is `false`.
"""
return pulumi.get(self, "accessibility_self_service")
@accessibility_self_service.setter
def accessibility_self_service(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "accessibility_self_service", value)
@property
@pulumi.getter(name="acsEndpoints")
def acs_endpoints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
An array of ACS endpoints. You can configure a maximum of 100 endpoints.
"""
return pulumi.get(self, "acs_endpoints")
@acs_endpoints.setter
def acs_endpoints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "acs_endpoints", value)
@property
@pulumi.getter(name="adminNote")
def admin_note(self) -> Optional[pulumi.Input[str]]:
"""
Application notes for admins.
"""
return pulumi.get(self, "admin_note")
@admin_note.setter
def admin_note(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "admin_note", value)
@property
@pulumi.getter(name="appLinksJson")
def app_links_json(self) -> Optional[pulumi.Input[str]]:
"""
Displays specific appLinks for the app. The value for the link should be boolean.
"""
return pulumi.get(self, "app_links_json")
@app_links_json.setter
def app_links_json(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "app_links_json", value)
@property
@pulumi.getter(name="appSettingsJson")
def app_settings_json(self) -> Optional[pulumi.Input[str]]:
"""
Application settings in JSON format.
"""
return pulumi.get(self, "app_settings_json")
@app_settings_json.setter
def app_settings_json(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "app_settings_json", value)
@property
@pulumi.getter(name="assertionSigned")
def assertion_signed(self) -> Optional[pulumi.Input[bool]]:
"""
Determines whether the SAML assertion is digitally signed.
"""
return pulumi.get(self, "assertion_signed")
@assertion_signed.setter
def assertion_signed(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "assertion_signed", value)
@property
@pulumi.getter(name="attributeStatements")
def attribute_statements(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SamlAttributeStatementArgs']]]]:
"""
List of SAML Attribute statements.
"""
return pulumi.get(self, "attribute_statements")
@attribute_statements.setter
def attribute_statements(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SamlAttributeStatementArgs']]]]):
pulumi.set(self, "attribute_statements", value)
@property
@pulumi.getter
def audience(self) -> Optional[pulumi.Input[str]]:
"""
Audience restriction.
"""
return pulumi.get(self, "audience")
@audience.setter
def audience(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "audience", value)
@property
@pulumi.getter(name="authnContextClassRef")
def authn_context_class_ref(self) -> Optional[pulumi.Input[str]]:
"""
Identifies the SAML authentication context class for the assertion’s authentication statement.
"""
return pulumi.get(self, "authn_context_class_ref")
@authn_context_class_ref.setter
def authn_context_class_ref(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "authn_context_class_ref", value)
@property
@pulumi.getter(name="autoSubmitToolbar")
def auto_submit_toolbar(self) -> Optional[pulumi.Input[bool]]:
"""
Display auto submit toolbar.
"""
return pulumi.get(self, "auto_submit_toolbar")
@auto_submit_toolbar.setter
def auto_submit_toolbar(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "auto_submit_toolbar", value)
@property
@pulumi.getter(name="defaultRelayState")
def default_relay_state(self) -> Optional[pulumi.Input[str]]:
"""
Identifies a specific application resource in an IDP initiated SSO scenario.
"""
return pulumi.get(self, "default_relay_state")
@default_relay_state.setter
def default_relay_state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "default_relay_state", value)
@property
@pulumi.getter
def destination(self) -> Optional[pulumi.Input[str]]:
"""
Identifies the location where the SAML response is intended to be sent inside the SAML assertion.
"""
return pulumi.get(self, "destination")
@destination.setter
def destination(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "destination", value)
@property
@pulumi.getter(name="digestAlgorithm")
def digest_algorithm(self) -> Optional[pulumi.Input[str]]:
"""
Determines the digest algorithm used to digitally sign the SAML assertion and response.
"""
return pulumi.get(self, "digest_algorithm")
@digest_algorithm.setter
def digest_algorithm(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "digest_algorithm", value)
@property
@pulumi.getter(name="enduserNote")
def enduser_note(self) -> Optional[pulumi.Input[str]]:
"""
Application notes for end users.
"""
return pulumi.get(self, "enduser_note")
@enduser_note.setter
def enduser_note(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "enduser_note", value)
@property
@pulumi.getter
def features(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
features enabled. Notice: you can't currently configure provisioning features via the API.
"""
return pulumi.get(self, "features")
@features.setter
def features(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "features", value)
@property
@pulumi.getter
def groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Groups associated with the application.
- `DEPRECATED`: Please replace usage with the `AppGroupAssignments` (or `app.GroupAssignment`) resource.
"""
return pulumi.get(self, "groups")
@groups.setter
def groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "groups", value)
@property
@pulumi.getter(name="hideIos")
def hide_ios(self) -> Optional[pulumi.Input[bool]]:
"""
Do not display application icon on mobile app.
"""
return pulumi.get(self, "hide_ios")
@hide_ios.setter
def hide_ios(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "hide_ios", value)
@property
@pulumi.getter(name="hideWeb")
def hide_web(self) -> Optional[pulumi.Input[bool]]:
"""
Do not display application icon to users
"""
return pulumi.get(self, "hide_web")
@hide_web.setter
def hide_web(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "hide_web", value)
@property
@pulumi.getter(name="honorForceAuthn")
def honor_force_authn(self) -> Optional[pulumi.Input[bool]]:
"""
Prompt user to re-authenticate if SP asks for it.
"""
return pulumi.get(self, "honor_force_authn")
@honor_force_authn.setter
def honor_force_authn(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "honor_force_authn", value)
@property
@pulumi.getter(name="idpIssuer")
def idp_issuer(self) -> Optional[pulumi.Input[str]]:
"""
SAML issuer ID.
"""
return pulumi.get(self, "idp_issuer")
@idp_issuer.setter
def idp_issuer(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "idp_issuer", value)
@property
@pulumi.getter(name="inlineHookId")
def inline_hook_id(self) -> Optional[pulumi.Input[str]]:
"""
Saml Inline Hook associated with the application.
"""
return pulumi.get(self, "inline_hook_id")
@inline_hook_id.setter
def inline_hook_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "inline_hook_id", value)
@property
@pulumi.getter(name="keyName")
def key_name(self) -> Optional[pulumi.Input[str]]:
"""
Certificate name. This modulates the rotation of keys. New name == new key. Required to be set with `key_years_valid`.
"""
return pulumi.get(self, "key_name")
@key_name.setter
def key_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key_name", value)
@property
@pulumi.getter(name="keyYearsValid")
def key_years_valid(self) -> Optional[pulumi.Input[int]]:
"""
Number of years the certificate is valid (2 - 10 years).
"""
return pulumi.get(self, "key_years_valid")
@key_years_valid.setter
def key_years_valid(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "key_years_valid", value)
    @property
    @pulumi.getter
    def logo(self) -> Optional[pulumi.Input[str]]:
        """
        Local file path to the logo. The file must be in PNG, JPG, or GIF format, and less than 1 MB in size.
        """
        return pulumi.get(self, "logo")

    @logo.setter
    def logo(self, value: Optional[pulumi.Input[str]]):
        """Set logo."""
        pulumi.set(self, "logo", value)
    @property
    @pulumi.getter(name="preconfiguredApp")
    def preconfigured_app(self) -> Optional[pulumi.Input[str]]:
        """
        Name of application from the Okta Integration Network; if not included a custom app will be created.
        """
        return pulumi.get(self, "preconfigured_app")

    @preconfigured_app.setter
    def preconfigured_app(self, value: Optional[pulumi.Input[str]]):
        """Set preconfigured_app."""
        pulumi.set(self, "preconfigured_app", value)
    @property
    @pulumi.getter
    def recipient(self) -> Optional[pulumi.Input[str]]:
        """
        The location where the app may present the SAML assertion.
        """
        return pulumi.get(self, "recipient")

    @recipient.setter
    def recipient(self, value: Optional[pulumi.Input[str]]):
        """Set recipient."""
        pulumi.set(self, "recipient", value)
    @property
    @pulumi.getter(name="requestCompressed")
    def request_compressed(self) -> Optional[pulumi.Input[bool]]:
        """
        Denotes whether the request is compressed or not.
        """
        return pulumi.get(self, "request_compressed")

    @request_compressed.setter
    def request_compressed(self, value: Optional[pulumi.Input[bool]]):
        """Set request_compressed."""
        pulumi.set(self, "request_compressed", value)
    @property
    @pulumi.getter(name="responseSigned")
    def response_signed(self) -> Optional[pulumi.Input[bool]]:
        """
        Determines whether the SAML auth response message is digitally signed.
        """
        return pulumi.get(self, "response_signed")

    @response_signed.setter
    def response_signed(self, value: Optional[pulumi.Input[bool]]):
        """Set response_signed."""
        pulumi.set(self, "response_signed", value)
    @property
    @pulumi.getter(name="samlVersion")
    def saml_version(self) -> Optional[pulumi.Input[str]]:
        """
        SAML version for the app's sign-on mode. Valid values are: `"2.0"` or `"1.1"`. Default is `"2.0"`.
        """
        return pulumi.get(self, "saml_version")

    @saml_version.setter
    def saml_version(self, value: Optional[pulumi.Input[str]]):
        """Set saml_version."""
        pulumi.set(self, "saml_version", value)
    @property
    @pulumi.getter(name="signatureAlgorithm")
    def signature_algorithm(self) -> Optional[pulumi.Input[str]]:
        """
        Signature algorithm used to digitally sign the assertion and response.
        """
        return pulumi.get(self, "signature_algorithm")

    @signature_algorithm.setter
    def signature_algorithm(self, value: Optional[pulumi.Input[str]]):
        """Set signature_algorithm."""
        pulumi.set(self, "signature_algorithm", value)
    @property
    @pulumi.getter(name="singleLogoutCertificate")
    def single_logout_certificate(self) -> Optional[pulumi.Input[str]]:
        """
        x509 encoded certificate that the Service Provider uses to sign Single Logout requests.
        Note: should be provided without `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----`, see [official documentation](https://developer.okta.com/docs/reference/api/apps/#service-provider-certificate).
        """
        return pulumi.get(self, "single_logout_certificate")

    @single_logout_certificate.setter
    def single_logout_certificate(self, value: Optional[pulumi.Input[str]]):
        """Set single_logout_certificate."""
        pulumi.set(self, "single_logout_certificate", value)
    @property
    @pulumi.getter(name="singleLogoutIssuer")
    def single_logout_issuer(self) -> Optional[pulumi.Input[str]]:
        """
        The issuer of the Service Provider that generates the Single Logout request.
        """
        return pulumi.get(self, "single_logout_issuer")

    @single_logout_issuer.setter
    def single_logout_issuer(self, value: Optional[pulumi.Input[str]]):
        """Set single_logout_issuer."""
        pulumi.set(self, "single_logout_issuer", value)
    @property
    @pulumi.getter(name="singleLogoutUrl")
    def single_logout_url(self) -> Optional[pulumi.Input[str]]:
        """
        The location where the logout response is sent.
        """
        return pulumi.get(self, "single_logout_url")

    @single_logout_url.setter
    def single_logout_url(self, value: Optional[pulumi.Input[str]]):
        """Set single_logout_url."""
        pulumi.set(self, "single_logout_url", value)
    @property
    @pulumi.getter(name="skipGroups")
    def skip_groups(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicator that allows the app to skip `groups` sync (it can also be provided during import). Default is `false`.
        """
        return pulumi.get(self, "skip_groups")

    @skip_groups.setter
    def skip_groups(self, value: Optional[pulumi.Input[bool]]):
        """Set skip_groups."""
        pulumi.set(self, "skip_groups", value)
    @property
    @pulumi.getter(name="skipUsers")
    def skip_users(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicator that allows the app to skip `users` sync (it can also be provided during import). Default is `false`.
        """
        return pulumi.get(self, "skip_users")

    @skip_users.setter
    def skip_users(self, value: Optional[pulumi.Input[bool]]):
        """Set skip_users."""
        pulumi.set(self, "skip_users", value)
    @property
    @pulumi.getter(name="spIssuer")
    def sp_issuer(self) -> Optional[pulumi.Input[str]]:
        """
        SAML service provider issuer.
        """
        return pulumi.get(self, "sp_issuer")

    @sp_issuer.setter
    def sp_issuer(self, value: Optional[pulumi.Input[str]]):
        """Set sp_issuer."""
        pulumi.set(self, "sp_issuer", value)
    @property
    @pulumi.getter(name="ssoUrl")
    def sso_url(self) -> Optional[pulumi.Input[str]]:
        """
        Single Sign-on Url.
        """
        return pulumi.get(self, "sso_url")

    @sso_url.setter
    def sso_url(self, value: Optional[pulumi.Input[str]]):
        """Set sso_url."""
        pulumi.set(self, "sso_url", value)
    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        """
        Status of application.
        """
        return pulumi.get(self, "status")

    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        """Set status."""
        pulumi.set(self, "status", value)
    @property
    @pulumi.getter(name="subjectNameIdFormat")
    def subject_name_id_format(self) -> Optional[pulumi.Input[str]]:
        """
        Identifies the SAML processing rules.
        """
        return pulumi.get(self, "subject_name_id_format")

    @subject_name_id_format.setter
    def subject_name_id_format(self, value: Optional[pulumi.Input[str]]):
        """Set subject_name_id_format."""
        pulumi.set(self, "subject_name_id_format", value)
    @property
    @pulumi.getter(name="subjectNameIdTemplate")
    def subject_name_id_template(self) -> Optional[pulumi.Input[str]]:
        """
        Template for app user's username when a user is assigned to the app.
        """
        return pulumi.get(self, "subject_name_id_template")

    @subject_name_id_template.setter
    def subject_name_id_template(self, value: Optional[pulumi.Input[str]]):
        """Set subject_name_id_template."""
        pulumi.set(self, "subject_name_id_template", value)
    @property
    @pulumi.getter(name="userNameTemplate")
    def user_name_template(self) -> Optional[pulumi.Input[str]]:
        """
        Username template.
        """
        return pulumi.get(self, "user_name_template")

    @user_name_template.setter
    def user_name_template(self, value: Optional[pulumi.Input[str]]):
        """Set user_name_template."""
        pulumi.set(self, "user_name_template", value)
    @property
    @pulumi.getter(name="userNameTemplateSuffix")
    def user_name_template_suffix(self) -> Optional[pulumi.Input[str]]:
        """
        Username template suffix.
        """
        return pulumi.get(self, "user_name_template_suffix")

    @user_name_template_suffix.setter
    def user_name_template_suffix(self, value: Optional[pulumi.Input[str]]):
        """Set user_name_template_suffix."""
        pulumi.set(self, "user_name_template_suffix", value)
    @property
    @pulumi.getter(name="userNameTemplateType")
    def user_name_template_type(self) -> Optional[pulumi.Input[str]]:
        """
        Username template type.
        """
        return pulumi.get(self, "user_name_template_type")

    @user_name_template_type.setter
    def user_name_template_type(self, value: Optional[pulumi.Input[str]]):
        """Set user_name_template_type."""
        pulumi.set(self, "user_name_template_type", value)
    @property
    @pulumi.getter
    def users(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SamlUserArgs']]]]:
        """
        Users associated with the application.
        - `DEPRECATED`: Please replace usage with the `app.User` resource.
        """
        return pulumi.get(self, "users")

    @users.setter
    def users(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SamlUserArgs']]]]):
        """Set users (deprecated input; prefer the `app.User` resource)."""
        pulumi.set(self, "users", value)
@pulumi.input_type
class _SamlState:
def __init__(__self__, *,
accessibility_error_redirect_url: Optional[pulumi.Input[str]] = None,
accessibility_login_redirect_url: Optional[pulumi.Input[str]] = None,
accessibility_self_service: Optional[pulumi.Input[bool]] = None,
acs_endpoints: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
admin_note: Optional[pulumi.Input[str]] = None,
app_links_json: Optional[pulumi.Input[str]] = None,
app_settings_json: Optional[pulumi.Input[str]] = None,
assertion_signed: Optional[pulumi.Input[bool]] = None,
attribute_statements: Optional[pulumi.Input[Sequence[pulumi.Input['SamlAttributeStatementArgs']]]] = None,
audience: Optional[pulumi.Input[str]] = None,
authn_context_class_ref: Optional[pulumi.Input[str]] = None,
auto_submit_toolbar: Optional[pulumi.Input[bool]] = None,
certificate: Optional[pulumi.Input[str]] = None,
default_relay_state: Optional[pulumi.Input[str]] = None,
destination: Optional[pulumi.Input[str]] = None,
digest_algorithm: Optional[pulumi.Input[str]] = None,
enduser_note: Optional[pulumi.Input[str]] = None,
entity_key: Optional[pulumi.Input[str]] = None,
entity_url: Optional[pulumi.Input[str]] = None,
features: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
hide_ios: Optional[pulumi.Input[bool]] = None,
hide_web: Optional[pulumi.Input[bool]] = None,
honor_force_authn: Optional[pulumi.Input[bool]] = None,
http_post_binding: Optional[pulumi.Input[str]] = None,
http_redirect_binding: Optional[pulumi.Input[str]] = None,
idp_issuer: Optional[pulumi.Input[str]] = None,
inline_hook_id: Optional[pulumi.Input[str]] = None,
key_id: Optional[pulumi.Input[str]] = None,
key_name: Optional[pulumi.Input[str]] = None,
key_years_valid: Optional[pulumi.Input[int]] = None,
label: Optional[pulumi.Input[str]] = None,
logo: Optional[pulumi.Input[str]] = None,
logo_url: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[str]] = None,
metadata_url: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
preconfigured_app: Optional[pulumi.Input[str]] = None,
recipient: Optional[pulumi.Input[str]] = None,
request_compressed: Optional[pulumi.Input[bool]] = None,
response_signed: Optional[pulumi.Input[bool]] = None,
saml_version: Optional[pulumi.Input[str]] = None,
sign_on_mode: Optional[pulumi.Input[str]] = None,
signature_algorithm: Optional[pulumi.Input[str]] = None,
single_logout_certificate: Optional[pulumi.Input[str]] = None,
single_logout_issuer: Optional[pulumi.Input[str]] = None,
single_logout_url: Optional[pulumi.Input[str]] = None,
skip_groups: Optional[pulumi.Input[bool]] = None,
skip_users: Optional[pulumi.Input[bool]] = None,
sp_issuer: Optional[pulumi.Input[str]] = None,
sso_url: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
subject_name_id_format: Optional[pulumi.Input[str]] = None,
subject_name_id_template: Optional[pulumi.Input[str]] = None,
user_name_template: Optional[pulumi.Input[str]] = None,
user_name_template_suffix: Optional[pulumi.Input[str]] = None,
user_name_template_type: Optional[pulumi.Input[str]] = None,
users: Optional[pulumi.Input[Sequence[pulumi.Input['SamlUserArgs']]]] = None):
"""
Input properties used for looking up and filtering Saml resources.
:param pulumi.Input[str] accessibility_error_redirect_url: Custom error page URL.
:param pulumi.Input[str] accessibility_login_redirect_url: Custom login page for this application.
:param pulumi.Input[bool] accessibility_self_service: Enable self-service. By default, it is `false`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] acs_endpoints: An array of ACS endpoints. You can configure a maximum of 100 endpoints.
:param pulumi.Input[str] admin_note: Application notes for admins.
:param pulumi.Input[str] app_links_json: Displays specific appLinks for the app. The value for the link should be boolean.
:param pulumi.Input[str] app_settings_json: Application settings in JSON format.
:param pulumi.Input[bool] assertion_signed: Determines whether the SAML assertion is digitally signed.
:param pulumi.Input[Sequence[pulumi.Input['SamlAttributeStatementArgs']]] attribute_statements: List of SAML Attribute statements.
:param pulumi.Input[str] audience: Audience restriction.
:param pulumi.Input[str] authn_context_class_ref: Identifies the SAML authentication context class for the assertion’s authentication statement.
:param pulumi.Input[bool] auto_submit_toolbar: Display auto submit toolbar.
:param pulumi.Input[str] certificate: The raw signing certificate.
:param pulumi.Input[str] default_relay_state: Identifies a specific application resource in an IDP initiated SSO scenario.
:param pulumi.Input[str] destination: Identifies the location where the SAML response is intended to be sent inside the SAML assertion.
:param pulumi.Input[str] digest_algorithm: Determines the digest algorithm used to digitally sign the SAML assertion and response.
:param pulumi.Input[str] enduser_note: Application notes for end users.
:param pulumi.Input[str] entity_key: Entity ID, the ID portion of the `entity_url`.
:param pulumi.Input[str] entity_url: Entity URL for instance [http://www.okta.com/exk1fcia6d6EMsf331d8](http://www.okta.com/exk1fcia6d6EMsf331d8).
:param pulumi.Input[Sequence[pulumi.Input[str]]] features: features enabled. Notice: you can't currently configure provisioning features via the API.
:param pulumi.Input[Sequence[pulumi.Input[str]]] groups: Groups associated with the application.
- `DEPRECATED`: Please replace usage with the `AppGroupAssignments` (or `app.GroupAssignment`) resource.
:param pulumi.Input[bool] hide_ios: Do not display application icon on mobile app.
:param pulumi.Input[bool] hide_web: Do not display application icon to users
:param pulumi.Input[bool] honor_force_authn: Prompt user to re-authenticate if SP asks for it.
:param pulumi.Input[str] http_post_binding: `urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Post` location from the SAML metadata.
:param pulumi.Input[str] http_redirect_binding: `urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect` location from the SAML metadata.
:param pulumi.Input[str] idp_issuer: SAML issuer ID.
:param pulumi.Input[str] inline_hook_id: Saml Inline Hook associated with the application.
:param pulumi.Input[str] key_id: Certificate key ID.
:param pulumi.Input[str] key_name: Certificate name. This modulates the rotation of keys. New name == new key. Required to be set with `key_years_valid`.
:param pulumi.Input[int] key_years_valid: Number of years the certificate is valid (2 - 10 years).
:param pulumi.Input[str] label: label of application.
:param pulumi.Input[str] logo: Local file path to the logo. The file must be in PNG, JPG, or GIF format, and less than 1 MB in size.
:param pulumi.Input[str] logo_url: Direct link of application logo.
:param pulumi.Input[str] metadata: The raw SAML metadata in XML.
:param pulumi.Input[str] metadata_url: SAML xml metadata URL.
:param pulumi.Input[str] name: The name of the attribute statement.
:param pulumi.Input[str] preconfigured_app: name of application from the Okta Integration Network, if not included a custom app will be created.
:param pulumi.Input[str] recipient: The location where the app may present the SAML assertion.
:param pulumi.Input[bool] request_compressed: Denotes whether the request is compressed or not.
:param pulumi.Input[bool] response_signed: Determines whether the SAML auth response message is digitally signed.
:param pulumi.Input[str] saml_version: SAML version for the app's sign-on mode. Valid values are: `"2.0"` or `"1.1"`. Default is `"2.0"`.
:param pulumi.Input[str] sign_on_mode: Sign-on mode of application.
:param pulumi.Input[str] signature_algorithm: Signature algorithm used ot digitally sign the assertion and response.
:param pulumi.Input[str] single_logout_certificate: x509 encoded certificate that the Service Provider uses to sign Single Logout requests.
Note: should be provided without `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----`, see [official documentation](https://developer.okta.com/docs/reference/api/apps/#service-provider-certificate).
:param pulumi.Input[str] single_logout_issuer: The issuer of the Service Provider that generates the Single Logout request.
:param pulumi.Input[str] single_logout_url: The location where the logout response is sent.
:param pulumi.Input[bool] skip_groups: Indicator that allows the app to skip `groups` sync (it's also can be provided during import). Default is `false`.
:param pulumi.Input[bool] skip_users: Indicator that allows the app to skip `users` sync (it's also can be provided during import). Default is `false`.
:param pulumi.Input[str] sp_issuer: SAML service provider issuer.
:param pulumi.Input[str] sso_url: Single Sign-on Url.
:param pulumi.Input[str] status: status of application.
:param pulumi.Input[str] subject_name_id_format: Identifies the SAML processing rules.
:param pulumi.Input[str] subject_name_id_template: Template for app user's username when a user is assigned to the app.
:param pulumi.Input[str] user_name_template: Username template.
:param pulumi.Input[str] user_name_template_suffix: Username template suffix.
:param pulumi.Input[str] user_name_template_type: Username template type.
:param pulumi.Input[Sequence[pulumi.Input['SamlUserArgs']]] users: Users associated with the application.
- `DEPRECATED`: Please replace usage with the `app.User` resource.
"""
if accessibility_error_redirect_url is not None:
pulumi.set(__self__, "accessibility_error_redirect_url", accessibility_error_redirect_url)
if accessibility_login_redirect_url is not None:
pulumi.set(__self__, "accessibility_login_redirect_url", accessibility_login_redirect_url)
if accessibility_self_service is not None:
pulumi.set(__self__, "accessibility_self_service", accessibility_self_service)
if acs_endpoints is not None:
pulumi.set(__self__, "acs_endpoints", acs_endpoints)
if admin_note is not None:
pulumi.set(__self__, "admin_note", admin_note)
if app_links_json is not None:
pulumi.set(__self__, "app_links_json", app_links_json)
if app_settings_json is not None:
pulumi.set(__self__, "app_settings_json", app_settings_json)
if assertion_signed is not None:
pulumi.set(__self__, "assertion_signed", assertion_signed)
if attribute_statements is not None:
pulumi.set(__self__, "attribute_statements", attribute_statements)
if audience is not None:
pulumi.set(__self__, "audience", audience)
if authn_context_class_ref is not None:
pulumi.set(__self__, "authn_context_class_ref", authn_context_class_ref)
if auto_submit_toolbar is not None:
pulumi.set(__self__, "auto_submit_toolbar", auto_submit_toolbar)
if certificate is not None:
pulumi.set(__self__, "certificate", certificate)
if default_relay_state is not None:
pulumi.set(__self__, "default_relay_state", default_relay_state)
if destination is not None:
pulumi.set(__self__, "destination", destination)
if digest_algorithm is not None:
pulumi.set(__self__, "digest_algorithm", digest_algorithm)
if enduser_note is not None:
pulumi.set(__self__, "enduser_note", enduser_note)
if entity_key is not None:
pulumi.set(__self__, "entity_key", entity_key)
if entity_url is not None:
pulumi.set(__self__, "entity_url", entity_url)
if features is not None:
pulumi.set(__self__, "features", features)
if groups is not None:
warnings.warn("""The direct configuration of groups in this app resource is deprecated, please ensure you use the resource `okta_app_group_assignments` for this functionality.""", DeprecationWarning)
pulumi.log.warn("""groups is deprecated: The direct configuration of groups in this app resource is deprecated, please ensure you use the resource `okta_app_group_assignments` for this functionality.""")
if groups is not None:
pulumi.set(__self__, "groups", groups)
if hide_ios is not None:
pulumi.set(__self__, "hide_ios", hide_ios)
if hide_web is not None:
pulumi.set(__self__, "hide_web", hide_web)
if honor_force_authn is not None:
pulumi.set(__self__, "honor_force_authn", honor_force_authn)
if http_post_binding is not None:
pulumi.set(__self__, "http_post_binding", http_post_binding)
if http_redirect_binding is not None:
pulumi.set(__self__, "http_redirect_binding", http_redirect_binding)
if idp_issuer is not None:
pulumi.set(__self__, "idp_issuer", idp_issuer)
if inline_hook_id is not None:
pulumi.set(__self__, "inline_hook_id", inline_hook_id)
if key_id is not None:
pulumi.set(__self__, "key_id", key_id)
if key_name is not None:
pulumi.set(__self__, "key_name", key_name)
if key_years_valid is not None:
pulumi.set(__self__, "key_years_valid", key_years_valid)
if label is not None:
pulumi.set(__self__, "label", label)
if logo is not None:
pulumi.set(__self__, "logo", logo)
if logo_url is not None:
pulumi.set(__self__, "logo_url", logo_url)
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if metadata_url is not None:
pulumi.set(__self__, "metadata_url", metadata_url)
if name is not None:
pulumi.set(__self__, "name", name)
if preconfigured_app is not None:
pulumi.set(__self__, "preconfigured_app", preconfigured_app)
if recipient is not None:
pulumi.set(__self__, "recipient", recipient)
if request_compressed is not None:
pulumi.set(__self__, "request_compressed", request_compressed)
if response_signed is not None:
pulumi.set(__self__, "response_signed", response_signed)
if saml_version is not None:
pulumi.set(__self__, "saml_version", saml_version)
if sign_on_mode is not None:
pulumi.set(__self__, "sign_on_mode", sign_on_mode)
if signature_algorithm is not None:
pulumi.set(__self__, "signature_algorithm", signature_algorithm)
if single_logout_certificate is not None:
pulumi.set(__self__, "single_logout_certificate", single_logout_certificate)
if single_logout_issuer is not None:
pulumi.set(__self__, "single_logout_issuer", single_logout_issuer)
if single_logout_url is not None:
pulumi.set(__self__, "single_logout_url", single_logout_url)
if skip_groups is not None:
pulumi.set(__self__, "skip_groups", skip_groups)
if skip_users is not None:
pulumi.set(__self__, "skip_users", skip_users)
if sp_issuer is not None:
pulumi.set(__self__, "sp_issuer", sp_issuer)
if sso_url is not None:
pulumi.set(__self__, "sso_url", sso_url)
if status is not None:
pulumi.set(__self__, "status", status)
if subject_name_id_format is not None:
pulumi.set(__self__, "subject_name_id_format", subject_name_id_format)
if subject_name_id_template is not None:
pulumi.set(__self__, "subject_name_id_template", subject_name_id_template)
if user_name_template is not None:
pulumi.set(__self__, "user_name_template", user_name_template)
if user_name_template_suffix is not None:
pulumi.set(__self__, "user_name_template_suffix", user_name_template_suffix)
if user_name_template_type is not None:
pulumi.set(__self__, "user_name_template_type", user_name_template_type)
if users is not None:
warnings.warn("""The direct configuration of users in this app resource is deprecated, please ensure you use the resource `okta_app_user` for this functionality.""", DeprecationWarning)
pulumi.log.warn("""users is deprecated: The direct configuration of users in this app resource is deprecated, please ensure you use the resource `okta_app_user` for this functionality.""")
if users is not None:
pulumi.set(__self__, "users", users)
    @property
    @pulumi.getter(name="accessibilityErrorRedirectUrl")
    def accessibility_error_redirect_url(self) -> Optional[pulumi.Input[str]]:
        """
        Custom error page URL.
        """
        return pulumi.get(self, "accessibility_error_redirect_url")

    @accessibility_error_redirect_url.setter
    def accessibility_error_redirect_url(self, value: Optional[pulumi.Input[str]]):
        """Set accessibility_error_redirect_url."""
        pulumi.set(self, "accessibility_error_redirect_url", value)
    @property
    @pulumi.getter(name="accessibilityLoginRedirectUrl")
    def accessibility_login_redirect_url(self) -> Optional[pulumi.Input[str]]:
        """
        Custom login page for this application.
        """
        return pulumi.get(self, "accessibility_login_redirect_url")

    @accessibility_login_redirect_url.setter
    def accessibility_login_redirect_url(self, value: Optional[pulumi.Input[str]]):
        """Set accessibility_login_redirect_url."""
        pulumi.set(self, "accessibility_login_redirect_url", value)
    @property
    @pulumi.getter(name="accessibilitySelfService")
    def accessibility_self_service(self) -> Optional[pulumi.Input[bool]]:
        """
        Enable self-service. By default, it is `false`.
        """
        return pulumi.get(self, "accessibility_self_service")

    @accessibility_self_service.setter
    def accessibility_self_service(self, value: Optional[pulumi.Input[bool]]):
        """Set accessibility_self_service."""
        pulumi.set(self, "accessibility_self_service", value)
    @property
    @pulumi.getter(name="acsEndpoints")
    def acs_endpoints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        An array of ACS endpoints. You can configure a maximum of 100 endpoints.
        """
        return pulumi.get(self, "acs_endpoints")

    @acs_endpoints.setter
    def acs_endpoints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        """Set acs_endpoints."""
        pulumi.set(self, "acs_endpoints", value)
    @property
    @pulumi.getter(name="adminNote")
    def admin_note(self) -> Optional[pulumi.Input[str]]:
        """
        Application notes for admins.
        """
        return pulumi.get(self, "admin_note")

    @admin_note.setter
    def admin_note(self, value: Optional[pulumi.Input[str]]):
        """Set admin_note."""
        pulumi.set(self, "admin_note", value)
    @property
    @pulumi.getter(name="appLinksJson")
    def app_links_json(self) -> Optional[pulumi.Input[str]]:
        """
        Displays specific appLinks for the app. The value for the link should be boolean.
        """
        return pulumi.get(self, "app_links_json")

    @app_links_json.setter
    def app_links_json(self, value: Optional[pulumi.Input[str]]):
        """Set app_links_json."""
        pulumi.set(self, "app_links_json", value)
    @property
    @pulumi.getter(name="appSettingsJson")
    def app_settings_json(self) -> Optional[pulumi.Input[str]]:
        """
        Application settings in JSON format.
        """
        return pulumi.get(self, "app_settings_json")

    @app_settings_json.setter
    def app_settings_json(self, value: Optional[pulumi.Input[str]]):
        """Set app_settings_json."""
        pulumi.set(self, "app_settings_json", value)
    @property
    @pulumi.getter(name="assertionSigned")
    def assertion_signed(self) -> Optional[pulumi.Input[bool]]:
        """
        Determines whether the SAML assertion is digitally signed.
        """
        return pulumi.get(self, "assertion_signed")

    @assertion_signed.setter
    def assertion_signed(self, value: Optional[pulumi.Input[bool]]):
        """Set assertion_signed."""
        pulumi.set(self, "assertion_signed", value)
    @property
    @pulumi.getter(name="attributeStatements")
    def attribute_statements(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SamlAttributeStatementArgs']]]]:
        """
        List of SAML Attribute statements.
        """
        return pulumi.get(self, "attribute_statements")

    @attribute_statements.setter
    def attribute_statements(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SamlAttributeStatementArgs']]]]):
        """Set attribute_statements."""
        pulumi.set(self, "attribute_statements", value)
    @property
    @pulumi.getter
    def audience(self) -> Optional[pulumi.Input[str]]:
        """
        Audience restriction.
        """
        return pulumi.get(self, "audience")

    @audience.setter
    def audience(self, value: Optional[pulumi.Input[str]]):
        """Set audience."""
        pulumi.set(self, "audience", value)
    @property
    @pulumi.getter(name="authnContextClassRef")
    def authn_context_class_ref(self) -> Optional[pulumi.Input[str]]:
        """
        Identifies the SAML authentication context class for the assertion’s authentication statement.
        """
        return pulumi.get(self, "authn_context_class_ref")

    @authn_context_class_ref.setter
    def authn_context_class_ref(self, value: Optional[pulumi.Input[str]]):
        """Set authn_context_class_ref."""
        pulumi.set(self, "authn_context_class_ref", value)
    @property
    @pulumi.getter(name="autoSubmitToolbar")
    def auto_submit_toolbar(self) -> Optional[pulumi.Input[bool]]:
        """
        Display auto submit toolbar.
        """
        return pulumi.get(self, "auto_submit_toolbar")

    @auto_submit_toolbar.setter
    def auto_submit_toolbar(self, value: Optional[pulumi.Input[bool]]):
        """Set auto_submit_toolbar."""
        pulumi.set(self, "auto_submit_toolbar", value)
    @property
    @pulumi.getter
    def certificate(self) -> Optional[pulumi.Input[str]]:
        """
        The raw signing certificate.
        """
        return pulumi.get(self, "certificate")

    @certificate.setter
    def certificate(self, value: Optional[pulumi.Input[str]]):
        """Set certificate."""
        pulumi.set(self, "certificate", value)
    @property
    @pulumi.getter(name="defaultRelayState")
    def default_relay_state(self) -> Optional[pulumi.Input[str]]:
        """
        Identifies a specific application resource in an IDP initiated SSO scenario.
        """
        return pulumi.get(self, "default_relay_state")

    @default_relay_state.setter
    def default_relay_state(self, value: Optional[pulumi.Input[str]]):
        """Set default_relay_state."""
        pulumi.set(self, "default_relay_state", value)
    @property
    @pulumi.getter
    def destination(self) -> Optional[pulumi.Input[str]]:
        """
        Identifies the location where the SAML response is intended to be sent inside the SAML assertion.
        """
        return pulumi.get(self, "destination")

    @destination.setter
    def destination(self, value: Optional[pulumi.Input[str]]):
        """Set destination."""
        pulumi.set(self, "destination", value)
    @property
    @pulumi.getter(name="digestAlgorithm")
    def digest_algorithm(self) -> Optional[pulumi.Input[str]]:
        """
        Determines the digest algorithm used to digitally sign the SAML assertion and response.
        """
        return pulumi.get(self, "digest_algorithm")

    @digest_algorithm.setter
    def digest_algorithm(self, value: Optional[pulumi.Input[str]]):
        """Set digest_algorithm."""
        pulumi.set(self, "digest_algorithm", value)
    @property
    @pulumi.getter(name="enduserNote")
    def enduser_note(self) -> Optional[pulumi.Input[str]]:
        """
        Application notes for end users.
        """
        return pulumi.get(self, "enduser_note")

    @enduser_note.setter
    def enduser_note(self, value: Optional[pulumi.Input[str]]):
        """Set enduser_note."""
        pulumi.set(self, "enduser_note", value)
    @property
    @pulumi.getter(name="entityKey")
    def entity_key(self) -> Optional[pulumi.Input[str]]:
        """
        Entity ID, the ID portion of the `entity_url`.
        """
        return pulumi.get(self, "entity_key")

    @entity_key.setter
    def entity_key(self, value: Optional[pulumi.Input[str]]):
        """Set entity_key."""
        pulumi.set(self, "entity_key", value)
    @property
    @pulumi.getter(name="entityUrl")
    def entity_url(self) -> Optional[pulumi.Input[str]]:
        """
        Entity URL for instance [http://www.okta.com/exk1fcia6d6EMsf331d8](http://www.okta.com/exk1fcia6d6EMsf331d8).
        """
        return pulumi.get(self, "entity_url")

    @entity_url.setter
    def entity_url(self, value: Optional[pulumi.Input[str]]):
        """Set entity_url."""
        pulumi.set(self, "entity_url", value)
    @property
    @pulumi.getter
    def features(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Features enabled. Notice: you can't currently configure provisioning features via the API.
        """
        return pulumi.get(self, "features")

    @features.setter
    def features(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        """Set features."""
        pulumi.set(self, "features", value)
    @property
    @pulumi.getter
    def groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Groups associated with the application.
        - `DEPRECATED`: Please replace usage with the `AppGroupAssignments` (or `app.GroupAssignment`) resource.
        """
        return pulumi.get(self, "groups")

    @groups.setter
    def groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        """Set groups (deprecated input; prefer the group-assignment resources)."""
        pulumi.set(self, "groups", value)
    @property
    @pulumi.getter(name="hideIos")
    def hide_ios(self) -> Optional[pulumi.Input[bool]]:
        """
        Do not display application icon on mobile app.
        """
        return pulumi.get(self, "hide_ios")

    @hide_ios.setter
    def hide_ios(self, value: Optional[pulumi.Input[bool]]):
        """Set hide_ios."""
        pulumi.set(self, "hide_ios", value)
    @property
    @pulumi.getter(name="hideWeb")
    def hide_web(self) -> Optional[pulumi.Input[bool]]:
        """
        Do not display application icon to users.
        """
        return pulumi.get(self, "hide_web")

    @hide_web.setter
    def hide_web(self, value: Optional[pulumi.Input[bool]]):
        """Set hide_web."""
        pulumi.set(self, "hide_web", value)
    @property
    @pulumi.getter(name="honorForceAuthn")
    def honor_force_authn(self) -> Optional[pulumi.Input[bool]]:
        """
        Prompt user to re-authenticate if SP asks for it.
        """
        return pulumi.get(self, "honor_force_authn")

    @honor_force_authn.setter
    def honor_force_authn(self, value: Optional[pulumi.Input[bool]]):
        """Set honor_force_authn."""
        pulumi.set(self, "honor_force_authn", value)
    @property
    @pulumi.getter(name="httpPostBinding")
    def http_post_binding(self) -> Optional[pulumi.Input[str]]:
        """
        `urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Post` location from the SAML metadata.
        """
        return pulumi.get(self, "http_post_binding")

    @http_post_binding.setter
    def http_post_binding(self, value: Optional[pulumi.Input[str]]):
        """Set http_post_binding."""
        pulumi.set(self, "http_post_binding", value)
    @property
    @pulumi.getter(name="httpRedirectBinding")
    def http_redirect_binding(self) -> Optional[pulumi.Input[str]]:
        """
        `urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect` location from the SAML metadata.
        """
        return pulumi.get(self, "http_redirect_binding")

    @http_redirect_binding.setter
    def http_redirect_binding(self, value: Optional[pulumi.Input[str]]):
        """Set http_redirect_binding."""
        pulumi.set(self, "http_redirect_binding", value)
@property
@pulumi.getter(name="idpIssuer")
def idp_issuer(self) -> Optional[pulumi.Input[str]]:
"""
SAML issuer ID.
"""
return pulumi.get(self, "idp_issuer")
@idp_issuer.setter
def idp_issuer(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "idp_issuer", value)
@property
@pulumi.getter(name="inlineHookId")
def inline_hook_id(self) -> Optional[pulumi.Input[str]]:
"""
Saml Inline Hook associated with the application.
"""
return pulumi.get(self, "inline_hook_id")
@inline_hook_id.setter
def inline_hook_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "inline_hook_id", value)
@property
@pulumi.getter(name="keyId")
def key_id(self) -> Optional[pulumi.Input[str]]:
"""
Certificate key ID.
"""
return pulumi.get(self, "key_id")
@key_id.setter
def key_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key_id", value)
@property
@pulumi.getter(name="keyName")
def key_name(self) -> Optional[pulumi.Input[str]]:
"""
Certificate name. This modulates the rotation of keys. New name == new key. Required to be set with `key_years_valid`.
"""
return pulumi.get(self, "key_name")
@key_name.setter
def key_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key_name", value)
@property
@pulumi.getter(name="keyYearsValid")
def key_years_valid(self) -> Optional[pulumi.Input[int]]:
"""
Number of years the certificate is valid (2 - 10 years).
"""
return pulumi.get(self, "key_years_valid")
@key_years_valid.setter
def key_years_valid(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "key_years_valid", value)
    @property
    @pulumi.getter
    def label(self) -> Optional[pulumi.Input[str]]:
        """
        The label of the application.
        """
        return pulumi.get(self, "label")

    @label.setter
    def label(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "label", value)

    @property
    @pulumi.getter
    def logo(self) -> Optional[pulumi.Input[str]]:
        """
        Local file path to the logo. The file must be in PNG, JPG, or GIF format, and less than 1 MB in size.
        """
        return pulumi.get(self, "logo")

    @logo.setter
    def logo(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "logo", value)

    @property
    @pulumi.getter(name="logoUrl")
    def logo_url(self) -> Optional[pulumi.Input[str]]:
        """
        Direct link of application logo.
        """
        return pulumi.get(self, "logo_url")

    @logo_url.setter
    def logo_url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "logo_url", value)

    @property
    @pulumi.getter
    def metadata(self) -> Optional[pulumi.Input[str]]:
        """
        The raw SAML metadata in XML.
        """
        return pulumi.get(self, "metadata")

    @metadata.setter
    def metadata(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "metadata", value)

    @property
    @pulumi.getter(name="metadataUrl")
    def metadata_url(self) -> Optional[pulumi.Input[str]]:
        """
        SAML XML metadata URL.
        """
        return pulumi.get(self, "metadata_url")

    @metadata_url.setter
    def metadata_url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "metadata_url", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the attribute statement.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="preconfiguredApp")
    def preconfigured_app(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the application from the Okta Integration Network; if not included, a custom app will be created.
        """
        return pulumi.get(self, "preconfigured_app")

    @preconfigured_app.setter
    def preconfigured_app(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "preconfigured_app", value)

    @property
    @pulumi.getter
    def recipient(self) -> Optional[pulumi.Input[str]]:
        """
        The location where the app may present the SAML assertion.
        """
        return pulumi.get(self, "recipient")

    @recipient.setter
    def recipient(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "recipient", value)

    @property
    @pulumi.getter(name="requestCompressed")
    def request_compressed(self) -> Optional[pulumi.Input[bool]]:
        """
        Denotes whether the request is compressed or not.
        """
        return pulumi.get(self, "request_compressed")

    @request_compressed.setter
    def request_compressed(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "request_compressed", value)

    @property
    @pulumi.getter(name="responseSigned")
    def response_signed(self) -> Optional[pulumi.Input[bool]]:
        """
        Determines whether the SAML auth response message is digitally signed.
        """
        return pulumi.get(self, "response_signed")

    @response_signed.setter
    def response_signed(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "response_signed", value)

    @property
    @pulumi.getter(name="samlVersion")
    def saml_version(self) -> Optional[pulumi.Input[str]]:
        """
        SAML version for the app's sign-on mode. Valid values are: `"2.0"` or `"1.1"`. Default is `"2.0"`.
        """
        return pulumi.get(self, "saml_version")

    @saml_version.setter
    def saml_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "saml_version", value)

    @property
    @pulumi.getter(name="signOnMode")
    def sign_on_mode(self) -> Optional[pulumi.Input[str]]:
        """
        Sign-on mode of application.
        """
        return pulumi.get(self, "sign_on_mode")

    @sign_on_mode.setter
    def sign_on_mode(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sign_on_mode", value)
    @property
    @pulumi.getter(name="signatureAlgorithm")
    def signature_algorithm(self) -> Optional[pulumi.Input[str]]:
        """
        Signature algorithm used to digitally sign the assertion and response.
        """
        return pulumi.get(self, "signature_algorithm")

    @signature_algorithm.setter
    def signature_algorithm(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "signature_algorithm", value)

    @property
    @pulumi.getter(name="singleLogoutCertificate")
    def single_logout_certificate(self) -> Optional[pulumi.Input[str]]:
        """
        x509 encoded certificate that the Service Provider uses to sign Single Logout requests.
        Note: should be provided without `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----`, see [official documentation](https://developer.okta.com/docs/reference/api/apps/#service-provider-certificate).
        """
        return pulumi.get(self, "single_logout_certificate")

    @single_logout_certificate.setter
    def single_logout_certificate(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "single_logout_certificate", value)

    @property
    @pulumi.getter(name="singleLogoutIssuer")
    def single_logout_issuer(self) -> Optional[pulumi.Input[str]]:
        """
        The issuer of the Service Provider that generates the Single Logout request.
        """
        return pulumi.get(self, "single_logout_issuer")

    @single_logout_issuer.setter
    def single_logout_issuer(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "single_logout_issuer", value)

    @property
    @pulumi.getter(name="singleLogoutUrl")
    def single_logout_url(self) -> Optional[pulumi.Input[str]]:
        """
        The location where the logout response is sent.
        """
        return pulumi.get(self, "single_logout_url")

    @single_logout_url.setter
    def single_logout_url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "single_logout_url", value)

    @property
    @pulumi.getter(name="skipGroups")
    def skip_groups(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicator that allows the app to skip `groups` sync (it can also be provided during import). Default is `false`.
        """
        return pulumi.get(self, "skip_groups")

    @skip_groups.setter
    def skip_groups(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "skip_groups", value)

    @property
    @pulumi.getter(name="skipUsers")
    def skip_users(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicator that allows the app to skip `users` sync (it can also be provided during import). Default is `false`.
        """
        return pulumi.get(self, "skip_users")

    @skip_users.setter
    def skip_users(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "skip_users", value)

    @property
    @pulumi.getter(name="spIssuer")
    def sp_issuer(self) -> Optional[pulumi.Input[str]]:
        """
        SAML service provider issuer.
        """
        return pulumi.get(self, "sp_issuer")

    @sp_issuer.setter
    def sp_issuer(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sp_issuer", value)

    @property
    @pulumi.getter(name="ssoUrl")
    def sso_url(self) -> Optional[pulumi.Input[str]]:
        """
        Single Sign-on Url.
        """
        return pulumi.get(self, "sso_url")

    @sso_url.setter
    def sso_url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sso_url", value)

    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        """
        Status of the application.
        """
        return pulumi.get(self, "status")

    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)

    @property
    @pulumi.getter(name="subjectNameIdFormat")
    def subject_name_id_format(self) -> Optional[pulumi.Input[str]]:
        """
        Identifies the SAML processing rules.
        """
        return pulumi.get(self, "subject_name_id_format")

    @subject_name_id_format.setter
    def subject_name_id_format(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subject_name_id_format", value)

    @property
    @pulumi.getter(name="subjectNameIdTemplate")
    def subject_name_id_template(self) -> Optional[pulumi.Input[str]]:
        """
        Template for app user's username when a user is assigned to the app.
        """
        return pulumi.get(self, "subject_name_id_template")

    @subject_name_id_template.setter
    def subject_name_id_template(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subject_name_id_template", value)

    @property
    @pulumi.getter(name="userNameTemplate")
    def user_name_template(self) -> Optional[pulumi.Input[str]]:
        """
        Username template.
        """
        return pulumi.get(self, "user_name_template")

    @user_name_template.setter
    def user_name_template(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_name_template", value)

    @property
    @pulumi.getter(name="userNameTemplateSuffix")
    def user_name_template_suffix(self) -> Optional[pulumi.Input[str]]:
        """
        Username template suffix.
        """
        return pulumi.get(self, "user_name_template_suffix")

    @user_name_template_suffix.setter
    def user_name_template_suffix(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_name_template_suffix", value)

    @property
    @pulumi.getter(name="userNameTemplateType")
    def user_name_template_type(self) -> Optional[pulumi.Input[str]]:
        """
        Username template type.
        """
        return pulumi.get(self, "user_name_template_type")

    @user_name_template_type.setter
    def user_name_template_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_name_template_type", value)

    @property
    @pulumi.getter
    def users(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SamlUserArgs']]]]:
        """
        Users associated with the application.
        - `DEPRECATED`: Please replace usage with the `app.User` resource.
        """
        return pulumi.get(self, "users")

    @users.setter
    def users(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SamlUserArgs']]]]):
        pulumi.set(self, "users", value)
class Saml(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 accessibility_error_redirect_url: Optional[pulumi.Input[str]] = None,
                 accessibility_login_redirect_url: Optional[pulumi.Input[str]] = None,
                 accessibility_self_service: Optional[pulumi.Input[bool]] = None,
                 acs_endpoints: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 admin_note: Optional[pulumi.Input[str]] = None,
                 app_links_json: Optional[pulumi.Input[str]] = None,
                 app_settings_json: Optional[pulumi.Input[str]] = None,
                 assertion_signed: Optional[pulumi.Input[bool]] = None,
                 attribute_statements: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SamlAttributeStatementArgs']]]]] = None,
                 audience: Optional[pulumi.Input[str]] = None,
                 authn_context_class_ref: Optional[pulumi.Input[str]] = None,
                 auto_submit_toolbar: Optional[pulumi.Input[bool]] = None,
                 default_relay_state: Optional[pulumi.Input[str]] = None,
                 destination: Optional[pulumi.Input[str]] = None,
                 digest_algorithm: Optional[pulumi.Input[str]] = None,
                 enduser_note: Optional[pulumi.Input[str]] = None,
                 features: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 hide_ios: Optional[pulumi.Input[bool]] = None,
                 hide_web: Optional[pulumi.Input[bool]] = None,
                 honor_force_authn: Optional[pulumi.Input[bool]] = None,
                 idp_issuer: Optional[pulumi.Input[str]] = None,
                 inline_hook_id: Optional[pulumi.Input[str]] = None,
                 key_name: Optional[pulumi.Input[str]] = None,
                 key_years_valid: Optional[pulumi.Input[int]] = None,
                 label: Optional[pulumi.Input[str]] = None,
                 logo: Optional[pulumi.Input[str]] = None,
                 preconfigured_app: Optional[pulumi.Input[str]] = None,
                 recipient: Optional[pulumi.Input[str]] = None,
                 request_compressed: Optional[pulumi.Input[bool]] = None,
                 response_signed: Optional[pulumi.Input[bool]] = None,
                 saml_version: Optional[pulumi.Input[str]] = None,
                 signature_algorithm: Optional[pulumi.Input[str]] = None,
                 single_logout_certificate: Optional[pulumi.Input[str]] = None,
                 single_logout_issuer: Optional[pulumi.Input[str]] = None,
                 single_logout_url: Optional[pulumi.Input[str]] = None,
                 skip_groups: Optional[pulumi.Input[bool]] = None,
                 skip_users: Optional[pulumi.Input[bool]] = None,
                 sp_issuer: Optional[pulumi.Input[str]] = None,
                 sso_url: Optional[pulumi.Input[str]] = None,
                 status: Optional[pulumi.Input[str]] = None,
                 subject_name_id_format: Optional[pulumi.Input[str]] = None,
                 subject_name_id_template: Optional[pulumi.Input[str]] = None,
                 user_name_template: Optional[pulumi.Input[str]] = None,
                 user_name_template_suffix: Optional[pulumi.Input[str]] = None,
                 user_name_template_type: Optional[pulumi.Input[str]] = None,
                 users: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SamlUserArgs']]]]] = None,
                 __props__=None):
        """
        Creates a SAML Application.
        This resource allows you to create and configure a SAML Application.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_okta as okta
        example = okta.app.Saml("example",
            attribute_statements=[okta.app.SamlAttributeStatementArgs(
                filter_type="REGEX",
                filter_value=".*",
                name="groups",
                type="GROUP",
            )],
            audience="http://example.com/audience",
            authn_context_class_ref="urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport",
            destination="http://example.com",
            digest_algorithm="SHA256",
            honor_force_authn=False,
            label="example",
            recipient="http://example.com",
            response_signed=True,
            signature_algorithm="RSA_SHA256",
            sso_url="http://example.com",
            subject_name_id_format="urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress",
            subject_name_id_template=user["userName"])
        ```
        ### With inline hook
        ```python
        import pulumi
        import pulumi_okta as okta
        test_hook = okta.inline.Hook("testHook",
            status="ACTIVE",
            type="com.okta.saml.tokens.transform",
            version="1.0.2",
            channel={
                "type": "HTTP",
                "version": "1.0.0",
                "uri": "https://example.com/test1",
                "method": "POST",
            },
            auth={
                "key": "Authorization",
                "type": "HEADER",
                "value": "secret",
            })
        test_saml = okta.app.Saml("testSaml",
            label="testAcc_replace_with_uuid",
            sso_url="http://google.com",
            recipient="http://here.com",
            destination="http://its-about-the-journey.com",
            audience="http://audience.com",
            subject_name_id_template=user["userName"],
            subject_name_id_format="urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress",
            response_signed=True,
            signature_algorithm="RSA_SHA256",
            digest_algorithm="SHA256",
            honor_force_authn=False,
            authn_context_class_ref="urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport",
            inline_hook_id=test_hook.id,
            attribute_statements=[okta.app.SamlAttributeStatementArgs(
                type="GROUP",
                name="groups",
                filter_type="REGEX",
                filter_value=".*",
            )],
            opts=pulumi.ResourceOptions(depends_on=[test_hook]))
        ```
        ### Pre-configured app with SAML 1.1 sign-on mode
        ```python
        import pulumi
        import pulumi_okta as okta
        test = okta.app.Saml("test",
            app_settings_json=\"\"\"{
          "groupFilter": "app1.*",
          "siteURL": "http://www.okta.com"
        }
        \"\"\",
            label="SharePoint (On-Premise)",
            preconfigured_app="sharepoint_onpremise",
            saml_version="1.1",
            status="ACTIVE",
            user_name_template=source["login"],
            user_name_template_type="BUILT_IN")
        ```
        ### Pre-configured app with SAML 1.1 sign-on mode, `app_settings_json` and `app_links_json`
        ```python
        import pulumi
        import pulumi_okta as okta
        office365 = okta.app.Saml("office365",
            app_links_json=\"\"\" {
            "calendar": false,
            "crm": false,
            "delve": false,
            "excel": false,
            "forms": false,
            "mail": false,
            "newsfeed": false,
            "onedrive": false,
            "people": false,
            "planner": false,
            "powerbi": false,
            "powerpoint": false,
            "sites": false,
            "sway": false,
            "tasks": false,
            "teams": false,
            "video": false,
            "word": false,
            "yammer": false,
            "login": true
          }
        \"\"\",
            app_settings_json=\"\"\" {
             "wsFedConfigureType": "AUTO",
             "windowsTransportEnabled": false,
             "domain": "okta.com",
             "msftTenant": "okta",
             "domains": [],
             "requireAdminConsent": false
          }
        \"\"\",
            label="Microsoft Office 365",
            preconfigured_app="office365",
            saml_version="1.1",
            status="ACTIVE")
        ```
        ## Import
        A SAML App can be imported via the Okta ID.
        ```sh
         $ pulumi import okta:app/saml:Saml example <app id>
        ```
        It's also possible to import an app without groups and/or users. In this case the ID may look like this
        ```sh
         $ pulumi import okta:app/saml:Saml example <app id>/skip_users
        ```
        ```sh
         $ pulumi import okta:app/saml:Saml example <app id>/skip_users/skip_groups
        ```
        ```sh
         $ pulumi import okta:app/saml:Saml example <app id>/skip_groups
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] accessibility_error_redirect_url: Custom error page URL.
        :param pulumi.Input[str] accessibility_login_redirect_url: Custom login page for this application.
        :param pulumi.Input[bool] accessibility_self_service: Enable self-service. By default, it is `false`.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] acs_endpoints: An array of ACS endpoints. You can configure a maximum of 100 endpoints.
        :param pulumi.Input[str] admin_note: Application notes for admins.
        :param pulumi.Input[str] app_links_json: Displays specific appLinks for the app. The value for the link should be boolean.
        :param pulumi.Input[str] app_settings_json: Application settings in JSON format.
        :param pulumi.Input[bool] assertion_signed: Determines whether the SAML assertion is digitally signed.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SamlAttributeStatementArgs']]]] attribute_statements: List of SAML Attribute statements.
        :param pulumi.Input[str] audience: Audience restriction.
        :param pulumi.Input[str] authn_context_class_ref: Identifies the SAML authentication context class for the assertion’s authentication statement.
        :param pulumi.Input[bool] auto_submit_toolbar: Display auto submit toolbar.
        :param pulumi.Input[str] default_relay_state: Identifies a specific application resource in an IDP initiated SSO scenario.
        :param pulumi.Input[str] destination: Identifies the location where the SAML response is intended to be sent inside the SAML assertion.
        :param pulumi.Input[str] digest_algorithm: Determines the digest algorithm used to digitally sign the SAML assertion and response.
        :param pulumi.Input[str] enduser_note: Application notes for end users.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] features: features enabled. Notice: you can't currently configure provisioning features via the API.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] groups: Groups associated with the application.
               - `DEPRECATED`: Please replace usage with the `AppGroupAssignments` (or `app.GroupAssignment`) resource.
        :param pulumi.Input[bool] hide_ios: Do not display application icon on mobile app.
        :param pulumi.Input[bool] hide_web: Do not display application icon to users
        :param pulumi.Input[bool] honor_force_authn: Prompt user to re-authenticate if SP asks for it.
        :param pulumi.Input[str] idp_issuer: SAML issuer ID.
        :param pulumi.Input[str] inline_hook_id: Saml Inline Hook associated with the application.
        :param pulumi.Input[str] key_name: Certificate name. This modulates the rotation of keys. New name == new key. Required to be set with `key_years_valid`.
        :param pulumi.Input[int] key_years_valid: Number of years the certificate is valid (2 - 10 years).
        :param pulumi.Input[str] label: label of application.
        :param pulumi.Input[str] logo: Local file path to the logo. The file must be in PNG, JPG, or GIF format, and less than 1 MB in size.
        :param pulumi.Input[str] preconfigured_app: name of application from the Okta Integration Network, if not included a custom app will be created.
        :param pulumi.Input[str] recipient: The location where the app may present the SAML assertion.
        :param pulumi.Input[bool] request_compressed: Denotes whether the request is compressed or not.
        :param pulumi.Input[bool] response_signed: Determines whether the SAML auth response message is digitally signed.
        :param pulumi.Input[str] saml_version: SAML version for the app's sign-on mode. Valid values are: `"2.0"` or `"1.1"`. Default is `"2.0"`.
        :param pulumi.Input[str] signature_algorithm: Signature algorithm used to digitally sign the assertion and response.
        :param pulumi.Input[str] single_logout_certificate: x509 encoded certificate that the Service Provider uses to sign Single Logout requests.
               Note: should be provided without `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----`, see [official documentation](https://developer.okta.com/docs/reference/api/apps/#service-provider-certificate).
        :param pulumi.Input[str] single_logout_issuer: The issuer of the Service Provider that generates the Single Logout request.
        :param pulumi.Input[str] single_logout_url: The location where the logout response is sent.
        :param pulumi.Input[bool] skip_groups: Indicator that allows the app to skip `groups` sync (it can also be provided during import). Default is `false`.
        :param pulumi.Input[bool] skip_users: Indicator that allows the app to skip `users` sync (it can also be provided during import). Default is `false`.
        :param pulumi.Input[str] sp_issuer: SAML service provider issuer.
        :param pulumi.Input[str] sso_url: Single Sign-on Url.
        :param pulumi.Input[str] status: status of application.
        :param pulumi.Input[str] subject_name_id_format: Identifies the SAML processing rules.
        :param pulumi.Input[str] subject_name_id_template: Template for app user's username when a user is assigned to the app.
        :param pulumi.Input[str] user_name_template: Username template.
        :param pulumi.Input[str] user_name_template_suffix: Username template suffix.
        :param pulumi.Input[str] user_name_template_type: Username template type.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SamlUserArgs']]]] users: Users associated with the application.
               - `DEPRECATED`: Please replace usage with the `app.User` resource.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: SamlArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Creates a SAML Application.
        This resource allows you to create and configure a SAML Application.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_okta as okta
        example = okta.app.Saml("example",
            attribute_statements=[okta.app.SamlAttributeStatementArgs(
                filter_type="REGEX",
                filter_value=".*",
                name="groups",
                type="GROUP",
            )],
            audience="http://example.com/audience",
            authn_context_class_ref="urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport",
            destination="http://example.com",
            digest_algorithm="SHA256",
            honor_force_authn=False,
            label="example",
            recipient="http://example.com",
            response_signed=True,
            signature_algorithm="RSA_SHA256",
            sso_url="http://example.com",
            subject_name_id_format="urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress",
            subject_name_id_template=user["userName"])
        ```
        ### With inline hook
        ```python
        import pulumi
        import pulumi_okta as okta
        test_hook = okta.inline.Hook("testHook",
            status="ACTIVE",
            type="com.okta.saml.tokens.transform",
            version="1.0.2",
            channel={
                "type": "HTTP",
                "version": "1.0.0",
                "uri": "https://example.com/test1",
                "method": "POST",
            },
            auth={
                "key": "Authorization",
                "type": "HEADER",
                "value": "secret",
            })
        test_saml = okta.app.Saml("testSaml",
            label="testAcc_replace_with_uuid",
            sso_url="http://google.com",
            recipient="http://here.com",
            destination="http://its-about-the-journey.com",
            audience="http://audience.com",
            subject_name_id_template=user["userName"],
            subject_name_id_format="urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress",
            response_signed=True,
            signature_algorithm="RSA_SHA256",
            digest_algorithm="SHA256",
            honor_force_authn=False,
            authn_context_class_ref="urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport",
            inline_hook_id=test_hook.id,
            attribute_statements=[okta.app.SamlAttributeStatementArgs(
                type="GROUP",
                name="groups",
                filter_type="REGEX",
                filter_value=".*",
            )],
            opts=pulumi.ResourceOptions(depends_on=[test_hook]))
        ```
        ### Pre-configured app with SAML 1.1 sign-on mode
        ```python
        import pulumi
        import pulumi_okta as okta
        test = okta.app.Saml("test",
            app_settings_json=\"\"\"{
          "groupFilter": "app1.*",
          "siteURL": "http://www.okta.com"
        }
        \"\"\",
            label="SharePoint (On-Premise)",
            preconfigured_app="sharepoint_onpremise",
            saml_version="1.1",
            status="ACTIVE",
            user_name_template=source["login"],
            user_name_template_type="BUILT_IN")
        ```
        ### Pre-configured app with SAML 1.1 sign-on mode, `app_settings_json` and `app_links_json`
        ```python
        import pulumi
        import pulumi_okta as okta
        office365 = okta.app.Saml("office365",
            app_links_json=\"\"\" {
            "calendar": false,
            "crm": false,
            "delve": false,
            "excel": false,
            "forms": false,
            "mail": false,
            "newsfeed": false,
            "onedrive": false,
            "people": false,
            "planner": false,
            "powerbi": false,
            "powerpoint": false,
            "sites": false,
            "sway": false,
            "tasks": false,
            "teams": false,
            "video": false,
            "word": false,
            "yammer": false,
            "login": true
          }
        \"\"\",
            app_settings_json=\"\"\" {
             "wsFedConfigureType": "AUTO",
             "windowsTransportEnabled": false,
             "domain": "okta.com",
             "msftTenant": "okta",
             "domains": [],
             "requireAdminConsent": false
          }
        \"\"\",
            label="Microsoft Office 365",
            preconfigured_app="office365",
            saml_version="1.1",
            status="ACTIVE")
        ```
        ## Import
        A SAML App can be imported via the Okta ID.
        ```sh
         $ pulumi import okta:app/saml:Saml example <app id>
        ```
        It's also possible to import an app without groups and/or users. In this case the ID may look like this
        ```sh
         $ pulumi import okta:app/saml:Saml example <app id>/skip_users
        ```
        ```sh
         $ pulumi import okta:app/saml:Saml example <app id>/skip_users/skip_groups
        ```
        ```sh
         $ pulumi import okta:app/saml:Saml example <app id>/skip_groups
        ```
        :param str resource_name: The name of the resource.
        :param SamlArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SamlArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 accessibility_error_redirect_url: Optional[pulumi.Input[str]] = None,
                 accessibility_login_redirect_url: Optional[pulumi.Input[str]] = None,
                 accessibility_self_service: Optional[pulumi.Input[bool]] = None,
                 acs_endpoints: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 admin_note: Optional[pulumi.Input[str]] = None,
                 app_links_json: Optional[pulumi.Input[str]] = None,
                 app_settings_json: Optional[pulumi.Input[str]] = None,
                 assertion_signed: Optional[pulumi.Input[bool]] = None,
                 attribute_statements: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SamlAttributeStatementArgs']]]]] = None,
                 audience: Optional[pulumi.Input[str]] = None,
                 authn_context_class_ref: Optional[pulumi.Input[str]] = None,
                 auto_submit_toolbar: Optional[pulumi.Input[bool]] = None,
                 default_relay_state: Optional[pulumi.Input[str]] = None,
                 destination: Optional[pulumi.Input[str]] = None,
                 digest_algorithm: Optional[pulumi.Input[str]] = None,
                 enduser_note: Optional[pulumi.Input[str]] = None,
                 features: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 hide_ios: Optional[pulumi.Input[bool]] = None,
                 hide_web: Optional[pulumi.Input[bool]] = None,
                 honor_force_authn: Optional[pulumi.Input[bool]] = None,
                 idp_issuer: Optional[pulumi.Input[str]] = None,
                 inline_hook_id: Optional[pulumi.Input[str]] = None,
                 key_name: Optional[pulumi.Input[str]] = None,
                 key_years_valid: Optional[pulumi.Input[int]] = None,
                 label: Optional[pulumi.Input[str]] = None,
                 logo: Optional[pulumi.Input[str]] = None,
                 preconfigured_app: Optional[pulumi.Input[str]] = None,
                 recipient: Optional[pulumi.Input[str]] = None,
                 request_compressed: Optional[pulumi.Input[bool]] = None,
                 response_signed: Optional[pulumi.Input[bool]] = None,
                 saml_version: Optional[pulumi.Input[str]] = None,
                 signature_algorithm: Optional[pulumi.Input[str]] = None,
                 single_logout_certificate: Optional[pulumi.Input[str]] = None,
                 single_logout_issuer: Optional[pulumi.Input[str]] = None,
                 single_logout_url: Optional[pulumi.Input[str]] = None,
                 skip_groups: Optional[pulumi.Input[bool]] = None,
                 skip_users: Optional[pulumi.Input[bool]] = None,
                 sp_issuer: Optional[pulumi.Input[str]] = None,
                 sso_url: Optional[pulumi.Input[str]] = None,
                 status: Optional[pulumi.Input[str]] = None,
                 subject_name_id_format: Optional[pulumi.Input[str]] = None,
                 subject_name_id_template: Optional[pulumi.Input[str]] = None,
                 user_name_template: Optional[pulumi.Input[str]] = None,
                 user_name_template_suffix: Optional[pulumi.Input[str]] = None,
                 user_name_template_type: Optional[pulumi.Input[str]] = None,
                 users: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SamlUserArgs']]]]] = None,
                 __props__=None):
        """
        Shared implementation backing both ``__init__`` overloads: validates
        resource options, assembles the ``SamlArgs`` property bag from keyword
        arguments, emits deprecation warnings for ``groups``/``users``, and
        registers the resource with the Pulumi engine via the base-class
        constructor.
        """
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means an existing resource is being adopted; __props__ is
        # only meaningful in that case, so the bag is built here otherwise.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = SamlArgs.__new__(SamlArgs)

            __props__.__dict__["accessibility_error_redirect_url"] = accessibility_error_redirect_url
            __props__.__dict__["accessibility_login_redirect_url"] = accessibility_login_redirect_url
            __props__.__dict__["accessibility_self_service"] = accessibility_self_service
            __props__.__dict__["acs_endpoints"] = acs_endpoints
            __props__.__dict__["admin_note"] = admin_note
            __props__.__dict__["app_links_json"] = app_links_json
            __props__.__dict__["app_settings_json"] = app_settings_json
            __props__.__dict__["assertion_signed"] = assertion_signed
            __props__.__dict__["attribute_statements"] = attribute_statements
            __props__.__dict__["audience"] = audience
            __props__.__dict__["authn_context_class_ref"] = authn_context_class_ref
            __props__.__dict__["auto_submit_toolbar"] = auto_submit_toolbar
            __props__.__dict__["default_relay_state"] = default_relay_state
            __props__.__dict__["destination"] = destination
            __props__.__dict__["digest_algorithm"] = digest_algorithm
            __props__.__dict__["enduser_note"] = enduser_note
            __props__.__dict__["features"] = features
            # Warn about the deprecated inline `groups` input, but only for a
            # fresh resource (no URN yet), not during engine-driven replays.
            if groups is not None and not opts.urn:
                warnings.warn("""The direct configuration of groups in this app resource is deprecated, please ensure you use the resource `okta_app_group_assignments` for this functionality.""", DeprecationWarning)
                pulumi.log.warn("""groups is deprecated: The direct configuration of groups in this app resource is deprecated, please ensure you use the resource `okta_app_group_assignments` for this functionality.""")
            __props__.__dict__["groups"] = groups
            __props__.__dict__["hide_ios"] = hide_ios
            __props__.__dict__["hide_web"] = hide_web
            __props__.__dict__["honor_force_authn"] = honor_force_authn
            __props__.__dict__["idp_issuer"] = idp_issuer
            __props__.__dict__["inline_hook_id"] = inline_hook_id
            __props__.__dict__["key_name"] = key_name
            __props__.__dict__["key_years_valid"] = key_years_valid
            # `label` is the only input validated as required here.
            if label is None and not opts.urn:
                raise TypeError("Missing required property 'label'")
            __props__.__dict__["label"] = label
            __props__.__dict__["logo"] = logo
            __props__.__dict__["preconfigured_app"] = preconfigured_app
            __props__.__dict__["recipient"] = recipient
            __props__.__dict__["request_compressed"] = request_compressed
            __props__.__dict__["response_signed"] = response_signed
            __props__.__dict__["saml_version"] = saml_version
            __props__.__dict__["signature_algorithm"] = signature_algorithm
            __props__.__dict__["single_logout_certificate"] = single_logout_certificate
            __props__.__dict__["single_logout_issuer"] = single_logout_issuer
            __props__.__dict__["single_logout_url"] = single_logout_url
            __props__.__dict__["skip_groups"] = skip_groups
            __props__.__dict__["skip_users"] = skip_users
            __props__.__dict__["sp_issuer"] = sp_issuer
            __props__.__dict__["sso_url"] = sso_url
            __props__.__dict__["status"] = status
            __props__.__dict__["subject_name_id_format"] = subject_name_id_format
            __props__.__dict__["subject_name_id_template"] = subject_name_id_template
            __props__.__dict__["user_name_template"] = user_name_template
            __props__.__dict__["user_name_template_suffix"] = user_name_template_suffix
            __props__.__dict__["user_name_template_type"] = user_name_template_type
            # Same deprecation treatment as `groups` above.
            if users is not None and not opts.urn:
                warnings.warn("""The direct configuration of users in this app resource is deprecated, please ensure you use the resource `okta_app_user` for this functionality.""", DeprecationWarning)
                pulumi.log.warn("""users is deprecated: The direct configuration of users in this app resource is deprecated, please ensure you use the resource `okta_app_user` for this functionality.""")
            __props__.__dict__["users"] = users
            # Output-only properties: initialized to None, populated by the
            # provider after the resource is created/read.
            __props__.__dict__["certificate"] = None
            __props__.__dict__["entity_key"] = None
            __props__.__dict__["entity_url"] = None
            __props__.__dict__["http_post_binding"] = None
            __props__.__dict__["http_redirect_binding"] = None
            __props__.__dict__["key_id"] = None
            __props__.__dict__["logo_url"] = None
            __props__.__dict__["metadata"] = None
            __props__.__dict__["metadata_url"] = None
            __props__.__dict__["name"] = None
            __props__.__dict__["sign_on_mode"] = None
        super(Saml, __self__).__init__(
            'okta:app/saml:Saml',
            resource_name,
            __props__,
            opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
accessibility_error_redirect_url: Optional[pulumi.Input[str]] = None,
accessibility_login_redirect_url: Optional[pulumi.Input[str]] = None,
accessibility_self_service: Optional[pulumi.Input[bool]] = None,
acs_endpoints: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
admin_note: Optional[pulumi.Input[str]] = None,
app_links_json: Optional[pulumi.Input[str]] = None,
app_settings_json: Optional[pulumi.Input[str]] = None,
assertion_signed: Optional[pulumi.Input[bool]] = None,
attribute_statements: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SamlAttributeStatementArgs']]]]] = None,
audience: Optional[pulumi.Input[str]] = None,
authn_context_class_ref: Optional[pulumi.Input[str]] = None,
auto_submit_toolbar: Optional[pulumi.Input[bool]] = None,
certificate: Optional[pulumi.Input[str]] = None,
default_relay_state: Optional[pulumi.Input[str]] = None,
destination: Optional[pulumi.Input[str]] = None,
digest_algorithm: Optional[pulumi.Input[str]] = None,
enduser_note: Optional[pulumi.Input[str]] = None,
entity_key: Optional[pulumi.Input[str]] = None,
entity_url: Optional[pulumi.Input[str]] = None,
features: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
hide_ios: Optional[pulumi.Input[bool]] = None,
hide_web: Optional[pulumi.Input[bool]] = None,
honor_force_authn: Optional[pulumi.Input[bool]] = None,
http_post_binding: Optional[pulumi.Input[str]] = None,
http_redirect_binding: Optional[pulumi.Input[str]] = None,
idp_issuer: Optional[pulumi.Input[str]] = None,
inline_hook_id: Optional[pulumi.Input[str]] = None,
key_id: Optional[pulumi.Input[str]] = None,
key_name: Optional[pulumi.Input[str]] = None,
key_years_valid: Optional[pulumi.Input[int]] = None,
label: Optional[pulumi.Input[str]] = None,
logo: Optional[pulumi.Input[str]] = None,
logo_url: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[str]] = None,
metadata_url: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
preconfigured_app: Optional[pulumi.Input[str]] = None,
recipient: Optional[pulumi.Input[str]] = None,
request_compressed: Optional[pulumi.Input[bool]] = None,
response_signed: Optional[pulumi.Input[bool]] = None,
saml_version: Optional[pulumi.Input[str]] = None,
sign_on_mode: Optional[pulumi.Input[str]] = None,
signature_algorithm: Optional[pulumi.Input[str]] = None,
single_logout_certificate: Optional[pulumi.Input[str]] = None,
single_logout_issuer: Optional[pulumi.Input[str]] = None,
single_logout_url: Optional[pulumi.Input[str]] = None,
skip_groups: Optional[pulumi.Input[bool]] = None,
skip_users: Optional[pulumi.Input[bool]] = None,
sp_issuer: Optional[pulumi.Input[str]] = None,
sso_url: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
subject_name_id_format: Optional[pulumi.Input[str]] = None,
subject_name_id_template: Optional[pulumi.Input[str]] = None,
user_name_template: Optional[pulumi.Input[str]] = None,
user_name_template_suffix: Optional[pulumi.Input[str]] = None,
user_name_template_type: Optional[pulumi.Input[str]] = None,
users: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SamlUserArgs']]]]] = None) -> 'Saml':
"""
Get an existing Saml resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] accessibility_error_redirect_url: Custom error page URL.
:param pulumi.Input[str] accessibility_login_redirect_url: Custom login page for this application.
:param pulumi.Input[bool] accessibility_self_service: Enable self-service. By default, it is `false`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] acs_endpoints: An array of ACS endpoints. You can configure a maximum of 100 endpoints.
:param pulumi.Input[str] admin_note: Application notes for admins.
:param pulumi.Input[str] app_links_json: Displays specific appLinks for the app. The value for the link should be boolean.
:param pulumi.Input[str] app_settings_json: Application settings in JSON format.
:param pulumi.Input[bool] assertion_signed: Determines whether the SAML assertion is digitally signed.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SamlAttributeStatementArgs']]]] attribute_statements: List of SAML Attribute statements.
:param pulumi.Input[str] audience: Audience restriction.
:param pulumi.Input[str] authn_context_class_ref: Identifies the SAML authentication context class for the assertion’s authentication statement.
:param pulumi.Input[bool] auto_submit_toolbar: Display auto submit toolbar.
:param pulumi.Input[str] certificate: The raw signing certificate.
:param pulumi.Input[str] default_relay_state: Identifies a specific application resource in an IDP initiated SSO scenario.
:param pulumi.Input[str] destination: Identifies the location where the SAML response is intended to be sent inside the SAML assertion.
:param pulumi.Input[str] digest_algorithm: Determines the digest algorithm used to digitally sign the SAML assertion and response.
:param pulumi.Input[str] enduser_note: Application notes for end users.
:param pulumi.Input[str] entity_key: Entity ID, the ID portion of the `entity_url`.
:param pulumi.Input[str] entity_url: Entity URL for instance [http://www.okta.com/exk1fcia6d6EMsf331d8](http://www.okta.com/exk1fcia6d6EMsf331d8).
:param pulumi.Input[Sequence[pulumi.Input[str]]] features: features enabled. Notice: you can't currently configure provisioning features via the API.
:param pulumi.Input[Sequence[pulumi.Input[str]]] groups: Groups associated with the application.
- `DEPRECATED`: Please replace usage with the `AppGroupAssignments` (or `app.GroupAssignment`) resource.
:param pulumi.Input[bool] hide_ios: Do not display application icon on mobile app.
:param pulumi.Input[bool] hide_web: Do not display application icon to users
:param pulumi.Input[bool] honor_force_authn: Prompt user to re-authenticate if SP asks for it.
:param pulumi.Input[str] http_post_binding: `urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Post` location from the SAML metadata.
:param pulumi.Input[str] http_redirect_binding: `urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect` location from the SAML metadata.
:param pulumi.Input[str] idp_issuer: SAML issuer ID.
:param pulumi.Input[str] inline_hook_id: Saml Inline Hook associated with the application.
:param pulumi.Input[str] key_id: Certificate key ID.
:param pulumi.Input[str] key_name: Certificate name. This modulates the rotation of keys. New name == new key. Required to be set with `key_years_valid`.
:param pulumi.Input[int] key_years_valid: Number of years the certificate is valid (2 - 10 years).
:param pulumi.Input[str] label: label of application.
:param pulumi.Input[str] logo: Local file path to the logo. The file must be in PNG, JPG, or GIF format, and less than 1 MB in size.
:param pulumi.Input[str] logo_url: Direct link of application logo.
:param pulumi.Input[str] metadata: The raw SAML metadata in XML.
:param pulumi.Input[str] metadata_url: SAML xml metadata URL.
:param pulumi.Input[str] name: The name of the attribute statement.
:param pulumi.Input[str] preconfigured_app: name of application from the Okta Integration Network, if not included a custom app will be created.
:param pulumi.Input[str] recipient: The location where the app may present the SAML assertion.
:param pulumi.Input[bool] request_compressed: Denotes whether the request is compressed or not.
:param pulumi.Input[bool] response_signed: Determines whether the SAML auth response message is digitally signed.
:param pulumi.Input[str] saml_version: SAML version for the app's sign-on mode. Valid values are: `"2.0"` or `"1.1"`. Default is `"2.0"`.
:param pulumi.Input[str] sign_on_mode: Sign-on mode of application.
:param pulumi.Input[str] signature_algorithm: Signature algorithm used ot digitally sign the assertion and response.
:param pulumi.Input[str] single_logout_certificate: x509 encoded certificate that the Service Provider uses to sign Single Logout requests.
Note: should be provided without `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----`, see [official documentation](https://developer.okta.com/docs/reference/api/apps/#service-provider-certificate).
:param pulumi.Input[str] single_logout_issuer: The issuer of the Service Provider that generates the Single Logout request.
:param pulumi.Input[str] single_logout_url: The location where the logout response is sent.
:param pulumi.Input[bool] skip_groups: Indicator that allows the app to skip `groups` sync (it's also can be provided during import). Default is `false`.
:param pulumi.Input[bool] skip_users: Indicator that allows the app to skip `users` sync (it's also can be provided during import). Default is `false`.
:param pulumi.Input[str] sp_issuer: SAML service provider issuer.
:param pulumi.Input[str] sso_url: Single Sign-on Url.
:param pulumi.Input[str] status: status of application.
:param pulumi.Input[str] subject_name_id_format: Identifies the SAML processing rules.
:param pulumi.Input[str] subject_name_id_template: Template for app user's username when a user is assigned to the app.
:param pulumi.Input[str] user_name_template: Username template.
:param pulumi.Input[str] user_name_template_suffix: Username template suffix.
:param pulumi.Input[str] user_name_template_type: Username template type.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SamlUserArgs']]]] users: Users associated with the application.
- `DEPRECATED`: Please replace usage with the `app.User` resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _SamlState.__new__(_SamlState)
__props__.__dict__["accessibility_error_redirect_url"] = accessibility_error_redirect_url
__props__.__dict__["accessibility_login_redirect_url"] = accessibility_login_redirect_url
__props__.__dict__["accessibility_self_service"] = accessibility_self_service
__props__.__dict__["acs_endpoints"] = acs_endpoints
__props__.__dict__["admin_note"] = admin_note
__props__.__dict__["app_links_json"] = app_links_json
__props__.__dict__["app_settings_json"] = app_settings_json
__props__.__dict__["assertion_signed"] = assertion_signed
__props__.__dict__["attribute_statements"] = attribute_statements
__props__.__dict__["audience"] = audience
__props__.__dict__["authn_context_class_ref"] = authn_context_class_ref
__props__.__dict__["auto_submit_toolbar"] = auto_submit_toolbar
__props__.__dict__["certificate"] = certificate
__props__.__dict__["default_relay_state"] = default_relay_state
__props__.__dict__["destination"] = destination
__props__.__dict__["digest_algorithm"] = digest_algorithm
__props__.__dict__["enduser_note"] = enduser_note
__props__.__dict__["entity_key"] = entity_key
__props__.__dict__["entity_url"] = entity_url
__props__.__dict__["features"] = features
__props__.__dict__["groups"] = groups
__props__.__dict__["hide_ios"] = hide_ios
__props__.__dict__["hide_web"] = hide_web
__props__.__dict__["honor_force_authn"] = honor_force_authn
__props__.__dict__["http_post_binding"] = http_post_binding
__props__.__dict__["http_redirect_binding"] = http_redirect_binding
__props__.__dict__["idp_issuer"] = idp_issuer
__props__.__dict__["inline_hook_id"] = inline_hook_id
__props__.__dict__["key_id"] = key_id
__props__.__dict__["key_name"] = key_name
__props__.__dict__["key_years_valid"] = key_years_valid
__props__.__dict__["label"] = label
__props__.__dict__["logo"] = logo
__props__.__dict__["logo_url"] = logo_url
__props__.__dict__["metadata"] = metadata
__props__.__dict__["metadata_url"] = metadata_url
__props__.__dict__["name"] = name
__props__.__dict__["preconfigured_app"] = preconfigured_app
__props__.__dict__["recipient"] = recipient
__props__.__dict__["request_compressed"] = request_compressed
__props__.__dict__["response_signed"] = response_signed
__props__.__dict__["saml_version"] = saml_version
__props__.__dict__["sign_on_mode"] = sign_on_mode
__props__.__dict__["signature_algorithm"] = signature_algorithm
__props__.__dict__["single_logout_certificate"] = single_logout_certificate
__props__.__dict__["single_logout_issuer"] = single_logout_issuer
__props__.__dict__["single_logout_url"] = single_logout_url
__props__.__dict__["skip_groups"] = skip_groups
__props__.__dict__["skip_users"] = skip_users
__props__.__dict__["sp_issuer"] = sp_issuer
__props__.__dict__["sso_url"] = sso_url
__props__.__dict__["status"] = status
__props__.__dict__["subject_name_id_format"] = subject_name_id_format
__props__.__dict__["subject_name_id_template"] = subject_name_id_template
__props__.__dict__["user_name_template"] = user_name_template
__props__.__dict__["user_name_template_suffix"] = user_name_template_suffix
__props__.__dict__["user_name_template_type"] = user_name_template_type
__props__.__dict__["users"] = users
return Saml(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="accessibilityErrorRedirectUrl")
def accessibility_error_redirect_url(self) -> pulumi.Output[Optional[str]]:
"""
Custom error page URL.
"""
return pulumi.get(self, "accessibility_error_redirect_url")
@property
@pulumi.getter(name="accessibilityLoginRedirectUrl")
def accessibility_login_redirect_url(self) -> pulumi.Output[Optional[str]]:
"""
Custom login page for this application.
"""
return pulumi.get(self, "accessibility_login_redirect_url")
@property
@pulumi.getter(name="accessibilitySelfService")
def accessibility_self_service(self) -> pulumi.Output[Optional[bool]]:
"""
Enable self-service. By default, it is `false`.
"""
return pulumi.get(self, "accessibility_self_service")
@property
@pulumi.getter(name="acsEndpoints")
def acs_endpoints(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
An array of ACS endpoints. You can configure a maximum of 100 endpoints.
"""
return pulumi.get(self, "acs_endpoints")
@property
@pulumi.getter(name="adminNote")
def admin_note(self) -> pulumi.Output[Optional[str]]:
"""
Application notes for admins.
"""
return pulumi.get(self, "admin_note")
@property
@pulumi.getter(name="appLinksJson")
def app_links_json(self) -> pulumi.Output[Optional[str]]:
"""
Displays specific appLinks for the app. The value for the link should be boolean.
"""
return pulumi.get(self, "app_links_json")
@property
@pulumi.getter(name="appSettingsJson")
def app_settings_json(self) -> pulumi.Output[Optional[str]]:
"""
Application settings in JSON format.
"""
return pulumi.get(self, "app_settings_json")
@property
@pulumi.getter(name="assertionSigned")
def assertion_signed(self) -> pulumi.Output[Optional[bool]]:
"""
Determines whether the SAML assertion is digitally signed.
"""
return pulumi.get(self, "assertion_signed")
@property
@pulumi.getter(name="attributeStatements")
def attribute_statements(self) -> pulumi.Output[Optional[Sequence['outputs.SamlAttributeStatement']]]:
"""
List of SAML Attribute statements.
"""
return pulumi.get(self, "attribute_statements")
@property
@pulumi.getter
def audience(self) -> pulumi.Output[Optional[str]]:
"""
Audience restriction.
"""
return pulumi.get(self, "audience")
@property
@pulumi.getter(name="authnContextClassRef")
def authn_context_class_ref(self) -> pulumi.Output[Optional[str]]:
"""
Identifies the SAML authentication context class for the assertion’s authentication statement.
"""
return pulumi.get(self, "authn_context_class_ref")
@property
@pulumi.getter(name="autoSubmitToolbar")
def auto_submit_toolbar(self) -> pulumi.Output[Optional[bool]]:
"""
Display auto submit toolbar.
"""
return pulumi.get(self, "auto_submit_toolbar")
@property
@pulumi.getter
def certificate(self) -> pulumi.Output[str]:
"""
The raw signing certificate.
"""
return pulumi.get(self, "certificate")
@property
@pulumi.getter(name="defaultRelayState")
def default_relay_state(self) -> pulumi.Output[Optional[str]]:
"""
Identifies a specific application resource in an IDP initiated SSO scenario.
"""
return pulumi.get(self, "default_relay_state")
@property
@pulumi.getter
def destination(self) -> pulumi.Output[Optional[str]]:
"""
Identifies the location where the SAML response is intended to be sent inside the SAML assertion.
"""
return pulumi.get(self, "destination")
@property
@pulumi.getter(name="digestAlgorithm")
def digest_algorithm(self) -> pulumi.Output[Optional[str]]:
"""
Determines the digest algorithm used to digitally sign the SAML assertion and response.
"""
return pulumi.get(self, "digest_algorithm")
@property
@pulumi.getter(name="enduserNote")
def enduser_note(self) -> pulumi.Output[Optional[str]]:
"""
Application notes for end users.
"""
return pulumi.get(self, "enduser_note")
@property
@pulumi.getter(name="entityKey")
def entity_key(self) -> pulumi.Output[str]:
"""
Entity ID, the ID portion of the `entity_url`.
"""
return pulumi.get(self, "entity_key")
@property
@pulumi.getter(name="entityUrl")
def entity_url(self) -> pulumi.Output[str]:
"""
Entity URL for instance [http://www.okta.com/exk1fcia6d6EMsf331d8](http://www.okta.com/exk1fcia6d6EMsf331d8).
"""
return pulumi.get(self, "entity_url")
@property
@pulumi.getter
def features(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
features enabled. Notice: you can't currently configure provisioning features via the API.
"""
return pulumi.get(self, "features")
@property
@pulumi.getter
def groups(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Groups associated with the application.
- `DEPRECATED`: Please replace usage with the `AppGroupAssignments` (or `app.GroupAssignment`) resource.
"""
return pulumi.get(self, "groups")
@property
@pulumi.getter(name="hideIos")
def hide_ios(self) -> pulumi.Output[Optional[bool]]:
"""
Do not display application icon on mobile app.
"""
return pulumi.get(self, "hide_ios")
@property
@pulumi.getter(name="hideWeb")
def hide_web(self) -> pulumi.Output[Optional[bool]]:
"""
Do not display application icon to users
"""
return pulumi.get(self, "hide_web")
@property
@pulumi.getter(name="honorForceAuthn")
def honor_force_authn(self) -> pulumi.Output[Optional[bool]]:
"""
Prompt user to re-authenticate if SP asks for it.
"""
return pulumi.get(self, "honor_force_authn")
@property
@pulumi.getter(name="httpPostBinding")
def http_post_binding(self) -> pulumi.Output[str]:
"""
`urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Post` location from the SAML metadata.
"""
return pulumi.get(self, "http_post_binding")
@property
@pulumi.getter(name="httpRedirectBinding")
def http_redirect_binding(self) -> pulumi.Output[str]:
"""
`urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect` location from the SAML metadata.
"""
return pulumi.get(self, "http_redirect_binding")
@property
@pulumi.getter(name="idpIssuer")
def idp_issuer(self) -> pulumi.Output[Optional[str]]:
"""
SAML issuer ID.
"""
return pulumi.get(self, "idp_issuer")
@property
@pulumi.getter(name="inlineHookId")
def inline_hook_id(self) -> pulumi.Output[Optional[str]]:
"""
Saml Inline Hook associated with the application.
"""
return pulumi.get(self, "inline_hook_id")
@property
@pulumi.getter(name="keyId")
def key_id(self) -> pulumi.Output[str]:
"""
Certificate key ID.
"""
return pulumi.get(self, "key_id")
@property
@pulumi.getter(name="keyName")
def key_name(self) -> pulumi.Output[Optional[str]]:
"""
Certificate name. This modulates the rotation of keys. New name == new key. Required to be set with `key_years_valid`.
"""
return pulumi.get(self, "key_name")
@property
@pulumi.getter(name="keyYearsValid")
def key_years_valid(self) -> pulumi.Output[Optional[int]]:
"""
Number of years the certificate is valid (2 - 10 years).
"""
return pulumi.get(self, "key_years_valid")
@property
@pulumi.getter
def label(self) -> pulumi.Output[str]:
"""
label of application.
"""
return pulumi.get(self, "label")
@property
@pulumi.getter
def logo(self) -> pulumi.Output[Optional[str]]:
"""
Local file path to the logo. The file must be in PNG, JPG, or GIF format, and less than 1 MB in size.
"""
return pulumi.get(self, "logo")
@property
@pulumi.getter(name="logoUrl")
def logo_url(self) -> pulumi.Output[str]:
"""
Direct link of application logo.
"""
return pulumi.get(self, "logo_url")
@property
@pulumi.getter
def metadata(self) -> pulumi.Output[str]:
"""
The raw SAML metadata in XML.
"""
return pulumi.get(self, "metadata")
@property
@pulumi.getter(name="metadataUrl")
def metadata_url(self) -> pulumi.Output[str]:
"""
SAML xml metadata URL.
"""
return pulumi.get(self, "metadata_url")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the attribute statement.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="preconfiguredApp")
def preconfigured_app(self) -> pulumi.Output[Optional[str]]:
"""
name of application from the Okta Integration Network, if not included a custom app will be created.
"""
return pulumi.get(self, "preconfigured_app")
@property
@pulumi.getter
def recipient(self) -> pulumi.Output[Optional[str]]:
"""
The location where the app may present the SAML assertion.
"""
return pulumi.get(self, "recipient")
@property
@pulumi.getter(name="requestCompressed")
def request_compressed(self) -> pulumi.Output[Optional[bool]]:
"""
Denotes whether the request is compressed or not.
"""
return pulumi.get(self, "request_compressed")
@property
@pulumi.getter(name="responseSigned")
def response_signed(self) -> pulumi.Output[Optional[bool]]:
"""
Determines whether the SAML auth response message is digitally signed.
"""
return pulumi.get(self, "response_signed")
@property
@pulumi.getter(name="samlVersion")
def saml_version(self) -> pulumi.Output[Optional[str]]:
"""
SAML version for the app's sign-on mode. Valid values are: `"2.0"` or `"1.1"`. Default is `"2.0"`.
"""
return pulumi.get(self, "saml_version")
@property
@pulumi.getter(name="signOnMode")
def sign_on_mode(self) -> pulumi.Output[str]:
"""
Sign-on mode of application.
"""
return pulumi.get(self, "sign_on_mode")
@property
@pulumi.getter(name="signatureAlgorithm")
def signature_algorithm(self) -> pulumi.Output[Optional[str]]:
"""
Signature algorithm used ot digitally sign the assertion and response.
"""
return pulumi.get(self, "signature_algorithm")
@property
@pulumi.getter(name="singleLogoutCertificate")
def single_logout_certificate(self) -> pulumi.Output[Optional[str]]:
"""
x509 encoded certificate that the Service Provider uses to sign Single Logout requests.
Note: should be provided without `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----`, see [official documentation](https://developer.okta.com/docs/reference/api/apps/#service-provider-certificate).
"""
return pulumi.get(self, "single_logout_certificate")
@property
@pulumi.getter(name="singleLogoutIssuer")
def single_logout_issuer(self) -> pulumi.Output[Optional[str]]:
"""
The issuer of the Service Provider that generates the Single Logout request.
"""
return pulumi.get(self, "single_logout_issuer")
@property
@pulumi.getter(name="singleLogoutUrl")
def single_logout_url(self) -> pulumi.Output[Optional[str]]:
"""
The location where the logout response is sent.
"""
return pulumi.get(self, "single_logout_url")
@property
@pulumi.getter(name="skipGroups")
def skip_groups(self) -> pulumi.Output[Optional[bool]]:
"""
Indicator that allows the app to skip `groups` sync (it's also can be provided during import). Default is `false`.
"""
return pulumi.get(self, "skip_groups")
@property
@pulumi.getter(name="skipUsers")
def skip_users(self) -> pulumi.Output[Optional[bool]]:
"""
Indicator that allows the app to skip `users` sync (it's also can be provided during import). Default is `false`.
"""
return pulumi.get(self, "skip_users")
@property
@pulumi.getter(name="spIssuer")
def sp_issuer(self) -> pulumi.Output[Optional[str]]:
"""
SAML service provider issuer.
"""
return pulumi.get(self, "sp_issuer")
@property
@pulumi.getter(name="ssoUrl")
def sso_url(self) -> pulumi.Output[Optional[str]]:
"""
Single Sign-on Url.
"""
return pulumi.get(self, "sso_url")
@property
@pulumi.getter
def status(self) -> pulumi.Output[Optional[str]]:
"""
status of application.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="subjectNameIdFormat")
def subject_name_id_format(self) -> pulumi.Output[Optional[str]]:
"""
Identifies the SAML processing rules.
"""
return pulumi.get(self, "subject_name_id_format")
@property
@pulumi.getter(name="subjectNameIdTemplate")
def subject_name_id_template(self) -> pulumi.Output[Optional[str]]:
"""
Template for app user's username when a user is assigned to the app.
"""
return pulumi.get(self, "subject_name_id_template")
@property
@pulumi.getter(name="userNameTemplate")
def user_name_template(self) -> pulumi.Output[Optional[str]]:
"""
Username template.
"""
return pulumi.get(self, "user_name_template")
@property
@pulumi.getter(name="userNameTemplateSuffix")
def user_name_template_suffix(self) -> pulumi.Output[Optional[str]]:
"""
Username template suffix.
"""
return pulumi.get(self, "user_name_template_suffix")
@property
@pulumi.getter(name="userNameTemplateType")
def user_name_template_type(self) -> pulumi.Output[Optional[str]]:
"""
Username template type.
"""
return pulumi.get(self, "user_name_template_type")
@property
@pulumi.getter
def users(self) -> pulumi.Output[Optional[Sequence['outputs.SamlUser']]]:
"""
Users associated with the application.
- `DEPRECATED`: Please replace usage with the `app.User` resource.
"""
return pulumi.get(self, "users")
| 46.001678
| 222
| 0.651924
| 16,227
| 137,085
| 5.269119
| 0.028594
| 0.095974
| 0.083507
| 0.082337
| 0.973919
| 0.965977
| 0.952048
| 0.943838
| 0.94096
| 0.921745
| 0
| 0.002692
| 0.238531
| 137,085
| 2,979
| 223
| 46.01712
| 0.816403
| 0.308655
| 0
| 0.878378
| 1
| 0.007371
| 0.13257
| 0.035636
| 0
| 0
| 0
| 0
| 0.015971
| 1
| 0.168919
| false
| 0.000614
| 0.0043
| 0
| 0.275799
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
f04b3f2e5b4f3b11cc74bc92b628eefc0e653bde
| 42,572
|
py
|
Python
|
src/beginners/layer1.py
|
lilyychen96/Cubr
|
83e05f612f259bcd93b8f2fb8c7149ddae09d85b
|
[
"MIT"
] | null | null | null |
src/beginners/layer1.py
|
lilyychen96/Cubr
|
83e05f612f259bcd93b8f2fb8c7149ddae09d85b
|
[
"MIT"
] | null | null | null |
src/beginners/layer1.py
|
lilyychen96/Cubr
|
83e05f612f259bcd93b8f2fb8c7149ddae09d85b
|
[
"MIT"
] | null | null | null |
import sys, os
sys.path.insert(0, os.path.abspath('src/cube'))
from moves import execute_moves
from cube import Cube, find_edge, find_corner
from facelet import Color as Cl, Corner as Cn, Edge as Ed
"""
white cross: part 1 of first layer algorithms
"""
def is_white_cross(cube_state):
    """
    Checks if the cube has reached the "white cross" state.

    True when each listed facelet holds its required color: the four
    Down-face edge facelets are Cl.D and the adjacent side facelets
    appear to match their own face colors (Cl.F/Cl.L/Cl.B/Cl.R --
    TODO confirm index layout against the cube module).
    """
    # (facelet index, required color) pairs, tested in the same order as
    # the original and-chain; all() short-circuits identically.
    required = (
        (31, Cl.D),
        (28, Cl.D), (25, Cl.F),
        (30, Cl.D), (43, Cl.L),
        (34, Cl.D), (52, Cl.B),
        (32, Cl.D), (16, Cl.R),
    )
    return all(cube_state[idx] == color for idx, color in required)
def move_DF(cube_obj, loc):
    """
    Returns the moves list to orient and position the DF edge

    :param cube_obj: Cube instance; read via ``get_cb()`` (flat facelet
        color buffer) and via its ``cubies`` attribute (edge -> color
        pair, used only in the error messages).
    :param loc: Ed member naming the slot where the DF edge currently sits.
    :returns: list of move strings (e.g. ``["R2", "D3"]``); ``[]`` when
        the edge is already solved, or when a color assertion failed (the
        except handlers only print, after which control reaches the final
        ``return []``).
    """
    cb = cube_obj.get_cb()
    cubies = cube_obj.cubies
    # One branch per possible location of the DF edge.  Each branch
    # asserts that one of the cubie's two facelets is Cl.D (presumably
    # the Down/white color -- TODO confirm against facelet.Color), then
    # returns the move sequence for whichever orientation was found.
    if loc == Ed.UR:
        try:
            assert((cb[5] == Cl.D) or (cb[10] == Cl.D))
            if (cb[5] == Cl.D):
                return ["R2", "D3"]
            else:  # cb[10] == Cl.D
                return ["R3", "F1"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.UR])
    elif loc == Ed.UF:
        try:
            assert((cb[7] == Cl.D) or (cb[19] == Cl.D))
            if (cb[7] == Cl.D):
                return ["F2"]
            else:  # cb[19] == Cl.D
                return ["U1", "L1", "F3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.UF])
    elif loc == Ed.UL:
        try:
            assert((cb[3] == Cl.D) or (cb[37] == Cl.D))
            if (cb[3] == Cl.D):
                return ["L2", "D1"]
            else:  # cb[37] == Cl.D
                return ["L1", "F3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.UL])
    elif loc == Ed.UB:
        try:
            assert((cb[1] == Cl.D) or (cb[46] == Cl.D))
            if (cb[1] == Cl.D):
                return ["B2", "D2"]
            else:  # cb[46] == Cl.D
                return ["U3", "L1", "F3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.UB])
    elif loc == Ed.DR:
        try:
            assert((cb[32] == Cl.D) or (cb[16] == Cl.D))
            if (cb[32] == Cl.D):
                return ["D3"]
            else:  # cb[16] == Cl.D
                return ["R1", "F1"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.DR])
    elif loc == Ed.DF:
        try:
            assert((cb[28] == Cl.D) or (cb[25] == Cl.D))
            # Already in the DF slot: either flipped (needs the 4-move
            # reinsertion) or correctly oriented (nothing to do).
            if ((cb[28] == Cl.F) and (cb[25] == Cl.D)):
                return ["F2", "U3", "R3", "F1"]
            else:
                assert((cb[28] == Cl.D) and (cb[25] == Cl.F))
                return []
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.DF])
    elif loc == Ed.DL:
        try:
            assert((cb[30] == Cl.D) or (cb[43] == Cl.D))
            if (cb[30] == Cl.D):
                return ["D1"]
            else:  # cb[43] == Cl.D
                return ["L3", "F3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.DL])
    elif loc == Ed.DB:
        try:
            assert((cb[34] == Cl.D) or (cb[52] == Cl.D))
            if (cb[34] == Cl.D):
                return ["D2"]
            else:  # cb[52] == Cl.D
                return ["B1", "R1", "D3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.DB])
    elif loc == Ed.FR:
        try:
            assert((cb[23] == Cl.D) or (cb[12] == Cl.D))
            if (cb[23] == Cl.D):
                return ["R3", "D3"]
            else:  # cb[12] == Cl.D
                return ["F1"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.FR])
    elif loc == Ed.FL:
        try:
            assert((cb[21] == Cl.D) or (cb[41] == Cl.D))
            if (cb[21] == Cl.D):
                return ["L1", "D1"]
            else:  # cb[41] == Cl.D
                return ["F3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.FL])
    elif loc == Ed.BR:
        try:
            assert((cb[14] == Cl.D) or (cb[48] == Cl.D))
            if (cb[14] == Cl.D):
                return ["B3", "D2"]
            else:  # cb[48] == Cl.D
                return ["R1", "D3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.BR])
    else:  # loc == Ed.BL
        try:
            assert((cb[39] == Cl.D) or (cb[50] == Cl.D))
            if (cb[39] == Cl.D):
                return ["B1", "D2"]
            else:  # cb[50] == Cl.D
                return ["L3", "D1"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.BL])
    # NOTE(review): reached only when an AssertionError was caught above
    # (every other path returns inside its branch) -- so a bad cubie
    # yields an empty move list rather than raising.
    return []
def move_DL(cube_obj, loc):
    """
    Returns the moves list to orient and position the DL edge

    cube_obj -- cube object exposing get_cb() (flat facelet color list) and
                cubies (mapping from edge/corner positions to color tuples)
    loc      -- Ed position where the D/L edge cubie was found

    Returns a list of face-turn move strings; [] when the edge is already in
    place or when the colors at loc are inconsistent (an error is printed).

    Bug fix: the Ed.DF error message previously reported the expected colors
    as (Cl.D, Cl.B); the DF edge holds D/F, so it now reports (Cl.D, Cl.F).

    NOTE(review): validation relies on assert, which is stripped under
    python -O; the except AssertionError paths then never execute.
    """
    cb = cube_obj.get_cb()
    cubies = cube_obj.cubies
    if loc == Ed.UR:
        try:
            assert((cb[5] == Cl.D) or (cb[10] == Cl.D))
            if (cb[5] == Cl.D):
                return ["U2", "L2"]
            else: # cb[10] == Cl.D)
                return ["U3", "B1", "L3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.UR])
    elif loc == Ed.UF:
        try:
            assert((cb[7] == Cl.D) or (cb[19] == Cl.D))
            if (cb[7] == Cl.D):
                return ["U1", "L2"]
            else: # cb[19] == Cl.D)
                return ["U2", "B1", "L3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.UF])
    elif loc == Ed.UL:
        try:
            assert((cb[3] == Cl.D) or (cb[37] == Cl.D))
            if (cb[3] == Cl.D):
                return ["L2"]
            else: # cb[37] == Cl.D)
                return ["U1", "B1", "L3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.UL])
    elif loc == Ed.UB:
        try:
            assert((cb[1] == Cl.D) or (cb[46] == Cl.D))
            if (cb[1] == Cl.D):
                return ["U3", "L2"]
            else: # cb[46] == Cl.D)
                return ["B1", "L3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.UB])
    elif loc == Ed.DR:
        try:
            assert((cb[32] == Cl.D) or (cb[16] == Cl.D))
            if (cb[32] == Cl.D):
                return ["R3", "B2", "L3"]
            else: # cb[16] == Cl.D)
                return ["R2", "U3", "B1", "L3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.DR])
    elif loc == Ed.DF:
        try:
            # should not reach here: DF was solved by the previous step
            assert((cb[25] == Cl.F) and (cb[28] == Cl.D))
            return []
        except AssertionError:
            # fixed: expected colors for the DF edge are D/F, not D/B
            print("should be (%s); is (%s)"
                  % (tuple(sorted([Cl.D, Cl.F])), cubies[Ed.DF]))
    elif loc == Ed.DL:
        try:
            assert((cb[30] == Cl.D) or (cb[43] == Cl.D))
            if ((cb[30] == Cl.L) and (cb[43] == Cl.D)):
                # edge in place but flipped: reinsert with correct orientation
                return ["L1", "B3", "U3", "L2"]
            else:
                assert((cb[30] == Cl.D) and (cb[43] == Cl.L))
                return []
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.DL])
    elif loc == Ed.DB:
        try:
            assert((cb[34] == Cl.D) or (cb[52] == Cl.D))
            if (cb[34] == Cl.D):
                return ["B2", "U3", "L2"]
            else: # cb[52] == Cl.D)
                return ["B3", "L3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.DB])
    elif loc == Ed.FR:
        try:
            assert((cb[23] == Cl.D) or (cb[12] == Cl.D))
            if (cb[23] == Cl.D):
                return ["R1", "U2", "L2"]
            else: # cb[12] == Cl.D)
                return ["R1", "U3", "B1", "L3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.FR])
    elif loc == Ed.FL:
        try:
            assert((cb[21] == Cl.D) or (cb[41] == Cl.D))
            if (cb[21] == Cl.D):
                return ["L1"]
            else: # cb[41] == Cl.D)
                return ["L3", "U1", "B1", "L3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.FL])
    elif loc == Ed.BR:
        try:
            assert((cb[14] == Cl.D) or (cb[48] == Cl.D))
            if (cb[14] == Cl.D):
                return ["B1", "U3", "L2"]
            else: # cb[48] == Cl.D)
                return ["B2", "L3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.BR])
    else: #if loc == Ed.BL
        try:
            assert((cb[39] == Cl.D) or (cb[50] == Cl.D))
            if (cb[39] == Cl.D):
                return ["B3", "U3", "L2"]
            else: # cb[50] == Cl.D)
                return ["L3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.BL])
    return []
def move_DB(cube_obj, loc):
    """
    Returns the moves list to orient and position the DB edge

    cube_obj -- cube object exposing get_cb() (flat facelet color list) and
                cubies (mapping from edge/corner positions to color tuples)
    loc      -- Ed position where the D/B edge cubie was found

    Returns a list of face-turn move strings; [] when the edge is already in
    place or when the colors at loc are inconsistent (an error is printed).

    Bug fixes: the Ed.DF error message previously reported (Cl.D, Cl.B)
    instead of (Cl.D, Cl.F), and the Ed.DB error path printed
    cubies[Ed.DL] instead of cubies[Ed.DB].

    NOTE(review): validation relies on assert, which is stripped under
    python -O; the except AssertionError paths then never execute.
    """
    cb = cube_obj.get_cb()
    cubies = cube_obj.cubies
    if loc == Ed.UR:
        try:
            assert((cb[5] == Cl.D) or (cb[10] == Cl.D))
            if (cb[5] == Cl.D):
                return ["U3", "B2"]
            else: # cb[10] == Cl.D)
                return ["R1", "B3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.UR])
    elif loc == Ed.UF:
        try:
            assert((cb[7] == Cl.D) or (cb[19] == Cl.D))
            if (cb[7] == Cl.D):
                return ["U2", "B2"]
            else: # cb[19] == Cl.D)
                return ["U3", "R1", "B3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.UF])
    elif loc == Ed.UL:
        try:
            assert((cb[3] == Cl.D) or (cb[37] == Cl.D))
            if (cb[3] == Cl.D):
                return ["U1", "B2"]
            else: # cb[37] == Cl.D)
                return ["U2", "R1", "B3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.UL])
    elif loc == Ed.UB:
        try:
            assert((cb[1] == Cl.D) or (cb[46] == Cl.D))
            if (cb[1] == Cl.D):
                return ["B2"]
            else: # cb[46] == Cl.D)
                return ["U1", "R1", "B3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.UB])
    elif loc == Ed.DR:
        try:
            assert((cb[32] == Cl.D) or (cb[16] == Cl.D))
            if (cb[32] == Cl.D):
                return ["R2", "U3", "B2"]
            else: # cb[16] == Cl.D)
                return ["R3", "B3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.DR])
    elif loc == Ed.DF:
        try:
            # should not reach here: DF was solved by a previous step
            assert((cb[25] == Cl.F) and (cb[28] == Cl.D))
            return []
        except AssertionError:
            # fixed: expected colors for the DF edge are D/F, not D/B
            print("should be (%s); is (%s)"
                  % (tuple(sorted([Cl.D, Cl.F])), cubies[Ed.DF]))
    elif loc == Ed.DL:
        try:
            # should not reach here: DL was solved by a previous step
            assert((cb[30] == Cl.D) and (cb[43] == Cl.L))
            return []
        except AssertionError:
            print("should be (%s); is (%s)"
                  % (tuple(sorted([Cl.D, Cl.L])), cubies[Ed.DL]))
    elif loc == Ed.DB:
        try:
            assert((cb[34] == Cl.D) or (cb[52] == Cl.D))
            if ((cb[34] == Cl.B) and (cb[52] == Cl.D)):
                # edge in place but flipped: reinsert with correct orientation
                return ["B2", "U1", "R1", "B3"]
            else:
                assert((cb[34] == Cl.D) and (cb[52] == Cl.B))
                return []
        except AssertionError:
            # fixed: report the DB cubie, not DL
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.DB])
    elif loc == Ed.FR:
        try:
            assert((cb[23] == Cl.D) or (cb[12] == Cl.D))
            if (cb[23] == Cl.D):
                return ["R1", "U3", "B2"]
            else: # cb[12] == Cl.D)
                return ["R2", "B3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.FR])
    elif loc == Ed.FL:
        try:
            assert((cb[21] == Cl.D) or (cb[41] == Cl.D))
            if (cb[21] == Cl.D):
                return ["D1", "L1", "D3"]
            else: # cb[41] == Cl.D)
                return ["D2", "F3", "D2"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.FL])
    elif loc == Ed.BR:
        try:
            assert((cb[14] == Cl.D) or (cb[48] == Cl.D))
            if (cb[14] == Cl.D):
                return ["B3"]
            else: # cb[48] == Cl.D)
                return ["B1", "U1", "R1", "B3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.BR])
    else: #if loc == Ed.BL
        try:
            assert((cb[39] == Cl.D) or (cb[50] == Cl.D))
            if (cb[39] == Cl.D):
                return ["B1"]
            else: # cb[50] == Cl.D)
                return ["B3", "U1", "R1", "B3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.BL])
    return []
def move_DR(cube_obj, loc):
    """
    Returns the moves list to orient and position the DR edge

    cube_obj -- cube object exposing get_cb() (flat facelet color list) and
                cubies (mapping from edge/corner positions to color tuples)
    loc      -- Ed position where the D/R edge cubie was found

    Returns a list of face-turn move strings; [] when the edge is already in
    place or when the colors at loc are inconsistent (an error is printed).

    Bug fix: the Ed.DF error message previously reported the expected colors
    as (Cl.D, Cl.B); the DF edge holds D/F, so it now reports (Cl.D, Cl.F).

    NOTE(review): validation relies on assert, which is stripped under
    python -O; the except AssertionError paths then never execute.
    """
    cb = cube_obj.get_cb()
    cubies = cube_obj.cubies
    if loc == Ed.UR:
        try:
            assert((cb[5] == Cl.D) or (cb[10] == Cl.D))
            if (cb[5] == Cl.D):
                return ["R2"]
            else: # cb[10] == Cl.D)
                return ["R1", "D1", "B3", "D3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.UR])
    elif loc == Ed.UF:
        try:
            assert((cb[7] == Cl.D) or (cb[19] == Cl.D))
            if (cb[7] == Cl.D):
                return ["U3", "R2"]
            else: # cb[19] == Cl.D)
                return ["U2", "B3", "R1", "B1"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.UF])
    elif loc == Ed.UL:
        try:
            assert((cb[3] == Cl.D) or (cb[37] == Cl.D))
            if (cb[3] == Cl.D):
                return ["U2", "R2"]
            else: # cb[37] == Cl.D)
                return ["U1", "B3", "R1", "B1"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.UL])
    elif loc == Ed.UB:
        try:
            assert((cb[1] == Cl.D) or (cb[46] == Cl.D))
            if (cb[1] == Cl.D):
                return ["U1", "R2"]
            else: # cb[46] == Cl.D)
                return ["B3", "R1", "B1"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.UB])
    elif loc == Ed.DR:
        try:
            assert((cb[32] == Cl.D) or (cb[16] == Cl.D))
            if ((cb[32] == Cl.R) and (cb[16] == Cl.D)):
                # edge in place but flipped: reinsert with correct orientation
                return ["R3", "D1", "B3", "D3"]
            else:
                assert((cb[32] == Cl.D) and (cb[16] == Cl.R))
                return []
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.DR])
    elif loc == Ed.DF:
        try:
            # should not reach here: DF was solved by a previous step
            assert((cb[25] == Cl.F) and (cb[28] == Cl.D))
            return []
        except AssertionError:
            # fixed: expected colors for the DF edge are D/F, not D/B
            print("should be (%s); is (%s)"
                  % (tuple(sorted([Cl.D, Cl.F])), cubies[Ed.DF]))
    elif loc == Ed.DL:
        try:
            # should not reach here: DL was solved by a previous step
            assert((cb[30] == Cl.D) and (cb[43] == Cl.L))
            return []
        except AssertionError:
            print("should be (%s); is (%s)"
                  % (tuple(sorted([Cl.D, Cl.L])), cubies[Ed.DL]))
    elif loc == Ed.DB:
        try:
            # should not reach here: DB was solved by a previous step
            assert((cb[34] == Cl.D) and (cb[52] == Cl.B))
            return []
        except AssertionError:
            print("should be (%s); is (%s)"
                  % (tuple(sorted([Cl.D, Cl.B])), cubies[Ed.DB]))
    elif loc == Ed.FR:
        try:
            assert((cb[23] == Cl.D) or (cb[12] == Cl.D))
            if (cb[23] == Cl.D):
                return ["R3"]
            else: # cb[12] == Cl.D)
                return ["D3", "F1", "D1"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.FR])
    elif loc == Ed.FL:
        try:
            assert((cb[21] == Cl.D) or (cb[41] == Cl.D))
            if (cb[21] == Cl.D):
                return ["D2", "L1", "D2"]
            else: # cb[41] == Cl.D)
                return ["D3", "F3", "D1"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.FL])
    elif loc == Ed.BR:
        try:
            assert((cb[14] == Cl.D) or (cb[48] == Cl.D))
            if (cb[14] == Cl.D):
                return ["D1", "B3", "D3"]
            else: # cb[48] == Cl.D)
                return ["R1"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.BR])
    else: #if loc == Ed.BL
        try:
            assert((cb[39] == Cl.D) or (cb[50] == Cl.D))
            if (cb[39] == Cl.D):
                return ["D1", "B1", "D3"]
            else: # cb[50] == Cl.D)
                return ["D2", "L3", "D2"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s)"
                  % cubies[Ed.BL])
    return []
def white_cross(cube_obj):
    """
    Outputs the moves list for solving the down face edge cubies according to
    the initial cube state

    Places the DF, DL, DB and DR edges in turn, re-reading the cube state
    after each placement; returns early as soon as the white cross is done.
    Prints diagnostics; returns None.

    Bug fix: the DR "can't find edge" message previously reported
    (Cl.U, Cl.R) although the search is for the (Cl.D, Cl.R) edge.
    """
    # edge DF
    cube_state = cube_obj.cb
    cubies = cube_obj.cubies
    if is_white_cross(cube_state):
        print("white cross completed")
        print(cube_obj)
        return
    if not ((cube_state[28] == Cl.D) and (cube_state[25] == Cl.F)):
        loc = find_edge(cubies, tuple(sorted([Cl.D, Cl.F])))
        if loc is None:
            print("can't find edge (%s, %s)" % tuple(sorted([Cl.D, Cl.F])))
        else:
            move_list = move_DF(cube_obj, loc)
            execute_moves(cube_obj, move_list)
    # edge DL
    cube_state = cube_obj.cb
    cubies = cube_obj.cubies
    if is_white_cross(cube_state):
        print("white cross completed")
        print(cube_obj)
        return
    if not ((cube_state[30] == Cl.D) and (cube_state[43] == Cl.L)):
        loc = find_edge(cubies, tuple(sorted([Cl.D, Cl.L])))
        if loc is None:
            print("can't find edge (%s, %s)" % tuple(sorted([Cl.D, Cl.L])))
        else:
            move_list = move_DL(cube_obj, loc)
            execute_moves(cube_obj, move_list)
    # edge DB
    cube_state = cube_obj.cb
    cubies = cube_obj.cubies
    if is_white_cross(cube_state):
        print("white cross completed")
        print(cube_obj)
        return
    if not ((cube_state[34] == Cl.D) and (cube_state[52] == Cl.B)):
        loc = find_edge(cubies, tuple(sorted([Cl.D, Cl.B])))
        if loc is None:
            print("can't find edge (%s, %s)" % tuple(sorted([Cl.D, Cl.B])))
        else:
            move_list = move_DB(cube_obj, loc)
            execute_moves(cube_obj, move_list)
    # edge DR
    cube_state = cube_obj.cb
    cubies = cube_obj.cubies
    if is_white_cross(cube_state):
        print("white cross completed")
        print(cube_obj)
        return
    if not ((cube_state[32] == Cl.D) and (cube_state[16] == Cl.R)):
        loc = find_edge(cubies, tuple(sorted([Cl.D, Cl.R])))
        if loc is None:
            # fixed: message previously said (Cl.U, Cl.R)
            print("can't find edge (%s, %s)" % tuple(sorted([Cl.D, Cl.R])))
        else:
            move_list = move_DR(cube_obj, loc)
            execute_moves(cube_obj, move_list)
    try:
        assert(is_white_cross(cube_obj.cb))
        print("white cross completed")
        print(cube_obj)
    except AssertionError:
        print("did not successfully reach white cross state\n")
"""
white corners: part 2 of first layer algorithms
"""
def is_white_corners(cstate):
    """
    Checks if the cube has reached the "white corners" state

    True when the white cross holds and every down-layer corner facelet
    (DFR, DLF, DBL, DRB) shows its home color.
    """
    # (facelet index, required color) for the four down-layer corners
    corner_facelets = (
        (29, Cl.D), (26, Cl.F), (15, Cl.R),   # DFR
        (27, Cl.D), (44, Cl.L), (24, Cl.F),   # DLF
        (33, Cl.D), (53, Cl.B), (42, Cl.L),   # DBL
        (35, Cl.D), (17, Cl.R), (51, Cl.B),   # DRB
    )
    if not is_white_cross(cstate):
        return False
    return all(cstate[idx] == color for idx, color in corner_facelets)
def move_DFR(cube_obj, loc):
    """
    Returns the moves list to orient and position the DFR corner

    cube_obj -- cube object exposing get_cb() (flat facelet color list) and
                cubies (mapping from corner positions to color tuples)
    loc      -- Cn position where the D/F/R corner cubie was found

    Returns a list of face-turn move strings; [] when the corner is already
    placed or when the colors at loc are inconsistent (an error is printed).
    NOTE(review): asserts are stripped under python -O, disabling the
    AssertionError diagnostics.
    """
    cb = cube_obj.get_cb()
    cubies = cube_obj.cubies
    if loc == Cn.URF:
        try:
            assert((cb[8] == Cl.D) or (cb[9] == Cl.D) or (cb[20] == Cl.D))
            if (cb[8] == Cl.D):
                return ["R1", "U2", "R3", "U2", "F3", "U1", "F1"]
            elif (cb[9] == Cl.D):
                return ["R1", "U1", "R3"]
            else: # cb[20] == Cl.D)
                return ["F3", "U3", "F1"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s, %s)"
                  % cubies[Cn.URF])
    elif loc == Cn.UFL:
        try:
            assert((cb[6] == Cl.D) or (cb[18] == Cl.D) or (cb[38] == Cl.D))
            if (cb[6] == Cl.D):
                return ["R1", "U2", "R3", "U1", "R1", "U3", "R3"]
            elif (cb[18] == Cl.D):
                return ["U3", "R1", "U1", "R3"]
            else: # cb[38] == Cl.D)
                return ["R1", "U3", "R3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s, %s)"
                  % cubies[Cn.UFL])
    elif loc == Cn.ULB:
        try:
            assert((cb[0] == Cl.D) or (cb[36] == Cl.D) or (cb[47] == Cl.D))
            if (cb[0] == Cl.D):
                return ["R1", "U1", "R3", "U1", "R1", "U3", "R3"]
            elif (cb[36] == Cl.D):
                return ["F3", "U2", "F1"]
            else: # cb[47] == Cl.D)
                return ["R1", "U2", "R3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s, %s)"
                  % cubies[Cn.ULB])
    elif loc == Cn.UBR:
        try:
            assert((cb[2] == Cl.D) or (cb[45] == Cl.D) or (cb[11] == Cl.D))
            if (cb[2] == Cl.D):
                return ["U3", "R1", "U1", "R3", "U1", "R1", "U3", "R3"]
            elif (cb[45] == Cl.D):
                return ["F3", "U1", "F1"]
            else: # cb[11] == Cl.D)
                return ["U2", "R1", "U3", "R3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s, %s)"
                  % cubies[Cn.UBR])
    elif loc == Cn.DFR:
        try:
            assert((cb[29] == Cl.D) or (cb[26] == Cl.D) or (cb[15] == Cl.D))
            if (cb[26] == Cl.D):
                return ["R1", "U3", "R3", "U1", "R1", "U3", "R3"]
            elif (cb[15] == Cl.D):
                return ["R1", "U1", "R3", "U2", "F3", "U1", "F1"]
            else: # cb[29] == Cl.D)
                # corner already solved; assert confirms full orientation
                assert((cb[29] == Cl.D) and (cb[26] == Cl.F) and (cb[15] == Cl.R))
                return []
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s, %s)"
                  % cubies[Cn.DFR])
    elif loc == Cn.DLF:
        try:
            assert((cb[27] == Cl.D) or (cb[44] == Cl.D) or (cb[24] == Cl.D))
            if (cb[27] == Cl.D):
                return ["F1", "U1", "F3", "R1", "U2", "R3"]
            elif (cb[44] == Cl.D):
                return ["L3", "R1", "U3", "L1", "R3"]
            else: # cb[24] == Cl.D)
                return ["L3", "R1", "U2", "R3", "U1", "R1", "U3", "R3", "L1"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s, %s)"
                  % cubies[Cn.DLF])
    elif loc == Cn.DBL:
        try:
            assert((cb[33] == Cl.D) or (cb[53] == Cl.D) or (cb[42] == Cl.D))
            if (cb[33] == Cl.D):
                return ["B3", "F3", "U2", "B1", "F1"]
            elif (cb[53] == Cl.D):
                return ["L1", "R1", "U1", "R3", "U1", "R1", "U3", "R3", "L3"]
            else: # cb[42] == Cl.D)
                return ["B3", "R1", "U1", "R3", "B1", "U1", "R1", "U3", "R3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s, %s)"
                  % cubies[Cn.DBL])
    else: #if loc == Cn.DRB
        try:
            assert((cb[35] == Cl.D) or (cb[17] == Cl.D) or (cb[51] == Cl.D))
            if (cb[35] == Cl.D):
                return ["B1", "U2", "B3", "R1", "U3", "R3"]
            elif (cb[17] == Cl.D):
                return ["R3", "U2", "R2", "U3", "R3"]
            else: # cb[51] == Cl.D)
                return ["F3", "B1", "U1", "B3", "F1"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s, %s)"
                  % cubies[Cn.DRB])
    return []
def move_DLF(cube_obj, loc):
    """
    Returns the moves list to orient and position the DLF corner

    (docstring fixed: previously said "DFR corner")

    cube_obj -- cube object exposing get_cb() (flat facelet color list) and
                cubies (mapping from corner positions to color tuples)
    loc      -- Cn position where the D/L/F corner cubie was found

    Returns a list of face-turn move strings; [] when the corner is already
    placed or when the colors at loc are inconsistent (an error is printed).
    NOTE(review): asserts are stripped under python -O, disabling the
    AssertionError diagnostics.
    """
    cb = cube_obj.get_cb()
    cubies = cube_obj.cubies
    if loc == Cn.URF:
        try:
            assert((cb[8] == Cl.D) or (cb[9] == Cl.D) or (cb[20] == Cl.D))
            if (cb[8] == Cl.D):
                return ["L3", "U2", "L1", "U3", "L3", "U1", "L1"]
            elif (cb[9] == Cl.D):
                return ["L3", "U1", "L1"]
            else: # cb[20] == Cl.D)
                return ["U2", "F1", "U3", "F3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s, %s)"
                  % cubies[Cn.URF])
    elif loc == Cn.UFL:
        try:
            assert((cb[6] == Cl.D) or (cb[18] == Cl.D) or (cb[38] == Cl.D))
            if (cb[6] == Cl.D):
                return ["U1", "F1", "U2", "F3", "U1", "F1", "U3", "F3"]
            elif (cb[18] == Cl.D):
                return ["U3", "L3", "U1", "L1"]
            else: # cb[38] == Cl.D)
                return ["L3", "U3", "L1"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s, %s)"
                  % cubies[Cn.UFL])
    elif loc == Cn.ULB:
        try:
            assert((cb[0] == Cl.D) or (cb[36] == Cl.D) or (cb[47] == Cl.D))
            if (cb[0] == Cl.D):
                return ["U1", "L3", "U3", "L1", "U3", "L3", "U1", "L1"]
            elif (cb[36] == Cl.D):
                return ["U2", "L3", "U1", "L1"]
            else: # cb[47] == Cl.D)
                return ["F1", "U3", "F3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s, %s)"
                  % cubies[Cn.ULB])
    elif loc == Cn.UBR:
        try:
            assert((cb[2] == Cl.D) or (cb[45] == Cl.D) or (cb[11] == Cl.D))
            if (cb[2] == Cl.D):
                return ["L3", "U3", "L1", "U3", "L3", "U1", "L1"]
            elif (cb[45] == Cl.D):
                return ["L3", "U2", "L1"]
            else: # cb[11] == Cl.D)
                return ["U3", "F1", "U3", "F3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s, %s)"
                  % cubies[Cn.UBR])
    elif loc == Cn.DFR:
        try:
            # should not reach here: DFR was solved by the previous step
            assert((cb[29] == Cl.D) and (cb[26] == Cl.F) and (cb[15] == Cl.R))
            return []
        except AssertionError:
            print("should be (%s); is (%s)"
                  % (tuple(sorted([Cl.D, Cl.F, Cl.R])), cubies[Cn.DFR]))
    elif loc == Cn.DLF:
        try:
            assert((cb[27] == Cl.D) or (cb[44] == Cl.D) or (cb[24] == Cl.D))
            if (cb[44] == Cl.D):
                return ["L3", "U2", "L1", "U3", "F1", "U3", "F3"]
            elif (cb[24] == Cl.D):
                return ["F1", "U1", "F3", "U3", "F1", "U1", "F3", "U3"]
            else: # cb[27] == Cl.D)
                # corner already solved; assert confirms full orientation
                assert((cb[27] == Cl.D) and (cb[44] == Cl.L) and (cb[24] == Cl.F))
                return []
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s, %s)"
                  % cubies[Cn.DLF])
    elif loc == Cn.DBL:
        try:
            assert((cb[33] == Cl.D) or (cb[53] == Cl.D) or (cb[42] == Cl.D))
            if (cb[33] == Cl.D):
                return ["B3", "U2", "B1", "L3", "U1", "L1"]
            elif (cb[53] == Cl.D):
                return ["B3", "U2", "B1", "U2", "F1", "U3", "F3"]
            else: # cb[42] == Cl.D)
                return ["B3", "U1", "B1", "U2", "L3", "U1", "L1"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s, %s)"
                  % cubies[Cn.DBL])
    else: #if loc == Cn.DRB
        try:
            assert((cb[35] == Cl.D) or (cb[17] == Cl.D) or (cb[51] == Cl.D))
            if (cb[35] == Cl.D):
                return ["B1", "U1", "B3", "U2", "F1", "U3", "F3"]
            elif (cb[17] == Cl.D):
                return ["B1", "U3", "B3", "F1", "U2", "F3"]
            else: # cb[51] == Cl.D)
                return ["B1", "U1", "L3", "U1", "B3", "L1"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s, %s)"
                  % cubies[Cn.DRB])
    return []
def move_DBL(cube_obj, loc):
    """
    Returns the moves list to orient and position the DBL corner

    (docstring fixed: previously said "DFR corner")

    cube_obj -- cube object exposing get_cb() (flat facelet color list) and
                cubies (mapping from corner positions to color tuples)
    loc      -- Cn position where the D/B/L corner cubie was found

    Returns a list of face-turn move strings; [] when the corner is already
    placed or when the colors at loc are inconsistent (an error is printed).
    NOTE(review): asserts are stripped under python -O, disabling the
    AssertionError diagnostics.
    """
    cb = cube_obj.get_cb()
    cubies = cube_obj.cubies
    if loc == Cn.URF:
        try:
            assert((cb[8] == Cl.D) or (cb[9] == Cl.D) or (cb[20] == Cl.D))
            if (cb[8] == Cl.D):
                return ["B3", "U3", "B1", "U3", "B3", "U1", "B1"]
            elif (cb[9] == Cl.D):
                return ["U1", "B3", "U1", "B1"]
            else: # cb[20] == Cl.D)
                return ["U3", "L1", "U3", "L3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s, %s)"
                  % cubies[Cn.URF])
    elif loc == Cn.UFL:
        try:
            assert((cb[6] == Cl.D) or (cb[18] == Cl.D) or (cb[38] == Cl.D))
            if (cb[6] == Cl.D):
                return ["B3", "U2", "B1", "U3", "B3", "U1", "B1"]
            elif (cb[18] == Cl.D):
                return ["B3", "U1", "B1"]
            else: # cb[38] == Cl.D)
                return ["U2", "L1", "U3", "L3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s, %s)"
                  % cubies[Cn.UFL])
    elif loc == Cn.ULB:
        try:
            assert((cb[0] == Cl.D) or (cb[36] == Cl.D) or (cb[47] == Cl.D))
            if (cb[0] == Cl.D):
                return ["B3", "U1", "B1", "L1", "U2", "L3"]
            elif (cb[36] == Cl.D):
                return ["U3", "B3", "U1", "B1"]
            else: # cb[47] == Cl.D)
                return ["U1", "L1", "U3", "L3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s, %s)"
                  % cubies[Cn.ULB])
    elif loc == Cn.UBR:
        try:
            assert((cb[2] == Cl.D) or (cb[45] == Cl.D) or (cb[11] == Cl.D))
            if (cb[2] == Cl.D):
                return ["R3", "B2", "R1", "B2", "R1"]
            elif (cb[45] == Cl.D):
                return ["U3", "L1", "U1", "L3"]
            else: # cb[11] == Cl.D)
                return ["L1", "U3", "L3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s, %s)"
                  % cubies[Cn.UBR])
    elif loc == Cn.DFR:
        try:
            # should not reach here: DFR was solved by a previous step
            assert((cb[29] == Cl.D) and (cb[26] == Cl.F) and (cb[15] == Cl.R))
            return []
        except AssertionError:
            print("should be (%s); is (%s)"
                  % (tuple(sorted([Cl.D, Cl.F, Cl.R])), cubies[Cn.DFR]))
    elif loc == Cn.DLF:
        try:
            # should not reach here: DLF was solved by a previous step
            assert((cb[27] == Cl.D) and (cb[44] == Cl.L) and (cb[24] == Cl.F))
            return []
        except AssertionError:
            print("should be (%s); is (%s)"
                  % (tuple(sorted([Cl.D, Cl.L, Cl.F])), cubies[Cn.DLF]))
    elif loc == Cn.DBL:
        try:
            assert((cb[33] == Cl.D) or (cb[53] == Cl.D) or (cb[42] == Cl.D))
            if (cb[53] == Cl.D):
                return ["B3", "U1", "B1", "U3", "B3", "U2", "B1", "U3", "B3", "U1", "B1"]
            elif (cb[42] == Cl.D):
                return ["B3", "U1", "B1", "U3", "B3", "U1", "B1"]
            else: # cb[33] == Cl.D)
                # corner already solved; assert confirms full orientation
                assert((cb[33] == Cl.D) and (cb[53] == Cl.B) and (cb[42] == Cl.L))
                return []
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s, %s)"
                  % cubies[Cn.DBL])
    else: #if loc == Cn.DRB
        try:
            assert((cb[35] == Cl.D) or (cb[17] == Cl.D) or (cb[51] == Cl.D))
            if (cb[35] == Cl.D):
                return ["B1", "U3", "B3", "U2", "B3", "U1", "B1"]
            elif (cb[17] == Cl.D):
                return ["B1", "U3", "B3", "L1", "U3", "L3"]
            else: # cb[51] == Cl.D)
                return ["B1", "U3", "B3", "L1", "U2", "L3", "U1", "L1", "U3", "L3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s, %s)"
                  % cubies[Cn.DRB])
    return []
def move_DRB(cube_obj, loc):
    """
    Returns the moves list to orient and position the DRB corner

    (docstring fixed: previously said "DFR corner")

    cube_obj -- cube object exposing get_cb() (flat facelet color list) and
                cubies (mapping from corner positions to color tuples)
    loc      -- Cn position where the D/R/B corner cubie was found

    Returns a list of face-turn move strings; [] when the corner is already
    placed or when the colors at loc are inconsistent (an error is printed).
    NOTE(review): asserts are stripped under python -O, disabling the
    AssertionError diagnostics.
    """
    cb = cube_obj.get_cb()
    cubies = cube_obj.cubies
    if loc == Cn.URF:
        try:
            assert((cb[8] == Cl.D) or (cb[9] == Cl.D) or (cb[20] == Cl.D))
            if (cb[8] == Cl.D):
                return ["B1", "U2", "B3", "U1", "B1", "U3", "B3"]
            elif (cb[9] == Cl.D):
                return ["U2", "R3", "U1", "R1"]
            else: # cb[20] == Cl.D)
                return ["B1", "U3", "B3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s, %s)"
                  % cubies[Cn.URF])
    elif loc == Cn.UFL:
        try:
            assert((cb[6] == Cl.D) or (cb[18] == Cl.D) or (cb[38] == Cl.D))
            if (cb[6] == Cl.D):
                return ["B1", "U1", "B3", "U1", "B1", "U3", "B3"]
            elif (cb[18] == Cl.D):
                return ["U1", "R3", "U1", "R1"]
            else: # cb[38] == Cl.D)
                return ["B1", "U2", "B3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s, %s)"
                  % cubies[Cn.UFL])
    elif loc == Cn.ULB:
        try:
            assert((cb[0] == Cl.D) or (cb[36] == Cl.D) or (cb[47] == Cl.D))
            if (cb[0] == Cl.D):
                return ["R3", "U2", "R1", "U3", "R3", "U1", "R1"]
            elif (cb[36] == Cl.D):
                return ["R3", "U1", "R1"]
            else: # cb[47] == Cl.D)
                return ["U2", "B1", "U3", "B3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s, %s)"
                  % cubies[Cn.ULB])
    elif loc == Cn.UBR:
        try:
            assert((cb[2] == Cl.D) or (cb[45] == Cl.D) or (cb[11] == Cl.D))
            if (cb[2] == Cl.D):
                return ["U1", "B1", "U2", "B3", "U1", "B1", "U3", "B3"]
            elif (cb[45] == Cl.D):
                return ["U3", "R3", "U1", "R1"]
            else: # cb[11] == Cl.D)
                return ["U1", "B1", "U3", "B3"]
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s, %s)"
                  % cubies[Cn.UBR])
    elif loc == Cn.DFR:
        try:
            # should not reach here: DFR was solved by a previous step
            assert((cb[29] == Cl.D) and (cb[26] == Cl.F) and (cb[15] == Cl.R))
            return []
        except AssertionError:
            print("should be (%s); is (%s)"
                  % (tuple(sorted([Cl.D, Cl.F, Cl.R])), cubies[Cn.DFR]))
    elif loc == Cn.DLF:
        try:
            # should not reach here: DLF was solved by a previous step
            assert((cb[27] == Cl.D) and (cb[44] == Cl.L) and (cb[24] == Cl.F))
            return []
        except AssertionError:
            print("should be (%s); is (%s)"
                  % (tuple(sorted([Cl.D, Cl.L, Cl.F])), cubies[Cn.DLF]))
    elif loc == Cn.DBL:
        try:
            # should not reach here: DBL was solved by a previous step
            assert((cb[33] == Cl.D) and (cb[53] == Cl.B) and (cb[42] == Cl.L))
            return []
        except AssertionError:
            print("should be (%s); is (%s)"
                  % (tuple(sorted([Cl.D, Cl.B, Cl.L])), cubies[Cn.DBL]))
    else: #if loc == Cn.DRB
        try:
            assert((cb[35] == Cl.D) or (cb[17] == Cl.D) or (cb[51] == Cl.D))
            if (cb[17] == Cl.D):
                return ["B1", "U3", "B3", "U1", "B1", "U3", "B3"]
            elif (cb[51] == Cl.D):
                return ["B1", "U1", "B3", "U2", "R3", "U1", "R1"]
            else: # cb[35] == Cl.D (comment fixed: was wrongly "cb[51]")
                # corner already solved; assert confirms full orientation
                assert((cb[35] == Cl.D) and (cb[17] == Cl.R) and (cb[51] == Cl.B))
                return []
        except AssertionError:
            print("invalid colors (at least one should be Cl.D): (%s, %s, %s)"
                  % cubies[Cn.DRB])
    return []
def white_corners(cube_obj):
    """
    Outputs the moves list for solving the down face corner cubies after
    solving for the white cross

    Places the DFR, DLF, DBL and DRB corners in turn, re-reading the cube
    state before each placement and returning early once all corners are in.
    """
    # per corner: ((facelet index, home color) triple, search colors, mover)
    corner_specs = [
        (((29, Cl.D), (26, Cl.F), (15, Cl.R)), (Cl.D, Cl.F, Cl.R), move_DFR),
        (((27, Cl.D), (44, Cl.L), (24, Cl.F)), (Cl.D, Cl.L, Cl.F), move_DLF),
        (((33, Cl.D), (53, Cl.B), (42, Cl.L)), (Cl.D, Cl.B, Cl.L), move_DBL),
        (((35, Cl.D), (17, Cl.R), (51, Cl.B)), (Cl.D, Cl.R, Cl.B), move_DRB),
    ]
    for facelets, colors, mover in corner_specs:
        # refresh state: earlier placements mutate the cube
        cstate = cube_obj.cb
        cubies = cube_obj.cubies
        if is_white_corners(cstate):
            print("white corners completed")
            print(cube_obj)
            return
        if not all(cstate[idx] == color for idx, color in facelets):
            loc = find_corner(cubies, tuple(sorted(colors)))
            if loc is None:
                print("can't find corner (%s, %s, %s)"
                      % tuple(sorted(colors)))
            else:
                execute_moves(cube_obj, mover(cube_obj, loc))
    try:
        assert(is_white_corners(cube_obj.cb))
        print("white corners completed")
        print(cube_obj)
    except AssertionError:
        print("did not successfully reach white corners state\n")
def layer1(cube_obj):
    """
    Calls the solving algorithms for the first layer: white cross and white
    corners
    """
    # run both first-layer phases in order, mutating cube_obj in place
    for solve_step in (white_cross, white_corners):
        solve_step(cube_obj)
| 33.129961
| 89
| 0.423072
| 5,746
| 42,572
| 3.100418
| 0.032022
| 0.076116
| 0.079315
| 0.036935
| 0.940107
| 0.918047
| 0.842829
| 0.801235
| 0.792759
| 0.765535
| 0
| 0.052147
| 0.385582
| 42,572
| 1,285
| 90
| 33.129961
| 0.628933
| 0.061308
| 0
| 0.755511
| 0
| 0.026052
| 0.14655
| 0
| 0
| 0
| 0
| 0
| 0.172345
| 1
| 0.013026
| false
| 0
| 0.004008
| 0
| 0.209419
| 0.11022
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f04f7a6f7bce03ac21ae08e0c3f1d3e1acb13396
| 72
|
py
|
Python
|
models/__init__.py
|
fenghansen/ELD
|
39846c79b5bcb406881c52700d282a9c1149666e
|
[
"MIT"
] | 258
|
2020-05-16T17:43:13.000Z
|
2022-03-22T07:02:54.000Z
|
models/__init__.py
|
scott-mao/ELD
|
c1e009f21ab7ac6e87c4e37588bee3856160cd93
|
[
"MIT"
] | 23
|
2020-06-22T02:07:04.000Z
|
2022-03-25T01:33:20.000Z
|
models/__init__.py
|
scott-mao/ELD
|
c1e009f21ab7ac6e87c4e37588bee3856160cd93
|
[
"MIT"
] | 39
|
2020-05-19T06:29:28.000Z
|
2022-03-21T04:00:16.000Z
|
from .ELD_model import ELDModel
def eld_model():
return ELDModel()
| 14.4
| 31
| 0.736111
| 10
| 72
| 5.1
| 0.7
| 0.313725
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180556
| 72
| 4
| 32
| 18
| 0.864407
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 0
| 0
|
0
| 7
|
f05ed14da55b2f34e53ec01aab443e19ae22d904
| 2,310
|
py
|
Python
|
train_step.py
|
terrifyzhao/multi_task_lm
|
b60bfe34a02e5483540062074b24135753c537cd
|
[
"Apache-2.0"
] | 1
|
2022-02-16T06:34:16.000Z
|
2022-02-16T06:34:16.000Z
|
train_step.py
|
terrifyzhao/multi_task_lm
|
b60bfe34a02e5483540062074b24135753c537cd
|
[
"Apache-2.0"
] | null | null | null |
train_step.py
|
terrifyzhao/multi_task_lm
|
b60bfe34a02e5483540062074b24135753c537cd
|
[
"Apache-2.0"
] | null | null | null |
import torch
from annlp import get_device
device = get_device()
def base_step(batch, optim, model, amp):
all_loss = 0
for index in range(len(batch)):
optim.zero_grad()
output = model(batch[index]['input_ids'].to(device),
batch[index]['attention_mask'].to(device),
labels=batch[index]['labels'].to(device),
task_id=index)
loss = output.loss
all_loss += loss.item()
if torch.cuda.is_available():
with amp.scale_loss(loss, optim) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optim.step()
return all_loss / len(batch)
def weight_step(batch, optim, model, amp):
all_loss = 0
for index in range(len(batch)):
optim.zero_grad()
output = model(batch[index]['input_ids'].to(device),
batch[index]['attention_mask'].to(device),
labels=batch[index]['labels'].to(device),
task_id=index)
loss = output.loss
loss = torch.log(loss)
all_loss += loss.item()
if torch.cuda.is_available():
with amp.scale_loss(loss, optim) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optim.step()
return all_loss / len(batch)
def grad_accumulation_step(batch, optim, model, amp, with_log=False):
all_loss = None
optim.zero_grad()
for index in range(len(batch)):
output = model(batch[index]['input_ids'].to(device),
batch[index]['attention_mask'].to(device),
labels=batch[index]['labels'].to(device),
task_id=index)
loss = output.loss
if all_loss:
if with_log:
all_loss += torch.log(loss)
else:
all_loss += loss
else:
if with_log:
all_loss = torch.log(loss)
else:
all_loss = loss
if torch.cuda.is_available():
with amp.scale_loss(loss, optim) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optim.step()
return all_loss.item() / len(batch)
| 30
| 69
| 0.5329
| 271
| 2,310
| 4.372694
| 0.180812
| 0.076793
| 0.037131
| 0.048101
| 0.87173
| 0.853165
| 0.833755
| 0.833755
| 0.833755
| 0.833755
| 0
| 0.001337
| 0.352381
| 2,310
| 76
| 70
| 30.394737
| 0.790775
| 0
| 0
| 0.78125
| 0
| 0
| 0.037662
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046875
| false
| 0
| 0.03125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b2c6d6d88248f183e4c0bf30c74c2447e06ab738
| 194
|
py
|
Python
|
rbact/peewee/__init__.py
|
chin-wag/async-rbac
|
442b5d02ee785ba4d9c04d5cbb3afeeec6dac633
|
[
"MIT"
] | null | null | null |
rbact/peewee/__init__.py
|
chin-wag/async-rbac
|
442b5d02ee785ba4d9c04d5cbb3afeeec6dac633
|
[
"MIT"
] | 11
|
2021-11-29T14:43:05.000Z
|
2022-02-03T15:33:48.000Z
|
rbact/peewee/__init__.py
|
chin-wag/rbact
|
442b5d02ee785ba4d9c04d5cbb3afeeec6dac633
|
[
"MIT"
] | null | null | null |
# Peewee adapter sub-package: re-export the adapter and its ORM models.
from .models import ModelsLoader, Users, Roles, UsersRoles, Rules
from .peewee_adapter import PeeweeAdapter
# Explicit public API of ``rbact.peewee``.
__all__ = ["PeeweeAdapter", "ModelsLoader", "Users", "Roles", "UsersRoles", "Rules"]
| 38.8
| 84
| 0.757732
| 20
| 194
| 7.1
| 0.6
| 0.239437
| 0.309859
| 0.450704
| 0.521127
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108247
| 194
| 4
| 85
| 48.5
| 0.820809
| 0
| 0
| 0
| 0
| 0
| 0.257732
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b2ede2e020a36b7f068328ab82a52d80d930da11
| 2,719
|
py
|
Python
|
src/Python/Unittests/test_read_write_stl.py
|
rzoller/OpenMesh
|
f84bca0b26c61eab5f9335b2191962ca8545c5f6
|
[
"BSD-3-Clause"
] | 19
|
2020-08-13T05:15:09.000Z
|
2022-03-31T14:51:29.000Z
|
src/Python/Unittests/test_read_write_stl.py
|
ccopsey/OpenMesh
|
93e6e626c3f282bf4275521c33cd8da1ca559c7d
|
[
"BSD-3-Clause"
] | 2
|
2020-09-08T07:03:04.000Z
|
2021-08-04T05:43:27.000Z
|
src/Python/Unittests/test_read_write_stl.py
|
ccopsey/OpenMesh
|
93e6e626c3f282bf4275521c33cd8da1ca559c7d
|
[
"BSD-3-Clause"
] | 10
|
2020-08-06T02:37:46.000Z
|
2021-07-01T09:12:06.000Z
|
import unittest
import openmesh
class ReadWriteSTL(unittest.TestCase):
    """Tests reading STL meshes (ASCII and binary), with and without face normals."""

    # Known element counts of the cube1 / cube1Binary test meshes.
    N_VERTICES = 7526
    N_EDGES = 22572
    N_FACES = 15048

    def setUp(self):
        self.mesh = openmesh.TriMesh()

    def _assert_cube1_counts(self):
        """Assert the loaded mesh has the expected cube1 element counts."""
        self.assertEqual(self.mesh.n_vertices(), self.N_VERTICES)
        self.assertEqual(self.mesh.n_edges(), self.N_EDGES)
        self.assertEqual(self.mesh.n_faces(), self.N_FACES)

    def test_load_simple_stl_file(self):
        ok = openmesh.read_mesh(self.mesh, "cube1.stl")
        self.assertTrue(ok)
        self._assert_cube1_counts()

    def test_load_simple_stl_file_with_normals(self):
        self.mesh.request_face_normals()
        options = openmesh.Options()
        options += openmesh.Options.FaceNormal
        ok = openmesh.read_mesh(self.mesh, "cube1.stl", options)
        self.assertTrue(ok)
        normal = self.mesh.normal(self.mesh.face_handle(0))
        self.assertAlmostEqual(normal[0], -0.038545)
        self.assertAlmostEqual(normal[1], -0.004330)
        self.assertAlmostEqual(normal[2], 0.999247)
        self._assert_cube1_counts()
        self.mesh.release_face_normals()

    def test_load_simple_stl_binary_file(self):
        ok = openmesh.read_mesh(self.mesh, "cube1Binary.stl")
        self.assertTrue(ok)
        self._assert_cube1_counts()

    def test_load_simple_stl_binary_file_with_normals(self):
        self.mesh.request_face_normals()
        options = openmesh.Options()
        options += openmesh.Options.FaceNormal
        options += openmesh.Options.Binary
        ok = openmesh.read_mesh(self.mesh, "cube1Binary.stl", options)
        self.assertTrue(ok)
        self.assertTrue(options.is_binary())
        self.assertTrue(options.face_has_normal())
        self.assertFalse(options.vertex_has_normal())
        normal = self.mesh.normal(self.mesh.face_handle(0))
        # Binary round-trip is only accurate to ~5 decimals.
        self.assertAlmostEqual(normal[0], -0.038545, 5)
        self.assertAlmostEqual(normal[1], -0.004330, 5)
        self.assertAlmostEqual(normal[2], 0.999247, 5)
        self._assert_cube1_counts()
        self.mesh.release_face_normals()
if __name__ == '__main__':
    # Collect this module's test case explicitly and run it verbosely.
    loader = unittest.TestLoader()
    tests = loader.loadTestsFromTestCase(ReadWriteSTL)
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(tests)
| 35.776316
| 91
| 0.650975
| 329
| 2,719
| 5.182371
| 0.182371
| 0.154839
| 0.133724
| 0.161877
| 0.808211
| 0.808211
| 0.767155
| 0.748387
| 0.655718
| 0.655718
| 0
| 0.055924
| 0.223979
| 2,719
| 75
| 92
| 36.253333
| 0.752133
| 0
| 0
| 0.48
| 0
| 0
| 0.020596
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.1
| false
| 0
| 0.04
| 0
| 0.16
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
650e1955fcc19edb16dd0fbf4dc28656ca2335df
| 99,470
|
py
|
Python
|
shenfun/legendre/bases.py
|
jaisw7/shenfun
|
7482beb5b35580bc45f72704b69343cc6fc1d773
|
[
"BSD-2-Clause"
] | 1
|
2021-03-06T09:29:39.000Z
|
2021-03-06T09:29:39.000Z
|
shenfun/legendre/bases.py
|
jaisw7/shenfun
|
7482beb5b35580bc45f72704b69343cc6fc1d773
|
[
"BSD-2-Clause"
] | null | null | null |
shenfun/legendre/bases.py
|
jaisw7/shenfun
|
7482beb5b35580bc45f72704b69343cc6fc1d773
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Module for defining function spaces in the Legendre family
"""
from __future__ import division
import os
import functools
import sympy
import numpy as np
from numpy.polynomial import legendre as leg
from scipy.special import eval_legendre
from mpi4py_fft import fftw
from shenfun.spectralbase import SpectralBase, work, Transform, islicedict, \
slicedict
from .lobatto import legendre_lobatto_nodes_and_weights
# Public API of this module.
__all__ = ['LegendreBase', 'Orthogonal', 'ShenDirichlet',
           'ShenBiharmonic', 'ShenNeumann',
           'ShenBiPolar', 'ShenBiPolar0',
           'NeumannDirichlet', 'DirichletNeumann',
           'UpperDirichletNeumann',
           'UpperDirichlet',
           'BCDirichlet', 'BCBiharmonic', 'BCNeumann']
#pylint: disable=method-hidden,no-else-return,not-callable,abstract-method,no-member,cyclic-import
# Optional high-precision quadrature backend. quadpy/mpmath are soft
# dependencies; fall back to numpy-based quadrature when unavailable.
try:
    import quadpy
    from mpmath import mp
    mp.dps = 30  # decimal digits of precision for mpmath
    has_quadpy = True
except ImportError:
    # Narrowed from a bare `except:`: only a missing package is an expected
    # failure here; anything else should propagate.
    has_quadpy = False
    mp = None

# 'mpmath' mode is only honoured when quadpy is importable.
mode = os.environ.get('SHENFUN_LEGENDRE_MODE', 'numpy')
mode = mode if has_quadpy else 'numpy'
class LegendreBase(SpectralBase):
    """Base class for all Legendre spaces

    Parameters
    ----------
    N : int
        Number of quadrature points
    quad : str, optional
        Type of quadrature

        - LG - Legendre-Gauss
        - GL - Legendre-Gauss-Lobatto

    domain : 2-tuple of floats, optional
        The computational domain
    padding_factor : float, optional
        Factor for padding backward transforms.
    dealias_direct : bool, optional
        Set upper 1/3 of coefficients to zero before backward transform
    dtype : data-type, optional
        Type of input data in real physical space. Will be overloaded when
        basis is part of a :class:`.TensorProductSpace`.
    coordinates: 2- or 3-tuple (coordinate, position vector (, sympy assumptions)), optional
        Map for curvilinear coordinatesystem.
        The new coordinate variable in the new coordinate system is the first item.
        Second item is a tuple for the Cartesian position vector as function of the
        new variable in the first tuple. Example::

            theta = sp.Symbols('x', real=True, positive=True)
            rv = (sp.cos(theta), sp.sin(theta))
    """
    def __init__(self, N, quad="LG", domain=(-1., 1.), dtype=float, padding_factor=1,
                 dealias_direct=False, coordinates=None):
        # dtype default is the builtin `float`: the previous `np.float` alias
        # was deprecated in NumPy 1.20 and removed in 1.24, which made this
        # default raise AttributeError at class-definition time.
        SpectralBase.__init__(self, N, quad=quad, domain=domain, dtype=dtype,
                              padding_factor=padding_factor, dealias_direct=dealias_direct,
                              coordinates=coordinates)
        # Legendre has no fast transform; force the slow (vandermonde) path.
        self.forward = functools.partial(self.forward, fast_transform=False)
        self.backward = functools.partial(self.backward, fast_transform=False)
        self.scalar_product = functools.partial(self.scalar_product, fast_transform=False)
        self.plan(int(padding_factor*N), 0, dtype, {})

    @staticmethod
    def family():
        return 'legendre'

    def reference_domain(self):
        return (-1, 1)

    def points_and_weights(self, N=None, map_true_domain=False, weighted=True, **kw):
        """Return quadrature points and weights for this space."""
        if N is None:
            N = self.shape(False)
        if self.quad == "LG":
            points, weights = leg.leggauss(N)
        elif self.quad == "GL":
            points, weights = legendre_lobatto_nodes_and_weights(N)
        else:
            raise NotImplementedError
        if map_true_domain is True:
            points = self.map_true_domain(points)
        return points, weights

    def mpmath_points_and_weights(self, N=None, map_true_domain=False, weighted=True, **kw):
        """Like :meth:`points_and_weights`, but using quadpy/mpmath when enabled."""
        if mode == 'numpy' or not has_quadpy:
            return self.points_and_weights(N=N, map_true_domain=map_true_domain, weighted=weighted, **kw)
        if N is None:
            N = self.shape(False)
        if self.quad == 'LG':
            pw = quadpy.line_segment.gauss_legendre(N, 'mpmath')
        elif self.quad == 'GL':
            pw = quadpy.line_segment.gauss_lobatto(N) # No mpmath in quadpy for lobatto:-(
        else:
            # Previously fell through with `pw` unbound -> NameError below.
            raise NotImplementedError
        points = pw.points
        if map_true_domain is True:
            points = self.map_true_domain(points)
        return points, pw.weights

    def vandermonde(self, x):
        """Return Legendre Vandermonde matrix evaluated at ``x``."""
        return leg.legvander(x, self.shape(False)-1)

    def sympy_basis(self, i=0, x=sympy.symbols('x', real=True)):
        """Return basis function ``i`` as a sympy expression in ``x``."""
        return sympy.legendre(i, x)

    def evaluate_basis(self, x, i=0, output_array=None):
        x = np.atleast_1d(x)
        if output_array is None:
            output_array = np.zeros(x.shape)
        output_array = eval_legendre(i, x, out=output_array)
        return output_array

    def evaluate_basis_derivative_all(self, x=None, k=0, argument=0):
        """Evaluate the k'th derivative of all basis functions at points ``x``."""
        if x is None:
            x = self.mesh(False, False)
        V = self.vandermonde(x)
        M = V.shape[-1]
        if k > 0:
            # Differentiate in coefficient space via the derivative matrix D.
            D = np.zeros((M, M))
            D[:-k] = leg.legder(np.eye(M, M), k)
            V = np.dot(V, D)
        return self._composite(V, argument=argument)

    def evaluate_basis_all(self, x=None, argument=0):
        if x is None:
            x = self.mesh(False, False)
        V = self.vandermonde(x)
        return self._composite(V, argument=argument)

    def evaluate_basis_derivative(self, x=None, i=0, k=0, output_array=None):
        if x is None:
            x = self.mesh(False, False)
        x = np.atleast_1d(x)
        basis = np.zeros(self.shape(True))
        basis[i] = 1
        basis = leg.Legendre(basis)
        if k > 0:
            basis = basis.deriv(k)
        return basis(x)

    def _composite(self, V, argument=0):
        """Return composite basis, where ``V`` is primary Vandermonde matrix."""
        return V

    def plan(self, shape, axis, dtype, options):
        """Allocate work arrays and wrap forward/backward as Transforms."""
        if shape in (0, (0,)):
            return
        if isinstance(axis, tuple):
            assert len(axis) == 1
            axis = axis[0]
        if isinstance(self.forward, Transform):
            if self.forward.input_array.shape == shape and self.axis == axis:
                # Already planned
                return
        U = fftw.aligned(shape, dtype=dtype)
        V = fftw.aligned(shape, dtype=dtype)
        U.fill(0)
        V.fill(0)
        self.axis = axis
        if self.padding_factor > 1.+1e-8:
            trunc_array = self._get_truncarray(shape, V.dtype)
            self.forward = Transform(self.forward, None, U, V, trunc_array)
            self.backward = Transform(self.backward, None, trunc_array, V, U)
        else:
            self.forward = Transform(self.forward, None, U, V, V)
            self.backward = Transform(self.backward, None, V, V, U)
        self.scalar_product = Transform(self.scalar_product, None, U, V, V)
        self.si = islicedict(axis=self.axis, dimensions=self.dimensions)
        self.sl = slicedict(axis=self.axis, dimensions=self.dimensions)

    def get_orthogonal(self):
        """Return the orthogonal space with matching parameters."""
        return Orthogonal(self.N, quad=self.quad, dtype=self.dtype,
                          domain=self.domain,
                          padding_factor=self.padding_factor,
                          dealias_direct=self.dealias_direct,
                          coordinates=self.coors.coordinates)
class Orthogonal(LegendreBase):
    """Function space for regular (orthogonal) Legendre series

    Parameters
    ----------
    N : int
        Number of quadrature points
    quad : str, optional
        Type of quadrature

        - LG - Legendre-Gauss
        - GL - Legendre-Gauss-Lobatto

    domain : 2-tuple of floats, optional
        The computational domain
    padding_factor : float, optional
        Factor for padding backward transforms.
    dealias_direct : bool, optional
        Set upper 1/3 of coefficients to zero before backward transform
    dtype : data-type, optional
        Type of input data in real physical space. Will be overloaded when
        basis is part of a :class:`.TensorProductSpace`.
    coordinates: 2- or 3-tuple (coordinate, position vector (, sympy assumptions)), optional
        Map for curvilinear coordinatesystem.
        The new coordinate variable in the new coordinate system is the first item.
        Second item is a tuple for the Cartesian position vector as function of the
        new variable in the first tuple. Example::

            theta = sp.Symbols('x', real=True, positive=True)
            rv = (sp.cos(theta), sp.sin(theta))
    """
    def __init__(self, N, quad="LG", domain=(-1., 1.), dtype=float, padding_factor=1,
                 dealias_direct=False, coordinates=None):
        # `dtype=float`: the `np.float` alias was removed in NumPy 1.24.
        LegendreBase.__init__(self, N, quad=quad, domain=domain, dtype=dtype,
                              padding_factor=padding_factor, dealias_direct=dealias_direct,
                              coordinates=coordinates)

    def eval(self, x, u, output_array=None):
        """Evaluate the series with coefficients ``u`` at points ``x``."""
        if output_array is None:
            output_array = np.zeros(x.shape, dtype=self.dtype)
        x = self.map_reference_domain(x)
        output_array[:] = leg.legval(x, u)
        return output_array

    @property
    def is_orthogonal(self):
        return True
class ShenDirichlet(LegendreBase):
    """Legendre Function space for Dirichlet boundary conditions

    Parameters
    ----------
    N : int
        Number of quadrature points
    quad : str, optional
        Type of quadrature

        - LG - Legendre-Gauss
        - GL - Legendre-Gauss-Lobatto

    bc : tuple of numbers
        Boundary conditions at edges of domain
    domain : 2-tuple of floats, optional
        The computational domain
    scaled : bool, optional
        Whether or not to scale test functions with 1/sqrt(4k+6).
        Scaled test functions give a stiffness matrix equal to the
        identity matrix.
    padding_factor : float, optional
        Factor for padding backward transforms.
    dealias_direct : bool, optional
        Set upper 1/3 of coefficients to zero before backward transform
    dtype : data-type, optional
        Type of input data in real physical space. Will be overloaded when
        basis is part of a :class:`.TensorProductSpace`.
    coordinates: 2- or 3-tuple (coordinate, position vector (, sympy assumptions)), optional
        Map for curvilinear coordinatesystem.
        The new coordinate variable in the new coordinate system is the first item.
        Second item is a tuple for the Cartesian position vector as function of the
        new variable in the first tuple. Example::

            theta = sp.Symbols('x', real=True, positive=True)
            rv = (sp.cos(theta), sp.sin(theta))
    """
    def __init__(self, N, quad="LG", bc=(0., 0.), domain=(-1., 1.), dtype=float, scaled=False,
                 padding_factor=1, dealias_direct=False, coordinates=None):
        # `dtype=float`: the `np.float` alias was removed in NumPy 1.24.
        LegendreBase.__init__(self, N, quad=quad, domain=domain, dtype=dtype,
                              padding_factor=padding_factor, dealias_direct=dealias_direct,
                              coordinates=coordinates)
        from shenfun.tensorproductspace import BoundaryValues
        self._scaled = scaled
        self._factor = np.ones(1)
        self._bc_basis = None
        self.bc = BoundaryValues(self, bc=bc)

    @staticmethod
    def boundary_condition():
        return 'Dirichlet'

    @property
    def has_nonhomogeneous_bcs(self):
        return self.bc.has_nonhomogeneous_bcs()

    def set_factor_array(self, v):
        # Lazily (re)build the 1/sqrt(4k+6) scaling, only when scaled.
        if self.is_scaled():
            if not self._factor.shape == v.shape:
                k = self.wavenumbers().astype(float)
                self._factor = 1./np.sqrt(4*k+6)

    def is_scaled(self):
        return self._scaled

    def _composite(self, V, argument=0):
        # Composite basis: phi_k = (L_k - L_{k+2}), optionally scaled.
        P = np.zeros(V.shape)
        if not self.is_scaled():
            P[:, :-2] = V[:, :-2] - V[:, 2:]
        else:
            k = np.arange(self.N-2).astype(float)
            P[:, :-2] = (V[:, :-2] - V[:, 2:])/np.sqrt(4*k+6)
        if argument == 1:
            # Last two columns hold the boundary (BC) basis functions.
            P[:, -2] = (V[:, 0] - V[:, 1])/2
            P[:, -1] = (V[:, 0] + V[:, 1])/2
        return P

    def to_ortho(self, input_array, output_array=None):
        """Map coefficients from this composite basis to orthogonal Legendre."""
        if output_array is None:
            output_array = np.zeros_like(input_array)
        else:
            output_array.fill(0)
        s0 = self.sl[slice(0, -2)]
        s1 = self.sl[slice(2, None)]
        if self.is_scaled():
            k = self.wavenumbers()
            output_array[s0] = input_array[s0]/np.sqrt(4*k+6)
            output_array[s1] -= input_array[s0]/np.sqrt(4*k+6)
        else:
            output_array[s0] = input_array[s0]
            output_array[s1] -= input_array[s0]
        self.bc.add_to_orthogonal(output_array, input_array)
        return output_array

    def slice(self):
        # Two degrees of freedom are reserved for the boundary conditions.
        return slice(0, self.N-2)

    def sympy_basis(self, i=0, x=sympy.symbols('x', real=True)):
        f = sympy.legendre(i, x)-sympy.legendre(i+2, x)
        if self.is_scaled():
            f /= np.sqrt(4*i+6)
        return f

    def evaluate_basis(self, x, i=0, output_array=None):
        x = np.atleast_1d(x)
        if output_array is None:
            output_array = np.zeros(x.shape)
        output_array[:] = eval_legendre(i, x) - eval_legendre(i+2, x)
        if self.is_scaled():
            output_array /= np.sqrt(4*i+6)
        return output_array

    def evaluate_basis_derivative(self, x=None, i=0, k=0, output_array=None):
        if x is None:
            x = self.mesh(False, False)
        if output_array is None:
            output_array = np.zeros(x.shape)
        x = np.atleast_1d(x)
        basis = np.zeros(self.shape(True))
        basis[np.array([i, i+2])] = (1, -1)
        basis = leg.Legendre(basis)
        if k > 0:
            basis = basis.deriv(k)
        output_array[:] = basis(x)
        if self.is_scaled():
            output_array /= np.sqrt(4*i+6)
        return output_array

    def _evaluate_scalar_product(self, fast_transform=False):
        SpectralBase._evaluate_scalar_product(self)
        # Zero the two boundary entries of the scalar product.
        self.scalar_product.output_array[self.si[-2]] = 0
        self.scalar_product.output_array[self.si[-1]] = 0

    def eval(self, x, u, output_array=None):
        """Evaluate the series with coefficients ``u`` at points ``x``."""
        x = np.atleast_1d(x)
        if output_array is None:
            output_array = np.zeros(x.shape, dtype=self.dtype)
        x = self.map_reference_domain(x)
        w_hat = work[(u, 0, True)]
        self.set_factor_array(u)
        output_array[:] = leg.legval(x, u[:-2]*self._factor)
        w_hat[2:] = u[:-2]*self._factor
        output_array -= leg.legval(x, w_hat)
        # Linear boundary contribution from the two BC coefficients.
        output_array += 0.5*(u[-1]*(1+x) + u[-2]*(1-x))
        return output_array

    def get_bc_basis(self):
        if self._bc_basis:
            return self._bc_basis
        self._bc_basis = BCDirichlet(self.N, quad=self.quad, domain=self.domain,
                                     scaled=self._scaled, coordinates=self.coors.coordinates)
        return self._bc_basis

    def get_refined(self, N):
        return ShenDirichlet(N,
                             quad=self.quad,
                             domain=self.domain,
                             dtype=self.dtype,
                             padding_factor=self.padding_factor,
                             dealias_direct=self.dealias_direct,
                             coordinates=self.coors.coordinates,
                             bc=self.bc.bc,
                             scaled=self._scaled)

    def get_dealiased(self, padding_factor=1.5, dealias_direct=False):
        return ShenDirichlet(self.N,
                             quad=self.quad,
                             dtype=self.dtype,
                             padding_factor=padding_factor,
                             dealias_direct=dealias_direct,
                             domain=self.domain,
                             coordinates=self.coors.coordinates,
                             bc=self.bc.bc,
                             scaled=self._scaled)

    def get_unplanned(self):
        return ShenDirichlet(self.N,
                             quad=self.quad,
                             domain=self.domain,
                             dtype=self.dtype,
                             padding_factor=self.padding_factor,
                             dealias_direct=self.dealias_direct,
                             coordinates=self.coors.coordinates,
                             bc=self.bc.bc,
                             scaled=self._scaled)
class ShenNeumann(LegendreBase):
    """Function space for homogeneous Neumann boundary conditions

    Parameters
    ----------
    N : int
        Number of quadrature points
    quad : str, optional
        Type of quadrature

        - LG - Legendre-Gauss
        - GL - Legendre-Gauss-Lobatto

    mean : number
        mean value
    bc : 2-tuple of numbers
        Boundary conditions at edges of domain
    domain : 2-tuple of floats, optional
        The computational domain
    padding_factor : float, optional
        Factor for padding backward transforms.
    dealias_direct : bool, optional
        Set upper 1/3 of coefficients to zero before backward transform
    dtype : data-type, optional
        Type of input data in real physical space. Will be overloaded when
        basis is part of a :class:`.TensorProductSpace`.
    coordinates: 2- or 3-tuple (coordinate, position vector (, sympy assumptions)), optional
        Map for curvilinear coordinatesystem.
        The new coordinate variable in the new coordinate system is the first item.
        Second item is a tuple for the Cartesian position vector as function of the
        new variable in the first tuple. Example::

            theta = sp.Symbols('x', real=True, positive=True)
            rv = (sp.cos(theta), sp.sin(theta))
    """
    def __init__(self, N, quad="LG", mean=0, bc=(0., 0.), domain=(-1., 1.), padding_factor=1,
                 dealias_direct=False, dtype=float, coordinates=None):
        # `dtype=float`: the `np.float` alias was removed in NumPy 1.24.
        LegendreBase.__init__(self, N, quad=quad, domain=domain, dtype=dtype,
                              padding_factor=padding_factor, dealias_direct=dealias_direct,
                              coordinates=coordinates)
        from shenfun.tensorproductspace import BoundaryValues
        self.mean = mean
        self._factor = np.zeros(0)
        self._bc_basis = None
        self.bc = BoundaryValues(self, bc=bc)

    @staticmethod
    def boundary_condition():
        return 'Neumann'

    @property
    def has_nonhomogeneous_bcs(self):
        return self.bc.has_nonhomogeneous_bcs()

    def _composite(self, V, argument=0):
        # Composite basis: phi_k = L_k - k(k+1)/((k+2)(k+3)) * L_{k+2}.
        P = np.zeros(V.shape)
        k = np.arange(V.shape[1]).astype(float)
        P[:, :-2] = V[:, :-2] - (k[:-2]*(k[:-2]+1)/(k[:-2]+2))/(k[:-2]+3)*V[:, 2:]
        if argument == 1:
            # Last two columns hold the boundary (BC) basis functions.
            P[:, -2] = 0.5*V[:, 1] - 1/6*V[:, 2]
            P[:, -1] = 0.5*V[:, 1] + 1/6*V[:, 2]
        return P

    def set_factor_array(self, v):
        # Lazily (re)build the k(k+1)/((k+2)(k+3)) factor array.
        if not self._factor.shape == v.shape:
            k = self.wavenumbers().astype(float)
            self._factor = k*(k+1)/(k+2)/(k+3)

    def _evaluate_scalar_product(self, fast_transform=False):
        SpectralBase._evaluate_scalar_product(self)
        self.scalar_product.output_array[self.sl[slice(-2, None)]] = 0
        # NOTE(review): the np.pi scaling of `mean` looks Chebyshev-like;
        # confirm it is intended for the Legendre weight as well.
        self.scalar_product.output_array[self.si[0]] = self.mean*np.pi

    def sympy_basis(self, i=0, x=sympy.symbols('x', real=True)):
        f = sympy.legendre(i, x) - (i*(i+1))/((i+2)*(i+3))*sympy.legendre(i+2, x)
        return f

    def evaluate_basis(self, x, i=0, output_array=None):
        x = np.atleast_1d(x)
        if output_array is None:
            output_array = np.zeros(x.shape)
        output_array[:] = eval_legendre(i, x) - i*(i+1.)/(i+2.)/(i+3.)*eval_legendre(i+2, x)
        return output_array

    def evaluate_basis_derivative(self, x=None, i=0, k=0, output_array=None):
        if x is None:
            x = self.mesh(False, False)
        if output_array is None:
            output_array = np.zeros(x.shape)
        x = np.atleast_1d(x)
        basis = np.zeros(self.shape(True))
        basis[np.array([i, i+2])] = (1, -i*(i+1.)/(i+2.)/(i+3.))
        basis = leg.Legendre(basis)
        if k > 0:
            basis = basis.deriv(k)
        output_array[:] = basis(x)
        return output_array

    def to_ortho(self, input_array, output_array=None):
        """Map coefficients from this composite basis to orthogonal Legendre."""
        if output_array is None:
            output_array = np.zeros_like(input_array)
        else:
            output_array.fill(0)
        s0 = self.sl[slice(0, -2)]
        s1 = self.sl[slice(2, None)]
        self.set_factor_array(input_array)
        output_array[s0] = input_array[s0]
        output_array[s1] -= self._factor*input_array[s0]
        self.bc.add_to_orthogonal(output_array, input_array)
        return output_array

    def slice(self):
        # Two degrees of freedom are reserved for the boundary conditions.
        return slice(0, self.N-2)

    def eval(self, x, u, output_array=None):
        """Evaluate the series with coefficients ``u`` at points ``x``."""
        if output_array is None:
            output_array = np.zeros(x.shape, dtype=self.dtype)
        x = self.map_reference_domain(x)
        w_hat = work[(u, 0, True)]
        self.set_factor_array(u)
        output_array[:] = leg.legval(x, u[:-2])
        w_hat[2:] = self._factor*u[:-2]
        output_array -= leg.legval(x, w_hat)
        # Boundary contribution from the two BC coefficients.
        output_array += u[-2]*(0.5*x-1/3*(3*x**2-1)) + u[-1]*(0.5*x+1/3*(3*x**2-1))
        return output_array

    def get_bc_basis(self):
        if self._bc_basis:
            return self._bc_basis
        self._bc_basis = BCNeumann(self.N, quad=self.quad, domain=self.domain,
                                   coordinates=self.coors.coordinates)
        return self._bc_basis

    def get_refined(self, N):
        return ShenNeumann(N,
                           quad=self.quad,
                           domain=self.domain,
                           bc=self.bc.bc,
                           dtype=self.dtype,
                           padding_factor=self.padding_factor,
                           dealias_direct=self.dealias_direct,
                           coordinates=self.coors.coordinates,
                           mean=self.mean)

    def get_dealiased(self, padding_factor=1.5, dealias_direct=False):
        return ShenNeumann(self.N,
                           quad=self.quad,
                           domain=self.domain,
                           bc=self.bc.bc,
                           dtype=self.dtype,
                           padding_factor=padding_factor,
                           dealias_direct=dealias_direct,
                           coordinates=self.coors.coordinates,
                           mean=self.mean)

    def get_unplanned(self):
        return ShenNeumann(self.N,
                           quad=self.quad,
                           domain=self.domain,
                           bc=self.bc.bc,
                           dtype=self.dtype,
                           padding_factor=self.padding_factor,
                           dealias_direct=self.dealias_direct,
                           coordinates=self.coors.coordinates,
                           mean=self.mean)
class ShenBiharmonic(LegendreBase):
    """Function space for biharmonic basis

    Both Dirichlet and Neumann boundary conditions.

    Parameters
    ----------
    N : int
        Number of quadrature points
    quad : str, optional
        Type of quadrature

        - LG - Legendre-Gauss
        - GL - Legendre-Gauss-Lobatto

    bc : 4-tuple of numbers, optional
        The values of the 4 boundary conditions at x=(-1, 1).
        The two Dirichlet first and then the Neumann.
    domain : 2-tuple of floats, optional
        The computational domain
    padding_factor : float, optional
        Factor for padding backward transforms.
    dealias_direct : bool, optional
        Set upper 1/3 of coefficients to zero before backward transform
    dtype : data-type, optional
        Type of input data in real physical space. Will be overloaded when
        basis is part of a :class:`.TensorProductSpace`.
    coordinates: 2- or 3-tuple (coordinate, position vector (, sympy assumptions)), optional
        Map for curvilinear coordinatesystem.
        The new coordinate variable in the new coordinate system is the first item.
        Second item is a tuple for the Cartesian position vector as function of the
        new variable in the first tuple. Example::

            theta = sp.Symbols('x', real=True, positive=True)
            rv = (sp.cos(theta), sp.sin(theta))
    """
    def __init__(self, N, quad="LG", bc=(0, 0, 0, 0), domain=(-1., 1.), padding_factor=1,
                 dealias_direct=False, dtype=float, coordinates=None):
        # `dtype=float`: the `np.float` alias was removed in NumPy 1.24.
        from shenfun.tensorproductspace import BoundaryValues
        LegendreBase.__init__(self, N, quad=quad, domain=domain, dtype=dtype,
                              padding_factor=padding_factor, dealias_direct=dealias_direct,
                              coordinates=coordinates)
        self._factor1 = np.zeros(0)
        self._factor2 = np.zeros(0)
        self._bc_basis = None
        self.bc = BoundaryValues(self, bc=bc)

    @staticmethod
    def boundary_condition():
        return 'Biharmonic'

    @property
    def has_nonhomogeneous_bcs(self):
        return self.bc.has_nonhomogeneous_bcs()

    def _composite(self, V, argument=0):
        # phi_k = L_k - 2(2k+5)/(2k+7) L_{k+2} + (2k+3)/(2k+7) L_{k+4}.
        P = np.zeros_like(V)
        k = np.arange(V.shape[1]).astype(float)[:-4]
        P[:, :-4] = V[:, :-4] - (2*(2*k+5)/(2*k+7))*V[:, 2:-2] + ((2*k+3)/(2*k+7))*V[:, 4:]
        if argument == 1:
            # Last four columns hold the boundary (BC) basis functions.
            P[:, -4:] = np.tensordot(V[:, :4], BCBiharmonic.coefficient_matrix(), (1, 1))
        return P

    def set_factor_arrays(self, v):
        # Lazily (re)build the two composite-basis factor arrays.
        s = self.sl[self.slice()]
        if not self._factor1.shape == v[s].shape:
            k = self.wavenumbers().astype(float)
            self._factor1 = (-2*(2*k+5)/(2*k+7)).astype(float)
            self._factor2 = ((2*k+3)/(2*k+7)).astype(float)

    def _evaluate_scalar_product(self, fast_transform=False):
        SpectralBase._evaluate_scalar_product(self)
        self.scalar_product.output_array[self.sl[slice(-4, None)]] = 0

    #@optimizer
    def set_w_hat(self, w_hat, fk, f1, f2): # pragma: no cover
        # Scatter composite coefficients fk onto orthogonal Legendre
        # coefficients, with f1 applied at shift 2 and f2 at shift 4.
        s = self.sl[self.slice()]
        s2 = self.sl[slice(2, -2)]
        s4 = self.sl[slice(4, None)]
        w_hat[s] = fk[s]
        w_hat[s2] += f1*fk[s]
        w_hat[s4] += f2*fk[s]
        return w_hat

    def sympy_basis(self, i=0, x=sympy.symbols('x', real=True)):
        if i < self.N-4:
            f = (sympy.legendre(i, x)
                 -2*(2*i+5.)/(2*i+7.)*sympy.legendre(i+2, x)
                 +((2*i+3.)/(2*i+7.))*sympy.legendre(i+4, x))
        else:
            # Boundary basis: linear combination from the BC coefficient matrix.
            f = 0
            for j, c in enumerate(BCBiharmonic.coefficient_matrix()[i-(self.N-4)]):
                f += c*sympy.legendre(j, x)
        return f

    def evaluate_basis(self, x, i=0, output_array=None):
        x = np.atleast_1d(x)
        if output_array is None:
            output_array = np.zeros(x.shape)
        if i < self.N-4:
            output_array[:] = eval_legendre(i, x) - 2*(2*i+5.)/(2*i+7.)*eval_legendre(i+2, x) + ((2*i+3.)/(2*i+7.))*eval_legendre(i+4, x)
        else:
            X = sympy.symbols('x', real=True)
            output_array[:] = sympy.lambdify(X, self.sympy_basis(i, x=X))(x)
        return output_array

    def evaluate_basis_derivative(self, x=None, i=0, k=0, output_array=None):
        if x is None:
            x = self.mesh(False, False)
        if output_array is None:
            output_array = np.zeros(x.shape)
        x = np.atleast_1d(x)
        if i < self.N-4:
            basis = np.zeros(self.shape(True))
            basis[np.array([i, i+2, i+4])] = (1, -2*(2*i+5.)/(2*i+7.), ((2*i+3.)/(2*i+7.)))
            basis = leg.Legendre(basis)
            if k > 0:
                basis = basis.deriv(k)
            output_array[:] = basis(x)
        else:
            X = sympy.symbols('x', real=True)
            output_array[:] = sympy.lambdify(X, self.sympy_basis(i, X).diff(X, k))(x)
        return output_array

    def to_ortho(self, input_array, output_array=None):
        """Map coefficients from this composite basis to orthogonal Legendre."""
        if output_array is None:
            output_array = np.zeros_like(input_array)
        else:
            output_array.fill(0)
        self.set_factor_arrays(input_array)
        output_array = self.set_w_hat(output_array, input_array, self._factor1, self._factor2)
        self.bc.add_to_orthogonal(output_array, input_array)
        return output_array

    def slice(self):
        # Four degrees of freedom are reserved for the boundary conditions.
        return slice(0, self.N-4)

    def eval(self, x, u, output_array=None):
        """Evaluate the series with coefficients ``u`` at points ``x``."""
        if output_array is None:
            output_array = np.zeros(x.shape, dtype=self.dtype)
        x = self.map_reference_domain(x)
        w_hat = work[(u, 0, True)]
        self.set_factor_arrays(u)
        output_array[:] = leg.legval(x, u[:-4])
        w_hat[2:-2] = self._factor1*u[:-4]
        output_array += leg.legval(x, w_hat[:-2])
        w_hat[4:] = self._factor2*u[:-4]
        w_hat[:4] = 0
        output_array += leg.legval(x, w_hat)
        return output_array

    def get_bc_basis(self):
        if self._bc_basis:
            return self._bc_basis
        self._bc_basis = BCBiharmonic(self.N, quad=self.quad, domain=self.domain,
                                      coordinates=self.coors.coordinates)
        return self._bc_basis

    def get_refined(self, N):
        return ShenBiharmonic(N,
                              quad=self.quad,
                              domain=self.domain,
                              dtype=self.dtype,
                              padding_factor=self.padding_factor,
                              dealias_direct=self.dealias_direct,
                              coordinates=self.coors.coordinates,
                              bc=self.bc.bc)

    def get_dealiased(self, padding_factor=1.5, dealias_direct=False):
        return ShenBiharmonic(self.N,
                              quad=self.quad,
                              domain=self.domain,
                              dtype=self.dtype,
                              padding_factor=padding_factor,
                              dealias_direct=dealias_direct,
                              coordinates=self.coors.coordinates,
                              bc=self.bc.bc)

    def get_unplanned(self):
        return ShenBiharmonic(self.N,
                              quad=self.quad,
                              domain=self.domain,
                              dtype=self.dtype,
                              padding_factor=self.padding_factor,
                              dealias_direct=self.dealias_direct,
                              coordinates=self.coors.coordinates,
                              bc=self.bc.bc)
class BeamFixedFree(LegendreBase):
"""Function space for biharmonic basis
Function space for biharmonic basis
Fulfills the following boundary conditions:
u(-1) = a, u'(-1) = b, u''(1) = c, u'''(1) = d.
Both Dirichlet and Neumann boundary conditions.
Parameters
----------
N : int
Number of quadrature points
quad : str, optional
Type of quadrature
- LG - Legendre-Gauss
- GL - Legendre-Gauss-Lobatto
4-tuple of numbers, optional
The values of the 4 boundary conditions
u(-1) = a, u'(-1) = b, u''(1) = c, u'''(1) = d
domain : 2-tuple of floats, optional
The computational domain
padding_factor : float, optional
Factor for padding backward transforms.
dealias_direct : bool, optional
Set upper 1/3 of coefficients to zero before backward transform
dtype : data-type, optional
Type of input data in real physical space. Will be overloaded when
basis is part of a :class:`.TensorProductSpace`.
coordinates: 2- or 3-tuple (coordinate, position vector (, sympy assumptions)), optional
Map for curvilinear coordinatesystem.
The new coordinate variable in the new coordinate system is the first item.
Second item is a tuple for the Cartesian position vector as function of the
new variable in the first tuple. Example::
theta = sp.Symbol('x', real=True, positive=True)
rv = (sp.cos(theta), sp.sin(theta))
"""
def __init__(self, N, quad="LG", bc=(0, 0, 0, 0), domain=(-1., 1.), padding_factor=1,
dealias_direct=False, dtype=np.float, coordinates=None):
from shenfun.tensorproductspace import BoundaryValues
LegendreBase.__init__(self, N, quad=quad, domain=domain, dtype=dtype,
padding_factor=padding_factor, dealias_direct=dealias_direct,
coordinates=coordinates)
self._factor1 = np.zeros(0)
self._factor2 = np.zeros(0)
self._factor3 = np.zeros(0)
self._factor4 = np.zeros(0)
self._bc_basis = None
self.bc = BoundaryValues(self, bc=bc)
    @staticmethod
    def boundary_condition():
        # Identifier used to tag this space's boundary-condition type.
        return 'BeamFixedFree'
    @property
    def has_nonhomogeneous_bcs(self):
        # Delegates to the BoundaryValues helper held in self.bc.
        return self.bc.has_nonhomogeneous_bcs()
def _composite(self, V, argument=0):
P = np.zeros_like(V)
k = np.arange(V.shape[1]).astype(np.float)[:-4]
P[:, :-4] = (V[:, :-4] + 4*(2*k+3)/((k+3)**2)*V[:, 1:-3] - 2*(k-1)*(k+1)*(k+6)*(2*k+5)/((k+3)**2*(k+4)*(2*k+7))*V[:, 2:-2]
- 4*(k+1)**2*(2*k+3)/((k+3)**2*(k+4)**2)*V[:, 3:-1] + ((k+1)**2*(k+2)**2*(2*k+3)/((k+3)**2*(k+4)**2*(2*k+7)))*V[:, 4:])
if argument == 1:
P[:, -4:] = np.tensordot(V[:, :4], BCBeamFixedFree.coefficient_matrix(), (1, 1))
return P
def set_factor_arrays(self, v):
s = self.sl[self.slice()]
if not self._factor1.shape == v[s].shape:
k = self.wavenumbers().astype(np.float)
self._factor1 = (4*(2*k+3)/((k+3)**2)).astype(float)
self._factor2 = (-(2*(k-1)*(k+1)*(k+6)*(2*k+5)/((k+3)**2*(k+4)*(2*k+7)))).astype(float)
self._factor3 = (- 4*(k+1)**2*(2*k+3)/((k+3)**2*(k+4)**2)).astype(float)
self._factor4 = ((((k+1)/(k+3))*((k+2)/(k+4)))**2*(2*k+3)/(2*k+7)).astype(float)
    def _evaluate_scalar_product(self, fast_transform=False):
        # Compute the orthogonal scalar product, then zero the last four
        # entries, which are reserved for the boundary basis (see slice()).
        SpectralBase._evaluate_scalar_product(self)
        self.scalar_product.output_array[self.sl[slice(-4, None)]] = 0
    def set_w_hat(self, w_hat, fk, f1, f2): # pragma: no cover
        # Scatter composite coefficients ``fk`` onto orthogonal Legendre
        # coefficients ``w_hat``: f1 applies at shift 2, f2 at shift 4.
        # NOTE(review): only two of this space's four factor arrays are
        # handled here — confirm against `_composite`/`set_factor_arrays`.
        s = self.sl[self.slice()]
        s2 = self.sl[slice(2, -2)]
        s4 = self.sl[slice(4, None)]
        w_hat[s] = fk[s]
        w_hat[s2] += f1*fk[s]
        w_hat[s4] += f2*fk[s]
        return w_hat
    def sympy_basis(self, i=0, x=sympy.symbols('x', real=True)):
        """Return basis function ``i`` as a sympy expression in ``x``."""
        if i < self.N-4:
            f = (sympy.legendre(i, x)
                 +(4*(2*i+3)/((i+3)**2))*sympy.legendre(i+1, x)
                 -(2*(i-1)*(i+1)*(i+6)*(2*i+5)/((i+3)**2*(i+4)*(2*i+7)))*sympy.legendre(i+2, x)
                 -4*(i+1)**2*(2*i+3)/((i+3)**2*(i+4)**2)*sympy.legendre(i+3, x)
                 +(i+1)**2*(i+2)**2*(2*i+3)/((i+3)**2*(i+4)**2*(2*i+7))*sympy.legendre(i+4, x))
        else:
            # Boundary basis: linear combination from the BC coefficient matrix.
            f = 0
            for j, c in enumerate(BCBeamFixedFree.coefficient_matrix()[i-(self.N-4)]):
                f += c*sympy.legendre(j, x)
        return f
def evaluate_basis(self, x, i=0, output_array=None):
x = np.atleast_1d(x)
if output_array is None:
output_array = np.zeros(x.shape)
if i < self.N-4:
output_array[:] = eval_legendre(i, x) + (4*(2*i+3)/((i+3)**2))*eval_legendre(i+1, x) \
-(2*(i-1)*(i+1)*(i+6)*(2*i+5)/((i+3)**2*(i+4)*(2*i+7)))*eval_legendre(i+2, x) \
-4*(i+1)**2*(2*i+3)/((i+3)**2*(i+4)**2)*eval_legendre(i+3, x) \
+(i+1)**2*(i+2)**2*(2*i+3)/((i+3)^2*(i+4)**2*(2*i+7))*eval_legendre(i+4, x)
else:
X = sympy.symbols('x', real=True)
output_array[:] = sympy.lambdify(X, self.sympy_basis(i, x=X))(x)
return output_array
def evaluate_basis_derivative(self, x=None, i=0, k=0, output_array=None):
    """Evaluate the k'th derivative of basis function ``i`` at points ``x``.

    Defaults to the quadrature mesh when ``x`` is not given. The composite
    basis is expressed as a numpy Legendre series and differentiated
    through :class:`numpy.polynomial.legendre.Legendre`; boundary basis
    functions fall back to symbolic differentiation.
    """
    if x is None:
        x = self.mesh(False, False)
    if output_array is None:
        output_array = np.zeros(x.shape)
    x = np.atleast_1d(x)
    if i < self.N-4:
        # Coefficients of the composite basis in the orthogonal basis.
        c1 = 4*(2*i+3)/((i+3)**2)
        c2 = -(2*(i-1)*(i+1)*(i+6)*(2*i+5)/((i+3)**2*(i+4)*(2*i+7)))
        c3 = -4*(i+1)**2*(2*i+3)/((i+3)**2*(i+4)**2)
        c4 = (i+1)**2*(i+2)**2*(2*i+3)/((i+3)**2*(i+4)**2*(2*i+7))
        coeffs = np.zeros(self.shape(True))
        for offset, value in enumerate((1, c1, c2, c3, c4)):
            coeffs[i+offset] = value
        series = leg.Legendre(coeffs)
        if k > 0:
            series = series.deriv(k)
        output_array[:] = series(x)
    else:
        # Boundary basis function: differentiate symbolically.
        X = sympy.symbols('x', real=True)
        output_array[:] = sympy.lambdify(X, self.sympy_basis(i, X).diff(X, k))(x)
    return output_array
def to_ortho(self, input_array, output_array=None):
    """Map composite coefficients to orthogonal Legendre coefficients.

    Parameters
    ----------
    input_array : array
        Coefficients in this composite space.
    output_array : array, optional
        Result array in the orthogonal space; created if not given.

    NOTE(review): only _factor1 and _factor2 are forwarded to set_w_hat
    even though this space maintains four factor arrays — confirm against
    set_w_hat's (offset 2/4) contract, which looks equally suspicious.
    """
    if output_array is None:
        output_array = Function(self.get_orthogonal())
    else:
        output_array.fill(0)
    self.set_factor_arrays(input_array)
    output_array = self.set_w_hat(output_array, input_array, self._factor1, self._factor2)
    # Add the boundary (inhomogeneous BC) contribution.
    self.bc.add_to_orthogonal(output_array, input_array)
    return output_array
def slice(self):
    """Return the slice covering the active (non-boundary) coefficients."""
    stop = self.N - 4
    return slice(0, stop)
def eval(self, x, u, output_array=None):
    """Evaluate the expansion with coefficients ``u`` at points ``x``.

    Each composite coefficient u_k contributes to Legendre modes
    k, k+1, k+2, k+3 and k+4 through the four factor arrays; the sums are
    accumulated with four shifted ``legval`` calls on a work array.

    Parameters
    ----------
    x : array of floats
        Evaluation points (in the true domain; mapped internally).
    u : array
        Expansion coefficients.
    output_array : array, optional
        Result array; created if not given.
    """
    if output_array is None:
        output_array = np.zeros(x.shape, dtype=self.dtype)
    x = self.map_reference_domain(x)
    w_hat = work[(u, 0, True)]
    self.set_factor_arrays(u)
    output_array[:] = leg.legval(x, u[:-4])
    w_hat[1:-3] = self._factor1*u[:-4]
    w_hat[0] = 0
    output_array += leg.legval(x, w_hat[:-3])
    w_hat[2:-2] = self._factor2*u[:-4]
    w_hat[:2] = 0
    output_array += leg.legval(x, w_hat[:-2])
    w_hat[3:-1] = self._factor3*u[:-4]
    w_hat[:3] = 0
    output_array += leg.legval(x, w_hat[:-1])
    # Bugfix: the P_{k+4} contribution must use _factor4; the original
    # reused _factor3 here, leaving _factor4 computed but never used.
    w_hat[4:] = self._factor4*u[:-4]
    w_hat[:4] = 0
    output_array += leg.legval(x, w_hat)
    return output_array
def get_bc_basis(self):
    """Return the boundary basis for this space, creating and caching it
    on first use."""
    if not self._bc_basis:
        self._bc_basis = BCBeamFixedFree(self.N, quad=self.quad,
                                         domain=self.domain,
                                         coordinates=self.coors.coordinates)
    return self._bc_basis
def get_refined(self, N):
    """Return a copy of this space with ``N`` quadrature points."""
    settings = dict(quad=self.quad,
                    domain=self.domain,
                    dtype=self.dtype,
                    padding_factor=self.padding_factor,
                    dealias_direct=self.dealias_direct,
                    coordinates=self.coors.coordinates,
                    bc=self.bc.bc)
    return BeamFixedFree(N, **settings)
def get_dealiased(self, padding_factor=1.5, dealias_direct=False):
    """Return a copy of this space configured for dealiasing."""
    settings = dict(quad=self.quad,
                    domain=self.domain,
                    dtype=self.dtype,
                    padding_factor=padding_factor,
                    dealias_direct=dealias_direct,
                    coordinates=self.coors.coordinates,
                    bc=self.bc.bc)
    return BeamFixedFree(self.N, **settings)
def get_unplanned(self):
    """Return an unplanned copy of this space with identical settings."""
    settings = dict(quad=self.quad,
                    domain=self.domain,
                    dtype=self.dtype,
                    padding_factor=self.padding_factor,
                    dealias_direct=self.dealias_direct,
                    coordinates=self.coors.coordinates,
                    bc=self.bc.bc)
    return BeamFixedFree(self.N, **settings)
class UpperDirichlet(LegendreBase):
    """Legendre function space with homogeneous Dirichlet boundary conditions on x=1

    Parameters
    ----------
    N : int
        Number of quadrature points
    quad : str, optional
        Type of quadrature

        - LG - Legendre-Gauss
        - GL - Legendre-Gauss-Lobatto
    domain : 2-tuple of floats, optional
        The computational domain
    padding_factor : float, optional
        Factor for padding backward transforms.
    dealias_direct : bool, optional
        Set upper 1/3 of coefficients to zero before backward transform
    dtype : data-type, optional
        Type of input data in real physical space. Will be overloaded when
        basis is part of a :class:`.TensorProductSpace`.
    coordinates: 2- or 3-tuple (coordinate, position vector (, sympy assumptions)), optional
        Map for curvilinear coordinatesystem.
        The new coordinate variable in the new coordinate system is the first item.
        Second item is a tuple for the Cartesian position vector as function of the
        new variable in the first tuple. Example::

            theta = sp.Symbols('x', real=True, positive=True)
            rv = (sp.cos(theta), sp.sin(theta))
    """
    # Bugfix: dtype default was np.float, a deprecated alias removed in
    # NumPy 1.24 (it is evaluated at class-definition time and would make
    # the whole module fail to import on modern NumPy).
    def __init__(self, N, quad="LG", bc=(None, 0), domain=(-1., 1.), dtype=float,
                 padding_factor=1, dealias_direct=False, coordinates=None):
        assert quad == "LG"
        LegendreBase.__init__(self, N, quad=quad, domain=domain, dtype=dtype,
                              padding_factor=padding_factor, dealias_direct=dealias_direct,
                              coordinates=coordinates)
        from shenfun.tensorproductspace import BoundaryValues
        self._factor = np.ones(1)
        self._bc_basis = None
        self.bc = BoundaryValues(self, bc=bc)

    @staticmethod
    def boundary_condition():
        return 'UpperDirichlet'

    @property
    def has_nonhomogeneous_bcs(self):
        return self.bc.has_nonhomogeneous_bcs()

    def is_scaled(self):
        return False

    def _composite(self, V, argument=0):
        """Assemble composite basis phi_k = L_k - L_{k+1} (vanishes at x=1)."""
        P = np.zeros(V.shape)
        P[:, :-1] = V[:, :-1] - V[:, 1:]
        if argument == 1:  # if trial function
            P[:, -1] = (V[:, 0] + V[:, 1])/2    # x = +1
        return P

    def to_ortho(self, input_array, output_array=None):
        """Map composite coefficients to orthogonal Legendre coefficients."""
        if output_array is None:
            output_array = np.zeros_like(input_array)
        else:
            output_array.fill(0)
        s0 = self.sl[slice(0, -1)]
        s1 = self.sl[slice(1, None)]
        output_array[s0] = input_array[s0]
        output_array[s1] -= input_array[s0]
        self.bc.add_to_orthogonal(output_array, input_array)
        return output_array

    def slice(self):
        return slice(0, self.N-1)

    def sympy_basis(self, i=0, x=sympy.symbols('x', real=True)):
        if i < self.N-1:
            return sympy.legendre(i, x)-sympy.legendre(i+1, x)
        assert i == self.N-1
        return 0.5*(1+x)

    def evaluate_basis(self, x, i=0, output_array=None):
        x = np.atleast_1d(x)
        if output_array is None:
            output_array = np.zeros(x.shape)
        if i < self.N-1:
            output_array[:] = eval_legendre(i, x) - eval_legendre(i+1, x)
        elif i == self.N-1:
            # Boundary basis function carrying the value at x=1.
            output_array[:] = 0.5*(1+x)
        return output_array

    def evaluate_basis_derivative(self, x=None, i=0, k=0, output_array=None):
        if x is None:
            x = self.mesh(False, False)
        if output_array is None:
            output_array = np.zeros(x.shape)
        x = np.atleast_1d(x)
        if i < self.N-1:
            basis = np.zeros(self.shape(True))
            basis[np.array([i, i+1])] = (1, -1)
            basis = leg.Legendre(basis)
            if k > 0:
                basis = basis.deriv(k)
            output_array[:] = basis(x)
        else:
            # Boundary basis 0.5*(1+x): derivative 0.5 for k=1, 0 for k>1.
            if k == 1:
                output_array[:] = 0.5
            else:
                output_array[:] = 0
        return output_array

    def _evaluate_scalar_product(self, fast_transform=False):
        SpectralBase._evaluate_scalar_product(self)
        # The last slot is reserved for the boundary degree of freedom.
        self.scalar_product.output_array[self.si[-1]] = 0

    def eval(self, x, u, output_array=None):
        x = np.atleast_1d(x)
        if output_array is None:
            output_array = np.zeros(x.shape, dtype=self.dtype)
        x = self.map_reference_domain(x)
        w_hat = work[(u, 0, True)]
        output_array[:] = leg.legval(x, u[:-1])
        w_hat[1:] = u[:-1]
        # Bugfix: w_hat[0] is not written above and may hold stale data
        # from a reused work array (same safeguard used elsewhere).
        w_hat[0] = 0
        output_array -= leg.legval(x, w_hat)
        output_array += 0.5*u[-1]*(1+x)
        return output_array

    def get_bc_basis(self):
        if self._bc_basis:
            return self._bc_basis
        self._bc_basis = BCUpperDirichlet(self.N, quad=self.quad, domain=self.domain,
                                          coordinates=self.coors.coordinates)
        return self._bc_basis

    def get_refined(self, N):
        return UpperDirichlet(N,
                              quad=self.quad,
                              domain=self.domain,
                              dtype=self.dtype,
                              padding_factor=self.padding_factor,
                              dealias_direct=self.dealias_direct,
                              coordinates=self.coors.coordinates,
                              bc=self.bc.bc)

    def get_dealiased(self, padding_factor=1.5, dealias_direct=False):
        return UpperDirichlet(self.N,
                              quad=self.quad,
                              domain=self.domain,
                              dtype=self.dtype,
                              padding_factor=padding_factor,
                              dealias_direct=dealias_direct,
                              coordinates=self.coors.coordinates,
                              bc=self.bc.bc)

    def get_unplanned(self):
        return UpperDirichlet(self.N,
                              quad=self.quad,
                              domain=self.domain,
                              dtype=self.dtype,
                              padding_factor=self.padding_factor,
                              dealias_direct=self.dealias_direct,
                              coordinates=self.coors.coordinates,
                              bc=self.bc.bc)
class ShenBiPolar(LegendreBase):
    """Legendre function space for the Biharmonic equation in polar coordinates

    Parameters
    ----------
    N : int
        Number of quadrature points
    quad : str, optional
        Type of quadrature

        - LG - Legendre-Gauss
        - GL - Legendre-Gauss-Lobatto
    domain : 2-tuple of floats, optional
        The computational domain
    padding_factor : float, optional
        Factor for padding backward transforms.
    dealias_direct : bool, optional
        Set upper 1/3 of coefficients to zero before backward transform
    dtype : data-type, optional
        Type of input data in real physical space. Will be overloaded when
        basis is part of a :class:`.TensorProductSpace`.
    coordinates: 2- or 3-tuple (coordinate, position vector (, sympy assumptions)), optional
        Map for curvilinear coordinatesystem.
        The new coordinate variable in the new coordinate system is the first item.
        Second item is a tuple for the Cartesian position vector as function of the
        new variable in the first tuple. Example::

            theta = sp.Symbols('x', real=True, positive=True)
            rv = (sp.cos(theta), sp.sin(theta))
    """
    # Bugfix: dtype default was np.float (removed in NumPy 1.24).
    def __init__(self, N, quad="LG", domain=(-1., 1.), dtype=float,
                 padding_factor=1, dealias_direct=False, coordinates=None):
        assert quad == "LG"
        LegendreBase.__init__(self, N, quad=quad, domain=domain, dtype=dtype,
                              padding_factor=padding_factor, dealias_direct=dealias_direct,
                              coordinates=coordinates)

    @staticmethod
    def boundary_condition():
        return 'BiPolar'

    @property
    def has_nonhomogeneous_bcs(self):
        return False

    def to_ortho(self, input_array, output_array=None):
        raise(NotImplementedError)

    def slice(self):
        return slice(0, self.N-4)

    def sympy_basis(self, i=0, x=sympy.symbols('x', real=True)):
        # (1-x)^2 (1+x)^2 L'_{i+1}: satisfies homogeneous Dirichlet and
        # Neumann conditions at both ends.
        return (1-x)**2*(1+x)**2*(sympy.legendre(i+1, x).diff(x, 1))

    def evaluate_basis(self, x=None, i=0, output_array=None):
        output_array = SpectralBase.evaluate_basis(self, x=x, i=i, output_array=output_array)
        return output_array

    def evaluate_basis_derivative(self, x=None, i=0, k=0, output_array=None):
        output_array = SpectralBase.evaluate_basis_derivative(self, x=x, i=i, k=k, output_array=output_array)
        return output_array

    def evaluate_basis_all(self, x=None, argument=0):
        if x is None:
            x = self.mpmath_points_and_weights()[0]
        output_array = np.zeros((x.shape[0], self.N))
        for j in range(self.N-4):
            output_array[:, j] = self.evaluate_basis(x, j, output_array=output_array[:, j])
        return output_array

    def evaluate_basis_derivative_all(self, x=None, k=0, argument=0):
        if x is None:
            x = self.mpmath_points_and_weights()[0]
        V = np.zeros((x.shape[0], self.N))
        # NOTE(review): loops to N-2 while this space has N-4 active basis
        # functions (see slice/evaluate_basis_all) — confirm whether the
        # extra two columns are intentional.
        for i in range(self.N-2):
            V[:, i] = self.evaluate_basis_derivative(x, i, k, output_array=V[:, i])
        return V

    def _evaluate_scalar_product(self, fast_transform=False):
        SpectralBase._evaluate_scalar_product(self)
        self.scalar_product.output_array[self.sl[slice(-4, None)]] = 0

    def eval(self, x, u, output_array=None):
        x = np.atleast_1d(x)
        if output_array is None:
            output_array = np.zeros(x.shape, dtype=self.dtype)
        else:
            output_array.fill(0)
        x = self.map_reference_domain(x)
        fj = self.evaluate_basis_all(x)
        output_array[:] = np.dot(fj, u)
        return output_array
class ShenBiPolar0(LegendreBase):
    """Legendre function space for biharmonic basis for polar coordinates

    Homogeneous Dirichlet and Neumann boundary conditions.

    Parameters
    ----------
    N : int
        Number of quadrature points
    quad : str, optional
        Type of quadrature

        - LG - Legendre-Gauss
    4-tuple of numbers, optional
        The values of the 4 boundary conditions at x=(-1, 1).
        The two Dirichlet first and then the Neumann.
    domain : 2-tuple of floats, optional
        The computational domain
    padding_factor : float, optional
        Factor for padding backward transforms.
    dealias_direct : bool, optional
        Set upper 1/3 of coefficients to zero before backward transform
    dtype : data-type, optional
        Type of input data in real physical space. Will be overloaded when
        basis is part of a :class:`.TensorProductSpace`.
    coordinates: 2- or 3-tuple (coordinate, position vector (, sympy assumptions)), optional
        Map for curvilinear coordinatesystem.
        The new coordinate variable in the new coordinate system is the first item.
        Second item is a tuple for the Cartesian position vector as function of the
        new variable in the first tuple. Example::

            theta = sp.Symbols('x', real=True, positive=True)
            rv = (sp.cos(theta), sp.sin(theta))
    """
    # Bugfix: dtype default was np.float (removed in NumPy 1.24).
    def __init__(self, N, quad="LG", domain=(-1., 1.), padding_factor=1,
                 dealias_direct=False, dtype=float, coordinates=None):
        assert quad == "LG"
        LegendreBase.__init__(self, N, quad="LG", domain=domain, dtype=dtype,
                              padding_factor=padding_factor, dealias_direct=dealias_direct,
                              coordinates=coordinates)
        self._factor1 = np.zeros(0)
        self._factor2 = np.zeros(0)
        self._factor3 = np.zeros(0)

    @staticmethod
    def boundary_condition():
        return 'BiPolar0'

    @property
    def has_nonhomogeneous_bcs(self):
        return False

    def _composite(self, V, argument=0):
        P = np.zeros_like(V)
        k = np.arange(V.shape[1]).astype(float)[:-3]
        P[:, :-3] = V[:, :-3] - ((2*k+3)*(k+4)/(2*k+5)/(k+2))*V[:, 1:-2] - (k*(k+1)/(k+2)/(k+3))*V[:, 2:-1] + (k+1)*(2*k+3)/(k+3)/(2*k+5)*V[:, 3:]
        return P

    def set_factor_arrays(self, v):
        """Compute and cache the three recurrence factor arrays."""
        s = self.sl[self.slice()]
        if not self._factor1.shape == v[s].shape:
            k = self.wavenumbers().astype(float)
            self._factor1 = (-(2*k+3)*(k+4)/(2*k+5)/(k+2)).astype(float)
            self._factor2 = (-k*(k+1)/(k+2)/(k+3)).astype(float)
            self._factor3 = ((k+1)*(2*k+3)/(k+3)/(2*k+5)).astype(float)

    def set_w_hat(self, w_hat, fk, f1, f2, f3):  # pragma: no cover
        """Accumulate composite coefficients into orthogonal array w_hat."""
        s = self.sl[self.slice()]
        s1 = self.sl[slice(1, -2)]
        s2 = self.sl[slice(2, -1)]
        s3 = self.sl[slice(3, None)]
        w_hat[s] = fk[s]
        w_hat[s1] += f1*fk[s]
        w_hat[s2] += f2*fk[s]
        w_hat[s3] += f3*fk[s]
        return w_hat

    def sympy_basis(self, i=0, x=sympy.symbols('x', real=True)):
        return (sympy.legendre(i, x)
                -(2*i+3)*(i+4)/(2*i+5)/(i+2)*sympy.legendre(i+1, x)
                -i*(i+1)/(i+2)/(i+3)*sympy.legendre(i+2, x)
                +(i+1)*(2*i+3)/(i+3)/(2*i+5)*sympy.legendre(i+3, x))

    def evaluate_basis(self, x=None, i=0, output_array=None):
        output_array = SpectralBase.evaluate_basis(self, x=x, i=i, output_array=output_array)
        return output_array

    def evaluate_basis_derivative(self, x=None, i=0, k=0, output_array=None):
        if x is None:
            x = self.mesh(False, False)
        x = np.atleast_1d(x)
        if output_array is None:
            output_array = np.zeros(x.shape)
        if i < self.N-3:
            basis = np.zeros(self.shape(True))
            basis[np.array([i, i+1, i+2, i+3])] = (1,
                                                   -(2*i+3)*(i+4)/(2*i+5)/(i+2),
                                                   -i*(i+1)/(i+2)/(i+3),
                                                   (i+1)*(2*i+3)/(i+3)/(2*i+5))
            basis = leg.Legendre(basis)
            if k > 0:
                basis = basis.deriv(k)
            output_array[:] = basis(x)
        else:
            raise RuntimeError
        return output_array

    def evaluate_basis_derivative_all(self, x=None, k=0, argument=0):
        if x is None:
            x = self.mpmath_points_and_weights()[0]
        V = np.zeros((x.shape[0], self.N))
        for i in range(self.N-3):
            V[:, i] = self.evaluate_basis_derivative(x, i, k, output_array=V[:, i])
        return V

    def to_ortho(self, input_array, output_array=None):
        if output_array is None:
            output_array = np.zeros_like(input_array)
        else:
            output_array.fill(0)
        self.set_factor_arrays(input_array)
        output_array = self.set_w_hat(output_array, input_array, self._factor1, self._factor2, self._factor3)
        return output_array

    def slice(self):
        return slice(0, self.N-3)

    def _evaluate_scalar_product(self, fast_transform=False):
        SpectralBase._evaluate_scalar_product(self)
        self.scalar_product.output_array[self.sl[slice(-3, None)]] = 0

    def eval(self, x, u, output_array=None):
        if output_array is None:
            output_array = np.zeros(x.shape, dtype=self.dtype)
        x = self.map_reference_domain(x)
        w_hat = work[(u, 0, True)]
        self.set_factor_arrays(u)
        output_array[:] = leg.legval(x, u[:-3])
        w_hat[1:-2] = self._factor1*u[:-3]
        # Bugfix: entries of w_hat not written in a given step are zeroed
        # before legval — the work array may be reused and hold stale data
        # (same safeguard as BeamFixedFree.eval).
        w_hat[0] = 0
        output_array += leg.legval(x, w_hat[:-2])
        w_hat[2:-1] = self._factor2*u[:-3]
        w_hat[:2] = 0
        w_hat[-1] = 0
        output_array += leg.legval(x, w_hat)
        w_hat[3:] = self._factor3*u[:-3]
        w_hat[:3] = 0
        output_array += leg.legval(x, w_hat)
        return output_array
class DirichletNeumann(LegendreBase):
    """Function space for mixed Dirichlet/Neumann boundary conditions
    u(-1)=0, u'(1)=0

    Parameters
    ----------
    N : int
        Number of quadrature points
    quad : str, optional
        Type of quadrature

        - LG - Legendre-Gauss
        - GL - Legendre-Gauss-Lobatto
    bc : tuple of numbers
        Boundary conditions at edges of domain
    domain : 2-tuple of floats, optional
        The computational domain
    scaled : bool, optional
        Whether or not to scale test functions with 1/sqrt(4k+6).
        Scaled test functions give a stiffness matrix equal to the
        identity matrix.
    padding_factor : float, optional
        Factor for padding backward transforms.
    dealias_direct : bool, optional
        Set upper 1/3 of coefficients to zero before backward transform
    dtype : data-type, optional
        Type of input data in real physical space. Will be overloaded when
        basis is part of a :class:`.TensorProductSpace`.
    coordinates: 2- or 3-tuple (coordinate, position vector (, sympy assumptions)), optional
        Map for curvilinear coordinatesystem.
        The new coordinate variable in the new coordinate system is the first item.
        Second item is a tuple for the Cartesian position vector as function of the
        new variable in the first tuple. Example::

            theta = sp.Symbols('x', real=True, positive=True)
            rv = (sp.cos(theta), sp.sin(theta))
    """
    # Bugfix: dtype default was np.float (removed in NumPy 1.24).
    def __init__(self, N, quad="LG", bc=(0., 0.), domain=(-1., 1.), dtype=float,
                 padding_factor=1, dealias_direct=False, coordinates=None):
        LegendreBase.__init__(self, N, quad=quad, domain=domain, dtype=dtype,
                              padding_factor=padding_factor, dealias_direct=dealias_direct,
                              coordinates=coordinates)
        from shenfun.tensorproductspace import BoundaryValues
        self._factor1 = np.ones(1)
        self._factor2 = np.ones(1)
        self._bc_basis = None
        self.bc = BoundaryValues(self, bc=bc)

    @staticmethod
    def boundary_condition():
        return 'DirichletNeumann'

    @property
    def has_nonhomogeneous_bcs(self):
        return self.bc.has_nonhomogeneous_bcs()

    def set_factor_array(self, v):
        """Set intermediate factor arrays"""
        s = self.sl[self.slice()]
        if not self._factor1.shape == v[s].shape:
            k = self.wavenumbers().astype(float)
            self._factor1 = ((2*k+3)/(k+2)**2).astype(float)
            self._factor2 = -(((k+1)/(k+2))**2).astype(float)

    def _composite(self, V, argument=0):
        P = np.zeros_like(V)
        k = np.arange(V.shape[1]).astype(float)[:-2]
        P[:, :-2] = (V[:, :-2]
                     +((2*k+3)/(k+2)**2)*V[:, 1:-1]
                     -(((k+1)/(k+2))**2)*V[:, 2:])
        if argument == 1:
            # Boundary basis functions for the two BC degrees of freedom.
            P[:, -2] = V[:, 0]
            P[:, -1] = V[:, 0]+V[:, 1]
        return P

    def set_w_hat(self, w_hat, fk, f1, f2):
        """Accumulate composite coefficients into orthogonal array w_hat."""
        s = self.sl[self.slice()]
        s1 = self.sl[slice(1, -1)]
        s2 = self.sl[slice(2, None)]
        w_hat[s] = fk[s]
        w_hat[s1] += f1*fk[s]
        w_hat[s2] += f2*fk[s]
        return w_hat

    def to_ortho(self, input_array, output_array=None):
        if output_array is None:
            output_array = np.zeros_like(input_array)
        else:
            output_array.fill(0)
        # Bugfix: this class defines set_factor_array (singular); the call
        # previously targeted a non-existent set_factor_arrays and raised
        # AttributeError.
        self.set_factor_array(input_array)
        output_array = self.set_w_hat(output_array, input_array, self._factor1, self._factor2)
        self.bc.add_to_orthogonal(output_array, input_array)
        return output_array

    def slice(self):
        return slice(0, self.N-2)

    def _evaluate_scalar_product(self, fast_transform=False):
        SpectralBase._evaluate_scalar_product(self)
        self.scalar_product.output_array[self.sl[slice(-2, None)]] = 0

    def sympy_basis(self, i=0, x=sympy.symbols('x', real=True)):
        assert i < self.N-2
        return (sympy.legendre(i, x)
                +(2*i+3)/(i+2)**2*sympy.legendre(i+1, x)
                -(i+1)**2/(i+2)**2*sympy.legendre(i+2, x))

    def evaluate_basis(self, x, i=0, output_array=None):
        x = np.atleast_1d(x)
        if output_array is None:
            output_array = np.zeros(x.shape)
        output_array[:] = (eval_legendre(i, x)
                           +(2*i+3)/(i+2)**2*eval_legendre(i+1, x)
                           -(i+1)**2/(i+2)**2*eval_legendre(i+2, x))
        return output_array

    def evaluate_basis_derivative(self, x=None, i=0, k=0, output_array=None):
        if x is None:
            x = self.mesh(False, False)
        if output_array is None:
            output_array = np.zeros(x.shape)
        x = np.atleast_1d(x)
        basis = np.zeros(self.shape(True))
        basis[np.array([i, i+1, i+2])] = (1, (2*i+3)/(i+2)**2, -(i+1)**2/(i+2)**2)
        basis = leg.Legendre(basis)
        if k > 0:
            basis = basis.deriv(k)
        output_array[:] = basis(x)
        return output_array

    def eval(self, x, u, output_array=None):
        x = np.atleast_1d(x)
        if output_array is None:
            # dtype=self.dtype for consistency with sibling eval methods.
            output_array = np.zeros(x.shape, dtype=self.dtype)
        x = self.map_reference_domain(x)
        w_hat = work[(u, 0, True)]
        self.set_factor_array(w_hat)
        output_array[:] = leg.legval(x, u[:-2])
        w_hat[1:-1] = self._factor1*u[:-2]
        # Bugfix: w_hat[0] and w_hat[-1] are not written above and may
        # hold stale data from a reused work array (same safeguard as
        # BeamFixedFree.eval).
        w_hat[0] = 0
        w_hat[-1] = 0
        output_array += leg.legval(x, w_hat)
        w_hat[2:] = self._factor2*u[:-2]
        w_hat[:2] = 0
        output_array += leg.legval(x, w_hat)
        # Boundary contributions; matches _composite's trial columns.
        output_array += u[-2] + u[-1]*(1+x)
        return output_array

    def get_bc_basis(self):
        if self._bc_basis:
            return self._bc_basis
        self._bc_basis = BCDirichletNeumann(self.N, quad=self.quad, domain=self.domain,
                                            coordinates=self.coors.coordinates)
        return self._bc_basis

    def get_refined(self, N):
        return self.__class__(N,
                              quad=self.quad,
                              domain=self.domain,
                              dtype=self.dtype,
                              padding_factor=self.padding_factor,
                              dealias_direct=self.dealias_direct,
                              coordinates=self.coors.coordinates,
                              bc=self.bc.bc)

    def get_dealiased(self, padding_factor=1.5, dealias_direct=False):
        return self.__class__(self.N,
                              quad=self.quad,
                              dtype=self.dtype,
                              padding_factor=padding_factor,
                              dealias_direct=dealias_direct,
                              domain=self.domain,
                              coordinates=self.coors.coordinates,
                              bc=self.bc.bc)

    def get_unplanned(self):
        return self.__class__(self.N,
                              quad=self.quad,
                              domain=self.domain,
                              dtype=self.dtype,
                              padding_factor=self.padding_factor,
                              dealias_direct=self.dealias_direct,
                              coordinates=self.coors.coordinates,
                              bc=self.bc.bc)
class NeumannDirichlet(LegendreBase):
    """Function space for mixed Dirichlet/Neumann boundary conditions
    u'(-1)=0, u(1)=0

    Parameters
    ----------
    N : int
        Number of quadrature points
    quad : str, optional
        Type of quadrature

        - LG - Legendre-Gauss
        - GL - Legendre-Gauss-Lobatto
    bc : tuple of numbers
        Boundary conditions at edges of domain
    domain : 2-tuple of floats, optional
        The computational domain
    scaled : bool, optional
        Whether or not to scale test functions with 1/sqrt(4k+6).
        Scaled test functions give a stiffness matrix equal to the
        identity matrix.
    padding_factor : float, optional
        Factor for padding backward transforms.
    dealias_direct : bool, optional
        Set upper 1/3 of coefficients to zero before backward transform
    dtype : data-type, optional
        Type of input data in real physical space. Will be overloaded when
        basis is part of a :class:`.TensorProductSpace`.
    coordinates: 2- or 3-tuple (coordinate, position vector (, sympy assumptions)), optional
        Map for curvilinear coordinatesystem.
        The new coordinate variable in the new coordinate system is the first item.
        Second item is a tuple for the Cartesian position vector as function of the
        new variable in the first tuple. Example::

            theta = sp.Symbols('x', real=True, positive=True)
            rv = (sp.cos(theta), sp.sin(theta))
    """
    # Bugfix: dtype default was np.float (removed in NumPy 1.24).
    def __init__(self, N, quad="LG", bc=(0., 0.), domain=(-1., 1.), dtype=float,
                 padding_factor=1, dealias_direct=False, coordinates=None):
        LegendreBase.__init__(self, N, quad=quad, domain=domain, dtype=dtype,
                              padding_factor=padding_factor, dealias_direct=dealias_direct,
                              coordinates=coordinates)
        from shenfun.tensorproductspace import BoundaryValues
        self._factor1 = np.ones(1)
        self._factor2 = np.ones(1)
        self._bc_basis = None
        self.bc = BoundaryValues(self, bc=bc)

    @staticmethod
    def boundary_condition():
        return 'NeumannDirichlet'

    @property
    def has_nonhomogeneous_bcs(self):
        return self.bc.has_nonhomogeneous_bcs()

    def set_factor_array(self, v):
        """Set intermediate factor arrays"""
        s = self.sl[self.slice()]
        if not self._factor1.shape == v[s].shape:
            k = self.wavenumbers().astype(float)
            self._factor1 = (-(2*k+3)/(k+2)**2).astype(float)
            self._factor2 = -((k+1)**2/(k+2)**2).astype(float)

    def _composite(self, V, argument=0):
        P = np.zeros_like(V)
        k = np.arange(V.shape[1]).astype(float)[:-2]
        P[:, :-2] = (V[:, :-2]
                     -((2*k+3)/(k+2)**2)*V[:, 1:-1]
                     -((k+1)**2/(k+2)**2)*V[:, 2:])
        if argument == 1:
            # Boundary basis functions for the two BC degrees of freedom.
            P[:, -2] = V[:, 0]-0.5*V[:, 1]-0.5*V[:, 2]
            P[:, -1] = V[:, 0]
        return P

    def set_w_hat(self, w_hat, fk, f1, f2):  # pragma: no cover
        """Accumulate composite coefficients into orthogonal array w_hat."""
        s = self.sl[self.slice()]
        s1 = self.sl[slice(1, -1)]
        s2 = self.sl[slice(2, None)]
        w_hat[s] = fk[s]
        w_hat[s1] += f1*fk[s]
        w_hat[s2] += f2*fk[s]
        return w_hat

    def to_ortho(self, input_array, output_array=None):
        if output_array is None:
            output_array = np.zeros_like(input_array)
        else:
            output_array.fill(0)
        # Bugfix: this class defines set_factor_array (singular); the call
        # previously targeted a non-existent set_factor_arrays and raised
        # AttributeError.
        self.set_factor_array(input_array)
        output_array = self.set_w_hat(output_array, input_array, self._factor1, self._factor2)
        self.bc.add_to_orthogonal(output_array, input_array)
        return output_array

    def slice(self):
        return slice(0, self.N-2)

    def _evaluate_scalar_product(self, fast_transform=False):
        SpectralBase._evaluate_scalar_product(self)
        self.scalar_product.output_array[self.sl[slice(-2, None)]] = 0

    def sympy_basis(self, i=0, x=sympy.symbols('x', real=True)):
        assert i < self.N-2
        return (sympy.legendre(i, x)
                -(2*i+3)/(i+2)**2*sympy.legendre(i+1, x)
                -(i+1)**2/(i+2)**2*sympy.legendre(i+2, x))

    def evaluate_basis(self, x, i=0, output_array=None):
        x = np.atleast_1d(x)
        if output_array is None:
            output_array = np.zeros(x.shape)
        output_array[:] = (eval_legendre(i, x)
                           -(2*i+3)/(i+2)**2*eval_legendre(i+1, x)
                           -(i+1)**2/(i+2)**2*eval_legendre(i+2, x))
        return output_array

    def evaluate_basis_derivative(self, x=None, i=0, k=0, output_array=None):
        if x is None:
            x = self.mesh(False, False)
        if output_array is None:
            output_array = np.zeros(x.shape)
        x = np.atleast_1d(x)
        basis = np.zeros(self.shape(True))
        basis[np.array([i, i+1, i+2])] = (1, -(2*i+3)/(i+2)**2, -(i+1)**2/(i+2)**2)
        basis = leg.Legendre(basis)
        if k > 0:
            basis = basis.deriv(k)
        output_array[:] = basis(x)
        return output_array

    def eval(self, x, u, output_array=None):
        x = np.atleast_1d(x)
        if output_array is None:
            # dtype=self.dtype for consistency with sibling eval methods.
            output_array = np.zeros(x.shape, dtype=self.dtype)
        x = self.map_reference_domain(x)
        w_hat = work[(u, 0, True)]
        self.set_factor_array(w_hat)
        output_array[:] = leg.legval(x, u[:-2])
        w_hat[1:-1] = self._factor1*u[:-2]
        # Bugfix: w_hat[0] and w_hat[-1] are not written above and may
        # hold stale data from a reused work array (same safeguard as
        # BeamFixedFree.eval).
        w_hat[0] = 0
        w_hat[-1] = 0
        output_array += leg.legval(x, w_hat)
        w_hat[2:] = self._factor2*u[:-2]
        w_hat[:2] = 0
        output_array += leg.legval(x, w_hat)
        # Boundary contributions; 1-0.5*x-0.25*(3x^2-1) equals
        # L_0 - 0.5*L_1 - 0.5*L_2, matching _composite's trial column.
        output_array += u[-1] + u[-2]*(1-0.5*x-0.25*(3*x**2-1))
        return output_array

    def get_bc_basis(self):
        if self._bc_basis:
            return self._bc_basis
        self._bc_basis = BCNeumannDirichlet(self.N, quad=self.quad, domain=self.domain,
                                            coordinates=self.coors.coordinates)
        return self._bc_basis

    def get_refined(self, N):
        return self.__class__(N,
                              quad=self.quad,
                              domain=self.domain,
                              dtype=self.dtype,
                              padding_factor=self.padding_factor,
                              dealias_direct=self.dealias_direct,
                              coordinates=self.coors.coordinates,
                              bc=self.bc.bc)

    def get_dealiased(self, padding_factor=1.5, dealias_direct=False):
        return self.__class__(self.N,
                              quad=self.quad,
                              dtype=self.dtype,
                              padding_factor=padding_factor,
                              dealias_direct=dealias_direct,
                              domain=self.domain,
                              coordinates=self.coors.coordinates,
                              bc=self.bc.bc)

    def get_unplanned(self):
        return self.__class__(self.N,
                              quad=self.quad,
                              domain=self.domain,
                              dtype=self.dtype,
                              padding_factor=self.padding_factor,
                              dealias_direct=self.dealias_direct,
                              coordinates=self.coors.coordinates,
                              bc=self.bc.bc)
class UpperDirichletNeumann(LegendreBase):
    """Function space for mixed Dirichlet/Neumann boundary conditions
    u(1)=0, u'(1)=0

    Parameters
    ----------
    N : int
        Number of quadrature points
    quad : str, optional
        Type of quadrature

        - LG - Legendre-Gauss
        - GL - Legendre-Gauss-Lobatto
    bc : tuple of numbers
        Boundary conditions at edges of domain
    domain : 2-tuple of floats, optional
        The computational domain
    scaled : bool, optional
        Whether or not to scale test functions with 1/sqrt(4k+6).
        Scaled test functions give a stiffness matrix equal to the
        identity matrix.
    padding_factor : float, optional
        Factor for padding backward transforms.
    dealias_direct : bool, optional
        Set upper 1/3 of coefficients to zero before backward transform
    dtype : data-type, optional
        Type of input data in real physical space. Will be overloaded when
        basis is part of a :class:`.TensorProductSpace`.
    coordinates: 2- or 3-tuple (coordinate, position vector (, sympy assumptions)), optional
        Map for curvilinear coordinatesystem.
        The new coordinate variable in the new coordinate system is the first item.
        Second item is a tuple for the Cartesian position vector as function of the
        new variable in the first tuple. Example::

            theta = sp.Symbols('x', real=True, positive=True)
            rv = (sp.cos(theta), sp.sin(theta))

    Note
    ----
    This basis is not recommended as it leads to a poorly conditioned
    stiffness matrix.
    """
    # Bugfix: dtype default was np.float (removed in NumPy 1.24).
    def __init__(self, N, quad="LG", bc=(0., 0.), domain=(-1., 1.), dtype=float,
                 padding_factor=1, dealias_direct=False, coordinates=None):
        LegendreBase.__init__(self, N, quad=quad, domain=domain, dtype=dtype,
                              padding_factor=padding_factor, dealias_direct=dealias_direct,
                              coordinates=coordinates)
        from shenfun.tensorproductspace import BoundaryValues
        self._factor1 = np.ones(1)
        self._factor2 = np.ones(1)
        self._bc_basis = None
        self.bc = BoundaryValues(self, bc=bc)

    @staticmethod
    def boundary_condition():
        return 'UpperDirichletNeumann'

    @property
    def has_nonhomogeneous_bcs(self):
        return self.bc.has_nonhomogeneous_bcs()

    def set_factor_array(self, v):
        """Set intermediate factor arrays"""
        s = self.sl[self.slice()]
        if not self._factor1.shape == v[s].shape:
            k = self.wavenumbers().astype(float)
            self._factor1 = (-(2*k+3)/(k+2)).astype(float)
            self._factor2 = ((k+1)/(k+2)).astype(float)

    def _composite(self, V, argument=0):
        P = np.zeros_like(V)
        k = np.arange(V.shape[1]).astype(float)[:-2]
        P[:, :-2] = (V[:, :-2]
                     -((2*k+3)/(k+2))*V[:, 1:-1]
                     +((k+1)/(k+2))*V[:, 2:])
        if argument == 1:
            # Boundary basis functions for the two BC degrees of freedom.
            P[:, -2] = V[:, 0]
            P[:, -1] = V[:, 0]-2*V[:, 1]+V[:, 2]
        return P

    def set_w_hat(self, w_hat, fk, f1, f2):
        """Accumulate composite coefficients into orthogonal array w_hat."""
        s = self.sl[self.slice()]
        s1 = self.sl[slice(1, -1)]
        s2 = self.sl[slice(2, None)]
        w_hat[s] = fk[s]
        w_hat[s1] += f1*fk[s]
        w_hat[s2] += f2*fk[s]
        return w_hat

    def to_ortho(self, input_array, output_array=None):
        if output_array is None:
            output_array = np.zeros_like(input_array)
        else:
            output_array.fill(0)
        # Bugfix: this class defines set_factor_array (singular); the call
        # previously targeted a non-existent set_factor_arrays and raised
        # AttributeError.
        self.set_factor_array(input_array)
        output_array = self.set_w_hat(output_array, input_array, self._factor1, self._factor2)
        self.bc.add_to_orthogonal(output_array, input_array)
        return output_array

    def slice(self):
        return slice(0, self.N-2)

    def _evaluate_scalar_product(self, fast_transform=False):
        SpectralBase._evaluate_scalar_product(self)
        self.scalar_product.output_array[self.sl[slice(-2, None)]] = 0

    def sympy_basis(self, i=0, x=sympy.symbols('x', real=True)):
        assert i < self.N-2
        return (sympy.legendre(i, x)
                -(2*i+3)/(i+2)*sympy.legendre(i+1, x)
                +(i+1)/(i+2)*sympy.legendre(i+2, x))

    def evaluate_basis(self, x, i=0, output_array=None):
        x = np.atleast_1d(x)
        if output_array is None:
            output_array = np.zeros(x.shape)
        output_array[:] = (eval_legendre(i, x)
                           -(2*i+3)/(i+2)*eval_legendre(i+1, x)
                           +(i+1)/(i+2)*eval_legendre(i+2, x))
        return output_array

    def evaluate_basis_derivative(self, x=None, i=0, k=0, output_array=None):
        if x is None:
            x = self.mesh(False, False)
        if output_array is None:
            output_array = np.zeros(x.shape)
        x = np.atleast_1d(x)
        basis = np.zeros(self.shape(True))
        basis[np.array([i, i+1, i+2])] = (1, -(2*i+3)/(i+2), (i+1)/(i+2))
        basis = leg.Legendre(basis)
        if k > 0:
            basis = basis.deriv(k)
        output_array[:] = basis(x)
        return output_array

    def eval(self, x, u, output_array=None):
        x = np.atleast_1d(x)
        if output_array is None:
            # dtype=self.dtype for consistency with sibling eval methods.
            output_array = np.zeros(x.shape, dtype=self.dtype)
        x = self.map_reference_domain(x)
        w_hat = work[(u, 0, True)]
        self.set_factor_array(w_hat)
        output_array[:] = leg.legval(x, u[:-2])
        w_hat[1:-1] = self._factor1*u[:-2]
        # Bugfix: w_hat[0] and w_hat[-1] are not written above and may
        # hold stale data from a reused work array (same safeguard as
        # BeamFixedFree.eval).
        w_hat[0] = 0
        w_hat[-1] = 0
        output_array += leg.legval(x, w_hat)
        w_hat[2:] = self._factor2*u[:-2]
        w_hat[:2] = 0
        output_array += leg.legval(x, w_hat)
        # Boundary contributions; 1-2x+0.5*(3x^2-1) equals
        # L_0 - 2*L_1 + L_2, matching _composite's trial column.
        output_array += u[-2] + u[-1]*(1-2*x+0.5*(3*x**2-1))
        return output_array

    def get_bc_basis(self):
        if self._bc_basis:
            return self._bc_basis
        self._bc_basis = BCUpperDirichletNeumann(self.N, quad=self.quad, domain=self.domain,
                                                 coordinates=self.coors.coordinates)
        return self._bc_basis

    def get_refined(self, N):
        return self.__class__(N,
                              quad=self.quad,
                              domain=self.domain,
                              dtype=self.dtype,
                              padding_factor=self.padding_factor,
                              dealias_direct=self.dealias_direct,
                              coordinates=self.coors.coordinates,
                              bc=self.bc.bc)

    def get_dealiased(self, padding_factor=1.5, dealias_direct=False):
        return self.__class__(self.N,
                              quad=self.quad,
                              dtype=self.dtype,
                              padding_factor=padding_factor,
                              dealias_direct=dealias_direct,
                              domain=self.domain,
                              coordinates=self.coors.coordinates,
                              bc=self.bc.bc)

    def get_unplanned(self):
        return self.__class__(self.N,
                              quad=self.quad,
                              domain=self.domain,
                              dtype=self.dtype,
                              padding_factor=self.padding_factor,
                              dealias_direct=self.dealias_direct,
                              coordinates=self.coors.coordinates,
                              bc=self.bc.bc)
class BCDirichlet(LegendreBase):
    """Boundary basis for inhomogeneous Dirichlet boundary conditions.

    Contains the two linear basis functions 0.5*(1-x) and 0.5*(1+x) that
    carry the boundary values at x=-1 and x=+1, respectively.
    """
    def __init__(self, N, quad="LG", scaled=False,
                 domain=(-1., 1.), coordinates=None):
        LegendreBase.__init__(self, N, quad=quad, domain=domain, coordinates=coordinates)
        self._scaled = scaled

    def slice(self):
        return slice(self.N-2, self.N)

    def shape(self, forward_output=True):
        if forward_output:
            return 2
        else:
            return self.N

    @staticmethod
    def boundary_condition():
        return 'Apply'

    def vandermonde(self, x):
        return leg.legvander(x, 1)

    @staticmethod
    def coefficient_matrix():
        # Rows express the two boundary basis functions in L_0, L_1.
        return np.array([[0.5, -0.5],
                         [0.5, 0.5]])

    def _composite(self, V, argument=0):
        P = np.zeros(V.shape)
        P[:, 0] = (V[:, 0] - V[:, 1])/2
        P[:, 1] = (V[:, 0] + V[:, 1])/2
        return P

    def sympy_basis(self, i=0, x=sympy.symbols('x', real=True)):
        if i == 0:
            return 0.5*(1-x)
        elif i == 1:
            return 0.5*(1+x)
        else:
            raise AttributeError('Only two bases, i < 2')

    def evaluate_basis(self, x, i=0, output_array=None):
        assert i < 2
        x = np.atleast_1d(x)
        if output_array is None:
            output_array = np.zeros(x.shape)
        if i == 0:
            output_array[:] = 0.5*(1-x)
        elif i == 1:
            output_array[:] = 0.5*(1+x)
        return output_array

    def evaluate_basis_derivative(self, x=None, i=0, k=0, output_array=None):
        x = np.atleast_1d(x)
        if output_array is None:
            output_array = np.zeros(x.shape)
        if k == 0:
            # Bugfix: the 0th derivative is the basis function itself; it
            # previously fell through to the zero branch.
            return self.evaluate_basis(x, i=i, output_array=output_array)
        if i == 0 and k == 1:
            output_array[:] = -0.5
        elif i == 1 and k == 1:
            output_array[:] = 0.5
        else:
            # The basis functions are linear, so all higher derivatives
            # vanish.
            output_array[:] = 0
        return output_array
class BCNeumann(LegendreBase):
    """Basis for inhomogeneous Neumann boundary conditions u'(-1), u'(1).

    The two basis functions are P_1/2 -+ P_2/6, i.e. x/2 -+ (3x^2-1)/12,
    whose first derivatives (1 -+ x)/2 satisfy b_0'(-1) = b_1'(1) = 1 and
    b_0'(1) = b_1'(-1) = 0.
    """

    def __init__(self, N, quad="LG", scaled=False,
                 domain=(-1., 1.), coordinates=None):
        LegendreBase.__init__(self, N, quad=quad, domain=domain, coordinates=coordinates)
        self._scaled = scaled

    def slice(self):
        return slice(self.N-2, self.N)

    def shape(self, forward_output=True):
        if forward_output:
            return 2
        else:
            return self.N

    @staticmethod
    def boundary_condition():
        return 'Apply'

    def vandermonde(self, x):
        return leg.legvander(x, 2)

    @staticmethod
    def coefficient_matrix():
        # Rows: Legendre coefficients (P_0, P_1, P_2) of the two basis functions
        return np.array([[0, 1/2, -1/6],
                         [0, 1/2, 1/6]])

    def _composite(self, V, argument=0):
        P = np.zeros(V[:, :2].shape)
        P[:, 0] = 0.5*V[:, 1] - 1/6*V[:, 2]
        P[:, 1] = 0.5*V[:, 1] + 1/6*V[:, 2]
        return P

    def sympy_basis(self, i=0, x=sympy.symbols('x', real=True)):
        # Fix: previously used (3x^2-1)/3, inconsistent with
        # coefficient_matrix()/_composite (P_2/6 = (3x^2-1)/12) and not
        # satisfying the Neumann interpolation conditions.
        if i == 0:
            return x/2 - (3*x**2-1)/12
        elif i == 1:
            return x/2 + (3*x**2-1)/12
        else:
            raise AttributeError('Only two bases, i < 2')

    def evaluate_basis(self, x, i=0, output_array=None):
        assert i < 2
        x = np.atleast_1d(x)
        if output_array is None:
            output_array = np.zeros(x.shape)
        # Fix: use (3x^2-1)/12 to match coefficient_matrix()/_composite
        if i == 0:
            output_array[:] = x/2 - (3*x**2-1)/12
        elif i == 1:
            output_array[:] = x/2 + (3*x**2-1)/12
        return output_array

    def evaluate_basis_derivative(self, x=None, i=0, k=0, output_array=None):
        x = np.atleast_1d(x)
        if output_array is None:
            output_array = np.zeros(x.shape)
        # Fix: derivatives updated to match the corrected basis functions
        if i == 0 and k == 0:
            output_array[:] = x/2 - (3*x**2-1)/12
        elif i == 0 and k == 1:
            output_array[:] = 0.5 - 0.5*x
        elif i == 0 and k == 2:
            output_array[:] = -0.5
        elif i == 1 and k == 0:
            output_array[:] = x/2 + (3*x**2-1)/12
        elif i == 1 and k == 1:
            output_array[:] = 0.5 + 0.5*x
        elif i == 1 and k == 2:
            output_array[:] = 0.5
        else:
            output_array[:] = 0
        return output_array
class BCBiharmonic(LegendreBase):
    """Function space for inhomogeneous Biharmonic boundary conditions

    Parameters
    ----------
    N : int, optional
        Number of quadrature points
    quad : str, optional
        Type of quadrature

        - LG - Legendre-Gauss
        - GL - Legendre-Gauss-Lobatto

    domain : 2-tuple of floats, optional
        The computational domain
    padding_factor : float, optional
        Factor for padding backward transforms.
    dealias_direct : bool, optional
        Set upper 1/3 of coefficients to zero before backward transform
    coordinates: 2- or 3-tuple (coordinate, position vector (, sympy assumptions)), optional
        Map for curvilinear coordinatesystem. The new coordinate variable is
        the first item; the second is a tuple for the Cartesian position
        vector as function of the new variable.
    """

    def __init__(self, N, quad="LG", domain=(-1., 1.),
                 padding_factor=1, dealias_direct=False, coordinates=None):
        LegendreBase.__init__(self, N, quad=quad, domain=domain,
                              padding_factor=padding_factor,
                              dealias_direct=dealias_direct,
                              coordinates=coordinates)

    def slice(self):
        return slice(self.N-4, self.N)

    def shape(self, forward_output=True):
        return 4 if forward_output else self.N

    @staticmethod
    def boundary_condition():
        return 'Apply'

    def vandermonde(self, x):
        return leg.legvander(x, 3)

    @staticmethod
    def coefficient_matrix():
        # Each row holds the Legendre coefficients of one boundary basis function
        return np.array([[0.5, -0.6, 0, 0.1],
                         [0.5, 0.6, 0, -0.1],
                         [1./6., -1./10., -1./6., 1./10.],
                         [-1./6., -1./10., 1./6., 1./10.]])

    def _composite(self, V, argument=0):
        return np.tensordot(V[:, :4], self.coefficient_matrix(), (1, 1))

    def sympy_basis(self, i=0, x=sympy.symbols('x', real=True)):
        if i >= 4:
            raise AttributeError('Only four bases, i < 4')
        row = self.coefficient_matrix()[i]
        return sum(c*sympy.legendre(j, x) for j, c in enumerate(row))

    def evaluate_basis(self, x, i=0, output_array=None):
        x = np.atleast_1d(x)
        if output_array is None:
            output_array = np.zeros(x.shape)
        output_array[:] = np.dot(self.vandermonde(x), self.coefficient_matrix()[i])
        return output_array

    def evaluate_basis_derivative(self, x=None, i=0, k=0, output_array=None):
        # Defer to the generic Vandermonde-based implementation
        return SpectralBase.evaluate_basis_derivative(
            self, x=x, i=i, k=k, output_array=output_array)
class BCBeamFixedFree(LegendreBase):
    """Function space for beam boundary conditions fixed at x=-1 and free
    at x=1: u(-1), u'(-1), u''(1), u'''(1)

    Parameters
    ----------
    N : int, optional
        Number of quadrature points
    quad : str, optional
        Type of quadrature

        - LG - Legendre-Gauss
        - GL - Legendre-Gauss-Lobatto

    domain : 2-tuple of floats, optional
        The computational domain
    padding_factor : float, optional
        Factor for padding backward transforms.
    dealias_direct : bool, optional
        Set upper 1/3 of coefficients to zero before backward transform
    coordinates: 2- or 3-tuple (coordinate, position vector (, sympy assumptions)), optional
        Map for curvilinear coordinatesystem.
        The new coordinate variable in the new coordinate system is the first item.
        Second item is a tuple for the Cartesian position vector as function of the
        new variable in the first tuple. Example::

            theta = sp.Symbols('x', real=True, positive=True)
            rv = (sp.cos(theta), sp.sin(theta))
    """
    def __init__(self, N, quad="LG", domain=(-1., 1.),
                 padding_factor=1, dealias_direct=False, coordinates=None):
        LegendreBase.__init__(self, N, quad=quad, domain=domain,
                              padding_factor=padding_factor, dealias_direct=dealias_direct,
                              coordinates=coordinates)

    def slice(self):
        # The four boundary dofs occupy the last four coefficients
        return slice(self.N-4, self.N)

    def shape(self, forward_output=True):
        if forward_output:
            return 4
        else:
            return self.N

    @staticmethod
    def boundary_condition():
        return 'Apply'

    def vandermonde(self, x):
        return leg.legvander(x, 3)

    @staticmethod
    def coefficient_matrix():
        # Each row holds the Legendre coefficients (P_0..P_3) of one
        # boundary basis function
        return np.array([[1, 0, 0, 0],
                         [1, 1, 0, 0],
                         [2/3, 1, 1/3, 0],
                         [-1, -1.4, -1/3, 1/15]])

    def _composite(self, V, argument=0):
        P = np.tensordot(V[:, :4], self.coefficient_matrix(), (1, 1))
        return P

    def sympy_basis(self, i=0, x=sympy.symbols('x', real=True)):
        if i < 4:
            f = 0
            for j, c in enumerate(self.coefficient_matrix()[i]):
                f += c*sympy.legendre(j, x)
            return f
        else:
            raise AttributeError('Only four bases, i < 4')

    def evaluate_basis(self, x, i=0, output_array=None):
        x = np.atleast_1d(x)
        if output_array is None:
            output_array = np.zeros(x.shape)
        V = self.vandermonde(x)
        output_array[:] = np.dot(V, self.coefficient_matrix()[i])
        return output_array
class BCUpperDirichlet(LegendreBase):
    """Function space for Dirichlet boundary conditions at x=1

    Parameters
    ----------
    N : int, optional
        Number of quadrature points
    quad : str, optional
        Type of quadrature

        - LG - Legendre-Gauss
        - GL - Legendre-Gauss-Lobatto

    domain : 2-tuple of floats, optional
        The computational domain
    scaled : bool, optional
        Whether or not to use scaled basis
    coordinates: 2- or 3-tuple (coordinate, position vector (, sympy assumptions)), optional
        Map for curvilinear coordinatesystem.
    """

    def __init__(self, N, quad="LG", domain=(-1., 1.), scaled=False,
                 coordinates=None):
        # Fix: default quad was "GC" (a Chebyshev quadrature name copied
        # from the Chebyshev module), which is not a valid Legendre
        # quadrature; use Legendre-Gauss instead.
        LegendreBase.__init__(self, N, quad=quad, domain=domain,
                              coordinates=coordinates)

    def slice(self):
        return slice(self.N-1, self.N)

    def shape(self, forward_output=True):
        if forward_output:
            return 1
        else:
            return self.N

    @staticmethod
    def boundary_condition():
        return 'Apply'

    def vandermonde(self, x):
        return leg.legvander(x, 1)

    def coefficient_matrix(self):
        # The single basis function (1+x)/2 = (P_0 + P_1)/2
        return np.array([[0.5, 0.5]])

    def _composite(self, V, argument=0):
        P = np.zeros(V[:, :1].shape)
        P[:, 0] = (V[:, 0] + V[:, 1])/2
        return P

    def sympy_basis(self, i=0, x=sympy.Symbol('x', real=True)):
        if i == 0:
            return 0.5*(1+x)
        else:
            raise AttributeError('Only one basis, i == 0')

    def evaluate_basis(self, x, i=0, output_array=None):
        x = np.atleast_1d(x)
        if output_array is None:
            output_array = np.zeros(x.shape)
        if i == 0:
            output_array[:] = 0.5*(1+x)
        else:
            raise AttributeError('Only one basis, i == 0')
        return output_array

    def evaluate_basis_derivative(self, x=None, i=0, k=0, output_array=None):
        assert i == 0
        x = np.atleast_1d(x)
        if output_array is None:
            output_array = np.zeros(x.shape)
        output_array[:] = 0
        if k == 1:
            output_array[:] = 0.5
        elif k == 0:
            output_array[:] = 0.5*(1+x)
        return output_array

    def evaluate_basis_derivative_all(self, x=None, k=0, argument=0):
        if x is None:
            x = self.mesh(False, False)
        output_array = np.zeros((self.N, 1))
        self.evaluate_basis_derivative(x=x, k=k, output_array=output_array[:, 0])
        return output_array
class BCNeumannDirichlet(LegendreBase):
    """Basis for mixed boundary conditions with a Neumann condition at x=-1
    and a Dirichlet condition at x=1.

    The two basis functions are 1 - x/2 - (3x^2-1)/4 (i.e. P_0 - P_1/2 -
    P_2/2) and the constant 1.
    """

    def __init__(self, N, quad="LG", scaled=False,
                 domain=(-1., 1.), coordinates=None):
        LegendreBase.__init__(self, N, quad=quad, domain=domain, coordinates=coordinates)
        self._scaled = scaled

    def slice(self):
        return slice(self.N-2, self.N)

    def shape(self, forward_output=True):
        return 2 if forward_output else self.N

    @staticmethod
    def boundary_condition():
        return 'Apply'

    def vandermonde(self, x):
        return leg.legvander(x, 2)

    @staticmethod
    def coefficient_matrix():
        # Rows: Legendre coefficients (P_0, P_1, P_2) of the two basis functions
        return np.array([[1, -0.5, -0.5],
                         [1, 0, 0]])

    def _composite(self, V, argument=0):
        P = np.zeros(V[:, :2].shape)
        P[:, 0] = V[:, 0] - 0.5*V[:, 1] - 0.5*V[:, 2]
        P[:, 1] = V[:, 0]
        return P

    def sympy_basis(self, i=0, x=sympy.symbols('x', real=True)):
        if i == 0:
            return 1-0.5*x-0.25*(3*x**2-1)
        if i == 1:
            return 1
        raise AttributeError('Only two bases, i < 2')

    def evaluate_basis(self, x, i=0, output_array=None):
        assert i < 2
        x = np.atleast_1d(x)
        if output_array is None:
            output_array = np.zeros(x.shape)
        output_array[:] = 1-0.5*x-0.25*(3*x**2-1) if i == 0 else 1
        return output_array

    def evaluate_basis_derivative(self, x=None, i=0, k=0, output_array=None):
        x = np.atleast_1d(x)
        if output_array is None:
            output_array = np.zeros(x.shape)
        if i == 0:
            if k == 0:
                output_array[:] = 1-0.5*x-0.25*(3*x**2-1)
            elif k == 1:
                output_array[:] = -0.5-1.5*x
            elif k == 2:
                output_array[:] = -1.5
            else:
                output_array[:] = 0
        elif i == 1 and k == 0:
            output_array[:] = 1
        else:
            output_array[:] = 0
        return output_array
class BCDirichletNeumann(LegendreBase):
    """Basis for mixed boundary conditions with a Dirichlet condition at
    x=-1 and a Neumann condition at x=1.

    The two basis functions are the constant 1 and 1+x (P_0 and P_0+P_1).
    """

    def __init__(self, N, quad="LG",
                 domain=(-1., 1.), coordinates=None):
        LegendreBase.__init__(self, N, quad=quad, domain=domain, coordinates=coordinates)

    def slice(self):
        return slice(self.N-2, self.N)

    def shape(self, forward_output=True):
        return 2 if forward_output else self.N

    @staticmethod
    def boundary_condition():
        return 'Apply'

    def vandermonde(self, x):
        return leg.legvander(x, 1)

    @staticmethod
    def coefficient_matrix():
        # Rows: Legendre coefficients (P_0, P_1) of the two basis functions
        return np.array([[1, 0],
                         [1, 1]])

    def _composite(self, V, argument=0):
        P = np.zeros(V[:, :2].shape)
        P[:, 0] = V[:, 0]
        P[:, 1] = V[:, 0] + V[:, 1]
        return P

    def sympy_basis(self, i=0, x=sympy.symbols('x', real=True)):
        if i == 0:
            return 1
        if i == 1:
            return 1+x
        raise AttributeError('Only two bases, i < 2')

    def evaluate_basis(self, x, i=0, output_array=None):
        assert i < 2
        x = np.atleast_1d(x)
        if output_array is None:
            output_array = np.zeros(x.shape)
        output_array[:] = 1 if i == 0 else 1+x
        return output_array

    def evaluate_basis_derivative(self, x=None, i=0, k=0, output_array=None):
        x = np.atleast_1d(x)
        if output_array is None:
            output_array = np.zeros(x.shape)
        if i == 0 and k == 0:
            output_array[:] = 1
        elif i == 1 and k == 0:
            output_array[:] = 1+x
        elif i == 1 and k == 1:
            output_array[:] = 1
        else:
            output_array[:] = 0
        return output_array

    def evaluate_basis_derivative_all(self, x=None, k=0, argument=0):
        if x is None:
            x = self.mesh(False, False)
        output_array = np.zeros((self.N, 2))
        for j in range(2):
            self.evaluate_basis_derivative(x=x, i=j, k=k,
                                           output_array=output_array[:, j])
        return output_array
class BCUpperDirichletNeumann(LegendreBase):
    """Basis for boundary conditions applied at x=1.

    The two basis functions are the constant 1 and 1 - 2x + (3x^2-1)/2
    (P_0 and P_0 - 2 P_1 + P_2).
    """

    def __init__(self, N, quad="LG",
                 domain=(-1., 1.), coordinates=None):
        LegendreBase.__init__(self, N, quad=quad, domain=domain, coordinates=coordinates)

    def slice(self):
        return slice(self.N-2, self.N)

    def shape(self, forward_output=True):
        return 2 if forward_output else self.N

    @staticmethod
    def boundary_condition():
        return 'Apply'

    def vandermonde(self, x):
        return leg.legvander(x, 2)

    @staticmethod
    def coefficient_matrix():
        # Rows: Legendre coefficients (P_0, P_1, P_2) of the two basis functions
        return np.array([[1, 0, 0],
                         [1, -2, 1]])

    def _composite(self, V, argument=0):
        P = np.zeros(V[:, :2].shape)
        P[:, 0] = V[:, 0]
        P[:, 1] = V[:, 0] - 2*V[:, 1] + V[:, 2]
        return P

    def sympy_basis(self, i=0, x=sympy.symbols('x', real=True)):
        if i == 0:
            return 1
        if i == 1:
            return 1-2*x+0.5*(3*x**2-1)
        raise AttributeError('Only two bases, i < 2')

    def evaluate_basis(self, x, i=0, output_array=None):
        assert i < 2
        x = np.atleast_1d(x)
        if output_array is None:
            output_array = np.zeros(x.shape)
        output_array[:] = 1 if i == 0 else 1-2*x+0.5*(3*x**2-1)
        return output_array

    def evaluate_basis_derivative(self, x=None, i=0, k=0, output_array=None):
        x = np.atleast_1d(x)
        if output_array is None:
            output_array = np.zeros(x.shape)
        if i == 0 and k == 0:
            output_array[:] = 1
        elif i == 1 and k == 0:
            output_array[:] = 1-2*x+0.5*(3*x**2-1)
        elif i == 1 and k == 1:
            output_array[:] = -2+3*x
        elif i == 1 and k == 2:
            output_array[:] = 3
        else:
            output_array[:] = 0
        return output_array

    def evaluate_basis_derivative_all(self, x=None, k=0, argument=0):
        if x is None:
            x = self.mesh(False, False)
        output_array = np.zeros((self.N, 2))
        for j in range(2):
            self.evaluate_basis_derivative(x=x, i=j, k=k,
                                           output_array=output_array[:, j])
        return output_array
| 37.994652
| 180
| 0.549864
| 13,031
| 99,470
| 4.060011
| 0.026322
| 0.086701
| 0.011057
| 0.019053
| 0.926568
| 0.914811
| 0.90175
| 0.894379
| 0.882301
| 0.872737
| 0
| 0.02783
| 0.327727
| 99,470
| 2,617
| 181
| 38.009171
| 0.763335
| 0.202574
| 0
| 0.808618
| 0
| 0
| 0.008578
| 0.000813
| 0
| 0
| 0
| 0
| 0.007834
| 1
| 0.144936
| false
| 0
| 0.011192
| 0.053721
| 0.303861
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
653be0c144b40281cb97b06271573c4274e6ed24
| 106
|
py
|
Python
|
montepython/likelihoods/Planck_lowlTT/__init__.py
|
emiliobellini/montepython_public
|
a3b1ba7ef614db21ac737de226648bc3477aca35
|
[
"MIT"
] | 1
|
2018-04-29T06:48:35.000Z
|
2018-04-29T06:48:35.000Z
|
montepython/likelihoods/Planck_lowlTT/__init__.py
|
emiliobellini/montepython_public
|
a3b1ba7ef614db21ac737de226648bc3477aca35
|
[
"MIT"
] | null | null | null |
montepython/likelihoods/Planck_lowlTT/__init__.py
|
emiliobellini/montepython_public
|
a3b1ba7ef614db21ac737de226648bc3477aca35
|
[
"MIT"
] | 2
|
2019-10-11T09:46:35.000Z
|
2019-12-05T14:55:04.000Z
|
from montepython.likelihood_class import Likelihood_clik
class Planck_lowlTT(Likelihood_clik):
    """Planck low-l TT likelihood; all behaviour is inherited from
    Likelihood_clik."""
    pass
| 17.666667
| 56
| 0.839623
| 13
| 106
| 6.538462
| 0.692308
| 0.329412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122642
| 106
| 5
| 57
| 21.2
| 0.913978
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
6547858856bef80d893ade67b98883fc61663bc4
| 4,243
|
py
|
Python
|
First_Iteration/Cards.py
|
mehularora8/UNO
|
32713dab69a5fdc7d49607d3195ef2ad5e5a9795
|
[
"MIT"
] | null | null | null |
First_Iteration/Cards.py
|
mehularora8/UNO
|
32713dab69a5fdc7d49607d3195ef2ad5e5a9795
|
[
"MIT"
] | null | null | null |
First_Iteration/Cards.py
|
mehularora8/UNO
|
32713dab69a5fdc7d49607d3195ef2ad5e5a9795
|
[
"MIT"
] | null | null | null |
class Card:
    """A single game card.

    number: face value — int for number cards, a string such as "d2",
        "skip" or "rev" for ability cards.
    color: RGB tuple for the card color.
    ability: ability name ("d2", "skip", "rev") or None for number cards.
    wild: wild-card marker (always None in the standard deck below).
    """

    def __init__(self, number, color, ability, wild):
        self.number = number
        self.color = color
        self.ability = ability
        self.wild = wild

    def __eq__(self, other):
        # Fix: comparing to a non-Card previously raised AttributeError;
        # returning NotImplemented lets Python fall back to identity/False.
        if not isinstance(other, Card):
            return NotImplemented
        return (self.number == other.number) and (self.color == other.color) \
            and (self.ability == other.ability) and (self.wild == other.wild)

    def __repr__(self):
        # Added for debuggability; does not change existing behaviour
        return (f"Card(number={self.number!r}, color={self.color!r}, "
                f"ability={self.ability!r}, wild={self.wild!r})")
# Standard deck, built programmatically instead of listing every card.
# Per color the order is: one 0, the digits 1-9 twice, then the ability
# cards d2, d2, skip, skip, rev, rev — matching the original literal list.
cards = []
for _color in ((255, 0, 0),      # red
               (0, 255, 0),      # green
               (0, 0, 255),      # blue
               (250, 192, 32)):  # yellow
    cards.append(Card(0, _color, None, None))
    for _ in range(2):
        for _n in range(1, 10):
            cards.append(Card(_n, _color, None, None))
    for _ability in ("d2", "d2", "skip", "skip", "rev", "rev"):
        cards.append(Card(_ability, _color, _ability, None))
| 32.891473
| 136
| 0.532878
| 777
| 4,243
| 2.899614
| 0.059202
| 0.326676
| 0.383489
| 0.207723
| 0.855304
| 0.855304
| 0.849978
| 0.833999
| 0.715047
| 0.623613
| 0
| 0.195379
| 0.194202
| 4,243
| 129
| 137
| 32.891473
| 0.463586
| 0.040537
| 0
| 0.872727
| 0
| 0
| 0.035477
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018182
| false
| 0
| 0
| 0.009091
| 0.036364
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
e8fb423c7db32b998a3411a578d49b49c51d6946
| 21,900
|
py
|
Python
|
AMR-Policies-Other/Environment.py
|
irom-lab/AMR-Policies
|
43552ca0ddcd584a9faa12b5588874bac41bd205
|
[
"MIT"
] | 2
|
2020-10-29T02:21:27.000Z
|
2021-07-26T07:38:23.000Z
|
AMR-Policies-Other/Environment.py
|
irom-lab/AMR-Policies
|
43552ca0ddcd584a9faa12b5588874bac41bd205
|
[
"MIT"
] | null | null | null |
AMR-Policies-Other/Environment.py
|
irom-lab/AMR-Policies
|
43552ca0ddcd584a9faa12b5588874bac41bd205
|
[
"MIT"
] | null | null | null |
import pybullet
import pybullet_utils.bullet_client as bc
import pybullet_data
import numpy as np
from abc import ABC, abstractmethod
class SetEnvironment():
    """Mixin owning the pybullet client and the loaded robot/sphere bodies."""

    def setup_pybullet(self, robot_file, parallel=False):
        """Connect to pybullet (GUI or DIRECT) and load the ground plane,
        the robot URDF and an (invisible) collision-checking sphere.

        Parameters
        ----------
        robot_file : str
            Path to the robot URDF file.
        parallel : bool, optional
            Use a private BulletClient instead of the global pybullet module.
        """
        robot_radius = self.robot.robot_radius
        if parallel:
            if self.gui:
                print("Warning: Can only have one thread be a gui")
                p = bc.BulletClient(connection_mode=pybullet.GUI)
                visual_shape_id = p.createVisualShape(pybullet.GEOM_SPHERE, radius=robot_radius, rgbaColor=[0, 0, 0, 0])
            else:
                p = bc.BulletClient(connection_mode=pybullet.DIRECT)
                visual_shape_id = -1
            p.setAdditionalSearchPath(pybullet_data.getDataPath())
        else:
            if self.gui:
                pybullet.connect(pybullet.GUI)
                p = pybullet
                # This just makes sure that the sphere is not visible (we only use the sphere for collision checking)
                visual_shape_id = p.createVisualShape(pybullet.GEOM_SPHERE, radius=robot_radius, rgbaColor=[0, 0, 0, 0])
            else:
                pybullet.connect(pybullet.DIRECT)
                p = pybullet
                visual_shape_id = -1
        p.loadURDF("./URDFs/plane.urdf")  # Ground plane
        husky = p.loadURDF(robot_file, globalScaling=0.5)  # Load robot from URDF
        col_sphere_id = pybullet.createCollisionShape(pybullet.GEOM_SPHERE, radius=robot_radius)  # Sphere
        mass = 0
        sphere = pybullet.createMultiBody(mass, col_sphere_id, visual_shape_id)
        self.p = p
        self.husky = husky
        self.sphere = sphere

    def set_gui(self, gui):
        """Reconnect the pybullet client with a new gui setting."""
        self.p.disconnect()
        self.gui = gui
        # Fix: setup_pybullet requires the robot URDF path; calling it with
        # no arguments raised TypeError.
        self.setup_pybullet(self.robot.get_robot())
class Environment(ABC):
    """Abstract base class for obstacle environments."""
    @abstractmethod
    def generate_obstacles(self):
        # Subclasses build and return their obstacle layout here.
        pass
# Discrete Maze Environment
#**********************************************************************************************************************
class GridWorld(Environment):
    '''
    ENVIRONMENT DESCRIPTION:
    Discrete grid world of shape size x size (all cells free).
    '''

    def __init__(self, size, robot, empty=False, filename='None'):
        # size: side length of the square grid
        self.size = size
        self.robot = robot
        self.empty = empty
        self.data_filename = filename

    def generate_obstacles(self):
        """Return the obstacle grid (zeros = free cells).

        Fix: use ``self.size`` instead of a hard-coded 20x20 array, so the
        constructor's ``size`` argument actually takes effect (backward
        compatible for the previous usage with size=20).
        """
        maze = np.zeros((self.size, self.size))
        return maze
# Random Obstacle Environment
#**********************************************************************************************************************
class RandomObstacle(Environment, SetEnvironment):
    '''
    ENVIRONMENT DESCRIPTION:
    Maze Navigation environment. Walled maze with two obstacles placed in locations from files. Locations were uniformly
    sampled: y1 drawn from set [y_min + 5.2, y_min + 7.2], y2 drawn from set [y_min + 2, y_min + 8]
    '''

    def __init__(self, robot, parallel=False, gui=False, x_min=-5.0, x_max=5.0, y_min=0.0, y_max=10.0,
                 task=None, mode='train', filename=None):
        self.parallel = parallel
        self.gui = gui
        self.robot = robot
        self.height_obs = 100*robot.height
        self.x_lim = [x_min, x_max]
        self.y_lim = [y_min, y_max]
        self.p = None
        self.husky = None
        self.sphere = None
        self.setup_pybullet(self.robot.get_robot())
        self.task = task
        self.mode = mode
        self.data_filename = filename
        # Fix: compare strings with '==' instead of 'is'. Identity tests
        # against a string literal only work by accident of CPython
        # interning and raise SyntaxWarning on modern Python.
        if self.mode == 'train':
            self.sample_y1 = np.load("./envs/train_Maze_250_y1.npy")
            self.sample_y2 = np.load("./envs/train_Maze_250_y2.npy")
        else:
            self.sample_y1 = np.load("./envs/test_Maze_20_y1.npy")
            self.sample_y2 = np.load("./envs/test_Maze_20_y2.npy")

    def generate_obstacles(self, s):
        """Build the maze for sample index ``s``: four walls, two obstacles
        (positions from the loaded sample files) and an optional goal
        marker. Returns the pybullet multibody uid."""
        if self.parallel:
            self.setup_pybullet(self.robot.get_robot(), self.parallel)
        p = self.p
        x_lim = self.x_lim
        y_lim = self.y_lim
        numObs = 0
        heightObs = self.height_obs
        numEnvParts = 9
        linkMasses = [None] * (numObs + numEnvParts)
        colIdxs = [None] * (numObs + numEnvParts)
        visIdxs = [None] * (numObs + numEnvParts)
        posObs = [None] * (numObs + numEnvParts)
        orientObs = [None] * (numObs + numEnvParts)
        parentIdxs = [None] * (numObs + numEnvParts)
        linkInertialFramePositions = [None] * (numObs + numEnvParts)
        linkInertialFrameOrientations = [None] * (numObs + numEnvParts)
        linkJointTypes = [None] * (numObs + numEnvParts)
        linkJointAxis = [None] * (numObs + numEnvParts)
        for obs in range(numObs + numEnvParts):
            linkMasses[obs] = 0.0
            visIdxs[obs] = -1
            parentIdxs[obs] = 0
            linkInertialFramePositions[obs] = [0, 0, 0]
            linkInertialFrameOrientations[obs] = [0, 0, 0, 1]
            linkJointTypes[obs] = p.JOINT_FIXED
            linkJointAxis[obs] = np.array([0, 0, 1])
            orientObs[obs] = [0, 0, 0, 1]
        # Left wall
        posObs[numObs] = [x_lim[0], (y_lim[0] + y_lim[1] - 1) / 2.0, 0.0]
        colIdxs[numObs] = p.createCollisionShape(p.GEOM_BOX,
                                                 halfExtents=[0.1, (y_lim[1] - y_lim[0] + 1) / 2, heightObs / 2])
        visIdxs[numObs] = p.createVisualShape(p.GEOM_BOX,
                                              halfExtents=[0.1, (y_lim[1] - y_lim[0] + 1) / 2, heightObs / 2],
                                              rgbaColor=[0.5, 0.5, 0.5, 1])
        # Right wall
        posObs[numObs + 1] = [x_lim[1], (y_lim[0] + y_lim[1] - 1) / 2.0, 0.0]
        colIdxs[numObs + 1] = p.createCollisionShape(p.GEOM_BOX,
                                                     halfExtents=[0.1, (y_lim[1] - y_lim[0] + 1) / 2, heightObs / 2])
        visIdxs[numObs + 1] = p.createVisualShape(p.GEOM_BOX,
                                                  halfExtents=[0.1, (y_lim[1] - y_lim[0] + 1) / 2, heightObs / 2],
                                                  rgbaColor=[0.5, 0.5, 0.5, 1])
        # Top Wall
        orientObs[numObs + 2] = [0, 0, np.sqrt(2) / 2, np.sqrt(2) / 2]
        posObs[numObs + 2] = [(x_lim[0] + x_lim[1]) / 2.0, y_lim[1], 0.0]
        colIdxs[numObs + 2] = p.createCollisionShape(p.GEOM_BOX,
                                                     halfExtents=[0.1, (x_lim[1] - x_lim[0]) / 2.0, heightObs / 2])
        visIdxs[numObs + 2] = p.createVisualShape(p.GEOM_BOX,
                                                  halfExtents=[0.1, (x_lim[1] - x_lim[0]) / 2.0, heightObs / 2],
                                                  rgbaColor=[0.5, 0.5, 0.5, 1])
        # Bottom Wall
        orientObs[numObs + 6] = [0, 0, np.sqrt(2) / 2, np.sqrt(2) / 2]
        posObs[numObs + 6] = [(x_lim[0] + x_lim[1]) / 2.0, y_lim[0] - 1, 0.0]
        colIdxs[numObs + 6] = p.createCollisionShape(p.GEOM_BOX,
                                                     halfExtents=[0.1, (x_lim[1] - x_lim[0]) / 2.0, heightObs / 2])
        visIdxs[numObs + 6] = p.createVisualShape(p.GEOM_BOX,
                                                  halfExtents=[0.1, (x_lim[1] - x_lim[0]) / 2.0, heightObs / 2],
                                                  rgbaColor=[0.5, 0.5, 0.5, 1])
        # Fix: 'is' -> '==' for the mode string comparisons below.
        if self.mode == 'test3':
            # Left wall (recolored for this test mode)
            posObs[numObs] = [x_lim[0], (y_lim[0] + y_lim[1] - 1) / 2.0, 0.0]
            colIdxs[numObs] = p.createCollisionShape(p.GEOM_BOX,
                                                     halfExtents=[0.1, (y_lim[1] - y_lim[0] + 1) / 2, heightObs / 2])
            visIdxs[numObs] = p.createVisualShape(p.GEOM_BOX,
                                                  halfExtents=[0.1, (y_lim[1] - y_lim[0] + 1) / 2, heightObs / 2],
                                                  rgbaColor=[0.27, 0.89, 0.96, 1])
            # Right wall
            posObs[numObs + 1] = [x_lim[1], (y_lim[0] + y_lim[1] - 1) / 2.0, 0.0]
            colIdxs[numObs + 1] = p.createCollisionShape(p.GEOM_BOX,
                                                         halfExtents=[0.1, (y_lim[1] - y_lim[0] + 1) / 2, heightObs / 2])
            visIdxs[numObs + 1] = p.createVisualShape(p.GEOM_BOX,
                                                      halfExtents=[0.1, (y_lim[1] - y_lim[0] + 1) / 2, heightObs / 2],
                                                      rgbaColor=[0.49, 0.59, 0.49, 1])
            # Top Wall
            orientObs[numObs + 2] = [0, 0, np.sqrt(2) / 2, np.sqrt(2) / 2]
            posObs[numObs + 2] = [(x_lim[0] + x_lim[1]) / 2.0, y_lim[1], 0.0]
            colIdxs[numObs + 2] = p.createCollisionShape(p.GEOM_BOX,
                                                         halfExtents=[0.1, (x_lim[1] - x_lim[0]) / 2.0, heightObs / 2])
            visIdxs[numObs + 2] = p.createVisualShape(p.GEOM_BOX,
                                                      halfExtents=[0.1, (x_lim[1] - x_lim[0]) / 2.0, heightObs / 2],
                                                      rgbaColor=[0.74, 0.69, 0.36, 1])
            # Bottom Wall
            orientObs[numObs + 6] = [0, 0, np.sqrt(2) / 2, np.sqrt(2) / 2]
            posObs[numObs + 6] = [(x_lim[0] + x_lim[1]) / 2.0, y_lim[0] - 1, 0.0]
            colIdxs[numObs + 6] = p.createCollisionShape(p.GEOM_BOX,
                                                         halfExtents=[0.1, (x_lim[1] - x_lim[0]) / 2.0, heightObs / 2])
            visIdxs[numObs + 6] = p.createVisualShape(p.GEOM_BOX,
                                                      halfExtents=[0.1, (x_lim[1] - x_lim[0]) / 2.0, heightObs / 2],
                                                      rgbaColor=[0.5, 0.5, 0.5, 1])
            # Two obstacles
            # Obstacle 1
            posObs[numObs + 3] = [x_lim[0] + 3, self.sample_y1[s], 0]
            colIdxs[numObs + 3] = p.createCollisionShape(p.GEOM_BOX,
                                                         halfExtents=[0.65, 1.5, heightObs / 2])
            visIdxs[numObs + 3] = p.createVisualShape(p.GEOM_BOX,
                                                      halfExtents=[0.65, 1.5, heightObs / 2],
                                                      rgbaColor=[0.69, 0.35, 0.47, 1])
            # Obstacle 2
            posObs[numObs + 4] = [x_lim[1] - 2., self.sample_y2[s], 0]
            colIdxs[numObs + 4] = p.createCollisionShape(p.GEOM_BOX,
                                                         halfExtents=[2, 0.65, heightObs / 2])
            visIdxs[numObs + 4] = p.createVisualShape(p.GEOM_BOX,
                                                      halfExtents=[2, 0.65, heightObs / 2],
                                                      rgbaColor=[0.38, 0.03, 0.63, 1])
        elif self.mode == 'test2':
            # Two obstacles
            # Obstacle 1
            posObs[numObs + 3] = [x_lim[0] + 3, self.sample_y1[s], 0]
            colIdxs[numObs + 3] = p.createCollisionShape(p.GEOM_BOX,
                                                         halfExtents=[0.65, 1.5, heightObs / 2])
            visIdxs[numObs + 3] = p.createVisualShape(p.GEOM_BOX,
                                                      halfExtents=[0.65, 1.5, heightObs / 2],
                                                      rgbaColor=[0, 0, 1, 1])
            # Obstacle 2
            posObs[numObs + 4] = [x_lim[1] - 2., self.sample_y2[s], 0]
            colIdxs[numObs + 4] = p.createCollisionShape(p.GEOM_BOX,
                                                         halfExtents=[2, 0.65, heightObs / 2])
            visIdxs[numObs + 4] = p.createVisualShape(p.GEOM_BOX,
                                                      halfExtents=[2, 0.65, heightObs / 2],
                                                      rgbaColor=[1, 0, 0, 1])
        else:
            # Two obstacles
            # Obstacle 1
            posObs[numObs + 3] = [x_lim[0] + 3, self.sample_y1[s], 0]
            colIdxs[numObs + 3] = p.createCollisionShape(p.GEOM_BOX,
                                                         halfExtents=[0.65, 1.5, heightObs / 2])
            visIdxs[numObs + 3] = p.createVisualShape(p.GEOM_BOX,
                                                      halfExtents=[0.65, 1.5, heightObs / 2],
                                                      rgbaColor=[1, 0, 0, 1])
            # Obstacle 2
            posObs[numObs + 4] = [x_lim[1] - 2., self.sample_y2[s], 0]
            colIdxs[numObs + 4] = p.createCollisionShape(p.GEOM_BOX,
                                                         halfExtents=[2, 0.65, heightObs / 2])
            visIdxs[numObs + 4] = p.createVisualShape(p.GEOM_BOX,
                                                      halfExtents=[2, 0.65, heightObs / 2],
                                                      rgbaColor=[0, 0, 1, 1])
        # Goal Marker (visual only, no collision shape)
        if self.task is not None:
            posObs[numObs + 5] = [self.task.goal[0], self.task.goal[1], 0]
            visIdxs[numObs + 5] = p.createVisualShape(p.GEOM_CYLINDER,
                                                      radius=0.5, length=2.5,
                                                      rgbaColor=[0., 1., 0., 1])
        obsUid = p.createMultiBody(baseCollisionShapeIndex=-1, baseVisualShapeIndex=-1, basePosition=[0, 0, 0],
                                   baseOrientation=[0, 0, 0, 1], baseInertialFramePosition=[0, 0, 0],
                                   baseInertialFrameOrientation=[0, 0, 0, 1], linkMasses=linkMasses,
                                   linkCollisionShapeIndices=colIdxs, linkVisualShapeIndices=visIdxs,
                                   linkPositions=posObs, linkOrientations=orientObs, linkParentIndices=parentIdxs,
                                   linkInertialFramePositions=linkInertialFramePositions,
                                   linkInertialFrameOrientations=linkInertialFrameOrientations,
                                   linkJointTypes=linkJointTypes, linkJointAxis=linkJointAxis)
        p.resetDebugVisualizerCamera(cameraDistance=15., cameraYaw=0., cameraPitch=-85., cameraTargetPosition=[0, 5, 0])
        return obsUid
# Corridor Environment
#**********************************************************************************************************************
class Corridor(Environment, SetEnvironment):
    '''
    ENVIRONMENT DESCRIPTION:
    One red corridor, one green corridor with randomly chosen colored walls
    '''
    def __init__(self, robot, parallel=False, gui=False, x_min=-5.0, x_max=5.0, y_min=0.0, y_max=10.0):
        # Rectangular arena [x_min, x_max] x [y_min, y_max]; pybullet is set
        # up immediately via the base-class setup_pybullet() call below.
        self.parallel = parallel  # run-in-parallel flag consumed elsewhere (base classes)
        self.gui = gui  # whether the pybullet GUI should be shown
        self.robot = robot
        # Walls are 100x the robot's height so they always tower over it.
        self.height_obs = 100 * robot.height
        self.x_lim = [x_min, x_max]
        self.y_lim = [y_min, y_max]
        self.p = None  # pybullet client; populated by setup_pybullet
        self.husky = None  # robot body uid; populated by setup_pybullet
        self.sphere = None  # auxiliary body uid; populated by setup_pybullet
        self.setup_pybullet(self.robot.get_robot())
    def generate_obstacles(self):
        """Create all static geometry for the corridor world and return its uid.

        Builds four boundary walls (each with a randomly chosen RGB color),
        two green walls forming corridor 1 (x in [-1.1, -0.1]) and two red
        walls forming corridor 2 (x in [0.1, 1.1]), merges everything into a
        single fixed (zero-mass) multi-body, and points the debug camera
        straight down at the arena.
        """
        p = self.p
        x_lim = self.x_lim
        y_lim = self.y_lim
        numObs = 0  # no free-standing obstacles; only the 8 environment parts
        heightObs = self.height_obs
        # Candidate per-channel color values 0.0, 0.1, ..., 1.0 for wall colors.
        rgb_range = np.linspace(0, 1, 11)
        numEnvParts = 8  # 4 boundary walls + 2 walls per corridor
        # Per-link attribute arrays required by p.createMultiBody below.
        linkMasses = [None] * (numObs + numEnvParts)
        colIdxs = [None] * (numObs + numEnvParts)
        visIdxs = [None] * (numObs + numEnvParts)
        posObs = [None] * (numObs + numEnvParts)
        orientObs = [None] * (numObs + numEnvParts)
        parentIdxs = [None] * (numObs + numEnvParts)
        linkInertialFramePositions = [None] * (numObs + numEnvParts)
        linkInertialFrameOrientations = [None] * (numObs + numEnvParts)
        linkJointTypes = [None] * (numObs + numEnvParts)
        linkJointAxis = [None] * (numObs + numEnvParts)
        # Defaults for every link: zero mass (static), fixed joint, identity
        # orientation, no visual shape unless assigned below.
        for obs in range(numObs + numEnvParts):
            linkMasses[obs] = 0.0
            visIdxs[obs] = -1
            parentIdxs[obs] = 0
            linkInertialFramePositions[obs] = [0, 0, 0]
            linkInertialFrameOrientations[obs] = [0, 0, 0, 1]
            linkJointTypes[obs] = p.JOINT_FIXED
            linkJointAxis[obs] = np.array([0, 0, 1])
            orientObs[obs] = [0, 0, 0, 1]
        # Left wall
        # NOTE(review): np.random.choice(rgb_range, 1) yields a 1-element
        # array per channel; pybullet appears to accept this — confirm.
        posObs[numObs] = [x_lim[0], (y_lim[0] + y_lim[1]) / 2.0, 0.0]
        colIdxs[numObs] = p.createCollisionShape(p.GEOM_BOX,
                                                 halfExtents=[0.1, (y_lim[1] - y_lim[0]) / 2.0, heightObs / 2])
        visIdxs[numObs] = p.createVisualShape(p.GEOM_BOX,
                                              halfExtents=[0.1, (y_lim[1] - y_lim[0]) / 2.0, heightObs / 2],
                                              rgbaColor=[np.random.choice(rgb_range, 1), np.random.choice(rgb_range, 1),
                                                         np.random.choice(rgb_range, 1), 1])
        # Right wall
        posObs[numObs + 1] = [x_lim[1], (y_lim[0] + y_lim[1]) / 2.0, 0.0]
        colIdxs[numObs + 1] = p.createCollisionShape(p.GEOM_BOX,
                                                     halfExtents=[0.1, (y_lim[1] - y_lim[0]) / 2.0, heightObs / 2])
        visIdxs[numObs + 1] = p.createVisualShape(p.GEOM_BOX,
                                                  halfExtents=[0.1, (y_lim[1] - y_lim[0]) / 2.0, heightObs / 2],
                                                  rgbaColor=[np.random.choice(rgb_range, 1),
                                                             np.random.choice(rgb_range, 1),
                                                             np.random.choice(rgb_range, 1), 1])
        # Bottom wall
        # Rotated 90 degrees about z (quaternion [0, 0, sin(pi/4), cos(pi/4)])
        # so the box's long axis runs along x.
        orientObs[numObs + 2] = [0, 0, np.sqrt(2) / 2, np.sqrt(2) / 2]
        posObs[numObs + 2] = [(x_lim[0] + x_lim[1]) / 2.0, y_lim[0], 0.0]
        colIdxs[numObs + 2] = p.createCollisionShape(p.GEOM_BOX,
                                                     halfExtents=[0.1, (x_lim[1] - x_lim[0]) / 2.0, heightObs / 2])
        visIdxs[numObs + 2] = p.createVisualShape(p.GEOM_BOX,
                                                  halfExtents=[0.1, (x_lim[1] - x_lim[0]) / 2.0, heightObs / 2],
                                                  rgbaColor=[np.random.choice(rgb_range, 1),
                                                             np.random.choice(rgb_range, 1),
                                                             np.random.choice(rgb_range, 1), 1])
        # Top wall
        orientObs[numObs + 3] = [0, 0, np.sqrt(2) / 2, np.sqrt(2) / 2]
        posObs[numObs + 3] = [(x_lim[0] + x_lim[1]) / 2.0, y_lim[1], 0.0]
        colIdxs[numObs + 3] = p.createCollisionShape(p.GEOM_BOX,
                                                     halfExtents=[0.1, (x_lim[1] - x_lim[0]) / 2.0, heightObs / 2])
        visIdxs[numObs + 3] = p.createVisualShape(p.GEOM_BOX,
                                                  halfExtents=[0.1, (x_lim[1] - x_lim[0]) / 2.0, heightObs / 2],
                                                  rgbaColor=[np.random.choice(rgb_range, 1),
                                                             np.random.choice(rgb_range, 1),
                                                             np.random.choice(rgb_range, 1), 1])
        # Corridor 1
        # Two green walls at x = -1.1 and x = -0.1, centered at y = 1.5.
        posObs[numObs + 4] = [-1.1, 1.5, 0.0]
        colIdxs[numObs + 4] = p.createCollisionShape(p.GEOM_BOX,
                                                     halfExtents=[0.1, 1.5, heightObs / 2])
        visIdxs[numObs + 4] = p.createVisualShape(p.GEOM_BOX,
                                                  halfExtents=[0.1, 1.5, heightObs / 2],
                                                  rgbaColor=[0, 1, 0, 1])
        posObs[numObs + 5] = [-0.1, 1.5, 0.0]
        colIdxs[numObs + 5] = p.createCollisionShape(p.GEOM_BOX,
                                                     halfExtents=[0.1, 1.5, heightObs / 2])
        visIdxs[numObs + 5] = p.createVisualShape(p.GEOM_BOX,
                                                  halfExtents=[0.1, 1.5, heightObs / 2],
                                                  rgbaColor=[0, 1, 0, 1])
        # Corridor 2
        # Two red walls at x = 0.1 and x = 1.1, centered at y = 1.5.
        posObs[numObs + 6] = [0.1, 1.5, 0.0]
        colIdxs[numObs + 6] = p.createCollisionShape(p.GEOM_BOX,
                                                     halfExtents=[0.1, 1.5, heightObs / 2])
        visIdxs[numObs + 6] = p.createVisualShape(p.GEOM_BOX,
                                                  halfExtents=[0.1, 1.5, heightObs / 2],
                                                  rgbaColor=[1, 0, 0, 1])
        posObs[numObs + 7] = [1.1, 1.5, 0.0]
        colIdxs[numObs + 7] = p.createCollisionShape(p.GEOM_BOX,
                                                     halfExtents=[0.1, 1.5, heightObs / 2])
        visIdxs[numObs + 7] = p.createVisualShape(p.GEOM_BOX,
                                                  halfExtents=[0.1, 1.5, heightObs / 2],
                                                  rgbaColor=[1, 0, 0, 1])
        # Merge everything into one static multi-body rooted at the origin.
        obsUid = p.createMultiBody(baseCollisionShapeIndex=-1, baseVisualShapeIndex=-1, basePosition=[0, 0, 0],
                                   baseOrientation=[0, 0, 0, 1], baseInertialFramePosition=[0, 0, 0],
                                   baseInertialFrameOrientation=[0, 0, 0, 1], linkMasses=linkMasses,
                                   linkCollisionShapeIndices=colIdxs, linkVisualShapeIndices=visIdxs,
                                   linkPositions=posObs, linkOrientations=orientObs, linkParentIndices=parentIdxs,
                                   linkInertialFramePositions=linkInertialFramePositions,
                                   linkInertialFrameOrientations=linkInertialFrameOrientations,
                                   linkJointTypes=linkJointTypes, linkJointAxis=linkJointAxis)
        # Top-down debug camera over the arena center.
        p.resetDebugVisualizerCamera(cameraDistance=15., cameraYaw=0., cameraPitch=-85., cameraTargetPosition=[0, 5, 0])
        return obsUid
| 50.344828
| 121
| 0.470183
| 2,367
| 21,900
| 4.238276
| 0.091677
| 0.014952
| 0.035088
| 0.083333
| 0.810805
| 0.800738
| 0.781001
| 0.776316
| 0.762061
| 0.761762
| 0
| 0.062812
| 0.396621
| 21,900
| 434
| 122
| 50.460829
| 0.696383
| 0.055753
| 0
| 0.747604
| 0
| 0
| 0.009333
| 0.00525
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028754
| false
| 0.003195
| 0.015974
| 0
| 0.070288
| 0.003195
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3317f3d8306af2b3d6515d91ad539863ec86664f
| 2,861
|
py
|
Python
|
tests/resolver_tests.py
|
Ginkooo/ginkooowebsite
|
cd303c8aed7d17ce28e4593f0459ce37815dd36d
|
[
"MIT"
] | null | null | null |
tests/resolver_tests.py
|
Ginkooo/ginkooowebsite
|
cd303c8aed7d17ce28e4593f0459ce37815dd36d
|
[
"MIT"
] | null | null | null |
tests/resolver_tests.py
|
Ginkooo/ginkooowebsite
|
cd303c8aed7d17ce28e4593f0459ce37815dd36d
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
import src.resolver as resolver
from config import settings
class ResolverTests(TestCase):
    """Unit tests for src.resolver.get_parts_of_url."""

    def test_can_get_parts_of_url(self):
        """Each URL decomposes into (controller, action, params, qs_params).

        NOTE(review): this was originally named ``check_can_get_parts_of_url``;
        unittest only discovers methods whose names start with ``test``, so the
        test silently never ran.  Renamed so it executes; the old name is kept
        below as a backward-compatible alias.
        """
        urls = [
            '/',
            '/foo',
            '/foo/',
            '/foo/bar',
            '/foo/bar/',
            '/foo/bar/car/lar/',
            '/foo/bar/car/lar/dar/',
            '/foo/bar/car/?foo=bar&mar=far',
            '/foo/bar?foo=bar&mar=far/',
        ]
        results = [resolver.get_parts_of_url(url) for url in urls]

        # '/': everything falls back to the configured defaults.
        controller, action, params, qs_params = results[0]
        self.assertEqual(settings.DEFAULT_CONTROLLER, controller)
        self.assertEqual(settings.DEFAULT_ACTION, action)
        self.assertFalse(params)
        self.assertFalse(qs_params)

        # '/foo' and '/foo/': controller given, default action.
        for index in (1, 2):
            controller, action, params, qs_params = results[index]
            self.assertEqual('foo', controller)
            self.assertEqual(settings.DEFAULT_ACTION, action)
            self.assertFalse(params)
            self.assertFalse(qs_params)

        # '/foo/bar' and '/foo/bar/': controller and action, no extras.
        for index in (3, 4):
            controller, action, params, qs_params = results[index]
            self.assertEqual('foo', controller)
            self.assertEqual('bar', action)
            self.assertFalse(params)
            self.assertFalse(qs_params)

        # Extra path segments become positional params.
        controller, action, params, qs_params = results[5]
        self.assertEqual('foo', controller)
        self.assertEqual('bar', action)
        self.assertEqual('car', params[0])
        self.assertEqual('lar', params[1])
        self.assertFalse(qs_params)

        controller, action, params, qs_params = results[6]
        self.assertEqual('foo', controller)
        self.assertEqual('bar', action)
        self.assertEqual('car', params[0])
        self.assertEqual('lar', params[1])
        self.assertEqual('dar', params[2])
        self.assertFalse(qs_params)

        # '/foo/bar/car/?foo=bar&mar=far': query string parsed into qs_params.
        controller, action, params, qs_params = results[7]
        self.assertEqual('foo', controller)
        self.assertEqual('bar', action)
        self.assertEqual('car', params[0])
        self.assertEqual(1, len(params))
        self.assertEqual('far', qs_params['mar'])
        # TODO(review): results[8] ('/foo/bar?foo=bar&mar=far/') is never
        # asserted on -- the original file repeated the results[7] block
        # verbatim instead of testing it.  Add real expectations for it.

    # Backward-compatible alias for the original (undiscovered) method name.
    check_can_get_parts_of_url = test_can_get_parts_of_url
| 33.267442
| 65
| 0.607829
| 311
| 2,861
| 5.495177
| 0.157556
| 0.21065
| 0.115857
| 0.12639
| 0.797542
| 0.797542
| 0.775892
| 0.775892
| 0.775892
| 0.775892
| 0
| 0.008535
| 0.262845
| 2,861
| 85
| 66
| 33.658824
| 0.801802
| 0
| 0
| 0.585714
| 0
| 0
| 0.067808
| 0.026215
| 0
| 0
| 0
| 0
| 0.585714
| 1
| 0.014286
| false
| 0
| 0.042857
| 0
| 0.071429
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3369318b83db1b6103cc4fe17e4231f370b0a190
| 220
|
py
|
Python
|
tests/examples/helper.py
|
897615138/tfsnippet-jill
|
2fc898a4def866c8d3c685168df1fa22083bb143
|
[
"MIT"
] | 63
|
2018-06-06T11:56:40.000Z
|
2022-03-22T08:00:59.000Z
|
tests/examples/helper.py
|
897615138/tfsnippet-jill
|
2fc898a4def866c8d3c685168df1fa22083bb143
|
[
"MIT"
] | 39
|
2018-07-04T12:40:53.000Z
|
2022-02-09T23:48:44.000Z
|
tests/examples/helper.py
|
897615138/tfsnippet-jill
|
2fc898a4def866c8d3c685168df1fa22083bb143
|
[
"MIT"
] | 34
|
2018-06-25T09:59:22.000Z
|
2022-02-23T12:46:33.000Z
|
import os
import unittest
def skipUnlessRunExamplesTests():
    """Decorator factory: skip the decorated test unless the environment
    variable RUN_EXAMPLES_TEST_CASE is set to exactly '1'."""
    run_requested = os.environ.get('RUN_EXAMPLES_TEST_CASE') == '1'
    return unittest.skipUnless(
        run_requested,
        'RUN_EXAMPLES_TEST_CASE is not set to 1, thus skipped',
    )
| 22
| 62
| 0.7
| 28
| 220
| 5.285714
| 0.714286
| 0.148649
| 0.202703
| 0.256757
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011494
| 0.209091
| 220
| 9
| 63
| 24.444444
| 0.83908
| 0
| 0
| 0
| 0
| 0
| 0.340909
| 0.2
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| true
| 0
| 0.285714
| 0.142857
| 0.571429
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
cc031e0dcfdc07cd4c5c24c9f64562c5c65fde5c
| 8,766
|
py
|
Python
|
src/graphql/scalar_descriptors/strict/test/scalar_descriptors.py
|
btrekkie/graphql
|
6c118550267eeb57a9653f4f46d7bbd6c5902110
|
[
"MIT"
] | null | null | null |
src/graphql/scalar_descriptors/strict/test/scalar_descriptors.py
|
btrekkie/graphql
|
6c118550267eeb57a9653f4f46d7bbd6c5902110
|
[
"MIT"
] | null | null | null |
src/graphql/scalar_descriptors/strict/test/scalar_descriptors.py
|
btrekkie/graphql
|
6c118550267eeb57a9653f4f46d7bbd6c5902110
|
[
"MIT"
] | null | null | null |
import unittest
from graphql.scalar_descriptors.strict import GraphQlStrictBooleanDescriptor
from graphql.scalar_descriptors.strict import GraphQlStrictFloatDescriptor
from graphql.scalar_descriptors.strict import GraphQlStrictIdDescriptor
from graphql.scalar_descriptors.strict import GraphQlStrictIntDescriptor
from graphql.scalar_descriptors.strict import GraphQlStrictStringDescriptor
class GraphQlStrictScalarDescriptorsTest(unittest.TestCase):
"""Tests scalar descriptors in graphql.scalar_descriptors.strict."""
def test_string(self):
"""Test GraphQlStrictStringDescriptor."""
descriptor = GraphQlStrictStringDescriptor('String')
# graphql_to_python
self.assertEqual('foo', descriptor.graphql_to_python('foo'))
self.assertEqual('', descriptor.graphql_to_python(''))
with self.assertRaises(TypeError):
descriptor.graphql_to_python(None)
with self.assertRaises(TypeError):
descriptor.graphql_to_python(12)
with self.assertRaises(TypeError):
descriptor.graphql_to_python(True)
with self.assertRaises(TypeError):
descriptor.graphql_to_python(6.2)
with self.assertRaises(TypeError):
descriptor.graphql_to_python(object())
# python_to_graphql
self.assertEqual('foo', descriptor.python_to_graphql('foo'))
self.assertEqual('', descriptor.python_to_graphql(''))
with self.assertRaises(TypeError):
descriptor.python_to_graphql(None)
with self.assertRaises(TypeError):
descriptor.python_to_graphql(12)
with self.assertRaises(TypeError):
descriptor.python_to_graphql(True)
with self.assertRaises(TypeError):
descriptor.python_to_graphql(6.2)
with self.assertRaises(TypeError):
descriptor.python_to_graphql(object())
def test_id(self):
"""Test GraphQlStrictIdDescriptor."""
descriptor = GraphQlStrictIdDescriptor('String')
# graphql_to_python
self.assertEqual('foo', descriptor.graphql_to_python('foo'))
self.assertEqual('', descriptor.graphql_to_python(''))
self.assertEqual('12', descriptor.graphql_to_python(12))
with self.assertRaises(TypeError):
descriptor.graphql_to_python(None)
with self.assertRaises(TypeError):
descriptor.graphql_to_python(True)
with self.assertRaises(TypeError):
descriptor.graphql_to_python(6.2)
with self.assertRaises(TypeError):
descriptor.graphql_to_python(object())
# python_to_graphql
self.assertEqual('foo', descriptor.python_to_graphql('foo'))
self.assertEqual('', descriptor.python_to_graphql(''))
with self.assertRaises(TypeError):
descriptor.python_to_graphql(None)
with self.assertRaises(TypeError):
descriptor.python_to_graphql(12)
with self.assertRaises(TypeError):
descriptor.python_to_graphql(True)
with self.assertRaises(TypeError):
descriptor.python_to_graphql(6.2)
with self.assertRaises(TypeError):
descriptor.python_to_graphql(object())
def test_int(self):
"""Test GraphQlStrictIntDescriptor."""
descriptor = GraphQlStrictIntDescriptor('Integer')
# graphql_to_python
self.assertEqual(42, descriptor.graphql_to_python(42))
self.assertEqual(-12, descriptor.graphql_to_python(-12))
self.assertEqual(0, descriptor.graphql_to_python(0))
with self.assertRaises(TypeError):
descriptor.graphql_to_python(None)
with self.assertRaises(TypeError):
descriptor.graphql_to_python('14')
with self.assertRaises(TypeError):
descriptor.graphql_to_python(2.6)
with self.assertRaises(TypeError):
descriptor.graphql_to_python('2.6')
with self.assertRaises(ValueError):
descriptor.graphql_to_python(123456789012)
with self.assertRaises(TypeError):
descriptor.graphql_to_python(object())
# python_to_graphql
self.assertEqual(42, descriptor.python_to_graphql(42))
self.assertEqual(-12, descriptor.python_to_graphql(-12))
self.assertEqual(0, descriptor.python_to_graphql(0))
with self.assertRaises(TypeError):
descriptor.python_to_graphql(None)
with self.assertRaises(TypeError):
descriptor.python_to_graphql('14')
with self.assertRaises(TypeError):
descriptor.python_to_graphql(2.6)
with self.assertRaises(TypeError):
descriptor.python_to_graphql('2.6')
with self.assertRaises(ValueError):
descriptor.python_to_graphql(123456789012)
with self.assertRaises(TypeError):
descriptor.python_to_graphql(object())
def test_float(self):
"""Test GraphQlStrictFloatDescriptor."""
descriptor = GraphQlStrictFloatDescriptor('Float')
# graphql_to_python
self.assertEqual(42, descriptor.graphql_to_python(42))
self.assertEqual(-12, descriptor.graphql_to_python(-12))
self.assertEqual(0, descriptor.graphql_to_python(0))
self.assertTrue(
descriptor.graphql_to_python(123456789123456789123456789L) in
(
123456789123456789123456789L,
float(123456789123456789123456789L),
))
self.assertEqual(4.3, descriptor.graphql_to_python(4.3))
self.assertEqual(-15.3, descriptor.graphql_to_python(-15.3))
self.assertEqual(1.4e28, descriptor.graphql_to_python(1.4e28))
self.assertEqual(2.6e-8, descriptor.graphql_to_python(2.6e-8))
with self.assertRaises(TypeError):
descriptor.graphql_to_python(None)
with self.assertRaises(TypeError):
descriptor.graphql_to_python('14')
with self.assertRaises(TypeError):
descriptor.graphql_to_python('6.5')
with self.assertRaises(TypeError):
descriptor.graphql_to_python('6.5e-2')
with self.assertRaises(TypeError):
descriptor.graphql_to_python(object())
# python_to_graphql
self.assertEqual(42, descriptor.python_to_graphql(42))
self.assertEqual(-12, descriptor.python_to_graphql(-12))
self.assertEqual(0, descriptor.python_to_graphql(0))
self.assertTrue(
descriptor.python_to_graphql(123456789123456789123456789L) in
(
123456789123456789123456789L,
float(123456789123456789123456789L),
))
self.assertEqual(4.3, descriptor.python_to_graphql(4.3))
self.assertEqual(-15.3, descriptor.python_to_graphql(-15.3))
self.assertEqual(1.4e28, descriptor.python_to_graphql(1.4e28))
self.assertEqual(2.6e-8, descriptor.python_to_graphql(2.6e-8))
with self.assertRaises(TypeError):
descriptor.python_to_graphql(None)
with self.assertRaises(TypeError):
descriptor.python_to_graphql('14')
with self.assertRaises(TypeError):
descriptor.python_to_graphql('6.5')
with self.assertRaises(TypeError):
descriptor.python_to_graphql('6.5e-2')
with self.assertRaises(TypeError):
descriptor.python_to_graphql(object())
def test_boolean(self):
"""Test GraphQlStrictBooleanDescriptor."""
descriptor = GraphQlStrictBooleanDescriptor('Boolean')
# graphql_to_python
self.assertEqual(True, descriptor.graphql_to_python(True))
self.assertEqual(False, descriptor.graphql_to_python(False))
with self.assertRaises(TypeError):
descriptor.graphql_to_python(None)
with self.assertRaises(TypeError):
descriptor.graphql_to_python('12')
with self.assertRaises(TypeError):
descriptor.graphql_to_python('true')
with self.assertRaises(TypeError):
descriptor.graphql_to_python(42)
with self.assertRaises(TypeError):
descriptor.graphql_to_python(object())
# python_to_graphql
self.assertEqual(True, descriptor.python_to_graphql(True))
self.assertEqual(False, descriptor.python_to_graphql(False))
with self.assertRaises(TypeError):
descriptor.python_to_graphql(None)
with self.assertRaises(TypeError):
descriptor.python_to_graphql('12')
with self.assertRaises(TypeError):
descriptor.python_to_graphql('true')
with self.assertRaises(TypeError):
descriptor.python_to_graphql(42)
with self.assertRaises(TypeError):
descriptor.python_to_graphql(object())
| 44.050251
| 76
| 0.680242
| 895
| 8,766
| 6.435754
| 0.063687
| 0.070833
| 0.177083
| 0.246701
| 0.852083
| 0.810764
| 0.769444
| 0.740278
| 0.723438
| 0.693576
| 0
| 0.047864
| 0.223021
| 8,766
| 198
| 77
| 44.272727
| 0.797827
| 0.02042
| 0
| 0.689024
| 0
| 0
| 0.012154
| 0
| 0
| 0
| 0
| 0
| 0.52439
| 0
| null | null | 0
| 0.036585
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
cc2b09adb51ec2aee1acd64f6e0cd12b1fb97c57
| 217
|
py
|
Python
|
frontend/views.py
|
arnaudlimbourg/rencontres-django-2016
|
e7074d772791be7068f155d832c2c9265a8c9522
|
[
"Apache-2.0"
] | null | null | null |
frontend/views.py
|
arnaudlimbourg/rencontres-django-2016
|
e7074d772791be7068f155d832c2c9265a8c9522
|
[
"Apache-2.0"
] | null | null | null |
frontend/views.py
|
arnaudlimbourg/rencontres-django-2016
|
e7074d772791be7068f155d832c2c9265a8c9522
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
from django.shortcuts import render_to_response
def index(request):
    """Render the site's landing page.

    ``render_to_response`` was deprecated in Django 2.0 and removed in 3.0;
    ``render`` (already imported at the top of this file) is the supported
    replacement and exposes ``request`` to the template via a RequestContext,
    which is what the original's ``context={"request": request}`` emulated.
    """
    return render(request, 'index.html')
| 27.125
| 73
| 0.806452
| 29
| 217
| 5.896552
| 0.517241
| 0.175439
| 0.222222
| 0.292398
| 0.362573
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.110599
| 217
| 7
| 74
| 31
| 0.88601
| 0
| 0
| 0
| 0
| 0
| 0.078341
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.6
| 0.2
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
cc2c132a60b55de5852b91806855d60ae07baa4d
| 131
|
py
|
Python
|
reverser/tests/reverse_test.py
|
devacto/python-puzzles
|
e92d43b950c3cc44a03b85199fc0f568a3c7b70b
|
[
"MIT"
] | null | null | null |
reverser/tests/reverse_test.py
|
devacto/python-puzzles
|
e92d43b950c3cc44a03b85199fc0f568a3c7b70b
|
[
"MIT"
] | null | null | null |
reverser/tests/reverse_test.py
|
devacto/python-puzzles
|
e92d43b950c3cc44a03b85199fc0f568a3c7b70b
|
[
"MIT"
] | null | null | null |
from nose.tools import assert_equal
from reverser import reverse
def test_return():
    """reverse.reverse should return its argument reversed."""
    reversed_word = reverse.reverse("go")
    assert_equal(reversed_word, "og")
| 21.833333
| 45
| 0.763359
| 19
| 131
| 5.105263
| 0.684211
| 0.226804
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129771
| 131
| 5
| 46
| 26.2
| 0.850877
| 0
| 0
| 0
| 0
| 0
| 0.030534
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
cc2fb183652bb56a016f46cedd017dfb565ced91
| 16,602
|
py
|
Python
|
tests/test_mapping.py
|
oarepo/invenio-oarepo-multilingual
|
03d382c152aa44f2912c13b225adb418dbf48109
|
[
"MIT"
] | null | null | null |
tests/test_mapping.py
|
oarepo/invenio-oarepo-multilingual
|
03d382c152aa44f2912c13b225adb418dbf48109
|
[
"MIT"
] | 3
|
2020-08-30T18:00:00.000Z
|
2021-08-05T16:34:28.000Z
|
tests/test_mapping.py
|
oarepo/oarepo-multilingual
|
03d382c152aa44f2912c13b225adb418dbf48109
|
[
"MIT"
] | 4
|
2020-08-20T11:18:40.000Z
|
2021-03-24T10:53:40.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 CESNET.
#
# Invenio OpenID Connect is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
from flask import Flask
from oarepo_multilingual.mapping.mapping_handler import handler
def test_mapping():
    """handler() builds a per-language mapping from the default template,
    overridden per language by ELASTICSEARCH_LANGUAGE_TEMPLATES."""
    def tmpl(raw_type):
        # One language's mapping template whose 'raw' subfield has raw_type.
        return {'type': 'text', 'fields': {'raw': {'type': raw_type}}}

    def expected(**raw_types):
        # Expected handler() output: one template per language code.
        return {'type': 'object',
                'properties': {code: tmpl(t) for code, t in raw_types.items()}}

    app = Flask('testapp')
    app.config.update(ELASTICSEARCH_DEFAULT_LANGUAGE_TEMPLATE=tmpl('keyword'))

    # The default template applies to every supported language.
    app.config.update(MULTILINGUAL_SUPPORTED_LANGUAGES=["cs", "en", "_"])
    assert handler(app=app) == expected(cs='keyword', en='keyword', _='keyword')

    app.config.update(MULTILINGUAL_SUPPORTED_LANGUAGES=["_"])
    assert handler(app=app) == expected(_='keyword')

    app.config.update(MULTILINGUAL_SUPPORTED_LANGUAGES=["cs", "_"])
    assert handler(app=app) == expected(cs='keyword', _='keyword')

    # Explicit per-language templates override the default.
    app.config.update(MULTILINGUAL_SUPPORTED_LANGUAGES=["cs", "en", "_"])
    app.config.update(ELASTICSEARCH_LANGUAGE_TEMPLATES={
        "cs": tmpl('text'),
        "en": tmpl('text'),
        "_": tmpl('keyword'),
    })
    assert handler(app=app) == expected(cs='text', en='text', _='keyword')

    # Languages without an explicit template fall back to the default.
    app.config.update(MULTILINGUAL_SUPPORTED_LANGUAGES=["cs", "en", "_"])
    app.config.update(ELASTICSEARCH_LANGUAGE_TEMPLATES={"cs": tmpl('text')})
    assert handler(app=app) == expected(cs='text', en='keyword', _='keyword')

    # The '_' (default-language) slot can be overridden on its own.
    app.config.update(MULTILINGUAL_SUPPORTED_LANGUAGES=["en", "_"])
    app.config.update(ELASTICSEARCH_LANGUAGE_TEMPLATES={"_": tmpl('text')})
    assert handler(app=app) == expected(en='keyword', _='text')
def test_ids():
    """handler(id=...) prefers '<lang>#<id>' templates over plain '<lang>'."""
    def tmpl(raw_type, **extra_fields):
        # Mapping template: 'raw' subfield of raw_type plus any extra fields.
        fields = {'raw': {'type': raw_type}}
        fields.update(extra_fields)
        return {'type': 'text', 'fields': fields}

    app = Flask('testapp')
    app.config.update(MULTILINGUAL_SUPPORTED_LANGUAGES=["cs", "_"])
    app.config.update(ELASTICSEARCH_LANGUAGE_TEMPLATES={
        "cs#context": tmpl('text', jej={'type': 'text'}),
        "cs": tmpl('text'),
    })
    assert handler(app=app, id='context') == {
        'type': 'object',
        'properties': {
            # 'cs#context' wins over the plain 'cs' template.
            'cs': tmpl('text', jej={'type': 'text'}),
            # No template matches '_' for this id -> empty mapping.
            '_': {},
        },
    }

    app.config.update(MULTILINGUAL_SUPPORTED_LANGUAGES=["cs", "en", "_"])
    app.config.update(ELASTICSEARCH_LANGUAGE_TEMPLATES={
        "cs#context": tmpl('text', jej={'type': 'text'}),
        "cs": tmpl('keyword'),
        "en": tmpl('keyword'),
        "_#context": tmpl('text'),
    })
    assert handler(app=app, id='context') == {
        'type': 'object',
        'properties': {
            'cs': tmpl('text', jej={'type': 'text'}),  # id-specific template
            'en': tmpl('keyword'),                     # plain template
            '_': tmpl('text'),                         # id-specific '_' template
        },
    }
def test_all_languages():
    """A '*#<id>' template is instantiated for every supported language,
    with '*' inside copy_to expanded to the language code."""
    codes = ['cs', 'en', 'sk', 'de', 'fr', 'ru', 'es', 'nl', 'it',
             'no', 'pl', 'da', 'el',
             'hu', 'lt', 'pt', 'bg', 'ro', 'sv']
    app = Flask('testapp')
    app.config.update(MULTILINGUAL_SUPPORTED_LANGUAGES=codes)
    app.config.update(ELASTICSEARCH_LANGUAGE_TEMPLATES={
        "*#context": {
            "type": "text",
            "copy_to": "field.*",
            "fields": {"raw": {"type": "keyword"}},
        }
    })
    assert handler(app=app, id='context') == {
        'type': 'object',
        'properties': {
            code: {
                'copy_to': 'field.' + code,  # '*' expands per language
                'fields': {'raw': {'type': 'keyword'}},
                'type': 'text',
            }
            for code in codes
        },
    }
def test_all_languages_2():
    """Like test_all_languages, but with a literal copy_to target: with no
    '*' in copy_to, every language copies to the same 'field'."""
    codes = ['cs', 'en', 'sk', 'de', 'fr', 'ru', 'es', 'nl', 'it',
             'no', 'pl', 'da', 'el',
             'hu', 'lt', 'pt', 'bg', 'ro', 'sv']
    app = Flask('testapp')
    app.config.update(MULTILINGUAL_SUPPORTED_LANGUAGES=codes)
    app.config.update(ELASTICSEARCH_LANGUAGE_TEMPLATES={
        "*#context": {
            "type": "text",
            "copy_to": "field",
            "fields": {"raw": {"type": "keyword"}},
        }
    })
    assert handler(app=app, id='context') == {
        'type': 'object',
        'properties': {
            code: {
                'copy_to': 'field',  # literal target, shared by all languages
                'fields': {'raw': {'type': 'keyword'}},
                'type': 'text',
            }
            for code in codes
        },
    }
| 28.138983
| 95
| 0.265149
| 946
| 16,602
| 4.542283
| 0.091966
| 0.165697
| 0.211776
| 0.255993
| 0.913661
| 0.913661
| 0.906679
| 0.885036
| 0.873865
| 0.867349
| 0
| 0.000841
| 0.570413
| 16,602
| 589
| 96
| 28.186757
| 0.601655
| 0.013613
| 0
| 0.610018
| 0
| 0
| 0.184968
| 0
| 0
| 0
| 0
| 0
| 0.017889
| 1
| 0.007156
| false
| 0
| 0.003578
| 0
| 0.010733
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
0bf2b853d3118e51c5623bc226ae8cfe7e939b90
| 116
|
py
|
Python
|
platform/hwconf_data/efm32jg1b/PythonSnippet/__init__.py
|
lenloe1/v2.7
|
9ac9c4a7bb37987af382c80647f42d84db5f2e1d
|
[
"Zlib"
] | null | null | null |
platform/hwconf_data/efm32jg1b/PythonSnippet/__init__.py
|
lenloe1/v2.7
|
9ac9c4a7bb37987af382c80647f42d84db5f2e1d
|
[
"Zlib"
] | 1
|
2020-08-25T02:36:22.000Z
|
2020-08-25T02:36:22.000Z
|
platform/hwconf_data/efm32jg1b/PythonSnippet/__init__.py
|
lenloe1/v2.7
|
9ac9c4a7bb37987af382c80647f42d84db5f2e1d
|
[
"Zlib"
] | 1
|
2020-08-25T01:56:04.000Z
|
2020-08-25T01:56:04.000Z
|
from efm32jg1b.halconfig import halconfig_types as types
from efm32jg1b.halconfig import halconfig_dependency as dep
| 58
| 59
| 0.887931
| 16
| 116
| 6.3125
| 0.5
| 0.257426
| 0.435644
| 0.554455
| 0.732673
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057143
| 0.094828
| 116
| 2
| 59
| 58
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
042cd755802e520db226530a880c6a76ded1f0b7
| 14,137
|
py
|
Python
|
experiments/WebsiteFingerprinting/Bento-DF/DeepFingerprint-changes/utility.py
|
rajKarra69420/bento
|
1324189e26acfe3a372882519bd78e037d93997c
|
[
"BSD-3-Clause"
] | 3
|
2021-12-01T02:11:15.000Z
|
2022-02-03T22:45:00.000Z
|
experiments/WebsiteFingerprinting/Bento-DF/DeepFingerprint-changes/utility.py
|
rajKarra69420/bento
|
1324189e26acfe3a372882519bd78e037d93997c
|
[
"BSD-3-Clause"
] | 4
|
2021-11-27T11:04:36.000Z
|
2022-02-17T02:53:21.000Z
|
experiments/WebsiteFingerprinting/Bento-DF/DeepFingerprint-changes/utility.py
|
rajKarra69420/bento
|
1324189e26acfe3a372882519bd78e037d93997c
|
[
"BSD-3-Clause"
] | 5
|
2021-07-01T20:23:43.000Z
|
2022-03-12T18:10:34.000Z
|
import pickle as pickle
import numpy as np
# Load data for the Bento non-defended dataset for the CW setting
def LoadDataBentoCW():
    """Load the Bento non-defended closed-world dataset.

    Returns (X_train, y_train, X_valid, y_valid, X_test, y_test) as
    numpy arrays. X rows are traffic-direction sequences; y rows are
    the corresponding website labels.
    """
    print("[BENTO] Loading non-defended dataset for closed-world scenario")
    # Directory storing the pickled splits.
    dataset_dir = '../dataset/ClosedWorld/BentoBrowser/'

    # Every split is a latin1-encoded pickle converted to a numpy array.
    data = {}
    for split in ('X_train', 'y_train', 'X_valid', 'y_valid', 'X_test', 'y_test'):
        with open(dataset_dir + split + '_VANILLA.pkl', 'rb') as handle:
            data[split] = np.array(pickle.load(handle, encoding='latin1'))

    print("Data dimensions:")
    print("X: Training data's shape : ", data['X_train'].shape)
    print("y: Training data's shape : ", data['y_train'].shape)
    print("X: Validation data's shape : ", data['X_valid'].shape)
    print("y: Validation data's shape : ", data['y_valid'].shape)
    print("X: Testing data's shape : ", data['X_test'].shape)
    print("y: Testing data's shape : ", data['y_test'].shape)
    return (data['X_train'], data['y_train'], data['X_valid'],
            data['y_valid'], data['X_test'], data['y_test'])
# Load data for the non-defended dataset for the CW setting
def LoadDataNoDefCW():
    """Load the non-defended closed-world dataset.

    Returns (X_train, y_train, X_valid, y_valid, X_test, y_test) as
    numpy arrays. X rows are traffic-direction sequences; y rows are
    the corresponding website labels.
    """
    print("Loading non-defended dataset for closed-world scenario")
    # Directory storing the pickled splits.
    dataset_dir = 'D:/ClosedWorld/NoDef/'

    # Every split is a latin1-encoded pickle converted to a numpy array.
    data = {}
    for split in ('X_train', 'y_train', 'X_valid', 'y_valid', 'X_test', 'y_test'):
        with open(dataset_dir + split + '_NoDef.pkl', 'rb') as handle:
            data[split] = np.array(pickle.load(handle, encoding='latin1'))

    print("Data dimensions:")
    print("X: Training data's shape : ", data['X_train'].shape)
    print("y: Training data's shape : ", data['y_train'].shape)
    print("X: Validation data's shape : ", data['X_valid'].shape)
    print("y: Validation data's shape : ", data['y_valid'].shape)
    print("X: Testing data's shape : ", data['X_test'].shape)
    print("y: Testing data's shape : ", data['y_test'].shape)
    return (data['X_train'], data['y_train'], data['X_valid'],
            data['y_valid'], data['X_test'], data['y_test'])
# Load data for the WTF-PAD dataset for the CW setting
def LoadDataWTFPADCW():
    """Load the WTF-PAD closed-world dataset.

    Returns (X_train, y_train, X_valid, y_valid, X_test, y_test) as
    numpy arrays. X rows are traffic-direction sequences; y rows are
    the corresponding website labels.
    """
    print("Loading WTF-PAD dataset for closed-world scenario")
    # Directory storing the pickled splits.
    dataset_dir = 'D:/ClosedWorld/WTFPAD/'

    # Every split is a latin1-encoded pickle converted to a numpy array.
    data = {}
    for split in ('X_train', 'y_train', 'X_valid', 'y_valid', 'X_test', 'y_test'):
        with open(dataset_dir + split + '_WTFPAD.pkl', 'rb') as handle:
            data[split] = np.array(pickle.load(handle, encoding='latin1'))

    print("Data dimensions:")
    print("X: Training data's shape : ", data['X_train'].shape)
    print("y: Training data's shape : ", data['y_train'].shape)
    print("X: Validation data's shape : ", data['X_valid'].shape)
    print("y: Validation data's shape : ", data['y_valid'].shape)
    print("X: Testing data's shape : ", data['X_test'].shape)
    print("y: Testing data's shape : ", data['y_test'].shape)
    return (data['X_train'], data['y_train'], data['X_valid'],
            data['y_valid'], data['X_test'], data['y_test'])
# Load data for the Walkie-Talkie dataset for the CW setting
def LoadDataWalkieTalkieCW():
    """Load the Walkie-Talkie closed-world dataset.

    Returns (X_train, y_train, X_valid, y_valid, X_test, y_test) as
    numpy arrays. X rows are traffic-direction sequences; y rows are
    the corresponding website labels.
    """
    print("Loading Walkie-Talkie dataset for closed-world scenario")
    # Directory storing the pickled splits.
    dataset_dir = 'D:/ClosedWorld/WalkieTalkie/'

    # Every split is a latin1-encoded pickle converted to a numpy array.
    data = {}
    for split in ('X_train', 'y_train', 'X_valid', 'y_valid', 'X_test', 'y_test'):
        with open(dataset_dir + split + '_WalkieTalkie.pkl', 'rb') as handle:
            data[split] = np.array(pickle.load(handle, encoding='latin1'))

    print("Data dimensions:")
    print("X: Training data's shape : ", data['X_train'].shape)
    print("y: Training data's shape : ", data['y_train'].shape)
    print("X: Validation data's shape : ", data['X_valid'].shape)
    print("y: Validation data's shape : ", data['y_valid'].shape)
    print("X: Testing data's shape : ", data['X_test'].shape)
    print("y: Testing data's shape : ", data['y_test'].shape)
    return (data['X_train'], data['y_train'], data['X_valid'],
            data['y_valid'], data['X_test'], data['y_test'])
# Load data for the non-defended dataset for OW training
def LoadDataNoDefOW_Training():
    """Load the non-defended open-world training/validation splits.

    Returns (X_train, y_train, X_valid, y_valid) as numpy arrays.
    X rows are traffic-direction sequences; y rows are website labels.
    """
    print("Loading non-defended dataset for open-world scenario for training")
    # Directory storing the pickled splits.
    dataset_dir = '../dataset/OpenWorld/NoDef/'

    # Every split is a latin1-encoded pickle converted to a numpy array.
    data = {}
    for split in ('X_train', 'y_train', 'X_valid', 'y_valid'):
        with open(dataset_dir + split + '_NoDef.pkl', 'rb') as handle:
            data[split] = np.array(pickle.load(handle, encoding='latin1'))

    print("Data dimensions:")
    print("X: Training data's shape : ", data['X_train'].shape)
    print("y: Training data's shape : ", data['y_train'].shape)
    print("X: Validation data's shape : ", data['X_valid'].shape)
    print("y: Validation data's shape : ", data['y_valid'].shape)
    return data['X_train'], data['y_train'], data['X_valid'], data['y_valid']
# Load data for the non-defended dataset for OW evaluation
def LoadDataNoDefOW_Evaluation():
    """Load the non-defended open-world monitored/unmonitored test sets.

    Returns (X_test_Mon, y_test_Mon, X_test_Unmon, y_test_Unmon) as
    numpy arrays.
    """
    print("Loading non-defended dataset for open-world scenario for evaluation")
    # Directory storing the pickled splits.
    dataset_dir = '../dataset/OpenWorld/NoDef/'

    # Every part is a latin1-encoded pickle converted to a numpy array.
    arrays = []
    for part in ('X_test_Mon', 'y_test_Mon', 'X_test_Unmon', 'y_test_Unmon'):
        with open(dataset_dir + part + '_NoDef.pkl', 'rb') as handle:
            arrays.append(np.array(pickle.load(handle, encoding='latin1')))
    return tuple(arrays)
# Load data for the WTF-PAD dataset for OW training
def LoadDataWTFPADOW_Training():
    """Load the WTF-PAD open-world training/validation splits.

    Returns (X_train, y_train, X_valid, y_valid) as numpy arrays.
    X rows are traffic-direction sequences; y rows are website labels.
    """
    print("Loading WTF-PAD dataset for open-world scenario for training")
    # Directory storing the pickled splits.
    dataset_dir = '../dataset/OpenWorld/WTFPAD/'

    # Every split is a latin1-encoded pickle converted to a numpy array.
    data = {}
    for split in ('X_train', 'y_train', 'X_valid', 'y_valid'):
        with open(dataset_dir + split + '_WTFPAD.pkl', 'rb') as handle:
            data[split] = np.array(pickle.load(handle, encoding='latin1'))

    print("Data dimensions:")
    print("X: Training data's shape : ", data['X_train'].shape)
    print("y: Training data's shape : ", data['y_train'].shape)
    print("X: Validation data's shape : ", data['X_valid'].shape)
    print("y: Validation data's shape : ", data['y_valid'].shape)
    return data['X_train'], data['y_train'], data['X_valid'], data['y_valid']
# Load data for the WTF-PAD dataset for OW evaluation
def LoadDataWTFPADOW_Evaluation():
    """Load the WTF-PAD open-world monitored/unmonitored test sets.

    Returns (X_test_Mon, y_test_Mon, X_test_Unmon, y_test_Unmon) as
    numpy arrays.
    """
    print("Loading WTF-PAD dataset for open-world scenario for evaluation")
    # Directory storing the pickled splits.
    dataset_dir = '../dataset/OpenWorld/WTFPAD/'

    # Every part is a latin1-encoded pickle converted to a numpy array.
    arrays = []
    for part in ('X_test_Mon', 'y_test_Mon', 'X_test_Unmon', 'y_test_Unmon'):
        with open(dataset_dir + part + '_WTFPAD.pkl', 'rb') as handle:
            arrays.append(np.array(pickle.load(handle, encoding='latin1')))
    return tuple(arrays)
# Load data for the Walkie-Talkie dataset for OW training
def LoadDataWalkieTalkieOW_Training():
    """Load the Walkie-Talkie open-world training/validation splits.

    Returns (X_train, y_train, X_valid, y_valid) as numpy arrays.
    X rows are traffic-direction sequences; y rows are website labels.
    """
    print("Loading Walkie-Talkie dataset for open-world scenario for training")
    # Directory storing the pickled splits.
    dataset_dir = '../dataset/OpenWorld/WalkieTalkie/'

    # Every split is a latin1-encoded pickle converted to a numpy array.
    data = {}
    for split in ('X_train', 'y_train', 'X_valid', 'y_valid'):
        with open(dataset_dir + split + '_WalkieTalkie.pkl', 'rb') as handle:
            data[split] = np.array(pickle.load(handle, encoding='latin1'))

    print("Data dimensions:")
    print("X: Training data's shape : ", data['X_train'].shape)
    print("y: Training data's shape : ", data['y_train'].shape)
    print("X: Validation data's shape : ", data['X_valid'].shape)
    print("y: Validation data's shape : ", data['y_valid'].shape)
    return data['X_train'], data['y_train'], data['X_valid'], data['y_valid']
# Load data for the Walkie-Talkie dataset for OW evaluation
def LoadDataWalkieTalkieOW_Evaluation():
    """Load the Walkie-Talkie open-world monitored/unmonitored test sets.

    Returns (X_test_Mon, y_test_Mon, X_test_Unmon, y_test_Unmon) as
    numpy arrays.
    """
    print("Loading Walkie-Talkie dataset for open-world scenario for evaluation")
    # Directory storing the pickled splits.
    dataset_dir = '../dataset/OpenWorld/WalkieTalkie/'

    # Every part is a latin1-encoded pickle converted to a numpy array.
    arrays = []
    for part in ('X_test_Mon', 'y_test_Mon', 'X_test_Unmon', 'y_test_Unmon'):
        with open(dataset_dir + part + '_WalkieTalkie.pkl', 'rb') as handle:
            arrays.append(np.array(pickle.load(handle, encoding='latin1')))
    return tuple(arrays)
| 42.581325
| 81
| 0.693499
| 2,088
| 14,137
| 4.515326
| 0.037356
| 0.061519
| 0.076368
| 0.091642
| 0.967331
| 0.961179
| 0.95174
| 0.937102
| 0.932541
| 0.931799
| 0
| 0.004175
| 0.186744
| 14,137
| 331
| 82
| 42.70997
| 0.815865
| 0.167999
| 0
| 0.740933
| 0
| 0
| 0.287265
| 0.065556
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051813
| false
| 0
| 0.010363
| 0
| 0.11399
| 0.274611
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
04350c4f2add71af542bd93194b95079383f7e08
| 912
|
py
|
Python
|
popcorn_gallery/popcorn/search_indexes.py
|
Koenkk/popcorn_maker
|
0978b9f98dacd4e8eb753404b24eb584f410aa11
|
[
"BSD-3-Clause"
] | 15
|
2015-03-23T02:55:20.000Z
|
2021-01-12T12:42:30.000Z
|
popcorn_gallery/popcorn/search_indexes.py
|
Koenkk/popcorn_maker
|
0978b9f98dacd4e8eb753404b24eb584f410aa11
|
[
"BSD-3-Clause"
] | null | null | null |
popcorn_gallery/popcorn/search_indexes.py
|
Koenkk/popcorn_maker
|
0978b9f98dacd4e8eb753404b24eb584f410aa11
|
[
"BSD-3-Clause"
] | 16
|
2015-02-18T21:43:31.000Z
|
2021-11-09T22:50:03.000Z
|
from haystack import indexes
from .models import Project, Template
class TemplateIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index over Template objects."""
    # Primary document field; its content is rendered from a search
    # template (use_template=True) rather than a single model attribute.
    text = indexes.CharField(document=True, use_template=True)
    # Searchable copies of the model's name/description attributes.
    name = indexes.CharField(model_attr='name')
    description = indexes.CharField(model_attr='description')
    def get_model(self):
        # Model class this index covers.
        return Template
    def index_queryset(self):
        """Used when the entire index for model is updated."""
        # Restrict indexing to the model's `live` manager —
        # presumably only published/visible templates; verify in models.
        return self.get_model().live.all()
class ProjectIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index over Project objects."""
    # Primary document field; its content is rendered from a search
    # template (use_template=True) rather than a single model attribute.
    text = indexes.CharField(document=True, use_template=True)
    # Searchable copies of the model's name/description attributes.
    name = indexes.CharField(model_attr='name')
    description = indexes.CharField(model_attr='description')
    def get_model(self):
        # Model class this index covers.
        return Project
    def index_queryset(self):
        """Used when the entire index for model is updated."""
        # Restrict indexing to the model's `live` manager —
        # presumably only published/visible projects; verify in models.
        return self.get_model().live.all()
| 30.4
| 62
| 0.712719
| 111
| 912
| 5.747748
| 0.324324
| 0.15047
| 0.131661
| 0.15674
| 0.833856
| 0.833856
| 0.833856
| 0.833856
| 0.833856
| 0.833856
| 0
| 0
| 0.182018
| 912
| 29
| 63
| 31.448276
| 0.855228
| 0.10636
| 0
| 0.666667
| 0
| 0
| 0.037313
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.111111
| 0.111111
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 8
|
f0930cab7a2e83fc1db5cbdc16994a8879d56e2b
| 178
|
py
|
Python
|
site-root/change_loadout_name.py
|
TED-996/krait-twostones
|
51b27793b9cd536d680fb9a6785c57473d35cac1
|
[
"MIT"
] | null | null | null |
site-root/change_loadout_name.py
|
TED-996/krait-twostones
|
51b27793b9cd536d680fb9a6785c57473d35cac1
|
[
"MIT"
] | null | null | null |
site-root/change_loadout_name.py
|
TED-996/krait-twostones
|
51b27793b9cd536d680fb9a6785c57473d35cac1
|
[
"MIT"
] | null | null | null |
from ctrl import change_loadout_name
import krait
import logging
# Debug trace marking that this page script was reached.
# (Message is partly Romanian: "pagina rutabila" ~ "routable page".)
logging.debug("Got in pagina rutabila change_loadout_name ")
# Delegate building the HTTP response to the controller module.
krait.response = change_loadout_name.get_response()
| 29.666667
| 60
| 0.848315
| 26
| 178
| 5.538462
| 0.576923
| 0.270833
| 0.354167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095506
| 178
| 6
| 61
| 29.666667
| 0.89441
| 0
| 0
| 0
| 0
| 0
| 0.240223
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f0dc32771f05024b2b093d992f62a714d7ce8afa
| 191
|
py
|
Python
|
djangoratings/exceptions.py
|
gelo-zhukov/django-ratings
|
fb5495d7a9a3aec9800f12dff58803ff68a4c753
|
[
"BSD-2-Clause"
] | 68
|
2015-02-06T17:04:59.000Z
|
2021-11-26T14:43:46.000Z
|
djangoratings/exceptions.py
|
conorsheehan/django-ratings
|
ecc051df5096d57044e038a8ffa1504dea1acbbe
|
[
"BSD-2-Clause"
] | 13
|
2020-02-18T09:57:52.000Z
|
2022-01-13T02:12:04.000Z
|
djangoratings/exceptions.py
|
conorsheehan/django-ratings
|
ecc051df5096d57044e038a8ffa1504dea1acbbe
|
[
"BSD-2-Clause"
] | 58
|
2015-01-06T09:37:07.000Z
|
2022-03-02T22:37:36.000Z
|
class InvalidRating(ValueError):
    """Signals that a supplied rating value is not acceptable."""
class AuthRequired(TypeError):
    """Signals that the operation requires an authenticated user."""
class CannotChangeVote(Exception):
    """Signals that an existing vote may not be changed."""
class CannotDeleteVote(Exception):
    """Signals that an existing vote may not be deleted."""
class IPLimitReached(Exception):
    """Signals that the per-IP limit has been reached."""
| 38.2
| 39
| 0.848168
| 20
| 191
| 8.1
| 0.5
| 0.222222
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073298
| 191
| 5
| 40
| 38.2
| 0.915254
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
9bd7a404e1975638a7f2736cb4c6fe13c222fc46
| 6,658
|
py
|
Python
|
tests/utils.py
|
originalpkbims/subgrounds-pkbims
|
03271135d985bc4a53129edb0cb2391555012270
|
[
"Apache-2.0"
] | null | null | null |
tests/utils.py
|
originalpkbims/subgrounds-pkbims
|
03271135d985bc4a53129edb0cb2391555012270
|
[
"Apache-2.0"
] | null | null | null |
tests/utils.py
|
originalpkbims/subgrounds-pkbims
|
03271135d985bc4a53129edb0cb2391555012270
|
[
"Apache-2.0"
] | null | null | null |
from subgrounds.schema import (
TypeMeta,
SchemaMeta,
TypeRef,
# input_value_of_argument
)
from subgrounds.subgraph import Subgraph
def schema():
    """Build the SchemaMeta test fixture.

    Models a small subgraph schema: five scalar types, Query/Swap/
    Token/Pair object types, and the Pair_filter / Pair_orderBy /
    OrderDirection input types.
    """
    # Scalar leaf types all share the same constructor shape.
    type_map = {
        scalar: TypeMeta.ScalarMeta(scalar, '')
        for scalar in ('Int', 'Float', 'BigInt', 'BigDecimal', 'String')
    }

    type_map['OrderDirection'] = TypeMeta.EnumMeta('OrderDirection', '', [
        TypeMeta.EnumValueMeta('asc', ''),
        TypeMeta.EnumValueMeta('desc', '')
    ])

    # Root query type: a filterable `pairs` field and a bare `swaps` field.
    type_map['Query'] = TypeMeta.ObjectMeta('Query', '', fields=[
        TypeMeta.FieldMeta('pairs', '', [
            TypeMeta.ArgumentMeta('first', '', TypeRef.Named('Int'), None),
            TypeMeta.ArgumentMeta('where', '', TypeRef.Named('Pair_filter'), None),
            TypeMeta.ArgumentMeta('orderBy', '', TypeRef.Named('Pair_orderBy'), None),
            TypeMeta.ArgumentMeta('orderDirection', '', TypeRef.Named('OrderDirection'), None),
        ], TypeRef.non_null_list('Pair')),
        TypeMeta.FieldMeta('swaps', '', [], TypeRef.non_null_list('Swap')),
    ])

    # Swap: id/timestamp plus four BigDecimal amount fields.
    swap_fields = [
        TypeMeta.FieldMeta('id', '', [], TypeRef.Named('String')),
        TypeMeta.FieldMeta('timestamp', '', [], TypeRef.Named('BigInt')),
    ]
    for amount in ('amount0In', 'amount0Out', 'amount1In', 'amount1Out'):
        swap_fields.append(TypeMeta.FieldMeta(amount, '', [], TypeRef.Named('BigDecimal')))
    type_map['Swap'] = TypeMeta.ObjectMeta('Swap', '', fields=swap_fields)

    type_map['Token'] = TypeMeta.ObjectMeta('Token', '', fields=[
        TypeMeta.FieldMeta('id', '', [], TypeRef.Named('String')),
        TypeMeta.FieldMeta('name', '', [], TypeRef.Named('String')),
        TypeMeta.FieldMeta('symbol', '', [], TypeRef.Named('String')),
        TypeMeta.FieldMeta('decimals', '', [], TypeRef.Named('Int')),
    ])

    type_map['Pair'] = TypeMeta.ObjectMeta('Pair', '', fields=[
        TypeMeta.FieldMeta('id', '', [], TypeRef.Named('String')),
        TypeMeta.FieldMeta('token0', '', [], TypeRef.Named('Token')),
        TypeMeta.FieldMeta('token1', '', [], TypeRef.Named('Token')),
        TypeMeta.FieldMeta('reserveUSD', '', [], TypeRef.Named('BigDecimal')),
        TypeMeta.FieldMeta('priceToken0', '', [], TypeRef.Named('BigDecimal')),
        TypeMeta.FieldMeta('priceToken1', '', [], TypeRef.Named('BigDecimal')),
    ])

    # Pair_filter: every argument is (name, scalar type, no default).
    filter_args = []
    for arg_name, scalar in (
        ('token0', 'String'), ('token1', 'String'),
        ('reserveUSD_lt', 'BigDecimal'), ('reserveUSD_gt', 'BigDecimal'),
        ('priceToken0_lt', 'BigDecimal'), ('priceToken0_gt', 'BigDecimal'),
        ('priceToken1_lt', 'BigDecimal'), ('priceToken1_gt', 'BigDecimal'),
    ):
        filter_args.append(TypeMeta.ArgumentMeta(arg_name, '', TypeRef.Named(scalar), None))
    type_map['Pair_filter'] = TypeMeta.InputObjectMeta('Pair_filter', '', filter_args)

    type_map['Pair_orderBy'] = TypeMeta.EnumMeta('Pair_orderBy', '', [
        TypeMeta.EnumValueMeta('id', ''),
        TypeMeta.EnumValueMeta('reserveUSD', '')
    ])

    return SchemaMeta(query_type='Query', type_map=type_map)
def subgraph():
    """Build the Subgraph test fixture.

    Wraps the schema fixture in a Subgraph with an empty URL. The
    original duplicated the entire ~55-line type map from ``schema()``
    verbatim; reusing that fixture removes the duplication and keeps
    the two fixtures from drifting apart.
    """
    # Same SchemaMeta as schema(); "" stands in for the subgraph URL.
    return Subgraph("", schema())
def identity(x):
    """Return *x* unchanged (identity function)."""
    return x
| 51.215385
| 91
| 0.627966
| 584
| 6,658
| 7.092466
| 0.111301
| 0.16224
| 0.138098
| 0.075326
| 0.961371
| 0.961371
| 0.961371
| 0.961371
| 0.961371
| 0.961371
| 0
| 0.00495
| 0.150345
| 6,658
| 129
| 92
| 51.612403
| 0.727241
| 0.003454
| 0
| 0.885246
| 0
| 0
| 0.20594
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02459
| false
| 0
| 0.016393
| 0.02459
| 0.065574
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9be2afaee3c59ffa976d24db903f71f737d209d8
| 42,509
|
py
|
Python
|
ArcPy/ModeloBooleano.py
|
phporath/GIS-Tools
|
5a1613dfcd516ae1194dd4f1d3981ed11aa0dfa7
|
[
"MIT"
] | null | null | null |
ArcPy/ModeloBooleano.py
|
phporath/GIS-Tools
|
5a1613dfcd516ae1194dd4f1d3981ed11aa0dfa7
|
[
"MIT"
] | null | null | null |
ArcPy/ModeloBooleano.py
|
phporath/GIS-Tools
|
5a1613dfcd516ae1194dd4f1d3981ed11aa0dfa7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------
# ModeloBooleano.py
# Created on: 2019-10-22 10:43:58.00000
# (generated by ArcGIS/ModelBuilder)
# Description:
# ---------------------------------------------------------------------------
# Set the necessary product code
# import arcinfo
# Import arcpy module
import arcpy
# Check out any necessary licenses
# Spatial Analyst ("spatial") is needed by the arcpy.gp.*_sa tools used below
# (ExtractByMask, Slope, Reclassify).
arcpy.CheckOutExtension("spatial")
# Load required toolboxes
# NOTE(review): hard-coded absolute path to the author's custom toolbox; the
# script only runs on a machine with this exact directory layout.
arcpy.ImportToolbox("C:/Users/phpor/Documents/UFSC/Dissertação/ModelBuilder/01_ModelBuilder/FerramentaDissertacao.tbx")
# Local variables:
# --- Source datasets: hard-coded absolute paths to the thesis SHP/GDB data ---
Hidrografia = "C:\\Users\\phpor\\Documents\\UFSC\\Dissertação\\Dados_georreferenciados\\SHP\\hidrografia.shp"
Massa_dagua = "C:\\Users\\phpor\\Documents\\UFSC\\Dissertação\\Dados_georreferenciados\\SHP\\techoMassaDagua.shp"
Nascentes = "C:\\Users\\phpor\\Documents\\UFSC\\Dissertação\\Dados_georreferenciados\\SHP\\nascenteRio.shp"
Área_de_estudo = "C:\\Users\\phpor\\Documents\\UFSC\\Dissertação\\Dados_georreferenciados\\SHP\\areaEstudo.shp"
raster = "C:\\Users\\phpor\\Documents\\UFSC\\Dissertação\\Dados_georreferenciados\\GDB\\Modelagem.gdb"
terraIndigena_shp = "C:\\Users\\phpor\\Documents\\UFSC\\Dissertação\\Dados_georreferenciados\\SHP\\terraIndigena.shp"
Sítio_arqueológico = "C:\\Users\\phpor\\Documents\\UFSC\\Dissertação\\Dados_georreferenciados\\SHP\\sitiosArquelogicos.shp"
Área_Edificada = "C:\\Users\\phpor\\Documents\\UFSC\\Dissertação\\Dados_georreferenciados\\SHP\\areaEdificada.shp"
setorRisco_shp = "C:\\Users\\phpor\\Documents\\UFSC\\Dissertação\\Dados_georreferenciados\\SHP\\setorRisco.shp"
linhaTransmissao_shp = "C:\\Users\\phpor\\Documents\\UFSC\\Dissertação\\Dados_georreferenciados\\SHP\\linhaTransmissao.shp"
# Well-known text for SIRGAS 2000 / UTM zone 22S — the working projection for the model.
Output_Coordinate_System = "PROJCS['SIRGAS_2000_UTM_Zone_22S',GEOGCS['GCS_SIRGAS_2000',DATUM['D_SIRGAS_2000',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Transverse_Mercator'],PARAMETER['False_Easting',500000.0],PARAMETER['False_Northing',10000000.0],PARAMETER['Central_Meridian',-51.0],PARAMETER['Scale_Factor',0.9996],PARAMETER['Latitude_Of_Origin',0.0],UNIT['Meter',1.0]]"
mangue_shp = "C:\\Users\\phpor\\Documents\\UFSC\\Dissertação\\Dados_georreferenciados\\SHP\\mangue.shp"
vetor = "C:\\Users\\phpor\\Documents\\UFSC\\Dissertação\\Dados_georreferenciados\\GDB\\Modelagem.gdb\\BOL_vetor"
# --- Intermediate outputs. "%raster%" / "%vetor%" are ModelBuilder inline-variable
# substitutions expanded by the geoprocessor at run time (they refer to the two
# workspace variables above). Several names point at the same dataset: the exported
# model keeps one variable per model node even when nodes share an output.
# NOTE: "Booelana" is a (misspelled) variable name carried over from the model —
# it cannot be renamed without touching every reference.
Mask_Mosaico_MDT = "%raster%\\mosaico_MDT_extractbymask"
Slope_Mosaico_MDT = "%raster%\\mosaico_MDT_slope"
APP_Margem_Rio_10m_Pol = "%vetor%\\bol_APP_margem_rio_10m_pol"
Buffer_APP_Margem_de_Rio__10m__Pol = "%vetor%\\bol_APP_margem_rio_10m_buffer_pol"
bol_app_merge_Dissolve = "%vetor%\\bol_app_merge_Dissolve"
Adição_de_informação = "C:\\Users\\phpor\\Documents\\UFSC\\Dissertação\\Dados_georreferenciados\\SHP\\areaEstudo.shp"
Clip_APP = "%vetor%\\bol_APP_clip"
APP_Margem_de_Rio__10m_ = "%vetor%\\bol_APP_margem_rio_10m"
APP_Nascente = "%vetor%\\bol_nascentes"
Adição_campo = "C:\\Users\\phpor\\Documents\\UFSC\\Dissertação\\Dados_georreferenciados\\SHP\\areaEstudo.shp"
Erase_APP = "%vetor%\\bol_APP_erase"
Merge_APP = "%vetor%\\bol_APP_merge"
Adição_campo_Booelana__5_ = "%vetor%\\bol_APP_merge"
Raster_MDT = "%raster%\\mosaico_MDT_reclassify"
bol_APP_dissolve = "%vetor%\\bol_APP_dissolve"
Raster_APP = "%raster%\\bol_APP"
BOL_result = "%vetor%\\BOL_result"
APP_Margem_Rio_10_a_50_m_Pol = "%vetor%\\bol_APP_margem_rio_10a50m_pol"
Buffer_APP_Margem_Rio_10_a_50_m_Pol = "%vetor%\\bol_APP_margem_rio_10a50m_pol_buffer"
APP_Margem_Rio_50_a_200_m_Pol = "%vetor%\\bol_APP_margem_rio_50a200m_pol"
Buffer_APP_Margem_Rio_50_a_200_m_Pol = "%vetor%\\bol_APP_margem_rio_50a200m_pol_buffer"
Clip_Terra_Indígena = "%vetor%\\bol_ti_clip"
Erase_Terra_Indígena = "%vetor%\\bol_ti_erase"
Merge_Terra_Indígena = "%vetor%\\bol_ti_merge"
Adição_campo_Booelana__8_ = "%vetor%\\bol_ti_merge"
bol_ti_dissolve = "%vetor%\\bol_ti_dissolve"
Raster_Terra_Indígena = "%raster%\\bol_ti"
Sitio_Arqueológico = "%vetor%\\bol_sitio_arqueologico"
Clip_Sítio_Arqueológico = "%vetor%\\bol_sitio_arqueologico_clip"
Erase_Terra_Indígena__2_ = "%vetor%\\bol_sitio_arqueologico_erase"
Merge_Sítio_Arqueológico = "%vetor%\\bol_sitio_arqueologico_merge"
Adição_campo_Booelana__9_ = "%vetor%\\bol_sitio_arqueologico_merge"
bol_sitio_arqueologico_dissolve = "%vetor%\\bol_sitio_arqueologico_dissolve"
Raster_Sítio_Arqueológico = "%raster%\\bol_sitio_arqueologico"
Erase_Área_Edificada = "%vetor%\\bol_area_edificada_erase"
Merge_Área_Edificada = "%vetor%\\bol_area_edificada_merge"
Adição_campo_Booelana__10_ = "%vetor%\\bol_area_edificada_merge"
bol_area_edificada_dissolve = "%vetor%\\bol_area_edificada_dissolve"
Raster_Área_Edificada = "%raster%\\bol_area_edificada"
Buffer_LT = "%vetor%\\SHP\\bol_lt"
Clip_LT = "%vetor%\\bol_lt_clip"
Erase_LT = "%vetor%\\bol_lt_erase"
Merge_LT = "%vetor%\\bol_lt_merge"
Adição_campo_Booelana__11_ = "%vetor%\\bol_lt_merge"
bol_lt_dissolve = "%vetor%\\bol_lt_dissolve"
Raster_LT = "%raster%\\bol_lt"
Clip_Setor_de_Risco = "%vetor%\\bol_setor_risco_clip"
Erase_Setor_de_Risco = "%vetor%\\bol_setor_risco_erase"
Merge_Setor_de_Risco = "%vetor%\\bol_setor_risco_merge"
Adição_campo_Booelana__12_ = "%vetor%\\bol_setor_risco_merge"
bol_setor_risco_dissolve = "%vetor%\\bol_setor_risco_dissolve"
Raster_Setor_de_Risco = "%raster%\\bol_setor_risco"
Output_Values__2_ = "C:\\Users\\phpor\\Documents\\UFSC\\Dissertação\\Dados_georreferenciados\\Raster\\MDT\\MDT_SG-22-Z-D-II-3-NE-D.tif"
mosaico_MDT = "C:\\Users\\phpor\\Documents\\UFSC\\Dissertação\\Dados_georreferenciados\\GDB\\Modelagem.gdb\\mosaico_MDT"
Parâmetros_Booleanos__12_ = "%vetor%\\bol_setor_risco_merge"
Adição_campo_PorcArea = "%vetor%\\bol_setor_risco_merge"
Parâmetros_Booleanos__7_ = "%vetor%\\bol_setor_risco_merge"
Parâmetros_Booleanos__8_ = "%vetor%\\bol_ti_merge"
Adição_campo_PorcArea__2_ = "%vetor%\\bol_ti_merge"
Parâmetros_Booleanos__13_ = "%vetor%\\bol_ti_merge"
Parâmetros_Booleanos__11_ = "%vetor%\\bol_lt_merge"
Adição_campo_PorcArea__3_ = "%vetor%\\bol_lt_merge"
Parâmetros_Booleanos__14_ = "%vetor%\\bol_lt_merge"
Parâmetros_Booleanos__9_ = "%vetor%\\bol_sitio_arqueologico_merge"
Adição_campo_PorcArea__7_ = "%vetor%\\bol_sitio_arqueologico_merge"
Parâmetros_Booleanos__18_ = "%vetor%\\bol_sitio_arqueologico_merge"
Parâmetros_Booleanos__5_ = "%vetor%\\bol_APP_merge"
Adição_campo_PorcArea__9_ = "%vetor%\\bol_APP_merge"
Parâmetros_Booleanos__20_ = "%vetor%\\bol_APP_merge"
Parâmetros_Booleanos__10_ = "%vetor%\\bol_area_edificada_merge"
Adição_campo_PorcArea__10_ = "%vetor%\\bol_area_edificada_merge"
Parâmetros_Booleanos__21_ = "%vetor%\\bol_area_edificada_merge"
Merge_APP_2 = "%vetor%\\bol_app_merge"
APP_declividade = "%vetor%\\APP_declividade"
# Process: Add Field (7)
# Tag the study-area polygon with a text marker field ("Aestudo" = "Sim") so that,
# after later clip/erase/merge steps, features inside the study area can be told
# apart from features outside it.
arcpy.AddField_management(Área_de_estudo, "Aestudo", "TEXT", "2", "", "5", "", "NULLABLE", "NON_REQUIRED", "")
# Process: Calculate Field (7)
arcpy.CalculateField_management(Adição_campo, "Aestudo", "\"Sim\"", "PYTHON", "")
# Process: Select (2)
# Recurring pattern (generated by ModelBuilder): save the current output CRS,
# force SIRGAS 2000 / UTM 22S for one tool run, then restore the previous CRS.
tempEnvironment0 = arcpy.env.outputCoordinateSystem
arcpy.env.outputCoordinateSystem = "PROJCS['SIRGAS_2000_UTM_Zone_22S',GEOGCS['GCS_SIRGAS_2000',DATUM['D_SIRGAS_2000',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Transverse_Mercator'],PARAMETER['False_Easting',500000.0],PARAMETER['False_Northing',10000000.0],PARAMETER['Central_Meridian',-51.0],PARAMETER['Scale_Factor',0.9996],PARAMETER['Latitude_Of_Origin',0.0],UNIT['Meter',1.0]]"
# Select water bodies whose "largura" (width) attribute is 'Até 10m' (up to 10 m).
arcpy.Select_analysis(Massa_dagua, APP_Margem_Rio_10m_Pol, "\"largura\" = 'Até 10m'")
arcpy.env.outputCoordinateSystem = tempEnvironment0
# Process: Buffer (3)
# 30 m dissolved buffer around the <=10 m-wide water bodies.
arcpy.Buffer_analysis(APP_Margem_Rio_10m_Pol, Buffer_APP_Margem_de_Rio__10m__Pol, "30 Meters", "FULL", "ROUND", "ALL", "")
# Process: Select (3)
tempEnvironment0 = arcpy.env.outputCoordinateSystem
arcpy.env.outputCoordinateSystem = "PROJCS['SIRGAS_2000_UTM_Zone_22S',GEOGCS['GCS_SIRGAS_2000',DATUM['D_SIRGAS_2000',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Transverse_Mercator'],PARAMETER['False_Easting',500000.0],PARAMETER['False_Northing',10000000.0],PARAMETER['Central_Meridian',-51.0],PARAMETER['Scale_Factor',0.9996],PARAMETER['Latitude_Of_Origin',0.0],UNIT['Meter',1.0]]"
# Water bodies 10–50 m wide.
arcpy.Select_analysis(Massa_dagua, APP_Margem_Rio_10_a_50_m_Pol, "\"largura\" = '10 a 50m'")
arcpy.env.outputCoordinateSystem = tempEnvironment0
# Process: Buffer (2)
# 50 m dissolved buffer around the 10–50 m-wide water bodies.
arcpy.Buffer_analysis(APP_Margem_Rio_10_a_50_m_Pol, Buffer_APP_Margem_Rio_10_a_50_m_Pol, "50 Meters", "FULL", "ROUND", "ALL", "")
# Process: Buffer (7)
tempEnvironment0 = arcpy.env.outputCoordinateSystem
arcpy.env.outputCoordinateSystem = "PROJCS['SIRGAS_2000_UTM_Zone_22S',GEOGCS['GCS_SIRGAS_2000',DATUM['D_SIRGAS_2000',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Transverse_Mercator'],PARAMETER['False_Easting',500000.0],PARAMETER['False_Northing',10000000.0],PARAMETER['Central_Meridian',-51.0],PARAMETER['Scale_Factor',0.9996],PARAMETER['Latitude_Of_Origin',0.0],UNIT['Meter',1.0]]"
# 50 m dissolved buffer around springs (nascentes).
arcpy.Buffer_analysis(Nascentes, APP_Nascente, "50 Meters", "FULL", "ROUND", "ALL", "")
arcpy.env.outputCoordinateSystem = tempEnvironment0
# Process: Select (4)
tempEnvironment0 = arcpy.env.outputCoordinateSystem
arcpy.env.outputCoordinateSystem = "PROJCS['SIRGAS_2000_UTM_Zone_22S',GEOGCS['GCS_SIRGAS_2000',DATUM['D_SIRGAS_2000',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Transverse_Mercator'],PARAMETER['False_Easting',500000.0],PARAMETER['False_Northing',10000000.0],PARAMETER['Central_Meridian',-51.0],PARAMETER['Scale_Factor',0.9996],PARAMETER['Latitude_Of_Origin',0.0],UNIT['Meter',1.0]]"
# Water bodies 50–200 m wide.
arcpy.Select_analysis(Massa_dagua, APP_Margem_Rio_50_a_200_m_Pol, "\"largura\" = '50 a 200m'")
arcpy.env.outputCoordinateSystem = tempEnvironment0
# Process: Buffer (4)
# 100 m dissolved buffer around the 50–200 m-wide water bodies.
arcpy.Buffer_analysis(APP_Margem_Rio_50_a_200_m_Pol, Buffer_APP_Margem_Rio_50_a_200_m_Pol, "100 Meters", "FULL", "ROUND", "ALL", "")
# Process: Buffer (6)
tempEnvironment0 = arcpy.env.outputCoordinateSystem
arcpy.env.outputCoordinateSystem = "PROJCS['SIRGAS_2000_UTM_Zone_22S',GEOGCS['GCS_SIRGAS_2000',DATUM['D_SIRGAS_2000',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Transverse_Mercator'],PARAMETER['False_Easting',500000.0],PARAMETER['False_Northing',10000000.0],PARAMETER['Central_Meridian',-51.0],PARAMETER['Scale_Factor',0.9996],PARAMETER['Latitude_Of_Origin',0.0],UNIT['Meter',1.0]]"
# 30 m dissolved buffer around the drainage network (hydrography lines).
arcpy.Buffer_analysis(Hidrografia, APP_Margem_de_Rio__10m_, "30 Meters", "FULL", "ROUND", "ALL", "")
arcpy.env.outputCoordinateSystem = tempEnvironment0
# Process: Analise Booleana Itarador Raster
arcpy.gp.toolbox = "C:/Users/phpor/Documents/UFSC/Dissertação/ModelBuilder/01_ModelBuilder/FerramentaDissertacao.tbx";
# Warning: the toolbox C:/Users/phpor/Documents/UFSC/Dissertação/ModelBuilder/01_ModelBuilder/FerramentaDissertacao.tbx DOES NOT have an alias.
# Please assign this toolbox an alias to avoid tool name collisions
# And replace arcpy.gp.AnaliseBooleanaItaradorRaster(...) with arcpy.AnaliseBooleanaItaradorRaster_ALIAS(...)
arcpy.gp.AnaliseBooleanaItaradorRaster()
# Process: Mosaic To New Raster
# Build a 32-bit signed mosaic named "mosaico_MDT" in the target GDB from the
# DEM tile(s); "MAXIMUM" resolves overlaps, "MATCH" the colormap mode.
arcpy.MosaicToNewRaster_management(Output_Values__2_, raster, "mosaico_MDT", Output_Coordinate_System, "32_BIT_SIGNED", "", "1", "MAXIMUM", "MATCH")
# Process: Extract by Mask
# Clip the DEM mosaic to the study-area polygon.
arcpy.gp.ExtractByMask_sa(mosaico_MDT, Área_de_estudo, Mask_Mosaico_MDT)
# Process: Slope
# Slope in percent rise with z-factor 1.
arcpy.gp.Slope_sa(Mask_Mosaico_MDT, Slope_Mosaico_MDT, "PERCENT_RISE", "1")
# Process: Reclassify (2)
# Slopes of 0–100 %% become NoData, 100–1500 %% become 0 — i.e. only cells steeper
# than 100 %% remain, mapped to value 0 (later vectorized as APP_declividade).
arcpy.gp.Reclassify_sa(Slope_Mosaico_MDT, "Value", "0 100 NODATA;100 1500 0", Raster_MDT, "DATA")
# Process: Raster to Polygon
# Vectorize the steep-slope raster into the BOL_vetor feature dataset.
tempEnvironment0 = arcpy.env.workspace
arcpy.env.workspace = "C:\\Users\\phpor\\Documents\\UFSC\\Dissertação\\Dados_georreferenciados\\GDB\\Modelagem.gdb\\BOL_vetor"
arcpy.RasterToPolygon_conversion(Raster_MDT, APP_declividade, "NO_SIMPLIFY", "VALUE")
arcpy.env.workspace = tempEnvironment0
# Process: Merge (6)
# Merge every APP-candidate layer (river-margin buffers, springs, steep slopes,
# mangrove) into one feature class. The huge second argument is the ModelBuilder-
# exported field map that reconciles attribute schemas across the inputs — do not
# edit it by hand.
arcpy.Merge_management("%vetor%\\bol_APP_margem_rio_10m_buffer_pol;%vetor%\\bol_APP_margem_rio_10a50m_pol_buffer;%vetor%\\bol_nascentes;%vetor%\\bol_APP_margem_rio_50a200m_pol_buffer;%vetor%\\bol_APP_margem_rio_10m;%vetor%\\APP_declividade;C:\\Users\\phpor\\Documents\\UFSC\\Dissertação\\Dados_georreferenciados\\SHP\\mangue.shp", Merge_APP_2, "Id \"Id\" true true false 6 Long 0 6 ,First,#,%vetor%\\APP_declividade,ID,-1,-1,%vetor%\\bol_APP_margem_rio_10m,ID,-1,-1;app \"app\" true true false 50 Text 0 0 ,First,#,C:\\Users\\phpor\\Documents\\UFSC\\Dissertação\\Dados_georreferenciados\\SHP\\mangue.shp,app,-1,-1;Shape_Leng \"Shape_Leng\" false true true 0 Double 0 0 ,First,#,%vetor%\\APP_declividade,Shape_Length,-1,-1;Shape_Area \"Shape_Area\" false true true 0 Double 0 0 ,First,#,%vetor%\\APP_declividade,Shape_Area,-1,-1,%vetor%\\bol_APP_margem_rio_10a50m_pol_buffer,Shape_area,-1,-1,%vetor%\\bol_APP_margem_rio_10m_buffer_pol,Shape_area,-1,-1,%vetor%\\bol_APP_margem_rio_50a200m_pol_buffer,Shape_area,-1,-1;GRIDCODE \"GRIDCODE\" true true false 0 Long 0 0 ,First,#,%vetor%\\APP_declividade,GRIDCODE,-1,-1;NOME \"NOME\" true true false 80 Text 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10a50m_pol_buffer,NOME,-1,-1,%vetor%\\bol_APP_margem_rio_10m_buffer_pol,NOME,-1,-1,%vetor%\\bol_APP_margem_rio_50a200m_pol_buffer,NOME,-1,-1;GEOMETRIAA \"GEOMETRIAA\" true true false 9 Long 0 9 ,First,#,%vetor%\\bol_APP_margem_rio_10a50m_pol_buffer,GEOMETRIAA,-1,-1,%vetor%\\bol_APP_margem_rio_10m_buffer_pol,GEOMETRIAA,-1,-1,%vetor%\\bol_APP_margem_rio_50a200m_pol_buffer,GEOMETRIAA,-1,-1;REGIME \"REGIME\" true true false 9 Long 0 9 ,First,#,%vetor%\\bol_APP_margem_rio_10a50m_pol_buffer,REGIME,-1,-1,%vetor%\\bol_APP_margem_rio_10m_buffer_pol,REGIME,-1,-1,%vetor%\\bol_APP_margem_rio_50a200m_pol_buffer,REGIME,-1,-1;NOMEABREV \"NOMEABREV\" true true false 50 Text 0 0 
,First,#,%vetor%\\bol_APP_margem_rio_10a50m_pol_buffer,NOMEABREV,-1,-1,%vetor%\\bol_APP_margem_rio_10m_buffer_pol,NOMEABREV,-1,-1,%vetor%\\bol_APP_margem_rio_50a200m_pol_buffer,NOMEABREV,-1,-1;ID_TRECHO_ \"ID_TRECHO_\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10a50m_pol_buffer,ID_TRECHO_,-1,-1,%vetor%\\bol_APP_margem_rio_10m_buffer_pol,ID_TRECHO_,-1,-1,%vetor%\\bol_APP_margem_rio_50a200m_pol_buffer,ID_TRECHO_,-1,-1;TIPOTRECHO \"TIPOTRECHO\" true true false 9 Long 0 9 ,First,#,%vetor%\\bol_APP_margem_rio_10a50m_pol_buffer,TIPOTRECHO,-1,-1,%vetor%\\bol_APP_margem_rio_10m_buffer_pol,TIPOTRECHO,-1,-1,%vetor%\\bol_APP_margem_rio_50a200m_pol_buffer,TIPOTRECHO,-1,-1;SALINIDADE \"SALINIDADE\" true true false 9 Long 0 9 ,First,#,%vetor%\\bol_APP_margem_rio_10a50m_pol_buffer,SALINIDADE,-1,-1,%vetor%\\bol_APP_margem_rio_10m_buffer_pol,SALINIDADE,-1,-1,%vetor%\\bol_APP_margem_rio_50a200m_pol_buffer,SALINIDADE,-1,-1;largura \"largura\" true true false 15 Text 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10a50m_pol_buffer,largura,-1,-1,%vetor%\\bol_APP_margem_rio_10m_buffer_pol,largura,-1,-1,%vetor%\\bol_APP_margem_rio_50a200m_pol_buffer,largura,-1,-1;Shape_le_1 \"Shape_le_1\" true true false 0 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10a50m_pol_buffer,Shape_length,-1,-1;BUFF_DIST \"BUFF_DIST\" true true false 0 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10a50m_pol_buffer,BUFF_DIST,-1,-1,%vetor%\\bol_APP_margem_rio_10m_buffer_pol,BUFF_DIST,-1,-1,%vetor%\\bol_APP_margem_rio_50a200m_pol_buffer,BUFF_DIST,-1,-1,%vetor%\\bol_APP_margem_rio_10m,BUFF_DIST,-1,-1,%vetor%\\bol_nascentes,BUFF_DIST,-1,-1;Shape_le_2 \"Shape_le_2\" true true false 0 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m_buffer_pol,Shape_length,-1,-1;Shape_le_3 \"Shape_le_3\" true true false 0 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_50a200m_pol_buffer,Shape_length,-1,-1;COTRECHO \"COTRECHO\" true true false 19 Double 0 0 
,First,#,%vetor%\\bol_APP_margem_rio_10m,COTRECHO,-1,-1;COCURSODAG \"COCURSODAG\" true true false 50 Text 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,COCURSODAG,-1,-1,%vetor%\\bol_nascentes,COCURSODAG,-1,-1;COBACIA \"COBACIA\" true true false 50 Text 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,COBACIA,-1,-1;CORIO \"CORIO\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,CORIO,-1,-1;CODOM \"CODOM\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,CODOM,-1,-1;DEDOMINIAL \"DEDOMINIAL\" true true false 50 Text 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,DEDOMINIAL,-1,-1;NUCOMPTREC \"NUCOMPTREC\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,NUCOMPTREC,-1,-1;NUDISTBACT \"NUDISTBACT\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,NUDISTBACT,-1,-1;NUDISTCDAG \"NUDISTCDAG\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,NUDISTCDAG,-1,-1;NUAREACONT \"NUAREACONT\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,NUAREACONT,-1,-1;NUAREAMONT \"NUAREAMONT\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,NUAREAMONT,-1,-1;NUNIVOTTO \"NUNIVOTTO\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,NUNIVOTTO,-1,-1;DEDIREC \"DEDIREC\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,DEDIREC,-1,-1;DECORPODAG \"DECORPODAG\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,DECORPODAG,-1,-1;DELIGACAO \"DELIGACAO\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,DELIGACAO,-1,-1;NORIO \"NORIO\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,NORIO,-1,-1;NORIOCOMP \"NORIOCOMP\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,NORIOCOMP,-1,-1;NUCOMPRIO \"NUCOMPRIO\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,NUCOMPRIO,-1,-1;NUDISTBACR \"NUDISTBACR\" true true false 19 Double 
0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,NUDISTBACR,-1,-1;COCDADESAG \"COCDADESAG\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,COCDADESAG,-1,-1;NUCOMPCDA \"NUCOMPCDA\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,NUCOMPCDA,-1,-1;NUTRJUS \"NUTRJUS\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,NUTRJUS,-1,-1;NUTRMON \"NUTRMON\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,NUTRMON,-1,-1;NUTRAFL \"NUTRAFL\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,NUTRAFL,-1,-1;NUDISTBACC \"NUDISTBACC\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,NUDISTBACC,-1,-1;NUAREABACC \"NUAREABACC\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,NUAREABACC,-1,-1;NUORDEMCDA \"NUORDEMCDA\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,NUORDEMCDA,-1,-1;NUNIVOTCDA \"NUNIVOTCDA\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,NUNIVOTCDA,-1,-1;NULONDETRE \"NULONDETRE\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,NULONDETRE,-1,-1;NULATDETRE \"NULATDETRE\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,NULATDETRE,-1,-1;NULONPATRE \"NULONPATRE\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,NULONPATRE,-1,-1;NULATPATRE \"NULATPATRE\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,NULATPATRE,-1,-1;NULONDECDA \"NULONDECDA\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,NULONDECDA,-1,-1,%vetor%\\bol_nascentes,NULONDECDA,-1,-1;NULATDECDA \"NULATDECDA\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,NULATDECDA,-1,-1,%vetor%\\bol_nascentes,NULATDECDA,-1,-1;NULONPACDA \"NULONPACDA\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,NULONPACDA,-1,-1;NULATPACDA \"NULATPACDA\" true true false 19 Double 0 0 
,First,#,%vetor%\\bol_APP_margem_rio_10m,NULATPACDA,-1,-1;NULONDERIO \"NULONDERIO\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,NULONDERIO,-1,-1;NULATDERIO \"NULATDERIO\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,NULATDERIO,-1,-1;NULONPARIO \"NULONPARIO\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,NULONPARIO,-1,-1;NULATPARIO \"NULATPARIO\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,NULATPARIO,-1,-1;DTVERSAO \"DTVERSAO\" true true false 50 Text 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,DTVERSAO,-1,-1,%vetor%\\bol_nascentes,DTVERSAO,-1,-1;poligono \"poligono\" true true false 3 Text 0 0 ,First,#,%vetor%\\bol_APP_margem_rio_10m,poligono,-1,-1;OBJECTID \"OBJECTID\" true true false 9 Long 0 9 ,First,#,%vetor%\\bol_nascentes,OBJECTID,-1,-1;CONOCDA \"CONOCDA\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_nascentes,CONOCDA,-1,-1;COCDA \"COCDA\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_nascentes,COCDA,-1,-1;COCDACOSTA \"COCDACOSTA\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_nascentes,COCDACOSTA,-1,-1;CONASCDA \"CONASCDA\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_nascentes,CONASCDA,-1,-1")
# Process: Dissolve (4)
# Collapse all merged APP polygons into a single multipart feature (no dissolve fields).
arcpy.Dissolve_management(Merge_APP_2, bol_app_merge_Dissolve, "", "", "MULTI_PART", "DISSOLVE_LINES")
# Process: Clip (4)
# APP area inside the study-area polygon.
arcpy.Clip_analysis(bol_app_merge_Dissolve, Adição_de_informação, Clip_APP, "")
# Process: Erase (5)
# Complement: study-area remainder NOT covered by APP.
arcpy.Erase_analysis(Adição_de_informação, Clip_APP, Erase_APP, "")
# Process: Merge (5)
# Recombine the APP-outside part (erase) and the APP-inside part (clip) so the
# result tiles the whole study area; the field map is ModelBuilder-generated.
arcpy.Merge_management("%vetor%\\bol_APP_erase;%vetor%\\bol_APP_clip", Merge_APP, "nome \"nome\" true true false 40 Text 0 0 ,First,#,%vetor%\\bol_APP_erase,nome,-1,-1;fonte \"fonte\" true true false 20 Text 0 0 ,First,#,%vetor%\\bol_APP_erase,fonte,-1,-1;Aestudo \"Aestudo\" true true false 5 Text 0 0 ,First,#,%vetor%\\bol_APP_erase,Aestudo,-1,-1;area \"area\" true true false 13 Float 0 0 ,First,#,%vetor%\\bol_APP_erase,area,-1,-1;Shape_length \"Shape_length\" true true false 0 Double 0 0 ,First,#,%vetor%\\bol_APP_erase,Shape_length,-1,-1;Shape_area \"Shape_area\" true true false 0 Double 0 0 ,First,#,%vetor%\\bol_APP_erase,Shape_area,-1,-1")
# Process: Add Field (9)
# "Booleana" = 1 where the feature carries Aestudo == 'Sim' (outside the APP
# constraint, i.e. suitable), 0 otherwise.
arcpy.AddField_management(Merge_APP, "Booleana", "SHORT", "2", "", "", "", "NULLABLE", "NON_REQUIRED", "")
# Process: Calculate Field (9)
# The "\\n" sequences produce literal \n characters that the ArcGIS field
# calculator expands into newlines inside the code block — do not "fix" them.
arcpy.CalculateField_management(Adição_campo_Booelana__5_, "Booleana", "Classify ( !Aestudo! )", "PYTHON", "def Classify(booleana):\\n if (booleana == 'Sim'):\\n return 1\\n else:\\n return 0")
# Process: Add Field (21)
arcpy.AddField_management(Parâmetros_Booleanos__5_, "PorcArea", "FLOAT", "7", "5", "", "", "NULLABLE", "NON_REQUIRED", "")
# Process: Calculate Field (21)
# Percentage of the total study area; 318725000 is presumably the study-area
# size in square meters (hard-coded) — TODO confirm against areaEstudo.shp.
arcpy.CalculateField_management(Adição_campo_PorcArea__9_, "PorcArea", "(!Shape.area@squaremeters!*100)/318725000", "PYTHON", "")
# Process: Dissolve (9)
# One feature per Booleana value, summing the area percentages.
arcpy.Dissolve_management(Parâmetros_Booleanos__20_, bol_APP_dissolve, "Booleana", "PorcArea SUM", "MULTI_PART", "DISSOLVE_LINES")
# Process: Feature to Raster (8)
# Rasterize the Boolean APP layer at a 5-unit (presumably 5 m) cell size.
arcpy.FeatureToRaster_conversion(bol_APP_dissolve, "Booleana", Raster_APP, "5")
# Process: Clip (6)
# Indigenous-land polygons intersecting the study area (forced into SIRGAS 2000
# / UTM 22S via the save/override/restore output-CRS pattern).
tempEnvironment0 = arcpy.env.outputCoordinateSystem
arcpy.env.outputCoordinateSystem = "PROJCS['SIRGAS_2000_UTM_Zone_22S',GEOGCS['GCS_SIRGAS_2000',DATUM['D_SIRGAS_2000',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Transverse_Mercator'],PARAMETER['False_Easting',500000.0],PARAMETER['False_Northing',10000000.0],PARAMETER['Central_Meridian',-51.0],PARAMETER['Scale_Factor',0.9996],PARAMETER['Latitude_Of_Origin',0.0],UNIT['Meter',1.0]]"
arcpy.Clip_analysis(terraIndigena_shp, Adição_de_informação, Clip_Terra_Indígena, "")
arcpy.env.outputCoordinateSystem = tempEnvironment0
# Process: Erase (7)
# Study-area remainder outside indigenous lands.
arcpy.Erase_analysis(Adição_de_informação, Clip_Terra_Indígena, Erase_Terra_Indígena, "")
# Process: Merge (7)
# Recombine clip (inside indigenous land) and erase (outside) so the output
# tiles the study area; ModelBuilder-generated field map — do not edit by hand.
arcpy.Merge_management("%vetor%\\bol_ti_clip;%vetor%\\bol_ti_erase", Merge_Terra_Indígena, "idTerraInd \"idTerraInd\" true true false 9 Long 0 9 ,First,#,%vetor%\\bol_ti_clip,idTerraInd,-1,-1;codFunai \"codFunai\" true true false 9 Long 0 9 ,First,#,%vetor%\\bol_ti_clip,codFunai,-1,-1;nome \"nome\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_ti_clip,nome,-1,-1,%vetor%\\bol_ti_erase,nome,-1,-1;etnia \"etnia\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_ti_clip,etnia,-1,-1;municipio \"municipio\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_ti_clip,municipio,-1,-1;uf \"uf\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_ti_clip,uf,-1,-1;superficie \"superficie\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_ti_clip,superficie,-1,-1;fase_ti \"fase_ti\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_ti_clip,fase_ti,-1,-1;modalidade \"modalidade\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_ti_clip,modalidade,-1,-1;reestudo \"reestudo\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_ti_clip,reestudo,-1,-1;coordRegio \"coordRegio\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_ti_clip,coordRegio,-1,-1;fonte \"fonte\" true true false 50 Text 0 0 ,First,#,%vetor%\\bol_ti_clip,fonte,-1,-1,%vetor%\\bol_ti_erase,fonte,-1,-1;area \"area\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_ti_clip,area,-1,-1,%vetor%\\bol_ti_erase,area,-1,-1;Shape_leng \"Shape_leng\" true true false 0 Double 0 0 ,First,#,%vetor%\\bol_ti_clip,Shape_length,-1,-1;Shape_area \"Shape_area\" true true false 0 Double 0 0 ,First,#,%vetor%\\bol_ti_clip,Shape_area,-1,-1,%vetor%\\bol_ti_erase,Shape_area,-1,-1;Aestudo \"Aestudo\" true true false 5 Text 0 0 ,First,#,%vetor%\\bol_ti_erase,Aestudo,-1,-1;Shape_length \"Shape_length\" true true false 0 Double 0 0 ,First,#,%vetor%\\bol_ti_erase,Shape_length,-1,-1")
# Process: Add Field (8)
# "Booleana" = 1 where Aestudo == 'Sim' (outside indigenous land), 0 otherwise.
arcpy.AddField_management(Merge_Terra_Indígena, "Booleana", "SHORT", "2", "", "", "", "NULLABLE", "NON_REQUIRED", "")
# Process: Calculate Field (8)
# "\\n" yields literal \n sequences expanded by the ArcGIS field calculator.
arcpy.CalculateField_management(Adição_campo_Booelana__8_, "Booleana", "Classify ( !Aestudo! )", "PYTHON", "def Classify(booleana):\\n if (booleana == 'Sim'):\\n return 1\\n else:\\n return 0")
# Process: Add Field (14)
arcpy.AddField_management(Parâmetros_Booleanos__8_, "PorcArea", "FLOAT", "7", "5", "", "", "NULLABLE", "NON_REQUIRED", "")
# Process: Calculate Field (14)
# Percent of the hard-coded total study area (318725000 m² — TODO confirm).
arcpy.CalculateField_management(Adição_campo_PorcArea__2_, "PorcArea", "(!Shape.area@squaremeters!*100)/318725000", "PYTHON", "")
# Process: Dissolve (2)
arcpy.Dissolve_management(Parâmetros_Booleanos__13_, bol_ti_dissolve, "Booleana", "PorcArea SUM", "MULTI_PART", "DISSOLVE_LINES")
# Process: Feature to Raster (7)
# Rasterize the indigenous-land Boolean layer at a 5-unit cell size.
arcpy.FeatureToRaster_conversion(bol_ti_dissolve, "Booleana", Raster_Terra_Indígena, "5")
# Process: Buffer (5)
# 50 m dissolved buffer around archaeological sites (CRS forced to SIRGAS 2000
# / UTM 22S for this tool run, then restored).
tempEnvironment0 = arcpy.env.outputCoordinateSystem
arcpy.env.outputCoordinateSystem = "PROJCS['SIRGAS_2000_UTM_Zone_22S',GEOGCS['GCS_SIRGAS_2000',DATUM['D_SIRGAS_2000',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Transverse_Mercator'],PARAMETER['False_Easting',500000.0],PARAMETER['False_Northing',10000000.0],PARAMETER['Central_Meridian',-51.0],PARAMETER['Scale_Factor',0.9996],PARAMETER['Latitude_Of_Origin',0.0],UNIT['Meter',1.0]]"
arcpy.Buffer_analysis(Sítio_arqueológico, Sitio_Arqueológico, "50 Meters", "FULL", "ROUND", "ALL", "")
arcpy.env.outputCoordinateSystem = tempEnvironment0
# Process: Clip (9)
# Buffered sites inside the study area.
arcpy.Clip_analysis(Sitio_Arqueológico, Adição_de_informação, Clip_Sítio_Arqueológico, "")
# Process: Erase (8)
# Study-area remainder outside buffered sites. (Variable name says "Terra
# Indígena" but the dataset is bol_sitio_arqueologico_erase — a leftover of the
# exported model's naming.)
arcpy.Erase_analysis(Adição_de_informação, Clip_Sítio_Arqueológico, Erase_Terra_Indígena__2_, "")
# Process: Merge (8)
# Recombine clip (inside site buffers) and erase (outside) to tile the study
# area; ModelBuilder-generated field map — do not edit by hand.
arcpy.Merge_management("%vetor%\\bol_sitio_arqueologico_clip;%vetor%\\bol_sitio_arqueologico_erase", Merge_Sítio_Arqueológico, "nome \"nome\" true true false 40 Text 0 0 ,First,#,%vetor%\\bol_sitio_arqueologico_clip,nome,-1,-1,%vetor%\\bol_sitio_arqueologico_erase,nome,-1,-1;Shape_Area \"Shape_Area\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_sitio_arqueologico_clip,Shape_area,-1,-1,%vetor%\\bol_sitio_arqueologico_erase,Shape_area,-1,-1;cnsa \"cnsa\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_sitio_arqueologico_clip,cnsa,-1,-1;município \"município\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_sitio_arqueologico_clip,município,-1,-1;uf \"uf\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_sitio_arqueologico_clip,uf,-1,-1;BUFF_DIST \"BUFF_DIST\" true true false 0 Double 0 0 ,First,#,%vetor%\\bol_sitio_arqueologico_clip,BUFF_DIST,-1,-1;Shape_le_1 \"Shape_le_1\" true true false 0 Double 0 0 ,First,#,%vetor%\\bol_sitio_arqueologico_clip,Shape_length,-1,-1;fonte \"fonte\" true true false 20 Text 0 0 ,First,#,%vetor%\\bol_sitio_arqueologico_erase,fonte,-1,-1;Aestudo \"Aestudo\" true true false 5 Text 0 0 ,First,#,%vetor%\\bol_sitio_arqueologico_erase,Aestudo,-1,-1;area \"area\" true true false 13 Float 0 0 ,First,#,%vetor%\\bol_sitio_arqueologico_erase,area,-1,-1;Shape_length \"Shape_length\" true true false 0 Double 0 0 ,First,#,%vetor%\\bol_sitio_arqueologico_erase,Shape_length,-1,-1")
# Process: Add Field (10)
# "Booleana" = 1 where Aestudo == 'Sim' (outside site buffers), 0 otherwise.
arcpy.AddField_management(Merge_Sítio_Arqueológico, "Booleana", "SHORT", "2", "", "", "", "NULLABLE", "NON_REQUIRED", "")
# Process: Calculate Field (10)
# "\\n" yields literal \n sequences expanded by the ArcGIS field calculator.
arcpy.CalculateField_management(Adição_campo_Booelana__9_, "Booleana", "Classify ( !Aestudo! )", "PYTHON", "def Classify(booleana):\\n if (booleana == 'Sim'):\\n return 1\\n else:\\n return 0")
# Process: Add Field (19)
arcpy.AddField_management(Parâmetros_Booleanos__9_, "PorcArea", "FLOAT", "7", "5", "", "", "NULLABLE", "NON_REQUIRED", "")
# Process: Calculate Field (19)
# Percent of the hard-coded total study area (318725000 m² — TODO confirm).
arcpy.CalculateField_management(Adição_campo_PorcArea__7_, "PorcArea", "(!Shape.area@squaremeters!*100)/318725000", "PYTHON", "")
# Process: Dissolve (7)
arcpy.Dissolve_management(Parâmetros_Booleanos__18_, bol_sitio_arqueologico_dissolve, "Booleana", "PorcArea SUM", "MULTI_PART", "DISSOLVE_LINES")
# Process: Feature to Raster (9)
# Rasterize the archaeological-site Boolean layer at a 5-unit cell size.
arcpy.FeatureToRaster_conversion(bol_sitio_arqueologico_dissolve, "Booleana", Raster_Sítio_Arqueológico, "5")
# Process: Erase (9)
# Study-area remainder outside built-up areas (no prior clip here: the built-up
# layer itself is merged back in directly in the next step).
arcpy.Erase_analysis(Adição_de_informação, Área_Edificada, Erase_Área_Edificada, "")
# Process: Merge (9)
# Recombine the outside-built-up erase result with the original built-up
# shapefile; ModelBuilder-generated field map — do not edit by hand.
arcpy.Merge_management("%vetor%\\bol_area_edificada_erase;C:\\Users\\phpor\\Documents\\UFSC\\Dissertação\\Dados_georreferenciados\\SHP\\areaEdificada.shp", Merge_Área_Edificada, "id \"id\" true true false 4 Short 0 4 ,First,#,C:\\Users\\phpor\\Documents\\UFSC\\Dissertação\\Dados_georreferenciados\\SHP\\areaEdificada.shp,id,-1,-1;nome \"nome\" true true false 40 Text 0 0 ,First,#,%vetor%\\bol_area_edificada_erase,nome,-1,-1;fonte \"fonte\" true true false 20 Text 0 0 ,First,#,%vetor%\\bol_area_edificada_erase,fonte,-1,-1;Aestudo \"Aestudo\" true true false 5 Text 0 0 ,First,#,%vetor%\\bol_area_edificada_erase,Aestudo,-1,-1;area \"area\" true true false 13 Float 0 0 ,First,#,%vetor%\\bol_area_edificada_erase,area,-1,-1;Shape_length \"Shape_length\" true true false 0 Double 0 0 ,First,#,%vetor%\\bol_area_edificada_erase,Shape_length,-1,-1;Shape_area \"Shape_area\" true true false 0 Double 0 0 ,First,#,%vetor%\\bol_area_edificada_erase,Shape_area,-1,-1")
# Process: Add Field (11)
# "Booleana" = 1 where Aestudo == 'Sim' (outside built-up areas), 0 otherwise.
arcpy.AddField_management(Merge_Área_Edificada, "Booleana", "SHORT", "2", "", "", "", "NULLABLE", "NON_REQUIRED", "")
# Process: Calculate Field (11)
# "\\n" yields literal \n sequences expanded by the ArcGIS field calculator.
arcpy.CalculateField_management(Adição_campo_Booelana__10_, "Booleana", "Classify ( !Aestudo! )", "PYTHON", "def Classify(booleana):\\n if (booleana == 'Sim'):\\n return 1\\n else:\\n return 0")
# Process: Add Field (22)
arcpy.AddField_management(Parâmetros_Booleanos__10_, "PorcArea", "FLOAT", "7", "5", "", "", "NULLABLE", "NON_REQUIRED", "")
# Process: Calculate Field (22)
# Percent of the hard-coded total study area (318725000 m² — TODO confirm).
arcpy.CalculateField_management(Adição_campo_PorcArea__10_, "PorcArea", "(!Shape.area@squaremeters!*100)/318725000", "PYTHON", "")
# Process: Dissolve (10)
arcpy.Dissolve_management(Parâmetros_Booleanos__21_, bol_area_edificada_dissolve, "Booleana", "PorcArea SUM", "MULTI_PART", "DISSOLVE_LINES")
# Process: Feature to Raster (10)
# Rasterize the built-up-area Boolean layer at a 5-unit cell size.
arcpy.FeatureToRaster_conversion(bol_area_edificada_dissolve, "Booleana", Raster_Área_Edificada, "5")
# Process: Buffer (8)
# Buffer the transmission lines. The third argument "buffer" is a FIELD NAME:
# each line is buffered by the per-feature distance stored in its "buffer"
# attribute (not a fixed linear unit). CRS forced to SIRGAS 2000 / UTM 22S for
# this tool run, then restored.
tempEnvironment0 = arcpy.env.outputCoordinateSystem
arcpy.env.outputCoordinateSystem = "PROJCS['SIRGAS_2000_UTM_Zone_22S',GEOGCS['GCS_SIRGAS_2000',DATUM['D_SIRGAS_2000',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Transverse_Mercator'],PARAMETER['False_Easting',500000.0],PARAMETER['False_Northing',10000000.0],PARAMETER['Central_Meridian',-51.0],PARAMETER['Scale_Factor',0.9996],PARAMETER['Latitude_Of_Origin',0.0],UNIT['Meter',1.0]]"
arcpy.Buffer_analysis(linhaTransmissao_shp, Buffer_LT, "buffer", "FULL", "ROUND", "ALL", "")
arcpy.env.outputCoordinateSystem = tempEnvironment0
# Process: Clip (10)
# Transmission-line buffers inside the study area.
arcpy.Clip_analysis(Buffer_LT, Adição_de_informação, Clip_LT, "")
# Process: Erase (10)
# Study-area remainder outside the transmission-line buffers.
arcpy.Erase_analysis(Adição_de_informação, Clip_LT, Erase_LT, "")
# Process: Merge (10)
arcpy.Merge_management("%vetor%\\bol_lt_clip;%vetor%\\bol_lt_erase", Merge_LT, "Shape_Leng \"Shape_Leng\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_lt_clip,Shape_Leng,-1,-1;Shape_Area \"Shape_Area\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_lt_clip,Shape_area,-1,-1,%vetor%\\bol_lt_erase,Shape_area,-1,-1;osm_id \"osm_id\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_lt_clip,osm_id,-1,-1;name \"name\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_lt_clip,name,-1,-1;highway \"highway\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_lt_clip,highway,-1,-1;waterway \"waterway\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_lt_clip,waterway,-1,-1;aerialway \"aerialway\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_lt_clip,aerialway,-1,-1;barrier \"barrier\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_lt_clip,barrier,-1,-1;man_made \"man_made\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_lt_clip,man_made,-1,-1;z_order \"z_order\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_lt_clip,z_order,-1,-1;other_tags \"other_tags\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_lt_clip,other_tags,-1,-1;tensao \"tensao\" true true false 50 Text 0 0 ,First,#,%vetor%\\bol_lt_clip,tensao,-1,-1;buffer \"buffer\" true true false 13 Float 0 0 ,First,#,%vetor%\\bol_lt_clip,buffer,-1,-1;BUFF_DIST \"BUFF_DIST\" true true false 0 Double 0 0 ,First,#,%vetor%\\bol_lt_clip,BUFF_DIST,-1,-1;Shape_le_1 \"Shape_le_1\" true true false 0 Double 0 0 ,First,#,%vetor%\\bol_lt_clip,Shape_length,-1,-1;nome \"nome\" true true false 40 Text 0 0 ,First,#,%vetor%\\bol_lt_erase,nome,-1,-1;fonte \"fonte\" true true false 20 Text 0 0 ,First,#,%vetor%\\bol_lt_erase,fonte,-1,-1;Aestudo \"Aestudo\" true true false 5 Text 0 0 ,First,#,%vetor%\\bol_lt_erase,Aestudo,-1,-1;area \"area\" true true false 13 Float 0 0 ,First,#,%vetor%\\bol_lt_erase,area,-1,-1;Shape_length \"Shape_length\" true true false 0 Double 0 0 
,First,#,%vetor%\\bol_lt_erase,Shape_length,-1,-1")
# Process: Add Field (12)
arcpy.AddField_management(Merge_LT, "Booleana", "SHORT", "2", "", "", "", "NULLABLE", "NON_REQUIRED", "")
# Process: Calculate Field (12)
arcpy.CalculateField_management(Adição_campo_Booelana__11_, "Booleana", "Classify ( !Aestudo! )", "PYTHON", "def Classify(booleana):\\n if (booleana == 'Sim'):\\n return 1\\n else:\\n return 0")
# Process: Add Field (15)
arcpy.AddField_management(Parâmetros_Booleanos__11_, "PorcArea", "FLOAT", "7", "5", "", "", "NULLABLE", "NON_REQUIRED", "")
# Process: Calculate Field (15)
arcpy.CalculateField_management(Adição_campo_PorcArea__3_, "PorcArea", "(!Shape.area@squaremeters!*100)/318725000", "PYTHON", "")
# Process: Dissolve (3)
arcpy.Dissolve_management(Parâmetros_Booleanos__14_, bol_lt_dissolve, "Booleana", "PorcArea SUM", "MULTI_PART", "DISSOLVE_LINES")
# Process: Feature to Raster (11)
arcpy.FeatureToRaster_conversion(bol_lt_dissolve, "Booleana", Raster_LT, "5")
# Process: Clip (11)
tempEnvironment0 = arcpy.env.outputCoordinateSystem
arcpy.env.outputCoordinateSystem = "PROJCS['SIRGAS_2000_UTM_Zone_22S',GEOGCS['GCS_SIRGAS_2000',DATUM['D_SIRGAS_2000',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Transverse_Mercator'],PARAMETER['False_Easting',500000.0],PARAMETER['False_Northing',10000000.0],PARAMETER['Central_Meridian',-51.0],PARAMETER['Scale_Factor',0.9996],PARAMETER['Latitude_Of_Origin',0.0],UNIT['Meter',1.0]]"
arcpy.Clip_analysis(setorRisco_shp, Adição_de_informação, Clip_Setor_de_Risco, "")
arcpy.env.outputCoordinateSystem = tempEnvironment0
# Process: Erase (11)
arcpy.Erase_analysis(Adição_de_informação, Clip_Setor_de_Risco, Erase_Setor_de_Risco, "")
# Process: Merge (11)
arcpy.Merge_management("%vetor%\\bol_setor_risco_erase;%vetor%\\bol_setor_risco_clip", Merge_Setor_de_Risco, "Shape_Area \"Shape_Area\" true true false 19 Double 0 0 ,First,#,%vetor%\\bol_setor_risco_clip,Shape_area,-1,-1,%vetor%\\bol_setor_risco_erase,Shape_area,-1,-1;Name \"Name\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_setor_risco_clip,Name,-1,-1;UF \"UF\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_setor_risco_clip,UF,-1,-1;MUNIC \"MUNIC\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_setor_risco_clip,MUNIC,-1,-1;LOCAL \"LOCAL\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_setor_risco_clip,LOCAL,-1,-1;DATA_SETOR \"DATA_SETOR\" true true false 8 Date 0 0 ,First,#,%vetor%\\bol_setor_risco_clip,DATA_SETOR,-1,-1;NUM_SETOR \"NUM_SETOR\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_setor_risco_clip,NUM_SETOR,-1,-1;TIPOLO_G1 \"TIPOLO_G1\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_setor_risco_clip,TIPOLO_G1,-1,-1;TIPOLO_E1 \"TIPOLO_E1\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_setor_risco_clip,TIPOLO_E1,-1,-1;COBRADE_1 \"COBRADE_1\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_setor_risco_clip,COBRADE_1,-1,-1;TIPOLO_G2 \"TIPOLO_G2\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_setor_risco_clip,TIPOLO_G2,-1,-1;TIPOLO_E2 \"TIPOLO_E2\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_setor_risco_clip,TIPOLO_E2,-1,-1;COBRADE_2 \"COBRADE_2\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_setor_risco_clip,COBRADE_2,-1,-1;TIPOLO_G3 \"TIPOLO_G3\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_setor_risco_clip,TIPOLO_G3,-1,-1;TIPOLO_E3 \"TIPOLO_E3\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_setor_risco_clip,TIPOLO_E3,-1,-1;COBRADE_3 \"COBRADE_3\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_setor_risco_clip,COBRADE_3,-1,-1;TIPOLO_G4 \"TIPOLO_G4\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_setor_risco_clip,TIPOLO_G4,-1,-1;TIPOLO_E4 \"TIPOLO_E4\" true true false 254 Text 0 0 
,First,#,%vetor%\\bol_setor_risco_clip,TIPOLO_E4,-1,-1;COBRADE_4 \"COBRADE_4\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_setor_risco_clip,COBRADE_4,-1,-1;TIPOLO_G5 \"TIPOLO_G5\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_setor_risco_clip,TIPOLO_G5,-1,-1;TIPOLO_E5 \"TIPOLO_E5\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_setor_risco_clip,TIPOLO_E5,-1,-1;COBRADE_5 \"COBRADE_5\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_setor_risco_clip,COBRADE_5,-1,-1;SITUACAO \"SITUACAO\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_setor_risco_clip,SITUACAO,-1,-1;DESCRICAO \"DESCRICAO\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_setor_risco_clip,DESCRICAO,-1,-1;NUM_MORAD \"NUM_MORAD\" true true false 19 Double 6 18 ,First,#,%vetor%\\bol_setor_risco_clip,NUM_MORAD,-1,-1;NUM_PESS \"NUM_PESS\" true true false 19 Double 6 18 ,First,#,%vetor%\\bol_setor_risco_clip,NUM_PESS,-1,-1;OBS_OCUP \"OBS_OCUP\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_setor_risco_clip,OBS_OCUP,-1,-1;GRAU_VULNE \"GRAU_VULNE\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_setor_risco_clip,GRAU_VULNE,-1,-1;GRAU_RISCO \"GRAU_RISCO\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_setor_risco_clip,GRAU_RISCO,-1,-1;ORGAO_EXEC \"ORGAO_EXEC\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_setor_risco_clip,ORGAO_EXEC,-1,-1;PROJETO \"PROJETO\" true true false 254 Text 0 0 ,First,#,%vetor%\\bol_setor_risco_clip,PROJETO,-1,-1;UTME \"UTME\" true true false 19 Double 6 18 ,First,#,%vetor%\\bol_setor_risco_clip,UTME,-1,-1;UTMN \"UTMN\" true true false 19 Double 6 18 ,First,#,%vetor%\\bol_setor_risco_clip,UTMN,-1,-1;ZONA \"ZONA\" true true false 19 Double 6 18 ,First,#,%vetor%\\bol_setor_risco_clip,ZONA,-1,-1;Shape_le_1 \"Shape_le_1\" true true false 0 Double 0 0 ,First,#,%vetor%\\bol_setor_risco_clip,Shape_length,-1,-1;nome \"nome\" true true false 40 Text 0 0 ,First,#,%vetor%\\bol_setor_risco_erase,nome,-1,-1;fonte \"fonte\" true true false 20 Text 0 0 
,First,#,%vetor%\\bol_setor_risco_erase,fonte,-1,-1;Aestudo \"Aestudo\" true true false 5 Text 0 0 ,First,#,%vetor%\\bol_setor_risco_erase,Aestudo,-1,-1;area \"area\" true true false 13 Float 0 0 ,First,#,%vetor%\\bol_setor_risco_erase,area,-1,-1;Shape_length \"Shape_length\" true true false 0 Double 0 0 ,First,#,%vetor%\\bol_setor_risco_erase,Shape_length,-1,-1")
# Process: Add Field (13)
arcpy.AddField_management(Merge_Setor_de_Risco, "Booleana", "SHORT", "2", "", "", "", "NULLABLE", "NON_REQUIRED", "")
# Process: Calculate Field (13)
arcpy.CalculateField_management(Adição_campo_Booelana__12_, "Booleana", "Classify ( !Aestudo! )", "PYTHON", "def Classify(booleana):\\n if (booleana == 'Sim'):\\n return 1\\n else:\\n return 0")
# Process: Add Field (5)
arcpy.AddField_management(Parâmetros_Booleanos__12_, "PorcArea", "FLOAT", "7", "5", "", "", "NULLABLE", "NON_REQUIRED", "")
# Process: Calculate Field (5)
arcpy.CalculateField_management(Adição_campo_PorcArea, "PorcArea", "(!Shape.area@squaremeters!*100)/318725000", "PYTHON", "")
# Process: Dissolve
arcpy.Dissolve_management(Parâmetros_Booleanos__7_, bol_setor_risco_dissolve, "Booleana", "PorcArea SUM", "MULTI_PART", "DISSOLVE_LINES")
# Process: Feature to Raster (12)
arcpy.FeatureToRaster_conversion(bol_setor_risco_dissolve, "Booleana", Raster_Setor_de_Risco, "5")
# Process: Raster Calculator
arcpy.gp.RasterCalculator_sa("\"%Raster APP%\"*\"%Raster Terra Indígena%\"*\"%Raster Sítio Arqueológico%\"*\"%Raster Área Edificada%\"*\"%Raster LT%\"*\"%Raster Setor de Risco%\"", BOL_result)
| 115.513587
| 9,052
| 0.76158
| 6,556
| 42,509
| 4.638652
| 0.060403
| 0.071027
| 0.069679
| 0.059189
| 0.772714
| 0.72385
| 0.65598
| 0.618263
| 0.593765
| 0.547401
| 0
| 0.067275
| 0.068809
| 42,509
| 367
| 9,053
| 115.828338
| 0.70099
| 0.058905
| 0
| 0.138462
| 1
| 0.923077
| 0.71022
| 0.527973
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.010256
| 0
| 0.010256
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
50380aa12d66188dbec0f5b56c942206d5d2fcb9
| 2,069
|
py
|
Python
|
challenges/hashmap-tree-intersection/tests/test_hashmap_tree_intersection.py
|
odai1990/data-structures-and-algorithms
|
fde43d7bdb241f6ef8de7018edab7e741b65cf77
|
[
"MIT"
] | null | null | null |
challenges/hashmap-tree-intersection/tests/test_hashmap_tree_intersection.py
|
odai1990/data-structures-and-algorithms
|
fde43d7bdb241f6ef8de7018edab7e741b65cf77
|
[
"MIT"
] | 1
|
2021-06-13T19:18:34.000Z
|
2021-06-13T19:18:34.000Z
|
challenges/hashmap-tree-intersection/tests/test_hashmap_tree_intersection.py
|
odai1990/data-structures-and-algorithms
|
fde43d7bdb241f6ef8de7018edab7e741b65cf77
|
[
"MIT"
] | null | null | null |
from hashmap_tree_intersection import __version__
from hashmap_tree_intersection.tree import BinarySearchTree
from hashmap_tree_intersection.tree_intersection import TreeIntersection
def test_version():
    """The package version should match the one declared in pyproject."""
    assert __version__ == '0.1.0'
def test_happy_case():
    """Two trees that share exactly one value intersect in that value."""
    left = BinarySearchTree()
    for value in (10, 5, 6):
        left.add(value)

    right = BinarySearchTree()
    for value in (1, 5, 2):
        right.add(value)

    finder = TreeIntersection(1024)
    result = finder.tree_intersection(left, right)
    assert result == [5]
def test_no_match():
    """Disjoint trees intersect in the empty list."""
    left = BinarySearchTree()
    for value in (10, 4, 6):
        left.add(value)

    right = BinarySearchTree()
    for value in (1, 5, 2):
        right.add(value)

    finder = TreeIntersection(1024)
    result = finder.tree_intersection(left, right)
    assert result == []
def test_array_long_than_other():
    """A larger first tree still yields only the shared value."""
    left = BinarySearchTree()
    for value in (10, 4, 6, 5, 44, 655):
        left.add(value)

    right = BinarySearchTree()
    for value in (1, 5, 2):
        right.add(value)

    finder = TreeIntersection(1024)
    result = finder.tree_intersection(left, right)
    assert result == [5]
def test_array_long_than_other_and_have_repeted_number_in_same_array():
    """Duplicates inside one tree do not duplicate intersection entries."""
    left = BinarySearchTree()
    for value in (10, 4, 6, 5, 5, 6):
        left.add(value)

    right = BinarySearchTree()
    for value in (3, 5, 10):
        right.add(value)

    finder = TreeIntersection(1024)
    result = finder.tree_intersection(left, right)
    assert result == [10, 5]
def test_other_solution():
    """The hashtable-free variant agrees with the hashtable-based one."""
    left = BinarySearchTree()
    for value in (10, 4, 6, 5, 5, 6):
        left.add(value)

    right = BinarySearchTree()
    for value in (3, 5, 10):
        right.add(value)

    finder = TreeIntersection(1024)
    result = finder.tree_intersection_without_hashtabke(left, right)
    assert result == [10, 5]
| 20.89899
| 72
| 0.675689
| 270
| 2,069
| 5.014815
| 0.162963
| 0.141802
| 0.046529
| 0.062038
| 0.836041
| 0.790251
| 0.785081
| 0.762186
| 0.716396
| 0.669867
| 0
| 0.08486
| 0.208313
| 2,069
| 98
| 73
| 21.112245
| 0.741758
| 0
| 0
| 0.822785
| 0
| 0
| 0.002419
| 0
| 0
| 0
| 0
| 0
| 0.075949
| 1
| 0.075949
| false
| 0
| 0.037975
| 0
| 0.113924
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ace6e531c391cf4ecdf7fac9ae397d66dfb9bf8e
| 192
|
py
|
Python
|
tests/test_parametrizer.py
|
davidemoro/parametrizer
|
e5fe3b6276d30b41402b24fac61520b8e5e198a0
|
[
"Apache-2.0"
] | null | null | null |
tests/test_parametrizer.py
|
davidemoro/parametrizer
|
e5fe3b6276d30b41402b24fac61520b8e5e198a0
|
[
"Apache-2.0"
] | 2
|
2019-03-14T12:41:32.000Z
|
2019-03-14T12:45:21.000Z
|
tests/test_parametrizer.py
|
davidemoro/parametrizer
|
e5fe3b6276d30b41402b24fac61520b8e5e198a0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
def test_parametrizer():
    """A `$foo` placeholder is replaced by the mapped value."""
    from parametrizer import Parametrizer

    parametrizer = Parametrizer({'foo': 'bar'})
    assert parametrizer.parametrize('$foo') == 'bar'
| 24
| 68
| 0.630208
| 19
| 192
| 6.315789
| 0.631579
| 0.266667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006329
| 0.177083
| 192
| 7
| 69
| 27.428571
| 0.753165
| 0.213542
| 0
| 0
| 0
| 0
| 0.090278
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
acee645e5356ce0ffa6439fdc5199b2ac38bbbe1
| 38
|
py
|
Python
|
spea/minimum_clique_cover/__init__.py
|
heyaroom/spea_echo
|
fd05285aaa55d358bde4458cc73f4e3d39058b68
|
[
"MIT"
] | null | null | null |
spea/minimum_clique_cover/__init__.py
|
heyaroom/spea_echo
|
fd05285aaa55d358bde4458cc73f4e3d39058b68
|
[
"MIT"
] | null | null | null |
spea/minimum_clique_cover/__init__.py
|
heyaroom/spea_echo
|
fd05285aaa55d358bde4458cc73f4e3d39058b68
|
[
"MIT"
] | null | null | null |
from .clique_cover import clique_cover
| 38
| 38
| 0.894737
| 6
| 38
| 5.333333
| 0.666667
| 0.6875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078947
| 38
| 1
| 38
| 38
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
acf09d8942761ce92a838957e1011f8e949b5a7f
| 111
|
py
|
Python
|
api/v1/generics/__init__.py
|
blockomat2100/vulnman
|
835ff3aae1168d8e2fa5556279bc86efd2e46472
|
[
"MIT"
] | null | null | null |
api/v1/generics/__init__.py
|
blockomat2100/vulnman
|
835ff3aae1168d8e2fa5556279bc86efd2e46472
|
[
"MIT"
] | 23
|
2021-12-01T10:00:38.000Z
|
2021-12-11T11:43:13.000Z
|
api/v1/generics/__init__.py
|
blockomat2100/vulnman
|
835ff3aae1168d8e2fa5556279bc86efd2e46472
|
[
"MIT"
] | null | null | null |
from api.v1.generics.agents import AgentModelViewSet
from api.v1.generics.session import ProjectSessionViewSet
| 37
| 57
| 0.873874
| 14
| 111
| 6.928571
| 0.642857
| 0.14433
| 0.185567
| 0.350515
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019417
| 0.072072
| 111
| 2
| 58
| 55.5
| 0.92233
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
acf4de65fe52fc412eed03ac739a9e84a93edfa3
| 172
|
py
|
Python
|
autonomous_systems_project/agents/__init__.py
|
alessandropacielli/autonomous_systems_project
|
ae429099409356db5cdd19597af871f239300ffb
|
[
"MIT"
] | null | null | null |
autonomous_systems_project/agents/__init__.py
|
alessandropacielli/autonomous_systems_project
|
ae429099409356db5cdd19597af871f239300ffb
|
[
"MIT"
] | null | null | null |
autonomous_systems_project/agents/__init__.py
|
alessandropacielli/autonomous_systems_project
|
ae429099409356db5cdd19597af871f239300ffb
|
[
"MIT"
] | null | null | null |
from autonomous_systems_project.agents.actor_critic import *
from autonomous_systems_project.agents.double_dqn import *
from autonomous_systems_project.agents.dqn import *
| 43
| 60
| 0.877907
| 23
| 172
| 6.217391
| 0.434783
| 0.293706
| 0.440559
| 0.587413
| 0.797203
| 0.559441
| 0
| 0
| 0
| 0
| 0
| 0
| 0.069767
| 172
| 3
| 61
| 57.333333
| 0.89375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
4a080ae13c876589897cf230e6a792ce74c2dcb1
| 24,987
|
py
|
Python
|
NetworkControllability/TmpEdgeAttackExperiments.py
|
xinfeng1i/NetworkControllability
|
8a22ad0498ea12438c132556814dc255e709dc01
|
[
"BSD-2-Clause"
] | 1
|
2019-02-06T13:39:49.000Z
|
2019-02-06T13:39:49.000Z
|
NetworkControllability/TmpEdgeAttackExperiments.py
|
python27/NetworkControllability
|
8a22ad0498ea12438c132556814dc255e709dc01
|
[
"BSD-2-Clause"
] | 1
|
2020-11-03T22:51:32.000Z
|
2020-11-06T11:48:28.000Z
|
NetworkControllability/TmpEdgeAttackExperiments.py
|
xinfeng1i/NetworkControllability
|
8a22ad0498ea12438c132556814dc255e709dc01
|
[
"BSD-2-Clause"
] | null | null | null |
import networkx as nx
import matplotlib.pyplot as plt
import exact_controllability as ECT
from networkx.utils import powerlaw_sequence
import operator
import random
import csv
import copy
import subprocess, os
import time
import numpy as np
from ControllabilityRobustnessBasedOnEdgeAttack import RandomEdgeAttack
from ControllabilityRobustnessBasedOnEdgeAttack import InitialEdgeDegreeAttack
from ControllabilityRobustnessBasedOnEdgeAttack import RecalculatedEdgeDegreeAttack
from ControllabilityRobustnessBasedOnEdgeAttack import InitialEdgeBetweennessAttack
from ControllabilityRobustnessBasedOnEdgeAttack import RecalculatedEdgeBetweennessAttack
import strutral_controllability as SC
def EdgeAttackBA():
start_time = time.time()
n = 200
m = 3
fraction = 0.2
E = 591
E_rm = 118
run_cnt = 100
#******** Run Node Attack 1 ********#
tot_ND1 = [0] * (E_rm + 1)
tot_T1 = [0] * (E_rm + 1)
rndseed = 0
for i in range(run_cnt):
G1 = nx.barabasi_albert_graph(n, m, seed=rndseed)
print ">>>>>>>>>>>>>>> Random Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
print "graph info", nx.info(G1)
ND1, T1 = RandomEdgeAttack(G1, remove_fraction=fraction)
tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
rndseed += 1
tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
tot_T1 = T1
tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
with open("results2/edge_attack1_BA.csv", "w") as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(tot_T1, tot_ND1))
#******** Run Node Attack 2 ********#
tot_ND1 = [0] * (E_rm + 1)
tot_T1 = [0] * (E_rm + 1)
rndseed = 0
for i in range(run_cnt):
G1 = nx.barabasi_albert_graph(n, m, seed=rndseed)
print ">>>>>>>>>>>>>>> Initial Degree Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
print "graph info", nx.info(G1)
ND1, T1 = InitialEdgeDegreeAttack(G1, remove_fraction=fraction)
tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
rndseed += 1
tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
tot_T1 = T1
tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
with open("results2/edge_attack2_BA.csv", "w") as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(tot_T1, tot_ND1))
#******** Run Node Attack 3 ********#
tot_ND1 = [0] * (E_rm + 1)
tot_T1 = [0] * (E_rm + 1)
rndseed = 0
for i in range(run_cnt):
G1 = nx.barabasi_albert_graph(n, m, seed=rndseed)
print ">>>>>>>>>>>>>>> Recalculated Degree Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
print "graph info", nx.info(G1)
ND1, T1 = RecalculatedEdgeDegreeAttack(G1, remove_fraction=fraction)
tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
rndseed += 1
tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
tot_T1 = T1
tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
with open("results2/edge_attack3_BA.csv", "w") as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(tot_T1, tot_ND1))
#******** Run Node Attack 4 ********#
tot_ND1 = [0] * (E_rm + 1)
tot_T1 = [0] * (E_rm + 1)
rndseed = 0
for i in range(run_cnt):
G1 = nx.barabasi_albert_graph(n, m, seed=rndseed)
print ">>>>>>>>>>>>>>> Initial Betweenness Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
print "graph info", nx.info(G1)
ND1, T1 = InitialEdgeBetweennessAttack(G1, remove_fraction=fraction)
tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
rndseed += 1
tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
tot_T1 = T1
tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
with open("results2/edge_attack4_BA.csv", "w") as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(tot_T1, tot_ND1))
#******** Run Node Attack 5 ********#
tot_ND1 = [0] * (E_rm + 1)
tot_T1 = [0] * (E_rm + 1)
rndseed = 0
for i in range(run_cnt):
G1 = nx.barabasi_albert_graph(n, m, seed=rndseed)
print ">>>>>>>>>>>>>>> Recalculated Betweenness Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
print "graph info", nx.info(G1)
ND1, T1 = RecalculatedEdgeBetweennessAttack(G1, remove_fraction=fraction)
tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
rndseed += 1
tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
tot_T1 = T1
tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
with open("results2/edge_attack5_BA.csv", "w") as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(tot_T1, tot_ND1))
print "--- cost time %s seconds ---" %(time.time() - start_time)
def EdgeAttackUSAir():
start_time = time.time()
n = 332
fraction = 0.2
E = 2126
E_rm = int(0.2 * E)
run_cnt = 100
#******** Run Edge Attack 1 ********#
tot_ND1 = [0] * (E_rm + 1)
tot_T1 = [0] * (E_rm + 1)
rndseed = 1;
for i in range(run_cnt):
G1 = nx.read_pajek("dataset/USAir97.net")
print ">>>>>>>>>>>>>>> Random Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
print "graph info", nx.info(G1)
random.seed(rndseed)
ND1, T1 = RandomEdgeAttack(G1, remove_fraction=fraction)
tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
rndseed += 1;
tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
tot_T1 = T1
tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
with open("results2/edge_attack1_USAir.csv", "w") as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(tot_T1, tot_ND1))
run_cnt = 3
#******** Run Edge Attack 2 ********#
tot_ND1 = [0] * (E_rm + 1)
tot_T1 = [0] * (E_rm + 1)
for i in range(run_cnt):
G1 = nx.read_pajek("dataset/USAir97.net")
print ">>>>>>>>>>>>>>> Initial Degree Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
print "graph info", nx.info(G1)
ND1, T1 = InitialEdgeDegreeAttack(G1, remove_fraction=fraction)
tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
tot_T1 = T1
tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
with open("results2/edge_attack2_USAir.csv", "w") as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(tot_T1, tot_ND1))
run_cnt = 3
#******** Run Edge Attack 3 ********#
tot_ND1 = [0] * (E_rm + 1)
tot_T1 = [0] * (E_rm + 1)
for i in range(run_cnt):
G1 = nx.read_pajek("dataset/USAir97.net")
print ">>>>>>>>>>>>>>> Recalculated Degree Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
print "graph info", nx.info(G1)
ND1, T1 = RecalculatedEdgeDegreeAttack(G1, remove_fraction=fraction)
tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
tot_T1 = T1
tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
with open("results2/edge_attack3_USAir.csv", "w") as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(tot_T1, tot_ND1))
run_cnt = 3
#******** Run Edge Attack 4 ********#
tot_ND1 = [0] * (E_rm + 1)
tot_T1 = [0] * (E_rm + 1)
for i in range(run_cnt):
G1 = nx.read_pajek("dataset/USAir97.net")
print ">>>>>>>>>>>>>>> Initial Betweenness Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
print "graph info", nx.info(G1)
ND1, T1 = InitialEdgeBetweennessAttack(G1, remove_fraction=fraction)
tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
tot_T1 = T1
tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
with open("results2/edge_attack4_USAir.csv", "w") as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(tot_T1, tot_ND1))
run_cnt = 3
#******** Run Edge Attack 5 ********#
tot_ND1 = [0] * (E_rm + 1)
tot_T1 = [0] * (E_rm + 1)
for i in range(run_cnt):
G1 = nx.read_pajek("dataset/USAir97.net")
print ">>>>>>>>>>>>>>> Recalculated Betweenness Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
print "graph info", nx.info(G1)
ND1, T1 = RecalculatedEdgeBetweennessAttack(G1, remove_fraction=fraction)
tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
tot_T1 = T1
tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
with open("results2/edge_attack5_USAir.csv", "w") as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(tot_T1, tot_ND1))
print "--- cost time %s seconds ---" %(time.time() - start_time)
def EdgeAttackErdosNetwork():
start_time = time.time()
n = 429
fraction = 0.2
E = 1312
E_rm = int(0.2 * E)
run_cnt = 30
#******** Run Node Attack 1 ********#
tot_ND1 = [0] * (E_rm + 1)
tot_T1 = [0] * (E_rm + 1)
rndseed = 1
for i in range(run_cnt):
G = nx.read_pajek("dataset/Erdos971_revised.net")
G1 = max(nx.connected_component_subgraphs(G),key=len)
print ">>>>>>>>>>>>>>> Random Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
print "graph info", nx.info(G1)
random.seed(rndseed)
ND1, T1 = RandomEdgeAttack(G1, remove_fraction=fraction)
tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
rndseed += 1
tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
tot_T1 = T1
tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
with open("results2/edge_attack1_ErdosNetwork.csv", "w") as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(tot_T1, tot_ND1))
run_cnt = 1
random.seed()
#******** Run Node Attack 2 ********#
tot_ND1 = [0] * (E_rm + 1)
tot_T1 = [0] * (E_rm + 1)
for i in range(run_cnt):
G = nx.read_pajek("dataset/Erdos971_revised.net")
G1 = max(nx.connected_component_subgraphs(G),key=len)
print ">>>>>>>>>>>>>>> Initial Degree Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
print "graph info", nx.info(G1)
ND1, T1 = InitialEdgeDegreeAttack(G1, remove_fraction=fraction)
tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
tot_T1 = T1
tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
with open("results2/edge_attack2_ErdosNetwork.csv", "w") as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(tot_T1, tot_ND1))
run_cnt = 1
random.seed()
#******** Run Node Attack 3 ********#
tot_ND1 = [0] * (E_rm + 1)
tot_T1 = [0] * (E_rm + 1)
for i in range(run_cnt):
G = nx.read_pajek("dataset/Erdos971_revised.net")
G1 = max(nx.connected_component_subgraphs(G),key=len)
print ">>>>>>>>>>>>>>> Recalculated Degree Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
print "graph info", nx.info(G1)
ND1, T1 = RecalculatedEdgeDegreeAttack(G1, remove_fraction=fraction)
tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
tot_T1 = T1
tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
with open("results2/edge_attack3_ErdosNetwork.csv", "w") as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(tot_T1, tot_ND1))
run_cnt = 1
random.seed()
#******** Run Node Attack 4 ********#
tot_ND1 = [0] * (E_rm + 1)
tot_T1 = [0] * (E_rm + 1)
for i in range(run_cnt):
G = nx.read_pajek("dataset/Erdos971_revised.net")
G1 = max(nx.connected_component_subgraphs(G),key=len)
print ">>>>>>>>>>>>>>> Initial Betweenness Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
print "graph info", nx.info(G1)
ND1, T1 = InitialEdgeBetweennessAttack(G1, remove_fraction=fraction)
tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
tot_T1 = T1
tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
with open("results2/edge_attack4_ErdosNetwork.csv", "w") as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(tot_T1, tot_ND1))
run_cnt = 1
random.seed()
#******** Run Node Attack 5 ********#
tot_ND1 = [0] * (E_rm + 1)
tot_T1 = [0] * (E_rm + 1)
for i in range(run_cnt):
G = nx.read_pajek("dataset/Erdos971_revised.net")
G1 = max(nx.connected_component_subgraphs(G),key=len)
print ">>>>>>>>>>>>>>> Recalculated Betweenness Attack run time count: ", i + 1, "<<<<<<<<<<<<<<<<<<"
print "graph info", nx.info(G1)
ND1, T1 = RecalculatedEdgeBetweennessAttack(G1, remove_fraction=fraction)
tot_ND1 = [x + y for x, y in zip(tot_ND1, ND1)]
tot_ND1 = [((x + 0.0) / run_cnt) for x in tot_ND1]
tot_T1 = T1
tot_ND1 = [(x + 0.0) / (n + 0.0) for x in tot_ND1]
tot_T1 = [(x + 0.0) / (E + 0.0) for x in tot_T1]
with open("results2/edge_attack5_ErdosNetwork.csv", "w") as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(tot_T1, tot_ND1))
print "--- cost time %s seconds ---" %(time.time() - start_time)
def ReadPajek(filename):
    """Read a Pajek (.net) file into a directed graph.

    Only the '*Vertices' and '*Arcs' sections are handled; any other
    section header is ignored, and edge weights (a third column on arc
    lines) are discarded. Node ids are the integer ids from the file.

    Params:
        filename: path to the Pajek file.
    Returns:
        nx.DiGraph with the nodes and directed edges from the file.
    """
    G = nx.DiGraph()
    fp = open(filename, 'r')
    try:  # fix: original leaked the file handle on a parse error
        line = fp.readline()
        while line:
            if line[0] == '*':
                header = line.strip().split()
                label = header[0]
                number = int(header[1])
                if label == '*Vertices' or label == '*vertices':
                    for _ in range(number):
                        # fix: only the node id is needed — the original
                        # also read the label column, crashing on
                        # label-less vertex lines
                        node_line = fp.readline().strip().split()
                        G.add_node(int(node_line[0]))
                elif label == '*Arcs' or label == '*arcs':
                    for _ in range(number):
                        edge_line = fp.readline().strip().split()
                        G.add_edge(int(edge_line[0]), int(edge_line[1]))
            line = fp.readline()
    finally:
        fp.close()
    return G
def EdgeAttack(G):
""" Edge attack experiments on real world networks
Params:
G: A directed network of networkx
Returns:
None. Print the network controllability n_D after
5% 10% 15% 20% edges removed
"""
NodesNum = G.number_of_nodes()
EdgesNum = G.number_of_edges()
# Edge remove fraction F0, F1, F2, F3, F4
F1 = 0.05
F2 = 0.10
F3 = 0.15
F4 = 0.20
LRA = []
LID = []
LRD = []
LIB = []
LRB = []
# Following is Edge Random Attack (RA)
print '########## Edge RA ##########'
G1 = copy.deepcopy(G)
RandomEdges = copy.deepcopy(G1.edges())
random.shuffle(RandomEdges)
i = 0
while i < int(F1 * EdgesNum):
u, v = RandomEdges[i]
G1.remove_edge(u, v)
i += 1
nD = len(SC.control_nodes(G1)) / (NodesNum + 0.0)
print F1, nD
LRA.append(nD)
while i < int(F2 * EdgesNum):
u, v = RandomEdges[i]
G1.remove_edge(u, v)
i += 1
nD = len(SC.control_nodes(G1)) / (NodesNum + 0.0)
print F2, nD
LRA.append(nD)
while i < int(F3 * EdgesNum):
u, v = RandomEdges[i]
G1.remove_edge(u, v)
i += 1
nD = len(SC.control_nodes(G1)) / (NodesNum + 0.0)
print F3, nD
LRA.append(nD)
while i < int(F4 * EdgesNum):
u, v = RandomEdges[i]
G1.remove_edge(u, v)
i += 1
nD = len(SC.control_nodes(G1)) / (NodesNum + 0.0)
print F4, nD
LRA.append(nD)
G1.clear()
RandomEdges = []
# Following is Initial Edge Degree Attack (IDA)
print '########## Edge IDA ##########'
G2 = copy.deepcopy(G)
NodeDegrees = nx.degree(G2)
EdgeDegrees = {}
for u, v in G2.edges_iter(): # Calculate the edge degrees
EdgeDegrees[(u, v)] = NodeDegrees[u] * NodeDegrees[v]
# Sort the edges decrendingly according to edge degree
SortedEdges = sorted(EdgeDegrees, key=EdgeDegrees.get, reverse=True)
i = 0
while i < int(F1 * EdgesNum):
u, v = SortedEdges[i]
G2.remove_edge(u, v)
i += 1
nD = len(SC.control_nodes(G2)) / (NodesNum + 0.0)
print F1, nD
LID.append(nD)
while i < int(F2 * EdgesNum):
u, v = SortedEdges[i]
G2.remove_edge(u, v)
i += 1
nD = len(SC.control_nodes(G2)) / (NodesNum + 0.0)
print F2, nD
LID.append(nD)
while i < int(F3 * EdgesNum):
u, v = SortedEdges[i]
G2.remove_edge(u, v)
i += 1
nD = len(SC.control_nodes(G2)) / (NodesNum + 0.0)
print F3, nD
LID.append(nD)
while i < int(F4 * EdgesNum):
u, v = SortedEdges[i]
G2.remove_edge(u, v)
i += 1
nD = len(SC.control_nodes(G2)) / (NodesNum + 0.0)
print F4, nD
LID.append(nD)
G2.clear()
NodeDegrees = {}
EdgeDegrees = {}
SortedEdges = []
# Following is Recalculated Edge Degree Attack (RDA)
print '########## Edge RDA ##########'
G3 = copy.deepcopy(G)
i = 0
while i < int(F1 * EdgesNum):
# Find the edge with max edge degree at present
MaxU = -1; MaxV = -1; MaxDegree = -1;
NodeDegrees = nx.degree(G3)
for (u, v) in G3.edges_iter():
CurDegree = NodeDegrees[u] * NodeDegrees[v]
if CurDegree > MaxDegree:
MaxDegree = CurDegree
MaxU = u
MaxV = v
G3.remove_edge(MaxU, MaxV)
i += 1
nD = len(SC.control_nodes(G3)) / (NodesNum + 0.0)
print F1, nD
LRD.append(nD)
while i < int(F2 * EdgesNum):
# Find the edge with max edge degree at present
MaxU = -1; MaxV = -1; MaxDegree = -1;
NodeDegrees = nx.degree(G3)
for (u, v) in G3.edges_iter():
CurDegree = NodeDegrees[u] * NodeDegrees[v]
if CurDegree > MaxDegree:
MaxDegree = CurDegree
MaxU = u
MaxV = v
G3.remove_edge(MaxU, MaxV)
i += 1
nD = len(SC.control_nodes(G3)) / (NodesNum + 0.0)
print F2, nD
LRD.append(nD)
while i < int(F3 * EdgesNum):
# Find the edge with max edge degree at present
MaxU = -1; MaxV = -1; MaxDegree = -1;
NodeDegrees = nx.degree(G3)
for (u, v) in G3.edges_iter():
CurDegree = NodeDegrees[u] * NodeDegrees[v]
if CurDegree > MaxDegree:
MaxDegree = CurDegree
MaxU = u
MaxV = v
G3.remove_edge(MaxU, MaxV)
i += 1
nD = len(SC.control_nodes(G3)) / (NodesNum + 0.0)
print F3, nD
LRD.append(nD)
while i < int(F4 * EdgesNum):
# Find the edge with max edge degree at present
MaxU = -1; MaxV = -1; MaxDegree = -1;
NodeDegrees = nx.degree(G3)
for (u, v) in G3.edges_iter():
CurDegree = NodeDegrees[u] * NodeDegrees[v]
if CurDegree > MaxDegree:
MaxDegree = CurDegree
MaxU = u
MaxV = v
G3.remove_edge(MaxU, MaxV)
i += 1
nD = len(SC.control_nodes(G3)) / (NodesNum + 0.0)
print F4, nD
LRD.append(nD)
G3.clear()
# Folloing is Initial Edge Betweenness Attack (IBA)
print '########## Edge IBA ##########'
G4 = copy.deepcopy(G)
EdgeBetweenness = nx.edge_betweenness_centrality(G4)
SortedBetEdges = sorted(EdgeBetweenness,
key=EdgeBetweenness.get, reverse=True)
i = 0
while i < int(F1 * EdgesNum):
u, v = SortedBetEdges[i]
G4.remove_edge(u, v)
i += 1
nD = len(SC.control_nodes(G4)) / (NodesNum + 0.0)
print F1, nD
LIB.append(nD)
while i < int(F2 * EdgesNum):
u, v = SortedBetEdges[i]
G4.remove_edge(u, v)
i += 1
nD = len(SC.control_nodes(G4)) / (NodesNum + 0.0)
print F2, nD
LIB.append(nD)
while i < int(F3 * EdgesNum):
u, v = SortedBetEdges[i]
G4.remove_edge(u, v)
i += 1
nD = len(SC.control_nodes(G4)) / (NodesNum + 0.0)
print F3, nD
LIB.append(nD)
while i < int(F4 * EdgesNum):
u, v = SortedBetEdges[i]
G4.remove_edge(u, v)
i += 1
nD = len(SC.control_nodes(G4)) / (NodesNum + 0.0)
print F4, nD
LIB.append(nD)
G4.clear()
EdgeBetweenness = {}
SortedBetEdges = []
# Following is Recalculated Edge Betweenness Attack (RBA)
print '########## Edge RBA ##########'
G5 = copy.deepcopy(G)
i = 0
while i < int(F1 * EdgesNum):
EdgeBets = nx.edge_betweenness_centrality(G5)
# Find the edge with Max edge betweenness
uMax = -1; vMax = -1; betMax = -1.0;
for ((u, v), bet) in EdgeBets.iteritems():
if bet > betMax:
betMax = bet
uMax = u
vMax = v
G5.remove_edge(uMax, vMax)
i += 1
nD = len(SC.control_nodes(G5)) / (NodesNum + 0.0)
print F1, nD
LRB.append(nD)
while i < int(F2 * EdgesNum):
EdgeBets = nx.edge_betweenness_centrality(G5)
# Find the edge with Max edge betweenness
uMax = -1; vMax = -1; betMax = -1.0;
for ((u, v), bet) in EdgeBets.iteritems():
if bet > betMax:
betMax = bet
uMax = u
vMax = v
G5.remove_edge(uMax, vMax)
i += 1
nD = len(SC.control_nodes(G5)) / (NodesNum + 0.0)
print F2, nD
LRB.append(nD)
while i < int(F3 * EdgesNum):
EdgeBets = nx.edge_betweenness_centrality(G5)
# Find the edge with Max edge betweenness
uMax = -1; vMax = -1; betMax = -1.0;
for ((u, v), bet) in EdgeBets.iteritems():
if bet > betMax:
betMax = bet
uMax = u
vMax = v
G5.remove_edge(uMax, vMax)
i += 1
nD = len(SC.control_nodes(G5)) / (NodesNum + 0.0)
print F3, nD
LRB.append(nD)
while i < int(F4 * EdgesNum):
EdgeBets = nx.edge_betweenness_centrality(G5)
# Find the edge with Max edge betweenness
uMax = -1; vMax = -1; betMax = -1.0;
for ((u, v), bet) in EdgeBets.iteritems():
if bet > betMax:
betMax = bet
uMax = u
vMax = v
G5.remove_edge(uMax, vMax)
i += 1
nD = len(SC.control_nodes(G5)) / (NodesNum + 0.0)
print F4, nD
LRB.append(nD)
G5.clear()
print 'RA: ', LRA[0], LRA[1], LRA[2], LRA[3]
print 'ID: ', LID[0], LID[1], LID[2], LID[3]
print 'RD: ', LRD[0], LRD[1], LRD[2], LRD[3]
print 'IB: ', LIB[0], LIB[1], LIB[2], LIB[3]
print 'RB: ', LRB[0], LRB[1], LRB[2], LRB[3]
if __name__ == "__main__":
#EdgeAttackBA()
#EdgeAttackUSAir()
# Edge Attack Erdos971 Network
# for random attack, we set the random seed to from 1 to 100 for the
# independent 100 runs. For other deliberate attacks, as the attack order
# is fixed, we reset the seed of random to the initial state, i.e. seed(None)
#EdgeAttackErdosNetwork()
# Regulatory
#G = ReadPajek('./dataset/Regulatory/TRN-Yeast-1.net')
#G = ReadPajek('./dataset/Regulatory/TRN-Yeast-2.net')
#G = ReadPajek('./dataset/Regulatory/TRN-EC-2.net')
#G = ReadPajek('./dataset/Regulatory/Ownership.net')
# World Wide Web (WWW)
G = ReadPajek('./dataset/WWW/PoliticalBlogs.net')
print 'Edge Attack From Temp Files ... '
print 'WWW --- PoliticalBlogs'
NodesNum = G.number_of_nodes()
EdgesNum = G.number_of_edges()
DriverNodes = SC.control_nodes(G)
nD = len(DriverNodes) / (NodesNum + 0.0)
print 'Nodes Num: ', NodesNum
print 'Edges Num: ', EdgesNum
print 'nD = ', nD
EdgeAttack(G)
| 34.370014
| 109
| 0.540521
| 3,580
| 24,987
| 3.651676
| 0.074302
| 0.055075
| 0.024095
| 0.03098
| 0.782146
| 0.775415
| 0.761034
| 0.746577
| 0.744282
| 0.730207
| 0
| 0.052772
| 0.295474
| 24,987
| 727
| 110
| 34.370014
| 0.689843
| 0.071957
| 0
| 0.780239
| 0
| 0
| 0.105973
| 0.02877
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.001704
| 0.028961
| null | null | 0.115843
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4a13bacba15ebf17c8cfc42cad96cd3b58f57140
| 305
|
py
|
Python
|
crypt.py
|
heyrict/formify
|
eebd7bb3685a892adb0e4ac82ad34b6610bd0a4e
|
[
"Apache-2.0"
] | 1
|
2021-06-19T19:32:01.000Z
|
2021-06-19T19:32:01.000Z
|
crypt.py
|
heyrict/formify
|
eebd7bb3685a892adb0e4ac82ad34b6610bd0a4e
|
[
"Apache-2.0"
] | null | null | null |
crypt.py
|
heyrict/formify
|
eebd7bb3685a892adb0e4ac82ad34b6610bd0a4e
|
[
"Apache-2.0"
] | null | null | null |
import bcrypt
class Bcrypt(object):
    """Namespace for bcrypt password-hashing helpers.

    Both helpers are true static methods: the original definitions took no
    `self`, so calling them on an instance (``Bcrypt().check_password(...)``)
    raised a TypeError.  ``@staticmethod`` fixes instance calls while keeping
    the existing ``Bcrypt.get_hashed_password(...)`` call style working.
    """

    @staticmethod
    def get_hashed_password(plain_text_password):
        """Hash a plain-text password; returns the bcrypt hash as bytes.

        gensalt() produces a fresh salt on every call, and hashpw embeds
        it in the returned hash, so two hashes of the same password differ.
        """
        return bcrypt.hashpw(plain_text_password.encode(), bcrypt.gensalt())

    @staticmethod
    def check_password(plain_text_password, hashed_password):
        """Return True if the plain-text password matches the stored hash.

        `hashed_password` must be the bytes previously returned by
        get_hashed_password (the salt is recovered from the hash itself).
        """
        return bcrypt.checkpw(plain_text_password.encode(), hashed_password)
| 30.5
| 76
| 0.77377
| 38
| 305
| 5.868421
| 0.421053
| 0.161435
| 0.304933
| 0.224215
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137705
| 305
| 9
| 77
| 33.888889
| 0.847909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.666667
| 0.166667
| 0.333333
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 7
|
680f5487f3c7b14f1073a9e3723d4f09b3722c0f
| 77
|
py
|
Python
|
trot_analysis/__init__.py
|
chrishavlin/trot_analysis
|
e1adb2af3faba6e1875a96ef1f9a71b11aa941f6
|
[
"MIT"
] | 1
|
2020-11-30T02:38:33.000Z
|
2020-11-30T02:38:33.000Z
|
trot_analysis/__init__.py
|
chrishavlin/trot_analysis
|
e1adb2af3faba6e1875a96ef1f9a71b11aa941f6
|
[
"MIT"
] | null | null | null |
trot_analysis/__init__.py
|
chrishavlin/trot_analysis
|
e1adb2af3faba6e1875a96ef1f9a71b11aa941f6
|
[
"MIT"
] | null | null | null |
from trot_analysis.trotters import trotters
from trot_analysis import awards
| 25.666667
| 43
| 0.883117
| 11
| 77
| 6
| 0.545455
| 0.242424
| 0.484848
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103896
| 77
| 2
| 44
| 38.5
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a83c44e16c299e18314865a0b3e4230025777ebb
| 28,836
|
py
|
Python
|
python_msx_sdk/api/services_api.py
|
CiscoDevNet/python-msx-sdk
|
d7e0a08c656504b4f4551d263e67c671a2a04b3f
|
[
"MIT"
] | null | null | null |
python_msx_sdk/api/services_api.py
|
CiscoDevNet/python-msx-sdk
|
d7e0a08c656504b4f4551d263e67c671a2a04b3f
|
[
"MIT"
] | null | null | null |
python_msx_sdk/api/services_api.py
|
CiscoDevNet/python-msx-sdk
|
d7e0a08c656504b4f4551d263e67c671a2a04b3f
|
[
"MIT"
] | null | null | null |
"""
MSX SDK
MSX SDK client. # noqa: E501
The version of the OpenAPI document: 1.0.9
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from python_msx_sdk.api_client import ApiClient, Endpoint as _Endpoint
from python_msx_sdk.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from python_msx_sdk.model.error import Error
from python_msx_sdk.model.legacy_service_order import LegacyServiceOrder
from python_msx_sdk.model.legacy_service_order_response import LegacyServiceOrderResponse
from python_msx_sdk.model.service import Service
from python_msx_sdk.model.service_update import ServiceUpdate
from python_msx_sdk.model.services_page import ServicesPage
class ServicesApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        # Each public endpoint attribute (e.g. self.delete_service) is an
        # _Endpoint object wrapping a nested closure that normalises the
        # keyword arguments and delegates to _Endpoint.call_with_http_info.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

        # --- delete_service: DELETE /manage/api/v8/services/{id} ---
        def __delete_service(
            self,
            id,
            **kwargs
        ):
            """Deletes a service.  # noqa: E501

            This method makes a synchronous HTTP request by default. To make an
            asynchronous HTTP request, please pass async_req=True

            >>> thread = api.delete_service(id, async_req=True)
            >>> result = thread.get()

            Args:
                id (str):

            Keyword Args:
                _return_http_data_only (bool): response data without head status
                    code and headers. Default is True.
                _preload_content (bool): if False, the urllib3.HTTPResponse object
                    will be returned without reading/decoding response data.
                    Default is True.
                _request_timeout (float/tuple): timeout setting for this request. If one
                    number provided, it will be total request timeout. It can also
                    be a pair (tuple) of (connection, read) timeouts.
                    Default is None.
                _check_input_type (bool): specifies if type checking
                    should be done one the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done one the data received from the server.
                    Default is True.
                _host_index (int/None): specifies the index of the server
                    that we want to use.
                    Default is read from the configuration.
                async_req (bool): execute request asynchronously

            Returns:
                None
                If the method is called asynchronously, returns the request
                thread.
            """
            # Fill in defaults for the framework-level keyword arguments.
            kwargs['async_req'] = kwargs.get(
                'async_req', False
            )
            kwargs['_return_http_data_only'] = kwargs.get(
                '_return_http_data_only', True
            )
            kwargs['_preload_content'] = kwargs.get(
                '_preload_content', True
            )
            kwargs['_request_timeout'] = kwargs.get(
                '_request_timeout', None
            )
            kwargs['_check_input_type'] = kwargs.get(
                '_check_input_type', True
            )
            kwargs['_check_return_type'] = kwargs.get(
                '_check_return_type', True
            )
            kwargs['_host_index'] = kwargs.get('_host_index')
            kwargs['id'] = \
                id
            return self.call_with_http_info(**kwargs)

        # Endpoint wiring: parameter metadata, validation rules and headers
        # for delete_service, bound to the closure above.
        self.delete_service = _Endpoint(
            settings={
                'response_type': None,
                'auth': [],
                'endpoint_path': '/manage/api/v8/services/{id}',
                'operation_id': 'delete_service',
                'http_method': 'DELETE',
                'servers': None,
            },
            params_map={
                'all': [
                    'id',
                ],
                'required': [
                    'id',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'id':
                        (str,),
                },
                'attribute_map': {
                    'id': 'id',
                },
                'location_map': {
                    'id': 'path',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [],
            },
            api_client=api_client,
            callable=__delete_service
        )

        # --- get_service: GET /manage/api/v8/services/{id} ---
        def __get_service(
            self,
            id,
            **kwargs
        ):
            """Returns a service.  # noqa: E501

            This method makes a synchronous HTTP request by default. To make an
            asynchronous HTTP request, please pass async_req=True

            >>> thread = api.get_service(id, async_req=True)
            >>> result = thread.get()

            Args:
                id (str):

            Keyword Args:
                _return_http_data_only (bool): response data without head status
                    code and headers. Default is True.
                _preload_content (bool): if False, the urllib3.HTTPResponse object
                    will be returned without reading/decoding response data.
                    Default is True.
                _request_timeout (float/tuple): timeout setting for this request. If one
                    number provided, it will be total request timeout. It can also
                    be a pair (tuple) of (connection, read) timeouts.
                    Default is None.
                _check_input_type (bool): specifies if type checking
                    should be done one the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done one the data received from the server.
                    Default is True.
                _host_index (int/None): specifies the index of the server
                    that we want to use.
                    Default is read from the configuration.
                async_req (bool): execute request asynchronously

            Returns:
                Service
                If the method is called asynchronously, returns the request
                thread.
            """
            # Fill in defaults for the framework-level keyword arguments.
            kwargs['async_req'] = kwargs.get(
                'async_req', False
            )
            kwargs['_return_http_data_only'] = kwargs.get(
                '_return_http_data_only', True
            )
            kwargs['_preload_content'] = kwargs.get(
                '_preload_content', True
            )
            kwargs['_request_timeout'] = kwargs.get(
                '_request_timeout', None
            )
            kwargs['_check_input_type'] = kwargs.get(
                '_check_input_type', True
            )
            kwargs['_check_return_type'] = kwargs.get(
                '_check_return_type', True
            )
            kwargs['_host_index'] = kwargs.get('_host_index')
            kwargs['id'] = \
                id
            return self.call_with_http_info(**kwargs)

        # Endpoint wiring for get_service.
        self.get_service = _Endpoint(
            settings={
                'response_type': (Service,),
                'auth': [],
                'endpoint_path': '/manage/api/v8/services/{id}',
                'operation_id': 'get_service',
                'http_method': 'GET',
                'servers': None,
            },
            params_map={
                'all': [
                    'id',
                ],
                'required': [
                    'id',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'id':
                        (str,),
                },
                'attribute_map': {
                    'id': 'id',
                },
                'location_map': {
                    'id': 'path',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [],
            },
            api_client=api_client,
            callable=__get_service
        )

        # --- get_services_page: GET /manage/api/v8/services (paged) ---
        def __get_services_page(
            self,
            page,
            page_size,
            **kwargs
        ):
            """Returns a page of services.  # noqa: E501

            This method makes a synchronous HTTP request by default. To make an
            asynchronous HTTP request, please pass async_req=True

            >>> thread = api.get_services_page(page, page_size, async_req=True)
            >>> result = thread.get()

            Args:
                page (int):
                page_size (int):

            Keyword Args:
                tenant_ids ([str]): [optional]
                _return_http_data_only (bool): response data without head status
                    code and headers. Default is True.
                _preload_content (bool): if False, the urllib3.HTTPResponse object
                    will be returned without reading/decoding response data.
                    Default is True.
                _request_timeout (float/tuple): timeout setting for this request. If one
                    number provided, it will be total request timeout. It can also
                    be a pair (tuple) of (connection, read) timeouts.
                    Default is None.
                _check_input_type (bool): specifies if type checking
                    should be done one the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done one the data received from the server.
                    Default is True.
                _host_index (int/None): specifies the index of the server
                    that we want to use.
                    Default is read from the configuration.
                async_req (bool): execute request asynchronously

            Returns:
                ServicesPage
                If the method is called asynchronously, returns the request
                thread.
            """
            # Fill in defaults for the framework-level keyword arguments.
            kwargs['async_req'] = kwargs.get(
                'async_req', False
            )
            kwargs['_return_http_data_only'] = kwargs.get(
                '_return_http_data_only', True
            )
            kwargs['_preload_content'] = kwargs.get(
                '_preload_content', True
            )
            kwargs['_request_timeout'] = kwargs.get(
                '_request_timeout', None
            )
            kwargs['_check_input_type'] = kwargs.get(
                '_check_input_type', True
            )
            kwargs['_check_return_type'] = kwargs.get(
                '_check_return_type', True
            )
            kwargs['_host_index'] = kwargs.get('_host_index')
            kwargs['page'] = \
                page
            kwargs['page_size'] = \
                page_size
            return self.call_with_http_info(**kwargs)

        # Endpoint wiring for get_services_page; note the range validations
        # on 'page' (>= 0) and 'page_size' (1..1000) and the multi-value
        # collection format for 'tenant_ids'.
        self.get_services_page = _Endpoint(
            settings={
                'response_type': (ServicesPage,),
                'auth': [],
                'endpoint_path': '/manage/api/v8/services',
                'operation_id': 'get_services_page',
                'http_method': 'GET',
                'servers': None,
            },
            params_map={
                'all': [
                    'page',
                    'page_size',
                    'tenant_ids',
                ],
                'required': [
                    'page',
                    'page_size',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                    'page',
                    'page_size',
                ]
            },
            root_map={
                'validations': {
                    ('page',): {
                        'inclusive_minimum': 0,
                    },
                    ('page_size',): {
                        'inclusive_maximum': 1000,
                        'inclusive_minimum': 1,
                    },
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'page':
                        (int,),
                    'page_size':
                        (int,),
                    'tenant_ids':
                        ([str],),
                },
                'attribute_map': {
                    'page': 'page',
                    'page_size': 'pageSize',
                    'tenant_ids': 'tenantIds',
                },
                'location_map': {
                    'page': 'query',
                    'page_size': 'query',
                    'tenant_ids': 'query',
                },
                'collection_format_map': {
                    'tenant_ids': 'multi',
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [],
            },
            api_client=api_client,
            callable=__get_services_page
        )

        # --- submit_order: POST /manage/api/v8/services ---
        def __submit_order(
            self,
            product_id,
            offer_id,
            legacy_service_order,
            **kwargs
        ):
            """Submits an order.  # noqa: E501

            This method makes a synchronous HTTP request by default. To make an
            asynchronous HTTP request, please pass async_req=True

            >>> thread = api.submit_order(product_id, offer_id, legacy_service_order, async_req=True)
            >>> result = thread.get()

            Args:
                product_id (str):
                offer_id (str):
                legacy_service_order (LegacyServiceOrder):

            Keyword Args:
                _return_http_data_only (bool): response data without head status
                    code and headers. Default is True.
                _preload_content (bool): if False, the urllib3.HTTPResponse object
                    will be returned without reading/decoding response data.
                    Default is True.
                _request_timeout (float/tuple): timeout setting for this request. If one
                    number provided, it will be total request timeout. It can also
                    be a pair (tuple) of (connection, read) timeouts.
                    Default is None.
                _check_input_type (bool): specifies if type checking
                    should be done one the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done one the data received from the server.
                    Default is True.
                _host_index (int/None): specifies the index of the server
                    that we want to use.
                    Default is read from the configuration.
                async_req (bool): execute request asynchronously

            Returns:
                LegacyServiceOrderResponse
                If the method is called asynchronously, returns the request
                thread.
            """
            # Fill in defaults for the framework-level keyword arguments.
            kwargs['async_req'] = kwargs.get(
                'async_req', False
            )
            kwargs['_return_http_data_only'] = kwargs.get(
                '_return_http_data_only', True
            )
            kwargs['_preload_content'] = kwargs.get(
                '_preload_content', True
            )
            kwargs['_request_timeout'] = kwargs.get(
                '_request_timeout', None
            )
            kwargs['_check_input_type'] = kwargs.get(
                '_check_input_type', True
            )
            kwargs['_check_return_type'] = kwargs.get(
                '_check_return_type', True
            )
            kwargs['_host_index'] = kwargs.get('_host_index')
            kwargs['product_id'] = \
                product_id
            kwargs['offer_id'] = \
                offer_id
            kwargs['legacy_service_order'] = \
                legacy_service_order
            return self.call_with_http_info(**kwargs)

        # Endpoint wiring for submit_order; 'legacy_service_order' is the
        # JSON request body (location 'body'), the ids go in the query string.
        self.submit_order = _Endpoint(
            settings={
                'response_type': (LegacyServiceOrderResponse,),
                'auth': [],
                'endpoint_path': '/manage/api/v8/services',
                'operation_id': 'submit_order',
                'http_method': 'POST',
                'servers': None,
            },
            params_map={
                'all': [
                    'product_id',
                    'offer_id',
                    'legacy_service_order',
                ],
                'required': [
                    'product_id',
                    'offer_id',
                    'legacy_service_order',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'product_id':
                        (str,),
                    'offer_id':
                        (str,),
                    'legacy_service_order':
                        (LegacyServiceOrder,),
                },
                'attribute_map': {
                    'product_id': 'productId',
                    'offer_id': 'offerId',
                },
                'location_map': {
                    'product_id': 'query',
                    'offer_id': 'query',
                    'legacy_service_order': 'body',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [
                    'application/json'
                ]
            },
            api_client=api_client,
            callable=__submit_order
        )

        # --- update_order: PUT /manage/api/v8/services ---
        def __update_order(
            self,
            product_id,
            offer_id,
            legacy_service_order,
            **kwargs
        ):
            """Updates an order.  # noqa: E501

            This method makes a synchronous HTTP request by default. To make an
            asynchronous HTTP request, please pass async_req=True

            >>> thread = api.update_order(product_id, offer_id, legacy_service_order, async_req=True)
            >>> result = thread.get()

            Args:
                product_id (str):
                offer_id (str):
                legacy_service_order (LegacyServiceOrder):

            Keyword Args:
                _return_http_data_only (bool): response data without head status
                    code and headers. Default is True.
                _preload_content (bool): if False, the urllib3.HTTPResponse object
                    will be returned without reading/decoding response data.
                    Default is True.
                _request_timeout (float/tuple): timeout setting for this request. If one
                    number provided, it will be total request timeout. It can also
                    be a pair (tuple) of (connection, read) timeouts.
                    Default is None.
                _check_input_type (bool): specifies if type checking
                    should be done one the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done one the data received from the server.
                    Default is True.
                _host_index (int/None): specifies the index of the server
                    that we want to use.
                    Default is read from the configuration.
                async_req (bool): execute request asynchronously

            Returns:
                LegacyServiceOrderResponse
                If the method is called asynchronously, returns the request
                thread.
            """
            # Fill in defaults for the framework-level keyword arguments.
            kwargs['async_req'] = kwargs.get(
                'async_req', False
            )
            kwargs['_return_http_data_only'] = kwargs.get(
                '_return_http_data_only', True
            )
            kwargs['_preload_content'] = kwargs.get(
                '_preload_content', True
            )
            kwargs['_request_timeout'] = kwargs.get(
                '_request_timeout', None
            )
            kwargs['_check_input_type'] = kwargs.get(
                '_check_input_type', True
            )
            kwargs['_check_return_type'] = kwargs.get(
                '_check_return_type', True
            )
            kwargs['_host_index'] = kwargs.get('_host_index')
            kwargs['product_id'] = \
                product_id
            kwargs['offer_id'] = \
                offer_id
            kwargs['legacy_service_order'] = \
                legacy_service_order
            return self.call_with_http_info(**kwargs)

        # Endpoint wiring for update_order (same shape as submit_order,
        # but an idempotent PUT).
        self.update_order = _Endpoint(
            settings={
                'response_type': (LegacyServiceOrderResponse,),
                'auth': [],
                'endpoint_path': '/manage/api/v8/services',
                'operation_id': 'update_order',
                'http_method': 'PUT',
                'servers': None,
            },
            params_map={
                'all': [
                    'product_id',
                    'offer_id',
                    'legacy_service_order',
                ],
                'required': [
                    'product_id',
                    'offer_id',
                    'legacy_service_order',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'product_id':
                        (str,),
                    'offer_id':
                        (str,),
                    'legacy_service_order':
                        (LegacyServiceOrder,),
                },
                'attribute_map': {
                    'product_id': 'productId',
                    'offer_id': 'offerId',
                },
                'location_map': {
                    'product_id': 'query',
                    'offer_id': 'query',
                    'legacy_service_order': 'body',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [
                    'application/json'
                ]
            },
            api_client=api_client,
            callable=__update_order
        )

        # --- update_service: PUT /manage/api/v8/services/{id} ---
        def __update_service(
            self,
            id,
            service_update,
            **kwargs
        ):
            """Updates a service.  # noqa: E501

            This method makes a synchronous HTTP request by default. To make an
            asynchronous HTTP request, please pass async_req=True

            >>> thread = api.update_service(id, service_update, async_req=True)
            >>> result = thread.get()

            Args:
                id (str):
                service_update (ServiceUpdate):

            Keyword Args:
                _return_http_data_only (bool): response data without head status
                    code and headers. Default is True.
                _preload_content (bool): if False, the urllib3.HTTPResponse object
                    will be returned without reading/decoding response data.
                    Default is True.
                _request_timeout (float/tuple): timeout setting for this request. If one
                    number provided, it will be total request timeout. It can also
                    be a pair (tuple) of (connection, read) timeouts.
                    Default is None.
                _check_input_type (bool): specifies if type checking
                    should be done one the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done one the data received from the server.
                    Default is True.
                _host_index (int/None): specifies the index of the server
                    that we want to use.
                    Default is read from the configuration.
                async_req (bool): execute request asynchronously

            Returns:
                Service
                If the method is called asynchronously, returns the request
                thread.
            """
            # Fill in defaults for the framework-level keyword arguments.
            kwargs['async_req'] = kwargs.get(
                'async_req', False
            )
            kwargs['_return_http_data_only'] = kwargs.get(
                '_return_http_data_only', True
            )
            kwargs['_preload_content'] = kwargs.get(
                '_preload_content', True
            )
            kwargs['_request_timeout'] = kwargs.get(
                '_request_timeout', None
            )
            kwargs['_check_input_type'] = kwargs.get(
                '_check_input_type', True
            )
            kwargs['_check_return_type'] = kwargs.get(
                '_check_return_type', True
            )
            kwargs['_host_index'] = kwargs.get('_host_index')
            kwargs['id'] = \
                id
            kwargs['service_update'] = \
                service_update
            return self.call_with_http_info(**kwargs)

        # Endpoint wiring for update_service; 'id' is a path parameter,
        # 'service_update' is the JSON request body.
        self.update_service = _Endpoint(
            settings={
                'response_type': (Service,),
                'auth': [],
                'endpoint_path': '/manage/api/v8/services/{id}',
                'operation_id': 'update_service',
                'http_method': 'PUT',
                'servers': None,
            },
            params_map={
                'all': [
                    'id',
                    'service_update',
                ],
                'required': [
                    'id',
                    'service_update',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'id':
                        (str,),
                    'service_update':
                        (ServiceUpdate,),
                },
                'attribute_map': {
                    'id': 'id',
                },
                'location_map': {
                    'id': 'path',
                    'service_update': 'body',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [
                    'application/json'
                ]
            },
            api_client=api_client,
            callable=__update_service
        )
| 35.165854
| 101
| 0.446595
| 2,412
| 28,836
| 5.07131
| 0.082919
| 0.030903
| 0.025507
| 0.026488
| 0.867479
| 0.852273
| 0.847531
| 0.844997
| 0.82791
| 0.813849
| 0
| 0.003336
| 0.469829
| 28,836
| 819
| 102
| 35.208791
| 0.796769
| 0.305798
| 0
| 0.653501
| 1
| 0
| 0.211497
| 0.030814
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012567
| false
| 0
| 0.017953
| 0
| 0.043088
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a85874908f4ac7d17c5abdbf8b971fe06fab0c18
| 92
|
py
|
Python
|
augur/metrics/repo_meta/__init__.py
|
Claire-Hough/augur
|
b48d246a8959f62473c8e898148a2113772a700c
|
[
"MIT"
] | 3
|
2019-10-31T19:07:48.000Z
|
2019-11-20T23:14:15.000Z
|
augur/metrics/repo_meta/__init__.py
|
Claire-Hough/augur
|
b48d246a8959f62473c8e898148a2113772a700c
|
[
"MIT"
] | 3
|
2019-12-03T21:21:17.000Z
|
2019-12-05T15:26:22.000Z
|
augur/metrics/repo_meta/__init__.py
|
Claire-Hough/augur
|
b48d246a8959f62473c8e898148a2113772a700c
|
[
"MIT"
] | 4
|
2019-11-05T20:22:12.000Z
|
2019-12-12T18:08:30.000Z
|
from .repo_meta import create_repo_meta_metrics
from .routes import create_repo_meta_routes
| 30.666667
| 47
| 0.891304
| 15
| 92
| 5
| 0.466667
| 0.32
| 0.426667
| 0.533333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 92
| 3
| 48
| 30.666667
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
a87dfa582c7ab24d706467107ffdc96d9bcc6b9e
| 23,058
|
py
|
Python
|
WAnet/training.py
|
THREDgroup/WAnet
|
2b160012880f05f8ea3abcc426fb53eedfd51f80
|
[
"MIT"
] | null | null | null |
WAnet/training.py
|
THREDgroup/WAnet
|
2b160012880f05f8ea3abcc426fb53eedfd51f80
|
[
"MIT"
] | null | null | null |
WAnet/training.py
|
THREDgroup/WAnet
|
2b160012880f05f8ea3abcc426fb53eedfd51f80
|
[
"MIT"
] | null | null | null |
import keras
import sklearn.model_selection
import numpy
import pkg_resources
import os
VERBOSE = 1
def load_data():
    """Load the precompiled WAnet datasets bundled with the package.

    Returns:
        Tuple (curves, geometry, S, N, D, F, G, new_curves, new_geometry)
        where new_curves / new_geometry hold one flattened (transposed)
        sample per row; curve values are rescaled by 1e-6.
    """
    _resource = pkg_resources.resource_filename
    curves = numpy.load(_resource('WAnet', 'data/compiled_data/data_curves.npz'))['curves']
    geometry = numpy.load(_resource('WAnet', 'data/compiled_data/data_geometry.npz'))['geometry']
    constants = numpy.load(_resource('WAnet', 'data/compiled_data/constants.npz'))
    S = constants['S']
    N = constants['N']
    D = constants['D']
    F = constants['F']
    G = constants['G']
    # One row per sample: transpose then flatten, rescaling curves to ~O(1).
    new_curves = numpy.zeros((S * N, D * F))
    for row in range(len(curves)):
        new_curves[row, :] = curves[row].T.flatten() / 1000000
    # One row per sample: flattened (transposed) voxel grid, unscaled.
    new_geometry = numpy.zeros((S * N, G * G * G))
    for row in range(len(geometry)):
        new_geometry[row, :] = geometry[row].T.flatten()
    return curves, geometry, S, N, D, F, G, new_curves, new_geometry
def train_geometry_autoencoder(epochs, latent_dim, save_results, print_network):
    """Train a variational autoencoder on flattened voxel geometry.

    Args:
        epochs: Number of training epochs.
        latent_dim: Size of the latent code; also prefixes the output file names.
        save_results: If True, write encoder/decoder/autoencoder structure (YAML)
            and weights (HDF5) into the package's trained_models directory.
        print_network: If True, plot the three network diagrams to EPS files.

    Returns:
        Pseudo-R^2 on the held-out split:
        1 - BCE(model) / BCE(constant-mean baseline).
    """
    curves, geometry, S, N, D, F, G, new_curves, new_geometry = load_data()
    batch_size = 100
    original_dim = G*G*G
    intermediate_dim = 256
    epsilon_std = 1.0
    x = keras.layers.Input(shape=(original_dim,))
    h = keras.layers.Dense(intermediate_dim, activation='relu')(x)
    z_mean = keras.layers.Dense(latent_dim)(h)
    z_log_var = keras.layers.Dense(latent_dim)(h)

    def sampling(args):
        # Reparameterization trick: z = mu + sigma * eps, eps ~ N(0, epsilon_std).
        z_mean, z_log_var = args
        epsilon = keras.backend.random_normal(shape=(keras.backend.shape(z_mean)[0], latent_dim), mean=0.,
                                              stddev=epsilon_std)
        return z_mean + keras.backend.exp(z_log_var / 2) * epsilon

    # note that "output_shape" isn't necessary with the TensorFlow backend
    z = keras.layers.Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])
    # we instantiate these layers separately so as to reuse them later
    decoder_h = keras.layers.Dense(intermediate_dim, activation='relu')
    decoder_mean = keras.layers.Dense(original_dim, activation='sigmoid')
    h_decoded = decoder_h(z)
    x_decoded_mean = decoder_mean(h_decoded)

    # Custom loss layer: attaches the VAE loss via add_loss; its output is unused.
    class CustomVariationalLayer(keras.layers.Layer):
        def __init__(self, **kwargs):
            self.is_placeholder = True
            super(CustomVariationalLayer, self).__init__(**kwargs)

        def vae_loss(self, x, x_decoded_mean):
            # Reconstruction term (BCE scaled by input size) + KL divergence to N(0, I).
            # Note: closes over z_mean / z_log_var from the enclosing function.
            xent_loss = original_dim * keras.metrics.binary_crossentropy(x, x_decoded_mean)
            kl_loss = - 0.5 * keras.backend.sum(1 + z_log_var - keras.backend.square(z_mean) - keras.backend.exp(z_log_var), axis=-1)
            return keras.backend.mean(xent_loss + kl_loss)

        def call(self, inputs, **kwargs):
            x = inputs[0]
            x_decoded_mean = inputs[1]
            loss = self.vae_loss(x, x_decoded_mean)
            self.add_loss(loss, inputs=inputs)
            # We won't actually use the output.
            return x

    y = CustomVariationalLayer()([x, x_decoded_mean])
    vae = keras.models.Model(x, y)
    # loss=None: the model trains purely on the loss added by CustomVariationalLayer.
    vae.compile(optimizer='rmsprop', loss=None)
    # Deterministic split so later runs evaluate on the same held-out rows.
    x_train, x_test = sklearn.model_selection.train_test_split(new_geometry, shuffle=False)
    weights = pkg_resources.resource_filename('WAnet', 'trained_models/'+str(latent_dim)+'temp_vae_weights.h5')
    logger = pkg_resources.resource_filename('WAnet', 'trained_models/'+str(latent_dim)+'geometry_vae_training.csv')
    vae.fit(x_train,
            shuffle=True,
            epochs=epochs,
            batch_size=batch_size,
            validation_data=(x_test, None),
            verbose=VERBOSE,
            callbacks=[keras.callbacks.ModelCheckpoint(filepath=weights, verbose=VERBOSE, save_best_only=True),
                       keras.callbacks.CSVLogger(logger, separator=',', append=False)])
    # Restore the best checkpoint, then delete the temporary weights file.
    vae.load_weights(weights)
    os.remove(weights)
    # build a model to project inputs on the latent space
    encoder = keras.models.Model(x, z_mean)
    # build a digit generator that can sample from the learned distribution
    decoder_input = keras.layers.Input(shape=(latent_dim,))
    _h_decoded = decoder_h(decoder_input)
    _x_decoded_mean = decoder_mean(_h_decoded)
    generator = keras.models.Model(decoder_input, _x_decoded_mean)
    # Build the deterministic autoencoder (encode to z_mean, then decode).
    _h_decoded2 = decoder_h(z_mean)
    _x_decoded_mean2 = decoder_mean(_h_decoded2)
    autoencoder = keras.models.Model(x, _x_decoded_mean2)
    structure = []
    weights = []
    if save_results:
        # Save encoder structure and weights
        temp = open(pkg_resources.resource_filename('WAnet', 'trained_models/'+str(latent_dim)+'geometry_encoder_structure.yml'), 'w')
        temp.write(encoder.to_yaml())
        temp.close()
        encoder.save_weights(pkg_resources.resource_filename('WAnet', 'trained_models/'+str(latent_dim)+'geometry_encoder_weights.h5'))
        # Save decoder structure and weights
        temp = open(pkg_resources.resource_filename('WAnet', 'trained_models/'+str(latent_dim)+'geometry_decoder_structure.yml'), 'w')
        temp.write(generator.to_yaml())
        temp.close()
        generator.save_weights(pkg_resources.resource_filename('WAnet', 'trained_models/'+str(latent_dim)+'geometry_decoder_weights.h5'))
        # Save full autoencoder structure and weights
        structure = pkg_resources.resource_filename('WAnet', 'trained_models/'+str(latent_dim)+'geometry_autoencoder_structure.yml')
        weights = pkg_resources.resource_filename('WAnet', 'trained_models/'+str(latent_dim)+'geometry_autoencoder_weights.h5')
        temp = open(structure, 'w')
        temp.write(autoencoder.to_yaml())
        temp.close()
        autoencoder.save_weights(weights)
    if print_network:
        keras.utils.plot_model(generator, to_file=pkg_resources.resource_filename('WAnet', 'figures/'+str(latent_dim)+'geometry_decoder.eps'), show_shapes=True)
        keras.utils.plot_model(encoder, to_file=pkg_resources.resource_filename('WAnet', 'figures/'+str(latent_dim)+'geometry_encoder.eps'), show_shapes=True)
        keras.utils.plot_model(autoencoder, to_file=pkg_resources.resource_filename('WAnet', 'figures/'+str(latent_dim)+'geometry_autoencoder.eps'), show_shapes=True)
    # Final check on metrics.
    # NOTE(review): .eval() assumes a TF1-style default session is active — confirm.
    x_pred = autoencoder.predict(x_test)
    mse = keras.backend.mean(keras.losses.binary_crossentropy(x_pred, x_test)).eval()
    # Constant baseline: overwrite every prediction with the mean test value.
    x_pred.fill(numpy.mean(x_test.flatten()))
    s2 = keras.backend.mean(keras.losses.binary_crossentropy(x_pred, x_test)).eval()
    r2 = 1-mse/s2
    print("Final BCE: "+str(mse))
    print("Final S2: "+str(s2))
    print("Final R2: "+str(r2))
    return r2
def train_response_autoencoder(epochs, latent_dim, save_results, print_network):
    """Train a variational autoencoder on flattened response curves.

    Mirrors train_geometry_autoencoder but operates on the D*F curve vectors
    and uses an MSE reconstruction term instead of binary cross-entropy.

    Args:
        epochs: Number of training epochs.
        latent_dim: Size of the latent code; also prefixes the output file names.
        save_results: If True, write structure (YAML) and weights (HDF5) files.
        print_network: If True, plot the network diagrams to EPS files.

    Returns:
        Pseudo-R^2 on the held-out split: 1 - MSE(model) / variance baseline.
    """
    curves, geometry, S, N, D, F, G, new_curves, new_geometry = load_data()
    batch_size = 10
    original_dim = D*F
    intermediate_dim = 64
    epsilon_std = 1.0
    x = keras.layers.Input(shape=(original_dim,))
    h = keras.layers.Dense(intermediate_dim, activation='relu')(x)
    z_mean = keras.layers.Dense(latent_dim)(h)
    z_log_var = keras.layers.Dense(latent_dim)(h)

    def sampling(args):
        # Reparameterization trick: z = mu + sigma * eps, eps ~ N(0, epsilon_std).
        z_mean, z_log_var = args
        epsilon = keras.backend.random_normal(shape=(keras.backend.shape(z_mean)[0], latent_dim), mean=0.,
                                              stddev=epsilon_std)
        return z_mean + keras.backend.exp(z_log_var / 2) * epsilon

    # note that "output_shape" isn't necessary with the TensorFlow backend
    z = keras.layers.Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])
    # we instantiate these layers separately so as to reuse them later
    decoder_h = keras.layers.Dense(intermediate_dim, activation='relu')
    decoder_mean = keras.layers.Dense(original_dim, activation='sigmoid')
    h_decoded = decoder_h(z)
    x_decoded_mean = decoder_mean(h_decoded)

    # Custom loss layer: attaches the VAE loss via add_loss; its output is unused.
    class CustomVariationalLayer(keras.layers.Layer):
        def __init__(self, **kwargs):
            self.is_placeholder = True
            super(CustomVariationalLayer, self).__init__(**kwargs)

        def vae_loss(self, x, x_decoded_mean):
            # Reconstruction term (MSE scaled by input size) + KL divergence to N(0, I).
            # Note: closes over z_mean / z_log_var from the enclosing function.
            xent_loss = original_dim * keras.metrics.mean_squared_error(x, x_decoded_mean)
            kl_loss = - 0.5 * keras.backend.sum(1 + z_log_var - keras.backend.square(z_mean) - keras.backend.exp(z_log_var), axis=-1)
            return keras.backend.mean(xent_loss + kl_loss)

        def call(self, inputs):
            x = inputs[0]
            x_decoded_mean = inputs[1]
            loss = self.vae_loss(x, x_decoded_mean)
            self.add_loss(loss, inputs=inputs)
            # We won't actually use the output.
            return x

    y = CustomVariationalLayer()([x, x_decoded_mean])
    vae = keras.models.Model(x, y)
    # loss=None: the model trains purely on the loss added by CustomVariationalLayer.
    vae.compile(optimizer='rmsprop', loss=None)
    # Train the VAE on the response curves (deterministic split for evaluation).
    x_train, x_test = sklearn.model_selection.train_test_split(new_curves, shuffle=False)
    weights = pkg_resources.resource_filename('WAnet', 'trained_models/'+str(latent_dim)+'temp_vae_weights.h5')
    logger = pkg_resources.resource_filename('WAnet', 'trained_models/'+str(latent_dim)+'curve_vae_training.csv')
    vae.fit(x_train,
            shuffle=True,
            epochs=epochs,
            batch_size=batch_size,
            validation_data=(x_test, None),
            verbose=VERBOSE,
            callbacks=[keras.callbacks.ModelCheckpoint(filepath=weights, verbose=VERBOSE, save_best_only=True),
                       keras.callbacks.CSVLogger(logger, separator=',', append=False)])
    # Restore the best checkpoint, then delete the temporary weights file.
    vae.load_weights(weights)
    os.remove(weights)
    # build a model to project inputs on the latent space
    encoder = keras.models.Model(x, z_mean)
    # build a digit generator that can sample from the learned distribution
    decoder_input = keras.layers.Input(shape=(latent_dim,))
    _h_decoded = decoder_h(decoder_input)
    _x_decoded_mean = decoder_mean(_h_decoded)
    generator = keras.models.Model(decoder_input, _x_decoded_mean)
    # Build the deterministic autoencoder (encode to z_mean, then decode).
    _h_decoded2 = decoder_h(z_mean)
    _x_decoded_mean2 = decoder_mean(_h_decoded2)
    autoencoder = keras.models.Model(x, _x_decoded_mean2)
    if save_results:
        # Save encoder structure and weights
        temp = open(pkg_resources.resource_filename('WAnet', 'trained_models/'+str(latent_dim)+'curve_encoder_structure.yml'), 'w')
        temp.write(encoder.to_yaml())
        temp.close()
        encoder.save_weights(pkg_resources.resource_filename('WAnet', 'trained_models/'+str(latent_dim)+'curve_encoder_weights.h5'))
        # Save decoder structure and weights
        temp = open(pkg_resources.resource_filename('WAnet', 'trained_models/'+str(latent_dim)+'curve_decoder_structure.yml'), 'w')
        temp.write(generator.to_yaml())
        temp.close()
        generator.save_weights(pkg_resources.resource_filename('WAnet', 'trained_models/'+str(latent_dim)+'curve_decoder_weights.h5'))
        # Save full autoencoder structure and weights
        structure = pkg_resources.resource_filename('WAnet', 'trained_models/'+str(latent_dim)+'curve_autoencoder_structure.yml')
        weights = pkg_resources.resource_filename('WAnet', 'trained_models/'+str(latent_dim)+'curve_autoencoder_weights.h5')
        temp = open(structure, 'w')
        temp.write(autoencoder.to_yaml())
        temp.close()
        autoencoder.save_weights(weights)
    if print_network:
        keras.utils.plot_model(generator, to_file=pkg_resources.resource_filename('WAnet', 'figures/'+str(latent_dim)+'curve_decoder.eps'), show_shapes=True)
        keras.utils.plot_model(encoder, to_file=pkg_resources.resource_filename('WAnet', 'figures/'+str(latent_dim)+'curve_encoder.eps'), show_shapes=True)
        keras.utils.plot_model(autoencoder, to_file=pkg_resources.resource_filename('WAnet', 'figures/'+str(latent_dim)+'curve_autoencoder.eps'), show_shapes=True)
    # Final check on metrics: compare model MSE against the variance of the test data.
    # NOTE(review): .eval() assumes a TF1-style default session is active — confirm.
    x_pred = autoencoder.predict(x_test)
    s2 = numpy.mean(numpy.power(numpy.mean(x_test.flatten()) - x_test.flatten(), 2))
    mse = keras.backend.mean(keras.losses.mean_squared_error(x_pred, x_test)).eval()
    r2 = 1-mse/s2
    print("Final MSE: "+str(mse))
    print("Final S2: "+str(s2))
    print("Final R2: "+str(r2))
    return r2
def train_forward_network(epochs, latent_dim, save_results, print_network):
    """Train the forward surrogate mapping flattened geometry (32768-d voxel
    grids) to flattened response curves (192-d).

    The first two Dense layers are initialized from the pretrained geometry
    encoder and the last two from the pretrained curve decoder; all four are
    frozen so only the connector layer trains.

    Args:
        epochs: Number of training epochs.
        latent_dim: Latent size; selects which pretrained models to load and
            prefixes the output file names.
        save_results: If True, checkpoint the best weights and save the
            model structure (YAML).
        print_network: If True, plot the model diagram to an EPS file.

    Returns:
        Pseudo-R^2 on the test split: 1 - MSE(model) / variance of targets.
    """
    curves, geometry, S, N, D, F, G, new_curves, new_geometry = load_data()
    # Define model
    x = keras.layers.Input(shape=(32768,))
    de1 = keras.layers.Dense(256, activation='relu')(x)
    de2 = keras.layers.Dense(latent_dim, activation='relu')(de1)
    con = keras.layers.Dense(latent_dim, activation='relu')(de2)
    dd2 = keras.layers.Dense(64, activation='relu')(con)
    y = keras.layers.Dense(192, activation='sigmoid')(dd2)
    # Build and compile model
    mdl = keras.models.Model(x, y)
    mdl.compile(optimizer='rmsprop', loss='mse')
    # Load the pretrained geometry encoder (use `with` so the YAML files are closed).
    with open(pkg_resources.resource_filename('WAnet', 'trained_models/'+str(latent_dim)+'geometry_encoder_structure.yml'), 'r') as temp:
        geo = keras.models.model_from_yaml(temp.read())
    geo.load_weights(pkg_resources.resource_filename('WAnet', 'trained_models/'+str(latent_dim)+'geometry_encoder_weights.h5'))
    # Load the pretrained curve decoder
    with open(pkg_resources.resource_filename('WAnet', 'trained_models/'+str(latent_dim)+'curve_decoder_structure.yml'), 'r') as temp:
        curve = keras.models.model_from_yaml(temp.read())
    curve.load_weights(pkg_resources.resource_filename('WAnet', 'trained_models/'+str(latent_dim)+'curve_decoder_weights.h5'))
    # Copy pretrained weights into the surrogate and freeze those layers.
    mdl.layers[1].set_weights(geo.layers[1].get_weights())
    mdl.layers[1].trainable = False
    mdl.layers[2].set_weights(geo.layers[2].get_weights())
    mdl.layers[2].trainable = False
    mdl.layers[4].set_weights(curve.layers[1].get_weights())
    mdl.layers[4].trainable = False
    mdl.layers[5].set_weights(curve.layers[2].get_weights())
    mdl.layers[5].trainable = False
    # Make file names
    weights = pkg_resources.resource_filename('WAnet', 'trained_models/'+str(latent_dim)+'forward_weights.h5')
    structure = pkg_resources.resource_filename('WAnet', 'trained_models/'+str(latent_dim)+'forward_structure.yml')
    plot = pkg_resources.resource_filename('WAnet', 'figures/'+str(latent_dim)+'forward.eps')
    # Deterministic split so all runs evaluate on the same held-out rows.
    x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(new_geometry, new_curves, shuffle=False)
    if save_results:
        mdl.fit(x_train, y_train, verbose=VERBOSE, epochs=epochs, shuffle=False, validation_data=(x_test, y_test),
                callbacks=[keras.callbacks.ModelCheckpoint(filepath=weights, verbose=VERBOSE, save_best_only=True)])
        # Save model structure
        with open(structure, 'w') as temp:
            temp.write(mdl.to_yaml())
    else:
        # BUG FIX: this branch previously trained on the FULL dataset
        # (new_geometry/new_curves), leaking the validation rows into
        # training. Train on the training split only, as the branch above does.
        mdl.fit(x_train, y_train, verbose=VERBOSE, epochs=epochs, shuffle=False, validation_data=(x_test, y_test))
    if print_network:
        keras.utils.plot_model(mdl, to_file=plot, show_shapes=True)
    # BUG FIX: a checkpoint is only written when save_results is True, so the
    # original unconditional load crashed otherwise. Restore the best weights
    # only when a checkpoint file actually exists.
    if os.path.isfile(weights):
        mdl.load_weights(weights)
    # Final check on metrics
    y_pred = mdl.predict(x_test)
    s2 = numpy.mean(numpy.power(numpy.mean(y_test.flatten()) - y_test.flatten(), 2))
    mse = keras.backend.mean(keras.losses.mean_squared_error(y_pred, y_test)).eval()
    r2 = 1-mse/s2
    print("Final MSE: "+str(mse))
    print("Final S2: "+str(s2))
    print("Final R2: "+str(r2))
    return r2
def train_inverse_network(epochs, latent_dim, save_results, print_network):
    """Train the inverse surrogate mapping flattened response curves (192-d)
    to flattened voxel geometry (32768-d).

    The first two Dense layers are initialized from the pretrained curve
    encoder and the last two from the pretrained geometry decoder; all four
    are frozen so only the connector layer trains.

    Args:
        epochs: Number of training epochs.
        latent_dim: Latent size; selects which pretrained models to load and
            prefixes the output file names.
        save_results: If True, checkpoint the best weights and save the
            model structure (YAML).
        print_network: If True, plot the model diagram to an EPS file.

    Returns:
        Pseudo-R^2 on the test split:
        1 - BCE(model) / BCE(constant-mean baseline).
    """
    curves, geometry, S, N, D, F, G, new_curves, new_geometry = load_data()
    # Define model
    x = keras.layers.Input(shape=(192,))
    de1 = keras.layers.Dense(64, activation='relu')(x)
    de2 = keras.layers.Dense(latent_dim, activation='relu')(de1)
    con = keras.layers.Dense(latent_dim, activation='relu')(de2)
    dd2 = keras.layers.Dense(256, activation='relu')(con)
    y = keras.layers.Dense(32768, activation='sigmoid')(dd2)
    # Build and compile model
    mdl = keras.models.Model(x, y)
    mdl.compile(optimizer='rmsprop', loss='binary_crossentropy')
    # Load the pretrained geometry decoder (use `with` so the YAML files are closed).
    with open(pkg_resources.resource_filename('WAnet', 'trained_models/'+str(latent_dim)+'geometry_decoder_structure.yml'), 'r') as temp:
        geo = keras.models.model_from_yaml(temp.read())
    geo.load_weights(pkg_resources.resource_filename('WAnet', 'trained_models/'+str(latent_dim)+'geometry_decoder_weights.h5'))
    # Load the pretrained curve encoder
    with open(pkg_resources.resource_filename('WAnet', 'trained_models/'+str(latent_dim)+'curve_encoder_structure.yml'), 'r') as temp:
        curve = keras.models.model_from_yaml(temp.read())
    curve.load_weights(pkg_resources.resource_filename('WAnet', 'trained_models/'+str(latent_dim)+'curve_encoder_weights.h5'))
    # Copy pretrained weights into the surrogate and freeze those layers.
    mdl.layers[1].set_weights(curve.layers[1].get_weights())
    mdl.layers[1].trainable = False
    mdl.layers[2].set_weights(curve.layers[2].get_weights())
    mdl.layers[2].trainable = False
    mdl.layers[4].set_weights(geo.layers[1].get_weights())
    mdl.layers[4].trainable = False
    mdl.layers[5].set_weights(geo.layers[2].get_weights())
    mdl.layers[5].trainable = False
    # Make file names
    weights = pkg_resources.resource_filename('WAnet', 'trained_models/'+str(latent_dim)+'inverse_weights.h5')
    structure = pkg_resources.resource_filename('WAnet', 'trained_models/'+str(latent_dim)+'inverse_structure.yml')
    plot = pkg_resources.resource_filename('WAnet', 'figures/'+str(latent_dim)+'inverse.eps')
    # Deterministic split so all runs evaluate on the same held-out rows.
    x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(new_curves, new_geometry, shuffle=False)
    if save_results:
        # BUG FIX: both branches previously trained on the FULL dataset
        # (new_curves/new_geometry), leaking the validation rows into training
        # and making the best-checkpoint selection meaningless. Train on the
        # training split only, as train_forward_network does.
        mdl.fit(x_train, y_train, verbose=VERBOSE, epochs=epochs, shuffle=False, validation_data=(x_test, y_test),
                callbacks=[keras.callbacks.ModelCheckpoint(filepath=weights, verbose=VERBOSE, save_best_only=True)])
        # Save model structure
        with open(structure, 'w') as temp:
            temp.write(mdl.to_yaml())
    else:
        mdl.fit(x_train, y_train, verbose=VERBOSE, epochs=epochs, shuffle=False, validation_data=(x_test, y_test))
    if print_network:
        keras.utils.plot_model(mdl, to_file=plot, show_shapes=True)
    # BUG FIX: a checkpoint is only written when save_results is True; only
    # load it when the file actually exists (the original crashed otherwise).
    if os.path.isfile(weights):
        mdl.load_weights(weights)
    # Final check on metrics
    y_pred = mdl.predict(x_test)
    mse = keras.backend.mean(keras.losses.binary_crossentropy(y_pred, y_test)).eval()
    # BUG FIX: the constant baseline must use the mean of the TARGETS
    # (y_test geometry), not of the inputs (x_test curves) as before.
    y_pred.fill(numpy.mean(y_test.flatten()))
    s2 = keras.backend.mean(keras.losses.binary_crossentropy(y_pred, y_test)).eval()
    r2 = 1-mse/s2
    print("Final BCE: "+str(mse))
    print("Final S2: "+str(s2))
    print("Final R2: "+str(r2))
    return r2
def train_simple_inverse_network(epochs, save_results, print_network):
    """Train a plain (no pretrained layers) inverse network mapping flattened
    response curves (192-d) to flattened voxel geometry (32768-d).

    Args:
        epochs: Number of training epochs.
        save_results: If True, checkpoint the best weights and save the
            model structure (YAML).
        print_network: If True, plot the model diagram to an EPS file.

    Returns:
        Pseudo-R^2 on the test split:
        1 - BCE(model) / BCE(constant-mean baseline).
    """
    curves, geometry, S, N, D, F, G, new_curves, new_geometry = load_data()
    # Define model: widths double at each layer up to the voxel-grid output.
    x = keras.layers.Input(shape=(192,))
    de1 = keras.layers.Dense(384, activation='relu')(x)
    de2 = keras.layers.Dense(768, activation='relu')(de1)
    con = keras.layers.Dense(1536, activation='relu')(de2)
    dd2 = keras.layers.Dense(3072, activation='relu')(con)
    y = keras.layers.Dense(32768, activation='sigmoid')(dd2)
    # Build and compile model
    mdl = keras.models.Model(x, y)
    mdl.compile(optimizer='rmsprop', loss='binary_crossentropy')
    # Make file names
    weights = pkg_resources.resource_filename('WAnet', 'trained_models/simple_inverse_weights.h5')
    structure = pkg_resources.resource_filename('WAnet', 'trained_models/simple_inverse_structure.yml')
    plot = pkg_resources.resource_filename('WAnet', 'figures/simple_inverse.eps')
    # Deterministic split so all runs evaluate on the same held-out rows.
    x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(new_curves, new_geometry, shuffle=False)
    if save_results:
        # BUG FIX: both branches previously trained on the FULL dataset,
        # leaking the validation rows into training. Train on the training
        # split only, consistent with train_simple_forward_network's
        # save-branch.
        mdl.fit(x_train, y_train, verbose=VERBOSE, epochs=epochs, shuffle=False, validation_data=(x_test, y_test),
                callbacks=[keras.callbacks.ModelCheckpoint(filepath=weights, verbose=VERBOSE, save_best_only=True)])
        # Save model structure
        with open(structure, 'w') as temp:
            temp.write(mdl.to_yaml())
    else:
        mdl.fit(x_train, y_train, verbose=VERBOSE, epochs=epochs, shuffle=False, validation_data=(x_test, y_test))
    if print_network:
        keras.utils.plot_model(mdl, to_file=plot, show_shapes=True)
    # BUG FIX: a checkpoint is only written when save_results is True; only
    # load it when the file actually exists (the original crashed otherwise).
    if os.path.isfile(weights):
        mdl.load_weights(weights)
    # Final check on metrics
    y_pred = mdl.predict(x_test)
    mse = keras.backend.mean(keras.losses.binary_crossentropy(y_pred, y_test)).eval()
    # BUG FIX: the constant baseline must use the mean of the TARGETS
    # (y_test geometry), not of the inputs (x_test curves) as before.
    y_pred.fill(numpy.mean(y_test.flatten()))
    s2 = keras.backend.mean(keras.losses.binary_crossentropy(y_pred, y_test)).eval()
    r2 = 1-mse/s2
    print("Final BCE: "+str(mse))
    print("Final S2: "+str(s2))
    print("Final R2: "+str(r2))
    return r2
def train_simple_forward_network(epochs, save_results, print_network):
    """Train a plain (no pretrained layers) forward network mapping flattened
    voxel geometry (32768-d) to flattened response curves (192-d).

    Args:
        epochs: Number of training epochs.
        save_results: If True, checkpoint the best weights and save the
            model structure (YAML).
        print_network: If True, plot the model diagram to an EPS file.

    Returns:
        Pseudo-R^2 on the test split: 1 - MSE(model) / variance of targets.
    """
    curves, geometry, S, N, D, F, G, new_curves, new_geometry = load_data()
    # Define model: widths halve at each layer down to the curve output.
    x = keras.layers.Input(shape=(32768,))
    de1 = keras.layers.Dense(3072, activation='relu')(x)
    de2 = keras.layers.Dense(1536, activation='relu')(de1)
    con = keras.layers.Dense(768, activation='relu')(de2)
    dd2 = keras.layers.Dense(384, activation='relu')(con)
    y = keras.layers.Dense(192, activation='sigmoid')(dd2)
    # Build and compile model
    mdl = keras.models.Model(x, y)
    mdl.compile(optimizer='rmsprop', loss='mse')
    # Make file names
    weights = pkg_resources.resource_filename('WAnet', 'trained_models/simple_forward_weights.h5')
    structure = pkg_resources.resource_filename('WAnet', 'trained_models/simple_forward_structure.yml')
    plot = pkg_resources.resource_filename('WAnet', 'figures/simple_forward.eps')
    # Deterministic split so all runs evaluate on the same held-out rows.
    x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(new_geometry, new_curves, shuffle=False)
    if save_results:
        mdl.fit(x_train, y_train, verbose=VERBOSE, epochs=epochs, shuffle=False, validation_data=(x_test, y_test),
                callbacks=[keras.callbacks.ModelCheckpoint(filepath=weights, verbose=VERBOSE, save_best_only=True)])
        # Save model structure
        with open(structure, 'w') as temp:
            temp.write(mdl.to_yaml())
    else:
        # BUG FIX: this branch previously trained on the FULL dataset,
        # leaking the validation rows into training. Train on the training
        # split only, as the branch above does.
        mdl.fit(x_train, y_train, verbose=VERBOSE, epochs=epochs, shuffle=False, validation_data=(x_test, y_test))
    if print_network:
        keras.utils.plot_model(mdl, to_file=plot, show_shapes=True)
    # BUG FIX: a checkpoint is only written when save_results is True; only
    # load it when the file actually exists (the original crashed otherwise).
    if os.path.isfile(weights):
        mdl.load_weights(weights)
    # Final check on metrics
    y_pred = mdl.predict(x_test)
    s2 = numpy.mean(numpy.power(numpy.mean(y_test.flatten()) - y_test.flatten(), 2))
    mse = keras.backend.mean(keras.losses.mean_squared_error(y_pred, y_test)).eval()
    r2 = 1 - mse / s2
    print("Final MSE: " + str(mse))
    print("Final S2: " + str(s2))
    print("Final R2: " + str(r2))
    return r2
| 45.47929
| 166
| 0.69876
| 3,151
| 23,058
| 4.87496
| 0.074897
| 0.031639
| 0.05859
| 0.082026
| 0.960875
| 0.95736
| 0.952803
| 0.935551
| 0.935551
| 0.931254
| 0
| 0.012516
| 0.171871
| 23,058
| 507
| 167
| 45.47929
| 0.791935
| 0.071645
| 0
| 0.70028
| 0
| 0
| 0.108551
| 0.047298
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042017
| false
| 0
| 0.014006
| 0
| 0.098039
| 0.084034
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
763e9aee578d7770d8934bb15401538afa02f839
| 1,067
|
py
|
Python
|
tests/integrate/test_login.py
|
NTUT-108-SE/CMS-Backend
|
1daa37960ba61f935da5174516f0a2e411b68706
|
[
"MIT"
] | null | null | null |
tests/integrate/test_login.py
|
NTUT-108-SE/CMS-Backend
|
1daa37960ba61f935da5174516f0a2e411b68706
|
[
"MIT"
] | null | null | null |
tests/integrate/test_login.py
|
NTUT-108-SE/CMS-Backend
|
1daa37960ba61f935da5174516f0a2e411b68706
|
[
"MIT"
] | 1
|
2019-10-12T02:48:16.000Z
|
2019-10-12T02:48:16.000Z
|
import pytest
def test_login_failed(user_client):
    """A login attempt with a wrong password must be rejected with 401."""
    payload = {'{"email": "admin@gmail.com", "password": "asd" }': ''}
    response = user_client.post("/login", data=payload)
    assert response.status_code == 401
def test_login_None(user_client):
    """A login attempt missing the password field must be rejected with 401."""
    payload = {'{"email": "admin@gmail.com" }': ''}
    response = user_client.post("/login", data=payload)
    assert response.status_code == 401
def test_login(user_client):
    """A valid login returns 200 and echoes the user's email back."""
    payload = {'{"email": "admin@gmail.com", "password": "admin" }': ''}
    response = user_client.post("/login", data=payload)
    assert response.status_code == 200
    assert response.json['user']['email'] == "admin@gmail.com"
def test_logout(user_client):
    """Logging out after a successful login returns 200."""
    test_login(user_client)  # establish an authenticated session first
    response = user_client.get("/logout")
    assert response.status_code == 200
def test_logout_before_login(user_client):
    """Logging out without an authenticated session must return 401."""
    response = user_client.get("/logout")
    assert response.status_code == 401
def test_check_failed(user_client):
    """Hitting /check without an authenticated session must return 401."""
    response = user_client.get("/check")
    assert response.status_code == 401
def test_check_success(user_client):
    """Hitting /check with an authenticated session returns 200.

    BUG FIX: this function was originally also named ``test_check_failed``,
    shadowing the earlier definition of that name so pytest silently dropped
    the unauthenticated /check case. Renamed to restore both tests.
    """
    test_login(user_client)  # establish an authenticated session first
    res = user_client.get("/check")
    assert res.status_code == 200
| 25.404762
| 99
| 0.663543
| 148
| 1,067
| 4.527027
| 0.189189
| 0.238806
| 0.135821
| 0.177612
| 0.846269
| 0.804478
| 0.795522
| 0.795522
| 0.69403
| 0.69403
| 0
| 0.023756
| 0.171509
| 1,067
| 41
| 100
| 26.02439
| 0.734163
| 0
| 0
| 0.555556
| 0
| 0
| 0.182755
| 0
| 0
| 0
| 0
| 0
| 0.296296
| 1
| 0.259259
| false
| 0.074074
| 0.037037
| 0
| 0.296296
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
769a2a877b701c243cbc22ad5356f2028e91a22f
| 11,808
|
py
|
Python
|
test/test_raw_data_manifest_manager.py
|
feiga/fedlearner
|
99a19934b872a9fba6d85ae018b0ec145612fbca
|
[
"Apache-2.0"
] | null | null | null |
test/test_raw_data_manifest_manager.py
|
feiga/fedlearner
|
99a19934b872a9fba6d85ae018b0ec145612fbca
|
[
"Apache-2.0"
] | null | null | null |
test/test_raw_data_manifest_manager.py
|
feiga/fedlearner
|
99a19934b872a9fba6d85ae018b0ec145612fbca
|
[
"Apache-2.0"
] | 1
|
2020-04-09T07:50:55.000Z
|
2020-04-09T07:50:55.000Z
|
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import unittest
from fedlearner.common import etcd_client
from fedlearner.common import data_join_service_pb2 as dj_pb
from fedlearner.common import common_pb2 as common_pb
from fedlearner.data_join import raw_data_manifest_manager
class TestRawDataManifestManager(unittest.TestCase):
    """Integration test for RawDataManifestManager against a live etcd.

    Requires an etcd server reachable at localhost:2379; the test walks a
    manifest through its full lifecycle (unsynced/unjoined -> allocated ->
    raw data added -> sync finished -> join finished) and verifies the
    state of every partition at each step.
    """

    def test_raw_data_manifest_manager(self):
        # --- Setup: connect to etcd and create a fresh 4-partition data source.
        cli = etcd_client.EtcdClient('test_cluster', 'localhost:2379',
                                     'fedlearner', True)
        partition_num = 4
        rank_id = 2
        data_source = common_pb.DataSource()
        data_source.data_source_meta.name = "milestone-x"
        data_source.data_source_meta.partition_num = partition_num
        data_source.role = common_pb.FLRole.Leader
        # Wipe any leftover state from earlier runs.
        cli.delete_prefix(data_source.data_source_meta.name)
        manifest_manager = raw_data_manifest_manager.RawDataManifestManager(
            cli, data_source)
        # --- Initial state: every partition unsynced, unjoined, unfinished.
        manifest_map = manifest_manager.list_all_manifest()
        for i in range(partition_num):
            self.assertTrue(i in manifest_map)
            self.assertEqual(
                manifest_map[i].sync_example_id_rep.state,
                dj_pb.SyncExampleIdState.UnSynced
            )
            self.assertEqual(manifest_map[i].sync_example_id_rep.rank_id, -1)
            self.assertEqual(
                manifest_map[i].join_example_rep.state,
                dj_pb.JoinExampleState.UnJoined
            )
            self.assertEqual(manifest_map[i].join_example_rep.rank_id, -1)
            self.assertFalse(manifest_map[i].finished)
        # --- Allocate a sync task; exactly one partition flips to Syncing.
        # (method name mirrors the API's actual spelling, including the typo)
        manifest = manifest_manager.alloc_sync_exampld_id(rank_id)
        self.assertNotEqual(manifest, None)
        partition_id = manifest.partition_id
        manifest_map = manifest_manager.list_all_manifest()
        for i in range(partition_num):
            self.assertTrue(i in manifest_map)
            if i != partition_id:
                # Untouched partitions stay in the initial state.
                self.assertEqual(
                    manifest_map[i].sync_example_id_rep.state,
                    dj_pb.SyncExampleIdState.UnSynced
                )
                self.assertEqual(manifest_map[i].sync_example_id_rep.rank_id, -1)
                self.assertEqual(
                    manifest_map[i].join_example_rep.state,
                    dj_pb.JoinExampleState.UnJoined
                )
                self.assertEqual(manifest_map[i].join_example_rep.rank_id, -1)
            else:
                # The allocated partition is Syncing under our rank.
                self.assertEqual(
                    manifest_map[i].sync_example_id_rep.state,
                    dj_pb.SyncExampleIdState.Syncing
                )
                self.assertEqual(manifest_map[i].sync_example_id_rep.rank_id, rank_id)
                self.assertEqual(
                    manifest_map[i].join_example_rep.state,
                    dj_pb.JoinExampleState.UnJoined
                )
                self.assertEqual(manifest_map[i].join_example_rep.rank_id, -1)
            self.assertFalse(manifest_map[i].finished)
        # --- Allocate a join task on a DIFFERENT partition under another rank.
        partition_id2 = 3 - partition_id
        rank_id2 = 100
        manifest = manifest_manager.alloc_join_example(rank_id2, partition_id2)
        manifest_map = manifest_manager.list_all_manifest()
        for i in range(partition_num):
            self.assertTrue(i in manifest_map)
            if i == partition_id:
                self.assertEqual(
                    manifest_map[i].sync_example_id_rep.state,
                    dj_pb.SyncExampleIdState.Syncing
                )
                self.assertEqual(manifest_map[i].sync_example_id_rep.rank_id, rank_id)
            else:
                self.assertEqual(
                    manifest_map[i].sync_example_id_rep.state,
                    dj_pb.SyncExampleIdState.UnSynced
                )
                self.assertEqual(manifest_map[i].sync_example_id_rep.rank_id, -1)
            if i == partition_id2:
                self.assertEqual(
                    manifest_map[i].join_example_rep.state,
                    dj_pb.JoinExampleState.Joining
                )
                self.assertEqual(manifest_map[i].join_example_rep.rank_id, rank_id2)
            else:
                self.assertEqual(
                    manifest_map[i].join_example_rep.state,
                    dj_pb.JoinExampleState.UnJoined
                )
                self.assertEqual(manifest_map[i].join_example_rep.rank_id, -1)
            self.assertFalse(manifest_map[i].finished)
        # --- Finishing tasks with wrong rank/partition combinations must raise.
        self.assertRaises(Exception, manifest_manager.finish_join_example,
                          rank_id, partition_id)
        self.assertRaises(Exception, manifest_manager.finish_join_example,
                          rank_id2, partition_id2)
        self.assertRaises(Exception, manifest_manager.finish_sync_example_id,
                          -rank_id, partition_id)
        self.assertRaises(Exception, manifest_manager.finish_sync_example_id,
                          rank_id2, partition_id2)
        # --- A second join allocation on the synced partition under rank 0.
        rank_id3 = 0
        manifest = manifest_manager.alloc_join_example(rank_id3, partition_id)
        manifest_map = manifest_manager.list_all_manifest()
        for i in range(partition_num):
            self.assertTrue(i in manifest_map)
            if i == partition_id:
                self.assertEqual(
                    manifest_map[i].sync_example_id_rep.state,
                    dj_pb.SyncExampleIdState.Syncing
                )
                self.assertEqual(manifest_map[i].sync_example_id_rep.rank_id, rank_id)
            else:
                self.assertEqual(
                    manifest_map[i].sync_example_id_rep.state,
                    dj_pb.SyncExampleIdState.UnSynced
                )
                self.assertEqual(manifest_map[i].sync_example_id_rep.rank_id, -1)
            if i == partition_id:
                self.assertEqual(
                    manifest_map[i].join_example_rep.state,
                    dj_pb.JoinExampleState.Joining
                )
                self.assertEqual(manifest_map[i].join_example_rep.rank_id, rank_id3)
            elif i == partition_id2:
                self.assertEqual(
                    manifest_map[i].join_example_rep.state,
                    dj_pb.JoinExampleState.Joining
                )
                self.assertEqual(manifest_map[i].join_example_rep.rank_id, rank_id2)
            else:
                self.assertEqual(
                    manifest_map[i].join_example_rep.state,
                    dj_pb.JoinExampleState.UnJoined
                )
                self.assertEqual(manifest_map[i].join_example_rep.rank_id, -1)
            self.assertFalse(manifest_map[i].finished)
        # NOTE(review): finishing the sync here is expected to fail — presumably
        # because the partition's raw data is not yet finished; confirm.
        self.assertRaises(Exception, manifest_manager.finish_sync_example_id,
                          rank_id, partition_id)
        # --- Add raw data files: duplicates must be rejected unless deduped.
        self.assertRaises(Exception, manifest_manager.add_raw_data,
                          partition_id, ['a', 'a', 'b'], False)
        manifest_manager.add_raw_data(partition_id, ['a', 'a', 'b'], True)
        manifest = manifest_manager.get_manifest(partition_id)
        # 'a' deduped: two unique files -> next_process_index advances to 2.
        self.assertEqual(manifest.next_process_index, 2)
        # Re-adding known files plus 'c' and 'd' advances the index to 4.
        manifest_manager.add_raw_data(partition_id, ['a', 'a', 'b', 'c', 'd'], True)
        manifest_map = manifest_manager.list_all_manifest()
        for i in range(partition_num):
            self.assertTrue(i in manifest_map)
            if i == partition_id:
                self.assertEqual(manifest_map[i].next_process_index, 4)
            else:
                self.assertEqual(manifest_map[i].next_process_index, 0)
        # --- Finish raw data (idempotent); further additions must raise.
        manifest_manager.finish_raw_data(partition_id)
        manifest_manager.finish_raw_data(partition_id)
        self.assertRaises(Exception, manifest_manager.add_raw_data, partition_id, 200)
        # --- Finish sync (idempotent); partition becomes Synced and finished.
        manifest_manager.finish_sync_example_id(rank_id, partition_id)
        manifest_manager.finish_sync_example_id(rank_id, partition_id)
        manifest_map = manifest_manager.list_all_manifest()
        for i in range(data_source.data_source_meta.partition_num):
            self.assertTrue(i in manifest_map)
            if i == partition_id:
                self.assertEqual(
                    manifest_map[i].sync_example_id_rep.state,
                    dj_pb.SyncExampleIdState.Synced
                )
                self.assertEqual(manifest_map[i].sync_example_id_rep.rank_id, rank_id)
                self.assertTrue(manifest_map[i].finished)
            else:
                self.assertEqual(
                    manifest_map[i].sync_example_id_rep.state,
                    dj_pb.SyncExampleIdState.UnSynced
                )
                self.assertEqual(manifest_map[i].sync_example_id_rep.rank_id, -1)
            if i == partition_id:
                self.assertEqual(
                    manifest_map[i].join_example_rep.state,
                    dj_pb.JoinExampleState.Joining
                )
                self.assertEqual(manifest_map[i].join_example_rep.rank_id, rank_id3)
            elif i == partition_id2:
                self.assertEqual(
                    manifest_map[i].join_example_rep.state,
                    dj_pb.JoinExampleState.Joining
                )
                self.assertEqual(manifest_map[i].join_example_rep.rank_id, rank_id2)
            else:
                self.assertEqual(
                    manifest_map[i].join_example_rep.state,
                    dj_pb.JoinExampleState.UnJoined
                )
                self.assertEqual(manifest_map[i].join_example_rep.rank_id, -1)
        # --- Finish the join (idempotent); partition becomes Joined.
        manifest_manager.finish_join_example(rank_id3, partition_id)
        manifest_manager.finish_join_example(rank_id3, partition_id)
        manifest_map = manifest_manager.list_all_manifest()
        for i in range(data_source.data_source_meta.partition_num):
            self.assertTrue(i in manifest_map)
            if i == partition_id:
                self.assertEqual(
                    manifest_map[i].sync_example_id_rep.state,
                    dj_pb.SyncExampleIdState.Synced
                )
                self.assertEqual(manifest_map[i].sync_example_id_rep.rank_id, rank_id)
            else:
                self.assertEqual(
                    manifest_map[i].sync_example_id_rep.state,
                    dj_pb.SyncExampleIdState.UnSynced
                )
                self.assertEqual(manifest_map[i].sync_example_id_rep.rank_id, -1)
            if i == partition_id:
                self.assertEqual(
                    manifest_map[i].join_example_rep.state,
                    dj_pb.JoinExampleState.Joined
                )
                self.assertEqual(manifest_map[i].join_example_rep.rank_id, rank_id3)
            elif i == partition_id2:
                self.assertEqual(
                    manifest_map[i].join_example_rep.state,
                    dj_pb.JoinExampleState.Joining
                )
                self.assertEqual(manifest_map[i].join_example_rep.rank_id, rank_id2)
            else:
                self.assertEqual(
                    manifest_map[i].join_example_rep.state,
                    dj_pb.JoinExampleState.UnJoined
                )
                self.assertEqual(manifest_map[i].join_example_rep.rank_id, -1)
        # --- Teardown (method name mirrors the API's actual spelling).
        cli.destory_client_pool()
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| 46.488189
| 86
| 0.607808
| 1,332
| 11,808
| 5.045796
| 0.120871
| 0.116203
| 0.101771
| 0.201161
| 0.805832
| 0.800774
| 0.787383
| 0.761494
| 0.752715
| 0.748847
| 0
| 0.007935
| 0.316904
| 11,808
| 253
| 87
| 46.671937
| 0.825316
| 0.05039
| 0
| 0.701754
| 0
| 0
| 0.005894
| 0
| 0
| 0
| 0
| 0
| 0.320175
| 1
| 0.004386
| false
| 0
| 0.02193
| 0
| 0.030702
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
76bce575ea23f076f84a05c65a04ffa3300bf4ad
| 2,697
|
py
|
Python
|
utils/utils.py
|
dawoeh/BioMedRxiv_API
|
27ca2f42466ffc5a7c0a46215a9df73fc5a9abd3
|
[
"MIT"
] | null | null | null |
utils/utils.py
|
dawoeh/BioMedRxiv_API
|
27ca2f42466ffc5a7c0a46215a9df73fc5a9abd3
|
[
"MIT"
] | null | null | null |
utils/utils.py
|
dawoeh/BioMedRxiv_API
|
27ca2f42466ffc5a7c0a46215a9df73fc5a9abd3
|
[
"MIT"
] | null | null | null |
import requests
import json
from time import sleep
import tqdm
import math
import pandas as pd
def date_rxiv(server, from_date, to_date, result_start=0):
    """Fetch all article details from the bioRxiv API for a date range.

    Pages through the /details endpoint in batches of 100 with a progress bar
    and a short delay between requests.

    Args:
        server: Server name interpolated into the URL (e.g. 'biorxiv').
        from_date: Start of the date range (format as the API expects it).
        to_date: End of the date range.
        result_start: Offset of the first record to fetch.

    Returns:
        pandas.DataFrame with one row per returned record (empty if none).
    """
    batch = 100  # page size used by the API
    frames = []
    pbar = None
    while True:
        response = requests.get(f'https://api.biorxiv.org/details/{server}/{from_date}/{to_date}/{result_start}')
        doc = response.json()
        if pbar is None:
            pbar = tqdm.tqdm(total=math.ceil(int(doc['messages'][0]['total'])/batch))
        if len(doc['collection']) == 0:
            break
        frames.append(pd.DataFrame(doc['collection']))
        # CONSISTENCY FIX: advance by the shared `batch` constant (previously a
        # hard-coded 100) like the other pagination helpers in this module.
        result_start += batch
        pbar.update(1)
        sleep(0.2)  # be polite to the API
    pbar.close()
    # PERF/COMPAT FIX: concatenate once at the end instead of the quadratic
    # per-page DataFrame.append, which was removed in pandas 2.0.
    if not frames:
        return pd.DataFrame()
    return pd.concat(frames, ignore_index=True)
def article_detail(server, doi):
    """Fetch the detail records for a single article identified by DOI.

    Args:
        server: API server name passed into the URL (e.g. 'biorxiv').
        doi: the article's DOI string.

    Returns:
        pandas.DataFrame with one row per record the API returned
        (presumably one per article version -- confirm against the API docs).
    """
    response = requests.get(f'https://api.biorxiv.org/details/{server}/{doi}')
    doc = response.json()
    # DataFrame.append was removed in pandas 2.0; appending a frame to an
    # empty frame and resetting the index is just building it directly.
    return pd.DataFrame(doc['collection']).reset_index(drop=True)
def date_published_article(from_date, to_date, result_start=0):
    """Fetch published-article records between two dates, paging by 100.

    Repeatedly queries the bioRxiv `pub` endpoint, advancing the cursor one
    API page (100 records) at a time until an empty collection is returned.

    Args:
        from_date: start date string, presumably YYYY-MM-DD -- confirm
            against the API docs.
        to_date: end date string.
        result_start: cursor offset to resume paging from (default 0).

    Returns:
        pandas.DataFrame with one row per published-article record (empty
        if the API returned no records).
    """
    batch = 100  # API page size
    frames = []
    pbar = None
    while True:
        response = requests.get(
            f'https://api.biorxiv.org/pub/{from_date}/{to_date}/{result_start}')
        doc = response.json()
        if pbar is None:
            # Total pages = total record count / page size.
            pbar = tqdm.tqdm(total=math.ceil(int(doc['messages'][0]['total']) / batch))
        if len(doc['collection']) == 0:
            break
        frames.append(pd.DataFrame(doc['collection']))
        result_start += batch
        pbar.update(1)
        sleep(0.2)  # throttle to be polite to the API
    pbar.close()
    # DataFrame.append was removed in pandas 2.0; concatenate the pages.
    if not frames:
        return pd.DataFrame()
    return pd.concat(frames, ignore_index=True)
def date_publisher_detail(publisher_id, from_date, to_date, result_start=0):
    """Fetch a publisher's article records between two dates, paging by 100.

    Repeatedly queries the bioRxiv `publisher` endpoint, advancing the
    cursor one API page (100 records) at a time until an empty collection
    is returned.

    Args:
        publisher_id: publisher identifier inserted into the URL.
        from_date: start date string, presumably YYYY-MM-DD -- confirm
            against the API docs.
        to_date: end date string.
        result_start: cursor offset to resume paging from (default 0).

    Returns:
        pandas.DataFrame with one row per record (empty if the API
        returned no records).
    """
    batch = 100  # API page size
    frames = []
    pbar = None
    while True:
        response = requests.get(
            f'https://api.biorxiv.org/publisher/{publisher_id}/{from_date}/{to_date}/{result_start}')
        doc = response.json()
        if pbar is None:
            # Total pages = total record count / page size.
            pbar = tqdm.tqdm(total=math.ceil(int(doc['messages'][0]['total']) / batch))
        if len(doc['collection']) == 0:
            break
        frames.append(pd.DataFrame(doc['collection']))
        result_start += batch
        pbar.update(1)
        sleep(0.2)  # throttle to be polite to the API
    pbar.close()
    # DataFrame.append was removed in pandas 2.0; concatenate the pages.
    if not frames:
        return pd.DataFrame()
    return pd.concat(frames, ignore_index=True)
def biorxiv_stats(interval='m'):
    """Fetch bioRxiv content statistics summarized by interval.

    Args:
        interval: summary interval code forwarded to the API
            (default 'm', presumably monthly -- confirm against the API docs).

    Returns:
        pandas.DataFrame built from the API's
        'bioRxiv content statistics' table.
    """
    response = requests.get(f'https://api.biorxiv.org/sum/{interval}')
    doc = response.json()
    # DataFrame.append was removed in pandas 2.0; appending to an empty
    # frame and resetting the index is just building the frame directly.
    return pd.DataFrame(doc['bioRxiv content statistics']).reset_index(drop=True)
| 34.139241
| 121
| 0.618465
| 356
| 2,697
| 4.570225
| 0.191011
| 0.067609
| 0.036878
| 0.051629
| 0.859865
| 0.859865
| 0.836509
| 0.832821
| 0.819299
| 0.783651
| 0
| 0.014613
| 0.238784
| 2,697
| 79
| 122
| 34.139241
| 0.777886
| 0
| 0
| 0.75
| 0
| 0
| 0.165308
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069444
| false
| 0
| 0.083333
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4f5df69aa9de018222c7713fcedc0ff6c1f4b20e
| 710
|
py
|
Python
|
lib/workflow/rule/tag_multi.py
|
auho/python-ETL
|
761589814b04e076ba6fa1c0e64b83ce62ce8556
|
[
"Apache-2.0"
] | null | null | null |
lib/workflow/rule/tag_multi.py
|
auho/python-ETL
|
761589814b04e076ba6fa1c0e64b83ce62ce8556
|
[
"Apache-2.0"
] | null | null | null |
lib/workflow/rule/tag_multi.py
|
auho/python-ETL
|
761589814b04e076ba6fa1c0e64b83ce62ce8556
|
[
"Apache-2.0"
] | null | null | null |
from . import tag
class TagRule(tag.TagRule):
    """Tag rule variant whose inserts go through the multi-insert path."""

    def _main(self):
        # Register the shared underline/symbol keyword function from the
        # base tag module.
        self._keywordFunList.append(tag.symbol_underline_fun)

    def get_keys(self):
        # Output columns: the keyword column, a per-rule keyword-count
        # alias, then the tag key columns.
        count_key = self._get_alias(f"{self._name}_keyword_num")
        return [self._keywordName, count_key] + self.get_tags_keys()

    def tag_insert(self, content):
        return self._tag_multi_insert(content=content)
class TagRuleEveryKeyword(tag.TagRule):
    """Tag rule variant that multi-inserts once per matched keyword."""

    def _main(self):
        # Register the shared underline/symbol keyword function from the
        # base tag module.
        self._keywordFunList.append(tag.symbol_underline_fun)

    def get_keys(self):
        # Output columns: the keyword column, a per-rule keyword-count
        # alias, then the tag key columns.
        count_key = self._get_alias(f"{self._name}_keyword_num")
        return [self._keywordName, count_key] + self.get_tags_keys()

    def tag_insert(self, content):
        return self._tag_multi_insert_every_keyword(content=content)
| 29.583333
| 103
| 0.722535
| 95
| 710
| 5.021053
| 0.294737
| 0.083857
| 0.054507
| 0.071279
| 0.813417
| 0.813417
| 0.813417
| 0.813417
| 0.813417
| 0.813417
| 0
| 0
| 0.166197
| 710
| 23
| 104
| 30.869565
| 0.805743
| 0
| 0
| 0.666667
| 0
| 0
| 0.067606
| 0.067606
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.066667
| 0.266667
| 0.866667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 8
|
96e50fb0fcee2f9625b7316c1e0d1cda524f9e76
| 6,229
|
py
|
Python
|
tikv_client/__init__.py
|
gotoxu/client-py
|
1f3158e9a0a88e10afb89078b15ddf1b1d11c7dc
|
[
"Apache-2.0"
] | null | null | null |
tikv_client/__init__.py
|
gotoxu/client-py
|
1f3158e9a0a88e10afb89078b15ddf1b1d11c7dc
|
[
"Apache-2.0"
] | null | null | null |
tikv_client/__init__.py
|
gotoxu/client-py
|
1f3158e9a0a88e10afb89078b15ddf1b1d11c7dc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
import asyncio
from . import asynchronous
class RawClient:
    """Synchronous facade over ``asynchronous.RawClient``.

    Every method drives the matching coroutine of the wrapped async client
    to completion on the current event loop and returns its result.
    """

    def __init__(self):
        # Direct construction is unsupported; use the connect() classmethod,
        # which builds the inner async client first.
        raise Exception("Please use `RawClient.connect()` instead.")

    @staticmethod
    def _run(coro):
        # Block the calling thread until the coroutine finishes.
        return asyncio.get_event_loop().run_until_complete(coro)

    @classmethod
    def connect(cls, pd_endpoint):
        """Connect to the given PD endpoint and return a ready client."""
        inner = cls._run(asynchronous.RawClient.connect(pd_endpoint))
        self = cls.__new__(cls)
        self.inner = inner
        return self

    def get(self, key, cf="default"):
        """Return the value stored at ``key`` in column family ``cf``."""
        return self._run(self.inner.get(key, cf))

    def batch_get(self, keys, cf="default"):
        """Return the values for several keys at once."""
        return self._run(self.inner.batch_get(keys, cf))

    def scan(self, start, end, limit, include_start=True, include_end=False, cf="default"):
        """Scan up to ``limit`` key/value pairs between ``start`` and ``end``."""
        return self._run(self.inner.scan(start, end, limit, include_start, include_end, cf))

    def scan_keys(self, start, end, limit, include_start=True, include_end=False, cf="default"):
        """Scan up to ``limit`` keys (no values) between ``start`` and ``end``."""
        return self._run(self.inner.scan_keys(start, end, limit, include_start, include_end, cf))

    def put(self, key, value, cf="default"):
        """Store ``value`` at ``key``."""
        self._run(self.inner.put(key, value, cf))

    def batch_put(self, pairs, cf="default"):
        """Store many key/value pairs at once.

        BUG FIX: this previously forwarded to ``inner.put``, which takes a
        single key/value pair, so batch writes were broken; it now calls
        ``inner.batch_put`` like every other method calls its same-named
        counterpart.
        """
        self._run(self.inner.batch_put(pairs, cf))

    def delete(self, key, cf="default"):
        """Remove the value stored at ``key``."""
        self._run(self.inner.delete(key, cf))

    def batch_delete(self, keys, cf="default"):
        """Remove several keys at once."""
        return self._run(self.inner.batch_delete(keys, cf))

    def delete_range(self, start, end=None, include_start=True, include_end=False, cf="default"):
        """Remove every key between ``start`` and ``end``."""
        return self._run(self.inner.delete_range(start, end, include_start, include_end, cf))
class TransactionClient:
    """Synchronous facade over ``asynchronous.TransactionClient``."""

    def __init__(self):
        # Instances are only created through the connect() classmethod.
        raise Exception("Please use `TransactionClient.connect()` instead.")

    @classmethod
    def connect(cls, pd_endpoint):
        """Connect to the given PD endpoint and return a ready client."""
        loop = asyncio.get_event_loop()
        inner = loop.run_until_complete(
            asynchronous.TransactionClient.connect(pd_endpoint))
        client = cls.__new__(cls)
        client.inner = inner
        return client

    def begin(self, pessimistic=False):
        """Start a new transaction (pessimistic when requested)."""
        loop = asyncio.get_event_loop()
        txn = loop.run_until_complete(self.inner.begin(pessimistic))
        return Transaction(txn)

    def current_timestamp(self):
        """Fetch the current timestamp from the cluster."""
        loop = asyncio.get_event_loop()
        return loop.run_until_complete(self.inner.current_timestamp())

    def snapshot(self, timestamp, pessimistic):
        """Create a read-only snapshot at the given timestamp."""
        return Snapshot(self.inner.snapshot(timestamp, pessimistic))
class Snapshot:
    """Synchronous read-only view over an async TiKV snapshot."""

    def __init__(self, inner):
        self.inner = inner

    def _wait(self, coro):
        # Drive one coroutine of the wrapped async snapshot to completion.
        return asyncio.get_event_loop().run_until_complete(coro)

    def get(self, key):
        """Read the value stored at ``key``."""
        return self._wait(self.inner.get(key))

    def key_exists(self, key):
        """Return whether ``key`` is present."""
        return self._wait(self.inner.key_exists(key))

    def batch_get(self, keys):
        """Read the values for several keys at once."""
        return self._wait(self.inner.batch_get(keys))

    def scan(self, start, end, limit, include_start=True, include_end=False):
        """Scan up to ``limit`` key/value pairs between ``start`` and ``end``,
        honoring the endpoint-inclusion flags."""
        return self._wait(self.inner.scan(start, end, limit, include_start, include_end))

    def scan_keys(self, start, end, limit, include_start=True, include_end=False):
        """Scan up to ``limit`` keys (no values) between ``start`` and ``end``,
        honoring the endpoint-inclusion flags."""
        return self._wait(self.inner.scan_keys(start, end, limit, include_start, include_end))
class Transaction:
    """Synchronous wrapper around an async TiKV transaction."""

    def __init__(self, inner):
        self.inner = inner

    def _wait(self, coro):
        # Drive one coroutine of the wrapped async transaction to completion.
        return asyncio.get_event_loop().run_until_complete(coro)

    def get(self, key):
        """Read the value stored at ``key``."""
        return self._wait(self.inner.get(key))

    def get_for_update(self, key):
        """Read ``key`` while taking it for update."""
        return self._wait(self.inner.get_for_update(key))

    def key_exists(self, key):
        """Return whether ``key`` is present."""
        return self._wait(self.inner.key_exists(key))

    def batch_get(self, keys):
        """Read the values for several keys at once."""
        return self._wait(self.inner.batch_get(keys))

    def batch_get_for_update(self, keys):
        """Read several keys while taking them for update."""
        return self._wait(self.inner.batch_get_for_update(keys))

    def scan(self, start, end, limit, include_start=True, include_end=False):
        """Scan up to ``limit`` key/value pairs between ``start`` and ``end``,
        honoring the endpoint-inclusion flags."""
        return self._wait(self.inner.scan(start, end, limit, include_start, include_end))

    def scan_keys(self, start, end, limit, include_start=True, include_end=False):
        """Scan up to ``limit`` keys (no values) between ``start`` and ``end``,
        honoring the endpoint-inclusion flags."""
        return self._wait(self.inner.scan_keys(start, end, limit, include_start, include_end))

    def lock_keys(self, keys):
        """Lock the given keys for this transaction."""
        self._wait(self.inner.lock_keys(keys))

    def put(self, key, value):
        """Store ``value`` at ``key`` within the transaction."""
        self._wait(self.inner.put(key, value))

    def insert(self, key, value):
        """Insert ``value`` at ``key`` within the transaction."""
        self._wait(self.inner.insert(key, value))

    def delete(self, key):
        """Remove ``key`` within the transaction."""
        self._wait(self.inner.delete(key))

    def commit(self):
        """Commit the transaction."""
        self._wait(self.inner.commit())
| 38.450617
| 117
| 0.700915
| 852
| 6,229
| 4.806338
| 0.080986
| 0.197802
| 0.117216
| 0.139194
| 0.842247
| 0.828571
| 0.828571
| 0.793651
| 0.78779
| 0.786081
| 0
| 0.001196
| 0.194895
| 6,229
| 161
| 118
| 38.689441
| 0.815354
| 0.010114
| 0
| 0.566667
| 0
| 0
| 0.024822
| 0.008112
| 0
| 0
| 0
| 0
| 0
| 1
| 0.291667
| false
| 0
| 0.016667
| 0
| 0.533333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8c14f9519e17251dad6656b2f31d302f266612ef
| 6,701
|
py
|
Python
|
quick_sort2.py
|
RitRa/Algorithms-project-
|
d9de6437be9fa3424aeec08cb8cf42bc377c5813
|
[
"MIT"
] | null | null | null |
quick_sort2.py
|
RitRa/Algorithms-project-
|
d9de6437be9fa3424aeec08cb8cf42bc377c5813
|
[
"MIT"
] | null | null | null |
quick_sort2.py
|
RitRa/Algorithms-project-
|
d9de6437be9fa3424aeec08cb8cf42bc377c5813
|
[
"MIT"
] | null | null | null |
# importing the random numbers
from randomnumber import *
# Quick Sort
def printalistay(alist):
    """Return the list's items joined into a single space-separated string."""
    return ' '.join(str(item) for item in alist)


def quicksort(alist, i, j):
    """Sort alist[i..j] (inclusive) in place via recursive quicksort."""
    if i < j:
        pos = partition(alist, i, j)
        quicksort(alist, i, pos - 1)
        quicksort(alist, pos + 1, j)


def partition(alist, i, j):
    """Partition alist[i..j] around its middle element (Lomuto scheme).

    Returns the pivot's final index; elements <= pivot end up to its left
    and larger elements to its right.

    BUG FIX: the original read the pivot *value* from the middle of the
    list but still swapped alist[j] into the pivot slot, so the returned
    position did not actually separate the two partitions and inputs such
    as [2, 3, 1] were left unsorted. The middle element (of the subrange,
    not just int(j/2)) is now moved to the end first, which makes the
    Lomuto scheme correct.
    """
    mid = (i + j) // 2
    swap(alist, mid, j)        # move the chosen pivot to the end
    pivot = alist[j]
    small = i - 1
    for k in range(i, j):
        if alist[k] <= pivot:
            small += 1
            swap(alist, k, small)
    swap(alist, j, small + 1)  # place the pivot between the partitions
    print ("Pivot = " + str(alist[small + 1]), " alist = " + printalistay(alist))
    return small + 1


def swap(alist, i, j):
    """Exchange the elements at positions i and j in place."""
    alist[i], alist[j] = alist[j], alist[i]
#if __name__ == '__main__':
# alist = [9, 4, 8, 3, 1, 2, 5]
# print (" Initial alistay :",printalistay(alist))
# quicksort(alist, 0, len(alist) - 1)
# import time module
import time

# Number of timed repetitions per input list.
num_runs = 10
# Accumulates every individual run's elapsed time; note it is never
# cleared between lists (see benchmark_quick).
results = []
# One average (rounded to 3 decimals) appended per benchmarked list.
quicksort_avglist = []
def benchmark_quick():
    """Time quicksort over each of the pre-generated random lists.

    Each list (alist, alist1..alist13 from the `randomnumber` module) is
    sorted ``num_runs`` times; every elapsed time is appended to the
    module-level ``results`` list and one average per list is appended to
    ``quicksort_avglist``, which is printed at the end.

    NOTE(review): as in the original, ``results`` is never cleared between
    lists, so each "average" is sum(all results so far) / num_runs — a
    cumulative figure rather than a per-list mean. Preserved for output
    compatibility; confirm whether a per-list average was intended.
    """

    def _time_one(data):
        # One full benchmark cycle for a single input list.
        for _ in range(num_runs):
            start_time = time.time()
            # BUG FIX: the original always passed len(alist) - 1 as the
            # upper bound even when sorting alist1..alist13, so any longer
            # list was only partially sorted; use the list's own length.
            quicksort(data, 0, len(data) - 1)
            results.append(time.time() - start_time)
        # round to 3 decimals (cumulative across lists; see NOTE above)
        average = round(sum(results) / num_runs, 3)
        quicksort_avglist.append(average)

    for data in (alist, alist1, alist2, alist3, alist4, alist5, alist6,
                 alist7, alist8, alist9, alist10, alist11, alist12, alist13):
        _time_one(data)
    print(quicksort_avglist)


benchmark_quick()
| 27.463115
| 79
| 0.584241
| 860
| 6,701
| 4.390698
| 0.094186
| 0.148305
| 0.133475
| 0.039725
| 0.818591
| 0.818591
| 0.810911
| 0.810911
| 0.810911
| 0.810911
| 0
| 0.016978
| 0.296821
| 6,701
| 244
| 80
| 27.463115
| 0.78438
| 0.102074
| 0
| 0.754491
| 0
| 0
| 0.003058
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02994
| false
| 0
| 0.011976
| 0.005988
| 0.053892
| 0.017964
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8c36d746f722700e291437f166c23acae2412cb6
| 129,264
|
py
|
Python
|
simplema/jessetkdata/dnafiles/ETH-USDT 2019-12-20 2021-11-12.py
|
ysdede/jesse_strategies
|
ade9f4ba42cec11207c766d267b9d8feb8bce648
|
[
"CC0-1.0"
] | 38
|
2021-09-18T15:33:28.000Z
|
2022-02-21T17:29:08.000Z
|
simplema/jessetkdata/dnafiles/ETH-USDT 2019-12-20 2021-11-12.py
|
ysdede/jesse_strategies
|
ade9f4ba42cec11207c766d267b9d8feb8bce648
|
[
"CC0-1.0"
] | 4
|
2022-01-02T14:46:12.000Z
|
2022-02-16T18:39:41.000Z
|
simplema/jessetkdata/dnafiles/ETH-USDT 2019-12-20 2021-11-12.py
|
ysdede/jesse_strategies
|
ade9f4ba42cec11207c766d267b9d8feb8bce648
|
[
"CC0-1.0"
] | 11
|
2021-10-19T06:21:43.000Z
|
2022-02-21T17:29:10.000Z
|
dnas = [
['pX3Eo\\]', 38, 146, 1069.71, 38, 26, -27.55, {'qty_to_risk': 7, 'target_pnl': 253, 'stop': 35, 'donchlen': 78, 'treshold': 92, 'ema_fast': 14, 'ema_slow': 40}],
['k[3Eo\\]', 38, 146, 1030.13, 38, 26, -27.55, {'qty_to_risk': 7, 'target_pnl': 267, 'stop': 35, 'donchlen': 78, 'treshold': 92, 'ema_fast': 14, 'ema_slow': 40}],
['kP3Eo\\e', 37, 139, 1082.78, 35, 28, -29.22, {'qty_to_risk': 7, 'target_pnl': 215, 'stop': 35, 'donchlen': 78, 'treshold': 92, 'ema_fast': 14, 'ema_slow': 43}],
['FJa00oP', 28, 98, 365.62, 33, 12, 16.07, {'qty_to_risk': 4, 'target_pnl': 186, 'stop': 140, 'donchlen': 27, 'treshold': 28, 'ema_fast': 18, 'ema_slow': 36}],
['<Q4g6_w', 39, 82, 155.73, 33, 12, -9.72, {'qty_to_risk': 4, 'target_pnl': 220, 'stop': 37, 'donchlen': 161, 'treshold': 34, 'ema_fast': 15, 'ema_slow': 50}],
['vTJpoL_', 30, 128, 1745.08, 35, 14, -19.59, {'qty_to_risk': 8, 'target_pnl': 234, 'stop': 87, 'donchlen': 183, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 41}],
['\\VkQcqv', 33, 84, 413.65, 35, 14, -4.28, {'qty_to_risk': 6, 'target_pnl': 243, 'stop': 163, 'donchlen': 108, 'treshold': 80, 'ema_fast': 19, 'ema_slow': 50}],
[':Wso^YR', 33, 113, 346.06, 30, 13, -8.96, {'qty_to_risk': 3, 'target_pnl': 248, 'stop': 181, 'donchlen': 181, 'treshold': 75, 'ema_fast': 13, 'ema_slow': 36}],
[']Vdom\\a', 33, 103, 707.66, 33, 12, -28.46, {'qty_to_risk': 6, 'target_pnl': 243, 'stop': 147, 'donchlen': 181, 'treshold': 90, 'ema_fast': 14, 'ema_slow': 42}],
['vP3Ep\\g', 36, 141, 1354.18, 29, 27, -35.49, {'qty_to_risk': 8, 'target_pnl': 215, 'stop': 35, 'donchlen': 78, 'treshold': 93, 'ema_fast': 14, 'ema_slow': 44}],
['7\\lUAmD', 37, 88, 205.96, 28, 14, -14.69, {'qty_to_risk': 3, 'target_pnl': 272, 'stop': 165, 'donchlen': 117, 'treshold': 45, 'ema_fast': 18, 'ema_slow': 31}],
['v[JpoL_', 30, 128, 2365.3, 35, 14, -19.59, {'qty_to_risk': 8, 'target_pnl': 267, 'stop': 87, 'donchlen': 183, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 41}],
['vP3EpJl', 36, 152, 1098.34, 36, 25, -23.1, {'qty_to_risk': 8, 'target_pnl': 215, 'stop': 35, 'donchlen': 78, 'treshold': 93, 'ema_fast': 10, 'ema_slow': 46}],
['[QIuPr7', 36, 110, 1317.14, 15, 13, -40.34, {'qty_to_risk': 6, 'target_pnl': 220, 'stop': 85, 'donchlen': 195, 'treshold': 61, 'ema_fast': 19, 'ema_slow': 27}],
['vXJpoL_', 30, 128, 2498.01, 35, 14, -19.59, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 87, 'donchlen': 183, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 41}],
['8=Hq`W7', 33, 130, 175.83, 37, 16, 11.82, {'qty_to_risk': 3, 'target_pnl': 125, 'stop': 83, 'donchlen': 185, 'treshold': 77, 'ema_fast': 13, 'ema_slow': 27}],
['Mlo/YnH', 35, 110, 1170.47, 38, 18, 57.63, {'qty_to_risk': 5, 'target_pnl': 348, 'stop': 172, 'donchlen': 25, 'treshold': 70, 'ema_fast': 18, 'ema_slow': 33}],
['DOLnrsd', 39, 87, 351.48, 33, 12, -20.77, {'qty_to_risk': 4, 'target_pnl': 210, 'stop': 92, 'donchlen': 178, 'treshold': 95, 'ema_fast': 19, 'ema_slow': 43}],
['vP3Ea\\l', 35, 137, 1265.15, 30, 26, -31.0, {'qty_to_risk': 8, 'target_pnl': 215, 'stop': 35, 'donchlen': 78, 'treshold': 78, 'ema_fast': 14, 'ema_slow': 46}],
['4[qSS>o', 23, 119, 146.98, 31, 16, -3.48, {'qty_to_risk': 3, 'target_pnl': 267, 'stop': 176, 'donchlen': 113, 'treshold': 64, 'ema_fast': 7, 'ema_slow': 47}],
['v]JpoL_', 30, 128, 2726.18, 35, 14, -19.59, {'qty_to_risk': 8, 'target_pnl': 277, 'stop': 87, 'donchlen': 183, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 41}],
['vP3Ek\\l', 34, 138, 1209.83, 30, 26, -31.0, {'qty_to_risk': 8, 'target_pnl': 215, 'stop': 35, 'donchlen': 78, 'treshold': 88, 'ema_fast': 14, 'ema_slow': 46}],
['1[:@YZH', 41, 140, 296.69, 31, 22, 2.86, {'qty_to_risk': 3, 'target_pnl': 267, 'stop': 51, 'donchlen': 66, 'treshold': 70, 'ema_fast': 13, 'ema_slow': 33}],
['DAduF:U', 28, 147, 277.95, 31, 19, -7.77, {'qty_to_risk': 4, 'target_pnl': 144, 'stop': 147, 'donchlen': 195, 'treshold': 50, 'ema_fast': 6, 'ema_slow': 38}],
['vvJpoL_', 30, 128, 2249.55, 35, 14, -19.59, {'qty_to_risk': 8, 'target_pnl': 395, 'stop': 87, 'donchlen': 183, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 41}],
['vP3Ep[l', 34, 138, 1209.83, 30, 26, -31.0, {'qty_to_risk': 8, 'target_pnl': 215, 'stop': 35, 'donchlen': 78, 'treshold': 93, 'ema_fast': 14, 'ema_slow': 46}],
['vP3Ew\\l', 34, 139, 1152.6, 30, 26, -31.0, {'qty_to_risk': 8, 'target_pnl': 215, 'stop': 35, 'donchlen': 78, 'treshold': 100, 'ema_fast': 14, 'ema_slow': 46}],
['vdJp\\L_', 30, 126, 2105.9, 35, 14, -19.59, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 87, 'donchlen': 183, 'treshold': 73, 'ema_fast': 10, 'ema_slow': 41}],
['vdJpaL_', 30, 127, 2103.54, 35, 14, -19.59, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 87, 'donchlen': 183, 'treshold': 78, 'ema_fast': 10, 'ema_slow': 41}],
['vjJpoL_', 30, 128, 1936.28, 35, 14, -19.59, {'qty_to_risk': 8, 'target_pnl': 338, 'stop': 87, 'donchlen': 183, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 41}],
['vdJvoL_', 31, 128, 2106.04, 35, 14, -19.59, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 87, 'donchlen': 198, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 41}],
['wdJpoL_', 30, 128, 2013.32, 35, 14, -19.59, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 87, 'donchlen': 183, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 41}],
['vdJqoL_', 30, 128, 2013.32, 35, 14, -19.59, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 87, 'donchlen': 185, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 41}],
['vdJpoL_', 30, 128, 2013.32, 35, 14, -19.59, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 87, 'donchlen': 183, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 41}],
['vdJplL_', 30, 128, 2013.32, 35, 14, -19.59, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 87, 'donchlen': 183, 'treshold': 89, 'ema_fast': 10, 'ema_slow': 41}],
['vdwpoL_', 30, 124, 2547.18, 38, 13, -22.82, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 190, 'donchlen': 183, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 41}],
['vdNpoL_', 30, 128, 2185.17, 28, 14, -22.55, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 97, 'donchlen': 183, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 41}],
['vdtpoL_', 30, 124, 2016.4, 38, 13, -22.82, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 183, 'donchlen': 183, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 41}],
['vdYpoL_', 30, 126, 2041.22, 35, 14, -24.07, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 122, 'donchlen': 183, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 41}],
['cbUlsOb', 30, 120, 1093.9, 30, 13, -25.37, {'qty_to_risk': 6, 'target_pnl': 300, 'stop': 113, 'donchlen': 173, 'treshold': 96, 'ema_fast': 11, 'ema_slow': 42}],
['vdapoL_', 30, 124, 2274.04, 35, 14, -26.51, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 140, 'donchlen': 183, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 41}],
['wiUvMZ]', 33, 108, 2347.41, 33, 12, -36.39, {'qty_to_risk': 8, 'target_pnl': 334, 'stop': 113, 'donchlen': 198, 'treshold': 57, 'ema_fast': 13, 'ema_slow': 40}],
['woIT+t:', 33, 78, 1225.74, 55, 9, 33.19, {'qty_to_risk': 8, 'target_pnl': 362, 'stop': 85, 'donchlen': 115, 'treshold': 23, 'ema_fast': 19, 'ema_slow': 28}],
['`HiuQd/', 33, 124, 778.12, 25, 16, -12.38, {'qty_to_risk': 6, 'target_pnl': 177, 'stop': 158, 'donchlen': 195, 'treshold': 62, 'ema_fast': 16, 'ema_slow': 24}],
['vdJpDL_', 30, 122, 2259.28, 35, 14, -19.59, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 87, 'donchlen': 183, 'treshold': 48, 'ema_fast': 10, 'ema_slow': 41}],
['vPJpF;s', 28, 138, 3152.11, 23, 17, -34.81, {'qty_to_risk': 8, 'target_pnl': 215, 'stop': 87, 'donchlen': 183, 'treshold': 50, 'ema_fast': 6, 'ema_slow': 49}],
['kN3ko9r', 35, 152, 1396.09, 28, 21, -29.79, {'qty_to_risk': 7, 'target_pnl': 205, 'stop': 35, 'donchlen': 171, 'treshold': 92, 'ema_fast': 6, 'ema_slow': 48}],
['G?Iu.?n', 29, 105, 312.72, 16, 12, -16.71, {'qty_to_risk': 4, 'target_pnl': 134, 'stop': 85, 'donchlen': 195, 'treshold': 26, 'ema_fast': 7, 'ema_slow': 47}],
['vdJpSL_', 30, 124, 2348.38, 35, 14, -19.59, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 87, 'donchlen': 183, 'treshold': 64, 'ema_fast': 10, 'ema_slow': 41}],
['vdJpoLe', 29, 124, 2134.97, 35, 14, -26.58, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 87, 'donchlen': 183, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 43}],
['vdOpoLd', 29, 124, 2241.98, 28, 14, -29.34, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 99, 'donchlen': 183, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 43}],
[']_nl?Fi', 29, 115, 1128.7, 28, 14, -27.26, {'qty_to_risk': 6, 'target_pnl': 286, 'stop': 169, 'donchlen': 173, 'treshold': 43, 'ema_fast': 9, 'ema_slow': 45}],
['ps3Eo\\]', 38, 146, 982.76, 38, 26, -27.55, {'qty_to_risk': 7, 'target_pnl': 381, 'stop': 35, 'donchlen': 78, 'treshold': 92, 'ema_fast': 14, 'ema_slow': 40}],
['pw3Eo\\]', 38, 146, 982.76, 38, 26, -27.55, {'qty_to_risk': 7, 'target_pnl': 400, 'stop': 35, 'donchlen': 78, 'treshold': 92, 'ema_fast': 14, 'ema_slow': 40}],
['1auvefX', 38, 96, 435.14, 27, 11, -14.3, {'qty_to_risk': 3, 'target_pnl': 296, 'stop': 185, 'donchlen': 198, 'treshold': 82, 'ema_fast': 16, 'ema_slow': 39}],
['KbpY`wG', 37, 88, 487.95, 33, 12, 11.94, {'qty_to_risk': 5, 'target_pnl': 300, 'stop': 174, 'donchlen': 127, 'treshold': 77, 'ema_fast': 20, 'ema_slow': 32}],
['`hvt6fr', 41, 79, 1660.27, 22, 9, -18.87, {'qty_to_risk': 6, 'target_pnl': 329, 'stop': 188, 'donchlen': 193, 'treshold': 34, 'ema_fast': 16, 'ema_slow': 48}],
['vdJpML_', 30, 124, 2348.38, 35, 14, -19.59, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 87, 'donchlen': 183, 'treshold': 57, 'ema_fast': 10, 'ema_slow': 41}],
['vdJpOL_', 30, 124, 2348.38, 35, 14, -19.59, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 87, 'donchlen': 183, 'treshold': 59, 'ema_fast': 10, 'ema_slow': 41}],
['vdJpoL`', 30, 123, 2107.11, 35, 14, -19.59, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 87, 'donchlen': 183, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 42}],
['kP9Eo9r', 32, 159, 934.31, 24, 25, -19.97, {'qty_to_risk': 7, 'target_pnl': 215, 'stop': 49, 'donchlen': 78, 'treshold': 92, 'ema_fast': 6, 'ema_slow': 48}],
['vdJp3L_', 31, 107, 4377.89, 22, 9, -22.59, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 87, 'donchlen': 183, 'treshold': 31, 'ema_fast': 10, 'ema_slow': 41}],
['R];A2sC', 41, 99, 1061.26, 50, 14, 41.43, {'qty_to_risk': 5, 'target_pnl': 277, 'stop': 53, 'donchlen': 69, 'treshold': 30, 'ema_fast': 19, 'ema_slow': 31}],
['k\\3EoR]', 38, 154, 982.81, 36, 22, -18.55, {'qty_to_risk': 7, 'target_pnl': 272, 'stop': 35, 'donchlen': 78, 'treshold': 92, 'ema_fast': 12, 'ema_slow': 40}],
['sXvp5;l', 29, 124, 5036.25, 20, 15, -39.71, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 188, 'donchlen': 183, 'treshold': 33, 'ema_fast': 6, 'ema_slow': 46}],
['vdOpoL\\', 31, 128, 2652.8, 28, 14, -23.01, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 99, 'donchlen': 183, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 40}],
['ke3Eo\\]', 38, 146, 1043.58, 38, 26, -27.55, {'qty_to_risk': 7, 'target_pnl': 315, 'stop': 35, 'donchlen': 78, 'treshold': 92, 'ema_fast': 14, 'ema_slow': 40}],
['Qn`0e]D', 29, 134, 355.67, 29, 17, -14.58, {'qty_to_risk': 5, 'target_pnl': 357, 'stop': 138, 'donchlen': 27, 'treshold': 82, 'ema_fast': 14, 'ema_slow': 31}],
['vdJpoL]', 30, 128, 2444.91, 35, 14, -19.59, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 87, 'donchlen': 183, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 40}],
['kb3Eo\\]', 38, 146, 1030.12, 38, 26, -27.55, {'qty_to_risk': 7, 'target_pnl': 300, 'stop': 35, 'donchlen': 78, 'treshold': 92, 'ema_fast': 14, 'ema_slow': 40}],
['vYon5;`', 30, 130, 3029.38, 25, 16, -35.11, {'qty_to_risk': 8, 'target_pnl': 258, 'stop': 172, 'donchlen': 178, 'treshold': 33, 'ema_fast': 6, 'ema_slow': 42}],
['v;3sp1l', 30, 181, 1323.4, 31, 29, -25.15, {'qty_to_risk': 8, 'target_pnl': 115, 'stop': 35, 'donchlen': 190, 'treshold': 93, 'ema_fast': 4, 'ema_slow': 46}],
['vdJpoLV', 29, 125, 1781.93, 38, 13, -17.48, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 87, 'donchlen': 183, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 38}],
['vdJpoB_', 28, 135, 2251.95, 31, 16, -24.5, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 87, 'donchlen': 183, 'treshold': 92, 'ema_fast': 8, 'ema_slow': 41}],
['k_3EoR]', 38, 154, 1007.64, 36, 22, -18.55, {'qty_to_risk': 7, 'target_pnl': 286, 'stop': 35, 'donchlen': 78, 'treshold': 92, 'ema_fast': 12, 'ema_slow': 40}],
['.blr^UG', 33, 120, 156.42, 25, 16, -9.84, {'qty_to_risk': 2, 'target_pnl': 300, 'stop': 165, 'donchlen': 188, 'treshold': 75, 'ema_fast': 12, 'ema_slow': 32}],
['vaJpJ;^', 29, 143, 2197.13, 35, 20, -26.69, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 87, 'donchlen': 183, 'treshold': 54, 'ema_fast': 6, 'ema_slow': 41}],
['qudn8bC', 34, 97, 2097.05, 22, 9, 47.77, {'qty_to_risk': 8, 'target_pnl': 391, 'stop': 147, 'donchlen': 178, 'treshold': 36, 'ema_fast': 15, 'ema_slow': 31}],
['UYLBTX@', 36, 116, 762.8, 42, 21, -12.8, {'qty_to_risk': 5, 'target_pnl': 258, 'stop': 92, 'donchlen': 71, 'treshold': 65, 'ema_fast': 13, 'ema_slow': 30}],
['Xhpl>Ak', 31, 116, 1368.89, 26, 15, -28.19, {'qty_to_risk': 6, 'target_pnl': 329, 'stop': 174, 'donchlen': 173, 'treshold': 42, 'ema_fast': 8, 'ema_slow': 46}],
['vaJpJ;[', 30, 143, 2372.41, 38, 21, 19.37, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 87, 'donchlen': 183, 'treshold': 54, 'ema_fast': 6, 'ema_slow': 40}],
['vYon5=`', 29, 118, 4118.64, 20, 15, -39.08, {'qty_to_risk': 8, 'target_pnl': 258, 'stop': 172, 'donchlen': 178, 'treshold': 33, 'ema_fast': 7, 'ema_slow': 42}],
['YK<V:WK', 31, 106, 298.04, 50, 14, 63.22, {'qty_to_risk': 6, 'target_pnl': 191, 'stop': 56, 'donchlen': 120, 'treshold': 38, 'ema_fast': 13, 'ema_slow': 34}],
['-qF<RNL', 34, 132, 105.58, 40, 22, 3.05, {'qty_to_risk': 2, 'target_pnl': 372, 'stop': 78, 'donchlen': 57, 'treshold': 63, 'ema_fast': 11, 'ema_slow': 34}],
['vP3EODf', 37, 154, 1273.12, 42, 28, -22.3, {'qty_to_risk': 8, 'target_pnl': 215, 'stop': 35, 'donchlen': 78, 'treshold': 59, 'ema_fast': 8, 'ema_slow': 44}],
[']muo_<U', 29, 140, 943.6, 41, 17, 29.01, {'qty_to_risk': 6, 'target_pnl': 353, 'stop': 185, 'donchlen': 181, 'treshold': 76, 'ema_fast': 7, 'ema_slow': 38}],
['kd9?o_H', 42, 150, 1239.52, 40, 25, 9.54, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 49, 'donchlen': 64, 'treshold': 92, 'ema_fast': 15, 'ema_slow': 33}],
['kd9?o`H', 42, 150, 1239.52, 40, 25, 9.54, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 49, 'donchlen': 64, 'treshold': 92, 'ema_fast': 15, 'ema_slow': 33}],
['kd:EoR]', 36, 139, 1471.03, 36, 22, 0.21, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 51, 'donchlen': 78, 'treshold': 92, 'ema_fast': 12, 'ema_slow': 40}],
['vdJp:L_', 30, 114, 3472.8, 36, 11, -13.96, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 87, 'donchlen': 183, 'treshold': 38, 'ema_fast': 10, 'ema_slow': 41}],
['W@Iob+W', 24, 216, 347.92, 32, 28, 31.43, {'qty_to_risk': 6, 'target_pnl': 139, 'stop': 85, 'donchlen': 181, 'treshold': 79, 'ema_fast': 3, 'ema_slow': 38}],
['?eg)2Qw', 34, 100, 984.75, 40, 15, 29.06, {'qty_to_risk': 4, 'target_pnl': 315, 'stop': 154, 'donchlen': 10, 'treshold': 30, 'ema_fast': 11, 'ema_slow': 50}],
['v>3so1l', 30, 181, 1240.57, 31, 29, -25.15, {'qty_to_risk': 8, 'target_pnl': 129, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['sXvp55l', 29, 134, 4086.17, 22, 18, -39.78, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 188, 'donchlen': 183, 'treshold': 33, 'ema_fast': 5, 'ema_slow': 46}],
['vbHKpUJ', 33, 118, 690.48, 43, 16, -5.56, {'qty_to_risk': 8, 'target_pnl': 300, 'stop': 83, 'donchlen': 93, 'treshold': 93, 'ema_fast': 12, 'ema_slow': 33}],
['o0Anf]W', 36, 110, 317.52, 30, 13, -9.8, {'qty_to_risk': 7, 'target_pnl': 63, 'stop': 67, 'donchlen': 178, 'treshold': 83, 'ema_fast': 14, 'ema_slow': 38}],
['Yaop5;q', 30, 124, 3140.89, 20, 15, -28.81, {'qty_to_risk': 6, 'target_pnl': 296, 'stop': 172, 'donchlen': 183, 'treshold': 33, 'ema_fast': 6, 'ema_slow': 48}],
['vdos5>l', 29, 119, 5804.19, 21, 14, -40.86, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 172, 'donchlen': 190, 'treshold': 33, 'ema_fast': 7, 'ema_slow': 46}],
['vdfp5@l', 28, 118, 5572.38, 21, 14, -40.86, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 151, 'donchlen': 183, 'treshold': 33, 'ema_fast': 7, 'ema_slow': 46}],
[':Ggnu1\\', 25, 176, 145.78, 36, 25, 14.84, {'qty_to_risk': 3, 'target_pnl': 172, 'stop': 154, 'donchlen': 178, 'treshold': 98, 'ema_fast': 4, 'ema_slow': 40}],
['vaJp\\;g', 29, 139, 3026.37, 25, 20, -36.7, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 87, 'donchlen': 183, 'treshold': 73, 'ema_fast': 6, 'ema_slow': 44}],
['vdds59l', 30, 125, 4270.78, 20, 15, -39.71, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 147, 'donchlen': 190, 'treshold': 33, 'ema_fast': 6, 'ema_slow': 46}],
['vNJp2._', 23, 181, 1897.78, 34, 23, 61.03, {'qty_to_risk': 8, 'target_pnl': 205, 'stop': 87, 'donchlen': 183, 'treshold': 30, 'ema_fast': 3, 'ema_slow': 41}],
['vP3Ep8l', 34, 169, 1107.02, 42, 28, -14.65, {'qty_to_risk': 8, 'target_pnl': 215, 'stop': 35, 'donchlen': 78, 'treshold': 93, 'ema_fast': 6, 'ema_slow': 46}],
['hQ0D1jv', 39, 119, 707.59, 38, 18, -24.86, {'qty_to_risk': 7, 'target_pnl': 220, 'stop': 28, 'donchlen': 76, 'treshold': 29, 'ema_fast': 17, 'ema_slow': 50}],
['v:3so1l', 30, 181, 1256.96, 31, 29, -28.78, {'qty_to_risk': 8, 'target_pnl': 110, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['oqTqk4j', 29, 154, 1636.42, 26, 23, -30.47, {'qty_to_risk': 7, 'target_pnl': 372, 'stop': 110, 'donchlen': 185, 'treshold': 88, 'ema_fast': 5, 'ema_slow': 45}],
['vaJpp;g', 29, 142, 3317.1, 25, 20, -36.7, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 87, 'donchlen': 183, 'treshold': 93, 'ema_fast': 6, 'ema_slow': 44}],
['vYon`1`', 25, 175, 1637.6, 37, 24, 46.75, {'qty_to_risk': 8, 'target_pnl': 258, 'stop': 172, 'donchlen': 178, 'treshold': 77, 'ema_fast': 4, 'ema_slow': 42}],
['kd9?o^H', 42, 154, 1267.35, 34, 23, 14.3, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 49, 'donchlen': 64, 'treshold': 92, 'ema_fast': 14, 'ema_slow': 33}],
['KtKEg9e', 28, 132, 445.51, 38, 21, 6.49, {'qty_to_risk': 5, 'target_pnl': 386, 'stop': 90, 'donchlen': 78, 'treshold': 84, 'ema_fast': 6, 'ema_slow': 43}],
['vYon:1`', 25, 163, 2063.5, 40, 20, 61.56, {'qty_to_risk': 8, 'target_pnl': 258, 'stop': 172, 'donchlen': 178, 'treshold': 38, 'ema_fast': 4, 'ema_slow': 42}],
['vu3Ep\\l', 34, 138, 1227.23, 30, 26, -31.0, {'qty_to_risk': 8, 'target_pnl': 391, 'stop': 35, 'donchlen': 78, 'treshold': 93, 'ema_fast': 14, 'ema_slow': 46}],
['vY3?5)l', 26, 210, 1230.95, 40, 32, 59.65, {'qty_to_risk': 8, 'target_pnl': 258, 'stop': 35, 'donchlen': 64, 'treshold': 33, 'ema_fast': 2, 'ema_slow': 46}],
['lr;N.eU', 35, 77, 900.49, 50, 12, 46.65, {'qty_to_risk': 7, 'target_pnl': 376, 'stop': 53, 'donchlen': 100, 'treshold': 26, 'ema_fast': 16, 'ema_slow': 38}],
['kd3EbR]', 38, 153, 1018.09, 36, 22, -18.55, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 79, 'ema_fast': 12, 'ema_slow': 40}],
['kd3EiR]', 38, 154, 973.49, 36, 22, -18.55, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 86, 'ema_fast': 12, 'ema_slow': 40}],
['kd3EfR]', 38, 154, 973.49, 36, 22, -18.55, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 83, 'ema_fast': 12, 'ema_slow': 40}],
['vB3so1l', 30, 181, 1130.16, 31, 29, -23.33, {'qty_to_risk': 8, 'target_pnl': 148, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['sXvp5.d', 21, 189, 1911.75, 36, 22, 65.51, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 188, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 43}],
['sYvp5.l', 21, 183, 1763.91, 33, 24, 51.26, {'qty_to_risk': 8, 'target_pnl': 258, 'stop': 188, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 46}],
['v93pt1l', 30, 183, 1017.62, 32, 28, -21.13, {'qty_to_risk': 8, 'target_pnl': 106, 'stop': 35, 'donchlen': 183, 'treshold': 97, 'ema_fast': 4, 'ema_slow': 46}],
['vn3Ep\\l', 34, 138, 1227.23, 30, 26, -31.0, {'qty_to_risk': 8, 'target_pnl': 357, 'stop': 35, 'donchlen': 78, 'treshold': 93, 'ema_fast': 14, 'ema_slow': 46}],
['vYonC1`', 25, 167, 2094.76, 39, 23, 48.94, {'qty_to_risk': 8, 'target_pnl': 258, 'stop': 172, 'donchlen': 178, 'treshold': 47, 'ema_fast': 4, 'ema_slow': 42}],
['seHtAe4', 34, 117, 1865.52, 23, 17, -9.45, {'qty_to_risk': 8, 'target_pnl': 315, 'stop': 83, 'donchlen': 193, 'treshold': 45, 'ema_fast': 16, 'ema_slow': 25}],
['kX3EQH]', 36, 153, 986.47, 41, 24, -12.4, {'qty_to_risk': 7, 'target_pnl': 253, 'stop': 35, 'donchlen': 78, 'treshold': 62, 'ema_fast': 9, 'ema_slow': 40}],
['vT3?5)l', 26, 210, 1101.01, 40, 32, 59.65, {'qty_to_risk': 8, 'target_pnl': 234, 'stop': 35, 'donchlen': 64, 'treshold': 33, 'ema_fast': 2, 'ema_slow': 46}],
['kd3?oR]', 39, 168, 1316.32, 41, 24, -16.21, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 64, 'treshold': 92, 'ema_fast': 12, 'ema_slow': 40}],
['kd3DoR]', 38, 154, 1018.64, 38, 21, -16.7, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 76, 'treshold': 92, 'ema_fast': 12, 'ema_slow': 40}],
['kf3EoR]', 38, 154, 986.67, 36, 22, -18.55, {'qty_to_risk': 7, 'target_pnl': 319, 'stop': 35, 'donchlen': 78, 'treshold': 92, 'ema_fast': 12, 'ema_slow': 40}],
['kd3EoR]', 38, 154, 973.49, 36, 22, -18.55, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 92, 'ema_fast': 12, 'ema_slow': 40}],
['kd3EpR]', 38, 154, 973.49, 36, 22, -18.55, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 93, 'ema_fast': 12, 'ema_slow': 40}],
['fd3EoR]', 38, 154, 973.49, 36, 22, -18.55, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 92, 'ema_fast': 12, 'ema_slow': 40}],
['kd3FoR]', 39, 151, 1081.64, 33, 21, -19.85, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 81, 'treshold': 92, 'ema_fast': 12, 'ema_slow': 40}],
['td3EoR]', 38, 154, 1289.09, 36, 22, -22.62, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 92, 'ema_fast': 12, 'ema_slow': 40}],
['v_3Ep\\l', 34, 138, 1339.57, 30, 26, -31.0, {'qty_to_risk': 8, 'target_pnl': 286, 'stop': 35, 'donchlen': 78, 'treshold': 93, 'ema_fast': 14, 'ema_slow': 46}],
['[uiH3MG', 29, 97, 826.0, 58, 12, 40.05, {'qty_to_risk': 6, 'target_pnl': 391, 'stop': 158, 'donchlen': 86, 'treshold': 31, 'ema_fast': 10, 'ema_slow': 32}],
['kU3EQH]', 36, 153, 969.47, 41, 24, -12.4, {'qty_to_risk': 7, 'target_pnl': 239, 'stop': 35, 'donchlen': 78, 'treshold': 62, 'ema_fast': 9, 'ema_slow': 40}],
['vYos51l', 26, 153, 2408.51, 25, 20, -13.89, {'qty_to_risk': 8, 'target_pnl': 258, 'stop': 172, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['vP3Eo>f', 36, 169, 1130.8, 42, 28, -15.83, {'qty_to_risk': 8, 'target_pnl': 215, 'stop': 35, 'donchlen': 78, 'treshold': 92, 'ema_fast': 7, 'ema_slow': 44}],
['vYon55`', 28, 142, 2859.04, 33, 18, -2.12, {'qty_to_risk': 8, 'target_pnl': 258, 'stop': 172, 'donchlen': 178, 'treshold': 33, 'ema_fast': 5, 'ema_slow': 42}],
['sXvp51l', 26, 153, 2493.23, 30, 20, -5.57, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 188, 'donchlen': 183, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['vXds51l', 26, 153, 2367.79, 25, 20, -13.89, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 147, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['vd2poL_', 35, 136, 1700.25, 33, 15, -22.26, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 33, 'donchlen': 183, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 41}],
['vXJp3._', 23, 185, 3158.93, 34, 23, 53.85, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 87, 'donchlen': 183, 'treshold': 31, 'ema_fast': 3, 'ema_slow': 41}],
['sXvp5._', 22, 189, 2570.78, 34, 23, 53.88, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 188, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 41}],
['vXJp5._', 23, 189, 2484.11, 34, 23, 53.85, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 87, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 41}],
['vYHn51`', 27, 159, 2464.05, 36, 19, 50.3, {'qty_to_risk': 8, 'target_pnl': 258, 'stop': 83, 'donchlen': 178, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 42}],
['sYon51`', 26, 159, 2530.66, 36, 19, 49.09, {'qty_to_risk': 8, 'target_pnl': 258, 'stop': 172, 'donchlen': 178, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 42}],
['vY\\n51`', 26, 159, 2497.07, 36, 19, 49.09, {'qty_to_risk': 8, 'target_pnl': 258, 'stop': 128, 'donchlen': 178, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 42}],
['vYSn51`', 27, 159, 2590.06, 36, 19, 49.09, {'qty_to_risk': 8, 'target_pnl': 258, 'stop': 108, 'donchlen': 178, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 42}],
['vYnn51`', 26, 159, 2553.93, 36, 19, 49.09, {'qty_to_risk': 8, 'target_pnl': 258, 'stop': 169, 'donchlen': 178, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 42}],
['vYon51`', 26, 159, 2530.66, 36, 19, 49.09, {'qty_to_risk': 8, 'target_pnl': 258, 'stop': 172, 'donchlen': 178, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 42}],
['^XJp5._', 23, 189, 1266.82, 34, 23, 39.99, {'qty_to_risk': 6, 'target_pnl': 253, 'stop': 87, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 41}],
['R<js+/0', 25, 207, 378.94, 22, 27, 29.95, {'qty_to_risk': 5, 'target_pnl': 120, 'stop': 160, 'donchlen': 190, 'treshold': 23, 'ema_fast': 4, 'ema_slow': 24}],
['vYo+51`', 25, 170, 2506.12, 39, 23, 26.0, {'qty_to_risk': 8, 'target_pnl': 258, 'stop': 172, 'donchlen': 15, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 42}],
['][[ta0Z', 26, 183, 1014.77, 25, 24, 9.29, {'qty_to_risk': 6, 'target_pnl': 267, 'stop': 126, 'donchlen': 193, 'treshold': 78, 'ema_fast': 4, 'ema_slow': 39}],
['kd3EoO]', 40, 153, 1626.41, 31, 22, -18.7, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 92, 'ema_fast': 11, 'ema_slow': 40}],
['kd3EoQ]', 40, 153, 1626.41, 31, 22, -18.7, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 92, 'ema_fast': 11, 'ema_slow': 40}],
['kd3EoP]', 40, 153, 1626.41, 31, 22, -18.7, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 92, 'ema_fast': 11, 'ema_slow': 40}],
['vaJpC;g', 29, 132, 3485.85, 26, 19, -34.41, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 87, 'donchlen': 183, 'treshold': 47, 'ema_fast': 6, 'ema_slow': 44}],
['vaJpD;g', 29, 133, 3136.76, 26, 19, -34.41, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 87, 'donchlen': 183, 'treshold': 48, 'ema_fast': 6, 'ema_slow': 44}],
['vaJpJ@g', 27, 138, 2179.89, 23, 17, -36.36, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 87, 'donchlen': 183, 'treshold': 54, 'ema_fast': 7, 'ema_slow': 44}],
['vaJp4;g', 30, 122, 4327.08, 18, 16, -38.13, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 87, 'donchlen': 183, 'treshold': 32, 'ema_fast': 6, 'ema_slow': 44}],
['sXvp5.`', 22, 189, 2033.02, 33, 24, 57.65, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 188, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 42}],
['vaJpJ;S', 29, 144, 2403.35, 40, 22, 27.33, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 87, 'donchlen': 183, 'treshold': 54, 'ema_fast': 6, 'ema_slow': 37}],
['vP3E>1f', 33, 166, 1116.84, 43, 23, 24.18, {'qty_to_risk': 8, 'target_pnl': 215, 'stop': 35, 'donchlen': 78, 'treshold': 42, 'ema_fast': 4, 'ema_slow': 44}],
['vd3EoJ_', 38, 157, 1761.7, 31, 22, -18.93, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 41}],
['vd3EoM_', 38, 157, 1761.7, 31, 22, -18.93, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 41}],
['vd3poL_', 36, 136, 1442.87, 26, 15, -24.81, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 183, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 41}],
['vd3soK_', 36, 135, 1614.49, 25, 16, -28.06, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 41}],
['vXon51`', 26, 159, 2248.09, 36, 19, 49.09, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 172, 'donchlen': 178, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 42}],
['kd3EoRY', 38, 154, 1036.61, 36, 22, -17.84, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 92, 'ema_fast': 12, 'ema_slow': 39}],
['md3EpIl', 36, 152, 950.08, 36, 25, -18.9, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 93, 'ema_fast': 10, 'ema_slow': 46}],
['pU3ko7j', 33, 163, 1077.02, 29, 24, -30.26, {'qty_to_risk': 7, 'target_pnl': 239, 'stop': 35, 'donchlen': 171, 'treshold': 92, 'ema_fast': 5, 'ema_slow': 45}],
['q^TkJ8d', 28, 135, 1785.82, 25, 20, -32.11, {'qty_to_risk': 8, 'target_pnl': 281, 'stop': 110, 'donchlen': 171, 'treshold': 54, 'ema_fast': 6, 'ema_slow': 43}],
['vaJp5;g', 30, 122, 4327.08, 23, 17, -37.62, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 87, 'donchlen': 183, 'treshold': 33, 'ema_fast': 6, 'ema_slow': 44}],
['vdfp53l', 29, 134, 3752.98, 22, 18, -39.78, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 151, 'donchlen': 183, 'treshold': 33, 'ema_fast': 5, 'ema_slow': 46}],
['vdos54l', 29, 134, 3585.68, 22, 18, -42.57, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 172, 'donchlen': 190, 'treshold': 33, 'ema_fast': 5, 'ema_slow': 46}],
['vdos57l', 29, 134, 3585.68, 22, 18, -42.57, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 172, 'donchlen': 190, 'treshold': 33, 'ema_fast': 5, 'ema_slow': 46}],
['v_fp5.l', 21, 183, 1858.03, 33, 24, 40.08, {'qty_to_risk': 8, 'target_pnl': 286, 'stop': 151, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 46}],
['savp5.l', 21, 183, 1980.57, 33, 24, 40.08, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 188, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 46}],
['v[JtJ/g', 27, 168, 2267.42, 26, 23, 10.37, {'qty_to_risk': 8, 'target_pnl': 267, 'stop': 87, 'donchlen': 193, 'treshold': 54, 'ema_fast': 4, 'ema_slow': 44}],
['v[MtJ/g', 27, 167, 2589.49, 26, 23, 7.42, {'qty_to_risk': 8, 'target_pnl': 267, 'stop': 94, 'donchlen': 193, 'treshold': 54, 'ema_fast': 4, 'ema_slow': 44}],
['v[OtJ/g', 27, 167, 2392.53, 26, 23, 7.94, {'qty_to_risk': 8, 'target_pnl': 267, 'stop': 99, 'donchlen': 193, 'treshold': 54, 'ema_fast': 4, 'ema_slow': 44}],
['v[dtJ/g', 26, 167, 2380.55, 26, 23, 6.14, {'qty_to_risk': 8, 'target_pnl': 267, 'stop': 147, 'donchlen': 193, 'treshold': 54, 'ema_fast': 4, 'ema_slow': 44}],
['vaJpF;g', 29, 133, 3136.76, 26, 19, -34.41, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 87, 'donchlen': 183, 'treshold': 50, 'ema_fast': 6, 'ema_slow': 44}],
['vaJpH;g', 29, 134, 2960.01, 25, 20, -36.7, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 87, 'donchlen': 183, 'treshold': 52, 'ema_fast': 6, 'ema_slow': 44}],
['vaJp9;g', 29, 124, 3801.16, 23, 17, -37.62, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 87, 'donchlen': 183, 'treshold': 37, 'ema_fast': 6, 'ema_slow': 44}],
['vVJp4._', 23, 188, 2406.45, 34, 23, 53.85, {'qty_to_risk': 8, 'target_pnl': 243, 'stop': 87, 'donchlen': 183, 'treshold': 32, 'ema_fast': 3, 'ema_slow': 41}],
['vdJpo7_', 28, 163, 1836.74, 34, 23, 18.63, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 87, 'donchlen': 183, 'treshold': 92, 'ema_fast': 5, 'ema_slow': 41}],
['vdJpo5_', 28, 163, 1836.74, 34, 23, 18.63, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 87, 'donchlen': 183, 'treshold': 92, 'ema_fast': 5, 'ema_slow': 41}],
['vP3E;1f', 33, 159, 1141.61, 40, 22, 13.19, {'qty_to_risk': 8, 'target_pnl': 215, 'stop': 35, 'donchlen': 78, 'treshold': 39, 'ema_fast': 4, 'ema_slow': 44}],
['GbU[3aj', 36, 77, 256.86, 45, 11, 2.89, {'qty_to_risk': 4, 'target_pnl': 300, 'stop': 113, 'donchlen': 132, 'treshold': 31, 'ema_fast': 15, 'ema_slow': 45}],
['pD3vo1]', 29, 186, 1037.73, 35, 28, -5.09, {'qty_to_risk': 7, 'target_pnl': 158, 'stop': 35, 'donchlen': 198, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['kD3so1]', 29, 185, 885.39, 35, 28, -6.62, {'qty_to_risk': 7, 'target_pnl': 158, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['ad3EpIe', 39, 156, 886.94, 34, 23, -15.14, {'qty_to_risk': 6, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 93, 'ema_fast': 10, 'ema_slow': 43}],
['vG3so1l', 30, 181, 1285.38, 31, 29, -19.3, {'qty_to_risk': 8, 'target_pnl': 172, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vG3sp1l', 30, 181, 1285.38, 31, 29, -19.3, {'qty_to_risk': 8, 'target_pnl': 172, 'stop': 35, 'donchlen': 190, 'treshold': 93, 'ema_fast': 4, 'ema_slow': 46}],
['v\\on51`', 26, 159, 2332.0, 36, 19, 53.27, {'qty_to_risk': 8, 'target_pnl': 272, 'stop': 172, 'donchlen': 178, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 42}],
['vcKpF4b', 28, 153, 2437.63, 36, 22, 24.71, {'qty_to_risk': 8, 'target_pnl': 305, 'stop': 90, 'donchlen': 183, 'treshold': 50, 'ema_fast': 5, 'ema_slow': 42}],
['vdJpS4_', 28, 157, 1782.23, 34, 23, 18.63, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 87, 'donchlen': 183, 'treshold': 64, 'ema_fast': 5, 'ema_slow': 41}],
['vcvpF4b', 28, 152, 2365.78, 28, 21, 0.79, {'qty_to_risk': 8, 'target_pnl': 305, 'stop': 188, 'donchlen': 183, 'treshold': 50, 'ema_fast': 5, 'ema_slow': 42}],
['vaJpN;g', 29, 137, 2908.28, 25, 20, -36.7, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 87, 'donchlen': 183, 'treshold': 58, 'ema_fast': 6, 'ema_slow': 44}],
['vXJp7._', 22, 192, 2063.79, 37, 24, 73.34, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 87, 'donchlen': 183, 'treshold': 35, 'ema_fast': 3, 'ema_slow': 41}],
['vVJp7._', 22, 192, 1959.47, 37, 24, 73.34, {'qty_to_risk': 8, 'target_pnl': 243, 'stop': 87, 'donchlen': 183, 'treshold': 35, 'ema_fast': 3, 'ema_slow': 41}],
['vWJpo1_', 25, 179, 1702.78, 36, 25, 46.47, {'qty_to_risk': 8, 'target_pnl': 248, 'stop': 87, 'donchlen': 183, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 41}],
['vT3?o1l', 31, 198, 983.48, 48, 35, 35.52, {'qty_to_risk': 8, 'target_pnl': 234, 'stop': 35, 'donchlen': 64, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vF3?o1l', 31, 198, 926.08, 48, 35, 35.96, {'qty_to_risk': 8, 'target_pnl': 167, 'stop': 35, 'donchlen': 64, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vP3po1f', 30, 182, 1223.09, 40, 25, 8.77, {'qty_to_risk': 8, 'target_pnl': 215, 'stop': 35, 'donchlen': 183, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 44}],
['vP3wo1f', 30, 182, 1527.73, 37, 27, -1.8, {'qty_to_risk': 8, 'target_pnl': 215, 'stop': 35, 'donchlen': 200, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 44}],
['safs51l', 26, 153, 2928.33, 25, 20, -4.93, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 151, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['vF3sp1l', 30, 181, 1378.71, 31, 29, -19.3, {'qty_to_risk': 8, 'target_pnl': 167, 'stop': 35, 'donchlen': 190, 'treshold': 93, 'ema_fast': 4, 'ema_slow': 46}],
['vT3sp1l', 30, 181, 1350.3, 31, 29, -20.9, {'qty_to_risk': 8, 'target_pnl': 234, 'stop': 35, 'donchlen': 190, 'treshold': 93, 'ema_fast': 4, 'ema_slow': 46}],
['sXvp5.Y', 23, 196, 2146.89, 34, 23, 61.53, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 188, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 39}],
['kD9?o1H', 29, 217, 897.86, 39, 28, 18.95, {'qty_to_risk': 7, 'target_pnl': 158, 'stop': 49, 'donchlen': 64, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 33}],
['vdOpoL9', 30, 146, 1863.92, 31, 19, 9.84, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 99, 'donchlen': 183, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 27}],
['vdOpoL8', 30, 146, 1863.92, 31, 19, 9.84, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 99, 'donchlen': 183, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 27}],
['s`fs51l', 26, 153, 2825.29, 25, 20, -4.93, {'qty_to_risk': 8, 'target_pnl': 291, 'stop': 151, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['vahpJ;g', 28, 135, 3360.68, 25, 20, -36.28, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 156, 'donchlen': 183, 'treshold': 54, 'ema_fast': 6, 'ema_slow': 44}],
['vaQpJ;g', 29, 135, 3270.91, 25, 20, -36.28, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 103, 'donchlen': 183, 'treshold': 54, 'ema_fast': 6, 'ema_slow': 44}],
['vappJ;g', 28, 135, 3200.73, 25, 20, -36.28, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 174, 'donchlen': 183, 'treshold': 54, 'ema_fast': 6, 'ema_slow': 44}],
['vaepJ;g', 28, 135, 3420.86, 25, 20, -36.28, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 149, 'donchlen': 183, 'treshold': 54, 'ema_fast': 6, 'ema_slow': 44}],
['vafpJ;g', 28, 135, 3408.73, 25, 20, -36.28, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 151, 'donchlen': 183, 'treshold': 54, 'ema_fast': 6, 'ema_slow': 44}],
['vaPpJ;g', 29, 135, 3351.67, 25, 20, -36.28, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 101, 'donchlen': 183, 'treshold': 54, 'ema_fast': 6, 'ema_slow': 44}],
['vajpJ;g', 28, 135, 3323.26, 25, 20, -36.28, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 160, 'donchlen': 183, 'treshold': 54, 'ema_fast': 6, 'ema_slow': 44}],
['vaVpJ;g', 29, 135, 3311.56, 25, 20, -36.28, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 115, 'donchlen': 183, 'treshold': 54, 'ema_fast': 6, 'ema_slow': 44}],
['vaYpJ;g', 28, 135, 3207.91, 25, 20, -36.28, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 122, 'donchlen': 183, 'treshold': 54, 'ema_fast': 6, 'ema_slow': 44}],
['vatpJ;g', 28, 135, 3131.95, 25, 20, -36.28, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 183, 'donchlen': 183, 'treshold': 54, 'ema_fast': 6, 'ema_slow': 44}],
['raJpJ;g', 29, 136, 3119.27, 25, 20, -36.7, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 87, 'donchlen': 183, 'treshold': 54, 'ema_fast': 6, 'ema_slow': 44}],
['vaJpJ:g', 29, 136, 3119.27, 25, 20, -36.7, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 87, 'donchlen': 183, 'treshold': 54, 'ema_fast': 6, 'ema_slow': 44}],
['saJpJ;g', 29, 136, 3119.27, 25, 20, -36.7, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 87, 'donchlen': 183, 'treshold': 54, 'ema_fast': 6, 'ema_slow': 44}],
['vaJoJ;g', 29, 136, 3119.27, 25, 20, -36.7, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 87, 'donchlen': 181, 'treshold': 54, 'ema_fast': 6, 'ema_slow': 44}],
['v`JpJ;g', 29, 136, 2996.35, 25, 20, -36.7, {'qty_to_risk': 8, 'target_pnl': 291, 'stop': 87, 'donchlen': 183, 'treshold': 54, 'ema_fast': 6, 'ema_slow': 44}],
['v^JpJ;g', 29, 136, 2967.74, 25, 20, -36.7, {'qty_to_risk': 8, 'target_pnl': 281, 'stop': 87, 'donchlen': 183, 'treshold': 54, 'ema_fast': 6, 'ema_slow': 44}],
['vaLpJ;g', 29, 135, 3887.32, 20, 20, -37.06, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 92, 'donchlen': 183, 'treshold': 54, 'ema_fast': 6, 'ema_slow': 44}],
['vaMpJ;g', 29, 135, 3762.28, 20, 20, -37.29, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 94, 'donchlen': 183, 'treshold': 54, 'ema_fast': 6, 'ema_slow': 44}],
['sXvp5.[', 23, 195, 2114.7, 34, 23, 59.17, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 188, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 40}],
['v^op5/`', 27, 159, 2668.1, 36, 19, 40.68, {'qty_to_risk': 8, 'target_pnl': 281, 'stop': 172, 'donchlen': 183, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 42}],
['v^Jpi/Y', 25, 182, 1949.23, 36, 25, 40.65, {'qty_to_risk': 8, 'target_pnl': 281, 'stop': 87, 'donchlen': 183, 'treshold': 86, 'ema_fast': 4, 'ema_slow': 39}],
['Uqeo@3b', 28, 151, 974.31, 33, 21, 21.58, {'qty_to_risk': 5, 'target_pnl': 372, 'stop': 149, 'donchlen': 181, 'treshold': 44, 'ema_fast': 5, 'ema_slow': 42}],
['v93po1c', 30, 186, 1084.16, 40, 25, -4.7, {'qty_to_risk': 8, 'target_pnl': 106, 'stop': 35, 'donchlen': 183, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 43}],
['vZ3p51l', 29, 154, 1344.09, 28, 21, -5.65, {'qty_to_risk': 8, 'target_pnl': 262, 'stop': 35, 'donchlen': 183, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['pC3vo1]', 29, 186, 1019.18, 35, 28, -9.67, {'qty_to_risk': 7, 'target_pnl': 153, 'stop': 35, 'donchlen': 198, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['vJ3so1l', 30, 181, 1425.72, 31, 29, -19.3, {'qty_to_risk': 8, 'target_pnl': 186, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vJ3sp1l', 30, 181, 1425.72, 31, 29, -19.3, {'qty_to_risk': 8, 'target_pnl': 186, 'stop': 35, 'donchlen': 190, 'treshold': 93, 'ema_fast': 4, 'ema_slow': 46}],
['vI3so1l', 30, 181, 1307.83, 31, 29, -19.3, {'qty_to_risk': 8, 'target_pnl': 182, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['v^JpF/g', 26, 165, 2391.66, 39, 23, 46.35, {'qty_to_risk': 8, 'target_pnl': 281, 'stop': 87, 'donchlen': 183, 'treshold': 50, 'ema_fast': 4, 'ema_slow': 44}],
['vS3?o1l', 31, 198, 964.5, 48, 35, 45.11, {'qty_to_risk': 8, 'target_pnl': 229, 'stop': 35, 'donchlen': 64, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['sdfs61l', 26, 154, 2076.54, 25, 20, -4.93, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 151, 'donchlen': 190, 'treshold': 34, 'ema_fast': 4, 'ema_slow': 46}],
['vd3?ohl', 40, 142, 1108.06, 39, 28, -24.58, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 64, 'treshold': 92, 'ema_fast': 17, 'ema_slow': 46}],
['vaJp=;g', 29, 131, 3645.68, 27, 18, -29.96, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 87, 'donchlen': 183, 'treshold': 41, 'ema_fast': 6, 'ema_slow': 44}],
['vaJpJ;i', 29, 139, 2871.34, 21, 19, -38.75, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 87, 'donchlen': 183, 'treshold': 54, 'ema_fast': 6, 'ema_slow': 45}],
['vVJ/2._', 23, 188, 3181.8, 38, 26, 84.61, {'qty_to_risk': 8, 'target_pnl': 243, 'stop': 87, 'donchlen': 25, 'treshold': 30, 'ema_fast': 3, 'ema_slow': 41}],
['vXJp/._', 22, 171, 2206.25, 33, 21, 82.07, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 87, 'donchlen': 183, 'treshold': 27, 'ema_fast': 3, 'ema_slow': 41}],
['vVjp2._', 22, 181, 2617.23, 34, 23, 53.88, {'qty_to_risk': 8, 'target_pnl': 243, 'stop': 160, 'donchlen': 183, 'treshold': 30, 'ema_fast': 3, 'ema_slow': 41}],
['vVJo2._', 23, 181, 2570.32, 34, 23, 53.85, {'qty_to_risk': 8, 'target_pnl': 243, 'stop': 87, 'donchlen': 181, 'treshold': 30, 'ema_fast': 3, 'ema_slow': 41}],
['vVJp2.^', 23, 181, 2570.32, 34, 23, 53.85, {'qty_to_risk': 8, 'target_pnl': 243, 'stop': 87, 'donchlen': 183, 'treshold': 30, 'ema_fast': 3, 'ema_slow': 41}],
['vVJp2._', 23, 181, 2570.32, 34, 23, 53.85, {'qty_to_risk': 8, 'target_pnl': 243, 'stop': 87, 'donchlen': 183, 'treshold': 30, 'ema_fast': 3, 'ema_slow': 41}],
['vVRp2._', 22, 181, 2551.84, 34, 23, 53.88, {'qty_to_risk': 8, 'target_pnl': 243, 'stop': 106, 'donchlen': 183, 'treshold': 30, 'ema_fast': 3, 'ema_slow': 41}],
['vVTp2._', 22, 181, 2512.34, 34, 23, 53.88, {'qty_to_risk': 8, 'target_pnl': 243, 'stop': 110, 'donchlen': 183, 'treshold': 30, 'ema_fast': 3, 'ema_slow': 41}],
['vVXp2._', 22, 181, 2413.63, 34, 23, 53.88, {'qty_to_risk': 8, 'target_pnl': 243, 'stop': 119, 'donchlen': 183, 'treshold': 30, 'ema_fast': 3, 'ema_slow': 41}],
['vVNp2._', 22, 181, 2538.94, 34, 23, 52.46, {'qty_to_risk': 8, 'target_pnl': 243, 'stop': 97, 'donchlen': 183, 'treshold': 30, 'ema_fast': 3, 'ema_slow': 41}],
['vVJ)2._', 23, 195, 1481.09, 35, 28, 48.64, {'qty_to_risk': 8, 'target_pnl': 243, 'stop': 87, 'donchlen': 10, 'treshold': 30, 'ema_fast': 3, 'ema_slow': 41}],
['lVJp2._', 23, 181, 1855.41, 34, 23, 47.77, {'qty_to_risk': 7, 'target_pnl': 243, 'stop': 87, 'donchlen': 183, 'treshold': 30, 'ema_fast': 3, 'ema_slow': 41}],
['iVJp2._', 23, 181, 1855.41, 34, 23, 47.77, {'qty_to_risk': 7, 'target_pnl': 243, 'stop': 87, 'donchlen': 183, 'treshold': 30, 'ema_fast': 3, 'ema_slow': 41}],
['nVJp2._', 23, 181, 1855.41, 34, 23, 47.77, {'qty_to_risk': 7, 'target_pnl': 243, 'stop': 87, 'donchlen': 183, 'treshold': 30, 'ema_fast': 3, 'ema_slow': 41}],
['vVJr2._', 23, 181, 2551.79, 30, 23, 44.52, {'qty_to_risk': 8, 'target_pnl': 243, 'stop': 87, 'donchlen': 188, 'treshold': 30, 'ema_fast': 3, 'ema_slow': 41}],
['vW3so1l', 30, 181, 1480.15, 31, 29, -20.9, {'qty_to_risk': 8, 'target_pnl': 248, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
[']a2A;ov', 38, 121, 437.51, 34, 23, -21.89, {'qty_to_risk': 6, 'target_pnl': 296, 'stop': 33, 'donchlen': 69, 'treshold': 39, 'ema_fast': 18, 'ema_slow': 50}],
['vXJp.._', 22, 170, 2249.7, 33, 21, 82.07, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 87, 'donchlen': 183, 'treshold': 26, 'ema_fast': 3, 'ema_slow': 41}],
['vVJp.._', 22, 170, 2126.6, 33, 21, 82.07, {'qty_to_risk': 8, 'target_pnl': 243, 'stop': 87, 'donchlen': 183, 'treshold': 26, 'ema_fast': 3, 'ema_slow': 41}],
['vYJn31_', 26, 155, 3183.64, 35, 20, 48.12, {'qty_to_risk': 8, 'target_pnl': 258, 'stop': 87, 'donchlen': 178, 'treshold': 31, 'ema_fast': 4, 'ema_slow': 41}],
['vdfp5(l', 21, 218, 1955.95, 28, 28, 36.7, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 151, 'donchlen': 183, 'treshold': 33, 'ema_fast': 2, 'ema_slow': 46}],
['sdfs5(l', 21, 218, 1934.64, 25, 28, 26.35, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 151, 'donchlen': 190, 'treshold': 33, 'ema_fast': 2, 'ema_slow': 46}],
['vdds=1l', 26, 163, 2273.86, 27, 22, 4.95, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 147, 'donchlen': 190, 'treshold': 41, 'ema_fast': 4, 'ema_slow': 46}],
['vdos>1l', 26, 163, 2131.45, 26, 23, -4.29, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 172, 'donchlen': 190, 'treshold': 42, 'ema_fast': 4, 'ema_slow': 46}],
['vddsf1l', 26, 172, 1717.54, 24, 25, -13.01, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 147, 'donchlen': 190, 'treshold': 83, 'ema_fast': 4, 'ema_slow': 46}],
['vV3sp1l', 30, 181, 1480.15, 31, 29, -20.9, {'qty_to_risk': 8, 'target_pnl': 243, 'stop': 35, 'donchlen': 190, 'treshold': 93, 'ema_fast': 4, 'ema_slow': 46}],
['voJpJ;g', 29, 136, 2553.58, 25, 20, -36.7, {'qty_to_risk': 8, 'target_pnl': 362, 'stop': 87, 'donchlen': 183, 'treshold': 54, 'ema_fast': 6, 'ema_slow': 44}],
['vZJp2._', 23, 181, 2760.32, 34, 23, 56.94, {'qty_to_risk': 8, 'target_pnl': 262, 'stop': 87, 'donchlen': 183, 'treshold': 30, 'ema_fast': 3, 'ema_slow': 41}],
['vVJp2.Q', 24, 200, 2138.78, 32, 25, 55.45, {'qty_to_risk': 8, 'target_pnl': 243, 'stop': 87, 'donchlen': 183, 'treshold': 30, 'ema_fast': 3, 'ema_slow': 36}],
['vqop5.W', 23, 196, 2827.28, 33, 24, 50.59, {'qty_to_risk': 8, 'target_pnl': 372, 'stop': 172, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 38}],
['mQS_l.S', 21, 211, 279.1, 31, 29, 41.06, {'qty_to_risk': 7, 'target_pnl': 220, 'stop': 108, 'donchlen': 142, 'treshold': 89, 'ema_fast': 3, 'ema_slow': 37}],
['*dLU8YD', 34, 93, 108.48, 30, 13, -2.74, {'qty_to_risk': 2, 'target_pnl': 310, 'stop': 92, 'donchlen': 117, 'treshold': 36, 'ema_fast': 13, 'ema_slow': 31}],
['kd3E.H]', 36, 116, 1430.81, 40, 15, -3.23, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 26, 'ema_fast': 9, 'ema_slow': 40}],
['vqom5.`', 23, 189, 2124.49, 33, 24, 47.02, {'qty_to_risk': 8, 'target_pnl': 372, 'stop': 172, 'donchlen': 176, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 42}],
['vqop5.a', 22, 189, 2115.73, 33, 24, 47.02, {'qty_to_risk': 8, 'target_pnl': 372, 'stop': 172, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 42}],
['vqsp5.`', 22, 189, 2063.58, 33, 24, 47.02, {'qty_to_risk': 8, 'target_pnl': 372, 'stop': 181, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 42}],
['vaJpJ1g', 26, 167, 2729.22, 37, 24, 45.25, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 87, 'donchlen': 183, 'treshold': 54, 'ema_fast': 4, 'ema_slow': 44}],
['vaJtJ/g', 27, 168, 2861.0, 26, 23, 2.49, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 87, 'donchlen': 193, 'treshold': 54, 'ema_fast': 4, 'ema_slow': 44}],
['p<3vo1]', 29, 186, 1049.6, 35, 28, -14.03, {'qty_to_risk': 7, 'target_pnl': 120, 'stop': 35, 'donchlen': 198, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['vH3so1l', 30, 181, 1247.74, 31, 29, -19.3, {'qty_to_risk': 8, 'target_pnl': 177, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vVJp2.Y', 23, 189, 2010.14, 34, 23, 61.5, {'qty_to_risk': 8, 'target_pnl': 243, 'stop': 87, 'donchlen': 183, 'treshold': 30, 'ema_fast': 3, 'ema_slow': 39}],
['vdfp5.d', 21, 189, 1846.81, 36, 22, 53.69, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 151, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 43}],
['vdfp5.e', 21, 189, 1846.81, 36, 22, 53.69, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 151, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 43}],
['vYJpF._', 22, 199, 1787.28, 34, 26, 52.34, {'qty_to_risk': 8, 'target_pnl': 258, 'stop': 87, 'donchlen': 183, 'treshold': 50, 'ema_fast': 3, 'ema_slow': 41}],
['vN3Bo,a', 29, 223, 1144.73, 39, 33, 37.29, {'qty_to_risk': 8, 'target_pnl': 205, 'stop': 35, 'donchlen': 71, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 42}],
['pN3oo,j', 25, 212, 761.07, 37, 29, 18.24, {'qty_to_risk': 7, 'target_pnl': 205, 'stop': 35, 'donchlen': 181, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 45}],
['vN3wo,a', 25, 216, 1027.22, 34, 32, 13.34, {'qty_to_risk': 8, 'target_pnl': 205, 'stop': 35, 'donchlen': 200, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 42}],
['vN3vo,a', 25, 216, 1027.22, 34, 32, 13.34, {'qty_to_risk': 8, 'target_pnl': 205, 'stop': 35, 'donchlen': 198, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 42}],
['vN3to,a', 25, 216, 1027.11, 34, 32, 9.39, {'qty_to_risk': 8, 'target_pnl': 205, 'stop': 35, 'donchlen': 193, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 42}],
['vd3EoHS', 38, 162, 1270.34, 43, 23, -11.34, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 92, 'ema_fast': 9, 'ema_slow': 37}],
['vYon51Q', 26, 168, 3395.1, 36, 19, 51.71, {'qty_to_risk': 8, 'target_pnl': 258, 'stop': 172, 'donchlen': 178, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 36}],
['vkop5.U', 23, 196, 2358.35, 33, 24, 50.59, {'qty_to_risk': 8, 'target_pnl': 343, 'stop': 172, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 38}],
['vXopF._', 22, 199, 1777.92, 34, 26, 45.68, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 172, 'donchlen': 183, 'treshold': 50, 'ema_fast': 3, 'ema_slow': 41}],
['vXqpF._', 22, 199, 1777.92, 34, 26, 45.68, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 176, 'donchlen': 183, 'treshold': 50, 'ema_fast': 3, 'ema_slow': 41}],
['v^Jpn/Y', 25, 185, 2127.4, 36, 25, 40.65, {'qty_to_risk': 8, 'target_pnl': 281, 'stop': 87, 'donchlen': 183, 'treshold': 91, 'ema_fast': 4, 'ema_slow': 39}],
['v^Jpv/Y', 25, 185, 2045.77, 36, 25, 40.65, {'qty_to_risk': 8, 'target_pnl': 281, 'stop': 87, 'donchlen': 183, 'treshold': 99, 'ema_fast': 4, 'ema_slow': 39}],
['vVBp2._', 23, 181, 1828.71, 34, 23, 33.55, {'qty_to_risk': 8, 'target_pnl': 243, 'stop': 69, 'donchlen': 183, 'treshold': 30, 'ema_fast': 3, 'ema_slow': 41}],
['vXJ@F._', 23, 186, 2427.9, 29, 27, 26.48, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 87, 'donchlen': 66, 'treshold': 50, 'ema_fast': 3, 'ema_slow': 41}],
['vdJpoL:', 29, 145, 1384.92, 33, 18, 13.71, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 87, 'donchlen': 183, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 28}],
['vdOpoL;', 30, 145, 1627.22, 27, 18, 11.81, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 99, 'donchlen': 183, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 28}],
['vP3no1a', 30, 186, 1122.07, 40, 25, 3.26, {'qty_to_risk': 8, 'target_pnl': 215, 'stop': 35, 'donchlen': 178, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 42}],
['iJovj:)', 24, 239, 484.69, 28, 38, -10.21, {'qty_to_risk': 7, 'target_pnl': 186, 'stop': 172, 'donchlen': 198, 'treshold': 87, 'ema_fast': 6, 'ema_slow': 21}],
['pd3voA]', 34, 143, 1203.03, 29, 17, -24.03, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 198, 'treshold': 92, 'ema_fast': 8, 'ema_slow': 40}],
['vVJpF/Y', 25, 174, 2063.8, 37, 24, 52.8, {'qty_to_risk': 8, 'target_pnl': 243, 'stop': 87, 'donchlen': 183, 'treshold': 50, 'ema_fast': 4, 'ema_slow': 39}],
['vXMpB._', 22, 198, 1840.3, 34, 26, 47.73, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 94, 'donchlen': 183, 'treshold': 46, 'ema_fast': 3, 'ema_slow': 41}],
['vXbpB._', 22, 198, 1925.97, 34, 26, 45.68, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 142, 'donchlen': 183, 'treshold': 46, 'ema_fast': 3, 'ema_slow': 41}],
['vXjpB._', 22, 198, 1925.97, 34, 26, 45.68, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 160, 'donchlen': 183, 'treshold': 46, 'ema_fast': 3, 'ema_slow': 41}],
['vXtpB._', 22, 198, 1925.97, 34, 26, 45.68, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 183, 'donchlen': 183, 'treshold': 46, 'ema_fast': 3, 'ema_slow': 41}],
['sdvp5.l', 21, 183, 1559.26, 33, 24, 40.08, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 188, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 46}],
['sfvp5.l', 21, 183, 1766.85, 33, 24, 40.08, {'qty_to_risk': 8, 'target_pnl': 319, 'stop': 188, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 46}],
['v^JpF/]', 26, 169, 2680.74, 37, 24, 39.51, {'qty_to_risk': 8, 'target_pnl': 281, 'stop': 87, 'donchlen': 183, 'treshold': 50, 'ema_fast': 4, 'ema_slow': 40}],
['sdMp5.l', 22, 183, 1607.27, 33, 24, 39.23, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 94, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 46}],
['sdNp5.l', 22, 183, 1523.95, 33, 24, 39.19, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 97, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 46}],
['vdft5.l', 22, 184, 1766.37, 29, 24, 32.67, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 151, 'donchlen': 193, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 46}],
['sdfs5-l', 21, 183, 1455.82, 29, 24, 32.67, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 151, 'donchlen': 190, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 46}],
['vaJpJ;N', 30, 150, 2617.14, 39, 23, 29.55, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 87, 'donchlen': 183, 'treshold': 54, 'ema_fast': 6, 'ema_slow': 35}],
['9]JIo0S', 24, 174, 222.6, 41, 24, 27.33, {'qty_to_risk': 3, 'target_pnl': 277, 'stop': 87, 'donchlen': 88, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 37}],
['vVJ@F._', 23, 186, 2305.19, 29, 27, 26.48, {'qty_to_risk': 8, 'target_pnl': 243, 'stop': 87, 'donchlen': 66, 'treshold': 50, 'ema_fast': 3, 'ema_slow': 41}],
['v[JtJ.g', 21, 210, 1659.0, 24, 25, 16.36, {'qty_to_risk': 8, 'target_pnl': 267, 'stop': 87, 'donchlen': 193, 'treshold': 54, 'ema_fast': 3, 'ema_slow': 44}],
['vP3Ep)l', 26, 249, 565.29, 35, 34, 12.78, {'qty_to_risk': 8, 'target_pnl': 215, 'stop': 35, 'donchlen': 78, 'treshold': 93, 'ema_fast': 2, 'ema_slow': 46}],
['kdOqo1l', 27, 175, 1345.74, 30, 26, 1.32, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 99, 'donchlen': 185, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['kdVqo1l', 27, 174, 1263.47, 30, 26, 1.4, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 115, 'donchlen': 185, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vdOpo/l', 27, 175, 1780.29, 30, 26, -0.92, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 99, 'donchlen': 183, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vdOpo1l', 27, 175, 1780.29, 30, 26, -0.92, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 99, 'donchlen': 183, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['kdaqo1l', 26, 172, 1151.76, 30, 26, -2.06, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 140, 'donchlen': 185, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['-c1W<Zf', 37, 109, 65.94, 53, 15, -2.54, {'qty_to_risk': 2, 'target_pnl': 305, 'stop': 31, 'donchlen': 122, 'treshold': 40, 'ema_fast': 13, 'ema_slow': 44}],
['vdRso1l', 27, 174, 1874.45, 24, 25, -13.01, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 106, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vdRsp1l', 27, 174, 1874.45, 24, 25, -13.01, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 106, 'donchlen': 190, 'treshold': 93, 'ema_fast': 4, 'ema_slow': 46}],
['vdYso1l', 27, 174, 1837.85, 24, 25, -13.01, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 122, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vdZso1l', 27, 174, 1817.62, 24, 25, -13.01, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 124, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vdZsp1l', 27, 174, 1817.62, 24, 25, -13.01, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 124, 'donchlen': 190, 'treshold': 93, 'ema_fast': 4, 'ema_slow': 46}],
['vd[so1l', 27, 174, 1781.49, 24, 25, -13.01, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 126, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vdjso1l', 26, 173, 1554.68, 24, 25, -13.01, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 160, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vdoso1l', 26, 173, 1501.98, 24, 25, -13.01, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 172, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vdLso1l', 27, 175, 2221.88, 24, 25, -14.45, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 92, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vN3BO,f', 28, 217, 1223.92, 43, 30, 57.71, {'qty_to_risk': 8, 'target_pnl': 205, 'stop': 35, 'donchlen': 71, 'treshold': 59, 'ema_fast': 3, 'ema_slow': 44}],
['vkop5._', 22, 189, 2763.54, 34, 23, 44.1, {'qty_to_risk': 8, 'target_pnl': 343, 'stop': 172, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 41}],
['sXvp5.N', 21, 210, 1468.64, 29, 27, 40.57, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 188, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 35}],
['sXvp5.O', 21, 210, 1468.64, 29, 27, 40.57, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 188, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 35}],
['kX9?o1H', 29, 217, 1459.94, 39, 28, 12.95, {'qty_to_risk': 7, 'target_pnl': 253, 'stop': 49, 'donchlen': 64, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 33}],
['v=3so1_', 30, 185, 1086.95, 35, 28, -17.14, {'qty_to_risk': 8, 'target_pnl': 125, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 41}],
['v[JtJ/r', 28, 160, 2567.97, 20, 24, -44.08, {'qty_to_risk': 8, 'target_pnl': 267, 'stop': 87, 'donchlen': 193, 'treshold': 54, 'ema_fast': 4, 'ema_slow': 48}],
['vC9pY1F', 25, 216, 416.4, 39, 28, 60.67, {'qty_to_risk': 8, 'target_pnl': 153, 'stop': 49, 'donchlen': 183, 'treshold': 70, 'ema_fast': 4, 'ema_slow': 32}],
['vVJp\\._', 22, 207, 1435.09, 32, 28, 35.84, {'qty_to_risk': 8, 'target_pnl': 243, 'stop': 87, 'donchlen': 183, 'treshold': 73, 'ema_fast': 3, 'ema_slow': 41}],
['vVJpZ._', 22, 205, 1297.72, 32, 28, 35.84, {'qty_to_risk': 8, 'target_pnl': 243, 'stop': 87, 'donchlen': 183, 'treshold': 71, 'ema_fast': 3, 'ema_slow': 41}],
['k89?o1H', 29, 217, 601.17, 39, 28, 9.23, {'qty_to_risk': 7, 'target_pnl': 101, 'stop': 49, 'donchlen': 64, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 33}],
['vdds?1l', 26, 164, 2211.02, 26, 23, -4.29, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 147, 'donchlen': 190, 'treshold': 43, 'ema_fast': 4, 'ema_slow': 46}],
['vdos?1l', 26, 164, 2070.71, 26, 23, -4.29, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 172, 'donchlen': 190, 'treshold': 43, 'ema_fast': 4, 'ema_slow': 46}],
['vdos;1l', 27, 160, 2381.13, 23, 21, -7.6, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 172, 'donchlen': 190, 'treshold': 39, 'ema_fast': 4, 'ema_slow': 46}],
['vVJpI._', 22, 200, 1459.43, 34, 26, 50.53, {'qty_to_risk': 8, 'target_pnl': 243, 'stop': 87, 'donchlen': 183, 'treshold': 53, 'ema_fast': 3, 'ema_slow': 41}],
['vqon51`', 26, 159, 2576.59, 36, 19, 40.68, {'qty_to_risk': 8, 'target_pnl': 372, 'stop': 172, 'donchlen': 178, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 42}],
['vjop5/`', 27, 159, 2329.85, 36, 19, 40.68, {'qty_to_risk': 8, 'target_pnl': 338, 'stop': 172, 'donchlen': 183, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 42}],
['Rm^VO9P', 26, 138, 257.04, 45, 22, 33.24, {'qty_to_risk': 5, 'target_pnl': 353, 'stop': 133, 'donchlen': 120, 'treshold': 59, 'ema_fast': 6, 'ema_slow': 36}],
['bXfqL=C', 29, 156, 993.95, 42, 21, 26.66, {'qty_to_risk': 6, 'target_pnl': 253, 'stop': 151, 'donchlen': 185, 'treshold': 56, 'ema_fast': 7, 'ema_slow': 31}],
['stfs51l', 26, 153, 2659.87, 25, 20, 2.97, {'qty_to_risk': 8, 'target_pnl': 386, 'stop': 151, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['vdbp5/`', 27, 159, 2445.12, 36, 19, 40.68, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 142, 'donchlen': 183, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 42}],
['vdkp5/`', 27, 159, 2311.66, 36, 19, 40.68, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 163, 'donchlen': 183, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 42}],
['vdIp5/`', 27, 159, 2157.96, 36, 19, 40.54, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 85, 'donchlen': 183, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 42}],
['vdMp5/`', 27, 159, 2626.21, 36, 19, 39.15, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 94, 'donchlen': 183, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 42}],
['vdNp5/`', 27, 159, 2497.15, 36, 19, 38.34, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 97, 'donchlen': 183, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 42}],
['vXJpw._', 22, 211, 1704.65, 32, 28, 35.84, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 87, 'donchlen': 183, 'treshold': 100, 'ema_fast': 3, 'ema_slow': 41}],
['vXJpo._', 22, 211, 1704.65, 32, 28, 35.84, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 87, 'donchlen': 183, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 41}],
['vVJpq._', 22, 211, 1632.94, 32, 28, 35.84, {'qty_to_risk': 8, 'target_pnl': 243, 'stop': 87, 'donchlen': 183, 'treshold': 94, 'ema_fast': 3, 'ema_slow': 41}],
['vXPpo._', 22, 211, 1617.93, 32, 28, 33.2, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 101, 'donchlen': 183, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 41}],
['eXJpo._', 22, 211, 1313.2, 32, 28, 31.39, {'qty_to_risk': 7, 'target_pnl': 253, 'stop': 87, 'donchlen': 183, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 41}],
['vdds51b', 27, 159, 2391.88, 31, 19, 30.35, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 147, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 42}],
['vXUpo._', 22, 210, 1714.35, 32, 28, 30.42, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 113, 'donchlen': 183, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 41}],
['pdPvo1]', 27, 180, 1394.93, 28, 25, 7.47, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 101, 'donchlen': 198, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['pdfvo1]', 26, 178, 1301.86, 28, 25, 6.92, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 151, 'donchlen': 198, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['pdtvo1]', 26, 178, 1202.36, 28, 25, 6.92, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 183, 'donchlen': 198, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['vXZro._', 22, 209, 1717.65, 25, 28, 5.04, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 124, 'donchlen': 188, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 41}],
['vdop5/l', 26, 153, 2185.44, 30, 20, 2.66, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 172, 'donchlen': 183, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['srfs51l', 26, 153, 2659.87, 25, 20, 2.97, {'qty_to_risk': 8, 'target_pnl': 376, 'stop': 151, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['sqfs51l', 26, 153, 2562.68, 25, 20, 2.97, {'qty_to_risk': 8, 'target_pnl': 372, 'stop': 151, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['sdfm51l', 26, 153, 2349.09, 30, 20, 2.66, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 151, 'donchlen': 176, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['vddq51l', 26, 153, 2318.55, 30, 20, 2.66, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 147, 'donchlen': 185, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['sdfq51l', 26, 153, 2295.84, 30, 20, 2.66, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 151, 'donchlen': 185, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['vpds51l', 26, 153, 2587.75, 25, 20, -0.02, {'qty_to_risk': 8, 'target_pnl': 367, 'stop': 147, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['kdYso1]', 26, 178, 1746.41, 25, 24, -0.35, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 122, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['kdWso1]', 27, 178, 1743.61, 25, 24, -0.35, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 117, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['kdXso1]', 26, 178, 1713.65, 25, 24, -0.35, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 119, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['kd]so1]', 26, 178, 1683.84, 25, 24, -0.35, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 131, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['kd^so1]', 26, 178, 1655.87, 25, 24, -0.35, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 133, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['kdaso1]', 26, 177, 1576.22, 25, 24, -0.35, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 140, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['kdoso1]', 26, 177, 1466.59, 25, 24, -0.35, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 172, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['kd_so1]', 26, 177, 1416.56, 25, 24, -0.35, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 135, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['smfs51l', 26, 153, 2416.95, 25, 20, -2.0, {'qty_to_risk': 8, 'target_pnl': 353, 'stop': 151, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['vXZto._', 23, 211, 2219.58, 22, 27, -3.27, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 124, 'donchlen': 193, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 41}],
['vdds51l', 26, 153, 2318.67, 25, 20, -4.93, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 147, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['sdds51l', 26, 153, 2318.67, 25, 20, -4.93, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 147, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['vdds51k', 26, 153, 2318.67, 25, 20, -4.93, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 147, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['qdds51l', 26, 153, 2318.67, 25, 20, -4.93, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 147, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['vdds51m', 26, 153, 2318.67, 25, 20, -4.93, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 147, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['sefs51l', 26, 153, 2298.26, 25, 20, -4.93, {'qty_to_risk': 8, 'target_pnl': 315, 'stop': 151, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['sdUs51l', 27, 153, 2289.59, 25, 20, -4.93, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 113, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['vdVs51l', 26, 153, 2266.23, 25, 20, -4.93, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 115, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['sdis51l', 26, 153, 2255.92, 25, 20, -4.93, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 158, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['vdXs51l', 26, 153, 2185.08, 25, 20, -4.93, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 119, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['wdos51l', 26, 153, 2167.56, 25, 20, -4.93, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 172, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['vdos51l', 26, 153, 2167.56, 25, 20, -4.93, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 172, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['vdts51l', 26, 153, 2105.65, 25, 20, -4.93, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 183, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['vd]s51l', 26, 153, 2093.14, 25, 20, -4.93, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 131, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['vdKs51l', 26, 153, 2508.39, 25, 20, -5.96, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 90, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['vdJs51l', 26, 153, 2183.0, 25, 20, -5.65, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 87, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['ad3EpIS', 37, 158, 881.62, 40, 22, -5.42, {'qty_to_risk': 6, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 93, 'ema_fast': 10, 'ema_slow': 37}],
['ad3EpIT', 37, 158, 881.62, 40, 22, -5.42, {'qty_to_risk': 6, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 93, 'ema_fast': 10, 'ema_slow': 37}],
['vdOs51l', 27, 153, 2340.46, 25, 20, -6.06, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 99, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['vdosD1l', 26, 165, 1882.63, 25, 24, -9.06, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 172, 'donchlen': 190, 'treshold': 48, 'ema_fast': 4, 'ema_slow': 46}],
['vqop5.v', 23, 171, 2692.26, 26, 23, -23.96, {'qty_to_risk': 8, 'target_pnl': 372, 'stop': 172, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 50}],
['sdfs51s', 26, 148, 2312.51, 20, 20, -41.08, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 151, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 49}],
['vdfp5.j', 22, 184, 2022.81, 34, 23, 46.39, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 151, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 45}],
['v_JpF.b', 23, 198, 2047.16, 33, 27, 44.66, {'qty_to_risk': 8, 'target_pnl': 286, 'stop': 87, 'donchlen': 183, 'treshold': 50, 'ema_fast': 3, 'ema_slow': 42}],
['PbIT=>K', 28, 125, 349.21, 45, 20, 33.26, {'qty_to_risk': 5, 'target_pnl': 300, 'stop': 85, 'donchlen': 115, 'treshold': 41, 'ema_fast': 7, 'ema_slow': 34}],
['pdJvo1]', 26, 181, 1159.85, 28, 25, 10.4, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 87, 'donchlen': 198, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['kdJso1]', 26, 180, 1363.27, 25, 24, -0.37, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 87, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['scfs51l', 26, 153, 2264.67, 25, 20, -4.93, {'qty_to_risk': 8, 'target_pnl': 305, 'stop': 151, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['ev7CNRG', 39, 138, 851.01, 28, 25, -13.3, {'qty_to_risk': 7, 'target_pnl': 395, 'stop': 44, 'donchlen': 74, 'treshold': 58, 'ema_fast': 12, 'ema_slow': 32}],
['vdop5,v', 23, 171, 2421.44, 26, 23, -23.96, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 172, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 50}],
['vdop5)`', 20, 236, 1966.67, 29, 31, 61.73, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 172, 'donchlen': 183, 'treshold': 33, 'ema_fast': 2, 'ema_slow': 42}],
['vdop5*`', 20, 236, 1966.67, 29, 31, 61.73, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 172, 'donchlen': 183, 'treshold': 33, 'ema_fast': 2, 'ema_slow': 42}],
['vdop5,Z', 23, 196, 2397.56, 34, 23, 50.28, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 172, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 39}],
['pU3Bo,j', 29, 223, 946.14, 44, 34, 39.01, {'qty_to_risk': 7, 'target_pnl': 239, 'stop': 35, 'donchlen': 71, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 45}],
['vYon512', 24, 226, 1933.09, 26, 38, 36.17, {'qty_to_risk': 8, 'target_pnl': 258, 'stop': 172, 'donchlen': 178, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 25}],
['Xdq.9P=', 30, 131, 1246.26, 45, 20, 33.49, {'qty_to_risk': 6, 'target_pnl': 310, 'stop': 176, 'donchlen': 23, 'treshold': 37, 'ema_fast': 11, 'ema_slow': 29}],
['vdosG1l', 26, 165, 1882.63, 25, 24, -9.06, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 172, 'donchlen': 190, 'treshold': 51, 'ema_fast': 4, 'ema_slow': 46}],
['vYon51H', 25, 184, 2482.48, 35, 20, 35.23, {'qty_to_risk': 8, 'target_pnl': 258, 'stop': 172, 'donchlen': 178, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 33}],
['IWe_<2W', 25, 165, 642.38, 38, 21, 34.54, {'qty_to_risk': 5, 'target_pnl': 248, 'stop': 149, 'donchlen': 142, 'treshold': 40, 'ema_fast': 4, 'ema_slow': 38}],
['qSKn-;4', 28, 170, 1951.2, 43, 16, 29.18, {'qty_to_risk': 8, 'target_pnl': 229, 'stop': 90, 'donchlen': 178, 'treshold': 25, 'ema_fast': 6, 'ema_slow': 25}],
['_tT<c6F', 25, 182, 595.23, 37, 24, -0.17, {'qty_to_risk': 6, 'target_pnl': 386, 'stop': 110, 'donchlen': 57, 'treshold': 80, 'ema_fast': 5, 'ema_slow': 32}],
['vdOso1_', 26, 179, 1987.14, 25, 24, -1.91, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 99, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 41}],
['vdfso1_', 25, 177, 1736.36, 25, 24, -1.91, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 151, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 41}],
['vdIso1_', 26, 180, 1434.22, 25, 24, -2.18, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 85, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 41}],
['pF3vo1]', 29, 186, 1176.34, 35, 28, -5.09, {'qty_to_risk': 7, 'target_pnl': 167, 'stop': 35, 'donchlen': 198, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['kN3so1]', 29, 185, 975.82, 35, 28, -5.97, {'qty_to_risk': 7, 'target_pnl': 205, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['kO3so1]', 29, 185, 975.82, 35, 28, -5.97, {'qty_to_risk': 7, 'target_pnl': 210, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['k_3EQH]', 36, 153, 975.95, 41, 24, -12.4, {'qty_to_risk': 7, 'target_pnl': 286, 'stop': 35, 'donchlen': 78, 'treshold': 62, 'ema_fast': 9, 'ema_slow': 40}],
['kd3EoM]', 37, 152, 1172.04, 31, 22, -17.75, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 40}],
['v^JpF/u', 26, 159, 2469.54, 25, 24, -30.42, {'qty_to_risk': 8, 'target_pnl': 281, 'stop': 87, 'donchlen': 183, 'treshold': 50, 'ema_fast': 4, 'ema_slow': 49}],
['vd3p4<l', 34, 120, 2170.19, 21, 14, -35.84, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 183, 'treshold': 32, 'ema_fast': 7, 'ema_slow': 46}],
['vXZpo.Y', 23, 217, 1629.82, 33, 27, 53.51, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 124, 'donchlen': 183, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 39}],
['vaJpJ.g', 21, 208, 1642.84, 34, 26, 49.43, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 87, 'donchlen': 183, 'treshold': 54, 'ema_fast': 3, 'ema_slow': 44}],
['v_JpF/Y', 25, 174, 2737.76, 37, 24, 42.98, {'qty_to_risk': 8, 'target_pnl': 286, 'stop': 87, 'donchlen': 183, 'treshold': 50, 'ema_fast': 4, 'ema_slow': 39}],
['v^JpF/Y', 25, 174, 2673.52, 37, 24, 42.98, {'qty_to_risk': 8, 'target_pnl': 281, 'stop': 87, 'donchlen': 183, 'treshold': 50, 'ema_fast': 4, 'ema_slow': 39}],
['v^JpF/Z', 25, 174, 2673.52, 37, 24, 42.98, {'qty_to_risk': 8, 'target_pnl': 281, 'stop': 87, 'donchlen': 183, 'treshold': 50, 'ema_fast': 4, 'ema_slow': 39}],
['v`JpF/Y', 25, 174, 2359.0, 37, 24, 42.98, {'qty_to_risk': 8, 'target_pnl': 291, 'stop': 87, 'donchlen': 183, 'treshold': 50, 'ema_fast': 4, 'ema_slow': 39}],
['v_JpF._', 22, 199, 2019.59, 34, 26, 41.49, {'qty_to_risk': 8, 'target_pnl': 286, 'stop': 87, 'donchlen': 183, 'treshold': 50, 'ema_fast': 3, 'ema_slow': 41}],
['v^JpF._', 22, 199, 1949.09, 34, 26, 41.49, {'qty_to_risk': 8, 'target_pnl': 281, 'stop': 87, 'donchlen': 183, 'treshold': 50, 'ema_fast': 3, 'ema_slow': 41}],
['v^JpF/U', 25, 178, 2658.62, 37, 24, 39.79, {'qty_to_risk': 8, 'target_pnl': 281, 'stop': 87, 'donchlen': 183, 'treshold': 50, 'ema_fast': 4, 'ema_slow': 38}],
['v^MpF/Y', 26, 173, 2772.86, 37, 24, 39.5, {'qty_to_risk': 8, 'target_pnl': 281, 'stop': 94, 'donchlen': 183, 'treshold': 50, 'ema_fast': 4, 'ema_slow': 39}],
['v^apF/Y', 25, 173, 2646.38, 37, 24, 38.89, {'qty_to_risk': 8, 'target_pnl': 281, 'stop': 140, 'donchlen': 183, 'treshold': 50, 'ema_fast': 4, 'ema_slow': 39}],
['v^`pF/Y', 25, 173, 2306.06, 37, 24, 38.89, {'qty_to_risk': 8, 'target_pnl': 281, 'stop': 138, 'donchlen': 183, 'treshold': 50, 'ema_fast': 4, 'ema_slow': 39}],
['Z^JpF/Y', 25, 174, 1376.86, 37, 24, 35.01, {'qty_to_risk': 6, 'target_pnl': 281, 'stop': 87, 'donchlen': 183, 'treshold': 50, 'ema_fast': 4, 'ema_slow': 39}],
['v^JhF/Y', 24, 171, 1500.89, 34, 23, 29.17, {'qty_to_risk': 8, 'target_pnl': 281, 'stop': 87, 'donchlen': 164, 'treshold': 50, 'ema_fast': 4, 'ema_slow': 39}],
['vVJp2.C', 21, 220, 1436.56, 29, 31, 20.41, {'qty_to_risk': 8, 'target_pnl': 243, 'stop': 87, 'donchlen': 183, 'treshold': 30, 'ema_fast': 3, 'ema_slow': 31}],
['kd3E1H]', 36, 122, 1491.54, 40, 15, -3.23, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 29, 'ema_fast': 9, 'ema_slow': 40}],
['kJ3so1]', 29, 185, 1035.31, 35, 28, -6.62, {'qty_to_risk': 7, 'target_pnl': 186, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['kd3EQJ]', 37, 147, 1200.83, 31, 22, -17.75, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 62, 'ema_fast': 10, 'ema_slow': 40}],
['`a2@srh', 39, 141, 414.49, 33, 27, -20.2, {'qty_to_risk': 6, 'target_pnl': 296, 'stop': 33, 'donchlen': 66, 'treshold': 96, 'ema_fast': 19, 'ema_slow': 44}],
['Yn14iDf', 32, 201, 230.81, 42, 33, -22.85, {'qty_to_risk': 6, 'target_pnl': 357, 'stop': 31, 'donchlen': 37, 'treshold': 86, 'ema_fast': 8, 'ema_slow': 44}],
['vbJpB._', 22, 198, 1957.82, 34, 26, 41.49, {'qty_to_risk': 8, 'target_pnl': 300, 'stop': 87, 'donchlen': 183, 'treshold': 46, 'ema_fast': 3, 'ema_slow': 41}],
['v93po1F', 25, 222, 835.65, 46, 28, 35.25, {'qty_to_risk': 8, 'target_pnl': 106, 'stop': 35, 'donchlen': 183, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 32}],
['vP3so1_', 30, 185, 1119.78, 35, 28, -4.5, {'qty_to_risk': 8, 'target_pnl': 215, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 41}],
['pW3vo1]', 29, 186, 1266.77, 35, 28, -7.54, {'qty_to_risk': 7, 'target_pnl': 248, 'stop': 35, 'donchlen': 198, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['vn3Eo?l', 36, 168, 1701.84, 48, 27, -18.31, {'qty_to_risk': 8, 'target_pnl': 357, 'stop': 35, 'donchlen': 78, 'treshold': 92, 'ema_fast': 7, 'ema_slow': 46}],
['vn3Eo=l', 36, 168, 1701.84, 48, 27, -18.31, {'qty_to_risk': 8, 'target_pnl': 357, 'stop': 35, 'donchlen': 78, 'treshold': 92, 'ema_fast': 7, 'ema_slow': 46}],
['vVJpK._', 22, 202, 1575.44, 33, 27, 50.6, {'qty_to_risk': 8, 'target_pnl': 243, 'stop': 87, 'donchlen': 183, 'treshold': 55, 'ema_fast': 3, 'ema_slow': 41}],
['pV3vo1]', 29, 186, 1266.77, 35, 28, -7.54, {'qty_to_risk': 7, 'target_pnl': 243, 'stop': 35, 'donchlen': 198, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['vd3D5@l', 37, 131, 2579.75, 47, 19, -12.67, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 76, 'treshold': 33, 'ema_fast': 7, 'ema_slow': 46}],
['vddsP1l', 26, 168, 2044.31, 24, 25, -13.01, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 147, 'donchlen': 190, 'treshold': 61, 'ema_fast': 4, 'ema_slow': 46}],
['vkJp2._', 23, 181, 2068.61, 34, 23, 44.07, {'qty_to_risk': 8, 'target_pnl': 343, 'stop': 87, 'donchlen': 183, 'treshold': 30, 'ema_fast': 3, 'ema_slow': 41}],
['vVJp2.H', 22, 210, 1406.48, 32, 28, 33.99, {'qty_to_risk': 8, 'target_pnl': 243, 'stop': 87, 'donchlen': 183, 'treshold': 30, 'ema_fast': 3, 'ema_slow': 33}],
['wS@?Q-e', 23, 203, 1084.23, 32, 28, 25.86, {'qty_to_risk': 8, 'target_pnl': 229, 'stop': 65, 'donchlen': 64, 'treshold': 62, 'ema_fast': 3, 'ema_slow': 43}],
['kd3E@H]', 37, 142, 1667.53, 42, 21, -3.3, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 44, 'ema_fast': 9, 'ema_slow': 40}],
['ad3Ep?l', 36, 168, 949.5, 48, 27, -14.22, {'qty_to_risk': 6, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 93, 'ema_fast': 7, 'ema_slow': 46}],
['vd3Eo=l', 36, 168, 1811.27, 48, 27, -18.31, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 92, 'ema_fast': 7, 'ema_slow': 46}],
['vd3Eo>l', 36, 168, 1811.27, 48, 27, -18.31, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 92, 'ema_fast': 7, 'ema_slow': 46}],
['kd3po@l', 34, 146, 1463.08, 36, 19, -25.21, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 183, 'treshold': 92, 'ema_fast': 7, 'ema_slow': 46}],
['vd3so>l', 34, 147, 2379.6, 36, 19, -29.93, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 7, 'ema_slow': 46}],
['vd3so?l', 34, 147, 2379.6, 36, 19, -29.93, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 7, 'ema_slow': 46}],
['vd3sp<l', 34, 147, 2379.6, 36, 19, -29.93, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 93, 'ema_fast': 7, 'ema_slow': 46}],
['vd3po@l', 34, 146, 2037.24, 36, 19, -29.93, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 183, 'treshold': 92, 'ema_fast': 7, 'ema_slow': 46}],
['vd3po=l', 34, 146, 2037.24, 36, 19, -29.93, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 183, 'treshold': 92, 'ema_fast': 7, 'ema_slow': 46}],
['vd3po<l', 34, 146, 2037.24, 36, 19, -29.93, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 183, 'treshold': 92, 'ema_fast': 7, 'ema_slow': 46}],
['vd3po?l', 34, 146, 2037.24, 36, 19, -29.93, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 183, 'treshold': 92, 'ema_fast': 7, 'ema_slow': 46}],
['vdJp<,`', 23, 196, 1760.51, 36, 25, 65.73, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 87, 'donchlen': 183, 'treshold': 40, 'ema_fast': 3, 'ema_slow': 42}],
['vk3?5)l', 26, 210, 1121.95, 40, 32, 59.65, {'qty_to_risk': 8, 'target_pnl': 343, 'stop': 35, 'donchlen': 64, 'treshold': 33, 'ema_fast': 2, 'ema_slow': 46}],
['veJp2._', 23, 181, 2222.74, 34, 23, 44.07, {'qty_to_risk': 8, 'target_pnl': 315, 'stop': 87, 'donchlen': 183, 'treshold': 30, 'ema_fast': 3, 'ema_slow': 41}],
['vYon51F', 25, 189, 3903.54, 38, 21, 35.53, {'qty_to_risk': 8, 'target_pnl': 258, 'stop': 172, 'donchlen': 178, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 32}],
['vT3po)l', 24, 248, 823.72, 31, 32, -6.18, {'qty_to_risk': 8, 'target_pnl': 234, 'stop': 35, 'donchlen': 183, 'treshold': 92, 'ema_fast': 2, 'ema_slow': 46}],
['kd3E_H]', 36, 158, 922.59, 41, 24, -12.4, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 76, 'ema_fast': 9, 'ema_slow': 40}],
['vXJpB.w', 24, 183, 2210.92, 26, 26, -25.36, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 87, 'donchlen': 183, 'treshold': 46, 'ema_fast': 3, 'ema_slow': 50}],
['ve3?5)l', 26, 210, 1178.11, 40, 32, 59.65, {'qty_to_risk': 8, 'target_pnl': 315, 'stop': 35, 'donchlen': 64, 'treshold': 33, 'ema_fast': 2, 'ema_slow': 46}],
['vaJpJ;<', 28, 181, 1930.12, 40, 25, 45.73, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 87, 'donchlen': 183, 'treshold': 54, 'ema_fast': 6, 'ema_slow': 28}],
['vVJpR._', 22, 203, 1545.55, 32, 28, 35.84, {'qty_to_risk': 8, 'target_pnl': 243, 'stop': 87, 'donchlen': 183, 'treshold': 63, 'ema_fast': 3, 'ema_slow': 41}],
['vdfp5.q', 23, 180, 2123.39, 34, 23, 29.31, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 151, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 48}],
['vd3p5)l', 23, 219, 958.55, 32, 28, 9.12, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 183, 'treshold': 33, 'ema_fast': 2, 'ema_slow': 46}],
['kN3ko,r', 27, 208, 722.03, 32, 28, -2.6, {'qty_to_risk': 7, 'target_pnl': 205, 'stop': 35, 'donchlen': 171, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 48}],
['kN3ko-r', 27, 208, 722.03, 32, 28, -2.6, {'qty_to_risk': 7, 'target_pnl': 205, 'stop': 35, 'donchlen': 171, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 48}],
['v]3p51l', 29, 154, 1260.63, 28, 21, -5.65, {'qty_to_risk': 8, 'target_pnl': 277, 'stop': 35, 'donchlen': 183, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['vdop5/_', 25, 159, 2361.82, 35, 20, 36.95, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 172, 'donchlen': 183, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 41}],
['v\\3?o1l', 31, 198, 1107.39, 48, 35, 35.52, {'qty_to_risk': 8, 'target_pnl': 272, 'stop': 35, 'donchlen': 64, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['sdfs51_', 25, 159, 2474.73, 30, 20, 26.3, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 151, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 41}],
['vdIpo.l', 22, 206, 1085.99, 31, 29, 26.53, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 85, 'donchlen': 183, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 46}],
['vdOpo+l', 22, 205, 1184.66, 31, 29, 21.92, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 99, 'donchlen': 183, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 46}],
['vdVpo.l', 22, 204, 1112.28, 31, 29, 19.8, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 115, 'donchlen': 183, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 46}],
['ghl_\\=3', 29, 174, 942.06, 34, 26, 19.52, {'qty_to_risk': 7, 'target_pnl': 329, 'stop': 165, 'donchlen': 142, 'treshold': 73, 'ema_fast': 7, 'ema_slow': 25}],
['kd3E5H]', 36, 125, 1411.3, 43, 16, -2.2, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 33, 'ema_fast': 9, 'ema_slow': 40}],
['kd3E8H]', 36, 130, 1358.28, 47, 17, -2.14, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 36, 'ema_fast': 9, 'ema_slow': 40}],
['kd3CQH]', 38, 155, 1196.17, 48, 25, -2.53, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 74, 'treshold': 62, 'ema_fast': 9, 'ema_slow': 40}],
['k]3qo1l', 30, 182, 1025.62, 32, 28, -9.72, {'qty_to_risk': 7, 'target_pnl': 277, 'stop': 35, 'donchlen': 185, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['sd3EQH]', 36, 153, 1250.92, 41, 24, -12.35, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 62, 'ema_fast': 9, 'ema_slow': 40}],
['v\\3so1l', 30, 181, 1429.09, 31, 29, -20.9, {'qty_to_risk': 8, 'target_pnl': 272, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['v]3so1l', 30, 181, 1429.09, 31, 29, -20.9, {'qty_to_risk': 8, 'target_pnl': 277, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['v]3sp1l', 30, 181, 1429.09, 31, 29, -20.9, {'qty_to_risk': 8, 'target_pnl': 277, 'stop': 35, 'donchlen': 190, 'treshold': 93, 'ema_fast': 4, 'ema_slow': 46}],
['vdfp5.M', 21, 210, 2022.13, 29, 27, 33.46, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 151, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 35}],
['vdds51R', 26, 168, 3795.25, 31, 19, 27.54, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 147, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 36}],
['sdfs51Q', 26, 168, 3795.25, 31, 19, 27.54, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 151, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 36}],
['v[JtJ(g', 22, 248, 2240.73, 19, 31, 12.38, {'qty_to_risk': 8, 'target_pnl': 267, 'stop': 87, 'donchlen': 193, 'treshold': 54, 'ema_fast': 2, 'ema_slow': 44}],
['kd3EHH]', 37, 148, 1502.77, 40, 22, -7.85, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 52, 'ema_fast': 9, 'ema_slow': 40}],
['vd3Eo8l', 34, 169, 1275.65, 42, 28, -14.65, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 92, 'ema_fast': 6, 'ema_slow': 46}],
['vd3?odl', 38, 149, 1087.85, 39, 28, -26.19, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 64, 'treshold': 92, 'ema_fast': 16, 'ema_slow': 46}],
['vd3so8l', 35, 153, 1755.59, 33, 21, -32.37, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 6, 'ema_slow': 46}],
['vd3sp9l', 35, 153, 1755.59, 33, 21, -32.37, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 93, 'ema_fast': 6, 'ema_slow': 46}],
['hXt*s/2', 24, 268, 839.83, 26, 50, 57.86, {'qty_to_risk': 7, 'target_pnl': 253, 'stop': 183, 'donchlen': 13, 'treshold': 96, 'ema_fast': 4, 'ema_slow': 25}],
['vaJpJ;B', 28, 171, 1488.4, 40, 20, 37.84, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 87, 'donchlen': 183, 'treshold': 54, 'ema_fast': 6, 'ema_slow': 31}],
['kN5ko,r', 26, 206, 854.57, 28, 28, 34.1, {'qty_to_risk': 7, 'target_pnl': 205, 'stop': 40, 'donchlen': 171, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 48}],
['vbopR.`', 22, 202, 1910.45, 31, 29, 26.58, {'qty_to_risk': 8, 'target_pnl': 300, 'stop': 172, 'donchlen': 183, 'treshold': 63, 'ema_fast': 3, 'ema_slow': 42}],
['Or_1/.M', 21, 199, 375.04, 37, 27, 24.95, {'qty_to_risk': 5, 'target_pnl': 376, 'stop': 135, 'donchlen': 30, 'treshold': 27, 'ema_fast': 3, 'ema_slow': 35}],
['kavko,r', 23, 197, 1060.81, 29, 27, 3.73, {'qty_to_risk': 7, 'target_pnl': 296, 'stop': 188, 'donchlen': 171, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 48}],
['v93mo1_', 30, 185, 1016.07, 36, 25, -10.23, {'qty_to_risk': 8, 'target_pnl': 106, 'stop': 35, 'donchlen': 176, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 41}],
['td3EoH]', 36, 159, 1131.42, 41, 24, -12.35, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 92, 'ema_fast': 9, 'ema_slow': 40}],
['v93to1_', 30, 185, 1497.8, 35, 28, -19.39, {'qty_to_risk': 8, 'target_pnl': 106, 'stop': 35, 'donchlen': 193, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 41}],
['sXvp5.7', 22, 251, 2261.68, 29, 41, 52.61, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 188, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 27}],
['vdos51S', 27, 166, 3940.27, 30, 20, 24.97, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 172, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 37}],
['kO3ko,r', 27, 208, 691.88, 32, 28, -2.52, {'qty_to_risk': 7, 'target_pnl': 210, 'stop': 35, 'donchlen': 171, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 48}],
['v92po1_', 30, 185, 1016.13, 33, 27, -15.49, {'qty_to_risk': 8, 'target_pnl': 106, 'stop': 33, 'donchlen': 183, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 41}],
['v`3so1l', 30, 181, 1429.09, 31, 29, -20.9, {'qty_to_risk': 8, 'target_pnl': 291, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vd3so?_', 36, 150, 1883.32, 38, 21, -27.19, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 7, 'ema_slow': 41}],
['vd3so=_', 36, 150, 1883.32, 38, 21, -27.19, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 7, 'ema_slow': 41}],
['vaJpJ;?', 26, 182, 1827.54, 45, 22, 65.98, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 87, 'donchlen': 183, 'treshold': 54, 'ema_fast': 6, 'ema_slow': 29}],
['\\qvqf*v', 21, 229, 1086.59, 27, 33, 21.87, {'qty_to_risk': 6, 'target_pnl': 372, 'stop': 188, 'donchlen': 185, 'treshold': 83, 'ema_fast': 2, 'ema_slow': 50}],
['>V?u^C*', 31, 187, 419.33, 30, 30, -0.46, {'qty_to_risk': 4, 'target_pnl': 243, 'stop': 62, 'donchlen': 195, 'treshold': 75, 'ema_fast': 8, 'ema_slow': 22}],
['kd3E9H]', 36, 130, 1358.28, 47, 17, -2.14, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 37, 'ema_fast': 9, 'ema_slow': 40}],
['kd3so;]', 34, 156, 1201.74, 41, 24, -8.31, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 6, 'ema_slow': 40}],
['kd3E6H]', 36, 128, 1298.44, 47, 17, -2.14, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 34, 'ema_fast': 9, 'ema_slow': 40}],
['vQ3so1_', 30, 185, 1160.97, 35, 28, -4.5, {'qty_to_risk': 8, 'target_pnl': 220, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 41}],
['vH3so1_', 30, 185, 1089.49, 35, 28, -8.97, {'qty_to_risk': 8, 'target_pnl': 177, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 41}],
['vYon51:', 23, 209, 1415.31, 35, 28, 26.83, {'qty_to_risk': 8, 'target_pnl': 258, 'stop': 172, 'donchlen': 178, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 28}],
['vYon51;', 23, 209, 1415.31, 35, 28, 26.83, {'qty_to_risk': 8, 'target_pnl': 258, 'stop': 172, 'donchlen': 178, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 28}],
['vdds51J', 25, 184, 2774.51, 30, 20, 22.52, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 147, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 33}],
['sdfs51J', 25, 184, 2749.57, 30, 20, 22.52, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 151, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 33}],
['vvZpo._', 22, 210, 2164.98, 32, 28, 22.26, {'qty_to_risk': 8, 'target_pnl': 395, 'stop': 124, 'donchlen': 183, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 41}],
['sdfs512', 25, 227, 1858.58, 23, 38, 7.88, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 151, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 25}],
[';]mcKO(', 29, 148, 216.08, 39, 23, 7.02, {'qty_to_risk': 3, 'target_pnl': 277, 'stop': 167, 'donchlen': 151, 'treshold': 55, 'ema_fast': 11, 'ema_slow': 21}],
['vt3ko3l', 32, 158, 1304.06, 29, 24, -36.99, {'qty_to_risk': 8, 'target_pnl': 386, 'stop': 35, 'donchlen': 171, 'treshold': 92, 'ema_fast': 5, 'ema_slow': 46}],
['vk\\pF,_', 22, 199, 1629.5, 34, 26, 37.4, {'qty_to_risk': 8, 'target_pnl': 343, 'stop': 128, 'donchlen': 183, 'treshold': 50, 'ema_fast': 3, 'ema_slow': 41}],
['tdcpF._', 22, 199, 2000.0, 34, 26, 37.4, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 144, 'donchlen': 183, 'treshold': 50, 'ema_fast': 3, 'ema_slow': 41}],
['vdop5/Z', 26, 162, 3281.86, 35, 20, 36.49, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 172, 'donchlen': 183, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 39}],
['vdJpo)l', 21, 242, 1559.25, 28, 32, 25.28, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 87, 'donchlen': 183, 'treshold': 92, 'ema_fast': 2, 'ema_slow': 46}],
['kdIpo)l', 21, 242, 1167.67, 28, 32, 25.79, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 85, 'donchlen': 183, 'treshold': 92, 'ema_fast': 2, 'ema_slow': 46}],
['kdI?o1H', 25, 200, 750.4, 35, 28, 25.74, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 85, 'donchlen': 64, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 33}],
['kdNpo)l', 21, 242, 1184.97, 28, 32, 22.62, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 97, 'donchlen': 183, 'treshold': 92, 'ema_fast': 2, 'ema_slow': 46}],
['vdSpo)l', 21, 241, 1503.79, 28, 32, 21.31, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 108, 'donchlen': 183, 'treshold': 92, 'ema_fast': 2, 'ema_slow': 46}],
['vdQpo)l', 21, 242, 1449.48, 28, 32, 21.32, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 103, 'donchlen': 183, 'treshold': 92, 'ema_fast': 2, 'ema_slow': 46}],
['vdfpo(l', 20, 239, 1276.17, 28, 32, 21.31, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 151, 'donchlen': 183, 'treshold': 92, 'ema_fast': 2, 'ema_slow': 46}],
['vdhpo(l', 20, 239, 1256.98, 28, 32, 21.31, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 156, 'donchlen': 183, 'treshold': 92, 'ema_fast': 2, 'ema_slow': 46}],
['vdopo(l', 20, 239, 1205.11, 28, 32, 21.31, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 172, 'donchlen': 183, 'treshold': 92, 'ema_fast': 2, 'ema_slow': 46}],
['vd_po(l', 20, 240, 1205.11, 28, 32, 21.31, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 135, 'donchlen': 183, 'treshold': 92, 'ema_fast': 2, 'ema_slow': 46}],
['vdopo)l', 20, 239, 1205.11, 28, 32, 21.31, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 172, 'donchlen': 183, 'treshold': 92, 'ema_fast': 2, 'ema_slow': 46}],
['vdrpo(l', 20, 239, 1180.87, 28, 32, 21.31, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 179, 'donchlen': 183, 'treshold': 92, 'ema_fast': 2, 'ema_slow': 46}],
['kdO?o1H', 25, 198, 773.88, 35, 28, 20.41, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 99, 'donchlen': 64, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 33}],
['vdos51M', 26, 174, 3219.49, 28, 21, 15.55, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 172, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 35}],
['kdX?o1H', 24, 195, 691.09, 33, 27, 13.9, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 119, 'donchlen': 64, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 33}],
['kdU?o1H', 24, 195, 686.96, 33, 27, 11.89, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 113, 'donchlen': 64, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 33}],
['vd3?o6l', 33, 180, 885.89, 45, 35, -11.35, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 64, 'treshold': 92, 'ema_fast': 5, 'ema_slow': 46}],
['vd3po4l', 33, 159, 1444.21, 32, 25, -35.46, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 183, 'treshold': 92, 'ema_fast': 5, 'ema_slow': 46}],
['vd3po6l', 33, 159, 1444.21, 32, 25, -35.46, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 183, 'treshold': 92, 'ema_fast': 5, 'ema_slow': 46}],
['vd3so5l', 33, 159, 1637.68, 30, 26, -42.02, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 5, 'ema_slow': 46}],
['vd3so4l', 33, 159, 1637.68, 30, 26, -42.02, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 5, 'ema_slow': 46}],
['vd3sp4l', 33, 159, 1637.68, 30, 26, -42.02, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 93, 'ema_fast': 5, 'ema_slow': 46}],
['NCoF+A*', 29, 127, 406.42, 46, 15, 83.7, {'qty_to_risk': 5, 'target_pnl': 153, 'stop': 172, 'donchlen': 81, 'treshold': 23, 'ema_fast': 8, 'ema_slow': 22}],
['OsE\\e=)', 27, 214, 400.37, 45, 33, 66.95, {'qty_to_risk': 5, 'target_pnl': 381, 'stop': 76, 'donchlen': 134, 'treshold': 82, 'ema_fast': 7, 'ema_slow': 21}],
['sXvp5.?', 21, 239, 1362.06, 34, 38, 66.02, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 188, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 29}],
['sXvp5.=', 21, 239, 1362.06, 34, 38, 66.02, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 188, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 29}],
['E8kw9)r', 21, 218, 178.97, 27, 29, 31.01, {'qty_to_risk': 4, 'target_pnl': 101, 'stop': 163, 'donchlen': 200, 'treshold': 37, 'ema_fast': 2, 'ema_slow': 48}],
['vuZpo._', 22, 210, 2164.98, 32, 28, 22.26, {'qty_to_risk': 8, 'target_pnl': 391, 'stop': 124, 'donchlen': 183, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 41}],
['v53so1l', 30, 181, 981.66, 31, 29, -24.56, {'qty_to_risk': 8, 'target_pnl': 87, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vdop5/V', 27, 166, 4211.53, 35, 20, 35.31, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 172, 'donchlen': 183, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 38}],
['vdos51W', 27, 166, 4197.59, 30, 20, 24.97, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 172, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 38}],
['kV3ko,r', 27, 208, 764.96, 32, 28, -3.8, {'qty_to_risk': 7, 'target_pnl': 243, 'stop': 35, 'donchlen': 171, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 48}],
['vaJpJ)g', 21, 245, 1917.51, 28, 32, 45.17, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 87, 'donchlen': 183, 'treshold': 54, 'ema_fast': 2, 'ema_slow': 44}],
['vaJpJ*g', 21, 245, 1917.51, 28, 32, 45.17, {'qty_to_risk': 8, 'target_pnl': 296, 'stop': 87, 'donchlen': 183, 'treshold': 54, 'ema_fast': 2, 'ema_slow': 44}],
['vdop5/G', 25, 189, 4518.72, 38, 21, 35.53, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 172, 'donchlen': 183, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 32}],
['sXvp5.2', 23, 266, 2518.63, 23, 47, 26.2, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 188, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 25}],
['vdOpoL6', 31, 147, 2028.9, 36, 19, 22.31, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 99, 'donchlen': 183, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 26}],
['kd3E:H]', 36, 132, 1241.91, 47, 17, -2.14, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 38, 'ema_fast': 9, 'ema_slow': 40}],
['nj3U:WD', 38, 107, 724.13, 40, 15, -14.5, {'qty_to_risk': 7, 'target_pnl': 338, 'stop': 35, 'donchlen': 117, 'treshold': 38, 'ema_fast': 13, 'ema_slow': 31}],
['sXvp5.@', 21, 232, 1352.84, 35, 34, 71.08, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 188, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 30}],
['sXvp5.A', 21, 232, 1352.84, 35, 34, 71.08, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 188, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 30}],
['kdGpo)l', 21, 242, 1040.83, 28, 32, 27.1, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 81, 'donchlen': 183, 'treshold': 92, 'ema_fast': 2, 'ema_slow': 46}],
['vdds51[', 27, 158, 3339.23, 30, 20, 26.3, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 147, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 40}],
['vdJpo+_', 22, 211, 1407.51, 32, 28, 26.14, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 87, 'donchlen': 183, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 41}],
['veZpo._', 22, 210, 1919.95, 32, 28, 22.26, {'qty_to_risk': 8, 'target_pnl': 315, 'stop': 124, 'donchlen': 183, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 41}],
['vkZpo._', 22, 210, 1835.44, 32, 28, 22.26, {'qty_to_risk': 8, 'target_pnl': 343, 'stop': 124, 'donchlen': 183, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 41}],
['vdqpo.^', 22, 208, 1750.25, 32, 28, 22.26, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 176, 'donchlen': 183, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 41}],
['kd3so5]', 32, 167, 1288.67, 35, 28, -14.41, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 5, 'ema_slow': 40}],
['v^JpF/G', 24, 206, 2650.1, 42, 26, 56.48, {'qty_to_risk': 8, 'target_pnl': 281, 'stop': 87, 'donchlen': 183, 'treshold': 50, 'ema_fast': 4, 'ema_slow': 32}],
['kd[po.]', 23, 216, 1242.36, 32, 28, 27.33, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 126, 'donchlen': 183, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 40}],
['vdcp4L+', 32, 139, 3671.03, 36, 19, 2.93, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 144, 'donchlen': 183, 'treshold': 32, 'ema_fast': 10, 'ema_slow': 22}],
['vdvp4L+', 32, 139, 3671.03, 36, 19, 2.93, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 188, 'donchlen': 183, 'treshold': 32, 'ema_fast': 10, 'ema_slow': 22}],
['vd3so1e', 29, 185, 1356.08, 37, 27, -6.14, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 43}],
['vd3sp1c', 29, 185, 1356.08, 37, 27, -6.14, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 93, 'ema_fast': 4, 'ema_slow': 43}],
['kd9?o1i', 29, 193, 700.03, 31, 29, -13.26, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 49, 'donchlen': 64, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 45}],
['vd:s51l', 28, 153, 1799.33, 25, 20, -22.9, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 51, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 46}],
['k]3wo1r', 31, 174, 1170.04, 27, 29, -36.92, {'qty_to_risk': 7, 'target_pnl': 277, 'stop': 35, 'donchlen': 200, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 48}],
['k]3vo1r', 31, 174, 1168.27, 27, 29, -36.92, {'qty_to_risk': 7, 'target_pnl': 277, 'stop': 35, 'donchlen': 198, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 48}],
['vd3E>1l', 32, 165, 1310.33, 44, 25, 14.05, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 42, 'ema_fast': 4, 'ema_slow': 46}],
['kU3ko,r', 27, 208, 737.15, 32, 28, -3.8, {'qty_to_risk': 7, 'target_pnl': 239, 'stop': 35, 'donchlen': 171, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 48}],
['pd3wo1j', 30, 185, 1206.69, 33, 27, -9.76, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 200, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 45}],
['vd3so1j', 30, 184, 1365.16, 33, 27, -13.57, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 45}],
['vd3so1i', 30, 184, 1365.16, 33, 27, -13.57, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 45}],
['vd3sf1l', 30, 178, 1109.72, 31, 29, -20.9, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 83, 'ema_fast': 4, 'ema_slow': 46}],
['KecdnJ/', 29, 150, 359.54, 35, 20, 12.82, {'qty_to_risk': 5, 'target_pnl': 315, 'stop': 144, 'donchlen': 154, 'treshold': 91, 'ema_fast': 10, 'ema_slow': 24}],
['vw3so1l', 30, 181, 1364.12, 31, 29, -20.9, {'qty_to_risk': 8, 'target_pnl': 400, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vkop5.7', 22, 251, 2419.47, 29, 41, 52.61, {'qty_to_risk': 8, 'target_pnl': 343, 'stop': 172, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 27}],
['vdfp5.F', 21, 224, 1363.92, 31, 32, 47.43, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 151, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 32}],
['vdqpo.X', 22, 215, 1628.64, 33, 27, 42.68, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 176, 'donchlen': 183, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 39}],
['vdqpo.Y', 22, 215, 1628.64, 33, 27, 42.68, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 176, 'donchlen': 183, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 39}],
['vj3?o1l', 31, 198, 1054.59, 48, 35, 35.52, {'qty_to_risk': 8, 'target_pnl': 338, 'stop': 35, 'donchlen': 64, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vi3?o1l', 31, 198, 1054.59, 48, 35, 35.52, {'qty_to_risk': 8, 'target_pnl': 334, 'stop': 35, 'donchlen': 64, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['kt3qo1l', 30, 182, 979.79, 32, 28, -9.72, {'qty_to_risk': 7, 'target_pnl': 386, 'stop': 35, 'donchlen': 185, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vd3Bp1l', 31, 194, 1182.71, 47, 34, 35.26, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 71, 'treshold': 93, 'ema_fast': 4, 'ema_slow': 46}],
['vd3?o/l', 31, 198, 1107.42, 48, 35, 35.52, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 64, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vd3?o1l', 31, 198, 1107.42, 48, 35, 35.52, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 64, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vd3?u1l', 31, 199, 1081.5, 48, 35, 35.52, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 64, 'treshold': 98, 'ema_fast': 4, 'ema_slow': 46}],
['vm3?o1l', 31, 198, 1054.59, 48, 35, 35.52, {'qty_to_risk': 8, 'target_pnl': 353, 'stop': 35, 'donchlen': 64, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vn3?o1l', 31, 198, 1054.59, 48, 35, 35.52, {'qty_to_risk': 8, 'target_pnl': 357, 'stop': 35, 'donchlen': 64, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vd3Ao1l', 30, 197, 1043.86, 47, 34, 33.37, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 69, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['od3?o1l', 31, 198, 855.06, 48, 35, 32.5, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 64, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['Wd3?o1l', 31, 198, 638.98, 48, 35, 26.64, {'qty_to_risk': 6, 'target_pnl': 310, 'stop': 35, 'donchlen': 64, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vd3Do1l', 30, 194, 955.39, 43, 32, 4.72, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 76, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vn3Do1l', 30, 194, 908.8, 43, 32, 4.72, {'qty_to_risk': 8, 'target_pnl': 357, 'stop': 35, 'donchlen': 76, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['kf3qo1l', 30, 182, 1038.28, 32, 28, -9.72, {'qty_to_risk': 7, 'target_pnl': 319, 'stop': 35, 'donchlen': 185, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['gd3qo1l', 30, 182, 1025.62, 32, 28, -9.72, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 185, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['od3qo1l', 30, 182, 1025.62, 32, 28, -9.72, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 185, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['ed3qo1l', 30, 182, 1025.62, 32, 28, -9.72, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 185, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['kd3qu1l', 30, 183, 1001.62, 32, 28, -9.72, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 185, 'treshold': 98, 'ema_fast': 4, 'ema_slow': 46}],
['kr3qo1l', 30, 182, 979.79, 32, 28, -9.72, {'qty_to_risk': 7, 'target_pnl': 376, 'stop': 35, 'donchlen': 185, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['ad3wp1l', 30, 182, 890.24, 31, 29, -12.58, {'qty_to_risk': 6, 'target_pnl': 310, 'stop': 35, 'donchlen': 200, 'treshold': 93, 'ema_fast': 4, 'ema_slow': 46}],
['vd3no1l', 30, 182, 1372.66, 32, 28, -13.44, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 178, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vd3qo1l', 30, 182, 1366.79, 32, 28, -13.44, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 185, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vd3po/l', 30, 182, 1366.82, 32, 28, -13.44, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 183, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vd3po2l', 30, 182, 1366.82, 32, 28, -13.44, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 183, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vd3do1l', 29, 181, 793.58, 29, 27, -15.57, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 154, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['jd3so1l', 30, 181, 1066.14, 31, 29, -16.75, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vd3ro1l', 30, 180, 1228.28, 31, 29, -19.43, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 188, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vd3to1l', 30, 180, 1536.53, 31, 29, -20.9, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 193, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vn3to1l', 30, 180, 1466.58, 31, 29, -20.9, {'qty_to_risk': 8, 'target_pnl': 357, 'stop': 35, 'donchlen': 193, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vd3sp1l', 30, 181, 1429.09, 31, 29, -20.9, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 93, 'ema_fast': 4, 'ema_slow': 46}],
['vd3sm1l', 30, 181, 1429.09, 31, 29, -20.9, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 90, 'ema_fast': 4, 'ema_slow': 46}],
['vd3so1l', 30, 181, 1429.09, 31, 29, -20.9, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['wd3so1l', 30, 181, 1429.09, 31, 29, -20.9, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vc3sp1l', 30, 181, 1429.09, 31, 29, -20.9, {'qty_to_risk': 8, 'target_pnl': 305, 'stop': 35, 'donchlen': 190, 'treshold': 93, 'ema_fast': 4, 'ema_slow': 46}],
['vd3sp/l', 30, 181, 1429.09, 31, 29, -20.9, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 93, 'ema_fast': 4, 'ema_slow': 46}],
['vd3sv1l', 30, 182, 1395.85, 31, 29, -20.9, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 99, 'ema_fast': 4, 'ema_slow': 46}],
['vd3sr1l', 30, 182, 1395.85, 31, 29, -20.9, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 95, 'ema_fast': 4, 'ema_slow': 46}],
['vd3sw1l', 30, 182, 1395.85, 31, 29, -20.9, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 100, 'ema_fast': 4, 'ema_slow': 46}],
['vo3so1l', 30, 181, 1364.12, 31, 29, -20.9, {'qty_to_risk': 8, 'target_pnl': 362, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vd3sl1l', 30, 180, 1043.2, 31, 29, -20.9, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 89, 'ema_fast': 4, 'ema_slow': 46}],
['RZ-5*A+', 33, 169, 110.73, 42, 19, 21.09, {'qty_to_risk': 5, 'target_pnl': 262, 'stop': 21, 'donchlen': 40, 'treshold': 22, 'ema_fast': 8, 'ema_slow': 22}],
['vd3so1g', 30, 181, 1409.69, 37, 27, -6.14, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 44}],
['nhFo<58', 27, 201, 736.67, 46, 26, 51.55, {'qty_to_risk': 7, 'target_pnl': 329, 'stop': 78, 'donchlen': 181, 'treshold': 40, 'ema_fast': 5, 'ema_slow': 27}],
['vc\\pF4>', 25, 197, 1935.85, 44, 27, 46.26, {'qty_to_risk': 8, 'target_pnl': 305, 'stop': 128, 'donchlen': 183, 'treshold': 50, 'ema_fast': 5, 'ema_slow': 29}],
['vqopR,]', 22, 209, 1990.61, 32, 28, 33.15, {'qty_to_risk': 8, 'target_pnl': 372, 'stop': 172, 'donchlen': 183, 'treshold': 63, 'ema_fast': 3, 'ema_slow': 40}],
['vd3sc1l', 30, 178, 1109.72, 31, 29, -20.9, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 80, 'ema_fast': 4, 'ema_slow': 46}],
['vdJpoL)', 32, 168, 2257.74, 25, 27, -23.49, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 87, 'donchlen': 183, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 21}],
['vd3?F1l', 31, 180, 1429.75, 50, 32, 33.36, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 64, 'treshold': 50, 'ema_fast': 4, 'ema_slow': 46}],
['kd9?o4H', 31, 195, 1017.57, 44, 25, 24.1, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 49, 'donchlen': 64, 'treshold': 92, 'ema_fast': 5, 'ema_slow': 33}],
['ku9?o1H', 29, 217, 1445.7, 39, 28, 12.95, {'qty_to_risk': 7, 'target_pnl': 391, 'stop': 49, 'donchlen': 64, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 33}],
['kd9?g1H', 29, 214, 1167.51, 39, 28, 12.95, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 49, 'donchlen': 64, 'treshold': 84, 'ema_fast': 4, 'ema_slow': 33}],
['kd9?f1H', 29, 214, 1167.51, 39, 28, 12.95, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 49, 'donchlen': 64, 'treshold': 83, 'ema_fast': 4, 'ema_slow': 33}],
['vd3EC1l', 31, 172, 1191.8, 42, 26, 9.68, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 47, 'ema_fast': 4, 'ema_slow': 46}],
['k^3so1]', 29, 185, 1036.82, 35, 28, -8.35, {'qty_to_risk': 7, 'target_pnl': 281, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['^cF-21O', 23, 181, 589.13, 34, 26, 25.34, {'qty_to_risk': 6, 'target_pnl': 305, 'stop': 78, 'donchlen': 20, 'treshold': 30, 'ema_fast': 4, 'ema_slow': 35}],
['vdopR._', 22, 203, 2022.19, 32, 28, 22.26, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 172, 'donchlen': 183, 'treshold': 63, 'ema_fast': 3, 'ema_slow': 41}],
['sdfs51B', 24, 197, 4194.1, 32, 25, 19.69, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 151, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 31}],
['vd3E@1l', 31, 170, 1173.33, 44, 25, 14.05, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 78, 'treshold': 44, 'ema_fast': 4, 'ema_slow': 46}],
['kp9?o1H', 29, 217, 1411.1, 39, 28, 12.95, {'qty_to_risk': 7, 'target_pnl': 367, 'stop': 49, 'donchlen': 64, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 33}],
['kd9?b1H', 29, 214, 1167.51, 39, 28, 12.95, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 49, 'donchlen': 64, 'treshold': 79, 'ema_fast': 4, 'ema_slow': 33}],
['vt3to,l', 26, 213, 1012.97, 34, 32, 1.87, {'qty_to_risk': 8, 'target_pnl': 386, 'stop': 35, 'donchlen': 193, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 46}],
['vd3?o1H', 29, 222, 1063.32, 43, 32, 31.17, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 64, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 33}],
['vd3Co1a', 31, 198, 944.24, 41, 29, 19.13, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 74, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 42}],
['vdds51<', 24, 210, 1282.89, 32, 28, 14.04, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 147, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 28}],
['vdds51;', 24, 210, 1282.89, 32, 28, 14.04, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 147, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 28}],
['>jHtMH7', 30, 155, 395.93, 30, 20, 7.81, {'qty_to_risk': 4, 'target_pnl': 338, 'stop': 83, 'donchlen': 193, 'treshold': 57, 'ema_fast': 9, 'ema_slow': 27}],
['vd3so.l', 25, 212, 891.32, 34, 32, 1.87, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 46}],
['vd3so,l', 25, 212, 891.32, 34, 32, 1.87, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 46}],
['vd3lo1a', 30, 185, 1255.78, 37, 24, -0.69, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 173, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 42}],
['vd3mo1a', 30, 185, 1255.78, 37, 24, -0.69, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 176, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 42}],
['kd3so1a', 30, 185, 987.72, 37, 27, -8.14, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 42}],
['vcopR.[', 22, 209, 1772.81, 32, 28, 33.15, {'qty_to_risk': 8, 'target_pnl': 305, 'stop': 172, 'donchlen': 183, 'treshold': 63, 'ema_fast': 3, 'ema_slow': 40}],
['vd3?T1l', 30, 187, 865.15, 47, 34, 31.15, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 64, 'treshold': 65, 'ema_fast': 4, 'ema_slow': 46}],
['vkop5.2', 23, 266, 2356.32, 23, 47, 26.2, {'qty_to_risk': 8, 'target_pnl': 343, 'stop': 172, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 25}],
['vdOpoL-', 30, 163, 2041.13, 37, 24, 24.49, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 99, 'donchlen': 183, 'treshold': 92, 'ema_fast': 10, 'ema_slow': 23}],
['\\d9?o1H', 29, 217, 1023.4, 39, 28, 14.46, {'qty_to_risk': 6, 'target_pnl': 310, 'stop': 49, 'donchlen': 64, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 33}],
[']d9?o1H', 29, 217, 1023.4, 39, 28, 14.46, {'qty_to_risk': 6, 'target_pnl': 310, 'stop': 49, 'donchlen': 64, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 33}],
['wd9?o1H', 29, 217, 1862.38, 39, 28, 12.14, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 49, 'donchlen': 64, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 33}],
['kd9?o2H', 29, 217, 1409.9, 39, 28, 12.95, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 49, 'donchlen': 64, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 33}],
['gd9?o1H', 29, 217, 1409.9, 39, 28, 12.95, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 49, 'donchlen': 64, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 33}],
['kd9?r1H', 29, 217, 1409.9, 39, 28, 12.95, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 49, 'donchlen': 64, 'treshold': 95, 'ema_fast': 4, 'ema_slow': 33}],
['kd3qo1n', 30, 178, 902.44, 29, 27, -29.02, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 185, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 47}],
['kd3s81]', 28, 163, 880.97, 39, 23, -0.53, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 36, 'ema_fast': 4, 'ema_slow': 40}],
['v^:pF/Y', 27, 176, 1396.05, 36, 25, 31.96, {'qty_to_risk': 8, 'target_pnl': 281, 'stop': 51, 'donchlen': 183, 'treshold': 50, 'ema_fast': 4, 'ema_slow': 39}],
['vd:so1_', 27, 183, 1167.19, 30, 26, 6.13, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 51, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 41}],
['vd3sP1l', 29, 174, 1135.73, 31, 29, -20.9, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 61, 'ema_fast': 4, 'ema_slow': 46}],
['vd3sR1l', 29, 174, 1135.73, 31, 29, -20.9, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 63, 'ema_fast': 4, 'ema_slow': 46}],
['kd9?;1H', 29, 184, 2612.96, 39, 23, 17.78, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 49, 'donchlen': 64, 'treshold': 39, 'ema_fast': 4, 'ema_slow': 33}],
['kd9?W1H', 29, 212, 1044.89, 39, 28, 12.95, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 49, 'donchlen': 64, 'treshold': 68, 'ema_fast': 4, 'ema_slow': 33}],
['v23so1l', 30, 181, 844.07, 31, 29, -20.83, {'qty_to_risk': 8, 'target_pnl': 72, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vd3?o1T', 30, 203, 1057.99, 42, 33, 25.49, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 64, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 37}],
['vd3sp1F', 25, 222, 1023.84, 43, 30, 25.74, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 93, 'ema_fast': 4, 'ema_slow': 32}],
['vd5po(l', 23, 247, 1073.08, 25, 32, 10.01, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 40, 'donchlen': 183, 'treshold': 92, 'ema_fast': 2, 'ema_slow': 46}],
['vd0so1l', 29, 182, 1070.15, 27, 29, -4.26, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 28, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 46}],
['vd3so1S', 27, 194, 1004.75, 35, 28, -10.27, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 37}],
['vdfp5.)', 22, 304, 1501.04, 34, 55, 79.33, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 151, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 21}],
['vdos51?', 23, 214, 1753.98, 36, 25, 31.27, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 172, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 29}],
['kd9?51H', 29, 172, 1768.37, 33, 21, 9.44, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 49, 'donchlen': 64, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 33}],
['vd3wo)l', 23, 248, 1004.76, 29, 34, -13.45, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 200, 'treshold': 92, 'ema_fast': 2, 'ema_slow': 46}],
['vd3so(l', 24, 248, 1143.62, 29, 34, -16.79, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 2, 'ema_slow': 46}],
['vd3sp1X', 28, 190, 1175.52, 35, 28, -10.17, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 93, 'ema_fast': 4, 'ema_slow': 39}],
['vd3si1_', 30, 182, 979.8, 35, 28, -10.47, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 86, 'ema_fast': 4, 'ema_slow': 41}],
['sXvp5.+', 22, 300, 1632.61, 26, 50, 25.63, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 188, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 22}],
['sXvp5.,', 22, 300, 1632.61, 26, 50, 25.63, {'qty_to_risk': 8, 'target_pnl': 253, 'stop': 188, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 22}],
['pd1vo1]', 28, 187, 950.29, 31, 29, -4.64, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 31, 'donchlen': 198, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['pm3vo1]', 29, 186, 1155.02, 35, 28, -7.54, {'qty_to_risk': 7, 'target_pnl': 353, 'stop': 35, 'donchlen': 198, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['ks3so1]', 29, 185, 990.24, 35, 28, -8.35, {'qty_to_risk': 7, 'target_pnl': 381, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['JciU+8@', 25, 129, 309.07, 54, 11, 37.45, {'qty_to_risk': 5, 'target_pnl': 305, 'stop': 158, 'donchlen': 117, 'treshold': 23, 'ema_fast': 6, 'ema_slow': 30}],
['vl3po1_', 30, 185, 1059.8, 38, 26, 0.54, {'qty_to_risk': 8, 'target_pnl': 348, 'stop': 35, 'donchlen': 183, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 41}],
['vs3po1_', 30, 185, 1059.8, 38, 26, 0.54, {'qty_to_risk': 8, 'target_pnl': 381, 'stop': 35, 'donchlen': 183, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 41}],
['vh3po1_', 30, 185, 1059.8, 38, 26, 0.54, {'qty_to_risk': 8, 'target_pnl': 329, 'stop': 35, 'donchlen': 183, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 41}],
['kd3sV1]', 28, 180, 753.89, 35, 28, -8.35, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 67, 'ema_fast': 4, 'ema_slow': 40}],
['vd3wo1_', 30, 186, 1504.98, 35, 28, -9.1, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 200, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 41}],
['vf3so1_', 30, 185, 1278.79, 35, 28, -10.47, {'qty_to_risk': 8, 'target_pnl': 319, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 41}],
['vd3so1_', 30, 185, 1263.28, 35, 28, -10.47, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 41}],
['vd3st1_', 30, 186, 1233.15, 35, 28, -10.47, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 97, 'ema_fast': 4, 'ema_slow': 41}],
['vd3so1W', 27, 194, 1070.4, 35, 28, -10.27, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 38}],
['kd9?o;H', 32, 174, 954.44, 48, 25, 67.2, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 49, 'donchlen': 64, 'treshold': 92, 'ema_fast': 6, 'ema_slow': 33}],
['v^JpF/@', 23, 221, 1078.03, 34, 32, 22.84, {'qty_to_risk': 8, 'target_pnl': 281, 'stop': 87, 'donchlen': 183, 'treshold': 50, 'ema_fast': 4, 'ema_slow': 30}],
['vdds51@', 23, 204, 2148.6, 30, 26, 21.91, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 147, 'donchlen': 190, 'treshold': 33, 'ema_fast': 4, 'ema_slow': 30}],
['pd3v?1]', 29, 173, 1097.95, 40, 25, 3.7, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 198, 'treshold': 43, 'ema_fast': 4, 'ema_slow': 40}],
['vd4so1_', 30, 185, 1192.87, 29, 27, -0.04, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 37, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 41}],
['pd3oo1]', 29, 186, 1014.18, 38, 26, 2.16, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 181, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['kd3qo1[', 29, 185, 927.58, 38, 26, 2.16, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 185, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['kd3lo1]', 29, 185, 1007.24, 36, 25, 1.67, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 173, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['Qd3vo1]', 29, 186, 601.68, 35, 28, -2.08, {'qty_to_risk': 5, 'target_pnl': 310, 'stop': 35, 'donchlen': 198, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['kd3po)u', 25, 239, 896.61, 31, 32, -3.82, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 183, 'treshold': 92, 'ema_fast': 2, 'ema_slow': 49}],
['vd3po(t', 25, 239, 1172.98, 31, 32, -4.32, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 183, 'treshold': 92, 'ema_fast': 2, 'ema_slow': 49}],
['pf3vo1]', 29, 186, 1227.59, 35, 28, -7.54, {'qty_to_risk': 7, 'target_pnl': 319, 'stop': 35, 'donchlen': 198, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['pd3wo1]', 29, 186, 1212.2, 35, 28, -7.54, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 200, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['pd3vq1]', 29, 186, 1210.37, 35, 28, -7.54, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 198, 'treshold': 94, 'ema_fast': 4, 'ema_slow': 40}],
['vd2so1_', 30, 186, 1438.32, 32, 28, -8.23, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 33, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 41}],
['kd3to1]', 29, 185, 1289.24, 35, 28, -8.35, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 193, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['pd3to1]', 29, 185, 1289.24, 35, 28, -8.35, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 193, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['hd3so1]', 29, 185, 1036.82, 35, 28, -8.35, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['gd3so1]', 29, 185, 1036.82, 35, 28, -8.35, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['kd3so1]', 29, 185, 1036.82, 35, 28, -8.35, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['kd3so0]', 29, 185, 1036.82, 35, 28, -8.35, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['kd3so/]', 29, 185, 1036.82, 35, 28, -8.35, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['id3so1]', 29, 185, 1036.82, 35, 28, -8.35, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['qd3vo1]', 29, 186, 1649.3, 35, 28, -9.1, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 198, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['vd3so1\\', 29, 185, 1384.98, 35, 28, -10.47, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['pd3v\\1]', 29, 183, 980.06, 35, 28, -7.54, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 198, 'treshold': 73, 'ema_fast': 4, 'ema_slow': 40}],
['vdfp5.,', 22, 300, 1626.82, 26, 50, 25.63, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 151, 'donchlen': 183, 'treshold': 33, 'ema_fast': 3, 'ema_slow': 22}],
['kv3ko,r', 27, 208, 698.51, 32, 28, -3.8, {'qty_to_risk': 7, 'target_pnl': 395, 'stop': 35, 'donchlen': 171, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 48}],
['eeUY+N,', 29, 113, 1095.42, 40, 10, 22.03, {'qty_to_risk': 7, 'target_pnl': 315, 'stop': 113, 'donchlen': 127, 'treshold': 23, 'ema_fast': 11, 'ema_slow': 22}],
['kd9?B1H', 29, 194, 1795.09, 40, 25, 19.98, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 49, 'donchlen': 64, 'treshold': 46, 'ema_fast': 4, 'ema_slow': 33}],
['p43vo1]', 29, 186, 940.19, 35, 28, -12.05, {'qty_to_risk': 7, 'target_pnl': 82, 'stop': 35, 'donchlen': 198, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['pd6vo1]', 29, 185, 871.18, 25, 27, 23.08, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 42, 'donchlen': 198, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['kd6so1]', 29, 185, 973.11, 25, 27, 19.67, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 42, 'donchlen': 190, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
['XlIr0/7', 22, 209, 692.47, 31, 29, 29.78, {'qty_to_risk': 6, 'target_pnl': 348, 'stop': 85, 'donchlen': 188, 'treshold': 28, 'ema_fast': 4, 'ema_slow': 27}],
['kd9?J1H', 30, 203, 1381.71, 37, 27, 7.95, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 49, 'donchlen': 64, 'treshold': 54, 'ema_fast': 4, 'ema_slow': 33}],
['kf3ko,r', 27, 208, 744.96, 32, 28, -3.8, {'qty_to_risk': 7, 'target_pnl': 319, 'stop': 35, 'donchlen': 171, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 48}],
['kd3ko,r', 27, 208, 734.11, 32, 28, -3.8, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 171, 'treshold': 92, 'ema_fast': 3, 'ema_slow': 48}],
['kd9?M1H', 29, 204, 1381.6, 37, 27, 7.95, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 49, 'donchlen': 64, 'treshold': 57, 'ema_fast': 4, 'ema_slow': 33}],
['pd3vD1]', 29, 175, 965.23, 37, 27, -4.28, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 198, 'treshold': 48, 'ema_fast': 4, 'ema_slow': 40}],
['vd3soF)', 33, 187, 1699.13, 35, 34, 7.36, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 92, 'ema_fast': 9, 'ema_slow': 21}],
['kd3sO1]', 28, 177, 845.92, 35, 28, -8.35, {'qty_to_risk': 7, 'target_pnl': 310, 'stop': 35, 'donchlen': 190, 'treshold': 59, 'ema_fast': 4, 'ema_slow': 40}],
['vd3p4L+', 34, 141, 1312.64, 40, 20, 12.37, {'qty_to_risk': 8, 'target_pnl': 310, 'stop': 35, 'donchlen': 183, 'treshold': 32, 'ema_fast': 10, 'ema_slow': 22}],
['p13vo1]', 29, 186, 663.6, 35, 28, -1.65, {'qty_to_risk': 7, 'target_pnl': 68, 'stop': 35, 'donchlen': 198, 'treshold': 92, 'ema_fast': 4, 'ema_slow': 40}],
]
| 160.576398
| 163
| 0.593646
| 22,870
| 129,264
| 3.173852
| 0.068518
| 0.055245
| 0.099441
| 0.07591
| 0.794864
| 0.777822
| 0.74513
| 0.721076
| 0.679814
| 0.670474
| 0
| 0.240718
| 0.130322
| 129,264
| 804
| 164
| 160.776119
| 0.404962
| 0
| 0
| 0
| 0
| 0
| 0.39735
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4fc4620099c9e264ed4654e67b171bfc6263526d
| 2,250
|
py
|
Python
|
tests/test_transform.py
|
brandonschabell/datetransform
|
7331185d854f14734a33ee00a84367d33c2aada0
|
[
"MIT"
] | 1
|
2020-01-15T02:05:16.000Z
|
2020-01-15T02:05:16.000Z
|
tests/test_transform.py
|
brandonschabell/datetransform
|
7331185d854f14734a33ee00a84367d33c2aada0
|
[
"MIT"
] | 3
|
2020-04-22T20:48:22.000Z
|
2020-05-04T23:38:32.000Z
|
tests/test_transform.py
|
brandonschabell/datetransform
|
7331185d854f14734a33ee00a84367d33c2aada0
|
[
"MIT"
] | null | null | null |
import pandas as pd
from datetransform import transform
def test_add_date_features():
    """A non-inplace transform returns a frame carrying every derived date column."""
    frame = pd.DataFrame({'dateCol': ['2019-08-09 16:39:47']})
    frame = transform.add_date_features(frame, 'dateCol')
    # Derived columns are the source column name plus a feature suffix.
    suffixes = ['Year', 'Month', 'Week', 'Day', 'DayOfWeek', 'DayOfYear',
                'IsMonthEnd', 'IsMonthStart', 'IsQuarterEnd', 'IsQuarterStart',
                'IsYearEnd', 'IsYearStart', 'Hour', 'Minute', 'Second']
    expected = ['dateCol'] + ['dateCol' + s for s in suffixes]
    assert frame.columns.tolist() == expected
def test_inplace_transform():
    """With inplace=True the original frame itself gains every derived date column."""
    frame = pd.DataFrame({'dateCol': ['2019-08-09 16:39:47']})
    transform.add_date_features(frame, 'dateCol', inplace=True)
    suffixes = ['Year', 'Month', 'Week', 'Day', 'DayOfWeek', 'DayOfYear',
                'IsMonthEnd', 'IsMonthStart', 'IsQuarterEnd', 'IsQuarterStart',
                'IsYearEnd', 'IsYearStart', 'Hour', 'Minute', 'Second']
    expected = ['dateCol'] + ['dateCol' + s for s in suffixes]
    assert frame.columns.tolist() == expected
def test_not_inplace_transform():
    """Without inplace=True the caller's frame is left untouched."""
    frame = pd.DataFrame({'dateCol': ['2019-08-09 16:39:47']})
    # Return value deliberately discarded; only the side-effect-free contract matters.
    transform.add_date_features(frame, 'dateCol')
    assert frame.columns.tolist() == ['dateCol']
| 44.117647
| 60
| 0.389778
| 118
| 2,250
| 7.313559
| 0.355932
| 0.032445
| 0.069525
| 0.078795
| 0.900348
| 0.900348
| 0.900348
| 0.900348
| 0.900348
| 0.900348
| 0
| 0.038961
| 0.520889
| 2,250
| 51
| 61
| 44.117647
| 0.761596
| 0
| 0
| 0.795455
| 0
| 0
| 0.251444
| 0.018658
| 0
| 0
| 0
| 0
| 0.068182
| 1
| 0.068182
| false
| 0
| 0.045455
| 0
| 0.113636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4ff561f47d5a1d599392b77e3ea5d6c9e806ff95
| 26,999
|
py
|
Python
|
examples/fitzhugh-nagumo/visualize.py
|
JamesBrofos/Thresholds-in-Hamiltonian-Monte-Carlo
|
7ee1b530db0eb536666dbc872fbf8200e53dd49b
|
[
"MIT"
] | 1
|
2021-11-23T15:40:07.000Z
|
2021-11-23T15:40:07.000Z
|
examples/fitzhugh-nagumo/visualize.py
|
JamesBrofos/Thresholds-in-Hamiltonian-Monte-Carlo
|
7ee1b530db0eb536666dbc872fbf8200e53dd49b
|
[
"MIT"
] | null | null | null |
examples/fitzhugh-nagumo/visualize.py
|
JamesBrofos/Thresholds-in-Hamiltonian-Monte-Carlo
|
7ee1b530db0eb536666dbc872fbf8200e53dd49b
|
[
"MIT"
] | null | null | null |
import glob
import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as spst
from scipy.integrate import odeint
from hmc import summarize
from hmc.applications.fitzhugh_nagumo import fn_dynamics
from load_data import load_data
def euclidean_samples():
    """Load pickled Euclidean sampler outputs, keyed by sample count then step size.

    Scans the 'samples' directory for files whose names embed the sample count
    and the string 'euclidean'; the step size is parsed out of each file name.
    """
    out = {}
    for count in (1000, 10000, 100000):
        pattern = os.path.join('samples', '*num-samples-{}-*euclidean*'.format(count))
        by_step = {}
        for path in sorted(glob.glob(pattern)):
            # Step size is encoded in the file name after '-step-size-'.
            step = float(path.split('-step-size-')[1].split('-')[0])
            with open(path, 'rb') as handle:
                by_step[step] = pickle.load(handle)
        out[count] = by_step
    return out
def iid_samples():
    """Load the two pickled i.i.d. sample sets from the 'data' directory."""
    paths = [
        os.path.join('data', 'samples.pkl'),
        os.path.join('data', 'samples-{}.pkl'.format(1)),
    ]
    loaded = []
    for path in paths:
        with open(path, 'rb') as handle:
            loaded.append(pickle.load(handle))
    return loaded
def riemannian_samples(newton_momentum=False, newton_position=False):
    """Load pickled Riemannian sampler outputs, keyed by sample count then threshold.

    The newton_* flags select which solver variant's files are globbed; the
    convergence threshold is parsed out of each matching file name.
    """
    out = {}
    for count in (1000, 10000, 100000):
        pattern = os.path.join(
            'samples',
            '*num-steps-6*-num-samples-{}-*riemannian*partial-momentum-0.0*-correct-True*newton-momentum-{}*newton-position-{}*'.format(
                count, newton_momentum, newton_position))
        by_thresh = {}
        for path in sorted(glob.glob(pattern)):
            # Threshold is encoded in the file name between '-thresh-' and '-m'.
            thresh = float(path.split('-thresh-')[1].split('-m')[0])
            with open(path, 'rb') as handle:
                by_thresh[thresh] = pickle.load(handle)
        out[count] = by_thresh
    return out
def fitzhugh_nagumo():
    """Plot posterior-predictive FitzHugh-Nagumo trajectories for each sampler.

    One subplot per Euclidean step size and per Riemannian threshold (100000-sample
    runs). Thinned posterior samples are integrated through the FN dynamics and
    overlaid on the observed data. Saves 'images/fitzhugh-nagumo.png'.
    """
    euclid = euclidean_samples()[100000]
    rmn = riemannian_samples()[100000]
    # y: observations, time: grid, sigma: noise scale (unused here), state: initial condition.
    y, time, sigma, state = load_data()
    rkeys = sorted(rmn.keys(), reverse=False)
    ekeys = sorted(euclid.keys(), reverse=False)
    # Total number of subplots: one per sampler configuration.
    m = len(rkeys) + len(ekeys)
    fig = plt.figure(figsize=(30, 5))
    for i, t in enumerate(ekeys):
        s = euclid[t]['samples']
        yh = []
        # Thin the chain by 100 to keep the integration cost manageable.
        for j in range(0, len(s), 100):
            params = tuple(s[j])
            yh.append(odeint(fn_dynamics, state, time, params))
        yh = np.array(yh)
        ax = fig.add_subplot(1, m, i+1)
        # Predictive trajectories (faint lines) for both state components.
        ax.plot(time, yh[..., 0].T, '-', color='tab:blue', alpha=0.1)
        ax.plot(time, yh[..., 1].T, '-', color='tab:orange', alpha=0.1)
        # Observed data overlaid as dots.
        ax.plot(time, y[..., 0], '.', color='tab:blue', markersize=2)
        ax.plot(time, y[..., 1], '.', color='tab:orange', markersize=2)
        ax.set_ylim((-3, 3))
        ax.set_title('Euclid. {:.0e}'.format(t), fontsize=35)
        ax.axes.get_xaxis().set_visible(False)
        ax.axes.get_yaxis().set_visible(False)
    # Same layout for the Riemannian (threshold) runs, offset past the Euclidean panels.
    for i, t in enumerate(rkeys):
        s = rmn[t]['samples']
        yh = []
        for j in range(0, len(s), 100):
            params = tuple(s[j])
            yh.append(odeint(fn_dynamics, state, time, params))
        yh = np.array(yh)
        ax = fig.add_subplot(1, m, i+len(ekeys)+1)
        ax.plot(time, yh[..., 0].T, '-', color='tab:blue', alpha=0.1)
        ax.plot(time, yh[..., 1].T, '-', color='tab:orange', alpha=0.1)
        ax.plot(time, y[..., 0], '.', color='tab:blue', markersize=2)
        ax.plot(time, y[..., 1], '.', color='tab:orange', markersize=2)
        ax.set_ylim((-3, 3))
        ax.set_title('Thresh. {:.0e}'.format(t), fontsize=35)
        ax.axes.get_xaxis().set_visible(False)
        ax.axes.get_yaxis().set_visible(False)
    fig.tight_layout()
    fig.savefig(os.path.join('images', 'fitzhugh-nagumo.png'))
def effective_sample_size():
    """Violin-plot the minimum effective sample size per sampler configuration.

    Each 100000-sample chain is split into 20 consecutive segments; the minimum
    ESS (across dimensions) of each segment forms the distribution shown in one
    violin. Saves 'images/minimum-ess.pdf'.
    """
    euclid = euclidean_samples()[100000]
    rmn = riemannian_samples()[100000]
    ekeys = sorted(euclid.keys(), reverse=False)
    rkeys = sorted(rmn.keys(), reverse=False)
    labels = ['Euclidean {}'.format(t) for t in ekeys] + ['Threshold {:.0e}'.format(t) for t in rkeys]
    fig = plt.figure(figsize=(10, 4))
    ax = fig.add_subplot(111)
    num_breaks = 20
    ess = {}
    for t in ekeys:
        # Split the chain into equal consecutive segments along the sample axis.
        breaks = np.split(euclid[t]['samples'], num_breaks, axis=0)
        k = 'euclid-{}'.format(t)
        ess[k] = []
        for i, b in enumerate(breaks):
            metrics = summarize(b)
            # Worst-case (minimum over dimensions) ESS for this segment.
            m = metrics['ess'].min()
            ess[k].append(m)
    ax.violinplot([ess[k] for k in ess.keys()], showmeans=True, showmedians=True, showextrema=False)
    ess = {}
    for t in rkeys:
        breaks = np.split(rmn[t]['samples'], num_breaks, axis=0)
        k = 'rmn-{}'.format(t)
        ess[k] = []
        for i, b in enumerate(breaks):
            metrics = summarize(b)
            m = metrics['ess'].min()
            ess[k].append(m)
    # Riemannian violins are positioned after the Euclidean ones on the x-axis.
    vpb = ax.violinplot([ess[k] for k in ess.keys()], positions=np.arange(len(rkeys)) + 2, showmeans=True, showmedians=True, showextrema=False)
    ax.set_xticks(np.arange(1, len(labels) + 1))
    # NOTE(review): this blank-label call is immediately overwritten by the next line.
    ax.set_xticklabels(['' for l in labels])
    ax.set_xticklabels(labels)
    ax.set_xlim(0.25, len(labels) + 0.75)
    for tick in ax.get_xticklabels():
        tick.set_rotation(90)
    # Vertical divider between the Euclidean and Riemannian groups.
    ax.axvline(len(ekeys) + 0.5, color='black', linestyle='--')
    ax.set_xlabel('')
    ax.set_ylabel('Min. ESS', fontsize=16)
    ax.tick_params(axis='y', labelsize=16)
    ax.tick_params(axis='x', labelsize=16)
    ax.grid(linestyle=':')
    fig.tight_layout()
    fig.savefig(os.path.join('images', 'minimum-ess.pdf'))
def _min_ess_per_second(store, keys, prefix, vidx, num_breaks):
    """Per-configuration lists of minimum ESS per second over chain segments.

    For each key, the chain's column -vidx is split into `num_breaks` consecutive
    segments; each segment contributes min-ESS divided by the segment's share of
    the total sampling time. Returns an ordered dict keyed '<prefix>-<key>'.
    """
    ess = {}
    for t in keys:
        breaks = np.split(store[t]['samples'][:, [-vidx]], num_breaks, axis=0)
        k = '{}-{}'.format(prefix, t)
        ess[k] = []
        for b in breaks:
            metrics = summarize(b)
            # Normalize min ESS by the wall-clock time attributable to one segment.
            ess[k].append(metrics['ess'].min() / (store[t]['time'] / num_breaks))
    return ess


def effective_sample_size_per_second():
    """Violin-plot minimum ESS per second for each sampler configuration.

    For each of three trailing sample dimensions (vidx in 1..3) this produces:
    (1) Euclidean vs. Riemannian comparison ('minimum-ess-per-second-<vidx>.pdf'),
    (2) fixed-point vs. Newton-variant Riemannian comparison
        ('minimum-ess-per-second-vs-newton-<vidx>.pdf').

    Fixes vs. the original: the duplicate `labels` computation before the loop
    and the no-op blank `set_xticklabels` calls (immediately overwritten) were
    removed, and the five copy-pasted ESS/sec loops share one helper.
    """
    euclid = euclidean_samples()[100000]
    rmn = riemannian_samples()[100000]
    nm_rmn = riemannian_samples(True)[100000]
    nb_rmn = riemannian_samples(True, True)[100000]
    ekeys = sorted(euclid.keys(), reverse=False)
    rkeys = sorted(rmn.keys(), reverse=False)
    num_breaks = 20
    for vidx in range(1, 4):
        # --- Figure 1: Euclidean vs. Riemannian ---
        labels = ['Euclid. {}'.format(t) for t in ekeys] + ['Thresh. {:.0e}'.format(t) for t in rkeys]
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ess = _min_ess_per_second(euclid, ekeys, 'euclid', vidx, num_breaks)
        ax.violinplot([ess[k] for k in ess.keys()], showmeans=True, showmedians=True, showextrema=False)
        ess = _min_ess_per_second(rmn, rkeys, 'rmn', vidx, num_breaks)
        # Riemannian violins start after the Euclidean ones on the x-axis.
        ax.violinplot([ess[k] for k in ess.keys()], positions=np.arange(len(rkeys)) + 2, showmeans=True, showmedians=True, showextrema=False)
        ax.set_xticks(np.arange(1, len(labels) + 1))
        ax.set_xticklabels(labels)
        ax.set_xlim(0.25, len(labels) + 0.75)
        for tick in ax.get_xticklabels():
            tick.set_rotation(90)
        # Divider between the Euclidean and Riemannian groups.
        ax.axvline(len(ekeys) + 0.5, color='black', linestyle='--')
        ax.set_xlabel('')
        ax.set_ylabel('ESS / Sec.', fontsize=20)
        ax.tick_params(axis='x', labelsize=20)
        ax.tick_params(axis='y', labelsize=20)
        ax.grid(linestyle=':')
        fig.tight_layout()
        fig.savefig(os.path.join('images', 'minimum-ess-per-second-{}.pdf'.format(vidx)))
        # --- Figure 2: Riemannian solver variants (fixed point vs. Newton) ---
        labels = ['Thresh. {:.0e}'.format(t) for t in rkeys]
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ess = _min_ess_per_second(rmn, rkeys, 'rmn', vidx, num_breaks)
        vpb = ax.violinplot([ess[k] for k in ess.keys()], positions=np.arange(len(rkeys)) + 1, showmeans=True, showmedians=True, showextrema=False)
        ess = _min_ess_per_second(nm_rmn, rkeys, 'rmn', vidx, num_breaks)
        vpc = ax.violinplot([ess[k] for k in ess.keys()], positions=np.arange(len(rkeys)) + 1, showmeans=True, showmedians=True, showextrema=False)
        ess = _min_ess_per_second(nb_rmn, rkeys, 'rmn', vidx, num_breaks)
        vpd = ax.violinplot([ess[k] for k in ess.keys()], positions=np.arange(len(rkeys)) + 1, showmeans=True, showmedians=True, showextrema=False)
        ax.set_xticks(np.arange(1, len(labels) + 1))
        ax.set_xticklabels(labels)
        ax.set_xlim(0.25, len(labels) + 0.75)
        for tick in ax.get_xticklabels():
            tick.set_rotation(90)
        ax.set_xlabel('')
        ax.set_ylabel('ESS / Sec.', fontsize=20)
        ax.tick_params(axis='x', labelsize=20)
        ax.tick_params(axis='y', labelsize=20)
        ax.grid(linestyle=':')
        if vidx == 1:
            # Legend only once, on the first figure of this family.
            ax.legend([vpb["bodies"][0], vpc["bodies"][0], vpd["bodies"][0]], [r'Fixed Point', r'Newton (Mom.)', r'Newton (Mom. and Pos.)'], fontsize=16, loc='upper left')
        fig.tight_layout()
        fig.savefig(os.path.join('images', 'minimum-ess-per-second-vs-newton-{}.pdf'.format(vidx)))
def kolmogorov_smirnov():
    """Violin-plot KS statistics comparing samplers against i.i.d. draws.

    Random one-dimensional projections of the two i.i.d. sample sets give a
    baseline KS distribution; precomputed per-run KS statistics ('ks') are then
    plotted for the Euclidean and Riemannian runs. Saves
    'images/kolmogorov-smirnov.pdf' and 'images/kolmogorov-smirnov-vs-newton.pdf'.
    """
    euclid = euclidean_samples()[100000]
    rmn = riemannian_samples()[100000]
    nm_rmn = riemannian_samples(True)[100000]
    nb_rmn = riemannian_samples(True, True)[100000]
    iid = iid_samples()
    # Number of random projections used for the i.i.d. baseline KS statistics.
    num_iid_ks = 100
    iid_ks = np.zeros(num_iid_ks)
    x, y = iid[0], iid[1]
    for i in range(num_iid_ks):
        # Random unit direction; KS is computed on the projected samples.
        u = np.random.normal(size=x.shape[-1])
        u = u / np.linalg.norm(u)
        iid_ks[i] = spst.ks_2samp(x@u, y@u).statistic
    # NOTE(review): the following prints/summaries look like leftover debug
    # output — their results are discarded; confirm before removing.
    print(iid_ks)
    summarize(x)
    summarize(y)
    summarize(rmn[1e-8]['samples'])
    print(list(rmn.keys()))
    ekeys = sorted(euclid.keys(), reverse=False)
    rkeys = sorted(rmn.keys(), reverse=False)
    labels = ['I.I.D.'] + ['Euclid. {}'.format(t) for t in ekeys] + ['Thresh. {:.0e}'.format(t) for t in rkeys]
    fig = plt.figure(figsize=(10, 4))
    ax = fig.add_subplot(111)
    # Baseline violin at position 1: i.i.d. projection KS statistics (log10).
    ax.violinplot([np.log10(iid_ks)], showmeans=True, showmedians=True, showextrema=False)
    ess = {}
    for t in ekeys:
        k = 'euclid-{}'.format(t)
        ess[k] = np.log10(euclid[t]['ks'])
    # NOTE(review): vpa is never used afterwards.
    vpa = ax.violinplot([ess[k] for k in ess.keys()], positions=np.array([2.0]), showmeans=True, showmedians=True, showextrema=False)
    ess = {}
    for t in rkeys:
        k = 'rmn-{}'.format(t)
        ess[k] = np.log10(rmn[t]['ks'])
    vpb = ax.violinplot([ess[k] for k in ess.keys()], positions=np.arange(len(rkeys)) + 3, showmeans=True, showmedians=True, showextrema=False)
    ax.set_xticks(np.arange(1, len(labels) + 1))
    # NOTE(review): blank-label call immediately overwritten by the next line.
    ax.set_xticklabels(['' for l in labels])
    ax.set_xticklabels(labels, rotation=90, ha='right', fontsize=16)
    ax.set_xlim(0.25, len(labels) + 0.75)
    # Divider separating the i.i.d.+Euclidean group from the Riemannian group.
    ax.axvline(len(ekeys) + 1.5, color='black', linestyle='--')
    ax.set_xlabel('')
    ax.set_ylabel('KS Statistic', fontsize=16)
    ax.tick_params(axis='y', labelsize=16)
    ax.grid(linestyle=':')
    fig.tight_layout()
    fig.savefig(os.path.join('images', 'kolmogorov-smirnov.pdf'))
    # Second figure: overlay the three Riemannian solver variants at the same positions.
    labels = ['Thresh. {:.0e}'.format(t) for t in rkeys]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ess = {}
    for t in rkeys:
        k = 'rmn-{}'.format(t)
        ess[k] = np.log10(rmn[t]['ks'])
    vpb = ax.violinplot([ess[k] for k in ess.keys()], positions=np.arange(len(rkeys)) + 1, showmeans=True, showmedians=True, showextrema=False)
    ess = {}
    for t in rkeys:
        k = 'rmn-{}'.format(t)
        ess[k] = np.log10(nm_rmn[t]['ks'])
    vpc = ax.violinplot([ess[k] for k in ess.keys()], positions=np.arange(len(rkeys)) + 1, showmeans=True, showmedians=True, showextrema=False)
    ess = {}
    for t in rkeys:
        k = 'rmn-{}'.format(t)
        ess[k] = np.log10(nb_rmn[t]['ks'])
    vpd = ax.violinplot([ess[k] for k in ess.keys()], positions=np.arange(len(rkeys)) + 1, showmeans=True, showmedians=True, showextrema=False)
    ax.set_xticks(np.arange(1, len(labels) + 1))
    ax.set_xticklabels(['' for l in labels])
    ax.set_xticklabels(labels, rotation=90, ha='right', fontsize=24)
    ax.set_xlim(0.25, len(labels) + 0.75)
    ax.set_xlabel('')
    ax.set_ylabel('KS Statistic', fontsize=30)
    ax.tick_params(axis='y', labelsize=24)
    ax.grid(linestyle=':')
    fig.tight_layout()
    fig.savefig(os.path.join('images', 'kolmogorov-smirnov-vs-newton.pdf'))
def mmd():
    """Plot log10 |MMD^2| estimates for the Riemannian sampler at each
    convergence threshold, with the Euclidean baselines drawn as horizontal
    reference lines. Saves the figure to images/mmd.pdf.
    """
    euclid = euclidean_samples()[100000]
    rmn = riemannian_samples()[100000]
    ekeys = sorted(euclid.keys())
    rkeys = sorted(rmn.keys())
    num_thresholds = len(rkeys)
    thresholds = np.array(rkeys)
    # Log-magnitude of the (possibly signed) MMD^2 estimates.
    euclid_mmd = np.log10(np.abs(np.array([euclid[k]['mmd'] for k in ekeys])))
    rmn_mmd = np.log10(np.abs(np.array([rmn[k]['mmd'] for k in rkeys])))
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(rmn_mmd, '.-')
    # One horizontal baseline per Euclidean sampler configuration.
    for baseline in euclid_mmd:
        ax.axhline(baseline, color='k')
    ax.xaxis.set_tick_params(direction='out')
    ax.xaxis.set_ticks_position('bottom')
    ax.set_xticks(np.arange(num_thresholds))
    tick_labels = ['{:.0f}'.format(np.log10(t)) for t in thresholds]
    ax.set_xticklabels(tick_labels, fontsize=24)
    ax.tick_params(axis='y', labelsize=24)
    ax.grid(linestyle=':')
    ax.set_xlabel(r'$\log_{10}$ Threshold', fontsize=30)
    ax.set_ylabel(r'$\log_{10}|\mathrm{MMD}^2|$ Estimate', fontsize=30)
    fig.tight_layout()
    fig.savefig(os.path.join('images', 'mmd.pdf'))
def wasserstein_sliced():
    """Plot the log10 sliced Wasserstein distance for the Riemannian sampler
    at each convergence threshold, with Euclidean baselines as horizontal
    reference lines. Saves the figure to images/sw.pdf.
    """
    euclid = euclidean_samples()[100000]
    rmn = riemannian_samples()[100000]
    ekeys = sorted(euclid.keys())
    rkeys = sorted(rmn.keys())
    num_thresholds = len(rkeys)
    thresholds = np.array(rkeys)
    euclid_sw = np.log10(np.abs(np.array([euclid[k]['sw'] for k in ekeys])))
    rmn_sw = np.log10(np.abs(np.array([rmn[k]['sw'] for k in rkeys])))
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(rmn_sw, '.-')
    # One horizontal baseline per Euclidean sampler configuration.
    for baseline in euclid_sw:
        ax.axhline(baseline, color='k')
    ax.xaxis.set_tick_params(direction='out')
    ax.xaxis.set_ticks_position('bottom')
    ax.set_xticks(np.arange(num_thresholds))
    tick_labels = ['{:.0f}'.format(np.log10(t)) for t in thresholds]
    ax.set_xticklabels(tick_labels, fontsize=24)
    ax.tick_params(axis='y', labelsize=24)
    ax.grid(linestyle=':')
    ax.set_xlabel(r'$\log_{10}$ Threshold', fontsize=30)
    ax.set_ylabel(r'$\log_{10}$ Sliced Wasserstein', fontsize=30)
    fig.tight_layout()
    fig.savefig(os.path.join('images', 'sw.pdf'))
def volume_preservation():
    """Plot volume-preservation (Jacobian determinant) error diagnostics.

    Produces three figures under images/:
      - jacobian-determinant.pdf: error vs. convergence threshold (fixed
        point iteration only).
      - jacobian-determinant-vs-newton.pdf: the same, overlaid with the
        Newton and Newton-with-broadcasting variants.
      - perturbation.pdf: error vs. perturbation size at threshold 1e-9.

    Fixes vs. the original: removed an unused (and potentially expensive)
    `euclidean_samples()` load, and made the math-text labels raw strings so
    `\\log` is not an invalid escape sequence.
    """
    rmn = riemannian_samples()
    num_thresholds = 9
    thresholds = np.logspace(-num_thresholds, -1, num_thresholds)
    # Jacobian-determinant error at a fixed perturbation of 1e-5, one array
    # per convergence threshold; NaNs (failed evaluations) are dropped.
    dat = [rmn[100000][t]['jacdet'][1e-5] for t in thresholds]
    dat = [_[~np.isnan(_)] for _ in dat]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.boxplot(dat, notch=True)
    ax.grid(linestyle=':')
    ax.xaxis.set_tick_params(direction='out')
    ax.xaxis.set_ticks_position('bottom')
    ax.set_xticks(np.arange(1, num_thresholds + 1))
    ax.set_xticklabels(['{:.0f}'.format(np.log10(t)) for t in thresholds], fontsize=24)
    ax.tick_params(axis='y', labelsize=24)
    ax.set_xlim(0.25, len(thresholds) + 0.75)
    ax.set_xlabel(r'$\log_{10}$ Threshold', fontsize=30)
    ax.set_ylabel(r'$\log_{10}$ Vol. Pres. Err.', fontsize=30)
    fig.tight_layout()
    fig.savefig(os.path.join('images', 'jacobian-determinant.pdf'))
    # Second figure: overlay the fixed-point results (blue) with the Newton
    # variant (red) and the Newton-with-broadcasting variant (green).
    fig = plt.figure()
    ax = fig.add_subplot(111)
    bp = ax.boxplot(dat, notch=True, patch_artist=True)
    for patch in bp['boxes']:
        patch.set(facecolor='tab:blue')
    nm_rmn = riemannian_samples(True)
    dat = [nm_rmn[100000][t]['jacdet'][1e-5] for t in thresholds]
    dat = [_[~np.isnan(_)] for _ in dat]
    nm_bp = ax.boxplot(dat, notch=True, patch_artist=True)
    for patch in nm_bp['boxes']:
        patch.set(facecolor='tab:red')
    nb_rmn = riemannian_samples(True, True)
    dat = [nb_rmn[100000][t]['jacdet'][1e-5] for t in thresholds]
    dat = [_[~np.isnan(_)] for _ in dat]
    nb_bp = ax.boxplot(dat, notch=True, patch_artist=True)
    for patch in nb_bp['boxes']:
        patch.set(facecolor='tab:green')
    ax.grid(linestyle=':')
    ax.xaxis.set_tick_params(direction='out')
    ax.xaxis.set_ticks_position('bottom')
    ax.set_xticks(np.arange(1, num_thresholds + 1))
    ax.set_xticklabels(['{:.0f}'.format(np.log10(t)) for t in thresholds], fontsize=24)
    ax.tick_params(axis='y', labelsize=24)
    ax.set_xlim(0.25, len(thresholds) + 0.75)
    ax.set_xlabel(r'$\log_{10}$ Threshold', fontsize=30)
    ax.set_ylabel(r'$\log_{10}$ Vol. Pres. Err.', fontsize=30)
    fig.tight_layout()
    fig.savefig(os.path.join('images', 'jacobian-determinant-vs-newton.pdf'))
    # Third figure: sweep the perturbation size at the tightest threshold.
    perturb = sorted(rmn[100000][1e-9]['jacdet'].keys())
    num_perturb = len(perturb)
    dat = [rmn[100000][1e-9]['jacdet'][p] for p in perturb]
    dat = [_[~np.isnan(_)] for _ in dat]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.boxplot(dat, notch=True)
    ax.grid(linestyle=':')
    ax.xaxis.set_tick_params(direction='out')
    ax.xaxis.set_ticks_position('bottom')
    ax.set_xticks(np.arange(1, num_perturb + 1))
    ax.set_xticklabels(['{:.0f}'.format(np.log10(t)) for t in perturb], fontsize=24)
    ax.tick_params(axis='y', labelsize=24)
    ax.set_xlim(0.25, num_perturb + 0.75)
    ax.set_xlabel(r'$\log_{10}$ Perturbation', fontsize=30)
    ax.set_ylabel(r'$\log_{10}$ Volume Preservation Error', fontsize=20)
    fig.tight_layout()
    fig.savefig(os.path.join('images', 'perturbation.pdf'))
def reversibility():
    """Plot absolute and relative reversibility error diagnostics.

    Produces three figures under images/:
      - absolute-reversibility.pdf: absolute error vs. threshold.
      - absolute-reversibility-vs-newton.pdf: the same, overlaid with the
        Newton and Newton-with-broadcasting variants.
      - relative-reversibility.pdf: relative error vs. threshold.

    Fixes vs. the original: removed an unused `euclidean_samples()` load,
    and made the math-text labels raw strings so `\\log` is not an invalid
    escape sequence.
    """
    rmn = riemannian_samples()
    num_thresholds = 9
    thresholds = np.logspace(-num_thresholds, -1, num_thresholds)
    # Absolute reversibility error per threshold; drop NaNs.
    dat = [rmn[100000][t]['absrev'] for t in thresholds]
    dat = [_[~np.isnan(_)] for _ in dat]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.boxplot(dat, notch=True)
    ax.grid(linestyle=':')
    ax.xaxis.set_tick_params(direction='out')
    ax.xaxis.set_ticks_position('bottom')
    ax.set_xticks(np.arange(1, num_thresholds + 1))
    ax.set_xticklabels(['{:.0f}'.format(np.log10(t)) for t in thresholds], fontsize=24)
    ax.tick_params(axis='y', labelsize=24)
    ax.set_xlim(0.25, len(thresholds) + 0.75)
    ax.set_xlabel(r'$\log_{10}$ Threshold', fontsize=30)
    ax.set_ylabel(r'$\log_{10}$ Abs. Rev. Error', fontsize=30)
    fig.tight_layout()
    fig.savefig(os.path.join('images', 'absolute-reversibility.pdf'))
    # Overlay fixed-point (blue), Newton (red), Newton+broadcast (green).
    fig = plt.figure()
    ax = fig.add_subplot(111)
    bp = ax.boxplot(dat, notch=True, patch_artist=True)
    for patch in bp['boxes']:
        patch.set(facecolor='tab:blue')
    nm_rmn = riemannian_samples(True)
    dat = [nm_rmn[100000][t]['absrev'] for t in thresholds]
    dat = [_[~np.isnan(_)] for _ in dat]
    nm_bp = ax.boxplot(dat, notch=True, patch_artist=True)
    for patch in nm_bp['boxes']:
        patch.set(facecolor='tab:red')
    nb_rmn = riemannian_samples(True, True)
    dat = [nb_rmn[100000][t]['absrev'] for t in thresholds]
    dat = [_[~np.isnan(_)] for _ in dat]
    nb_bp = ax.boxplot(dat, notch=True, patch_artist=True)
    for patch in nb_bp['boxes']:
        patch.set(facecolor='tab:green')
    ax.grid(linestyle=':')
    ax.xaxis.set_tick_params(direction='out')
    ax.xaxis.set_ticks_position('bottom')
    ax.set_xticks(np.arange(1, num_thresholds + 1))
    ax.set_xticklabels(['{:.0f}'.format(np.log10(t)) for t in thresholds], fontsize=24)
    ax.tick_params(axis='y', labelsize=24)
    ax.set_xlim(0.25, len(thresholds) + 0.75)
    ax.set_xlabel(r'$\log_{10}$ Threshold', fontsize=30)
    ax.set_ylabel(r'$\log_{10}$ Abs. Rev. Err.', fontsize=30)
    fig.tight_layout()
    fig.savefig(os.path.join('images', 'absolute-reversibility-vs-newton.pdf'))
    # Relative reversibility error per threshold; drop NaNs.
    dat = [rmn[100000][t]['relrev'] for t in thresholds]
    dat = [_[~np.isnan(_)] for _ in dat]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.boxplot(dat, notch=True)
    ax.grid(linestyle=':')
    ax.xaxis.set_tick_params(direction='out')
    ax.xaxis.set_ticks_position('bottom')
    ax.set_xticks(np.arange(1, num_thresholds + 1))
    ax.set_xticklabels(['{:.0f}'.format(np.log10(t)) for t in thresholds], fontsize=24)
    ax.tick_params(axis='y', labelsize=24)
    ax.set_xlim(0.25, len(thresholds) + 0.75)
    ax.set_xlabel(r'$\log_{10}$ Threshold', fontsize=30)
    ax.set_ylabel(r'$\log_{10}$ Rel. Rev. Error', fontsize=30)
    fig.tight_layout()
    fig.savefig(os.path.join('images', 'relative-reversibility.pdf'))
def momentum_fixed_point():
    """Plot the number of momentum fixed-point iterations vs. threshold.

    Produces two figures under images/:
      - num-fixed-point-momentum.pdf: boxplot of log10 iteration counts for
        the fixed-point solver (subsampled to 10k points for plotting speed).
      - num-fixed-point-momentum-vs-newton.pdf: mean +/- one standard
        deviation for the fixed-point solver vs. the Newton variant.

    Fixes vs. the original: removed an unused `euclidean_samples()` load,
    and made the math-text labels raw strings so `\\log` is not an invalid
    escape sequence.
    """
    rmn = riemannian_samples()
    num_thresholds = 9
    thresholds = np.logspace(-num_thresholds, -1, num_thresholds)
    dat = [np.log10(rmn[100000][t]['nfp_mom']) for t in thresholds]
    dat = [_[~np.isnan(_)] for _ in dat]
    # Random subsample (nondeterministic) to keep the boxplot tractable.
    dat = [_[np.random.permutation(len(_))[:10000]] for _ in dat]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.boxplot(dat, notch=True)
    ax.grid(linestyle=':')
    ax.xaxis.set_tick_params(direction='out')
    ax.xaxis.set_ticks_position('bottom')
    ax.set_xticks(np.arange(1, num_thresholds + 1))
    ax.set_xticklabels(['{:.0f}'.format(np.log10(t)) for t in thresholds], fontsize=24)
    ax.tick_params(axis='y', labelsize=24)
    ax.set_xlim(0.25, len(thresholds) + 0.75)
    ax.set_xlabel(r'$\log_{10}$ Threshold', fontsize=30)
    ax.set_ylabel(r'$\log_{10}$ Mom. Fixed Point', fontsize=30)
    fig.tight_layout()
    fig.savefig(os.path.join('images', 'num-fixed-point-momentum.pdf'))
    # Compare against the Newton-based solver (mean +/- std bands).
    nrmn = riemannian_samples(True)
    dat = [np.log10(rmn[100000][t]['nfp_mom']) for t in thresholds]
    dat = [_[~np.isnan(_)] for _ in dat]
    mean = np.array([np.mean(_) for _ in dat])
    std = np.array([np.std(_) for _ in dat])
    ndat = [np.log10(nrmn[100000][t]['nfp_mom']) for t in thresholds]
    ndat = [_[~np.isnan(_)] for _ in ndat]
    nmean = np.array([np.mean(_) for _ in ndat])
    nstd = np.array([np.std(_) for _ in ndat])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(np.arange(1, num_thresholds + 1), mean, color='tab:blue', label='Fixed Point')
    ax.plot(np.arange(1, num_thresholds + 1), mean + std, '--', color='tab:blue')
    ax.plot(np.arange(1, num_thresholds + 1), mean - std, '--', color='tab:blue')
    ax.plot(np.arange(1, num_thresholds + 1), nmean, color='tab:orange', label='Newton')
    ax.plot(np.arange(1, num_thresholds + 1), nmean + nstd, '--', color='tab:orange')
    ax.plot(np.arange(1, num_thresholds + 1), nmean - nstd, '--', color='tab:orange')
    ax.grid(linestyle=':')
    ax.xaxis.set_tick_params(direction='out')
    ax.xaxis.set_ticks_position('bottom')
    ax.set_xticks(np.arange(1, num_thresholds + 1))
    ax.set_xticklabels(['{:.0f}'.format(np.log10(t)) for t in thresholds], fontsize=24)
    ax.tick_params(axis='y', labelsize=24)
    ax.set_xlim(0.25, len(thresholds) + 0.75)
    ax.set_xlabel(r'$\log_{10}$ Threshold', fontsize=30)
    ax.set_ylabel(r'$\log_{10}$ Momentum Fixed Point', fontsize=20)
    ax.set_ylim((0.0, 1.1))
    ax.legend(fontsize=30)
    fig.tight_layout()
    fig.savefig(os.path.join('images', 'num-fixed-point-momentum-vs-newton.pdf'))
def position_fixed_point():
    """Plot the number of position fixed-point iterations vs. threshold.

    Produces two figures under images/:
      - num-fixed-point-position.pdf: boxplot of log10 iteration counts for
        the fixed-point solver (subsampled to 10k points for plotting speed).
      - num-fixed-point-position-vs-newton.pdf: mean +/- one standard
        deviation for the fixed-point solver vs. the Newton variant.

    Fixes vs. the original: removed an unused `euclidean_samples()` load,
    and made the math-text labels raw strings so `\\log` is not an invalid
    escape sequence.
    """
    rmn = riemannian_samples()
    nrmn = riemannian_samples(True, True)
    num_thresholds = 9
    thresholds = np.logspace(-num_thresholds, -1, num_thresholds)
    dat = [np.log10(rmn[100000][t]['nfp_pos']) for t in thresholds]
    dat = [_[~np.isnan(_)] for _ in dat]
    # Random subsample (nondeterministic) to keep the boxplot tractable.
    dat = [_[np.random.permutation(len(_))[:10000]] for _ in dat]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.boxplot(dat, notch=True)
    ax.grid(linestyle=':')
    ax.xaxis.set_tick_params(direction='out')
    ax.xaxis.set_ticks_position('bottom')
    ax.set_xticks(np.arange(1, num_thresholds + 1))
    ax.set_xticklabels(['{:.0f}'.format(np.log10(t)) for t in thresholds], fontsize=24)
    ax.tick_params(axis='y', labelsize=24)
    ax.set_xlim(0.25, len(thresholds) + 0.75)
    ax.set_xlabel(r'$\log_{10}$ Threshold', fontsize=30)
    ax.set_ylabel(r'$\log_{10}$ Pos. Fixed Point', fontsize=30)
    fig.tight_layout()
    fig.savefig(os.path.join('images', 'num-fixed-point-position.pdf'))
    # Compare against the Newton-based solver (mean +/- std bands).
    dat = [np.log10(rmn[100000][t]['nfp_pos']) for t in thresholds]
    dat = [_[~np.isnan(_)] for _ in dat]
    mean = np.array([np.mean(_) for _ in dat])
    std = np.array([np.std(_) for _ in dat])
    ndat = [np.log10(nrmn[100000][t]['nfp_pos']) for t in thresholds]
    ndat = [_[~np.isnan(_)] for _ in ndat]
    nmean = np.array([np.mean(_) for _ in ndat])
    nstd = np.array([np.std(_) for _ in ndat])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(np.arange(1, num_thresholds + 1), mean, color='tab:blue', label='Fixed Point')
    ax.plot(np.arange(1, num_thresholds + 1), mean + std, '--', color='tab:blue')
    ax.plot(np.arange(1, num_thresholds + 1), mean - std, '--', color='tab:blue')
    ax.plot(np.arange(1, num_thresholds + 1), nmean, color='tab:orange', label='Newton')
    ax.plot(np.arange(1, num_thresholds + 1), nmean + nstd, '--', color='tab:orange')
    ax.plot(np.arange(1, num_thresholds + 1), nmean - nstd, '--', color='tab:orange')
    ax.grid(linestyle=':')
    ax.xaxis.set_tick_params(direction='out')
    ax.xaxis.set_ticks_position('bottom')
    ax.set_xticks(np.arange(1, num_thresholds + 1))
    ax.set_xticklabels(['{:.0f}'.format(np.log10(t)) for t in thresholds], fontsize=24)
    ax.tick_params(axis='y', labelsize=24)
    ax.set_xlim(0.25, len(thresholds) + 0.75)
    ax.set_xlabel(r'$\log_{10}$ Threshold', fontsize=30)
    ax.set_ylabel(r'$\log_{10}$ Position Fixed Point', fontsize=20)
    ax.set_ylim((0.0, 1.1))
    ax.legend(fontsize=30)
    fig.tight_layout()
    fig.savefig(os.path.join('images', 'num-fixed-point-position-vs-newton.pdf'))
def main():
    """Generate every diagnostic figure for the experiment.

    BUG FIX: the original had a debug-leftover `exit()` immediately after
    `kolmogorov_smirnov()`, which silently prevented every other figure
    from being generated. It has been removed so the full suite runs.
    """
    kolmogorov_smirnov()
    momentum_fixed_point()
    position_fixed_point()
    wasserstein_sliced()
    mmd()
    fitzhugh_nagumo()
    effective_sample_size_per_second()
    effective_sample_size()
    volume_preservation()
    reversibility()
# Script entry point: build all diagnostic figures when run directly.
if __name__ == '__main__':
    main()
| 39.242733
| 219
| 0.610874
| 3,968
| 26,999
| 4.033014
| 0.068548
| 0.029369
| 0.017622
| 0.023996
| 0.87771
| 0.86515
| 0.857464
| 0.852653
| 0.830657
| 0.819034
| 0
| 0.038185
| 0.206563
| 26,999
| 687
| 220
| 39.299854
| 0.708851
| 0
| 0
| 0.732773
| 0
| 0.001681
| 0.095374
| 0.021038
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023529
| false
| 0
| 0.016807
| 0
| 0.045378
| 0.003361
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8c90f117276cb3890e33b89c6c379cacf637802b
| 1,196
|
py
|
Python
|
mat_db/main/migrations/0006_hosedynamic_nu_min40_hosedynamic_nu_plus100_and_more.py
|
tkminek/material_database
|
8661617077192d20e8d9445cd6560bf1266f0582
|
[
"MIT"
] | null | null | null |
mat_db/main/migrations/0006_hosedynamic_nu_min40_hosedynamic_nu_plus100_and_more.py
|
tkminek/material_database
|
8661617077192d20e8d9445cd6560bf1266f0582
|
[
"MIT"
] | null | null | null |
mat_db/main/migrations/0006_hosedynamic_nu_min40_hosedynamic_nu_plus100_and_more.py
|
tkminek/material_database
|
8661617077192d20e8d9445cd6560bf1266f0582
|
[
"MIT"
] | null | null | null |
# Generated by Django 4.0.2 on 2022-02-23 10:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Poisson-ratio fields (nu at -40C, +23C, +100C) to the
    hosedynamic and hosestatic models, each defaulting to 0.495."""

    dependencies = [
        ('main', '0005_hose_hosestatic_hosedynamic'),
    ]

    # Same six AddField operations as the generated migration, built
    # programmatically: three temperature fields on each of two models.
    operations = [
        migrations.AddField(
            model_name=model,
            name=field,
            field=models.FloatField(default=0.495),
        )
        for model in ('hosedynamic', 'hosestatic')
        for field in ('nu_min40', 'nu_plus100', 'nu_plus23')
    ]
| 27.181818
| 53
| 0.551839
| 113
| 1,196
| 5.707965
| 0.327434
| 0.167442
| 0.213953
| 0.251163
| 0.75814
| 0.75814
| 0.75814
| 0.699225
| 0.699225
| 0.631008
| 0
| 0.07125
| 0.331104
| 1,196
| 43
| 54
| 27.813953
| 0.735
| 0.037625
| 0
| 0.810811
| 1
| 0
| 0.133159
| 0.02785
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.027027
| 0
| 0.108108
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.