hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dfbb0256287dd2cbd4ce421bc0c2333540e9d21b
| 245
|
py
|
Python
|
elf/types/section/header/sh_name.py
|
Valmarelox/elftoolsng
|
99c3f4913a7e477007b1d81df83274d7657bf693
|
[
"MIT"
] | null | null | null |
elf/types/section/header/sh_name.py
|
Valmarelox/elftoolsng
|
99c3f4913a7e477007b1d81df83274d7657bf693
|
[
"MIT"
] | null | null | null |
elf/types/section/header/sh_name.py
|
Valmarelox/elftoolsng
|
99c3f4913a7e477007b1d81df83274d7657bf693
|
[
"MIT"
] | null | null | null |
from elf.types.base.elf_name_type import ElfNameType
from elf.types.base import ElfOffset
# TODO: Generify to ElfStringType
class SHName(ElfNameType):
@property
def strtab_accessor(self):
return int(self.elf.header.e_shstrndx)
| 24.5
| 52
| 0.767347
| 34
| 245
| 5.411765
| 0.735294
| 0.076087
| 0.130435
| 0.173913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155102
| 245
| 9
| 53
| 27.222222
| 0.888889
| 0.126531
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0.166667
| 0.833333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
dfef9bfa104382c3a5eda10715d69e669e03e902
| 218
|
py
|
Python
|
apps/life_sci/dgllife/utils/__init__.py
|
arangoml/dgl
|
d135058f9986fadcbdf6aa1011a00c3ad45a8ce3
|
[
"Apache-2.0"
] | 3
|
2020-02-28T07:28:52.000Z
|
2020-06-03T08:41:55.000Z
|
apps/life_sci/python/dgllife/utils/__init__.py
|
sherry-1001/dgl
|
60d2e7d3c928d43bbb18e7ab17c066451c49f649
|
[
"Apache-2.0"
] | null | null | null |
apps/life_sci/python/dgllife/utils/__init__.py
|
sherry-1001/dgl
|
60d2e7d3c928d43bbb18e7ab17c066451c49f649
|
[
"Apache-2.0"
] | 2
|
2020-12-07T09:34:01.000Z
|
2020-12-13T06:18:58.000Z
|
"""Utils for data processing."""
from .complex_to_graph import *
from .early_stop import *
from .eval import *
from .featurizers import *
from .mol_to_graph import *
from .rdkit_utils import *
from .splitters import *
| 24.222222
| 32
| 0.756881
| 31
| 218
| 5.129032
| 0.516129
| 0.377358
| 0.163522
| 0.213836
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146789
| 218
| 8
| 33
| 27.25
| 0.854839
| 0.119266
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5f006ecb902171c6b66ca04d50152453bcda65fe
| 44
|
py
|
Python
|
janusbackup/core/__init__.py
|
NikitosnikN/janus-backup
|
413d365663b532a0611575be16ea0a4f0c7ffd20
|
[
"MIT"
] | null | null | null |
janusbackup/core/__init__.py
|
NikitosnikN/janus-backup
|
413d365663b532a0611575be16ea0a4f0c7ffd20
|
[
"MIT"
] | null | null | null |
janusbackup/core/__init__.py
|
NikitosnikN/janus-backup
|
413d365663b532a0611575be16ea0a4f0c7ffd20
|
[
"MIT"
] | null | null | null |
from .backup_pipeline import BackupPipeline
| 22
| 43
| 0.886364
| 5
| 44
| 7.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 44
| 1
| 44
| 44
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a049f24b1bb3840c29bf99a96cacf661e921860e
| 20,810
|
py
|
Python
|
code/python/FactSetFunds/v1/fds/sdk/FactSetFunds/model/costs_fees.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 6
|
2022-02-07T16:34:18.000Z
|
2022-03-30T08:04:57.000Z
|
code/python/FactSetFunds/v1/fds/sdk/FactSetFunds/model/costs_fees.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 2
|
2022-02-07T05:25:57.000Z
|
2022-03-07T14:18:04.000Z
|
code/python/FactSetFunds/v1/fds/sdk/FactSetFunds/model/costs_fees.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | null | null | null |
"""
FactSet Funds API
FactSet Mutual Funds data offers over 50 fund- and share class-specific data points for mutual funds listed in the United States. <p>FactSet Mutual Funds Reference provides fund-specific reference information as well as FactSet's proprietary classification system. It includes but is not limited to the following coverage * Fund descriptions * A seven-tier classification system * Leverage information * Fees and expenses * Portfolio managers FactSet Mutual Funds Time Series provides quantitative data items on a historical basis. It includes but is not limited to the following coverage * Net asset value * Fund flows * Assets under management * Total return # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: api@factset.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.FactSetFunds.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.FactSetFunds.exceptions import ApiAttributeError
class CostsFees(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'fsym_id': (str,), # noqa: E501
'management_expenses': (float,), # noqa: E501
'date': (str,), # noqa: E501
'currency': (str,), # noqa: E501
'entry_expense': (float,), # noqa: E501
'exit_expense': (float,), # noqa: E501
'front_expenses_max': (float,), # noqa: E501
'back_expenses_max': (float,), # noqa: E501
'expense_ratio': (float,), # noqa: E501
'expense_ratio_prospectus': (float,), # noqa: E501
'init_investment_min': (float,), # noqa: E501
'init_investment_ira': (float,), # noqa: E501
'swing_price': (float,), # noqa: E501
'swing_price_date': (str,), # noqa: E501
'sri_priips': (int,), # noqa: E501
'srri_ucits': (int,), # noqa: E501
'performance_fee': (float,), # noqa: E501
'trading_expense_ratio': (float,), # noqa: E501
'request_id': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'fsym_id': 'fsymId', # noqa: E501
'management_expenses': 'managementExpenses', # noqa: E501
'date': 'date', # noqa: E501
'currency': 'currency', # noqa: E501
'entry_expense': 'entryExpense', # noqa: E501
'exit_expense': 'exitExpense', # noqa: E501
'front_expenses_max': 'frontExpensesMax', # noqa: E501
'back_expenses_max': 'backExpensesMax', # noqa: E501
'expense_ratio': 'expenseRatio', # noqa: E501
'expense_ratio_prospectus': 'expenseRatioProspectus', # noqa: E501
'init_investment_min': 'initInvestmentMin', # noqa: E501
'init_investment_ira': 'initInvestmentIra', # noqa: E501
'swing_price': 'swingPrice', # noqa: E501
'swing_price_date': 'swingPriceDate', # noqa: E501
'sri_priips': 'sriPriips', # noqa: E501
'srri_ucits': 'srriUcits', # noqa: E501
'performance_fee': 'performanceFee', # noqa: E501
'trading_expense_ratio': 'tradingExpenseRatio', # noqa: E501
'request_id': 'requestId', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""CostsFees - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
fsym_id (str): FactSet Security Identifier. Six alpha-numeric characters, excluding vowels, with a -S suffix (XXXXXX-S), resolved from the requestId of the Fund requested.. [optional] # noqa: E501
management_expenses (float): The management fee, or maintenance fee, is charged by the fund manager. This cost is usually between 0.5% and 2% of assets on average and is a periodic fee.. [optional] # noqa: E501
date (str): The Expense Date expressed in YYYY-MM-DD.. [optional] # noqa: E501
currency (str): ISO3 Currency. [optional] # noqa: E501
entry_expense (float): The transaction entry fee or purchase fee collected from investors when they join or leave a scheme. The fee is paid to the fund. [optional] # noqa: E501
exit_expense (float): The transaction exit fee is charged to investors when they redeem shares from a fund.. [optional] # noqa: E501
front_expenses_max (float): The Maximum sales load or initial Sales Fee is a reduction made from each investment in the fund, the maximum paid is dependent on the size of the purchase, it decreases as the investment increases. Often associated with class 'A' shares of a mutual fund it is also known as Sales Charge, this is a fee paid when shares are purchased. Also known as a \"front-end load\", this fee typically goes to the brokers that sell the fund's shares. (Under the Investment Company Act of 1940 is 9%. The maximum sales load under NASD Rules is 81⁄2%).\" . [optional] # noqa: E501
back_expenses_max (float): The Back Expense Maximum. [optional] # noqa: E501
expense_ratio (float): The Expense Ratio. [optional] # noqa: E501
expense_ratio_prospectus (float): The Expense Ratio Prospectus. [optional] # noqa: E501
init_investment_min (float): The Initial Investment Minimum. [optional] # noqa: E501
init_investment_ira (float): The Initial Investment Individual Retirement Accounts. [optional] # noqa: E501
swing_price (float): Swing Price. Swing pricing occurs when a fund provider adjusts the net asset value (NAV) of a fund in order to pass on trading costs to purchasing or redeeming shareholders. This anti-dilution technique is used to protect long-term shareholder’s interests.. [optional] # noqa: E501
swing_price_date (str): Swing Price Date. Swing pricing occurs when a fund provider adjusts the net asset value (NAV) of a fund in order to pass on trading costs to purchasing or redeeming shareholders. This anti-dilution technique is used to protect long-term shareholder’s interests.. [optional] # noqa: E501
sri_priips (int): The SRI (Summary Risk Indicator) illustrates PRIIPs’ risk and reward profile by measuring the market and credit risk level. Returns 1 for low risk up to 7 for higher risk.. [optional] # noqa: E501
srri_ucits (int): Synthetic Risk and Reward Indicator illustrates a UCITS or NURS (Non-UCITS Retail Scheme) fund’s risk and reward profile by measuring the market risk level. Returns 1 for low risk up to 7 for high risk.. [optional] # noqa: E501
performance_fee (float): Represents fees made to an investment manager as a percentage of investment profits for generating positive returns.. [optional] # noqa: E501
trading_expense_ratio (float): Represents the amount of trading commissions incurred to manage the portfolio as a percentage of the total assets of the fund.. [optional] # noqa: E501
request_id (str): The requested Id sent as input.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""CostsFees - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
fsym_id (str): FactSet Security Identifier. Six alpha-numeric characters, excluding vowels, with a -S suffix (XXXXXX-S), resolved from the requestId of the Fund requested.. [optional] # noqa: E501
management_expenses (float): The management fee, or maintenance fee, is charged by the fund manager. This cost is usually between 0.5% and 2% of assets on average and is a periodic fee.. [optional] # noqa: E501
date (str): The Expense Date expressed in YYYY-MM-DD.. [optional] # noqa: E501
currency (str): ISO3 Currency. [optional] # noqa: E501
entry_expense (float): The transaction entry fee or purchase fee collected from investors when they join or leave a scheme. The fee is paid to the fund. [optional] # noqa: E501
exit_expense (float): The transaction exit fee is charged to investors when they redeem shares from a fund.. [optional] # noqa: E501
front_expenses_max (float): The Maximum sales load or initial Sales Fee is a reduction made from each investment in the fund, the maximum paid is dependent on the size of the purchase, it decreases as the investment increases. Often associated with class 'A' shares of a mutual fund it is also known as Sales Charge, this is a fee paid when shares are purchased. Also known as a \"front-end load\", this fee typically goes to the brokers that sell the fund's shares. (Under the Investment Company Act of 1940 is 9%. The maximum sales load under NASD Rules is 81⁄2%).\" . [optional] # noqa: E501
back_expenses_max (float): The Back Expense Maximum. [optional] # noqa: E501
expense_ratio (float): The Expense Ratio. [optional] # noqa: E501
expense_ratio_prospectus (float): The Expense Ratio Prospectus. [optional] # noqa: E501
init_investment_min (float): The Initial Investment Minimum. [optional] # noqa: E501
init_investment_ira (float): The Initial Investment Individual Retirement Accounts. [optional] # noqa: E501
swing_price (float): Swing Price. Swing pricing occurs when a fund provider adjusts the net asset value (NAV) of a fund in order to pass on trading costs to purchasing or redeeming shareholders. This anti-dilution technique is used to protect long-term shareholder’s interests.. [optional] # noqa: E501
swing_price_date (str): Swing Price Date. Swing pricing occurs when a fund provider adjusts the net asset value (NAV) of a fund in order to pass on trading costs to purchasing or redeeming shareholders. This anti-dilution technique is used to protect long-term shareholder’s interests.. [optional] # noqa: E501
sri_priips (int): The SRI (Summary Risk Indicator) illustrates PRIIPs’ risk and reward profile by measuring the market and credit risk level. Returns 1 for low risk up to 7 for higher risk.. [optional] # noqa: E501
srri_ucits (int): Synthetic Risk and Reward Indicator illustrates a UCITS or NURS (Non-UCITS Retail Scheme) fund’s risk and reward profile by measuring the market risk level. Returns 1 for low risk up to 7 for high risk.. [optional] # noqa: E501
performance_fee (float): Represents fees made to an investment manager as a percentage of investment profits for generating positive returns.. [optional] # noqa: E501
trading_expense_ratio (float): Represents the amount of trading commissions incurred to manage the portfolio as a percentage of the total assets of the fund.. [optional] # noqa: E501
request_id (str): The requested Id sent as input.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 63.25228
| 709
| 0.628784
| 2,573
| 20,810
| 4.933929
| 0.168675
| 0.050414
| 0.047893
| 0.012603
| 0.800079
| 0.769279
| 0.750847
| 0.746436
| 0.746436
| 0.746436
| 0
| 0.019825
| 0.306776
| 20,810
| 328
| 710
| 63.445122
| 0.860044
| 0.658193
| 0
| 0.38255
| 0
| 0
| 0.219112
| 0.041404
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033557
| false
| 0.013423
| 0.026846
| 0.006711
| 0.14094
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a06f446d3844839bd8a53b28fca1b6b8cee02277
| 8,105
|
py
|
Python
|
library/graph_processing.py
|
RomainClaret/CONVEX
|
b1477d3e332903846f21b5c1ac2d5cb8cfdac21e
|
[
"MIT"
] | 29
|
2019-10-08T13:21:18.000Z
|
2021-12-30T03:32:23.000Z
|
library/graph_processing.py
|
RomainClaret/CONVEX
|
b1477d3e332903846f21b5c1ac2d5cb8cfdac21e
|
[
"MIT"
] | 19
|
2019-10-31T13:43:33.000Z
|
2022-03-29T00:57:00.000Z
|
library/graph_processing.py
|
RomainClaret/CONVEX
|
b1477d3e332903846f21b5c1ac2d5cb8cfdac21e
|
[
"MIT"
] | 11
|
2019-10-31T12:38:43.000Z
|
2022-03-23T11:02:34.000Z
|
import networkx as nx
# used to distinguish between multiple predicate nodes with same label - next index for predicate
predicate_nodes = {}
qualifier_predicate_nodes = {}
#####################################################
### Graphs
#####################################################
# one element of the answer_statements is a dictionary with 'entity', 'object', 'predicate' and 'qualifiers' attributes
# see statement_structure.json for details
def expand_context_with_statements(context, statements, turn = 1, qa=False):
if not context:
context = nx.Graph()
# print statements
for statement in statements:
# add the entity and object node
if not statement['entity']['id'] in context:
context.add_node(statement['entity']['id'], type='entity', turn=turn, qa=qa)
if not statement['object']['id'] in context:
context.add_node(statement['object']['id'], type='entity', turn=turn, qa=qa)
# get current index of predicate used
if not predicate_nodes.get(statement['predicate']['id']):
# the predicate did not occur yet => index 0 and new entry
predicate_nodes[statement['predicate']['id']] = 1
predicate_index = 0
else:
# the predicate already occured => fetch the next index available and increase the saved one
predicate_index = predicate_nodes[statement['predicate']['id']]
predicate_nodes[statement['predicate']['id']] += 1
# add the predicate node
predicate_node_id = (statement['predicate']['id'] + "-" + str(predicate_index))
context.add_node(predicate_node_id, type='predicate', turn=turn)
# add the two edges (entity->predicate->object)
context.add_edge(statement['entity']['id'], predicate_node_id)
context.add_edge(predicate_node_id, statement['object']['id'])
# if there were qualifiers occuring in the statement
if statement.get('qualifiers'):
for qualifier_statement in statement['qualifiers']:
# add the qualifier_statment object
if not qualifier_statement['qualifier_object']['id'] in context:
context.add_node(qualifier_statement['qualifier_object']['id'], type='entity', turn=turn, qa=qa)
# get current index of qualifier_predicate used
if not qualifier_predicate_nodes.get(qualifier_statement['qualifier_predicate']['id']):
# the qualifier_predicate did not occur yet => index 0 and new entry
qualifier_predicate_nodes[qualifier_statement['qualifier_predicate']['id']] = 1
predicate_index = 0
else:
# the qualifier_predicate already occured => fetch the next index available and increase the saved one
predicate_index = qualifier_predicate_nodes[qualifier_statement['qualifier_predicate']['id']]
qualifier_predicate_nodes[qualifier_statement['qualifier_predicate']['id']] += 1
# add the qualifier_predicate
qualifier_predicate_node_id = qualifier_statement['qualifier_predicate']['id'] + "-" + str(predicate_index)
context.add_node(qualifier_predicate_node_id, type='qualifier_predicate', turn=turn)
# add the two edges (qualifier_entity->qualifier_predicate->qualifier_object)
context.add_edge(predicate_node_id, qualifier_predicate_node_id)
context.add_edge(qualifier_predicate_node_id, qualifier_statement['qualifier_object']['id'])
return context
# one element of the answer_statements is a dictionary with 'entity', 'object', 'predicate' and 'qualifiers' attributes
# see statement_structure.json for details
def expand_context_with_frontier(context, frontier, frontier_statement, turn = 1):
if not context:
context = nx.Graph()
# complete the statement with labels
# statement = complete_statement(frontier_statement, True)
statement = frontier_statement
# add the entity and object node
if not statement['entity']['id'] in context:
context.add_node(statement['entity']['id'], type='entity', turn=turn, qa=False)
if not statement['object']['id'] in context:
context.add_node(statement['object']['id'], type='entity', turn=turn, qa=False)
# get current index of predicate used
if not predicate_nodes.get(statement['predicate']['id']):
# the predicate did not occur yet => index 0 and new entry
predicate_nodes[statement['predicate']['id']] = 1
predicate_index = 0
else:
# the predicate already occured => fetch the next index available and increase the saved one
predicate_index = predicate_nodes[statement['predicate']['id']]
predicate_nodes[statement['predicate']['id']] += 1
# add the predicate node
predicate_node_id = (statement['predicate']['id'] + "-" + str(predicate_index))
context.add_node(predicate_node_id, type='predicate', turn=turn)
# if the frontier is the predicate node, set the label as frontier
if frontier == statement['predicate']['id']:
frontier = predicate_node_id
# add the two edges (entity->predicate->object)
context.add_edge(statement['entity']['id'], predicate_node_id)
context.add_edge(predicate_node_id, statement['object']['id'])
# if there were qualifiers occuring in the statement
if statement.get('qualifiers'):
for qualifier_statement in statement['qualifiers']:
# add the qualfier_statment object
if not qualifier_statement['qualifier_object']['id'] in context:
context.add_node(qualifier_statement['qualifier_object']['id'], type='entity', turn=turn, qa=False)
# get current index of qualifier_predicate used
if not qualifier_predicate_nodes.get(qualifier_statement['qualifier_predicate']['id']):
# the qualifier_predicate did not occur yet => index 0 and new entry
qualifier_predicate_nodes[qualifier_statement['qualifier_predicate']['id']] = 1
predicate_index = 0
else:
# the qualifier_predicate already occured => fetch the next index available and increase the saved one
predicate_index = qualifier_predicate_nodes[qualifier_statement['qualifier_predicate']['id']]
qualifier_predicate_nodes[qualifier_statement['qualifier_predicate']['id']] += 1
# add the qualifier_predicate
qualifier_predicate_node_id = qualifier_statement['qualifier_predicate']['id'] + "-" + str(predicate_index)
context.add_node(qualifier_predicate_node_id, type='qualifier_predicate', turn=turn)
# if the frontier is the predicate node, set the label as frontier
if frontier == qualifier_statement['qualifier_predicate']['id']:
frontier = qualifier_predicate_node_id
# add the two edges (qualifier_entity->qualifier_predicate->qualifier_object)
context.add_edge(predicate_node_id, qualifier_predicate_node_id)
context.add_edge(qualifier_predicate_node_id, qualifier_statement['qualifier_object']['id'])
return context, frontier
# expand the context by the top candidates
def expand_context_with_candidates(graph, candidates, turn=1):
    """Expand the context graph with the statements of the given candidates.

    Each candidate is a dict carrying a 'statement' entry; the statements are
    collected and handed to ``expand_context_with_statements``, which performs
    the actual graph expansion.

    :param graph: the context graph to expand
    :param candidates: iterable of candidate dicts with a 'statement' key
    :param turn: conversation turn the statements belong to (default 1)
    :return: the expanded graph
    """
    # gather the raw statements and delegate to the statement-based expansion
    statements = [candidate['statement'] for candidate in candidates]
    return expand_context_with_statements(graph, statements, turn)
# set all nodes as qa nodes in the given graph
def set_all_nodes_as_qa_nodes(graph):
    """Flag every node of the graph as a QA node by setting its 'qa' attribute to True."""
    for _, attributes in list(graph.nodes(data=True)):
        attributes['qa'] = True
# return a list of all entity nodes which were question words or answers of the graph
def get_all_qa_nodes(graph):
    """Return all (node, data) pairs whose type is 'entity' and whose 'qa' flag is set."""
    qa_nodes = []
    for pair in list(graph.nodes(data=True)):
        data = pair[1]
        if data['type'] == 'entity' and data['qa']:
            qa_nodes.append(pair)
    return qa_nodes
# return a list of all entity nodes which could be answers
def get_all_answer_candidates(graph):
    """Return ids of entity nodes that are not QA nodes; fall back to all entities.

    Non-QA entity nodes are the preferred answer candidates; when none exist,
    every entity node (including QA ones) is returned instead.
    """
    candidates = [
        node_id
        for node_id, data in list(graph.nodes(data=True))
        if data['type'] == 'entity' and not data['qa']
    ]
    # empty list is falsy -> fall back to the variant that includes QA nodes
    return candidates or get_all_answer_candidates_with_qa(graph)
# return a list of all entity nodes which could be answers
def get_all_answer_candidates_with_qa(graph):
    """Return the ids of every node whose type is 'entity' (QA flag ignored)."""
    entity_ids = []
    for node_id, data in list(graph.nodes(data=True)):
        if data['type'] == 'entity':
            entity_ids.append(node_id)
    return entity_ids
def get_distance(graph, answer_candidate, entity_node):
    """Return the shortest-path length between the two nodes plus one, as a float."""
    hops = nx.shortest_path_length(graph, source=answer_candidate, target=entity_node)
    return hops + 1.0
# graph to file
def write_graph(graph, file_path):
    """Serialize the graph to ``file_path``.

    ``nx.write_gpickle`` was removed in networkx 3.0; it was a thin wrapper
    around ``pickle.dump`` with the highest protocol, so writing with pickle
    directly produces the same on-disk format and stays readable by
    ``load_graph`` / legacy ``read_gpickle``.
    """
    import pickle
    with open(file_path, 'wb') as handle:
        pickle.dump(graph, handle, pickle.HIGHEST_PROTOCOL)
# load graph from file
def load_graph(file_path):
    """Deserialize and return the graph stored at ``file_path``.

    Replacement for ``nx.read_gpickle`` (removed in networkx 3.0), which was a
    plain ``pickle.load`` — files written by ``write_graph`` or the legacy
    ``write_gpickle`` load unchanged.
    """
    import pickle
    with open(file_path, 'rb') as handle:
        return pickle.load(handle)
| 44.532967
| 119
| 0.745219
| 1,102
| 8,105
| 5.278584
| 0.103448
| 0.12687
| 0.051573
| 0.068076
| 0.811587
| 0.805054
| 0.789238
| 0.776173
| 0.765343
| 0.765343
| 0
| 0.004119
| 0.1314
| 8,105
| 181
| 120
| 44.779006
| 0.822159
| 0.290315
| 0
| 0.58
| 0
| 0
| 0.131654
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.01
| 0.02
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a077406730a08b76ee288c4760edc7e8d5bfeb61
| 656
|
py
|
Python
|
src/pathway_forte/pipeline/__init__.py
|
pathwayforte/PathwayForte
|
07775f3e174bfc756f7cf8f03efe49ef95a1cfd9
|
[
"Apache-2.0"
] | 10
|
2019-03-31T14:53:05.000Z
|
2021-01-16T07:33:41.000Z
|
src/pathway_forte/pipeline/__init__.py
|
pathwayforte/PathwayForte
|
07775f3e174bfc756f7cf8f03efe49ef95a1cfd9
|
[
"Apache-2.0"
] | 16
|
2019-03-31T07:25:43.000Z
|
2019-08-21T09:47:26.000Z
|
src/pathway_forte/pipeline/__init__.py
|
pathwayforte/PathwayForte
|
07775f3e174bfc756f7cf8f03efe49ef95a1cfd9
|
[
"Apache-2.0"
] | 3
|
2020-04-23T13:55:29.000Z
|
2020-08-28T16:10:27.000Z
|
# -*- coding: utf-8 -*-
"""Pipelines from Pathway Forte."""
from pathway_forte.pipeline.binary import do_binary_prediction
from pathway_forte.pipeline.export import do_export
from pathway_forte.pipeline.geometric import do_hypergeometric
from pathway_forte.pipeline.gsea import do_gsea
from pathway_forte.pipeline.gsea_msig import do_gsea_msig
from pathway_forte.pipeline.import_gmt import gmt_parser
from pathway_forte.pipeline.ssgsea import do_ssgsea
from pathway_forte.pipeline.stability import do_stability_prediction
from pathway_forte.pipeline.subtype import do_subtype_prediction
from pathway_forte.pipeline.survival import do_survival_prediction
| 43.733333
| 68
| 0.867378
| 94
| 656
| 5.765957
| 0.244681
| 0.223247
| 0.324723
| 0.442804
| 0.291513
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001653
| 0.077744
| 656
| 14
| 69
| 46.857143
| 0.894215
| 0.079268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a08049ff810ab9612712756497cbceb04ec51ad4
| 276
|
py
|
Python
|
Codewars/8kyu/grasshopper-combine-strings/Python/test.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | 7
|
2017-09-20T16:40:39.000Z
|
2021-08-31T18:15:08.000Z
|
Codewars/8kyu/grasshopper-combine-strings/Python/test.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
Codewars/8kyu/grasshopper-combine-strings/Python/test.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
# Python - 2.7.6
# Codewars test harness for the "combine names" kata: combine_names(first, last)
# is expected to join the two names with a single space.
# NOTE(review): `Test` and `combine_names` are injected by the Codewars runner /
# solution file — they are not defined in this file.
Test.describe('combine names')
Test.it('example tests')
Test.assert_equals(combine_names('James', 'Stevens'), 'James Stevens')
Test.assert_equals(combine_names('Davy', 'Back'), 'Davy Back')
Test.assert_equals(combine_names('Arthur', 'Dent'), 'Arthur Dent')
| 30.666667
| 70
| 0.728261
| 39
| 276
| 5
| 0.487179
| 0.246154
| 0.246154
| 0.353846
| 0.430769
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011811
| 0.07971
| 276
| 8
| 71
| 34.5
| 0.755906
| 0.050725
| 0
| 0
| 0
| 0
| 0.342308
| 0
| 0
| 0
| 0
| 0
| 0.6
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a0a32a6cfc506420c6aa632e0146a0b9266c6c28
| 80
|
py
|
Python
|
examples/testlib2/box/__init__.py
|
uibcdf/pyunitwizard
|
54cdce7369e1f2a3771a1f05a4a6ba1d7610a5e7
|
[
"MIT"
] | 2
|
2021-07-01T14:33:58.000Z
|
2022-03-19T19:19:09.000Z
|
examples/testlib2/box/__init__.py
|
uibcdf/pyunitwizard
|
54cdce7369e1f2a3771a1f05a4a6ba1d7610a5e7
|
[
"MIT"
] | 15
|
2021-02-11T18:54:16.000Z
|
2022-03-18T17:38:03.000Z
|
examples/testlib2/box/__init__.py
|
uibcdf/pyunitwizard
|
54cdce7369e1f2a3771a1f05a4a6ba1d7610a5e7
|
[
"MIT"
] | 2
|
2021-06-17T18:56:02.000Z
|
2022-03-08T05:02:17.000Z
|
from .methods_a import get_default_form
from .methods_b import set_default_form
| 26.666667
| 39
| 0.875
| 14
| 80
| 4.571429
| 0.642857
| 0.34375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 80
| 2
| 40
| 40
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
39ff9ac1ffd835711400efbeebe984e6ded6187f
| 3,888
|
py
|
Python
|
FEMpy/tests/integration/test_poisson_1d.py
|
floydie7/FEMpy
|
50e11b88dc249ff7c599472b455b07b04df1afd7
|
[
"MIT"
] | null | null | null |
FEMpy/tests/integration/test_poisson_1d.py
|
floydie7/FEMpy
|
50e11b88dc249ff7c599472b455b07b04df1afd7
|
[
"MIT"
] | null | null | null |
FEMpy/tests/integration/test_poisson_1d.py
|
floydie7/FEMpy
|
50e11b88dc249ff7c599472b455b07b04df1afd7
|
[
"MIT"
] | 1
|
2022-01-22T06:39:38.000Z
|
2022-01-22T06:39:38.000Z
|
import numpy as np
import FEMpy
def coefficient_function(x):
    """Diffusion coefficient c(x) = e^x of the 1D Poisson test problem."""
    value = np.exp(x)
    return value
def source_function(x):
    """Source term f(x) consistent with the analytical solution u(x) = x*cos(x)."""
    # trigonometric factor, kept in the same evaluation order as before
    trig_part = np.cos(x) - 2 * np.sin(x) - x * np.cos(x) - x * np.sin(x)
    return -np.exp(x) * trig_part
def dirichlet_function(x):
    """Dirichlet boundary value: u(0) = 0; returns None away from the left boundary."""
    return 0 if x == 0 else None
def neumann_function(x):
    """Neumann boundary value: u'(1) = cos(1) - sin(1); returns None elsewhere."""
    if x != 1:
        return None
    return np.cos(1) - np.sin(1)
def analytical_sol(x):
    """Exact solution u(x) = x * cos(x) of the test problem."""
    cosine = np.cos(x)
    return x * cosine
def dx_analytical_sol(x):
    """Exact derivative u'(x) = cos(x) - x * sin(x) of the test problem."""
    sine = np.sin(x)
    return np.cos(x) - x * sine
class TestLinearElements(object):
    """Integration tests for the 1D Poisson solver using linear elements.

    Solves -d/dx(e^x du/dx) = f on [0, 1] with a Dirichlet condition at x=0 and a
    Neumann condition at x=1, then checks the nodal solution and three error norms.
    NOTE(review): the reference values presumably come from a previously validated
    run of the solver — confirm before changing tolerances.
    """

    def setup(self):
        # uniform mesh of [0, 1] with step 1/4 and matching linear trial/test bases
        self.mesh = FEMpy.Interval1D(0, 1, 1/4, 'linear')
        self.basis = FEMpy.IntervalBasis1D('linear')
        self.bcs = FEMpy.BoundaryConditions(self.mesh,
                                            ['dirichlet', 'neumann'],
                                            dirichlet_fun=dirichlet_function,
                                            neumann_fun=neumann_function,
                                            coeff_fun=coefficient_function)
        self.poisson_eq = FEMpy.Poisson1D(self.mesh, self.basis, self.basis, self.bcs)

    def test_solution_vector(self):
        # nodal values should match the reference solution at all 5 mesh nodes
        nodal_solution_vector = self.poisson_eq.solve(coeff_fun=coefficient_function, source_fun=source_function)
        assert np.allclose(nodal_solution_vector, np.array([2.9317e-16, 0.24174, 0.43690, 0.54469, 0.53351]))

    def test_l_infinity_norm_error(self):
        # max pointwise error against the analytical solution
        self.poisson_eq.solve(coeff_fun=coefficient_function, source_fun=source_function)
        l_infinity_norm_error = self.poisson_eq.l_inf_error(analytical_sol)
        assert np.abs(l_infinity_norm_error - 2.0464e-02) <= 1e-5

    def test_l_2_norm_error(self):
        # L2 norm of the error against the analytical solution
        self.poisson_eq.solve(coeff_fun=coefficient_function, source_fun=source_function)
        l_2_norm_error = self.poisson_eq.l2_error(analytical_sol)
        assert np.abs(l_2_norm_error - 1.1205e-02) <= 1e-5

    def test_h_1_seminorm_error(self):
        # H1 seminorm compares derivatives, hence the dx_analytical_sol reference
        self.poisson_eq.solve(coeff_fun=coefficient_function, source_fun=source_function)
        h_1_seminorm_error = self.poisson_eq.h1_seminorm_error(dx_analytical_sol)
        assert np.abs(h_1_seminorm_error - 1.0542e-01) <= 1e-5
class TestQuadraticElements(object):
    """Integration tests for the 1D Poisson solver using quadratic elements.

    Same problem as TestLinearElements but with quadratic bases, which yields a
    9-entry nodal vector (midpoint nodes included) and much smaller error norms.
    NOTE(review): reference values presumably come from a previously validated
    run of the solver — confirm before changing tolerances.
    """

    def setup(self):
        # uniform mesh of [0, 1] with step 1/4 and matching quadratic trial/test bases
        self.mesh = FEMpy.Interval1D(0, 1, 1 / 4, 'quadratic')
        self.basis = FEMpy.IntervalBasis1D('quadratic')
        self.bcs = FEMpy.BoundaryConditions(self.mesh,
                                            ['dirichlet', 'neumann'],
                                            dirichlet_fun=dirichlet_function,
                                            neumann_fun=neumann_function,
                                            coeff_fun=coefficient_function)
        self.poisson_eq = FEMpy.Poisson1D(self.mesh, self.basis, self.basis, self.bcs)

    def test_solution_vector(self):
        # 9 nodal values: 5 vertices + 4 element midpoints
        nodal_solution_vector = self.poisson_eq.solve(coeff_fun=coefficient_function, source_fun=source_function)
        assert np.allclose(nodal_solution_vector, np.array([-1.3260e-15, 0.12407, 0.24223, 0.34899, 0.43880, 0.50689, 0.54878, 0.56090, 0.54031]),
                           rtol=1e-4, atol=1e-7)

    def test_l_infinity_norm_error(self):
        # max pointwise error against the analytical solution
        self.poisson_eq.solve(coeff_fun=coefficient_function, source_fun=source_function)
        l_infinity_norm_error = self.poisson_eq.l_inf_error(analytical_sol)
        assert np.abs(l_infinity_norm_error - 3.3279e-04) <= 1e-5

    def test_l_2_norm_error(self):
        # L2 norm of the error against the analytical solution
        self.poisson_eq.solve(coeff_fun=coefficient_function, source_fun=source_function)
        l_2_norm_error = self.poisson_eq.l2_error(analytical_sol)
        assert np.abs(l_2_norm_error - 2.1050e-04) <= 1e-5

    def test_h_1_seminorm_error(self):
        # H1 seminorm compares derivatives, hence the dx_analytical_sol reference
        self.poisson_eq.solve(coeff_fun=coefficient_function, source_fun=source_function)
        h_1_seminorm_error = self.poisson_eq.h1_seminorm_error(dx_analytical_sol)
        assert np.abs(h_1_seminorm_error - 5.4213e-03) <= 1e-5
| 41.806452
| 147
| 0.652006
| 538
| 3,888
| 4.420074
| 0.172862
| 0.074012
| 0.087468
| 0.113541
| 0.799832
| 0.796468
| 0.778806
| 0.767872
| 0.767872
| 0.767872
| 0
| 0.061287
| 0.244599
| 3,888
| 92
| 148
| 42.26087
| 0.748383
| 0
| 0
| 0.537313
| 0
| 0
| 0.015947
| 0
| 0
| 0
| 0
| 0
| 0.119403
| 1
| 0.238806
| false
| 0
| 0.029851
| 0.059701
| 0.38806
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
261753006962b6f5023a994d77457b91ece7c7c3
| 27
|
py
|
Python
|
__init__.py
|
rsgalloway/QRangeSlider
|
06cb61de8fa174e4eea556c1e6eda4b0d04e5b10
|
[
"BSD-3-Clause"
] | 24
|
2015-03-20T08:02:46.000Z
|
2021-02-28T06:25:19.000Z
|
__init__.py
|
rsgalloway/QRangeSlider
|
06cb61de8fa174e4eea556c1e6eda4b0d04e5b10
|
[
"BSD-3-Clause"
] | 3
|
2017-01-27T20:31:40.000Z
|
2021-01-19T20:05:19.000Z
|
__init__.py
|
rsgalloway/QRangeSlider
|
06cb61de8fa174e4eea556c1e6eda4b0d04e5b10
|
[
"BSD-3-Clause"
] | 9
|
2016-08-24T16:14:25.000Z
|
2020-08-18T07:06:02.000Z
|
from qrangeslider import *
| 13.5
| 26
| 0.814815
| 3
| 27
| 7.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
26495e8d5e05ec83207013e49457b028a8e4266c
| 21,682
|
py
|
Python
|
cubework/module/parallel_3d/module.py
|
kurisusnowdeng/Cubework
|
56c0d35f87765efc8f2b6d47a4ccea6f2ec626aa
|
[
"Apache-2.0"
] | null | null | null |
cubework/module/parallel_3d/module.py
|
kurisusnowdeng/Cubework
|
56c0d35f87765efc8f2b6d47a4ccea6f2ec626aa
|
[
"Apache-2.0"
] | null | null | null |
cubework/module/parallel_3d/module.py
|
kurisusnowdeng/Cubework
|
56c0d35f87765efc8f2b6d47a4ccea6f2ec626aa
|
[
"Apache-2.0"
] | null | null | null |
import math
from typing import Callable
import torch
import torch.nn as nn
import torch.nn.functional as F
from cubework.distributed import ParallelManager as pm
from cubework.distributed import all_reduce, broadcast
from cubework.global_vars import env
from cubework.utils import get_current_device, seed
from torch import Tensor
from torch.nn import Parameter
from .. import init
from ..utils import set_tensor_parallel_attribute_by_partition, split_tensor, to_2tuple
from ._operation import classifier_3d, layernorm_3d, linear_3d
from ._utils import (
all_gather_weight_3d,
broadcast_weight_3d_from_diagonal,
get_depth_from_env,
get_input_parallel_mode,
get_output_parallel_mode,
get_weight_parallel_mode,
get_input_x_weight_parallel_mode,
get_output_x_weight_parallel_mode,
reduce_scatter_tensor_3d,
split_batch_3d,
swap_in_out_group,
)
class LayerNorm3D(nn.Module):
    """Layer normalization for 3D tensor parallelism.

    The affine parameters are sharded over the parallel depth: each rank holds
    ``normalized_shape // depth`` entries of the weight and bias vectors; the
    actual normalization is delegated to the ``layernorm_3d`` kernel.
    """

    def __init__(self, normalized_shape: int, eps: float = 1e-12, dtype=None):
        super().__init__()
        # process-group handles describing the 3D parallel layout (from env)
        self.input_parallel_mode = get_input_parallel_mode()
        self.weight_parallel_mode = get_weight_parallel_mode()
        self.output_parallel_mode = get_output_parallel_mode()
        self.input_x_weight_parallel_mode = get_input_x_weight_parallel_mode()
        self.output_x_weight_parallel_mode = get_output_x_weight_parallel_mode()
        self.depth = get_depth_from_env()
        self.normalized_shape = normalized_shape
        # each rank stores a 1/depth slice of the affine parameters
        self.normalized_shape_per_partition = normalized_shape // self.depth
        self.weight = Parameter(
            torch.ones(self.normalized_shape_per_partition, device=get_current_device(), dtype=dtype)
        )
        self.bias = Parameter(
            torch.zeros(self.normalized_shape_per_partition, device=get_current_device(), dtype=dtype)
        )
        self.variance_epsilon = eps
        self._set_tensor_parallel_attributes()

    def _set_tensor_parallel_attributes(self) -> None:
        # mark both parameters as partitioned `depth` ways for checkpoint/optimizer logic
        set_tensor_parallel_attribute_by_partition(self.weight, self.depth)
        set_tensor_parallel_attribute_by_partition(self.bias, self.depth)

    def reset_parameters(self) -> None:
        # standard LayerNorm init: bias -> 0, weight -> 1
        init.zeros_()(self.bias)
        init.ones_()(self.weight)

    def forward(self, input_: Tensor) -> Tensor:
        # delegate normalization + affine transform to the parallel kernel
        return layernorm_3d(
            input_,
            self.weight,
            self.bias,
            self.normalized_shape,
            self.variance_epsilon,
            self.input_parallel_mode,
            self.weight_parallel_mode,
            self.output_parallel_mode,
            self.input_x_weight_parallel_mode,
        )
class Linear3D(nn.Module):
    """Linear layer for 3D tensor parallelism.

    The weight is partitioned across all three cube axes
    (``in_features // depth`` x ``out_features // depth**2`` per rank) and the
    bias across one axis (``out_features // depth``); the matmul is delegated
    to the ``linear_3d`` kernel.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        bias: bool = True,
        dtype: torch.dtype = None,
        weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
        bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1),
    ):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        # process-group handles describing the 3D parallel layout (from env)
        self.input_parallel_mode = get_input_parallel_mode()
        self.weight_parallel_mode = get_weight_parallel_mode()
        self.output_parallel_mode = get_output_parallel_mode()
        self.input_x_weight_parallel_mode = get_input_x_weight_parallel_mode()
        self.output_x_weight_parallel_mode = get_output_x_weight_parallel_mode()
        self.depth = get_depth_from_env()
        # local shard sizes of the global weight/bias
        self.in_features_per_partition = in_features // self.depth
        self.out_features_per_partition = out_features // self.depth**2
        self.bias_features_per_partition = out_features // self.depth
        self.weight = Parameter(
            torch.empty(
                self.in_features_per_partition,
                self.out_features_per_partition,
                device=get_current_device(),
                dtype=dtype,
            )
        )
        if bias:
            self.bias = Parameter(
                torch.zeros(self.bias_features_per_partition, device=get_current_device(), dtype=dtype)
            )
        else:
            self.bias = None
        self.reset_parameters(weight_initializer, bias_initializer)
        self._set_tensor_parallel_attributes()
        swap_in_out_group()

    def _set_tensor_parallel_attributes(self) -> None:
        # weight is split over all three cube axes (depth**3 shards), bias over one
        set_tensor_parallel_attribute_by_partition(self.weight, self.depth**3)
        if self.bias is not None:
            set_tensor_parallel_attribute_by_partition(self.bias, self.depth)

    def reset_parameters(self, weight_initializer, bias_initializer) -> None:
        with seed(pm.TENSOR):
            # fan values are based on the global (unpartitioned) layer shape
            fan_in, fan_out = self.in_features, self.out_features
            weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
            if self.bias is not None:
                bias_initializer(self.bias, fan_in=fan_in)
                # weight_src_rank = self.weight_parallel_mode.rank_by_idx(0)
                # output_src_rank = self.output_parallel_mode.rank_by_idx(0)
                # broadcast(self.bias, weight_src_rank, self.weight_parallel_mode)
                # broadcast(self.bias, output_src_rank, self.output_parallel_mode)
                # keep all ranks of the output-x-weight group in sync with rank 0
                broadcast(
                    self.bias,
                    self.output_x_weight_parallel_mode.rank_by_idx(0),
                    self.output_x_weight_parallel_mode,
                )

    def forward(self, input_: Tensor) -> Tensor:
        # delegate the distributed matmul (+ bias) to the parallel kernel
        return linear_3d(
            input_,
            self.weight,
            self.bias,
            self.input_parallel_mode,
            self.weight_parallel_mode,
            self.output_parallel_mode,
        )
class Classifier3D(nn.Module):
    """Classification head for 3D tensor parallelism.

    The weight is sharded only along the input dimension
    (``num_classes`` x ``in_features // depth`` per rank); an externally shared
    weight (e.g. tied embeddings) may be passed in, in which case this module
    does not own or initialize it.
    """

    def __init__(
        self,
        in_features: int,
        num_classes: int,
        weight: Parameter = None,
        bias: bool = True,
        dtype: torch.dtype = None,
        weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
        bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1),
    ):
        super().__init__()
        self.in_features = in_features
        self.num_classes = num_classes
        # process-group handles describing the 3D parallel layout (from env)
        self.input_parallel_mode = get_input_parallel_mode()
        self.weight_parallel_mode = get_weight_parallel_mode()
        self.output_parallel_mode = get_output_parallel_mode()
        self.depth = get_depth_from_env()
        self.in_features_per_partition = in_features // self.depth
        if weight is not None:
            # shared/tied weight supplied by the caller — not owned by this module
            self.weight = weight
            self.has_weight = False
        else:
            self.weight = Parameter(
                torch.empty(self.num_classes, self.in_features_per_partition, device=get_current_device(), dtype=dtype)
            )
            self.has_weight = True
        if bias:
            # bias is replicated (full num_classes on every rank)
            self.bias = Parameter(torch.zeros(self.num_classes, device=get_current_device(), dtype=dtype))
        else:
            self.bias = None
        self.reset_parameters(weight_initializer, bias_initializer)
        self._set_tensor_parallel_attributes()

    def _set_tensor_parallel_attributes(self) -> None:
        # only mark the weight when this module owns it
        if self.has_weight:
            set_tensor_parallel_attribute_by_partition(self.weight, self.depth)

    def reset_parameters(self, weight_initializer, bias_initializer) -> None:
        with seed(pm.TENSOR):
            # fan values are based on the global (unpartitioned) layer shape
            fan_in, fan_out = self.in_features, self.num_classes
            weight_src_rank = self.weight_parallel_mode.rank_by_idx(0)
            output_src_rank = self.output_parallel_mode.rank_by_idx(0)
            input_src_rank = self.input_parallel_mode.rank_by_idx(0)
            if self.has_weight:
                weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
                # make every weight-parallel rank agree with its group's rank 0
                broadcast(self.weight, weight_src_rank, self.weight_parallel_mode)
            if self.bias is not None:
                bias_initializer(self.bias, fan_in=fan_in)
                # replicated bias must be identical across all three groups
                broadcast(self.bias, weight_src_rank, self.weight_parallel_mode)
                broadcast(self.bias, output_src_rank, self.output_parallel_mode)
                broadcast(self.bias, input_src_rank, self.input_parallel_mode)

    def forward(self, input_: Tensor) -> Tensor:
        # delegate the distributed classification matmul to the parallel kernel
        return classifier_3d(
            input_,
            self.weight,
            self.bias,
            self.input_parallel_mode,
            self.weight_parallel_mode,
            self.output_parallel_mode,
        )
class VocabParallelClassifier3D(nn.Module):
    """Classification head whose class (vocabulary) dimension is also sharded.

    Unlike ``Classifier3D``, both weight dimensions are partitioned
    (``num_classes // depth**2`` x ``in_features // depth`` per rank), which
    suits large vocabularies; forward reuses ``linear_3d`` on the transposed
    weight. Sets the global ``env.vocab_parallel`` flag on construction.
    """

    def __init__(
        self,
        in_features: int,
        num_classes: int,
        weight: Parameter = None,
        bias: bool = True,
        dtype: torch.dtype = None,
        weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
        bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1),
    ):
        super().__init__()
        self.in_features = in_features
        self.num_classes = num_classes
        # process-group handles describing the 3D parallel layout (from env)
        self.input_parallel_mode = get_input_parallel_mode()
        self.weight_parallel_mode = get_weight_parallel_mode()
        self.output_parallel_mode = get_output_parallel_mode()
        self.input_x_weight_parallel_mode = get_input_x_weight_parallel_mode()
        self.output_x_weight_parallel_mode = get_output_x_weight_parallel_mode()
        self.depth = get_depth_from_env()
        # local shard sizes of the global weight/bias
        self.in_features_per_partition = in_features // self.depth
        self.out_features_per_partition = num_classes // self.depth**2
        self.bias_features_per_partition = num_classes // self.depth
        if weight is not None:
            # shared/tied weight supplied by the caller — not owned by this module
            self.weight = weight
            self.has_weight = False
        else:
            self.weight = Parameter(
                torch.empty(
                    self.out_features_per_partition,
                    self.in_features_per_partition,
                    device=get_current_device(),
                    dtype=dtype,
                )
            )
            self.has_weight = True
        if bias:
            self.bias = Parameter(
                torch.zeros(self.bias_features_per_partition, device=get_current_device(), dtype=dtype)
            )
        else:
            self.bias = None
        self.reset_parameters(weight_initializer, bias_initializer)
        self._set_tensor_parallel_attributes()
        swap_in_out_group()
        env.vocab_parallel = True

    def _set_tensor_parallel_attributes(self) -> None:
        # weight is split over all three cube axes (depth**3 shards), bias over one
        if self.has_weight:
            set_tensor_parallel_attribute_by_partition(self.weight, self.depth**3)
        if self.bias is not None:
            set_tensor_parallel_attribute_by_partition(self.bias, self.depth)

    def reset_parameters(self, weight_initializer, bias_initializer) -> None:
        with seed(pm.TENSOR):
            # fan values are based on the global (unpartitioned) layer shape
            fan_in, fan_out = self.in_features, self.num_classes
            if self.has_weight:
                weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
            if self.bias is not None:
                bias_initializer(self.bias, fan_in=fan_in)
                # weight_src_rank = self.weight_parallel_mode.rank_by_idx(0)
                # output_src_rank = self.output_parallel_mode.rank_by_idx(0)
                # broadcast(self.bias, weight_src_rank, self.weight_parallel_mode)
                # broadcast(self.bias, output_src_rank, self.output_parallel_mode)
                # keep all ranks of the output-x-weight group in sync with rank 0
                broadcast(
                    self.bias,
                    self.output_x_weight_parallel_mode.rank_by_idx(0),
                    self.output_x_weight_parallel_mode,
                )

    def forward(self, input_: Tensor) -> Tensor:
        # reuse the linear kernel; the (classes, features) weight is transposed
        # to the (features, classes) layout linear_3d expects
        return linear_3d(
            input_,
            self.weight.transpose(0, 1),
            self.bias,
            self.input_parallel_mode,
            self.weight_parallel_mode,
            self.output_parallel_mode,
        )
class PatchEmbedding3D(nn.Module):
    """ViT-style patch embedding for 3D tensor parallelism.

    Splits the image into patches with a strided conv, prepends a class token
    and adds learned position embeddings. All parameters are sharded along the
    embedding dimension (``embed_size // depth`` per rank); gradient hooks
    all-reduce parameter grads across the input and weight groups.
    """

    def __init__(
        self,
        img_size: int,
        patch_size: int,
        in_chans: int,
        embed_size: int,
        flatten: bool = True,
        dtype: torch.dtype = None,
        weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
        bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1),
        position_embed_initializer: Callable = init.zeros_(),
    ):
        super().__init__()
        self.depth = get_depth_from_env()
        # process-group handles describing the 3D parallel layout (from env)
        self.input_parallel_mode = get_input_parallel_mode()
        self.weight_parallel_mode = get_weight_parallel_mode()
        self.output_parallel_mode = get_output_parallel_mode()
        self.patch_size = to_2tuple(patch_size)
        # number of non-overlapping patches per image (assumes img_size % patch_size == 0)
        grid_size = to_2tuple(img_size // patch_size)
        num_patches = grid_size[0] * grid_size[1]
        self.embed_size = embed_size
        # each rank stores a 1/depth slice of the embedding dimension
        embed_size_per_partition = embed_size // self.depth
        self.flatten = flatten
        # conv kernel acting as the patch projection
        self.weight = nn.Parameter(
            torch.empty(
                (embed_size_per_partition, in_chans, *self.patch_size), device=get_current_device(), dtype=dtype
            )
        )
        self.bias = nn.Parameter(torch.empty(embed_size_per_partition, device=get_current_device(), dtype=dtype))
        self.cls_token = nn.Parameter(
            torch.zeros((1, 1, embed_size_per_partition), device=get_current_device(), dtype=dtype)
        )
        # one position per patch plus one for the class token
        self.pos_embed = nn.Parameter(
            torch.zeros((1, num_patches + 1, embed_size_per_partition), device=get_current_device(), dtype=dtype)
        )
        self.reset_parameters(weight_initializer, bias_initializer, position_embed_initializer)
        self._set_tensor_parallel_attributes()

    def _set_tensor_parallel_attributes(self) -> None:
        # every parameter is partitioned `depth` ways along the embedding dim
        set_tensor_parallel_attribute_by_partition(self.weight, self.depth)
        set_tensor_parallel_attribute_by_partition(self.bias, self.depth)
        set_tensor_parallel_attribute_by_partition(self.cls_token, self.depth)
        set_tensor_parallel_attribute_by_partition(self.pos_embed, self.depth)

    def _sync_grad_hook(self, grad) -> Tensor:
        # sum parameter gradients over the input and weight process groups so
        # replicated shards stay consistent
        grad = all_reduce(grad.clone(), self.input_parallel_mode)
        grad = all_reduce(grad, self.weight_parallel_mode)
        return grad

    def reset_parameters(self, weight_initializer, bias_initializer, position_embed_initializer) -> None:
        with seed(pm.TENSOR):
            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
            # fan_out is the global embedding size, not the local shard size
            fan_out = self.embed_size
            weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
            bias_initializer(self.bias, fan_in=fan_in)
            position_embed_initializer(self.pos_embed)
            # replicate parameters from each group's rank 0 so shards agree
            weight_src_rank = self.weight_parallel_mode.rank_by_idx(0)
            input_src_rank = self.input_parallel_mode.rank_by_idx(0)
            broadcast(self.weight, weight_src_rank, self.weight_parallel_mode)
            broadcast(self.bias, weight_src_rank, self.weight_parallel_mode)
            broadcast(self.pos_embed, weight_src_rank, self.weight_parallel_mode)
            broadcast(self.weight, input_src_rank, self.input_parallel_mode)
            broadcast(self.bias, input_src_rank, self.input_parallel_mode)
            broadcast(self.pos_embed, input_src_rank, self.input_parallel_mode)
        # keep replicated shards in sync during backward as well
        self.weight.register_hook(self._sync_grad_hook)
        self.bias.register_hook(self._sync_grad_hook)
        self.cls_token.register_hook(self._sync_grad_hook)
        self.pos_embed.register_hook(self._sync_grad_hook)

    def forward(self, input_: Tensor) -> Tensor:
        # scatter the batch across the input and weight groups
        input_ = split_batch_3d(
            input_, input_parallel_mode=self.input_parallel_mode, weight_parallel_mode=self.weight_parallel_mode
        )
        # strided conv = non-overlapping patch projection
        output = F.conv2d(input_, self.weight, self.bias, stride=self.patch_size)
        if self.flatten:
            output = output.flatten(2).transpose(1, 2)  # BCHW -> BNC
        # prepend the class token and add position embeddings
        cls_token = self.cls_token.expand(output.shape[0], -1, -1)
        output = torch.cat((cls_token, output), dim=1)
        output = output + self.pos_embed
        return output
class Embedding3D(nn.Module):
    """Token embedding for 3D tensor parallelism, sharded along the embedding dim.

    Each rank stores the full vocabulary but only ``embedding_dim // depth``
    embedding columns; a gradient hook all-reduces the weight grad over the
    input-x-weight group to keep replicated shards consistent.
    """

    def __init__(
        self,
        num_embeddings: int,
        embedding_dim: int,
        padding_idx: int = None,
        dtype: torch.dtype = None,
        weight_initializer: Callable = init.normal_(),
        *args,
        **kwargs
    ):
        super().__init__()
        self.depth = get_depth_from_env()
        # process-group handles describing the 3D parallel layout (from env)
        self.input_parallel_mode = get_input_parallel_mode()
        self.weight_parallel_mode = get_weight_parallel_mode()
        self.output_parallel_mode = get_output_parallel_mode()
        self.input_x_weight_parallel_mode = get_input_x_weight_parallel_mode()
        self.output_x_weight_parallel_mode = get_output_x_weight_parallel_mode()
        self.num_embeddings = num_embeddings
        self.embed_dim = embedding_dim
        # each rank stores a 1/depth slice of the embedding dimension
        embed_dim_per_partition = embedding_dim // self.depth
        self.padding_idx = padding_idx
        # extra positional/keyword args are forwarded verbatim to F.embedding
        self.embed_args = args
        self.embed_kwargs = kwargs
        self.weight = nn.Parameter(
            torch.empty((num_embeddings, embed_dim_per_partition), device=get_current_device(), dtype=dtype)
        )
        self.reset_parameters(weight_initializer)
        self._set_tensor_parallel_attributes()

    def _set_tensor_parallel_attributes(self) -> None:
        set_tensor_parallel_attribute_by_partition(self.weight, self.depth)

    def _sync_grad_hook(self, grad) -> Tensor:
        # grad = all_reduce(grad.clone(), self.input_parallel_mode)
        # grad = all_reduce(grad, self.weight_parallel_mode)
        # single all-reduce over the fused input-x-weight group replaces the
        # two separate reductions above
        grad = all_reduce(grad.clone(), self.input_x_weight_parallel_mode)
        return grad

    def reset_parameters(self, weight_initializer) -> None:
        with seed(pm.TENSOR):
            # fan values are based on the global (unpartitioned) table shape
            fan_in, fan_out = self.num_embeddings, self.embed_dim
            weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
            self._fill_padding_idx_with_zero()
            # weight_src_rank = self.weight_parallel_mode.rank_by_idx(0)
            # replicate the table from the fused group's rank 0 so shards agree
            broadcast(self.weight, self.input_x_weight_parallel_mode.rank_by_idx(0), self.input_x_weight_parallel_mode)
            self.weight.register_hook(self._sync_grad_hook)

    def _fill_padding_idx_with_zero(self) -> None:
        # the padding token's embedding row is conventionally all-zero
        if self.padding_idx is not None:
            with torch.no_grad():
                self.weight[self.padding_idx].fill_(0)

    def forward(self, input_: Tensor) -> Tensor:
        # scatter the batch across the input and weight groups
        input_ = split_batch_3d(
            input_, input_parallel_mode=self.input_parallel_mode, weight_parallel_mode=self.weight_parallel_mode
        )
        # weight = broadcast_weight_3d_from_diagonal(
        #     self.weight, self.input_parallel_mode, self.weight_parallel_mode, self.output_parallel_mode
        # )
        output = F.embedding(input_, self.weight, self.padding_idx, *self.embed_args, **self.embed_kwargs)
        return output
class VocabParallelEmbedding3D(torch.nn.Module):
    """Token embedding sharded along BOTH the vocabulary and embedding dims.

    Each rank stores ``num_embeddings // depth**2`` vocabulary rows and
    ``embedding_dim // depth`` columns, owning the contiguous vocab slice
    ``[vocab_start_index, vocab_end_index)``. Out-of-range token ids are
    masked to zero locally and resolved via a reduce-scatter across ranks.
    Sets the global ``env.vocab_parallel`` flag on construction.
    """

    def __init__(
        self,
        num_embeddings: int,
        embedding_dim: int,
        padding_idx: int = None,
        dtype: torch.dtype = None,
        weight_initializer: Callable = init.normal_(),
        *args,
        **kwargs
    ):
        super().__init__()
        self.num_embeddings = num_embeddings
        self.embed_dim = embedding_dim
        self.padding_idx = padding_idx
        # extra positional/keyword args are forwarded verbatim to F.embedding
        self.embed_args = args
        self.embed_kwargs = kwargs
        self.depth = get_depth_from_env()
        # process-group handles describing the 3D parallel layout (from env)
        self.input_parallel_mode = get_input_parallel_mode()
        self.weight_parallel_mode = get_weight_parallel_mode()
        self.output_parallel_mode = get_output_parallel_mode()
        # local shard sizes of the global embedding table
        self.num_embeddings_per_partition = self.num_embeddings // self.depth**2
        self.embed_dim_per_partition = self.embed_dim // self.depth
        # contiguous vocab range owned by this rank
        vocab_parallel_rank = self.input_parallel_mode.local_rank
        self.vocab_start_index = vocab_parallel_rank * self.num_embeddings_per_partition * self.depth
        self.vocab_end_index = self.vocab_start_index + self.num_embeddings_per_partition * self.depth
        self.weight = Parameter(
            torch.empty(
                (self.num_embeddings_per_partition, self.embed_dim_per_partition),
                device=get_current_device(),
                dtype=dtype,
            )
        )
        self.reset_parameters(weight_initializer)
        self._set_tensor_parallel_attributes()
        env.vocab_parallel = True

    def _set_tensor_parallel_attributes(self):
        # the table is split over all three cube axes (depth**3 shards)
        set_tensor_parallel_attribute_by_partition(self.weight, self.depth**3)

    def reset_parameters(self, weight_initializer) -> None:
        with seed(pm.TENSOR):
            # fan values are based on the global (unpartitioned) table shape
            fan_in, fan_out = self.num_embeddings, self.embed_dim
            weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
            self._fill_padding_idx_with_zero()

    def _fill_padding_idx_with_zero(self) -> None:
        # only zero the padding row if it falls inside this rank's vocab slice
        if (
            self.padding_idx is not None
            and self.padding_idx >= self.vocab_start_index
            and self.padding_idx < self.vocab_end_index
        ):
            with torch.no_grad():
                self.weight[self.padding_idx - self.vocab_start_index].fill_(0)

    def forward(self, input_: Tensor) -> Tensor:
        input_ = split_tensor(input_, 0, self.weight_parallel_mode)
        # token ids outside this rank's vocab slice are remapped to row 0 and
        # their lookups zeroed below; the reduce-scatter then sums the correct
        # contributions from the owning ranks
        input_mask = (input_ < self.vocab_start_index) | (input_ >= self.vocab_end_index)
        masked_input = input_.clone() - self.vocab_start_index
        masked_input[input_mask] = 0
        weight = all_gather_weight_3d(self.weight, 0, self.weight_parallel_mode)
        output_parallel = F.embedding(masked_input, weight, self.padding_idx, *self.embed_args, **self.embed_kwargs)
        output_parallel[input_mask, :] = 0.0
        output = reduce_scatter_tensor_3d(output_parallel, 0, self.input_parallel_mode)
        return output
| 40.451493
| 119
| 0.667835
| 2,705
| 21,682
| 4.925693
| 0.05878
| 0.113479
| 0.089162
| 0.049535
| 0.847643
| 0.818598
| 0.797583
| 0.77094
| 0.736866
| 0.706019
| 0
| 0.005171
| 0.250715
| 21,682
| 535
| 120
| 40.527103
| 0.81497
| 0.037681
| 0
| 0.66443
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071588
| false
| 0
| 0.033557
| 0.008949
| 0.14094
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
cd1a7483160f9454132be4e3913db772b4a8e2ec
| 7,971
|
py
|
Python
|
model.py
|
gitlimlab/FeatureControlHRL-Tensorflow
|
7e611febd296bada68f44710992f9bcd284941d2
|
[
"MIT"
] | 26
|
2017-12-11T21:13:15.000Z
|
2019-11-05T08:21:21.000Z
|
model.py
|
clvrai/FeatureControlHRL-Tensorflow
|
7e611febd296bada68f44710992f9bcd284941d2
|
[
"MIT"
] | null | null | null |
model.py
|
clvrai/FeatureControlHRL-Tensorflow
|
7e611febd296bada68f44710992f9bcd284941d2
|
[
"MIT"
] | 4
|
2018-02-05T08:23:09.000Z
|
2019-02-11T10:56:51.000Z
|
import numpy as np
import tensorflow as tf
import tensorflow.contrib.rnn as rnn
from ops import flatten, conv2d, linear
def normalized_columns_initializer(std=1.0):
    """Return a TF-compatible initializer whose columns have L2-norm ``std``.

    Samples a standard-normal float32 array and rescales each column so its
    Euclidean norm equals ``std``.
    """
    def _initializer(shape, dtype=None, partition_info=None):
        sampled = np.random.randn(*shape).astype(np.float32)
        sampled *= std / np.sqrt(np.square(sampled).sum(axis=0, keepdims=True))
        return tf.constant(sampled)
    return _initializer
def categorical_sample(logits, d):
    """Sample one class index per row of ``logits`` and return it one-hot with depth ``d``."""
    # subtract the row max for numerical stability before sampling
    stabilized = logits - tf.reduce_max(logits, [1], keep_dims=True)
    sampled = tf.multinomial(stabilized, 1)
    value = tf.squeeze(sampled, [1])
    return tf.one_hot(value, d)
class SubPolicy(object):
    """LSTM sub-policy (low-level controller) over raw image observations.

    Builds a small conv encoder followed by a 256-unit LSTM whose input is
    the encoded frame concatenated with the previous action, previous reward
    and the current subgoal.  Exposes action logits, a value estimate and a
    sampled one-hot action.  The conv weights are created here in the
    'encoder' variable scope; MetaPolicy re-opens that scope with reuse=True,
    so the encoder is shared between the two policies.
    """
    def __init__(self, ob_space, ac_space, subgoal_space, intrinsic_type):
        # Placeholders: observation batch, previous action (one-hot, width
        # ac_space), previous scalar reward, and current subgoal (one-hot).
        self.x = x = tf.placeholder(tf.float32, [None] + list(ob_space), name='x')
        self.action_prev = action_prev = tf.placeholder(tf.float32, [None, ac_space], name='action_prev')
        self.reward_prev = reward_prev = tf.placeholder(tf.float32, [None, 1], name='reward_prev')
        self.subgoal = subgoal = tf.placeholder(tf.float32, [None, subgoal_space], name='subgoal')
        # Selects which tensor feature() returns: conv features or pixels.
        self.intrinsic_type = intrinsic_type
        with tf.variable_scope('encoder'):
            # Resize to 84x84 and scale pixel values into [0, 1].
            x = tf.image.resize_images(x, [84, 84])
            x = x / 255.0
            # self.p: the preprocessed image, used by feature() when
            # intrinsic_type != 'feature' (pixel-based intrinsic reward).
            self.p = x
            x = tf.nn.relu(conv2d(x, 16, "l1", [8, 8], [4, 4]))
            x = tf.nn.relu(conv2d(x, 32, "l2", [4, 4], [2, 2]))
            # self.f: conv activations averaged over the spatial dims,
            # used by feature() when intrinsic_type == 'feature'.
            self.f = tf.reduce_mean(x, axis=[1, 2])
            x = flatten(x)
        with tf.variable_scope('sub_policy'):
            x = tf.nn.relu(linear(x, 256, "fc",
                          normalized_columns_initializer(0.01)))
            # Condition the LSTM on previous action, previous reward and
            # the current subgoal by concatenation along the feature axis.
            x = tf.concat([x, action_prev], axis=1)
            x = tf.concat([x, reward_prev], axis=1)
            x = tf.concat([x, subgoal], axis=1)
            # introduce a "fake" batch dimension of 1 after flatten
            # so that we can do LSTM over time dim
            x = tf.expand_dims(x, [0])
            size = 256
            lstm = rnn.BasicLSTMCell(size, state_is_tuple=True)
            self.state_size = lstm.state_size
            # Number of time steps = number of observations fed in.
            step_size = tf.shape(self.x)[:1]
            # Zero initial LSTM state (numpy) plus placeholders for feeding
            # the recurrent state between successive act()/value() calls.
            c_init = np.zeros((1, lstm.state_size.c), np.float32)
            h_init = np.zeros((1, lstm.state_size.h), np.float32)
            self.state_init = [c_init, h_init]
            c_in = tf.placeholder(tf.float32, [1, lstm.state_size.c])
            h_in = tf.placeholder(tf.float32, [1, lstm.state_size.h])
            self.state_in = [c_in, h_in]
            state_in = rnn.LSTMStateTuple(c_in, h_in)
            lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
                lstm, x, initial_state=state_in, sequence_length=step_size,
                time_major=False
            )
            lstm_c, lstm_h = lstm_state
            # Drop the fake batch dim: (1, T, size) -> (T, size).
            lstm_outputs = tf.reshape(lstm_outputs, [-1, size])
            # Policy head (action logits) and value head.
            self.logits = linear(lstm_outputs, ac_space, "action",
                                 normalized_columns_initializer(0.01))
            self.vf = tf.reshape(linear(lstm_outputs, 1, "value",
                                 normalized_columns_initializer(1.0)), [-1])
            # Recurrent state to carry into the next step.
            self.state_out = [lstm_c[:1, :], lstm_h[:1, :]]
            # One-hot sampled action for the first (only) time step.
            self.sample = categorical_sample(self.logits, ac_space)[0, :]
            self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                              tf.get_variable_scope().name)
    def get_initial_features(self):
        """Return the zeroed initial LSTM state [c, h] as numpy arrays."""
        return self.state_init
    def act(self, ob, action_prev, reward_prev, subgoal, c, h):
        """Run one step: return [sampled action, value, new c, new h]."""
        sess = tf.get_default_session()
        return sess.run([self.sample, self.vf] + self.state_out,
                        {self.x: [ob], self.state_in[0]: c, self.state_in[1]: h,
                         self.action_prev: [action_prev],
                         self.reward_prev: [reward_prev],
                         self.subgoal: [subgoal]})
    def value(self, ob, action_prev, reward_prev, subgoal, c, h):
        """Return the scalar value estimate for a single observation."""
        sess = tf.get_default_session()
        return sess.run(self.vf, {self.x: [ob], self.state_in[0]: c,
                                  self.state_in[1]: h,
                                  self.action_prev: [action_prev],
                                  self.reward_prev: [reward_prev],
                                  self.subgoal: [subgoal]})[0]
    def feature(self, state):
        """Return the intrinsic-reward feature for `state`.

        'feature' mode returns the spatially averaged conv activations;
        otherwise the preprocessed (resized, scaled) pixels are returned.
        """
        sess = tf.get_default_session()
        if self.intrinsic_type == 'feature':
            return sess.run(self.f, {self.x: [state]})[0, :]
        else:
            return sess.run(self.p, {self.x: [state]})[0, :]
class MetaPolicy(object):
    """LSTM meta-policy (high-level controller) that selects subgoals.

    Mirrors SubPolicy's architecture but outputs subgoal logits instead of
    action logits, and conditions on the previous subgoal and previous
    reward.  Opens the 'encoder' variable scope with reuse=True, so the
    conv encoder weights are shared with SubPolicy (which must be built
    first).
    """
    def __init__(self, ob_space, subgoal_space, intrinsic_type):
        # Placeholders: observation batch, previous subgoal (one-hot) and
        # previous scalar reward.
        self.x = x = \
            tf.placeholder(tf.float32, [None] + list(ob_space), name='x_meta')
        self.subgoal_prev = subgoal_prev = \
            tf.placeholder(tf.float32, [None, subgoal_space], name='subgoal_prev')
        self.reward_prev = reward_prev = \
            tf.placeholder(tf.float32, [None, 1], name='reward_prev_meta')
        self.intrinsic_type = intrinsic_type
        # reuse=True: share conv weights already created by SubPolicy.
        with tf.variable_scope('encoder', reuse=True):
            x = tf.image.resize_images(x, [84, 84])
            x = x / 255.0
            x = tf.nn.relu(conv2d(x, 16, "l1", [8, 8], [4, 4]))
            x = tf.nn.relu(conv2d(x, 32, "l2", [4, 4], [2, 2]))
            x = flatten(x)
        with tf.variable_scope('meta_policy'):
            x = tf.nn.relu(linear(x, 256, "fc",
                           normalized_columns_initializer(0.01)))
            # Condition on previous subgoal and previous reward.
            x = tf.concat([x, subgoal_prev], axis=1)
            x = tf.concat([x, reward_prev], axis=1)
            # introduce a "fake" batch dimension of 1 after flatten
            # so that we can do LSTM over time dim
            x = tf.expand_dims(x, [0])
            size = 256
            lstm = rnn.BasicLSTMCell(size, state_is_tuple=True)
            self.state_size = lstm.state_size
            # Number of time steps = number of observations fed in.
            step_size = tf.shape(self.x)[:1]
            # Zero initial LSTM state plus placeholders for feeding the
            # recurrent state between successive act()/value() calls.
            c_init = np.zeros((1, lstm.state_size.c), np.float32)
            h_init = np.zeros((1, lstm.state_size.h), np.float32)
            self.state_init = [c_init, h_init]
            c_in = tf.placeholder(tf.float32, [1, lstm.state_size.c])
            h_in = tf.placeholder(tf.float32, [1, lstm.state_size.h])
            self.state_in = [c_in, h_in]
            state_in = rnn.LSTMStateTuple(c_in, h_in)
            lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
                lstm, x, initial_state=state_in, sequence_length=step_size,
                time_major=False)
            lstm_c, lstm_h = lstm_state
            # Drop the fake batch dim: (1, T, size) -> (T, size).
            lstm_outputs = tf.reshape(lstm_outputs, [-1, size])
            # Subgoal head (logits over subgoal_space) and value head.
            self.logits = linear(lstm_outputs, subgoal_space, "action",
                                 normalized_columns_initializer(0.01))
            self.vf = tf.reshape(linear(lstm_outputs, 1, "value",
                                 normalized_columns_initializer(1.0)), [-1])
            # Recurrent state to carry into the next step.
            self.state_out = [lstm_c[:1, :], lstm_h[:1, :]]
            # One-hot sampled subgoal for the first (only) time step.
            self.sample = categorical_sample(self.logits, subgoal_space)[0, :]
            self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)
    def get_initial_features(self):
        """Return the zeroed initial LSTM state [c, h] as numpy arrays."""
        return self.state_init
    def act(self, ob, subgoal_prev, reward_prev, c, h):
        """Run one step: return [sampled subgoal, value, new c, new h]."""
        sess = tf.get_default_session()
        return sess.run([self.sample, self.vf] + self.state_out,
                        {self.x: [ob], self.state_in[0]: c, self.state_in[1]: h,
                         self.subgoal_prev: [subgoal_prev],
                         self.reward_prev: [reward_prev]})
    def value(self, ob, subgoal_prev, reward_prev, c, h):
        """Return the scalar value estimate for a single observation."""
        sess = tf.get_default_session()
        return sess.run(self.vf, {self.x: [ob], self.state_in[0]: c,
                                  self.state_in[1]: h,
                                  self.subgoal_prev: [subgoal_prev],
                                  self.reward_prev: [reward_prev]})[0]
| 46.075145
| 105
| 0.559403
| 1,062
| 7,971
| 3.990584
| 0.142185
| 0.044597
| 0.038933
| 0.057102
| 0.846626
| 0.823738
| 0.805569
| 0.801793
| 0.788344
| 0.765219
| 0
| 0.029465
| 0.31025
| 7,971
| 172
| 106
| 46.343023
| 0.74136
| 0.022707
| 0
| 0.578571
| 0
| 0
| 0.017983
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085714
| false
| 0
| 0.028571
| 0.014286
| 0.207143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
26e98a4a18f62f4031d492d98a09e2069665f4da
| 81,255
|
py
|
Python
|
users/views.py
|
anshrathod/Lyrico
|
377bb884b95953d8853e939d920eadbc502cac66
|
[
"MIT"
] | null | null | null |
users/views.py
|
anshrathod/Lyrico
|
377bb884b95953d8853e939d920eadbc502cac66
|
[
"MIT"
] | null | null | null |
users/views.py
|
anshrathod/Lyrico
|
377bb884b95953d8853e939d920eadbc502cac66
|
[
"MIT"
] | 1
|
2020-04-13T10:53:33.000Z
|
2020-04-13T10:53:33.000Z
|
from django.contrib import messages
from django.contrib.auth import (authenticate, login, logout,
update_session_auth_hash)
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.models import User
from django.shortcuts import redirect, render
from django.urls import reverse
from django.utils.datastructures import MultiValueDictKeyError
from songs.models import Song
from .forms import UserRegisterForm
from .models import Profile
def login_user(request):
    """Authenticate posted credentials and log the user in.

    Redirects to the songs home page on success; any failure (wrong
    credentials or deactivated account) flashes a warning and re-renders
    the login page.  GET requests just render the login page.
    """
    if request.method == "POST":
        username = request.POST['username']
        password = request.POST['password']
        user = authenticate(username=username, password=password)
        if user is None:
            messages.warning(request,'The account details entered were wrong.')
        elif not user.is_active:
            messages.warning(request,'Your account has been deactivated.')
        else:
            login(request, user)
            messages.success(request, ('{},Just Logged In!'.format(username)))
            return redirect('songs-home')
    return render(request, 'users/login.html')
def signup(request):
    """Handle new-user registration.

    On POST, validates the registration form, checks that both passwords
    match, that the password passes valid_pass(), and that the email domain
    passes valid_email().  On success the user and an empty Profile are
    created, the user is logged in, and they are redirected to the
    profile-setup page.  Any failure flashes a warning and re-renders the
    signup page.  GET requests just render the signup page.
    """
    if request.method=='POST':
        try:
            user = UserRegisterForm(request.POST)
            if user.is_valid():
                password1 = user.cleaned_data.get('password1')
                password2 = user.cleaned_data.get('password2')
                username = user.cleaned_data.get('username')
                email=user.cleaned_data.get('email')
                fname =user.cleaned_data.get('fname')
                lname =user.cleaned_data.get('lname')
                context={
                    'fname':fname,
                    'lname':lname,
                    'username':username,
                    'email':email,
                }
                if password1 == password2:
                    passval = valid_pass(password1)
                    # valid_pass returns the string 'True' on success, or an
                    # error message to flash on failure.
                    if passval == 'True':
                        if valid_email(email):
                            user.save()
                            useracc=User.objects.get(username=username)
                            profile=Profile(user = useracc,age=0)
                            profile.save()
                            messages.success(request, ('Account has been created for {}! Please Fill the details to build up your Profile.'.format(username)))
                            login(request,useracc)
                            return redirect('songs-addprofile')
                        else:
                            messages.warning(request,'Your account couldn\'t be created...Enter Valid Email-id.')
                            return render(request,'users/signup.html',context)
                    else:
                        messages.warning(request,passval)
                        return render(request,'users/signup.html',context)
                else:
                    messages.warning(request,'Both The Passwords Entered Didn\'t Match.')
                    return render(request,'users/signup.html',context)
            # BUG FIX: an invalid form previously fell out of the try with no
            # return, so the view returned None (a Django 500).  Re-render
            # the signup page instead.
            return render(request,'users/signup.html',{})
        except Exception as e:
            messages.warning(request,e)
            # BUG FIX: removed stray debug `print(profile.age)` here — it
            # raised NameError whenever the exception occurred before
            # `profile` was bound, masking the original error.
            return render(request,'users/signup.html',{})
    else:
        # GET: render a blank signup page (the unused form instance the
        # original built here has been dropped).
        return render(request,'users/signup.html',{})
def valid_email(mail):
    """Return True iff `mail`'s domain is one of the whitelisted domains.

    BUG FIX: the original unconditionally indexed `mail.split('@')[1]`,
    which raised IndexError for strings containing no '@'.  Such inputs
    now return False instead of crashing the signup view.
    """
    domains = ['somaiya.edu',
               'gmail.com' ,]
    parts = mail.split('@')
    if len(parts) < 2:
        # No '@' present — cannot be a valid email address.
        return False
    # Same segment the original checked: the text after the first '@'.
    if parts[1] in domains:
        return True
    return False
def valid_pass(password):
passlist=['aaa', 'abc', 'academia', 'academic', 'access', 'ada', 'admin', 'adrian', 'adrianna', 'aerobics', 'airplane', 'albany', 'albatross', 'albert', 'alex', 'alexander', 'alf', 'algebra', 'alias', 'aliases', 'alice', 'alicia', 'alisa', 'alison', 'allison', 'alpha', 'alphabet', 'ama', 'amadeus', 'amanda', 'amber', 'amorphous', 'amy', 'analog', 'anchor', 'andrea', 'andromache', 'andy', 'angela', 'angerine', 'angie', 'animals', 'anita', 'ann', 'anna', 'anne', 'annette', 'answer', 'anthropogenic', 'anvils', 'anything', 'april', 'aria', 'ariadne', 'arlene', 'arrow', 'arthur', 'asd', 'asm', 'asshole', 'athena', 'atmosphere', 'aztecs', 'azure', 'bacchus', 'badass', 'bailey', 'banana', 'bananas', 'bandit', 'banks', 'barbara', 'barber', 'baritone', 'bart', 'bartman', 'basic', 'bass', 'bassoon', 'batch', 'batman', 'beach', 'beater', 'beauty', 'beaver', 'becky', 'beethoven', 'beloved', 'benz', 'beowulf', 'berkeley', 'berlin', 'berliner', 'beryl', 'beta', 'beth', 'betsie', 'betty', 'beverly', 'bicameral', 'bishop', 'bitch', 'bob', 'bradley', 'brandi', 'brandy', 'brenda', 'brian', 'bridget', 'broadway', 'bsd', 'bumbling', 'burgess', 'cad', 'camille', 'campanile', 'candi', 'candy', 'cantor', 'cardinal', 'caren', 'carla', 'carmen', 'carol', 'carole', 'carolina', 'caroline', 'carrie', 'carson', 'cascades', 'castle', 'cat', 'catherine', 'cathy', 'cayuga', 'cecily', 'celtics', 'cerulean', 'change', 'charity', 'charles', 'charming', 'charon', 'chat', 'chem', 'chemistry', 'chess', 'chester', 'christina', 'christine', 'christy', 'cigar', 'cindy', 'class', 'classic', 'claudia', 'cluster', 'clusters', 'code', 'coffee', 'coke', 'collins', 'commrades', 'computer', 'comrade', 'comrades', 'condo', 'condom', 'connect', 'connie', 'console', 'cookie', 'cooper', 'cornelius', 'couscous', 'create', 'creation', 'creosote', 'cretin', 'criminal', 'cristina', 'crystal', 'cshrc', 'cynthia', 'daemon', 'daisy', 'dana', 'dancer', 'daniel', 'danielle', 'danny', 'dapper', 'data', 'dave', 'dawn', 'deb', 
'debbie', 'deborah', 'december', 'default', 'defoe', 'deluge', 'denise', 'desiree', 'desperate', 'develop', 'device', 'dial', 'diana', 'diane', 'diet', 'dieter', 'digital', 'disc', 'discovery', 'disk', 'disney', 'dog', 'dos', 'drought', 'dulce', 'duncan', 'eager', 'earth', 'easier', 'easy', 'eatme', 'edges', 'edinburgh', 'edwin', 'edwina', 'egghead', 'eiderdown', 'eileen', 'einstein', 'elaine', 'elanor', 'elephant', 'elizabeth', 'ellen', 'email', 'emerald', 'emily', 'emmanuel', 'enemy', 'engine', 'engineer', 'enterprise', 'enzyme', 'erenity', 'erica', 'erika', 'erin', 'ersatz', 'establish', 'estate', 'eternity', 'euclid', 'evelyn', 'extension', 'fairway', 'felicia', 'fender', 'fermat', 'ferrari', 'fidelity', 'field', 'file', 'finite', 'fishers', 'flakes', 'float', 'flower', 'flowers', 'foolproof', 'football', 'foresight', 'format', 'forsythe', 'fourier', 'fred', 'friend', 'frighten', 'fun', 'function', 'fungible', 'gabriel', 'games', 'gardner', 'garfield', 'gatt', 'gauss', 'george', 'gertrude', 'gibson', 'gina', 'ginger', 'glacier', 'gnu', 'golf', 'golfer', 'gorgeous', 'gorges', 'gosling', 'gouge', 'graham', 'grahm', 'group', 'gryphon', 'gucci', 'guess', 'guest', 'guitar', 'gumption', 'guntis', 'hack', 'hacker', 'hal', 'hamlet', 'handily', 'happening', 'harmony', 'harold', 'harvey', 'hawaii', 'heather', 'hebrides', 'heidi', 'heinlein', 'hello', 'help', 'herbert', 'hiawatha', 'hibernia', 'hidden', 'holly', 'homework', 'honey', 'horse', 'horus', 'hutchins', 'hydrogen', 'ibm', 'imbroglio', 'imperial', 'include', 'ingres', 'ingress', 'ingrid', 'inna', 'innocuous', 'internet', 'irene', 'irishman', 'isis', 'jackie', 'jane', 'janet', 'janice', 'janie', 'japan', 'jasmin', 'jean', 'jeanne', 'jen', 'jenni', 'jennifer', 'jenny', 'jessica', 'jester', 'jill', 'jixian', 'joanne', 'jody', 'johnny', 'joseph', 'joshua', 'joy', 'joyce', 'judith', 'judy', 'juggle', 'julia', 'julie', 'june', 'jupiter', 'karen', 'karie', 'karina', 'kate', 'kathleen', 'kathrine', 'kathy', 'katina', 
'katrina', 'kelly', 'keri', 'kermit', 'kernel', 'kerri', 'kerrie', 'kerry', 'key', 'kim', 'kimberly', 'kirkland', 'kitten', 'knight', 'krista', 'kristen', 'kristi', 'kristie', 'kristin', 'kristine', 'kristy', 'ladle', 'lambda', 'lamination', 'lana', 'lara', 'larkin', 'larry', 'laura', 'lazarus', 'leah', 'lebesgue', 'lee', 'leland', 'leroy', 'leslie', 'lewis', 'library', 'light', 'linda', 'lisa', 'lisp', 'liz', 'lock', 'lockout', 'lois', 'lori', 'lorin', 'lorraine', 'louis', 'love', 'lucy', 'lynn', 'lynne', 'macintosh', 'mack', 'maggot', 'magic', 'mail', 'maint', 'malcolm', 'malcom', 'manager', 'mara', 'marci', 'marcy', 'maria', 'marietta', 'mark', 'markus', 'marni', 'mars', 'marty', 'marvin', 'mary', 'master', 'math', 'maurice', 'meagan', 'megan', 'melissa', 'mellon', 'memory', 'mercury', 'merlin', 'mets', 'mgr', 'michael', 'michele', 'michelle', 'mickey', 'mike', 'minimum', 'minsky', 'mit', 'modem', 'mogul', 'moguls', 'monica', 'moose', 'morley', 'mouse', 'mozart', 'mutant', 'nagel', 'nancy', 'napoleon', 'nasa', 'nepenthe', 'neptune', 'ness', 'net', 'network', 'new', 'news', 'newton', 'next', 'nicole', 'nita', 'nobody', 'noreen', 'noxious', 'nuclear', 'nutrition', 'nyquist', 'oceanography', 'ocelot', 'office', 'olivetti', 'olivia', 'open', 'operator', 'oracle', 'orca', 'orwell', 'osiris', 'outlaw', 'oxford', 'pacific', 'pad', 'painless', 'pakistan', 'pam', 'pamela', 'paper', 'papers', 'pass', 'password', 'pat', 'patricia', 'patty', 'paula', 'pencil', 'penelope', 'penguin', 'penis', 'peoria', 'percolate', 'persimmon', 'persona', 'pete', 'peter', 'philip', 'phoenix', 'phone', 'pierre', 'pizza', 'plane', 'playboy', 'plover', 'pluto', 'plymouth', 'polly', 'polynomial', 'pondering', 'pork', 'porsche', 'poster', 'power', 'praise', 'precious', 'prelude', 'presto', 'prince', 'princeton', 'priv', 'private', 'privs', 'professor', 'profile', 'program', 'protect', 'protozoa', 'pub', 'public', 'pumpkin', 'puneet', 'puppet', 'qwerty', 'rabbit', 'rachel', 'rachelle', 
'rachmaninoff', 'rainbow', 'raindrop', 'raleigh', 'random', 'rascal', 'reagan', 'really', 'rebecca', 'regional', 'remote', 'renee', 'rick', 'ripple', 'risc', 'rje', 'robin', 'robot', 'robotics', 'robyn', 'rochelle', 'rochester', 'rodent', 'rolex', 'romano', 'ronald', 'root', 'rose', 'rosebud', 'rosemary', 'roses', 'ruben', 'rules', 'ruth', 'sal', 'samantha', 'sandra', 'sandy', 'sara', 'sarah', 'saturn', 'saxon', 'scamper', 'scheme', 'school', 'scott', 'scotty', 'secret', 'security', 'sensor', 'serenity', 'service', 'sesame', 'sex', 'shannon', 'sharc', 'shark', 'sharks', 'sharon', 'sheffield', 'sheldon', 'shell', 'sherri', 'shirley', 'shit', 'shiva', 'shivers', 'shuttle', 'signature', 'simon', 'simple', 'simpsons', 'singer', 'single', 'smile', 'smiles', 'smooch', 'smother', 'snatch', 'snoopy', 'soap', 'socrates', 'somebody', 'sondra', 'sonia', 'sonya', 'sossina', 'sparrows', 'spit', 'spring', 'springer', 'squires', 'stacey', 'staci', 'stacie', 'stacy', 'steph', 'stephanie', 'strangle', 'stratford', 'student', 'stuttgart', 'subway', 'success', 'summer', 'sun', 'super', 'superstage', 'superuser', 'support', 'supported', 'surfer', 'susan', 'susanne', 'susie', 'suzanne', 'suzie', 'swearer', 'sybil', 'symmetry', 'sys', 'sysadmin', 'system', 'tamara', 'tami', 'tamie', 'tammy', 'tangerine', 'tape', 'tara', 'target', 'tarragon', 'taylor', 'tech', 'telephone', 'temptation', 'tennis', 'terminal', 'test', 'thailand', 'theresa', 'tiffany', 'tiger', 'tina', 'toggle', 'tomato', 'topography', 'tortoise', 'toxic', 'toyota', 'traci', 'tracie', 'tracy', 'trails', 'transfer', 'trisha', 'trivial', 'trombone', 'tty', 'tubas', 'tuttle', 'umesh', 'unhappy', 'unicorn', 'unix', 'unknown', 'uranus', 'urchin', 'ursula', 'util', 'utility', 'uucp', 'valerie', 'vasant', 'venus', 'veronica', 'vertigo', 'vicky', 'village', 'virgin', 'virginia', 'visitor', 'wargames', 'warren', 'water', 'weenie', 'wendi', 'wendy', 'whatever', 'whatnot', 'whiting', 'whitney', 'wholesale', 'will', 'william', 
'williamsburg', 'willie', 'wilma', 'winston', 'wisconsin', 'wizard', 'wombat', 'woodwind', 'word', 'work', 'wormwood', 'wyoming', 'xfer', 'xmodem', 'xyz', 'xyzzy', 'yaco', 'yang', 'yellowstone', 'yolanda', 'yosemite', 'zap', 'zimmerman', 'zmodem205872676', '5486982622', '92643204', '6081221417', '40203187', '748129688', '72964906', '010082078', '0210395526', '57263586', '78736144', '61047360', '638823175', '473435327', '368325244', '926459102', '125415212', '2736039583', '587761526', '3833005906', '0271844539', '86137287', '77054789', '46405199', '889181183', '9634294852', '080836494', '6041544654', '9319256179', '862352199', '394240443', '788519005', '78128583', '135017337', '08109051', '767048160', '626066187', '323208607', '023029412', '79943939', '5528589064', '259238147', '77500805', '5146510884', '88887255', '55498608', '626523888', '77029997', '93459888', '04670948', '7010611718', '216248062', '04441240', '20609731', '57741109', '2586283131', '59521148', '25580151', '199725357', '298925898', '106491369', '443623415', '36259929', '906795192', '41015761', '101531332', '95169405', '4980314043', '040126416', '410735415', '88994601', '4264791343', '23497410', '61253672', '849399415', '873521056', '82069019', '60958028', '2098270193', '973303552', '85192757', '253924399', '39903440', '3702584472', '714228818', '71377644', '69992280', '96174821', '09993815', '093700439', '5895502546', '0191300364', '62067767', '5652213620', '947446246', '88174084', '60811706', '5912845817', '38861198', '036450082', '32908799', '90828184', '579574772', '8013072544', '55825615', '123616340', '869570729', '77979386', '2267958982', '34259339', '5612348956', '02210552', '90853004', '340530407', '5875303173', '370513224', '767205328', '65505805', '5460776758', '3212765385', '247858387', '8228021268', '2458104815', '49114996', '600053873', '0663039758', '050226181', '461821841', '373021922', '812426946', '46715471', '86993720', '8524264380', '41858993', '7414000131', '2964344941', 
'584813146', '55853986', '1004922837', '852299753', '550622521', '681836155', '24537075', '1861238626', '09214984', '80892640', '17145804', '762916566', '696629214', '591501480', '7000996008', '34394775', '2231818188', '51365333', '4282630417', '70371717', '27795915', '35797729', '6144925829', '16258800', '086628313', '65101748', '9325246458', '3852461002', '1022801230', '8113277405', '52477917', '718442370', '87949384', '863114561', '6665314018', '87858928', '80276241', '481123025', '985255534', '55841594', '062325080', '4260565534', '03871091', '74514411', '848498500', '39218647', '252500024', '549504241', '24223418', '866027068', '9935305726', '331830110', '45656185', '1530286334', '4957213829', '8919749998', '14869377', '7443220435', '5705332715', '72731916', '239484319', '7259668010', '375351983', '56071357', '249361183', '4640412232', '1376953048', '73388310', '49411191', '528260460', '547873740', '4735921482', '0310707606', '49641067', '0978248751', '76025770', '7029815471', '396424879', '81523502', '856984864', '78268209', '302133706', '44126656', '12988380', '267153288', '80763006', '85333238', '591349824', '041596989', '90377692', '64996701', '517483009', '512818141', '7012902825', '237792911', '973932906', '070092454', '27859665', '04518158', '6388989652', '2175656476', '301879936', '1526951037', '194172736', '7072901309', '116931129', '1307715938', '31394508', '769937297', '86892822', '37157966', '09431747', '349425591', '31553244', '3791311541', '58061963', '57859927', '27951894', '95316390', '08446267', '37656552', '24194480', '69229845', '30397034', '8046892226', '77885435', '99462046', '60450736', '237436887', '99793118', '019296672', '1460802410', '6763569962', '927955052', '36096873', '1328275600', '072712824', '843604131', '696058291', '428128686', '79620240', '98385584', '2114347721', '117013296', '70531343', '93243907', '597583363', '182725877', '0862990158', '689162606', '5206997397', '0564528505', '36217976', '85776046', '01526518', 
'52799559', '608781462', '86476817', '5496110973', '38346623', '269651723', '4215942810', '5682616125', '913235337', '154223929', '145566379', '6911864567', '5633464775', '4851525388', '35126971', '911854173', '35078796', '5017732674', '9942407084', '9155259909', '012086196', '41051592', '3441589251', '1029251453', '4102905416', '58035846', '53856806', '9104789310', '700790540', '187538972', '28444340', '5369026494', '62479044', '13503071', '2238424055', '737618364', '0751991438', '64369575', '7078747724', '91083148', '7443404538', '29445122', '329342070', '61109498', '30583420', '327788368', '5122091059', '87817611', '304548610', '85734176', '75289558', '704236561', '8895396768', '264739049', '3282981298', '31935166', '8620246265', '34401773', '37251253', '4908090391', '171134924', '9177831553', '948075665', '2111015986', '08503987', '2968095014', '393408943', '68874658', '19276831', '4862178082', '857261646', '35171305', '037121527', '38906190', '537004268', '1226699245', '1610038191', '134473699', '9872678940', '9151598203', '679030565', '8040694164', '369033593', '811351935', '226341114', '71463394', '0676589578', '839251129', '6632874758', '665245381', '275357081', '380140767', '95071802', '358322632', '562488086', '0350172491', '27641320', '83694777', '0788489589', '4601626090', '34777749', '696121269', '957912283', '904447963', '1580783840', '5557467977', '8031814330', '1540907642', '816581913', '73274852', '5515179887', '58295446', '76233979', '244516364', '9422686661', '294095725', '240199748', '624940272', '017115148', '796714983', '16906611', '8946365600', '104347164', '760424986', '2915781592', '5501116763', '28297942', '9118755511', '98650518', '78133027', '90909359', '5425476565', '964735186', '70140966', '7787663110', '767487486', '48804043', '2755119637', '38036611', '127126236', '12663388', '8332636206', '957068347', '97436163', '063647686', '1025337674', '9005861140', '9561845498', '243445075', '927273680', '98780197', '85004535', '0173328798', 
'7473228286', '30933103', '2036826809', '30205777', '0151165569', '54663352', '56467555', '32278809', '82081057', '4101675112', '190031709', '1472009951', '7302620939', '05577133', '1733177961', '059474180', '62917533', '2156772190', '3882645909', '872429172', '301431870', '132233880', '980812647', '3104258496', '55828074', '821616791', '5579022994', '9595433405', '617744253', '357086378', '646221314', '4811992082', '863656482', '9597568175', '738584156', '3944098346', '4573732770', '418900817', '9776624660', '7050354645', '4048856499', '53922895', '585113851', '7299670478', '7352419452', '32176322', '55422094', '9888853302', '76074854', '1552834020', '80578530', '32336678', '964783114', '701851982', '54112080', '159766646', '7992521100', '92094954', '30781877', '354322309', '1059683934', '80731895', '2200466319', '52747609', '8392494497', '27970926', '386196788', '266412372', '2733552072', '548067083', '64258058', '876575789', '8520717974', '03441199', '0936005258', '499431625', '83418924', '94496644', '7383327006', '754803409', '4446795335', '558119051', '6959245705', '215947036', '9021850909', '51415056', '58511233', '537319897', '052783020', '6871852313', '24352636', '232100778', '161511921', '9720870183', '4184160209', '04966508', '4489044260', '3544681935', '2864860170', '03737692', '25491316', '1578160761', '312957724', '7313895668', '21509127', '506782231', '83578455', '760828200', '451596402', '045484403', '0674135750', '6008872390', '2880754745', '15017018', '38967899', '8733943548', '4513473769', '015087594', '30511342', '8096389974', '166238992', '2971532680', '7919826952', '36388117', '1407478558', '86496246', '9083503189', '1448326940', '2887919018', '407979361', '864506953', '95848263', '863314460', '657743366', '0214361274', '539593713', '5140018548', '06313595', '23924319', '155613358', '4715975782', '6184200995', '43532571', '915172186', '44709546', '619041943', '1912324746', '16806314', '74268431', '786180862', '5736192288', '523842238', 
'1827664455', '120889144', '48521483', '273019127', '77125522', '943037623', '54175209', '0990197328', '8913466796', '3242479118', '63546198', '79343190', '51228645', '98004554', '68722719', '4535603923', '341356930', '087035161', '29275733', '5242971118', '2125918688', '510806085', '456449346', '379947109', '36181008', '008636755', '37469607', '06132932', '1156769615', '2447079437', '1101536495', '42253846', '5128085488', '083668161', '9461336592', '50096830', '2876940604', '5030821403', '631556658', '02778609', '30125573', '804820942', '932430920', '869145303', '0113282950', '154964096', '31962882', '97308022', '56055051', '6976871206', '611198468', '4564904340', '03466630', '3693758887', '64803141', '46162393', '859062632', '1488090251', '2995389022', '7768952512', '58493214', '381529659', '57351735', '643120770', '257433367', '61200478', '6223469678', '811643037', '975602979', '6275167132', '3705461807', '1817434585', '785266100', '286712734', '136843758', '432431051', '843790552', '911313092', '79142906', '479357515', '90810477', '4270141079', '560927660', '61577430', '0085621509', '798620660', '2334241414', '038892648', '432804888', '62691898', '633895743', '936283774', '8480164255', '1817124795', '91503240', '2361113173', '7913891399', '1656230388', '88763553', '469505297', '777112860', '115289892', '13699080', '2679726893', '7932739614', '93290974', '07821282', '186313367', '0290203249', '33144796', '406488380', '0271192058', '750729811', '3151967065', '0753311262', '36900070', '2498849275', '943618625', '40137671', '44880467', '20120601', '18350496', '179505598', '1020721344', '3753947420', '150289758', '1181589485', '650030299', '8447081663', '620040163', '70416023', '8035256357', '2870192294', '97312565', '94766788', '6514641555', '6210599917', '0163119124', '08443329', '971241013', '398449031', '8473336676', '062615560', '3178610551', '91114238', '131461747', '20122554', '349860047', '9346615734', '63603402', '9465583618', '8393558566', '66517706', 
'7455676549', '51684171', '1151952569', '16066686', '21753783', '58670581', '83572953', '697822083', '7406913130', '733652108', '448118338', '77999322', '58349901', '5598310922', '90956598', '6796869453', '551367497', '581941218', '518991891', '435347312', '05079628', '589343757', '06711196', '3784524851', '540166998', '746338359', '128260602', '3699801859', '84099245', '092066851', '47868370', '283380041', '2357862717', '69894232', '584419376', '6172162035', '488811857', '924871043', '9167337009', '843279740', '654714717', '155478772', '65694615', '79163878', '24509983', '69095064', '836883904', '0349705486', '8762808858', '36496149', '329470398', '18009348', '055001565', '028565795', '55590303', '5781249406', '921315105', '13888491', '26815914', '604246948', '059359758', '88969130', '0034656954', '062976438', '76975616', '22060383', '29793032', '097223066', '678111181', '2875870639', '47044093', '45280746', '3144015407', '801352769', '4477520313', '9690889079', '149865541', '2194568805', '682677589', '3227291636', '826118107', '5190305520', '0018479846', '48674326', '327231893', '4747575123', '10552798', '90212266', '799588992', '98042623', '45142448', '26464578', '0507412909', '785878056', '594934804', '476589023', '79275600', '2062377982', '052238390', '2465215727', '33179301', '039934602', '26970679', '979558116', '4155762782', '515083840', '554005935', '8005880951', '43336835', '1720732494', '2096446822', '2803655936', '0620851988', '809044785', '6931763632', '5263206223', '64436277', '05017284', '67355569', '6399110962', '720582267', '3336987972', '845963872', '52286243', '3499766266', '380097676', '055846832', '5543543012', '4607477782', '532688269', '61089095', '07476113', '9980411886', '18455069', '038580925', '1123413285', '4568069987', '22173342', '473934915', '5079299403', '5034532746', '237801645', '767223257', '365104223', '475991386', '6320439142', '3014954415', '2393696865', '6960489525', '753927941', '0587128953', '10808255', '6185285601', 
'815338262', '3468653478', '9325951162', '836896440', '81231109', '04232442', '0339787631', '8116322033', '72435725', '78199481', '6623670715', '7497022031', '768770566', '570270314', '770113924', '51755920', '296176018', '85914558', '082493566', '780709335', '1516455147', '307792672', '623168125', '06114573', '374342115', '9284824614', '5514748206', '46213765', '35585823', '146747402', '744763411', '678726537', '8803359796', '895430583', '068679409', '215298344', '9922819428', '1307325311', '076302323', '0068981410', '5950918583', '01156629', '1620024377', '1600481619', '84002807', '336342871', '457399541', '7877124973', '9385809198', '600675372', '57656654', '8457230788', '5036695920', '049659433', '0885821790', '64867770', '974124038', '185381359', '082192019', '39275722', '3050617869', '9323828267', '7421727024', '08002091', '33612808', '39482965', '517914526', '95453143', '6794328643', '68079066', '2361181657', '4066287420', '0557215634', '76742772', '67230775', '610677648', '88608942', '068200301', '80664642', '1209087981', '54148582', '748031762', '835137298', '08700682', '96223744', '94714444', '72486378', '9794643771', '77229400', '71224398', '790613157', '4391154217', '277158244', '70302648', '44236751', '761954172', '549911561', '7840267994', '1957883320', '70368741', '1072112796', '0002532933', '30587209', '78853288', '2354635308', '68226759', '2126941799', '52853145', '5918357358', '134552723', '585232938', '15706714', '02395178', '71136109', '6603917194', '528456917', '8071281648', '231871937', '40321438', '81424880', '98537308', '919872191', '10446972', '91855469', '66908660', '75240916', '5670250707', '6930981556', '0305699709', '1966609021', '41190574', '78457484', '46856528', '4935064333', '0450591158', '39869629', '489755977', '4271273473', '36133129', '460208323', '6377733157', '9285523382', '318233636', '76567562', '18222161', '544769033', '60485941', '01271370', '270503603', '02464934', '185020011', '360289921', '8283444097', '7736834244', 
'3407881692', '268669167', '07349025', '81204597', '7940276044', '965978683', '44775739', '3191687379', '2344186284', '350037794', '10732998', '53790398', '0791000531', '70689614', '13905454', '201657861', '811858357', '61271226', '5560025456', '4070743278', '6774884983', '77040274', '3866858028', '05370508', '1559931743', '65535047', '74731216', '4824926728', '44364531', '5540481818', '7709415915', '6830848540', '252094073', '131010594', '3372831279', '6441728983', '430295322', '22541512', '53139393', '870471116', '2367380929', '067849308', '37726382', '275851917', '512598930', '163283132', '92460464', '953652929', '54083056', '39226248', '5799122195', '4245299982', '9082075190', '0444758795', '53245183', '469867617', '164311311', '317978182', '12730130', '976010326', '9719138726', '2467486015', '94002652', '956632593', '635377568', '676493064', '152866216', '76126401', '66532472', '11466418', '445181362', '907192120', '36537603', '5431260378', '8291811753', '6150262035', '938069102', '77895236', '0033023382', '9371480881', '12614080', '338243630', '0326522922', '49375868', '131606994', '6608417042', '9290945514', '829587301', '394641940', '1020612427', '75503527', '12796973', '14886905', '3442137341', '559180474', '4347853300', '48515148', '86673118', '4951351448', '4348709136', '48944108', '72884664', '17920249', '781036053', '0200029656', '3307271853', '44274992', '619815355', '6247314876', '026963256', '952416746', '33906888', '3126971409', '972511513', '199585279', '5873159473', '948166536', '067389265', '2866563762', '6300923033', '3147291451', '1353062353', '074955414', '4467023955', '5190859043', '435627930', '6833833970', '20581333', '399184478', '8304085418', '48848733', '759157968', '89379605', '5369930018', '2546306380', '40615981', '02159540', '120559095', '17508577', '070311833', '6885397067', '388094637', '3417082843', '3089294661', '4058227253', '542599361', '27202884', '84790803', '36782293', '16495498', '8046071938', '9620222137', '708498265', 
'217783061', '8843804447', '7325920882', '0644759839', '7987211156', '3486732857', '91367823', '45534409', '3133428320', '863508868', '139580930', '5796632961', '02120051', '318438958', '13034839', '41968727', '688539910', '20035797', '6092703737', '83345324', '307654637', '628078524', '40259675', '4795673640', '16085534', '546313421', '91298516', '8900119147', '95596484', '28744496', '0028765135', '85630184', '3297417128', '26326060', '780996066', '70986734', '0553123189', '9372157359', '50781289', '95214997', '4931726082', '04609878', '27619040', '749657932', '1735906709', '7931437962', '215010366', '567047650', '8171889510', '9761487481', '8440454607', '646631171', '866489663', '782516083', '9694730813', '00182828', '658710185', '23991076', '204907747', '21248818', '03763298', '567835088', '34813633', '3367154650', '7070117794', '7108499449', '6931829564', '29843303', '062796697', '7479447305', '50232535', '91908775', '709411592', '387102064', '7008719931', '9520213865', '82950267', '55355728', '77881390', '657733679', '77040271', '8643545449', '1308271616', '1100812278', '164090804', '9299537999', '5963366857', '950352152', '309341862', '2941726808', '84987029', '0280401129', '68892637', '3614954253', '82404408', '49790182', '52372324', '66045769', '649209220', '318647827', '61119400', '8825988478', '889775787', '6385356736', '36448890', '62987162', '69602948', '43410399', '554351821', '2030220473', '02422461', '280714743', '62720532', '7029022477', '0929453282', '9935998761', '2217951389', '6351932133', '81170639', '84371343', '281373521', '90643874', '70448188', '248786351', '09869806', '00976387', '566831378', '3388482467', '98501829', '118578477', '800712660', '5709208765', '3882595848', '045787244', '53165792', '14554091', '39048111', '289991607', '8830557278', '37070879', '48481069', '4032450677', '5155384150', '45008126', '084458889', '83710514', '788843225', '387295428', '2590367953', '5535189182', '12306085', '859328245', '8612892566', '64907371', 
'3070114214', '0885969645', '52438877', '514297062', '160907614', '0642716854', '89280579', '74685985', '437939057', '952127320', '733720198', '817596822', '226012792', '6865755410', '2304156624', '12668170', '107540355', '384007694', '2007696273', '8256842845', '5655232678', '954705511', '431178860', '4636727382', '14200765', '874578159', '20833621', '8770409001', '965381089', '112122395', '675089196', '805975720', '860660858', '1248911907', '845449495', '6362963962', '54450096', '308425288', '70082895', '09143233', '241217170', '467792710', '79211322', '287974980', '8999357635', '842463281', '68762269', '521558215', '19288918', '4521253872', '64507860', '532614600', '0443124440', '564235528', '837720558', '66654324', '86651875', '616646688', '25000765', '6472640790', '26179377', '77930013', '58422288', '6993332382', '319646126', '2422817639', '32073051', '09123612', '70636078', '72063617', '820704948', '02211953', '90435168', '0358965494', '88572011', '69088834', '696423889', '641001455', '5288714726', '1072080522', '671823499', '643859192', '321282959', '25684453', '550816526', '9958436635', '957326799', '49456733', '4972524844', '71878850', '38005674', '86545503', '308964525', '227382891', '423754307', '461633520', '1490951533', '9920193623', '744198249', '54249321', '03570754', '37616005', '79206565', '6894773162', '07955859', '730177594', '0023277381', '540610205', '3300919713', '045847416', '352205028', '45391679', '904993338', '71963956', '702830564', '67786583', '54703842', '6977334242', '12543138', '6132474115', '47942998', '3465231035', '90795799', '22745470', '330017838', '026534375', '922816136', '979499176', '27038449', '283706185', '62872676', '907507291', '720484976', '43371085', '151842373', '366259142', '10800397', '82209119', '9429924609', '9157787891', '7412344614', '448027021', '5894470303', '843941938', '6772366796', '6948531420', '764730746', '401519546', '9184304645', '2868022017', '7728641163', '96349804', '99528105', '3480675786', 
'52529813', '787206429', '049658036', '767703868', '7436731409', '0404249378', '1714895149', '9089815213', '12654177', '468656060', '2677296593', '727597106', '1697707365', '30780236', '7525150707', '20571991', '9405639281', '571883945', '268074876', '25303159', '7906990011', '777208481', '852577480', '61544452', '336338314', '7441513251', '758156953', '450960729', '6501053327', '5908710089', '54474164', '1978101369', '6995238157', '844215765', '3789972458', '7266989380', '53275394', '45935203', '954988653', '59912772', '93840003', '322309760', '738144286', '97563443', '9420070692', '30697432', '25847831', '22006605', '02237928', '052911921', '4024393711', '18350998', '94840148', '65654145', '4236998127', '04872401', '6893378985', '893920509', '72770029', '21692514', '1481783089', '67767738', '666563803', '48895854', '63237353', '27732374', '96387809', '1156540763', '119264019', '380386228', '48186480', '844323281', '17258121', '3240702571', '824948808', '730323033', '5421318423', '924065151', '742983642', '4645908370', '6410615978', '4259786251', '77687257', '587457392', '09633558', '965803707', '17861226', '9226854527', '06348880', '897322378', '8562196577', '0237878419', '4184458320', '032890372', '856930818', '4416851826', '43468553', '248836917', '715928600', '305653231', '638247515', '8859483472', '8794346515', '44776105', '63093970', '7779284623', '3295922271', '807819641', '836250226', '95715241', '0528498231', '39903741', '3530367722', '1782420221', '863545943', '230220382', '04270587', '975138672', '23978071', '50276720', '118364846', '88772979', '4018434689', '988684013', '404653838', '25672863', '878897254', '04650331', '7323710474', '4807414926', '2966025529', '5142791494', '25468626', '5689339281', '88197211', '380316705', '8025156238', '21518994', '1886065269', '61078725', '289200141', '0915845199', '84637292', '44350479', '1513865934', '43591939', '30652969', '90810053', '398676413', '186352494', '942417420', '74316929', '38498668', '3379381460', 
'79986412', '25583813', '72363917', '85163344', '45726520', '988568675', '11122454', '5105669198', '752034435', '9989181071', '85518045', '01036413', '50923782', '088821897', '82284644', '95395459', '975829984', '6254933772', '39143322', '54429096', '41868250', '464143093', '8271163434', '5659969694', '929233788', '96711684', '21148055', '1949403968', '297940962', '3614062449', '3994380653', '57773004', '6103810102', '09655229', '35599816', '588050929', '2659749389', '725347660', '45531823', '969335938', '350846509', '953684597', '02973469', '3498915444', '092038710', '14343934', '469886053', '7046337632', '469475725', '9594513276', '9016246906', '4820269644', '544513549', '843371907', '09805596', '2594267006', '5240950240', '920001479', '3121725225', '035402127', '2905635968', '63649651', '725986435', '9439821552', '2198368838', '430867001', '3622646628', '32019427', '6380677802', '2023685808', '3661897732', '557754215', '078401581', '938807027', '3795472262', '2345113439', '977456593', '8004379625', '17747234', '42812635', '705037414', '50671415', '9159968717', '3392575060', '057177082', '797473329', '96143263', '06932817', '77086728', '3553702411', '943801659', '21875119', '3397471834', '29584849', '762057117', '10633439', '40618881', '850221871', '3807082076', '858185987', '31896763', '4838738169', '416612222', '80316649', '960041935', '9479493412', '3730833229', '87606151', '4206125348', '58013099', '11513737', '76463022', '4065129817', '824129992', '455786178', '197739104', '6273397907', '97422024', '3385151114', '89153696', '398743381', '9873442252', '72591127', '460503802', '70766024', '35055380', '898787596', '76431054', '67732356', '0262255375', '378373917', '6487316977', '9740165141', '283316905', '8084484402', '438922772', '281118706', '341012047', '49215585', '033331222', '48112508', '61466913', '397685968', '70943050', '228236812', '50156679', '47315319', '949980648', '9824218817', '5980547708', '3847648634', '36441251', '14169364', '35384903', 
'76824457', '7866761470', '167720650', '60616531', '70755813', '545866883', '96711204', '4718097213', '6380611570', '1411960694', '777466869', '0889775165', '6561409364', '4769369150', '488380013', '731093636', '23967424', '393118545', '40680786', '299785636', '94910143', '077182770', '89096893', '861030968', '38335642', '028210128', '606128914', '5631950549', '688563101', '23984020', '232716347', '4409868301', '7458173710', '529960166', '090156871', '4564309646', '861086121', '654883131', '76009165', '8387273144', '9885100032', '06201612', '341734433', '11943261', '0147118287', '90740296', '661261425', '2578307245', '4768589622', '94381589', '932192805', '730407794', '5707684582', '4658708495', '9329747304', '23018872', '00728274', '7768213646', '1291161899', '231172760', '38368033', '21577767', '2266337167', '09693757', '44785105', '577521544', '44363207', '80974934', '6678917342', '768456145', '40708193', '55288639', '708326821', '7884689054', '555472761', '25987148', '6583408014', '865759822', '1105893988', '83279705', '56422167', '856471911', '79099695', '64465790', '124415536', '0279140121', '67687543', '75653610', '834479669', '909381957', '1243061561', '506920868', '32516616', '2626698425', '3980827953', '57007585', '09914417', '452252741', '67555963', '6796770747', '922556704', '1629569184', '23237947', '61809846', '255904474', '520280655', '542885716', '11439247', '85760288', '23156126', '5301743960', '562823164', '66954937', '29062193', '61622240', '5822381839', '816605428', '035355044', '6612367089', '037239953', '0177518694', '54585533', '572929373', '868125488', '0686783276', '077862974', '55850546', '65929184', '216470691', '9966076713', '861839557', '663673088', '236081770', '8185518810', '634609317', '604394336', '94087221', '637917968', '23025343', '409966525', '72721480', '52199976', '5460504579', '6346890205', '549604274', '4310958179', '2545932242', '20833947', '6926733691', '1059836381', '4592724233', '38771785', '50020341', '26151675', 
'50509461', '74004920', '74846348', '705013080', '2703921189', '2113830825', '36291918', '882533339', '65428320', '71650253', '91496684', '5328697013', '58613220', '07458015', '91406307', '0717819329', '237524768', '93120607', '829020444', '072259017', '9838912487', '3111134836', '325241477', '044402987', '86382032', '36776794', '829317122', '083245611', '764078734', '362298749', '323744241', '82503998', '3625433860', '62033757', '53635414', '303004845', '9442658305', '38117145', '3416301131', '6579691864', '3252991772', '9317267916', '5648740291', '886891405', '696250672', '3873052737', '31602612', '170114277', '24335541', '26355143', '6620137733', '7486520338', '42868778', '947455211', '4514654655', '9023954007', '3908107455', '38214710', '4546986646', '6878660234', '964139259', '52997342', '486798399', '225646771', '2375821055', '48373827', '6450311401', '44591372', '27756403', '8883366218', '7617007951', '264850653', '01601753', '382365362', '86579837', '98595386', '72941619', '0740480528', '05836098', '2295522075', '711957094', '56992733', '8320211772', '886529544', '510435788', '84292925', '26815748', '49277218', '60851772', '28645857', '0584038573', '827958094', '126565523', '447672232', '3699084019', '69303047', '238413814', '4560478249', '5332901388', '36565099', '501722292', '7421905168', '307716007', '61556333', '2248871204', '1214834965', '94302344', '7292505403', '6293635637', '96613303', '8323384777', '6809967203', '103073062', '480112763', '5846997883', '626694851', '05172685', '15971706', '7313114190', '6454032073', '83360646', '19283829', '905297681', '29463721', '9149926373', '41005844', '9686580018', '61252456', '0301892071', '15219977', '32858633', '4922157376', '932531818', '92975453', '397605778', '49034838', '875090900', '495708969', '67308826', '5638746037', '5662844501', '35967855', '148915702', '539947690', '432590698', '3836292379', '33293468', '432028182', '6307664605', '429386270', '341036454', '926001340', '080235300', '661958476', 
'0944369261', '6749924788', '3719376861', '641966942', '495616619', '25409888', '06247947', '718362172', '699867030', '30074111', '2775552244', '16799630', '8353627452', '891130021', '2069569529', '684934689', '00953655', '963722905', '6726639892', '147816695', '550369305', '62871295', '4230709722', '9068032998', '301613454', '5146585902', '4602722492', '3843869660', '32144084', '90137221', '447791530', '907396506', '7737823064', '11159528', '1232510510', '4117479947', '1644527520', '6857116286', '7759644343', '1884074290', '802665452', '60129937', '8666036113', '32809979', '985002198', '60704734', '9772134549', '8492256677', '850990842', '59414593', '0174645715', '71082682', '7862653289', '4292252408', '37781016', '9397602423', '3497210131', '735042089', '922810392', '27575442', '355533504', '048669658', '42605097', '7193060964', '68219967', '439833653', '69386770', '0768509812', '633449315', '56074369', '974696232', '1393101165', '8907901793', '675768073', '94344480', '116173775', '268510435', '8172702609', '8114163509', '6048926877', '855547316', '9687121287', '0208250862', '166265786', '8407778302', '09777511', '811953493', '983125668', '38466693', '242654312', '631380316', '234201236', '791951120', '20983406', '53735474', '0842510534', '0994922983', '2453099472', '5588530883', '1416424072', '1472367715', '6785444184', '76140741', '9611082246', '55467573', '39448082', '27958718', '8706495679', '323603931', '75842313', '89046427', '2153542139', '38867869', '451127490', '64762926', '4357711767', '71620029', '9457161814', '601110177', '9157154748', '00328909', '43657169', '1760353381', '207045561', '74600694', '0387732986', '440686519', '16190043', '4984777493', '13185339', '3287645228', '67615127', '11241611', '153235974', '990116494', '592077758', '780983425', '043955211', '799768014', '215190699', '232001455', '7184712062', '317005544', '4190840472', '8649405028', '421383805', '2211857537', '922992967', '042488644', '1072664223', '674072619', '481406904', 
'79316156', '862260399', '56338487', '3132660097', '843622299', '2936964245', '23762067', '06610431', '72176065', '6762455659', '382226491', '1391447402', '634745202', '1146501295', '3513746539', '9528439391', '63169325', '3253525518', '7138689573', '84742616', '17712930', '039569775', '979168762', '8353412544', '495137573', '891822429', '263471279', '4244048194', '1419855144', '6954751071', '140342955', '56652842', '316985895', '573101742', '1590223678', '06703026', '286736518', '252347459', '872244038', '84443936', '849958660', '95558217', '575059985', '513545508', '51260522', '13252258', '888332100', '608101596', '0673698790', '545017618', '605778308', '28712755', '2291573665', '255489778', '47775972', '6846459250', '162374284', '7504910787', '179061648', '560778565', '01527916', '800214094', '43741146', '19468999', '24572592', '297268108', '098742888', '93363101', '50019533', '98502269', '779073060', '4142505857', '87625177', '35559513', '53383687', '21480371', '19090062', '2840941119', '179805939', '5331348770', '69236407', '336509881', '11748283', '17123100', '8190082355', '45089434', '546918192', '92534064', '91726672', '43179768', '2952766447', '064157707', '9950642831', '263384054', '99386538', '270868434', '744379154', '236912453', '4668544712', '328658177', '89867302', '99825076', '028097902', '2102782050', '5330603477', '328374999', '2018231672', '936539911', '04450765', '9359968111', '209758372', '091827864', '089595032', '66524447', '8818710719', '1746516905', '191359183', '729713543', '189014317', '69867422', '105745039', '99819918', '1050535422', '063586394', '612840517', '763370789', '3486717796', '6433321274', '458672303', '8413555578', '9193218833', '4825021578', '1218901055', '6893835279', '3484374191', '0358710084', '6003926101', '1201056811', '180766885', '9259575315', '3203406821', '5688335799', '58734474', '71811323', '015603404', '828733010', '2170998268', '48832597', '03223347', '772939404', '09460234', '672872601', '2722422790', 
'31327690', '98424945', '8666666820', '619406817', '69075364', '088528502', '6022604420', '862419255', '69938400', '506693024', '389223183', '08022206', '969623244', '114985269', '234748622', '4286728560', '6415236451', '904539396', '452127702', '4971407283', '2613112216', '026489637', '3910626149', '36548776', '04838328', '856235524', '952566639', '27137666', '300940846', '795340646', '5932676117', '095111690', '266411615', '57474001', '518858792', '10519600', '837171780', '869266138', '04571218', '54166504', '810488682', '706128253', '28455315', '3286354859', '92900073', '89526940', '55946083', '991642822', '702210232', '267258196', '147973117', '7755605267', '779197880', '5119478109', '718364739', '78908247', '1489625686', '74798086', '3862999936', '5609012675', '503603309', '86597703', '91327638', '7324517968', '4461295184', '3456296979', '67355393', '94113110', '10675676', '105011416', '87596886', '5164890507', '92452993', '02775106', '68503230', '80846383', '4716991986', '153889604', '2172274131', '35611492', '7331026501', '5489617484', '86801053', '9907516795', '5221481485', '6206982662', '70889180', '799503522', '0867359803', '36791148', '4326642822', '340886085', '803155896', '45536203', '22876254', '4643397262', '802298802', '0212199454', '4907233453', '65486053', '91551855', '915308134', '210336915', '92730869', '51096593', '876665307', '7195304003', '84720191', '32149023', '167748488', '5491066551', '18257128', '57720932', '35474620', '11064350', '7866229282', '03951018', '31757414', '246350333', '79325512', '91412231', '7493762356', '388956545', '3158028001', '8023417987', '19740025', '8708307385', '55746806', '71534875', '9574298540', '8476543537', '65130138', '9493225077', '362174160', '161534496', '477242179', '693366884', '2349604557', '411005136', '1918163307', '438686258', '67979091', '559473152', '50840854', '8927955018', '24113869', '081670813', '5200046803', '37686030', '0640692370', '771034514', '69486738', '17211872', '92850576', 
'051863394', '80452249', '087267816', '7338356548', '118385752', '296675498', '491038895', '1660007707', '4667191808', '80742972', '6541497026', '819078702', '481948241', '9658629378', '84607482', '902314431', '164956613', '573408452', '67779803', '619484368', '20216211', '5295004895', '35481436', '831520485', '43241509', '5541298152', '97784547', '91925267', '5169368547', '341421558', '13751201', '8100180320', '294875805', '0903439494', '851995273', '603909830', '10305470', '093666525', '5438189915', '516569236', '1844629104', '353930500', '221424399', '5632095864', '610905301', '62057018', '63210592', '66696189', '05676724', '725984863', '30932426', '914156454', '49918934', '87012078', '987796342', '3623852505', '559454554', '506043972', '21198393', '466169085', '49923962', '12687088', '677236465', '9917470431', '5515428701', '814410132', '640003395', '823253321', '644751681', '00418798', '29395591', '78897201', '7458448545', '733111751', '53897163', '32486357', '9562049578', '4491575611', '9558309708', '506805725', '95095599', '31261254', '3001564702', '83163275', '5761530970', '62932629', '44134770', '68759621', '24523836', '73073540', '666830961', '1234245881', '16687477', '3854529125', '848175465', '93663660', '8483200804', '54246439', '07201818', '48885228', '198625492', '581771128', '4627616514', '4846656446', '1856658676', '64533204', '7239780829', '74474785', '60045422', '6466491483', '924110842', '70316840', '013737284', '2820400341', '4858763032', '8737066724', '74818804', '5689499359', '1372693443', '58227233', '9682263070', '62891036', '76176328', '971450575', '489587982', '6244497619', '587835189', '41868445', '43422989', '16138522', '91860144', '111624069', '796849368', '9389872946', '880905206', '60995029', '4576931200', '453649629', '291535601', '9333811892', '949373240', '1256106435', '02550285', '17265542', '965255038', '455465546', '26413811', '98723005', '5266866193', '703960192', '97965357', '871592262', '544206823', '88581736', '626541483', 
'806778228', '6838125655', '09217509', '976818978', '6082688848', '70874864', '888794896', '3603394169', '42397995', '4014546548', '0126245634', '522892142', '26272598', '850782094', '4834970638', '3139909632', '5247413501', '78513970', '780621070', '523303892', '2643474888', '973978272', '4497495770', '40396117', '37572712', '803572707', '117235213', '1820970254', '227069049', '12341627', '3428958171', '991999423', '980825964', '7919046756', '9549977178', '237600100', '5571534680', '74039017', '5165281727', '632521522', '1054653233', '60656473', '4310053675', '824522588', '125894313', '9147802581', '21649870', '0107442526', '910800767', '8490324280', '057069283', '19525873', '287085914', '5386921532', '362501700', '174892066', '958531720', '70426812', '1022084797', '968460295', '1665732260', '595090057', '24909951', '024277125', '226065209', '685968971', '904150006', '270055783', '636466455', '39928088', '510560363', '75729532', '9965688803', '2853393420', '65438499', '9935368793', '83694241', '968931712', '693008269', '35951467', '551752286', '89249455', '37846906', '658523188', '87498710', '0624530965', '13743608', '80352259', '4692537031', '2219310267', '48594030', '7957226020', '8831252685', '494354228', '79784730', '525680528', '3331261468', '2804040302', '0922782029', '753820869', '62682362', '5702443902', '0503591452', '7777893924', '527474839', '458989039', '69204057', '5527098619', '83181255', '31037526', '6367212245', '9312544162', '7116186014', '752931342', '49428895', '9729685059', '1049044287', '2813726214', '53985957', '5292863634', '861100640', '067270303', '993400896', '986989901', '1927666994', '50565740', '2653172994', '047626531', '761806259', '543258421', '905190076', '535994464', '968048124', '5690972724', '672524643', '9595609016', '03187922', '527190068', '03585637', '35858636', '6108074167', '187318374', '74719350', '49882690', '740779161', '507719272', '85463811', '6361574172', '814319462', '2828382384', '746884827', '395119783', 
'7661296744', '494509982', '94024460', '4344192351', '30032939', '60466622', '416360618', '69485121', '2491716927', '7248275965', '50521729', '2935511103', '199724129', '4740771871', '68865213', '820001784', '80062179', '3482617918', '765432103', '3905734859', '4517988480', '3113339549', '07507613', '618993809', '218326211', '299204934', '9511461293', '6310035412', '443596488', '483884008', '696281142', '6248596035', '8338642558', '351688953', '64983776', '4173550121', '305105768', '185265858', '8312017253', '64094614', '23355492', '3763890290', '465291000', '1796626138', '693514045', '6571384082', '01756584', '72421233', '4661200208', '9013780169', '7057251992', '8907563063', '08981041', '8919864707', '8043443356', '01745999', '235514970', '75419192', '583761020', '5198563556', '05330295', '7739169585', '31655927', '5333157195', '4509763719', '403150056', '31874889', '5354528450', '4896770012', '57624178', '4726231560', '835117436', '9853189702', '69960030', '49174730', '19723196', '78637945', '1138283262', '15703285', '34455456', '25128417', '990011487', '632921668', '17613342', '972878461', '12536449', '933300439', '721173437', '2731565875', '215225576', '106867412', '98078218', '141120017', '7742210099', '2034614701', '30845923', '854811921', '53292912', '862239531', '7563444303', '392008652', '580450697', '526682860', '221522435', '6578804521', '539434583', '13956301', '15766316', '92412246', '9504645111', '441447623', '2722986720', '611259227', '6973471986', '32997397', '82876300', '0152751519', '681529185', '9062654759', '283306228', '4151953036', '03452763', '2803311236', '2258296645', '7098336112', '2502669842', '313982570', '91902939', '55292586', '383814299', '593715261', '5876662497', '304883042', '4991837314', '6804460809', '8104454362', '224345866', '41287367', '431030863', '694715117', '544950747', '26030964', '4216010333', '549490060', '7760478977', '17560845', '0381154566', '52078037', '809193928', '3208442627', '642977364', '76444125', '62585150', 
'30436516', '9513444287', '875634019', '472773715', '326129225', '51829765', '1190864127', '7125490493', '98126057', '50584423', '07244419', '453293948', '5375341997', '071429306', '6354174212', '758142432', '62319710', '8605530606', '47054800', '70661922', '2742447911', '992007009', '837999819', '0614513542', '0782708432', '346176045', '43377765', '092274084', '18812960', '5101731763', '58862181', '6463876480', '475034864', '55206665', '13389833', '4787716065', '530406294', '4231231297', '121755706', '59258870', '5324070491', '830648293', '885391883', '625563611', '91794318', '2102317864', '734591839', '8167332617', '611495226', '3929219445', '2896460570', '21321613', '049998935', '43842857', '4123840737', '8712117947', '1945598087', '049865737', '32119629', '0096491387', '77849276', '958744828', '916823674', '807978149', '5673837480', '63238691', '6021288905', '7091409443', '26315618', '960554158', '02955547', '55969113', '58634065', '52309741', '7120925180', '45720668', '2656253700', '4385324000', '150759368', '41186684', '24575969', '33057191', '8932857026', '296837586', '5163612493', '6453620522', '2513435793', '146266115', '727541414', '509382387', '182634417', '970780540', '784019063', '84946390', '9651606857', '80486547', '146346394', '249623374', '76459335', '370989653', '9135388799', '358384297', '470467280', '8573318791', '224665693', '3713265515', '4893963760', '513817552', '0816289160', '62838881', '6015794783', '474723904', '8866501642', '3061014171', '593588892', '737416643', '96281394', '8363238748', '1808568102', '452010573', '837884924', '7029299005', '0283779720', '5020314711', '670303613', '486879398', '06909652', '3260290928', '4904366181', '67018209', '818989500', '9359483013', '03204360', '9525199206', '037007521', '150509605', '00005830', '363468799', '2232200137', '302695992', '6718967824', '2899053771', '76727050', '42491221', '80317325', '4092150173', '6591160218', '325539904', '888474932', '61913388', '21335821', '10839501', '201279065', 
'9741548594', '151454724', '07144204', '52631847', '3295391805', '1543347076', '68877331', '1562389865', '0269166582', '1216320187', '2941439205', '058134880', '986679319', '18717134', '2966982727', '19378370', '2464285958', '70031236', '96984216', '6608094845', '3353729809', '823615970', '40504967', '731134708', '340944080', '868197128', '6085703159', '00686929', '84351508', '844046017', '02937686', '86288949', '5211624888', '242404576', '237817439', '5665249832', '6367216605', '9963169834', '76544226', '2109141656', '91011261', '99125225', '30866638', '4177682377', '6998141719', '744287927', '33781965', '873813909', '80922945', '0152479601', '08147305', '190013410', '6097331828', '37471996', '005569304', '20030326', '12120496', '10553997', '38456626', '2379142818', '7669563596', '7269519381', '0810374374', '6022593058', '13270652', '6069089313', '3557647108', '7299835654', '6604195242', '362531017', '3902316224', '30138499', '023749879', '0881609485', '5784012209', '623337950', '9652479766', '5833221647', '24158071', '57315112', '79822745', '926283519', '828484952', '22538255', '880133307', '9065272381', '549222286', '2465142130', '9586452367', '86206694', '005328848', '56170242', '90337935', '373552303', '6114858470', '752443796', '632640554', '7930772199', '95203410', '071249420', '2155599156', '33866220', '921723882', '14106446', '94963351', '645041333', '6522369809', '3254854049', '688498371', '904025791', '091687713', '68622250', '299972558', '471541941', '573287797', '514745433', '7469592106', '130350487', '82217429', '948604084', '814501144', '876041124', '035289036', '69096546', '577067812', '65715886', '886574445', '82017690', '976180038', '21187162', '494377910', '0893735396', '577968658', '8325189217', '889639149', '1745516282', '0786678706', '7097820703', '442543820', '0838154536', '199117068', '35386396', '043452366', '604200236', '3918046952', '5345226594', '67846089', '382466336', '7348894759', '7244872326', '990254697', '8980996886', '14160748', 
'570118470', '1991058311', '474842745', '9761633214', '01377112', '11484793', '06971051', '861210831', '34482661', '14107585', '1430772920', '874699721', '2158324925', '01668958', '5341348430', '0983108071', '595704932', '560630971', '2264505386', '98465297', '508983051', '3594081703', '90803367', '782334771', '5722527999', '592520924', '34479577', '62225497', '07183704', '17566975', '2040815210', '54354523', '22297866', '85512938', '93872723', '5001842004', '475155328', '686830058', '94566108', '5232537986', '5857447946', '2873317305', '010552628', '390024513', '56644540', '646716365', '3075626088', '6112717427', '42503706', '60903993', '64248099', '89149709', '849263179', '67262800', '36883723', '305566488', '19453772', '0100507766', '071297891', '25116640', '797856275', '08829141', '465705205', '36114410', '4973537926', '215316282', '6058401631', '76341460', '44109095', '3499878852', '667130207', '1781327257', '5083764040', '8527609637', '908440744', '51921545', '4619095751', '3088760539', '876589763', '67814873', '4389499915', '966611240', '74284983', '289682362', '99139126', '5926647403', '9950492646', '3255590585', '0302521362', '6074788010', '3129079748', '9694772878', '131784207', '3259759311', '4335527600', '20119200', '44727627', '87652004', '1000959450', '421077581', '4735976229', '944548607', '8898395226', '999127959', '094444127', '0896230381', '01153953', '260389526', '2698276040', '3397675107', '3781686913', '955502991', '71119264', '9601807933', '870458729', '26976428', '0661091718', '78175287', '03992857', '555745473', '78353431', '2597308329', '57894958', '74606336', '1470508853', '210727243', '7813362162', '2737243308', '0402509566', '8605039248', '01157474', '15321667', '23184581', '8394661677', '3910944572', '02868514', '1264468473', '73884327', '3681766160', '521136309', '924657539', '41913711', '94863049', '80843825', '272050368', '96133959', '1941091181', '56473015', '14876510', '5885326270', '6597881411', '147205102', '957544180', 
'521697629', '8414800656', '65606575', '845857911', '8889080265', '298442942', '815156800', '15171157', '677318073', '72348639', '90445816', '67649697', '183433828', '6210402560', '85968607', '9412741882', '3218971873', '467931741', '988138847', '918128908', '8559267902', '2707125672', '50407361', '8121855072', '603476012', '7664371769', '567847775', '73885135', '2673212099', '24073467', '750928575', '7425525196', '28598700', '33952444', '7123595224', '1452456764', '5216177048', '7001130174', '0701609187', '1389684923', '467551122', '908945383', '903866348', '683275554', '22933699', '23014129', '2787884033', '07767116', '7446509192', '35490679', '806513008', '15258328', '8565976685', '26948671', '89097290', '6287746249', '2109586179', '36504637', '3088023522', '2469467473', '87460304', '2005555423', '617565973', '483682915', '56823195', '9057501605', '0848608414', '24892777', '9394817140', '89653511', '6457535796', '615310839', '251363055', '50593529', '8252356699', '170461509', '7603674145', '3784686284', '29766573', '673399540', '02530199', '445223290', '4114422425', '321143660', '27252487', '260747912', '863788716', '358388246', '51542334', '9903013701', '62501373', '28209858', '822930449', '14247035', '248198282', '659902249', '8810352249', '10285764', '974283024', '88852281', '10923890', '83824593', '158927448', '829223765', '84967635', '57798532', '24444904', '29050289', '34511573', '5627525314', '34191778', '20636537', '7782569692', '043800165', '12733424', '88125060', '430903773', '69023499', '1389080404', '517309504', '7498500664', '095308391', '96742227', '8131620371', '1296170546', '999909415', '4678227322', '54544878', '6232691822', '3627445538', '5624248641', '867790603', '222980833', '15930229', '0490072902', '5809341480', '6251594515', '8030415344', '436303953', '25400329', '836739525', '8874887045', '662548659', '6551961865', '14976910', '5428669435', '43128715', '948493899', '2844046548', '53826049', '73680312', '7232864670', '3025233051', 
'8214380885', '026890279', '774566322', '86878658', '023942386', '286209858', '92912770', '289211314', '174607454', '59367141', '62512910', '038432214', '90547689', '1012375431', '284450302', '948618734', '085587460', '2896309008', '963557920', '95741938', '26950510', '52460798', '12316548', '1774711111', '11261088', '85872824', '357682167', '427594537', '16393539', '2556216534', '57898358', '0299367311', '336094887', '2384691833', '37273630', '283335122', '9251456776', '034596010', '757067724', '045455780', '019034577', '9224309667', '42327283', '07090495', '283867723', '9004572718', '5065975722', '92284899', '703454253', '21854579', '90194849', '7881767507', '4041718713', '91192969', '48334603', '98547805', '2245127860', '77464162', '25236606', '8513955400', '542046824', '47923490', '480591432', '91122361', '2507551724', '0684534589', '86252723', '48613918', '3637380465', '710044251', '415926916', '39289028', '91788019', '46451059', '171697822', '0471672418', '222611642', '312056989', '126682330', '766753261', '3851196181', '63469930', '933887893', '6948046623', '599817954', '356349760', '0146335728', '03566720', '3769241645', '801773879', '13774769', '25486232', '1972395830', '59978184', '329024574', '3228354613', '451344445', '0249065886', '23495380', '33473999', '3182238110', '61676164', '98072180', '032142277', '99433946', '50770406', '47500110', '1658439021', '678298010', '73365412', '836774760', '473532953', '7803402390', '986431137', '31746052', '98176305', '5397240402', '89728628', '22346748', '232315654', '8074650472', '6504841480', '6657671429', '4233031809', '7001560546', '6441556570', '8847783101', '2977971985', '9947354314', '471071678', '91106645', '8627653024', '61891869', '79939703', '714276412', '78722234', '41716511', '160107734', '0039146756', '2059835234', '579197034', '50695854', '8172748508', '637928387', '59879727', '192582995', '814454633', '9448161361', '08474232', '006257561', '03428037', '118044814', '9254373584', '37467928', 
'728528002', '38288242', '380358463', '99793782', '32521852', '98791547', '3448032443', '20056038', '5390379474', '6975148931', '53258268', '59641972', '913411268', '4784358863', '25097158', '1704691997', '1142539648', '11540979', '18995086', '260704469', '617413002', '8513678150', '5995417005', '3559573602', '4922640054', '69259723', '6474204333', '7899418299', '9938390885', '291815158', '209759043', '499080573', '98247776', '0634814311', '79827931', '62317253', '611815177', '11650224', '02617082', '77560008', '371059941', '7826293807', '779678038', '93061498', '319024692', '61829190', '70063550', '754723925', '4867725648', '434505498', '336373492', '26699903', '46247337', '257355867', '591903380', '9579011876', '17175971', '1003176825', '426536789', '455617585', '8724659821', '7358658075', '16079078', '184664130', '0441621298', '24379462', '08129044', '57813792', '2194800226', '19600752', '95124372', '9051737229', '894284421', '0186628727', '2297085487', '731343706', '932135726', '368921639', '838517939', '90154840', '6902077894', '96253141', '80723584', '14778879', '416297904', '367975755', '777815257', '2551020979', '772090487', '98732256', '66520238', '162725097', '145996005', '857940840', '727135023', '05498898', '4183134449', '517356173', '1220109536', '873543520', '4046231673', '995272474', '15931522', '54489474', '2907500220', '2149943801', '94144051', '6870601573', '81249374', '5603079375', '4625481791', '8230797845', '7935752565', '59206515', '373990397', '38905421', '2737487137', '461023105', '90176223', '8217149106', '49195670', '526643506', '6794202430', '6388782052', '93651413', '1968638693', '31388702', '113913057', '59223809', '439032743', '69922932', '764450681', '87076355', '103388176', '319168890', '3340139159', '7586640479', '181113200', '8168913118', '382260895', '10398761', '95048325', '21336630', '26092611', '58751326', '7783324827', '215422736', '89517753', '1409528054', '33216270', '59259545', '12448463', '1401324331', '207805931', 
'52656612', '87884523', '7119135452', '1518648701', '5091967601', '791737307', '422031317', '463789818', '2946790132', '400448887', '768420390', '00730059', '355930742', '9733430501', '30146923', '55998566', '5191052144', '2054437806', '152515118', '261609849', '4593888645', '48900292', '323385350', '882675165', '3026477222', '74243715', '25232450', '65771887', '3181113736', '518413674', '990559302', '639439883', '4609088484', '97813672', '024984282', '642778635', '8130239472', '8376238876', '5705362016', '3644043000', '2295064490', '50763530', '084502951', '596852493', '825302069', '9113706935', '2625845384', '35689975', '5766415454', '21428279', '57944704', '6220489238', '6910979514', '605181894', '974364634', '311507510', '023595998', '52613870', '659123269', '94731969', '7925055554', '51137118', '46647629', '48695512', '24547428', '488734723', '3928015577', '01125379', '162202006', '2389414979', '1802033813', '179697878', '511964606', '1371866562', '2535012718', '110292698', '7015190071', '98688700', '015316836', '9467172337', '57434586', '91480849', '369285698', '1468336589', '24924016', '108354125', '22688672', '01140941', '0552274198', '49374286', '4707145867', '031996230', '88198512', '15371428', '5875368264', '003435646', '970277440', '19756089', '77672292', '6624593243', '6347359469', '952196105', '911097346', '14286975', '05487083', '689984559', '65560625', '357090521', '9748979068', '42857639', '899751946', '5421153511', '59897560', '403544732', '821250737', '86721663', '699922313', '701127071', '8636625472', '79571988', '09455357', '646180750', '6162635242', '142429034', '0127528061', '838727847', '312693957', '982503343', '1040388072', '6624400877', '3911917196', '97912700', '38149911', '2318070466', '267372387', '068567725', '965674669', '890785758', '604940972', '7423854085', '466164694', '453008777', '6745520541', '2357891853', '709299241', '50049749', '9551582174', '6033847565', '140098969', '39115404', '74076267', '840322332', '45307451', 
'892460797', '953592335', '3982984172', '306428839', '9611375149', '10538247', '29992550', '352997435', '93419085', '453177352', '6556852604', '8897969302', '96697039', '892934963', '050499186', '173308308', '0307629262', '13104698', '2075204053', '5029674127', '13469323', '112452392', '7523730398', '962722361', '9406444252', '64634314', '101911236', '37365864', '64838150', '1026905771', '44209129', '31964772', '168846614', '9761197596', '575012470', '86731003', '0232289645', '9363533088', '019310347', '193608431', '93202329', '5137266693', '499262351', '119999166', '3409869753', '3611212158', '260400057', '8524911309', '98637751', '016993687', '304183088', '0546990433', '1215899316', '948476321', '314034181', '607550622', '251536250', '837878508', '7939164596', '6993004442', '721046712', '408184780', '84963660', '83415223', '4216258559', '968589513', '7582737119', '2547802694', '5347730148', '604811554', '8447144589', '373099504', '5284681049', '27574344', '10405093', '621411560', '21171061', '725617027', '066303779', '56058909', '081990215', '102366791', '03967539', '50189758', '397826984', '8631851936', '8955398335', '682441754', '9219921790', '706582533', '1050789947', '7077659196', '501880263', '573139135', '844806776', '5845544327', '78462021', '128023855', '0779969022', '52856381', '0218575744', '034338321', '98180361', '625789144', '5143129612', '3990526743', '63777825', '487167912', '01882718', '106253249', '84520163', '29532623', '9300674778', '10874989', '512900479', '8580255745', '70374761', '1576593021', '6448400205', '9038897314', '6637591158', '92784849', '710751512', '855496989', '250500315', '71595699', '047460012', '07779077', '18091916', '504016129', '038858706', '777711743', '347065599', '61232074', '95414222', '16487701', '513248884', '2105091017', '612083280', '1677516910', '5591162839', '34664493', '1914799319', '30622866', '03125640', '84479297', '1199565542', '988742771', '94176990', '5594183583', '6684119102', '97808510', '43778479', 
'135282844', '8841462319', '892662846', '971294492', '551594428', '963211771', '6304376807', '16203233', '12241965', '43869320', '295666281', '8074423125', '138188363', '35952191', '20440737', '6013490783', '67384354', '528267589', '03496855', '3963084547', '29892804', '293941591', '313494054', '68676969', '683260078', '08796277', '3329454720', '415137082', '80742885', '77583957', '8144855982', '187299359', '483628691', '87009246', '27951291', '7759009811', '9913051459', '11938316', '14547147', '6178378216', '283697699', '78506600', '339895318', '1496335160', '07862914', '4313504488', '44950279', '572845862', '04286259', '7665825293', '1269476158', '253786182', '10869657', '15082359', '83883415', '72361020', '148570615', '75589464', '89575129', '77579959', '44070068', '49649002', '66797724', '13289986', '4382615291', '5298861923', '331129152', '435401457', '4030629446', '1542043939', '9969732782', '91190701', '1692862118', '868437747', '0199246488', '1553341617', '753663917', '41296621', '323672042', '4303694569', '8838304971', '869485290', '19664222', '4163093642', '566000848', '80874100', '6762593034', '3172782208', '4121919790', '205263694', '47753378', '6342749490', '1433359124', '6411654810', '13754032', '715841740', '093233262', '7698222568', '32714963', '1059707369', '000684630', '454751832', '42063840', '095712388', '9667022323', '44827826', '861589963', '24310231', '4596423717', '583157089', '981145966', '048980678', '830978493', '9256512726', '57927233', '192249310', '70161823', '355920300', '65821818', '11545661', '67176732', '16061288', '6723248094', '0621783390', '360149171', '695657635', '26013502', '38070713', '3353532356', '3417294568', '65513563', '3288734783', '19274727', '44467910', '094432906', '267824238', '0121002444', '1044722294', '7572203040', '8151140341', '16620096', '62296842', '5341533572', '0060185114', '6005194482', '09759766', '70225626', '764932080', '45773285', '4588836836', '2379813963', '99238580', '367087098', '84915287', 
'586118929', '68140427', '52264686', '7017743401', '3707148345', '87249260', '831317410', '92481892', '887434100', '94782583', '75538675', '18311475', '122172464', '34598935', '1294985661', '849580957', '9523199010', '695004093', '03051814', '4242718266', '51264700', '9210349970', '3014983818', '423606566', '75455863', '906274550', '47192129', '04654664', '75445222', '8564412389', '3760735862', '5619354120', '2299902669', '8713637420', '6243214644', '680153135', '08495582', '734081967', '060678252', '47871351', '679538846', '73475451', '9449593662', '6269039660', '44266150', '61929343', '34647495', '496403739', '6244694885', '71086364', '253550522', '944879617', '3961869923', '9484084296', '909299199', '80372789', '580456177', '340120821', '9123455533', '196571148', '134310272', '974201897', '23032872', '97476372', '2188713255', '78309047', '4827940482', '5567760963', '0342609762', '6070764365', '3861476144', '8010718061', '240087768', '9104956735', '044654063', '49232188', '0601860541', '636958496', '012512782', '744560108', '0628855098', '7863986264', '4642150241', '89440625', '093687257', '03063036', '794441246', '2036379099', '37241493', '503295641', '6629846815', '9795668948', '491651414', '575600957', '685763546', '129660473', '0347552550', '135673095', '7469299196', '2532920186', '20878506', '2172932287', '67426792', '41835671', '790188697', '7254749838', '89334122', '8591380622', '194112992', '91723166', '8399730544', '3412475961', '73693364', '7667512747', '9705494908', '3225996575', '1056082642', '4787812794', '42808355', '3780559625', '5585206745', '4261843113', '52706493', '8636984294', '07881040', '935482382', '0112781772', '8342072375', '9443839835', '01266571', '00934481', '066118775', '273121962', '228864095', '978077680', '34291426', '7638645773', '76899746', '45210593', '0854822626', '17492776', '8268316955', '29484539', '145294521', '546932636', '492346758', '85072565', '88338056', '880852647', '977390070', '105206232', '47830595', '3175143618', 
'8032594715', '2676229477', '009089440', '48071410', '4688170413', '266751928', '2320316005', '353366691', '34510333', '554822247', '4737838427', '294338584', '353001701', '2929973584', '3675988129', '50374712', '118020456', '7717564446', '256838732', '8493005644', '083333610', '361262694', '998931820', '7239325319', '4300078357', '4311460441', '61960877', '7854703292', '747336381', '4613805290', '5977757351', '81710551', '1761202553', '9908340690', '57701405', '0414065455', '26678563', '32924608', '28984529', '5854183932', '3493338474', '5697038471', '234853374', '4691712741', '0315617617', '80760559', '66949125', '4192752139', '9496101486', '5701950494', '09690372', '483828342', '96764258', '008294469', '98689290', '68849123', '34801891', '0181603179', '107866556', '21038662', '5684186185', '09804909', '145208857', '53347417', '1901918113', '59948564', '98366594', '4425352039', '86576389', '912215764', '8800235967', '226093842', '34249699', '5719884193', '1663251278', '1725383885', '90458345', '776719494', '80993668', '3618782405', '271692301', '1318056974', '34364484', '06505153', '475150944', '372405221', '2677013681', '557139826', '520374389', '660848469', '47075669', '0517663028', '9049513930', '433152457', '886636776', '86787168', '022780214', '7951609560', '5667764093', '838849745', '019976678', '769718498', '6363480479', '74561957', '4711396354', '64741935', '635318886', '816838007', '8712918098', '5069664500', '66696992', '194393501', '17515275', '507810470', '7867508645', '97890146', '76164566', '960260412', '55671172', '8080934355', '33322692', '34902059', '773826048', '9394476360', '5642776715', '92323083', '68073232', '4805110340', '45004956', '9802565395', '59267496', '066107622', '42467322', '288455477', '782541117', '43106322', '2272435863', '5016294921', '09509123', '552779786', '95990920', '81483132', '03527776', '752980016', '1847734688', '556562653', '839255902', '4616104595', '29809218', '391672287', '334718306', '43585080', '02583589', 
'7233217482', '5403259520', '61032416', '001439134', '7058915151', '21665236', '832152636', '03282139', '93566282', '44780337', '10744720', '285322742', '85751112', '4264593611', '19978159', '6360606429', '52539429', '3455702781', '01444845', '84040878', '408881107', '1817185797', '83487185', '927938359', '8869793488', '50151381', '3457896092', '1490798877', '57968397', '4709037743', '227201954', '196932065', '318201504', '8838874777', '50381240', '562562100', '7705461382', '52034903', '863896707', '960820902', '9727114049', '3691744758', '62844878', '380548707', '9286349503', '3834106725', '74623696', '602032334', '774510182', '968236124', '7854930259', '4297473892', '132356371', '35425624', '122020102', '4223520651', '9714106805', '35928467', '48512782', '394240918', '33399393', '53951183', '3850403750', '8021460959', '3543204008', '608551628', '5371246500', '329359881', '0136318189', '1083281746', '4102911340', '75641423', '52281621', '9009502592', '342103442', '91353477', '33048915', '06504776', '982996475', '47529969', '858454960', '0896749455', '769670880', '79735644', '0133327778', '320797815', '19301633', '126294891', '364875711', '9072522986', '453432023', '20175550', '081968749', '94424661', '045687704', '0561301489', '16306713', '826327737', '0260020635', '92515243', '5451887104', '1048129290', '38574822', '2502418864', '09021707', '763974148', '016902212', '163853103', '4041493575', '2514184329', '69615090', '45344830', '887090211', '3318586313', '30455689', '99871061', '2173888953', '6334254385', '6825999200', '9088883251', '3787524177', '6860170298', '9812578720', '425733990', '8312740947', '29980568', '0372339101', '7981067118', '624630040', '2793291403', '70305410', '3158680053', '597523279', '64962625', '3319662405', '841855358', '551825959', '23062809', '77689352', '919435998', '38942013', '154569753', '46729265', '26698528', '552360095', '848714627', '4959976019', '3462199841', '45850338', '74468227', '5982155184', '597761005', '13991922', 
'59252972', '5666238993', '85217864', '019165316', '488056585', '268787858', '40708141', '624636523', '86419466', '3082174627', '470603256', '62389167', '6106717614', '319875631', '389937254', '496273456', '552652578', '015816555', '03547144', '1260461138', '8388011719', '65737964', '8182451478', '9482394688', '25818370', '096449044', '604390541', '5817872454', '634680513', '252854770', '6497251918', '842510557', '9858261500', '380829397', '53719812', '75898725', '69780807', '2577773688', '5617338824', '5358843847', '5158996702', '388334646', '09030180', '139779187', '10226416', '2962610342', '744053464', '2246285290', '3516697382', '698795249', '959265564', '527950107', '582639600', '535703319', '436868289', '60270554', '76911864', '576533462', '85743480', '627514222', '8311924121', '7928042215', '8359510393', '927961348', '58254586', '64334179', '096508524', '877994940', '94339887', '0602894014', '7445209063', '5915284132', '1748207192', '26089352', '925717977', '39332462', '856567455', '2293732537', '744312230', '0345555658', '503415985', '43453777', '13546073', '935165885', '621037979', '1005186348', '547416332', '2066897926', '63377305', '0182748056', '2900525818', '13077613', '3190016016', '07046498', '5058772088', '5955378122', '482965541', '423525197', '852886496', '7747587233', '884137307', '208876581', '58808123', '84883473', '7770938772', '7295501719', '76561929', '5967813835', '90336244', '179575899', '4752061914', '02585410', '117346749', '354984353', '9431376777', '404314761', '281346400', '2280922029', '05462701', '10925840', '20263454', '6521257963', '45277081', '7814010055', '4913772053', '581126665', '2734692205', '919427716', '24956709', '7019984427', '19928781', '9768078626', '5517560815', '097198081', '981729233', '892372599', '195920022', '9257811187', '80540216', '4436801371', '705063970', '10435230', '4074454383', '159356225', '8448344204', '717643121', '3985639040', '0738510708', '431769553', '5496625815']
if len(password)>7:
a=0
if '@' in password:
for i in password:
if i in ['0','1','2','3','4','5','6','7','8','9']:
a+=1
if a>0:
if password in passlist:
return 'Your Password is too Common...'
return 'True'
return 'Your Password Doesn\'t contain numbers...'
return 'Your Password Doesn\'t contain @...'
return 'Your Password must contain at least 8 characters.'
def changepass(request):
if request.method == 'POST':
form = PasswordChangeForm(data=request.POST, user=request.user)
if form.is_valid():
form.save()
update_session_auth_hash(request, form.user)
return redirect('songs-profile')
else:
return redirect(reverse('songs-password'))
else:
form = PasswordChangeForm(user=request.user)
args = {'form': form}
return render(request, 'users/change_password.html', args)
@login_required
def addsong(request):
if request.method=='POST':
title=request.POST['title']
lyrics=request.POST['lyrics']
composer=request.user.username
featuring=request.POST['featuring']
album=request.POST['album']
img=request.FILES['img']
link=request.POST['link']
genre=request.POST['genre']
genre = genre.upper()
audio=request.FILES['audio']
ytlink=link
link='https://www.youtube.com/embed/'+ link[link.index('=')+1:]+'?rel=0'
song = Song(title=title,lyrics=lyrics,composer=composer,featuring=featuring,album=album,img=img,link=link,ytlink=ytlink,audio=audio,genre=genre)
song.save()
return redirect('songs-profile')
else:
return render(request,'songs/addsong.html')
def logout_user(request):
logout(request)
messages.warning(request,'You have successfully logged out.')
return redirect('songs-home')
@login_required
def profile(request):
mysongs = list()
songs = Song.objects.all()
username = request.user.username
for song in songs:
if song.composer == username or song.featuring == username:
mysongs.append(song)
size = len(mysongs)
context = {'size': size,'songs':mysongs,'thisuser':username}
return render(request,'users/display_profile.html',context)
def addprofile(request):
if request.user.profile.bio :
messages.warning(request,'Your Profile can be added only Once.You can update your profile.To update your Profile Go to /profile/update')
return redirect('songs-home')
if request.method =="POST":
print("heyy")
username=request.POST.get('username')
user = User.objects.filter(username=username).first()
profile=Profile.objects.get(user=user)
user.first_name=request.POST['fname']
user.last_name=request.POST['lname']
profile.gender=request.POST['gender']
profile.age=request.POST['age']
profile.bio=request.POST['bio']
if request.FILES['pic']:
profile.image=request.FILES['pic']
user.save()
profile.saave()
else:
return render(request,'users/addprofile.html')
return render(request,'songs/base.html')
def update(request):
profile = request.user
mysongs = list()
a=0
userprofile = Profile.objects.get(user=profile)
if request.method == 'POST':
if request.POST['fname']!=profile.first_name:
fname = request.POST['fname']
a+=1
else:
fname=profile.first_name
if request.POST['lname']!=profile.last_name:
lname = request.POST['lname']
a+=1
else:
lname=profile.last_name
if request.POST['gender']!=userprofile.gender:
gender = request.POST['gender']
a+=1
else:
gender=userprofile.gender
if request.POST['age']!=userprofile.age:
age = request.POST['age']
a+=1
else:
age=userprofile.age
if request.POST['bio']!=userprofile.bio:
bio = request.POST['bio']
a+=1
else:
bio=userprofile.bio
try:
if request.FILES['pic']:
pic = request.FILES['pic']
path = userprofile.image.path
import os
os.remove(path)
a+=1
else:
pic=userprofile.image
except MultiValueDictKeyError :
print("m")
pic=userprofile.image
profile.first_name = fname
profile.last_name = lname
userprofile.gender=gender
userprofile.age=age
userprofile.bio=bio
userprofile.image=pic
userprofile.saave()
profile.save()
if a>1:
messages.success(request, 'Your Account has been updated!')
return redirect('songs-profile')
songs = Song.objects.all()
username = profile.username
for song in songs:
if song.composer == username or song.featuring == username:
mysongs.append(song)
context={
'songs':mysongs,
'profile':userprofile
}
return render(request,'users/profile.html',context)
| 330.304878
| 73,738
| 0.683564
| 6,832
| 81,255
| 8.124561
| 0.883782
| 0.005153
| 0.004792
| 0.005189
| 0.016358
| 0.012935
| 0.006179
| 0.005441
| 0.005441
| 0.005441
| 0
| 0.612192
| 0.087502
| 81,255
| 245
| 73,739
| 331.653061
| 0.136435
| 0
| 0
| 0.302632
| 0
| 0.004386
| 0.634127
| 0.000898
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04386
| false
| 0.109649
| 0.052632
| 0
| 0.223684
| 0.013158
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
f82f7aefc4dc6217cf7f3f7d7c2702a859202a36
| 30
|
py
|
Python
|
tests/test_inventory_service.py
|
accelero-cloud/tutorials
|
9a9580e60bc216bf45ec0011f6d9b6b14d5a8d03
|
[
"Apache-2.0"
] | 2
|
2019-08-09T16:15:40.000Z
|
2020-01-12T09:46:28.000Z
|
tests/test_inventory_service.py
|
accelero-cloud/tutorials
|
9a9580e60bc216bf45ec0011f6d9b6b14d5a8d03
|
[
"Apache-2.0"
] | 2
|
2021-03-31T18:48:41.000Z
|
2021-12-13T19:49:46.000Z
|
tests/test_inventory_service.py
|
accelero-cloud/tutorials
|
9a9580e60bc216bf45ec0011f6d9b6b14d5a8d03
|
[
"Apache-2.0"
] | null | null | null |
def test_default():
pass
| 7.5
| 19
| 0.633333
| 4
| 30
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.266667
| 30
| 3
| 20
| 10
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
f8892aaad6559f0cce09d9a2a58429a7d441eb47
| 101
|
py
|
Python
|
Assignment.2/test.py
|
ash0x0/AUC-ProgrammingLanguagePython
|
3fc64c6acac981eef30d5b48278f06086ef4212b
|
[
"Apache-2.0"
] | null | null | null |
Assignment.2/test.py
|
ash0x0/AUC-ProgrammingLanguagePython
|
3fc64c6acac981eef30d5b48278f06086ef4212b
|
[
"Apache-2.0"
] | null | null | null |
Assignment.2/test.py
|
ash0x0/AUC-ProgrammingLanguagePython
|
3fc64c6acac981eef30d5b48278f06086ef4212b
|
[
"Apache-2.0"
] | null | null | null |
import pickle
file = open('file.txt.code' , 'rb')
print(pickle.load(file))
print(pickle.load(file))
| 16.833333
| 35
| 0.70297
| 16
| 101
| 4.4375
| 0.5625
| 0.309859
| 0.422535
| 0.535211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09901
| 101
| 5
| 36
| 20.2
| 0.78022
| 0
| 0
| 0.5
| 0
| 0
| 0.148515
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
f8cc319ca37228c74b00557cef8f75231ad84c32
| 44
|
py
|
Python
|
sdk/exception/service_exception.py
|
CLG0125/elemesdk
|
344466398bad7cf026e082e47c77d3ca98621ef3
|
[
"MIT"
] | 1
|
2021-04-03T05:11:29.000Z
|
2021-04-03T05:11:29.000Z
|
sdk/exception/service_exception.py
|
CLG0125/elemesdk
|
344466398bad7cf026e082e47c77d3ca98621ef3
|
[
"MIT"
] | null | null | null |
sdk/exception/service_exception.py
|
CLG0125/elemesdk
|
344466398bad7cf026e082e47c77d3ca98621ef3
|
[
"MIT"
] | null | null | null |
class ServiceException(Exception):pass
| 14.666667
| 38
| 0.772727
| 4
| 44
| 8.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159091
| 44
| 3
| 39
| 14.666667
| 0.918919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
3e05b8b904f5b29089ee4f538ee5a7e43627430c
| 7,243
|
py
|
Python
|
src/micropython/microbit/__model/constants.py
|
julianrendell/vscode-python-devicesimulator
|
8014a940c9a0551793bfb5694bff9a52df6c0155
|
[
"MIT"
] | 151
|
2019-11-05T10:10:29.000Z
|
2022-02-18T11:46:27.000Z
|
src/micropython/microbit/__model/constants.py
|
julianrendell/vscode-python-devicesimulator
|
8014a940c9a0551793bfb5694bff9a52df6c0155
|
[
"MIT"
] | 98
|
2019-11-09T17:41:30.000Z
|
2021-12-17T23:05:01.000Z
|
src/micropython/microbit/__model/constants.py
|
julianrendell/vscode-python-devicesimulator
|
8014a940c9a0551793bfb5694bff9a52df6c0155
|
[
"MIT"
] | 42
|
2019-11-10T02:26:27.000Z
|
2022-03-22T01:43:01.000Z
|
MICROBIT = "micro:bit"
# string arguments for constructor
BLANK_5X5 = "00000:00000:00000:00000:00000:"
# pre-defined image patterns
IMAGE_PATTERNS = {
"HEART": "09090:99999:99999:09990:00900:",
"HEART_SMALL": "00000:09090:09990:00900:00000:",
"HAPPY": "00000:09090:00000:90009:09990:",
"SMILE": "00000:00000:00000:90009:09990:",
"SAD": "00000:09090:00000:09990:90009:",
"CONFUSED": "00000:09090:00000:09090:90909:",
"ANGRY": "90009:09090:00000:99999:90909:",
"ASLEEP": "00000:99099:00000:09990:00000:",
"SURPRISED": "09090:00000:00900:09090:00900:",
"SILLY": "90009:00000:99999:00909:00999:",
"FABULOUS": "99999:99099:00000:09090:09990:",
"MEH": "09090:00000:00090:00900:09000:",
"YES": "00000:00009:00090:90900:09000:",
"NO": "90009:09090:00900:09090:90009:",
"CLOCK12": "00900:00900:00900:00000:00000:",
"CLOCK11": "09000:09000:00900:00000:00000:",
"CLOCK10": "00000:99000:00900:00000:00000:",
"CLOCK9": "00000:00000:99900:00000:00000:",
"CLOCK8": "00000:00000:00900:99000:00000:",
"CLOCK7": "00000:00000:00900:09000:09000:",
"CLOCK6": "00000:00000:00900:00900:00900:",
"CLOCK5": "00000:00000:00900:00090:00090:",
"CLOCK4": "00000:00000:00900:00099:00000:",
"CLOCK3": "00000:00000:00999:00000:00000:",
"CLOCK2": "00000:00099:00900:00000:00000:",
"CLOCK1": "00090:00090:00900:00000:00000:",
"ARROW_N": "00900:09990:90909:00900:00900:",
"ARROW_NE": "00999:00099:00909:09000:90000:",
"ARROW_E": "00900:00090:99999:00090:00900:",
"ARROW_SE": "90000:09000:00909:00099:00999:",
"ARROW_S": "00900:00900:90909:09990:00900:",
"ARROW_SW": "00009:00090:90900:99000:99900:",
"ARROW_W": "00900:09000:99999:09000:00900:",
"ARROW_NW": "99900:99000:90900:00090:00009:",
"TRIANGLE": "00000:00900:09090:99999:00000:",
"TRIANGLE_LEFT": "90000:99000:90900:90090:99999:",
"CHESSBOARD": "09090:90909:09090:90909:09090:",
"DIAMOND": "00900:09090:90009:09090:00900:",
"DIAMOND_SMALL": "00000:00900:09090:00900:00000:",
"SQUARE": "99999:90009:90009:90009:99999:",
"SQUARE_SMALL": "00000:09990:09090:09990:00000:",
"RABBIT": "90900:90900:99990:99090:99990:",
"COW": "90009:90009:99999:09990:00900:",
"MUSIC_CROTCHET": "00900:00900:00900:99900:99900:",
"MUSIC_QUAVER": "00900:00990:00909:99900:99900:",
"MUSIC_QUAVERS": "09999:09009:09009:99099:99099:",
"PITCHFORK": "90909:90909:99999:00900:00900:",
"XMAS": "00900:09990:00900:09990:99999:",
"PACMAN": "09999:99090:99900:99990:09999:",
"TARGET": "00900:09990:99099:09990:00900:",
"TSHIRT": "99099:99999:09990:09990:09990:",
"ROLLERSKATE": "00099:00099:99999:99999:09090:",
"DUCK": "09900:99900:09999:09990:00000:",
"HOUSE": "00900:09990:99999:09990:09090:",
"TORTOISE": "00000:09990:99999:09090:00000:",
"BUTTERFLY": "99099:99999:00900:99999:99099:",
"STICKFIGURE": "00900:99999:00900:09090:90009:",
"GHOST": "99999:90909:99999:99999:90909:",
"SWORD": "00900:00900:00900:09990:00900:",
"GIRAFFE": "99000:09000:09000:09990:09090:",
"SKULL": "09990:90909:99999:09990:09990:",
"UMBRELLA": "09990:99999:00900:90900:09900:",
"SNAKE": "99000:99099:09090:09990:00000:",
}
IMAGE_TUPLE_LOOKUP = {
"ALL_CLOCKS": [
"CLOCK12",
"CLOCK11",
"CLOCK10",
"CLOCK9",
"CLOCK8",
"CLOCK7",
"CLOCK6",
"CLOCK5",
"CLOCK4",
"CLOCK3",
"CLOCK2",
"CLOCK1",
],
"ALL_ARROWS": [
"ARROW_N",
"ARROW_NE",
"ARROW_E",
"ARROW_SE",
"ARROW_S",
"ARROW_SW",
"ARROW_W",
"ARROW_NW",
],
}
# 5x5 Alphabet
# Taken from https://raw.githubusercontent.com/micropython/micropython/264d80c84e034541bd6e4b461bfece4443ffd0ac/ports/nrf/boards/microbit/modules/microbitfont.h
ALPHABET = b"\x00\x00\x00\x00\x00\x08\x08\x08\x00\x08\x0a\x4a\x40\x00\x00\x0a\x5f\xea\x5f\xea\x0e\xd9\x2e\xd3\x6e\x19\x32\x44\x89\x33\x0c\x92\x4c\x92\x4d\x08\x08\x00\x00\x00\x04\x88\x08\x08\x04\x08\x04\x84\x84\x88\x00\x0a\x44\x8a\x40\x00\x04\x8e\xc4\x80\x00\x00\x00\x04\x88\x00\x00\x0e\xc0\x00\x00\x00\x00\x08\x00\x01\x22\x44\x88\x10\x0c\x92\x52\x52\x4c\x04\x8c\x84\x84\x8e\x1c\x82\x4c\x90\x1e\x1e\xc2\x44\x92\x4c\x06\xca\x52\x5f\xe2\x1f\xf0\x1e\xc1\x3e\x02\x44\x8e\xd1\x2e\x1f\xe2\x44\x88\x10\x0e\xd1\x2e\xd1\x2e\x0e\xd1\x2e\xc4\x88\x00\x08\x00\x08\x00\x00\x04\x80\x04\x88\x02\x44\x88\x04\x82\x00\x0e\xc0\x0e\xc0\x08\x04\x82\x44\x88\x0e\xd1\x26\xc0\x04\x0e\xd1\x35\xb3\x6c\x0c\x92\x5e\xd2\x52\x1c\x92\x5c\x92\x5c\x0e\xd0\x10\x10\x0e\x1c\x92\x52\x52\x5c\x1e\xd0\x1c\x90\x1e\x1e\xd0\x1c\x90\x10\x0e\xd0\x13\x71\x2e\x12\x52\x5e\xd2\x52\x1c\x88\x08\x08\x1c\x1f\xe2\x42\x52\x4c\x12\x54\x98\x14\x92\x10\x10\x10\x10\x1e\x11\x3b\x75\xb1\x31\x11\x39\x35\xb3\x71\x0c\x92\x52\x52\x4c\x1c\x92\x5c\x90\x10\x0c\x92\x52\x4c\x86\x1c\x92\x5c\x92\x51\x0e\xd0\x0c\x82\x5c\x1f\xe4\x84\x84\x84\x12\x52\x52\x52\x4c\x11\x31\x31\x2a\x44\x11\x31\x35\xbb\x71\x12\x52\x4c\x92\x52\x11\x2a\x44\x84\x84\x1e\xc4\x88\x10\x1e\x0e\xc8\x08\x08\x0e\x10\x08\x04\x82\x41\x0e\xc2\x42\x42\x4e\x04\x8a\x40\x00\x00\x00\x00\x00\x00\x1f\x08\x04\x80\x00\x00\x00\x0e\xd2\x52\x4f\x10\x10\x1c\x92\x5c\x00\x0e\xd0\x10\x0e\x02\x42\x4e\xd2\x4e\x0c\x92\x5c\x90\x0e\x06\xc8\x1c\x88\x08\x0e\xd2\x4e\xc2\x4c\x10\x10\x1c\x92\x52\x08\x00\x08\x08\x08\x02\x40\x02\x42\x4c\x10\x14\x98\x14\x92\x08\x08\x08\x08\x06\x00\x1b\x75\xb1\x31\x00\x1c\x92\x52\x52\x00\x0c\x92\x52\x4c\x00\x1c\x92\x5c\x90\x00\x0e\xd2\x4e\xc2\x00\x0e\xd0\x10\x10\x00\x06\xc8\x04\x98\x08\x08\x0e\xc8\x07\x00\x12\x52\x52\x4f\x00\x11\x31\x2a\x44\x00\x11\x31\x35\xbb\x00\x12\x4c\x8c\x92\x00\x11\x2a\x44\x98\x00\x1e\xc4\x88\x1e\x06\xc4\x8c\x84\x86\x08\x08\x08\x08\x08\x18\x08\x0c\x88\x18\x00\x00\x0c\x83\x60"
# We support ASCII characters between these indexes on the microbit
ASCII_START = 32
ASCII_END = 126
SPACE_BETWEEN_LETTERS_WIDTH = 1
WHITESPACE_WIDTH = 3
# numerical LED values
LED_HEIGHT = 5
LED_WIDTH = 5
BRIGHTNESS_MIN = 0
BRIGHTNESS_MAX = 9
# sensor max/min values
MAX_TEMPERATURE = 125
MIN_TEMPERATURE = -55
MAX_LIGHT_LEVEL = 255
MIN_LIGHT_LEVEL = 0
MAX_ACCELERATION = 1023
MIN_ACCELERATION = -1023
GESTURES = set(
[
"up",
"down",
"left",
"right",
"face up",
"face down",
"freefall",
"3g",
"6g",
"8g",
"shake",
]
)
# error messages
BRIGHTNESS_ERR = "brightness out of bounds"
COPY_ERR_MESSAGE = "please call copy function first"
INCORR_IMAGE_SIZE = "image data is incorrect size"
INDEX_ERR = "index out of bounds"
NOT_IMPLEMENTED_ERROR = "This method is not implemented by the simulator"
UNSUPPORTED_ADD_TYPE = "unsupported types for __add__:"
SAME_SIZE_ERR = "images must be the same size"
INVALID_GESTURE_ERR = "invalid gesture"
INVALID_ACCEL_ERR = "invalid acceleration"
INVALID_LIGHT_LEVEL_ERR = "invalid light level"
INVALID_TEMPERATURE_ERR = "invalid temperature"
TIME_DELAY = 0.03
EXPECTED_INPUT_BUTTONS = [
"button_a",
"button_b",
]
EXPECTED_INPUT_ACCEL = {
"motion_x": "x",
"motion_y": "y",
"motion_z": "z",
}
EXPECTED_INPUT_LIGHT = "light"
EXPECTED_INPUT_TEMP = "temperature"
EXPECTED_INPUT_GESTURE = "gesture"
| 42.605882
| 1,914
| 0.677482
| 1,135
| 7,243
| 4.245815
| 0.266079
| 0.027392
| 0.022411
| 0.014941
| 0.023034
| 0
| 0
| 0
| 0
| 0
| 0
| 0.388801
| 0.127157
| 7,243
| 169
| 1,915
| 42.857988
| 0.373458
| 0.049013
| 0
| 0.013605
| 0
| 0.006803
| 0.701265
| 0.555313
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3e4aa6aa7b822f47c6496278e880676fb0f3eb6f
| 64
|
py
|
Python
|
tests/01_import_test.py
|
andybrice/Pypework
|
d71c46cd0dfb41660f776ce8d435bb6893466c25
|
[
"MIT"
] | 3
|
2019-07-25T09:31:14.000Z
|
2021-07-11T10:33:27.000Z
|
tests/01_import_test.py
|
andybrice/pypework
|
d71c46cd0dfb41660f776ce8d435bb6893466c25
|
[
"MIT"
] | null | null | null |
tests/01_import_test.py
|
andybrice/pypework
|
d71c46cd0dfb41660f776ce8d435bb6893466c25
|
[
"MIT"
] | null | null | null |
import pypework
def test_module_imports():
assert pypework
| 12.8
| 26
| 0.78125
| 8
| 64
| 6
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171875
| 64
| 4
| 27
| 16
| 0.90566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.666667
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e43f8bf6a885551c25ef6c760f3ee1e16ea80cd9
| 35
|
py
|
Python
|
addons14/datamodel/tests/__init__.py
|
odoochain/addons_oca
|
55d456d798aebe16e49b4a6070765f206a8885ca
|
[
"MIT"
] | 1
|
2021-06-10T14:59:13.000Z
|
2021-06-10T14:59:13.000Z
|
addons14/datamodel/tests/__init__.py
|
odoochain/addons_oca
|
55d456d798aebe16e49b4a6070765f206a8885ca
|
[
"MIT"
] | null | null | null |
addons14/datamodel/tests/__init__.py
|
odoochain/addons_oca
|
55d456d798aebe16e49b4a6070765f206a8885ca
|
[
"MIT"
] | 1
|
2021-04-09T09:44:44.000Z
|
2021-04-09T09:44:44.000Z
|
from . import test_build_datamodel
| 17.5
| 34
| 0.857143
| 5
| 35
| 5.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.903226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e44c6a0598c6242a093a6f6d084b47ed8cf72c4a
| 176
|
py
|
Python
|
envs/water_tank/no_shield.py
|
safe-rl/safe-rl-shielding
|
287d540df6b26928eed512a57297d44d72f19832
|
[
"MIT"
] | 26
|
2018-12-30T20:32:45.000Z
|
2022-03-15T06:11:40.000Z
|
envs/water_tank/no_shield.py
|
safe-rl/safe-rl-shielding
|
287d540df6b26928eed512a57297d44d72f19832
|
[
"MIT"
] | 20
|
2018-08-29T10:34:48.000Z
|
2022-03-11T23:16:24.000Z
|
envs/water_tank/no_shield.py
|
safe-rl/safe-rl-shielding
|
287d540df6b26928eed512a57297d44d72f19832
|
[
"MIT"
] | 13
|
2019-05-11T01:59:58.000Z
|
2022-03-15T14:12:40.000Z
|
class Shield:
def __init__(self):
self.water_level = 0
self.switch_state = 0
def tick(self, water_level, switch_state, action):
return action
| 19.555556
| 54
| 0.630682
| 23
| 176
| 4.478261
| 0.565217
| 0.174757
| 0.271845
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016
| 0.289773
| 176
| 8
| 55
| 22
| 0.808
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.166667
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
e463045136a20cb2df10b62bfab5431c572d57b5
| 3,668
|
py
|
Python
|
tests/test_image_array.py
|
deepgreenAN/tracking_annotation
|
f13e2dbf0bd6400db07b26a13715b24b5b4fd7aa
|
[
"Apache-2.0"
] | null | null | null |
tests/test_image_array.py
|
deepgreenAN/tracking_annotation
|
f13e2dbf0bd6400db07b26a13715b24b5b4fd7aa
|
[
"Apache-2.0"
] | 1
|
2021-04-27T06:27:48.000Z
|
2021-04-27T06:27:48.000Z
|
tests/test_image_array.py
|
deepgreenAN/tracking_annotation
|
f13e2dbf0bd6400db07b26a13715b24b5b4fd7aa
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from pathlib import Path
import time
from image_array import MovieImageArray, MovieImageArrayFile, MovieImageArrayRaw
class TestMovieImageArray(unittest.TestCase):
def setUp(self):
self.test_path = Path("tests")
self.test_path_contensts = set(list(self.test_path.iterdir()))
def tearDown(self):
# ファイルの構造が変わってないか確認.
end_test_path_contents = set(list(self.test_path.iterdir()))
self.assertEqual(end_test_path_contents, self.test_path_contensts)
def test_movie_image_array(self):
image_array1 = MovieImageArray("tests/mini_movie.mp4", is_temp=False, temp_dir=Path("tests")) # 保存データは破棄されない
image_array1.read_movie(is_update=True)
#image_arrayから所得できるものか3階テンソルであり,最後の次数が3
for image in image_array1:
self.assertEqual(len(image.shape), 3)
self.assertEqual(image.shape[-1], 3)
image_array1.close()
image_array2 = MovieImageArray.from_file(image_array1.saved_path, is_temp=True) # 保存データが破棄される
#image_arrayから所得できるものか3階テンソルであり,最後の次数が3
for image in image_array2:
self.assertEqual(len(image.shape), 3)
self.assertEqual(image.shape[-1], 3)
image_array2.close()
time.sleep(3) # ファイル削除のラグを考慮
def test_movie_image_array_temp(self):
image_array1 = MovieImageArray("tests/mini_movie.mp4", is_temp=True, temp_dir=Path("tests")) # 保存データは破棄される
image_array1.read_movie(is_update=True)
#image_arrayから所得できるものか3階テンソルであり,最後の次数が3
for image in image_array1:
self.assertEqual(len(image.shape), 3)
self.assertEqual(image.shape[-1], 3)
image_array1.close()
time.sleep(3) # ファイル削除のラグを考慮
def test_movie_image_array_file(self):
image_array1 = MovieImageArrayFile("tests/mini_movie.mp4", is_temp=False, temp_dir=Path("tests")) # 保存データは破棄されない
image_array1.read_movie(is_update=True)
#image_arrayから所得できるものか3階テンソルであり,最後の次数が3
for image in image_array1:
self.assertEqual(len(image.shape), 3)
self.assertEqual(image.shape[-1], 3)
image_array1.close()
image_array2 = MovieImageArrayFile.from_file(image_array1.saved_path, is_temp=True) # 保存データが破棄される
#image_arrayから所得できるものか3階テンソルであり,最後の次数が3
for image in image_array2:
self.assertEqual(len(image.shape), 3)
self.assertEqual(image.shape[-1], 3)
image_array2.close()
time.sleep(3) # ファイル削除のラグを考慮
def test_movie_image_array_file_temp(self):
image_array1 = MovieImageArrayFile("tests/mini_movie.mp4", is_temp=True, temp_dir=Path("tests")) # 保存データは破棄される
image_array1.read_movie(is_update=True)
#image_arrayから所得できるものか3階テンソルであり,最後の次数が3
for image in image_array1:
self.assertEqual(len(image.shape), 3)
self.assertEqual(image.shape[-1], 3)
image_array1.close()
time.sleep(3) # ファイル削除のラグを考慮
def test_movie_image_array_raw(self):
image_array1 = MovieImageArrayRaw("tests/mini_movie.mp4", is_temp=True, temp_dir=Path("tests")) # 保存データは破棄される
image_array1.read_movie()
#image_arrayから所得できるものか3階テンソルであり,最後の次数が3
for image in image_array1:
self.assertEqual(len(image.shape), 3)
self.assertEqual(image.shape[-1], 3)
image_array1.close()
if __name__ == "__main__":
unittest.main()
| 39.869565
| 122
| 0.642039
| 411
| 3,668
| 5.469586
| 0.145985
| 0.107651
| 0.1121
| 0.121441
| 0.814502
| 0.804715
| 0.781584
| 0.781584
| 0.781584
| 0.781584
| 0
| 0.026667
| 0.263904
| 3,668
| 92
| 123
| 39.869565
| 0.805926
| 0.115049
| 0
| 0.580645
| 0
| 0
| 0.044005
| 0
| 0
| 0
| 0
| 0
| 0.241935
| 1
| 0.112903
| false
| 0
| 0.064516
| 0
| 0.193548
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e472de48e2bb67ff515fab4fb5d997f1e5a72731
| 69
|
py
|
Python
|
datatypes/tuple_one.py
|
janbodnar/Python-Course
|
51705ab5a2adef52bcdb99a800e94c0d67144a38
|
[
"BSD-2-Clause"
] | 13
|
2017-08-22T12:26:07.000Z
|
2021-07-29T16:13:50.000Z
|
datatypes/tuple_one.py
|
janbodnar/Python-Course
|
51705ab5a2adef52bcdb99a800e94c0d67144a38
|
[
"BSD-2-Clause"
] | 1
|
2021-02-08T10:24:33.000Z
|
2021-02-08T10:24:33.000Z
|
datatypes/tuple_one.py
|
janbodnar/Python-Course
|
51705ab5a2adef52bcdb99a800e94c0d67144a38
|
[
"BSD-2-Clause"
] | 17
|
2018-08-13T11:10:33.000Z
|
2021-07-29T16:14:02.000Z
|
#!/usr/bin/python
# tuple_one.py
print ((3 + 7))
print ((3 + 7, ))
| 9.857143
| 17
| 0.536232
| 12
| 69
| 3
| 0.75
| 0.333333
| 0.388889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072727
| 0.202899
| 69
| 6
| 18
| 11.5
| 0.581818
| 0.42029
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
e47f0d5e790cb4f203e804c47a4b4ce763015c33
| 85
|
py
|
Python
|
pydale/utils/__init__.py
|
sz144/TPy
|
689e38bdc2549015bc45cfacfe42e20a51c76e5a
|
[
"MIT"
] | 4
|
2018-08-20T13:38:13.000Z
|
2020-08-31T08:57:12.000Z
|
pydale/utils/__init__.py
|
sz144/pydale
|
689e38bdc2549015bc45cfacfe42e20a51c76e5a
|
[
"MIT"
] | null | null | null |
pydale/utils/__init__.py
|
sz144/pydale
|
689e38bdc2549015bc45cfacfe42e20a51c76e5a
|
[
"MIT"
] | 2
|
2021-09-28T08:24:30.000Z
|
2022-01-29T08:29:46.000Z
|
from ._base import lap_norm
from ._base import mmd_coef
from ._base import base_init
| 21.25
| 28
| 0.823529
| 15
| 85
| 4.266667
| 0.533333
| 0.375
| 0.65625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141176
| 85
| 3
| 29
| 28.333333
| 0.876712
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
e485e0d5a58ff367650e9db686d5531ffd45d32a
| 28,833
|
py
|
Python
|
SchemaTerms/example-code/protobufs/schematerms_pb2.py
|
whiteslack/schemaorg
|
1e382bf7b40c2f85a865d2b30f911bcdac4e6da3
|
[
"Apache-2.0"
] | 4,768
|
2015-01-08T04:45:33.000Z
|
2022-03-28T07:32:59.000Z
|
software/SchemaTerms/example-code/protobufs/schematerms_pb2.py
|
lioncorpo/schenmaorg
|
d863285fde9c50572b95ceca3f0391e46ea7ef88
|
[
"Apache-2.0"
] | 2,599
|
2015-01-06T21:51:28.000Z
|
2022-03-30T12:40:09.000Z
|
software/SchemaTerms/example-code/protobufs/schematerms_pb2.py
|
lioncorpo/schenmaorg
|
d863285fde9c50572b95ceca3f0391e46ea7ef88
|
[
"Apache-2.0"
] | 878
|
2015-01-10T00:03:30.000Z
|
2022-03-31T22:54:15.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: schematerms.proto
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='schematerms.proto',
package='SchemaTerms',
syntax='proto2',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x11schematerms.proto\x12\x0bSchemaTerms\"\x1e\n\tSuperPath\x12\x11\n\tsuperPath\x18\x01 \x03(\t\"\x97\x02\n\x07SDOTerm\x12\'\n\x08termType\x18\x01 \x02(\x0e\x32\x15.SchemaTerms.TermType\x12\x0b\n\x03uri\x18\x02 \x02(\t\x12\r\n\x05label\x18\x03 \x02(\t\x12\x0f\n\x07\x63omment\x18\x04 \x02(\t\x12\x0f\n\x07pending\x18\x05 \x02(\x08\x12\x0f\n\x07retired\x18\x06 \x02(\x08\x12*\n\nsuperPaths\x18\x07 \x03(\x0b\x32\x16.SchemaTerms.SuperPath\x12\x18\n\x10\x61\x63knowledgements\x18\x08 \x03(\t\x12\x13\n\x0b\x65quivalents\x18\t \x03(\t\x12\x14\n\x0csupersededBy\x18\n \x01(\t\x12\x12\n\nsupersedes\x18\x0b \x03(\t\x12\x0f\n\x07sources\x18\x0c \x03(\t\"\xd8\x01\n\x0bSDOBaseType\x12\n\n\x02id\x18\x01 \x02(\t\x12,\n\x0etermdescriptor\x18\x02 \x03(\x0b\x32\x14.SchemaTerms.SDOTerm\x12\x12\n\nproperties\x18\x03 \x03(\t\x12\x15\n\rallproperties\x18\x04 \x03(\t\x12\x17\n\x0f\x65xpectedTypeFor\x18\x05 \x03(\t\x12\x1a\n\x12\x65numerationMembers\x18\x06 \x03(\t\x12\x0c\n\x04subs\x18\x07 \x03(\t\x12\x0e\n\x06supers\x18\x08 \x03(\t\x12\x11\n\ttermStack\x18\t \x03(\t\"\xb8\x01\n\x0bSDOProperty\x12\n\n\x02id\x18\x01 \x02(\t\x12,\n\x0etermdescriptor\x18\x02 \x03(\x0b\x32\x14.SchemaTerms.SDOTerm\x12\x16\n\x0e\x64omainIncludes\x18\x03 \x03(\t\x12\x15\n\rrangeIncludes\x18\x04 \x03(\t\x12\x0c\n\x04subs\x18\x05 \x03(\t\x12\x0e\n\x06supers\x18\x06 \x03(\t\x12\x0f\n\x07inverse\x18\x07 \x01(\t\x12\x11\n\ttermStack\x18\x08 \x03(\t\"j\n\x13SDOEnumerationValue\x12\n\n\x02id\x18\x01 \x02(\t\x12,\n\x0etermdescriptor\x18\x02 \x03(\x0b\x32\x14.SchemaTerms.SDOTerm\x12\x19\n\x11\x65numerationParent\x18\x03 \x02(\t\"\'\n\x0cSDOReference\x12\n\n\x02id\x18\x01 \x02(\t\x12\x0b\n\x03uri\x18\x02 \x02(\t\"\xfd\x01\n\x13SDOBaseTypeExpanded\x12\n\n\x02id\x18\x01 \x02(\t\x12,\n\x0etermdescriptor\x18\x02 \x03(\x0b\x32\x14.SchemaTerms.SDOTerm\x12,\n\nproperties\x18\x03 \x03(\x0b\x32\x18.SchemaTerms.SDOProperty\x12\x31\n\x0f\x65xpectedTypeFor\x18\x04 
\x03(\x0b\x32\x18.SchemaTerms.SDOProperty\x12\x1a\n\x12\x65numerationMembers\x18\x05 \x03(\t\x12\x0c\n\x04subs\x18\x06 \x03(\t\x12\x0e\n\x06supers\x18\x07 \x03(\t\x12\x11\n\ttermStack\x18\x08 \x03(\t*f\n\x08TermType\x12\x08\n\x04TYPE\x10\x00\x12\x0c\n\x08PROPERTY\x10\x01\x12\x0c\n\x08\x44\x41TATYPE\x10\x02\x12\x0f\n\x0b\x45NUMERATION\x10\x03\x12\x14\n\x10\x45NUMERATIONVALUE\x10\x04\x12\r\n\tREFERENCE\x10\x05'
)
_TERMTYPE = _descriptor.EnumDescriptor(
name='TermType',
full_name='SchemaTerms.TermType',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='TYPE', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PROPERTY', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='DATATYPE', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ENUMERATION', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ENUMERATIONVALUE', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='REFERENCE', index=5, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=1159,
serialized_end=1261,
)
_sym_db.RegisterEnumDescriptor(_TERMTYPE)
TermType = enum_type_wrapper.EnumTypeWrapper(_TERMTYPE)
TYPE = 0
PROPERTY = 1
DATATYPE = 2
ENUMERATION = 3
ENUMERATIONVALUE = 4
REFERENCE = 5
_SUPERPATH = _descriptor.Descriptor(
name='SuperPath',
full_name='SchemaTerms.SuperPath',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='superPath', full_name='SchemaTerms.SuperPath.superPath', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=34,
serialized_end=64,
)
_SDOTERM = _descriptor.Descriptor(
name='SDOTerm',
full_name='SchemaTerms.SDOTerm',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='termType', full_name='SchemaTerms.SDOTerm.termType', index=0,
number=1, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='uri', full_name='SchemaTerms.SDOTerm.uri', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='label', full_name='SchemaTerms.SDOTerm.label', index=2,
number=3, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='comment', full_name='SchemaTerms.SDOTerm.comment', index=3,
number=4, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pending', full_name='SchemaTerms.SDOTerm.pending', index=4,
number=5, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='retired', full_name='SchemaTerms.SDOTerm.retired', index=5,
number=6, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='superPaths', full_name='SchemaTerms.SDOTerm.superPaths', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='acknowledgements', full_name='SchemaTerms.SDOTerm.acknowledgements', index=7,
number=8, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='equivalents', full_name='SchemaTerms.SDOTerm.equivalents', index=8,
number=9, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='supersededBy', full_name='SchemaTerms.SDOTerm.supersededBy', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='supersedes', full_name='SchemaTerms.SDOTerm.supersedes', index=10,
number=11, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sources', full_name='SchemaTerms.SDOTerm.sources', index=11,
number=12, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=67,
serialized_end=346,
)
_SDOBASETYPE = _descriptor.Descriptor(
name='SDOBaseType',
full_name='SchemaTerms.SDOBaseType',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='SchemaTerms.SDOBaseType.id', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='termdescriptor', full_name='SchemaTerms.SDOBaseType.termdescriptor', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='properties', full_name='SchemaTerms.SDOBaseType.properties', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='allproperties', full_name='SchemaTerms.SDOBaseType.allproperties', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='expectedTypeFor', full_name='SchemaTerms.SDOBaseType.expectedTypeFor', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enumerationMembers', full_name='SchemaTerms.SDOBaseType.enumerationMembers', index=5,
number=6, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='subs', full_name='SchemaTerms.SDOBaseType.subs', index=6,
number=7, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='supers', full_name='SchemaTerms.SDOBaseType.supers', index=7,
number=8, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='termStack', full_name='SchemaTerms.SDOBaseType.termStack', index=8,
number=9, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=349,
serialized_end=565,
)
_SDOPROPERTY = _descriptor.Descriptor(
name='SDOProperty',
full_name='SchemaTerms.SDOProperty',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='SchemaTerms.SDOProperty.id', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='termdescriptor', full_name='SchemaTerms.SDOProperty.termdescriptor', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domainIncludes', full_name='SchemaTerms.SDOProperty.domainIncludes', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='rangeIncludes', full_name='SchemaTerms.SDOProperty.rangeIncludes', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='subs', full_name='SchemaTerms.SDOProperty.subs', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='supers', full_name='SchemaTerms.SDOProperty.supers', index=5,
number=6, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='inverse', full_name='SchemaTerms.SDOProperty.inverse', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='termStack', full_name='SchemaTerms.SDOProperty.termStack', index=7,
number=8, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=568,
serialized_end=752,
)
_SDOENUMERATIONVALUE = _descriptor.Descriptor(
name='SDOEnumerationValue',
full_name='SchemaTerms.SDOEnumerationValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='SchemaTerms.SDOEnumerationValue.id', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='termdescriptor', full_name='SchemaTerms.SDOEnumerationValue.termdescriptor', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enumerationParent', full_name='SchemaTerms.SDOEnumerationValue.enumerationParent', index=2,
number=3, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=754,
serialized_end=860,
)
_SDOREFERENCE = _descriptor.Descriptor(
name='SDOReference',
full_name='SchemaTerms.SDOReference',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='SchemaTerms.SDOReference.id', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='uri', full_name='SchemaTerms.SDOReference.uri', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=862,
serialized_end=901,
)
_SDOBASETYPEEXPANDED = _descriptor.Descriptor(
name='SDOBaseTypeExpanded',
full_name='SchemaTerms.SDOBaseTypeExpanded',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='SchemaTerms.SDOBaseTypeExpanded.id', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='termdescriptor', full_name='SchemaTerms.SDOBaseTypeExpanded.termdescriptor', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='properties', full_name='SchemaTerms.SDOBaseTypeExpanded.properties', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='expectedTypeFor', full_name='SchemaTerms.SDOBaseTypeExpanded.expectedTypeFor', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enumerationMembers', full_name='SchemaTerms.SDOBaseTypeExpanded.enumerationMembers', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='subs', full_name='SchemaTerms.SDOBaseTypeExpanded.subs', index=5,
number=6, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='supers', full_name='SchemaTerms.SDOBaseTypeExpanded.supers', index=6,
number=7, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='termStack', full_name='SchemaTerms.SDOBaseTypeExpanded.termStack', index=7,
number=8, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=904,
serialized_end=1157,
)
_SDOTERM.fields_by_name['termType'].enum_type = _TERMTYPE
_SDOTERM.fields_by_name['superPaths'].message_type = _SUPERPATH
_SDOBASETYPE.fields_by_name['termdescriptor'].message_type = _SDOTERM
_SDOPROPERTY.fields_by_name['termdescriptor'].message_type = _SDOTERM
_SDOENUMERATIONVALUE.fields_by_name['termdescriptor'].message_type = _SDOTERM
_SDOBASETYPEEXPANDED.fields_by_name['termdescriptor'].message_type = _SDOTERM
_SDOBASETYPEEXPANDED.fields_by_name['properties'].message_type = _SDOPROPERTY
_SDOBASETYPEEXPANDED.fields_by_name['expectedTypeFor'].message_type = _SDOPROPERTY
DESCRIPTOR.message_types_by_name['SuperPath'] = _SUPERPATH
DESCRIPTOR.message_types_by_name['SDOTerm'] = _SDOTERM
DESCRIPTOR.message_types_by_name['SDOBaseType'] = _SDOBASETYPE
DESCRIPTOR.message_types_by_name['SDOProperty'] = _SDOPROPERTY
DESCRIPTOR.message_types_by_name['SDOEnumerationValue'] = _SDOENUMERATIONVALUE
DESCRIPTOR.message_types_by_name['SDOReference'] = _SDOREFERENCE
DESCRIPTOR.message_types_by_name['SDOBaseTypeExpanded'] = _SDOBASETYPEEXPANDED
DESCRIPTOR.enum_types_by_name['TermType'] = _TERMTYPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SuperPath = _reflection.GeneratedProtocolMessageType('SuperPath', (_message.Message,), {
'DESCRIPTOR' : _SUPERPATH,
'__module__' : 'schematerms_pb2'
# @@protoc_insertion_point(class_scope:SchemaTerms.SuperPath)
})
_sym_db.RegisterMessage(SuperPath)
SDOTerm = _reflection.GeneratedProtocolMessageType('SDOTerm', (_message.Message,), {
'DESCRIPTOR' : _SDOTERM,
'__module__' : 'schematerms_pb2'
# @@protoc_insertion_point(class_scope:SchemaTerms.SDOTerm)
})
_sym_db.RegisterMessage(SDOTerm)
SDOBaseType = _reflection.GeneratedProtocolMessageType('SDOBaseType', (_message.Message,), {
'DESCRIPTOR' : _SDOBASETYPE,
'__module__' : 'schematerms_pb2'
# @@protoc_insertion_point(class_scope:SchemaTerms.SDOBaseType)
})
_sym_db.RegisterMessage(SDOBaseType)
SDOProperty = _reflection.GeneratedProtocolMessageType('SDOProperty', (_message.Message,), {
'DESCRIPTOR' : _SDOPROPERTY,
'__module__' : 'schematerms_pb2'
# @@protoc_insertion_point(class_scope:SchemaTerms.SDOProperty)
})
_sym_db.RegisterMessage(SDOProperty)
SDOEnumerationValue = _reflection.GeneratedProtocolMessageType('SDOEnumerationValue', (_message.Message,), {
'DESCRIPTOR' : _SDOENUMERATIONVALUE,
'__module__' : 'schematerms_pb2'
# @@protoc_insertion_point(class_scope:SchemaTerms.SDOEnumerationValue)
})
_sym_db.RegisterMessage(SDOEnumerationValue)
SDOReference = _reflection.GeneratedProtocolMessageType('SDOReference', (_message.Message,), {
'DESCRIPTOR' : _SDOREFERENCE,
'__module__' : 'schematerms_pb2'
# @@protoc_insertion_point(class_scope:SchemaTerms.SDOReference)
})
_sym_db.RegisterMessage(SDOReference)
SDOBaseTypeExpanded = _reflection.GeneratedProtocolMessageType('SDOBaseTypeExpanded', (_message.Message,), {
'DESCRIPTOR' : _SDOBASETYPEEXPANDED,
'__module__' : 'schematerms_pb2'
# @@protoc_insertion_point(class_scope:SchemaTerms.SDOBaseTypeExpanded)
})
_sym_db.RegisterMessage(SDOBaseTypeExpanded)
# @@protoc_insertion_point(module_scope)
| 46.1328
| 2,359
| 0.751847
| 3,588
| 28,833
| 5.737179
| 0.063266
| 0.055574
| 0.091377
| 0.076075
| 0.746223
| 0.727909
| 0.714064
| 0.694875
| 0.69094
| 0.663736
| 0
| 0.036553
| 0.12614
| 28,833
| 624
| 2,360
| 46.206731
| 0.780441
| 0.021573
| 0
| 0.709845
| 1
| 0.006908
| 0.152777
| 0.107313
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.008636
| 0
| 0.008636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e499de222c437838f9677d111c351d2cc1d63933
| 80
|
py
|
Python
|
Intensivo-Python/Cap-8/printing_functions.py
|
RodrigoTAbreu/Python-3
|
9bf0578c1ed52283c8d8516a9052557bde038947
|
[
"MIT"
] | null | null | null |
Intensivo-Python/Cap-8/printing_functions.py
|
RodrigoTAbreu/Python-3
|
9bf0578c1ed52283c8d8516a9052557bde038947
|
[
"MIT"
] | null | null | null |
Intensivo-Python/Cap-8/printing_functions.py
|
RodrigoTAbreu/Python-3
|
9bf0578c1ed52283c8d8516a9052557bde038947
|
[
"MIT"
] | null | null | null |
import print_models
print_models.unprinted_models=['android','xiaomi','iphone']
| 26.666667
| 59
| 0.8125
| 10
| 80
| 6.2
| 0.7
| 0.354839
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0375
| 80
| 3
| 59
| 26.666667
| 0.805195
| 0
| 0
| 0
| 0
| 0
| 0.234568
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 1
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
e4a7a235a5923962364ee54071e94b926eb89812
| 262
|
py
|
Python
|
src/Python/Stage_1/pbmodel.py
|
ananthsridharan/vtol_sizing
|
3f754e1bd3cebdb5b5c68c8a2d84c47be1df2f02
|
[
"MIT"
] | 10
|
2020-03-24T10:20:52.000Z
|
2021-11-22T18:49:25.000Z
|
src/Python/Stage_1/pbmodel.py
|
ananthsridharan/vtol_sizing
|
3f754e1bd3cebdb5b5c68c8a2d84c47be1df2f02
|
[
"MIT"
] | 4
|
2020-12-08T10:26:41.000Z
|
2021-10-04T18:19:59.000Z
|
src/Python/Stage_1/pbmodel.py
|
ananthsridharan/vtol_sizing
|
3f754e1bd3cebdb5b5c68c8a2d84c47be1df2f02
|
[
"MIT"
] | 5
|
2018-11-27T21:21:19.000Z
|
2021-04-20T15:44:18.000Z
|
#====================================================================
# python function to predict weight of rotor blades and hub
# physics-based model for blades, parametric model for hubs
#====================================================================
| 43.666667
| 69
| 0.366412
| 19
| 262
| 5.052632
| 0.842105
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091603
| 262
| 5
| 70
| 52.4
| 0.403361
| 0.965649
| 0
| null | 0
| null | 0
| 0
| null | 1
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e4de558f7701dcf21b7d815eebbda8a9ee4b1a1f
| 103
|
py
|
Python
|
pyqt_translucent_full_loading_screen_thread/__init__.py
|
yjg30737/pyqt-translucent-full-loading-screen-thread
|
8b9fe1422d672b1fa78a540f88a7cb4de15dc2c9
|
[
"MIT"
] | null | null | null |
pyqt_translucent_full_loading_screen_thread/__init__.py
|
yjg30737/pyqt-translucent-full-loading-screen-thread
|
8b9fe1422d672b1fa78a540f88a7cb4de15dc2c9
|
[
"MIT"
] | null | null | null |
pyqt_translucent_full_loading_screen_thread/__init__.py
|
yjg30737/pyqt-translucent-full-loading-screen-thread
|
8b9fe1422d672b1fa78a540f88a7cb4de15dc2c9
|
[
"MIT"
] | null | null | null |
from .loadingThread import LoadingThread
from .loadingTranslucentScreen import LoadingTranslucentScreen
| 51.5
| 62
| 0.912621
| 8
| 103
| 11.75
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067961
| 103
| 2
| 62
| 51.5
| 0.979167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
90208d48667d3a1e10f22f8d4a1e064f41fb3829
| 9,510
|
py
|
Python
|
tests/transformer/test_transformer_layers.py
|
vikua/time-series-experiments
|
2f9d3fa842866c39c8c1a9906c8c5d4870a6f7da
|
[
"MIT"
] | null | null | null |
tests/transformer/test_transformer_layers.py
|
vikua/time-series-experiments
|
2f9d3fa842866c39c8c1a9906c8c5d4870a6f7da
|
[
"MIT"
] | 4
|
2020-10-11T15:14:48.000Z
|
2022-02-10T02:28:07.000Z
|
tests/transformer/test_transformer_layers.py
|
vikua/time-series-experiments
|
2f9d3fa842866c39c8c1a9906c8c5d4870a6f7da
|
[
"MIT"
] | null | null | null |
import random
import pytest
import numpy as np
import tensorflow as tf
from tensorflow import keras
from time_series_experiments.transformer.layers import (
MultiHeadAttention,
PositionalEncoding,
)
from time_series_experiments.utils import get_initializer
from time_series_experiments.utils.metrics import rmse
from ..conftest import simple_seq_data, RANDOM_SEED
@pytest.fixture(scope="function", autouse=True)
def clear_session():
tf.keras.backend.clear_session()
tf.random.set_seed(RANDOM_SEED)
np.random.seed(RANDOM_SEED)
random.seed(RANDOM_SEED)
def _positional_encoding_reference(seq_len, dims):
def _angle_vec(pos):
return [pos / np.power(10000.0, 2 * (u // 2) / dims) for u in range(dims)]
table = np.array([_angle_vec(pos) for pos in range(seq_len)])
sines = np.sin(table[:, 0::2])
cosines = np.cos(table[:, 1::2])
# instead of concatentation: should be sin on even cos on odd positions
# table[:, 0::2] = sines
# table[:, 1::2] = cosines
table = np.concatenate([sines, cosines], axis=-1)
return table
def test_scaled_dot_product_attention():
    """scaled_dot_product_attention reproduces the known toy-example results."""
    mha = MultiHeadAttention(32, 4)
    keys = tf.constant(
        [[10, 0, 0], [0, 10, 0], [0, 0, 10], [0, 0, 10]], dtype=tf.float32
    )
    values = tf.constant([[1, 0], [10, 0], [100, 5], [1000, 6]], dtype=tf.float32)
    # (query, expected attention output, expected attention weights)
    cases = [
        ([[0, 10, 0]], [10.0, 0.0], [0.0, 1.0, 0.0, 0.0]),
        ([[0, 0, 10]], [550.0, 5.5], [0.0, 0.0, 0.5, 0.5]),
        ([[10, 10, 0]], [5.5, 0.0], [0.5, 0.5, 0.0, 0.0]),
        (
            [[0, 0, 10], [0, 10, 0], [10, 10, 0]],
            [[550.0, 5.5], [10.0, 0.0], [5.5, 0.0]],
            [[0.0, 0.0, 0.5, 0.5], [0.0, 1.0, 0.0, 0.0], [0.5, 0.5, 0.0, 0.0]],
        ),
    ]
    for query, expected_output, expected_weights in cases:
        q = tf.constant(query, dtype=tf.float32)
        outputs, weights = mha.scaled_dot_product_attention(q, keys, values, None)
        assert np.all(np.isclose(outputs, np.array(expected_output), atol=1e-6))
        assert np.all(np.isclose(weights, np.array(expected_weights), atol=1e-5))
def test_multi_head_attention():
    """A small MHA + Dense model fits simple sequence data to rmse < 0.5."""
    fdw, fw = 28, 7
    attention_dim, num_heads = 32, 4
    x_train, y_train, x_test, y_test = simple_seq_data(
        nrows=1000, freq="1H", fdw=fdw, fw=fw, test_size=0.2
    )
    inputs = keras.Input(shape=(fdw, 1))
    # self-attention: the same tensor is used as query, key and value
    attended, _ = MultiHeadAttention(
        attention_dim=attention_dim,
        num_heads=num_heads,
        kernel_initializer=get_initializer("glorot_uniform", RANDOM_SEED),
    )([inputs, inputs, inputs])
    flat = keras.layers.Reshape((fdw * attention_dim * num_heads,))(attended)
    preds = keras.layers.Dense(
        fw,
        kernel_initializer=get_initializer("glorot_uniform", RANDOM_SEED),
        activation="linear",
    )(flat)
    model = keras.Model(inputs=inputs, outputs=preds)
    model.compile(
        optimizer=keras.optimizers.Adam(0.01), loss=keras.losses.MeanSquaredError()
    )
    model.fit(x_train, y_train, epochs=5, batch_size=32, shuffle=False)
    y_pred = model.predict(x_test)
    assert np.all(np.isfinite(y_pred))
    assert rmse(y_test, y_pred) < 0.5
def test_multi_head_attention_padding_mask():
    """MultiHeadAttention trains and predicts when fed a random padding mask."""
    fdw = 28
    fw = 7
    attention_dim = 32
    num_heads = 4
    x_train, y_train, x_test, y_test = simple_seq_data(
        nrows=1000, freq="1H", fdw=fdw, fw=fw, test_size=0.2
    )
    random_state = np.random.RandomState(RANDOM_SEED)
    # Keep ~70% of timesteps (mask shape: batch x 1 x 1 x seq_len).
    # NOTE: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented drop-in replacement.
    mask = (
        random_state.random((x_train.shape[0], 1, 1, x_train.shape[1])) > 0.3
    ).astype(int)
    inputs = keras.Input(shape=(fdw, 1))
    padding_mask = keras.Input(shape=(1, 1, fdw))
    outputs, attention_weights = MultiHeadAttention(
        attention_dim=attention_dim,
        num_heads=num_heads,
        kernel_initializer=get_initializer("glorot_uniform", RANDOM_SEED),
    )([inputs, inputs, inputs], mask=padding_mask)
    outputs = keras.layers.Reshape((fdw * attention_dim * num_heads,))(outputs)
    outputs = keras.layers.Dense(
        fw,
        kernel_initializer=get_initializer("glorot_uniform", RANDOM_SEED),
        activation="linear",
    )(outputs)
    model = keras.Model(inputs=[inputs, padding_mask], outputs=outputs)
    model.compile(
        optimizer=keras.optimizers.Adam(0.01), loss=keras.losses.MeanSquaredError(),
    )
    model.fit([x_train, mask], y_train, epochs=5, batch_size=32, shuffle=False)
    # fresh random mask for the held-out set (same int fix as above)
    mask = (random_state.random((x_test.shape[0], 1, 1, x_test.shape[1])) > 0.3).astype(
        int
    )
    y_pred = model.predict([x_test, mask])
    assert np.all(np.isfinite(y_pred))
    error = rmse(y_test, y_pred)
    assert error < 0.5
def test_multi_head_attention_lookahead_mask():
    """MHA trains and predicts with an upper-triangular look-ahead mask."""
    fdw, fw = 28, 7
    attention_dim, num_heads = 32, 4
    x_train, y_train, x_test, y_test = simple_seq_data(
        nrows=1000, freq="1H", fdw=fdw, fw=fw, test_size=0.2
    )
    triu = np.triu(np.ones((fdw, fdw)))
    # one copy of the mask per training sample, plus a singleton head axis
    mask = np.expand_dims(np.stack([triu] * x_train.shape[0]), axis=1)
    inputs = keras.Input(shape=(fdw, 1))
    lookahead_mask = keras.Input(shape=(1, fdw, fdw))
    attended, _ = MultiHeadAttention(
        attention_dim=attention_dim,
        num_heads=num_heads,
        kernel_initializer=get_initializer("glorot_uniform", RANDOM_SEED),
    )([inputs, inputs, inputs], mask=lookahead_mask)
    flat = keras.layers.Reshape((fdw * attention_dim * num_heads,))(attended)
    preds = keras.layers.Dense(
        fw,
        kernel_initializer=get_initializer("glorot_uniform", RANDOM_SEED),
        activation="linear",
    )(flat)
    model = keras.Model(inputs=[inputs, lookahead_mask], outputs=preds)
    model.compile(
        optimizer=keras.optimizers.Adam(0.01), loss=keras.losses.MeanSquaredError(),
    )
    model.fit([x_train, mask], y_train, epochs=5, batch_size=32, shuffle=False)
    mask = np.expand_dims(np.stack([triu] * x_test.shape[0]), axis=1)
    y_pred = model.predict([x_test, mask])
    assert np.all(np.isfinite(y_pred))
    assert rmse(y_test, y_pred) < 0.5
def test_positional_encoding_table():
    """PositionalEncoding's table matches the numpy reference implementation."""
    fdw, fw = 28, 7
    x_train, _, _, _ = simple_seq_data(
        nrows=1000, freq="1H", fdw=fdw, fw=fw, test_size=0.2
    )
    # a single sample with an explicit batch axis
    sample = tf.convert_to_tensor(x_train[0][np.newaxis, :, :])
    encoded = PositionalEncoding(128).call(sample)
    encoded = tf.squeeze(encoded).numpy()
    expected = _positional_encoding_reference(fdw, 128)
    assert np.all(np.isclose(encoded, expected, atol=1e-5))
def test_positional_encoding():
    """A model fed positional-encoding features fits the data (rmse < 0.5)."""
    fdw, fw = 28, 7
    x_train, y_train, x_test, y_test = simple_seq_data(
        nrows=1000, freq="1H", fdw=fdw, fw=fw, test_size=0.2
    )
    inputs = keras.Input(shape=(fdw, 1))
    encodings = PositionalEncoding(8)(inputs)
    features = keras.layers.Concatenate()([inputs, encodings])
    features = keras.layers.Flatten()(features)
    preds = keras.layers.Dense(
        fw,
        kernel_initializer=get_initializer("glorot_uniform", RANDOM_SEED),
        activation="linear",
    )(features)
    model = keras.Model(inputs=inputs, outputs=preds)
    model.compile(
        optimizer=keras.optimizers.Adam(0.01), loss=keras.losses.MeanSquaredError()
    )
    model.fit(x_train, y_train, epochs=5, batch_size=32, shuffle=False)
    y_pred = model.predict(x_test)
    assert np.all(np.isfinite(y_pred))
    assert rmse(y_test, y_pred) < 0.5
def test_positional_encoding_and_attention():
    """Positional encodings feeding MHA still learn the task (rmse < 0.5)."""
    fdw, fw = 28, 7
    attention_dim, num_heads = 32, 4
    x_train, y_train, x_test, y_test = simple_seq_data(
        nrows=1000, freq="1H", fdw=fdw, fw=fw, test_size=0.2
    )
    inputs = keras.Input(shape=(fdw, 1))
    encodings = PositionalEncoding(8)(inputs)
    features = keras.layers.Concatenate()([inputs, encodings])
    attended, _ = MultiHeadAttention(
        attention_dim=attention_dim,
        num_heads=num_heads,
        kernel_initializer=get_initializer("glorot_uniform", RANDOM_SEED),
    )([features, features, features])
    flat = keras.layers.Reshape((fdw * attention_dim * num_heads,))(attended)
    preds = keras.layers.Dense(
        fw,
        kernel_initializer=get_initializer("glorot_uniform", RANDOM_SEED),
        activation="linear",
    )(flat)
    model = keras.Model(inputs=inputs, outputs=preds)
    model.compile(
        optimizer=keras.optimizers.Adam(0.01), loss=keras.losses.MeanSquaredError()
    )
    model.fit(x_train, y_train, epochs=5, batch_size=32, shuffle=False)
    y_pred = model.predict(x_test)
    assert np.all(np.isfinite(y_pred))
    assert rmse(y_test, y_pred) < 0.5
| 32.346939
| 88
| 0.656151
| 1,383
| 9,510
| 4.317426
| 0.120752
| 0.012728
| 0.010551
| 0.030481
| 0.803718
| 0.762854
| 0.738737
| 0.726344
| 0.717635
| 0.707252
| 0
| 0.043375
| 0.202419
| 9,510
| 293
| 89
| 32.457338
| 0.743837
| 0.012303
| 0
| 0.583333
| 0
| 0
| 0.018745
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 1
| 0.04386
| false
| 0
| 0.039474
| 0.004386
| 0.092105
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9038251a0e5fb0f56a43fbbb1f3e39dc241f7a62
| 15,789
|
py
|
Python
|
2021/09.py
|
almazkun/advent
|
bcc18f83e7eaf6579a7b6b47fa9f0f6f0d2b31b4
|
[
"MIT"
] | null | null | null |
2021/09.py
|
almazkun/advent
|
bcc18f83e7eaf6579a7b6b47fa9f0f6f0d2b31b4
|
[
"MIT"
] | null | null | null |
2021/09.py
|
almazkun/advent
|
bcc18f83e7eaf6579a7b6b47fa9f0f6f0d2b31b4
|
[
"MIT"
] | null | null | null |
from solution import Solution
class Point:
    """A single height value at grid position (x, y).

    Neighbour lookups go through a ``field`` object exposing a ``field``
    attribute (list of rows of Points); positions off the grid edge are
    reported as ``loc + 1`` so they never count as lower.
    """

    def __init__(self, loc: int, x: int, y: int):
        self.loc = int(loc)
        self.x = x
        self.y = y

    def __str__(self):
        return str(self.loc)

    def __repr__(self):
        return self.__str__()

    def lowest_x(self, field):
        """True when strictly lower than both horizontal neighbours."""
        return self.loc < self.left(field) and self.loc < self.right(field)

    def lowest_y(self, field):
        """True when strictly lower than both vertical neighbours."""
        return self.loc < self.top(field) and self.loc < self.bottom(field)

    def left(self, field):
        # guard: index -1 would wrap to the row's last element
        if self.x == 0:
            return self.loc + 1
        return field.field[self.y][self.x - 1].loc

    def right(self, field):
        row = field.field[self.y]
        if self.x + 1 >= len(row):
            return self.loc + 1
        return row[self.x + 1].loc

    def top(self, field):
        # guard: index -1 would wrap to the bottom row
        if self.y == 0:
            return self.loc + 1
        return field.field[self.y - 1][self.x].loc

    def bottom(self, field):
        if self.y + 1 >= len(field.field):
            return self.loc + 1
        return field.field[self.y + 1][self.x].loc
class Field:
    """A 2-D grid of Points, stored row-major in ``field``."""

    def __init__(self):
        self.field = []

    def __str__(self):
        # one text line per grid row, digits concatenated
        rows = ("".join(str(p) for p in row) for row in self.field)
        return "\n".join(rows)
class Sol(Solution):
    """Advent of Code 2021, day 9: Smoke Basin.

    The input is a rectangular heightmap of digits 0-9.  Part one sums
    the risk level (height + 1) of every low point — a location strictly
    lower than all of its up/down/left/right neighbours (diagonals do
    not count; edge cells have fewer neighbours).  Part two (multiplying
    the sizes of the three largest basins) is not implemented yet.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    @property
    def cleaned(self):
        """The raw input split into stripped, non-empty lines."""
        return [line.strip() for line in self.input.split("\n") if line]

    def p1(self):
        """Sum of risk levels (height + 1) over all low points."""
        f = Field()
        for y, line in enumerate(self.cleaned):
            f.field.append([Point(ch, x, y) for x, ch in enumerate(line)])
        return sum(
            p.loc + 1
            for row in f.field
            for p in row
            if p.lowest_x(f) and p.lowest_y(f)
        )

    def p2(self):
        """Part two is not implemented; returns None."""
        pass

    @property
    def solution(self):
        """Both answers as printable text; failures become an error string."""
        try:
            return f"p1: {self.p1()}\np2: {self.p2()}\n"
        except Exception as e:
            return f"Error: {e}"
test_ = """
2199943210
3987894921
9856789892
8767896789
9899965678
"""
input_ = """
6769876887698999876367898543212378997654321291098765432398767667989976543210123456987678999766598921
5456965476567999985457897654301456989743210989989898540987656545678987654323634679876569998757387892
4349864345476889876778999864323699879654329879878987659876543434569998789434545798765459879543296989
3298753212345678989889298765459989768965498764567898899865432523459999998587657987654365965654135678
4349432101346789999992169876798778957896796553456789956976921012367899987699788998543234984321015789
5498763212457899999743456989987667945997985432377894344989854323456789998989999987642149765632134899
6569854343568999897654599797898543234589876545688943243498765434678999999878891298753259898543236789
7679878987699598789775988676987632123578987656799654102349876545678999898566789987654345997654545678
8999989398789699678989876545698821014567898787898743212357987656789998787355679999965456789765656799
9889895459893987565699876123799935223698979898987654523456898967899987676234589999876767899878767898
8679789599912398434989765234899876434899765989998765634567989978969876535123467892987878944989878987
7567678989101995429878984349998989545999854677899876749879878989656998321012678921298989432192989656
5434569878919876998767895498987987656789543456789987857998767898945987633234567899349999543943496545
6323498767899989897656789987676698767898632355678998998987654567932398545345678988999987659894965436
3210997856789998756545679878534569898987321234567899799999543656891239659459789467989998798769876547
4329896545456987643334789964323457989876320125688965679898932345994398798998994359878999987756987678
5679765432346987532123458953212345678985431234599954398767921256789459987987893249867899876545698789
6789874321234597645634567894101558789996563545999895987656890127899569876546789197757598765434569896
9892983210145698656747698943212467893987854559886789998967892234568978965435678986543459876545678945
4921294929236798787958999656543569912398967699775698999878964345878999974324567996432345987676899432
3210239898947899898767898769654578923999878987654567896989875458989999865435678987320123498787976561
9854398787898999999898999898765689439899989299985898995490986567899989978947799987431434569898997610
8769987676799998789969899949976896598789890199876789989321299678978978599656789876532345678979989891
9898787545598987683456789534987897989678789987987895878932398789569765498767894987673476789764678989
9987655633467896542347892124998949876565678976598934567893499893488964349878923498784587890123699875
9996543212356789451234589039899934965454567895439123456954987942567899219999436569895698921234589964
8987654101236893210123478949767899876323458987321012345699876543478988998999987678976789434345678943
7898987654345789345234569998656787998434767896542324556789987654789876787898799789987896545756789432
6789698895676997656745699887545476899547898987955456789895698765698765456789678999899998787867894321
5456569989987898767896789786431265987656999099876689892934569876987762347896569878768989898978943210
4343459878998939898987897654320123498767892129998994921012489989876543456789398765457679999999976731
3232349767899323989998999865434634569979943398999323932125678997989654567893219854324587899989895432
2101298956789219878999999979548795997898954997678919865434589876598785678954109543213456789878789543
3432397545694398767899989987656989886887899876567899876745789985419876789865998432101578898769698956
4563498434989459856989879998769878765976789965456789989899899954323987999999876563234567987654567897
5654984323878998768978568999898965654365678974345678991998987895439999548789997696549699876543476998
6979876412767899979765457989987654321254569893234789890987896989598965434598998989698989985432345699
9899865401256789989987679878698765410123456789123456789896645878987654324687899878987679876321234589
9798763212346894394399899859569896521434567893256599898795434767999743212356898767496589986410124679
8679954323567989239212999743456985432545778965346789987654323456789894101234987656325498765421235678
6569865434689678998909998632109876543656889876657899876543213456898765213456976543212349975434547899
5450976545696567997898976543238989656767996987768976998654354567897654324587987643201467986545698945
7321987968789459876567897654347898767898965498979895349765455678998765437678998754312588997656789323
5432398979899598765498989765956999979939654329296789239878567889549876548789898765423678999869893219
6543459989998679876989878979897898989529873210145892145989678999932987679896789876739789896978954998
7656569998999789989876967898799967895434998721234799259998789878893498989945678989849898765989969897
8767678967899994598765458945679656976565987632465678998999896566789599799434567899956987654197898776
9898989656789543459884301236789745697878998543456789987899975465698987678923459989878998543236789545
3999898745897692196543212345678956789989987657578899896789764314567898567894569878989999664545678934
1298769896789989987654523898789879891099898968689998765699995423458965498789679768999989775656799224
0989899999899878999765674789899998932198769989789349954569876534567894349678998957899879876767891012
9878989998998769899876785678998797893239654399895456967678987687678943254567987846789954987898932199
9767679987865456789989876789989686789398763212976769878789798798789652123979876535678893298999943988
7754579876764345678999989899876545678909854323989878989897679899898761019899987621236789129498769877
6543569885323236567897899999998656789212965439992999999978568999987653198789987540345891012389879656
7632398764210123489965458998798767896369879598901298998769456789999964987667895431456789325478998945
5421449875521238567896367987659988965456998987892987569954345678999899876543976432347895434567987934
6530123985434347678963219876545699896567997676789645459893234569989789987659876545456976765679876321
6521234596545458789954398987632346789679876585878932398789345698875678998789987678967897876789985410
8434345987657679899899987654321456898789965434567891987695467987764567899897698789598998998993494321
7545656798788789935678998985490123499999987323458989876569568996543456921976549896459789219654976452
7658769899899892123789109876989234989898765434568969875458979987632345890987698965345678929769876543
8769878987956921094599212989878949876769876745878957987567894596543456791998987654234567899878989655
9878989896545699989698993498769998765456988656789345699678943987654567899899698754345678923989898767
0989998787435678978987889987758789876323499767891234798789432398789878998765539965476989919899789878
1296987658324234569896569876646678987212349898910147899896541019899989799654429876567896798788678989
2345698943210123698765456985434568998101236989321256789987893223978998688943212989778945987666567899
3459987654321235987654329876523457899212345678932347899998987654567897567892101497989239876555456789
4598998796532349898543512987212678954393456789765478999899998785678943459983212345990198765436345999
9987889987643498765432109832103589976989569898997567898788999899889012599876364587891239876521234789
8956776798754569876543498765412367899878998946789678987657899910994123989765456698992946983210345678
7842365679897678987674987654323456789967987897896989498746989329873249878976767899989897894323458989
6531234589998989199876799985476567896754976789945799395439978998765398767897878901978789987434567899
7810123478999899012987899876587678975463465678935678989598768989887469898998989329765679876545678998
8921335567897778943498987987998789764322234569024599878987659879998567979999699498974598987656789347
9432456678976567894999976598939897653210123478934988767998545568989878967894578987653987998968991236
6543467789865478999899865439123998965323234569549878542987632459976989457993567987542196569879210145
7656569898976567898767974321045679879434545678998765431298756567895490346989879898943987899989323234
9867878967987689989856985434756789989565676789459976530459767878989321259978998789764598998996545345
9878989456798789876549876545969891297678797892345987321346998989878932498767897678975989987987675456
9989992345699899995432989658998992998789898901239876434456789596567893998654343487899879896598786568
9898943458789989989321298767987989869896969912345987545589895433456789876543212396789868789439897678
8797899879899879878934569879896878954945457893957897656678985322457899987752101245678947678921998789
7656789989998664767895699999765767893234356789898998767989876301367999898943212386789234567892369893
7545691099876543456789989987654456789101299895789459989999989212479998789654563498992123679965456921
5434593198767552325699878999732345898999989954678969899998765323589987698765678989893234567896567890
0125989987654321014598765987621234567988978912389998799879876494999896539898789678794347678987699921
3234978998798775123987654599434348979876767923498999689765989989898797921999894545689956989998989932
4549767899899654234599743398765457899765456895987698578954598875654689892988953236568897899989877893
5998456789999874345987652109979567987854345679998567467895987654343456789877542123456798999875766789
9876587893298765659876543212398999996543234567895432358999897543232345698765431012345689898754345899
4997678954109878767987676378987678987642123458789521235798765432101256789876542123456998799343234789
3498789543299989878999785459876567995431012345678944345699896953632367892987853234569877679210145678
6569899654989597989239876569985479876532123456899765656789979876543456943498964348798963458921234567
9699998799878456899123998698794321987843234569999876767898767987854567894579875499987654567894345789
8989219987656345678934599987653210198967347678989987898987654398765679965679876989999965878965756899
7478909876544234567895678998764321239878456789679998969898321239989989876989989878989876789879867998
6367899998432123456789899219965432346989767896568999656789432387895491987893498767678989894989878987
5256789987641015667899964329876563456799898965457898547896543456954320198912987654567899953492989876
4345896595432124588998765545987674567899999876345987658987659767896431239109876543456789432101299965
"""
if __name__ == "__main__":
try:
Sol(test_).solve()
except:
pass
Sol(input_).solve()
| 50.605769
| 104
| 0.841219
| 875
| 15,789
| 15.118857
| 0.354286
| 0.004762
| 0.013606
| 0.018142
| 0.051553
| 0.041046
| 0.032807
| 0.028271
| 0.026457
| 0.010734
| 0
| 0.756176
| 0.133447
| 15,789
| 311
| 105
| 50.768489
| 0.210715
| 0.168915
| 0
| 0.100559
| 0
| 0
| 0.831611
| 0.78284
| 0
| 1
| 0
| 0
| 0
| 1
| 0.089385
| false
| 0.011173
| 0.005587
| 0.03352
| 0.206704
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5f71be04bb4de360f940f744f1758c7d47fcb4fb
| 93
|
py
|
Python
|
lib/vpm/models/__init__.py
|
aligholami/kepler
|
a272ac427e09892cd44ade70e910272c4f69c638
|
[
"Unlicense"
] | null | null | null |
lib/vpm/models/__init__.py
|
aligholami/kepler
|
a272ac427e09892cd44ade70e910272c4f69c638
|
[
"Unlicense"
] | null | null | null |
lib/vpm/models/__init__.py
|
aligholami/kepler
|
a272ac427e09892cd44ade70e910272c4f69c638
|
[
"Unlicense"
] | null | null | null |
from .naive import NaiveViewpointMatching
from .quaternion import QuaternionViewpointMatching
| 46.5
| 51
| 0.903226
| 8
| 93
| 10.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075269
| 93
| 2
| 51
| 46.5
| 0.976744
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5f74f393f35e2c794a751b34f340e182dff67b73
| 128
|
py
|
Python
|
math_lib.py
|
cu-swe4s-fall-2019/version-control-tprossiter0
|
8957b8905e5b4302e802e68b4c4e2ac61eea054b
|
[
"MIT"
] | null | null | null |
math_lib.py
|
cu-swe4s-fall-2019/version-control-tprossiter0
|
8957b8905e5b4302e802e68b4c4e2ac61eea054b
|
[
"MIT"
] | null | null | null |
math_lib.py
|
cu-swe4s-fall-2019/version-control-tprossiter0
|
8957b8905e5b4302e802e68b4c4e2ac61eea054b
|
[
"MIT"
] | null | null | null |
def div(a, b):
    """Return a / b; on division by zero, print a message and return None."""
    if b == 0:
        print("cannot divide by 0")
        return None
    return a / b
def add(a, b):
    """Return the sum (or concatenation) of a and b."""
    return a + b
| 14.222222
| 32
| 0.515625
| 24
| 128
| 2.75
| 0.541667
| 0.121212
| 0.242424
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023256
| 0.328125
| 128
| 9
| 33
| 14.222222
| 0.744186
| 0
| 0
| 0
| 0
| 0
| 0.139535
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0.125
| 0.625
| 0.125
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
5fc764f6f305f1bef57aa8a36c50c0e2d3fb5f55
| 1,579
|
py
|
Python
|
admin/admin.py
|
mischievousdev/cogs-for-you
|
fd590a8ea9d6b99179b67c272f88a04e30392e11
|
[
"CC0-1.0"
] | 1
|
2020-02-21T15:59:21.000Z
|
2020-02-21T15:59:21.000Z
|
admin/admin.py
|
mischievousdev/cogs-for-you
|
fd590a8ea9d6b99179b67c272f88a04e30392e11
|
[
"CC0-1.0"
] | null | null | null |
admin/admin.py
|
mischievousdev/cogs-for-you
|
fd590a8ea9d6b99179b67c272f88a04e30392e11
|
[
"CC0-1.0"
] | null | null | null |
import discord
from discord.ext import commands
class Admin(commands.Cog):
    """Owner-only cog for loading, unloading and reloading extensions.

    Each command reports what was requested and what happened in a
    two-field embed.  The embed construction was triplicated across the
    three commands; it now lives in one private helper.
    """

    def __init__(self, bot):
        self.bot = bot

    def _status_embed(self, requested, done):
        """Build the Input/Output status embed shared by all three commands."""
        embed = discord.Embed(color=discord.Color.blurple())
        embed.add_field(name=":inbox_tray: `Input`", value=f"`{requested}`", inline=True)
        embed.add_field(name=":outbox_tray: `Output`", value=f"`{done}`", inline=True)
        return embed

    @commands.command(hidden=True)
    @commands.is_owner()
    async def load(self, ctx, extension):
        """Load cogs.<extension> and report the result (owner only)."""
        self.bot.load_extension(f"cogs.{extension}")
        embed = self._status_embed(
            f"Requested for loading cogs.{extension}",
            f"Successfully loaded cogs.{extension}",
        )
        await ctx.send(embed=embed)

    @commands.command(hidden=True)
    @commands.is_owner()
    async def unload(self, ctx, extension):
        """Unload cogs.<extension> and report the result (owner only)."""
        self.bot.unload_extension(f"cogs.{extension}")
        embed = self._status_embed(
            f"Requested for unloading cogs.{extension}",
            f"Successfully unloaded cogs.{extension}",
        )
        await ctx.send(embed=embed)

    @commands.command(hidden=True)
    @commands.is_owner()
    async def reload(self, ctx, extension):
        """Unload then re-load cogs.<extension> (owner only)."""
        self.bot.unload_extension(f"cogs.{extension}")
        self.bot.load_extension(f"cogs.{extension}")
        embed = self._status_embed(
            f"Requested for re-loading cogs.{extension}",
            f"Successfully re-loaded cogs.{extension}",
        )
        await ctx.send(embed=embed)
def setup(bot):
    """Extension entry point: attach the Admin cog to the bot."""
    cog = Admin(bot)
    bot.add_cog(cog)
| 41.552632
| 113
| 0.734642
| 224
| 1,579
| 5.071429
| 0.21875
| 0.114437
| 0.068662
| 0.089789
| 0.867077
| 0.860915
| 0.860915
| 0.860915
| 0.860915
| 0.818662
| 0
| 0
| 0.098163
| 1,579
| 38
| 114
| 41.552632
| 0.797753
| 0
| 0
| 0.5
| 0
| 0
| 0.274684
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.0625
| 0
| 0.15625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
395e61093a2eacc092d2a5b10d7c0cd0efce05e0
| 2,672
|
py
|
Python
|
alembic/versions/8c09a746d436_update_experiment_columns.py
|
jonathanzong/dmca
|
70157cff983310e5951024aa80e99e7a5404d758
|
[
"MIT"
] | 2
|
2022-02-16T22:50:06.000Z
|
2022-02-21T19:38:02.000Z
|
alembic/versions/8c09a746d436_update_experiment_columns.py
|
jonathanzong/dmca
|
70157cff983310e5951024aa80e99e7a5404d758
|
[
"MIT"
] | 2
|
2022-02-01T05:48:07.000Z
|
2022-02-01T05:49:29.000Z
|
alembic/versions/8c09a746d436_update_experiment_columns.py
|
jonathanzong/bartleby
|
70157cff983310e5951024aa80e99e7a5404d758
|
[
"MIT"
] | null | null | null |
"""Update experiment columns
Revision ID: 8c09a746d436
Revises: b6bb41e569e4
Create Date: 2017-12-19 14:23:51.293811
"""
# revision identifiers, used by Alembic.
revision = '8c09a746d436'
down_revision = 'b6bb41e569e4'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function (e.g. upgrade_test)."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_development():
    """Add controller/settings_json and drop account_found (development engine)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('experiments', sa.Column('controller', sa.String(length=64), nullable=True))
    op.add_column('experiments', sa.Column('settings_json', sa.LargeBinary(), nullable=True))
    # account_found is recreated as TINYINT(1) by the matching downgrade
    op.drop_column('experiments', 'account_found')
    # ### end Alembic commands ###
def downgrade_development():
    """Restore account_found and drop the columns added by the upgrade (development)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('experiments', sa.Column('account_found', mysql.TINYINT(display_width=1), autoincrement=False, nullable=True))
    op.drop_column('experiments', 'settings_json')
    op.drop_column('experiments', 'controller')
    # ### end Alembic commands ###
def upgrade_test():
    """Same schema change as upgrade_development, for the test engine."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('experiments', sa.Column('controller', sa.String(length=64), nullable=True))
    op.add_column('experiments', sa.Column('settings_json', sa.LargeBinary(), nullable=True))
    op.drop_column('experiments', 'account_found')
    # ### end Alembic commands ###
def downgrade_test():
    """Same rollback as downgrade_development, for the test engine."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('experiments', sa.Column('account_found', mysql.TINYINT(display_width=1), autoincrement=False, nullable=True))
    op.drop_column('experiments', 'settings_json')
    op.drop_column('experiments', 'controller')
    # ### end Alembic commands ###
def upgrade_production():
    """Same schema change as upgrade_development, for the production engine."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('experiments', sa.Column('controller', sa.String(length=64), nullable=True))
    op.add_column('experiments', sa.Column('settings_json', sa.LargeBinary(), nullable=True))
    op.drop_column('experiments', 'account_found')
    # ### end Alembic commands ###
def downgrade_production():
    """Same rollback as downgrade_development, for the production engine."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('experiments', sa.Column('account_found', mysql.TINYINT(display_width=1), autoincrement=False, nullable=True))
    op.drop_column('experiments', 'settings_json')
    op.drop_column('experiments', 'controller')
    # ### end Alembic commands ###
| 34.701299
| 128
| 0.708832
| 319
| 2,672
| 5.786834
| 0.225705
| 0.165764
| 0.053629
| 0.107259
| 0.796858
| 0.796858
| 0.796858
| 0.796858
| 0.796858
| 0.796858
| 0
| 0.026556
| 0.140344
| 2,672
| 76
| 129
| 35.157895
| 0.777101
| 0.230165
| 0
| 0.514286
| 0
| 0
| 0.234335
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.228571
| false
| 0
| 0.085714
| 0
| 0.314286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
39a6aa5ed87f23c48f7da5f9292acc9eecda8404
| 7,660
|
py
|
Python
|
src/conversation_analytics_toolkit/filtering.py
|
zzhang13/assistant-dialog-flow-analysis
|
d4e8d00ee0ff1aec33035ad654e7b5484d112040
|
[
"Apache-2.0"
] | 19
|
2020-06-07T19:13:06.000Z
|
2022-01-22T02:34:11.000Z
|
src/conversation_analytics_toolkit/filtering.py
|
watson-developer-cloud/assistant-dialog-flow-analysis
|
0c7bcd9527636dce77c74b80f60dbe23e6682e13
|
[
"Apache-2.0"
] | 32
|
2020-06-04T14:09:03.000Z
|
2021-02-11T15:05:07.000Z
|
src/conversation_analytics_toolkit/filtering.py
|
zzhang13/assistant-dialog-flow-analysis
|
d4e8d00ee0ff1aec33035ad654e7b5484d112040
|
[
"Apache-2.0"
] | 10
|
2020-06-04T18:49:53.000Z
|
2021-11-26T12:42:08.000Z
|
# (C) Copyright IBM Corp. 2019, 2020.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
import datetime
def by_included_node(node, df_logs_formated):
    """
    Return the subset of conversations (formatted log rows) that visited the
    given dialog node at least once. Rows of matching conversations are
    returned sorted by 'response_timestamp'.
    """
    print("filtering.by_included_node() is DEPRECATED and will be removed in a future release. Use filtering2.by_node_id() instead ")
    # Start from an empty frame mirroring the input's columns and dtypes so
    # the concatenations below keep a consistent schema.
    result = pd.DataFrame(data=None, columns=df_logs_formated.columns)
    for col in result.columns:
        result[col] = result[col].astype(df_logs_formated[col].dtypes.name)
    grouped = df_logs_formated.sort_values(by=["response_timestamp"]).groupby(by="conversation_id")
    for _, rows in grouped:
        ordered = pd.DataFrame(rows, columns=rows.columns).sort_values(by=['response_timestamp'])
        # Keep the whole conversation if any of its turns visited the node.
        if any(ordered['node_visited'] == node):
            result = pd.concat([result, ordered])
    if result.empty:
        print('Filtering yielded an empty dataframe. Path flow analysis requires a non-empty dataframe.')
    print('Initial amount of records (before filtering):', len(df_logs_formated))
    print('Amount of filtered records:', len(df_logs_formated)-len(result))
    print('Final amount of records (after filtering):', len(result))
    return result
def by_included_absolute_path(path, df_logs_formated):
    """
    Return the subset of conversations whose first len(path) visited nodes
    exactly match ``path`` (an absolute path from the conversation start).

    @param path: list of node names expected at positions 0..len(path)-1
    @param df_logs_formated: formatted logs dataframe with at least the
        'conversation_id', 'response_timestamp' and 'node_visited' columns

    Note: an empty ``path`` trivially matches every conversation.
    """
    print("filtering.by_included_absolute_path() is DEPRECATED and will be removed in a future release. ")
    # create an empty dataframe with the same column names and types
    filtered_df_logs = pd.DataFrame(data=None, columns=df_logs_formated.columns)
    for column in filtered_df_logs.columns:
        filtered_df_logs[column] = filtered_df_logs[column].astype(df_logs_formated[column].dtypes.name)
    df_by_conversation_id = df_logs_formated.sort_values(by=["response_timestamp"]).groupby(by="conversation_id")
    for conversation_id, group in df_by_conversation_id:
        dfP1 = pd.DataFrame(group, columns=group.columns).sort_values(by=['response_timestamp'])
        # A conversation can only match if it is at least as long as the path.
        # BUGFIX: the previous implementation re-checked the loop index after a
        # `break` (`if i == len(path)-1`), which wrongly accepted conversations
        # whose *last* path element mismatched; it also shadowed the outer
        # loop index `i`. `all(...)` expresses the prefix match correctly.
        if len(dfP1) >= len(path) and all(
                path[j] == dfP1.iloc[j]['node_visited'] for j in range(len(path))):
            filtered_df_logs = pd.concat([filtered_df_logs, dfP1])
    if filtered_df_logs.empty:
        print('Filtering yielded an empty dataframe. Path flow analysis requires a non-empty dataframe.')
    print('Initial amount of records (before filtering):', len(df_logs_formated))
    print('Amount of filtered records:', len(df_logs_formated)-len(filtered_df_logs))
    print('Final amount of records (after filtering):', len(filtered_df_logs))
    return filtered_df_logs
def by_initial_intent(intent, df_logs_formated):
    """
    Return the subset of conversations whose first turn (by
    'response_timestamp') was classified with the given intent.
    """
    print("filtering.by_initial_intent() is DEPRECATED and will be removed in a future release. Use filtering2.by_node_id() instead.")
    # Empty result frame that mirrors the input schema (columns and dtypes).
    result = pd.DataFrame(data=None, columns=df_logs_formated.columns)
    for col in result.columns:
        result[col] = result[col].astype(df_logs_formated[col].dtypes.name)
    grouped = df_logs_formated.sort_values(by=["response_timestamp"]).groupby(by="conversation_id")
    for _, rows in grouped:
        ordered = pd.DataFrame(rows, columns=rows.columns).sort_values(by=['response_timestamp'])
        # Compare only the earliest turn of the conversation.
        if ordered['intent'].iloc[0] == intent:
            result = pd.concat([result, ordered])
    if result.empty:
        print('Filtering yielded an empty dataframe. Path flow analysis requires a non-empty dataframe.')
    print('Initial amount of records (before filtering):', len(df_logs_formated))
    print('Amount of filtered records:', len(df_logs_formated)-len(result))
    print('Final amount of records (after filtering):', len(result))
    return result
def from_node_onwards(node, df_logs_formated):
    """
    Truncate each conversation so it starts at the first visit of ``node``;
    conversations that never visit the node are dropped entirely.
    """
    print("filtering.from_node_onwards is DEPRECATED and will be removed in a future release. Use filtering2.trim_from_node_id instead.")
    # Empty result frame that mirrors the input schema (columns and dtypes).
    result = pd.DataFrame(data=None, columns=df_logs_formated.columns)
    for col in result.columns:
        result[col] = result[col].astype(df_logs_formated[col].dtypes.name)
    grouped = df_logs_formated.sort_values(by=["response_timestamp"]).groupby(by="conversation_id")
    for _, conversation in grouped:
        # 1-based position of each turn within the conversation.
        for position, (_, turn) in enumerate(conversation.iterrows(), start=1):
            if turn["node_visited"] == node:
                # Keep this turn and everything after it, then move on to the
                # next conversation.
                result = pd.concat([result, conversation.tail(len(conversation) - position + 1)])
                break
    if result.empty:
        print('Filtering yielded an empty dataframe. Path flow analysis requires a non-empty dataframe.')
    print('Initial amount of records (before filtering):', len(df_logs_formated))
    print('Amount of filtered records:', len(df_logs_formated)-len(result))
    print('Final amount of records (after filtering):', len(result))
    return result
def by_date_range(df, start_date, end_date):
    """
    Return the rows whose 'response_timestamp' falls inside the inclusive
    [start_date, end_date] range, with a fresh integer index.
    """
    print("filtering.by_date_range is DEPRECATED and will be removed in a future release. Use filtering2.by_date_range instead.")
    # `between` is inclusive on both ends, matching >= start and <= end.
    in_range = df['response_timestamp'].between(start_date, end_date)
    selected = df.loc[in_range].reset_index()
    if selected.empty:
        print('Filtering yielded an empty dataframe. Path flow analysis requires a non-empty dataframe.')
    print('Initial amount of records (before filtering):', len(df))
    print('Amount of filtered records:', len(df)-len(selected))
    print('Final amount of records (after filtering):', len(selected))
    return selected
| 46.424242
| 137
| 0.715796
| 1,090
| 7,660
| 4.818349
| 0.166055
| 0.073115
| 0.106626
| 0.024372
| 0.740289
| 0.729246
| 0.729246
| 0.722963
| 0.714966
| 0.70773
| 0
| 0.00579
| 0.188251
| 7,660
| 165
| 138
| 46.424242
| 0.838855
| 0.176632
| 0
| 0.642105
| 0
| 0.021053
| 0.297458
| 0.038932
| 0
| 0
| 0
| 0.018182
| 0
| 1
| 0.052632
| false
| 0
| 0.031579
| 0
| 0.136842
| 0.263158
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f2dad5196463dee843f5096f0a717f3e7ebb43f5
| 178
|
py
|
Python
|
polls/admin.py
|
bomzheg/drf-api-test-task
|
00611073c198ab4fc3d601323587fce781123a3a
|
[
"MIT"
] | null | null | null |
polls/admin.py
|
bomzheg/drf-api-test-task
|
00611073c198ab4fc3d601323587fce781123a3a
|
[
"MIT"
] | null | null | null |
polls/admin.py
|
bomzheg/drf-api-test-task
|
00611073c198ab4fc3d601323587fce781123a3a
|
[
"MIT"
] | null | null | null |
from django.contrib import admin

from .models import Question, PossibleAnswer, Poll

# Make the poll models manageable through the Django admin site.
for model in (Poll, Question, PossibleAnswer):
    admin.site.register(model)
| 22.25
| 50
| 0.825843
| 23
| 178
| 6.391304
| 0.478261
| 0.183673
| 0.346939
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08427
| 178
| 7
| 51
| 25.428571
| 0.90184
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
f2f19d35215945981d5761608e6c3d44716308ce
| 137
|
py
|
Python
|
notebooks/lib/__init__.py
|
timtyree/crypto
|
cc4e5fff15d02402edc4d157c4a74fcb1e2ae834
|
[
"MIT"
] | null | null | null |
notebooks/lib/__init__.py
|
timtyree/crypto
|
cc4e5fff15d02402edc4d157c4a74fcb1e2ae834
|
[
"MIT"
] | null | null | null |
notebooks/lib/__init__.py
|
timtyree/crypto
|
cc4e5fff15d02402edc4d157c4a74fcb1e2ae834
|
[
"MIT"
] | null | null | null |
# Package facade: re-export every public name of each submodule so callers
# can use `from lib import <name>` directly.
# NOTE(review): `import *` exposes everything not underscore-prefixed;
# consider defining __all__ in the submodules to control the public API.
from .utils import *
from .viewer import *
from .controller import *
from .model import *
from .measures import *
from .routines import *
| 22.833333
| 25
| 0.744526
| 18
| 137
| 5.666667
| 0.444444
| 0.490196
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.167883
| 137
| 6
| 26
| 22.833333
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8408888717007f44a45ef85dbe032c0065a684fe
| 116
|
py
|
Python
|
dist/Basilisk/fswAlgorithms/magComm/__init__.py
|
ian-cooke/basilisk_mag
|
a8b1e37c31c1287549d6fd4d71fcaa35b6fc3f14
|
[
"0BSD"
] | null | null | null |
dist/Basilisk/fswAlgorithms/magComm/__init__.py
|
ian-cooke/basilisk_mag
|
a8b1e37c31c1287549d6fd4d71fcaa35b6fc3f14
|
[
"0BSD"
] | 1
|
2019-03-13T20:52:22.000Z
|
2019-03-13T20:52:22.000Z
|
dist/Basilisk/fswAlgorithms/magComm/__init__.py
|
ian-cooke/basilisk_mag
|
a8b1e37c31c1287549d6fd4d71fcaa35b6fc3f14
|
[
"0BSD"
] | null | null | null |
# This __init__.py file for the magComm package is automatically generated by the build system
# Re-export every public name of the magComm module at package level.
from magComm import *
| 58
| 94
| 0.818966
| 18
| 116
| 5.055556
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155172
| 116
| 2
| 95
| 58
| 0.928571
| 0.793103
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
840ee113fb8f7ba082479d0471050df70f548c78
| 646
|
py
|
Python
|
autogen/openapi_server/models/__init__.py
|
jeanyjean/locatitude-api
|
b36759f372060a3726c63edb35516303d0e85d81
|
[
"MIT"
] | null | null | null |
autogen/openapi_server/models/__init__.py
|
jeanyjean/locatitude-api
|
b36759f372060a3726c63edb35516303d0e85d81
|
[
"MIT"
] | null | null | null |
autogen/openapi_server/models/__init__.py
|
jeanyjean/locatitude-api
|
b36759f372060a3726c63edb35516303d0e85d81
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# flake8: noqa
# Auto-style model package index: re-exports each generated model class so
# callers can do `from openapi_server.models import <Model>`.
from __future__ import absolute_import
# import models into model package
from openapi_server.models.all_details import AllDetails
from openapi_server.models.covid19 import Covid19
from openapi_server.models.covid_trend import CovidTrend
from openapi_server.models.lat_long import LatLong
from openapi_server.models.new_covid_each_day import NewCovidEachDay
from openapi_server.models.pm25 import PM25
from openapi_server.models.pm_trend import PmTrend
from openapi_server.models.population import Population
from openapi_server.models.province import Province
from openapi_server.models.sum_covid import SumCovid
| 40.375
| 68
| 0.871517
| 93
| 646
| 5.806452
| 0.387097
| 0.203704
| 0.314815
| 0.425926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016978
| 0.088235
| 646
| 15
| 69
| 43.066667
| 0.89983
| 0.091331
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ffec802f4f43d7f3966e6705fa58059b934a3080
| 108
|
py
|
Python
|
ratbagd.py
|
staticssleever668/ratbag-python
|
285f73270cd3141a567f36536b96d24d747c6b27
|
[
"MIT"
] | null | null | null |
ratbagd.py
|
staticssleever668/ratbag-python
|
285f73270cd3141a567f36536b96d24d747c6b27
|
[
"MIT"
] | null | null | null |
ratbagd.py
|
staticssleever668/ratbag-python
|
285f73270cd3141a567f36536b96d24d747c6b27
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Thin command-line launcher that delegates to ratbag.cli.ratbagd.main()."""
from ratbag.cli import ratbagd

if __name__ == "__main__":
    ratbagd.main()
| 15.428571
| 29
| 0.703704
| 15
| 108
| 4.533333
| 0.733333
| 0.264706
| 0.470588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010753
| 0.138889
| 108
| 6
| 30
| 18
| 0.72043
| 0.194444
| 0
| 0
| 0
| 0
| 0.093023
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
0832cf651bb687e158c907e0b630059b1f20f8ee
| 179
|
py
|
Python
|
demo_package/data/__init__.py
|
sdpython/demo_package
|
d057fe8e02a4c18b75696251b3e3bd4b3a9136a5
|
[
"MIT"
] | null | null | null |
demo_package/data/__init__.py
|
sdpython/demo_package
|
d057fe8e02a4c18b75696251b3e3bd4b3a9136a5
|
[
"MIT"
] | 2
|
2020-12-14T14:48:46.000Z
|
2020-12-14T15:00:18.000Z
|
demo_package/data/__init__.py
|
sdpython/demo_package
|
d057fe8e02a4c18b75696251b3e3bd4b3a9136a5
|
[
"MIT"
] | null | null | null |
"""
Shortcuts to *data*.
"""
from .data_insee import ( # noqa
data_covid_france_departments_hospitals,
data_covid_france_departments_tests,
data_france_departments)
| 19.888889
| 44
| 0.759777
| 21
| 179
| 5.952381
| 0.571429
| 0.408
| 0.24
| 0.416
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156425
| 179
| 8
| 45
| 22.375
| 0.827815
| 0.145251
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
084f71e0cd63a232698b72a6a394d81d54b6d573
| 35
|
py
|
Python
|
jduargs/__init__.py
|
jeandemeusy/jdu_args
|
cbaf69d70c4cc25492989787ff97b4642b58078f
|
[
"MIT"
] | null | null | null |
jduargs/__init__.py
|
jeandemeusy/jdu_args
|
cbaf69d70c4cc25492989787ff97b4642b58078f
|
[
"MIT"
] | null | null | null |
jduargs/__init__.py
|
jeandemeusy/jdu_args
|
cbaf69d70c4cc25492989787ff97b4642b58078f
|
[
"MIT"
] | null | null | null |
# Expose ArgumentParser as the package's public entry point.
from .parser import ArgumentParser
| 17.5
| 34
| 0.857143
| 4
| 35
| 7.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f25ba38ad9bc528a1cd74f62a8ce3c1bd7e1365a
| 67,406
|
py
|
Python
|
alerter/test/monitors/node/test_chainlink.py
|
SimplyVC/panic
|
2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d
|
[
"Apache-2.0"
] | 41
|
2019-08-23T12:40:42.000Z
|
2022-03-28T11:06:02.000Z
|
alerter/test/monitors/node/test_chainlink.py
|
SimplyVC/panic
|
2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d
|
[
"Apache-2.0"
] | 147
|
2019-08-30T22:09:48.000Z
|
2022-03-30T08:46:26.000Z
|
alerter/test/monitors/node/test_chainlink.py
|
SimplyVC/panic
|
2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d
|
[
"Apache-2.0"
] | 3
|
2019-09-03T21:12:28.000Z
|
2021-08-18T14:27:56.000Z
|
import copy
import json
import logging
import unittest
from collections import ChainMap
from datetime import datetime
from datetime import timedelta
from http.client import IncompleteRead
from typing import Dict
from unittest import mock
from unittest.mock import call
import pika
from freezegun import freeze_time
from parameterized import parameterized
from pika.exceptions import AMQPConnectionError, AMQPChannelError
from requests.exceptions import (ConnectionError as ReqConnectionError,
ReadTimeout, ChunkedEncodingError,
MissingSchema, InvalidSchema, InvalidURL)
from urllib3.exceptions import ProtocolError
from src.configs.nodes.chainlink import ChainlinkNodeConfig
from src.message_broker.rabbitmq import RabbitMQApi
from src.monitors.node.chainlink import ChainlinkNodeMonitor
from src.utils import env
from src.utils.constants.rabbitmq import (HEALTH_CHECK_EXCHANGE,
RAW_DATA_EXCHANGE,
CHAINLINK_NODE_RAW_DATA_ROUTING_KEY,
HEARTBEAT_OUTPUT_WORKER_ROUTING_KEY)
from src.utils.exceptions import (PANICException,
EnabledSourceIsEmptyException,
MetricNotFoundException, NodeIsDownException,
DataReadingException, InvalidUrlException,
MessageWasNotDeliveredException)
from test.utils.utils import (connect_to_rabbit, delete_queue_if_exists,
delete_exchange_if_exists, disconnect_from_rabbit,
assert_not_called_with)
class TestChainlinkNodeMonitor(unittest.TestCase):
def setUp(self) -> None:
self.dummy_logger = logging.getLogger('Dummy')
self.dummy_logger.disabled = True
self.connection_check_time_interval = timedelta(seconds=0)
self.rabbit_ip = env.RABBIT_IP
self.rabbitmq = RabbitMQApi(
self.dummy_logger, self.rabbit_ip,
connection_check_time_interval=self.connection_check_time_interval)
self.monitor_name = 'test_monitor'
self.monitoring_period = 10
self.node_id = 'test_node_id'
self.parent_id = 'test_parent_id'
self.node_name = 'test_node'
self.monitor_node = True
self.monitor_prometheus = True
self.node_prometheus_urls = ['https://test_ip_1:1000',
'https://test_ip_2:1000',
'https://test_ip_3:1000']
self.routing_key = 'test_routing_key'
self.test_data_str = 'test data'
self.test_data_dict = {
'test_key_1': 'test_val_1',
'test_key_2': 'test_val_2',
}
self.test_heartbeat = {
'component_name': 'Test Component',
'is_alive': True,
'timestamp': datetime(2012, 1, 1).timestamp(),
}
self.test_queue_name = 'Test Queue'
self.prometheus_metrics = {
'head_tracker_current_head': 'strict',
'head_tracker_heads_received_total': 'strict',
'max_unconfirmed_blocks': 'strict',
'process_start_time_seconds': 'strict',
'tx_manager_num_gas_bumps_total': 'strict',
'tx_manager_gas_bump_exceeds_limit_total': 'strict',
'unconfirmed_transactions': 'strict',
'gas_updater_set_gas_price': 'optional',
'eth_balance': 'strict',
'run_status_update_total': 'optional',
}
self.retrieved_prometheus_data_example = {
'eth_balance': {'{"account": "eth_add_1"}': 26.043292035081947},
'gas_updater_set_gas_price': {
'{"percentile": "20%"}': 5000000000.0
},
'head_tracker_current_head': 6924314.0,
'head_tracker_heads_received_total': 26392.0,
'max_unconfirmed_blocks': 0.0,
'process_start_time_seconds': 1619431240.24,
'run_status_update_total': {
'{"from_status": "", "job_spec_id": '
'"03ba2f182d5e4245b8492e7f8672482e", '
'"status": "in_progress"}': 129.0,
'{"from_status": "", "job_spec_id": '
'"0b7dd91f5e8a40d8b0493fc0799fe5d3", '
'"status": "in_progress"}': 189.0,
'{"from_status": "in_progress", "job_spec_id": '
'"03ba2f182d5e4245b8492e7f8672482e", '
'"status": "completed"}': 389.0,
'{"from_status": "in_progress", "job_spec_id": '
'"03ba2f182d5e4245b8492e7f8672482e", "status": '
'"pending_outgoing_confirmations"}': 1898.0,
'{"from_status": "in_progress", "job_spec_id": '
'"0b7dd91f5e8a40d8b0493fc0799fe5d3", '
'"status": "completed"}': 569.0,
'{"from_status": "in_progress", "job_spec_id": '
'"0b7dd91f5e8a40d8b0493fc0799fe5d3", '
'"status": "pending_outgoing_confirmations"}': 2780.0,
'{"from_status": "in_progress", "job_spec_id": '
'"2aacf8ce6827410dae6ff2ce68938edb", "status": "errored"}': 1.0,
'{"from_status": "in_progress", "job_spec_id": '
'"3cc0a79b77f8404fa193c1e56b3f29bf", '
'"status": "errored"}': 90.0,
'{"from_status": "in_progress", '
'"job_spec_id": "4ae35b033a294c3db78a45db9ada9a57", '
'"status": "errored"}': 1.0,
'{"from_status": "in_progress", "job_spec_id": '
'"7594586a567d4700b1a794f3363569e1", "status": "errored"}': 1.0,
'{"from_status": "in_progress", "job_spec_id": '
'"834275814b3b46de83aa7770dbc90912", "status": "errored"}': 4.0,
'{"from_status": "in_progress", "job_spec_id": '
'"8d2cde397b17415486bbd79de84c901e", '
'"status": "errored"}': 112.0,
'{"from_status": "in_progress", "job_spec_id": '
'"d0dd062c26794ff1a9b9460cd5d529f6", "status": "errored"}': 2.0,
'{"from_status": "in_progress", "job_spec_id": '
'"f2e35bcb37b04198a9241121cd936572", "status": "errored"}': 4.0,
},
'tx_manager_gas_bump_exceeds_limit_total': 0.0,
'tx_manager_num_gas_bumps_total': 2031.0,
'unconfirmed_transactions': 1.0
}
self.retrieved_prometheus_data_example_optionals_none = copy.deepcopy(
self.retrieved_prometheus_data_example)
self.retrieved_prometheus_data_example_optionals_none[
'gas_updater_set_gas_price'] = None
self.retrieved_prometheus_data_example_optionals_none[
'run_status_update_total'] = None
self.processed_prometheus_data_example = {
'head_tracker_current_head': 6924314.0,
'head_tracker_heads_received_total': 26392.0,
'max_unconfirmed_blocks': 0.0,
'process_start_time_seconds': 1619431240.24,
'tx_manager_gas_bump_exceeds_limit_total': 0.0,
'tx_manager_num_gas_bumps_total': 2031.0,
'unconfirmed_transactions': 1.0,
'gas_updater_set_gas_price': {
'percentile': '20%',
'price': 5000000000.0
},
'eth_balance': {
'address': 'eth_add_1',
'balance': 26.043292035081947
},
'run_status_update_total_errors': 8
}
self.processed_prometheus_data_example_optionals_none = copy.deepcopy(
self.processed_prometheus_data_example)
self.processed_prometheus_data_example_optionals_none[
'gas_updater_set_gas_price'] = None
self.processed_prometheus_data_example_optionals_none[
'run_status_update_total_errors'] = 0
self.test_exception = PANICException('test_exception', 1)
self.node_config = ChainlinkNodeConfig(
self.node_id, self.parent_id, self.node_name, self.monitor_node,
self.monitor_prometheus, self.node_prometheus_urls)
self.test_monitor = ChainlinkNodeMonitor(
self.monitor_name, self.node_config, self.dummy_logger,
self.monitoring_period, self.rabbitmq
)
# The dicts below will make more sense when more source types are added
self.received_retrieval_info_all_source_types_enabled = {
'prometheus': {
'data': self.retrieved_prometheus_data_example,
'data_retrieval_failed': False,
'data_retrieval_exception': None,
'get_function': self.test_monitor._get_prometheus_data,
'processing_function':
self.test_monitor._process_retrieved_prometheus_data,
'last_source_used_var': 'self._last_prometheus_source_used',
'monitoring_enabled_var': 'self.node_config._monitor_prometheus'
}
# When more sources are added this should contain source types with
# successfully obtained data.
}
self.received_retrieval_info_prometheus_disabled = {
'prometheus': {
'data': {},
'data_retrieval_failed': True,
'data_retrieval_exception': None,
'get_function': self.test_monitor._get_prometheus_data,
'processing_function':
self.test_monitor._process_retrieved_prometheus_data,
'last_source_used_var': 'self._last_prometheus_source_used',
'monitoring_enabled_var': 'self.node_config._monitor_prometheus'
}
# When more sources are added this should contain source types with
# successfully obtained data.
}
self.received_retrieval_info_all_source_types_enabled_err = {
'prometheus': {
'data': {},
'data_retrieval_failed': True,
'data_retrieval_exception': None,
'get_function': self.test_monitor._get_prometheus_data,
'processing_function':
self.test_monitor._process_retrieved_prometheus_data,
'last_source_used_var': 'self._last_prometheus_source_used',
'monitoring_enabled_var': 'self.node_config._monitor_prometheus'
}
# When more sources are added this should contain source types with
# successfully obtained data.
}
# TODO: When more sources are added we can add
# self.received_retrieval_info_prometheus_disabled_err
def tearDown(self) -> None:
# Delete any queues and exchanges which are common across many tests
connect_to_rabbit(self.test_monitor.rabbitmq)
delete_queue_if_exists(self.test_monitor.rabbitmq, self.test_queue_name)
delete_exchange_if_exists(self.test_monitor.rabbitmq,
HEALTH_CHECK_EXCHANGE)
delete_exchange_if_exists(self.test_monitor.rabbitmq, RAW_DATA_EXCHANGE)
disconnect_from_rabbit(self.test_monitor.rabbitmq)
self.dummy_logger = None
self.connection_check_time_interval = None
self.rabbitmq = None
self.test_exception = None
self.node_config = None
self.test_monitor = None
def test_str_returns_monitor_name(self) -> None:
self.assertEqual(self.monitor_name, str(self.test_monitor))
def test_get_monitor_period_returns_monitor_period(self) -> None:
self.assertEqual(self.monitoring_period,
self.test_monitor.monitor_period)
def test_get_monitor_name_returns_monitor_name(self) -> None:
self.assertEqual(self.monitor_name, self.test_monitor.monitor_name)
def test_node_config_returns_node_config(self) -> None:
self.assertEqual(self.node_config, self.test_monitor.node_config)
def test_prometheus_metrics_returns_prometheus_metrics(self) -> None:
self.assertEqual(self.prometheus_metrics,
self.test_monitor.prometheus_metrics)
def test_last_prometheus_source_used_returns_last_prometheus_source_used(
self) -> None:
# Check that on startup
# last_prometheus_source_used = node_prometheus_urls[0]
self.assertEqual(self.node_prometheus_urls[0],
self.test_monitor.last_prometheus_source_used)
# Check for any other value
self.test_monitor._last_prometheus_source_used = \
self.node_prometheus_urls[1]
self.assertEqual(self.node_prometheus_urls[1],
self.test_monitor.last_prometheus_source_used)
@parameterized.expand([
([],)
])
def test_init_raises_EnabledSourceIsEmptyException_if_empty_enabled_source(
self, node_prometheus_urls) -> None:
"""
This function should be parameterized further once we increase the
number of data sources.
"""
node_config = ChainlinkNodeConfig(
self.node_id, self.parent_id, self.node_name, self.monitor_node,
self.monitor_prometheus, node_prometheus_urls)
self.assertRaises(
EnabledSourceIsEmptyException, ChainlinkNodeMonitor,
self.monitor_name, node_config, self.dummy_logger,
self.monitoring_period, self.rabbitmq)
def test_initialise_rabbitmq_initialises_everything_as_expected(
self) -> None:
# To make sure that there is no connection/channel already
# established
self.assertIsNone(self.rabbitmq.connection)
self.assertIsNone(self.rabbitmq.channel)
# To make sure that the exchanges have not already been declared
connect_to_rabbit(self.rabbitmq)
self.rabbitmq.exchange_delete(RAW_DATA_EXCHANGE)
self.rabbitmq.exchange_delete(HEALTH_CHECK_EXCHANGE)
disconnect_from_rabbit(self.rabbitmq)
self.test_monitor._initialise_rabbitmq()
# Perform checks that the connection has been opened, marked as open
# and that the delivery confirmation variable is set.
self.assertTrue(self.test_monitor.rabbitmq.is_connected)
self.assertTrue(self.test_monitor.rabbitmq.connection.is_open)
self.assertTrue(
self.test_monitor.rabbitmq.channel._delivery_confirmation)
# Check whether the exchange has been creating by sending messages
# to it. If this fails an exception is raised hence the test fails.
self.test_monitor.rabbitmq.basic_publish_confirm(
exchange=RAW_DATA_EXCHANGE, routing_key=self.routing_key,
body=self.test_data_str, is_body_dict=False,
properties=pika.BasicProperties(delivery_mode=2),
mandatory=False)
self.test_monitor.rabbitmq.basic_publish_confirm(
exchange=HEALTH_CHECK_EXCHANGE, routing_key=self.routing_key,
body=self.test_data_str, is_body_dict=False,
properties=pika.BasicProperties(delivery_mode=2),
mandatory=False)
@mock.patch.object(ChainlinkNodeMonitor, "_process_retrieved_data")
@mock.patch.object(ChainlinkNodeMonitor, "_process_error")
def test_process_data_calls_process_error_on_retrieval_error(
self, mock_process_error, mock_process_retrieved_data) -> None:
# Do not test the processing of data for now
mock_process_error.return_value = self.test_data_dict
self.test_monitor._process_data(True, [self.test_exception],
[self.test_data_dict])
# Test passes if _process_error is called once and
# process_retrieved_data is not called
self.assertEqual(1, mock_process_error.call_count)
self.assertEqual(0, mock_process_retrieved_data.call_count)
@mock.patch.object(ChainlinkNodeMonitor, "_process_retrieved_data")
@mock.patch.object(ChainlinkNodeMonitor, "_process_error")
def test_process_data_calls_process_retrieved_data_on_retrieval_success(
self, mock_process_error, mock_process_retrieved_data) -> None:
# Do not test the processing of data for now
mock_process_retrieved_data.return_value = self.test_data_dict
self.test_monitor._process_data(False, [self.test_exception],
[self.test_data_dict])
# Test passes if _process_error is not called and process_retrieved_data
# is called once
self.assertEqual(0, mock_process_error.call_count)
self.assertEqual(1, mock_process_retrieved_data.call_count)
def test_send_heartbeat_sends_a_heartbeat_correctly(self) -> None:
# This test creates a queue which receives messages with the same
# routing key as the ones sent by send_heartbeat, and checks that the
# heartbeat is received
self.test_monitor._initialise_rabbitmq()
# Delete the queue before to avoid messages in the queue on error.
self.test_monitor.rabbitmq.queue_delete(self.test_queue_name)
res = self.test_monitor.rabbitmq.queue_declare(
queue=self.test_queue_name, durable=True, exclusive=False,
auto_delete=False, passive=False
)
self.assertEqual(0, res.method.message_count)
self.test_monitor.rabbitmq.queue_bind(
queue=self.test_queue_name, exchange=HEALTH_CHECK_EXCHANGE,
routing_key=HEARTBEAT_OUTPUT_WORKER_ROUTING_KEY)
self.test_monitor._send_heartbeat(self.test_heartbeat)
# By re-declaring the queue again we can get the number of messages
# in the queue.
res = self.test_monitor.rabbitmq.queue_declare(
queue=self.test_queue_name, durable=True, exclusive=False,
auto_delete=False, passive=True
)
self.assertEqual(1, res.method.message_count)
# Check that the message received is actually the HB
_, _, body = self.test_monitor.rabbitmq.basic_get(
self.test_queue_name)
self.assertEqual(self.test_heartbeat, json.loads(body))
def test_display_data_returns_the_correct_string_if_all_metrics_present(
self) -> None:
# Test when optionals are not None
expected_output = \
"head_tracker_current_head={}, " \
"head_tracker_heads_received_total={}, " \
"max_unconfirmed_blocks={}, process_start_time_seconds={}, " \
"tx_manager_num_gas_bumps_total={}, " \
"tx_manager_gas_bump_exceeds_limit_total={}, " \
"unconfirmed_transactions={}, gas_updater_set_gas_price={}, " \
"eth_balance={}, run_status_update_total_errors={}" \
"".format(
self.processed_prometheus_data_example[
'head_tracker_current_head'],
self.processed_prometheus_data_example[
'head_tracker_heads_received_total'],
self.processed_prometheus_data_example[
'max_unconfirmed_blocks'],
self.processed_prometheus_data_example[
'process_start_time_seconds'],
self.processed_prometheus_data_example[
'tx_manager_num_gas_bumps_total'],
self.processed_prometheus_data_example[
'tx_manager_gas_bump_exceeds_limit_total'],
self.processed_prometheus_data_example[
'unconfirmed_transactions'],
self.processed_prometheus_data_example[
'gas_updater_set_gas_price'],
self.processed_prometheus_data_example['eth_balance'],
self.processed_prometheus_data_example[
'run_status_update_total_errors']
)
actual_output = self.test_monitor._display_data(
self.processed_prometheus_data_example)
self.assertEqual(expected_output, actual_output)
# Test when optionals are None
expected_output = \
"head_tracker_current_head={}, " \
"head_tracker_heads_received_total={}, " \
"max_unconfirmed_blocks={}, " \
"process_start_time_seconds={}, " \
"tx_manager_num_gas_bumps_total={}, " \
"tx_manager_gas_bump_exceeds_limit_total={}, " \
"unconfirmed_transactions={}, gas_updater_set_gas_price={}, " \
"eth_balance={}, run_status_update_total_errors={}" \
"".format(
self.processed_prometheus_data_example_optionals_none[
'head_tracker_current_head'],
self.processed_prometheus_data_example_optionals_none[
'head_tracker_heads_received_total'],
self.processed_prometheus_data_example_optionals_none[
'max_unconfirmed_blocks'],
self.processed_prometheus_data_example_optionals_none[
'process_start_time_seconds'],
self.processed_prometheus_data_example_optionals_none[
'tx_manager_num_gas_bumps_total'],
self.processed_prometheus_data_example_optionals_none[
'tx_manager_gas_bump_exceeds_limit_total'],
self.processed_prometheus_data_example_optionals_none[
'unconfirmed_transactions'],
self.processed_prometheus_data_example_optionals_none[
'gas_updater_set_gas_price'],
self.processed_prometheus_data_example_optionals_none[
'eth_balance'],
self.processed_prometheus_data_example_optionals_none[
'run_status_update_total_errors']
)
actual_output = self.test_monitor._display_data(
self.processed_prometheus_data_example_optionals_none)
self.assertEqual(expected_output, actual_output)
    def test_display_data_returns_the_correct_string_if_not_all_metrics_present(
            self) -> None:
        """_display_data must substitute "Disabled" for every metric key that
        has been removed from the processed data dict, both when the optional
        metrics hold values and when they are None.
        """
        # Test when optionals are not None
        # Remove two metrics so that _display_data must fall back to the
        # "Disabled" placeholder for them.
        del self.processed_prometheus_data_example['head_tracker_current_head']
        del self.processed_prometheus_data_example['eth_balance']
        expected_output = \
            "head_tracker_current_head={}, " \
            "head_tracker_heads_received_total={}, " \
            "max_unconfirmed_blocks={}, process_start_time_seconds={}, " \
            "tx_manager_num_gas_bumps_total={}, " \
            "tx_manager_gas_bump_exceeds_limit_total={}, " \
            "unconfirmed_transactions={}, gas_updater_set_gas_price={}, " \
            "eth_balance={}, run_status_update_total_errors={}" \
            "".format(
                "Disabled",
                self.processed_prometheus_data_example[
                    'head_tracker_heads_received_total'],
                self.processed_prometheus_data_example[
                    'max_unconfirmed_blocks'],
                self.processed_prometheus_data_example[
                    'process_start_time_seconds'],
                self.processed_prometheus_data_example[
                    'tx_manager_num_gas_bumps_total'],
                self.processed_prometheus_data_example[
                    'tx_manager_gas_bump_exceeds_limit_total'],
                self.processed_prometheus_data_example[
                    'unconfirmed_transactions'],
                self.processed_prometheus_data_example[
                    'gas_updater_set_gas_price'],
                "Disabled",
                self.processed_prometheus_data_example[
                    'run_status_update_total_errors']
            )
        actual_output = self.test_monitor._display_data(
            self.processed_prometheus_data_example)
        self.assertEqual(expected_output, actual_output)
        # Test when optionals are None
        del self.processed_prometheus_data_example_optionals_none[
            'head_tracker_current_head']
        del self.processed_prometheus_data_example_optionals_none[
            'eth_balance']
        expected_output = \
            "head_tracker_current_head={}, " \
            "head_tracker_heads_received_total={}, " \
            "max_unconfirmed_blocks={}, process_start_time_seconds={}, " \
            "tx_manager_num_gas_bumps_total={}, " \
            "tx_manager_gas_bump_exceeds_limit_total={}, " \
            "unconfirmed_transactions={}, gas_updater_set_gas_price={}, " \
            "eth_balance={}, run_status_update_total_errors={}" \
            "".format(
                "Disabled",
                self.processed_prometheus_data_example_optionals_none[
                    'head_tracker_heads_received_total'],
                self.processed_prometheus_data_example_optionals_none[
                    'max_unconfirmed_blocks'],
                self.processed_prometheus_data_example_optionals_none[
                    'process_start_time_seconds'],
                self.processed_prometheus_data_example_optionals_none[
                    'tx_manager_num_gas_bumps_total'],
                self.processed_prometheus_data_example_optionals_none[
                    'tx_manager_gas_bump_exceeds_limit_total'],
                self.processed_prometheus_data_example_optionals_none[
                    'unconfirmed_transactions'],
                self.processed_prometheus_data_example_optionals_none[
                    'gas_updater_set_gas_price'],
                "Disabled",
                self.processed_prometheus_data_example_optionals_none[
                    'run_status_update_total_errors']
            )
        actual_output = self.test_monitor._display_data(
            self.processed_prometheus_data_example_optionals_none)
        self.assertEqual(expected_output, actual_output)
@mock.patch("src.monitors.node.chainlink.get_prometheus_metrics_data")
def test_get_prom_data_first_attempts_retrieval_using_last_prom_source_used(
self, mock_get_prometheus_metrics_data) -> None:
mock_get_prometheus_metrics_data.return_value = \
self.processed_prometheus_data_example
old_last_prometheus_source_used = \
self.test_monitor.last_prometheus_source_used
actual_output = self.test_monitor._get_prometheus_data()
mock_get_prometheus_metrics_data.assert_called_once_with(
old_last_prometheus_source_used, self.prometheus_metrics,
self.dummy_logger, verify=False)
self.assertEqual(self.processed_prometheus_data_example, actual_output)
@mock.patch("src.monitors.node.chainlink.get_prometheus_metrics_data")
def test_get_prom_data_does_not_change_last_prom_sourced_used_if_online(
self, mock_get_prometheus_metrics_data) -> None:
mock_get_prometheus_metrics_data.return_value = \
self.processed_prometheus_data_example
old_last_prometheus_source_used = \
self.test_monitor.last_prometheus_source_used
self.test_monitor._get_prometheus_data()
self.assertEqual(old_last_prometheus_source_used,
self.test_monitor.last_prometheus_source_used)
@parameterized.expand([
(IncompleteRead, IncompleteRead('test'),),
(ChunkedEncodingError, ChunkedEncodingError('test'),),
(ProtocolError, ProtocolError('test'),),
(InvalidURL, InvalidURL('test'),),
(InvalidSchema, InvalidSchema('test'),),
(MissingSchema, MissingSchema('test'),),
(MetricNotFoundException, MetricNotFoundException('test', 'test'),),
(Exception, Exception('test'),),
])
@mock.patch("src.monitors.node.chainlink.get_prometheus_metrics_data")
def test_get_prom_data_raises_non_conn_err_if_last_source_used_on_and_errs(
self, exception_class, exception_instance,
mock_get_prometheus_metrics_data) -> None:
mock_get_prometheus_metrics_data.side_effect = exception_instance
old_last_prometheus_source_used = \
self.test_monitor.last_prometheus_source_used
self.assertRaises(exception_class,
self.test_monitor._get_prometheus_data)
mock_get_prometheus_metrics_data.assert_called_once_with(
old_last_prometheus_source_used, self.prometheus_metrics,
self.dummy_logger, verify=False)
@parameterized.expand([
(IncompleteRead, IncompleteRead('test'),),
(ChunkedEncodingError, ChunkedEncodingError('test'),),
(ProtocolError, ProtocolError('test'),),
(InvalidURL, InvalidURL('test'),),
(InvalidSchema, InvalidSchema('test'),),
(MissingSchema, MissingSchema('test'),),
(MetricNotFoundException, MetricNotFoundException('test', 'test'),),
(Exception, Exception('test'),),
])
@mock.patch("src.monitors.node.chainlink.get_prometheus_metrics_data")
def test_get_prom_data_no_change_last_source_used_if_online_and_it_errors(
self, exception_class, exception_instance,
mock_get_prometheus_metrics_data) -> None:
# Here we are assuming that the error is not connection related
mock_get_prometheus_metrics_data.side_effect = exception_instance
old_last_prometheus_source_used = \
self.test_monitor.last_prometheus_source_used
try:
self.test_monitor._get_data()
except exception_class:
pass
self.assertEqual(old_last_prometheus_source_used,
self.test_monitor.last_prometheus_source_used)
@parameterized.expand([
(ReadTimeout('test'),),
(ReqConnectionError('test'),),
])
@mock.patch("src.monitors.node.chainlink.get_prometheus_metrics_data")
def test_get_prom_data_gets_data_from_online_source_if_last_source_used_off(
self, exception_instance, mock_get_prometheus_metrics_data) -> None:
# In this case we are setting the final source to be online
mock_get_prometheus_metrics_data.side_effect = [
exception_instance, exception_instance, exception_instance,
self.processed_prometheus_data_example]
old_last_prometheus_source_used = \
self.test_monitor.last_prometheus_source_used
actual_output = self.test_monitor._get_prometheus_data()
actual_calls = mock_get_prometheus_metrics_data.call_args_list
self.assertEqual(4, len(actual_calls))
# In this case there are two calls to
# self.test_monitor.node_config._node_prometheus_urls[0] because
# initially this url was also the last prometheus source used.
expected_calls = [call(old_last_prometheus_source_used,
self.prometheus_metrics, self.dummy_logger,
verify=False)]
for i in range(0, len(self.node_prometheus_urls)):
expected_calls.append(call(
self.test_monitor.node_config.node_prometheus_urls[i],
self.prometheus_metrics, self.dummy_logger, verify=False))
self.assertEqual(expected_calls, actual_calls)
self.assertEqual(self.processed_prometheus_data_example, actual_output)
@parameterized.expand([
(ReadTimeout('test'),),
(ReqConnectionError('test'),),
])
@mock.patch("src.monitors.node.chainlink.get_prometheus_metrics_data")
def test_get_prom_data_changes_last_source_if_last_source_off_other_node_on(
self, exception_instance, mock_get_prometheus_metrics_data) -> None:
# In this case we are setting the final source to be online
mock_get_prometheus_metrics_data.side_effect = [
exception_instance, exception_instance, exception_instance,
self.processed_prometheus_data_example]
self.test_monitor._get_prometheus_data()
self.assertEqual(self.test_monitor.node_config.node_prometheus_urls[-1],
self.test_monitor.last_prometheus_source_used)
@parameterized.expand([
(IncompleteRead, IncompleteRead('test'),),
(ChunkedEncodingError, ChunkedEncodingError('test'),),
(ProtocolError, ProtocolError('test'),),
(InvalidURL, InvalidURL('test'),),
(InvalidSchema, InvalidSchema('test'),),
(MissingSchema, MissingSchema('test'),),
(MetricNotFoundException, MetricNotFoundException('test', 'test'),),
(Exception, Exception('test'),),
])
@mock.patch("src.monitors.node.chainlink.get_prometheus_metrics_data")
def test_get_prom_data_raises_non_connection_err_if_online_source_errors(
self, exception_class, exception_instance,
mock_get_prometheus_metrics_data) -> None:
# Here we will assume that the last prometheus source used was deemed as
# offline as we have already tested the online case in a previous test.
# We will also assume that the second source is online but it errors.
mock_get_prometheus_metrics_data.side_effect = [
ReqConnectionError('test'), ReqConnectionError('test'),
exception_instance]
old_last_prometheus_source_used = \
self.test_monitor.last_prometheus_source_used
self.assertRaises(exception_class,
self.test_monitor._get_prometheus_data)
actual_calls = mock_get_prometheus_metrics_data.call_args_list
self.assertEqual(3, len(actual_calls))
self.assertEqual([
call(old_last_prometheus_source_used, self.prometheus_metrics,
self.dummy_logger, verify=False),
call(self.test_monitor.node_config._node_prometheus_urls[0],
self.prometheus_metrics, self.dummy_logger, verify=False),
call(self.test_monitor.node_config._node_prometheus_urls[1],
self.prometheus_metrics, self.dummy_logger, verify=False)],
actual_calls)
@parameterized.expand([
(IncompleteRead, IncompleteRead('test'),),
(ChunkedEncodingError, ChunkedEncodingError('test'),),
(ProtocolError, ProtocolError('test'),),
(InvalidURL, InvalidURL('test'),),
(InvalidSchema, InvalidSchema('test'),),
(MissingSchema, MissingSchema('test'),),
(MetricNotFoundException, MetricNotFoundException('test', 'test'),),
(Exception, Exception('test'),),
])
@mock.patch("src.monitors.node.chainlink.get_prometheus_metrics_data")
def test_get_prom_data_changes_last_prom_source_used_if_online_source_errs(
self, exception_class, exception_instance,
mock_get_prometheus_metrics_data) -> None:
# Here we will assume that the last prometheus source used was deemed as
# offline as we have already tested when it is online in a previous
# test. We will also assume that the second source is online but it
# errors.
mock_get_prometheus_metrics_data.side_effect = [
ReqConnectionError('test'), ReqConnectionError('test'),
exception_instance]
try:
self.test_monitor._get_prometheus_data()
except exception_class:
pass
self.assertEqual(self.test_monitor.node_config.node_prometheus_urls[1],
self.test_monitor.last_prometheus_source_used)
@parameterized.expand([
(ReadTimeout('test'),),
(ReqConnectionError('test'),),
])
@mock.patch("src.monitors.node.chainlink.get_prometheus_metrics_data")
def test_get_prom_data_raises_NodeIsDownException_if_all_prom_sources_down(
self, exception_instance, mock_get_prometheus_metrics_data) -> None:
mock_get_prometheus_metrics_data.side_effect = [
exception_instance, exception_instance, exception_instance,
exception_instance]
old_last_prometheus_source_used = \
self.test_monitor.last_prometheus_source_used
self.assertRaises(NodeIsDownException,
self.test_monitor._get_prometheus_data)
actual_calls = mock_get_prometheus_metrics_data.call_args_list
self.assertEqual(4, len(actual_calls))
# In this case there are two calls to
# self.test_monitor.node_config._node_prometheus_urls[0] because
# initially this url was also the last prometheus source used.
expected_calls = [call(old_last_prometheus_source_used,
self.prometheus_metrics, self.dummy_logger,
verify=False)]
for i in range(0, len(self.node_prometheus_urls)):
expected_calls.append(call(
self.test_monitor.node_config.node_prometheus_urls[i],
self.prometheus_metrics, self.dummy_logger, verify=False))
self.assertEqual(expected_calls, actual_calls)
@parameterized.expand([
(ReadTimeout('test'),),
(ReqConnectionError('test'),),
])
@mock.patch("src.monitors.node.chainlink.get_prometheus_metrics_data")
def test_get_prom_data_does_not_change_last_prom_source_used_if_all_down(
self, exception_instance, mock_get_prometheus_metrics_data) -> None:
mock_get_prometheus_metrics_data.side_effect = [
exception_instance, exception_instance, exception_instance,
exception_instance]
old_last_prometheus_source_used = \
self.test_monitor.last_prometheus_source_used
try:
self.test_monitor._get_prometheus_data()
except NodeIsDownException:
pass
self.assertEqual(old_last_prometheus_source_used,
self.test_monitor.last_prometheus_source_used)
    @parameterized.expand([
        ('self.received_retrieval_info_prometheus_disabled', [], False,),
        ('self.received_retrieval_info_all_source_types_enabled',
         ['self.retrieved_prometheus_data_example'], True,),
    ])
    @mock.patch("src.monitors.node.chainlink.get_prometheus_metrics_data")
    def test_get_data_return_if_no_errors_raised(
            self, expected_return, retrieved_prometheus_data,
            monitor_prometheus, mock_get_prometheus_metrics_data) -> None:
        """_get_data must return the expected retrieval-info dict when no
        source errors, both with prometheus monitoring enabled and disabled.
        """
        # NOTE: the parameterized fixtures are 'self....' strings resolved
        # with eval in this frame. map(eval, ...) must not be rewritten as a
        # comprehension — a comprehension's inner scope would hide `self`
        # from eval's default frame lookup.
        get_prometheus_metrics_data_return = list(map(
            eval, retrieved_prometheus_data))
        mock_get_prometheus_metrics_data.side_effect = \
            get_prometheus_metrics_data_return
        self.test_monitor._node_config._monitor_prometheus = monitor_prometheus
        actual_ret = self.test_monitor._get_data()
        expected_ret = eval(expected_return)
        self.assertEqual(expected_ret, actual_ret)
    @parameterized.expand([
        ("IncompleteRead('test')",
         "DataReadingException(self.test_monitor.monitor_name, "
         "self.test_monitor.last_prometheus_source_used)",
         'self.received_retrieval_info_all_source_types_enabled_err', True,
         'prometheus'),
        ("ChunkedEncodingError('test')",
         "DataReadingException(self.test_monitor.monitor_name, "
         "self.test_monitor.last_prometheus_source_used)",
         'self.received_retrieval_info_all_source_types_enabled_err', True,
         'prometheus'),
        ("ProtocolError('test')",
         "DataReadingException(self.test_monitor.monitor_name, "
         "self.test_monitor.last_prometheus_source_used)",
         'self.received_retrieval_info_all_source_types_enabled_err', True,
         'prometheus'),
        ("InvalidURL('test')",
         "InvalidUrlException(self.test_monitor.last_prometheus_source_used)",
         'self.received_retrieval_info_all_source_types_enabled_err', True,
         'prometheus'),
        ("InvalidSchema('test')",
         "InvalidUrlException(self.test_monitor.last_prometheus_source_used)",
         'self.received_retrieval_info_all_source_types_enabled_err', True,
         'prometheus'),
        ("MissingSchema('test')",
         "InvalidUrlException(self.test_monitor.last_prometheus_source_used)",
         'self.received_retrieval_info_all_source_types_enabled_err', True,
         'prometheus'),
        ("MetricNotFoundException('test_metric', 'test_endpoint')",
         "MetricNotFoundException('test_metric', 'test_endpoint')",
         'self.received_retrieval_info_all_source_types_enabled_err', True,
         'prometheus'),
        ('NodeIsDownException(self.node_name)',
         'NodeIsDownException(self.node_name)',
         'self.received_retrieval_info_all_source_types_enabled_err', True,
         'prometheus'),
    ])
    @mock.patch("src.monitors.node.chainlink.get_prometheus_metrics_data")
    def test_get_data_return_if_recognised_error_raised(
            self, raised_err, returned_err, expected_return, monitor_prometheus,
            errored_source_type, mock_get_prometheus_metrics_data) -> None:
        """_get_data must map each recognised retrieval error to the expected
        wrapped exception inside the returned retrieval-info dict.

        The parameterized fixtures are source strings resolved with eval in
        this frame so they can reference `self`.
        """
        # This test will be expanded when adding more source types to cater for
        # when monitor_prometheus is False
        mock_get_prometheus_metrics_data.side_effect = \
            eval(raised_err) if errored_source_type == "prometheus" else None
        self.test_monitor._node_config._monitor_prometheus = monitor_prometheus
        actual_ret = self.test_monitor._get_data()
        expected_ret = eval(expected_return)
        expected_ret[errored_source_type][
            'data_retrieval_exception'] = eval(returned_err)
        self.assertEqual(expected_ret, actual_ret)
@mock.patch("src.monitors.node.chainlink.get_prometheus_metrics_data")
def test_get_data_raises_unrecognised_error_if_raised(
self, mock_get_prometheus_metrics_data) -> None:
mock_get_prometheus_metrics_data.side_effect = self.test_exception
self.assertRaises(PANICException, self.test_monitor._get_data)
    @parameterized.expand([
        ("self.test_monitor.last_prometheus_source_used",),
    ])
    @freeze_time("2012-01-01")
    def test_process_error_returns_expected_data(self,
                                                 last_source_used) -> None:
        """_process_error must wrap the exception in an 'error' envelope
        carrying the monitor/node meta data, the last source used, and the
        (frozen) timestamp.

        The parameterized fixture is a source string resolved with eval in
        this frame so it can reference `self`.
        """
        # We will add more parameters to this test as the source types increase
        expected_output = {
            'error': {
                'meta_data': {
                    'monitor_name': self.test_monitor.monitor_name,
                    'node_name': self.test_monitor.node_config.node_name,
                    'last_source_used': eval(last_source_used),
                    'node_id': self.test_monitor.node_config.node_id,
                    'node_parent_id': self.test_monitor.node_config.parent_id,
                    'time': datetime(2012, 1, 1).timestamp()
                },
                'message': self.test_exception.message,
                'code': self.test_exception.code,
            }
        }
        actual_output = self.test_monitor._process_error(self.test_exception,
                                                         eval(last_source_used))
        self.assertEqual(actual_output, expected_output)
    @parameterized.expand([
        ("self.processed_prometheus_data_example",
         "self.retrieved_prometheus_data_example"),
        ("self.processed_prometheus_data_example_optionals_none",
         "self.retrieved_prometheus_data_example_optionals_none"),
    ])
    @freeze_time("2012-01-01")
    def test_process_retrieved_prometheus_data_returns_expected_data(
            self, expected_data_output, retrieved_data) -> None:
        """_process_retrieved_prometheus_data must wrap the processed data in
        a 'result' envelope with monitor/node meta data and the (frozen)
        timestamp, for data with and without optional values.

        The parameterized fixtures are source strings resolved with eval in
        this frame so they can reference `self`.
        """
        expected_output = {
            'result': {
                'meta_data': {
                    'monitor_name': self.test_monitor.monitor_name,
                    'node_name': self.test_monitor.node_config.node_name,
                    'last_source_used':
                        self.test_monitor.last_prometheus_source_used,
                    'node_id': self.test_monitor.node_config.node_id,
                    'node_parent_id': self.test_monitor.node_config.parent_id,
                    'time': datetime(2012, 1, 1).timestamp()
                },
                'data': eval(expected_data_output),
            }
        }
        actual_output = self.test_monitor._process_retrieved_prometheus_data(
            eval(retrieved_data))
        self.assertEqual(expected_output, actual_output)
def test_process_retrieved_data_returns_the_correct_dict(self) -> None:
def test_fn(x: Dict): return x
actual_ret = self.test_monitor._process_retrieved_data(
test_fn, self.test_data_dict)
expected_ret = test_fn(self.test_data_dict)
self.assertEqual(expected_ret, actual_ret)
    def test_send_data_sends_data_correctly(self) -> None:
        """_send_data must publish the processed data to the raw-data
        exchange under the chainlink-node routing key.

        Uses a live RabbitMQ connection from setUp; the declare/bind order
        below is deliberate.
        """
        # This test creates a queue which receives messages with the same
        # routing key as the ones sent by send_data, and checks that the
        # data is received
        self.test_monitor._initialise_rabbitmq()
        # Delete the queue before to avoid messages in the queue on error.
        self.test_monitor.rabbitmq.queue_delete(self.test_queue_name)
        res = self.test_monitor.rabbitmq.queue_declare(
            queue=self.test_queue_name, durable=True, exclusive=False,
            auto_delete=False, passive=False
        )
        self.assertEqual(0, res.method.message_count)
        self.test_monitor.rabbitmq.queue_bind(
            queue=self.test_queue_name, exchange=RAW_DATA_EXCHANGE,
            routing_key=CHAINLINK_NODE_RAW_DATA_ROUTING_KEY)
        self.test_monitor._send_data(self.processed_prometheus_data_example)
        # By re-declaring the queue again we can get the number of messages
        # in the queue.
        res = self.test_monitor.rabbitmq.queue_declare(
            queue=self.test_queue_name, durable=True, exclusive=False,
            auto_delete=False, passive=True
        )
        self.assertEqual(1, res.method.message_count)
        # Check that the message received is actually the processed data
        _, _, body = self.test_monitor.rabbitmq.basic_get(self.test_queue_name)
        self.assertEqual(self.processed_prometheus_data_example,
                         json.loads(body))
    @freeze_time("2012-01-01")
    @mock.patch.object(ChainlinkNodeMonitor, "_get_data")
    def test_monitor_sends_data_and_hb_if_data_retrieve_and_processing_success(
            self, mock_get_data) -> None:
        """On a fully successful round, _monitor must publish both the
        processed data (raw-data exchange) and a heartbeat (health-check
        exchange). Uses a live RabbitMQ connection from setUp.
        """
        # Here we are assuming that all sources are enabled.
        expected_output_data = {
            'prometheus': {
                'result': {
                    'meta_data': {
                        'monitor_name': self.test_monitor.monitor_name,
                        'node_name': self.test_monitor.node_config.node_name,
                        'last_source_used':
                            self.test_monitor.last_prometheus_source_used,
                        'node_id': self.test_monitor.node_config.node_id,
                        'node_parent_id':
                            self.test_monitor.node_config.parent_id,
                        'time': datetime(2012, 1, 1).timestamp()
                    },
                    'data': self.processed_prometheus_data_example,
                }
            }
        }
        expected_output_hb = {
            'component_name': self.test_monitor.monitor_name,
            'is_alive': True,
            'timestamp': datetime(2012, 1, 1).timestamp()
        }
        mock_get_data.return_value = \
            self.received_retrieval_info_all_source_types_enabled
        self.test_monitor._initialise_rabbitmq()
        # Delete the queue before to avoid messages in the queue on error.
        self.test_monitor.rabbitmq.queue_delete(self.test_queue_name)
        res = self.test_monitor.rabbitmq.queue_declare(
            queue=self.test_queue_name, durable=True, exclusive=False,
            auto_delete=False, passive=False
        )
        self.assertEqual(0, res.method.message_count)
        self.test_monitor.rabbitmq.queue_bind(
            queue=self.test_queue_name, exchange=RAW_DATA_EXCHANGE,
            routing_key=CHAINLINK_NODE_RAW_DATA_ROUTING_KEY)
        self.test_monitor.rabbitmq.queue_bind(
            queue=self.test_queue_name, exchange=HEALTH_CHECK_EXCHANGE,
            routing_key=HEARTBEAT_OUTPUT_WORKER_ROUTING_KEY)
        self.test_monitor._monitor()
        # By re-declaring the queue again we can get the number of messages
        # in the queue.
        res = self.test_monitor.rabbitmq.queue_declare(
            queue=self.test_queue_name, durable=True, exclusive=False,
            auto_delete=False, passive=True
        )
        # There must be 2 messages in the queue, the heartbeat and the
        # processed data
        self.assertEqual(2, res.method.message_count)
        # Check that the message received is actually the processed data
        _, _, body = self.test_monitor.rabbitmq.basic_get(
            self.test_queue_name)
        self.assertEqual(expected_output_data, json.loads(body))
        # Check that the message received is actually the HB
        _, _, body = self.test_monitor.rabbitmq.basic_get(
            self.test_queue_name)
        self.assertEqual(expected_output_hb, json.loads(body))
    @parameterized.expand([
        (False, ['prometheus'],)
    ])
    @freeze_time("2012-01-01")
    @mock.patch.object(ChainlinkNodeMonitor, "_get_data")
    def test_monitor_sends_empty_dict_for_disabled_source(
            self, monitor_prometheus, disabled_sources, mock_get_data) -> None:
        """For every disabled source type, _monitor must publish an empty
        dict in that source's slot while still sending a heartbeat. Uses a
        live RabbitMQ connection from setUp.
        """
        # Once more sources are added this test will make more sense.
        self.test_monitor.node_config._monitor_prometheus = monitor_prometheus
        expected_output_data = {
            'prometheus': {
                'result': {
                    'meta_data': {
                        'monitor_name': self.test_monitor.monitor_name,
                        'node_name': self.test_monitor.node_config.node_name,
                        'last_source_used':
                            self.test_monitor.last_prometheus_source_used,
                        'node_id': self.test_monitor.node_config.node_id,
                        'node_parent_id':
                            self.test_monitor.node_config.parent_id,
                        'time': datetime(2012, 1, 1).timestamp()
                    },
                    'data': self.processed_prometheus_data_example,
                }
            }
        }
        # Each disabled source's payload is replaced by an empty dict.
        for disabled_source in disabled_sources:
            expected_output_data[disabled_source] = {}
        expected_output_hb = {
            'component_name': self.test_monitor.monitor_name,
            'is_alive': True,
            'timestamp': datetime(2012, 1, 1).timestamp()
        }
        # We can get all data since that won't effect how _monitor() works
        mock_get_data.return_value = \
            self.received_retrieval_info_all_source_types_enabled
        self.test_monitor._initialise_rabbitmq()
        # Delete the queue before to avoid messages in the queue on error.
        self.test_monitor.rabbitmq.queue_delete(self.test_queue_name)
        res = self.test_monitor.rabbitmq.queue_declare(
            queue=self.test_queue_name, durable=True, exclusive=False,
            auto_delete=False, passive=False
        )
        self.assertEqual(0, res.method.message_count)
        self.test_monitor.rabbitmq.queue_bind(
            queue=self.test_queue_name, exchange=RAW_DATA_EXCHANGE,
            routing_key=CHAINLINK_NODE_RAW_DATA_ROUTING_KEY)
        self.test_monitor.rabbitmq.queue_bind(
            queue=self.test_queue_name, exchange=HEALTH_CHECK_EXCHANGE,
            routing_key=HEARTBEAT_OUTPUT_WORKER_ROUTING_KEY)
        self.test_monitor._monitor()
        # By re-declaring the queue again we can get the number of messages
        # in the queue.
        res = self.test_monitor.rabbitmq.queue_declare(
            queue=self.test_queue_name, durable=True, exclusive=False,
            auto_delete=False, passive=True
        )
        # There must be 2 messages in the queue, the heartbeat and the
        # processed data
        self.assertEqual(2, res.method.message_count)
        # Check that the message received is actually the processed data
        _, _, body = self.test_monitor.rabbitmq.basic_get(
            self.test_queue_name)
        self.assertEqual(expected_output_data, json.loads(body))
        # Check that the message received is actually the HB
        _, _, body = self.test_monitor.rabbitmq.basic_get(
            self.test_queue_name)
        self.assertEqual(expected_output_hb, json.loads(body))
    @parameterized.expand([
        (['self.test_exception'],)
    ])
    @mock.patch.object(ChainlinkNodeMonitor, "_process_data")
    @mock.patch.object(ChainlinkNodeMonitor, "_get_data")
    def test_monitor_sends_no_data_and_hb_if_data_ret_success_and_proc_fails(
            self, process_data_side_effect, mock_get_data,
            mock_process_data) -> None:
        """If processing fails after a successful retrieval, _monitor must
        publish neither the data nor a heartbeat. Uses a live RabbitMQ
        connection from setUp; the parameterized fixture is a source string
        resolved with eval (via map, so `self` stays visible to eval).
        """
        # This test will be expanded further once more sources are added. We
        # can eventually test for when example the first source is processed
        # correctly but the second fails.
        process_data_side_effect_eval = list(map(
            eval, process_data_side_effect))
        mock_process_data.side_effect = process_data_side_effect_eval
        mock_get_data.return_value = \
            self.received_retrieval_info_all_source_types_enabled
        self.test_monitor._initialise_rabbitmq()
        # Delete the queue before to avoid messages in the queue on error.
        self.test_monitor.rabbitmq.queue_delete(self.test_queue_name)
        res = self.test_monitor.rabbitmq.queue_declare(
            queue=self.test_queue_name, durable=True, exclusive=False,
            auto_delete=False, passive=False
        )
        self.assertEqual(0, res.method.message_count)
        self.test_monitor.rabbitmq.queue_bind(
            queue=self.test_queue_name, exchange=RAW_DATA_EXCHANGE,
            routing_key=CHAINLINK_NODE_RAW_DATA_ROUTING_KEY)
        self.test_monitor.rabbitmq.queue_bind(
            queue=self.test_queue_name, exchange=HEALTH_CHECK_EXCHANGE,
            routing_key=HEARTBEAT_OUTPUT_WORKER_ROUTING_KEY)
        self.test_monitor._monitor()
        # By re-declaring the queue again we can get the number of messages
        # in the queue.
        res = self.test_monitor.rabbitmq.queue_declare(
            queue=self.test_queue_name, durable=True, exclusive=False,
            auto_delete=False, passive=True
        )
        # There must be 0 messages in the queue.
        self.assertEqual(0, res.method.message_count)
    @mock.patch.object(ChainlinkNodeMonitor, "_get_data")
    def test_monitor_sends_no_data_and_no_hb_on_get_data_unexpected_exception(
            self, mock_get_data) -> None:
        """If _get_data raises unexpectedly, _monitor must raise
        PANICException and publish neither data nor a heartbeat. Uses a live
        RabbitMQ connection from setUp.
        """
        mock_get_data.side_effect = self.test_exception
        self.test_monitor._initialise_rabbitmq()
        # Delete the queue before to avoid messages in the queue on error.
        self.test_monitor.rabbitmq.queue_delete(self.test_queue_name)
        res = self.test_monitor.rabbitmq.queue_declare(
            queue=self.test_queue_name, durable=True, exclusive=False,
            auto_delete=False, passive=False
        )
        self.assertEqual(0, res.method.message_count)
        self.test_monitor.rabbitmq.queue_bind(
            queue=self.test_queue_name, exchange=RAW_DATA_EXCHANGE,
            routing_key=CHAINLINK_NODE_RAW_DATA_ROUTING_KEY)
        self.test_monitor.rabbitmq.queue_bind(
            queue=self.test_queue_name, exchange=HEALTH_CHECK_EXCHANGE,
            routing_key=HEARTBEAT_OUTPUT_WORKER_ROUTING_KEY)
        self.assertRaises(PANICException, self.test_monitor._monitor)
        # By re-declaring the queue again we can get the number of messages
        # in the queue.
        res = self.test_monitor.rabbitmq.queue_declare(
            queue=self.test_queue_name, durable=True, exclusive=False,
            auto_delete=False, passive=True
        )
        # There must be 0 messages in the queue.
        self.assertEqual(0, res.method.message_count)
@mock.patch.object(ChainlinkNodeMonitor, "_get_data")
def test_monitor_raises_msg_not_delivered_exception_if_data_not_routed(
self, mock_get_data) -> None:
mock_get_data.return_value = \
self.received_retrieval_info_all_source_types_enabled
self.test_monitor._initialise_rabbitmq()
self.assertRaises(MessageWasNotDeliveredException,
self.test_monitor._monitor)
@parameterized.expand([
(AMQPConnectionError, AMQPConnectionError('test'),),
(AMQPChannelError, AMQPChannelError('test'),),
(InvalidUrlException, InvalidUrlException('test'),),
(DataReadingException, DataReadingException('test', 'test'),),
(Exception, Exception('test'),),
])
@mock.patch.object(ChainlinkNodeMonitor, "_send_data")
@mock.patch.object(ChainlinkNodeMonitor, "_get_data")
def test_monitor_raises_error_if_raised_by_send_data(
self, exception_class, exception_instance, mock_get_data,
mock_send_data) -> None:
mock_get_data.return_value = \
self.received_retrieval_info_all_source_types_enabled
mock_send_data.side_effect = exception_instance
self.test_monitor._initialise_rabbitmq()
self.assertRaises(exception_class, self.test_monitor._monitor)
    @parameterized.expand([
        (AMQPConnectionError, AMQPConnectionError('test'),),
        (AMQPChannelError, AMQPChannelError('test'),),
        (MessageWasNotDeliveredException,
         MessageWasNotDeliveredException('test'),),
        (Exception, Exception('test'),),
    ])
    @mock.patch.object(ChainlinkNodeMonitor, "_send_data")
    @mock.patch.object(ChainlinkNodeMonitor, "_get_data")
    def test_monitor_does_not_send_hb_and_data_if_send_data_fails(
            self, exception_class, exception_instance, mock_get_data,
            mock_send_data) -> None:
        """If _send_data fails, neither the data nor a heartbeat may end up
        in the queue. Uses a live RabbitMQ connection from setUp.
        """
        mock_get_data.return_value = \
            self.received_retrieval_info_all_source_types_enabled
        mock_send_data.side_effect = exception_instance
        self.test_monitor._initialise_rabbitmq()
        # Delete the queue before to avoid messages in the queue on error.
        self.test_monitor.rabbitmq.queue_delete(
            self.test_queue_name)
        res = self.test_monitor.rabbitmq.queue_declare(
            queue=self.test_queue_name, durable=True, exclusive=False,
            auto_delete=False, passive=False
        )
        self.assertEqual(0, res.method.message_count)
        self.test_monitor.rabbitmq.queue_bind(
            queue=self.test_queue_name,
            exchange=HEALTH_CHECK_EXCHANGE,
            routing_key=HEARTBEAT_OUTPUT_WORKER_ROUTING_KEY)
        self.test_monitor.rabbitmq.queue_bind(
            queue=self.test_queue_name, exchange=RAW_DATA_EXCHANGE,
            routing_key=CHAINLINK_NODE_RAW_DATA_ROUTING_KEY)
        # The raised error is expected; swallow it and inspect the queue.
        try:
            self.test_monitor._monitor()
        except exception_class:
            pass
        # By re-declaring the queue again we can get the number of
        # messages in the queue.
        res = self.test_monitor.rabbitmq.queue_declare(
            queue=self.test_queue_name, durable=True,
            exclusive=False, auto_delete=False, passive=True
        )
        # There must be no messages in the queue.
        self.assertEqual(0, res.method.message_count)
    @freeze_time("2012-01-01")
    @mock.patch.object(ChainlinkNodeMonitor, "_get_data")
    def test_monitor_raises_msg_not_del_except_if_hb_not_routed_and_sends_data(
            self, mock_get_data) -> None:
        """If the heartbeat cannot be routed (no queue bound to the
        health-check exchange), _monitor must raise
        MessageWasNotDeliveredException yet the processed data must already
        have been published. Uses a live RabbitMQ connection from setUp.
        """
        mock_get_data.return_value = \
            self.received_retrieval_info_all_source_types_enabled
        expected_output_data = {
            'prometheus': {
                'result': {
                    'meta_data': {
                        'monitor_name': self.test_monitor.monitor_name,
                        'node_name': self.test_monitor.node_config.node_name,
                        'last_source_used':
                            self.test_monitor.last_prometheus_source_used,
                        'node_id': self.test_monitor.node_config.node_id,
                        'node_parent_id':
                            self.test_monitor.node_config.parent_id,
                        'time': datetime(2012, 1, 1).timestamp()
                    },
                    'data': self.processed_prometheus_data_example,
                }
            }
        }
        self.test_monitor._initialise_rabbitmq()
        self.test_monitor.rabbitmq.queue_delete(self.test_queue_name)
        res = self.test_monitor.rabbitmq.queue_declare(
            queue=self.test_queue_name, durable=True, exclusive=False,
            auto_delete=False, passive=False
        )
        self.assertEqual(0, res.method.message_count)
        # Only the raw-data exchange is bound — heartbeats have no route.
        self.test_monitor.rabbitmq.queue_bind(
            queue=self.test_queue_name, exchange=RAW_DATA_EXCHANGE,
            routing_key=CHAINLINK_NODE_RAW_DATA_ROUTING_KEY)
        self.assertRaises(MessageWasNotDeliveredException,
                          self.test_monitor._monitor)
        # By re-declaring the queue again we can get the number of
        # messages in the queue.
        res = self.test_monitor.rabbitmq.queue_declare(
            queue=self.test_queue_name, durable=True, exclusive=False,
            auto_delete=False, passive=True
        )
        # There must be 1 message in the queue, the processed data
        self.assertEqual(1, res.method.message_count)
        # Check that the message received is actually the processed data
        _, _, body = self.test_monitor.rabbitmq.basic_get(
            self.test_queue_name)
        self.assertEqual(expected_output_data, json.loads(body))
    @parameterized.expand([
        (AMQPConnectionError, AMQPConnectionError('test'),),
        (AMQPChannelError, AMQPChannelError('test'),),
        (Exception, Exception('test'),),
    ])
    @freeze_time("2012-01-01")
    @mock.patch.object(ChainlinkNodeMonitor, "_send_heartbeat")
    @mock.patch.object(ChainlinkNodeMonitor, "_get_data")
    def test_monitor_raises_error_if_raised_by_send_hb_and_sends_data(
            self, exception_class, exception_instance, mock_get_data,
            mock_send_hb) -> None:
        """An error raised by _send_heartbeat must propagate out of _monitor,
        but the processed data must already have been published. Uses a live
        RabbitMQ connection from setUp.
        """
        mock_get_data.return_value = \
            self.received_retrieval_info_all_source_types_enabled
        expected_output_data = {
            'prometheus': {
                'result': {
                    'meta_data': {
                        'monitor_name': self.test_monitor.monitor_name,
                        'node_name': self.test_monitor.node_config.node_name,
                        'last_source_used':
                            self.test_monitor.last_prometheus_source_used,
                        'node_id': self.test_monitor.node_config.node_id,
                        'node_parent_id':
                            self.test_monitor.node_config.parent_id,
                        'time': datetime(2012, 1, 1).timestamp()
                    },
                    'data': self.processed_prometheus_data_example,
                }
            }
        }
        mock_send_hb.side_effect = exception_instance
        self.test_monitor._initialise_rabbitmq()
        self.test_monitor.rabbitmq.queue_delete(self.test_queue_name)
        res = self.test_monitor.rabbitmq.queue_declare(
            queue=self.test_queue_name, durable=True, exclusive=False,
            auto_delete=False, passive=False
        )
        self.assertEqual(0, res.method.message_count)
        self.test_monitor.rabbitmq.queue_bind(
            queue=self.test_queue_name, exchange=HEALTH_CHECK_EXCHANGE,
            routing_key=HEARTBEAT_OUTPUT_WORKER_ROUTING_KEY)
        self.test_monitor.rabbitmq.queue_bind(
            queue=self.test_queue_name, exchange=RAW_DATA_EXCHANGE,
            routing_key=CHAINLINK_NODE_RAW_DATA_ROUTING_KEY)
        self.assertRaises(exception_class, self.test_monitor._monitor)
        # By re-declaring the queue again we can get the number of
        # messages in the queue.
        res = self.test_monitor.rabbitmq.queue_declare(
            queue=self.test_queue_name, durable=True, exclusive=False,
            auto_delete=False, passive=True
        )
        # There must be 1 message in the queue, the processed data
        self.assertEqual(1, res.method.message_count)
        # Check that the message received is actually the processed data
        _, _, body = self.test_monitor.rabbitmq.basic_get(
            self.test_queue_name)
        self.assertEqual(expected_output_data, json.loads(body))
@mock.patch.object(logging.Logger, "info")
@mock.patch.object(ChainlinkNodeMonitor, "_send_heartbeat")
@mock.patch.object(ChainlinkNodeMonitor, "_send_data")
@mock.patch.object(ChainlinkNodeMonitor, "_get_data")
def test_monitor_logs_data_if_all_sources_enabled_and_no_retrieval_error(
self, mock_get_data, mock_send_data, mock_send_hb,
mock_log) -> None:
mock_send_data.return_value = None
mock_send_hb.return_value = None
mock_get_data.return_value = \
self.received_retrieval_info_all_source_types_enabled
self.test_monitor._monitor()
mock_log.assert_called_with(self.test_monitor._display_data(
self.processed_prometheus_data_example))
    @mock.patch.object(logging.Logger, "info")
    @mock.patch.object(ChainlinkNodeMonitor, "_send_heartbeat")
    @mock.patch.object(ChainlinkNodeMonitor, "_send_data")
    @mock.patch.object(ChainlinkNodeMonitor, "_get_data")
    def test_monitor_does_not_log_if_no_retrieval_performed(
            self, mock_get_data, mock_send_data, mock_send_hb,
            mock_log) -> None:
        """When every source is disabled, _monitor must not log processed
        data (there is nothing to display)."""
        # This needs to be updated as we increase the number of sources
        mock_send_data.return_value = None
        mock_send_hb.return_value = None
        mock_get_data.return_value = \
            self.received_retrieval_info_all_source_types_enabled
        # Disable the only source currently supported, so no retrieval runs.
        self.test_monitor.node_config._monitor_prometheus = False
        # ChainMap over a one-element list is deliberate: when more sources
        # are added their processed dicts will be merged here too.
        processed_data = dict(ChainMap(
            *[self.processed_prometheus_data_example]))
        self.test_monitor._monitor()
        assert_not_called_with(mock_log,
                               self.test_monitor._display_data(processed_data))
@mock.patch.object(logging.Logger, "info")
@mock.patch.object(ChainlinkNodeMonitor, "_send_heartbeat")
@mock.patch.object(ChainlinkNodeMonitor, "_send_data")
@mock.patch.object(ChainlinkNodeMonitor, "_get_data")
def test_monitor_does_not_log_if_retrieval_error(
self, mock_get_data, mock_send_data, mock_send_hb,
mock_log) -> None:
# This needs to be updated as we increase the number of sources
mock_send_data.return_value = None
mock_send_hb.return_value = None
mock_get_data.return_value = \
self.received_retrieval_info_all_source_types_enabled_err
processed_data = {}
self.test_monitor._monitor()
assert_not_called_with(mock_log,
self.test_monitor._display_data(processed_data))
# TODO: When more sources are added we need to test for when some sources
# : are enabled and some disabled
| 47.805674
| 80
| 0.658161
| 7,391
| 67,406
| 5.584224
| 0.058991
| 0.051947
| 0.070143
| 0.042522
| 0.833523
| 0.800863
| 0.779638
| 0.759407
| 0.73055
| 0.717006
| 0
| 0.012942
| 0.26063
| 67,406
| 1,409
| 81
| 47.839603
| 0.815201
| 0.080112
| 0
| 0.670951
| 0
| 0
| 0.151902
| 0.102555
| 0
| 0
| 0
| 0.001419
| 0.068552
| 1
| 0.03856
| false
| 0.018852
| 0.020566
| 0.000857
| 0.059983
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f2989a63525352cb9639a86eb6d55e031e5791e9
| 2,935
|
py
|
Python
|
tests/test_projects.py
|
geraxe/dolib
|
2728db044a65b0bba15e7bfbc633d24a21b955d0
|
[
"MIT"
] | 5
|
2020-05-30T05:20:06.000Z
|
2021-05-21T21:42:34.000Z
|
tests/test_projects.py
|
geraxe/dolib
|
2728db044a65b0bba15e7bfbc633d24a21b955d0
|
[
"MIT"
] | 17
|
2020-05-30T08:17:10.000Z
|
2021-06-20T13:26:37.000Z
|
tests/test_projects.py
|
geraxe/dolib
|
2728db044a65b0bba15e7bfbc633d24a21b955d0
|
[
"MIT"
] | 3
|
2020-05-30T05:28:08.000Z
|
2021-04-10T17:07:02.000Z
|
import pytest
from dolib.client import AsyncClient, Client
from dolib.models import Project
@pytest.mark.vcr
@pytest.mark.block_network()
def test_crud_projects(client: Client) -> None:
    """Round-trip the projects API: create, list, get, update, assign
    resources, list resources, delete.  Runs against recorded cassettes,
    so the request order is significant and preserved."""
    new_project = Project(
        name="dolib-test",
        description="Python library for digital ocean API",
        purpose="For test purposes",
        environment="Development",
    )

    # create project
    created = client.projects.create(new_project)
    assert isinstance(created, Project)
    assert created.id is not None

    # list projects
    all_projects = client.projects.all()
    assert len(all_projects) > 0

    # read project
    fetched = client.projects.get(str(all_projects[0].id))
    assert fetched.id == all_projects[0].id
    assert isinstance(fetched, Project)

    # update project
    fetched.is_default = False
    fetched.name = "dolib-test-renamed"
    updated = client.projects.update(fetched)
    assert isinstance(updated, Project)
    assert fetched.name == updated.name

    # assign resource
    last_volume = client.volumes.all()[-1]
    client.projects.assign_resources(
        str(fetched.id),
        [Project.Resource(urn=f"do:volume:{last_volume.id}")],
    )

    # list resources
    project_resources = client.projects.resources(str(fetched.id))
    assert len(project_resources) > 0

    # delete project
    client.projects.delete(project=fetched)
@pytest.mark.vcr
@pytest.mark.block_network()
@pytest.mark.asyncio
async def test_async_crud_projects(async_client: AsyncClient) -> None:
    """Async variant of the projects CRUD round-trip.

    Mirrors test_crud_projects step for step using AsyncClient; runs
    against VCR cassettes, so presumably the request order must match the
    recordings — keep the call sequence unchanged.
    """
    project = Project(
        name="dolib-test",
        description="Python library for digital ocean API",
        purpose="For test purposes",
        environment="Development",
    )
    # create project
    created_project = await async_client.projects.create(project)
    assert isinstance(created_project, Project)
    assert created_project.id is not None
    # list projects
    projects = await async_client.projects.all()
    assert len(projects) > 0
    # read project
    read_project = await async_client.projects.get(str(projects[0].id))
    assert read_project.id == projects[0].id
    assert isinstance(read_project, Project)
    # update project
    read_project.is_default = False
    read_project.name = "dolib-test-renamed"
    updated_project = await async_client.projects.update(read_project)
    assert isinstance(updated_project, Project)
    assert read_project.name == updated_project.name
    # assign resource
    volume = (await async_client.volumes.all())[-1]
    await async_client.projects.assign_resources(
        str(read_project.id),
        [Project.Resource(urn=f"do:volume:{volume.id}")],
    )
    # list resources
    resources = await async_client.projects.resources(str(read_project.id))
    assert len(resources) > 0
    # delete project
    await async_client.projects.delete(project=read_project)
| 29.94898
| 75
| 0.707666
| 359
| 2,935
| 5.640669
| 0.169916
| 0.119506
| 0.06321
| 0.082963
| 0.879506
| 0.859753
| 0.825679
| 0.825679
| 0.784198
| 0.784198
| 0
| 0.004218
| 0.192164
| 2,935
| 97
| 76
| 30.257732
| 0.849852
| 0.069847
| 0
| 0.59375
| 0
| 0
| 0.083241
| 0.01547
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.015625
| false
| 0
| 0.046875
| 0
| 0.0625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f2bf6f5f6f1b38f6e7f456740dadb32b5459f9e4
| 144
|
py
|
Python
|
ionyweb/plugin_app/plugin_text/admin.py
|
makinacorpus/ionyweb
|
2f18e3dc1fdc86c7e19bae3778e67e28a37567be
|
[
"BSD-3-Clause"
] | 4
|
2015-09-28T10:07:39.000Z
|
2019-10-18T20:14:07.000Z
|
ionyweb/plugin_app/plugin_text/admin.py
|
makinacorpus/ionyweb
|
2f18e3dc1fdc86c7e19bae3778e67e28a37567be
|
[
"BSD-3-Clause"
] | 1
|
2021-03-19T21:41:33.000Z
|
2021-03-19T21:41:33.000Z
|
ionyweb/plugin_app/plugin_text/admin.py
|
makinacorpus/ionyweb
|
2f18e3dc1fdc86c7e19bae3778e67e28a37567be
|
[
"BSD-3-Clause"
] | 1
|
2017-10-12T09:25:19.000Z
|
2017-10-12T09:25:19.000Z
|
# -*- coding: utf-8 -*-
from django.contrib import admin

# Import the single model we register explicitly instead of `import *`:
# wildcard imports hide where names come from and pollute the namespace.
from ionyweb.plugin_app.plugin_text.models import Plugin_Text

# Expose the text plugin model in Django's admin interface.
admin.site.register(Plugin_Text)
| 20.571429
| 51
| 0.763889
| 21
| 144
| 5.095238
| 0.714286
| 0.205607
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007813
| 0.111111
| 144
| 6
| 52
| 24
| 0.828125
| 0.145833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4b4d6f551efd56925bdd292d5e3dd4563b2f53fb
| 246
|
py
|
Python
|
app/models/__init__.py
|
cmcunningham27/python-newsfeed
|
3eef42d693a41807d36946af47ff7e6442185c5c
|
[
"MIT"
] | null | null | null |
app/models/__init__.py
|
cmcunningham27/python-newsfeed
|
3eef42d693a41807d36946af47ff7e6442185c5c
|
[
"MIT"
] | null | null | null |
app/models/__init__.py
|
cmcunningham27/python-newsfeed
|
3eef42d693a41807d36946af47ff7e6442185c5c
|
[
"MIT"
] | null | null | null |
# Importing each model here makes it visible to the ORM layer so its table
# can be created/synced with the database (presumably via a create-all call
# elsewhere in the app — confirm against the application factory).
from .User import User
from .Post import Post
from .Comment import Comment
from .Vote import Vote
| 30.75
| 38
| 0.792683
| 40
| 246
| 4.875
| 0.3
| 0.184615
| 0.246154
| 0.410256
| 0.492308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.174797
| 246
| 8
| 39
| 30.75
| 0.960591
| 0.569106
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4b5a6b62ebb8551e0e1bd3c5bbdc61fe31bac4f5
| 57
|
py
|
Python
|
ahvl/options/set/__init__.py
|
gardar/ahvl
|
045b5882d94fc2d4ba7b194bf65ebfbf9d2e1d6d
|
[
"MIT"
] | 4
|
2019-10-12T12:11:23.000Z
|
2021-12-20T13:53:28.000Z
|
ahvl/options/set/__init__.py
|
gardar/ahvl
|
045b5882d94fc2d4ba7b194bf65ebfbf9d2e1d6d
|
[
"MIT"
] | 2
|
2021-02-05T12:52:55.000Z
|
2022-02-11T10:58:52.000Z
|
ahvl/options/set/__init__.py
|
gardar/ahvl
|
045b5882d94fc2d4ba7b194bf65ebfbf9d2e1d6d
|
[
"MIT"
] | 1
|
2020-08-13T07:52:27.000Z
|
2020-08-13T07:52:27.000Z
|
from ahvl.options.set.password import OptionsSetPassword
| 28.5
| 56
| 0.877193
| 7
| 57
| 7.142857
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070175
| 57
| 1
| 57
| 57
| 0.943396
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
4bc25cf81cee74db5d88de84d3613e690fbdf2a2
| 40
|
py
|
Python
|
rdm/wrappers/wordification/__init__.py
|
Alshak/rdm
|
0c969665a4a3c8e6258c3d603de8987bd9639fd1
|
[
"MIT"
] | null | null | null |
rdm/wrappers/wordification/__init__.py
|
Alshak/rdm
|
0c969665a4a3c8e6258c3d603de8987bd9639fd1
|
[
"MIT"
] | null | null | null |
rdm/wrappers/wordification/__init__.py
|
Alshak/rdm
|
0c969665a4a3c8e6258c3d603de8987bd9639fd1
|
[
"MIT"
] | 1
|
2020-02-29T17:40:32.000Z
|
2020-02-29T17:40:32.000Z
|
from wordification import Wordification
| 20
| 39
| 0.9
| 4
| 40
| 9
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 40
| 1
| 40
| 40
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4bc6239870be59dbed26ba81b2a0f52c2c74b8d6
| 4,824
|
py
|
Python
|
bot.py
|
AviationTools/Weather-Bot
|
44f946cef7d15428e2c4da8219ad609c56292727
|
[
"MIT"
] | null | null | null |
bot.py
|
AviationTools/Weather-Bot
|
44f946cef7d15428e2c4da8219ad609c56292727
|
[
"MIT"
] | null | null | null |
bot.py
|
AviationTools/Weather-Bot
|
44f946cef7d15428e2c4da8219ad609c56292727
|
[
"MIT"
] | null | null | null |
import os
import discord
import requests
import datetime
from PIL import Image
from dotenv import load_dotenv
load_dotenv()
TOKEN = os.getenv("DISCORD_TOKEN")
client = discord.Client()
# Half-hour timestamps "0000".."2330".  The original hand-written lists
# contained a typo ("01700"), a duplicated "1200" and an out-of-range
# "2400" entry; generating the list removes those defects.
HALF_HOUR_TIMES = [f"{h:02d}{m:02d}" for h in range(24) for m in (0, 30)]
# Issue times of the six-hourly synoptic/SIGWX charts.
SYNOPTIC_TIMES = ["0000", "0600", "1200", "1800"]


def _find_latest_url(url_template, times):
    """Return the newest available chart URL, or None.

    *url_template* must contain a ``{time}`` placeholder.  Each candidate
    time is probed in order and the last one answering 200 is remembered;
    a 404 stops the search because later charts for the day cannot exist
    yet (other status codes are skipped, matching the original behaviour).
    """
    last_url = None
    for time in times:
        url = url_template.format(time=time)
        response = requests.get(url)
        if response.status_code == 200:
            last_url = url
        elif response.status_code == 404:
            print("not found weather map for ", time)
            break
    return last_url


def _fetch_frames(url_template, times):
    """Download one image per entry of *times*; 404 frames are skipped."""
    frames = []
    for time in times:
        response = requests.get(url_template.format(time=time), stream=True)
        if response.status_code == 200:
            frames.append(Image.open(response.raw))
        elif response.status_code == 404:
            print("not found weather map for ", time)
    return frames


async def _send_animation(channel, frames, file_name):
    """Assemble *frames* into an animated GIF, post it, delete the file.

    Guards against an empty frame list — the original code raised
    IndexError on ``images[0]`` when nothing could be downloaded.
    """
    if not frames:
        await channel.send("Could not fetch any satellite images.")
        return
    frames[0].save(file_name, save_all=True, append_images=frames[1:],
                   loop=0, duration=200)
    await channel.send(file=discord.File(file_name))
    os.remove(file_name)


@client.event
async def on_message(message):
    """Dispatch the supported commands: !weather, !satellite, !world, !sigwx."""
    if message.author == client.user:
        # Never react to the bot's own messages.
        return

    if message.content == "!weather":
        print("handling !weather")
        utc = datetime.datetime.utcnow()
        plain_date = utc.strftime("%y%m%d")
        link_date = utc.strftime("%Y/%m/%d")
        template = (f"https://www.zamg.ac.at/fix/wetter/bodenkarte/"
                    f"{link_date}/BK_BodAna_Sat_{plain_date}{{time}}.png")
        async with message.channel.typing():
            last_url = _find_latest_url(template, SYNOPTIC_TIMES)
            if last_url:
                print("found weather map ", last_url)
                await message.channel.send(last_url)
            else:
                await message.channel.send("Could not fetch last weather map.")

    if message.content == "!satellite":
        plain_date = datetime.datetime.utcnow().strftime("%y%m%d")
        print(plain_date)
        template = (f"https://www.zamg.ac.at/dyn/pictures/Hsatimg/"
                    f"H{plain_date}{{time}}.gif")
        async with message.channel.typing():
            frames = _fetch_frames(template, HALF_HOUR_TIMES)
        await _send_animation(message.channel, frames, "satellite.gif")

    if message.content == "!world":
        # NOTE: the world imagery uses a four-digit year (%Y), unlike the
        # two-digit (%y) dates of the other products.
        plain_date = datetime.datetime.utcnow().strftime("%Y%m%d")
        print(plain_date)
        template = (f"https://www.zamg.ac.at/zamgWeb/wetter/weltsatbilder/"
                    f"worldsatimg/WCM/SAT_WCM_{plain_date}{{time}}.gif")
        async with message.channel.typing():
            frames = _fetch_frames(template, HALF_HOUR_TIMES)
        await _send_animation(message.channel, frames, "world.gif")

    if message.content == "!sigwx":
        print("handling !sigwx")  # was "handling !weather" — copy-paste slip
        template = "http://brunnur.vedur.is/flugkort/PGDE14_EGRR_{time}.PNG"
        async with message.channel.typing():
            last_url = _find_latest_url(template, SYNOPTIC_TIMES)
            if last_url:
                print("found weather map ", last_url)
                await message.channel.send(last_url)
            else:
                await message.channel.send("Could not fetch last weather map.")
# Start the bot; client.run blocks until the process is stopped.
print("starting weather bot")
client.run(TOKEN)
| 40.2
| 417
| 0.566128
| 592
| 4,824
| 4.525338
| 0.27027
| 0.033595
| 0.053751
| 0.035834
| 0.822695
| 0.822695
| 0.822695
| 0.822695
| 0.822695
| 0.822695
| 0
| 0.133823
| 0.265755
| 4,824
| 119
| 418
| 40.537815
| 0.62253
| 0
| 0
| 0.640449
| 0
| 0.044944
| 0.270315
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.067416
| 0
| 0.078652
| 0.123596
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
29d22f41a246483051d3caf02752368e644d0615
| 174
|
py
|
Python
|
hello.py
|
vanfalen/simple-pi-oled
|
b3534c795f7cda4fb67a6b844c311eedf3ba8525
|
[
"Apache-2.0"
] | null | null | null |
hello.py
|
vanfalen/simple-pi-oled
|
b3534c795f7cda4fb67a6b844c311eedf3ba8525
|
[
"Apache-2.0"
] | null | null | null |
hello.py
|
vanfalen/simple-pi-oled
|
b3534c795f7cda4fb67a6b844c311eedf3ba8525
|
[
"Apache-2.0"
] | null | null | null |
import html

from flask import Flask
from flask import request
app = Flask(__name__)
@app.route("/terminal", methods=['POST'])
def hello():
    """Greet the POSTed ``user`` form field.

    The value is HTML-escaped before interpolation: reflecting raw request
    input straight into the response body enables cross-site scripting
    (XSS).
    """
    safe_user = html.escape(request.form["user"])
    return f"Hello, {safe_user}"
| 19.333333
| 45
| 0.689655
| 24
| 174
| 4.833333
| 0.666667
| 0.155172
| 0.258621
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132184
| 174
| 8
| 46
| 21.75
| 0.768212
| 0
| 0
| 0
| 0
| 0
| 0.149425
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0.166667
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
4b066366a9e2c33bf2584337f289d21864090178
| 117
|
py
|
Python
|
rest_api/controller/__init__.py
|
ArzelaAscoIi/haystack
|
be8f50c9e3de4e264b3f345f5f4b9c9ec518ed08
|
[
"Apache-2.0"
] | 4,544
|
2019-11-14T11:57:49.000Z
|
2022-03-31T17:41:18.000Z
|
rest_api/controller/__init__.py
|
ArzelaAscoIi/haystack
|
be8f50c9e3de4e264b3f345f5f4b9c9ec518ed08
|
[
"Apache-2.0"
] | 1,679
|
2020-01-14T15:55:58.000Z
|
2022-03-31T20:55:25.000Z
|
rest_api/controller/__init__.py
|
ArzelaAscoIi/haystack
|
be8f50c9e3de4e264b3f345f5f4b9c9ec518ed08
|
[
"Apache-2.0"
] | 820
|
2019-11-27T13:01:42.000Z
|
2022-03-31T12:54:34.000Z
|
from rest_api.pipeline import custom_component # this import is required for the Custom Components to be registered
| 58.5
| 116
| 0.837607
| 18
| 117
| 5.333333
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145299
| 117
| 1
| 117
| 117
| 0.96
| 0.564103
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d9a86d35609ccc5e41e5471a6be676f5d69ff793
| 5,262
|
py
|
Python
|
Keras/1-Flowers/PhotoHandler.py
|
SSRMori/Tensorflow-Selflearning
|
931a015c81b955ab68a7042490a728db1cd29ff2
|
[
"MIT"
] | 1
|
2019-07-06T02:11:00.000Z
|
2019-07-06T02:11:00.000Z
|
Keras/1-Flowers/PhotoHandler.py
|
SSRMori/Tensorflow-Selflearning
|
931a015c81b955ab68a7042490a728db1cd29ff2
|
[
"MIT"
] | null | null | null |
Keras/1-Flowers/PhotoHandler.py
|
SSRMori/Tensorflow-Selflearning
|
931a015c81b955ab68a7042490a728db1cd29ff2
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import tensorflow as tf
import numpy as np
import os
import math
from PIL import Image
data_top_path = "./flowers/"
flower_labels = os.listdir(data_top_path)
label_size = len(flower_labels)
jpg_width = 100
jpg_height = 100
def read_jpg_files(file_path):
    """Open the JPEG at *file_path* and return it resized to the module's
    standard (jpg_width, jpg_height) dimensions."""
    target_size = (jpg_width, jpg_height)
    return Image.open(file_path).resize(target_size)
def read_files_from_fold(fold_path):
    """Load every ``.jpg`` in *fold_path*.

    Returns a pair ``(jpg_names, images)`` of parallel lists: full file
    names and the corresponding resized images.
    """
    jpg_names = []
    images = []
    for entry in os.listdir(fold_path):
        if entry[-4:] != ".jpg":
            continue
        full_name = fold_path + "/" + entry
        jpg_names.append(full_name)
        images.append(read_jpg_files(full_name))
    return (jpg_names, images)
def get_feature_vector(image):
    """Flatten *image* into a 1-D numpy array of per-pixel channel values,
    scanning x (outer) then y (inner), channels innermost."""
    channel_values = [
        channel
        for x in range(jpg_width)
        for y in range(jpg_height)
        for channel in image.getpixel((x, y))
    ]
    return np.array(channel_values)
def read_one_class(class_label_number):
    """Load every image of one flower class as flattened feature vectors.

    Returns ``(feature_matrix, labels)`` where *labels* is a column vector
    whose entries all equal *class_label_number*.
    """
    class_name = flower_labels[class_label_number]
    ans_matrix_list = []
    image_file_name_list, image_file_list = read_files_from_fold(str(data_top_path + class_name))
    for i in range(0, len(image_file_name_list)):
        ans_matrix_list.append(get_feature_vector(image_file_list[i]))
    print(str(flower_labels[class_label_number] + " loaded"))
    np_ans_matrix_list = np.array(ans_matrix_list)
    # One label row per sample, all equal to the class index.
    np_labels = np.ones((np_ans_matrix_list.shape[0], 1)) * class_label_number
    return (np_ans_matrix_list, np_labels)
def load_data():
    """Load every flower class; return (samples, labels) as parallel
    numpy arrays, classes concatenated in label order."""
    samples = []
    labels = []
    for class_idx in range(len(flower_labels)):
        class_samples, class_labels = read_one_class(class_idx)
        for sample, label in zip(class_samples, class_labels):
            samples.append(sample)
            labels.append(label)
    return (np.array(samples), np.array(labels))
def shuffle(test_list, label_list):
    """Shuffle both arrays in place with the same permutation.

    The RNG state is captured before the first shuffle and restored before
    the second, so samples and labels stay paired.  Returns both arrays.
    """
    rng_state = np.random.get_state()
    np.random.shuffle(test_list)
    np.random.set_state(rng_state)
    np.random.shuffle(label_list)
    return (test_list, label_list)
def get_training_set_and_test_set():
    """Shuffle the whole data set and split it 80/20 into
    ((x_train, y_train), (x_test, y_test))."""
    data_set, label_set = shuffle(*load_data())
    split_at = math.floor(4 * data_set.shape[0] / 5)
    train = (data_set[:split_at], label_set[:split_at])
    test = (data_set[split_at:], label_set[split_at:])
    return (train, test)
CNN_jpg_width = 100
CNN_jpg_height = 100
def read_CNN_jpg_files(file_path):
    """Open the JPEG at *file_path* and return it unresized.

    Unlike read_jpg_files this intentionally skips the resize step; the
    caller filters on the image dimensions instead (see
    read_CNN_files_from_fold).  The commented-out resize variants that
    previously surrounded this function were dead code and have been
    removed.
    """
    return Image.open(file_path)
def read_CNN_files_from_fold(fold_path):
    """Load every ``.jpg`` in *fold_path* larger than 100x100 pixels.

    Returns ``(jpg_name_list, file_list)``.  Bug fix: the original read
    ``tempFile.shape``, but PIL images expose their dimensions as
    ``.size`` (``.shape`` raises AttributeError), so no image could ever
    pass the filter.
    """
    file_list = []
    jpg_name_list = []
    for file_name in os.listdir(fold_path):
        if file_name[-4:] != ".jpg":
            continue
        jpg_file_name = str(fold_path + "/" + file_name)
        temp_file = read_CNN_jpg_files(jpg_file_name)
        width, height = temp_file.size  # PIL: (width, height)
        if width > 100 and height > 100:
            jpg_name_list.append(jpg_file_name)
            file_list.append(temp_file)
    return (jpg_name_list, file_list)
def get_CNN_feature_vector(image):
    """Copy the first three channels of *image* into a
    (CNN_jpg_width, CNN_jpg_height, 3) float array."""
    feature = np.zeros((CNN_jpg_width, CNN_jpg_height, 3))
    for x in range(CNN_jpg_width):
        for y in range(CNN_jpg_height):
            pixel = image.getpixel((x, y))
            for channel in range(3):
                feature[x][y][channel] = pixel[channel]
    return feature
def read_CNN_one_class(class_label_number):
    """Load one flower class as (W, H, 3) arrays plus a label column.

    CNN variant of read_one_class: images keep their 2-D structure
    instead of being flattened.
    """
    class_name = flower_labels[class_label_number]
    ans_matrix_list = []
    image_file_name_list, image_file_list = read_CNN_files_from_fold(str(data_top_path + class_name))
    for i in range(0, len(image_file_name_list)):
        ans_matrix_list.append(get_CNN_feature_vector(image_file_list[i]))
    print(str(flower_labels[class_label_number] + " loaded"))
    np_ans_matrix_list = np.array(ans_matrix_list)
    # Column vector of labels, one row per loaded image.
    np_labels = np.ones((np_ans_matrix_list.shape[0], 1)) * class_label_number
    return (np_ans_matrix_list, np_labels)
def CNN_load_data():
    """Load every class as image tensors; return (samples, labels) as
    parallel numpy arrays (CNN variant of load_data)."""
    test_list = []
    label_list = []
    for i in range(0, len(flower_labels)):
        temp_test, temp_label = read_CNN_one_class(i)
        for j in range(0, len(temp_test)):
            test_list.append(temp_test[j])
            label_list.append(temp_label[j])
    return (np.array(test_list), np.array(label_list))
def get_CNN_training_set_and_test_set():
    """Shuffle the CNN data set and split it 80/20 into
    ((x_train, y_train), (x_test, y_test))."""
    origin_data_set, origin_label_set = CNN_load_data()
    data_set, label_set = shuffle(origin_data_set, origin_label_set)
    # 4/5 of the samples train, the remainder test.
    divide_line = math.floor(4 * data_set.shape[0] / 5)
    x_train = data_set[:divide_line]
    y_train = label_set[:divide_line]
    x_test = data_set[divide_line:]
    y_test = label_set[divide_line:]
    return ((x_train, y_train), (x_test, y_test))
| 36.289655
| 101
| 0.702585
| 843
| 5,262
| 3.956109
| 0.104389
| 0.03988
| 0.028786
| 0.023088
| 0.837481
| 0.801199
| 0.769715
| 0.757721
| 0.724138
| 0.724138
| 0
| 0.010834
| 0.193082
| 5,262
| 144
| 102
| 36.541667
| 0.774611
| 0.052642
| 0
| 0.516667
| 0
| 0
| 0.006831
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108333
| false
| 0
| 0.05
| 0
| 0.266667
| 0.025
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d9d5b91a01311b857fef40f3ab59bffc826a0995
| 17,948
|
py
|
Python
|
polaris/polaris/tests/sep6/test_withdraw.py
|
lijamie98/django-polaris
|
5cdda7434281988deb761b34f574dfcaf7ae9f5d
|
[
"Apache-2.0"
] | null | null | null |
polaris/polaris/tests/sep6/test_withdraw.py
|
lijamie98/django-polaris
|
5cdda7434281988deb761b34f574dfcaf7ae9f5d
|
[
"Apache-2.0"
] | null | null | null |
polaris/polaris/tests/sep6/test_withdraw.py
|
lijamie98/django-polaris
|
5cdda7434281988deb761b34f574dfcaf7ae9f5d
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import json
from typing import Dict
from unittest.mock import patch, Mock
from stellar_sdk.keypair import Keypair
from rest_framework.request import Request
from polaris.tests.conftest import USD_DISTRIBUTION_SEED
from polaris.tests.helpers import (
mock_check_auth_success,
mock_check_auth_success_client_domain,
)
from polaris.integrations import WithdrawalIntegration
from polaris.models import Transaction, Asset
from polaris.sep10.token import SEP10Token
WITHDRAW_PATH = "/sep6/withdraw"
class GoodWithdrawalIntegration(WithdrawalIntegration):
    """Stub integration that succeeds for every request except
    ``type == "bad type"``, which raises ValueError."""

    def process_sep6_request(
        self,
        token: SEP10Token,
        request: Request,
        params: Dict,
        transaction: Transaction,
        *args,
        **kwargs
    ) -> Dict:
        # Simulate an anchor rejecting an unsupported withdrawal type.
        if params.get("type") == "bad type":
            raise ValueError()
        # Persist the transaction as a real integration would, then return
        # extra fields to be merged into the endpoint's JSON response.
        transaction.save()
        return {"extra_info": {"test": "test"}}
@pytest.mark.django_db
@patch("polaris.sep6.withdraw.rwi", GoodWithdrawalIntegration())
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_good_withdrawal_integration(client, usd_asset_factory):
    """A well-behaved integration yields 200 with the standard payload,
    including the asset's withdrawal limits and fees."""
    asset = Asset.objects.create(
        code="USD",
        issuer=Keypair.random().public_key,
        sep6_enabled=True,
        withdrawal_enabled=True,
        withdrawal_min_amount=10,
        withdrawal_max_amount=1000,
        distribution_seed=Keypair.random().secret,
    )
    response = client.get(
        WITHDRAW_PATH,
        {
            "asset_code": asset.code,
            "type": "bank_account",
            "dest": "test bank account number",
        },
    )
    content = response.json()
    assert response.status_code == 200
    # The memo is generated per transaction, so only its presence and hash
    # type are asserted before comparing the rest of the payload.
    assert content.pop("memo")
    assert content.pop("memo_type") == Transaction.MEMO_TYPES.hash
    assert content == {
        "id": str(Transaction.objects.first().id),
        "account_id": asset.distribution_account,
        "min_amount": round(asset.withdrawal_min_amount, asset.significant_decimals),
        "max_amount": round(asset.withdrawal_max_amount, asset.significant_decimals),
        "fee_fixed": round(asset.withdrawal_fee_fixed, asset.significant_decimals),
        "fee_percent": asset.withdrawal_fee_percent,
        "extra_info": {"test": "test"},
    }
@pytest.mark.django_db
@patch("polaris.sep6.withdraw.rwi.process_sep6_request")
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_withdrawal_success_no_min_max_amounts(mock_process_sep6_request, client):
    """Without withdrawal_min/max_amount on the asset the response omits
    the min_amount/max_amount keys entirely."""
    asset = Asset.objects.create(
        code="USD",
        issuer=Keypair.random().public_key,
        sep6_enabled=True,
        withdrawal_enabled=True,
        distribution_seed=Keypair.random().secret,
    )
    mock_process_sep6_request.return_value = {
        "extra_info": {"test": "test"},
    }
    response = client.get(
        WITHDRAW_PATH,
        {
            "asset_code": asset.code,
            "type": "bank_account",
            "dest": "test bank account number",
        },
    )
    mock_process_sep6_request.assert_called_once()
    assert Transaction.objects.count() == 1
    assert response.status_code == 200
    content = response.json()
    assert content.pop("memo")
    assert content.pop("memo_type") == Transaction.MEMO_TYPES.hash
    # NOTE(review): this withdrawal test asserts deposit_fee_fixed /
    # deposit_fee_percent while test_good_withdrawal_integration asserts
    # the withdrawal_* fees — confirm against the view which set applies.
    assert content == {
        "id": str(Transaction.objects.first().id),
        "account_id": asset.distribution_account,
        "extra_info": {"test": "test"},
        "fee_fixed": round(asset.deposit_fee_fixed, asset.significant_decimals),
        "fee_percent": asset.deposit_fee_percent,
    }
@pytest.mark.django_db
@patch("polaris.sep6.withdraw.rwi.process_sep6_request")
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_withdrawal_success_custom_min_max_amounts(mock_process_sep6_request, client):
    """min_amount/max_amount returned by the integration override the
    asset's configured withdrawal limits in the response."""
    asset = Asset.objects.create(
        code="USD",
        issuer=Keypair.random().public_key,
        sep6_enabled=True,
        withdrawal_enabled=True,
        withdrawal_min_amount=10,
        withdrawal_max_amount=1000,
        distribution_seed=Keypair.random().secret,
    )
    # Integration-provided limits differ from the asset's (10/1000).
    mock_process_sep6_request.return_value = {
        "extra_info": {"test": "test"},
        "min_amount": 1000,
        "max_amount": 10000,
    }
    response = client.get(
        WITHDRAW_PATH,
        {
            "asset_code": asset.code,
            "type": "bank_account",
            "dest": "test bank account number",
        },
    )
    mock_process_sep6_request.assert_called_once()
    assert Transaction.objects.count() == 1
    content = response.json()
    assert response.status_code == 200, content
    assert content.pop("memo")
    assert content.pop("memo_type") == Transaction.MEMO_TYPES.hash
    # NOTE(review): as in the test above, deposit_fee_* is asserted in a
    # withdrawal response — confirm against the view which fee set applies.
    assert content == {
        "id": str(Transaction.objects.first().id),
        "account_id": asset.distribution_account,
        "min_amount": 1000,
        "max_amount": 10000,
        "extra_info": {"test": "test"},
        "fee_fixed": round(asset.deposit_fee_fixed, asset.significant_decimals),
        "fee_percent": asset.deposit_fee_percent,
    }
@pytest.mark.django_db
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_withdraw_bad_memo_type(client, acc1_usd_withdrawal_transaction_factory):
    """An unsupported memo_type value is rejected with a 400 error."""
    transaction = acc1_usd_withdrawal_transaction_factory(
        protocol=Transaction.PROTOCOL.sep6
    )
    query = {
        "asset_code": transaction.asset.code,
        "type": "good type",
        "dest": "test",
        "memo_type": "none",
    }
    response = client.get(WITHDRAW_PATH, query)
    content = json.loads(response.content)
    assert response.status_code == 400
    assert content == {"error": "invalid 'memo_type'"}
@pytest.mark.django_db
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_withdraw_bad_memo(client, acc1_usd_withdrawal_transaction_factory):
    """A memo that does not match its declared memo_type yields a 400."""
    transaction = acc1_usd_withdrawal_transaction_factory(
        protocol=Transaction.PROTOCOL.sep6
    )
    query = {
        "asset_code": transaction.asset.code,
        "type": "good type",
        "dest": "test",
        "memo_type": "id",
        "memo": "not an id",
    }
    response = client.get(WITHDRAW_PATH, query)
    content = json.loads(response.content)
    assert response.status_code == 400
    assert content == {"error": "invalid 'memo' for 'memo_type'"}
@pytest.mark.django_db
@patch("polaris.sep6.withdraw.rwi", GoodWithdrawalIntegration())
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_withdraw_bad_type(client, acc1_usd_withdrawal_transaction_factory):
    """The integration's ValueError for "bad type" surfaces as a 400."""
    withdraw = acc1_usd_withdrawal_transaction_factory(
        protocol=Transaction.PROTOCOL.sep6
    )
    response = client.get(
        WITHDRAW_PATH,
        {
            "asset_code": withdraw.asset.code,
            # GoodWithdrawalIntegration raises ValueError for this type.
            "type": "bad type",
            "dest": "test bank account number",
        },
    )
    content = json.loads(response.content)
    assert response.status_code == 400
    assert "error" in content
class MissingHowDepositIntegration(WithdrawalIntegration):
    """Integration that returns an empty dict from process_sep6_request.

    NOTE(review): despite "Deposit" in its name this is a withdrawal
    integration; the class is referenced by name elsewhere in this file,
    so it is not renamed here.
    """

    def process_sep6_request(
        self,
        token: SEP10Token,
        request: Request,
        params: Dict,
        transaction: Transaction,
        *args,
        **kwargs
    ) -> Dict:
        # No extra fields at all — exercises the endpoint's defaults.
        return {}
@pytest.mark.django_db
@patch("polaris.sep6.withdraw.rwi", MissingHowDepositIntegration())
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_withdraw_empty_integration_response(client, usd_asset_factory):
    """The endpoint fills in its own defaults when the integration returns {}."""
    asset = usd_asset_factory(protocols=[Transaction.PROTOCOL.sep6])
    response = client.get(
        WITHDRAW_PATH, {"asset_code": asset.code, "type": "good type", "dest": "test"},
    )
    body = json.loads(response.content)
    assert response.status_code == 200
    # A hash memo is generated for the withdrawal; strip both memo keys
    # before comparing the remaining payload against the asset defaults.
    assert body.pop("memo")
    assert body.pop("memo_type") == Transaction.MEMO_TYPES.hash
    expected = {
        "id": str(Transaction.objects.first().id),
        "account_id": Keypair.from_secret(USD_DISTRIBUTION_SEED).public_key,
        "min_amount": round(asset.withdrawal_min_amount, asset.significant_decimals),
        "max_amount": round(asset.withdrawal_max_amount, asset.significant_decimals),
        "fee_fixed": round(asset.withdrawal_fee_fixed, asset.significant_decimals),
        "fee_percent": asset.withdrawal_fee_percent,
    }
    assert body == expected
class BadExtraInfoWithdrawalIntegration(WithdrawalIntegration):
    # Returns a malformed "extra_info" value ("extra_info" must be a dict)
    # so tests can verify the endpoint converts it to a 500 response
    # rather than propagating bad data to the client.
    def process_sep6_request(
        self,
        token: SEP10Token,
        request: Request,
        params: Dict,
        transaction: Transaction,
        *args,
        **kwargs
    ) -> Dict:
        return {"extra_info": "not a dict"}
@pytest.mark.django_db
@patch("polaris.sep6.withdraw.rwi", BadExtraInfoWithdrawalIntegration())
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_withdraw_bad_extra_info_integration(
    client, acc1_usd_withdrawal_transaction_factory
):
    """A non-dict 'extra_info' from the integration is a server error."""
    transaction = acc1_usd_withdrawal_transaction_factory(
        protocol=Transaction.PROTOCOL.sep6
    )
    query = {"asset_code": transaction.asset.code, "type": "good type", "dest": "test"}
    response = client.get(WITHDRAW_PATH, query)
    assert response.status_code == 500
    assert json.loads(response.content) == {"error": "unable to process the request"}
@pytest.mark.django_db
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_withdraw_missing_asset(client, acc1_usd_withdrawal_transaction_factory):
    """Omitting 'asset_code' entirely is a 400."""
    acc1_usd_withdrawal_transaction_factory(protocol=Transaction.PROTOCOL.sep6)
    response = client.get(WITHDRAW_PATH, {"type": "good type", "dest": "test"})
    assert response.status_code == 400
    assert json.loads(response.content) == {"error": "invalid 'asset_code'"}
@pytest.mark.django_db
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_withdraw_invalid_asset(client):
    """An asset_code with no matching Asset row is a 400."""
    query = {"asset_code": "USD", "type": "good type", "dest": "test"}
    response = client.get(WITHDRAW_PATH, query)
    assert response.status_code == 400
    assert json.loads(response.content) == {"error": "invalid 'asset_code'"}
@pytest.mark.django_db
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_withdraw_missing_type(client, acc1_usd_withdrawal_transaction_factory):
    """Omitting the required 'type' parameter is a 400."""
    transaction = acc1_usd_withdrawal_transaction_factory(
        protocol=Transaction.PROTOCOL.sep6
    )
    response = client.get(
        WITHDRAW_PATH, {"asset_code": transaction.asset.code, "dest": "test"}
    )
    assert response.status_code == 400
    assert json.loads(response.content) == {"error": "'type' is required"}
@pytest.mark.django_db
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_withdraw_missing_dest(client, acc1_usd_withdrawal_transaction_factory):
    """Omitting the required 'dest' parameter is a 400."""
    transaction = acc1_usd_withdrawal_transaction_factory(
        protocol=Transaction.PROTOCOL.sep6
    )
    response = client.get(
        WITHDRAW_PATH, {"asset_code": transaction.asset.code, "type": "good type"}
    )
    assert response.status_code == 400
    assert json.loads(response.content) == {"error": "'dest' is required"}
@pytest.mark.django_db
@patch("polaris.sep6.withdraw.rwi", GoodWithdrawalIntegration())
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_withdrawal_transaction_created(
    client, acc1_usd_withdrawal_transaction_factory
):
    """A successful request persists a pending withdrawal Transaction."""
    seed_transaction = acc1_usd_withdrawal_transaction_factory(
        protocol=Transaction.PROTOCOL.sep6
    )
    distribution_address = Keypair.from_secret(USD_DISTRIBUTION_SEED).public_key
    response = client.get(
        WITHDRAW_PATH,
        {
            "asset_code": seed_transaction.asset.code,
            "type": "good type",
            "dest": "test",
            "amount": "100",
        },
    )
    assert response.status_code == 200
    # The most recently started withdrawal is the one this request created.
    created = (
        Transaction.objects.filter(kind=Transaction.KIND.withdrawal)
        .order_by("-started_at")
        .first()
    )
    assert created
    assert created.memo_type == Transaction.MEMO_TYPES.hash
    assert created.receiving_anchor_account == distribution_address
    assert created.stellar_account == "test source address"
    assert created.amount_in == 100
    assert created.amount_expected == 100
    assert created.asset == seed_transaction.asset
    assert created.kind == Transaction.KIND.withdrawal
    assert created.status == Transaction.STATUS.pending_user_transfer_start
    assert created.protocol == Transaction.PROTOCOL.sep6
class GoodInfoNeededWithdrawalIntegration(WithdrawalIntegration):
    # Simulates an anchor that needs more customer (KYC) data: returns the
    # SEP-6 "non_interactive_customer_info_needed" response instead of
    # withdrawal instructions.
    def process_sep6_request(
        self,
        token: SEP10Token,
        request: Request,
        params: Dict,
        transaction: Transaction,
        *args,
        **kwargs
    ) -> Dict:
        return {
            "type": "non_interactive_customer_info_needed",
            "fields": ["first_name", "last_name"],
        }
@pytest.mark.django_db
@patch("polaris.sep6.withdraw.rwi", GoodInfoNeededWithdrawalIntegration())
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_withdraw_non_interactive_customer_info_needed(
    client, acc1_usd_withdrawal_transaction_factory
):
    """A customer-info-needed response from the integration becomes a 403."""
    transaction = acc1_usd_withdrawal_transaction_factory(
        protocol=Transaction.PROTOCOL.sep6
    )
    response = client.get(
        WITHDRAW_PATH,
        {"asset_code": transaction.asset.code, "type": "good type", "dest": "test"},
    )
    expected = {
        "type": "non_interactive_customer_info_needed",
        "fields": ["first_name", "last_name"],
    }
    assert response.status_code == 403
    assert json.loads(response.content) == expected
@pytest.mark.django_db
def test_deposit_bad_auth(client):
    """An unauthenticated request is rejected with 403."""
    # NOTE(review): the name says "deposit" but this hits WITHDRAW_PATH —
    # presumably copied from the deposit test module; consider renaming to
    # test_withdraw_bad_auth.
    # The check_auth patch is deliberately absent here, so the real SEP-10
    # authentication check runs and must fail.
    response = client.get(WITHDRAW_PATH, {})
    content = json.loads(response.content)
    assert response.status_code == 403
    assert content == {"type": "authentication_required"}
class BadSaveWithdrawalIntegration(WithdrawalIntegration):
    # Misbehaving integration: it saves the transaction itself while also
    # returning a customer-info-needed response. The tests use it to
    # verify the endpoint treats this combination as a server error.
    def process_sep6_request(
        self,
        token: SEP10Token,
        request: Request,
        params: Dict,
        transaction: Transaction,
        *args,
        **kwargs
    ) -> Dict:
        # Saving here is the "bad" behavior under test.
        transaction.save()
        return {
            "type": "non_interactive_customer_info_needed",
            "fields": ["first_name", "last_name"],
        }
@pytest.mark.django_db
@patch("polaris.sep6.withdraw.rwi", BadSaveWithdrawalIntegration())
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_saved_transaction_on_failure_response(client, usd_asset_factory):
    """Saving the transaction while returning a failure response is a 500."""
    asset = usd_asset_factory(protocols=[Transaction.PROTOCOL.sep6])
    query = {
        "asset_code": asset.code,
        "type": "bank_account",
        "dest": "test bank account number",
    }
    response = client.get(WITHDRAW_PATH, query)
    assert response.status_code == 500
@pytest.mark.django_db
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_bad_amount(client, usd_asset_factory):
    """A non-numeric 'amount' is rejected with a 400."""
    asset = usd_asset_factory(protocols=[Transaction.PROTOCOL.sep6])
    query = {
        "asset_code": asset.code,
        "account": Keypair.random().public_key,
        "type": "good type",
        "amount": "not an amount",
        "dest": "test bank account number",
    }
    response = client.get(WITHDRAW_PATH, query)
    assert response.status_code == 400
    assert "amount" in json.loads(response.content)["error"]
@pytest.mark.django_db
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_amount_too_large(client, usd_asset_factory):
    """An 'amount' above the asset's maximum is rejected with a 400."""
    asset = usd_asset_factory(protocols=[Transaction.PROTOCOL.sep6])
    response = client.get(
        WITHDRAW_PATH,
        {
            "asset_code": asset.code,
            "account": Keypair.random().public_key,
            "type": "good type",
            "dest": "test bank account number",
            # NOTE(review): this is a *withdraw* test but uses
            # deposit_max_amount — presumably the factory gives both
            # limits the same value; confirm, or use
            # withdrawal_max_amount here.
            "amount": asset.deposit_max_amount + 1,
        },
    )
    assert response.status_code == 400
    assert "amount" in json.loads(response.content)["error"]
@pytest.mark.django_db
@patch("polaris.sep6.withdraw.rwi")
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_good_amount(mock_deposit, client, usd_asset_factory):
    """An in-range 'amount' succeeds and is passed to the integration."""
    # NOTE(review): `mock_deposit` actually replaces the *withdrawal*
    # integration (polaris.sep6.withdraw.rwi); consider renaming it.
    asset = usd_asset_factory(protocols=[Transaction.PROTOCOL.sep6])
    mock_deposit.process_sep6_request = Mock(return_value={"how": "test"})
    response = client.get(
        WITHDRAW_PATH,
        {
            "asset_code": asset.code,
            "account": Keypair.random().public_key,
            "type": "good type",
            "dest": "test bank account number",
            # NOTE(review): withdraw test using deposit_max_amount —
            # presumably both limits match in the factory; confirm.
            "amount": asset.deposit_max_amount - 1,
        },
    )
    assert response.status_code == 200
    # The integration must receive the parsed amount in its params kwarg.
    kwargs = mock_deposit.process_sep6_request.call_args_list[0][1]
    assert kwargs.get("params", {}).get("amount") == asset.deposit_max_amount - 1
@pytest.mark.django_db
@patch("polaris.sep6.withdraw.rwi")
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success_client_domain)
def test_withdraw_client_domain_saved(mock_withdraw, client):
    """The SEP-10 token's client_domain is persisted on the transaction."""
    account = Keypair.random()
    usd = Asset.objects.create(
        code="USD",
        issuer=Keypair.random().public_key,
        sep6_enabled=True,
        withdrawal_enabled=True,
        distribution_seed=Keypair.random().secret,
    )
    mock_withdraw.process_sep6_request = Mock(return_value={"how": "test"})
    response = client.get(
        WITHDRAW_PATH,
        {
            "asset_code": usd.code,
            "account": account.public_key,
            "type": "good type",
            "dest": "test bank account number",
        },
    )
    body = response.json()
    assert response.status_code == 200, json.dumps(body, indent=2)
    assert Transaction.objects.count() == 1
    saved = Transaction.objects.first()
    assert saved.client_domain == "test.com"
| 33.800377
| 87
| 0.673836
| 2,023
| 17,948
| 5.708354
| 0.088482
| 0.031174
| 0.02364
| 0.03637
| 0.85045
| 0.836941
| 0.815899
| 0.810097
| 0.791739
| 0.779702
| 0
| 0.015865
| 0.209828
| 17,948
| 530
| 88
| 33.864151
| 0.798406
| 0
| 0
| 0.655462
| 0
| 0
| 0.148875
| 0.056719
| 0
| 0
| 0
| 0
| 0.128151
| 1
| 0.052521
| false
| 0
| 0.023109
| 0.006303
| 0.096639
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d9e41d15c261b7b3a2fc68883869e9ef0cae31dc
| 26
|
py
|
Python
|
network_scanner_module/dns_scan.py
|
ikoyfman/Network---Tooling
|
5654b818feabc82c9a58bd2aecbec7436a5fc764
|
[
"MIT"
] | null | null | null |
network_scanner_module/dns_scan.py
|
ikoyfman/Network---Tooling
|
5654b818feabc82c9a58bd2aecbec7436a5fc764
|
[
"MIT"
] | null | null | null |
network_scanner_module/dns_scan.py
|
ikoyfman/Network---Tooling
|
5654b818feabc82c9a58bd2aecbec7436a5fc764
|
[
"MIT"
] | null | null | null |
#FUTURE PORTION
import dns
| 13
| 15
| 0.846154
| 4
| 26
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 26
| 2
| 16
| 13
| 0.956522
| 0.538462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d9ec9c40e632c92d6734c77b36854b807c433979
| 12,832
|
py
|
Python
|
dataio.py
|
sudo-michael/deepreach
|
a8affc4cc53b7671fda54dc159129315ec6b7ca8
|
[
"MIT"
] | null | null | null |
dataio.py
|
sudo-michael/deepreach
|
a8affc4cc53b7671fda54dc159129315ec6b7ca8
|
[
"MIT"
] | null | null | null |
dataio.py
|
sudo-michael/deepreach
|
a8affc4cc53b7671fda54dc159129315ec6b7ca8
|
[
"MIT"
] | null | null | null |
import csv
import glob
import math
import os
from unicodedata import normalize
import matplotlib.colors as colors
import numpy as np
import scipy.io as spio
import torch
from torch.utils.data import Dataset
from torchvision.transforms import Resize, Compose, ToTensor, Normalize
import utils
import pickle
def get_mgrid(sidelen, dim=2):
    """Generates a flattened grid of (x,y,...) coordinates in a range of -1 to 1.

    Args:
        sidelen: Samples per dimension — a single int (used for every
            dimension) or a sequence of length ``dim``.
        dim: Number of grid dimensions. Any dim >= 1 is supported (the
            original implementation handled only 2 and 3).

    Returns:
        torch.Tensor of shape (prod(sidelen), dim) with each axis
        normalized to [-1, 1].
    """
    if isinstance(sidelen, int):
        sidelen = dim * (sidelen,)  # (sidelen, sidelen, ...)
    # Integer grid indices, shape (1, *sidelen, dim). Building the mgrid
    # key from slices generalizes the previous hard-coded 2-D/3-D cases.
    pixel_coords = np.stack(
        np.mgrid[tuple(slice(0, s) for s in sidelen)], axis=-1
    )[None, ...].astype(np.float32)
    # Normalize each axis to [0, 1]. max(..., 1) guards the degenerate
    # single-sample axis — the original code guarded only the first axis
    # of the 3-D case and divided by zero elsewhere when sidelen == 1.
    for axis in range(dim):
        pixel_coords[..., axis] = pixel_coords[..., axis] / max(sidelen[axis] - 1, 1)
    # Map [0, 1] -> [-1, 1].
    pixel_coords -= 0.5
    pixel_coords *= 2.0
    pixel_coords = torch.Tensor(pixel_coords).view(-1, dim)
    return pixel_coords
def to_uint8(x):
    """Scale values in [0, 1] to uint8 in [0, 255] (fractions truncated)."""
    scaled = x * 255.0
    return scaled.astype(np.uint8)
def to_numpy(x):
    """Return *x* as a NumPy array: detached from autograd and moved to CPU."""
    detached = x.detach()
    return detached.cpu().numpy()
def gaussian(x, mu=(0, 0), sigma=1e-4, d=2):
    """Evaluate an isotropic d-dimensional Gaussian density at rows of *x*.

    Args:
        x: torch.Tensor of shape (N, d); converted to NumPy internally.
        mu: Mean — a sequence or torch.Tensor of length d.
            (Fixed: the previous default was a mutable list.)
        sigma: Variance parameter of the density.
        d: Dimensionality used in the normalization constant.

    Returns:
        float32 torch.Tensor of shape (N,) with the density values.
    """
    x = x.numpy()
    if isinstance(mu, torch.Tensor):
        mu = mu.numpy()
    # Accept plain sequences (list/tuple) for mu as well.
    mu = np.asarray(mu)
    # Squared distance to the mean, scaled for the exponent.
    q = -0.5 * ((x - mu) ** 2).sum(1)
    return torch.from_numpy(
        1 / np.sqrt(sigma ** d * (2 * np.pi) ** d) * np.exp(q / sigma)
    ).float()
class ReachabilityMultiVehicleCollisionSourceNE(Dataset):
    """Sampled training data for a multi-vehicle collision reachability
    problem: one pursuer vs. ``numEvaders`` evaders, each with state
    (x, y, heading).

    Each ``__getitem__`` call draws ``numpoints`` random space-time
    samples; it returns the pairwise collision boundary values and a mask
    marking which samples must satisfy the initial (Dirichlet) condition.
    """
    def __init__(
        self,
        numpoints,
        collisionR=0.25,
        velocity=0.6,
        omega_max=1.1,
        pretrain=False,
        tMin=0.0,
        tMax=0.5,
        counter_start=0,
        counter_end=100e3,
        numEvaders=1,
        pretrain_iters=2000,
        angle_alpha=1.0,
        time_alpha=1.0,
        num_src_samples=1000,
    ):
        # numpoints: samples drawn per __getitem__ call.
        # collisionR: collision radius between any vehicle pair.
        # pretrain: while True, all samples sit at the initial time; it is
        #   switched off after pretrain_iters calls (see __getitem__).
        # counter_start/counter_end: curriculum counter controlling how far
        #   the sampled time horizon has grown toward tMax.
        super().__init__()
        torch.manual_seed(0)
        self.pretrain = pretrain
        self.numpoints = numpoints
        self.velocity = velocity
        self.omega_max = omega_max
        self.collisionR = collisionR
        self.alpha_angle = angle_alpha * math.pi
        self.alpha_time = time_alpha
        self.numEvaders = numEvaders
        # Each vehicle contributes (x, y, heading).
        self.num_states_per_vehicle = 3
        self.num_states = self.num_states_per_vehicle * (numEvaders + 1)
        self.num_pos_states = 2 * (numEvaders + 1)
        # The state sequence will be as follows
        # [x-y position of vehicle 1, x-y position of vehicle 2, ...., x-y position of vehicle N, heading of vehicle 1, heading of vehicle 2, ...., heading of vehicle N]
        self.tMin = tMin
        self.tMax = tMax
        self.N_src_samples = num_src_samples
        self.pretrain_counter = 0
        self.counter = counter_start
        self.pretrain_iters = pretrain_iters
        self.full_count = counter_end
    def __len__(self):
        # The dataset is a stream: one freshly-sampled "item" per epoch.
        return 1
    def __getitem__(self, idx):
        """Sample coordinates and boundary values for one training step.

        Returns a ({"coords": ...}, {"source_boundary_values": ...,
        "dirichlet_mask": ...}) pair. Note: also mutates the curriculum
        counters as a side effect.
        """
        start_time = 0.0  # time to apply initial conditions
        # uniformly sample domain and include coordinates where source is non-zero
        coords = torch.zeros(self.numpoints, self.num_states).uniform_(-1, 1)
        if self.pretrain:
            # only sample in time around the initial condition
            # time = torch.zeros(self.numpoints, 1).uniform_(start_time - 0.001, start_time + 0.001)
            time = torch.ones(self.numpoints, 1) * start_time
            coords = torch.cat((time, coords), dim=1)
        else:
            # slowly grow time values from start time
            # this currently assumes start_time = tMin and max time value is tMax
            time = self.tMin + torch.zeros(self.numpoints, 1).uniform_(
                0, (self.tMax - self.tMin) * (self.counter / self.full_count)
            )
            coords = torch.cat((time, coords), dim=1)
            # make sure we always have training samples at the initial time
            coords[-self.N_src_samples :, 0] = start_time
        # set up the initial value function
        # Collision cost between the pursuer and the evaders
        boundary_values = (
            torch.norm(coords[:, 1:3] - coords[:, 3:5], dim=1, keepdim=True)
            - self.collisionR
        )
        for i in range(1, self.numEvaders):
            boundary_values_current = (
                torch.norm(
                    coords[:, 1:3] - coords[:, 2 * (i + 1) + 1 : 2 * (i + 1) + 3],
                    dim=1,
                    keepdim=True,
                )
                - self.collisionR
            )
            boundary_values = torch.min(boundary_values, boundary_values_current)
        # Collision cost between the evaders themselves
        for i in range(self.numEvaders):
            for j in range(i + 1, self.numEvaders):
                evader1_coords_index = 1 + (i + 1) * 2
                evader2_coords_index = 1 + (j + 1) * 2
                boundary_values_current = (
                    torch.norm(
                        coords[:, evader1_coords_index : evader1_coords_index + 2]
                        - coords[:, evader2_coords_index : evader2_coords_index + 2],
                        dim=1,
                        keepdim=True,
                    )
                    - self.collisionR
                )
                boundary_values = torch.min(boundary_values, boundary_values_current)
        # normalize the value function
        norm_to = 0.02
        mean = 0.25
        var = 0.5
        boundary_values = (boundary_values - mean) * norm_to / var
        if self.pretrain:
            dirichlet_mask = torch.ones(coords.shape[0], 1) > 0
        else:
            # only enforce initial conditions around start_time
            dirichlet_mask = coords[:, 0, None] == start_time
        # Curriculum bookkeeping (side effects of sampling).
        if self.pretrain:
            self.pretrain_counter += 1
        elif self.counter < self.full_count:
            self.counter += 1
        if self.pretrain and self.pretrain_counter == self.pretrain_iters:
            self.pretrain = False
        return (
            {"coords": coords},
            {
                "source_boundary_values": boundary_values,
                "dirichlet_mask": dirichlet_mask,
            },
        )
class ReachabilityAir3DSource(Dataset):
    """Sampled training data for the Air3D collision-avoidance
    reachability problem; the state is (x, y, relative heading).

    NOTE(review): an identical class of the same name is re-defined later
    in this module and shadows this one — likely an accidental duplicate;
    one of the two should be removed.
    """
    def __init__(
        self,
        numpoints,
        collisionR=0.25,
        velocity=0.6,
        omega_max=1.1,
        pretrain=False,
        tMin=0.0,
        tMax=0.5,
        counter_start=0,
        counter_end=100e3,
        pretrain_iters=2000,
        angle_alpha=1.0,
        num_src_samples=1000,
        seed=0,
    ):
        super().__init__()
        # NOTE(review): this seed(0) is immediately superseded by the
        # torch.manual_seed(seed) call at the end of __init__.
        torch.manual_seed(0)
        self.pretrain = pretrain
        self.numpoints = numpoints
        self.velocity = velocity
        self.omega_max = omega_max
        self.collisionR = collisionR
        self.alpha_angle = angle_alpha * math.pi
        # (x, y, relative heading)
        self.num_states = 3
        self.tMax = tMax
        self.tMin = tMin
        self.N_src_samples = num_src_samples
        self.pretrain_counter = 0
        self.counter = counter_start
        self.pretrain_iters = pretrain_iters
        self.full_count = counter_end
        # Set the seed
        torch.manual_seed(seed)
    def __len__(self):
        # One freshly-sampled batch per epoch.
        return 1
    def __getitem__(self, idx):
        """Sample coordinates and boundary values for one training step."""
        start_time = 0.0  # time to apply initial conditions
        # uniformly sample domain and include coordinates where source is non-zero
        coords = torch.zeros(self.numpoints, self.num_states).uniform_(-1, 1)
        if self.pretrain:
            # only sample in time around the initial condition
            time = torch.ones(self.numpoints, 1) * start_time
            coords = torch.cat((time, coords), dim=1)
        else:
            # slowly grow time values from start time
            # this currently assumes start_time = 0 and max time value is tMax
            time = self.tMin + torch.zeros(self.numpoints, 1).uniform_(
                0, (self.tMax - self.tMin) * (self.counter / self.full_count)
            )
            coords = torch.cat((time, coords), dim=1)
            # make sure we always have training samples at the initial time
            coords[-self.N_src_samples :, 0] = start_time
        # set up the initial value function
        boundary_values = (
            torch.norm(coords[:, 1:3], dim=1, keepdim=True) - self.collisionR
        )
        # normalize the value function
        norm_to = 0.02
        mean = 0.25
        var = 0.5
        boundary_values = (boundary_values - mean) * norm_to / var
        if self.pretrain:
            dirichlet_mask = torch.ones(coords.shape[0], 1) > 0
        else:
            # only enforce initial conditions around start_time
            dirichlet_mask = coords[:, 0, None] == start_time
        # Curriculum bookkeeping (side effects of sampling).
        if self.pretrain:
            self.pretrain_counter += 1
        elif self.counter < self.full_count:
            self.counter += 1
        if self.pretrain and self.pretrain_counter == self.pretrain_iters:
            self.pretrain = False
        return (
            {"coords": coords},
            {
                "source_boundary_values": boundary_values,
                "dirichlet_mask": dirichlet_mask,
            },
        )
class ReachabilityAir3DSource(Dataset):
    """Sampled training data for the Air3D collision-avoidance
    reachability problem; the state is (x, y, relative heading).

    NOTE(review): this is a verbatim re-definition of the
    ReachabilityAir3DSource class declared earlier in this module and it
    shadows that definition — almost certainly an accidental duplicate;
    remove one of the two.
    """
    def __init__(
        self,
        numpoints,
        collisionR=0.25,
        velocity=0.6,
        omega_max=1.1,
        pretrain=False,
        tMin=0.0,
        tMax=0.5,
        counter_start=0,
        counter_end=100e3,
        pretrain_iters=2000,
        angle_alpha=1.0,
        num_src_samples=1000,
        seed=0,
    ):
        super().__init__()
        # NOTE(review): this seed(0) is immediately superseded by the
        # torch.manual_seed(seed) call at the end of __init__.
        torch.manual_seed(0)
        self.pretrain = pretrain
        self.numpoints = numpoints
        self.velocity = velocity
        self.omega_max = omega_max
        self.collisionR = collisionR
        self.alpha_angle = angle_alpha * math.pi
        # (x, y, relative heading)
        self.num_states = 3
        self.tMax = tMax
        self.tMin = tMin
        self.N_src_samples = num_src_samples
        self.pretrain_counter = 0
        self.counter = counter_start
        self.pretrain_iters = pretrain_iters
        self.full_count = counter_end
        # Set the seed
        torch.manual_seed(seed)
    def __len__(self):
        # One freshly-sampled batch per epoch.
        return 1
    def __getitem__(self, idx):
        """Sample coordinates and boundary values for one training step."""
        start_time = 0.0  # time to apply initial conditions
        # uniformly sample domain and include coordinates where source is non-zero
        coords = torch.zeros(self.numpoints, self.num_states).uniform_(-1, 1)
        if self.pretrain:
            # only sample in time around the initial condition
            time = torch.ones(self.numpoints, 1) * start_time
            # cat t with coords
            # numpoints x ([t x y relative_heading])
            coords = torch.cat((time, coords), dim=1)
        else:
            # create samples of t from 0 to (t_max - t_min) / count
            # this currently assumes start_time = 0 and max time value is tMax
            time = self.tMin + torch.zeros(self.numpoints, 1).uniform_(
                0, (self.tMax - self.tMin) * (self.counter / self.full_count)
            )
            coords = torch.cat((time, coords), dim=1)
            # ensure training samples at the initial time
            coords[-self.N_src_samples :, 0] = start_time
        # set up the initial value function
        # sqrt(x^2 + y^2) - R
        boundary_values = (
            # coords[:, 0] is t
            # coords[:, 1:3] is x and y
            torch.norm(coords[:, 1:3], dim=1, keepdim=True)
            - self.collisionR
        )
        # normalize the value function
        norm_to = 0.02
        mean = 0.25
        var = 0.5
        boundary_values = (boundary_values - mean) * norm_to / var
        if self.pretrain:
            # all true
            dirichlet_mask = torch.ones(coords.shape[0], 1) > 0
        else:
            # only enforce initial conditions around start_time
            dirichlet_mask = coords[:, 0, None] == start_time
        # Curriculum bookkeeping (side effects of sampling).
        if self.pretrain:
            self.pretrain_counter += 1
        elif self.counter < self.full_count:
            self.counter += 1
        if self.pretrain and self.pretrain_counter == self.pretrain_iters:
            self.pretrain = False
        return (
            {"coords": coords},
            {
                "source_boundary_values": boundary_values,
                "dirichlet_mask": dirichlet_mask,
            },
        )
| 31.221411
| 169
| 0.56515
| 1,559
| 12,832
| 4.481719
| 0.130212
| 0.056677
| 0.024045
| 0.03206
| 0.772435
| 0.757693
| 0.733505
| 0.719479
| 0.714613
| 0.704308
| 0
| 0.034341
| 0.3351
| 12,832
| 410
| 170
| 31.297561
| 0.784576
| 0.151496
| 0
| 0.713311
| 0
| 0
| 0.014021
| 0.006088
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044369
| false
| 0
| 0.044369
| 0.017065
| 0.133106
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8a258f9c0b65813f0d1f31d7bcfe33d60776054a
| 121
|
py
|
Python
|
django_uicomponents/settings.py
|
koenwoortman/django-uicomponents
|
833dd219ebbbdaa7dc2b41730d5f21afa55641f1
|
[
"MIT"
] | 3
|
2021-05-22T10:45:51.000Z
|
2021-08-12T14:40:45.000Z
|
django_uicomponents/settings.py
|
koenwoortman/django-uicomponents
|
833dd219ebbbdaa7dc2b41730d5f21afa55641f1
|
[
"MIT"
] | null | null | null |
django_uicomponents/settings.py
|
koenwoortman/django-uicomponents
|
833dd219ebbbdaa7dc2b41730d5f21afa55641f1
|
[
"MIT"
] | null | null | null |
from django.conf import settings

# Directory name searched for UI component templates. A project may set
# COMPONENTS_DIR in its own settings; otherwise fall back to the default.
if not hasattr(settings, 'COMPONENTS_DIR'):
    settings.COMPONENTS_DIR = 'components'
| 24.2
| 43
| 0.77686
| 15
| 121
| 6.133333
| 0.666667
| 0.391304
| 0.456522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132231
| 121
| 4
| 44
| 30.25
| 0.87619
| 0
| 0
| 0
| 0
| 0
| 0.198347
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
8a3d4ba2ae42ed05c83016768ef91833512499da
| 21,899
|
py
|
Python
|
app/models.py
|
JeremyKimotho/blogs
|
f0fc056032534dc93f1ac725b60c5ae46d9407ea
|
[
"MIT"
] | null | null | null |
app/models.py
|
JeremyKimotho/blogs
|
f0fc056032534dc93f1ac725b60c5ae46d9407ea
|
[
"MIT"
] | null | null | null |
app/models.py
|
JeremyKimotho/blogs
|
f0fc056032534dc93f1ac725b60c5ae46d9407ea
|
[
"MIT"
] | null | null | null |
from . import db
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from datetime import datetime
from . import login_manager
# Role levels for simple access control: higher value means more
# privileges (compared in User.is_admin / User.allowed).
ACCESS = {
    'user': 0,
    'admin': 1
}
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: resolve a session's user id to a User row."""
    return User.query.get(int(user_id))
class User(UserMixin, db.Model):
    """Application user account with hashed-password auth and role levels."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(255))
    email = db.Column(db.String(255))
    # Account creation timestamp.
    joined = db.Column(db.DateTime, default=datetime.now)
    first_name = db.Column(db.String(255))
    surname = db.Column(db.String(255))
    # Werkzeug password hash; the plaintext password is never stored.
    pass_secure = db.Column(db.String(255))
    # Role level from ACCESS. NOTE(review): the column type is String but
    # ACCESS values are ints — confirm values round-trip consistently.
    access = db.Column(db.String(255), default=ACCESS['user'])
    comments = db.relationship('Comments', backref='user', lazy='dynamic')

    @property
    def password(self):
        """The plaintext password is write-only."""
        raise AttributeError('You do not have the permissions to access this')

    @password.setter
    def password(self, password):
        # Store only the salted hash of the supplied password.
        self.pass_secure = generate_password_hash(password)

    def verify_password(self, password):
        """Return True when *password* matches the stored hash."""
        return check_password_hash(self.pass_secure, password)

    def save_user(self):
        """Persist this user in the current database session."""
        db.session.add(self)
        db.session.commit()

    @staticmethod
    def find_by_username(username):
        """Return the User with *username*, or None."""
        # BUG FIX: this was a plain function missing `self`, so it only
        # worked when called through the class; @staticmethod makes the
        # intent explicit and also supports instance-bound calls.
        user = User.query.filter_by(username=username).first()
        return user

    def is_admin(self):
        """True when the user's role is the admin level."""
        # Coerce to int: `access` is a String column and may come back
        # from the database as a string.
        return int(self.access) == ACCESS['admin']

    def allowed(self, access_level):
        """True when the user's role is at least *access_level*."""
        return int(self.access) >= access_level

    @staticmethod
    def init_db():
        """Create the initial master/admin account when the table is empty."""
        # BUG FIX: was written as an instance-style method without `self`;
        # it is a bootstrap helper, so mark it static.
        if User.query.count() == 0:
            master = User(username='master', password='master', first_name='Jeremy', surname='Kimotho', email='projectsjeremy1000@gmail.com', access=ACCESS['admin'])
            db.session.add(master)
            db.session.commit()

    def __repr__(self):
        return f'User {self.username}'
class Comments(db.Model):
    """A user comment attached to a blog post."""
    __tablename__ = 'comments'
    id = db.Column(db.Integer, primary_key=True)
    comment = db.Column(db.String)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    # Timestamp when the comment was posted.
    posted = db.Column(db.DateTime, default=datetime.now)
    # Id of the post this comment belongs to.
    post = db.Column(db.Integer, db.ForeignKey('posts.id'))

    def save_comment(self):
        """Persist this comment in the current database session."""
        db.session.add(self)
        db.session.commit()

    def delete_comments(self):
        """Delete this comment from the database."""
        db.session.delete(self)
        db.session.commit()

    @classmethod
    def get_comments(cls, id):
        """Return all comments for the post with the given id."""
        # BUG FIX: previously filtered on `posted` (a DateTime column)
        # with a post id, which could never match; filter on `post`.
        comments = cls.query.filter_by(post=id).all()
        return comments
class Post(db.Model):
__tablename__='posts'
id = db.Column(db.Integer, primary_key = True)
title = db.Column(db.String(255))
body = db.Column(db.String)
posted = db.Column(db.DateTime,default=datetime.utcnow)
comments = db.relationship('Comments', backref='post_comments', lazy='dynamic')
def save_post(self):
db.session.add(self)
db.session.commit()
def delete_post(self):
db.session.delete(self)
db.session.commit()
def get_specific_post(id):
post = Post.query.filter_by(id=id).first()
return post
@classmethod
def get_posts(cls):
posts = Post.query.all()
return posts
def get_comments(self):
post = Post.query.filter_by(id = self.id).first()
comments = Comments.query.filter_by(post=post.id)
return comments
def default_posts():
# Seed the posts table with eight starter articles; idempotent -- does
# nothing when any post already exists.
# NOTE(review): takes no self/cls but sits in the class body like
# User.init_db -- presumably called as Post.default_posts(); confirm callers.
if Post.query.count() == 0:
post1=Post(title='Let\'s talk Behavioral Psychology?', body="Let's define behavioral psychology. Behavioral psychology is the study of the connection between our minds and our behavior. Sometimes you will hear behavioral psychology referred to as behaviorism. The researchers and scientists who study behavioral psychology are trying to understand why we behave the way we do and they are concerned with discovering patterns in our actions and behaviors. The hope is that if we can use behavioral psychology to help us predict how humans will behave, we can build better habits as individuals, create better products as companies, and develop better living spaces as communities.")
post2=Post(title='Motivation?', body="So what is motivation, exactly? The author Steven Pressfield has a great line in his book, The War of Art, which I think gets at the core of motivation. To paraphrase Pressfield, 'At some point, the pain of not doing it becomes greater than the pain of doing it.' In other words, at some point, it is easier to change than to stay the same. It is easier to take action and feel insecure at the gym than to sit still and experience self-loathing on the couch. It is easier to feel awkward while making the sales call than to feel disappointed about your dwindling bank account. This, I think, is the essence of motivation. Every choice has a price, but when we are motivated, it is easier to bear the inconvenience of action than the pain of remaining the same. Somehow we cross a mental threshold—usually after weeks of procrastination and in the face of an impending deadline—and it becomes more painful to not do the work than to actually do it.")
post3=Post(title='What is Procrastination?', body="Human beings have been procrastinating for centuries. The problem is so timeless, in fact, that ancient Greek philosophers like Socrates and Aristotle developed a word to describe this type of behavior: Akrasia. Akrasia is the state of acting against your better judgment. It is when you do one thing even though you know you should do something else. Loosely translated, you could say that akrasia is procrastination or a lack of self-control.' Ok, definitions are great and all, but why do we procrastinate? What is going on in the brain that causes us to avoid the things we know we should be doing? This is a good time to bring some science into our discussion. Behavioral psychology research has revealed a phenomenon called “time inconsistency,” which helps explain why procrastination seems to pull us in despite our good intentions. Time inconsistency refers to the tendency of the human brain to value immediate rewards more highly than future rewards. The best way to understand this is by imagining that you have two selves: your Present Self and your Future Self. When you set goals for yourself — like losing weight or writing a book or learning a language — you are actually making plans for your Future Self. You are envisioning what you want your life to be like in the future. Researchers have found that when you think about your Future Self, it is quite easy for your brain to see the value in taking actions with long-term benefits. The Future Self values long-term rewards. However, while the Future Self can set goals, only the Present Self can take action. When the time comes to make a decision, you are no longer making a choice for your Future Self. Now you are in the present moment, and your brain is thinking about the Present Self. Researchers have discovered that the Present Self really likes instant gratification, not long-term payoff. So, the Present Self and the Future Self are often at odds with one another. 
The Future Self wants to be trim and fit, but the Present Self wants a donut. Sure, everyone knows you should eat healthy today to avoid being overweight in 10 years. But consequences like an increased risk for diabetes or heart failure are years away. Similarly, many young people know that saving for retirement in their 20s and 30s is crucial, but the benefit of doing so is decades off. It is far easier for the Present Self to see the value in buying a new pair of shoes than in socking away $100 for 70-year-old you. (If you're curious, there are some very good evolutionary reasons for why our brain values immediate rewards more highly than long-term rewards.) This is one reason why you might go to bed feeling motivated to make a change in your life, but when you wake up you find yourself falling back into old patterns. Your brain values long-term benefits when they are in the future (tomorrow), but it values immediate gratification when it comes to the present moment (today).")
post4=Post(title='Creativity', body="The creative process is the act of making new connections between old ideas or recognizing relationships between concepts. Creative thinking is not about generating something new from a blank slate, but rather about taking what is already present and combining those bits and pieces in a way that has not been done previously. While being creative isn't easy, nearly all great ideas follow a similar creative process. In 1940, an advertising executive named James Webb Young published a short guide titled, A Technique for Producing Ideas. Young believed the process of creative connection always occurred in five steps. The Creative Process. Step 1: Gather new material. At first, you learn. During this stage you focus on 1) learning specific material directly related to your task and 2) learning general material by becoming fascinated with a wide range of concepts. Step 2: Thoroughly work over the materials in your mind. During this stage, you examine what you have learned by looking at the facts from different angles and experimenting with fitting various ideas together. Step 3: Step away from the problem. Next, you put the problem completely out of your mind and go do something else that excites you and energizes you. Step 4: Let your idea return to you. At some point, but only after you have stopped thinking about it, your idea will come back to you with a flash of insight and renewed energy. Step 5: Shape and develop your idea based on feedback. For any idea to succeed, you must release it out into the world, submit it to criticism, and adapt it as needed. While we often think of creativity as an event or as a natural skill that some people have and some don't, research actually suggests that both creativity and non-creativity are learned. 
According to psychology professor Barbara Kerr, “approximately 22 percent of the variance [in creativity] is due to the influence of genes.” This discovery was made by studying the differences in creative thinking between sets of twins. All of this to say, claiming that “I'm just not the creative type” is a pretty weak excuse for avoiding creative thinking. Certainly, some people are primed to be more creative than others. However, nearly every person is born with some level of creative skill and the majority of our creative thinking abilities are trainable.")
post5=Post(title='How to Make Smart Decisions and Avoid Bad Ones', body="Decision making is just what it sounds like: the action or process of making decisions. Sometimes we make logical decisions, but there are many times when we make emotional, irrational, and confusing choices. This page covers why we make poor decisions and discusses useful frameworks to expand your decision-making toolbox. I like to think of myself as a rational person, but I’m not one. The good news is it’s not just me — or you. We are all irrational. For a long time, researchers and economists believed that humans made logical, well-considered decisions. In recent decades, however, researchers have uncovered a wide range of mental errors that derail our thinking. 5 Common Mental Errors That Sway You From Making Good Decisions: Let's talk about the mental errors that show up most frequently in our lives and break them down in easy-to-understand language. This blog outlines how survivorship bias, loss aversion, the availability heuristic, anchoring, and confirmation bias sway you from making good decisions. How to Spot a Common Mental Error That Leads to Misguided Thinking: Hundreds of psychology studies have proven that we tend to overestimate the importance of events we can easily recall and underestimate the importance of events we have trouble recalling. Psychologists refer to this little brain mistake as an “illusory correlation.” In this article, we talk about a simple strategy you can use to spot your hidden assumptions and prevent yourself from making an illusory correlation. Two Harvard Professors Reveal One Reason Our Brains Love to Procrastinate: We have a tendency to care too much about our present selves and not enough about our future selves. If you want to beat procrastination and make better long-term choices, then you have to find a way to make your present self act in the best interest of your future self. This article breaks down three simple ways to do just that. 
How to Use Mental Models for Smart Decision Making The smartest way to improve your decision making skills is to learn mental models. A mental model is a framework or theory that helps to explain why the world works the way it does. Each mental model is a concept that helps us make sense of the world and offers a way of looking at the problems of life.")
post6=Post(title='Be more productive everyday', body="Let's define productivity. Productivity is a measure of efficiency of a person completing a task. We often assume that productivity means getting more things done each day. Wrong. Productivity is getting important things done consistently. And no matter what you are working on, there are only a few things that are truly important. Being productive is about maintaining a steady, average speed on a few things, not maximum speed on everything. Before we talk about how to get started, I wanted to let you know I researched and compiled science-backed ways to stick to good habits and stop procrastinating. My Top Productivity Strategies One: Eliminate Time Wasting Activities by Using the Eisenhower Box: This simple decision matrix will help you take action, organize tasks, and get more done. The great thing about this matrix is that it can be used for broad productivity plans (“How should I spend my time each week?”) and for smaller, daily plans (“What should I do today?”). Two: Warren Buffett’s “2 List” Strategy: How to Maximize Your Focus and Master Your Priorities: This method comes from the famous investor Warren Buffett and uses a simple 3-step productivity strategy to help you determine your priorities and actions. You may find this method useful for making decisions and getting yourself to commit to doing one thing right away. Three: The Ivy Lee Method: The Daily Routine Experts Recommend for Peak Productivity: This productivity strategy is straightforward: Do the most important thing first each day. The Ivy Lee Method is a dead simple way to implement this strategy. Four: The 15-Minute Routine Anthony Trollope Used to Write 40+ Books: There is one common problem with the approach of ranking your priorities and doing the most important thing first, though. 
After ranking your priorities for the day, if the number one task is a really big project then it can leave you feeling frustrated because it takes a long time to finish. Writer Anthony Trollope, however, developed a solution to this common problem. Most productivity strategies focus on short-term efficiency: how to manage your to-do list effectively, how to get more done each morning, how to shorten your weekly meetings, and so on. These are all reasonable ideas. We often fail to realize, however, that there are certain strategic choices we need to make if we want to maximize our productivity for the long-term. In these articles below, I break down some ideas about long-term productivity. Here Are More Simple Ways to Be More Productive Every Day: Step 1: Manage your energy, not your time. If you take a moment to think about it, you’ll probably realize that you are better at doing certain tasks at certain times. What type of energy do you have in the morning? Afternoon? Evening? Determine what tasks each energy level and time of day are best suited for. Step 2: Prepare the night before. If you only do one thing each day then spend a few minutes each night organizing your to–do list for tomorrow. When I do it right, I’ll outline the article I’m going to write the next day and develop a short list of the most important items for me to accomplish. It takes 10 minutes that night and saves 3 hours the next day. Step 3: Don’t open email until noon. Sounds simple. Nobody does it. It took me awhile to get over the urge to open my inbox, but eventually I realized that everything can wait a few hours. Nobody is going to email you about a true emergency (a death in the family, etc.), so leave your email alone for the first few hours of each day. Use the morning to do what’s important rather than responding to what is “urgent.” Step 4: Turn your phone off and leave it in another room. Or on your colleague's desk. Or at the very least, put it somewhere that is out of sight. 
This eliminates the urge to check text messages, Facebook, Twitter, and so on. This simple strategy eliminates the likelihood of slipping into half–work where you waste time dividing your attention among meaningless tasks. Step 5: Work in a cool place. Have you ever noticed how you feel groggy and sluggish in a hot room? Turning the temperature down or moving to a cooler place is an easy way to focus your mind and body. (Hat tip to Michael Hyatt for this one.) Step 6: Sit up or stand up. When you sit hunched over, your chest is in a collapsed position and your diaphragm is pressing against the bottom of your lungs, which hinders your ability to breathe easily and deeply. Sit up straight or stand up and you’ll find that you can breathe easier and more fully. As a result, your brain will get more oxygen and you’ll be able to concentrate better. Step 7: Develop a “pre–game routine” to start your day. My morning routine starts by pouring a cold glass of water. Some people kick off their day with ten minutes of meditation. Similarly, you should have a sequence that starts your morning ritual. This tiny routine signals to your brain that it’s time to get into work mode or exercise mode or whatever mode you need to be in to accomplish your task. Additionally, a pre–game routine helps you overcome a lack of motivation and get things done even when you don’t feel like it.")
post7=Post(title='Continous Improvement', body="Let's define continuous improvement. Continuous improvement is a dedication to making small changes and improvements every day, with the expectation that those small improvements will add up to something significant. The typical approach to self-improvement is to set a large goal, then try to take big leaps in order to accomplish the goal in as little time as possible. While this may sound good in theory, it often ends in burnout, frustration, and failure. Instead, we should focus on continuous improvement by slowly and slightly adjusting our normal everyday habits and behaviors. It is so easy to dismiss the value of making slightly better decisions on a daily basis. Sticking with the fundamentals is not impressive. Falling in love with boredom is not sexy. Getting one percent better isn't going to make headlines. There is one thing about it though: it works. How Does Continuous Improvement Work? So often we convince ourselves that change is only meaningful if there is some large, visible outcome associated with it. Whether it is losing weight, building a business, traveling the world or any other goal, we often put pressure on ourselves to make some earth-shattering improvement that everyone will talk about. Meanwhile, improving by just 1 percent isn't notable (and sometimes it isn't even noticeable). But it can be just as meaningful, especially in the long run. In the beginning, there is basically no difference between making a choice that is 1 percent better or 1 percent worse. (In other words, it won't impact you very much today.) But as time goes on, these small improvements or declines compound and you suddenly find a very big gap between people who make slightly better decisions on a daily basis and those who don't. Here's the punchline: If you get one percent better each day for one year, you'll end up thirty-seven times better by the time you’re done. 
This is why small choices don't make much of a difference at the time, but add up over the long-term.")
post8=Post(title='We\'re talking about practise', body="Deliberate practice refers to a special type of practice that is purposeful and systematic. While regular practice might include mindless repetitions, deliberate practice requires focused attention and is conducted with the specific goal of improving performance.Can You Achieve Anything With Enough Practice?Deliberate practice does not mean that you can fashion yourself into anything with enough work and effort, though. While human beings do possess a remarkable ability to develop their skills, there are limits to how far any individual can go. Your genes set a boundary around what is possible. However, while genetics influence performance, they do not determine performance. Do not confuse destiny with opportunity. Genes provide opportunity. They do not determine our destiny. It’s similar to a game of cards. You have a better opportunity if you are dealt a better hand, but you also need to play the hand well to win. Regardless of where we choose to apply ourselves, deliberate practice can help us maximize our potential—no matter what cards we were dealt. It turns potential into reality. Read The Myth and Magic of Deliberate Practice for more on genetics, practice, and how to maximize your genetic potential in life. Examples of Deliberate Practice: Joe DiMaggio was one of the greatest hitters in baseball history. I recently heard a little-known story about how DiMaggio developed his exceptional ability. In some circles, golfer Ben Hogan is credited with “inventing practice.” Hogan methodically broke the game of golf down into chunks and figured out how he could master each section. Today, experts have a new term for his rigorous style of improvement.")
# Stage all eight posts, then commit once at the end.
db.session.add(post1)
db.session.add(post2)
db.session.add(post3)
db.session.add(post4)
db.session.add(post5)
db.session.add(post6)
db.session.add(post7)
db.session.add(post8)
db.session.commit()
| 157.546763
| 5,231
| 0.784282
| 3,673
| 21,899
| 4.661312
| 0.280969
| 0.011039
| 0.009929
| 0.008411
| 0.066819
| 0.038199
| 0.02862
| 0.017581
| 0.007359
| 0.00514
| 0
| 0.004891
| 0.16914
| 21,899
| 139
| 5,232
| 157.546763
| 0.935532
| 0
| 0
| 0.175926
| 1
| 0.074074
| 0.808539
| 0.001279
| 0
| 0
| 0
| 0
| 0
| 1
| 0.175926
| false
| 0.083333
| 0.064815
| 0.046296
| 0.564815
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
8a50c79907852fccf1b106fa066ff0c1f6430154
| 9,187
|
py
|
Python
|
UnityEngine/Collider/__init__.py
|
Grim-es/udon-pie-auto-completion
|
c2cd86554ed615cdbbb01e19fa40665eafdfaedc
|
[
"MIT"
] | null | null | null |
UnityEngine/Collider/__init__.py
|
Grim-es/udon-pie-auto-completion
|
c2cd86554ed615cdbbb01e19fa40665eafdfaedc
|
[
"MIT"
] | null | null | null |
UnityEngine/Collider/__init__.py
|
Grim-es/udon-pie-auto-completion
|
c2cd86554ed615cdbbb01e19fa40665eafdfaedc
|
[
"MIT"
] | null | null | null |
from typing import overload
from UdonPie import System
from UdonPie import UnityEngine
from UdonPie.Undefined import *
class Collider:
    """Auto-completion stub mirroring ``UnityEngine.Collider`` for UdonPie.

    Every member is a signature-only placeholder: bodies do nothing and
    return None at runtime; the docstrings record the C# parameter and
    return types the real API exposes.
    """

    def __new__(cls, arg1=None):
        """Stub constructor; real return type: UnityEngine.Collider."""

    @staticmethod
    def op_Implicit(arg1):
        """(UnityEngine.Object) -> System.Boolean"""

    @staticmethod
    def op_Equality(arg1, arg2):
        """(UnityEngine.Object, UnityEngine.Object) -> System.Boolean"""

    @staticmethod
    def op_Inequality(arg1, arg2):
        """(UnityEngine.Object, UnityEngine.Object) -> System.Boolean"""

    @staticmethod
    def get_enabled():
        """() -> System.Boolean"""

    @staticmethod
    def set_enabled(arg1):
        """(System.Boolean or bool) -> None"""

    @staticmethod
    def get_attachedRigidbody():
        """() -> UnityEngine.Rigidbody"""

    @staticmethod
    def get_isTrigger():
        """() -> System.Boolean"""

    @staticmethod
    def set_isTrigger(arg1):
        """(System.Boolean or bool) -> None"""

    @staticmethod
    def get_contactOffset():
        """() -> System.Single"""

    @staticmethod
    def set_contactOffset(arg1):
        """(System.Single or float) -> None"""

    @staticmethod
    def ClosestPoint(arg1):
        """(UnityEngine.Vector3) -> UnityEngine.Vector3"""

    @staticmethod
    def get_bounds():
        """() -> UnityEngine.Bounds"""

    @staticmethod
    def get_sharedMaterial():
        """() -> UnityEngine.PhysicMaterial"""

    @staticmethod
    def set_sharedMaterial(arg1):
        """(UnityEngine.PhysicMaterial) -> None"""

    @staticmethod
    def get_material():
        """() -> UnityEngine.PhysicMaterial"""

    @staticmethod
    def set_material(arg1):
        """(UnityEngine.PhysicMaterial) -> None"""

    @staticmethod
    def Raycast(arg1, arg2, arg3):
        """(UnityEngine.Ray, RaycastHitRef.RaycastHitRef, System.Single or float) -> System.Boolean"""

    @staticmethod
    def ClosestPointOnBounds(arg1):
        """(UnityEngine.Vector3) -> UnityEngine.Vector3"""

    @staticmethod
    def get_transform():
        """() -> UnityEngine.Transform"""

    @staticmethod
    def get_gameObject():
        """() -> UnityEngine.GameObject"""

    @staticmethod
    @overload
    def GetComponent(arg1):
        """(System.Type) -> UnityEngine.Component"""

    @staticmethod
    @overload
    def GetComponent(arg1):
        """(System.String or str) -> UnityEngine.Component"""

    @staticmethod
    def GetComponent(arg1=None):
        """Runtime catch-all for the GetComponent overloads above."""

    @staticmethod
    @overload
    def GetComponentInChildren(arg1, arg2):
        """(System.Type, System.Boolean or bool) -> UnityEngine.Component"""

    @staticmethod
    @overload
    def GetComponentInChildren(arg1):
        """(System.Type) -> UnityEngine.Component"""

    @staticmethod
    def GetComponentInChildren(arg1=None, arg2=None):
        """Runtime catch-all for the GetComponentInChildren overloads above."""

    @staticmethod
    @overload
    def GetComponentsInChildren(arg1, arg2):
        """(System.Type, System.Boolean or bool) -> UnityEngine.ComponentArray"""

    @staticmethod
    @overload
    def GetComponentsInChildren(arg1):
        """(System.Type) -> UnityEngine.ComponentArray"""

    @staticmethod
    @overload
    def GetComponentsInChildren(arg1, arg2):
        """(System.Boolean or bool, ListT.ListT) -> None"""

    @staticmethod
    @overload
    def GetComponentsInChildren(arg1):
        """(ListT.ListT) -> None"""

    @staticmethod
    def GetComponentsInChildren(arg1=None, arg2=None):
        """Runtime catch-all for the GetComponentsInChildren overloads above."""

    @staticmethod
    @overload
    def GetComponentInParent(arg1):
        """(System.Type) -> UnityEngine.Component"""

    @staticmethod
    def GetComponentInParent(arg1=None):
        """Runtime catch-all for the GetComponentInParent overload above."""

    @staticmethod
    @overload
    def GetComponentsInParent(arg1, arg2):
        """(System.Type, System.Boolean or bool) -> UnityEngine.ComponentArray"""

    @staticmethod
    @overload
    def GetComponentsInParent(arg1):
        """(System.Type) -> UnityEngine.ComponentArray"""

    @staticmethod
    @overload
    def GetComponentsInParent(arg1, arg2):
        """(System.Boolean or bool, ListT.ListT) -> None"""

    @staticmethod
    def GetComponentsInParent(arg1=None, arg2=None):
        """Runtime catch-all for the GetComponentsInParent overloads above."""

    @staticmethod
    @overload
    def GetComponents(arg1):
        """(System.Type) -> UnityEngine.ComponentArray"""

    @staticmethod
    @overload
    def GetComponents(arg1, arg2):
        """(System.Type, SystemCollectionsGenericList.SystemCollectionsGenericList) -> None"""

    @staticmethod
    @overload
    def GetComponents(arg1):
        """(ListT.ListT) -> None"""

    @staticmethod
    def GetComponents(arg1=None, arg2=None):
        """Runtime catch-all for the GetComponents overloads above."""

    @staticmethod
    def GetInstanceID():
        """() -> System.Int32"""

    @staticmethod
    def GetHashCode():
        """() -> System.Int32"""

    @staticmethod
    def Equals(arg1):
        """(System.Object) -> System.Boolean"""

    @staticmethod
    def get_name():
        """() -> System.String"""

    @staticmethod
    def set_name(arg1):
        """(System.String or str) -> None"""

    @staticmethod
    def ToString():
        """() -> System.String"""

    @staticmethod
    def GetType():
        """() -> System.Type"""
| 20.691441
| 77
| 0.53641
| 762
| 9,187
| 6.437008
| 0.094488
| 0.156575
| 0.127829
| 0.082569
| 0.763303
| 0.762691
| 0.717431
| 0.712742
| 0.602039
| 0.515596
| 0
| 0.024416
| 0.371394
| 9,187
| 443
| 78
| 20.738149
| 0.824935
| 0.384239
| 0
| 0.73494
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.295181
| false
| 0.295181
| 0.024096
| 0
| 0.325301
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
8a5aa53a45f3630def4820dcdef3fb71478123ba
| 28
|
py
|
Python
|
test.py
|
ror-bot/ror-records
|
3ed9b35afe6b49e7415d5609b7bf7106370808c8
|
[
"MIT"
] | null | null | null |
test.py
|
ror-bot/ror-records
|
3ed9b35afe6b49e7415d5609b7bf7106370808c8
|
[
"MIT"
] | null | null | null |
test.py
|
ror-bot/ror-records
|
3ed9b35afe6b49e7415d5609b7bf7106370808c8
|
[
"MIT"
] | null | null | null |
# Minimal smoke test: emits a greeting when the script is executed.
print("hi from the script")
| 14
| 27
| 0.714286
| 5
| 28
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
8ac814b7464d4c9e36a7f4f27861aefded46b663
| 7,889
|
py
|
Python
|
test/test_processor_qa.py
|
skiran252/FARM
|
8460d78910a20d19a5da12de6e9bff11f68332a7
|
[
"Apache-2.0"
] | 1,551
|
2019-07-17T18:21:08.000Z
|
2022-03-24T18:09:07.000Z
|
test/test_processor_qa.py
|
skiran252/FARM
|
8460d78910a20d19a5da12de6e9bff11f68332a7
|
[
"Apache-2.0"
] | 555
|
2019-07-23T09:00:54.000Z
|
2022-03-31T15:31:06.000Z
|
test/test_processor_qa.py
|
skiran252/FARM
|
8460d78910a20d19a5da12de6e9bff11f68332a7
|
[
"Apache-2.0"
] | 259
|
2019-07-22T08:12:01.000Z
|
2022-03-26T09:41:00.000Z
|
import logging
import json
from farm.data_handler.processor import SquadProcessor
from farm.modeling.tokenization import Tokenizer
# during inference (parameter return_baskets = False) we do not convert labels
def test_dataset_from_dicts_qa_inference(caplog=None):
# Regression test: pins the exact tensor names, basket ids, token counts and
# leading input_ids that SquadProcessor produces for five QA models, so any
# change in preprocessing is detected. caplog is optional so the function can
# also be run outside pytest.
if caplog:
caplog.set_level(logging.CRITICAL)
models = [
"deepset/roberta-base-squad2",
"deepset/bert-base-cased-squad2",
"deepset/xlm-roberta-large-squad2",
"deepset/minilm-uncased-squad2",
"deepset/electra-base-squad2",
]
# One fixture file per sample type under samples/qa/ (relative to the cwd).
sample_types = ["answer-wrong", "answer-offset-wrong", "noanswer", "vanilla"]
for model in models:
tokenizer = Tokenizer.load(pretrained_model_name_or_path=model, use_fast=True)
processor = SquadProcessor(tokenizer, max_seq_len=256, data_dir=None)
for sample_type in sample_types:
dicts = processor.file_to_dicts(f"samples/qa/{sample_type}.json")
# return_baskets=True: inference path -- labels are not converted.
dataset, tensor_names, problematic_sample_ids, baskets = processor.dataset_from_dicts(dicts, indices=[1], return_baskets=True)
assert tensor_names == ['input_ids', 'padding_mask', 'segment_ids', 'passage_start_t', 'start_of_word', 'labels', 'id', 'seq_2_start_t', 'span_mask'], f"Processing for {model} has changed."
assert len(problematic_sample_ids) == 0, f"Processing for {model} has changed."
assert baskets[0].id_external == '5ad3d560604f3c001a3ff2c8', f"Processing for {model} has changed."
assert baskets[0].id_internal == '1-0', f"Processing for {model} has changed."
# roberta
if model == "deepset/roberta-base-squad2":
assert len(baskets[0].samples[0].tokenized["passage_tokens"]) == 6, f"Processing for {model} has changed."
assert len(baskets[0].samples[0].tokenized["question_tokens"]) == 7, f"Processing for {model} has changed."
if sample_type == "noanswer":
assert baskets[0].samples[0].features[0]["input_ids"][:13] == \
[0, 6179, 171, 82, 697, 11, 2201, 116, 2, 2, 26795, 2614, 34], \
f"Processing for {model} and {sample_type}-testsample has changed."
else:
assert baskets[0].samples[0].features[0]["input_ids"][:13] == \
[0, 6179, 171, 82, 697, 11, 5459, 116, 2, 2, 26795, 2614, 34], \
f"Processing for {model} and {sample_type}-testsample has changed."
# bert
if model == "deepset/bert-base-cased-squad2":
assert len(baskets[0].samples[0].tokenized["passage_tokens"]) == 5, f"Processing for {model} has changed."
assert len(baskets[0].samples[0].tokenized["question_tokens"]) == 7, f"Processing for {model} has changed."
if sample_type == "noanswer":
assert baskets[0].samples[0].features[0]["input_ids"][:10] == \
[101, 1731, 1242, 1234, 1686, 1107, 2123, 136, 102, 3206], \
f"Processing for {model} and {sample_type}-testsample has changed."
else:
assert baskets[0].samples[0].features[0]["input_ids"][:10] == \
[101, 1731, 1242, 1234, 1686, 1107, 3206, 136, 102, 3206], \
f"Processing for {model} and {sample_type}-testsample has changed."
# xlm-roberta
if model == "deepset/xlm-roberta-large-squad2":
assert len(baskets[0].samples[0].tokenized["passage_tokens"]) == 7, f"Processing for {model} has changed."
assert len(baskets[0].samples[0].tokenized["question_tokens"]) == 7, f"Processing for {model} has changed."
if sample_type == "noanswer":
assert baskets[0].samples[0].features[0]["input_ids"][:12] == \
[0, 11249, 5941, 3395, 6867, 23, 7270, 32, 2, 2, 10271, 1556], \
f"Processing for {model} and {sample_type}-testsample has changed."
else:
assert baskets[0].samples[0].features[0]["input_ids"][:12] == \
[0, 11249, 5941, 3395, 6867, 23, 10271, 32, 2, 2, 10271, 1556], \
f"Processing for {model} and {sample_type}-testsample has changed."
# minilm and electra have same vocab + tokenizer
if model == "deepset/minilm-uncased-squad2" or model == "deepset/electra-base-squad2":
assert len(baskets[0].samples[0].tokenized["passage_tokens"]) == 5, f"Processing for {model} has changed."
assert len(baskets[0].samples[0].tokenized["question_tokens"]) == 7, f"Processing for {model} has changed."
if sample_type == "noanswer":
assert baskets[0].samples[0].features[0]["input_ids"][:10] == \
[101, 2129, 2116, 2111, 2444, 1999, 3000, 1029, 102, 4068], \
f"Processing for {model} and {sample_type}-testsample has changed."
else:
assert baskets[0].samples[0].features[0]["input_ids"][:10] == \
[101, 2129, 2116, 2111, 2444, 1999, 4068, 1029, 102, 4068], \
f"Processing for {model} and {sample_type}-testsample has changed."
def test_dataset_from_dicts_qa_labelconversion(caplog=None):
    """Check that QA label conversion stays stable across tokenizer families.

    For each supported QA model, processes the four sample types and verifies:
    - malformed answers ("answer-wrong", "answer-offset-wrong") are reported
      as problematic samples,
    - "noanswer" samples produce the no-answer [0, 0] label with [-1, -1]
      padding in the unused answer slot,
    - "vanilla" samples produce the expected start/end token indices, which
      differ per model because each tokenizer segments the text differently.
    """
    if caplog:
        caplog.set_level(logging.CRITICAL)
    models = [
        "deepset/roberta-base-squad2",
        "deepset/bert-base-cased-squad2",
        "deepset/xlm-roberta-large-squad2",
        "deepset/minilm-uncased-squad2",
        "deepset/electra-base-squad2",
    ]
    sample_types = ["answer-wrong", "answer-offset-wrong", "noanswer", "vanilla"]
    for model in models:
        tokenizer = Tokenizer.load(pretrained_model_name_or_path=model, use_fast=True)
        processor = SquadProcessor(tokenizer, max_seq_len=256, data_dir=None)
        for sample_type in sample_types:
            dicts = processor.file_to_dicts(f"samples/qa/{sample_type}.json")
            dataset, tensor_names, problematic_sample_ids = processor.dataset_from_dicts(
                dicts, indices=[1], return_baskets=False
            )
            if sample_type in ("answer-wrong", "answer-offset-wrong"):
                assert len(problematic_sample_ids) == 1, f"Processing labels for {model} has changed."
            if sample_type == "noanswer":
                labels = dataset.tensors[tensor_names.index("labels")].numpy()
                assert list(labels[0, 0, :]) == [0, 0], f"Processing labels for {model} has changed."
                assert list(labels[0, 1, :]) == [-1, -1], f"Processing labels for {model} has changed."
            if sample_type == "vanilla":
                labels = dataset.tensors[tensor_names.index("labels")].numpy()
                # Expected answer-span token indices differ per tokenizer.
                # roberta
                if model == "deepset/roberta-base-squad2":
                    assert list(labels[0, 0, :]) == [13, 13], f"Processing labels for {model} has changed."
                    assert list(labels[0, 1, :]) == [13, 14], f"Processing labels for {model} has changed."
                # bert, minilm, electra
                if model in ("deepset/bert-base-cased-squad2", "deepset/minilm-uncased-squad2", "deepset/electra-base-squad2"):
                    assert list(labels[0, 0, :]) == [11, 11], f"Processing labels for {model} has changed."
                # xlm-roberta
                if model == "deepset/xlm-roberta-large-squad2":
                    assert list(labels[0, 0, :]) == [12, 12], f"Processing labels for {model} has changed."


if __name__ == "__main__":
    test_dataset_from_dicts_qa_labelconversion()
| 61.155039
| 201
| 0.595259
| 949
| 7,889
| 4.818757
| 0.169652
| 0.050733
| 0.061229
| 0.083096
| 0.88607
| 0.875355
| 0.853051
| 0.820031
| 0.791166
| 0.767111
| 0
| 0.076113
| 0.26556
| 7,889
| 129
| 202
| 61.155039
| 0.713152
| 0.024084
| 0
| 0.623762
| 0
| 0
| 0.309843
| 0.111039
| 0
| 0
| 0
| 0
| 0.267327
| 1
| 0.019802
| false
| 0.049505
| 0.039604
| 0
| 0.059406
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
76dbc2a44857973cf0480bc56f8bbef9179f5bc8
| 44
|
py
|
Python
|
django/contrib/messages/__init__.py
|
egenerat/gae-django
|
f12379483cf3917ed3cb46ca5ff0b94daf89fc50
|
[
"MIT"
] | 3
|
2016-07-08T23:49:32.000Z
|
2018-04-15T22:55:01.000Z
|
django/contrib/messages/__init__.py
|
egenerat/gae-django
|
f12379483cf3917ed3cb46ca5ff0b94daf89fc50
|
[
"MIT"
] | 27
|
2017-02-05T15:57:04.000Z
|
2018-04-15T22:57:26.000Z
|
django/contrib/messages/__init__.py
|
egenerat/gae-django
|
f12379483cf3917ed3cb46ca5ff0b94daf89fc50
|
[
"MIT"
] | null | null | null |
from api import *
from constants import *
| 14.666667
| 24
| 0.727273
| 6
| 44
| 5.333333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.227273
| 44
| 2
| 25
| 22
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0a0517f8ec739a367f066c688a229f9ac1aa820f
| 19,411
|
py
|
Python
|
istio/datadog_checks/istio/metrics.py
|
grosser/integrations-core
|
4afe8e448fec0e152e0e2a8deb70b1efff7b2128
|
[
"BSD-3-Clause"
] | null | null | null |
istio/datadog_checks/istio/metrics.py
|
grosser/integrations-core
|
4afe8e448fec0e152e0e2a8deb70b1efff7b2128
|
[
"BSD-3-Clause"
] | null | null | null |
istio/datadog_checks/istio/metrics.py
|
grosser/integrations-core
|
4afe8e448fec0e152e0e2a8deb70b1efff7b2128
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2020 - Present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# Go-runtime and process metrics exposed by every Istio component, mapped
# from the raw Prometheus metric name to the name submitted to Datadog
# (underscores in the raw name become namespace dots).
GENERIC_METRICS = {
    'go_gc_duration_seconds': 'go.gc_duration_seconds',
    'go_goroutines': 'go.goroutines',
    'go_info': 'go.info',
    'go_memstats_alloc_bytes': 'go.memstats.alloc_bytes',
    'go_memstats_alloc_bytes_total': 'go.memstats.alloc_bytes_total',
    'go_memstats_buck_hash_sys_bytes': 'go.memstats.buck_hash_sys_bytes',
    'go_memstats_frees_total': 'go.memstats.frees_total',
    'go_memstats_gc_cpu_fraction': 'go.memstats.gc_cpu_fraction',
    'go_memstats_gc_sys_bytes': 'go.memstats.gc_sys_bytes',
    'go_memstats_heap_alloc_bytes': 'go.memstats.heap_alloc_bytes',
    'go_memstats_heap_idle_bytes': 'go.memstats.heap_idle_bytes',
    'go_memstats_heap_inuse_bytes': 'go.memstats.heap_inuse_bytes',
    'go_memstats_heap_objects': 'go.memstats.heap_objects',
    'go_memstats_heap_released_bytes': 'go.memstats.heap_released_bytes',
    'go_memstats_heap_sys_bytes': 'go.memstats.heap_sys_bytes',
    'go_memstats_last_gc_time_seconds': 'go.memstats.last_gc_time_seconds',
    'go_memstats_lookups_total': 'go.memstats.lookups_total',
    'go_memstats_mallocs_total': 'go.memstats.mallocs_total',
    'go_memstats_mcache_inuse_bytes': 'go.memstats.mcache_inuse_bytes',
    'go_memstats_mcache_sys_bytes': 'go.memstats.mcache_sys_bytes',
    'go_memstats_mspan_inuse_bytes': 'go.memstats.mspan_inuse_bytes',
    'go_memstats_mspan_sys_bytes': 'go.memstats.mspan_sys_bytes',
    'go_memstats_next_gc_bytes': 'go.memstats.next_gc_bytes',
    'go_memstats_other_sys_bytes': 'go.memstats.other_sys_bytes',
    'go_memstats_stack_inuse_bytes': 'go.memstats.stack_inuse_bytes',
    'go_memstats_stack_sys_bytes': 'go.memstats.stack_sys_bytes',
    'go_memstats_sys_bytes': 'go.memstats.sys_bytes',
    'go_threads': 'go.threads',
    'process_cpu_seconds_total': 'process.cpu_seconds_total',
    'process_max_fds': 'process.max_fds',
    'process_open_fds': 'process.open_fds',
    'process_resident_memory_bytes': 'process.resident_memory_bytes',
    'process_start_time_seconds': 'process.start_time_seconds',
    'process_virtual_memory_bytes': 'process.virtual_memory_bytes',
}
# Citadel (Istio certificate authority) metrics:
# raw Prometheus name -> Datadog metric suffix.
CITADEL_METRICS = {
    'citadel_secret_controller_csr_err_count': 'secret_controller.csr_err_count',
    'citadel_secret_controller_secret_deleted_cert_count': 'secret_controller.secret_deleted_cert_count',
    'citadel_secret_controller_svc_acc_created_cert_count': 'secret_controller.svc_acc_created_cert_count',
    'citadel_secret_controller_svc_acc_deleted_cert_count': 'secret_controller.svc_acc_deleted_cert_count',
    'citadel_server_authentication_failure_count': 'server.authentication_failure_count',
    'citadel_server_citadel_root_cert_expiry_timestamp': 'server.citadel_root_cert_expiry_timestamp',
    'citadel_server_csr_count': 'server.csr_count',
    'citadel_server_csr_parsing_err_count': 'server.csr_parsing_err_count',
    'citadel_server_id_extraction_err_count': 'server.id_extraction_err_count',
    'citadel_server_success_cert_issuance_count': 'server.success_cert_issuance_count',
    'citadel_server_root_cert_expiry_timestamp': 'server.root_cert_expiry_timestamp',
}
# Galley (configuration validation and distribution) metrics:
# raw Prometheus name -> Datadog metric suffix.
GALLEY_METRICS = {
    'endpoint_no_pod': 'endpoint_no_pod',
    'galley_mcp_source_clients_total': 'mcp_source.clients_total',
    'galley_runtime_processor_event_span_duration_milliseconds': 'runtime_processor.event_span_duration_milliseconds',
    'galley_runtime_processor_events_processed_total': 'runtime_processor.events_processed_total',
    'galley_runtime_processor_snapshot_events_total': 'runtime_processor.snapshot_events_total',
    'galley_runtime_processor_snapshot_lifetime_duration_milliseconds': (
        'runtime_processor.snapshot_lifetime_duration_milliseconds'
    ),
    'galley_runtime_processor_snapshots_published_total': 'runtime_processor.snapshots_published_total',
    'galley_runtime_state_type_instances_total': 'runtime_state_type_instances_total',
    'galley_runtime_strategy_on_change_total': 'runtime_strategy.on_change_total',
    'galley_runtime_strategy_timer_max_time_reached_total': 'runtime_strategy.timer_max_time_reached_total',
    'galley_runtime_strategy_timer_quiesce_reached_total': 'runtime_strategy.quiesce_reached_total',
    'galley_runtime_strategy_timer_resets_total': 'runtime_strategy.timer_resets_total',
    'galley_source_kube_dynamic_converter_success_total': 'source_kube.dynamic_converter_success_total',
    'galley_source_kube_event_success_total': 'source_kube.event_success_total',
    'galley_validation_cert_key_updates': 'validation.cert_key_updates',
    'galley_validation_config_load': 'validation.config_load',
    'galley_validation_config_updates': 'validation.config_update',
    'galley_validation_passed': 'validation.passed',
    # Added in Istio 1.5
    'galley_validation_config_update_error': 'validation.config_update_error',
}
# Istio data-plane (mesh traffic) metrics. The raw metric names changed
# between Istio releases, so several raw names map onto the same Datadog
# metric name to keep dashboards stable across versions.
MESH_METRICS = {
    # These metrics support Istio 1.5
    'istio_request_duration_milliseconds': 'request.duration.milliseconds',
    # These metrics support Istio 1.0
    'istio_requests_total': 'request.count',
    'istio_request_duration_seconds': 'request.duration',
    'istio_request_bytes': 'request.size',
    'istio_response_bytes': 'response.size',
    # These metrics support Istio 0.8
    'istio_request_count': 'request.count',
    'istio_request_duration': 'request.duration',
    'istio_request_size': 'request.size',
    'istio_response_size': 'response.size',
    # TCP metrics
    'istio_tcp_connections_closed_total': 'tcp.connections_closed.total',
    'istio_tcp_connections_opened_total': 'tcp.connections_opened.total',
    'istio_tcp_received_bytes_total': 'tcp.received_bytes.total',
    # NOTE(review): 'send_bytes' (not 'sent_bytes') looks like a historical
    # typo, but renaming the emitted metric would break existing dashboards.
    'istio_tcp_sent_bytes_total': 'tcp.send_bytes.total',
}
# Mixer (legacy telemetry/policy component) metrics:
# raw Prometheus name -> Datadog metric suffix, split by Istio release.
MIXER_METRICS = {
    # Pre 1.1 metrics
    'grpc_server_handled_total': 'grpc.server.handled_total',
    'grpc_server_handling_seconds': 'grpc.server.handling_seconds',
    'grpc_server_msg_received_total': 'grpc.server.msg_received_total',
    'grpc_server_msg_sent_total': 'grpc.server.msg_sent_total',
    'grpc_server_started_total': 'grpc.server.started_total',
    'mixer_adapter_dispatch_count': 'adapter.dispatch_count',
    'mixer_adapter_dispatch_duration': 'adapter.dispatch_duration',
    'mixer_adapter_old_dispatch_count': 'adapter.old_dispatch_count',
    'mixer_adapter_old_dispatch_duration': 'adapter.old_dispatch_duration',
    'mixer_config_resolve_actions': 'config.resolve_actions',
    'mixer_config_resolve_count': 'config.resolve_count',
    'mixer_config_resolve_duration': 'config.resolve_duration',
    'mixer_config_resolve_rules': 'config.resolve_rules',
    # 1.1 metrics
    'grpc_io_server_completed_rpcs': 'grpc_io_server.completed_rpcs',
    'grpc_io_server_received_bytes_per_rpc': 'grpc_io_server.received_bytes_per_rpc',
    'grpc_io_server_sent_bytes_per_rpc': 'grpc_io_server.sent_bytes_per_rpc',
    'grpc_io_server_server_latency': 'grpc_io_server.server_latency',
    'mixer_config_attributes_total': 'config.attributes_total',
    'mixer_config_handler_configs_total': 'config.handler_configs_total',
    'mixer_config_instance_configs_total': 'config.instance_configs_total',
    'mixer_config_rule_configs_total': 'config.rule_configs_total',
    'mixer_dispatcher_destinations_per_request': 'dispatcher.destinations_per_request',
    'mixer_dispatcher_instances_per_request': 'dispatcher.instances_per_request',
    'mixer_handler_daemons_total': 'handler.daemons_total',
    'mixer_handler_new_handlers_total': 'handler.new_handlers_total',
    'mixer_mcp_sink_reconnections': 'mcp_sink.reconnections',
    'mixer_mcp_sink_request_acks_total': 'mcp_sink.request_acks_total',
    'mixer_runtime_dispatches_total': 'runtime.dispatches_total',
    'mixer_runtime_dispatch_duration_seconds': 'runtime.dispatch_duration_seconds',
}
# Pilot (service discovery / xDS configuration) metrics:
# raw Prometheus name -> Datadog metric suffix.
PILOT_METRICS = {
    'pilot_conflict_inbound_listener': 'conflict.inbound_listener',
    'pilot_conflict_outbound_listener_http_over_current_tcp': ('conflict.outbound_listener.http_over_current_tcp'),
    'pilot_conflict_outbound_listener_tcp_over_current_http': ('conflict.outbound_listener.tcp_over_current_http'),
    'pilot_conflict_outbound_listener_tcp_over_current_tcp': ('conflict.outbound_listener.tcp_over_current_tcp'),
    'pilot_destrule_subsets': 'destrule_subsets',
    'pilot_duplicate_envoy_clusters': 'duplicate_envoy_clusters',
    'pilot_eds_no_instances': 'eds_no_instances',
    'pilot_endpoint_not_ready': 'endpoint_not_ready',
    'pilot_invalid_out_listeners': 'invalid_out_listeners',
    'pilot_mcp_sink_reconnections': 'mcp_sink.reconnections',
    'pilot_mcp_sink_recv_failures_total': 'mcp_sink.recv_failures_total',
    'pilot_mcp_sink_request_acks_total': 'mcp_sink.request_acks_total',
    'pilot_no_ip': 'no_ip',
    'pilot_proxy_convergence_time': 'proxy_convergence_time',
    'pilot_rds_expired_nonce': 'rds_expired_nonce',
    'pilot_services': 'services',
    'pilot_total_xds_internal_errors': 'total_xds_internal_errors',
    'pilot_total_xds_rejects': 'total_xds_rejects',
    'pilot_virt_services': 'virt_services',
    'pilot_vservice_dup_domain': 'vservice_dup_domain',
    'pilot_xds': 'xds',
    'pilot_xds_eds_instances': 'xds.eds_instances',
    'pilot_xds_push_context_errors': 'xds.push.context_errors',
    'pilot_xds_push_timeout': 'xds.push.timeout',
    'pilot_xds_push_timeout_failures': 'xds.push.timeout_failures',
    'pilot_xds_pushes': 'xds.pushes',
    'pilot_xds_write_timeout': 'xds.write_timeout',
    # NOTE(review): unlike the entries above, the four values below already
    # carry the 'pilot.' prefix — presumably intentional for namespacing the
    # reject counters, but worth confirming against the check's namespace logic.
    'pilot_xds_rds_reject': 'pilot.xds.rds_reject',
    'pilot_xds_eds_reject': 'pilot.xds.eds_reject',
    'pilot_xds_cds_reject': 'pilot.xds.cds_reject',
    'pilot_xds_lds_reject': 'pilot.xds.lds_reject',
}
# Metrics exposed by istiod, the single control-plane binary that replaced
# the separate Pilot/Mixer/Galley/Citadel deployments in Istio 1.5. The raw
# Prometheus names are unchanged from the legacy components, so each entry
# re-namespaces its Datadog name under the legacy component's prefix.
ISTIOD_METRICS = {
    # Maintain namespace compatibility from legacy components
    # Generic metrics
    'go_gc_duration_seconds': 'go.gc_duration_seconds',
    'go_goroutines': 'go.goroutines',
    'go_info': 'go.info',
    'go_memstats_alloc_bytes': 'go.memstats.alloc_bytes',
    'go_memstats_alloc_bytes_total': 'go.memstats.alloc_bytes_total',
    'go_memstats_buck_hash_sys_bytes': 'go.memstats.buck_hash_sys_bytes',
    'go_memstats_frees_total': 'go.memstats.frees_total',
    'go_memstats_gc_cpu_fraction': 'go.memstats.gc_cpu_fraction',
    'go_memstats_gc_sys_bytes': 'go.memstats.gc_sys_bytes',
    'go_memstats_heap_alloc_bytes': 'go.memstats.heap_alloc_bytes',
    'go_memstats_heap_idle_bytes': 'go.memstats.heap_idle_bytes',
    'go_memstats_heap_inuse_bytes': 'go.memstats.heap_inuse_bytes',
    'go_memstats_heap_objects': 'go.memstats.heap_objects',
    'go_memstats_heap_released_bytes': 'go.memstats.heap_released_bytes',
    'go_memstats_heap_sys_bytes': 'go.memstats.heap_sys_bytes',
    'go_memstats_last_gc_time_seconds': 'go.memstats.last_gc_time_seconds',
    'go_memstats_lookups_total': 'go.memstats.lookups_total',
    'go_memstats_mallocs_total': 'go.memstats.mallocs_total',
    'go_memstats_mcache_inuse_bytes': 'go.memstats.mcache_inuse_bytes',
    'go_memstats_mcache_sys_bytes': 'go.memstats.mcache_sys_bytes',
    'go_memstats_mspan_inuse_bytes': 'go.memstats.mspan_inuse_bytes',
    'go_memstats_mspan_sys_bytes': 'go.memstats.mspan_sys_bytes',
    'go_memstats_next_gc_bytes': 'go.memstats.next_gc_bytes',
    'go_memstats_other_sys_bytes': 'go.memstats.other_sys_bytes',
    'go_memstats_stack_inuse_bytes': 'go.memstats.stack_inuse_bytes',
    'go_memstats_stack_sys_bytes': 'go.memstats.stack_sys_bytes',
    'go_memstats_sys_bytes': 'go.memstats.sys_bytes',
    'go_threads': 'go.threads',
    'process_cpu_seconds_total': 'process.cpu_seconds_total',
    'process_max_fds': 'process.max_fds',
    'process_open_fds': 'process.open_fds',
    'process_resident_memory_bytes': 'process.resident_memory_bytes',
    'process_start_time_seconds': 'process.start_time_seconds',
    'process_virtual_memory_bytes': 'process.virtual_memory_bytes',
    # Pilot metrics, re-namespaced under 'pilot.'
    'pilot_conflict_inbound_listener': 'pilot.conflict.inbound_listener',
    'pilot_conflict_outbound_listener_http_over_current_tcp': (
        'pilot.conflict.outbound_listener.http_over_current_tcp'
    ),
    'pilot_conflict_outbound_listener_tcp_over_current_http': (
        'pilot.conflict.outbound_listener.tcp_over_current_http'
    ),
    'pilot_conflict_outbound_listener_tcp_over_current_tcp': ('pilot.conflict.outbound_listener.tcp_over_current_tcp'),
    'pilot_destrule_subsets': 'pilot.destrule_subsets',
    'pilot_duplicate_envoy_clusters': 'pilot.duplicate_envoy_clusters',
    'pilot_eds_no_instances': 'pilot.eds_no_instances',
    'pilot_endpoint_not_ready': 'pilot.endpoint_not_ready',
    'pilot_invalid_out_listeners': 'pilot.invalid_out_listeners',
    'pilot_mcp_sink_reconnections': 'pilot.mcp_sink.reconnections',
    'pilot_mcp_sink_recv_failures_total': 'pilot.mcp_sink.recv_failures_total',
    'pilot_mcp_sink_request_acks_total': 'pilot.mcp_sink.request_acks_total',
    'pilot_no_ip': 'pilot.no_ip',
    'pilot_proxy_convergence_time': 'pilot.proxy_convergence_time',
    'pilot_rds_expired_nonce': 'pilot.rds_expired_nonce',
    'pilot_services': 'pilot.services',
    'pilot_total_xds_internal_errors': 'pilot.total_xds_internal_errors',
    'pilot_total_xds_rejects': 'pilot.total_xds_rejects',
    'pilot_virt_services': 'pilot.virt_services',
    'pilot_vservice_dup_domain': 'pilot.vservice_dup_domain',
    'pilot_xds': 'pilot.xds',
    'pilot_xds_eds_instances': 'pilot.xds.eds_instances',
    'pilot_xds_push_context_errors': 'pilot.xds.push.context_errors',
    'pilot_xds_push_timeout': 'pilot.xds.push.timeout',
    'pilot_xds_push_timeout_failures': 'pilot.xds.push.timeout_failures',
    'pilot_xds_pushes': 'pilot.xds.pushes',
    'pilot_xds_write_timeout': 'pilot.xds.write_timeout',
    'pilot_xds_rds_reject': 'pilot.xds.rds_reject',
    'pilot_xds_eds_reject': 'pilot.xds.eds_reject',
    'pilot_xds_cds_reject': 'pilot.xds.cds_reject',
    'pilot_xds_lds_reject': 'pilot.xds.lds_reject',
    # gRPC server metrics (kept under the legacy 'grpc.server.' names)
    'grpc_server_handled_total': 'grpc.server.handled_total',
    'grpc_server_handling_seconds': 'grpc.server.handling_seconds',
    'grpc_server_msg_received_total': 'grpc.server.msg_received_total',
    'grpc_server_msg_sent_total': 'grpc.server.msg_sent_total',
    'grpc_server_started_total': 'grpc.server.started_total',
    # Mixer metrics, re-namespaced under 'mixer.'
    'grpc_io_server_completed_rpcs': 'mixer.grpc_io_server.completed_rpcs',
    'grpc_io_server_received_bytes_per_rpc': 'mixer.grpc_io_server.received_bytes_per_rpc',
    'grpc_io_server_sent_bytes_per_rpc': 'mixer.grpc_io_server.sent_bytes_per_rpc',
    'grpc_io_server_server_latency': 'mixer.grpc_io_server.server_latency',
    'mixer_config_attributes_total': 'mixer.config.attributes_total',
    'mixer_config_handler_configs_total': 'mixer.config.handler_configs_total',
    'mixer_config_instance_configs_total': 'mixer.config.instance_configs_total',
    'mixer_config_rule_configs_total': 'mixer.config.rule_configs_total',
    'mixer_dispatcher_destinations_per_request': 'mixer.dispatcher.destinations_per_request',
    'mixer_dispatcher_instances_per_request': 'mixer.dispatcher.instances_per_request',
    'mixer_handler_daemons_total': 'mixer.handler.daemons_total',
    'mixer_handler_new_handlers_total': 'mixer.handler.new_handlers_total',
    'mixer_mcp_sink_reconnections': 'mixer.mcp_sink.reconnections',
    'mixer_mcp_sink_request_acks_total': 'mixer.mcp_sink.request_acks_total',
    'mixer_runtime_dispatches_total': 'mixer.runtime.dispatches_total',
    'mixer_runtime_dispatch_duration_seconds': 'mixer.runtime.dispatch_duration_seconds',
    # Galley metrics, re-namespaced under 'galley.'
    'endpoint_no_pod': 'galley.endpoint_no_pod',
    'galley_mcp_source_clients_total': 'galley.mcp_source.clients_total',
    'galley_runtime_processor_event_span_duration_milliseconds': (
        'galley.runtime_processor.event_span_duration_milliseconds'
    ),
    'galley_runtime_processor_events_processed_total': 'galley.runtime_processor.events_processed_total',
    'galley_runtime_processor_snapshot_events_total': 'galley.runtime_processor.snapshot_events_total',
    'galley_runtime_processor_snapshot_lifetime_duration_milliseconds': (
        'galley.runtime_processor.snapshot_lifetime_duration_milliseconds'
    ),
    'galley_runtime_processor_snapshots_published_total': ('galley.runtime_processor.snapshots_published_total'),
    'galley_runtime_state_type_instances_total': 'galley.runtime_state_type_instances_total',
    'galley_runtime_strategy_on_change_total': 'galley.runtime_strategy.on_change_total',
    'galley_runtime_strategy_timer_max_time_reached_total': ('galley.runtime_strategy.timer_max_time_reached_total'),
    'galley_runtime_strategy_timer_quiesce_reached_total': 'galley.runtime_strategy.quiesce_reached_total',
    'galley_runtime_strategy_timer_resets_total': 'galley.runtime_strategy.timer_resets_total',
    'galley_source_kube_dynamic_converter_success_total': ('galley.source_kube.dynamic_converter_success_total'),
    'galley_source_kube_event_success_total': 'galley.source_kube.event_success_total',
    'galley_validation_config_load': 'galley.validation.config_load',
    'galley_validation_config_updates': 'galley.validation.config_update',
    # Citadel metrics, re-namespaced under 'citadel.'
    'citadel_secret_controller_csr_err_count': 'citadel.secret_controller.csr_err_count',
    'citadel_secret_controller_secret_deleted_cert_count': ('citadel.secret_controller.secret_deleted_cert_count'),
    'citadel_secret_controller_svc_acc_created_cert_count': ('citadel.secret_controller.svc_acc_created_cert_count'),
    'citadel_secret_controller_svc_acc_deleted_cert_count': ('citadel.secret_controller.svc_acc_deleted_cert_count'),
    'citadel_server_authentication_failure_count': 'citadel.server.authentication_failure_count',
    'citadel_server_citadel_root_cert_expiry_timestamp': ('citadel.server.citadel_root_cert_expiry_timestamp'),
    'citadel_server_csr_count': 'citadel.server.csr_count',
    'citadel_server_csr_parsing_err_count': 'citadel.server.csr_parsing_err_count',
    'citadel_server_id_extraction_err_count': 'citadel.server.id_extraction_err_count',
    'citadel_server_success_cert_issuance_count': 'citadel.server.success_cert_issuance_count',
    # These metrics supported Istio 1.5
    'galley_validation_config_update_error': 'galley.validation.config_update_error',
    'citadel_server_root_cert_expiry_timestamp': 'citadel.server.root_cert_expiry_timestamp',
    'galley_validation_passed': 'galley.validation.passed',
    'galley_validation_failed': 'galley.validation.failed',
    'pilot_conflict_outbound_listener_http_over_https': 'pilot.conflict.outbound_listener.http_over_https',
    'pilot_inbound_updates': 'pilot.inbound_updates',
    'pilot_k8s_cfg_events': 'pilot.k8s.cfg_events',
    'pilot_k8s_reg_events': 'pilot.k8s.reg_events',
    'pilot_proxy_queue_time': 'pilot.proxy_queue_time',
    'pilot_push_triggers': 'pilot.push.triggers',
    'pilot_xds_eds_all_locality_endpoints': 'pilot.xds.eds_all_locality_endpoints',
    'pilot_xds_push_time': 'pilot.xds.push.time',
    'process_virtual_memory_max_bytes': 'process.virtual_memory_max_bytes',
    'sidecar_injection_requests_total': 'sidecar_injection.requests_total',
    'sidecar_injection_success_total': 'sidecar_injection.success_total',
    'sidecar_injection_failure_total': 'sidecar_injection.failure_total',
    'sidecar_injection_skip_total': 'sidecar_injection.skip_total',
}
| 61.233438
| 120
| 0.798774
| 2,511
| 19,411
| 5.57268
| 0.092393
| 0.068606
| 0.07075
| 0.038591
| 0.893375
| 0.853212
| 0.798542
| 0.733938
| 0.646609
| 0.57343
| 0
| 0.00125
| 0.093555
| 19,411
| 316
| 121
| 61.427215
| 0.794032
| 0.019577
| 0
| 0.32069
| 0
| 0
| 0.81485
| 0.737025
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.006897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0a0e09eca6f88004a43e7b6443d11f7c60dd37b9
| 159
|
py
|
Python
|
src/bindings/python/__init__.py
|
Georepublic/valhalla
|
079c11978093608e730b22a52c2363d39eefdc15
|
[
"MIT"
] | 1
|
2022-02-19T05:31:55.000Z
|
2022-02-19T05:31:55.000Z
|
src/bindings/python/__init__.py
|
Georepublic/valhalla
|
079c11978093608e730b22a52c2363d39eefdc15
|
[
"MIT"
] | null | null | null |
src/bindings/python/__init__.py
|
Georepublic/valhalla
|
079c11978093608e730b22a52c2363d39eefdc15
|
[
"MIT"
] | null | null | null |
# Prefer the extension module bundled inside this package; fall back to a
# top-level import so the bindings still work when python_valhalla is only
# importable from the environment (e.g. when run from a build tree).
try:
    from .python_valhalla import *
except ModuleNotFoundError:
    from python_valhalla import *
from .actor import Actor
from .config import get_config
| 19.875
| 34
| 0.779874
| 20
| 159
| 6.05
| 0.5
| 0.165289
| 0.297521
| 0.396694
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176101
| 159
| 7
| 35
| 22.714286
| 0.923664
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
0a29088fbf5324b45efa5699af22995f9ebcd2e8
| 45
|
py
|
Python
|
sharpy/__init__.py
|
MadManSC2/sharpy-sc2
|
13950357df2db58033daab24f076e3ae83f0b2a8
|
[
"MIT"
] | 1
|
2020-03-05T19:21:56.000Z
|
2020-03-05T19:21:56.000Z
|
sharpy/__init__.py
|
MadManSC2/sharpy-sc2
|
13950357df2db58033daab24f076e3ae83f0b2a8
|
[
"MIT"
] | null | null | null |
sharpy/__init__.py
|
MadManSC2/sharpy-sc2
|
13950357df2db58033daab24f076e3ae83f0b2a8
|
[
"MIT"
] | null | null | null |
# re-export
from .constants import Constants
| 22.5
| 32
| 0.8
| 6
| 45
| 6
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 45
| 2
| 32
| 22.5
| 0.923077
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0a3d0752a0ca7e9afbc6e991f12468ea88b23894
| 216
|
py
|
Python
|
Server/test/apply/extension/test_extension_map_get.py
|
miraedbswo/DMS-Backend-V3-Student
|
37269d1d24b2c57b5edb96b69c74eecd3995fd2f
|
[
"MIT"
] | 10
|
2019-08-19T09:46:09.000Z
|
2021-04-29T10:47:54.000Z
|
Server/test/apply/extension/test_extension_map_get.py
|
DSM-DMS/DMS-Backend-V3-Student
|
3eedd071fab46ae29e9cc3d7b05ef9e5cfd446b6
|
[
"MIT"
] | 6
|
2018-10-10T23:37:20.000Z
|
2018-12-27T04:57:32.000Z
|
Server/test/apply/extension/test_extension_map_get.py
|
DSM-DMS/DMS-Backend-V3-Student
|
3eedd071fab46ae29e9cc3d7b05ef9e5cfd446b6
|
[
"MIT"
] | 3
|
2021-02-27T05:41:11.000Z
|
2021-06-28T03:10:31.000Z
|
from flask import Response
from app.model.apply import ExtensionApplyModel
from test import TCBase, check_status_code
from test.request import ApplyRequest
class TestGetExtensionMap(TCBase, ApplyRequest):
    """Placeholder test case for the extension-map GET endpoint.

    No test methods yet; inherits common test fixtures from TCBase and
    request helpers from ApplyRequest.
    """
    pass
| 24
| 48
| 0.837963
| 27
| 216
| 6.62963
| 0.666667
| 0.089385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 216
| 9
| 49
| 24
| 0.94709
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.166667
| 0.666667
| 0
| 0.833333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
6a7e8c4cb7dc46112f2a9c9d7d76bc7361af2318
| 3,137
|
py
|
Python
|
tests/test_engine/test_update/test_update_currentDate.py
|
bobuk/montydb
|
9ee299e7f1d3a7236abb683e0dfe4f7817859b2c
|
[
"BSD-3-Clause"
] | 478
|
2019-07-31T00:48:11.000Z
|
2022-03-18T09:12:29.000Z
|
tests/test_engine/test_update/test_update_currentDate.py
|
bobuk/montydb
|
9ee299e7f1d3a7236abb683e0dfe4f7817859b2c
|
[
"BSD-3-Clause"
] | 47
|
2019-07-28T10:12:22.000Z
|
2022-01-04T16:25:12.000Z
|
tests/test_engine/test_update/test_update_currentDate.py
|
bobuk/montydb
|
9ee299e7f1d3a7236abb683e0dfe4f7817859b2c
|
[
"BSD-3-Clause"
] | 26
|
2019-08-09T14:28:29.000Z
|
2022-02-22T02:49:51.000Z
|
import pytest
from pymongo.errors import WriteError as mongo_write_err
from montydb.errors import WriteError as monty_write_err
from ...conftest import skip_if_no_bson
def test_update_currentDate_1(monty_update, mongo_update):
docs = [
{"a": None}
]
spec = {"$currentDate": {"a": True}}
monty_c = monty_update(docs, spec)
mongo_c = mongo_update(docs, spec)
mg_date = next(mongo_c)["a"]
mt_date = next(monty_c)["a"]
assert mg_date.date() == mt_date.date()
assert mg_date.hour == mt_date.hour
assert mg_date.minute == mt_date.minute
def test_update_currentDate_2(monty_update, mongo_update):
docs = [
{"a": None}
]
spec = {"$currentDate": {"a": False}} # still set date
monty_c = monty_update(docs, spec)
mongo_c = mongo_update(docs, spec)
mg_date = next(mongo_c)["a"]
mt_date = next(monty_c)["a"]
assert mg_date.date() == mt_date.date()
assert mg_date.hour == mt_date.hour
assert mg_date.minute == mt_date.minute
def test_update_currentDate_3(monty_update, mongo_update):
docs = [
{"a": None}
]
spec = {"$currentDate": {"a": {"$type": "date"}}}
monty_c = monty_update(docs, spec)
mongo_c = mongo_update(docs, spec)
mg_date = next(mongo_c)["a"]
mt_date = next(monty_c)["a"]
assert mg_date.date() == mt_date.date()
assert mg_date.hour == mt_date.hour
assert mg_date.minute == mt_date.minute
@skip_if_no_bson
def test_update_currentDate_4(monty_update, mongo_update):
    """Both engines should write a BSON timestamp for {"$type": "timestamp"}."""
    documents = [{"a": None}]
    update_spec = {"$currentDate": {"a": {"$type": "timestamp"}}}

    result_monty = monty_update(documents, update_spec)
    result_mongo = mongo_update(documents, update_spec)

    ts_mongo = next(result_mongo)["a"]
    ts_monty = next(result_monty)["a"]

    # The two timestamps are taken moments apart, so allow a small delta.
    assert ts_mongo.time - ts_monty.time < 10
    assert ts_mongo.inc == ts_monty.inc
def _check_currentDate_write_error(monty_update, mongo_update, spec):
    """Both engines must reject the invalid *spec* with a WriteError.

    Error codes are intentionally not compared; montydb does not mirror
    MongoDB's error codes here.
    """
    docs = [
        {"a": None}
    ]
    with pytest.raises(mongo_write_err):
        mongo_update(docs, spec)
    with pytest.raises(monty_write_err):
        monty_update(docs, spec)


def test_update_currentDate_5(monty_update, mongo_update):
    # A numeric argument is not a valid $currentDate specification
    _check_currentDate_write_error(monty_update, mongo_update, {"$currentDate": {"a": 1}})


def test_update_currentDate_6(monty_update, mongo_update):
    # Unknown operator inside the document argument
    _check_currentDate_write_error(monty_update, mongo_update, {"$currentDate": {"a": {"not_op": True}}})


def test_update_currentDate_7(monty_update, mongo_update):
    # $type must be "date" or "timestamp"
    _check_currentDate_write_error(
        monty_update, mongo_update, {"$currentDate": {"a": {"$type": "not date nor timestamp"}}}
    )
| 25.504065
| 71
| 0.651897
| 445
| 3,137
| 4.303371
| 0.134831
| 0.109661
| 0.109661
| 0.087728
| 0.855875
| 0.849086
| 0.839164
| 0.839164
| 0.801567
| 0.801567
| 0
| 0.004067
| 0.21613
| 3,137
| 122
| 72
| 25.713115
| 0.774705
| 0.080969
| 0
| 0.620253
| 0
| 0
| 0.056367
| 0
| 0
| 0
| 0
| 0
| 0.139241
| 1
| 0.088608
| false
| 0
| 0.050633
| 0
| 0.139241
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6aa6908338b46c742f8e827b7137e230aeec8285
| 14,116
|
py
|
Python
|
dandi/tests/test_delete.py
|
TheChymera/dandi-cli
|
9a42b1fa2f9af3be01254f7457f5a21d834c1864
|
[
"Apache-2.0"
] | null | null | null |
dandi/tests/test_delete.py
|
TheChymera/dandi-cli
|
9a42b1fa2f9af3be01254f7457f5a21d834c1864
|
[
"Apache-2.0"
] | null | null | null |
dandi/tests/test_delete.py
|
TheChymera/dandi-cli
|
9a42b1fa2f9af3be01254f7457f5a21d834c1864
|
[
"Apache-2.0"
] | null | null | null |
from pathlib import Path
from typing import List
import pytest
from pytest_mock import MockerFixture
from .fixtures import DandiAPI, SampleDandiset
from ..consts import DRAFT, dandiset_metadata_file
from ..dandiapi import RESTFullAPIClient
from ..delete import delete
from ..download import download
from ..exceptions import NotFoundError
from ..utils import list_paths
# Each case gives the paths passed to `delete` (local paths and/or dandi://
# URLs with {instance}/{dandiset_id} placeholders filled in at runtime) and
# the asset paths expected to survive the deletion.
@pytest.mark.parametrize(
    "paths,remainder",
    [
        (
            ["subdir2/coconut.txt"],
            [
                Path("file.txt"),
                Path("subdir1", "apple.txt"),
                Path("subdir2", "banana.txt"),
            ],
        ),
        (["subdir2"], [Path("file.txt"), Path("subdir1", "apple.txt")]),
        (
            ["subdir2", "subdir2/coconut.txt"],
            [Path("file.txt"), Path("subdir1", "apple.txt")],
        ),
        (
            ["dandi://{instance}/{dandiset_id}/subdir2/coconut.txt"],
            [
                Path("file.txt"),
                Path("subdir1", "apple.txt"),
                Path("subdir2", "banana.txt"),
            ],
        ),
        (
            ["dandi://{instance}/{dandiset_id}/subdir2/"],
            [Path("file.txt"), Path("subdir1", "apple.txt")],
        ),
        (
            [
                "dandi://{instance}/{dandiset_id}/subdir2/",
                "dandi://{instance}/{dandiset_id}/subdir2/coconut.txt",
            ],
            [Path("file.txt"), Path("subdir1", "apple.txt")],
        ),
        (
            [
                "subdir1",
                "dandi://{instance}/{dandiset_id}/subdir2/coconut.txt",
            ],
            [Path("file.txt"), Path("subdir2", "banana.txt")],
        ),
    ],
)
def test_delete_paths(
    mocker: MockerFixture,
    monkeypatch: pytest.MonkeyPatch,
    text_dandiset: SampleDandiset,
    tmp_path: Path,
    paths: List[str],
    remainder: List[Path],
) -> None:
    """Delete the given paths/URLs and verify only *remainder* assets survive."""
    # Run from inside the dandiset so relative paths resolve against it.
    monkeypatch.chdir(text_dandiset.dspath)
    monkeypatch.setenv("DANDI_API_KEY", text_dandiset.api.api_key)
    instance = text_dandiset.api.instance_id
    dandiset_id = text_dandiset.dandiset_id
    # Spy on the HTTP DELETE to confirm deletion actually hit the server.
    delete_spy = mocker.spy(RESTFullAPIClient, "delete")
    delete(
        [p.format(instance=instance, dandiset_id=dandiset_id) for p in paths],
        dandi_instance=instance,
        devel_debug=True,
        force=True,
    )
    delete_spy.assert_called()
    # Re-download the dandiset and compare the surviving files on disk.
    download(text_dandiset.dandiset.version_api_url, tmp_path)
    assert list_paths(tmp_path) == [
        tmp_path / dandiset_id / f for f in [Path("dandiset.yaml")] + remainder
    ]
@pytest.mark.parametrize("confirm", [True, False])
def test_delete_path_confirm(
    confirm: bool,
    mocker: MockerFixture,
    monkeypatch: pytest.MonkeyPatch,
    text_dandiset: SampleDandiset,
) -> None:
    """Without --force, an asset delete prompts and honors the user's answer."""
    monkeypatch.chdir(text_dandiset.dspath)
    monkeypatch.setenv("DANDI_API_KEY", text_dandiset.api.api_key)
    instance = text_dandiset.api.instance_id
    dandiset_id = text_dandiset.dandiset_id
    spy_delete = mocker.spy(RESTFullAPIClient, "delete")
    mock_confirm = mocker.patch("click.confirm", return_value=confirm)
    delete(["subdir2/coconut.txt"], dandi_instance=instance, devel_debug=True)
    expected_prompt = f"Delete 1 assets on server from Dandiset {dandiset_id}?"
    mock_confirm.assert_called_with(expected_prompt)
    # The DELETE request goes out iff the (mocked) prompt was accepted.
    checker = spy_delete.assert_called if confirm else spy_delete.assert_not_called
    checker()
def test_delete_path_pyout(
    mocker: MockerFixture,
    monkeypatch: pytest.MonkeyPatch,
    text_dandiset: SampleDandiset,
) -> None:
    """Deleting via the default (pyout) output path still issues the request."""
    monkeypatch.setenv("DANDI_API_KEY", text_dandiset.api.api_key)
    monkeypatch.chdir(text_dandiset.dspath)
    spy_delete = mocker.spy(RESTFullAPIClient, "delete")
    target_instance = text_dandiset.api.instance_id
    # No devel_debug here, so the pyout-based reporting branch is exercised.
    delete(["subdir2/coconut.txt"], dandi_instance=target_instance, force=True)
    spy_delete.assert_called()
@pytest.mark.parametrize(
    "paths",
    [
        ["dandi://{instance}/{dandiset_id}"],
        ["dandi://{instance}/{dandiset_id}", "file.txt"],
        ["file.txt", "dandi://{instance}/{dandiset_id}"],
        [
            "dandi://{instance}/{dandiset_id}",
            "dandi://{instance}/{dandiset_id}/subdir2/coconut.txt",
        ],
        [
            "dandi://{instance}/{dandiset_id}/subdir2/coconut.txt",
            "dandi://{instance}/{dandiset_id}",
        ],
    ],
)
def test_delete_dandiset(
    mocker: MockerFixture,
    monkeypatch: pytest.MonkeyPatch,
    text_dandiset: SampleDandiset,
    paths: List[str],
) -> None:
    """A bare Dandiset URL anywhere in *paths* deletes the entire Dandiset."""
    monkeypatch.chdir(text_dandiset.dspath)
    monkeypatch.setenv("DANDI_API_KEY", text_dandiset.api.api_key)
    instance = text_dandiset.api.instance_id
    dandiset_id = text_dandiset.dandiset_id
    delete_spy = mocker.spy(RESTFullAPIClient, "delete")
    delete(
        [p.format(instance=instance, dandiset_id=dandiset_id) for p in paths],
        dandi_instance=instance,
        devel_debug=True,
        force=True,
    )
    delete_spy.assert_called()
    # The Dandiset must no longer exist on the server.
    with pytest.raises(NotFoundError):
        text_dandiset.client.get_dandiset(dandiset_id, DRAFT, lazy=False)
@pytest.mark.parametrize("confirm", [True, False])
def test_delete_dandiset_confirm(
    confirm: bool,
    mocker: MockerFixture,
    monkeypatch: pytest.MonkeyPatch,
    text_dandiset: SampleDandiset,
) -> None:
    """Without --force, a whole-Dandiset delete asks for confirmation first."""
    monkeypatch.chdir(text_dandiset.dspath)
    monkeypatch.setenv("DANDI_API_KEY", text_dandiset.api.api_key)
    instance = text_dandiset.api.instance_id
    dandiset_id = text_dandiset.dandiset_id
    spy_delete = mocker.spy(RESTFullAPIClient, "delete")
    mock_confirm = mocker.patch("click.confirm", return_value=confirm)
    dandiset_url = f"dandi://{instance}/{dandiset_id}"
    delete([dandiset_url], dandi_instance=instance, devel_debug=True)
    mock_confirm.assert_called_with(f"Delete Dandiset {dandiset_id}?")
    # The DELETE request goes out iff the (mocked) prompt was accepted.
    checker = spy_delete.assert_called if confirm else spy_delete.assert_not_called
    checker()
def test_delete_dandiset_mismatch(
    mocker: MockerFixture,
    monkeypatch: pytest.MonkeyPatch,
    text_dandiset: SampleDandiset,
) -> None:
    """Mixing paths from two different Dandisets is rejected before any delete."""
    monkeypatch.chdir(text_dandiset.dspath)
    monkeypatch.setenv("DANDI_API_KEY", text_dandiset.api.api_key)
    instance = text_dandiset.api.instance_id
    dandiset_id = text_dandiset.dandiset_id
    # A syntactically valid Dandiset ID guaranteed to differ from the real one
    not_dandiset = str(int(dandiset_id) - 1).zfill(6)
    delete_spy = mocker.spy(RESTFullAPIClient, "delete")
    for paths in [
        [
            "subdir1/apple.txt",
            f"dandi://{instance}/{not_dandiset}/subdir2/coconut.txt",
        ],
        [
            f"dandi://{instance}/{dandiset_id}/subdir1/apple.txt",
            f"dandi://{instance}/{not_dandiset}/subdir2/coconut.txt",
        ],
    ]:
        with pytest.raises(ValueError) as excinfo:
            delete(paths, dandi_instance=instance, devel_debug=True, force=True)
        assert (
            str(excinfo.value) == "Cannot delete assets from multiple Dandisets at once"
        )
        # The error must be raised before any request is issued.
        delete_spy.assert_not_called()
def test_delete_instance_mismatch(
    mocker: MockerFixture,
    monkeypatch: pytest.MonkeyPatch,
    text_dandiset: SampleDandiset,
) -> None:
    """Mixing paths from two different API instances is rejected up front."""
    monkeypatch.chdir(text_dandiset.dspath)
    monkeypatch.setenv("DANDI_API_KEY", text_dandiset.api.api_key)
    instance = text_dandiset.api.instance_id
    dandiset_id = text_dandiset.dandiset_id
    delete_spy = mocker.spy(RESTFullAPIClient, "delete")
    for paths in [
        [
            "subdir1/apple.txt",
            f"dandi://dandi/{dandiset_id}/subdir2/coconut.txt",
        ],
        [
            f"dandi://{instance}/{dandiset_id}/subdir2/coconut.txt",
            f"dandi://dandi/{dandiset_id}/subdir1/apple.txt",
        ],
    ]:
        with pytest.raises(ValueError) as excinfo:
            delete(paths, dandi_instance=instance, devel_debug=True, force=True)
        assert (
            str(excinfo.value)
            == "Cannot delete assets from multiple API instances at once"
        )
        # The error must be raised before any request is issued.
        delete_spy.assert_not_called()
def test_delete_nonexistent_dandiset(
    local_dandi_api: DandiAPI, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch
) -> None:
    """Deleting from a Dandiset that does not exist raises NotFoundError."""
    monkeypatch.setenv("DANDI_API_KEY", local_dandi_api.api_key)
    instance = local_dandi_api.instance_id
    delete_spy = mocker.spy(RESTFullAPIClient, "delete")
    with pytest.raises(NotFoundError) as excinfo:
        delete(
            [f"dandi://{instance}/999999/subdir1/apple.txt"],
            dandi_instance=instance,
            devel_debug=True,
            force=True,
        )
    assert str(excinfo.value) == "No such Dandiset: '999999'"
    delete_spy.assert_not_called()
def test_delete_nonexistent_dandiset_skip_missing(
    local_dandi_api: DandiAPI, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch
) -> None:
    """With skip_missing=True, a missing Dandiset is silently ignored."""
    monkeypatch.setenv("DANDI_API_KEY", local_dandi_api.api_key)
    instance = local_dandi_api.instance_id
    rest_delete_spy = mocker.spy(RESTFullAPIClient, "delete")
    missing_url = f"dandi://{instance}/999999/subdir1/apple.txt"
    delete(
        [missing_url],
        dandi_instance=instance,
        devel_debug=True,
        force=True,
        skip_missing=True,
    )
    # Nothing exists to delete, so no DELETE request should be made.
    rest_delete_spy.assert_not_called()
def test_delete_nonexistent_asset(
    mocker: MockerFixture,
    monkeypatch: pytest.MonkeyPatch,
    text_dandiset: SampleDandiset,
) -> None:
    """One missing asset in the batch aborts the whole delete with NotFoundError."""
    monkeypatch.setenv("DANDI_API_KEY", text_dandiset.api.api_key)
    instance = text_dandiset.api.instance_id
    dandiset_id = text_dandiset.dandiset_id
    delete_spy = mocker.spy(RESTFullAPIClient, "delete")
    with pytest.raises(NotFoundError) as excinfo:
        delete(
            [
                f"dandi://{instance}/{dandiset_id}/file.txt",
                f"dandi://{instance}/{dandiset_id}/subdir3/mango.txt",
            ],
            dandi_instance=instance,
            devel_debug=True,
            force=True,
        )
    assert (
        str(excinfo.value)
        == f"No assets found for dandi://{instance}/{dandiset_id}/subdir3/mango.txt"
    )
    # The existing file.txt must NOT have been deleted either.
    delete_spy.assert_not_called()
def test_delete_nonexistent_asset_skip_missing(
    mocker: MockerFixture,
    monkeypatch: pytest.MonkeyPatch,
    text_dandiset: SampleDandiset,
    tmp_path: Path,
) -> None:
    """skip_missing=True deletes the assets that do exist and skips the rest."""
    monkeypatch.setenv("DANDI_API_KEY", text_dandiset.api.api_key)
    instance = text_dandiset.api.instance_id
    dandiset_id = text_dandiset.dandiset_id
    delete_spy = mocker.spy(RESTFullAPIClient, "delete")
    delete(
        [
            f"dandi://{instance}/{dandiset_id}/file.txt",
            f"dandi://{instance}/{dandiset_id}/subdir3/mango.txt",
        ],
        dandi_instance=instance,
        devel_debug=True,
        force=True,
        skip_missing=True,
    )
    delete_spy.assert_called()
    download(text_dandiset.dandiset.version_api_url, tmp_path)
    # file.txt is gone; everything else survives.
    assert list_paths(tmp_path) == [
        tmp_path / dandiset_id / "dandiset.yaml",
        tmp_path / dandiset_id / "subdir1" / "apple.txt",
        tmp_path / dandiset_id / "subdir2" / "banana.txt",
        tmp_path / dandiset_id / "subdir2" / "coconut.txt",
    ]
def test_delete_nonexistent_asset_folder(
    mocker: MockerFixture,
    monkeypatch: pytest.MonkeyPatch,
    text_dandiset: SampleDandiset,
) -> None:
    """One missing folder in the batch aborts the whole delete with NotFoundError."""
    monkeypatch.setenv("DANDI_API_KEY", text_dandiset.api.api_key)
    instance = text_dandiset.api.instance_id
    dandiset_id = text_dandiset.dandiset_id
    delete_spy = mocker.spy(RESTFullAPIClient, "delete")
    with pytest.raises(NotFoundError) as excinfo:
        delete(
            [
                f"dandi://{instance}/{dandiset_id}/subdir1/",
                f"dandi://{instance}/{dandiset_id}/subdir3/",
            ],
            dandi_instance=instance,
            devel_debug=True,
            force=True,
        )
    assert (
        str(excinfo.value)
        == f"No assets found for dandi://{instance}/{dandiset_id}/subdir3/"
    )
    # The existing subdir1/ must NOT have been deleted either.
    delete_spy.assert_not_called()
def test_delete_nonexistent_asset_folder_skip_missing(
    mocker: MockerFixture,
    monkeypatch: pytest.MonkeyPatch,
    text_dandiset: SampleDandiset,
    tmp_path: Path,
) -> None:
    """skip_missing=True deletes existing folders and ignores missing ones."""
    monkeypatch.setenv("DANDI_API_KEY", text_dandiset.api.api_key)
    instance = text_dandiset.api.instance_id
    dandiset_id = text_dandiset.dandiset_id
    delete_spy = mocker.spy(RESTFullAPIClient, "delete")
    delete(
        [
            f"dandi://{instance}/{dandiset_id}/subdir1/",
            f"dandi://{instance}/{dandiset_id}/subdir3/",
        ],
        dandi_instance=instance,
        devel_debug=True,
        force=True,
        skip_missing=True,
    )
    delete_spy.assert_called()
    download(text_dandiset.dandiset.version_api_url, tmp_path)
    # subdir1/ is gone; everything else survives.
    assert list_paths(tmp_path) == [
        tmp_path / dandiset_id / "dandiset.yaml",
        tmp_path / dandiset_id / "file.txt",
        tmp_path / dandiset_id / "subdir2" / "banana.txt",
        tmp_path / dandiset_id / "subdir2" / "coconut.txt",
    ]
def test_delete_version(
    local_dandi_api: DandiAPI, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch
) -> None:
    """Deleting a specific Dandiset version is unsupported by the server."""
    monkeypatch.setenv("DANDI_API_KEY", local_dandi_api.api_key)
    instance = local_dandi_api.instance_id
    delete_spy = mocker.spy(RESTFullAPIClient, "delete")
    with pytest.raises(NotImplementedError) as excinfo:
        delete(
            [f"dandi://{instance}/999999@draft"],
            dandi_instance=instance,
            devel_debug=True,
            force=True,
        )
    assert str(excinfo.value) == (
        "Dandi API server does not support deletion of individual versions of a"
        " dandiset"
    )
    delete_spy.assert_not_called()
def test_delete_no_dandiset(
    mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch, tmp_path: Path
) -> None:
    """A relative path outside any local Dandiset directory is a RuntimeError."""
    # tmp_path contains no dandiset.yaml, so the Dandiset root lookup fails.
    monkeypatch.chdir(tmp_path)
    delete_spy = mocker.spy(RESTFullAPIClient, "delete")
    with pytest.raises(RuntimeError) as excinfo:
        delete(
            ["dir/file.txt"],
            dandi_instance="dandi",
            devel_debug=True,
            force=True,
        )
    assert str(excinfo.value) == (
        f"Found no {dandiset_metadata_file} anywhere. "
        "Use 'dandi download' or 'organize' first"
    )
    delete_spy.assert_not_called()
| 33.371158
| 88
| 0.646642
| 1,563
| 14,116
| 5.595649
| 0.079335
| 0.073176
| 0.055568
| 0.065744
| 0.891265
| 0.880631
| 0.858335
| 0.843357
| 0.807798
| 0.789847
| 0
| 0.00674
| 0.232715
| 14,116
| 422
| 89
| 33.450237
| 0.800757
| 0
| 0
| 0.7
| 0
| 0
| 0.187164
| 0.100028
| 0
| 0
| 0
| 0
| 0.074359
| 1
| 0.038462
| false
| 0
| 0.028205
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6ab7543ed5b346a22795cdc057bf7a3272dc4fa1
| 58,222
|
py
|
Python
|
ironic/tests/unit/drivers/modules/test_pxe.py
|
isabella232/ironic
|
9a0bd8a774143e6f767aaa3031be6b70554bc332
|
[
"Apache-2.0"
] | 2
|
2019-06-17T21:37:53.000Z
|
2020-07-11T03:58:39.000Z
|
ironic/tests/unit/drivers/modules/test_pxe.py
|
openshift/ironic
|
9a0bd8a774143e6f767aaa3031be6b70554bc332
|
[
"Apache-2.0"
] | 1
|
2019-06-16T22:53:49.000Z
|
2019-09-16T09:37:35.000Z
|
ironic/tests/unit/drivers/modules/test_pxe.py
|
isabella232/ironic
|
9a0bd8a774143e6f767aaa3031be6b70554bc332
|
[
"Apache-2.0"
] | 6
|
2019-06-13T12:49:33.000Z
|
2021-04-17T16:33:19.000Z
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for PXE driver."""
import os
import tempfile
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils as json
from oslo_utils import uuidutils
from ironic.common import boot_devices
from ironic.common import boot_modes
from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common.glance_service import base_image_service
from ironic.common import pxe_utils
from ironic.common import states
from ironic.common import utils as common_utils
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers import base as drivers_base
from ironic.drivers.modules import agent_base_vendor
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import fake
from ironic.drivers.modules import ipxe
from ironic.drivers.modules import pxe
from ironic.drivers.modules.storage import noop as noop_storage
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as obj_utils
CONF = cfg.CONF

# Canonical fixture dictionaries shared by the test cases below.
INST_INFO_DICT = db_utils.get_test_pxe_instance_info()
DRV_INFO_DICT = db_utils.get_test_pxe_driver_info()
DRV_INTERNAL_INFO_DICT = db_utils.get_test_pxe_driver_internal_info()
# NOTE(TheJulia): Mark pxe interface loading as None in order
# to prevent false counts for individual method tests.
@mock.patch.object(ipxe.iPXEBoot, '__init__', lambda self: None)
@mock.patch.object(pxe.PXEBoot, '__init__', lambda self: None)
class PXEBootTestCase(db_base.DbTestCase):
driver = 'fake-hardware'
boot_interface = 'pxe'
driver_info = DRV_INFO_DICT
driver_internal_info = DRV_INTERNAL_INFO_DICT
def setUp(self):
super(PXEBootTestCase, self).setUp()
self.context.auth_token = 'fake'
self.config_temp_dir('tftp_root', group='pxe')
self.config_temp_dir('images_path', group='pxe')
self.config_temp_dir('http_root', group='deploy')
instance_info = INST_INFO_DICT
instance_info['deploy_key'] = 'fake-56789'
self.config(enabled_boot_interfaces=[self.boot_interface,
'ipxe', 'fake'])
self.node = obj_utils.create_test_node(
self.context,
driver=self.driver,
boot_interface=self.boot_interface,
# Avoid fake properties in get_properties() output
vendor_interface='no-vendor',
instance_info=instance_info,
driver_info=self.driver_info,
driver_internal_info=self.driver_internal_info)
self.port = obj_utils.create_test_port(self.context,
node_id=self.node.id)
self.config(group='conductor', api_url='http://127.0.0.1:1234/')
def test_get_properties(self):
expected = pxe.COMMON_PROPERTIES
expected.update(agent_base_vendor.VENDOR_PROPERTIES)
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertEqual(expected, task.driver.get_properties())
    @mock.patch.object(base_image_service.BaseImageService, '_show',
                       autospec=True)
    def test_validate_good(self, mock_glance):
        """validate() passes when Glance reports kernel/ramdisk properties."""
        mock_glance.return_value = {'properties': {'kernel_id': 'fake-kernel',
                                                   'ramdisk_id': 'fake-initr'}}
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            task.driver.boot.validate(task)
    @mock.patch.object(base_image_service.BaseImageService, '_show',
                       autospec=True)
    def test_validate_good_whole_disk_image(self, mock_glance):
        """validate() passes for a whole-disk image without kernel/ramdisk."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            task.node.driver_internal_info['is_whole_disk_image'] = True
            task.driver.boot.validate(task)
    @mock.patch.object(base_image_service.BaseImageService, '_show',
                       autospec=True)
    @mock.patch.object(noop_storage.NoopStorage, 'should_write_image',
                       autospec=True)
    def test_validate_skip_check_write_image_false(self, mock_write,
                                                   mock_glance):
        """Image validation is skipped when the storage interface won't write."""
        mock_write.return_value = False
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            task.driver.boot.validate(task)
        # No image write planned, so Glance must not have been consulted.
        self.assertFalse(mock_glance.called)
    def test_validate_fail_missing_deploy_kernel(self):
        """validate() fails when driver_info lacks deploy_kernel."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            del task.node.driver_info['deploy_kernel']
            self.assertRaises(exception.MissingParameterValue,
                              task.driver.boot.validate, task)
    def test_validate_fail_missing_deploy_ramdisk(self):
        """validate() fails when driver_info lacks deploy_ramdisk."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            del task.node.driver_info['deploy_ramdisk']
            self.assertRaises(exception.MissingParameterValue,
                              task.driver.boot.validate, task)
    def test_validate_fail_missing_image_source(self):
        """validate() fails when instance_info lacks image_source."""
        info = dict(INST_INFO_DICT)
        del info['image_source']
        self.node.instance_info = json.dumps(info)
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            task.node['instance_info'] = json.dumps(info)
            self.assertRaises(exception.MissingParameterValue,
                              task.driver.boot.validate, task)
    def test_validate_fail_no_port(self):
        """validate() fails on a node that has no ports enrolled."""
        # Fresh node (distinct UUID) created without any associated port.
        new_node = obj_utils.create_test_node(
            self.context,
            uuid='aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
            driver=self.driver, boot_interface=self.boot_interface,
            instance_info=INST_INFO_DICT, driver_info=DRV_INFO_DICT)
        with task_manager.acquire(self.context, new_node.uuid,
                                  shared=True) as task:
            self.assertRaises(exception.MissingParameterValue,
                              task.driver.boot.validate, task)
    def test_validate_fail_trusted_boot_with_secure_boot(self):
        """validate() rejects trusted_boot combined with secure_boot."""
        instance_info = {"boot_option": "netboot",
                         "secure_boot": "true",
                         "trusted_boot": "true"}
        properties = {'capabilities': 'trusted_boot:true'}
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            task.node.instance_info['capabilities'] = instance_info
            task.node.properties = properties
            task.node.driver_internal_info['is_whole_disk_image'] = False
            self.assertRaises(exception.InvalidParameterValue,
                              task.driver.boot.validate, task)
    def test_validate_fail_invalid_trusted_boot_value(self):
        """validate() rejects a non-boolean trusted_boot capability value."""
        properties = {'capabilities': 'trusted_boot:value'}
        instance_info = {"trusted_boot": "value"}
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            task.node.properties = properties
            task.node.instance_info['capabilities'] = instance_info
            self.assertRaises(exception.InvalidParameterValue,
                              task.driver.boot.validate, task)
    @mock.patch.object(base_image_service.BaseImageService, '_show',
                       autospec=True)
    def test_validate_fail_no_image_kernel_ramdisk_props(self, mock_glance):
        """validate() fails when the Glance image has no kernel/ramdisk props."""
        mock_glance.return_value = {'properties': {}}
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            self.assertRaises(exception.MissingParameterValue,
                              task.driver.boot.validate,
                              task)
    @mock.patch.object(base_image_service.BaseImageService, '_show',
                       autospec=True)
    def test_validate_fail_glance_image_doesnt_exists(self, mock_glance):
        """ImageNotFound from Glance surfaces as InvalidParameterValue."""
        mock_glance.side_effect = exception.ImageNotFound('not found')
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            self.assertRaises(exception.InvalidParameterValue,
                              task.driver.boot.validate, task)
    @mock.patch.object(base_image_service.BaseImageService, '_show',
                       autospec=True)
    def test_validate_fail_glance_conn_problem(self, mock_glance):
        """Each Glance connectivity error surfaces as InvalidParameterValue."""
        exceptions = (exception.GlanceConnectionFailed('connection fail'),
                      exception.ImageNotAuthorized('not authorized'),
                      exception.Invalid('invalid'))
        # side_effect as a sequence: one exception raised per validate() call.
        mock_glance.side_effect = exceptions
        for exc in exceptions:
            with task_manager.acquire(self.context, self.node.uuid,
                                      shared=True) as task:
                self.assertRaises(exception.InvalidParameterValue,
                                  task.driver.boot.validate, task)
    @mock.patch.object(manager_utils, 'node_get_boot_mode', autospec=True)
    @mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
    @mock.patch.object(dhcp_factory, 'DHCPFactory')
    @mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
    @mock.patch.object(pxe_utils, 'get_image_info', autospec=True)
    @mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
    @mock.patch.object(pxe_utils, 'build_pxe_config_options', autospec=True)
    @mock.patch.object(pxe_utils, 'create_pxe_config', autospec=True)
    def _test_prepare_ramdisk(self, mock_pxe_config,
                              mock_build_pxe, mock_cache_r_k,
                              mock_deploy_img_info,
                              mock_instance_img_info,
                              dhcp_factory_mock,
                              set_boot_device_mock,
                              get_boot_mode_mock,
                              uefi=False,
                              cleaning=False,
                              ipxe_use_swift=False,
                              whole_disk_image=False,
                              mode='deploy',
                              node_boot_mode=None,
                              persistent=False):
        """Shared driver for the prepare_ramdisk tests.

        Stubs out all pxe_utils/DHCP/boot-device helpers, calls
        ``task.driver.boot.prepare_ramdisk(task, {'foo': 'bar'})``, then
        asserts which helpers were invoked and with what arguments depending
        on the keyword flags (uefi, cleaning, ipxe_use_swift,
        whole_disk_image, mode, node_boot_mode, persistent).
        """
        mock_build_pxe.return_value = {}
        # Image-info keys are mode-prefixed, e.g. deploy_kernel / rescue_kernel.
        kernel_label = '%s_kernel' % mode
        ramdisk_label = '%s_ramdisk' % mode
        mock_deploy_img_info.return_value = {kernel_label: 'a',
                                             ramdisk_label: 'r'}
        if whole_disk_image:
            mock_instance_img_info.return_value = {}
        else:
            mock_instance_img_info.return_value = {'kernel': 'b'}
        mock_pxe_config.return_value = None
        mock_cache_r_k.return_value = None
        provider_mock = mock.MagicMock()
        dhcp_factory_mock.return_value = provider_mock
        get_boot_mode_mock.return_value = node_boot_mode
        driver_internal_info = self.node.driver_internal_info
        driver_internal_info['is_whole_disk_image'] = whole_disk_image
        self.node.driver_internal_info = driver_internal_info
        if mode == 'rescue':
            mock_deploy_img_info.return_value = {
                'rescue_kernel': 'a',
                'rescue_ramdisk': 'r'}
        self.node.save()
        with task_manager.acquire(self.context, self.node.uuid) as task:
            dhcp_opts = pxe_utils.dhcp_options_for_instance(
                task, ipxe_enabled=CONF.pxe.ipxe_enabled)
            task.driver.boot.prepare_ramdisk(task, {'foo': 'bar'})
            mock_deploy_img_info.assert_called_once_with(task.node, mode=mode)
            provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
            # Boot mode is only queried while the node is being deployed.
            if self.node.provision_state == states.DEPLOYING:
                get_boot_mode_mock.assert_called_once_with(task)
            set_boot_device_mock.assert_called_once_with(task,
                                                         boot_devices.PXE,
                                                         persistent=persistent)
            # What gets cached depends on swift usage, cleaning, and mode.
            if ipxe_use_swift:
                if whole_disk_image:
                    self.assertFalse(mock_cache_r_k.called)
                else:
                    mock_cache_r_k.assert_called_once_with(
                        task,
                        {'kernel': 'b'},
                        ipxe_enabled=CONF.pxe.ipxe_enabled)
                mock_instance_img_info.assert_called_once_with(task)
            elif not cleaning and mode == 'deploy':
                mock_cache_r_k.assert_called_once_with(
                    task,
                    {'deploy_kernel': 'a', 'deploy_ramdisk': 'r',
                     'kernel': 'b'},
                    ipxe_enabled=CONF.pxe.ipxe_enabled)
                mock_instance_img_info.assert_called_once_with(task)
            elif mode == 'deploy':
                mock_cache_r_k.assert_called_once_with(
                    task,
                    {'deploy_kernel': 'a', 'deploy_ramdisk': 'r'},
                    ipxe_enabled=CONF.pxe.ipxe_enabled)
            elif mode == 'rescue':
                mock_cache_r_k.assert_called_once_with(
                    task,
                    {'rescue_kernel': 'a', 'rescue_ramdisk': 'r'},
                    ipxe_enabled=CONF.pxe.ipxe_enabled)
            # UEFI vs BIOS selects the PXE config template.
            if uefi:
                mock_pxe_config.assert_called_once_with(
                    task, {'foo': 'bar'}, CONF.pxe.uefi_pxe_config_template,
                    ipxe_enabled=CONF.pxe.ipxe_enabled)
            else:
                mock_pxe_config.assert_called_once_with(
                    task, {'foo': 'bar'}, CONF.pxe.pxe_config_template,
                    ipxe_enabled=CONF.pxe.ipxe_enabled)
    def test_prepare_ramdisk(self):
        """Baseline prepare_ramdisk during deployment (BIOS, non-persistent)."""
        self.node.provision_state = states.DEPLOYING
        self.node.save()
        self._test_prepare_ramdisk()
    def test_prepare_ramdisk_force_persistent_boot_device_true(self):
        """force_persistent_boot_device='True' makes the boot device persistent."""
        self.node.provision_state = states.DEPLOYING
        driver_info = self.node.driver_info
        driver_info['force_persistent_boot_device'] = 'True'
        self.node.driver_info = driver_info
        self.node.save()
        self._test_prepare_ramdisk(persistent=True)
    def test_prepare_ramdisk_force_persistent_boot_device_bool_true(self):
        """A boolean True value also makes the boot device persistent."""
        self.node.provision_state = states.DEPLOYING
        driver_info = self.node.driver_info
        driver_info['force_persistent_boot_device'] = True
        self.node.driver_info = driver_info
        self.node.save()
        self._test_prepare_ramdisk(persistent=True)
    def test_prepare_ramdisk_force_persistent_boot_device_sloppy_true(self):
        """Various truthy string spellings all enable persistence."""
        for value in ['true', 't', '1', 'on', 'y', 'YES']:
            self.node.provision_state = states.DEPLOYING
            driver_info = self.node.driver_info
            driver_info['force_persistent_boot_device'] = value
            self.node.driver_info = driver_info
            self.node.save()
            self._test_prepare_ramdisk(persistent=True)
    def test_prepare_ramdisk_force_persistent_boot_device_false(self):
        """force_persistent_boot_device='False' keeps the default behavior."""
        self.node.provision_state = states.DEPLOYING
        driver_info = self.node.driver_info
        driver_info['force_persistent_boot_device'] = 'False'
        self.node.driver_info = driver_info
        self.node.save()
        self._test_prepare_ramdisk()
    def test_prepare_ramdisk_force_persistent_boot_device_bool_false(self):
        """A boolean False value keeps the boot device non-persistent."""
        self.node.provision_state = states.DEPLOYING
        driver_info = self.node.driver_info
        driver_info['force_persistent_boot_device'] = False
        self.node.driver_info = driver_info
        self.node.save()
        self._test_prepare_ramdisk(persistent=False)
    def test_prepare_ramdisk_force_persistent_boot_device_sloppy_false(self):
        """Falsy (and unrecognized) string spellings leave persistence off."""
        for value in ['false', 'f', '0', 'off', 'n', 'NO', 'yxz']:
            self.node.provision_state = states.DEPLOYING
            driver_info = self.node.driver_info
            driver_info['force_persistent_boot_device'] = value
            self.node.driver_info = driver_info
            self.node.save()
            self._test_prepare_ramdisk()
    def test_prepare_ramdisk_force_persistent_boot_device_default(self):
        """The 'Default' keyword resolves to a non-persistent boot device."""
        self.node.provision_state = states.DEPLOYING
        driver_info = self.node.driver_info
        driver_info['force_persistent_boot_device'] = 'Default'
        self.node.driver_info = driver_info
        self.node.save()
        self._test_prepare_ramdisk(persistent=False)
    def test_prepare_ramdisk_force_persistent_boot_device_always(self):
        """The 'Always' keyword forces a persistent boot device."""
        self.node.provision_state = states.DEPLOYING
        driver_info = self.node.driver_info
        driver_info['force_persistent_boot_device'] = 'Always'
        self.node.driver_info = driver_info
        self.node.save()
        self._test_prepare_ramdisk(persistent=True)
    def test_prepare_ramdisk_force_persistent_boot_device_never(self):
        """The 'Never' keyword forces a non-persistent boot device."""
        self.node.provision_state = states.DEPLOYING
        driver_info = self.node.driver_info
        driver_info['force_persistent_boot_device'] = 'Never'
        self.node.driver_info = driver_info
        self.node.save()
        self._test_prepare_ramdisk(persistent=False)
    def test_prepare_ramdisk_rescue(self):
        """prepare_ramdisk in RESCUING state uses the rescue kernel/ramdisk."""
        self.node.provision_state = states.RESCUING
        self.node.save()
        self._test_prepare_ramdisk(mode='rescue')
    def test_prepare_ramdisk_uefi(self):
        """A boot_mode:uefi capability selects the UEFI PXE config template."""
        self.node.provision_state = states.DEPLOYING
        self.node.save()
        properties = self.node.properties
        properties['capabilities'] = 'boot_mode:uefi'
        self.node.properties = properties
        self.node.save()
        self._test_prepare_ramdisk(uefi=True)
    @mock.patch.object(os.path, 'isfile', lambda path: True)
    @mock.patch.object(common_utils, 'file_has_content', lambda *args: False)
    @mock.patch('ironic.common.utils.write_to_file', autospec=True)
    @mock.patch('ironic.common.utils.render_template', autospec=True)
    def test_prepare_ramdisk_ipxe_with_copy_file_different(
            self, render_mock, write_mock):
        """An existing iPXE boot script with stale content is rewritten."""
        self.node.provision_state = states.DEPLOYING
        self.node.save()
        self.config(group='pxe', ipxe_enabled=True)
        self.config(group='deploy', http_url='http://myserver')
        render_mock.return_value = 'foo'
        self._test_prepare_ramdisk()
        # The rendered script is written into the HTTP root.
        write_mock.assert_called_once_with(
            os.path.join(
                CONF.deploy.http_root,
                os.path.basename(CONF.pxe.ipxe_boot_script)),
            'foo')
        render_mock.assert_called_once_with(
            CONF.pxe.ipxe_boot_script,
            {'ipxe_for_mac_uri': 'pxelinux.cfg/'})
    @mock.patch.object(os.path, 'isfile', lambda path: False)
    @mock.patch('ironic.common.utils.file_has_content', autospec=True)
    @mock.patch('ironic.common.utils.write_to_file', autospec=True)
    @mock.patch('ironic.common.utils.render_template', autospec=True)
    def test_prepare_ramdisk_ipxe_with_copy_no_file(
            self, render_mock, write_mock, file_has_content_mock):
        """A missing iPXE boot script is written without any content check."""
        self.node.provision_state = states.DEPLOYING
        self.node.save()
        self.config(group='pxe', ipxe_enabled=True)
        self.config(group='deploy', http_url='http://myserver')
        render_mock.return_value = 'foo'
        self._test_prepare_ramdisk()
        # No existing file, so no comparison should have been attempted.
        self.assertFalse(file_has_content_mock.called)
        write_mock.assert_called_once_with(
            os.path.join(
                CONF.deploy.http_root,
                os.path.basename(CONF.pxe.ipxe_boot_script)),
            'foo')
        render_mock.assert_called_once_with(
            CONF.pxe.ipxe_boot_script,
            {'ipxe_for_mac_uri': 'pxelinux.cfg/'})
    @mock.patch.object(os.path, 'isfile', lambda path: True)
    @mock.patch.object(common_utils, 'file_has_content', lambda *args: True)
    @mock.patch('ironic.common.utils.write_to_file', autospec=True)
    @mock.patch('ironic.common.utils.render_template', autospec=True)
    def test_prepare_ramdisk_ipxe_without_copy(
            self, render_mock, write_mock):
        """An up-to-date iPXE boot script is left untouched."""
        self.node.provision_state = states.DEPLOYING
        self.node.save()
        self.config(group='pxe', ipxe_enabled=True)
        self.config(group='deploy', http_url='http://myserver')
        self._test_prepare_ramdisk()
        self.assertFalse(write_mock.called)
    @mock.patch.object(common_utils, 'render_template', lambda *args: 'foo')
    @mock.patch('ironic.common.utils.write_to_file', autospec=True)
    def test_prepare_ramdisk_ipxe_swift(self, write_mock):
        """With ipxe_use_swift the boot script is still written to HTTP root."""
        self.node.provision_state = states.DEPLOYING
        self.node.save()
        self.config(group='pxe', ipxe_enabled=True)
        self.config(group='pxe', ipxe_use_swift=True)
        self.config(group='deploy', http_url='http://myserver')
        self._test_prepare_ramdisk(ipxe_use_swift=True)
        write_mock.assert_called_once_with(
            os.path.join(
                CONF.deploy.http_root,
                os.path.basename(CONF.pxe.ipxe_boot_script)),
            'foo')
    @mock.patch.object(common_utils, 'render_template', lambda *args: 'foo')
    @mock.patch('ironic.common.utils.write_to_file', autospec=True)
    def test_prepare_ramdisk_ipxe_swift_whole_disk_image(
            self, write_mock):
        """ipxe_use_swift with a whole-disk image still writes the boot script."""
        self.node.provision_state = states.DEPLOYING
        self.node.save()
        self.config(group='pxe', ipxe_enabled=True)
        self.config(group='pxe', ipxe_use_swift=True)
        self.config(group='deploy', http_url='http://myserver')
        self._test_prepare_ramdisk(ipxe_use_swift=True, whole_disk_image=True)
        write_mock.assert_called_once_with(
            os.path.join(
                CONF.deploy.http_root,
                os.path.basename(CONF.pxe.ipxe_boot_script)),
            'foo')
    def test_prepare_ramdisk_cleaning(self):
        """prepare_ramdisk in CLEANING state caches only the deploy images."""
        self.node.provision_state = states.CLEANING
        self.node.save()
        self._test_prepare_ramdisk(cleaning=True)
    @mock.patch.object(manager_utils, 'node_set_boot_mode', autospec=True)
    def test_prepare_ramdisk_set_boot_mode_on_bm(
            self, set_boot_mode_mock):
        """A node capability drives setting UEFI boot mode on the machine."""
        self.node.provision_state = states.DEPLOYING
        properties = self.node.properties
        properties['capabilities'] = 'boot_mode:uefi'
        self.node.properties = properties
        self.node.save()
        self._test_prepare_ramdisk(uefi=True)
        set_boot_mode_mock.assert_called_once_with(mock.ANY, boot_modes.UEFI)
    @mock.patch.object(manager_utils, 'node_set_boot_mode', autospec=True)
    def test_prepare_ramdisk_set_boot_mode_on_ironic(
            self, set_boot_mode_mock):
        """A boot mode reported by the BMC is recorded, not pushed back."""
        self.node.provision_state = states.DEPLOYING
        self.node.save()
        self._test_prepare_ramdisk(node_boot_mode=boot_modes.LEGACY_BIOS)
        with task_manager.acquire(self.context, self.node.uuid) as task:
            driver_internal_info = task.node.driver_internal_info
            self.assertIn('deploy_boot_mode', driver_internal_info)
            self.assertEqual(boot_modes.LEGACY_BIOS,
                             driver_internal_info['deploy_boot_mode'])
            # The mode came from the node itself, so nothing is set on it.
            self.assertEqual(set_boot_mode_mock.call_count, 0)
    @mock.patch.object(manager_utils, 'node_set_boot_mode', autospec=True)
    def test_prepare_ramdisk_set_default_boot_mode_on_ironic_bios(
            self, set_boot_mode_mock):
        """The configured default (BIOS) boot mode is recorded and applied."""
        self.node.provision_state = states.DEPLOYING
        self.node.save()
        self.config(default_boot_mode=boot_modes.LEGACY_BIOS, group='deploy')
        self._test_prepare_ramdisk()
        with task_manager.acquire(self.context, self.node.uuid) as task:
            driver_internal_info = task.node.driver_internal_info
            self.assertIn('deploy_boot_mode', driver_internal_info)
            self.assertEqual(boot_modes.LEGACY_BIOS,
                             driver_internal_info['deploy_boot_mode'])
            self.assertEqual(set_boot_mode_mock.call_count, 1)
    @mock.patch.object(manager_utils, 'node_set_boot_mode', autospec=True)
    def test_prepare_ramdisk_set_default_boot_mode_on_ironic_uefi(
            self, set_boot_mode_mock):
        """The configured default (UEFI) boot mode is recorded and applied."""
        self.node.provision_state = states.DEPLOYING
        self.node.save()
        self.config(default_boot_mode=boot_modes.UEFI, group='deploy')
        self._test_prepare_ramdisk(uefi=True)
        with task_manager.acquire(self.context, self.node.uuid) as task:
            driver_internal_info = task.node.driver_internal_info
            self.assertIn('deploy_boot_mode', driver_internal_info)
            self.assertEqual(boot_modes.UEFI,
                             driver_internal_info['deploy_boot_mode'])
            self.assertEqual(set_boot_mode_mock.call_count, 1)
    @mock.patch.object(manager_utils, 'node_set_boot_mode', autospec=True)
    def test_prepare_ramdisk_conflicting_boot_modes(
            self, set_boot_mode_mock):
        """A UEFI capability overrides a conflicting BIOS mode from the BMC."""
        self.node.provision_state = states.DEPLOYING
        properties = self.node.properties
        properties['capabilities'] = 'boot_mode:uefi'
        self.node.properties = properties
        self.node.save()
        self._test_prepare_ramdisk(uefi=True,
                                   node_boot_mode=boot_modes.LEGACY_BIOS)
        set_boot_mode_mock.assert_called_once_with(mock.ANY, boot_modes.UEFI)
@mock.patch.object(manager_utils, 'node_set_boot_mode', autospec=True)
def test_prepare_ramdisk_conflicting_boot_modes_set_unsupported(
        self, set_boot_mode_mock):
    """The error propagates when the driver cannot change the boot mode."""
    self.node.provision_state = states.DEPLOYING
    props = self.node.properties
    props['capabilities'] = 'boot_mode:uefi'
    self.node.properties = props
    self.node.save()
    set_boot_mode_mock.side_effect = exception.UnsupportedDriverExtension(
        extension='management', driver='test-driver'
    )
    self.assertRaises(exception.UnsupportedDriverExtension,
                      self._test_prepare_ramdisk,
                      uefi=True, node_boot_mode=boot_modes.LEGACY_BIOS)
@mock.patch.object(manager_utils, 'node_set_boot_mode', autospec=True)
def test_prepare_ramdisk_set_boot_mode_not_called(
        self, set_boot_mode_mock):
    """No mode change is issued when the node already matches the request."""
    self.node.provision_state = states.DEPLOYING
    self.node.save()
    props = self.node.properties
    props['capabilities'] = 'boot_mode:uefi'
    self.node.properties = props
    self.node.save()
    self._test_prepare_ramdisk(uefi=True, node_boot_mode=boot_modes.UEFI)
    self.assertEqual(set_boot_mode_mock.call_count, 0)
@mock.patch.object(pxe_utils, 'clean_up_pxe_env', autospec=True)
@mock.patch.object(pxe_utils, 'get_image_info', autospec=True)
def _test_clean_up_ramdisk(self, get_image_info_mock,
                           clean_up_pxe_env_mock, mode='deploy'):
    """Helper: clean_up_ramdisk() must tear down the PXE env for *mode*."""
    with task_manager.acquire(self.context, self.node.uuid) as task:
        kernel = '%s_kernel' % mode
        ramdisk = '%s_ramdisk' % mode
        image_info = {kernel: ['', '/path/to/' + kernel],
                      ramdisk: ['', '/path/to/' + ramdisk]}
        get_image_info_mock.return_value = image_info
        task.driver.boot.clean_up_ramdisk(task)
        clean_up_pxe_env_mock.assert_called_once_with(task, image_info)
        get_image_info_mock.assert_called_once_with(task.node, mode=mode)
def test_clean_up_ramdisk(self):
    """Deploy-ramdisk clean-up while the node is DEPLOYING."""
    self.node.provision_state = states.DEPLOYING
    self.node.save()
    self._test_clean_up_ramdisk()
def test_clean_up_ramdisk_rescue(self):
    """Rescue-ramdisk clean-up while the node is RESCUING."""
    self.node.provision_state = states.RESCUING
    self.node.save()
    self._test_clean_up_ramdisk(mode='rescue')
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
@mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_prepare_instance_netboot(
        self, get_image_info_mock, cache_mock,
        dhcp_factory_mock, switch_pxe_config_mock,
        set_boot_device_mock):
    """Netboot prepare_instance(): images are cached, DHCP is updated,
    the PXE config is switched to the root partition and the node is
    set to boot from PXE persistently.
    """
    provider_mock = mock.MagicMock()
    dhcp_factory_mock.return_value = provider_mock
    image_info = {'kernel': ('', '/path/to/kernel'),
                  'ramdisk': ('', '/path/to/ramdisk')}
    get_image_info_mock.return_value = image_info
    with task_manager.acquire(self.context, self.node.uuid) as task:
        dhcp_opts = pxe_utils.dhcp_options_for_instance(
            task, ipxe_enabled=CONF.pxe.ipxe_enabled)
        pxe_config_path = pxe_utils.get_pxe_config_file_path(
            task.node.uuid)
        task.node.properties['capabilities'] = 'boot_mode:bios'
        # Partition image deploy: a root partition UUID is available.
        task.node.driver_internal_info['root_uuid_or_disk_id'] = (
            "30212642-09d3-467f-8e09-21685826ab50")
        task.node.driver_internal_info['is_whole_disk_image'] = False
        task.driver.boot.prepare_instance(task)
        get_image_info_mock.assert_called_once_with(
            task)
        cache_mock.assert_called_once_with(
            task, image_info, ipxe_enabled=CONF.pxe.ipxe_enabled)
        provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
        # The generated PXE config must point at the root partition UUID.
        switch_pxe_config_mock.assert_called_once_with(
            pxe_config_path, "30212642-09d3-467f-8e09-21685826ab50",
            'bios', False, False, False, False, ipxe_enabled=False)
        set_boot_device_mock.assert_called_once_with(task,
                                                     boot_devices.PXE,
                                                     persistent=True)
@mock.patch('os.path.isfile', return_value=False)
@mock.patch.object(pxe_utils, 'create_pxe_config', autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
@mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_prepare_instance_netboot_active(
        self, get_image_info_mock, cache_mock,
        dhcp_factory_mock, switch_pxe_config_mock,
        set_boot_device_mock, create_pxe_config_mock, isfile_mock):
    """Netboot prepare_instance() on an ACTIVE node.

    With the PXE config file absent (isfile -> False) the config must
    be recreated, but the boot device of an active node is left alone.
    """
    provider_mock = mock.MagicMock()
    dhcp_factory_mock.return_value = provider_mock
    image_info = {'kernel': ('', '/path/to/kernel'),
                  'ramdisk': ('', '/path/to/ramdisk')}
    get_image_info_mock.return_value = image_info
    self.node.provision_state = states.ACTIVE
    self.node.save()
    with task_manager.acquire(self.context, self.node.uuid) as task:
        dhcp_opts = pxe_utils.dhcp_options_for_instance(
            task, ipxe_enabled=CONF.pxe.ipxe_enabled)
        pxe_config_path = pxe_utils.get_pxe_config_file_path(
            task.node.uuid)
        task.node.properties['capabilities'] = 'boot_mode:bios'
        task.node.driver_internal_info['root_uuid_or_disk_id'] = (
            "30212642-09d3-467f-8e09-21685826ab50")
        task.node.driver_internal_info['is_whole_disk_image'] = False
        task.driver.boot.prepare_instance(task)
        get_image_info_mock.assert_called_once_with(
            task)
        cache_mock.assert_called_once_with(
            task, image_info, ipxe_enabled=CONF.pxe.ipxe_enabled)
        provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
        # A missing config file on an active node is regenerated.
        create_pxe_config_mock.assert_called_once_with(
            task, mock.ANY, CONF.pxe.pxe_config_template,
            ipxe_enabled=False)
        switch_pxe_config_mock.assert_called_once_with(
            pxe_config_path, "30212642-09d3-467f-8e09-21685826ab50",
            'bios', False, False, False, False, ipxe_enabled=False)
        # No boot device change once the node is ACTIVE.
        self.assertFalse(set_boot_device_mock.called)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
@mock.patch.object(dhcp_factory, 'DHCPFactory')
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_prepare_instance_netboot_missing_root_uuid(
        self, get_image_info_mock, cache_mock,
        dhcp_factory_mock, switch_pxe_config_mock,
        set_boot_device_mock):
    """Without a root UUID, netboot prepare stops before touching the
    PXE config or the boot device.
    """
    provider_mock = mock.MagicMock()
    dhcp_factory_mock.return_value = provider_mock
    image_info = {'kernel': ('', '/path/to/kernel'),
                  'ramdisk': ('', '/path/to/ramdisk')}
    get_image_info_mock.return_value = image_info
    with task_manager.acquire(self.context, self.node.uuid) as task:
        dhcp_opts = pxe_utils.dhcp_options_for_instance(
            task, ipxe_enabled=CONF.pxe.ipxe_enabled)
        task.node.properties['capabilities'] = 'boot_mode:bios'
        # NOTE: 'root_uuid_or_disk_id' is deliberately NOT set here.
        task.node.driver_internal_info['is_whole_disk_image'] = False
        task.driver.boot.prepare_instance(task)
        get_image_info_mock.assert_called_once_with(task)
        cache_mock.assert_called_once_with(
            task, image_info, ipxe_enabled=CONF.pxe.ipxe_enabled)
        provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
        # The PXE config switch and boot device change must be skipped.
        self.assertFalse(switch_pxe_config_mock.called)
        self.assertFalse(set_boot_device_mock.called)
@mock.patch.object(pxe.LOG, 'warning', autospec=True)
@mock.patch.object(pxe_utils, 'clean_up_pxe_config', autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(dhcp_factory, 'DHCPFactory')
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_prepare_instance_whole_disk_image_missing_root_uuid(
        self, get_image_info_mock, cache_mock,
        dhcp_factory_mock, set_boot_device_mock,
        clean_up_pxe_mock, log_mock):
    """Whole-disk image without a root UUID: log a warning, remove the
    PXE config and fall back to booting from the local disk.
    """
    provider_mock = mock.MagicMock()
    dhcp_factory_mock.return_value = provider_mock
    get_image_info_mock.return_value = {}
    with task_manager.acquire(self.context, self.node.uuid) as task:
        dhcp_opts = pxe_utils.dhcp_options_for_instance(
            task, CONF.pxe.ipxe_enabled)
        task.node.properties['capabilities'] = 'boot_mode:bios'
        task.node.driver_internal_info['is_whole_disk_image'] = True
        task.driver.boot.prepare_instance(task)
        get_image_info_mock.assert_called_once_with(task)
        cache_mock.assert_called_once_with(
            task, {}, ipxe_enabled=CONF.pxe.ipxe_enabled)
        provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
        # The fallback is logged, the PXE env is dropped, and the node
        # is switched to boot from disk persistently.
        self.assertTrue(log_mock.called)
        clean_up_pxe_mock.assert_called_once_with(
            task, ipxe_enabled=CONF.pxe.ipxe_enabled)
        set_boot_device_mock.assert_called_once_with(
            task, boot_devices.DISK, persistent=True)
@mock.patch('os.path.isfile', lambda filename: False)
@mock.patch.object(pxe_utils, 'create_pxe_config', autospec=True)
@mock.patch.object(deploy_utils, 'is_iscsi_boot', lambda task: True)
@mock.patch.object(noop_storage.NoopStorage, 'should_write_image',
                   lambda task: False)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
@mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_prepare_instance_netboot_iscsi(
        self, get_image_info_mock, cache_mock,
        dhcp_factory_mock, switch_pxe_config_mock,
        set_boot_device_mock, create_pxe_config_mock):
    """Boot-from-volume (iSCSI) netboot: kernel/ramdisk caching is
    skipped, the PXE config is created for iSCSI boot (iPXE enabled)
    and PXE is selected as the persistent boot device.
    """
    http_url = 'http://192.1.2.3:1234'
    self.config(ipxe_enabled=True, group='pxe')
    self.config(http_url=http_url, group='deploy')
    provider_mock = mock.MagicMock()
    dhcp_factory_mock.return_value = provider_mock
    vol_id = uuidutils.generate_uuid()
    obj_utils.create_test_volume_target(
        self.context, node_id=self.node.id, volume_type='iscsi',
        boot_index=0, volume_id='1234', uuid=vol_id,
        properties={'target_lun': 0,
                    'target_portal': 'fake_host:3260',
                    'target_iqn': 'fake_iqn',
                    'auth_username': 'fake_username',
                    'auth_password': 'fake_password'})
    with task_manager.acquire(self.context, self.node.uuid) as task:
        task.node.driver_internal_info = {
            'boot_from_volume': vol_id}
        dhcp_opts = pxe_utils.dhcp_options_for_instance(task,
                                                        ipxe_enabled=True)
        pxe_config_path = pxe_utils.get_pxe_config_file_path(
            task.node.uuid)
        task.node.properties['capabilities'] = 'boot_mode:bios'
        task.driver.boot.prepare_instance(task)
        # No image handling happens when booting from a volume.
        self.assertFalse(get_image_info_mock.called)
        self.assertFalse(cache_mock.called)
        provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
        create_pxe_config_mock.assert_called_once_with(
            task, mock.ANY, CONF.pxe.pxe_config_template,
            ipxe_enabled=True)
        # No root UUID: the config is switched for iSCSI boot instead.
        switch_pxe_config_mock.assert_called_once_with(
            pxe_config_path, None, boot_modes.LEGACY_BIOS, False,
            ipxe_enabled=True, ramdisk_boot=False, iscsi_boot=True)
        set_boot_device_mock.assert_called_once_with(task,
                                                     boot_devices.PXE,
                                                     persistent=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(pxe_utils, 'clean_up_pxe_config', autospec=True)
def test_prepare_instance_localboot(self, clean_up_pxe_config_mock,
                                    set_boot_device_mock):
    """Local boot: the PXE config is removed and the disk is selected."""
    with task_manager.acquire(self.context, self.node.uuid) as task:
        info = task.node.instance_info
        info['capabilities'] = {'boot_option': 'local'}
        task.node.instance_info = info
        task.node.save()
        task.driver.boot.prepare_instance(task)
        clean_up_pxe_config_mock.assert_called_once_with(
            task, ipxe_enabled=CONF.pxe.ipxe_enabled)
        set_boot_device_mock.assert_called_once_with(task,
                                                     boot_devices.DISK,
                                                     persistent=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(pxe_utils, 'clean_up_pxe_config', autospec=True)
def test_prepare_instance_localboot_active(self, clean_up_pxe_config_mock,
                                           set_boot_device_mock):
    """Local boot on an ACTIVE node: clean up but leave the boot device."""
    self.node.provision_state = states.ACTIVE
    self.node.save()
    with task_manager.acquire(self.context, self.node.uuid) as task:
        info = task.node.instance_info
        info['capabilities'] = {'boot_option': 'local'}
        task.node.instance_info = info
        task.node.save()
        task.driver.boot.prepare_instance(task)
        clean_up_pxe_config_mock.assert_called_once_with(
            task, ipxe_enabled=CONF.pxe.ipxe_enabled)
        self.assertFalse(set_boot_device_mock.called)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
@mock.patch.object(pxe_utils, 'create_pxe_config', autospec=True)
@mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def _test_prepare_instance_ramdisk(
        self, get_image_info_mock, cache_mock,
        dhcp_factory_mock, create_pxe_config_mock,
        switch_pxe_config_mock,
        set_boot_device_mock, config_file_exits=False):
    """Helper for ramdisk-boot prepare_instance() tests.

    :param config_file_exits: when True, the (patched) os.path.isfile
        reports an existing PXE config and no new one may be created.
    """
    image_info = {'kernel': ['', '/path/to/kernel'],
                  'ramdisk': ['', '/path/to/ramdisk']}
    # Fix: the original assigned get_image_info_mock.return_value twice
    # (before and after the provider mock setup); once is enough.
    get_image_info_mock.return_value = image_info
    provider_mock = mock.MagicMock()
    dhcp_factory_mock.return_value = provider_mock
    # NOTE(review): provision_state is set without self.node.save();
    # presumably only the in-memory value matters here -- confirm.
    self.node.provision_state = states.DEPLOYING
    with task_manager.acquire(self.context, self.node.uuid) as task:
        instance_info = task.node.instance_info
        instance_info['capabilities'] = {'boot_option': 'ramdisk'}
        task.node.instance_info = instance_info
        task.node.save()
        dhcp_opts = pxe_utils.dhcp_options_for_instance(
            task, ipxe_enabled=CONF.pxe.ipxe_enabled)
        pxe_config_path = pxe_utils.get_pxe_config_file_path(
            task.node.uuid)
        task.driver.boot.prepare_instance(task)
        get_image_info_mock.assert_called_once_with(task)
        cache_mock.assert_called_once_with(
            task, image_info, CONF.pxe.ipxe_enabled)
        provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
        if config_file_exits:
            self.assertFalse(create_pxe_config_mock.called)
        else:
            create_pxe_config_mock.assert_called_once_with(
                task, mock.ANY, CONF.pxe.pxe_config_template,
                ipxe_enabled=False)
        # Ramdisk boot: no root UUID, ramdisk_boot flag set.
        switch_pxe_config_mock.assert_called_once_with(
            pxe_config_path, None,
            'bios', False, ipxe_enabled=False, iscsi_boot=False,
            ramdisk_boot=True)
        set_boot_device_mock.assert_called_once_with(task,
                                                     boot_devices.PXE,
                                                     persistent=True)
# Fix: the two test names were swapped relative to the patched
# os.path.isfile behavior -- isfile() -> True means the PXE config
# EXISTS, isfile() -> False means it is MISSING.
@mock.patch.object(os.path, 'isfile', lambda path: True)
def test_prepare_instance_ramdisk_pxe_conf_exists(self):
    """An existing PXE config must not be recreated."""
    self._test_prepare_instance_ramdisk(config_file_exits=True)

@mock.patch.object(os.path, 'isfile', lambda path: False)
def test_prepare_instance_ramdisk_pxe_conf_missing(self):
    """A missing PXE config must be created."""
    self._test_prepare_instance_ramdisk(config_file_exits=False)
@mock.patch.object(pxe_utils, 'clean_up_pxe_env', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_clean_up_instance(self, get_image_info_mock,
                           clean_up_pxe_env_mock):
    """clean_up_instance() tears down the instance PXE environment."""
    with task_manager.acquire(self.context, self.node.uuid) as task:
        info = {'kernel': ['', '/path/to/kernel'],
                'ramdisk': ['', '/path/to/ramdisk']}
        get_image_info_mock.return_value = info
        task.driver.boot.clean_up_instance(task)
        clean_up_pxe_env_mock.assert_called_once_with(task, info)
        get_image_info_mock.assert_called_once_with(task)
class PXERamdiskDeployTestCase(db_base.DbTestCase):
    """Tests for the 'ramdisk' deploy interface combined with PXE boot."""

    def setUp(self):
        super(PXERamdiskDeployTestCase, self).setUp()
        # Separate temp dirs for the TFTP root and the image cache.
        self.temp_dir = tempfile.mkdtemp()
        self.config(tftp_root=self.temp_dir, group='pxe')
        self.temp_dir = tempfile.mkdtemp()
        self.config(images_path=self.temp_dir, group='pxe')
        self.config(enabled_deploy_interfaces=['ramdisk'])
        self.config(enabled_boot_interfaces=['pxe'])
        # Fake everything except the interfaces under test.
        for iface in drivers_base.ALL_INTERFACES:
            impl = 'fake'
            if iface == 'network':
                impl = 'noop'
            if iface == 'deploy':
                impl = 'ramdisk'
            if iface == 'boot':
                impl = 'pxe'
            config_kwarg = {'enabled_%s_interfaces' % iface: [impl],
                            'default_%s_interface' % iface: impl}
            self.config(**config_kwarg)
        self.config(enabled_hardware_types=['fake-hardware'])
        instance_info = INST_INFO_DICT
        self.node = obj_utils.create_test_node(
            self.context,
            driver='fake-hardware',
            instance_info=instance_info,
            driver_info=DRV_INFO_DICT,
            driver_internal_info=DRV_INTERNAL_INFO_DICT)
        self.port = obj_utils.create_test_port(self.context,
                                               node_id=self.node.id)

    @mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
    @mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
    @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
    @mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
    @mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
    def test_prepare_instance_ramdisk(
            self, get_image_info_mock, cache_mock,
            dhcp_factory_mock, switch_pxe_config_mock,
            set_boot_device_mock):
        """prepare()+deploy() set up PXE for a pure ramdisk boot."""
        provider_mock = mock.MagicMock()
        dhcp_factory_mock.return_value = provider_mock
        self.node.provision_state = states.DEPLOYING
        image_info = {'kernel': ('', '/path/to/kernel'),
                      'ramdisk': ('', '/path/to/ramdisk')}
        get_image_info_mock.return_value = image_info
        with task_manager.acquire(self.context, self.node.uuid) as task:
            dhcp_opts = pxe_utils.dhcp_options_for_instance(
                task, ipxe_enabled=CONF.pxe.ipxe_enabled)
            pxe_config_path = pxe_utils.get_pxe_config_file_path(
                task.node.uuid)
            task.node.properties['capabilities'] = 'boot_option:netboot'
            task.node.driver_internal_info['is_whole_disk_image'] = False
            task.driver.deploy.prepare(task)
            task.driver.deploy.deploy(task)
            get_image_info_mock.assert_called_once_with(task)
            cache_mock.assert_called_once_with(
                task, image_info, ipxe_enabled=CONF.pxe.ipxe_enabled)
            provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
            # Ramdisk boot: no root device, ramdisk_boot flag set.
            switch_pxe_config_mock.assert_called_once_with(
                pxe_config_path, None,
                'bios', False, ipxe_enabled=False, iscsi_boot=False,
                ramdisk_boot=True)
            set_boot_device_mock.assert_called_once_with(task,
                                                         boot_devices.PXE,
                                                         persistent=True)

    @mock.patch.object(pxe.LOG, 'warning', autospec=True)
    @mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
    @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
    @mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
    @mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
    def test_deploy(self, mock_image_info, mock_cache,
                    mock_dhcp_factory, mock_switch_config, mock_warning):
        """deploy() returns None; a configdrive triggers a warning."""
        image_info = {'kernel': ('', '/path/to/kernel'),
                      'ramdisk': ('', '/path/to/ramdisk')}
        mock_image_info.return_value = image_info
        i_info = self.node.instance_info
        i_info.update({'capabilities': {'boot_option': 'ramdisk'}})
        self.node.instance_info = i_info
        self.node.save()
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertIsNone(task.driver.deploy.deploy(task))
            mock_image_info.assert_called_once_with(task)
            mock_cache.assert_called_once_with(
                task, image_info, ipxe_enabled=CONF.pxe.ipxe_enabled)
            self.assertFalse(mock_warning.called)
        # A configdrive cannot be used with ramdisk deploy: warn.
        i_info['configdrive'] = 'meow'
        self.node.instance_info = i_info
        self.node.save()
        mock_warning.reset_mock()
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertIsNone(task.driver.deploy.deploy(task))
            self.assertTrue(mock_warning.called)

    @mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True)
    def test_prepare(self, mock_prepare_instance):
        """prepare() while DEPLOYING only injects the ramdisk capability."""
        node = self.node
        node.provision_state = states.DEPLOYING
        node.instance_info = {}
        node.save()
        with task_manager.acquire(self.context, node.uuid) as task:
            task.driver.deploy.prepare(task)
            self.assertFalse(mock_prepare_instance.called)
            self.assertEqual({'boot_option': 'ramdisk'},
                             task.node.instance_info['capabilities'])

    @mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True)
    def test_prepare_active(self, mock_prepare_instance):
        """prepare() on an ACTIVE node re-prepares the instance boot."""
        node = self.node
        node.provision_state = states.ACTIVE
        node.save()
        with task_manager.acquire(self.context, node.uuid) as task:
            task.driver.deploy.prepare(task)
            mock_prepare_instance.assert_called_once_with(mock.ANY, task)

    @mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True)
    def test_prepare_unrescuing(self, mock_prepare_instance):
        """prepare() while UNRESCUING re-prepares the instance boot."""
        node = self.node
        node.provision_state = states.UNRESCUING
        node.save()
        with task_manager.acquire(self.context, node.uuid) as task:
            task.driver.deploy.prepare(task)
            mock_prepare_instance.assert_called_once_with(mock.ANY, task)

    @mock.patch.object(pxe.LOG, 'warning', autospec=True)
    @mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True)
    def test_prepare_fixes_and_logs_boot_option_warning(
            self, mock_prepare_instance, mock_warning):
        """A boot_option in properties (wrong place) is fixed and logged."""
        node = self.node
        node.properties['capabilities'] = 'boot_option:ramdisk'
        node.provision_state = states.DEPLOYING
        node.instance_info = {}
        node.save()
        with task_manager.acquire(self.context, node.uuid) as task:
            task.driver.deploy.prepare(task)
            self.assertFalse(mock_prepare_instance.called)
            self.assertEqual({'boot_option': 'ramdisk'},
                             task.node.instance_info['capabilities'])
            self.assertTrue(mock_warning.called)

    @mock.patch.object(deploy_utils, 'validate_image_properties',
                       autospec=True)
    def test_validate(self, mock_validate_img):
        """validate() checks the image properties."""
        node = self.node
        node.properties['capabilities'] = 'boot_option:netboot'
        node.save()
        with task_manager.acquire(self.context, node.uuid) as task:
            task.driver.deploy.validate(task)
            self.assertTrue(mock_validate_img.called)

    @mock.patch.object(fake.FakeBoot, 'validate', autospec=True)
    @mock.patch.object(deploy_utils, 'validate_image_properties',
                       autospec=True)
    def test_validate_interface_mismatch(self, mock_validate_image,
                                         mock_boot_validate):
        """A boot interface without ramdisk_boot support is rejected."""
        node = self.node
        node.boot_interface = 'fake'
        node.save()
        self.config(enabled_boot_interfaces=['fake'],
                    default_boot_interface='fake')
        with task_manager.acquire(self.context, node.uuid) as task:
            error = self.assertRaises(exception.InvalidParameterValue,
                                      task.driver.deploy.validate, task)
            error_message = ('Invalid configuration: The boot interface must '
                             'have the `ramdisk_boot` capability. You are '
                             'using an incompatible boot interface.')
            self.assertEqual(error_message, str(error))
            # Validation fails fast; neither sub-validation runs.
            self.assertFalse(mock_boot_validate.called)
            self.assertFalse(mock_validate_image.called)

    @mock.patch.object(pxe.PXEBoot, 'validate', autospec=True)
    def test_validate_calls_boot_validate(self, mock_validate):
        """validate() delegates to the boot interface's validate()."""
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.driver.deploy.validate(task)
            mock_validate.assert_called_once_with(mock.ANY, task)

    @mock.patch.object(manager_utils, 'restore_power_state_if_needed',
                       autospec=True)
    @mock.patch.object(manager_utils, 'power_on_node_if_needed',
                       autospec=True)
    @mock.patch.object(pxe.LOG, 'warning', autospec=True)
    @mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
    @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
    @mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
    @mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
    def test_deploy_with_smartnic_port(
            self, mock_image_info, mock_cache,
            mock_dhcp_factory, mock_switch_config, mock_warning,
            power_on_node_if_needed_mock, restore_power_state_mock):
        """deploy() powers the node on and restores power for smartNICs."""
        image_info = {'kernel': ('', '/path/to/kernel'),
                      'ramdisk': ('', '/path/to/ramdisk')}
        mock_image_info.return_value = image_info
        i_info = self.node.instance_info
        i_info.update({'capabilities': {'boot_option': 'ramdisk'}})
        self.node.instance_info = i_info
        self.node.save()
        with task_manager.acquire(self.context, self.node.uuid) as task:
            power_on_node_if_needed_mock.return_value = states.POWER_OFF
            self.assertIsNone(task.driver.deploy.deploy(task))
            mock_image_info.assert_called_once_with(task)
            mock_cache.assert_called_once_with(
                task, image_info, ipxe_enabled=CONF.pxe.ipxe_enabled)
            self.assertFalse(mock_warning.called)
            # The previous power state must be restored afterwards.
            power_on_node_if_needed_mock.assert_called_once_with(task)
            restore_power_state_mock.assert_called_once_with(
                task, states.POWER_OFF)
        i_info['configdrive'] = 'meow'
        self.node.instance_info = i_info
        self.node.save()
        mock_warning.reset_mock()
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertIsNone(task.driver.deploy.deploy(task))
            self.assertTrue(mock_warning.called)
class PXEValidateRescueTestCase(db_base.DbTestCase):
    """validate_rescue() behaviour of the PXE boot interface."""

    def setUp(self):
        super(PXEValidateRescueTestCase, self).setUp()
        # Fake every interface except the ones exercised here.
        overrides = {'network': 'flat', 'rescue': 'agent', 'boot': 'pxe'}
        for iface in drivers_base.ALL_INTERFACES:
            impl = overrides.get(iface, 'fake')
            self.config(**{'enabled_%s_interfaces' % iface: [impl],
                           'default_%s_interface' % iface: impl})
        self.config(enabled_hardware_types=['fake-hardware'])
        driver_info = DRV_INFO_DICT
        driver_info.update({'rescue_ramdisk': 'my_ramdisk',
                            'rescue_kernel': 'my_kernel'})
        instance_info = INST_INFO_DICT
        instance_info.update({'rescue_password': 'password'})
        self.node = obj_utils.create_test_node(
            self.context,
            driver='fake-hardware',
            instance_info=instance_info,
            driver_info=driver_info,
            driver_internal_info=DRV_INTERNAL_INFO_DICT)

    def test_validate_rescue(self):
        """All rescue parameters present: validation passes."""
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.driver.boot.validate_rescue(task)

    def test_validate_rescue_no_rescue_ramdisk(self):
        """A missing rescue_ramdisk raises MissingParameterValue."""
        info = self.node.driver_info
        del info['rescue_ramdisk']
        self.node.driver_info = info
        self.node.save()
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertRaisesRegex(exception.MissingParameterValue,
                                   'Missing.*rescue_ramdisk',
                                   task.driver.boot.validate_rescue, task)

    def test_validate_rescue_fails_no_rescue_kernel(self):
        """A missing rescue_kernel raises MissingParameterValue."""
        info = self.node.driver_info
        del info['rescue_kernel']
        self.node.driver_info = info
        self.node.save()
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertRaisesRegex(exception.MissingParameterValue,
                                   'Missing.*rescue_kernel',
                                   task.driver.boot.validate_rescue, task)
| 49.719898
| 79
| 0.648466
| 6,961
| 58,222
| 5.075995
| 0.05459
| 0.035773
| 0.043726
| 0.039056
| 0.844767
| 0.823852
| 0.791759
| 0.769514
| 0.750495
| 0.732722
| 0
| 0.003633
| 0.257841
| 58,222
| 1,170
| 80
| 49.762393
| 0.814094
| 0.013998
| 0
| 0.667932
| 0
| 0
| 0.093456
| 0.020981
| 0
| 0
| 0
| 0
| 0.11575
| 1
| 0.065465
| false
| 0.001898
| 0.024668
| 0
| 0.096774
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6aea03aee927de9288877466e8b780cf42cff828
| 22,578
|
py
|
Python
|
yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py
|
mythwm/yardstick
|
ea13581f450c9c44f6f73d383e6a192697a95cc1
|
[
"Apache-2.0"
] | null | null | null |
yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py
|
mythwm/yardstick
|
ea13581f450c9c44f6f73d383e6a192697a95cc1
|
[
"Apache-2.0"
] | null | null | null |
yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py
|
mythwm/yardstick
|
ea13581f450c9c44f6f73d383e6a192697a95cc1
|
[
"Apache-2.0"
] | null | null | null |
##############################################################################
# Copyright (c) 2015 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import mock
import unittest
from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.networking import pktgen
@mock.patch('yardstick.benchmark.scenarios.networking.pktgen.ssh')
class PktgenTestCase(unittest.TestCase):
def setUp(self):
    # Minimal two-node scenario context: a traffic-generating 'host'
    # and a 'target'; the target's 'ipaddr' is the destination the
    # pktgen scenario sends packets to.
    self.ctx = {
        'host': {
            'ip': '172.16.0.137',
            'user': 'root',
            'key_filename': 'mykey.key'
        },
        'target': {
            'ip': '172.16.0.138',
            'user': 'root',
            'key_filename': 'mykey.key',
            'ipaddr': '172.16.0.138'
        }
    }
def test_pktgen_successful_setup(self, mock_ssh):
    """setup() connects to both nodes and sets the setup_done flag.

    Fix: the SSH execute stub must be configured *before* setup()
    runs; the original set return_value only after the call, so
    setup() exercised an unconfigured MagicMock rather than a
    successful (0, '', '') command result.
    """
    args = {
        'options': {'packetsize': 60},
    }
    p = pktgen.Pktgen(args, self.ctx)
    mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
    p.setup()
    self.assertIsNotNone(p.server)
    self.assertIsNotNone(p.client)
    self.assertTrue(p.setup_done)
def test_pktgen_successful_iptables_setup(self, mock_ssh):
    """_iptables_setup() installs a DROP rule over all test ports."""
    args = {
        'options': {'packetsize': 60, 'number_of_ports': 10},
    }
    scenario = pktgen.Pktgen(args, self.ctx)
    scenario.server = mock_ssh.SSH.from_node()
    scenario.number_of_ports = args['options']['number_of_ports']
    mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
    scenario._iptables_setup()
    expected_cmd = ("sudo iptables -F; "
                    "sudo iptables -A INPUT -p udp --dport 1000:%s -j DROP"
                    % 1010)
    mock_ssh.SSH.from_node().execute.assert_called_with(expected_cmd,
                                                        timeout=60)
def test_pktgen_unsuccessful_iptables_setup(self, mock_ssh):
    """A failing iptables command makes _iptables_setup() raise."""
    args = {
        'options': {'packetsize': 60, 'number_of_ports': 10},
    }
    scenario = pktgen.Pktgen(args, self.ctx)
    scenario.server = mock_ssh.SSH.from_node()
    scenario.number_of_ports = args['options']['number_of_ports']
    # Non-zero exit status from the remote command.
    mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
    self.assertRaises(RuntimeError, scenario._iptables_setup)
def test_pktgen_successful_iptables_get_result(self, mock_ssh):
    """_iptables_get_result() reads the DROP counter via iptables/awk."""
    args = {
        'options': {'packetsize': 60, 'number_of_ports': 10},
    }
    scenario = pktgen.Pktgen(args, self.ctx)
    scenario.server = mock_ssh.SSH.from_node()
    scenario.number_of_ports = args['options']['number_of_ports']
    mock_ssh.SSH.from_node().execute.return_value = (0, '150000', '')
    scenario._iptables_get_result()
    expected_cmd = ("sudo iptables -L INPUT -vnx |"
                    "awk '/dpts:1000:%s/ {{printf \"%%s\", $1}}'"
                    % 1010)
    mock_ssh.SSH.from_node().execute.assert_called_with(expected_cmd)
def test_pktgen_unsuccessful_iptables_get_result(self, mock_ssh):
    """A failing iptables query makes _iptables_get_result() raise."""
    args = {
        'options': {'packetsize': 60, 'number_of_ports': 10},
    }
    scenario = pktgen.Pktgen(args, self.ctx)
    scenario.server = mock_ssh.SSH.from_node()
    scenario.number_of_ports = args['options']['number_of_ports']
    mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
    self.assertRaises(RuntimeError, scenario._iptables_get_result)
def test_pktgen_successful_no_sla(self, mock_ssh):
    """run() merges received-packet count and packetsize into result."""
    args = {
        'options': {'packetsize': 60, 'number_of_ports': 10},
    }
    result = {}
    scenario = pktgen.Pktgen(args, self.ctx)
    scenario.server = mock_ssh.SSH.from_node()
    scenario.client = mock_ssh.SSH.from_node()
    scenario._iptables_get_result = mock.Mock(return_value=149300)
    sample_output = '{"packets_per_second": 9753, "errors": 0, \
"packets_sent": 149776, "packetsize": 60, "flows": 110, "ppm": 3179}'
    mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
    scenario.run(result)
    expected = jsonutils.loads(sample_output)
    expected["packets_received"] = 149300
    expected["packetsize"] = 60
    self.assertEqual(result, expected)
def test_pktgen_successful_sla(self, mock_ssh):
    """run() succeeds when the measured ppm is within the SLA limit."""
    args = {
        'options': {'packetsize': 60, 'number_of_ports': 10},
        'sla': {'max_ppm': 10000}
    }
    result = {}
    scenario = pktgen.Pktgen(args, self.ctx)
    scenario.server = mock_ssh.SSH.from_node()
    scenario.client = mock_ssh.SSH.from_node()
    scenario._iptables_get_result = mock.Mock(return_value=149300)
    sample_output = '{"packets_per_second": 9753, "errors": 0, \
"packets_sent": 149776, "packetsize": 60, "flows": 110, "ppm": 3179}'
    mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
    scenario.run(result)
    expected = jsonutils.loads(sample_output)
    expected["packets_received"] = 149300
    expected["packetsize"] = 60
    self.assertEqual(result, expected)
def test_pktgen_unsuccessful_sla(self, mock_ssh):
    """run() raises AssertionError when the SLA check fails."""
    args = {
        'options': {'packetsize': 60, 'number_of_ports': 10},
        'sla': {'max_ppm': 1000}
    }
    result = {}
    scenario = pktgen.Pktgen(args, self.ctx)
    scenario.server = mock_ssh.SSH.from_node()
    scenario.client = mock_ssh.SSH.from_node()
    scenario._iptables_get_result = mock.Mock(return_value=149300)
    sample_output = '{"packets_per_second": 9753, "errors": 0, \
"packets_sent": 149776, "packetsize": 60, "flows": 110}'
    mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
    self.assertRaises(AssertionError, scenario.run, result)
def test_pktgen_unsuccessful_script_error(self, mock_ssh):
    """A non-zero exit from the remote script makes run() raise."""
    args = {
        'options': {'packetsize': 60, 'number_of_ports': 10},
        'sla': {'max_ppm': 1000}
    }
    result = {}
    scenario = pktgen.Pktgen(args, self.ctx)
    scenario.server = mock_ssh.SSH.from_node()
    scenario.client = mock_ssh.SSH.from_node()
    mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
    self.assertRaises(RuntimeError, scenario.run, result)
def test_pktgen_get_vnic_driver_name(self, mock_ssh):
    """_get_vnic_driver_name() returns the driver reported via SSH."""
    args = {
        'options': {'packetsize': 60},
    }
    scenario = pktgen.Pktgen(args, self.ctx)
    scenario.server = mock_ssh.SSH.from_node()
    mock_ssh.SSH.from_node().execute.return_value = (0, 'ixgbevf', '')
    driver_name = scenario._get_vnic_driver_name()
    self.assertEqual(driver_name, 'ixgbevf')
def test_pktgen_unsuccessful_get_vnic_driver_name(self, mock_ssh):
    """_get_vnic_driver_name() raises RuntimeError on ssh command failure."""
    args = {
        'options': {'packetsize': 60},
    }
    p = pktgen.Pktgen(args, self.ctx)
    p.server = mock_ssh.SSH.from_node()
    mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
    self.assertRaises(RuntimeError, p._get_vnic_driver_name)
def test_pktgen_get_sriov_queue_number(self, mock_ssh):
    """_get_sriov_queue_number() parses the queue count as an int."""
    args = {
        'options': {'packetsize': 60},
    }
    p = pktgen.Pktgen(args, self.ctx)
    p.server = mock_ssh.SSH.from_node()
    mock_ssh.SSH.from_node().execute.return_value = (0, '2', '')
    p.queue_number = p._get_sriov_queue_number()
    self.assertEqual(p.queue_number, 2)
def test_pktgen_unsuccessful_get_sriov_queue_number(self, mock_ssh):
    """_get_sriov_queue_number() raises RuntimeError on ssh failure."""
    args = {
        'options': {'packetsize': 60},
    }
    p = pktgen.Pktgen(args, self.ctx)
    p.server = mock_ssh.SSH.from_node()
    mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
    self.assertRaises(RuntimeError, p._get_sriov_queue_number)
def test_pktgen_get_available_queue_number(self, mock_ssh):
    """_get_available_queue_number() queries the max combined queues via ethtool."""
    args = {
        'options': {'packetsize': 60},
    }
    p = pktgen.Pktgen(args, self.ctx)
    p.server = mock_ssh.SSH.from_node()
    mock_ssh.SSH.from_node().execute.return_value = (0, '4', '')
    p._get_available_queue_number()
    # First "Combined" line of `ethtool -l` is the hardware maximum.
    mock_ssh.SSH.from_node().execute.assert_called_with(
        "sudo ethtool -l eth0 | grep Combined | head -1 |"
        "awk '{printf $2}'")
def test_pktgen_unsuccessful_get_available_queue_number(self, mock_ssh):
    """_get_available_queue_number() raises RuntimeError on ssh failure."""
    args = {
        'options': {'packetsize': 60},
    }
    p = pktgen.Pktgen(args, self.ctx)
    p.server = mock_ssh.SSH.from_node()
    mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
    self.assertRaises(RuntimeError, p._get_available_queue_number)
def test_pktgen_get_usable_queue_number(self, mock_ssh):
    """_get_usable_queue_number() queries the current combined queues via ethtool."""
    args = {
        'options': {'packetsize': 60},
    }
    p = pktgen.Pktgen(args, self.ctx)
    p.server = mock_ssh.SSH.from_node()
    mock_ssh.SSH.from_node().execute.return_value = (0, '1', '')
    p._get_usable_queue_number()
    # Last "Combined" line of `ethtool -l` is the currently-configured value.
    mock_ssh.SSH.from_node().execute.assert_called_with(
        "sudo ethtool -l eth0 | grep Combined | tail -1 |"
        "awk '{printf $2}'")
def test_pktgen_unsuccessful_get_usable_queue_number(self, mock_ssh):
    """_get_usable_queue_number() raises RuntimeError on ssh failure."""
    args = {
        'options': {'packetsize': 60},
    }
    p = pktgen.Pktgen(args, self.ctx)
    p.server = mock_ssh.SSH.from_node()
    mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
    self.assertRaises(RuntimeError, p._get_usable_queue_number)
def test_pktgen_enable_ovs_multiqueue(self, mock_ssh):
    """_enable_ovs_multiqueue() bumps queues to the available maximum (4)."""
    args = {
        'options': {'packetsize': 60},
    }
    p = pktgen.Pktgen(args, self.ctx)
    p.server = mock_ssh.SSH.from_node()
    p.client = mock_ssh.SSH.from_node()
    mock_ssh.SSH.from_node().execute.return_value = (0, '4', '')
    # usable (1) < available (4) -> multiqueue gets enabled.
    p._get_usable_queue_number = mock.Mock(return_value=1)
    p._get_available_queue_number = mock.Mock(return_value=4)
    p.queue_number = p._enable_ovs_multiqueue()
    self.assertEqual(p.queue_number, 4)
def test_pktgen_enable_ovs_multiqueue_1q(self, mock_ssh):
    """_enable_ovs_multiqueue() is a no-op when only one queue exists."""
    args = {
        'options': {'packetsize': 60},
    }
    p = pktgen.Pktgen(args, self.ctx)
    p.server = mock_ssh.SSH.from_node()
    p.client = mock_ssh.SSH.from_node()
    mock_ssh.SSH.from_node().execute.return_value = (0, '1', '')
    # usable == available == 1 -> nothing to enable, stays single-queue.
    p._get_usable_queue_number = mock.Mock(return_value=1)
    p._get_available_queue_number = mock.Mock(return_value=1)
    p.queue_number = p._enable_ovs_multiqueue()
    self.assertEqual(p.queue_number, 1)
def test_pktgen_unsuccessful_enable_ovs_multiqueue(self, mock_ssh):
    """_enable_ovs_multiqueue() raises RuntimeError on ssh failure."""
    args = {
        'options': {'packetsize': 60},
    }
    p = pktgen.Pktgen(args, self.ctx)
    p.server = mock_ssh.SSH.from_node()
    p.client = mock_ssh.SSH.from_node()
    mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
    p._get_usable_queue_number = mock.Mock(return_value=1)
    p._get_available_queue_number = mock.Mock(return_value=4)
    self.assertRaises(RuntimeError, p._enable_ovs_multiqueue)
def test_pktgen_setup_irqmapping_ovs(self, mock_ssh):
    """_setup_irqmapping_ovs(4) pins the last queue IRQ to CPU mask 8."""
    args = {
        'options': {'packetsize': 60},
    }
    p = pktgen.Pktgen(args, self.ctx)
    p.server = mock_ssh.SSH.from_node()
    p.client = mock_ssh.SSH.from_node()
    # The mocked ssh always reports IRQ number 10.
    mock_ssh.SSH.from_node().execute.return_value = (0, '10', '')
    p._setup_irqmapping_ovs(4)
    # Final call writes the affinity of queue 4 (mask 1<<3 == 8).
    mock_ssh.SSH.from_node().execute.assert_called_with(
        "echo 8 | sudo tee /proc/irq/10/smp_affinity")
def test_pktgen_setup_irqmapping_ovs_1q(self, mock_ssh):
    """_setup_irqmapping_ovs(1) pins the single queue IRQ to CPU mask 1."""
    args = {
        'options': {'packetsize': 60},
    }
    p = pktgen.Pktgen(args, self.ctx)
    p.server = mock_ssh.SSH.from_node()
    p.client = mock_ssh.SSH.from_node()
    mock_ssh.SSH.from_node().execute.return_value = (0, '10', '')
    p._setup_irqmapping_ovs(1)
    mock_ssh.SSH.from_node().execute.assert_called_with(
        "echo 1 | sudo tee /proc/irq/10/smp_affinity")
def test_pktgen_unsuccessful_setup_irqmapping_ovs(self, mock_ssh):
    """_setup_irqmapping_ovs() raises RuntimeError on ssh failure (4 queues)."""
    args = {
        'options': {'packetsize': 60},
    }
    p = pktgen.Pktgen(args, self.ctx)
    p.server = mock_ssh.SSH.from_node()
    p.client = mock_ssh.SSH.from_node()
    mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
    self.assertRaises(RuntimeError, p._setup_irqmapping_ovs, 4)
def test_pktgen_unsuccessful_setup_irqmapping_ovs_1q(self, mock_ssh):
    """_setup_irqmapping_ovs() raises RuntimeError on ssh failure (1 queue)."""
    args = {
        'options': {'packetsize': 60},
    }
    p = pktgen.Pktgen(args, self.ctx)
    p.server = mock_ssh.SSH.from_node()
    p.client = mock_ssh.SSH.from_node()
    mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
    self.assertRaises(RuntimeError, p._setup_irqmapping_ovs, 1)
def test_pktgen_setup_irqmapping_sriov(self, mock_ssh):
    """_setup_irqmapping_sriov(2) pins the last queue IRQ to CPU mask 2."""
    args = {
        'options': {'packetsize': 60},
    }
    p = pktgen.Pktgen(args, self.ctx)
    p.server = mock_ssh.SSH.from_node()
    p.client = mock_ssh.SSH.from_node()
    mock_ssh.SSH.from_node().execute.return_value = (0, '10', '')
    p._setup_irqmapping_sriov(2)
    mock_ssh.SSH.from_node().execute.assert_called_with(
        "echo 2 | sudo tee /proc/irq/10/smp_affinity")
def test_pktgen_setup_irqmapping_sriov_1q(self, mock_ssh):
    """_setup_irqmapping_sriov(1) pins the single queue IRQ to CPU mask 1."""
    args = {
        'options': {'packetsize': 60},
    }
    p = pktgen.Pktgen(args, self.ctx)
    p.server = mock_ssh.SSH.from_node()
    p.client = mock_ssh.SSH.from_node()
    mock_ssh.SSH.from_node().execute.return_value = (0, '10', '')
    p._setup_irqmapping_sriov(1)
    mock_ssh.SSH.from_node().execute.assert_called_with(
        "echo 1 | sudo tee /proc/irq/10/smp_affinity")
def test_pktgen_unsuccessful_setup_irqmapping_sriov(self, mock_ssh):
    """_setup_irqmapping_sriov() raises RuntimeError on ssh failure (2 queues)."""
    args = {
        'options': {'packetsize': 60},
    }
    p = pktgen.Pktgen(args, self.ctx)
    p.server = mock_ssh.SSH.from_node()
    p.client = mock_ssh.SSH.from_node()
    mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
    self.assertRaises(RuntimeError, p._setup_irqmapping_sriov, 2)
def test_pktgen_unsuccessful_setup_irqmapping_sriov_1q(self, mock_ssh):
    """_setup_irqmapping_sriov() raises RuntimeError on ssh failure (1 queue)."""
    args = {
        'options': {'packetsize': 60},
    }
    p = pktgen.Pktgen(args, self.ctx)
    p.server = mock_ssh.SSH.from_node()
    p.client = mock_ssh.SSH.from_node()
    mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
    self.assertRaises(RuntimeError, p._setup_irqmapping_sriov, 1)
def test_pktgen_is_irqbalance_disabled(self, mock_ssh):
    """_is_irqbalance_disabled() checks /etc/default/irqbalance via grep."""
    args = {
        'options': {'packetsize': 60},
    }
    p = pktgen.Pktgen(args, self.ctx)
    p.server = mock_ssh.SSH.from_node()
    mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
    p._is_irqbalance_disabled()
    mock_ssh.SSH.from_node().execute.assert_called_with(
        "grep ENABLED /etc/default/irqbalance")
def test_pktgen_unsuccessful_is_irqbalance_disabled(self, mock_ssh):
    """_is_irqbalance_disabled() raises RuntimeError on ssh failure."""
    args = {
        'options': {'packetsize': 60},
    }
    p = pktgen.Pktgen(args, self.ctx)
    p.server = mock_ssh.SSH.from_node()
    mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
    self.assertRaises(RuntimeError, p._is_irqbalance_disabled)
def test_pktgen_disable_irqbalance(self, mock_ssh):
    """_disable_irqbalance() stops the irqbalance service over ssh."""
    args = {
        'options': {'packetsize': 60},
    }
    p = pktgen.Pktgen(args, self.ctx)
    p.server = mock_ssh.SSH.from_node()
    p.client = mock_ssh.SSH.from_node()
    mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
    p._disable_irqbalance()
    mock_ssh.SSH.from_node().execute.assert_called_with(
        "sudo service irqbalance disable")
def test_pktgen_unsuccessful_disable_irqbalance(self, mock_ssh):
    """_disable_irqbalance() raises RuntimeError on ssh failure."""
    args = {
        'options': {'packetsize': 60},
    }
    p = pktgen.Pktgen(args, self.ctx)
    p.server = mock_ssh.SSH.from_node()
    p.client = mock_ssh.SSH.from_node()
    mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
    self.assertRaises(RuntimeError, p._disable_irqbalance)
def test_pktgen_multiqueue_setup_ovs(self, mock_ssh):
    """multiqueue_setup() takes the OVS path for a virtio_net vNIC (4 queues)."""
    args = {
        'options': {'packetsize': 60, 'multiqueue': True},
    }
    p = pktgen.Pktgen(args, self.ctx)
    p.server = mock_ssh.SSH.from_node()
    p.client = mock_ssh.SSH.from_node()
    mock_ssh.SSH.from_node().execute.return_value = (0, '4', '')
    p._is_irqbalance_disabled = mock.Mock(return_value=False)
    # virtio_net driver -> OVS multiqueue branch.
    p._get_vnic_driver_name = mock.Mock(return_value="virtio_net")
    p._get_usable_queue_number = mock.Mock(return_value=1)
    p._get_available_queue_number = mock.Mock(return_value=4)
    p.multiqueue_setup()
    self.assertEqual(p.queue_number, 4)
def test_pktgen_multiqueue_setup_ovs_1q(self, mock_ssh):
    """multiqueue_setup() keeps a single queue when OVS vNIC only has one."""
    args = {
        'options': {'packetsize': 60, 'multiqueue': True},
    }
    p = pktgen.Pktgen(args, self.ctx)
    p.server = mock_ssh.SSH.from_node()
    p.client = mock_ssh.SSH.from_node()
    mock_ssh.SSH.from_node().execute.return_value = (0, '1', '')
    p._is_irqbalance_disabled = mock.Mock(return_value=False)
    p._get_vnic_driver_name = mock.Mock(return_value="virtio_net")
    p._get_usable_queue_number = mock.Mock(return_value=1)
    p._get_available_queue_number = mock.Mock(return_value=1)
    p.multiqueue_setup()
    self.assertEqual(p.queue_number, 1)
def test_pktgen_multiqueue_setup_sriov(self, mock_ssh):
    """multiqueue_setup() takes the SR-IOV path for an ixgbevf vNIC (2 queues)."""
    args = {
        'options': {'packetsize': 60, 'multiqueue': True},
    }
    p = pktgen.Pktgen(args, self.ctx)
    p.server = mock_ssh.SSH.from_node()
    p.client = mock_ssh.SSH.from_node()
    mock_ssh.SSH.from_node().execute.return_value = (0, '2', '')
    p._is_irqbalance_disabled = mock.Mock(return_value=False)
    # ixgbevf driver -> SR-IOV multiqueue branch.
    p._get_vnic_driver_name = mock.Mock(return_value="ixgbevf")
    p.multiqueue_setup()
    self.assertEqual(p.queue_number, 2)
def test_pktgen_multiqueue_setup_sriov_1q(self, mock_ssh):
    """multiqueue_setup() keeps one queue when the SR-IOV vNIC only has one."""
    args = {
        'options': {'packetsize': 60, 'multiqueue': True},
    }
    p = pktgen.Pktgen(args, self.ctx)
    p.server = mock_ssh.SSH.from_node()
    p.client = mock_ssh.SSH.from_node()
    mock_ssh.SSH.from_node().execute.return_value = (0, '1', '')
    p._is_irqbalance_disabled = mock.Mock(return_value=False)
    p._get_vnic_driver_name = mock.Mock(return_value="ixgbevf")
    p.multiqueue_setup()
    self.assertEqual(p.queue_number, 1)
def test_pktgen_run_with_setup_done(self, mock_ssh):
    """run() skips setup/multiqueue-setup when both done-flags are set."""
    args = {
        'options': {
            'packetsize': 60,
            'number_of_ports': 10,
            'duration': 20,
            'multiqueue': True},
        'sla': {
            'max_ppm': 1}}
    result = {}
    p = pktgen.Pktgen(args, self.ctx)
    p.server = mock_ssh.SSH.from_node()
    p.client = mock_ssh.SSH.from_node()
    # Flags that tell run() the environment is already prepared.
    p.setup_done = True
    p.multiqueue_setup_done = True
    mock_iptables_result = mock.Mock()
    mock_iptables_result.return_value = 149300
    p._iptables_get_result = mock_iptables_result
    sample_output = '{"packets_per_second": 9753, "errors": 0, \
        "packets_sent": 149300, "flows": 110, "ppm": 0}'
    mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
    p.run(result)
    expected_result = jsonutils.loads(sample_output)
    expected_result["packets_received"] = 149300
    expected_result["packetsize"] = 60
    self.assertEqual(result, expected_result)
def test_pktgen_run_with_ovs_multiqueque(self, mock_ssh):
    """run() enables OVS multiqueue (virtio_net) and still meets the SLA."""
    args = {
        'options': {
            'packetsize': 60,
            'number_of_ports': 10,
            'duration': 20,
            'multiqueue': True},
        'sla': {
            'max_ppm': 1}}
    result = {}
    p = pktgen.Pktgen(args, self.ctx)
    p.server = mock_ssh.SSH.from_node()
    p.client = mock_ssh.SSH.from_node()
    # Stub out the whole OVS multiqueue preparation chain.
    p._get_vnic_driver_name = mock.Mock(return_value="virtio_net")
    p._get_usable_queue_number = mock.Mock(return_value=1)
    p._get_available_queue_number = mock.Mock(return_value=4)
    p._enable_ovs_multiqueue = mock.Mock(return_value=4)
    p._setup_irqmapping_ovs = mock.Mock()
    p._iptables_get_result = mock.Mock(return_value=149300)
    sample_output = '{"packets_per_second": 9753, "errors": 0, \
        "packets_sent": 149300, "flows": 110, "ppm": 0}'
    mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
    p.run(result)
    expected_result = jsonutils.loads(sample_output)
    expected_result["packets_received"] = 149300
    expected_result["packetsize"] = 60
    self.assertEqual(result, expected_result)
def test_pktgen_run_with_sriov_multiqueque(self, mock_ssh):
    """run() uses the SR-IOV multiqueue path (ixgbevf) and meets the SLA."""
    args = {
        'options': {
            'packetsize': 60,
            'number_of_ports': 10,
            'duration': 20,
            'multiqueue': True},
        'sla': {
            'max_ppm': 1}}
    result = {}
    p = pktgen.Pktgen(args, self.ctx)
    p.server = mock_ssh.SSH.from_node()
    p.client = mock_ssh.SSH.from_node()
    # Stub out the SR-IOV multiqueue preparation chain.
    p._get_vnic_driver_name = mock.Mock(return_value="ixgbevf")
    p._get_sriov_queue_number = mock.Mock(return_value=2)
    p._setup_irqmapping_sriov = mock.Mock()
    p._iptables_get_result = mock.Mock(return_value=149300)
    sample_output = '{"packets_per_second": 9753, "errors": 0, \
        "packets_sent": 149300, "flows": 110, "ppm": 0}'
    mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
    p.run(result)
    expected_result = jsonutils.loads(sample_output)
    expected_result["packets_received"] = 149300
    expected_result["packetsize"] = 60
    self.assertEqual(result, expected_result)
| 33.799401
| 81
| 0.601825
| 2,796
| 22,578
| 4.54721
| 0.065451
| 0.082586
| 0.087305
| 0.122227
| 0.900346
| 0.87266
| 0.845131
| 0.840412
| 0.83821
| 0.81603
| 0
| 0.028077
| 0.260165
| 22,578
| 667
| 82
| 33.850075
| 0.733058
| 0.012269
| 0
| 0.693252
| 0
| 0
| 0.102448
| 0.00786
| 0
| 0
| 0
| 0
| 0.083845
| 1
| 0.0818
| false
| 0
| 0.00818
| 0
| 0.092025
| 0.006135
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0a7afd02bf2f95c23824af798fe6e8b3168e798a
| 47
|
py
|
Python
|
sr/utils/__init__.py
|
marcocamma/sr
|
221026b6e5bcaf1aab5e418260adf3724e517287
|
[
"MIT"
] | null | null | null |
sr/utils/__init__.py
|
marcocamma/sr
|
221026b6e5bcaf1aab5e418260adf3724e517287
|
[
"MIT"
] | null | null | null |
sr/utils/__init__.py
|
marcocamma/sr
|
221026b6e5bcaf1aab5e418260adf3724e517287
|
[
"MIT"
] | null | null | null |
from . import unicode
from . import conversion
| 15.666667
| 24
| 0.787234
| 6
| 47
| 6.166667
| 0.666667
| 0.540541
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170213
| 47
| 2
| 25
| 23.5
| 0.948718
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
0a8197d1b7bc435e7e509cf003502a33ebc433f7
| 7,948
|
py
|
Python
|
xlsxwriter/test/styles/test_write_font.py
|
haiyangd/XlsxWriter
|
81f8c9435b3e03a1458bf9ba314b5d3f7508290f
|
[
"BSD-2-Clause-FreeBSD"
] | 3
|
2018-02-26T12:31:41.000Z
|
2020-10-10T14:14:11.000Z
|
xlsxwriter/test/styles/test_write_font.py
|
haiyangd/XlsxWriter
|
81f8c9435b3e03a1458bf9ba314b5d3f7508290f
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
xlsxwriter/test/styles/test_write_font.py
|
haiyangd/XlsxWriter
|
81f8c9435b3e03a1458bf9ba314b5d3f7508290f
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2017, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ...styles import Styles
from ...format import Format
class TestWriteFont(unittest.TestCase):
    """
    Test the Styles _write_font() method.

    Each test builds a Format with one (or more) properties, writes the
    <font> XML element into an in-memory StringIO handle, and compares the
    produced markup to the expected literal.
    """

    def setUp(self):
        # Route styles output into an in-memory buffer for inspection.
        self.fh = StringIO()
        self.styles = Styles()
        self.styles._set_filehandle(self.fh)

    def test_write_font_1(self):
        """Test the _write_font() method. Default properties."""
        properties = {}
        xf_format = Format(properties)

        self.styles._write_font(xf_format)

        exp = """<font><sz val="11"/><color theme="1"/><name val="Calibri"/><family val="2"/><scheme val="minor"/></font>"""
        got = self.fh.getvalue()

        self.assertEqual(got, exp)

    def test_write_font_2(self):
        """Test the _write_font() method. Bold."""
        properties = {'bold': 1}
        xf_format = Format(properties)

        self.styles._write_font(xf_format)

        exp = """<font><b/><sz val="11"/><color theme="1"/><name val="Calibri"/><family val="2"/><scheme val="minor"/></font>"""
        got = self.fh.getvalue()

        self.assertEqual(got, exp)

    def test_write_font_3(self):
        """Test the _write_font() method. Italic."""
        properties = {'italic': 1}
        xf_format = Format(properties)

        self.styles._write_font(xf_format)

        exp = """<font><i/><sz val="11"/><color theme="1"/><name val="Calibri"/><family val="2"/><scheme val="minor"/></font>"""
        got = self.fh.getvalue()

        self.assertEqual(got, exp)

    def test_write_font_4(self):
        """Test the _write_font() method. Underline."""
        properties = {'underline': 1}
        xf_format = Format(properties)

        self.styles._write_font(xf_format)

        exp = """<font><u/><sz val="11"/><color theme="1"/><name val="Calibri"/><family val="2"/><scheme val="minor"/></font>"""
        got = self.fh.getvalue()

        self.assertEqual(got, exp)

    def test_write_font_5(self):
        """Test the _write_font() method. Strikeout."""
        properties = {'font_strikeout': 1}
        xf_format = Format(properties)

        self.styles._write_font(xf_format)

        exp = """<font><strike/><sz val="11"/><color theme="1"/><name val="Calibri"/><family val="2"/><scheme val="minor"/></font>"""
        got = self.fh.getvalue()

        self.assertEqual(got, exp)

    def test_write_font_6(self):
        """Test the _write_font() method. Superscript."""
        properties = {'font_script': 1}
        xf_format = Format(properties)

        self.styles._write_font(xf_format)

        exp = """<font><vertAlign val="superscript"/><sz val="11"/><color theme="1"/><name val="Calibri"/><family val="2"/><scheme val="minor"/></font>"""
        got = self.fh.getvalue()

        self.assertEqual(got, exp)

    def test_write_font_7(self):
        """Test the _write_font() method. Subscript."""
        properties = {'font_script': 2}
        xf_format = Format(properties)

        self.styles._write_font(xf_format)

        exp = """<font><vertAlign val="subscript"/><sz val="11"/><color theme="1"/><name val="Calibri"/><family val="2"/><scheme val="minor"/></font>"""
        got = self.fh.getvalue()

        self.assertEqual(got, exp)

    def test_write_font_8(self):
        """Test the _write_font() method. Font name."""
        # Non-default font name drops the <scheme> element.
        properties = {'font_name': 'Arial'}
        xf_format = Format(properties)

        self.styles._write_font(xf_format)

        exp = """<font><sz val="11"/><color theme="1"/><name val="Arial"/><family val="2"/></font>"""
        got = self.fh.getvalue()

        self.assertEqual(got, exp)

    def test_write_font_9(self):
        """Test the _write_font() method. Font size."""
        properties = {'size': 12}
        xf_format = Format(properties)

        self.styles._write_font(xf_format)

        exp = """<font><sz val="12"/><color theme="1"/><name val="Calibri"/><family val="2"/><scheme val="minor"/></font>"""
        got = self.fh.getvalue()

        self.assertEqual(got, exp)

    def test_write_font_10(self):
        """Test the _write_font() method. Outline."""
        properties = {'font_outline': 1}
        xf_format = Format(properties)

        self.styles._write_font(xf_format)

        exp = """<font><outline/><sz val="11"/><color theme="1"/><name val="Calibri"/><family val="2"/><scheme val="minor"/></font>"""
        got = self.fh.getvalue()

        self.assertEqual(got, exp)

    def test_write_font_11(self):
        """Test the _write_font() method. Shadow."""
        properties = {'font_shadow': 1}
        xf_format = Format(properties)

        self.styles._write_font(xf_format)

        exp = """<font><shadow/><sz val="11"/><color theme="1"/><name val="Calibri"/><family val="2"/><scheme val="minor"/></font>"""
        got = self.fh.getvalue()

        self.assertEqual(got, exp)

    def test_write_font_12(self):
        """Test the _write_font() method. Colour = red."""
        properties = {'color': '#FF0000'}
        xf_format = Format(properties)

        self.styles._write_font(xf_format)

        exp = """<font><sz val="11"/><color rgb="FFFF0000"/><name val="Calibri"/><family val="2"/><scheme val="minor"/></font>"""
        got = self.fh.getvalue()

        self.assertEqual(got, exp)

    def test_write_font_13(self):
        """Test the _write_font() method. All font attributes to check order."""
        properties = {
            'bold': 1,
            'color': '#FF0000',
            'font_outline': 1,
            'font_script': 1,
            'font_shadow': 1,
            'font_strikeout': 1,
            'italic': 1,
            'size': 12,
            'underline': 1,
        }
        xf_format = Format(properties)

        self.styles._write_font(xf_format)

        exp = """<font><b/><i/><strike/><outline/><shadow/><u/><vertAlign val="superscript"/><sz val="12"/><color rgb="FFFF0000"/><name val="Calibri"/><family val="2"/><scheme val="minor"/></font>"""
        got = self.fh.getvalue()

        self.assertEqual(got, exp)

    def test_write_font_14(self):
        """Test the _write_font() method. Double underline."""
        properties = {'underline': 2}
        xf_format = Format(properties)

        self.styles._write_font(xf_format)

        exp = """<font><u val="double"/><sz val="11"/><color theme="1"/><name val="Calibri"/><family val="2"/><scheme val="minor"/></font>"""
        got = self.fh.getvalue()

        self.assertEqual(got, exp)

    def test_write_font_15(self):
        """Test the _write_font() method. Double underline."""
        # Underline style 33 maps to single accounting underline.
        properties = {'underline': 33}
        xf_format = Format(properties)

        self.styles._write_font(xf_format)

        exp = """<font><u val="singleAccounting"/><sz val="11"/><color theme="1"/><name val="Calibri"/><family val="2"/><scheme val="minor"/></font>"""
        got = self.fh.getvalue()

        self.assertEqual(got, exp)

    def test_write_font_16(self):
        """Test the _write_font() method. Double underline."""
        # Underline style 34 maps to double accounting underline.
        properties = {'underline': 34}
        xf_format = Format(properties)

        self.styles._write_font(xf_format)

        exp = """<font><u val="doubleAccounting"/><sz val="11"/><color theme="1"/><name val="Calibri"/><family val="2"/><scheme val="minor"/></font>"""
        got = self.fh.getvalue()

        self.assertEqual(got, exp)

    def test_write_font_17(self):
        """Test the _write_font() method. Hyperlink."""
        properties = {'hyperlink': 1}
        xf_format = Format(properties)

        self.styles._write_font(xf_format)

        exp = """<font><u/><sz val="11"/><color theme="10"/><name val="Calibri"/><family val="2"/></font>"""
        got = self.fh.getvalue()

        self.assertEqual(got, exp)
| 31.168627
| 199
| 0.576623
| 979
| 7,948
| 4.510725
| 0.100102
| 0.105978
| 0.061141
| 0.061594
| 0.800045
| 0.791214
| 0.717391
| 0.703804
| 0.703804
| 0.703804
| 0
| 0.02302
| 0.229366
| 7,948
| 254
| 200
| 31.291339
| 0.697959
| 0.110846
| 0
| 0.492754
| 0
| 0.123188
| 0.320401
| 0.079628
| 0
| 0
| 0
| 0
| 0.123188
| 1
| 0.130435
| false
| 0
| 0.028986
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0abe383ca309301febf25e471bffd58ee3e40cd1
| 17,936
|
py
|
Python
|
system/t09_repo/include.py
|
Yelp/aptly
|
59a0c0140ba0f0f12554d57d99110511eb3e6229
|
[
"MIT"
] | null | null | null |
system/t09_repo/include.py
|
Yelp/aptly
|
59a0c0140ba0f0f12554d57d99110511eb3e6229
|
[
"MIT"
] | null | null | null |
system/t09_repo/include.py
|
Yelp/aptly
|
59a0c0140ba0f0f12554d57d99110511eb3e6229
|
[
"MIT"
] | 1
|
2022-03-18T11:33:21.000Z
|
2022-03-18T11:33:21.000Z
|
import tempfile
import shutil
import os
import inspect
import re
from lib import BaseTest
# Pre-compiled pattern for the gpg/gpgv output lines that vary between runs
# (signature dates/key ids, keyblock resource paths, "can't check" notices)
# and therefore must be masked before comparing command output to fixtures.
# Compiling once hoists the (cached but still non-free) pattern lookup out of
# every comparison.
_GPG_NOISE_RE = re.compile(
    r'Signature made .* using|gpgv: keyblock resource .*$|gpgv: Can\'t check signature: .*$',
    flags=re.MULTILINE)


def gpgRemove(_, s):
    """Return *s* with run-dependent gpg/gpgv noise removed.

    The unused first argument is the test instance; the signature is kept so
    the function can be assigned directly to ``outputMatchPrepare``.
    """
    return _GPG_NOISE_RE.sub('', s)
def changesRemove(_, s):
    """Return *s* with the fixture ``changes`` directory path stripped.

    The unused first argument is the test instance; the signature matches
    what ``outputMatchPrepare`` expects.
    """
    # The fixtures live next to the module that defines BaseTest.
    fixture_root = os.path.dirname(inspect.getsourcefile(BaseTest))
    changes_dir = os.path.join(fixture_root, "changes")
    return s.replace(changes_dir, "")
def tempDirRemove(self, s):
    """Return *s* with the test's temporary source directory path stripped."""
    # self.tempSrcDir is created in the test's prepare(); remove every
    # occurrence so output comparisons are path-independent.
    temp_dir = self.tempSrcDir
    return s.replace(temp_dir, "")
class IncludeRepo1Test(BaseTest):
    """
    include packages to local repo: .changes file from directory
    """
    fixtureCmds = [
        "aptly repo create unstable",
    ]
    runCmd = "aptly repo include -no-remove-files -keyring=${files}/aptly.pub ${changes}"
    # Mask run-dependent gpg output before diffing.
    outputMatchPrepare = gpgRemove

    def check(self):
        """Verify command output, repo contents and pool files."""
        self.check_output()
        self.check_cmd_output("aptly repo show -with-packages unstable", "repo_show")
        # check pool
        self.check_exists('pool/66/83/99580590bf1ffcd9eb161b6e5747_hardlink_0.2.1_amd64.deb')
        self.check_exists('pool/c0/d7/458aa2ca3886cd6885f395a289ef_hardlink_0.2.1.dsc')
        self.check_exists('pool/4d/f0/adce005526a1f0e1b38171ddb1f0_hardlink_0.2.1.tar.gz')
class IncludeRepo2Test(BaseTest):
    """
    include packages to local repo: .changes file from file + custom repo
    """
    fixtureCmds = [
        "aptly repo create my-unstable",
        "aptly repo add my-unstable ${files}",
    ]
    # -repo template routes the upload into my-<distribution>.
    runCmd = "aptly repo include -no-remove-files -keyring=${files}/aptly.pub -repo=my-{{.Distribution}} ${changes}/hardlink_0.2.1_amd64.changes"
    outputMatchPrepare = gpgRemove

    def check(self):
        """Verify command output, repo contents and pool files."""
        self.check_output()
        self.check_cmd_output("aptly repo show -with-packages my-unstable", "repo_show")
        # check pool
        self.check_exists('pool/66/83/99580590bf1ffcd9eb161b6e5747_hardlink_0.2.1_amd64.deb')
        self.check_exists('pool/c0/d7/458aa2ca3886cd6885f395a289ef_hardlink_0.2.1.dsc')
        self.check_exists('pool/4d/f0/adce005526a1f0e1b38171ddb1f0_hardlink_0.2.1.tar.gz')
class IncludeRepo3Test(BaseTest):
    """
    include packages to local repo: broken repo flag
    """
    fixtureCmds = [
    ]
    # Note the deliberately malformed template: {{.Distribution} (one brace).
    runCmd = "aptly repo include -no-remove-files -keyring=${files}/aptly.pub -repo=my-{{.Distribution} ${changes}"
    expectedCode = 1

    def outputMatchPrepare(_, s):
        # Template-engine hint text differs between Go versions; drop it.
        return s.replace('; missing space?', '')
class IncludeRepo4Test(BaseTest):
    """
    include packages to local repo: missing repo
    """
    # No fixture repo is created, so the include must fail.
    fixtureCmds = [
    ]
    runCmd = "aptly repo include -no-remove-files -ignore-signatures -keyring=${files}/aptly.pub ${changes}"
    outputMatchPrepare = changesRemove
    expectedCode = 1
class IncludeRepo5Test(BaseTest):
    """
    include packages to local repo: remove files being added
    """
    fixtureCmds = [
        "aptly repo create unstable",
    ]
    # No -no-remove-files: included files must be deleted from the source dir.
    runCmd = "aptly repo include -keyring=${files}/aptly.pub "
    outputMatchPrepare = gpgRemove

    def prepare(self):
        """Copy fixture .changes plus one unrelated file into a temp dir."""
        super(IncludeRepo5Test, self).prepare()

        self.tempSrcDir = tempfile.mkdtemp()
        shutil.copytree(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "changes"), os.path.join(self.tempSrcDir, "01"))
        # Extra file not referenced by the .changes; must survive the include.
        shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "pyspi_0.6.1-1.3.diff.gz"),
                    os.path.join(self.tempSrcDir, "01", "pyspi_0.6.1-1.3.diff.gz"))

        self.runCmd += self.tempSrcDir

    def check(self):
        """Verify repo/pool contents and that only included files were removed."""
        try:
            self.check_output()
            self.check_cmd_output("aptly repo show -with-packages unstable", "repo_show")
            # check pool
            self.check_exists('pool/66/83/99580590bf1ffcd9eb161b6e5747_hardlink_0.2.1_amd64.deb')
            self.check_exists('pool/c0/d7/458aa2ca3886cd6885f395a289ef_hardlink_0.2.1.dsc')
            self.check_exists('pool/4d/f0/adce005526a1f0e1b38171ddb1f0_hardlink_0.2.1.tar.gz')

            # Files referenced by the .changes must have been removed ...
            for path in ["hardlink_0.2.1.dsc", "hardlink_0.2.1.tar.gz", "hardlink_0.2.1_amd64.changes", "hardlink_0.2.1_amd64.deb"]:
                path = os.path.join(self.tempSrcDir, "01", path)
                if os.path.exists(path):
                    raise Exception("path %s shouldn't exist" % (path, ))

            # ... while the unrelated file must remain.
            path = os.path.join(self.tempSrcDir, "01", "pyspi_0.6.1-1.3.diff.gz")
            if not os.path.exists(path):
                raise Exception("path %s doesn't exist" % (path, ))
        finally:
            shutil.rmtree(self.tempSrcDir)
class IncludeRepo6Test(BaseTest):
    """
    include packages to local repo: missing files
    """
    fixtureCmds = [
        "aptly repo create unstable",
    ]
    runCmd = "aptly repo include -keyring=${files}/aptly.pub "
    expectedCode = 1

    def outputMatchPrepare(self, s):
        # Strip both gpg noise and the random temp dir path from the output.
        return gpgRemove(self, tempDirRemove(self, s))

    def prepare(self):
        """Copy only part of the fixture set so the .tar.gz is missing."""
        super(IncludeRepo6Test, self).prepare()

        self.tempSrcDir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.tempSrcDir, "01"), 0o755)
        for path in ["hardlink_0.2.1.dsc", "hardlink_0.2.1_amd64.changes", "hardlink_0.2.1_amd64.deb"]:
            shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "changes", path),
                        os.path.join(self.tempSrcDir, "01", path))

        self.runCmd += self.tempSrcDir

    def check(self):
        """On failure nothing should be removed from the source directory."""
        try:
            super(IncludeRepo6Test, self).check()

            for path in ["hardlink_0.2.1.dsc", "hardlink_0.2.1_amd64.changes", "hardlink_0.2.1_amd64.deb"]:
                path = os.path.join(self.tempSrcDir, "01", path)
                if not os.path.exists(path):
                    raise Exception("path %s doesn't exist" % (path, ))
        finally:
            shutil.rmtree(self.tempSrcDir)
class IncludeRepo7Test(BaseTest):
    """
    include packages to local repo: wrong checksum
    """
    fixtureCmds = [
        "aptly repo create unstable",
    ]
    runCmd = "aptly repo include -keyring=${files}/aptly.pub "
    expectedCode = 1

    def outputMatchPrepare(self, s):
        # Strip both gpg noise and the random temp dir path from the output.
        return gpgRemove(self, tempDirRemove(self, s))

    def prepare(self):
        """Corrupt the .dsc contents while keeping its size unchanged."""
        super(IncludeRepo7Test, self).prepare()

        self.tempSrcDir = tempfile.mkdtemp()
        shutil.copytree(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "changes"), os.path.join(self.tempSrcDir, "01"))
        with open(os.path.join(self.tempSrcDir, "01", "hardlink_0.2.1.dsc"), "w") as f:
            # Same size as the original so only the checksum check trips.
            f.write("A" * 949)  # file size

        self.runCmd += self.tempSrcDir

    def check(self):
        try:
            super(IncludeRepo7Test, self).check()
        finally:
            shutil.rmtree(self.tempSrcDir)
class IncludeRepo8Test(BaseTest):
    """
    include packages to local repo: wrong signature
    """
    fixtureCmds = [
        "aptly repo create unstable",
    ]
    runCmd = "aptly repo include -keyring=${files}/aptly.pub "
    expectedCode = 1

    def outputMatchPrepare(self, s):
        # Strip both gpg noise and the random temp dir path from the output.
        return gpgRemove(self, tempDirRemove(self, s))

    def prepare(self):
        """Edit the signed .changes body so the signature no longer verifies."""
        super(IncludeRepo8Test, self).prepare()

        self.tempSrcDir = tempfile.mkdtemp()
        shutil.copytree(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "changes"), os.path.join(self.tempSrcDir, "01"))
        with open(os.path.join(self.tempSrcDir, "01", "hardlink_0.2.1_amd64.changes"), "r+") as f:
            contents = f.read()
            f.seek(0, 0)
            # Any textual change invalidates the inline gpg signature.
            f.write(contents.replace('Julian', 'Andrey'))
            f.truncate()

        self.runCmd += self.tempSrcDir

    def check(self):
        try:
            super(IncludeRepo8Test, self).check()
        finally:
            shutil.rmtree(self.tempSrcDir)
class IncludeRepo9Test(BaseTest):
    """
    include packages to local repo: unsigned
    """
    fixtureCmds = [
        "aptly repo create unstable",
    ]
    runCmd = "aptly repo include -keyring=${files}/aptly.pub "
    expectedCode = 1

    def outputMatchPrepare(self, s):
        # Strip both gpg noise and the random temp dir path from the output.
        return gpgRemove(self, tempDirRemove(self, s))

    def prepare(self):
        """Strip the gpg signature wrapper lines from the .changes file."""
        super(IncludeRepo9Test, self).prepare()

        self.tempSrcDir = tempfile.mkdtemp()
        shutil.copytree(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "changes"), os.path.join(self.tempSrcDir, "01"))
        with open(os.path.join(self.tempSrcDir, "01", "hardlink_0.2.1_amd64.changes"), "r+") as f:
            contents = f.readlines()
            # Keep only the body; drop the armor header and signature block.
            contents = contents[3:31]
            f.seek(0, 0)
            f.write("".join(contents))
            f.truncate()

        self.runCmd += self.tempSrcDir

    def check(self):
        try:
            super(IncludeRepo9Test, self).check()
        finally:
            shutil.rmtree(self.tempSrcDir)
class IncludeRepo10Test(BaseTest):
    """
    include packages to local repo: wrong signature + -ignore-signatures
    """
    fixtureCmds = [
        "aptly repo create unstable",
    ]
    # -ignore-signatures lets the broken signature pass (expected success).
    runCmd = "aptly repo include -ignore-signatures "

    def outputMatchPrepare(self, s):
        # Strip both gpg noise and the random temp dir path from the output.
        return gpgRemove(self, tempDirRemove(self, s))

    def prepare(self):
        """Edit the signed .changes body so the signature no longer verifies."""
        super(IncludeRepo10Test, self).prepare()

        self.tempSrcDir = tempfile.mkdtemp()
        shutil.copytree(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "changes"), os.path.join(self.tempSrcDir, "01"))
        with open(os.path.join(self.tempSrcDir, "01", "hardlink_0.2.1_amd64.changes"), "r+") as f:
            contents = f.read()
            f.seek(0, 0)
            f.write(contents.replace('Julian', 'Andrey'))
            f.truncate()

        self.runCmd += self.tempSrcDir

    def check(self):
        try:
            super(IncludeRepo10Test, self).check()
        finally:
            shutil.rmtree(self.tempSrcDir)
class IncludeRepo11Test(BaseTest):
    """
    include packages to local repo: unsigned + -accept-unsigned
    """
    fixtureCmds = [
        "aptly repo create unstable",
    ]
    # -accept-unsigned allows the stripped .changes to pass (expected success).
    runCmd = "aptly repo include -accept-unsigned -keyring=${files}/aptly.pub "

    def outputMatchPrepare(self, s):
        # Strip both gpg noise and the random temp dir path from the output.
        return gpgRemove(self, tempDirRemove(self, s))

    def prepare(self):
        """Strip the gpg signature wrapper lines from the .changes file."""
        super(IncludeRepo11Test, self).prepare()

        self.tempSrcDir = tempfile.mkdtemp()
        shutil.copytree(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "changes"), os.path.join(self.tempSrcDir, "01"))
        with open(os.path.join(self.tempSrcDir, "01", "hardlink_0.2.1_amd64.changes"), "r+") as f:
            contents = f.readlines()
            contents = contents[3:31]
            f.seek(0, 0)
            f.write("".join(contents))
            f.truncate()

        self.runCmd += self.tempSrcDir

    def check(self):
        try:
            super(IncludeRepo11Test, self).check()
        finally:
            shutil.rmtree(self.tempSrcDir)
class IncludeRepo12Test(BaseTest):
    """
    include packages to local repo: unsigned + -accept-unsigned + restriction breakage
    """
    fixtureCmds = [
        "aptly repo create unstable",
    ]
    runCmd = "aptly repo include -accept-unsigned -keyring=${files}/aptly.pub "
    expectedCode = 1

    def outputMatchPrepare(self, s):
        # Strip both gpg noise and the random temp dir path from the output.
        return gpgRemove(self, tempDirRemove(self, s))

    def prepare(self):
        """Strip the signature and rewrite Binary: to break the restriction."""
        super(IncludeRepo12Test, self).prepare()

        self.tempSrcDir = tempfile.mkdtemp()
        shutil.copytree(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "changes"), os.path.join(self.tempSrcDir, "01"))
        with open(os.path.join(self.tempSrcDir, "01", "hardlink_0.2.1_amd64.changes"), "r+") as f:
            contents = f.readlines()
            contents = contents[3:31]
            # Declared binary no longer matches the uploaded .deb.
            contents[3] = "Binary: hardlink-dbg\n"
            f.seek(0, 0)
            f.write("".join(contents))
            f.truncate()

        self.runCmd += self.tempSrcDir

    def check(self):
        try:
            super(IncludeRepo12Test, self).check()
        finally:
            shutil.rmtree(self.tempSrcDir)
class IncludeRepo13Test(BaseTest):
    """
    include packages to local repo: with denying uploaders.json
    """
    fixtureCmds = [
        "aptly repo create unstable",
    ]
    runCmd = "aptly repo include -uploaders-file=${changes}/uploaders1.json -no-remove-files -keyring=${files}/aptly.pub ${changes}"
    # the uploaders rules deny this upload, so aptly must exit non-zero
    expectedCode = 1
    def outputMatchPrepare(self, s):
        # strip GPG noise first, then normalise the ${changes} paths
        scrubbed = gpgRemove(self, s)
        return changesRemove(self, scrubbed)
class IncludeRepo14Test(BaseTest):
    """
    include packages to local repo: allow with uploaders.json
    """
    fixtureCmds = [
        "aptly repo create unstable",
    ]
    runCmd = "aptly repo include -uploaders-file=${changes}/uploaders2.json -no-remove-files -keyring=${files}/aptly.pub ${changes}"
    def outputMatchPrepare(self, s):
        # normalise output: GPG noise first, then ${changes} paths
        scrubbed = gpgRemove(self, s)
        return changesRemove(self, scrubbed)
class IncludeRepo15Test(BaseTest):
    """
    include packages to local repo: no uploaders.json
    """
    fixtureCmds = [
        "aptly repo create unstable",
    ]
    runCmd = "aptly repo include -uploaders-file=${changes}/uploaders-404.json -no-remove-files -keyring=${files}/aptly.pub ${changes}"
    # the referenced uploaders file does not exist, so aptly must fail
    expectedCode = 1
    def outputMatchPrepare(self, s):
        # normalise output: GPG noise first, then ${changes} paths
        scrubbed = gpgRemove(self, s)
        return changesRemove(self, scrubbed)
class IncludeRepo16Test(BaseTest):
    """
    include packages to local repo: malformed JSON
    """
    fixtureCmds = [
        "aptly repo create unstable",
    ]
    runCmd = "aptly repo include -uploaders-file=${changes}/uploaders3.json -no-remove-files -keyring=${files}/aptly.pub ${changes}"
    # the uploaders file is not valid JSON, so aptly must fail
    expectedCode = 1
    def outputMatchPrepare(self, s):
        # normalise output: GPG noise first, then ${changes} paths
        scrubbed = gpgRemove(self, s)
        return changesRemove(self, scrubbed)
class IncludeRepo17Test(BaseTest):
    """
    include packages to local repo: malformed rule
    """
    fixtureCmds = [
        "aptly repo create unstable",
    ]
    runCmd = "aptly repo include -uploaders-file=${changes}/uploaders4.json -no-remove-files -keyring=${files}/aptly.pub ${changes}"
    # the uploaders file contains an invalid rule, so aptly must fail
    expectedCode = 1
    def outputMatchPrepare(self, s):
        # normalise output: GPG noise first, then ${changes} paths
        scrubbed = gpgRemove(self, s)
        return changesRemove(self, scrubbed)
class IncludeRepo18Test(BaseTest):
    """
    include packages to local repo: repo uploaders.json + global uploaders.json
    """
    # the repo is created with its own uploaders file, which should win over
    # the one passed on the include command line
    fixtureCmds = [
        "aptly repo create -uploaders-file=${changes}/uploaders2.json unstable",
    ]
    runCmd = "aptly repo include -uploaders-file=${changes}/uploaders1.json -no-remove-files -keyring=${files}/aptly.pub ${changes}"
    def outputMatchPrepare(self, s):
        # normalise output: GPG noise first, then ${changes} paths
        scrubbed = gpgRemove(self, s)
        return changesRemove(self, scrubbed)
class IncludeRepo19Test(BaseTest):
    """
    include packages to local repo: per-repo uploaders.json
    """
    # the repo's own uploaders file denies the upload
    fixtureCmds = [
        "aptly repo create -uploaders-file=${changes}/uploaders1.json unstable",
    ]
    runCmd = "aptly repo include -no-remove-files -keyring=${files}/aptly.pub ${changes}"
    expectedCode = 1
    def outputMatchPrepare(self, s):
        # normalise output: GPG noise first, then ${changes} paths
        scrubbed = gpgRemove(self, s)
        return changesRemove(self, scrubbed)
class IncludeRepo20Test(BaseTest):
    """
    include packages to local repo: .changes file from directory (internal PGP implementation)
    """
    fixtureCmds = [
        "aptly repo create unstable",
    ]
    runCmd = "aptly repo include -no-remove-files -keyring=${files}/aptly.pub ${changes}"
    # Reuse gpgRemove directly as the output normaliser.
    outputMatchPrepare = gpgRemove
    # Exercise aptly's built-in PGP verifier instead of the external gpg binary.
    configOverride = {"gpgProvider": "internal"}
class IncludeRepo21Test(BaseTest):
    """
    include packages to local repo: wrong signature (internal PGP implementation)
    """
    fixtureCmds = [
        "aptly repo create unstable",
    ]
    runCmd = "aptly repo include -keyring=${files}/aptly.pub "
    # The include must fail: the tampered file no longer matches its signature.
    expectedCode = 1
    # Exercise aptly's built-in PGP verifier instead of the external gpg binary.
    configOverride = {"gpgProvider": "internal"}
    def outputMatchPrepare(self, s):
        # Normalise output: strip temp-dir paths and GPG noise.
        return gpgRemove(self, tempDirRemove(self, s))
    def prepare(self):
        # Copy the fixtures to a temp dir and tamper with the signed .changes file.
        super(IncludeRepo21Test, self).prepare()
        self.tempSrcDir = tempfile.mkdtemp()
        shutil.copytree(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "changes"), os.path.join(self.tempSrcDir, "01"))
        with open(os.path.join(self.tempSrcDir, "01", "hardlink_0.2.1_amd64.changes"), "r+") as f:
            contents = f.read()
            f.seek(0, 0)
            # Editing the name invalidates the file's existing signature.
            f.write(contents.replace('Julian', 'Andrey'))
            f.truncate()
        self.runCmd += self.tempSrcDir
    def check(self):
        # Always remove the temporary source tree, even when the check fails.
        try:
            super(IncludeRepo21Test, self).check()
        finally:
            shutil.rmtree(self.tempSrcDir)
class IncludeRepo22Test(BaseTest):
    """
    include packages to local repo: missing files, but files aready in the pool
    """
    # "aptly repo add stable" pre-populates the pool with the package files, so
    # the include into "unstable" can resolve files missing from the upload dir.
    fixtureCmds = [
        "aptly repo create stable",
        "aptly repo create unstable",
        "aptly repo add stable ${changes}"
    ]
    runCmd = "aptly repo include -ignore-signatures -keyring=${files}/aptly.pub "
    def outputMatchPrepare(self, s):
        # Normalise output: strip temp-dir paths and GPG noise.
        return gpgRemove(self, tempDirRemove(self, s))
    def prepare(self):
        # Build an upload directory containing only the .dsc/.deb plus a
        # .changes file that no longer references the .tar.gz.
        super(IncludeRepo22Test, self).prepare()
        self.tempSrcDir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.tempSrcDir, "01"), 0o755)
        for path in ["hardlink_0.2.1.dsc", "hardlink_0.2.1_amd64.deb"]:
            shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "changes", path),
                        os.path.join(self.tempSrcDir, "01", path))
        path = "hardlink_0.2.1_amd64.changes"
        with open(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "changes", path), "r") as source:
            with open(os.path.join(self.tempSrcDir, "01", path), "w") as dest:
                content = source.readlines()
                # remove reference to .tar.gz file
                content = [line for line in content if "hardlink_0.2.1.tar.gz" not in line]
                dest.write("".join(content))
        self.runCmd += self.tempSrcDir
    def check(self):
        # Always remove the temporary source tree, even when the check fails.
        try:
            super(IncludeRepo22Test, self).check()
        finally:
            shutil.rmtree(self.tempSrcDir)
| 32.028571
| 145
| 0.630966
| 2,036
| 17,936
| 5.506385
| 0.09332
| 0.068683
| 0.033003
| 0.030417
| 0.850772
| 0.847114
| 0.809562
| 0.783605
| 0.717064
| 0.676122
| 0
| 0.035933
| 0.236619
| 17,936
| 559
| 146
| 32.085868
| 0.782866
| 0.077554
| 0
| 0.69209
| 0
| 0.059322
| 0.254843
| 0.125355
| 0
| 0
| 0
| 0
| 0
| 1
| 0.118644
| false
| 0
| 0.016949
| 0.056497
| 0.435028
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0ac5b265377df4976a6598a69e82f04731e923b6
| 290
|
py
|
Python
|
app/exception.py
|
zhangmingkai4315/ContactCli
|
5a7030bed2374179b65b70d53f01eb7400ef865f
|
[
"MIT"
] | null | null | null |
app/exception.py
|
zhangmingkai4315/ContactCli
|
5a7030bed2374179b65b70d53f01eb7400ef865f
|
[
"MIT"
] | null | null | null |
app/exception.py
|
zhangmingkai4315/ContactCli
|
5a7030bed2374179b65b70d53f01eb7400ef865f
|
[
"MIT"
] | null | null | null |
class UserNotValidException(Exception):
    """Raised for a user entry that is not valid."""
    pass
class PhoneNotValidException(Exception):
    """Raised for a phone number that is not valid."""
    pass
class ConfigFileParseException(Exception):
    """Raised when the configuration file cannot be parsed."""
    pass
class DuplicateUserException(Exception):
    """Raised for a duplicate user entry."""
    pass
class IndexOutofRangeException(Exception):
    """Raised for an index outside the allowed range.

    NOTE(review): the "Outof" spelling is kept — renaming would break callers.
    """
    pass
class IndexNotGivenException(Exception):
    """Raised when a required index argument is missing."""
    pass
| 24.166667
| 42
| 0.837931
| 24
| 290
| 10.125
| 0.375
| 0.320988
| 0.37037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 290
| 12
| 43
| 24.166667
| 0.931034
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
0ac67a36c5cbacfbcd92145e7888c1052733f56f
| 29,451
|
py
|
Python
|
module_statistics.py
|
ambra-dipiano/thesis
|
c24bb1a19c2fad652202527145851f1b22980bfe
|
[
"BSD-3-Clause"
] | null | null | null |
module_statistics.py
|
ambra-dipiano/thesis
|
c24bb1a19c2fad652202527145851f1b22980bfe
|
[
"BSD-3-Clause"
] | null | null | null |
module_statistics.py
|
ambra-dipiano/thesis
|
c24bb1a19c2fad652202527145851f1b22980bfe
|
[
"BSD-3-Clause"
] | 1
|
2020-10-01T12:37:35.000Z
|
2020-10-01T12:37:35.000Z
|
# BSD 3-Clause License
# ----------------------------- #
# Copyright 2020 Ambra Di Piano #
# ----------------------------- # -------------------------------------------------- #
# Redistribution and use in source and binary forms, with or without modification, #
# are permitted provided that the following conditions are met: #
# 1. Redistributions of source code must retain the above copyright notice, #
# this list of conditions and the following disclaimer. #
# 2. Redistributions in binary form must reproduce the above copyright notice, #
# this list of conditions and the following disclaimer in the documentation and/or #
# other materials provided with the distribution. #
# 3. Neither the name of the copyright holder nor the names of its contributors #
# may be used to endorse or promote products derived from this software without #
# specific prior written permission. #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND #
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. #
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF #
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE #
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED #
# OF THE POSSIBILITY OF SUCH DAMAGE. #
# ---------------------------------------------------------------------------------- #
# ============================================
# !!! MODULE FOR STATISTICS AND HISTOGRAMS !!!
# ============================================
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from matplotlib.patches import Rectangle
from scipy import stats
from scipy.stats import rayleigh, norm, chi2
import pandas as pd
from matplotlib.colors import LogNorm
from matplotlib.lines import Line2D
from matplotlib.patches import Ellipse, Circle
from scipy.ndimage.filters import gaussian_filter
from matplotlib.ticker import FormatStrFormatter
import matplotlib as mpl
import scipy.ndimage as sp
from matplotlib.image import NonUniformImage
from scipy.ndimage.filters import gaussian_filter
# Reusable legend handles: `extra` is an invisible rectangle (blank legend
# entry), `extra2` a black dash-dot line sample.
extra = Rectangle((0, 0), 1, 1, fc="w", fill=False, edgecolor='none', linewidth=0)
extra2 = Line2D([0], [0], ls='-.', color='k', lw='1')
def hist1d(x, mean, nbin=20, hist=True, fontsize=20, color='b', xscale='linear', figsize=(15,12), rotation=0,
           alpha=0.5, title='gaussian fit', ax_thresh=None, xlabel='x', ylabel='y', leglabel='data',
           filename='hist1d_gauss.png', show=True):
    """Plot a 1D histogram with a gaussian fit for each data series in ``x``.

    :param x: sequence of data series; a series may be nested one level deep
    :param mean: per-series means marked with a dashed line, or None to skip
    :param color, leglabel: per-series colours and legend labels (indexable)
    :return: (fig, ax) of the saved figure
    """
    fig = plt.figure(figsize=figsize)
    plt.rc('text', usetex=True)
    sns.set()
    ax = plt.subplot(111, xscale=xscale)
    plt.xticks(fontsize=fontsize, rotation=rotation)
    plt.yticks(fontsize=fontsize, rotation=rotation)
    for index, el in enumerate(x):
        # unwrap one nesting level — the original tested `el[0] is list()`,
        # which compares identity with a fresh list and is always False
        if isinstance(el[0], list):
            el = el[0]
        sns.distplot(el, bins=nbin, kde=False, hist=hist,
                     fit=norm, norm_hist=True, fit_kws={"color": color[index]},
                     color=color[index], hist_kws={'alpha': alpha}, label=leglabel[index])
        if mean is not None:
            plt.axvline(mean[index], c=color[index], ls='--', lw=2,
                        label='mean $\\approx$ %.1E' % mean[index])
    plt.title(title, fontsize=fontsize)
    plt.xlabel(xlabel, fontsize=fontsize)
    plt.ylabel(ylabel, fontsize=fontsize)
    plt.legend(fontsize=fontsize)
    plt.tight_layout()
    fig.savefig(filename)
    # show fig ---!
    if show:
        plt.show()
    plt.close()
    return fig, ax
# HIST 1D GAUSSIAN DISTRIBUTION ---!
def hist1d_gauss(x, mean, loc=0, threshold=1, nbin=20, width=None, hist=True, fontsize=20, figsize=(15,12), color='b',
                 alpha=0.5, title='gaussian fit', ax_thresh=0.2, xlabel='x', ylabel='y', leglabel='data', rotation=0,
                 filename='hist1d_gauss.png', show=True):
    """Plot 1D histograms centred on ``loc`` with a gaussian fit per series.

    :param x: sequence of data series; a series may be nested one level deep
    :param mean: per-series means marked with dashed lines, or None to skip
    :param loc: true value marked with a solid line; range is loc +/- threshold
    :param nbin/width: binning, one of the two must be given
    :return: (fig, ax) of the saved figure
    :raises ValueError: if both nbin and width are None
    """
    if nbin is None:
        # the original printed an error and then crashed dividing by None;
        # fail fast with an explicit exception instead
        if width is None:
            raise ValueError('set either nbin or width')
        nbin = int(threshold / width)
    fig = plt.figure(figsize=figsize)
    plt.rc('text', usetex=True)
    sns.set()
    ax = plt.subplot(111)
    plt.xticks(fontsize=fontsize, rotation=rotation)
    plt.yticks(fontsize=fontsize, rotation=rotation)
    for index, el in enumerate(x):
        # unwrap one nesting level (original `el[0] is list()` was always False)
        if isinstance(el[0], list):
            el = el[0]
        sns.distplot(el, bins=nbin, kde=False, hist=hist,
                     fit=norm, norm_hist=True, fit_kws={"color": color[index]},
                     color=color[index], hist_kws={'alpha': alpha, 'range': [loc-threshold, loc+threshold]}, label=leglabel[index])
        if mean is not None:
            plt.axvline(mean[index], c=color[index], ls='--', lw=2, label='mean $\\approx$ %.3fdeg' % mean[index])
        if loc is not None:
            plt.axvline(loc, c='k', ls='-', lw=2, label='true $\\approx$ %.3fdeg' % loc)
    plt.title(title, fontsize=fontsize)
    plt.xlabel(xlabel, fontsize=fontsize)
    plt.ylabel(ylabel, fontsize=fontsize)
    plt.legend(fontsize=fontsize)
    plt.xlim([loc-ax_thresh, loc+ax_thresh])
    plt.tight_layout()
    fig.savefig(filename)
    # show fig ---!
    if show:
        plt.show()
    plt.close()
    return fig, ax
# HIST 1D RAYLEIGH DISTRIBUTION ---!
def hist1d_rayleigh(x, mean, rayleigh_prms=None, threshold=1, nbin=None, width=None, hist=True,
                    fontsize=20, figsize=(15,12), rotation=0, color='b', alpha=0.5, title='rayleigh fit', ax_thresh=0.2, xlabel='x', ylabel='y',
                    leglabel='data', filename='hist1d_rayleigh.png', show=True):
    """Plot 1D histograms with a Rayleigh fit per series.

    :param x: sequence of data series; a series may be nested one level deep
    :param mean: per-series means marked with dashed lines, or None to skip
    :param rayleigh_prms: dict with 'loc' (scalar) and 'scale' (per-series
        sequence); defaults to {'loc': 0, 'scale': [1]}
    :param nbin/width: binning, one of the two must be given
    :return: (fig, ax) of the saved figure
    :raises ValueError: if both nbin and width are None
    """
    # avoid the original mutable default argument; same effective default
    if rayleigh_prms is None:
        rayleigh_prms = {'loc': 0, 'scale': [1]}
    # resolve binning; the original crashed on nbin=None before reaching its
    # own both-None error message
    if nbin is None and width is None:
        raise ValueError('set either nbin or width')
    if width is None:
        width = threshold / nbin
    if nbin is None:
        nbin = int(threshold / width)
    fig = plt.figure(figsize=figsize)
    plt.rc('text', usetex=True)
    sns.set()
    ax = plt.subplot(111)
    plt.xticks(fontsize=fontsize, rotation=rotation)
    plt.yticks(fontsize=fontsize, rotation=rotation)
    for index, el in enumerate(x):
        # unwrap one nesting level (original `el[0] is list()` was always False)
        if isinstance(el[0], list):
            el = el[0]
        sns.distplot(el, bins=nbin, kde=False, hist=hist,
                     fit=rayleigh, norm_hist=True, fit_kws={"color": color[index]},
                     color=color[index], hist_kws={'alpha': alpha, 'range': [0.0, threshold]}, label=leglabel[index])
        if mean is not None:
            plt.axvline(mean[index], c=color[index], ls='--', lw=2, label='mean $\\approx$ %.3fdeg' % mean[index])
        if rayleigh_prms['scale'] is not None:
            plt.axvline(rayleigh_prms['scale'][index], c=color[index], ls='-', lw=2, label='mode $\\approx$ %.3fdeg' % rayleigh_prms['scale'][index])
    plt.title(title, fontsize=fontsize)
    plt.xlabel(xlabel, fontsize=fontsize)
    plt.ylabel(ylabel, fontsize=fontsize)
    plt.legend(fontsize=fontsize)
    if rayleigh_prms['loc'] is not None:
        plt.xlim([rayleigh_prms['loc'], rayleigh_prms['loc']+ax_thresh])
    plt.tight_layout()
    fig.savefig(filename)
    # show fig ---!
    if show:
        plt.show()
    plt.close()
    return fig, ax
# RAYLEIGH CDF WITH CONFIDENCE INTERVAL ---!
def rayleigh_cdf(x, loc=0, scale=1, if_CI=True, probs=(0.6827, 0.9545, 0.9973, 0.99994),
                 xlabel='x', title='x$\\sim$ RA($\\gamma$) CDF', colors=('k', 'r', 'orange', 'm'),
                 fontsize=20, figsize=(15,12), rotation=0, filename='theo_rayleigh_cdf.png', show=False):
    """Plot the theoretical Rayleigh CDF over the sorted sample ``x``.

    :param loc, scale: Rayleigh location and scale parameters
    :param if_CI: when True, mark the quantile of each probability in ``probs``
    :return: (fig, ax) of the saved figure
    """
    fig = plt.figure(figsize=figsize)
    plt.rc('text', usetex=True)
    sns.set()
    ax = plt.subplot(111)
    plt.xticks(fontsize=fontsize, rotation=rotation)
    plt.yticks(fontsize=fontsize, rotation=rotation)
    ax.plot(np.sort(x), stats.rayleigh.cdf(np.sort(x), loc=loc, scale=scale), ls='-', label='cdf')
    # '\\gamma' was written as '\gamma' (invalid escape sequence); the
    # rendered label string is unchanged
    ax.axvline(scale, c='maroon', label='$\\gamma$')
    ax.axvline(np.std(x), c='maroon', ls=':', label='1 std =%.2f' % (np.std(x)))
    if if_CI is True:
        x_critical = []
        for i in range(len(probs)):
            x_critical.append(stats.rayleigh.ppf(q=probs[i], loc=loc, scale=scale))
            ax.axvline(x_critical[i], c=colors[i], ls='-.',
                       label='x=%.2f, %.2f' % (x_critical[i], probs[i]*100) + '%')
    plt.ylabel('1-$\\alpha$', rotation=90, fontsize=fontsize)
    plt.xlabel(xlabel, fontsize=fontsize)
    plt.title(title, fontsize=fontsize)
    ax.set_xlim(left=0)
    ax.set_ylim(bottom=0)
    plt.legend(loc=0)
    plt.tight_layout()
    fig.savefig(filename)
    # show fig ---!
    if show:
        plt.show()
    plt.close()
    return fig, ax
# RAYLEIGH PDF WITH CONFIDENCE INTERVAL ---!
def rayleigh_pdf(x, loc=0, scale=1, if_CI=True, probs=(0.6827, 0.9545, 0.9973, 0.99994),
                 xlabel='x', title='x$\\sim$ RA($\\gamma$) CDF', colors=('k', 'r', 'orange', 'm'),
                 fontsize=20, figsize=(15,12), rotation=0, filename='theo_rayleigh_cdf.png', show=False):
    """Plot the theoretical Rayleigh PDF over the sorted sample ``x``.

    NOTE(review): the default title and filename still say "cdf" — kept for
    backward compatibility with callers relying on the defaults.

    :param loc, scale: Rayleigh location and scale parameters
    :param if_CI: when True, mark the quantile of each probability in ``probs``
    :return: (fig, ax) of the saved figure
    """
    fig = plt.figure(figsize=figsize)
    plt.rc('text', usetex=True)
    sns.set()
    ax = plt.subplot(111)
    plt.xticks(fontsize=fontsize, rotation=rotation)
    plt.yticks(fontsize=fontsize, rotation=rotation)
    # the curve is the PDF; the original legend label said 'cdf' (copy-paste)
    ax.plot(np.sort(x), stats.rayleigh.pdf(np.sort(x), loc=loc, scale=scale), ls='-', label='pdf')
    ax.axvline(scale, c='maroon', label='$\\gamma$')
    ax.axvline(np.std(x), c='maroon', ls=':', label='1 std =%.2f' % (np.std(x)))
    if if_CI is True:
        x_critical = []
        for i in range(len(probs)):
            x_critical.append(stats.rayleigh.ppf(q=probs[i], loc=loc, scale=scale))
            ax.axvline(x_critical[i], c=colors[i], ls='-.',
                       label='x=%.2f, %.2f' % (x_critical[i], probs[i]*100) + '%')
    plt.ylabel('counts density', rotation=90, fontsize=fontsize)
    plt.xlabel(xlabel, fontsize=fontsize)
    plt.title(title, fontsize=fontsize)
    ax.set_xlim(left=0)
    ax.set_ylim(bottom=0)
    plt.legend(loc=0)
    plt.tight_layout()
    fig.savefig(filename)
    # show fig ---!
    if show:
        plt.show()
    plt.close()
    return fig, ax
# 2D HISTOGRAM WITH RAYLEIGH CONFIDENCE INTERVAL ---!
def hist2d_rayleigh_CI(x, y, nbin=None, width=None, rayleigh_prms=None, xcentre=0, ycentre=0, interp=None,
                       threshold=1, probs=(0.6827, 0.9545, 0.9973, 0.99994), colors=('k', 'r', 'orange', 'm'), lw=2, ms=2e2,
                       ax_thresh=0.2, xlabel='x', ylabel='y', title='confidence intervals from theoretical distribution',
                       fontsize=20, figsize=(10,8), rotation=0, filename='hist2d_CIrayleigh.png', show=False):
    """2D histogram of (x, y) with Rayleigh confidence circles around the sample mean.

    :param rayleigh_prms: dict with 'loc' and 'scale' of the Rayleigh model
        (defaults to {'loc': 0, 'scale': 1})
    :param interp: None for plt.hist2d, else an imshow interpolation mode
    :return: (fig, ax) of the saved figure
    :raises ValueError: if both nbin and width are None
    """
    # avoid the original mutable default argument; same effective default
    if rayleigh_prms is None:
        rayleigh_prms = {'loc': 0, 'scale': 1}
    xmean = np.mean(x)
    ymean = np.mean(y)
    # resolve binning; check both-None first (the original crashed before
    # printing its error message)
    if nbin is None and width is None:
        raise ValueError('set either nbin or width')
    if width is None:
        width = threshold / nbin
    if nbin is None:
        nbin = int(threshold / width)
    fig = plt.figure(figsize=figsize)
    plt.rc('text', usetex=True)
    sns.set()
    ax = plt.subplot(111)
    if interp is None:
        h = plt.hist2d(x, y, bins=nbin, cmap='jet',
                       range=[[xcentre - threshold, xcentre + threshold], [ycentre - threshold, ycentre + threshold]])
    else:
        h, xedges, yedges = np.histogram2d(x, y, bins=nbin,
                                           range=[[xcentre - threshold, xcentre + threshold], [ycentre - threshold, ycentre + threshold]])
        h = h.T
        # imshow's extent must be a flat (left, right, bottom, top) sequence;
        # the original passed a nested list, which matplotlib rejects
        plt.imshow(h, interpolation=interp, cmap='gist_heat',
                   extent=[xcentre - threshold, xcentre + threshold, ycentre - threshold, ycentre + threshold])
    plt.xticks(fontsize=fontsize, rotation=rotation)
    plt.yticks(fontsize=fontsize, rotation=rotation)
    plt.scatter(xcentre, ycentre, c='w', marker='*', s=ms)
    plt.plot([], [], c='none', label='Reyleigh')
    for i in range(len(probs)):
        plt.plot([], [], c=colors[i], label='%.2f' % (probs[i] * 100) + '\%')
        # radius of the circle containing probs[i] of the Rayleigh mass
        r = stats.rayleigh.ppf(q=probs[i], loc=rayleigh_prms['loc'], scale=rayleigh_prms['scale'])
        cir = Circle(xy=(float(xmean), float(ymean)),
                     radius=r,
                     color=colors[i], lw=lw)
        cir.set_facecolor('none')
        ax.add_artist(cir)
    if interp is None:
        cbar = plt.colorbar(h[3], ax=ax).set_label('counts', fontsize=fontsize)
    if ax_thresh is not None:
        plt.axis([xcentre - ax_thresh, xcentre + ax_thresh, ycentre - ax_thresh, ycentre + ax_thresh], 'equal')
    plt.xlabel(xlabel, fontsize=fontsize)
    plt.ylabel(ylabel, fontsize=fontsize)
    plt.title(title, fontsize=fontsize)
    plt.legend(ncol=3, fontsize=fontsize)
    plt.tight_layout()
    fig.savefig(filename)
    # show fig ---!
    if show:
        plt.show()
    plt.close()
    return fig, ax
# COVARIANCE EIGENVALUES ---!
def eigsorted(cov):
    """Return the eigenvalues of symmetric ``cov`` in descending order,
    together with the matching eigenvector columns."""
    eigenvalues, eigenvectors = np.linalg.eigh(cov)
    descending = np.argsort(eigenvalues)[::-1]
    return eigenvalues[descending], eigenvectors[:, descending]
# 2D HISTOGRAM WITH GAUSSIAN COVARIANCE CONFIDENCE INTERVAL ---!
def hist2d_gauss_CI(x, y, nbin=None, width=None, xcentre=0, ycentre=0, threshold=1, nstd=(1, 2, 3, 5), lw=2,
                    colors=('k', 'r', 'orange', 'm'), ax_thresh=0.2, xlabel='x', ylabel='y', interp=None, ms=2e2,
                    title='confidence intervals from theoretical distribution', fontsize=20, figsize=(10,8), rotation=0,
                    filename='hist2d_CIgauss.png', show=False):
    """2D histogram of (x, y) with sample-covariance ellipses at ``nstd`` sigma.

    :param interp: None keeps the hist2d image; else redraw via imshow
    :return: (fig, ax) of the saved figure
    :raises ValueError: if both nbin and width are None
    """
    xmean = np.mean(x)
    ymean = np.mean(y)
    # resolve binning; check both-None first (original crashed before its message)
    if nbin is None and width is None:
        raise ValueError('set either nbin or width')
    if width is None:
        width = threshold / nbin
    if nbin is None:
        nbin = int(threshold / width)
    fig = plt.figure(figsize=figsize)
    plt.rc('text', usetex=True)
    sns.set()
    ax = plt.subplot(111)
    h = plt.hist2d(x, y, bins=nbin, cmap='jet',
                   range=[[xcentre - threshold, xcentre + threshold], [ycentre - threshold, ycentre + threshold]])
    if interp is not None:
        plt.cla()
        plt.imshow(h[0], origin='lower', interpolation=interp, cmap='gist_heat')
    plt.xticks(fontsize=fontsize, rotation=rotation)
    plt.yticks(fontsize=fontsize, rotation=rotation)
    plt.scatter(xcentre, ycentre, c='w', marker='*', s=ms)
    plt.plot([], [], c='none', label='gauss')
    # covariance, eigendecomposition and orientation do not depend on the
    # sigma level: hoisted out of the loop (the original recomputed them)
    cov = np.cov(x, y)
    vals, vecs = eigsorted(cov)
    theta = np.degrees(np.arctan2(*vecs[:, 0][::-1]))
    for i in range(len(nstd)):
        plt.plot([], [], c=colors[i], label='%d $\\sigma$' % (nstd[i]))
        w, v = 2 * nstd[i] * np.sqrt(vals)
        ell = Ellipse(xy=(float(xmean), float(ymean)),
                      width=w, height=v,
                      angle=float(theta), color=colors[i], lw=lw)
        ell.set_facecolor('none')
        ax.add_artist(ell)
    if interp is None:
        cbar = plt.colorbar(h[3], ax=ax).set_label('counts', fontsize=fontsize)
    if ax_thresh is not None:
        plt.axis([xcentre - ax_thresh, xcentre + ax_thresh, ycentre - ax_thresh, ycentre + ax_thresh], 'equal')
    plt.xlabel(xlabel, fontsize=fontsize)
    plt.ylabel(ylabel, fontsize=fontsize)
    plt.title(title, fontsize=fontsize)
    plt.legend(ncol=3, fontsize=fontsize)
    plt.tight_layout()
    fig.savefig(filename)
    # show fig ---!
    if show:
        plt.show()
    plt.close()
    return fig, ax
# 2D HISTOGRAM WITH GAUSSIAN COVARIANCE CONFIDENCE INTERVAL ---!
def contour_gauss_CI(x, y, nbin=None, width=None, xcentre=0, ycentre=0, threshold=1, nstd=(1, 2, 3, 5),
                     colors=('k', 'r', 'orange', 'm'), ax_thresh=0.2, xlabel='x', ylabel='y', interp=None,
                     title='confidence intervals from theoretical distribution', fontsize=20, figsize=(10,8), rotation=0,
                     filename='hist2d_CIgauss.png', show=False):
    """2D histogram of (x, y) with covariance ellipses at ``nstd`` sigma.

    :param interp: accepted for interface compatibility but unused here
    :return: (fig, ax) of the saved figure
    :raises ValueError: if both nbin and width are None
    """
    xmean = np.mean(x)
    ymean = np.mean(y)
    # resolve binning; check both-None first (original crashed before its message)
    if nbin is None and width is None:
        raise ValueError('set either nbin or width')
    if width is None:
        width = threshold / nbin
    if nbin is None:
        nbin = int(threshold / width)
    fig = plt.figure(figsize=figsize)
    plt.rc('text', usetex=True)
    sns.set()
    ax = plt.subplot(111)
    h = plt.hist2d(x, y, bins=nbin, cmap='jet',
                   range=[[xcentre - threshold, xcentre + threshold], [ycentre - threshold, ycentre + threshold]])
    plt.xticks(fontsize=fontsize, rotation=rotation)
    plt.yticks(fontsize=fontsize, rotation=rotation)
    plt.scatter(xcentre, ycentre, c='w', marker='*', s=1e2)
    plt.plot([], [], c='none', label='gauss')
    # ellipse parameters are loop-invariant: compute once (original recomputed
    # the covariance and eigendecomposition on every iteration)
    cov = np.cov(x, y)
    vals, vecs = eigsorted(cov)
    theta = np.degrees(np.arctan2(*vecs[:, 0][::-1]))
    for i in range(len(nstd)):
        plt.plot([], [], c=colors[i], label='%d $\\sigma$' % (nstd[i]))
        w, v = 2 * nstd[i] * np.sqrt(vals)
        ell = Ellipse(xy=(xmean, ymean),
                      width=w, height=v,
                      angle=theta, color=colors[i], lw=2)
        ell.set_facecolor('none')
        ax.add_artist(ell)
    cbar = plt.colorbar(h[3], ax=ax).set_label('counts', fontsize=fontsize)
    if ax_thresh is not None:
        plt.axis([xcentre - ax_thresh, xcentre + ax_thresh, ycentre - ax_thresh, ycentre + ax_thresh], 'equal')
    plt.xlabel(xlabel, fontsize=fontsize)
    plt.ylabel(ylabel, fontsize=fontsize)
    plt.title(title, fontsize=fontsize)
    plt.legend(ncol=3, fontsize=fontsize)
    plt.tight_layout()
    fig.savefig(filename)
    # show fig ---!
    if show:
        plt.show()
    plt.close()
    return fig, ax
# 2D HISTOGRAM MAP ---!
def hist2d_map(x, y, trials, nbin=None, width=None, xcentre=0, ycentre=0, threshold=1, ax_thresh=0.2, xlabel='x', ylabel='y',
               title='probability map', fontsize=20, figsize=(10,8), rotation=0, filename='hist2d_map.png', if_CI=None,
               rayleigh=None, nstd=(1, 2, 3, 5), colors=('k', 'r', 'orange', 'm'),
               probs=(0.6827, 0.9545, 0.9973, 0.99994), smooth=True, show=False):
    """Probability map of (x, y) counts, optionally overlaid with confidence regions.

    :param if_CI: None for no overlay, 'rayleigh' for Rayleigh circles, or one
        of 'gauss'/'covariance'/'cov' for covariance ellipses
    :param rayleigh: dict with 'loc' and 'scale' for the Rayleigh overlay
        (defaults to {'loc': 0, 'scale': 1}); shadows scipy's rayleigh locally
    :return: (fig, ax) of the saved figure
    :raises ValueError: if both nbin and width are None
    """
    # avoid the original mutable default argument; same effective default
    if rayleigh is None:
        rayleigh = {'loc': 0, 'scale': 1}
    # resolve binning; check both-None first (original crashed before its message)
    if nbin is None and width is None:
        raise ValueError('set either nbin or width')
    if width is None:
        width = threshold / nbin
    if nbin is None:
        nbin = int(threshold / width)
    fig = plt.figure(figsize=figsize)
    plt.rc('text', usetex=True)
    sns.set()
    ax = plt.subplot(111)
    plt.xticks(fontsize=fontsize, rotation=rotation)
    plt.yticks(fontsize=fontsize, rotation=rotation)
    h = ax.hist2d(x, y, bins=nbin, cmap='jet', vmin=0.0, vmax=trials,
                  range=[[xcentre - threshold, xcentre + threshold], [ycentre - threshold, ycentre + threshold]])
    if smooth:
        plt.clf()
        # hist2d with numpy (invert axis since imshow stumbles them) ---!
        heatmap, xedges, yedges = np.histogram2d(x, y, bins=nbin,
                                                 range=[[xcentre - threshold, xcentre + threshold], [ycentre - threshold, ycentre + threshold]])
        extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
        ax.imshow(heatmap, extent=extent, cmap='jet',
                  interpolation='gaussian', filterrad=1, filternorm=True, resample=False, origin='lower')
    plt.scatter(xcentre, ycentre, c='w', marker='*', s=1e2)
    if if_CI is None:
        pass
    elif if_CI.lower() == 'rayleigh':
        # the original compared strings with `is`, which is an identity test
        # and never True for the freshly built lower-cased string — the
        # overlay branches were unreachable
        xmean = np.mean(x)
        ymean = np.mean(y)
        plt.plot([], [], c='none', label='Reyleigh')
        for i in range(len(probs)):
            plt.plot([], [], c=colors[i], label='%.2f' % (probs[i] * 100) + '%')
            r = stats.rayleigh.ppf(q=probs[i], loc=rayleigh['loc'], scale=rayleigh['scale'])
            cir = Circle(xy=(xmean, ymean),
                         radius=r,
                         color=colors[i], lw=2)
            cir.set_facecolor('none')
            ax.add_artist(cir)
    elif if_CI.lower() in ('gauss', 'covariance', 'cov'):
        # original wrote `if_CI.lower is 'covariance'`, comparing the bound
        # method object itself — normalised to a membership test
        xmean = np.mean(x)
        ymean = np.mean(y)
        plt.plot([], [], c='none', label='gauss')
        # covariance ellipse inputs are loop-invariant: compute once
        cov = np.cov(x, y)
        vals, vecs = eigsorted(cov)
        theta = np.degrees(np.arctan2(*vecs[:, 0][::-1]))
        for i in range(len(nstd)):
            plt.plot([], [], c=colors[i], label='%.2f' % (nstd[i] * 100) + '%')
            w, v = 2 * nstd[i] * np.sqrt(vals)
            ell = Ellipse(xy=(xmean, ymean),
                          width=w, height=v,
                          angle=theta, color=colors[i], lw=2)
            ell.set_facecolor('none')
            ax.add_artist(ell)
    else:
        print('Error: if_CI parameter value not understood')
    m = plt.cm.ScalarMappable(cmap='jet')
    m.set_clim(0., trials/100)
    cbar = plt.colorbar(m, boundaries=np.linspace(0, 100, 11)).set_label('cts \%', fontsize=fontsize)
    plt.axis([xcentre - ax_thresh, xcentre + ax_thresh, ycentre - ax_thresh, ycentre + ax_thresh], 'equal')
    plt.xlabel(xlabel, fontsize=fontsize)
    plt.ylabel(ylabel, fontsize=fontsize)
    if title is not None:
        plt.title(title, fontsize=fontsize)
    plt.axis('equal')
    plt.tight_layout()
    fig.savefig(filename)
    # show fig ---!
    if show:
        plt.show()
    plt.close()
    return fig, ax
# WILKS THEOREM DIST FOR EMPTY FIELDS ---!
def ts_wilks(x, trials, df=1, nbin=None, width=None, ylim=None, xlim=None, show=False,
             fontsize=20, figsize=(15,12), rotation=0, xlabel='TS', ylabel='normalised counts',
             title='TS distribution (empty fields)', filename='wilks_preTrials.png'):
    """Histogram of TS values compared with chi2 and chi2/2 densities (Wilks).

    :param x: sample of TS values
    :param trials: normalisation (total number of trials)
    :param df: degrees of freedom of the reference chi2 curves
    :return: (fig, ax) of the saved figure
    :raises ValueError: if both nbin and width are None
    """
    # resolve binning; check both-None first (the original crashed on None
    # arithmetic before reaching its own error message)
    if nbin is None and width is None:
        raise ValueError('set either nbin or width')
    if width is None:
        width = (max(x) - min(x)) / nbin
    if nbin is None:
        nbin = int((max(x) - min(x)) / width)
    fig = plt.figure(figsize=figsize)
    plt.rc('text', usetex=True)
    sns.set()
    ax = plt.subplot(111, yscale='log')
    plt.xticks(fontsize=fontsize, rotation=rotation)
    plt.yticks(fontsize=fontsize, rotation=rotation)
    # normalised counts with Poisson error bars
    h, edges = np.histogram(x, bins=int(nbin), density=False, range=(min(x), max(x)))
    yerr = np.sqrt(h)/trials
    h = h/trials
    cbin = (edges[1:] + edges[:-1]) / 2
    xerr = (edges[:-1] - edges[1:]) / 2
    x2 = np.arange(0, 30, 1)
    plt.errorbar(cbin, h, fmt='k+', yerr=yerr, xerr=xerr, markersize=5, label='ts')
    plt.plot(x2, stats.chi2.pdf(x2, df=df), c='orange', lw=1, ls='--', label='$\\chi^2$(dof=%d)' % df)
    plt.plot(x2, stats.chi2.pdf(x2, df=df)/2, c='b', lw=1, ls='--', label='$\\chi^2$/2(dof=%d)' % df)
    plt.xlabel(xlabel, fontsize=fontsize)
    plt.ylabel(ylabel, fontsize=fontsize)
    plt.title(title, fontsize=fontsize)
    plt.legend(loc=0, fontsize=fontsize)
    if xlim is not None:
        plt.xlim(xlim)
    if ylim is not None:
        plt.ylim(ylim)
    plt.tight_layout()
    fig.savefig(filename)
    # show fig ---!
    if show:
        plt.show()
    plt.close()
    return fig, ax
# WILKS THEOREM P-VALUES FOR EMPTY FIELDS ---!
def p_values(x, trials, df=1, nbin=None, width=None, ylim=None, xlim=None, show=False,
             fontsize=20, figsize=(15,12), rotation=0, xlabel='h', ylabel='p-values',
             title='p-value (empty fields)', filename='pvalue_preTrials.png'):
    """Plot the empirical survival fraction P(ts >= threshold) vs chi2 expectations.

    :param x: sample of TS values
    :param trials: normalisation (total number of trials)
    :param df: degrees of freedom of the reference chi2 curves
    :return: (fig, ax) of the saved figure
    :raises ValueError: if both nbin and width are None
    """
    # resolve binning; check both-None first (original crashed before its message)
    if nbin is None and width is None:
        raise ValueError('set either nbin or width')
    if width is None:
        width = (max(x) - min(x)) / nbin
    if nbin is None:
        nbin = int((max(x) - min(x)) / width)
    fig = plt.figure(figsize=figsize)
    plt.rc('text', usetex=True)
    sns.set()
    ax = plt.subplot(111, yscale='log')
    plt.xticks(fontsize=fontsize, rotation=rotation)
    plt.yticks(fontsize=fontsize, rotation=rotation)
    # h[i] counts entries with value >= i+1; the original accumulated onto
    # np.empty, whose uninitialised contents corrupted the counts — use zeros
    nthresh = int(max(x))
    h = np.zeros(nthresh)
    cbin, xerr = [], []
    for i in range(nthresh):
        cbin.append(i + 1)
        xerr.append(0.5)
        for val in x:
            if val >= i + 1:
                h[i] += 1
    p = h/trials
    yerr = np.sqrt(h)/trials
    x2 = np.arange(min(x), max(x)+5, 1)
    plt.errorbar(cbin[0], p[0], yerr=yerr[0], xerr=xerr[0], fmt='k+', markersize=5)
    plt.errorbar(cbin[1:], p[1:], yerr=yerr[1:], xerr=xerr[1:], fmt='k+', markersize=5, label='ts')
    plt.plot(x2, (1 - stats.chi2.cdf(x2, df=df)), lw=1, ls='-.', c='green', label='$\\chi^2$(dof=%d)' % df)
    plt.plot(x2, (1 - stats.chi2.cdf(x2, df=df))/2, lw=1, ls='-.', c='maroon', label='$\\chi^2$/2(dof=%d)' % df)
    # 5-sigma reference line
    plt.axhline(3e-7, c='gray', ls=':', alpha=1, lw=2)
    plt.text(23, 2e-7, '5$\\sigma$', fontsize=fontsize, alpha=1)
    plt.xlabel(xlabel, fontsize=fontsize)
    plt.ylabel(ylabel, fontsize=fontsize)
    plt.title(title, fontsize=fontsize)
    plt.legend(loc=0, fontsize=fontsize)
    if xlim is not None:
        plt.xlim(xlim)
    if ylim is not None:
        plt.ylim(ylim)
    plt.tight_layout()
    fig.savefig(filename)
    # show fig ---!
    if show:
        plt.show()
    plt.close()
    return fig, ax
# WILKS THEOREM P-VALUES FOR EMPTY FIELDS ---!
def ts_wilks_cumulative(x, trials, df=1, nbin=None, width=None, ylim=None, xlim=None, show=False,
                        fontsize=20, figsize=(15,12), rotation=0, xlabel='h', ylabel='cumulative probability',
                        title='p-value (empty fields)', filename='cumulative_preTrials.png'):
    """Plot the empirical cumulative probability of TS thresholds vs the chi2 CDF.

    :param x: sample of TS values
    :param trials: normalisation (total number of trials)
    :param df: degrees of freedom of the reference chi2 curve
    :return: (fig, ax) of the saved figure
    :raises ValueError: if both nbin and width are None
    """
    # resolve binning; check both-None first (original crashed before its message)
    if nbin is None and width is None:
        raise ValueError('set either nbin or width')
    if width is None:
        width = (max(x) - min(x)) / nbin
    if nbin is None:
        nbin = int((max(x) - min(x)) / width)
    fig = plt.figure(figsize=figsize)
    plt.rc('text', usetex=True)
    sns.set()
    ax = plt.subplot(111)
    plt.xticks(fontsize=fontsize, rotation=rotation)
    plt.yticks(fontsize=fontsize, rotation=rotation)
    # h[i] counts entries with value >= i+1; the original accumulated onto
    # np.empty, whose uninitialised contents corrupted the counts — use zeros
    nthresh = int(max(x))
    h = np.zeros(nthresh)
    cbin, xerr = [], []
    for i in range(nthresh):
        cbin.append(i + 1)
        xerr.append(0.5)
        for val in x:
            if val >= i + 1:
                h[i] += 1
    p = 1 - h/trials
    yerr = np.sqrt(h)/trials
    x2 = np.arange(min(x), max(x)+5, 1)
    plt.errorbar(cbin[0], p[0], yerr=yerr[0], xerr=xerr[0], fmt='k+', markersize=5)
    plt.errorbar(cbin[1:], p[1:], yerr=yerr[1:], xerr=xerr[1:], fmt='k+', markersize=5, label='ts')
    plt.plot(x2, stats.chi2.cdf(x2, df=df), lw=1, ls='-.', c='maroon', label='$P$(dof=%d)' % df)
    # 5-sigma reference line
    plt.axhline(1-3e-7, c='gray', ls=':', lw=2, alpha=1)
    plt.text(1, 0.95, '5$\\sigma$', fontsize=fontsize, alpha=1)
    plt.xlabel(xlabel, fontsize=fontsize)
    plt.ylabel(ylabel, fontsize=fontsize)
    plt.title(title, fontsize=fontsize)
    plt.legend(loc=0, fontsize=fontsize)
    if xlim is not None:
        plt.xlim(xlim)
    if ylim is not None:
        plt.ylim(ylim)
    plt.tight_layout()
    fig.savefig(filename)
    # show fig ---!
    if show:
        plt.show()
    plt.close()
    return fig, ax
def chi2_reduced(x, trials, df=1, nbin=None, width=None, var=True):
np.seterr(divide='ignore', invalid='ignore')
if width is None:
width = (max(x)-min(x))/nbin
if nbin is None:
nbin = int((max(x)-min(x))/width)
if nbin is None and width is None:
print('Error: set either nbin or width')
h, edges = np.histogram(x, bins=int(nbin), density=False, range=(0., max(x)))
yerr = np.sqrt(h)/trials
h = h/trials
cbin = (edges[1:] + edges[:-1])/2
p = (1 - stats.chi2.pdf(cbin, df=df))/2
#p = stats.chi2.pdf(cbin, df=df)/2
#err = yerr/h
#print('values', h, '\nerrors', yerr, '\nerror perc', err)
with np.errstate(invalid='raise'):
if var:
chi2 = 2*np.sum((h[1:] - p[1:])**2/h[1:])
#chi2 = np.sum((h[1:] - p[1:])**2/err[1:])
else:
chi2 = 2*np.sum((h[1:] - p[1:])**2/h[1:])
#chi2 = np.sum((h[1:] - p[1:])**2/err[1:])
h[1:] = np.array(h[1:])
N = np.count_nonzero(h[1:])
chi2r = chi2 / (N - 1)
return chi2, chi2r
# MANUAL NORMALISED HISTOGRAM ---!
def normedHist(x, trials=None, step=None, nbin=None, ylim=None, xlim=None, show=False, normed=True,
               xscale='linear', yscale='log', fontsize=20, figsize=(15, 12), rotation=0, xlabel='x',
               ylabel='normalised counts', leglabel='legend', title='normed histogram',
               usetex=True, usesns=False, filename='normed_histogram.png'):
    """Plot a manually-binned histogram of ``x``, optionally normalised by ``trials``.

    Parameters
    ----------
    x : array-like
        Values to histogram; sorted internally.
    trials : int or None
        Normalisation; defaults to ``len(x)``.
    step, nbin : float, int
        Bin width / number of bins; at least one must be given.
    normed : bool
        If True plot h/trials with Poisson errors, otherwise raw counts.
    show : bool
        If True, display the figure interactively after saving.

    Returns
    -------
    fig, ax
        The matplotlib figure and axes.
    """
    x = np.sort(x)
    # check this first: the fallbacks below would raise a TypeError before
    # the original error message could ever be printed
    if nbin is None and step is None:
        raise ValueError('set either nbin or step')
    if step is None:
        step = (max(x) - min(x)) / nbin
    if nbin is None:
        nbin = int((max(x) - min(x)) / step)
    if trials is None:
        trials = len(x)
    fig = plt.figure(figsize=figsize)
    if usetex:
        plt.rc('text', usetex=True)
    if usesns:
        sns.set()
    else:
        plt.grid()
    ax = plt.subplot(111, yscale=yscale, xscale=xscale)
    plt.xticks(fontsize=fontsize, rotation=rotation)
    plt.yticks(fontsize=fontsize, rotation=rotation)
    # counts per bin; must start at zero — the original used np.empty() and
    # then "+= 1", accumulating on top of uninitialised memory (bug).
    # NOTE(review): bin centres are anchored at 0, not at min(x) — confirm this
    # is intended for inputs that do not start near 0.
    h = np.zeros(nbin)
    cbin, xerr = [], []
    for i in range(nbin):
        cbin.append(step * i + step / 2)
        xerr.append(step / 2)
        for val in x:
            if val <= cbin[i]:
                h[i] += 1
        # drop values already counted (x is sorted, so remaining values grow)
        x = x[(x >= cbin[i])]
    h_norm = h / trials
    if normed:
        yerr = np.sqrt(h) / trials
        plt.errorbar(cbin, h_norm, yerr=yerr, xerr=xerr, fmt='k+', markersize=5, label=leglabel)
    else:
        yerr = h / trials
        plt.errorbar(cbin, h, yerr=yerr, xerr=xerr, fmt='k+', markersize=5, label=leglabel)
    plt.xlabel(xlabel, fontsize=fontsize)
    plt.ylabel(ylabel, fontsize=fontsize)
    plt.title(title, fontsize=fontsize)
    plt.legend(loc=0, fontsize=fontsize)
    if xlim is not None:
        plt.xlim(xlim)
    if ylim is not None:
        plt.ylim(ylim)
    plt.tight_layout()
    fig.savefig(filename)
    # show fig ---!
    if show:
        plt.show()
    plt.close()
    return fig, ax
| 38.751316
| 144
| 0.625446
| 4,375
| 29,451
| 4.178514
| 0.101714
| 0.072644
| 0.054045
| 0.045512
| 0.790766
| 0.767081
| 0.747716
| 0.730102
| 0.703463
| 0.676987
| 0
| 0.024605
| 0.192693
| 29,451
| 760
| 145
| 38.751316
| 0.74428
| 0.122203
| 0
| 0.728696
| 0
| 0
| 0.077455
| 0.003378
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026087
| false
| 0.001739
| 0.027826
| 0
| 0.08
| 0.02087
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0ad0134e50901402cc76e39899d74112cd4600f2
| 70,542
|
py
|
Python
|
econml/ortho_iv.py
|
jaronowitz/EconML
|
3df959d120d429537a62ebfb22a84b9b28530457
|
[
"MIT"
] | null | null | null |
econml/ortho_iv.py
|
jaronowitz/EconML
|
3df959d120d429537a62ebfb22a84b9b28530457
|
[
"MIT"
] | null | null | null |
econml/ortho_iv.py
|
jaronowitz/EconML
|
3df959d120d429537a62ebfb22a84b9b28530457
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Orthogonal IV for Heterogeneous Treatment Effects.
A Double/Orthogonal machine learning approach to estimation of heterogeneous
treatment effect with an endogenous treatment and an instrument. It
implements the DMLIV and related algorithms from the paper:
Machine Learning Estimation of Heterogeneous Treatment Effects with Instruments
Vasilis Syrgkanis, Victor Lei, Miruna Oprescu, Maggie Hei, Keith Battocchi, Greg Lewis
https://arxiv.org/abs/1905.10176
"""
import numpy as np
from sklearn.base import clone
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer
from ._ortho_learner import _OrthoLearner
from .cate_estimator import StatsModelsCateEstimatorMixin
from .dml import _FinalWrapper
from .inference import StatsModelsInference
from .sklearn_extensions.linear_model import StatsModelsLinearRegression
from .utilities import (_deprecate_positional, add_intercept, fit_with_groups, filter_none_kwargs,
hstack, inverse_onehot)
# A cut-down version of the DML first stage wrapper, since we don't need to support linear first stages
class _FirstStageWrapper:
def __init__(self, model, discrete_target):
self._model = clone(model, safe=False)
self._discrete_target = discrete_target
def _combine(self, X, W, Z, n_samples, fitting=True):
# output is
# * a column of ones if X, W, and Z are all None
# * just X or W or Z if both of the others are None
# * hstack([arrs]) for whatever subset are not None otherwise
# ensure Z is 2D
if Z is not None:
Z = Z.reshape(n_samples, -1)
if X is None and W is None and Z is None:
return np.ones((n_samples, 1))
arrs = [arr for arr in [X, W, Z] if arr is not None]
if len(arrs) == 1:
return arrs[0]
else:
return hstack(arrs)
def fit(self, *, X, W, Target, Z=None, sample_weight=None, groups=None):
if self._discrete_target:
# In this case, the Target is the one-hot-encoding of the treatment variable
# We need to go back to the label representation of the one-hot so as to call
# the classifier.
if np.any(np.all(Target == 0, axis=0)) or (not np.any(np.all(Target == 0, axis=1))):
raise AttributeError("Provided crossfit folds contain training splits that " +
"don't contain all treatments")
Target = inverse_onehot(Target)
if sample_weight is not None:
fit_with_groups(self._model, self._combine(X, W, Z, Target.shape[0]), Target,
groups=groups, sample_weight=sample_weight)
else:
fit_with_groups(self._model, self._combine(X, W, Z, Target.shape[0]), Target,
groups=groups)
def score(self, *, X, W, Target, Z=None, sample_weight=None):
if hasattr(self._model, 'score'):
if self._discrete_target:
# In this case, the Target is the one-hot-encoding of the treatment variable
# We need to go back to the label representation of the one-hot so as to call
# the classifier.
if np.any(np.all(Target == 0, axis=0)) or (not np.any(np.all(Target == 0, axis=1))):
raise AttributeError("Provided crossfit folds contain training splits that " +
"don't contain all treatments")
Target = inverse_onehot(Target)
if sample_weight is not None:
return self._model.score(self._combine(X, W, Z, Target.shape[0]), Target, sample_weight=sample_weight)
else:
return self._model.score(self._combine(X, W, Z, Target.shape[0]), Target)
else:
return None
def predict(self, X, W, Z=None):
arrs = [arr for arr in [X, W, Z] if arr is not None]
n_samples = arrs[0].shape[0] if arrs else 1
if self._discrete_target:
return self._model.predict_proba(self._combine(X, W, Z, n_samples, fitting=False))[:, 1:]
else:
return self._model.predict(self._combine(X, W, Z, n_samples, fitting=False))
class _BaseDMLATEIVModelFinal:
def __init__(self):
self._first_stage = LinearRegression(fit_intercept=False)
self._model_final = _FinalWrapper(LinearRegression(fit_intercept=False),
fit_cate_intercept=True, featurizer=None, use_weight_trick=False)
def fit(self, Y, T, X=None, W=None, Z=None, nuisances=None, sample_weight=None, sample_var=None):
Y_res, T_res, Z_res = nuisances
if Z_res.ndim == 1:
Z_res = Z_res.reshape(-1, 1)
# DMLATEIV is just like 2SLS; first regress T_res on Z_res, then regress Y_res on predicted T_res
T_res_pred = self._first_stage.fit(Z_res, T_res,
sample_weight=sample_weight).predict(Z_res)
# TODO: allow the final model to actually use X? Then we'd need to rename the class
# since we would actually be calculating a CATE rather than ATE.
self._model_final.fit(X=None, T_res=T_res_pred, Y_res=Y_res, sample_weight=sample_weight)
return self
def predict(self, X=None):
# TODO: allow the final model to actually use X?
return self._model_final.predict(X=None)
def score(self, Y, T, X=None, W=None, Z=None, nuisances=None, sample_weight=None, sample_var=None):
Y_res, T_res, Z_res = nuisances
if Y_res.ndim == 1:
Y_res = Y_res.reshape((-1, 1))
if T_res.ndim == 1:
T_res = T_res.reshape((-1, 1))
# TODO: allow the final model to actually use X?
effects = self._model_final.predict(X=None).reshape((-1, Y_res.shape[1], T_res.shape[1]))
Y_res_pred = np.einsum('ijk,ik->ij', effects, T_res).reshape(Y_res.shape)
if sample_weight is not None:
return np.mean(np.average((Y_res - Y_res_pred)**2, weights=sample_weight, axis=0))
else:
return np.mean((Y_res - Y_res_pred) ** 2)
class _BaseDMLATEIV(_OrthoLearner):
    # Shared base for DMLATEIV/ProjectedDMLATEIV: wires the caller-supplied nuisance
    # model together with the common _BaseDMLATEIVModelFinal final stage.
    def __init__(self, model_nuisance,
                 discrete_instrument=False, discrete_treatment=False,
                 categories='auto',
                 n_splits=2, random_state=None):
        super().__init__(model_nuisance, _BaseDMLATEIVModelFinal(),
                         discrete_treatment=discrete_treatment, discrete_instrument=discrete_instrument,
                         categories=categories,
                         n_splits=n_splits, random_state=random_state)

    @_deprecate_positional("W and Z should be passed by keyword only. In a future release "
                           "we will disallow passing W and Z by position.", ['W', 'Z'])
    def fit(self, Y, T, Z, W=None, *, sample_weight=None, sample_var=None, groups=None, inference=None):
        """
        Estimate the counterfactual model from data, i.e. estimates function :math:`\\theta(\\cdot)`.

        Parameters
        ----------
        Y: (n, d_y) matrix or vector of length n
            Outcomes for each sample
        T: (n, d_t) matrix or vector of length n
            Treatments for each sample
        Z: (n, d_z) matrix
            Instruments for each sample
        W: optional(n, d_w) matrix or None (Default=None)
            Controls for each sample
        sample_weight: optional(n,) vector or None (Default=None)
            Weights for each samples
        sample_var: optional(n,) vector or None (Default=None)
            Sample variance for each sample
        groups: (n,) vector, optional
            All rows corresponding to the same group will be kept together during splitting.
            If groups is not None, the n_splits argument passed to this class's initializer
            must support a 'groups' argument to its split method.
        inference: string,:class:`.Inference` instance, or None
            Method for performing inference. This estimator supports 'bootstrap'
            (or an instance of:class:`.BootstrapInference`).

        Returns
        -------
        self: _BaseDMLATEIV instance
        """
        # Replacing fit from _OrthoLearner, to enforce X=None and improve the docstring
        return super().fit(Y, T, W=W, Z=Z,
                           sample_weight=sample_weight, sample_var=sample_var, groups=groups,
                           inference=inference)

    def score(self, Y, T, Z, W=None):
        """
        Score the fitted CATE model on a new data set. Generates nuisance parameters
        for the new data set based on the fitted residual nuisance models created at fit time.
        It uses the mean prediction of the models fitted by the different crossfit folds.
        Then calculates the MSE of the final residual Y on residual T regression.

        If model_final does not have a score method, then it raises an :exc:`.AttributeError`

        Parameters
        ----------
        Y: (n, d_y) matrix or vector of length n
            Outcomes for each sample
        T: (n, d_t) matrix or vector of length n
            Treatments for each sample
        Z: optional(n, d_z) matrix
            Instruments for each sample
        W: optional(n, d_w) matrix or None (Default=None)
            Controls for each sample

        Returns
        -------
        score: float
            The MSE of the final CATE model on the new data.
        """
        # Replacing score from _OrthoLearner, to enforce X=None and improve the docstring
        return super().score(Y, T, W=W, Z=Z)
class _DMLATEIVModelNuisance:
def __init__(self, model_Y_W, model_T_W, model_Z_W):
self._model_Y_W = clone(model_Y_W, safe=False)
self._model_T_W = clone(model_T_W, safe=False)
self._model_Z_W = clone(model_Z_W, safe=False)
def fit(self, Y, T, X=None, W=None, Z=None, sample_weight=None, groups=None):
assert X is None, "DML ATE IV does not accept features"
self._model_Y_W.fit(X=X, W=W, Target=Y, sample_weight=sample_weight, groups=groups)
self._model_T_W.fit(X=X, W=W, Target=T, sample_weight=sample_weight, groups=groups)
self._model_Z_W.fit(X=X, W=W, Target=Z, sample_weight=sample_weight, groups=groups)
return self
def score(self, Y, T, X=None, W=None, Z=None, sample_weight=None):
assert X is None, "DML ATE IV does not accept features"
if hasattr(self._model_Y_W, 'score'):
Y_X_score = self._model_Y_W.score(X=X, W=W, Target=Y, sample_weight=sample_weight)
else:
Y_X_score = None
if hasattr(self._model_T_W, 'score'):
T_X_score = self._model_T_W.score(X=X, W=W, Target=T, sample_weight=sample_weight)
else:
T_X_score = None
if hasattr(self._model_Z_W, 'score'):
Z_X_score = self._model_Z_W.score(X=X, W=W, Target=Z, sample_weight=sample_weight)
else:
Z_X_score = None
return Y_X_score, T_X_score, Z_X_score
def predict(self, Y, T, X=None, W=None, Z=None, sample_weight=None):
assert X is None, "DML ATE IV does not accept features"
Y_pred = self._model_Y_W.predict(X=X, W=W)
T_pred = self._model_T_W.predict(X=X, W=W)
Z_pred = self._model_Z_W.predict(X=X, W=W)
if W is None: # In this case predict above returns a single row
Y_pred = np.tile(Y_pred.reshape(1, -1), (Y.shape[0], 1))
T_pred = np.tile(T_pred.reshape(1, -1), (T.shape[0], 1))
Z_pred = np.tile(Z_pred.reshape(1, -1), (Z.shape[0], 1))
Y_res = Y - Y_pred.reshape(Y.shape)
T_res = T - T_pred.reshape(T.shape)
Z_res = Z - Z_pred.reshape(Z.shape)
return Y_res, T_res, Z_res
class DMLATEIV(_BaseDMLATEIV):
    """
    Implementation of the orthogonal/double ml method for ATE estimation with
    IV as described in

    Double/Debiased Machine Learning for Treatment and Causal Parameters
    Victor Chernozhukov, Denis Chetverikov, Mert Demirer, Esther Duflo, Christian Hansen, Whitney Newey, James Robins
    https://arxiv.org/abs/1608.00060

    Requires that either co-variance of T, Z is independent of X or that effect
    is not heterogeneous in X for correct recovery. Otherwise it estimates
    a biased ATE.
    """

    def __init__(self, model_Y_W, model_T_W, model_Z_W,
                 discrete_treatment=False, discrete_instrument=False,
                 categories='auto',
                 n_splits=2, random_state=None):
        # wrap each user-supplied model so X/W/Z inputs are combined consistently
        nuisance = _DMLATEIVModelNuisance(
            model_Y_W=_FirstStageWrapper(model_Y_W, discrete_target=False),
            model_T_W=_FirstStageWrapper(model_T_W, discrete_target=discrete_treatment),
            model_Z_W=_FirstStageWrapper(model_Z_W, discrete_target=discrete_instrument))
        super().__init__(nuisance,
                         discrete_instrument=discrete_instrument,
                         discrete_treatment=discrete_treatment,
                         categories=categories,
                         n_splits=n_splits, random_state=random_state)
class _ProjectedDMLATEIVModelNuisance:
def __init__(self, model_Y_W, model_T_W, model_T_WZ):
self._model_Y_W = clone(model_Y_W, safe=False)
self._model_T_W = clone(model_T_W, safe=False)
self._model_T_WZ = clone(model_T_WZ, safe=False)
def fit(self, Y, T, X=None, W=None, Z=None, sample_weight=None, groups=None):
assert X is None, "DML ATE IV does not accept features"
self._model_Y_W.fit(X=X, W=W, Target=Y, sample_weight=sample_weight, groups=groups)
self._model_T_W.fit(X=X, W=W, Target=T, sample_weight=sample_weight, groups=groups)
self._model_T_WZ.fit(X=X, W=W, Z=Z, Target=T, sample_weight=sample_weight, groups=groups)
return self
def score(self, Y, T, X=None, W=None, Z=None, sample_weight=None):
assert X is None, "DML ATE IV does not accept features"
if hasattr(self._model_Y_W, 'score'):
Y_X_score = self._model_Y_W.score(X=X, W=W, Target=Y, sample_weight=sample_weight)
else:
Y_X_score = None
if hasattr(self._model_T_W, 'score'):
T_X_score = self._model_T_W.score(X=X, W=W, Target=T, sample_weight=sample_weight)
else:
T_X_score = None
if hasattr(self._model_T_WZ, 'score'):
T_XZ_score = self._model_T_WZ.score(X=X, W=W, Z=Z, Target=T, sample_weight=sample_weight)
else:
T_XZ_score = None
return Y_X_score, T_X_score, T_XZ_score
def predict(self, Y, T, X=None, W=None, Z=None, sample_weight=None):
assert X is None, "DML ATE IV does not accept features"
Y_pred = self._model_Y_W.predict(X, W)
TX_pred = self._model_T_W.predict(X, W)
TXZ_pred = self._model_T_WZ.predict(X, W, Z)
if W is None: # In this case predict above returns a single row
Y_pred = np.tile(Y_pred.reshape(1, -1), (Y.shape[0], 1))
TX_pred = np.tile(TX_pred.reshape(1, -1), (T.shape[0], 1))
Y_res = Y - Y_pred.reshape(Y.shape)
T_res = T - TX_pred.reshape(T.shape)
Z_res = TXZ_pred.reshape(T.shape) - TX_pred.reshape(T.shape)
return Y_res, T_res, Z_res
class ProjectedDMLATEIV(_BaseDMLATEIV):
    """DMLATEIV variant using the projected instrument E[T|W,Z] - E[T|W] in place of a Z residual."""

    def __init__(self, model_Y_W, model_T_W, model_T_WZ,
                 discrete_treatment=False, discrete_instrument=False,
                 categories='auto',
                 n_splits=2, random_state=None):
        # wrap each user-supplied model so X/W/Z inputs are combined consistently;
        # both treatment models are classifiers exactly when the treatment is discrete
        nuisance = _ProjectedDMLATEIVModelNuisance(
            model_Y_W=_FirstStageWrapper(model_Y_W, discrete_target=False),
            model_T_W=_FirstStageWrapper(model_T_W, discrete_target=discrete_treatment),
            model_T_WZ=_FirstStageWrapper(model_T_WZ, discrete_target=discrete_treatment))
        super().__init__(nuisance,
                         discrete_treatment=discrete_treatment,
                         discrete_instrument=discrete_instrument,
                         categories=categories,
                         n_splits=n_splits, random_state=random_state)
class _BaseDMLIVModelNuisance:
"""
Nuisance model fits the three models at fit time and at predict time
returns :math:`Y-\\E[Y|X]` and :math:`\\E[T|X,Z]-\\E[T|X]` as residuals.
"""
def __init__(self, model_Y_X, model_T_X, model_T_XZ):
self._model_Y_X = clone(model_Y_X, safe=False)
self._model_T_X = clone(model_T_X, safe=False)
self._model_T_XZ = clone(model_T_XZ, safe=False)
def fit(self, Y, T, X=None, W=None, Z=None, sample_weight=None, groups=None):
# TODO: would it be useful to extend to handle controls ala vanilla DML?
assert W is None, "DML IV does not accept controls"
self._model_Y_X.fit(X=X, W=None, Target=Y, sample_weight=sample_weight, groups=groups)
self._model_T_X.fit(X=X, W=None, Target=T, sample_weight=sample_weight, groups=groups)
self._model_T_XZ.fit(X=X, W=None, Z=Z, Target=T, sample_weight=sample_weight, groups=groups)
return self
def score(self, Y, T, X=None, W=None, Z=None, sample_weight=None):
assert W is None, "DML IV does not accept controls"
if hasattr(self._model_Y_X, 'score'):
Y_X_score = self._model_Y_X.score(X=X, W=W, Target=Y, sample_weight=sample_weight)
else:
Y_X_score = None
if hasattr(self._model_T_X, 'score'):
T_X_score = self._model_T_X.score(X=X, W=W, Target=T, sample_weight=sample_weight)
else:
T_X_score = None
if hasattr(self._model_T_XZ, 'score'):
T_XZ_score = self._model_T_XZ.score(X=X, W=W, Z=Z, Target=T, sample_weight=sample_weight)
else:
T_XZ_score = None
return Y_X_score, T_X_score, T_XZ_score
def predict(self, Y, T, X=None, W=None, Z=None, sample_weight=None):
assert W is None, "DML IV does not accept controls"
Y_pred = self._model_Y_X.predict(X, W)
TXZ_pred = self._model_T_XZ.predict(X, W, Z)
TX_pred = self._model_T_X.predict(X, W)
if X is None: # In this case predict above returns a single row
Y_pred = np.tile(Y_pred.reshape(1, -1), (Y.shape[0], 1))
TX_pred = np.tile(TX_pred.reshape(1, -1), (T.shape[0], 1))
Y_res = Y - Y_pred.reshape(Y.shape)
T_res = TXZ_pred.reshape(T.shape) - TX_pred.reshape(T.shape)
return Y_res, T_res
class _BaseDMLIVModelFinal:
"""
Final model at fit time, fits a residual on residual regression with a heterogeneous coefficient
that depends on X, i.e.
.. math ::
Y - \\E[Y | X] = \\theta(X) \\cdot (\\E[T | X, Z] - \\E[T | X]) + \\epsilon
and at predict time returns :math:`\\theta(X)`. The score method returns the MSE of this final
residual on residual regression.
"""
def __init__(self, model_final):
self._model_final = clone(model_final, safe=False)
def fit(self, Y, T, X=None, W=None, Z=None, nuisances=None, sample_weight=None, sample_var=None):
Y_res, T_res = nuisances
self._model_final.fit(X, T_res, Y_res, sample_weight=sample_weight, sample_var=sample_var)
return self
def predict(self, X=None):
return self._model_final.predict(X)
def score(self, Y, T, X=None, W=None, Z=None, nuisances=None, sample_weight=None, sample_var=None):
Y_res, T_res = nuisances
if Y_res.ndim == 1:
Y_res = Y_res.reshape((-1, 1))
if T_res.ndim == 1:
T_res = T_res.reshape((-1, 1))
effects = self._model_final.predict(X).reshape((-1, Y_res.shape[1], T_res.shape[1]))
Y_res_pred = np.einsum('ijk,ik->ij', effects, T_res).reshape(Y_res.shape)
if sample_weight is not None:
return np.mean(np.average((Y_res - Y_res_pred)**2, weights=sample_weight, axis=0))
else:
return np.mean((Y_res - Y_res_pred)**2)
class _BaseDMLIV(_OrthoLearner):
    """
    The class _BaseDMLIV implements the base class of the DMLIV
    algorithm for estimating a CATE. It accepts three generic machine
    learning models:
    1) model_Y_X that estimates :math:`\\E[Y | X]`
    2) model_T_X that estimates :math:`\\E[T | X]`
    3) model_T_XZ that estimates :math:`\\E[T | X, Z]`
    These are estimated in a cross-fitting manner for each sample in the training set.
    Then it minimizes the square loss:

    .. math::
        \\sum_i (Y_i - \\E[Y|X_i] - \\theta(X) * (\\E[T|X_i, Z_i] - \\E[T|X_i]))^2

    This loss is minimized by the model_final class, which is passed as an input.
    In the two children classes {DMLIV, GenericDMLIV}, we implement different strategies of how to invoke
    machine learning algorithms to minimize this final square loss.

    Parameters
    ----------
    model_Y_X : estimator
        model to estimate :math:`\\E[Y | X]`.  Must support `fit` and `predict` methods.
    model_T_X : estimator
        model to estimate :math:`\\E[T | X]`.  Must support `fit` and `predict` methods
    model_T_XZ : estimator
        model to estimate :math:`\\E[T | X, Z]`.  Must support `fit(X, Z, T, *, sample_weights)`
        and `predict(X, Z)` methods.
    model_final : estimator
        final model that at fit time takes as input :math:`(Y-\\E[Y|X])`, :math:`(\\E[T|X,Z]-\\E[T|X])` and X
        and supports method predict(X) that produces the CATE at X
    discrete_instrument: bool, optional, default False
        Whether the instrument values should be treated as categorical, rather than continuous, quantities
    discrete_treatment: bool, optional, default False
        Whether the treatment values should be treated as categorical, rather than continuous, quantities
    categories: 'auto' or list, default 'auto'
        The categories to use when encoding discrete treatments (or 'auto' to use the unique sorted values).
        The first category will be treated as the control treatment.
    n_splits: int, cross-validation generator or an iterable, optional, default 2
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - :term:`cv splitter`
        - An iterable yielding (train, test) splits as arrays of indices.

        For integer/None inputs, if the treatment is discrete
        :class:`~sklearn.model_selection.StratifiedKFold` is used, else,
        :class:`~sklearn.model_selection.KFold` is used
        (with a random shuffle in either case).

        Unless an iterable is used, we call `split(concat[W, X], T)` to generate the splits. If all
        W, X are None, then we call `split(ones((T.shape[0], 1)), T)`.
    random_state: int, :class:`~numpy.random.mtrand.RandomState` instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random number generator;
        If None, the random number generator is the :class:`~numpy.random.mtrand.RandomState` instance used
        by :mod:`np.random<numpy.random>`.
    """

    def __init__(self, model_Y_X, model_T_X, model_T_XZ, model_final,
                 discrete_instrument=False, discrete_treatment=False, categories='auto',
                 n_splits=2, random_state=None):
        super().__init__(_BaseDMLIVModelNuisance(model_Y_X, model_T_X, model_T_XZ),
                         _BaseDMLIVModelFinal(model_final),
                         discrete_treatment=discrete_treatment, discrete_instrument=discrete_instrument,
                         categories=categories,
                         n_splits=n_splits, random_state=random_state)

    @_deprecate_positional("Z and X should be passed by keyword only. In a future release "
                           "we will disallow passing Z and X by position.", ['X', 'Z'])
    def fit(self, Y, T, Z, X=None, *, sample_weight=None, sample_var=None, groups=None, inference=None):
        """
        Estimate the counterfactual model from data, i.e. estimates function :math:`\\theta(\\cdot)`.

        Parameters
        ----------
        Y: (n, d_y) matrix or vector of length n
            Outcomes for each sample
        T: (n, d_t) matrix or vector of length n
            Treatments for each sample
        Z: (n, d_z) matrix
            Instruments for each sample
        X: optional(n, d_x) matrix or None (Default=None)
            Features for each sample
        sample_weight: optional(n,) vector or None (Default=None)
            Weights for each samples
        sample_var: optional(n,) vector or None (Default=None)
            Sample variance for each sample
        groups: (n,) vector, optional
            All rows corresponding to the same group will be kept together during splitting.
            If groups is not None, the n_splits argument passed to this class's initializer
            must support a 'groups' argument to its split method.
        inference: string,:class:`.Inference` instance, or None
            Method for performing inference. This estimator supports 'bootstrap'
            (or an instance of:class:`.BootstrapInference`).

        Returns
        -------
        self: _BaseDMLIV
        """
        # Replacing fit from _OrthoLearner, to enforce W=None and improve the docstring
        return super().fit(Y, T, X=X, Z=Z,
                           sample_weight=sample_weight, sample_var=sample_var, groups=groups,
                           inference=inference)

    def score(self, Y, T, Z, X=None):
        """
        Score the fitted CATE model on a new data set. Generates nuisance parameters
        for the new data set based on the fitted residual nuisance models created at fit time.
        It uses the mean prediction of the models fitted by the different crossfit folds.
        Then calculates the MSE of the final residual Y on residual T regression.

        If model_final does not have a score method, then it raises an :exc:`.AttributeError`

        Parameters
        ----------
        Y: (n, d_y) matrix or vector of length n
            Outcomes for each sample
        T: (n, d_t) matrix or vector of length n
            Treatments for each sample
        Z: optional(n, d_z) matrix
            Instruments for each sample
        X: optional(n, d_x) matrix or None (Default=None)
            Features for each sample

        Returns
        -------
        score: float
            The MSE of the final CATE model on the new data.
        """
        # Replacing score from _OrthoLearner, to enforce W=None and improve the docstring
        return super().score(Y, T, X=X, Z=Z)

    @property
    def original_featurizer(self):
        # The featurizer exactly as supplied by the user (may be None)
        return super().model_final._model_final._original_featurizer

    @property
    def featurizer(self):
        # NOTE This is used by the inference methods and has to be the overall featurizer. intended
        # for internal use by the library
        return super().model_final._model_final._featurizer

    @property
    def model_final(self):
        # NOTE This is used by the inference methods and is more for internal use to the library
        # (same underlying object as `model_cate`)
        return super().model_final._model_final._model

    @property
    def model_cate(self):
        """
        Get the fitted final CATE model.

        Returns
        -------
        model_cate: object of type(model_final)
            An instance of the model_final object that was fitted after calling fit which corresponds
            to the constant marginal CATE model.
        """
        return super().model_final._model_final._model

    @property
    def models_Y_X(self):
        """
        Get the fitted models for :math:`\\E[Y | X]`.

        Returns
        -------
        models_Y_X: list of objects of type(`model_Y_X`)
            A list of instances of the `model_Y_X` object. Each element corresponds to a crossfitting
            fold and is the model instance that was fitted for that training fold.
        """
        return [mdl._model for mdl in super().models_Y_X]

    @property
    def models_T_X(self):
        """
        Get the fitted models for :math:`\\E[T | X]`.

        Returns
        -------
        models_T_X: list of objects of type(`model_T_X`)
            A list of instances of the `model_T_X` object. Each element corresponds to a crossfitting
            fold and is the model instance that was fitted for that training fold.
        """
        return [mdl._model for mdl in super().models_T_X]

    @property
    def models_T_XZ(self):
        """
        Get the fitted models for :math:`\\E[T | X, Z]`.

        Returns
        -------
        models_T_XZ: list of objects of type(`model_T_XZ`)
            A list of instances of the `model_T_XZ` object. Each element corresponds to a crossfitting
            fold and is the model instance that was fitted for that training fold.
        """
        return [mdl._model for mdl in super().models_T_XZ]

    @property
    def nuisance_scores_Y_X(self):
        """
        Get the scores for Y_X model on the out-of-sample training data
        """
        return self.nuisance_scores_[0]

    @property
    def nuisance_scores_T_X(self):
        """
        Get the scores for T_X model on the out-of-sample training data
        """
        return self.nuisance_scores_[1]

    @property
    def nuisance_scores_T_XZ(self):
        """
        Get the scores for T_XZ model on the out-of-sample training data
        """
        return self.nuisance_scores_[2]

    def cate_feature_names(self, feature_names=None):
        """
        Get the output feature names.

        Parameters
        ----------
        feature_names: list of strings of length X.shape[1] or None
            The names of the input features. If None and X is a dataframe, it defaults to the column names
            from the dataframe.

        Returns
        -------
        out_feature_names: list of strings or None
            The names of the output features :math:`\\phi(X)`, i.e. the features with respect to which the
            final constant marginal CATE model is linear. It is the names of the features that are associated
            with each entry of the :meth:`coef_` parameter. Not available when the featurizer is not None and
            does not have a method: `get_feature_names(feature_names)`. Otherwise None is returned.
        """
        if feature_names is None:
            feature_names = self._input_names["feature_names"]
        if self.original_featurizer is None:
            return feature_names
        elif hasattr(self.original_featurizer, 'get_feature_names'):
            return self.original_featurizer.get_feature_names(feature_names)
        else:
            raise AttributeError("Featurizer does not have a method: get_feature_names!")
class DMLIV(_BaseDMLIV):
"""
A child of the _BaseDMLIV class that specifies a particular effect model
where the treatment effect is linear in some featurization of the variable X
The features are created by a provided featurizer that supports fit_transform.
Then an arbitrary model fits on the composite set of features.
Concretely, it assumes that :math:`\\theta(X)=<\\theta, \\phi(X)>` for some features :math:`\\phi(X)`
and runs a linear model regression of :math:`Y-\\E[Y|X]` on :math:`phi(X)*(\\E[T|X,Z]-\\E[T|X])`.
The features are created by the featurizer provided by the user. The particular
linear model regression is also specified by the user (e.g. Lasso, ElasticNet)
Parameters
----------
model_Y_X : estimator
model to estimate :math:`\\E[Y | X]`. Must support `fit` and `predict` methods.
model_T_X : estimator
model to estimate :math:`\\E[T | X]`. Must support `fit` and either `predict` or `predict_proba` methods,
depending on whether the treatment is discrete.
model_T_XZ : estimator
model to estimate :math:`\\E[T | X, Z]`. Must support `fit` and either `predict` or `predict_proba` methods,
depending on whether the treatment is discrete.
model_final : estimator
final linear model for predicting :math:`(Y-\\E[Y|X])` from :math:`\\phi(X) \\cdot (\\E[T|X,Z]-\\E[T|X])`
Method is incorrect if this model is not linear (e.g. Lasso, ElasticNet, LinearRegression).
featurizer: :term:`transformer`, optional, default None
Must support fit_transform and transform. Used to create composite features in the final CATE regression.
It is ignored if X is None. The final CATE will be trained on the outcome of featurizer.fit_transform(X).
If featurizer=None, then CATE is trained on X.
fit_cate_intercept : bool, optional, default True
Whether the linear CATE model should have a constant term.
n_splits: int, cross-validation generator or an iterable, optional, default 2
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- :term:`cv splitter`
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if the treatment is discrete
:class:`~sklearn.model_selection.StratifiedKFold` is used, else,
:class:`~sklearn.model_selection.KFold` is used
(with a random shuffle in either case).
Unless an iterable is used, we call `split(concat[W, X], T)` to generate the splits. If all
W, X are None, then we call `split(ones((T.shape[0], 1)), T)`.
discrete_instrument: bool, optional, default False
Whether the instrument values should be treated as categorical, rather than continuous, quantities
discrete_treatment: bool, optional, default False
Whether the treatment values should be treated as categorical, rather than continuous, quantities
categories: 'auto' or list, default 'auto'
The categories to use when encoding discrete treatments (or 'auto' to use the unique sorted values).
The first category will be treated as the control treatment.
random_state: int, :class:`~numpy.random.mtrand.RandomState` instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random number generator;
If None, the random number generator is the :class:`~numpy.random.mtrand.RandomState` instance used
by :mod:`np.random<numpy.random>`.
"""
def __init__(self, model_Y_X, model_T_X, model_T_XZ, model_final, featurizer=None,
             fit_cate_intercept=True,
             n_splits=2, discrete_instrument=False, discrete_treatment=False,
             categories='auto', random_state=None):
    """Wrap the user-supplied first-stage and final models and delegate to the base learner."""
    self.bias_part_of_coef = fit_cate_intercept
    self.fit_cate_intercept = fit_cate_intercept
    # Build the wrappers up front so the super() call reads as plain configuration.
    first_stage_Y = _FirstStageWrapper(model_Y_X, False)
    first_stage_T = _FirstStageWrapper(model_T_X, discrete_treatment)
    first_stage_T_XZ = _FirstStageWrapper(model_T_XZ, discrete_treatment)
    final = _FinalWrapper(model_final,
                          fit_cate_intercept=fit_cate_intercept,
                          featurizer=featurizer,
                          use_weight_trick=False)
    super().__init__(first_stage_Y,
                     first_stage_T,
                     first_stage_T_XZ,
                     final,
                     n_splits=n_splits,
                     discrete_instrument=discrete_instrument,
                     discrete_treatment=discrete_treatment,
                     categories=categories,
                     random_state=random_state)
class NonParamDMLIV(_BaseDMLIV):
    """
    A child of the _BaseDMLIV class that allows for an arbitrary square loss based ML
    method in the final stage of the DMLIV algorithm. The method has to support
    sample weights and the fit method has to take as input sample_weights (e.g. random forests), i.e.
    fit(X, y, sample_weight=None)
    It achieves this by re-writing the final stage square loss of the DMLIV algorithm as:

    .. math ::
        \\sum_i (\\E[T|X_i, Z_i] - \\E[T|X_i])^2 * ((Y_i - \\E[Y|X_i])/(\\E[T|X_i, Z_i] - \\E[T|X_i]) - \\theta(X))^2

    Then this can be viewed as a weighted square loss regression, where the target label is

    .. math ::
        \\tilde{Y}_i = (Y_i - \\E[Y|X_i])/(\\E[T|X_i, Z_i] - \\E[T|X_i])

    and each sample has a weight of

    .. math ::
        V(X_i) = (\\E[T|X_i, Z_i] - \\E[T|X_i])^2

    Thus we can call any regression model with inputs:
    fit(X, :math:`\\tilde{Y}_i`, sample_weight= :math:`V(X_i)`)

    Parameters
    ----------
    model_Y_X : estimator
        model to estimate :math:`\\E[Y | X]`. Must support `fit` and `predict` methods.
    model_T_X : estimator
        model to estimate :math:`\\E[T | X]`. Must support `fit` and either `predict` or `predict_proba` methods,
        depending on whether the treatment is discrete.
    model_T_XZ : estimator
        model to estimate :math:`\\E[T | X, Z]`. Must support `fit` and either `predict` or `predict_proba` methods,
        depending on whether the treatment is discrete.
    model_final : estimator
        final model for predicting :math:`\\tilde{Y}` from X with sample weights V(X)
    featurizer : :term:`transformer`, optional, default None
        Must support fit_transform and transform. Used to create composite features in the final CATE regression.
        It is ignored if X is None. The final CATE will be trained on the outcome of featurizer.fit_transform(X).
        If featurizer=None, then CATE is trained on X.
    fit_cate_intercept : bool, optional, default True
        Whether the linear CATE model should have a constant term.
    n_splits: int, cross-validation generator or an iterable, optional, default 2
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - :term:`cv splitter`
        - An iterable yielding (train, test) splits as arrays of indices.

        For integer/None inputs, if the treatment is discrete
        :class:`~sklearn.model_selection.StratifiedKFold` is used, else,
        :class:`~sklearn.model_selection.KFold` is used
        (with a random shuffle in either case).

        Unless an iterable is used, we call `split(concat[W, X], T)` to generate the splits. If all
        W, X are None, then we call `split(ones((T.shape[0], 1)), T)`.
    discrete_instrument: bool, optional, default False
        Whether the instrument values should be treated as categorical, rather than continuous, quantities
    discrete_treatment: bool, optional, default False
        Whether the treatment values should be treated as categorical, rather than continuous, quantities
    categories: 'auto' or list, default 'auto'
        The categories to use when encoding discrete treatments (or 'auto' to use the unique sorted values).
        The first category will be treated as the control treatment.
    random_state: int, :class:`~numpy.random.mtrand.RandomState` instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random number generator;
        If None, the random number generator is the :class:`~numpy.random.mtrand.RandomState` instance used
        by :mod:`np.random<numpy.random>`.
    """

    def __init__(self, model_Y_X, model_T_X, model_T_XZ, model_final,
                 featurizer=None, fit_cate_intercept=True,
                 n_splits=2, discrete_instrument=False, discrete_treatment=False, categories='auto',
                 random_state=None):
        # NOTE(review): unlike DMLIV.__init__, this does not set self.bias_part_of_coef /
        # self.fit_cate_intercept — confirm whether downstream inference relies on them.
        # use_weight_trick=True implements the weighted square loss re-writing described above.
        super().__init__(_FirstStageWrapper(model_Y_X, False),
                         _FirstStageWrapper(model_T_X, discrete_treatment),
                         _FirstStageWrapper(model_T_XZ, discrete_treatment),
                         _FinalWrapper(model_final,
                                       fit_cate_intercept=fit_cate_intercept,
                                       featurizer=featurizer,
                                       use_weight_trick=True),
                         n_splits=n_splits,
                         discrete_instrument=discrete_instrument,
                         discrete_treatment=discrete_treatment,
                         categories=categories,
                         random_state=random_state)
class _BaseDRIVModelFinal:
    """
    Final model at fit time, fits a residual on residual regression with a heterogeneous coefficient
    that depends on X, i.e.

    .. math ::
        Y - \\E[Y | X] = \\theta(X) \\cdot (\\E[T | X, Z] - \\E[T | X]) + \\epsilon

    and at predict time returns :math:`\\theta(X)`. The score method returns the MSE of this final
    residual on residual regression.
    """

    def __init__(self, model_final, featurizer,
                 discrete_treatment, discrete_instrument,
                 fit_cate_intercept, cov_clip, opt_reweighted):
        self._model_final = clone(model_final, safe=False)
        self._fit_cate_intercept = fit_cate_intercept
        self._original_featurizer = clone(featurizer, safe=False)
        self._discrete_treatment = discrete_treatment
        self._discrete_instrument = discrete_instrument
        if self._fit_cate_intercept:
            # Append a constant column to the features so the final model can fit an intercept.
            add_intercept_trans = FunctionTransformer(add_intercept,
                                                      validate=True)
            if featurizer:
                self._featurizer = Pipeline([('featurize', self._original_featurizer),
                                             ('add_intercept', add_intercept_trans)])
            else:
                self._featurizer = add_intercept_trans
        else:
            self._featurizer = self._original_featurizer
        self._cov_clip = cov_clip
        self._opt_reweighted = opt_reweighted

    def _effect_estimate(self, nuisances):
        """Return the doubly-robust point estimate of theta(X) and the clipped covariance."""
        prel_theta, res_t, res_y, res_z, cov = [nuisance.reshape(nuisances[0].shape) for nuisance in nuisances]
        # Estimate final model of theta(X) by minimizing the square loss:
        # (prel_theta(X) + (Y_res - prel_theta(X) * T_res) * Z_res / cov[T,Z | X] - theta(X))^2
        # We clip the covariance so that it is bounded away from zero, so as to reduce variance
        # at the expense of some small bias. For points with very small covariance we revert
        # to the model-based preliminary estimate and do not add the correction term.
        cov_sign = np.sign(cov)
        cov_sign[cov_sign == 0] = 1
        clipped_cov = cov_sign * np.clip(np.abs(cov),
                                         self._cov_clip, np.inf)
        return prel_theta + (res_y - prel_theta * res_t) * res_z / clipped_cov, clipped_cov

    def fit(self, Y, T, X=None, W=None, Z=None, nuisances=None, sample_weight=None, sample_var=None):
        """Fit the final CATE model on the doubly-robust effect estimates.

        Raises AttributeError when the outcome, treatment, or instrument is multi-dimensional,
        since DRIV only supports the single-dimensional (or binary discrete) case.
        """
        self.d_y = Y.shape[1:]
        self.d_t = nuisances[1].shape[1:]
        self.d_z = nuisances[3].shape[1:]
        # TODO: if opt_reweighted is False, we could change the logic to support multidimensional treatments,
        # instruments, and outcomes
        # Bug fix: this previously tested `> 2`, which silently admitted two-dimensional
        # outcomes even though only a single outcome is supported (matching the `> 1`
        # checks for treatments and instruments below).
        if self.d_y and self.d_y[0] > 1:
            raise AttributeError("DRIV only supports a single outcome")
        if self.d_t and self.d_t[0] > 1:
            if self._discrete_treatment:
                raise AttributeError("DRIV only supports binary treatments")
            else:
                raise AttributeError("DRIV only supports single-dimensional continuous treatments")
        if self.d_z and self.d_z[0] > 1:
            if self._discrete_instrument:
                raise AttributeError("DRIV only supports binary instruments")
            else:
                raise AttributeError("DRIV only supports single-dimensional continuous instruments")
        theta_dr, clipped_cov = self._effect_estimate(nuisances)
        if (X is not None) and (self._featurizer is not None):
            X = self._featurizer.fit_transform(X)
        # Optional variance-minimizing reweighting by the squared (clipped) covariance.
        if self._opt_reweighted and (sample_weight is not None):
            sample_weight = sample_weight * clipped_cov.ravel()**2
        elif self._opt_reweighted:
            sample_weight = clipped_cov.ravel()**2
        self._model_final.fit(X, theta_dr, **filter_none_kwargs(sample_weight=sample_weight, sample_var=sample_var))
        return self

    def predict(self, X=None):
        """Return theta(X) reshaped to (n,) + d_y + d_t."""
        if (X is not None) and (self._featurizer is not None):
            X = self._featurizer.transform(X)
        return self._model_final.predict(X).reshape((-1,) + self.d_y + self.d_t)

    def score(self, Y, T, X=None, W=None, Z=None, nuisances=None, sample_weight=None, sample_var=None):
        """Return the (optionally reweighted) MSE of the fitted final model on new data."""
        theta_dr, clipped_cov = self._effect_estimate(nuisances)
        if (X is not None) and (self._featurizer is not None):
            X = self._featurizer.transform(X)
        # Same reweighting as in fit, so the score is measured under the training objective.
        if self._opt_reweighted and (sample_weight is not None):
            sample_weight = sample_weight * clipped_cov.ravel()**2
        elif self._opt_reweighted:
            sample_weight = clipped_cov.ravel()**2
        return np.average((theta_dr.ravel() - self._model_final.predict(X).ravel())**2,
                          weights=sample_weight, axis=0)
class _BaseDRIV(_OrthoLearner):
    """
    The _BaseDRIV algorithm for estimating CATE with IVs. It is the parent of the
    two public classes {DRIV, ProjectedDRIV}

    Parameters
    ----------
    nuisance_models : dictionary of nuisance models, with {'name_of_model' : EstimatorObject, ...}
    model_final : estimator
        model compatible with the sklearn regression API, used to fit the effect on X
    featurizer : :term:`transformer`, optional, default None
        Must support fit_transform and transform. Used to create composite features in the final CATE regression.
        It is ignored if X is None. The final CATE will be trained on the outcome of featurizer.fit_transform(X).
        If featurizer=None, then CATE is trained on X.
    fit_cate_intercept : bool, optional, default True
        Whether the linear CATE model should have a constant term.
    cov_clip : float, optional, default 0.1
        clipping of the covariate for regions with low "overlap", to reduce variance
    opt_reweighted : bool, optional, default False
        Whether to reweight the samples to minimize variance. If True then
        model_final.fit must accept sample_weight as a kw argument. If True then
        assumes the model_final is flexible enough to fit the true CATE model. Otherwise,
        it method will return a biased projection to the model_final space, biased
        to give more weight on parts of the feature space where the instrument is strong.
    discrete_instrument: bool, optional, default False
        Whether the instrument values should be treated as categorical, rather than continuous, quantities
    discrete_treatment: bool, optional, default False
        Whether the treatment values should be treated as categorical, rather than continuous, quantities
    categories: 'auto' or list, default 'auto'
        The categories to use when encoding discrete treatments (or 'auto' to use the unique sorted values).
        The first category will be treated as the control treatment.
    n_splits: int, cross-validation generator or an iterable, optional, default 2
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - :term:`cv splitter`
        - An iterable yielding (train, test) splits as arrays of indices.

        For integer/None inputs, if the treatment is discrete
        :class:`~sklearn.model_selection.StratifiedKFold` is used, else,
        :class:`~sklearn.model_selection.KFold` is used
        (with a random shuffle in either case).

        Unless an iterable is used, we call `split(concat[W, X], T)` to generate the splits. If all
        W, X are None, then we call `split(ones((T.shape[0], 1)), T)`.
    random_state: int, :class:`~numpy.random.mtrand.RandomState` instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random number generator;
        If None, the random number generator is the :class:`~numpy.random.mtrand.RandomState` instance used
        by :mod:`np.random<numpy.random>`.
    """

    def __init__(self,
                 nuisance_models,
                 model_final,
                 featurizer=None,
                 fit_cate_intercept=True,
                 cov_clip=0.1, opt_reweighted=False,
                 discrete_instrument=False, discrete_treatment=False,
                 categories='auto',
                 n_splits=2, random_state=None):
        self.fit_cate_intercept = fit_cate_intercept
        # NOTE(review): presumably tells the inference machinery that the intercept is folded
        # into the final model's coefficient vector — confirm against the inference code.
        self.bias_part_of_coef = fit_cate_intercept
        self.cov_clip = cov_clip
        self.opt_reweighted = opt_reweighted
        # The final stage is always the DRIV residual-on-residual regression wrapper.
        super().__init__(nuisance_models, _BaseDRIVModelFinal(model_final,
                                                              featurizer,
                                                              discrete_treatment,
                                                              discrete_instrument,
                                                              fit_cate_intercept,
                                                              cov_clip,
                                                              opt_reweighted),
                         discrete_instrument=discrete_instrument, discrete_treatment=discrete_treatment,
                         categories=categories, n_splits=n_splits, random_state=random_state)

    @_deprecate_positional("X, W, and Z should be passed by keyword only. In a future release "
                           "we will disallow passing X, W, and Z by position.", ['X', 'W', 'Z'])
    def fit(self, Y, T, Z, X=None, W=None, *, sample_weight=None, sample_var=None, groups=None, inference=None):
        """
        Estimate the counterfactual model from data, i.e. estimates function :math:`\\theta(\\cdot)`.

        Parameters
        ----------
        Y: (n, d_y) matrix or vector of length n
            Outcomes for each sample
        T: (n, d_t) matrix or vector of length n
            Treatments for each sample
        Z: (n, d_z) matrix
            Instruments for each sample
        X: optional(n, d_x) matrix or None (Default=None)
            Features for each sample
        W: optional(n, d_w) matrix or None (Default=None)
            Controls for each sample
        sample_weight: optional(n,) vector or None (Default=None)
            Weights for each samples
        sample_var: optional(n,) vector or None (Default=None)
            Sample variance for each sample
        groups: (n,) vector, optional
            All rows corresponding to the same group will be kept together during splitting.
            If groups is not None, the n_splits argument passed to this class's initializer
            must support a 'groups' argument to its split method.
        inference: string,:class:`.Inference` instance, or None
            Method for performing inference. This estimator supports 'bootstrap'
            (or an instance of:class:`.BootstrapInference`).

        Returns
        -------
        self: _BaseDRIV instance
        """
        # Replacing fit from _OrthoLearner, to reorder arguments and improve the docstring
        return super().fit(Y, T, X=X, W=W, Z=Z,
                           sample_weight=sample_weight, sample_var=sample_var, groups=groups,
                           inference=inference)

    def score(self, Y, T, Z, X=None, W=None, sample_weight=None):
        """
        Score the fitted CATE model on a new data set. Generates nuisance parameters
        for the new data set based on the fitted nuisance models created at fit time.
        It uses the mean prediction of the models fitted by the different crossfit folds.
        Then calls the score function of the model_final and returns the calculated score.
        The model_final model must have a score method.
        If model_final does not have a score method, then it raises an :exc:`.AttributeError`

        Parameters
        ----------
        Y: (n, d_y) matrix or vector of length n
            Outcomes for each sample
        T: (n, d_t) matrix or vector of length n
            Treatments for each sample
        Z: (n, d_z) matrix or None (Default=None)
            Instruments for each sample
        X: optional (n, d_x) matrix or None (Default=None)
            Features for each sample
        W: optional(n, d_w) matrix or None (Default=None)
            Controls for each sample
        sample_weight: optional(n,) vector or None (Default=None)
            Weights for each samples

        Returns
        -------
        score : float or (array of float)
            The score of the final CATE model on the new data. Same type as the return
            type of the model_final.score method.
        """
        # Replacing score from _OrthoLearner, to reorder arguments and improve the docstring
        return super().score(Y, T, X=X, W=W, Z=Z, sample_weight=sample_weight)

    @property
    def original_featurizer(self):
        # The user-supplied featurizer, without the intercept-adding wrapper that
        # _BaseDRIVModelFinal may have layered on top of it.
        return super().model_final._original_featurizer

    @property
    def featurizer(self):
        # NOTE This is used by the inference methods and has to be the overall featurizer. intended
        # for internal use by the library
        return super().model_final._featurizer

    @property
    def model_final(self):
        # NOTE This is used by the inference methods and is more for internal use to the library
        # (unwraps the _BaseDRIVModelFinal to expose the fitted user model).
        return super().model_final._model_final

    def cate_feature_names(self, feature_names=None):
        """
        Get the output feature names.

        Parameters
        ----------
        feature_names: list of strings of length X.shape[1] or None
            The names of the input features. If None and X is a dataframe, it defaults to the column names
            from the dataframe.

        Returns
        -------
        out_feature_names: list of strings or None
            The names of the output features :math:`\\phi(X)`, i.e. the features with respect to which the
            final constant marginal CATE model is linear. It is the names of the features that are associated
            with each entry of the :meth:`coef_` parameter. Not available when the featurizer is not None and
            does not have a method: `get_feature_names(feature_names)`. Otherwise None is returned.
        """
        if feature_names is None:
            feature_names = self._input_names["feature_names"]
        if self.original_featurizer is None:
            return feature_names
        elif hasattr(self.original_featurizer, 'get_feature_names'):
            return self.original_featurizer.get_feature_names(feature_names)
        else:
            raise AttributeError("Featurizer does not have a method: get_feature_names!")
class _IntentToTreatDRIVModelNuisance:
    """
    Nuisance model fits the three models at fit time and at predict time
    returns :math:`Y-\\E[Y|X]` and :math:`\\E[T|X,Z]-\\E[T|X]` as residuals.
    """

    def __init__(self, model_Y_X, model_T_XZ, prel_model_effect):
        # Clone so repeated cross-fitting folds get independent copies.
        self._model_Y_X = clone(model_Y_X, safe=False)
        self._model_T_XZ = clone(model_T_XZ, safe=False)
        self._prel_model_effect = clone(prel_model_effect, safe=False)

    def fit(self, Y, T, X=None, W=None, Z=None, sample_weight=None, groups=None):
        """Fit the two first-stage models and the preliminary effect model."""
        self._model_Y_X.fit(X=X, W=W, Target=Y, sample_weight=sample_weight, groups=groups)
        self._model_T_XZ.fit(X=X, W=W, Z=Z, Target=T, sample_weight=sample_weight, groups=groups)
        # we need to undo the one-hot encoding for calling effect,
        # since it expects raw values
        self._prel_model_effect.fit(Y, inverse_onehot(T), Z=inverse_onehot(Z), X=X, W=W,
                                    sample_weight=sample_weight, groups=groups)
        return self

    def score(self, Y, T, X=None, W=None, Z=None, sample_weight=None):
        """Return the per-model scores, or None for any model lacking a score method."""
        Y_X_score = (self._model_Y_X.score(X=X, W=W, Target=Y, sample_weight=sample_weight)
                     if hasattr(self._model_Y_X, 'score') else None)
        T_XZ_score = (self._model_T_XZ.score(X=X, W=W, Z=Z, Target=T, sample_weight=sample_weight)
                      if hasattr(self._model_T_XZ, 'score') else None)
        if hasattr(self._prel_model_effect, 'score'):
            # we need to undo the one-hot encoding for calling effect,
            # since it expects raw values
            effect_score = self._prel_model_effect.score(Y, inverse_onehot(T),
                                                         Z=inverse_onehot(Z), X=X, W=W,
                                                         sample_weight=sample_weight)
        else:
            effect_score = None
        return Y_X_score, T_XZ_score, effect_score

    def predict(self, Y, T, X=None, W=None, Z=None, sample_weight=None):
        """Return the nuisance tuple (prel_theta, T_res, Y_res, recentered Z, delta)."""
        pred_Y = self._model_Y_X.predict(X, W)
        pred_T_Z0 = self._model_T_XZ.predict(X, W, np.zeros(Z.shape))
        pred_T_Z1 = self._model_T_XZ.predict(X, W, np.ones(Z.shape))
        # Half-difference and midpoint of the compliance predictions at Z=1 vs Z=0.
        delta = (pred_T_Z1 - pred_T_Z0) / 2
        pred_T_mid = (pred_T_Z1 + pred_T_Z0) / 2
        prel_theta = self._prel_model_effect.effect(X)
        if X is None:  # In this case predict above returns a single row
            pred_Y = np.tile(pred_Y.reshape(1, -1), (Y.shape[0], 1))
            prel_theta = np.tile(prel_theta.reshape(1, -1), (T.shape[0], 1))
        res_Y = Y - pred_Y.reshape(Y.shape)
        res_T = T - pred_T_mid.reshape(T.shape)
        return prel_theta, res_T, res_Y, 2 * Z - 1, delta
class _IntentToTreatDRIV(_BaseDRIV):
    """
    Helper class for the DRIV algorithm for the intent-to-treat A/B test setting
    """

    def __init__(self, model_Y_X, model_T_XZ,
                 prel_model_effect,
                 model_effect,
                 featurizer=None,
                 fit_cate_intercept=True,
                 cov_clip=.1,
                 n_splits=3,
                 opt_reweighted=False,
                 categories='auto',
                 random_state=None):
        """
        Assemble the intent-to-treat nuisance model and hand it to _BaseDRIV.

        Parameters
        ----------
        model_Y_X : estimator
            model for :math:`\\E[Y | X]` (already wrapped by the caller — see IntentToTreatDRIV)
        model_T_XZ : estimator
            model for :math:`\\E[T | X, Z]` (already wrapped by the caller)
        prel_model_effect : estimator
            preliminary CATE model used to seed the doubly-robust correction
        model_effect : estimator
            final model for the CATE, fit on the doubly-robust effect estimates
        """
        # TODO: check that Y, T, Z do not have multiple columns
        # Intent-to-treat implies binary instrument and binary treatment, hence the
        # hard-coded discrete_instrument=True, discrete_treatment=True below.
        super().__init__(_IntentToTreatDRIVModelNuisance(model_Y_X, model_T_XZ, prel_model_effect),
                         model_effect,
                         featurizer=featurizer,
                         fit_cate_intercept=fit_cate_intercept,
                         cov_clip=cov_clip,
                         n_splits=n_splits,
                         discrete_instrument=True, discrete_treatment=True,
                         categories=categories,
                         opt_reweighted=opt_reweighted,
                         random_state=random_state)
class _DummyCATE:
"""
A dummy cate effect model that always returns zero effect
"""
def __init__(self):
return
def fit(self, y, T, *, Z, X, W=None, sample_weight=None, groups=None):
return self
def effect(self, X):
if X is None:
return np.zeros(1)
return np.zeros(X.shape[0])
class IntentToTreatDRIV(_IntentToTreatDRIV):
    """
    Implements the DRIV algorithm for the intent-to-treat A/B test setting

    Parameters
    ----------
    model_Y_X : estimator
        model to estimate :math:`\\E[Y | X]`. Must support `fit` and `predict` methods.
    model_T_XZ : estimator
        model to estimate :math:`\\E[T | X, Z]`. Must support `fit` and `predict_proba` methods.
    flexible_model_effect : estimator
        a flexible model for a preliminary version of the CATE, must accept sample_weight at fit time.
    final_model_effect : estimator, optional
        a final model for the CATE and projections. If None, then flexible_model_effect is also used as a final model
    featurizer : :term:`transformer`, optional, default None
        Must support fit_transform and transform. Used to create composite features in the final CATE regression.
        It is ignored if X is None. The final CATE will be trained on the outcome of featurizer.fit_transform(X).
        If featurizer=None, then CATE is trained on X.
    fit_cate_intercept : bool, optional, default True
        Whether the linear CATE model should have a constant term.
    cov_clip : float, optional, default 0.1
        clipping of the covariate for regions with low "overlap", to reduce variance
    n_splits: int, cross-validation generator or an iterable, optional, default 3
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - :term:`cv splitter`
        - An iterable yielding (train, test) splits as arrays of indices.

        For integer/None inputs, if the treatment is discrete
        :class:`~sklearn.model_selection.StratifiedKFold` is used, else,
        :class:`~sklearn.model_selection.KFold` is used
        (with a random shuffle in either case).

        Unless an iterable is used, we call `split(concat[W, X], T)` to generate the splits. If all
        W, X are None, then we call `split(ones((T.shape[0], 1)), T)`.
    opt_reweighted : bool, optional, default False
        Whether to reweight the samples to minimize variance. If True then
        final_model_effect.fit must accept sample_weight as a kw argument (WeightWrapper from
        utilities can be used for any linear model to enable sample_weights). If True then
        assumes the final_model_effect is flexible enough to fit the true CATE model. Otherwise,
        it method will return a biased projection to the model_effect space, biased
        to give more weight on parts of the feature space where the instrument is strong.
    categories: 'auto' or list, default 'auto'
        The categories to use when encoding discrete treatments (or 'auto' to use the unique sorted values).
        The first category will be treated as the control treatment.
    random_state: int, :class:`~numpy.random.mtrand.RandomState` instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random number generator;
        If None, the random number generator is the :class:`~numpy.random.mtrand.RandomState` instance used
        by :mod:`np.random<numpy.random>`.
    """

    def __init__(self, model_Y_X, model_T_XZ,
                 flexible_model_effect,
                 final_model_effect=None,
                 featurizer=None,
                 fit_cate_intercept=True,
                 cov_clip=.1,
                 n_splits=3,
                 opt_reweighted=False,
                 categories='auto',
                 random_state=None):
        # Wrap the first-stage estimators once; both the preliminary and the final
        # DRIV learner share the same wrapped instances.
        wrapped_Y_X = _FirstStageWrapper(model_Y_X, discrete_target=False)
        wrapped_T_XZ = _FirstStageWrapper(model_T_XZ, discrete_target=True)
        # Preliminary CATE: a DRIV learner seeded with a zero-effect dummy model,
        # a single split, a tiny covariance clip and variance-minimizing weights.
        prel_model_effect = _IntentToTreatDRIV(wrapped_Y_X,
                                               wrapped_T_XZ,
                                               _DummyCATE(),
                                               flexible_model_effect,
                                               cov_clip=1e-7, n_splits=1,
                                               opt_reweighted=True,
                                               random_state=random_state)
        effect_model = flexible_model_effect if final_model_effect is None else final_model_effect
        super().__init__(wrapped_Y_X, wrapped_T_XZ, prel_model_effect,
                         effect_model,
                         featurizer=featurizer,
                         fit_cate_intercept=fit_cate_intercept,
                         cov_clip=cov_clip,
                         n_splits=n_splits,
                         opt_reweighted=opt_reweighted,
                         categories=categories,
                         random_state=random_state)

    @property
    def models_Y_X(self):
        # Unwrap the per-fold fitted E[Y|X] models.
        return [fold_nuisance._model_Y_X._model for fold_nuisance in super().models_nuisance]

    @property
    def models_T_XZ(self):
        # Unwrap the per-fold fitted E[T|X,Z] models.
        return [fold_nuisance._model_T_XZ._model for fold_nuisance in super().models_nuisance]

    @property
    def nuisance_scores_Y_X(self):
        # First slot of the nuisance score tuple (see _IntentToTreatDRIVModelNuisance.score).
        return self.nuisance_scores_[0]

    @property
    def nuisance_scores_T_XZ(self):
        return self.nuisance_scores_[1]

    @property
    def nuisance_scores_effect(self):
        return self.nuisance_scores_[2]
class LinearIntentToTreatDRIV(StatsModelsCateEstimatorMixin, IntentToTreatDRIV):
    """
    Implements the DRIV algorithm for the intent-to-treat A/B test setting

    Parameters
    ----------
    model_Y_X : estimator
        model to estimate :math:`\\E[Y | X]`. Must support `fit` and `predict` methods.
    model_T_XZ : estimator
        model to estimate :math:`\\E[T | X, Z]`. Must support `fit` and `predict_proba` methods.
    flexible_model_effect : estimator
        a flexible model for a preliminary version of the CATE, must accept sample_weight at fit time.
    featurizer : :term:`transformer`, optional, default None
        Must support fit_transform and transform. Used to create composite features in the final CATE regression.
        It is ignored if X is None. The final CATE will be trained on the outcome of featurizer.fit_transform(X).
        If featurizer=None, then CATE is trained on X.
    fit_cate_intercept : bool, optional, default True
        Whether the linear CATE model should have a constant term.
    cov_clip : float, optional, default 0.1
        clipping of the covariate for regions with low "overlap", to reduce variance
    n_splits: int, cross-validation generator or an iterable, optional, default 3
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - :term:`cv splitter`
        - An iterable yielding (train, test) splits as arrays of indices.

        For integer/None inputs, if the treatment is discrete
        :class:`~sklearn.model_selection.StratifiedKFold` is used, else,
        :class:`~sklearn.model_selection.KFold` is used
        (with a random shuffle in either case).

        Unless an iterable is used, we call `split(concat[W, X], T)` to generate the splits. If all
        W, X are None, then we call `split(ones((T.shape[0], 1)), T)`.
    categories: 'auto' or list, default 'auto'
        The categories to use when encoding discrete treatments (or 'auto' to use the unique sorted values).
        The first category will be treated as the control treatment.
    random_state: int, :class:`~numpy.random.mtrand.RandomState` instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random number generator;
        If None, the random number generator is the :class:`~numpy.random.mtrand.RandomState` instance used
        by :mod:`np.random<numpy.random>`.
    """

    def __init__(self, model_Y_X, model_T_XZ,
                 flexible_model_effect,
                 featurizer=None,
                 fit_cate_intercept=True,
                 cov_clip=.1,
                 n_splits=3,
                 categories='auto',
                 random_state=None):
        # fit_intercept=False: when fit_cate_intercept is True, the intercept column is
        # appended by the featurizer wrapper in _BaseDRIVModelFinal, so the statsmodels
        # final regression must not add its own.
        # NOTE(review): opt_reweighted is pinned to False here, presumably so the
        # statsmodels-based inference remains valid — confirm.
        super().__init__(model_Y_X, model_T_XZ,
                         flexible_model_effect=flexible_model_effect,
                         featurizer=featurizer,
                         fit_cate_intercept=fit_cate_intercept,
                         final_model_effect=StatsModelsLinearRegression(fit_intercept=False),
                         cov_clip=cov_clip, n_splits=n_splits, opt_reweighted=False,
                         categories=categories, random_state=random_state)

    # override only so that we can update the docstring to indicate support for `StatsModelsInference`
    @_deprecate_positional("X, W, and Z should be passed by keyword only. In a future release "
                           "we will disallow passing X, W, and Z by position.", ['X', 'W', 'Z'])
    def fit(self, Y, T, Z, X=None, W=None, *, sample_weight=None, sample_var=None, groups=None, inference='auto'):
        """
        Estimate the counterfactual model from data, i.e. estimates function :math:`\\theta(\\cdot)`.

        Parameters
        ----------
        Y: (n, d_y) matrix or vector of length n
            Outcomes for each sample
        T: (n, d_t) matrix or vector of length n
            Treatments for each sample
        Z: (n, d_z) matrix or vector of length n
            Instruments for each sample
        X: optional(n, d_x) matrix or None (Default=None)
            Features for each sample
        W: optional(n, d_w) matrix or None (Default=None)
            Controls for each sample
        sample_weight: optional(n,) vector or None (Default=None)
            Weights for each samples
        sample_var: optional(n,) vector or None (Default=None)
            Sample variance for each sample
        groups: (n,) vector, optional
            All rows corresponding to the same group will be kept together during splitting.
            If groups is not None, the n_splits argument passed to this class's initializer
            must support a 'groups' argument to its split method.
        inference: string,:class:`.Inference` instance, or None
            Method for performing inference. This estimator supports 'bootstrap'
            (or an instance of:class:`.BootstrapInference`) and 'statsmodels'
            (or an instance of :class:`.StatsModelsInference`).

        Returns
        -------
        self : instance
        """
        return super().fit(Y, T, Z=Z, X=X, W=W,
                           sample_weight=sample_weight, sample_var=sample_var, groups=groups,
                           inference=inference)
| 46.562376
| 118
| 0.633523
| 9,710
| 70,542
| 4.426468
| 0.06241
| 0.034062
| 0.018008
| 0.02066
| 0.814267
| 0.79463
| 0.777762
| 0.764454
| 0.75203
| 0.725041
| 0
| 0.003821
| 0.280174
| 70,542
| 1,514
| 119
| 46.593131
| 0.842629
| 0.470202
| 0
| 0.578689
| 0
| 0
| 0.043238
| 0
| 0
| 0
| 0
| 0.002642
| 0.014754
| 1
| 0.121311
| false
| 0.013115
| 0.018033
| 0.02459
| 0.277049
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0ad14b028d2634f04a60d8f9deff786f998d6e8d
| 26,643
|
py
|
Python
|
tests/test_stream_xep_0060.py
|
imo/SleekXMPP
|
8175ed572888551314fe43304ab8acd2278c809b
|
[
"BSD-3-Clause"
] | 3
|
2019-02-01T06:50:08.000Z
|
2020-03-24T00:45:31.000Z
|
tests/test_stream_xep_0060.py
|
imo/SleekXMPP
|
8175ed572888551314fe43304ab8acd2278c809b
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_stream_xep_0060.py
|
imo/SleekXMPP
|
8175ed572888551314fe43304ab8acd2278c809b
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import time
import threading
from sleekxmpp.test import *
from sleekxmpp.stanza.atom import AtomEntry
from sleekxmpp.xmlstream import register_stanza_plugin
class TestStreamPubsub(SleekTest):
"""
Test using the XEP-0060 plugin.
"""
def setUp(self):
    """Start a fresh stream session before each test."""
    self.stream_start()
def tearDown(self):
    """Close the stream session after each test."""
    self.stream_close()
def testCreateInstantNode(self):
    """Create an instant (server-named) node and verify the exchanged stanzas."""
    # create_node blocks waiting for the Iq result, so drive it from a worker thread.
    worker = threading.Thread(name='create_node',
                              target=self.xmpp['xep_0060'].create_node,
                              args=('pubsub.example.com', None))
    worker.start()
    self.send("""
      <iq type="set" id="1" to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <create />
        </pubsub>
      </iq>
    """)
    self.recv("""
      <iq type="result" id="1"
          to="tester@localhost" from="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <create node="25e3d37dabbab9541f7523321421edc5bfeb2dae" />
        </pubsub>
      </iq>
    """)
    worker.join()
def testCreateNodeNoConfig(self):
    """Create a named node without supplying a configuration form."""
    pubsub = self.xmpp['xep_0060']
    pubsub.create_node('pubsub.example.com', 'princely_musings', block=False)
    self.send("""
      <iq type="set" id="1" to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <create node="princely_musings" />
        </pubsub>
      </iq>
    """)
def testCreateNodeConfig(self):
    """Create a named node with a node-configuration data form attached."""
    config_form = self.xmpp['xep_0004'].stanza.Form()
    config_form['type'] = 'submit'
    config_form.add_field(var='pubsub#access_model', value='whitelist')
    pubsub = self.xmpp['xep_0060']
    pubsub.create_node('pubsub.example.com', 'princely_musings',
                       config=config_form, block=False)
    self.send("""
      <iq type="set" id="1" to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <create node="princely_musings" />
          <configure>
            <x xmlns="jabber:x:data" type="submit">
              <field var="pubsub#access_model">
                <value>whitelist</value>
              </field>
              <field var="FORM_TYPE">
                <value>http://jabber.org/protocol/pubsub#node_config</value>
              </field>
            </x>
          </configure>
        </pubsub>
      </iq>
    """)
def testDeleteNode(self):
    """Delete a node and verify the owner-namespace delete stanza."""
    pubsub = self.xmpp['xep_0060']
    pubsub.delete_node('pubsub.example.com', 'some_node', block=False)
    self.send("""
      <iq type="set" to="pubsub.example.com" id="1">
        <pubsub xmlns="http://jabber.org/protocol/pubsub#owner">
          <delete node="some_node" />
        </pubsub>
      </iq>
    """)
def testSubscribeCase1(self):
    """
    Test subscribing to a node: Case 1:
    No subscribee, default 'from' JID, bare JID.

    The plugin should fall back to the session's own bare JID
    (tester@localhost) as the subscription JID.
    """
    self.xmpp['xep_0060'].subscribe(
        'pubsub.example.com',
        'somenode',
        block=False)
    self.send("""
      <iq type="set" id="1" to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <subscribe node="somenode" jid="tester@localhost" />
        </pubsub>
      </iq>
    """)
def testSubscribeCase2(self):
    """
    Test subscribing to a node: Case 2:
    No subscribee, given 'from' JID, bare JID.

    The subscription JID is derived from ifrom, stripped to bare form.
    """
    self.xmpp['xep_0060'].subscribe(
        'pubsub.example.com',
        'somenode',
        ifrom='foo@comp.example.com/bar',
        block=False)
    self.send("""
      <iq type="set" id="1"
          to="pubsub.example.com" from="foo@comp.example.com/bar">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <subscribe node="somenode" jid="foo@comp.example.com" />
        </pubsub>
      </iq>
    """)
def testSubscribeCase3(self):
    """
    Test subscribing to a node: Case 3:
    No subscribee, given 'from' JID, full JID.

    With bare=False the full ifrom JID (resource included) is used.
    """
    self.xmpp['xep_0060'].subscribe(
        'pubsub.example.com',
        'somenode',
        ifrom='foo@comp.example.com/bar',
        bare=False,
        block=False)
    self.send("""
      <iq type="set" id="1"
          to="pubsub.example.com" from="foo@comp.example.com/bar">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <subscribe node="somenode" jid="foo@comp.example.com/bar" />
        </pubsub>
      </iq>
    """)
def testSubscribeCase4(self):
    """
    Test subscribing to a node: Case 4:
    No subscribee, no 'from' JID, full JID.

    The stream is restarted with a resource-qualified session JID so
    the plugin's fallback uses the full boundjid.
    """
    self.stream_close()
    self.stream_start(jid='tester@localhost/full')
    self.xmpp['xep_0060'].subscribe(
        'pubsub.example.com',
        'somenode',
        bare=False,
        block=False)
    self.send("""
      <iq type="set" id="1"
          to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <subscribe node="somenode" jid="tester@localhost/full" />
        </pubsub>
      </iq>
    """)
def testSubscribeCase5(self):
    """
    Test subscribing to a node: Case 5:
    Subscribee given explicitly.

    An explicit subscribee overrides any JID derived from ifrom.
    """
    self.xmpp['xep_0060'].subscribe(
        'pubsub.example.com',
        'somenode',
        subscribee='user@example.com/foo',
        ifrom='foo@comp.example.com/bar',
        block=False)
    self.send("""
      <iq type="set" id="1"
          to="pubsub.example.com" from="foo@comp.example.com/bar">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <subscribe node="somenode" jid="user@example.com/foo" />
        </pubsub>
      </iq>
    """)
def testSubscribeWithOptions(self):
    """Test subscribing to a node with a subscribe-options form.

    The submitted form should be carried in an <options/> sibling of
    <subscribe/>; a boolean field value of False serializes as '0'.
    """
    opts = self.xmpp['xep_0004'].make_form()
    opts.add_field(
        var='FORM_TYPE',
        value='http://jabber.org/protocol/pubsub#subscribe_options',
        ftype='hidden')
    opts.add_field(
        var='pubsub#digest',
        value=False,
        ftype='boolean')
    opts['type'] = 'submit'
    self.xmpp['xep_0060'].subscribe(
        'pubsub.example.com',
        'somenode',
        options=opts,
        block=False)
    self.send("""
      <iq type="set" id="1" to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <subscribe node="somenode" jid="tester@localhost" />
          <options>
            <x xmlns="jabber:x:data" type="submit">
              <field var="FORM_TYPE">
                <value>http://jabber.org/protocol/pubsub#subscribe_options</value>
              </field>
              <field var="pubsub#digest">
                <value>0</value>
              </field>
            </x>
          </options>
        </pubsub>
      </iq>
    """)
def testUnsubscribeCase1(self):
    """
    Test unsubscribing from a node: Case 1:
    No subscribee, default 'from' JID, bare JID.

    Mirrors testSubscribeCase1: the session's bare JID is used.
    """
    self.xmpp['xep_0060'].unsubscribe(
        'pubsub.example.com',
        'somenode',
        block=False)
    self.send("""
      <iq type="set" id="1" to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <unsubscribe node="somenode" jid="tester@localhost" />
        </pubsub>
      </iq>
    """)
def testUnsubscribeCase2(self):
    """
    Test unsubscribing from a node: Case 2:
    No subscribee, given 'from' JID, bare JID.
    """
    self.xmpp['xep_0060'].unsubscribe(
        'pubsub.example.com',
        'somenode',
        ifrom='foo@comp.example.com/bar',
        block=False)
    self.send("""
      <iq type="set" id="1"
          to="pubsub.example.com" from="foo@comp.example.com/bar">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <unsubscribe node="somenode" jid="foo@comp.example.com" />
        </pubsub>
      </iq>
    """)
def testUnsubscribeCase3(self):
    """
    Test unsubscribing from a node: Case 3:
    No subscribee, given 'from' JID, full JID (bare=False).
    """
    self.xmpp['xep_0060'].unsubscribe(
        'pubsub.example.com',
        'somenode',
        ifrom='foo@comp.example.com/bar',
        bare=False,
        block=False)
    self.send("""
      <iq type="set" id="1"
          to="pubsub.example.com" from="foo@comp.example.com/bar">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <unsubscribe node="somenode" jid="foo@comp.example.com/bar" />
        </pubsub>
      </iq>
    """)
def testUnsubscribeCase4(self):
    """
    Test unsubscribing from a node: Case 4:
    No subscribee, no 'from' JID, full JID.

    Restarts the stream with a resource-qualified session JID so the
    plugin falls back to the full boundjid.
    """
    self.stream_close()
    self.stream_start(jid='tester@localhost/full')
    self.xmpp['xep_0060'].unsubscribe(
        'pubsub.example.com',
        'somenode',
        bare=False,
        block=False)
    self.send("""
      <iq type="set" id="1"
          to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <unsubscribe node="somenode" jid="tester@localhost/full" />
        </pubsub>
      </iq>
    """)
def testUnsubscribeCase5(self):
    """
    Test unsubscribing from a node: Case 5:
    Subscribee given explicitly (overrides ifrom-derived JID).
    """
    self.xmpp['xep_0060'].unsubscribe(
        'pubsub.example.com',
        'somenode',
        subscribee='user@example.com/foo',
        ifrom='foo@comp.example.com/bar',
        block=False)
    self.send("""
      <iq type="set" id="1"
          to="pubsub.example.com" from="foo@comp.example.com/bar">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <unsubscribe node="somenode" jid="user@example.com/foo" />
        </pubsub>
      </iq>
    """)
def testGetDefaultNodeConfig(self):
    """Test retrieving the default node config for a pubsub service.

    With no node given, an empty <default/> is sent in the
    pubsub#owner namespace.
    """
    self.xmpp['xep_0060'].get_node_config(
        'pubsub.example.com',
        block=False)
    # use_values=False: compare the raw XML structure directly —
    # presumably because interface-value extraction does not round-trip
    # this stanza; TODO confirm.
    self.send("""
      <iq type="get" id="1" to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub#owner">
          <default />
        </pubsub>
      </iq>
    """, use_values=False)
def testGetNodeConfig(self):
    """Test getting the config for a given node."""
    self.xmpp['xep_0060'].get_node_config(
        'pubsub.example.com',
        'somenode',
        block=False)
    self.send("""
      <iq type="get" id="1" to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub#owner">
          <configure node="somenode" />
        </pubsub>
      </iq>
    """, use_values=False)
def testSetNodeConfig(self):
    """Test setting the configuration for a node.

    The submitted form is wrapped in <configure/> under the
    pubsub#owner namespace.
    """
    form = self.xmpp['xep_0004'].make_form()
    form.add_field(var='FORM_TYPE', ftype='hidden',
                   value='http://jabber.org/protocol/pubsub#node_config')
    form.add_field(var='pubsub#title', ftype='text-single',
                   value='This is awesome!')
    form['type'] = 'submit'
    self.xmpp['xep_0060'].set_node_config(
        'pubsub.example.com',
        'somenode',
        form,
        block=False)
    self.send("""
      <iq type="set" id="1" to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub#owner">
          <configure node="somenode">
            <x xmlns="jabber:x:data" type="submit">
              <field var="FORM_TYPE">
                <value>http://jabber.org/protocol/pubsub#node_config</value>
              </field>
              <field var="pubsub#title">
                <value>This is awesome!</value>
              </field>
            </x>
          </configure>
        </pubsub>
      </iq>
    """)
def testPublishNoItems(self):
    """Test publishing no items (in order to generate events)."""
    self.xmpp['xep_0060'].publish(
        'pubsub.example.com',
        'somenode',
        block=False)
    self.send("""
      <iq type="set" id="1" to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <publish node="somenode" />
        </pubsub>
      </iq>
    """)
def testPublishSingle(self):
    """Test publishing a single item with an Atom payload."""
    payload = AtomEntry()
    payload['title'] = 'Test'
    # Allow <entry/> to be embedded in pubsub <item/> stanzas.
    register_stanza_plugin(self.xmpp['xep_0060'].stanza.Item, AtomEntry)
    self.xmpp['xep_0060'].publish(
        'pubsub.example.com',
        'somenode',
        id='id42',
        payload=payload,
        block=False)
    self.send("""
      <iq type="set" id="1" to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <publish node="somenode">
            <item id="id42">
              <entry xmlns="http://www.w3.org/2005/Atom">
                <title>Test</title>
              </entry>
            </item>
          </publish>
        </pubsub>
      </iq>
    """, use_values=False)
def testPublishSingleOptions(self):
    """Test publishing a single item, with publish-options."""
    payload = AtomEntry()
    payload['title'] = 'Test'
    # Allow <entry/> to be embedded in pubsub <item/> stanzas.
    register_stanza_plugin(self.xmpp['xep_0060'].stanza.Item, AtomEntry)
    options = self.xmpp['xep_0004'].make_form()
    options.add_field(var='FORM_TYPE', ftype='hidden',
                      value='http://jabber.org/protocol/pubsub#publish-options')
    options.add_field(var='pubsub#access_model', ftype='text-single',
                      value='presence')
    options['type'] = 'submit'
    self.xmpp['xep_0060'].publish(
        'pubsub.example.com',
        'somenode',
        id='ID42',
        payload=payload,
        options=options,
        block=False)
    self.send("""
      <iq type="set" id="1" to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <publish node="somenode">
            <item id="ID42">
              <entry xmlns="http://www.w3.org/2005/Atom">
                <title>Test</title>
              </entry>
            </item>
          </publish>
          <publish-options>
            <x xmlns="jabber:x:data" type="submit">
              <field var="FORM_TYPE">
                <value>http://jabber.org/protocol/pubsub#publish-options</value>
              </field>
              <field var="pubsub#access_model">
                <value>presence</value>
              </field>
            </x>
          </publish-options>
        </pubsub>
      </iq>
    """, use_values=False)
def testRetractNotify(self):
    """Test deleting an item with subscriber notification.

    Renamed from ``testRetract``: the class defined two methods with
    that name, so this earlier definition was silently shadowed by the
    later one and never executed. The unique name restores it to the
    test run (unittest discovers any method with the ``test`` prefix).
    """
    self.xmpp['xep_0060'].retract(
        'pubsub.example.com',
        'somenode',
        'ID1',
        notify=True,
        block=False)
    self.send("""
      <iq type="set" id="1" to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <retract node="somenode" notify="true">
            <item id="ID1" />
          </retract>
        </pubsub>
      </iq>
    """)
def testRetract(self):
    """Test deleting an item without requesting notification."""
    self.xmpp['xep_0060'].retract(
        'pubsub.example.com',
        'somenode',
        'ID1',
        block=False)
    self.send("""
      <iq type="set" id="1" to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <retract node="somenode">
            <item id="ID1" />
          </retract>
        </pubsub>
      </iq>
    """)
def testPurge(self):
    """Test removing all items from a node (pubsub#owner namespace)."""
    self.xmpp['xep_0060'].purge(
        'pubsub.example.com',
        'somenode',
        block=False)
    self.send("""
      <iq type="set" id="1" to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub#owner">
          <purge node="somenode" />
        </pubsub>
      </iq>
    """)
def testGetItem(self):
    """Test retrieving a single item by its id."""
    self.xmpp['xep_0060'].get_item(
        'pubsub.example.com',
        'somenode',
        'id42',
        block=False)
    self.send("""
      <iq type="get" id="1" to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <items node="somenode">
            <item id="id42" />
          </items>
        </pubsub>
      </iq>
    """)
def testGetLatestItems(self):
    """Test retrieving the most recent N items via max_items."""
    self.xmpp['xep_0060'].get_items(
        'pubsub.example.com',
        'somenode',
        max_items=3,
        block=False)
    self.send("""
      <iq type="get" id="1" to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <items node="somenode" max_items="3" />
        </pubsub>
      </iq>
    """)
def testGetAllItems(self):
    """Test retrieving all items (no max_items, no item ids)."""
    self.xmpp['xep_0060'].get_items(
        'pubsub.example.com',
        'somenode',
        block=False)
    self.send("""
      <iq type="get" id="1" to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <items node="somenode" />
        </pubsub>
      </iq>
    """)
def testGetSpecificItems(self):
    """Test retrieving a specific set of items by id."""
    self.xmpp['xep_0060'].get_items(
        'pubsub.example.com',
        'somenode',
        item_ids=['A', 'B', 'C'],
        block=False)
    self.send("""
      <iq type="get" id="1" to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <items node="somenode">
            <item id="A" />
            <item id="B" />
            <item id="C" />
          </items>
        </pubsub>
      </iq>
    """)
def testGetSubscriptionGlobalDefaultOptions(self):
    """Test getting the service-wide default subscription options.

    No node is given, so an empty <default/> is expected.
    """
    self.xmpp['xep_0060'].get_subscription_options(
        'pubsub.example.com',
        block=False)
    self.send("""
      <iq type="get" id="1" to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <default />
        </pubsub>
      </iq>
    """, use_values=False)
def testGetSubscriptionNodeDefaultOptions(self):
    """Test getting the default subscription options for one node."""
    self.xmpp['xep_0060'].get_subscription_options(
        'pubsub.example.com',
        node='somenode',
        block=False)
    self.send("""
      <iq type="get" id="1" to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <default node="somenode" />
        </pubsub>
      </iq>
    """, use_values=False)
def testGetSubscriptionOptions(self):
    """Test getting the subscription options for a node/JID pair."""
    self.xmpp['xep_0060'].get_subscription_options(
        'pubsub.example.com',
        'somenode',
        'tester@localhost',
        block=False)
    self.send("""
      <iq type="get" id="1" to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <options node="somenode" jid="tester@localhost" />
        </pubsub>
      </iq>
    """, use_values=False)
def testSetSubscriptionOptions(self):
    """Test setting the subscription options for a node/JID pair."""
    opts = self.xmpp['xep_0004'].make_form()
    opts.add_field(
        var='FORM_TYPE',
        value='http://jabber.org/protocol/pubsub#subscribe_options',
        ftype='hidden')
    opts.add_field(
        var='pubsub#digest',
        value=False,
        ftype='boolean')
    opts['type'] = 'submit'
    self.xmpp['xep_0060'].set_subscription_options(
        'pubsub.example.com',
        'somenode',
        'tester@localhost',
        opts,
        block=False)
    # NOTE(review): the expected stanza uses iq type="get", but
    # XEP-0060 §6.3.5 submits subscription options with type="set" —
    # confirm whether the plugin (and this expectation) is intentional.
    self.send("""
      <iq type="get" id="1" to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <options node="somenode" jid="tester@localhost">
            <x xmlns="jabber:x:data" type="submit">
              <field var="FORM_TYPE">
                <value>http://jabber.org/protocol/pubsub#subscribe_options</value>
              </field>
              <field var="pubsub#digest">
                <value>0</value>
              </field>
            </x>
          </options>
        </pubsub>
      </iq>
    """)
def testGetNodeSubscriptions(self):
    """Test retrieving all subscriptions for a node (owner view)."""
    self.xmpp['xep_0060'].get_node_subscriptions(
        'pubsub.example.com',
        'somenode',
        block=False)
    self.send("""
      <iq type="get" id="1" to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub#owner">
          <subscriptions node="somenode" />
        </pubsub>
      </iq>
    """)
def testGetSubscriptions(self):
    """Test retrieving a user's subscriptions across all nodes."""
    self.xmpp['xep_0060'].get_subscriptions(
        'pubsub.example.com',
        block=False)
    self.send("""
      <iq type="get" id="1" to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <subscriptions />
        </pubsub>
      </iq>
    """)
def testGetSubscriptionsForNode(self):
    """Test retrieving a user's subscriptions for a given node."""
    self.xmpp['xep_0060'].get_subscriptions(
        'pubsub.example.com',
        node='somenode',
        block=False)
    self.send("""
      <iq type="get" id="1" to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <subscriptions node="somenode" />
        </pubsub>
      </iq>
    """)
def testGetAffiliations(self):
    """Test retrieving a user's affiliations across all nodes."""
    self.xmpp['xep_0060'].get_affiliations(
        'pubsub.example.com',
        block=False)
    self.send("""
      <iq type="get" id="1" to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <affiliations />
        </pubsub>
      </iq>
    """)
def testGetAffiliationsForNode(self):
    """Test retrieving a user's affiliations for a given node.

    Renamed from the original misspelling ``testGetAffiliatinssForNode``;
    unittest discovers test methods by the ``test`` prefix, so the
    rename keeps the test in the suite while fixing the typo.
    """
    self.xmpp['xep_0060'].get_affiliations(
        'pubsub.example.com',
        node='somenode',
        block=False)
    self.send("""
      <iq type="get" id="1" to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub">
          <affiliations node="somenode" />
        </pubsub>
      </iq>
    """)
def testGetNodeAffiliations(self):
    """Test getting the affiliations for a node (owner view)."""
    self.xmpp['xep_0060'].get_node_affiliations(
        'pubsub.example.com',
        'somenode',
        block=False)
    self.send("""
      <iq type="get" id="1" to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub#owner">
          <affiliations node="somenode" />
        </pubsub>
      </iq>
    """)
def testModifySubscriptions(self):
    """Test the owner modifying node subscriptions.

    Subscriptions are passed as (jid, state) pairs.
    """
    self.xmpp['xep_0060'].modify_subscriptions(
        'pubsub.example.com',
        'somenode',
        subscriptions=[('user@example.com', 'subscribed'),
                       ('foo@example.net', 'none')],
        block=False)
    self.send("""
      <iq type="set" id="1" to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub#owner">
          <subscriptions node="somenode">
            <subscription jid="user@example.com" subscription="subscribed" />
            <subscription jid="foo@example.net" subscription="none" />
          </subscriptions>
        </pubsub>
      </iq>
    """)
def testModifyAffiliations(self):
    """Test the owner modifying node affiliations.

    Affiliations are passed as (jid, affiliation) pairs.
    """
    self.xmpp['xep_0060'].modify_affiliations(
        'pubsub.example.com',
        'somenode',
        affiliations=[('user@example.com', 'publisher'),
                      ('foo@example.net', 'none')],
        block=False)
    self.send("""
      <iq type="set" id="1" to="pubsub.example.com">
        <pubsub xmlns="http://jabber.org/protocol/pubsub#owner">
          <affiliations node="somenode">
            <affiliation jid="user@example.com" affiliation="publisher" />
            <affiliation jid="foo@example.net" affiliation="none" />
          </affiliations>
        </pubsub>
      </iq>
    """)
suite = unittest.TestLoader().loadTestsFromTestCase(TestStreamPubsub)
| 33.513208
| 86
| 0.497504
| 2,627
| 26,643
| 4.992006
| 0.082223
| 0.080067
| 0.098826
| 0.080067
| 0.804713
| 0.774821
| 0.741116
| 0.686442
| 0.667683
| 0.63863
| 0
| 0.0176
| 0.351687
| 26,643
| 794
| 87
| 33.555416
| 0.74162
| 0.07608
| 0
| 0.784195
| 0
| 0
| 0.606462
| 0.088567
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06383
| false
| 0
| 0.009119
| 0
| 0.074468
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7c0e46978194e747d3d02077a5f5051129276bde
| 22,513
|
py
|
Python
|
tests/test_prior.py
|
jmeyers314/DP_SNe
|
d5a91e96425fe99c8f4450a9f11256f3abbd43eb
|
[
"BSD-2-Clause"
] | 20
|
2017-11-20T09:44:14.000Z
|
2022-02-11T17:38:24.000Z
|
tests/test_prior.py
|
jmeyers314/DP_SNe
|
d5a91e96425fe99c8f4450a9f11256f3abbd43eb
|
[
"BSD-2-Clause"
] | 1
|
2016-07-08T01:12:51.000Z
|
2016-07-08T01:12:51.000Z
|
tests/test_prior.py
|
jmeyers314/DP_SNe
|
d5a91e96425fe99c8f4450a9f11256f3abbd43eb
|
[
"BSD-2-Clause"
] | 9
|
2017-06-30T20:46:57.000Z
|
2021-08-17T06:47:58.000Z
|
import warnings
import numpy as np
from scipy.integrate import quad, dblquad, tplquad
import dpmm
from test_utils import timer
@timer
def test_GaussianMeanKnownVariance():
    """Check the GaussianMeanKnownVariance conjugate-prior model.

    Verifies that the prior, prior predictive, posterior, posterior
    predictive, and likelihood densities each integrate to 1, that the
    posterior is proportional to prior * likelihood, and that
    integrating the likelihood against the prior reproduces the prior
    predictive (all via scipy.integrate.quad).
    """
    mu_0 = 0.15
    sigsqr_0 = 1.2
    sigsqr = 0.15
    model = dpmm.GaussianMeanKnownVariance(mu_0, sigsqr_0, sigsqr)
    D = np.r_[1.0, 2.2, 1.1, -1.13]
    mus = np.r_[1.1, 2.0, 0.1]
    # Check prior density
    r = quad(model, -np.inf, np.inf)
    np.testing.assert_almost_equal(r[0], 1.0, 10,
                                   "GaussianMeanKnownVariance prior density does not integrate to 1.0")
    # Check prior predictive density
    r = quad(model.pred, -np.inf, np.inf)
    np.testing.assert_almost_equal(
        r[0], 1.0, 10,
        "GaussianMeanKnownVariance prior predictive density does not integrate to 1.0")
    # Check posterior density
    r = quad(model.post(D), -np.inf, np.inf)
    np.testing.assert_almost_equal(
        r[0], 1.0, 10,
        "GaussianMeanKnownVariance posterior density does not integrate to 1.0")
    # Check posterior predictive density
    r = quad(model.post(D).pred, -np.inf, np.inf)
    np.testing.assert_almost_equal(
        r[0], 1.0, 10,
        "GaussianMeanKnownVariance posterior predictive density does not integrate to 1.0")
    # Check that the likelihood integrates to 1.
    r = quad(lambda x: model.like1(x, mu=1.1), -np.inf, np.inf)
    np.testing.assert_almost_equal(r[0], 1.0, 10,
                                   "GaussianMeanKnownVariance likelihood does not integrate to 1.0")
    # # Check that evidence (of single data point) integrates to 1.
    # r = quad(lambda x: model.evidence(x), -np.inf, np.inf)
    # np.testing.assert_almost_equal(r[0], 1.0, 10,
    #                                "GaussianMeanKnownVariance evidence does not integrate to 1.0")
    # # Check evidence for two data points.
    # r = dblquad(lambda x, y: model.evidence([x, y]),
    #             -np.inf, np.inf,
    #             lambda x: -np.inf, lambda x: np.inf)
    # np.testing.assert_almost_equal(r[0], 1.0, 5,
    #                                "GaussianMeanKnownVariance evidence does not integrate to 1.0")
    # # Check that posterior = prior * likelihood / evidence
    # post = model.post(D)
    # post1 = [model(mu)*model.likelihood(mu, D=D) / model.evidence(D) for mu in mus]
    # post2 = [post(mu) for mu in mus]
    # np.testing.assert_array_almost_equal(
    #     post1, post2, 10,
    #     "GaussianMeanKnownVariance posterior != prior * likelihood / evidence")
    # Check that posterior is proportional to prior * likelihood
    # (ratios are compared, so the evidence normalization drops out).
    posts = [model.post(D)(mu) for mu in mus]
    posts2 = [model(mu)*model.likelihood(D, mu) for mu in mus]
    np.testing.assert_array_almost_equal(
        posts/posts[0], posts2/posts2[0], 5,
        "GaussianMeanKnownVariance posterior not proportional to prior * likelihood.")
    # Check that integrating out theta yields the prior predictive.
    xs = [0.1, 0.2, 0.3, 0.4]
    preds1 = np.array([quad(lambda theta: model(theta) * model.like1(x, theta), -np.inf, np.inf)[0] for x in xs])
    preds2 = np.array([model.pred(x) for x in xs])
    np.testing.assert_array_almost_equal(
        preds1/preds1[0], preds2/preds2[0], 5,
        "Prior predictive not proportional to integral of likelihood * prior")
@timer
def test_InvGamma():
    """Check the InvGamma conjugate-prior model.

    Verifies density normalizations (prior, predictive, posterior,
    likelihood), posterior proportionality to prior * likelihood, the
    analytic mean and variance against numerical integrals, and that
    integrating out the variance reproduces the prior predictive.
    """
    alpha = 1.1
    beta = 1.2
    mu = 0.1
    ig = dpmm.InvGamma(alpha, beta, mu)
    ig.sample()
    # Check prior density
    r = quad(ig, 0.0, np.inf)
    np.testing.assert_almost_equal(r[0], 1.0, 5, "InvGamma prior density does not integrate to 1.0")
    # Check prior predictive density
    r = quad(ig.pred, -np.inf, np.inf)
    np.testing.assert_almost_equal(r[0], 1.0, 10,
                                   "InvGamma prior predictive density does not integrate to 1.0")
    # Check posterior density
    D = [1.0, 2.0, 3.0]
    r = quad(ig.post(D), 0.0, np.inf)
    np.testing.assert_almost_equal(r[0], 1.0, 7,
                                   "InvGamma posterior density does not integrate to 1.0")
    # Check posterior predictive density
    r = quad(ig.post(D).pred, -np.inf, np.inf)
    np.testing.assert_almost_equal(
        r[0], 1.0, 10, "InvGamma posterior predictive density does not integrate to 1.0")
    # Check that the likelihood integrates to 1.
    r = quad(lambda x: ig.like1(x, var=2.1), -np.inf, np.inf)
    np.testing.assert_almost_equal(r[0], 1.0, 10,
                                   "InvGamma likelihood does not integrate to 1.0")
    # Check that posterior is proportional to prior * likelihood
    # Add some more data points
    D = np.array([1.0, 2.0, 3.0, 2.2, 2.3, 1.2])
    vars_ = [0.7, 1.1, 1.2, 1.5]
    posts = [ig.post(D)(var) for var in vars_]
    posts2 = [ig(var)*ig.likelihood(D, var) for var in vars_]
    np.testing.assert_array_almost_equal(
        posts/posts[0], posts2/posts2[0], 5,
        "InvGamma posterior not proportional to prior * likelihood.")
    # Check mean and variance against numerical integrals.
    mean = 1./beta/(alpha-1.0)
    np.testing.assert_almost_equal(quad(lambda x: ig(x)*x, 0.0, np.inf)[0], mean, 10,
                                   "InvGamma has wrong mean.")
    var = beta**(-2)/(alpha-1)**2/(alpha-2)
    # alpha < 2 here, so the analytic variance is negative/undefined and
    # quad warns; warnings are suppressed for this comparison.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        np.testing.assert_almost_equal(quad(lambda x: ig(x)*(x-mean)**2, 0.0, np.inf)[0], var, 5,
                                       "InvGamma has wrong variance.")
    # Check that integrating out theta yields the prior predictive.
    xs = [0.1, 0.2, 0.3, 0.4]
    preds1 = np.array([quad(lambda theta: ig(theta) * ig.like1(x, theta), 0, np.inf)[0] for x in xs])
    preds2 = np.array([ig.pred(x) for x in xs])
    np.testing.assert_array_almost_equal(
        preds1/preds1[0], preds2/preds2[0], 5,
        "Prior predictive not proportional to integral of likelihood * prior")
@timer
def test_InvGamma2D(full=False):
    """Check the 2-D InvGamma conjugate-prior model.

    Same battery of checks as test_InvGamma, with 2-D data points. The
    expensive 2-D dblquad integrations only run when executed as a
    script with full=True (guarded by ``__name__ == '__main__'``).
    """
    alpha = 1.1
    beta = 1.2
    mu = np.r_[0.1, 0.2]
    ig2d = dpmm.InvGamma2D(alpha, beta, mu)
    ig2d.sample()
    # Check prior density
    r = quad(ig2d, 0.0, np.inf)
    np.testing.assert_almost_equal(
        r[0], 1.0, 5, "InvGamma2D prior density does not integrate to 1.0")
    if __name__ == '__main__' and full:
        # Check prior predictive density (slow 2-D integral)
        r = dblquad(lambda x, y: ig2d.pred(np.r_[x, y]),
                    -np.inf, np.inf,
                    lambda x: -np.inf, lambda x: np.inf)
        np.testing.assert_almost_equal(
            r[0], 1.0, 5, "InvGamma2D prior predictive density does not integrate to 1.0")
    # Check posterior density
    D = np.array([[0.1, 0.2], [0.2, 0.3]])
    r = quad(ig2d.post(D), 0.0, np.inf)
    np.testing.assert_almost_equal(r[0], 1.0, 7,
                                   "InvGamma2D posterior density does not integrate to 1.0")
    # Check posterior predictive density
    r = dblquad(lambda x, y: ig2d.post(D).pred(np.r_[x, y]),
                -np.inf, np.inf,
                lambda x: -np.inf, lambda x: np.inf)
    np.testing.assert_almost_equal(
        r[0], 1.0, 5, "InvGamma2D posterior predictive density does not integrate to 1.0")
    # Check that the likelihood integrates to 1.
    r = dblquad(lambda x, y: ig2d.like1(np.r_[x, y], var=2.1),
                -np.inf, np.inf,
                lambda x: -np.inf, lambda x: np.inf)
    np.testing.assert_almost_equal(r[0], 1.0, 10,
                                   "InvGamma2D likelihood does not integrate to 1.0")
    # Check that posterior is proportional to prior * likelihood
    vars_ = [0.7, 1.1, 1.2, 1.5]
    posts = np.array([ig2d.post(D)(var) for var in vars_])
    posts2 = np.array([ig2d(var)*ig2d.likelihood(D, var) for var in vars_])
    np.testing.assert_array_almost_equal(
        posts/posts[0], posts2/posts2[0], 5,
        "InvGamma2D posterior not proportional to prior * likelihood.")
    # Check mean and variance against numerical integrals.
    mean = 1./beta/(alpha-1.0)
    np.testing.assert_almost_equal(quad(lambda x: ig2d(x)*x, 0.0, np.inf)[0], mean, 10,
                                   "InvGamma2D has wrong mean.")
    var = beta**(-2)/(alpha-1)**2/(alpha-2)
    # alpha < 2 here, so quad warns about the variance integral;
    # warnings are suppressed for this comparison.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        np.testing.assert_almost_equal(quad(lambda x: ig2d(x)*(x-mean)**2, 0.0, np.inf)[0], var, 5,
                                       "InvGamma2D has wrong variance.")
    # Check that integrating out theta yields the prior predictive.
    xs = [np.r_[0.1, 0.2], np.r_[0.2, 0.3], np.r_[0.1, 0.3]]
    preds1 = np.array([quad(lambda theta: ig2d(theta) * ig2d.like1(x, theta), 0, np.inf)[0] for x in xs])
    preds2 = np.array([ig2d.pred(x) for x in xs])
    np.testing.assert_array_almost_equal(
        preds1/preds1[0], preds2/preds2[0], 5,
        "Prior predictive not proportional to integral of likelihood * prior")
@timer
def test_NormInvChi2():
    """Check the Normal-Inverse-Chi-Squared conjugate-prior model.

    Verifies density normalizations (1-D and 2-D integrals), the exact
    posterior = prior * likelihood / evidence identity, the marginal
    mu/var methods against numerical integrals, and that integrating
    out (mu, var) reproduces the prior predictive.
    """
    mu_0 = -0.1
    sigsqr_0 = 1.1
    kappa_0 = 2
    nu_0 = 3
    nix = dpmm.NormInvChi2(mu_0, kappa_0, sigsqr_0, nu_0)
    D = np.r_[1.0, 2.0, 3.0]
    mus = np.r_[1.1, 1.2, 1.3]
    vars_ = np.r_[1.2, 3.2, 2.3]
    # Check prior density (dblquad over var in (0, inf), mu in (-inf, inf))
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        r = dblquad(nix, 0.0, np.inf, lambda x: -np.inf, lambda x: np.inf)
    np.testing.assert_almost_equal(r[0], 1.0, 5,
                                   "NormInvChi2 prior density does not integrate to 1.0")
    # Check prior predictive density
    r = quad(nix.pred, -np.inf, np.inf)
    np.testing.assert_almost_equal(r[0], 1.0, 10,
                                   "NormInvChi2 prior predictive density does not integrate to 1.0")
    # Check posterior density
    r = dblquad(nix.post(D), 0.0, np.inf, lambda x: -np.inf, lambda x: np.inf)
    np.testing.assert_almost_equal(r[0], 1.0, 7,
                                   "NormInvChi2 posterior density does not integrate to 1.0")
    # Check posterior predictive density
    r = quad(nix.post(D).pred, -np.inf, np.inf)
    np.testing.assert_almost_equal(
        r[0], 1.0, 10,
        "NormInvChi2 posterior predictive density does not integrate to 1.0")
    # Check that the likelihood integrates to 1.
    r = quad(lambda x: nix.like1(x, 1.1, 2.1), -np.inf, np.inf)
    np.testing.assert_almost_equal(r[0], 1.0, 10,
                                   "NormInvChi2 likelihood does not integrate to 1.0")
    # Check that evidence (of single data point) integrates to 1.
    r = quad(lambda x: nix.evidence(x), -np.inf, np.inf)
    np.testing.assert_almost_equal(r[0], 1.0, 10,
                                   "NormInvChi2 evidence does not integrate to 1.0")
    # Check evidence for two data points.
    r = dblquad(lambda x, y: nix.evidence([x, y]),
                -np.inf, np.inf,
                lambda x: -np.inf, lambda x: np.inf)
    np.testing.assert_almost_equal(r[0], 1.0, 5,
                                   "NormInvChi2 evidence does not integrate to 1.0")
    # Check that posterior = prior * likelihood / evidence (exactly,
    # not just proportionally, since the evidence is available here).
    post = nix.post(D)
    post1 = [nix(mu, var)*nix.likelihood(D, mu, var) / nix.evidence(D)
             for mu, var in zip(mus, vars_)]
    post2 = [post(mu, var) for mu, var in zip(mus, vars_)]
    np.testing.assert_array_almost_equal(post1, post2, 10,
                                         "NormInvChi2 posterior != prior * likelihood / evidence")
    # Test that marginal variance probability method matches integrated result.
    Pr_var1 = [nix.marginal_var(var) for var in vars_]
    Pr_var2 = [quad(lambda mu: nix(mu, var), -np.inf, np.inf)[0] for var in vars_]
    np.testing.assert_array_almost_equal(
        Pr_var1, Pr_var2, 10,
        "Pr(var) method calculation does not match integrated result.")
    # Test that marginal mean probability method matches integrated result.
    Pr_mu1 = [nix.marginal_mu(mu) for mu in mus]
    Pr_mu2 = [quad(lambda var: nix(mu, var), 0.0, np.inf)[0] for mu in mus]
    np.testing.assert_array_almost_equal(
        Pr_mu1, Pr_mu2, 10,
        "Pr(mu) method calculation does not match integrated result.")
    # Check that integrating out theta yields the prior predictive.
    xs = [0.1, 0.2, 0.3, 0.4]
    preds1 = np.array([dblquad(lambda mu, var: nix(mu, var) * nix.like1(x, mu, var),
                               0, np.inf,
                               lambda var: -np.inf, lambda var: np.inf)[0]
                       for x in xs])
    preds2 = np.array([nix.pred(x) for x in xs])
    np.testing.assert_array_almost_equal(
        preds1/preds1[0], preds2/preds2[0], 5,
        "Prior predictive not proportional to integral of likelihood * prior")
@timer
def test_NormInvGamma():
    """Check the Normal-Inverse-Gamma conjugate-prior model.

    Same battery of checks as test_NormInvChi2, for the NIG
    parameterization (m_0, V_0, a_0, b_0).
    """
    m_0 = -0.1
    V_0 = 1.1
    a_0 = 2.0
    b_0 = 3.0
    nig = dpmm.NormInvGamma(m_0, V_0, a_0, b_0)
    D = np.r_[1.0, 2.0, 3.0]
    mus = np.r_[1.1, 1.2, 1.3]
    vars_ = np.r_[1.2, 3.2, 2.3]
    # Check prior density (dblquad over var in (0, inf), mu in (-inf, inf))
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        r = dblquad(nig, 0.0, np.inf, lambda x: -np.inf, lambda x: np.inf)
    np.testing.assert_almost_equal(r[0], 1.0, 5,
                                   "NormInvGamma prior density does not integrate to 1.0")
    # Check prior predictive density
    r = quad(nig.pred, -np.inf, np.inf)
    np.testing.assert_almost_equal(
        r[0], 1.0, 10,
        "NormInvGamma prior predictive density does not integrate to 1.0")
    # Check posterior density
    r = dblquad(nig.post(D), 0.0, np.inf, lambda x: -np.inf, lambda x: np.inf)
    np.testing.assert_almost_equal(r[0], 1.0, 7,
                                   "NormInvGamma posterior density does not integrate to 1.0")
    # Check posterior predictive density
    r = quad(nig.post(D).pred, -np.inf, np.inf)
    np.testing.assert_almost_equal(
        r[0], 1.0, 10,
        "NormInvGamma posterior predictive density does not integrate to 1.0")
    # Check that the likelihood integrates to 1.
    r = quad(lambda x: nig.like1(x, 1.1, 2.1), -np.inf, np.inf)
    np.testing.assert_almost_equal(r[0], 1.0, 10,
                                   "NormInvGamma likelihood does not integrate to 1.0")
    # Check that evidence (of single data point) integrates to 1.
    r = quad(lambda x: nig.evidence(x), -np.inf, np.inf)
    np.testing.assert_almost_equal(r[0], 1.0, 10,
                                   "NormInvGamma evidence does not integrate to 1.0")
    # Check evidence for two data points.
    r = dblquad(lambda x, y: nig.evidence([x, y]),
                -np.inf, np.inf,
                lambda x: -np.inf, lambda x: np.inf)
    np.testing.assert_almost_equal(r[0], 1.0, 5,
                                   "NormInvGamma evidence does not integrate to 1.0")
    # Check that posterior = prior * likelihood / evidence
    post = nig.post(D)
    post1 = [nig(mu, var)*nig.likelihood(D, mu, var) / nig.evidence(D)
             for mu, var in zip(mus, vars_)]
    post2 = [post(mu, var) for mu, var in zip(mus, vars_)]
    np.testing.assert_array_almost_equal(post1, post2, 10,
                                         "NormInvGamma posterior != prior * likelihood / evidence")
    # Test that marginal variance probability method matches integrated result.
    Pr_var1 = [nig.marginal_var(var) for var in vars_]
    Pr_var2 = [quad(lambda mu: nig(mu, var), -np.inf, np.inf)[0] for var in vars_]
    np.testing.assert_array_almost_equal(
        Pr_var1, Pr_var2, 10,
        "Pr(var) method calculation does not match integrated result.")
    # Test that marginal mean probability method matches integrated result.
    Pr_mu1 = [nig.marginal_mu(mu) for mu in mus]
    Pr_mu2 = [quad(lambda var: nig(mu, var), 0.0, np.inf)[0] for mu in mus]
    np.testing.assert_array_almost_equal(
        Pr_mu1, Pr_mu2, 10,
        "Pr(mu) method calculation does not match integrated result.")
    # Check that integrating out theta yields the prior predictive.
    xs = [0.1, 0.2, 0.3, 0.4]
    preds1 = np.array([dblquad(lambda mu, var: nig(mu, var) * nig.like1(x, mu, var),
                               0, np.inf,
                               lambda var: -np.inf, lambda var: np.inf)[0]
                       for x in xs])
    preds2 = np.array([nig.pred(x) for x in xs])
    np.testing.assert_array_almost_equal(
        preds1/preds1[0], preds2/preds2[0], 5,
        "Prior predictive not proportional to integral of likelihood * prior")
@timer
def test_NormInvChi2_eq_NormInvGamma():
    """Check that NormInvChi2 and NormInvGamma agree under the standard
    reparameterization (m_0=mu_0, V_0=1/kappa_0, a_0=nu_0/2,
    b_0=nu_0*sigsqr_0/2) for prior, posterior, predictive, likelihood,
    and evidence.

    Fix: the first assertion's failure message used
    ``"... at x = ".format(x)`` — the ``{}`` placeholder was missing,
    so the offending x value was silently dropped from the message.
    """
    mu_0 = 0.1
    sigsqr_0 = 1.1
    kappa_0 = 2
    nu_0 = 3
    m_0 = mu_0
    V_0 = 1./kappa_0
    a_0 = nu_0/2.0
    b_0 = nu_0*sigsqr_0/2.0
    model1 = dpmm.NormInvChi2(mu_0, kappa_0, sigsqr_0, nu_0)
    model2 = dpmm.NormInvGamma(m_0, V_0, a_0, b_0)
    mus = np.linspace(-2.2, 2.2, 5)
    vars_ = np.linspace(1.0, 4.0, 5)
    # NOTE(review): arange(-1.1, 1.1, 5) uses 5 as the *step*, yielding
    # the single point [-1.1]; np.linspace(-1.1, 1.1, 5) was probably
    # intended. Left unchanged to avoid altering test coverage — confirm.
    xs = np.arange(-1.1, 1.1, 5)
    for x in xs:
        np.testing.assert_equal(
            model1.pred(x), model2.pred(x),
            "NormInvChi2 and NormInvGamma prior predictive densities don't agree at x = {}".format(x))
        np.testing.assert_equal(
            model1.post(x).pred(x), model2.post(x).pred(x),
            "NormInvChi2 and NormInvGamma posterior " +
            "predictive densities don't agree at x = {}".format(x))
    for mu, var in zip(mus, vars_):
        np.testing.assert_almost_equal(
            model1(mu, var), model2(mu, var), 10,
            "NormInvChi2 and NormInvGamma prior densities " +
            "don't agree at mu, var = {}, {}".format(mu, var))
    post1 = model1.post(xs)
    post2 = model2.post(xs)
    for mu, var in zip(mus, vars_):
        np.testing.assert_almost_equal(
            post1(mu, var), post2(mu, var), 10,
            "NormInvChi2 and NormInvGamma posterior densities " +
            "don't agree at mu, var = {}, {}".format(mu, var))
    for mu, var, x in zip(mus, vars_, xs):
        np.testing.assert_almost_equal(
            model1.like1(x, mu, var), model2.like1(x, mu, var), 10,
            "NormInvChi2 and NormInvGamma likelihoods don't " +
            "agree at mu, var, x = {}, {}, {}".format(mu, var, x))
    np.testing.assert_almost_equal(
        model1.evidence(xs), model2.evidence(xs), 10,
        "NormInvChi2 and NormInvGamma evidences don't agree")
@timer
def test_NormInvWish(full=False):
mu_0 = np.r_[0.2, 0.1]
kappa_0 = 2.0
Lam_0 = np.eye(2)+0.1
nu_0 = 3
# Create a Normal-Inverse-Wishart prior.
niw = dpmm.NormInvWish(mu_0, kappa_0, Lam_0, nu_0)
# Check that we can draw samples from NormInvWish.
niw.sample()
niw.sample(size=10)
# Check that we can evaluate a likelihood given data.
theta = np.zeros(1, dtype=niw.model_dtype)
theta['mu'] = np.r_[1.0, 1.0]
theta['Sig'] = np.eye(2)+0.12
D = np.array([[0.1, 0.2], [0.2, 0.3], [0.1, 0.2], [0.4, 0.3]])
niw.likelihood(D, theta)
# Evaluate prior
niw(theta)
if __name__ == "__main__" and full:
# Check prior predictive density
with warnings.catch_warnings():
warnings.simplefilter('ignore')
r = dblquad(lambda x, y: niw.pred(np.r_[x, y]), -np.inf, np.inf,
lambda x: -np.inf, lambda x: np.inf)
np.testing.assert_almost_equal(r[0], 1.0, 5,
"NormInvWish prior predictive density does not integrate to 1.0")
# Check posterior predictive density
r = dblquad(lambda x, y: niw.post(D).pred(np.r_[x, y]), -np.inf, np.inf,
lambda x: -np.inf, lambda x: np.inf)
np.testing.assert_almost_equal(
r[0], 1.0, 5, "NormInvWish posterior predictive density does not integrate to 1.0")
# Check that the likelihood of a single point in 2 dimensions integrates to 1.
r = dblquad(lambda x, y: niw.like1(np.r_[x, y], np.r_[1.2, 1.1], np.eye(2)+0.12),
-np.inf, np.inf, lambda x: -np.inf, lambda x: np.inf)
np.testing.assert_almost_equal(r[0], 1.0, 10,
"NormInvWish likelihood does not integrate to 1.0")
if __name__ == "__main__" and full:
# Check that likelihood of a single point in 3 dimensions integrates to 1.
niw3 = dpmm.NormInvWish(np.r_[1, 1, 1], 2.0, np.eye(3), 3)
r = tplquad(lambda x, y, z: niw3.like1(np.r_[x, y, z], np.r_[0.1, 0.2, 0.3], np.eye(3)+0.1),
-np.inf, np.inf,
lambda x: -np.inf, lambda x: np.inf,
lambda x, y: -np.inf, lambda x, y: np.inf)
np.testing.assert_almost_equal(r[0], 1.0, 8,
"NormInvWish likelihood does not integrate to 1.0")
# Check that posterior is proportional to prior * likelihood
D = np.array([[0.1, 0.2], [0.2, 0.3], [0.1, 0.2], [0.4, 0.3]])
mus = [np.r_[2.1, 1.1], np.r_[0.9, 1.2], np.r_[0.9, 1.1]]
Sigs = [np.eye(2)*1.5, np.eye(2)*0.7, np.array([[1.1, -0.1], [-0.1, 1.2]])]
posts = [niw.post(D)(mu, Sig) for mu, Sig in zip(mus, Sigs)]
posts2 = [niw(mu, Sig)*niw.likelihood(D, mu, Sig) for mu, Sig, in zip(mus, Sigs)]
np.testing.assert_array_almost_equal(
posts/posts[0], posts2/posts2[0], 5,
"NormInvWish posterior not proportional to prior * likelihood.")
# Check that posterior = prior * likelihood / evidence
mus = [np.r_[1.1, 1.1], np.r_[1.1, 1.2], np.r_[0.7, 1.3]]
Sigs = [np.eye(2)*0.2, np.eye(2)*0.1, np.array([[2.1, -0.1], [-0.1, 2.2]])]
post = niw.post(D)
post1 = [niw(mu, Sig) * niw.likelihood(D, mu, Sig) / niw.evidence(D)
for mu, Sig in zip(mus, Sigs)]
post2 = [post(mu, Sig) for mu, Sig in zip(mus, Sigs)]
np.testing.assert_array_almost_equal(post1, post2, 10,
"NormInvWish posterior != prior * likelihood / evidence")
# Would like to check that pred(x) == int prior(theta) * like1(x, theta) d(theta), but I don't
# know how to integrate over all covariance matrices. Plus, integrating over a 2D covariance
# matrix plus a 2D mean is a 5 dimensional integral, which sounds nasty to do.
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('--full', action='store_true', help="Run full test suite (slow).")
args = parser.parse_args()
test_GaussianMeanKnownVariance()
test_InvGamma()
test_InvGamma2D()
test_NormInvChi2()
test_NormInvGamma()
test_NormInvChi2_eq_NormInvGamma()
test_NormInvWish(args.full)
| 41.768089
| 113
| 0.596811
| 3,477
| 22,513
| 3.772793
| 0.057521
| 0.042689
| 0.034685
| 0.068837
| 0.815826
| 0.79349
| 0.75789
| 0.750724
| 0.725797
| 0.709636
| 0
| 0.055366
| 0.271532
| 22,513
| 538
| 114
| 41.845725
| 0.744512
| 0.158442
| 0
| 0.441417
| 0
| 0
| 0.186801
| 0.007951
| 0
| 0
| 0
| 0
| 0.160763
| 1
| 0.019074
| false
| 0
| 0.016349
| 0
| 0.035422
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7c21be921705f6079de57f15c3dd0aff925ce379
| 788
|
py
|
Python
|
tests/test_equilibrium.py
|
MartinKliemank/lettuce
|
ee1b4dbfdbcf1bd87ac6b867b091a923d033403e
|
[
"MIT"
] | 53
|
2021-04-01T06:17:52.000Z
|
2022-03-21T18:27:13.000Z
|
tests/test_equilibrium.py
|
MartinKliemank/lettuce
|
ee1b4dbfdbcf1bd87ac6b867b091a923d033403e
|
[
"MIT"
] | 48
|
2019-09-17T14:07:57.000Z
|
2020-11-18T19:53:24.000Z
|
tests/test_equilibrium.py
|
MartinKliemank/lettuce
|
ee1b4dbfdbcf1bd87ac6b867b091a923d033403e
|
[
"MIT"
] | 9
|
2021-04-16T12:57:19.000Z
|
2022-03-08T11:40:50.000Z
|
"""
Tests for equilibria
"""
import pytest
from lettuce.equilibrium import *
@pytest.mark.parametrize("Equilibrium", [QuadraticEquilibrium])
def test_equilibrium_conserves_mass(f_all_lattices, Equilibrium):
f, lattice = f_all_lattices
equilibrium = Equilibrium(lattice)
feq = equilibrium(rho=lattice.rho(f), u=lattice.u(f))
assert lattice.rho(feq).cpu().numpy() == pytest.approx(lattice.rho(f).cpu().numpy())
@pytest.mark.parametrize("Equilibrium", [QuadraticEquilibrium])
def test_equilibrium_conserves_momentum(f_all_lattices, Equilibrium):
f, lattice = f_all_lattices
equilibrium = Equilibrium(lattice)
feq = equilibrium(rho=lattice.rho(f), u=lattice.u(f))
assert lattice.j(feq).cpu().numpy() == pytest.approx(lattice.j(f).cpu().numpy(), abs=1e-6)
| 34.26087
| 94
| 0.736041
| 102
| 788
| 5.54902
| 0.303922
| 0.028269
| 0.084806
| 0.162544
| 0.819788
| 0.819788
| 0.713781
| 0.713781
| 0.713781
| 0.434629
| 0
| 0.002861
| 0.112944
| 788
| 22
| 95
| 35.818182
| 0.806867
| 0.025381
| 0
| 0.571429
| 0
| 0
| 0.028947
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 1
| 0.142857
| false
| 0
| 0.142857
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7c28fa168e1426d1c6f374e02c4e4454eab053d9
| 92
|
py
|
Python
|
02_ldprofile_pybind11/src/wrap/__init__.py
|
martinschwinzerl/pyhep2020-cxx-bindings
|
27562b6500210dbb9066e86bba5dd3abe4a47328
|
[
"MIT"
] | 1
|
2020-07-17T14:00:52.000Z
|
2020-07-17T14:00:52.000Z
|
02_ldprofile_pybind11/src/wrap/__init__.py
|
martinschwinzerl/pyhep2020-cxx-bindings
|
27562b6500210dbb9066e86bba5dd3abe4a47328
|
[
"MIT"
] | null | null | null |
02_ldprofile_pybind11/src/wrap/__init__.py
|
martinschwinzerl/pyhep2020-cxx-bindings
|
27562b6500210dbb9066e86bba5dd3abe4a47328
|
[
"MIT"
] | null | null | null |
from .ldprofile_pybind11 import CoastingLDProfile, QGaussianLDProfile, LinInterpolLDProfile
| 46
| 91
| 0.902174
| 7
| 92
| 11.714286
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023256
| 0.065217
| 92
| 1
| 92
| 92
| 0.930233
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7c2a0a19a3350a841aa7362339722606e571f500
| 8,031
|
py
|
Python
|
pjtk2/tests/api/test_project_points_polygons_api.py
|
AdamCottrill/Project-Tracker-2
|
ba37441e1936b7825b2cfc1507858d207ff874f8
|
[
"MIT"
] | null | null | null |
pjtk2/tests/api/test_project_points_polygons_api.py
|
AdamCottrill/Project-Tracker-2
|
ba37441e1936b7825b2cfc1507858d207ff874f8
|
[
"MIT"
] | null | null | null |
pjtk2/tests/api/test_project_points_polygons_api.py
|
AdamCottrill/Project-Tracker-2
|
ba37441e1936b7825b2cfc1507858d207ff874f8
|
[
"MIT"
] | null | null | null |
from django.contrib.gis.geos import GEOSGeometry
from django.test import TestCase, Client, RequestFactory
from django.urls import reverse
from rest_framework import status
from pjtk2.models import SamplePoint, ProjectPolygon
from pjtk2.api.serializers import ProjectPolygonSerializer, ProjectPointSerializer
from pjtk2.tests.factories import ProjectFactory, SamplePointFactory
from rest_framework.test import APITestCase
class ProjectAPITest(APITestCase):
def setUp(self):
self.factory = RequestFactory()
# we need to create some models with different years - starting
# with the current year.
prj_cd = "LHA_IA16_INN"
self.project1 = ProjectFactory.create(prj_cd=prj_cd, prj_nm="All In Roi")
# these are four randomly selected points that all fall within the roi
pts = [
"POINT(-82.081126628131 44.000970817096)",
"POINT(-82.0456637754061 44.0649121962459)",
"POINT(-82.024922507764 44.0171801372301)",
"POINT(-82.0017671634393 44.0513359855003)",
]
for i, pt in enumerate(pts):
SamplePointFactory.create(
project=self.project1, label="In-{}".format(i), geom=GEOSGeometry(pt)
)
self.project1.update_convex_hull()
prj_cd = "LHA_IA16_000"
self.project4 = ProjectFactory.create(prj_cd=prj_cd, prj_nm="No Points")
# ===================
# PROJECT POINTS
def test_project_points_api_get_good_project_code(self):
"""The project points api should return a series of geojson points
corresponding to sampling locations associated with a
project.
"""
slug = self.project1.slug
url = reverse("api:project_points", kwargs={"slug": slug})
request = self.factory.get(url)
response = self.client.get(url)
# get data from db
points = SamplePoint.objects.filter(project__slug=slug)
serializer = ProjectPointSerializer(
points, many=True, context={"request": request}
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, serializer.data)
def test_project_points_api_get_bad_project_code(self):
"""If we try to access the project points api with a malformed project
code it will return an error.
"""
slug = "LHA_XX15_X01"
url = reverse("api:project_points", kwargs={"slug": slug})
response = self.client.get(url.replace("X", ""))
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_project_points_api_get_project_code_doesnot_exist(self):
"""If we try to access the project points api for a project that
does not exists, code it will return a 404 error.
"""
slug = "LHA_IA15_ABC"
url = reverse("api:project_points", kwargs={"slug": slug})
request = self.factory.get(url)
response = self.client.get(url)
points = SamplePoint.objects.filter(project__slug=slug)
serializer = ProjectPointSerializer(
points, many=True, context={"request": request}
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, serializer.data)
self.assertEqual(len(response.data), 0)
self.assertEqual(len(serializer.data), 0)
def test_project_points_api_get_project_code_without_points(self):
"""If we try to access the project points api for a project without
sample points, it will handle it gracefully.
"""
slug = self.project4.slug
url = reverse("api:project_points", kwargs={"slug": slug})
request = self.factory.get(url)
response = self.client.get(url)
points = SamplePoint.objects.filter(project__slug=slug)
serializer = ProjectPointSerializer(
points, many=True, context={"request": request}
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, serializer.data)
self.assertEqual(len(response.data), 0)
self.assertEqual(len(serializer.data), 0)
def test_project_points_api_post_put_delete(self):
"""the project points api is currently readonly - any other request
type should throw an error.
"""
slug = self.project4.slug
url = reverse("api:project_points", kwargs={"slug": slug})
data = {"label": "Test-1"}
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
# =================================
# PROJECT POLYGON
def test_project_polygon_api_get_good_project_code(self):
"""The project polygon api should return a series of geojson polygon
corresponding to sampling locations associated with a
project.
"""
slug = self.project1.slug
url = reverse("api:project_polygon", kwargs={"slug": slug})
request = self.factory.get(url)
response = self.client.get(url)
# get data from db
polygon = ProjectPolygon.objects.filter(project__slug=slug)
serializer = ProjectPolygonSerializer(
polygon, many=True, context={"request": request}
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, serializer.data)
def test_project_polygon_api_get_bad_project_code(self):
"""If we try to access the project polygon api with a malformed project
code it will return an error.
"""
slug = "LHA_XX15_X01"
url = reverse("api:project_polygon", kwargs={"slug": slug})
response = self.client.get(url.replace("X", ""))
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_project_polygon_api_get_project_code_doesnot_exist(self):
"""If we try to access the project polygon api for a project that
does not exists, code it will return an empty array.
"""
slug = "LHA_IA15_ABC"
url = reverse("api:project_polygon", kwargs={"slug": slug})
request = self.factory.get(url)
response = self.client.get(url)
# get data from db
polygon = ProjectPolygon.objects.filter(project__slug=slug)
serializer = ProjectPolygonSerializer(
polygon, many=True, context={"request": request}
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, serializer.data)
self.assertEqual(len(response.data), 0)
self.assertEqual(len(serializer.data), 0)
def test_project_polygon_api_get_project_code_without_polygon(self):
"""If we try to access the project polygon api for a project without
sample polygon, it will handle it gracefully.
"""
slug = self.project4.slug
url = reverse("api:project_polygon", kwargs={"slug": slug})
request = self.factory.get(url)
response = self.client.get(url)
# get data from db
polygon = ProjectPolygon.objects.filter(project__slug=slug)
serializer = ProjectPolygonSerializer(
polygon, many=True, context={"request": request}
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, serializer.data)
self.assertEqual(len(response.data), 0)
self.assertEqual(len(serializer.data), 0)
def test_project_polygon_api_post_put_delete(self):
"""the project polygon api is currently readonly - any other request
type should throw an error.
"""
slug = self.project4.slug
url = reverse("api:project_polygon", kwargs={"slug": slug})
data = {"label": "Test-1"}
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
| 36.504545
| 85
| 0.658199
| 962
| 8,031
| 5.334719
| 0.173597
| 0.070148
| 0.071707
| 0.038971
| 0.809821
| 0.801637
| 0.786048
| 0.763445
| 0.720187
| 0.720187
| 0
| 0.031439
| 0.239572
| 8,031
| 219
| 86
| 36.671233
| 0.808908
| 0.174449
| 0
| 0.655738
| 0
| 0
| 0.085893
| 0.014107
| 0
| 0
| 0
| 0
| 0.196721
| 1
| 0.090164
| false
| 0
| 0.065574
| 0
| 0.163934
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7c884e67d811cd4cebb9e894c032818c505e522d
| 27
|
py
|
Python
|
src/detector/__init__.py
|
ymlsam/deepgaau-detector
|
3f6d8195b4b1857bc1317035d999e9bca226ce7c
|
[
"MIT"
] | null | null | null |
src/detector/__init__.py
|
ymlsam/deepgaau-detector
|
3f6d8195b4b1857bc1317035d999e9bca226ce7c
|
[
"MIT"
] | null | null | null |
src/detector/__init__.py
|
ymlsam/deepgaau-detector
|
3f6d8195b4b1857bc1317035d999e9bca226ce7c
|
[
"MIT"
] | null | null | null |
from .detector import main
| 13.5
| 26
| 0.814815
| 4
| 27
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7cda91da3939985ac1873e9204a896cc248eac5e
| 142
|
py
|
Python
|
app/schema/mutations/todo/__init__.py
|
rjNemo/graphql_python_template
|
14bc5fd657f6bdba8d7293f21cfcec821fa6374f
|
[
"MIT"
] | 1
|
2021-05-02T01:47:57.000Z
|
2021-05-02T01:47:57.000Z
|
app/schema/mutations/todo/__init__.py
|
rjNemo/graphql_python_template
|
14bc5fd657f6bdba8d7293f21cfcec821fa6374f
|
[
"MIT"
] | null | null | null |
app/schema/mutations/todo/__init__.py
|
rjNemo/graphql_python_template
|
14bc5fd657f6bdba8d7293f21cfcec821fa6374f
|
[
"MIT"
] | null | null | null |
from .close_todo import CloseTodo
from .create_todo import CreateTodo
from .delete_todo import DeleteTodo
from .update_todo import UpdateTodo
| 28.4
| 35
| 0.859155
| 20
| 142
| 5.9
| 0.55
| 0.338983
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112676
| 142
| 4
| 36
| 35.5
| 0.936508
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7cef326530a204235c99ed313d3a473debe866ed
| 47
|
py
|
Python
|
taotao-cloud-python/taotao-cloud-oldboy/day42-python-js/test.py
|
shuigedeng/taotao-cloud-paren
|
3d281b919490f7cbee4520211e2eee5da7387564
|
[
"Apache-2.0"
] | 47
|
2021-04-13T10:32:13.000Z
|
2022-03-31T10:30:30.000Z
|
taotao-cloud-python/taotao-cloud-oldboy/day42-python-js/test.py
|
shuigedeng/taotao-cloud-paren
|
3d281b919490f7cbee4520211e2eee5da7387564
|
[
"Apache-2.0"
] | 1
|
2021-11-01T07:41:04.000Z
|
2021-11-01T07:41:10.000Z
|
taotao-cloud-python/taotao-cloud-oldboy/day42-python-js/test.py
|
shuigedeng/taotao-cloud-paren
|
3d281b919490f7cbee4520211e2eee5da7387564
|
[
"Apache-2.0"
] | 21
|
2021-04-13T10:32:17.000Z
|
2022-03-26T07:43:22.000Z
|
# def f():
# x=10
if 1:
x=10
print(x)
| 6.714286
| 10
| 0.404255
| 10
| 47
| 1.9
| 0.7
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 0.382979
| 47
| 7
| 11
| 6.714286
| 0.482759
| 0.361702
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6b1856d785c18d72b922e94ed4d2792383baa9d9
| 107
|
py
|
Python
|
generator_app/__init__.py
|
badf00d21/JSD2021
|
0af83e671bdc2570b617ed29b395db4193dd7daf
|
[
"MIT"
] | null | null | null |
generator_app/__init__.py
|
badf00d21/JSD2021
|
0af83e671bdc2570b617ed29b395db4193dd7daf
|
[
"MIT"
] | null | null | null |
generator_app/__init__.py
|
badf00d21/JSD2021
|
0af83e671bdc2570b617ed29b395db4193dd7daf
|
[
"MIT"
] | null | null | null |
import generator_app.generator_app as app
def call_generate():
print('call_generate')
app.main()
| 15.285714
| 41
| 0.728972
| 15
| 107
| 4.933333
| 0.6
| 0.324324
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168224
| 107
| 6
| 42
| 17.833333
| 0.831461
| 0
| 0
| 0
| 1
| 0
| 0.121495
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.25
| 0
| 0.5
| 0.25
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6b219f509501b5473aa18d8201d2069a3706934f
| 1,320
|
py
|
Python
|
dusk/script/math.py
|
mroethlin/dusk
|
7c494d5270b2a9ecd40ce90bd122d36d27cdc5d9
|
[
"MIT"
] | null | null | null |
dusk/script/math.py
|
mroethlin/dusk
|
7c494d5270b2a9ecd40ce90bd122d36d27cdc5d9
|
[
"MIT"
] | 1
|
2020-09-29T12:14:44.000Z
|
2020-10-13T07:15:12.000Z
|
dusk/script/math.py
|
mroethlin/dusk
|
7c494d5270b2a9ecd40ce90bd122d36d27cdc5d9
|
[
"MIT"
] | null | null | null |
__all__ = [
"max",
"min",
"pow",
"sqrt",
"exp",
"log",
"sin",
"cos",
"tan",
"arcsin",
"arccos",
"arctan",
"fabs",
"floor",
"ceil",
"isinf",
"isnan",
]
def max(a: float, b: float) -> float:
raise NotImplementedError
def min(a: float, b: float) -> float:
raise NotImplementedError
def pow(base: float, exp: float) -> float:
raise NotImplementedError
def sqrt(arg: float) -> float:
raise NotImplementedError
def exp(exp: float) -> float:
raise NotImplementedError
def log(arg: float) -> float:
raise NotImplementedError
def sin(arg: float) -> float:
raise NotImplementedError
def cos(arg: float) -> float:
raise NotImplementedError
def tan(arg: float) -> float:
raise NotImplementedError
def arcsin(arg: float) -> float:
raise NotImplementedError
def arccos(arg: float) -> float:
raise NotImplementedError
def arctan(arg: float) -> float:
raise NotImplementedError
def fabs(arg: float) -> float:
raise NotImplementedError
def floor(arg: float) -> float:
raise NotImplementedError
def ceil(arg: float) -> float:
raise NotImplementedError
def isinf(arg: float) -> float:
raise NotImplementedError
def isnan(arg: float) -> float:
raise NotImplementedError
| 15
| 42
| 0.637879
| 143
| 1,320
| 5.86014
| 0.181818
| 0.202864
| 0.304296
| 0.689737
| 0.817422
| 0.77327
| 0.105012
| 0.105012
| 0
| 0
| 0
| 0
| 0.239394
| 1,320
| 87
| 43
| 15.172414
| 0.834661
| 0
| 0
| 0.320755
| 0
| 0
| 0.052273
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.320755
| false
| 0
| 0
| 0
| 0.320755
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
862e2d8c378243ec96afd025d93354f3ade461cc
| 187
|
py
|
Python
|
include/const.py
|
Ayvytr/PythonBox
|
ab15c4fad79be827e7e04cdce30335d6a979655a
|
[
"Apache-2.0"
] | 1
|
2020-01-03T00:18:36.000Z
|
2020-01-03T00:18:36.000Z
|
include/const.py
|
Ayvytr/PyBox
|
ab15c4fad79be827e7e04cdce30335d6a979655a
|
[
"Apache-2.0"
] | null | null | null |
include/const.py
|
Ayvytr/PyBox
|
ab15c4fad79be827e7e04cdce30335d6a979655a
|
[
"Apache-2.0"
] | null | null | null |
class Const:
GITHUB = "https://github.com/ayvytr/PythonBox"
ISSUE = "https://github.com/Ayvytr/PythonBox/issues"
MAIL = "mailto:ayvytr@163.com?subject=Bug-Report&body={}"
| 37.4
| 62
| 0.679144
| 24
| 187
| 5.291667
| 0.666667
| 0.173228
| 0.220472
| 0.314961
| 0.456693
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01875
| 0.144385
| 187
| 4
| 63
| 46.75
| 0.775
| 0
| 0
| 0
| 0
| 0
| 0.68306
| 0.262295
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
8649d1c5e53f71e0eb876bb6163f00cb8e794a8f
| 4,092
|
py
|
Python
|
src/transformers/utils/dummy_flax_objects.py
|
theainerd/transformers
|
f7328de46dbeda4992a093a0501932bf0fc7b76f
|
[
"Apache-2.0"
] | 34
|
2021-07-05T02:44:31.000Z
|
2022-03-28T14:39:57.000Z
|
src/transformers/utils/dummy_flax_objects.py
|
theainerd/transformers
|
f7328de46dbeda4992a093a0501932bf0fc7b76f
|
[
"Apache-2.0"
] | 3
|
2021-07-22T15:49:44.000Z
|
2022-03-19T08:46:27.000Z
|
src/transformers/utils/dummy_flax_objects.py
|
theainerd/transformers
|
f7328de46dbeda4992a093a0501932bf0fc7b76f
|
[
"Apache-2.0"
] | 6
|
2021-07-05T02:44:32.000Z
|
2022-02-14T10:10:13.000Z
|
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..file_utils import requires_flax
class FlaxPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_flax(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_flax(self)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = None
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = None
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = None
FLAX_MODEL_FOR_PRETRAINING_MAPPING = None
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = None
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = None
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = None
FLAX_MODEL_MAPPING = None
class FlaxAutoModel:
def __init__(self, *args, **kwargs):
requires_flax(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_flax(self)
class FlaxAutoModelForMaskedLM:
def __init__(self, *args, **kwargs):
requires_flax(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_flax(self)
class FlaxAutoModelForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_flax(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_flax(self)
class FlaxAutoModelForNextSentencePrediction:
def __init__(self, *args, **kwargs):
requires_flax(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_flax(self)
class FlaxAutoModelForPreTraining:
def __init__(self, *args, **kwargs):
requires_flax(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_flax(self)
class FlaxAutoModelForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_flax(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_flax(self)
class FlaxAutoModelForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_flax(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_flax(self)
class FlaxAutoModelForTokenClassification:
def __init__(self, *args, **kwargs):
requires_flax(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_flax(self)
class FlaxBertForMaskedLM:
def __init__(self, *args, **kwargs):
requires_flax(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_flax(self)
class FlaxBertForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_flax(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_flax(self)
class FlaxBertForNextSentencePrediction:
def __init__(self, *args, **kwargs):
requires_flax(self)
class FlaxBertForPreTraining:
def __init__(self, *args, **kwargs):
requires_flax(self)
class FlaxBertForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_flax(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_flax(self)
class FlaxBertForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_flax(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_flax(self)
class FlaxBertForTokenClassification:
def __init__(self, *args, **kwargs):
requires_flax(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_flax(self)
class FlaxBertModel:
def __init__(self, *args, **kwargs):
requires_flax(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_flax(self)
class FlaxBertPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_flax(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_flax(self)
class FlaxRobertaModel:
def __init__(self, *args, **kwargs):
requires_flax(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_flax(self)
| 21.536842
| 75
| 0.692326
| 432
| 4,092
| 6.168981
| 0.143519
| 0.166604
| 0.189118
| 0.297186
| 0.73546
| 0.665666
| 0.665666
| 0.665666
| 0.665666
| 0.634146
| 0
| 0
| 0.206989
| 4,092
| 189
| 76
| 21.650794
| 0.821263
| 0.01784
| 0
| 0.760684
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.307692
| false
| 0
| 0.008547
| 0
| 0.478632
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8696259ca5e3968fea17fb25752d8c6fd5fe54f1
| 103
|
py
|
Python
|
personalcapital/exceptions.py
|
aagnone3/personalcapital
|
55471602885c6c495a1dff9229c84213f2220ab2
|
[
"MIT"
] | 9
|
2019-04-01T01:16:38.000Z
|
2021-12-26T20:38:32.000Z
|
personalcapital/exceptions.py
|
Kpasha/personal-capital-plus
|
55471602885c6c495a1dff9229c84213f2220ab2
|
[
"MIT"
] | null | null | null |
personalcapital/exceptions.py
|
Kpasha/personal-capital-plus
|
55471602885c6c495a1dff9229c84213f2220ab2
|
[
"MIT"
] | 3
|
2020-06-23T02:58:53.000Z
|
2021-04-03T05:31:32.000Z
|
class RequireTwoFactorException(Exception):
pass
class LoginFailedException(Exception):
pass
| 14.714286
| 43
| 0.786408
| 8
| 103
| 10.125
| 0.625
| 0.320988
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15534
| 103
| 6
| 44
| 17.166667
| 0.931034
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
86d17965ffceecd8ef98d35a760dfa3656b18a60
| 76
|
py
|
Python
|
tinycat/__init__.py
|
kbobrowski/tinycat
|
4f1e177530ac891123e1df6beeee6d7833936ddf
|
[
"MIT"
] | null | null | null |
tinycat/__init__.py
|
kbobrowski/tinycat
|
4f1e177530ac891123e1df6beeee6d7833936ddf
|
[
"MIT"
] | null | null | null |
tinycat/__init__.py
|
kbobrowski/tinycat
|
4f1e177530ac891123e1df6beeee6d7833936ddf
|
[
"MIT"
] | null | null | null |
from .translate import generate_task, paragraph_parser, process_translation
| 38
| 75
| 0.881579
| 9
| 76
| 7.111111
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078947
| 76
| 1
| 76
| 76
| 0.914286
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
811003e6d6c4c5ee2768dc99ff4b20f807b78bec
| 19
|
py
|
Python
|
pygridtools/viz/__init__.py
|
phobson/gridtools
|
53c5792ed6826bb9487d3e6b8e943760ab421a75
|
[
"BSD-3-Clause"
] | 26
|
2016-01-20T15:40:21.000Z
|
2021-10-08T00:35:15.000Z
|
pygridtools/viz/__init__.py
|
phobson/gridtools
|
53c5792ed6826bb9487d3e6b8e943760ab421a75
|
[
"BSD-3-Clause"
] | 48
|
2015-10-01T02:51:52.000Z
|
2021-05-05T15:31:11.000Z
|
pygridtools/viz/__init__.py
|
phobson/gridtools
|
53c5792ed6826bb9487d3e6b8e943760ab421a75
|
[
"BSD-3-Clause"
] | 8
|
2015-09-30T19:53:03.000Z
|
2022-02-23T03:29:24.000Z
|
from .viz import *
| 9.5
| 18
| 0.684211
| 3
| 19
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 19
| 1
| 19
| 19
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d4f5a1d4f3f9712da91744b84ef61eda79f0d2e4
| 95
|
py
|
Python
|
r2base/processors/readers/reader_cli.py
|
ariafyy/R2Base
|
19c65a7907d2b40fcad15f31574f6d4e4901776c
|
[
"Apache-2.0"
] | null | null | null |
r2base/processors/readers/reader_cli.py
|
ariafyy/R2Base
|
19c65a7907d2b40fcad15f31574f6d4e4901776c
|
[
"Apache-2.0"
] | null | null | null |
r2base/processors/readers/reader_cli.py
|
ariafyy/R2Base
|
19c65a7907d2b40fcad15f31574f6d4e4901776c
|
[
"Apache-2.0"
] | 1
|
2021-08-02T05:07:44.000Z
|
2021-08-02T05:07:44.000Z
|
from r2base.processors.bases import ProcessorBase
class ReaderClient(ProcessorBase):
pass
| 19
| 49
| 0.821053
| 10
| 95
| 7.8
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012048
| 0.126316
| 95
| 5
| 50
| 19
| 0.927711
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
077310e94bb5a1b0bbcd1ef9b405981f93aeaf7e
| 43
|
py
|
Python
|
rofm/__init__.py
|
PurelyApplied/roll_one_for_me
|
1130d6bb29db4795f3ef84ea0540e94290b6e58d
|
[
"Apache-2.0"
] | 13
|
2016-05-10T22:11:46.000Z
|
2019-02-15T03:44:01.000Z
|
rofm/__init__.py
|
PurelyApplied/roll_one_for_me
|
1130d6bb29db4795f3ef84ea0540e94290b6e58d
|
[
"Apache-2.0"
] | 6
|
2017-07-06T22:13:18.000Z
|
2017-07-07T18:18:12.000Z
|
rofm/__init__.py
|
PurelyApplied/roll_one_for_me
|
1130d6bb29db4795f3ef84ea0540e94290b6e58d
|
[
"Apache-2.0"
] | 10
|
2016-02-10T20:23:51.000Z
|
2022-03-25T14:06:05.000Z
|
from . import classes
from . import legacy
| 14.333333
| 21
| 0.767442
| 6
| 43
| 5.5
| 0.666667
| 0.606061
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186047
| 43
| 2
| 22
| 21.5
| 0.942857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
079cd6e8ac3bf0849751bf8fc8c9e7d66a644886
| 118
|
py
|
Python
|
oommfc/tests/test_init.py
|
fgr1986/oommfc
|
8c57683fd480b910c0eca0fbca57d8d0b009ed7a
|
[
"BSD-3-Clause"
] | 23
|
2019-09-18T10:58:00.000Z
|
2022-02-07T07:05:49.000Z
|
oommfc/tests/test_init.py
|
fgr1986/oommfc
|
8c57683fd480b910c0eca0fbca57d8d0b009ed7a
|
[
"BSD-3-Clause"
] | 43
|
2019-08-22T04:31:36.000Z
|
2022-03-28T09:09:15.000Z
|
oommfc/tests/test_init.py
|
fgr1986/oommfc
|
8c57683fd480b910c0eca0fbca57d8d0b009ed7a
|
[
"BSD-3-Clause"
] | 7
|
2020-04-25T13:25:25.000Z
|
2021-12-06T15:06:28.000Z
|
import oommfc as oc
def test_version():
assert isinstance(oc.__version__, str)
assert '.' in oc.__version__
| 16.857143
| 42
| 0.711864
| 16
| 118
| 4.6875
| 0.6875
| 0.24
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.194915
| 118
| 6
| 43
| 19.666667
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0.008475
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.25
| true
| 0
| 0.25
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
079cdd52f25ed79982d9e2d2b0264a235121c7ed
| 164
|
py
|
Python
|
core/admin.py
|
johncmacy/django-react-graphql
|
723ea2fb7d482d3d955e336dbd099b24cf0c6d3c
|
[
"MIT"
] | null | null | null |
core/admin.py
|
johncmacy/django-react-graphql
|
723ea2fb7d482d3d955e336dbd099b24cf0c6d3c
|
[
"MIT"
] | null | null | null |
core/admin.py
|
johncmacy/django-react-graphql
|
723ea2fb7d482d3d955e336dbd099b24cf0c6d3c
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import *
admin.site.register(Shape)
admin.site.register(Color)
admin.site.register(Thing)
admin.site.register(Widget)
| 23.428571
| 32
| 0.810976
| 24
| 164
| 5.541667
| 0.5
| 0.270677
| 0.511278
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073171
| 164
| 7
| 33
| 23.428571
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
07aab67200eaadf6a6aa65bb96d6f76c7a9f8e97
| 32
|
py
|
Python
|
graff/query/__init__.py
|
apontzen/graff
|
edbc7dcb292bba5a723d5acd9478af75a601038c
|
[
"BSD-3-Clause"
] | 3
|
2020-08-28T18:52:16.000Z
|
2020-09-05T01:51:40.000Z
|
graff/query/__init__.py
|
apontzen/graff
|
edbc7dcb292bba5a723d5acd9478af75a601038c
|
[
"BSD-3-Clause"
] | null | null | null |
graff/query/__init__.py
|
apontzen/graff
|
edbc7dcb292bba5a723d5acd9478af75a601038c
|
[
"BSD-3-Clause"
] | null | null | null |
from . import base, node, edge
| 10.666667
| 30
| 0.6875
| 5
| 32
| 4.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.21875
| 32
| 2
| 31
| 16
| 0.88
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
07c118b365beb31deda561082ceb91d2f2e90d8a
| 23,204
|
py
|
Python
|
semi_supervised_learning_pca.py
|
hkaneko1985/semi_supervised_learning
|
b2d0cd2d6e734eccd591a4e5ad984c002bdc9476
|
[
"MIT"
] | 4
|
2019-11-08T08:58:14.000Z
|
2021-01-23T08:50:27.000Z
|
semi_supervised_learning_pca.py
|
1309822673/semi_supervised_learning
|
b2d0cd2d6e734eccd591a4e5ad984c002bdc9476
|
[
"MIT"
] | null | null | null |
semi_supervised_learning_pca.py
|
1309822673/semi_supervised_learning
|
b2d0cd2d6e734eccd591a4e5ad984c002bdc9476
|
[
"MIT"
] | 1
|
2020-11-20T10:48:22.000Z
|
2020-11-20T10:48:22.000Z
|
# -*- coding: utf-8 -*-
"""
@author: Hiromasa Kaneko
"""
import math
import warnings
import matplotlib.figure as figure
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import model_selection, svm, tree
from sklearn.cross_decomposition import PLSRegression
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestRegressor
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern, DotProduct, WhiteKernel, RBF, ConstantKernel
from sklearn.linear_model import Ridge, Lasso, ElasticNet, ElasticNetCV
from sklearn.model_selection import GridSearchCV
warnings.filterwarnings('ignore')
regression_method = 'pls' # 'pls' or 'rr' or 'lasso' or 'en' or 'lsvr' or 'nsvr' or 'dt' or 'rf' or 'gp'
max_pca_component_number = 150
threshold_of_rate_of_same_value = 1
fold_number = 2
max_pls_component_number = 30
ridge_lambdas = 2 ** np.arange(-5, 10, dtype=float) # L2 weight in ridge regression
lasso_lambdas = np.arange(0.01, 0.71, 0.01, dtype=float) # L1 weight in LASSO
elastic_net_lambdas = np.arange(0.01, 0.71, 0.01, dtype=float) # Lambda in elastic net
elastic_net_alphas = np.arange(0.01, 1.00, 0.01, dtype=float) # Alpha in elastic net
linear_svr_cs = 2 ** np.arange(-5, 5, dtype=float) # C for linear svr
linear_svr_epsilons = 2 ** np.arange(-10, 0, dtype=float) # Epsilon for linear svr
nonlinear_svr_cs = 2 ** np.arange(-5, 10, dtype=float) # C for nonlinear svr
nonlinear_svr_epsilons = 2 ** np.arange(-10, 0, dtype=float) # Epsilon for nonlinear svr
nonlinear_svr_gammas = 2 ** np.arange(-20, 10, dtype=float) # Gamma for nonlinear svr
dt_max_max_depth = 30 # 木の深さの最大値、の最大値
dt_min_samples_leaf = 3 # 葉ごとのサンプル数の最小値
random_forest_number_of_trees = 300 # Number of decision trees for random forest
random_forest_x_variables_rates = np.arange(1, 10,
dtype=float) / 10 # Ratio of the number of X-variables for random forest
# load data set
supervised_dataset = pd.read_csv('descriptors_with_logS.csv', encoding='SHIFT-JIS', index_col=0)
unsupervised_dataset = pd.read_csv('descriptors_for_prediction.csv', encoding='SHIFT-JIS', index_col=0)
number_of_supervised_samples = supervised_dataset.shape[0]
x_all_dataset = pd.concat([supervised_dataset.iloc[:, 1:], unsupervised_dataset], axis=0)
x_all_dataset = x_all_dataset.loc[:, x_all_dataset.mean().index] # 平均を計算できる変数だけ選択
x_all_dataset = x_all_dataset.replace(np.inf, np.nan).fillna(np.nan) # infをnanに置き換えておく
x_all_dataset = x_all_dataset.dropna(axis=1) # nanのある変数を削除
y_train = supervised_dataset.iloc[:, 0]
rate_of_same_value = list()
num = 0
for X_variable_name in x_all_dataset.columns:
num += 1
# print('{0} / {1}'.format(num, x_all_dataset.shape[1]))
same_value_number = x_all_dataset[X_variable_name].value_counts()
rate_of_same_value.append(float(same_value_number[same_value_number.index[0]] / x_all_dataset.shape[0]))
deleting_variable_numbers = np.where(np.array(rate_of_same_value) >= threshold_of_rate_of_same_value)
"""
# delete descriptors with zero variance
deleting_variable_numbers = np.where( raw_Xtrain.var() == 0 )
"""
if len(deleting_variable_numbers[0]) == 0:
x_all = x_all_dataset.copy()
else:
x_all = x_all_dataset.drop(x_all_dataset.columns[deleting_variable_numbers], axis=1)
print('Variable numbers zero variance: {0}'.format(deleting_variable_numbers[0] + 1))
print('# of X-variables: {0}'.format(x_all.shape[1]))
# autoscaling
autoscaled_x_all = (x_all - x_all.mean(axis=0)) / x_all.std(axis=0, ddof=1)
autoscaled_y_train = (y_train - y_train.mean(axis=0)) / y_train.std(axis=0, ddof=1)
# PCA
pca = PCA() # PCA を行ったり PCA の結果を格納したりするための変数を、pca として宣言
pca.fit(autoscaled_x_all) # PCA を実行
# score
score_all = pd.DataFrame(pca.transform(autoscaled_x_all), index=x_all.index) # 主成分スコアの計算した後、pandas の DataFrame 型に変換
score_train = score_all.iloc[:number_of_supervised_samples, :]
score_test = score_all.iloc[number_of_supervised_samples:, :]
# scaling
autoscaled_score_train = score_train / score_train.std(axis=0, ddof=1)
autoscaled_score_test = score_test / score_train.std(axis=0, ddof=1)
# optimization of number of PCs
set_max_pca_component_number = min(np.linalg.matrix_rank(autoscaled_score_train), max_pca_component_number)
r2cvs = []
for number_of_pcs in range(set_max_pca_component_number):
print('PC:', number_of_pcs + 1, '/', set_max_pca_component_number)
autoscaled_x_train = autoscaled_score_train.iloc[:, :number_of_pcs + 1]
if regression_method == 'pls': # Partial Least Squares
pls_components = np.arange(1, min(np.linalg.matrix_rank(autoscaled_x_train) + 1, max_pls_component_number + 1),
1)
r2cvall = []
for pls_component in pls_components:
pls_model_in_cv = PLSRegression(n_components=pls_component)
estimated_y_in_cv = np.ndarray.flatten(
model_selection.cross_val_predict(pls_model_in_cv, autoscaled_x_train, autoscaled_y_train,
cv=fold_number))
estimated_y_in_cv = estimated_y_in_cv * y_train.std(ddof=1) + y_train.mean()
r2cvall.append(float(1 - sum((y_train - estimated_y_in_cv) ** 2) / sum((y_train - y_train.mean()) ** 2)))
optimal_pls_component_number = np.where(r2cvall == np.max(r2cvall))[0][0] + 1
regression_model = PLSRegression(n_components=optimal_pls_component_number)
elif regression_method == 'rr': # ridge regression
r2cvall = list()
for ridge_lambda in ridge_lambdas:
rr_model_in_cv = Ridge(alpha=ridge_lambda)
estimated_y_in_cv = model_selection.cross_val_predict(rr_model_in_cv, autoscaled_x_train,
autoscaled_y_train,
cv=fold_number)
estimated_y_in_cv = estimated_y_in_cv * y_train.std(ddof=1) + y_train.mean()
r2cvall.append(float(1 - sum((y_train - estimated_y_in_cv) ** 2) / sum((y_train - y_train.mean()) ** 2)))
optimal_ridge_lambda = ridge_lambdas[np.where(r2cvall == np.max(r2cvall))[0][0]]
regression_model = Ridge(alpha=optimal_ridge_lambda)
elif regression_method == 'lasso': # LASSO
r2cvall = list()
for lasso_lambda in lasso_lambdas:
lasso_model_in_cv = Lasso(alpha=lasso_lambda)
estimated_y_in_cv = model_selection.cross_val_predict(lasso_model_in_cv, autoscaled_x_train,
autoscaled_y_train,
cv=fold_number)
estimated_y_in_cv = estimated_y_in_cv * y_train.std(ddof=1) + y_train.mean()
r2cvall.append(float(1 - sum((y_train - estimated_y_in_cv) ** 2) / sum((y_train - y_train.mean()) ** 2)))
optimal_lasso_lambda = lasso_lambdas[np.where(r2cvall == np.max(r2cvall))[0][0]]
regression_model = Lasso(alpha=optimal_lasso_lambda)
elif regression_method == 'en': # Elastic net
elastic_net_in_cv = ElasticNetCV(cv=fold_number, l1_ratio=elastic_net_lambdas, alphas=elastic_net_alphas)
elastic_net_in_cv.fit(autoscaled_x_train, autoscaled_y_train)
optimal_elastic_net_alpha = elastic_net_in_cv.alpha_
optimal_elastic_net_lambda = elastic_net_in_cv.l1_ratio_
regression_model = ElasticNet(l1_ratio=optimal_elastic_net_lambda, alpha=optimal_elastic_net_alpha)
elif regression_method == 'lsvr': # Linear SVR
linear_svr_in_cv = GridSearchCV(svm.SVR(kernel='linear'), {'C': linear_svr_cs, 'epsilon': linear_svr_epsilons},
cv=fold_number)
linear_svr_in_cv.fit(autoscaled_x_train, autoscaled_y_train)
optimal_linear_svr_c = linear_svr_in_cv.best_params_['C']
optimal_linear_svr_epsilon = linear_svr_in_cv.best_params_['epsilon']
regression_model = svm.SVR(kernel='linear', C=optimal_linear_svr_c, epsilon=optimal_linear_svr_epsilon)
elif regression_method == 'nsvr': # Nonlinear SVR
variance_of_gram_matrix = list()
numpy_autoscaled_Xtrain = np.array(autoscaled_x_train)
for nonlinear_svr_gamma in nonlinear_svr_gammas:
gram_matrix = np.exp(
-nonlinear_svr_gamma * ((numpy_autoscaled_Xtrain[:, np.newaxis] - numpy_autoscaled_Xtrain) ** 2).sum(
axis=2))
variance_of_gram_matrix.append(gram_matrix.var(ddof=1))
optimal_nonlinear_gamma = nonlinear_svr_gammas[
np.where(variance_of_gram_matrix == np.max(variance_of_gram_matrix))[0][0]]
# CV による ε の最適化
model_in_cv = GridSearchCV(svm.SVR(kernel='rbf', C=3, gamma=optimal_nonlinear_gamma),
{'epsilon': nonlinear_svr_epsilons},
cv=fold_number, iid=False, verbose=0)
model_in_cv.fit(autoscaled_x_train, autoscaled_y_train)
optimal_nonlinear_epsilon = model_in_cv.best_params_['epsilon']
# CV による C の最適化
model_in_cv = GridSearchCV(
svm.SVR(kernel='rbf', epsilon=optimal_nonlinear_epsilon, gamma=optimal_nonlinear_gamma),
{'C': nonlinear_svr_cs}, cv=fold_number, iid=False, verbose=0)
model_in_cv.fit(autoscaled_x_train, autoscaled_y_train)
optimal_nonlinear_c = model_in_cv.best_params_['C']
# CV による γ の最適化
model_in_cv = GridSearchCV(svm.SVR(kernel='rbf', epsilon=optimal_nonlinear_epsilon, C=optimal_nonlinear_c),
{'gamma': nonlinear_svr_gammas}, cv=fold_number, iid=False, verbose=0)
model_in_cv.fit(autoscaled_x_train, autoscaled_y_train)
optimal_nonlinear_gamma = model_in_cv.best_params_['gamma']
regression_model = svm.SVR(kernel='rbf', C=optimal_nonlinear_c, epsilon=optimal_nonlinear_epsilon,
gamma=optimal_nonlinear_gamma)
elif regression_method == 'dt': # Decision tree
# クロスバリデーションによる木の深さの最適化
r2cv_all = []
for max_depth in range(2, dt_max_max_depth):
model_in_cv = tree.DecisionTreeRegressor(max_depth=max_depth, min_samples_leaf=dt_min_samples_leaf)
estimated_y_in_cv = model_selection.cross_val_predict(model_in_cv, autoscaled_x_train, autoscaled_y_train,
cv=fold_number) * y_train.std(ddof=1) + y_train.mean()
r2cv_all.append(1 - sum((y_train - estimated_y_in_cv) ** 2) / sum((y_train - y_train.mean()) ** 2))
optimal_max_depth = np.where(r2cv_all == np.max(r2cv_all))[0][0] + 2 # r2cvが最も大きい木の深さ
regression_model = tree.DecisionTreeRegressor(max_depth=optimal_max_depth,
min_samples_leaf=dt_min_samples_leaf) # DTモデルの宣言
elif regression_method == 'rf': # Random forest
rmse_oob_all = list()
for random_forest_x_variables_rate in random_forest_x_variables_rates:
RandomForestResult = RandomForestRegressor(n_estimators=random_forest_number_of_trees, max_features=int(
max(math.ceil(autoscaled_x_train.shape[1] * random_forest_x_variables_rate), 1)), oob_score=True)
RandomForestResult.fit(autoscaled_x_train, autoscaled_y_train)
estimated_y_in_cv = RandomForestResult.oob_prediction_
estimated_y_in_cv = estimated_y_in_cv * y_train.std(ddof=1) + y_train.mean()
rmse_oob_all.append((sum((y_train - estimated_y_in_cv) ** 2) / len(y_train)) ** 0.5)
optimal_random_forest_x_variables_rate = random_forest_x_variables_rates[
np.where(rmse_oob_all == np.min(rmse_oob_all))[0][0]]
regression_model = RandomForestRegressor(n_estimators=random_forest_number_of_trees, max_features=int(
max(math.ceil(autoscaled_x_train.shape[1] * optimal_random_forest_x_variables_rate), 1)), oob_score=True)
elif regression_method == 'gp': # Gaussian process
regression_model = GaussianProcessRegressor(ConstantKernel() * RBF() + WhiteKernel())
estimated_y_in_cv = np.ndarray.flatten(
model_selection.cross_val_predict(regression_model, autoscaled_x_train, autoscaled_y_train, cv=fold_number))
estimated_y_in_cv = estimated_y_in_cv * y_train.std(ddof=1) + y_train.mean()
r2cvs.append(float(1 - sum((y_train - estimated_y_in_cv) ** 2) / sum((y_train - y_train.mean()) ** 2)))
plt.rcParams['font.size'] = 18 # 横軸や縦軸の名前の文字などのフォントのサイズ
plt.plot(np.arange(set_max_pca_component_number) + 1, r2cvs, 'b.-')
plt.ylim(0, 1)
plt.xlabel('Number of PCA components')
plt.ylabel('r2cv')
plt.show()
optimal_pca_component_number = np.where(r2cvs == np.max(r2cvs))[0][0] + 1
print('Optimal PCA component number : {0}'.format(optimal_pca_component_number))
autoscaled_x_train = autoscaled_score_train.iloc[:, :optimal_pca_component_number]
autoscaled_x_test = autoscaled_score_test.iloc[:, :optimal_pca_component_number]
if regression_method == 'pls': # Partial Least Squares
pls_components = np.arange(1, min(np.linalg.matrix_rank(autoscaled_x_train) + 1, max_pls_component_number + 1), 1)
r2cvall = []
for pls_component in pls_components:
pls_model_in_cv = PLSRegression(n_components=pls_component)
estimated_y_in_cv = np.ndarray.flatten(
model_selection.cross_val_predict(pls_model_in_cv, autoscaled_x_train, autoscaled_y_train, cv=fold_number))
estimated_y_in_cv = estimated_y_in_cv * y_train.std(ddof=1) + y_train.mean()
r2cvall.append(float(1 - sum((y_train - estimated_y_in_cv) ** 2) / sum((y_train - y_train.mean()) ** 2)))
optimal_pls_component_number = np.where(r2cvall == np.max(r2cvall))[0][0] + 1
regression_model = PLSRegression(n_components=optimal_pls_component_number)
elif regression_method == 'rr': # ridge regression
r2cvall = list()
for ridge_lambda in ridge_lambdas:
rr_model_in_cv = Ridge(alpha=ridge_lambda)
estimated_y_in_cv = model_selection.cross_val_predict(rr_model_in_cv, autoscaled_x_train, autoscaled_y_train,
cv=fold_number)
estimated_y_in_cv = estimated_y_in_cv * y_train.std(ddof=1) + y_train.mean()
r2cvall.append(float(1 - sum((y_train - estimated_y_in_cv) ** 2) / sum((y_train - y_train.mean()) ** 2)))
optimal_ridge_lambda = ridge_lambdas[np.where(r2cvall == np.max(r2cvall))[0][0]]
regression_model = Ridge(alpha=optimal_ridge_lambda)
elif regression_method == 'lasso': # LASSO
r2cvall = list()
for lasso_lambda in lasso_lambdas:
lasso_model_in_cv = Lasso(alpha=lasso_lambda)
estimated_y_in_cv = model_selection.cross_val_predict(lasso_model_in_cv, autoscaled_x_train, autoscaled_y_train,
cv=fold_number)
estimated_y_in_cv = estimated_y_in_cv * y_train.std(ddof=1) + y_train.mean()
r2cvall.append(float(1 - sum((y_train - estimated_y_in_cv) ** 2) / sum((y_train - y_train.mean()) ** 2)))
optimal_lasso_lambda = lasso_lambdas[np.where(r2cvall == np.max(r2cvall))[0][0]]
regression_model = Lasso(alpha=optimal_lasso_lambda)
elif regression_method == 'en': # Elastic net
elastic_net_in_cv = ElasticNetCV(cv=fold_number, l1_ratio=elastic_net_lambdas, alphas=elastic_net_alphas)
elastic_net_in_cv.fit(autoscaled_x_train, autoscaled_y_train)
optimal_elastic_net_alpha = elastic_net_in_cv.alpha_
optimal_elastic_net_lambda = elastic_net_in_cv.l1_ratio_
regression_model = ElasticNet(l1_ratio=optimal_elastic_net_lambda, alpha=optimal_elastic_net_alpha)
elif regression_method == 'lsvr': # Linear SVR
linear_svr_in_cv = GridSearchCV(svm.SVR(kernel='linear'), {'C': linear_svr_cs, 'epsilon': linear_svr_epsilons},
cv=fold_number)
linear_svr_in_cv.fit(autoscaled_x_train, autoscaled_y_train)
optimal_linear_svr_c = linear_svr_in_cv.best_params_['C']
optimal_linear_svr_epsilon = linear_svr_in_cv.best_params_['epsilon']
regression_model = svm.SVR(kernel='linear', C=optimal_linear_svr_c, epsilon=optimal_linear_svr_epsilon)
elif regression_method == 'nsvr': # Nonlinear SVR
variance_of_gram_matrix = list()
numpy_autoscaled_Xtrain = np.array(autoscaled_x_train)
for nonlinear_svr_gamma in nonlinear_svr_gammas:
gram_matrix = np.exp(
-nonlinear_svr_gamma * ((numpy_autoscaled_Xtrain[:, np.newaxis] - numpy_autoscaled_Xtrain) ** 2).sum(
axis=2))
variance_of_gram_matrix.append(gram_matrix.var(ddof=1))
optimal_nonlinear_gamma = nonlinear_svr_gammas[
np.where(variance_of_gram_matrix == np.max(variance_of_gram_matrix))[0][0]]
# CV による ε の最適化
model_in_cv = GridSearchCV(svm.SVR(kernel='rbf', C=3, gamma=optimal_nonlinear_gamma),
{'epsilon': nonlinear_svr_epsilons},
cv=fold_number, iid=False, verbose=0)
model_in_cv.fit(autoscaled_x_train, autoscaled_y_train)
optimal_nonlinear_epsilon = model_in_cv.best_params_['epsilon']
# CV による C の最適化
model_in_cv = GridSearchCV(svm.SVR(kernel='rbf', epsilon=optimal_nonlinear_epsilon, gamma=optimal_nonlinear_gamma),
{'C': nonlinear_svr_cs}, cv=fold_number, iid=False, verbose=0)
model_in_cv.fit(autoscaled_x_train, autoscaled_y_train)
optimal_nonlinear_c = model_in_cv.best_params_['C']
# CV による γ の最適化
model_in_cv = GridSearchCV(svm.SVR(kernel='rbf', epsilon=optimal_nonlinear_epsilon, C=optimal_nonlinear_c),
{'gamma': nonlinear_svr_gammas}, cv=fold_number, iid=False, verbose=0)
model_in_cv.fit(autoscaled_x_train, autoscaled_y_train)
optimal_nonlinear_gamma = model_in_cv.best_params_['gamma']
regression_model = svm.SVR(kernel='rbf', C=optimal_nonlinear_c, epsilon=optimal_nonlinear_epsilon,
gamma=optimal_nonlinear_gamma)
elif regression_method == 'dt': # Decision tree
# クロスバリデーションによる木の深さの最適化
r2cv_all = []
for max_depth in range(2, dt_max_max_depth):
model_in_cv = tree.DecisionTreeRegressor(max_depth=max_depth, min_samples_leaf=dt_min_samples_leaf)
estimated_y_in_cv = model_selection.cross_val_predict(model_in_cv, autoscaled_x_train, autoscaled_y_train,
cv=fold_number) * y_train.std(ddof=1) + y_train.mean()
r2cv_all.append(1 - sum((y_train - estimated_y_in_cv) ** 2) / sum((y_train - y_train.mean()) ** 2))
optimal_max_depth = np.where(r2cv_all == np.max(r2cv_all))[0][0] + 2 # r2cvが最も大きい木の深さ
regression_model = tree.DecisionTreeRegressor(max_depth=optimal_max_depth,
min_samples_leaf=dt_min_samples_leaf) # DTモデルの宣言
elif regression_method == 'rf': # Random forest
rmse_oob_all = list()
for random_forest_x_variables_rate in random_forest_x_variables_rates:
RandomForestResult = RandomForestRegressor(n_estimators=random_forest_number_of_trees, max_features=int(
max(math.ceil(autoscaled_x_train.shape[1] * random_forest_x_variables_rate), 1)), oob_score=True)
RandomForestResult.fit(autoscaled_x_train, autoscaled_y_train)
estimated_y_in_cv = RandomForestResult.oob_prediction_
estimated_y_in_cv = estimated_y_in_cv * y_train.std(ddof=1) + y_train.mean()
rmse_oob_all.append((sum((y_train - estimated_y_in_cv) ** 2) / len(y_train)) ** 0.5)
optimal_random_forest_x_variables_rate = random_forest_x_variables_rates[
np.where(rmse_oob_all == np.min(rmse_oob_all))[0][0]]
regression_model = RandomForestRegressor(n_estimators=random_forest_number_of_trees, max_features=int(
max(math.ceil(autoscaled_x_train.shape[1] * optimal_random_forest_x_variables_rate), 1)), oob_score=True)
elif regression_method == 'gp': # Gaussian process
regression_model = GaussianProcessRegressor(ConstantKernel() * RBF() + WhiteKernel())
regression_model.fit(autoscaled_x_train, autoscaled_y_train)
# calculate y for training data
calculated_ytrain = np.ndarray.flatten(regression_model.predict(autoscaled_x_train))
calculated_ytrain = calculated_ytrain * y_train.std(ddof=1) + y_train.mean()
# yy-plot
plt.figure(figsize=figure.figaspect(1))
plt.scatter(y_train, calculated_ytrain)
y_max = np.max(np.array([np.array(y_train), calculated_ytrain]))
y_min = np.min(np.array([np.array(y_train), calculated_ytrain]))
plt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)],
[y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-')
plt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))
plt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))
plt.xlabel('Actual Y')
plt.ylabel('Calculated Y')
plt.show()
# r2, RMSE, MAE
print('r2: {0}'.format(float(1 - sum((y_train - calculated_ytrain) ** 2) / sum((y_train - y_train.mean()) ** 2))))
print('RMSE: {0}'.format(float((sum((y_train - calculated_ytrain) ** 2) / len(y_train)) ** 0.5)))
print('MAE: {0}'.format(float(sum(abs(y_train - calculated_ytrain)) / len(y_train))))
# estimated_y in cross-validation
estimated_y_in_cv = np.ndarray.flatten(
model_selection.cross_val_predict(regression_model, autoscaled_x_train, autoscaled_y_train, cv=fold_number))
estimated_y_in_cv = estimated_y_in_cv * y_train.std(ddof=1) + y_train.mean()
# yy-plot
plt.figure(figsize=figure.figaspect(1))
plt.scatter(y_train, estimated_y_in_cv)
y_max = np.max(np.array([np.array(y_train), estimated_y_in_cv]))
y_min = np.min(np.array([np.array(y_train), estimated_y_in_cv]))
plt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)],
[y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-')
plt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))
plt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))
plt.xlabel('Actual Y')
plt.ylabel('Estimated Y in CV')
plt.show()
# r2cv, RMSEcv, MAEcv
print('r2cv: {0}'.format(float(1 - sum((y_train - estimated_y_in_cv) ** 2) / sum((y_train - y_train.mean()) ** 2))))
print('RMSEcv: {0}'.format(float((sum((y_train - estimated_y_in_cv) ** 2) / len(y_train)) ** 0.5)))
print('MAEcv: {0}'.format(float(sum(abs(y_train - estimated_y_in_cv)) / len(y_train))))
# estimate y for test data
autoscaled_x_test = np.ndarray.flatten(regression_model.predict(autoscaled_x_test))
autoscaled_x_test = autoscaled_x_test * y_train.std(ddof=1) + y_train.mean()
autoscaled_x_test = pd.DataFrame(autoscaled_x_test, index=unsupervised_dataset.index, columns=['estimated y'])
autoscaled_x_test.to_csv('estimated_y.csv')
| 63.572603
| 121
| 0.691777
| 3,315
| 23,204
| 4.456109
| 0.082353
| 0.043461
| 0.04143
| 0.047387
| 0.803412
| 0.776943
| 0.749526
| 0.724614
| 0.714189
| 0.71209
| 0
| 0.018354
| 0.199319
| 23,204
| 364
| 122
| 63.747253
| 0.776737
| 0.056628
| 0
| 0.640127
| 0
| 0
| 0.024784
| 0.002577
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.044586
| 0
| 0.044586
| 0.031847
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
07c8bd084ca8f2e3113088632c40e18e88256820
| 6,999
|
py
|
Python
|
mobile/migrations/0001_initial.py
|
hyperoslo/django-mobile
|
7ce65c51b45c167d096021852ce8f367732eb754
|
[
"MIT"
] | 3
|
2016-10-04T08:03:26.000Z
|
2017-06-30T10:25:35.000Z
|
mobile/migrations/0001_initial.py
|
hyperoslo/django-mobile
|
7ce65c51b45c167d096021852ce8f367732eb754
|
[
"MIT"
] | 1
|
2021-04-30T09:51:29.000Z
|
2021-04-30T09:51:29.000Z
|
mobile/migrations/0001_initial.py
|
hyperoslo/django-mobile
|
7ce65c51b45c167d096021852ce8f367732eb754
|
[
"MIT"
] | 2
|
2016-10-04T08:03:28.000Z
|
2017-09-21T02:12:56.000Z
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'OutgoingSMS'
db.create_table('mobile_outgoingsms', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('recipient', self.gf('django.db.models.fields.CharField')(max_length=255)),
('sender', self.gf('django.db.models.fields.CharField')(default=2210, max_length=255)),
('message', self.gf('django.db.models.fields.TextField')()),
('sent', self.gf('django.db.models.fields.BooleanField')(default=False)),
('price', self.gf('django.db.models.fields.IntegerField')()),
('country', self.gf('django.db.models.fields.CharField')(default='NO', max_length=255)),
('delivery_status', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('sent_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal('mobile', ['OutgoingSMS'])
# Adding model 'IncomingSMS'
db.create_table('mobile_incomingsms', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('recipient', self.gf('django.db.models.fields.CharField')(max_length=255)),
('sender', self.gf('django.db.models.fields.CharField')(max_length=255)),
('message', self.gf('django.db.models.fields.TextField')()),
('country', self.gf('django.db.models.fields.CharField')(default='NO', max_length=255)),
('keyword', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('parameter', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('received_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('source', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('mobile', ['IncomingSMS'])
# Adding model 'IncomingMMS'
db.create_table('mobile_incomingmms', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('recipient', self.gf('django.db.models.fields.CharField')(max_length=255)),
('country', self.gf('django.db.models.fields.CharField')(default='NO', max_length=255)),
('sender', self.gf('django.db.models.fields.CharField')(max_length=255)),
('subject', self.gf('django.db.models.fields.CharField')(max_length=255)),
('received_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('source', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('mobile', ['IncomingMMS'])
# Adding model 'MMSFile'
db.create_table('mobile_mmsfile', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('file', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
('mms', self.gf('django.db.models.fields.related.ForeignKey')(related_name='files', to=orm['mobile.IncomingMMS'])),
('content_type', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('mobile', ['MMSFile'])
def backwards(self, orm):
# Deleting model 'OutgoingSMS'
db.delete_table('mobile_outgoingsms')
# Deleting model 'IncomingSMS'
db.delete_table('mobile_incomingsms')
# Deleting model 'IncomingMMS'
db.delete_table('mobile_incomingmms')
# Deleting model 'MMSFile'
db.delete_table('mobile_mmsfile')
models = {
'mobile.incomingmms': {
'Meta': {'object_name': 'IncomingMMS'},
'country': ('django.db.models.fields.CharField', [], {'default': "'NO'", 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'received_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'recipient': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sender': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'source': ('django.db.models.fields.TextField', [], {}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'mobile.incomingsms': {
'Meta': {'object_name': 'IncomingSMS'},
'country': ('django.db.models.fields.CharField', [], {'default': "'NO'", 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keyword': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'parameter': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'received_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'recipient': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sender': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'source': ('django.db.models.fields.TextField', [], {})
},
'mobile.mmsfile': {
'Meta': {'object_name': 'MMSFile'},
'content_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'files'", 'to': "orm['mobile.IncomingMMS']"})
},
'mobile.outgoingsms': {
'Meta': {'object_name': 'OutgoingSMS'},
'country': ('django.db.models.fields.CharField', [], {'default': "'NO'", 'max_length': '255'}),
'delivery_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'price': ('django.db.models.fields.IntegerField', [], {}),
'recipient': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sender': ('django.db.models.fields.CharField', [], {'default': '2210', 'max_length': '255'}),
'sent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sent_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
}
}
complete_apps = ['mobile']
| 57.842975
| 133
| 0.588656
| 761
| 6,999
| 5.296978
| 0.111695
| 0.117093
| 0.201439
| 0.28777
| 0.774994
| 0.755148
| 0.750682
| 0.700074
| 0.679732
| 0.61027
| 0
| 0.016771
| 0.199171
| 6,999
| 120
| 134
| 58.325
| 0.702409
| 0.033005
| 0
| 0.395833
| 0
| 0
| 0.478396
| 0.297573
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020833
| false
| 0
| 0.041667
| 0
| 0.09375
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
58028b45994c9e5bc8269a5c988cb586f930ea3e
| 3,249
|
py
|
Python
|
tests/async/test_dealer_dealer.py
|
calcite/zmq_tubes
|
ab501639b310f818a85b4fa190f3a70ae28390ff
|
[
"MIT"
] | 1
|
2021-10-07T11:29:02.000Z
|
2021-10-07T11:29:02.000Z
|
tests/async/test_dealer_dealer.py
|
calcite/tubes
|
ab501639b310f818a85b4fa190f3a70ae28390ff
|
[
"MIT"
] | null | null | null |
tests/async/test_dealer_dealer.py
|
calcite/tubes
|
ab501639b310f818a85b4fa190f3a70ae28390ff
|
[
"MIT"
] | null | null | null |
import asyncio
import zmq
from ..helpers import run_test_tasks
from zmq_tubes import Tube, TubeNode, TubeMessage
ADDR = 'ipc:///tmp/dealer_dealer.pipe'
TOPIC = 'req'
def test_dealer_dealer():
    """DEALER<->DEALER exchange across two separate nodes.

    Each side sends two requests; every payload must arrive at the peer's
    handler exactly once, so the shared expectation list drains to empty.
    """
    expected = [f"request-DEALER{peer}_REQ-{seq}"
                for peer in (1, 2) for seq in (0, 1)]

    async def request_task(node, topic, name):
        asyncio.current_task().set_name(name)
        for seq in (0, 1):
            node.send(topic, f"request-{name}-{seq}")
        # Leave the loop running long enough for deliveries to complete.
        await asyncio.sleep(3)

    async def response_dealer_task(node, topic, name):
        async def __process(response: TubeMessage):
            # Each payload may be consumed only once.
            assert response.payload in expected
            expected.remove(response.payload)

        asyncio.current_task().set_name(name)
        node.register_handler(topic, __process)
        await node.start()

    server_tube = Tube(
        name='DEALER1',
        addr=ADDR,
        server=True,
        tube_type=zmq.DEALER
    )
    client_tube = Tube(
        name='DEALER2',
        addr=ADDR,
        tube_type=zmq.DEALER
    )

    server_node = TubeNode()
    server_node.register_tube(server_tube, f"{TOPIC}/#")
    server_node.connect()

    client_node = TubeNode()
    client_node.register_tube(client_tube, f"{TOPIC}/#")
    client_node.connect()

    asyncio.run(
        run_test_tasks(
            [request_task(server_node, TOPIC, 'DEALER1_REQ'),
             request_task(client_node, TOPIC, 'DEALER2_REQ')],
            [response_dealer_task(server_node, f'{TOPIC}/#', 'DEALER1_RESP'),
             response_dealer_task(client_node, f'{TOPIC}/#', 'DEALER2_RESP')]
        )
    )

    assert not expected
def test_dealer_dealer_on_same_node():
    """DEALER<->DEALER exchange with both tubes registered on one node.

    Handlers are bound per-tube, so each handler drains the requests that
    arrive on its own tube; all four payloads must be consumed.
    """
    expected = [f"request-DEALER{peer}_REQ-{seq}"
                for peer in (1, 2) for seq in (0, 1)]

    async def request_task(node, topic, name):
        asyncio.current_task().set_name(name)
        for seq in (0, 1):
            node.send(topic, f"request-{name}-{seq}")
        # Keep the task alive while deliveries finish.
        await asyncio.sleep(3)

    async def response_dealer_task(node, topic, name, tube):
        async def __process(response: TubeMessage):
            # Each payload may be consumed only once.
            assert response.payload in expected
            expected.remove(response.payload)

        asyncio.current_task().set_name(name)
        node.register_handler(topic, __process, tube)
        await node.start()

    server_tube = Tube(
        name='DEALER1',
        addr=ADDR,
        server=True,
        tube_type=zmq.DEALER
    )
    client_tube = Tube(
        name='DEALER2',
        addr=ADDR,
        tube_type=zmq.DEALER
    )

    shared_node = TubeNode()
    shared_node.register_tube(server_tube, f"{TOPIC}/#")
    shared_node.register_tube(client_tube, f"{TOPIC}/#")
    shared_node.connect()

    asyncio.run(
        run_test_tasks(
            [request_task(shared_node, TOPIC, 'DEALER1_REQ'),
             request_task(shared_node, TOPIC, 'DEALER2_REQ')],
            [response_dealer_task(shared_node, f'{TOPIC}/#',
                                  'DEALER1_RESP', server_tube),
             response_dealer_task(shared_node, f'{TOPIC}/#',
                                  'DEALER2_RESP', client_tube)]
        )
    )

    assert not expected
| 29.536364
| 78
| 0.614343
| 388
| 3,249
| 4.878866
| 0.149485
| 0.075541
| 0.047544
| 0.069731
| 0.824089
| 0.812995
| 0.812995
| 0.759641
| 0.759641
| 0.759641
| 0
| 0.026778
| 0.264389
| 3,249
| 109
| 79
| 29.807339
| 0.765272
| 0
| 0
| 0.636364
| 0
| 0
| 0.132348
| 0.060634
| 0
| 0
| 0
| 0
| 0.045455
| 1
| 0.022727
| false
| 0
| 0.045455
| 0
| 0.068182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ed09228aea18bcccf2db2b9eebc43a355daf1da7
| 130
|
py
|
Python
|
__init__.py
|
smwa/round_robin_tournament
|
8a2959e21d77f9fac722e787de57293062aa89dc
|
[
"MIT"
] | 1
|
2022-01-12T20:49:31.000Z
|
2022-01-12T20:49:31.000Z
|
__init__.py
|
smwa/round_robin_tournament
|
8a2959e21d77f9fac722e787de57293062aa89dc
|
[
"MIT"
] | null | null | null |
__init__.py
|
smwa/round_robin_tournament
|
8a2959e21d77f9fac722e787de57293062aa89dc
|
[
"MIT"
] | null | null | null |
"""
Import the classes Tournament, Match, and Participant.
"""
from .round_robin_tournament import Tournament, Match, Participant
| 26
| 66
| 0.792308
| 15
| 130
| 6.733333
| 0.666667
| 0.29703
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 130
| 4
| 67
| 32.5
| 0.878261
| 0.415385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ed17458a8c8eeb8fcee3108042c2b829a5cde2c1
| 1,279
|
py
|
Python
|
dbReports/iondb/bin/fix_experimentChipTypeWithExtraQuotes.py
|
sequencer2014/TS
|
465804570349d46b47c1bdf131bdafea5c582dee
|
[
"Apache-2.0"
] | null | null | null |
dbReports/iondb/bin/fix_experimentChipTypeWithExtraQuotes.py
|
sequencer2014/TS
|
465804570349d46b47c1bdf131bdafea5c582dee
|
[
"Apache-2.0"
] | null | null | null |
dbReports/iondb/bin/fix_experimentChipTypeWithExtraQuotes.py
|
sequencer2014/TS
|
465804570349d46b47c1bdf131bdafea5c582dee
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright (C) 2013 Ion Torrent Systems, Inc. All Rights Reserved
"""Remove extra quotes or backslash characters from Experiment.chipType."""
from djangoinit import *
import sys
import os
from iondb.rundb import models


def _strip_bad_char(bad_char, label):
    """Strip every occurrence of *bad_char* from Experiment.chipType values.

    bad_char: the single offending character to remove.
    label: tag printed in the progress message ("FIX-1" / "FIX-2").
    """
    # Re-query on each pass so a later pass sees earlier updates.
    # NOTE(review): .distinct('chipType') with a field name is
    # PostgreSQL-only — presumably the deployment target; confirm.
    chipTypes = models.Experiment.objects.all().values_list(
        'chipType', flat=True).distinct('chipType')
    for chipType in filter(lambda x: bad_char in x, chipTypes):
        clean_chipType = chipType.replace(bad_char, '')
        exps = models.Experiment.objects.all().filter(chipType=chipType)
        # Bug fix: the original interpolated the bad character where the
        # message announces the chipType being replaced; print the value.
        print("%s: Going to fix %d experiments by replacing chipType %s with %s" %
              (label, exps.count(), chipType, clean_chipType))
        models.Experiment.objects.filter(chipType=chipType).update(
            chipType=clean_chipType)


# Pass 1: stray backslashes; pass 2: stray double quotes.
_strip_bad_char('\\', 'FIX-1')
_strip_bad_char('"', 'FIX-2')
| 34.567568
| 99
| 0.721658
| 162
| 1,279
| 5.648148
| 0.364198
| 0.104918
| 0.15082
| 0.113661
| 0.813115
| 0.813115
| 0.813115
| 0.813115
| 0.813115
| 0.813115
| 0
| 0.005495
| 0.146208
| 1,279
| 36
| 100
| 35.527778
| 0.832418
| 0.066458
| 0
| 0.5
| 0
| 0
| 0.152347
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ed2cb70329a751075a4ca3db338f273d7ae9e3c4
| 1,161
|
py
|
Python
|
application/migrations/0035_auto_20200831_1609.py
|
City-of-Helsinki/events-helsinki-cms
|
64e4c1ce6cc058fb3783e417560dc244bd753d05
|
[
"MIT"
] | 2
|
2020-04-20T05:37:28.000Z
|
2021-02-19T10:33:45.000Z
|
application/migrations/0035_auto_20200831_1609.py
|
City-of-Helsinki/events-helsinki-cms
|
64e4c1ce6cc058fb3783e417560dc244bd753d05
|
[
"MIT"
] | 6
|
2020-02-12T12:55:37.000Z
|
2021-03-30T12:56:28.000Z
|
application/migrations/0035_auto_20200831_1609.py
|
City-of-Helsinki/events-helsinki-cms
|
64e4c1ce6cc058fb3783e417560dc244bd753d05
|
[
"MIT"
] | 1
|
2021-02-18T12:11:18.000Z
|
2021-02-18T12:11:18.000Z
|
# Generated by Django 2.2.9 on 2020-08-31 16:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter the per-language hero_background_image_color fields.

    The same CharField definition (identical colour choices) applies to the
    en/fi/sv translated columns, so the three AlterField operations are
    generated from one template.
    """

    dependencies = [
        ('application', '0034_auto_20200806_1101'),
    ]

    operations = [
        migrations.AlterField(
            model_name='landingpages',
            name='hero_background_image_color_%s' % language,
            field=models.CharField(
                blank=True,
                choices=[
                    ('FOG', 'Sumu'),
                    ('ENGEL', 'Engel'),
                    ('COPPER', 'Kupari'),
                    ('SUOMENLINNA', 'Suomenlinna'),
                ],
                max_length=255,
                null=True,
            ),
        )
        for language in ('en', 'fi', 'sv')
    ]
| 40.034483
| 175
| 0.61068
| 119
| 1,161
| 5.781513
| 0.428571
| 0.087209
| 0.109012
| 0.126453
| 0.767442
| 0.767442
| 0.767442
| 0.767442
| 0.767442
| 0.767442
| 0
| 0.04415
| 0.219638
| 1,161
| 28
| 176
| 41.464286
| 0.715232
| 0.03876
| 0
| 0.545455
| 1
| 0
| 0.280969
| 0.101436
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.045455
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.