Schema of the records in this extract:

| Field | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
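
Read as data, the schema above is repository provenance (the max_stars/max_issues/max_forks groups), the raw file in `content`, and a block of `qsc_*` quality signals. A minimal sketch of how rows with this schema might be loaded and filtered with pandas; the file name `code_rows.parquet` is hypothetical and the thresholds are only illustrative, not values taken from the dataset itself:

```python
import pandas as pd

# Hypothetical storage location; the dump does not say how the rows are stored.
df = pd.read_parquet("code_rows.parquet")

# Keep Python files that are not flagged as XML-like or auto-generated and that
# are not dominated by duplicated 10-gram content.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_code_cate_xml_start_quality_signal"] == 0)
    & (df["qsc_code_cate_autogen_quality_signal"] == 0)
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.9)
)
print(f"kept {mask.sum()} of {len(df)} rows")
print(df.loc[mask, ["max_stars_repo_name", "max_stars_repo_path", "size", "hits"]].head())
```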

| Field | Value |
|---|---|
| hexsha | a6e8643c4fa79c44da3d83cb0958ed57d6809154 |
| size | 3,591 |
| ext | py |
| lang | Python |
| max_stars_repo_path | slurm-rest-api/tests/test_parse_slurm_node_list.py |
| max_stars_repo_name | nsfcac/slurm-rest-api |
| max_stars_repo_head_hexsha | 8e282d69d8237f287ffb6e5e531065f01b873db8 |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | 1 |
| max_stars_repo_stars_event_min_datetime | 2021-06-25T15:32:28.000Z |
| max_stars_repo_stars_event_max_datetime | 2021-06-25T15:32:28.000Z |
| max_issues_repo_path | slurm-rest-api/tests/test_parse_slurm_node_list.py |
| max_issues_repo_name | nsfcac/slurm-rest-api |
| max_issues_repo_head_hexsha | 8e282d69d8237f287ffb6e5e531065f01b873db8 |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | 1 |
| max_issues_repo_issues_event_min_datetime | 2018-08-02T16:54:54.000Z |
| max_issues_repo_issues_event_max_datetime | 2018-08-02T16:54:54.000Z |
| max_forks_repo_path | slurm-rest-api/tests/test_parse_slurm_node_list.py |
| max_forks_repo_name | phac-nml/slurm-rest-api |
| max_forks_repo_head_hexsha | 6906dd29ee9ee014d14d762da7b294a68d4cf750 |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | 2 |
| max_forks_repo_forks_event_min_datetime | 2018-12-05T09:11:27.000Z |
| max_forks_repo_forks_event_max_datetime | 2020-01-22T06:11:50.000Z |

content:

```python
import unittest
import re
from slurmapi.parse_slurm_node_list import parse_all_lists, parse_list
class TestParsing(unittest.TestCase):
def setUp(self):
pass
def test_typical(self):
s = 'slurm-0-[0-2],slurm-1-[0-6],slurm-1-[28-31],slurm-5-[1-3]'
result = "['slurm-0-0', 'slurm-0-1', 'slurm-0-2', 'slurm-1-0', 'slurm-1-1', 'slurm-1-2', 'slurm-1-3', 'slurm-1-4', 'slurm-1-5', 'slurm-1-6', 'slurm-1-28', 'slurm-1-29', 'slurm-1-30', 'slurm-1-31', 'slurm-5-1', 'slurm-5-2', 'slurm-5-3']"
self.assertEqual(result,str(parse_list(s)))
def test_multi_range(self):
s = 'slurm-0-[0-2],slurm-1-[0-6,28-31],slurm-5-[1-3]'
result = "['slurm-0-0', 'slurm-0-1', 'slurm-0-2', 'slurm-1-0', 'slurm-1-1', 'slurm-1-2', 'slurm-1-3', 'slurm-1-4', 'slurm-1-5', 'slurm-1-6', 'slurm-1-28', 'slurm-1-29', 'slurm-1-30', 'slurm-1-31', 'slurm-5-1', 'slurm-5-2', 'slurm-5-3']"
self.assertEqual(result,str(parse_list(s)))
def test_single_3(self):
s = 'slurm-0-[0-2]'
result = "['slurm-0-0', 'slurm-0-1', 'slurm-0-2']"
self.assertEqual(result,str(parse_list(s)))
def test_single_no_range(self):
s = 'slurm-0-0'
result = "['slurm-0-0']"
self.assertEqual(result,str(parse_list(s)))
def test_double_range(self):
s = 'slurm-0-[0-2],slurm-1-[0-4]'
result = "['slurm-0-0', 'slurm-0-1', 'slurm-0-2', 'slurm-1-0', 'slurm-1-1', 'slurm-1-2', 'slurm-1-3', 'slurm-1-4']"
self.assertEqual(result,str(parse_list(s)))
def test_single_in_range(self):
s = 'slurm-0-[0-2],slurm-1-[0]'
result = "['slurm-0-0', 'slurm-0-1', 'slurm-0-2', 'slurm-1-0']"
self.assertEqual(result,str(parse_list(s)))
def test_double_first_no_range(self):
s = 'slurm-0-0,slurm-1-[0-4]'
result = "['slurm-0-0', 'slurm-1-0', 'slurm-1-1', 'slurm-1-2', 'slurm-1-3', 'slurm-1-4']"
self.assertEqual(result,str(parse_list(s)))
def test_double_last_no_range(self):
s = 'slurm-0-[0-2],slurm-1-0'
result = "['slurm-0-0', 'slurm-0-1', 'slurm-0-2', 'slurm-1-0']"
self.assertEqual(result,str(parse_list(s)))
def test_multi_range_first_no_range(self):
s = 'slurm-0-[0-2,4-5,7],slurm-1-0'
result = "['slurm-0-0', 'slurm-0-1', 'slurm-0-2', 'slurm-0-4', 'slurm-0-5', 'slurm-0-7', 'slurm-1-0']"
self.assertEqual(result,str(parse_list(s)))
class TestAllListsParsing(unittest.TestCase):
def setUp(self):
pass
def test_typical(self):
arr = []
arr.append("slurm-0-0")
arr.append("slurm-0-1")
result = "['slurm-0-0', 'slurm-0-1']"
self.assertEqual(result,str(parse_all_lists(arr)))
def test_duplicate(self):
arr = []
arr.append("slurm-0-0,slurm-0-1")
arr.append("slurm-0-1")
result = "['slurm-0-0', 'slurm-0-1']"
self.assertEqual(result,str(parse_all_lists(arr)))
def test_duplicate2(self):
arr = []
arr.append("slurm-0-0")
arr.append("slurm-0-0,slurm-0-1")
result = "['slurm-0-0', 'slurm-0-1']"
self.assertEqual(result,str(parse_all_lists(arr)))
def test_single_input(self):
arr = []
arr.append("slurm-0-0,slurm-0-1")
result = "['slurm-0-0', 'slurm-0-1']"
self.assertEqual(result,str(parse_all_lists(arr)))
def test_only_one(self):
arr = []
arr.append("slurm-0-0")
result = "['slurm-0-0']"
self.assertEqual(result,str(parse_all_lists(arr)))
if __name__ == "__main__":
unittest.main()
```

| Field | Value |
|---|---|
| avg_line_length | 36.642857 |
| max_line_length | 244 |
| alphanum_fraction | 0.570036 |

| Signal (base name) | `*_quality_signal` value | raw value |
|---|---|---|
| qsc_code_num_words | 596 | 0 |
| qsc_code_num_chars | 3,591 | 0 |
| qsc_code_mean_word_length | 3.323826 | 0 |
| qsc_code_frac_words_unique | 0.092282 | null |
| qsc_code_frac_chars_top_2grams | 0.166583 | 0 |
| qsc_code_frac_chars_top_3grams | 0.102474 | 0 |
| qsc_code_frac_chars_top_4grams | 0.096921 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0.893488 | 1 |
| qsc_code_frac_chars_dupe_6grams | 0.893488 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.886926 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.872792 | 1 |
| qsc_code_frac_chars_dupe_9grams | 0.841494 | 1 |
| qsc_code_frac_chars_dupe_10grams | 0.830389 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.085515 | 0 |
| qsc_code_frac_chars_whitespace | 0.202172 | 0 |
| qsc_code_size_file_byte | 3,591 | 0 |
| qsc_code_num_lines | 97 | 0 |
| qsc_code_num_chars_line_max | 245 | 0 |
| qsc_code_num_chars_line_mean | 37.020619 | 0 |
| qsc_code_frac_chars_alphabet | 0.605934 | 0 |
| qsc_code_frac_chars_comments | 0 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0.573333 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0.133333 | 0 |
| qsc_code_frac_chars_string_length | 0.378446 | 0 |
| qsc_code_frac_chars_long_word_length | 0.064327 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0.186667 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0.213333 | 1 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0.026667 | 0 |
| qsc_codepython_frac_lines_import | 0.04 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.28 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |

| Field | Value |
|---|---|
| effective | 0 |
| hits | 7 |
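
For the record above, the character-level columns (`alphanum_fraction` 0.570036, `qsc_code_frac_chars_whitespace_quality_signal` 0.202172, `qsc_code_frac_chars_digital_quality_signal` 0.085515) are simple ratios over the raw `content` string. A minimal sketch of how such ratios could be computed; the exact definitions used to produce the numbers above are not given in this dump, so the ones below are assumptions:

```python
def char_ratios(content: str) -> dict:
    """Character-level ratios over a source file (assumed definitions)."""
    n = len(content)
    if n == 0:
        return {"alphanum_fraction": 0.0, "frac_chars_whitespace": 0.0, "frac_chars_digital": 0.0}
    return {
        # share of characters that are letters or digits
        "alphanum_fraction": sum(c.isalnum() for c in content) / n,
        # share of characters that are whitespace (spaces, tabs, newlines)
        "frac_chars_whitespace": sum(c.isspace() for c in content) / n,
        # share of characters that are decimal digits
        "frac_chars_digital": sum(c.isdigit() for c in content) / n,
    }

# Usage sketch:
# with open("test_parse_slurm_node_list.py") as f:
#     print(char_ratios(f.read()))
```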

| Field | Value |
|---|---|
| hexsha | 5b39e463d016ff2fd72b9925f4f0f6e4da82226e |
| size | 31,340 |
| ext | py |
| lang | Python |
| max_stars_repo_path | cloudmersive_validate_api_client/api/ip_address_api.py |
| max_stars_repo_name | Cloudmersive/Cloudmersive.APIClient.Python.Validate |
| max_stars_repo_head_hexsha | 894a3f578c3860db41b3eed179dcc52e02f565a0 |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | 3 |
| max_stars_repo_stars_event_min_datetime | 2018-06-23T21:37:21.000Z |
| max_stars_repo_stars_event_max_datetime | 2020-04-20T23:07:36.000Z |
| max_issues_repo_path | cloudmersive_validate_api_client/api/ip_address_api.py |
| max_issues_repo_name | Cloudmersive/Cloudmersive.APIClient.Python.Validate |
| max_issues_repo_head_hexsha | 894a3f578c3860db41b3eed179dcc52e02f565a0 |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | 1 |
| max_issues_repo_issues_event_min_datetime | 2019-02-04T17:03:35.000Z |
| max_issues_repo_issues_event_max_datetime | 2019-03-02T20:16:52.000Z |
| max_forks_repo_path | cloudmersive_validate_api_client/api/ip_address_api.py |
| max_forks_repo_name | Cloudmersive/Cloudmersive.APIClient.Python.Validate |
| max_forks_repo_head_hexsha | 894a3f578c3860db41b3eed179dcc52e02f565a0 |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | 2 |
| max_forks_repo_forks_event_min_datetime | 2019-03-21T15:54:15.000Z |
| max_forks_repo_forks_event_max_datetime | 2020-05-27T17:30:43.000Z |

content:

```python
# coding: utf-8
"""
validateapi
The validation APIs help you validate data. Check if an E-mail address is real. Check if a domain is real. Check up on an IP address, and even where it is located. All this and much more is available in the validation API. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from cloudmersive_validate_api_client.api_client import ApiClient
class IPAddressApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def i_p_address_geolocate_street_address(self, value, **kwargs): # noqa: E501
"""Geolocate an IP address to a street address # noqa: E501
Identify an IP address's street address. Useful for security and UX applications. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.i_p_address_geolocate_street_address(value, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str value: IP address to geolocate, e.g. \"55.55.55.55\". The input is a string so be sure to enclose it in double-quotes. (required)
:return: GeolocateStreetAddressResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.i_p_address_geolocate_street_address_with_http_info(value, **kwargs) # noqa: E501
else:
(data) = self.i_p_address_geolocate_street_address_with_http_info(value, **kwargs) # noqa: E501
return data
def i_p_address_geolocate_street_address_with_http_info(self, value, **kwargs): # noqa: E501
"""Geolocate an IP address to a street address # noqa: E501
Identify an IP address's street address. Useful for security and UX applications. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.i_p_address_geolocate_street_address_with_http_info(value, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str value: IP address to geolocate, e.g. \"55.55.55.55\". The input is a string so be sure to enclose it in double-quotes. (required)
:return: GeolocateStreetAddressResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['value'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method i_p_address_geolocate_street_address" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'value' is set
if ('value' not in params or
params['value'] is None):
raise ValueError("Missing the required parameter `value` when calling `i_p_address_geolocate_street_address`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'value' in params:
body_params = params['value']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'text/json']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/validate/ip/geolocate/street-address', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GeolocateStreetAddressResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def i_p_address_ip_intelligence(self, value, **kwargs): # noqa: E501
"""Get intelligence on an IP address # noqa: E501
Identify key intelligence about an IP address, including if it is a known threat IP, known bot, Tor exit node, as well as the location of the IP address. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.i_p_address_ip_intelligence(value, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str value: IP address to process, e.g. \"55.55.55.55\". The input is a string so be sure to enclose it in double-quotes. (required)
:return: IPIntelligenceResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.i_p_address_ip_intelligence_with_http_info(value, **kwargs) # noqa: E501
else:
(data) = self.i_p_address_ip_intelligence_with_http_info(value, **kwargs) # noqa: E501
return data
def i_p_address_ip_intelligence_with_http_info(self, value, **kwargs): # noqa: E501
"""Get intelligence on an IP address # noqa: E501
Identify key intelligence about an IP address, including if it is a known threat IP, known bot, Tor exit node, as well as the location of the IP address. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.i_p_address_ip_intelligence_with_http_info(value, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str value: IP address to process, e.g. \"55.55.55.55\". The input is a string so be sure to enclose it in double-quotes. (required)
:return: IPIntelligenceResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['value'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method i_p_address_ip_intelligence" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'value' is set
if ('value' not in params or
params['value'] is None):
raise ValueError("Missing the required parameter `value` when calling `i_p_address_ip_intelligence`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'value' in params:
body_params = params['value']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'text/json']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/validate/ip/intelligence', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IPIntelligenceResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def i_p_address_is_bot(self, value, **kwargs): # noqa: E501
"""Check if IP address is a Bot client # noqa: E501
Check if the input IP address is a Bot, robot, or otherwise a non-user entity. Leverages real-time signals to check against known high-probability bots.. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.i_p_address_is_bot(value, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str value: IP address to check, e.g. \"55.55.55.55\". The input is a string so be sure to enclose it in double-quotes. (required)
:return: BotCheckResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.i_p_address_is_bot_with_http_info(value, **kwargs) # noqa: E501
else:
(data) = self.i_p_address_is_bot_with_http_info(value, **kwargs) # noqa: E501
return data
def i_p_address_is_bot_with_http_info(self, value, **kwargs): # noqa: E501
"""Check if IP address is a Bot client # noqa: E501
Check if the input IP address is a Bot, robot, or otherwise a non-user entity. Leverages real-time signals to check against known high-probability bots.. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.i_p_address_is_bot_with_http_info(value, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str value: IP address to check, e.g. \"55.55.55.55\". The input is a string so be sure to enclose it in double-quotes. (required)
:return: BotCheckResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['value'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method i_p_address_is_bot" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'value' is set
if ('value' not in params or
params['value'] is None):
raise ValueError("Missing the required parameter `value` when calling `i_p_address_is_bot`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'value' in params:
body_params = params['value']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'text/json']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/validate/ip/is-bot', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BotCheckResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def i_p_address_is_threat(self, value, **kwargs): # noqa: E501
"""Check if IP address is a known threat # noqa: E501
Check if the input IP address is a known threat IP address. Checks against known bad IPs, botnets, compromised servers, and other lists of threats. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.i_p_address_is_threat(value, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str value: IP address to check, e.g. \"55.55.55.55\". The input is a string so be sure to enclose it in double-quotes. (required)
:return: IPThreatResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.i_p_address_is_threat_with_http_info(value, **kwargs) # noqa: E501
else:
(data) = self.i_p_address_is_threat_with_http_info(value, **kwargs) # noqa: E501
return data
def i_p_address_is_threat_with_http_info(self, value, **kwargs): # noqa: E501
"""Check if IP address is a known threat # noqa: E501
Check if the input IP address is a known threat IP address. Checks against known bad IPs, botnets, compromised servers, and other lists of threats. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.i_p_address_is_threat_with_http_info(value, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str value: IP address to check, e.g. \"55.55.55.55\". The input is a string so be sure to enclose it in double-quotes. (required)
:return: IPThreatResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['value'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method i_p_address_is_threat" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'value' is set
if ('value' not in params or
params['value'] is None):
raise ValueError("Missing the required parameter `value` when calling `i_p_address_is_threat`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'value' in params:
body_params = params['value']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'text/json']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/validate/ip/is-threat', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IPThreatResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def i_p_address_is_tor_node(self, value, **kwargs): # noqa: E501
"""Check if IP address is a Tor node server # noqa: E501
Check if the input IP address is a Tor exit node server. Tor servers are a type of privacy-preserving technology that can hide the original IP address who makes a request. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.i_p_address_is_tor_node(value, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str value: IP address to check, e.g. \"55.55.55.55\". The input is a string so be sure to enclose it in double-quotes. (required)
:return: TorNodeResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.i_p_address_is_tor_node_with_http_info(value, **kwargs) # noqa: E501
else:
(data) = self.i_p_address_is_tor_node_with_http_info(value, **kwargs) # noqa: E501
return data
def i_p_address_is_tor_node_with_http_info(self, value, **kwargs): # noqa: E501
"""Check if IP address is a Tor node server # noqa: E501
Check if the input IP address is a Tor exit node server. Tor servers are a type of privacy-preserving technology that can hide the original IP address who makes a request. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.i_p_address_is_tor_node_with_http_info(value, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str value: IP address to check, e.g. \"55.55.55.55\". The input is a string so be sure to enclose it in double-quotes. (required)
:return: TorNodeResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['value'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method i_p_address_is_tor_node" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'value' is set
if ('value' not in params or
params['value'] is None):
raise ValueError("Missing the required parameter `value` when calling `i_p_address_is_tor_node`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'value' in params:
body_params = params['value']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'text/json']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/validate/ip/is-tor-node', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TorNodeResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def i_p_address_post(self, value, **kwargs): # noqa: E501
"""Geolocate an IP address # noqa: E501
Identify an IP address Country, State/Provence, City, Zip/Postal Code, etc. Useful for security and UX applications. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.i_p_address_post(value, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str value: IP address to geolocate, e.g. \"55.55.55.55\". The input is a string so be sure to enclose it in double-quotes. (required)
:return: GeolocateResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.i_p_address_post_with_http_info(value, **kwargs) # noqa: E501
else:
(data) = self.i_p_address_post_with_http_info(value, **kwargs) # noqa: E501
return data
def i_p_address_post_with_http_info(self, value, **kwargs): # noqa: E501
"""Geolocate an IP address # noqa: E501
Identify an IP address Country, State/Provence, City, Zip/Postal Code, etc. Useful for security and UX applications. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.i_p_address_post_with_http_info(value, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str value: IP address to geolocate, e.g. \"55.55.55.55\". The input is a string so be sure to enclose it in double-quotes. (required)
:return: GeolocateResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['value'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method i_p_address_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'value' is set
if ('value' not in params or
params['value'] is None):
raise ValueError("Missing the required parameter `value` when calling `i_p_address_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'value' in params:
body_params = params['value']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'text/json']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/validate/ip/geolocate', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GeolocateResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def i_p_address_reverse_domain_lookup(self, value, **kwargs): # noqa: E501
"""Perform a reverse domain name (DNS) lookup on an IP address # noqa: E501
Gets the domain name, if any, associated with the IP address. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.i_p_address_reverse_domain_lookup(value, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str value: IP address to check, e.g. \"55.55.55.55\". The input is a string so be sure to enclose it in double-quotes. (required)
:return: IPReverseDNSLookupResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.i_p_address_reverse_domain_lookup_with_http_info(value, **kwargs) # noqa: E501
else:
(data) = self.i_p_address_reverse_domain_lookup_with_http_info(value, **kwargs) # noqa: E501
return data
def i_p_address_reverse_domain_lookup_with_http_info(self, value, **kwargs): # noqa: E501
"""Perform a reverse domain name (DNS) lookup on an IP address # noqa: E501
Gets the domain name, if any, associated with the IP address. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.i_p_address_reverse_domain_lookup_with_http_info(value, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str value: IP address to check, e.g. \"55.55.55.55\". The input is a string so be sure to enclose it in double-quotes. (required)
:return: IPReverseDNSLookupResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['value'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method i_p_address_reverse_domain_lookup" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'value' is set
if ('value' not in params or
params['value'] is None):
raise ValueError("Missing the required parameter `value` when calling `i_p_address_reverse_domain_lookup`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'value' in params:
body_params = params['value']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'text/json']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/validate/ip/reverse-domain-lookup', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IPReverseDNSLookupResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
```

| Field | Value |
|---|---|
| avg_line_length | 43.049451 |
| max_line_length | 240 |
| alphanum_fraction | 0.623006 |

| Signal (base name) | `*_quality_signal` value | raw value |
|---|---|---|
| qsc_code_num_words | 3,920 | 0 |
| qsc_code_num_chars | 31,340 | 0 |
| qsc_code_mean_word_length | 4.754337 | 0 |
| qsc_code_frac_words_unique | 0.063776 | null |
| qsc_code_frac_chars_top_2grams | 0.045501 | 0 |
| qsc_code_frac_chars_top_3grams | 0.027043 | 0 |
| qsc_code_frac_chars_top_4grams | 0.028545 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0.957772 | 1 |
| qsc_code_frac_chars_dupe_6grams | 0.955089 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.948704 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.941192 | 1 |
| qsc_code_frac_chars_dupe_9grams | 0.939314 | 1 |
| qsc_code_frac_chars_dupe_10grams | 0.936095 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.01955 | 0 |
| qsc_code_frac_chars_whitespace | 0.286758 | 0 |
| qsc_code_size_file_byte | 31,340 | 0 |
| qsc_code_num_lines | 727 | 0 |
| qsc_code_num_chars_line_max | 241 | 0 |
| qsc_code_num_chars_line_mean | 43.108666 | 0 |
| qsc_code_frac_chars_alphabet | 0.814208 | 0 |
| qsc_code_frac_chars_comments | 0.381302 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0.813953 | 1 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.193707 | 0 |
| qsc_code_frac_chars_long_word_length | 0.055552 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0.03876 | 0 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0.010336 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.105943 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |

| Field | Value |
|---|---|
| effective | 0 |
| hits | 7 |
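
The generated Cloudmersive client above scores very high on the duplicate n-gram signals (`qsc_code_frac_chars_dupe_5grams_quality_signal` 0.957772 down to `qsc_code_frac_chars_dupe_10grams_quality_signal` 0.936095) because its endpoint methods are near-copies of one another. A sketch of one common formulation of such a signal, the fraction of word characters covered by word n-grams that occur more than once; the tokenization and weighting behind the values above are assumptions:

```python
from collections import Counter

def frac_chars_dupe_ngrams(content: str, n: int = 5) -> float:
    """Fraction of word characters inside word n-grams that occur more than once
    (one common formulation; the dataset's exact definition may differ)."""
    words = content.split()
    if len(words) < n:
        return 0.0
    ngrams = [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
    counts = Counter(ngrams)
    covered = [False] * len(words)  # word positions covered by a repeated n-gram
    for i, gram in enumerate(ngrams):
        if counts[gram] > 1:
            for j in range(i, i + n):
                covered[j] = True
    total_chars = sum(len(w) for w in words)
    dupe_chars = sum(len(w) for w, hit in zip(words, covered) if hit)
    return dupe_chars / total_chars if total_chars else 0.0
```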

| Field | Value |
|---|---|
| hexsha | 5bdce11f6d7b2c81044d83a04ad54368f8f929a2 |
| size | 380 |
| ext | py |
| lang | Python |
| max_stars_repo_path | chap5_the_matrix/mat_sparsity.py |
| max_stars_repo_name | lastone9182/CodingtheMatrix |
| max_stars_repo_head_hexsha | b0d67e26940e59e51a7e2760b734c7de50490e1a |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | 3 |
| max_stars_repo_stars_event_min_datetime | 2018-01-11T07:48:06.000Z |
| max_stars_repo_stars_event_max_datetime | 2020-04-27T20:49:02.000Z |
| max_issues_repo_path | chap5_the_matrix/mat_sparsity.py |
| max_issues_repo_name | lastone9182/CodingtheMatrix |
| max_issues_repo_head_hexsha | b0d67e26940e59e51a7e2760b734c7de50490e1a |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | chap5_the_matrix/mat_sparsity.py |
| max_forks_repo_name | lastone9182/CodingtheMatrix |
| max_forks_repo_head_hexsha | b0d67e26940e59e51a7e2760b734c7de50490e1a |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | 1 |
| max_forks_repo_forks_event_min_datetime | 2021-01-26T07:25:48.000Z |
| max_forks_repo_forks_event_max_datetime | 2021-01-26T07:25:48.000Z |

content:

```python
'''
>>> from mat import Mat
>>> from vec import Vec
>>> (Mat((set(range(10000)),set(range(100000))),{(0,0):1})*Vec(set(range(100000)),{0:2}))[0]
2
>>> (Vec(set(range(100000)),{0:2})*Mat((set(range(10000)),set(range(100000))),{(0,0):1}).transpose())[0]
2
>>> (Mat((set(range(10000)),set(range(100000))),{(0,0):1})*Mat((set(range(100000)), set(range(9999))), {(0,0):2}))[0,0]
2
'''
```

| Field | Value |
|---|---|
| avg_line_length | 34.545455 |
| max_line_length | 119 |
| alphanum_fraction | 0.571053 |

| Signal (base name) | `*_quality_signal` value | raw value |
|---|---|---|
| qsc_code_num_words | 68 | 0 |
| qsc_code_num_chars | 380 | 0 |
| qsc_code_mean_word_length | 3.191176 | 0 |
| qsc_code_frac_words_unique | 0.191176 | null |
| qsc_code_frac_chars_top_2grams | 0.368664 | 1 |
| qsc_code_frac_chars_top_3grams | 0.387097 | 1 |
| qsc_code_frac_chars_top_4grams | 0.345622 | 1 |
| qsc_code_frac_chars_dupe_5grams | 0.640553 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0.640553 | 0 |
| qsc_code_frac_chars_dupe_7grams | 0.474654 | 0 |
| qsc_code_frac_chars_dupe_8grams | 0.474654 | 0 |
| qsc_code_frac_chars_dupe_9grams | 0.474654 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0.474654 | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.218487 | 1 |
| qsc_code_frac_chars_whitespace | 0.060526 | 0 |
| qsc_code_size_file_byte | 380 | 0 |
| qsc_code_num_lines | 10 | 0 |
| qsc_code_num_chars_line_max | 120 | 0 |
| qsc_code_num_chars_line_mean | 38 | 0 |
| qsc_code_frac_chars_alphabet | 0.389356 | 1 |
| qsc_code_frac_chars_comments | 0.976316 | 1 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | null | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | null | 0 |
| qsc_code_frac_chars_string_length | 0 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | null | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | null | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | null | 0 |
| qsc_codepython_cate_var_zero | true | 1 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0 | 0 |
| qsc_codepython_frac_lines_simplefunc | null | 0 |
| qsc_codepython_score_lines_no_logic | null | 0 |
| qsc_codepython_frac_lines_print | null | 0 |

| Field | Value |
|---|---|
| effective | 0 |
| hits | 7 |

| Field | Value |
|---|---|
| hexsha | 750d3c7aad13fae1d8402066c297133276f505fe |
| size | 4,058 |
| ext | py |
| lang | Python |
| max_stars_repo_path | tests/test_sonarr.py |
| max_stars_repo_name | TJohnson93/Comandarr |
| max_stars_repo_head_hexsha | 7db6475da14bfb62270ea35b2771306e856138be |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | 89 |
| max_stars_repo_stars_event_min_datetime | 2018-01-01T23:36:17.000Z |
| max_stars_repo_stars_event_max_datetime | 2022-03-18T09:49:58.000Z |
| max_issues_repo_path | tests/test_sonarr.py |
| max_issues_repo_name | fieldju/Commandarr |
| max_issues_repo_head_hexsha | 01587cc2643436f232829273b9fc89e33ff45136 |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | 1 |
| max_issues_repo_issues_event_min_datetime | 2018-11-28T01:58:30.000Z |
| max_issues_repo_issues_event_max_datetime | 2020-12-02T17:17:30.000Z |
| max_forks_repo_path | tests/test_sonarr.py |
| max_forks_repo_name | fieldju/Commandarr |
| max_forks_repo_head_hexsha | 01587cc2643436f232829273b9fc89e33ff45136 |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | 13 |
| max_forks_repo_forks_event_min_datetime | 2018-01-01T23:40:20.000Z |
| max_forks_repo_forks_event_max_datetime | 2021-05-24T05:18:00.000Z |

content:

```python
# #############################################################################
# File Name: test_sonarr.py
# Author: Todd Johnson
# Creation Date: 08/04/2017
#
# Description: [To be completed]
#
# Offical website: https://www.comandarr.github.io
# Github Source: https://www.github.com/comandarr/comandarr/
# Readme Source: https://www.github.com/comandarr/comandarr/README.md
#
# #############################################################################
# import pytest
# import yaml
# from comandarr import sonarr
#
# from definitions import CONFIG_PATH
# config = yaml.safe_load(open(CONFIG_PATH))
#
#
# def test_lookupSeriesByName():
# assert sonarr.lookupSeriesByName('Futurama') == [{u'certification': u'TV-14', u'overview': u'A late 20th-century New York City pizza delivery boy, Philip J. Fry, after being unwittingly cryogenically frozen for one thousand years, finds employment at Planet Express, an interplanetary delivery company in the 31st century.', u'airTime': u'22:00', u'firstAired': u'1999-03-27T14:00:00Z', u'tvRageId': 3628, u'year': 1999, u'images': [{u'coverType': u'fanart', u'url': u'http://thetvdb.com/banners/fanart/original/73871-21.jpg'}, {u'coverType': u'banner', u'url': u'http://thetvdb.com/banners/graphical/73871-g15.jpg'}, {u'coverType': u'poster', u'url': u'http://thetvdb.com/banners/posters/73871-2.jpg'}], u'ratings': {u'votes': 613, u'value': 8.8}, u'genres': [u'Animation', u'Comedy', u'Science-Fiction'], u'monitored': False, u'network': u'Comedy Central (US)', u'title': u'Futurama', u'remotePoster': u'http://thetvdb.com/banners/posters/73871-2.jpg', u'seasonCount': 7, u'seriesType': u'standard', u'status': u'ended', u'added': u'0001-01-01T00:00:00Z', u'tvdbId': 73871, u'tags': [], u'imdbId': u'tt0149460', u'seasonFolder': False, u'cleanTitle': u'futurama', u'sortTitle': u'futurama', u'seasons': [{u'monitored': False, u'seasonNumber': 0}, {u'monitored': False, u'seasonNumber': 1}, {u'monitored': False, u'seasonNumber': 2}, {u'monitored': False, u'seasonNumber': 3}, {u'monitored': False, u'seasonNumber': 4}, {u'monitored': False, u'seasonNumber': 5}, {u'monitored': False, u'seasonNumber': 6}, {u'monitored': False, u'seasonNumber': 7}], u'useSceneNumbering': False, u'titleSlug': u'futurama', u'qualityProfileId': 0, u'profileId': 0, u'runtime': 20, u'tvMazeId': 538}]
#
#
# def test_lookupSeriesByTvdbId():
# assert sonarr.lookupSeriesByTvdbId(73871) == [{u'certification': u'TV-14', u'overview': u'A late 20th-century New York City pizza delivery boy, Philip J. Fry, after being unwittingly cryogenically frozen for one thousand years, finds employment at Planet Express, an interplanetary delivery company in the 31st century.', u'airTime': u'22:00', u'firstAired': u'1999-03-27T14:00:00Z', u'tvRageId': 3628, u'year': 1999, u'images': [{u'coverType': u'fanart', u'url': u'http://thetvdb.com/banners/fanart/original/73871-21.jpg'}, {u'coverType': u'banner', u'url': u'http://thetvdb.com/banners/graphical/73871-g15.jpg'}, {u'coverType': u'poster', u'url': u'http://thetvdb.com/banners/posters/73871-2.jpg'}], u'ratings': {u'votes': 613, u'value': 8.8}, u'genres': [u'Animation', u'Comedy', u'Science-Fiction'], u'monitored': False, u'network': u'Comedy Central (US)', u'title': u'Futurama', u'remotePoster': u'http://thetvdb.com/banners/posters/73871-2.jpg', u'seasonCount': 7, u'seriesType': u'standard', u'status': u'ended', u'added': u'0001-01-01T00:00:00Z', u'tvdbId': 73871, u'tags': [], u'imdbId': u'tt0149460', u'seasonFolder': False, u'cleanTitle': u'futurama', u'sortTitle': u'futurama', u'seasons': [{u'monitored': False, u'seasonNumber': 0}, {u'monitored': False, u'seasonNumber': 1}, {u'monitored': False, u'seasonNumber': 2}, {u'monitored': False, u'seasonNumber': 3}, {u'monitored': False, u'seasonNumber': 4}, {u'monitored': False, u'seasonNumber': 5}, {u'monitored': False, u'seasonNumber': 6}, {u'monitored': False, u'seasonNumber': 7}], u'useSceneNumbering': False, u'titleSlug': u'futurama', u'qualityProfileId': 0, u'profileId': 0, u'runtime': 20, u'tvMazeId': 538}]
```

| Field | Value |
|---|---|
| avg_line_length | 144.928571 |
| max_line_length | 1,688 |
| alphanum_fraction | 0.675949 |

| Signal (base name) | `*_quality_signal` value | raw value |
|---|---|---|
| qsc_code_num_words | 596 | 0 |
| qsc_code_num_chars | 4,058 | 0 |
| qsc_code_mean_word_length | 4.592282 | 0 |
| qsc_code_frac_words_unique | 0.266779 | null |
| qsc_code_frac_chars_top_2grams | 0.048228 | 0 |
| qsc_code_frac_chars_top_3grams | 0.098648 | 0 |
| qsc_code_frac_chars_top_4grams | 0.105225 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0.866642 | 1 |
| qsc_code_frac_chars_dupe_6grams | 0.866642 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.866642 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.866642 | 1 |
| qsc_code_frac_chars_dupe_9grams | 0.832298 | 1 |
| qsc_code_frac_chars_dupe_10grams | 0.832298 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.060434 | 0 |
| qsc_code_frac_chars_whitespace | 0.090685 | 0 |
| qsc_code_size_file_byte | 4,058 | 0 |
| qsc_code_num_lines | 27 | 0 |
| qsc_code_num_chars_line_max | 1,689 | 1 |
| qsc_code_num_chars_line_mean | 150.296296 | 1 |
| qsc_code_frac_chars_alphabet | 0.681301 | 0 |
| qsc_code_frac_chars_comments | 0.948497 | 1 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | null | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | null | 0 |
| qsc_code_frac_chars_string_length | 0 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | null | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | null | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | null | 0 |
| qsc_codepython_cate_var_zero | true | 1 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0 | 0 |
| qsc_codepython_frac_lines_simplefunc | null | 0 |
| qsc_codepython_score_lines_no_logic | null | 0 |
| qsc_codepython_frac_lines_print | null | 0 |

| Field | Value |
|---|---|
| effective | 0 |
| hits | 10 |
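
The test_sonarr.py record above is almost entirely commented-out code, which is why `qsc_code_frac_chars_comments_quality_signal` is 0.948497 and most of the Python-specific line signals are null. A sketch of how a comment-character fraction could be measured with the standard `tokenize` module; this is an assumed definition, not necessarily the one behind the value above:

```python
import io
import tokenize

def frac_chars_comments(content: str) -> float:
    """Approximate share of characters that belong to `#` comments (assumed definition)."""
    comment_chars = 0
    try:
        for tok in tokenize.generate_tokens(io.StringIO(content).readline):
            if tok.type == tokenize.COMMENT:
                comment_chars += len(tok.string)
    except (tokenize.TokenError, SyntaxError, IndentationError):
        pass  # keep whatever was counted before the tokenizer gave up
    return comment_chars / len(content) if content else 0.0
```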

| Field | Value |
|---|---|
| hexsha | f3552a531087632dd6f6518986394865acd16fc6 |
| size | 1,807 |
| ext | py |
| lang | Python |
| max_stars_repo_path | python/tests/test_breadth_first_graph.py |
| max_stars_repo_name | Yonatan1P/data-structures-and-algorithms |
| max_stars_repo_head_hexsha | ddd647d52a3182ca01032bfdb72f94ea22a0e76b |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | 1 |
| max_stars_repo_stars_event_min_datetime | 2020-12-16T22:38:12.000Z |
| max_stars_repo_stars_event_max_datetime | 2020-12-16T22:38:12.000Z |
| max_issues_repo_path | python/tests/test_breadth_first_graph.py |
| max_issues_repo_name | Yonatan1P/data-structures-and-algorithms |
| max_issues_repo_head_hexsha | ddd647d52a3182ca01032bfdb72f94ea22a0e76b |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | 1 |
| max_issues_repo_issues_event_min_datetime | 2020-11-14T05:37:48.000Z |
| max_issues_repo_issues_event_max_datetime | 2020-11-14T05:37:48.000Z |
| max_forks_repo_path | python/tests/test_breadth_first_graph.py |
| max_forks_repo_name | Yonatan1P/data-structures-and-algorithms |
| max_forks_repo_head_hexsha | ddd647d52a3182ca01032bfdb72f94ea22a0e76b |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:

```python
from challenges.breadth_first_graph.breadth_first_graph import Graph, Node
def test_breadth_first_graph_happy():
graph = Graph()
vertex1 = graph.add_vertex("Hi")
vertex2 = graph.add_vertex("my")
vertex3 = graph.add_vertex("name")
vertex4 = graph.add_vertex("is")
vertex5 = graph.add_vertex("Yoni")
vertex6 = graph.add_vertex("!")
graph.add_edge(vertex1,vertex2)
graph.add_edge(vertex1,vertex3)
graph.add_edge(vertex2,vertex3)
graph.add_edge(vertex3,vertex4)
graph.add_edge(vertex4,vertex3)
graph.add_edge(vertex4,vertex5)
graph.add_edge(vertex5,vertex6)
actual = graph.breadth_first_traversal(vertex1)
expected = "HimynameisYoni!"
assert actual == expected
def test_breadth_first_graph_with_loop():
graph = Graph()
vertex1 = graph.add_vertex("Hi")
vertex2 = graph.add_vertex("my")
vertex3 = graph.add_vertex("name")
vertex4 = graph.add_vertex("is")
vertex5 = graph.add_vertex("Yoni")
vertex6 = graph.add_vertex("!")
graph.add_edge(vertex1,vertex2)
graph.add_edge(vertex1,vertex3)
graph.add_edge(vertex2,vertex3)
graph.add_edge(vertex3,vertex4)
graph.add_edge(vertex4,vertex3)
graph.add_edge(vertex4,vertex5)
graph.add_edge(vertex5,vertex6)
graph.add_edge(vertex6,vertex1)
actual = graph.breadth_first_traversal(vertex1)
expected = "HimynameisYoni!"
assert actual == expected
def test_breadth_first_graph_no_edges():
graph = Graph()
vertex1 = graph.add_vertex("Hi")
vertex2 = graph.add_vertex("my")
vertex3 = graph.add_vertex("name")
vertex4 = graph.add_vertex("is")
vertex5 = graph.add_vertex("Yoni")
vertex6 = graph.add_vertex("!")
actual = graph.breadth_first_traversal(vertex1)
expected = "Hi"
assert actual == expected
```

| Field | Value |
|---|---|
| avg_line_length | 33.462963 |
| max_line_length | 74 |
| alphanum_fraction | 0.710017 |

| Signal (base name) | `*_quality_signal` value | raw value |
|---|---|---|
| qsc_code_num_words | 232 | 0 |
| qsc_code_num_chars | 1,807 | 0 |
| qsc_code_mean_word_length | 5.284483 | 0 |
| qsc_code_frac_words_unique | 0.142241 | null |
| qsc_code_frac_chars_top_2grams | 0.215334 | 1 |
| qsc_code_frac_chars_top_3grams | 0.205546 | 1 |
| qsc_code_frac_chars_top_4grams | 0.092985 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0.893148 | 1 |
| qsc_code_frac_chars_dupe_6grams | 0.873573 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.873573 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.835237 | 1 |
| qsc_code_frac_chars_dupe_9grams | 0.835237 | 1 |
| qsc_code_frac_chars_dupe_10grams | 0.835237 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.03391 | 0 |
| qsc_code_frac_chars_whitespace | 0.167681 | 0 |
| qsc_code_size_file_byte | 1,807 | 0 |
| qsc_code_num_lines | 53 | 0 |
| qsc_code_num_chars_line_max | 75 | 0 |
| qsc_code_num_chars_line_mean | 34.09434 | 0 |
| qsc_code_frac_chars_alphabet | 0.78125 | 0 |
| qsc_code_frac_chars_comments | 0 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0.877551 | 1 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.042659 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0.061224 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0.061224 | 0 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0.020408 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.081633 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |

| Field | Value |
|---|---|
| effective | 0 |
| hits | 9 |

| Field | Value |
|---|---|
| hexsha | 340368e5f813e737c1552a7ba5c8d881c7234654 |
| size | 11,919 |
| ext | py |
| lang | Python |
| max_stars_repo_path | tests/integration/test_properties.py |
| max_stars_repo_name | dciborow/pytest-nunit |
| max_stars_repo_head_hexsha | 1d76c08b807332f12f93d156f297d856aff71157 |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | 6 |
| max_stars_repo_stars_event_min_datetime | 2019-07-27T00:52:04.000Z |
| max_stars_repo_stars_event_max_datetime | 2021-02-05T13:03:15.000Z |
| max_issues_repo_path | tests/integration/test_properties.py |
| max_issues_repo_name | dciborow/pytest-nunit |
| max_issues_repo_head_hexsha | 1d76c08b807332f12f93d156f297d856aff71157 |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | 37 |
| max_issues_repo_issues_event_min_datetime | 2019-07-15T03:43:55.000Z |
| max_issues_repo_issues_event_max_datetime | 2022-03-21T16:18:34.000Z |
| max_forks_repo_path | tests/integration/test_properties.py |
| max_forks_repo_name | dciborow/pytest-nunit |
| max_forks_repo_head_hexsha | 1d76c08b807332f12f93d156f297d856aff71157 |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | 14 |
| max_forks_repo_forks_event_min_datetime | 2019-07-16T21:38:13.000Z |
| max_forks_repo_forks_event_max_datetime | 2022-03-22T09:59:59.000Z |

content:

```python
"""
Test adding properties to tests
"""
import xmlschema
import os
def test_basic_property(testdir, tmpdir):
"""
Test a basic test with an additional property
"""
testdir.makepyfile(
"""
def test_basic(record_nunit_property):
record_nunit_property("test", "value")
assert 1 == 1
"""
)
outfile = tmpdir.join("out.xml")
outfile_pth = str(outfile)
result = testdir.runpytest("-v", "--nunit-xml=" + outfile_pth)
result.stdout.fnmatch_lines(["*test_basic PASSED*"])
assert result.ret == 0
os.path.exists(outfile_pth)
xs = xmlschema.XMLSchema(
os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"../../ext/nunit-src/TestResult.xsd",
),
validation="lax",
)
out = xs.to_dict(outfile_pth)
assert out["@total"] == 1, out
assert out["@passed"] == 1, out
assert out["@failed"] == 0, out
assert out["@skipped"] == 0, out
assert out["test-suite"]["@total"] == 1
assert out["test-suite"]["@passed"] == 1
assert out["test-suite"]["@failed"] == 0
assert out["test-suite"]["@skipped"] == 0
assert "test" in [
i["@name"] for i in out["test-suite"]["test-case"]["properties"]["property"]
]
assert "value" in [
i["@value"] for i in out["test-suite"]["test-case"]["properties"]["property"]
]
assert "python-version" in [
i["@name"] for i in out["test-suite"]["test-case"]["properties"]["property"]
]
assert "fspath" in [
i["@name"] for i in out["test-suite"]["test-case"]["properties"]["property"]
]
assert "test_basic_property.py" in [
i["@value"] for i in out["test-suite"]["test-case"]["properties"]["property"]
]
assert out["test-suite"]["test-case"]["@classname"] == "test_basic_property.py"
assert out["test-suite"]["test-case"]["@methodname"] == "test_basic"
def test_attachment(testdir, tmpdir):
"""
Test a basic test with an additional property
"""
testdir.makepyfile(
"""
def test_basic(add_nunit_attachment):
add_nunit_attachment("file.pth", "desc")
assert 1 == 1
"""
)
outfile = tmpdir.join("out.xml")
outfile_pth = str(outfile)
result = testdir.runpytest("-v", "--nunit-xml=" + outfile_pth)
result.stdout.fnmatch_lines(["*test_basic PASSED*"])
assert result.ret == 0
os.path.exists(outfile_pth)
xs = xmlschema.XMLSchema(
os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"../../ext/nunit-src/TestResult.xsd",
),
validation="lax",
)
out = xs.to_dict(outfile_pth)
assert out["@total"] == 1, out
assert out["@passed"] == 1, out
assert out["@failed"] == 0, out
assert out["@skipped"] == 0, out
assert out["test-suite"]["@total"] == 1
assert out["test-suite"]["@passed"] == 1
assert out["test-suite"]["@failed"] == 0
assert out["test-suite"]["@skipped"] == 0
assert (
out["test-suite"]["test-case"]["attachments"]["attachment"][0]["description"]
== "desc"
)
assert (
out["test-suite"]["test-case"]["attachments"]["attachment"][0]["filePath"]
== "file.pth"
)
def test_attachment_attach_on_any(testdir, tmpdir):
"""
Test that nunit_attach_on=any sets attachment properties
"""
testdir.makepyfile(
"""
def test_pass(add_nunit_attachment):
add_nunit_attachment("pass.pth", "desc")
assert 1 == 1
def test_fail(add_nunit_attachment):
add_nunit_attachment("fail.pth", "desc")
assert 1 == 0
"""
)
testdir.makefile(".ini", pytest="[pytest]\nnunit_attach_on=any\n")
outfile = tmpdir.join("out.xml")
outfile_pth = str(outfile)
result = testdir.runpytest("-v", "--nunit-xml=" + outfile_pth)
result.stdout.fnmatch_lines(["*test_pass PASSED*", "*test_fail FAILED*"])
assert result.ret == 1
os.path.exists(outfile_pth)
xs = xmlschema.XMLSchema(
os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"../../ext/nunit-src/TestResult.xsd",
),
validation="lax",
)
out = xs.to_dict(outfile_pth)
assert out["@total"] == 2, out
assert out["@passed"] == 1, out
assert out["@failed"] == 1, out
assert out["@skipped"] == 0, out
assert out["test-suite"]["@total"] == 2
assert out["test-suite"]["@passed"] == 1
assert out["test-suite"]["@failed"] == 1
assert out["test-suite"]["@skipped"] == 0
for case in out["test-suite"]["test-case"]:
if case["@name"] == "test_attachment_attach_on_any.py::test_pass":
assert case["attachments"]["attachment"][0]["description"] == "desc"
assert case["attachments"]["attachment"][0]["filePath"] == "pass.pth"
else:
assert case["attachments"]["attachment"][0]["description"] == "desc"
assert case["attachments"]["attachment"][0]["filePath"] == "fail.pth"
def test_attachment_attach_on_fail(testdir, tmpdir):
"""
Test that nunit_attach_on=fail sets attachment properties
"""
testdir.makepyfile(
"""
def test_pass(add_nunit_attachment):
add_nunit_attachment("pass.pth", "desc")
assert 1 == 1
def test_fail(add_nunit_attachment):
add_nunit_attachment("fail.pth", "desc")
assert 1 == 0
"""
)
testdir.makefile(".ini", pytest="[pytest]\nnunit_attach_on=fail\n")
outfile = tmpdir.join("out.xml")
outfile_pth = str(outfile)
result = testdir.runpytest("-v", "--nunit-xml=" + outfile_pth)
result.stdout.fnmatch_lines(["*test_pass PASSED*", "*test_fail FAILED*"])
assert result.ret == 1
os.path.exists(outfile_pth)
xs = xmlschema.XMLSchema(
os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"../../ext/nunit-src/TestResult.xsd",
),
validation="lax",
)
out = xs.to_dict(outfile_pth)
assert out["@total"] == 2, out
assert out["@passed"] == 1, out
assert out["@failed"] == 1, out
assert out["@skipped"] == 0, out
assert out["test-suite"]["@total"] == 2
assert out["test-suite"]["@passed"] == 1
assert out["test-suite"]["@failed"] == 1
assert out["test-suite"]["@skipped"] == 0
for case in out["test-suite"]["test-case"]:
if case["@name"] == "test_attachment_attach_on_fail.py::test_fail":
assert case["attachments"]["attachment"][0]["description"] == "desc"
assert case["attachments"]["attachment"][0]["filePath"] == "fail.pth"
else:
assert "attachments" not in case
def test_attachment_attach_on_pass(testdir, tmpdir):
"""
Test that nunit_attach_on=pass sets attachment properties
"""
testdir.makepyfile(
"""
def test_pass(add_nunit_attachment):
add_nunit_attachment("pass.pth", "desc")
assert 1 == 1
def test_fail(add_nunit_attachment):
add_nunit_attachment("fail.pth", "desc")
assert 1 == 0
"""
)
testdir.makefile(".ini", pytest="[pytest]\nnunit_attach_on=pass\n")
outfile = tmpdir.join("out.xml")
outfile_pth = str(outfile)
result = testdir.runpytest("-v", "--nunit-xml=" + outfile_pth)
result.stdout.fnmatch_lines(["*test_pass PASSED*", "*test_fail FAILED*"])
assert result.ret == 1
os.path.exists(outfile_pth)
xs = xmlschema.XMLSchema(
os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"../../ext/nunit-src/TestResult.xsd",
),
validation="lax",
)
out = xs.to_dict(outfile_pth)
assert out["@total"] == 2, out
assert out["@passed"] == 1, out
assert out["@failed"] == 1, out
assert out["@skipped"] == 0, out
assert out["test-suite"]["@total"] == 2
assert out["test-suite"]["@passed"] == 1
assert out["test-suite"]["@failed"] == 1
assert out["test-suite"]["@skipped"] == 0
for case in out["test-suite"]["test-case"]:
if case["@name"] == "test_attachment_attach_on_pass.py::test_pass":
assert case["attachments"]["attachment"][0]["description"] == "desc"
assert case["attachments"]["attachment"][0]["filePath"] == "pass.pth"
else:
assert "attachments" not in case, case["attachments"]
def test_slow_test(testdir, tmpdir):
"""
Test a test that takes 3 seconds
"""
testdir.makepyfile(
"""
import time
def test_basic():
time.sleep(3)
assert 1 == 1
"""
)
outfile = tmpdir.join("out.xml")
outfile_pth = str(outfile)
result = testdir.runpytest("-v", "--nunit-xml=" + outfile_pth)
result.stdout.fnmatch_lines(["*test_basic PASSED*"])
assert result.ret == 0
os.path.exists(outfile_pth)
xs = xmlschema.XMLSchema(
os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"../../ext/nunit-src/TestResult.xsd",
),
validation="lax",
)
out = xs.to_dict(outfile_pth)
assert out["@total"] == 1, out
assert out["@passed"] == 1, out
assert out["@failed"] == 0, out
assert out["@skipped"] == 0, out
assert out["test-suite"]["@total"] == 1
assert out["test-suite"]["@passed"] == 1
assert out["test-suite"]["@failed"] == 0
assert out["test-suite"]["@skipped"] == 0
assert out["test-suite"]["test-case"]["@duration"] > 3.0
def test_docstring(testdir, tmpdir):
testdir.makepyfile(
"""
'''Module description'''
def test_docstring(record_nunit_property):
'''Hello'''
record_nunit_property("test", "value")
assert 1 == 1
"""
)
outfile = tmpdir.join("out.xml")
outfile_pth = str(outfile)
result = testdir.runpytest("-v", "--nunit-xml=" + outfile_pth)
result.stdout.fnmatch_lines(["*test_docstring PASSED*"])
assert result.ret == 0
os.path.exists(outfile_pth)
xs = xmlschema.XMLSchema(
os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"../../ext/nunit-src/TestResult.xsd",
),
validation="lax",
)
out = xs.to_dict(outfile_pth)
assert out["@total"] == 1, out
assert out["@passed"] == 1, out
assert out["@failed"] == 0, out
assert out["@skipped"] == 0, out
assert out["test-suite"]["@total"] == 1
assert out["test-suite"]["@passed"] == 1
assert out["test-suite"]["@failed"] == 0
assert out["test-suite"]["@skipped"] == 0
assert out["test-suite"]["@label"] == "Module description"
assert out["test-suite"]["test-case"]["@label"] == "Hello"
def test_no_docstring(testdir, tmpdir):
testdir.makepyfile(
"""
def test_no_docstring(record_nunit_property):
record_nunit_property("test", "value")
assert 1 == 1
"""
)
outfile = tmpdir.join("out.xml")
outfile_pth = str(outfile)
result = testdir.runpytest("-v", "--nunit-xml=" + outfile_pth)
result.stdout.fnmatch_lines(["*test_no_docstring PASSED*"])
assert result.ret == 0
os.path.exists(outfile_pth)
xs = xmlschema.XMLSchema(
os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"../../ext/nunit-src/TestResult.xsd",
),
validation="lax",
)
out = xs.to_dict(outfile_pth)
assert out["@total"] == 1, out
assert out["@passed"] == 1, out
assert out["@failed"] == 0, out
assert out["@skipped"] == 0, out
assert out["test-suite"]["@total"] == 1
assert out["test-suite"]["@passed"] == 1
assert out["test-suite"]["@failed"] == 0
assert out["test-suite"]["@skipped"] == 0
assert out["test-suite"]["@label"] == ""
assert out["test-suite"]["test-case"]["@label"] == ""
```

| Field | Value |
|---|---|
| avg_line_length | 33.386555 |
| max_line_length | 85 |
| alphanum_fraction | 0.577314 |

| Signal (base name) | `*_quality_signal` value | raw value |
|---|---|---|
| qsc_code_num_words | 1,443 | 0 |
| qsc_code_num_chars | 11,919 | 0 |
| qsc_code_mean_word_length | 4.63271 | 0 |
| qsc_code_frac_words_unique | 0.0693 | null |
| qsc_code_frac_chars_top_2grams | 0.09828 | 0 |
| qsc_code_frac_chars_top_3grams | 0.087958 | 0 |
| qsc_code_frac_chars_top_4grams | 0.110396 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0.930591 | 1 |
| qsc_code_frac_chars_dupe_6grams | 0.909648 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.889454 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.855198 | 1 |
| qsc_code_frac_chars_dupe_9grams | 0.855198 | 1 |
| qsc_code_frac_chars_dupe_10grams | 0.844727 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.011898 | 0 |
| qsc_code_frac_chars_whitespace | 0.238443 | 0 |
| qsc_code_size_file_byte | 11,919 | 0 |
| qsc_code_num_lines | 356 | 0 |
| qsc_code_num_chars_line_max | 86 | 0 |
| qsc_code_num_chars_line_mean | 33.480337 | 0 |
| qsc_code_frac_chars_alphabet | 0.724579 | 0 |
| qsc_code_frac_chars_comments | 0.027603 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0.767717 | 1 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.268168 | 0 |
| qsc_code_frac_chars_long_word_length | 0.054254 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0.377953 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0.031496 | 0 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0.11811 | 1 |
| qsc_codepython_frac_lines_import | 0.007874 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.03937 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |

| Field | Value |
|---|---|
| effective | 0 |
| hits | 8 |
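
The next record opens with `# AUTO-GENERATED file from IFMapApiGenerator. Do Not Edit!`, exactly the kind of header a `qsc_code_cate_autogen`-style flag is meant to catch. A sketch of a simple marker-based heuristic; the marker list and the ten-line look-ahead window are assumptions, not the dataset's actual rule:

```python
import re

# Hypothetical marker list; the dump does not show the real detector.
AUTOGEN_MARKERS = re.compile(
    r"auto[- ]?generated|generated by|do not edit|swagger-codegen",
    re.IGNORECASE,
)

def looks_autogenerated(content: str, head_lines: int = 10) -> bool:
    """Flag a file as auto-generated if a marker appears in its first few lines."""
    head = "\n".join(content.splitlines()[:head_lines])
    return bool(AUTOGEN_MARKERS.search(head))
```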

| Field | Value |
|---|---|
| hexsha | 343dac843f956ec464ddfb5c39c1361b9bc80565 |
| size | 17,992 |
| ext | py |
| lang | Python |
| max_stars_repo_path | generated/resources/interface_route_table_heat.py |
| max_stars_repo_name | atsgen/tf-heat-plugin |
| max_stars_repo_head_hexsha | 5c0405eb93287368f60f7e227e5af5ada6bfeed2 |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | 1 |
| max_stars_repo_stars_event_min_datetime | 2020-04-05T19:43:40.000Z |
| max_stars_repo_stars_event_max_datetime | 2020-04-05T19:43:40.000Z |
| max_issues_repo_path | generated/resources/interface_route_table_heat.py |
| max_issues_repo_name | atsgen/tf-heat-plugin |
| max_issues_repo_head_hexsha | 5c0405eb93287368f60f7e227e5af5ada6bfeed2 |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | generated/resources/interface_route_table_heat.py |
| max_forks_repo_name | atsgen/tf-heat-plugin |
| max_forks_repo_head_hexsha | 5c0405eb93287368f60f7e227e5af5ada6bfeed2 |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | 1 |
| max_forks_repo_forks_event_min_datetime | 2020-08-25T12:47:27.000Z |
| max_forks_repo_forks_event_max_datetime | 2020-08-25T12:47:27.000Z |

content:
# AUTO-GENERATED file from IFMapApiGenerator. Do Not Edit!
from contrail_heat.resources import contrail
try:
from heat.common.i18n import _
except ImportError:
pass
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
try:
from heat.openstack.common import log as logging
except ImportError:
from oslo_log import log as logging
import uuid
from vnc_api import vnc_api
LOG = logging.getLogger(__name__)
class ContrailInterfaceRouteTable(contrail.ContrailResource):
PROPERTIES = (
NAME, FQ_NAME, DISPLAY_NAME, INTERFACE_ROUTE_TABLE_ROUTES, INTERFACE_ROUTE_TABLE_ROUTES_ROUTE, INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_PREFIX, INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_NEXT_HOP, INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_NEXT_HOP_TYPE, INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_COMMUNITY_ATTRIBUTES, INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_COMMUNITY_ATTRIBUTES_COMMUNITY_ATTRIBUTE, SERVICE_INSTANCE_REFS, SERVICE_INSTANCE_REFS_DATA, SERVICE_INSTANCE_REFS_DATA_INTERFACE_TYPE, PROJECT
) = (
'name', 'fq_name', 'display_name', 'interface_route_table_routes', 'interface_route_table_routes_route', 'interface_route_table_routes_route_prefix', 'interface_route_table_routes_route_next_hop', 'interface_route_table_routes_route_next_hop_type', 'interface_route_table_routes_route_community_attributes', 'interface_route_table_routes_route_community_attributes_community_attribute', 'service_instance_refs', 'service_instance_refs_data', 'service_instance_refs_data_interface_type', 'project'
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('NAME.'),
update_allowed=True,
required=False,
),
FQ_NAME: properties.Schema(
properties.Schema.STRING,
_('FQ_NAME.'),
update_allowed=True,
required=False,
),
DISPLAY_NAME: properties.Schema(
properties.Schema.STRING,
_('DISPLAY_NAME.'),
update_allowed=True,
required=False,
),
INTERFACE_ROUTE_TABLE_ROUTES: properties.Schema(
properties.Schema.MAP,
_('INTERFACE_ROUTE_TABLE_ROUTES.'),
update_allowed=True,
required=False,
schema={
INTERFACE_ROUTE_TABLE_ROUTES_ROUTE: properties.Schema(
properties.Schema.LIST,
_('INTERFACE_ROUTE_TABLE_ROUTES_ROUTE.'),
update_allowed=True,
required=False,
schema=properties.Schema(
properties.Schema.MAP,
schema={
INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_PREFIX: properties.Schema(
properties.Schema.STRING,
_('INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_PREFIX.'),
update_allowed=True,
required=False,
),
INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_NEXT_HOP: properties.Schema(
properties.Schema.STRING,
_('INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_NEXT_HOP.'),
update_allowed=True,
required=False,
),
INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_NEXT_HOP_TYPE: properties.Schema(
properties.Schema.STRING,
_('INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_NEXT_HOP_TYPE.'),
update_allowed=True,
required=False,
constraints=[
constraints.AllowedValues([u'service-instance', u'ip-address']),
],
),
INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_COMMUNITY_ATTRIBUTES: properties.Schema(
properties.Schema.MAP,
_('INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_COMMUNITY_ATTRIBUTES.'),
update_allowed=True,
required=False,
schema={
INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_COMMUNITY_ATTRIBUTES_COMMUNITY_ATTRIBUTE: properties.Schema(
properties.Schema.LIST,
_('INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_COMMUNITY_ATTRIBUTES_COMMUNITY_ATTRIBUTE.'),
update_allowed=True,
required=False,
),
}
),
}
)
),
}
),
SERVICE_INSTANCE_REFS: properties.Schema(
properties.Schema.LIST,
_('SERVICE_INSTANCE_REFS.'),
update_allowed=True,
required=False,
),
SERVICE_INSTANCE_REFS_DATA: properties.Schema(
properties.Schema.LIST,
_('SERVICE_INSTANCE_REFS_DATA.'),
update_allowed=True,
required=False,
schema=properties.Schema(
properties.Schema.MAP,
schema={
SERVICE_INSTANCE_REFS_DATA_INTERFACE_TYPE: properties.Schema(
properties.Schema.STRING,
_('SERVICE_INSTANCE_REFS_DATA_INTERFACE_TYPE.'),
update_allowed=True,
required=False,
),
}
)
),
PROJECT: properties.Schema(
properties.Schema.STRING,
_('PROJECT.'),
update_allowed=True,
required=False,
),
}
attributes_schema = {
NAME: attributes.Schema(
_('NAME.'),
),
FQ_NAME: attributes.Schema(
_('FQ_NAME.'),
),
DISPLAY_NAME: attributes.Schema(
_('DISPLAY_NAME.'),
),
INTERFACE_ROUTE_TABLE_ROUTES: attributes.Schema(
_('INTERFACE_ROUTE_TABLE_ROUTES.'),
),
SERVICE_INSTANCE_REFS: attributes.Schema(
_('SERVICE_INSTANCE_REFS.'),
),
SERVICE_INSTANCE_REFS_DATA: attributes.Schema(
_('SERVICE_INSTANCE_REFS_DATA.'),
),
PROJECT: attributes.Schema(
_('PROJECT.'),
),
}
update_allowed_keys = ('Properties',)
def handle_create(self):
parent_obj = None
if parent_obj is None and self.properties.get(self.PROJECT):
try:
parent_obj = self.vnc_lib().project_read(id=self.properties.get(self.PROJECT))
except vnc_api.NoIdError:
parent_obj = self.vnc_lib().project_read(fq_name_str=self.properties.get(self.PROJECT))
except:
parent_obj = None
if parent_obj is None:
tenant_id = self.stack.context.tenant_id
parent_obj = self.vnc_lib().project_read(id=str(uuid.UUID(tenant_id)))
if parent_obj is None:
raise Exception('Error: parent is not specified in template!')
obj_0 = vnc_api.InterfaceRouteTable(name=self.properties[self.NAME],
parent_obj=parent_obj)
if self.properties.get(self.DISPLAY_NAME) is not None:
obj_0.set_display_name(self.properties.get(self.DISPLAY_NAME))
if self.properties.get(self.INTERFACE_ROUTE_TABLE_ROUTES) is not None:
obj_1 = vnc_api.RouteTableType()
if self.properties.get(self.INTERFACE_ROUTE_TABLE_ROUTES, {}).get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE) is not None:
for index_1 in range(len(self.properties.get(self.INTERFACE_ROUTE_TABLE_ROUTES, {}).get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE))):
obj_2 = vnc_api.RouteType()
if self.properties.get(self.INTERFACE_ROUTE_TABLE_ROUTES, {}).get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE, {})[index_1].get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_PREFIX) is not None:
obj_2.set_prefix(self.properties.get(self.INTERFACE_ROUTE_TABLE_ROUTES, {}).get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE, {})[index_1].get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_PREFIX))
if self.properties.get(self.INTERFACE_ROUTE_TABLE_ROUTES, {}).get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE, {})[index_1].get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_NEXT_HOP) is not None:
obj_2.set_next_hop(self.properties.get(self.INTERFACE_ROUTE_TABLE_ROUTES, {}).get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE, {})[index_1].get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_NEXT_HOP))
if self.properties.get(self.INTERFACE_ROUTE_TABLE_ROUTES, {}).get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE, {})[index_1].get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_NEXT_HOP_TYPE) is not None:
obj_2.set_next_hop_type(self.properties.get(self.INTERFACE_ROUTE_TABLE_ROUTES, {}).get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE, {})[index_1].get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_NEXT_HOP_TYPE))
if self.properties.get(self.INTERFACE_ROUTE_TABLE_ROUTES, {}).get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE, {})[index_1].get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_COMMUNITY_ATTRIBUTES) is not None:
obj_3 = vnc_api.CommunityAttributes()
if self.properties.get(self.INTERFACE_ROUTE_TABLE_ROUTES, {}).get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE, {})[index_1].get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_COMMUNITY_ATTRIBUTES, {}).get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_COMMUNITY_ATTRIBUTES_COMMUNITY_ATTRIBUTE) is not None:
for index_3 in range(len(self.properties.get(self.INTERFACE_ROUTE_TABLE_ROUTES, {}).get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE, {})[index_1].get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_COMMUNITY_ATTRIBUTES, {}).get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_COMMUNITY_ATTRIBUTES_COMMUNITY_ATTRIBUTE))):
obj_3.add_community_attribute(self.properties.get(self.INTERFACE_ROUTE_TABLE_ROUTES, {}).get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE, {})[index_1].get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_COMMUNITY_ATTRIBUTES, {}).get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_COMMUNITY_ATTRIBUTES_COMMUNITY_ATTRIBUTE)[index_3])
obj_2.set_community_attributes(obj_3)
obj_1.add_route(obj_2)
obj_0.set_interface_route_table_routes(obj_1)
# reference to service_instance_refs
obj_1 = None
if self.properties.get(self.SERVICE_INSTANCE_REFS_DATA) is not None:
for index_0 in range(len(self.properties.get(self.SERVICE_INSTANCE_REFS_DATA))):
obj_1 = vnc_api.ServiceInterfaceTag()
if self.properties.get(self.SERVICE_INSTANCE_REFS_DATA, {})[index_0].get(self.SERVICE_INSTANCE_REFS_DATA_INTERFACE_TYPE) is not None:
obj_1.set_interface_type(self.properties.get(self.SERVICE_INSTANCE_REFS_DATA, {})[index_0].get(self.SERVICE_INSTANCE_REFS_DATA_INTERFACE_TYPE))
if self.properties.get(self.SERVICE_INSTANCE_REFS):
try:
ref_obj = self.vnc_lib().service_instance_read(
id=self.properties.get(self.SERVICE_INSTANCE_REFS)[index_0]
)
except vnc_api.NoIdError:
ref_obj = self.vnc_lib().service_instance_read(
fq_name_str=self.properties.get(self.SERVICE_INSTANCE_REFS)[index_0]
)
obj_0.add_service_instance(ref_obj, obj_1)
try:
obj_uuid = super(ContrailInterfaceRouteTable, self).resource_create(obj_0)
except:
raise Exception(_('interface-route-table %s could not be created.') % self.name)
self.resource_id_set(obj_uuid)
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
try:
obj_0 = self.vnc_lib().interface_route_table_read(
id=self.resource_id
)
except:
raise Exception(_('interface-route-table %s not found.') % self.name)
if prop_diff.get(self.DISPLAY_NAME) is not None:
obj_0.set_display_name(prop_diff.get(self.DISPLAY_NAME))
if prop_diff.get(self.INTERFACE_ROUTE_TABLE_ROUTES) is not None:
obj_1 = vnc_api.RouteTableType()
if prop_diff.get(self.INTERFACE_ROUTE_TABLE_ROUTES, {}).get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE) is not None:
for index_1 in range(len(prop_diff.get(self.INTERFACE_ROUTE_TABLE_ROUTES, {}).get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE))):
obj_2 = vnc_api.RouteType()
if prop_diff.get(self.INTERFACE_ROUTE_TABLE_ROUTES, {}).get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE, {})[index_1].get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_PREFIX) is not None:
obj_2.set_prefix(prop_diff.get(self.INTERFACE_ROUTE_TABLE_ROUTES, {}).get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE, {})[index_1].get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_PREFIX))
if prop_diff.get(self.INTERFACE_ROUTE_TABLE_ROUTES, {}).get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE, {})[index_1].get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_NEXT_HOP) is not None:
obj_2.set_next_hop(prop_diff.get(self.INTERFACE_ROUTE_TABLE_ROUTES, {}).get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE, {})[index_1].get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_NEXT_HOP))
if prop_diff.get(self.INTERFACE_ROUTE_TABLE_ROUTES, {}).get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE, {})[index_1].get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_NEXT_HOP_TYPE) is not None:
obj_2.set_next_hop_type(prop_diff.get(self.INTERFACE_ROUTE_TABLE_ROUTES, {}).get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE, {})[index_1].get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_NEXT_HOP_TYPE))
if prop_diff.get(self.INTERFACE_ROUTE_TABLE_ROUTES, {}).get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE, {})[index_1].get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_COMMUNITY_ATTRIBUTES) is not None:
obj_3 = vnc_api.CommunityAttributes()
if prop_diff.get(self.INTERFACE_ROUTE_TABLE_ROUTES, {}).get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE, {})[index_1].get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_COMMUNITY_ATTRIBUTES, {}).get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_COMMUNITY_ATTRIBUTES_COMMUNITY_ATTRIBUTE) is not None:
for index_3 in range(len(prop_diff.get(self.INTERFACE_ROUTE_TABLE_ROUTES, {}).get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE, {})[index_1].get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_COMMUNITY_ATTRIBUTES, {}).get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_COMMUNITY_ATTRIBUTES_COMMUNITY_ATTRIBUTE))):
obj_3.add_community_attribute(prop_diff.get(self.INTERFACE_ROUTE_TABLE_ROUTES, {}).get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE, {})[index_1].get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_COMMUNITY_ATTRIBUTES, {}).get(self.INTERFACE_ROUTE_TABLE_ROUTES_ROUTE_COMMUNITY_ATTRIBUTES_COMMUNITY_ATTRIBUTE)[index_3])
obj_2.set_community_attributes(obj_3)
obj_1.add_route(obj_2)
obj_0.set_interface_route_table_routes(obj_1)
# reference to service_instance
ref_obj_list = []
ref_data_list = []
if prop_diff.get(self.SERVICE_INSTANCE_REFS_DATA) is not None:
for index_0 in range(len(prop_diff.get(self.SERVICE_INSTANCE_REFS_DATA))):
obj_1 = vnc_api.ServiceInterfaceTag()
if prop_diff.get(self.SERVICE_INSTANCE_REFS_DATA, {})[index_0].get(self.SERVICE_INSTANCE_REFS_DATA_INTERFACE_TYPE) is not None:
obj_1.set_interface_type(prop_diff.get(self.SERVICE_INSTANCE_REFS_DATA, {})[index_0].get(self.SERVICE_INSTANCE_REFS_DATA_INTERFACE_TYPE))
ref_data_list.append(obj_1)
if self.SERVICE_INSTANCE_REFS in prop_diff:
for index_0 in range(len(prop_diff.get(self.SERVICE_INSTANCE_REFS_DATA) or [])):
try:
ref_obj = self.vnc_lib().service_instance_read(
id=prop_diff.get(self.SERVICE_INSTANCE_REFS)[index_0]
)
except:
ref_obj = self.vnc_lib().service_instance_read(
fq_name_str=prop_diff.get(self.SERVICE_INSTANCE_REFS)[index_0]
)
ref_obj_list.append(ref_obj.fq_name)
obj_0.set_service_instance_list(ref_obj_list, ref_data_list)
# End: reference to service_instance_refs
try:
self.vnc_lib().interface_route_table_update(obj_0)
except:
raise Exception(_('interface-route-table %s could not be updated.') % self.name)
def handle_delete(self):
if self.resource_id is None:
return
try:
self.vnc_lib().interface_route_table_delete(id=self.resource_id)
except Exception as ex:
self._ignore_not_found(ex)
LOG.warn(_('interface_route_table %s already deleted.') % self.name)
def _show_resource(self):
obj = self.vnc_lib().interface_route_table_read(id=self.resource_id)
obj_dict = obj.serialize_to_json()
return obj_dict
def resource_mapping():
return {
'OS::ContrailV2::InterfaceRouteTable': ContrailInterfaceRouteTable,
}
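A minimal sketch of how this registry function is typically consumed: Heat discovers a plugin resource by importing the module and calling resource_mapping(), and the returned dict maps the template resource type name to the implementing class. The check below is illustrative only and relies solely on names defined in this module.

# Illustrative only: confirm the type name exposed to Heat templates
# resolves to the class defined above.
mapping = resource_mapping()
assert mapping["OS::ContrailV2::InterfaceRouteTable"] is ContrailInterfaceRouteTable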
| 58.605863
| 504
| 0.642952
| 2,051
| 17,992
| 5.181863
| 0.066797
| 0.152804
| 0.207377
| 0.254046
| 0.854253
| 0.830072
| 0.771641
| 0.748589
| 0.707
| 0.662495
| 0
| 0.006188
| 0.27251
| 17,992
| 306
| 505
| 58.797386
| 0.805791
| 0.008948
| 0
| 0.463504
| 1
| 0
| 0.072482
| 0.05784
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018248
| false
| 0.00365
| 0.040146
| 0.00365
| 0.087591
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
3476802b1a733ecf09e71454df24f3195cd33eac
| 78
|
py
|
Python
|
tests/python_compare/compare.py
|
hixio-mh/plugin-python
|
d59dca4b6166dc20eec3e7aa57b0649c072507ce
|
[
"MIT"
] | 362
|
2018-02-17T10:25:11.000Z
|
2022-03-30T21:04:59.000Z
|
tests/python_compare/compare.py
|
hixio-mh/plugin-python
|
d59dca4b6166dc20eec3e7aa57b0649c072507ce
|
[
"MIT"
] | 70
|
2018-02-17T04:00:14.000Z
|
2019-08-21T18:01:52.000Z
|
tests/python_compare/compare.py
|
hixio-mh/plugin-python
|
d59dca4b6166dc20eec3e7aa57b0649c072507ce
|
[
"MIT"
] | 36
|
2018-02-18T23:11:25.000Z
|
2021-09-20T07:19:36.000Z
|
x = 10
y = 12
x < 11 < y < 100
x > y
x < y
x == y
x != y
x >= y
x <= y
| 4.588235
| 16
| 0.320513
| 20
| 78
| 1.25
| 0.3
| 0.48
| 0.6
| 0.8
| 0.48
| 0.48
| 0.48
| 0.48
| 0.48
| 0.48
| 0
| 0.225
| 0.487179
| 78
| 16
| 17
| 4.875
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1b52f980257b489d9ebe2669b6a496ed7f2e348d
| 1,049
|
py
|
Python
|
src/fang/entities/fields.py
|
CuperCupu/fang-server
|
39a9cee58d7290a18dab727c66716f4aee842211
|
[
"MIT"
] | null | null | null |
src/fang/entities/fields.py
|
CuperCupu/fang-server
|
39a9cee58d7290a18dab727c66716f4aee842211
|
[
"MIT"
] | null | null | null |
src/fang/entities/fields.py
|
CuperCupu/fang-server
|
39a9cee58d7290a18dab727c66716f4aee842211
|
[
"MIT"
] | null | null | null |
from peewee import Field, CharField, BlobField, IntegerField, FixedCharField, FloatField, ForeignKeyField, AutoField, \
BareField, BigAutoField, BigBitField, BigIntegerField, BinaryUUIDField, BitField, BooleanField, DateField, \
DateTimeField, DecimalField, DoubleField, IdentityField, IPField, ManyToManyField, PrimaryKeyField, \
SmallIntegerField, TextField, TimeField, TimestampField, UUIDField
class LongBlobField(BlobField):
db_field = 'longblob'
__all__ = [
'Field',
'CharField',
'BlobField',
'LongBlobField',
'IntegerField',
'FixedCharField',
'FloatField',
'ForeignKeyField',
'AutoField',
'BareField',
'BigAutoField',
'BigBitField',
'BigIntegerField',
'BinaryUUIDField',
'BitField',
'BooleanField',
'DateField',
'DateTimeField',
'DecimalField',
'DoubleField',
'IdentityField',
'IPField',
'ManyToManyField',
'PrimaryKeyField',
'SmallIntegerField',
'TextField',
'TimeField',
'TimestampField',
'UUIDField',
]
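A hedged usage sketch of the custom field, building directly on the LongBlobField class defined above. The Attachment model and the in-memory SQLite database are illustrative assumptions, not part of this module; the exact DDL type emitted for the field depends on the peewee version and database backend.

from peewee import CharField, Model, SqliteDatabase

# Illustrative model using the LongBlobField defined above.
db = SqliteDatabase(":memory:")

class Attachment(Model):
    name = CharField()
    payload = LongBlobField(null=True)

    class Meta:
        database = db

db.connect()
db.create_tables([Attachment])
Attachment.create(name="example", payload=b"\x00\x01\x02")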
| 24.97619
| 119
| 0.681602
| 67
| 1,049
| 10.597015
| 0.537313
| 0.039437
| 0.064789
| 0.143662
| 0.830986
| 0.830986
| 0.830986
| 0.830986
| 0.830986
| 0.830986
| 0
| 0
| 0.198284
| 1,049
| 41
| 120
| 25.585366
| 0.844233
| 0
| 0
| 0
| 0
| 0
| 0.323165
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.027027
| 0
| 0.081081
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1bb4f38066e944684fd0bf75a7a5c9e51955e828
| 15,910
|
py
|
Python
|
cops/constr_flow.py
|
FilipKlaesson/cops
|
67d2e5dd4534b3f3eec95b6cfda9d4c9c1746ef0
|
[
"BSD-3-Clause"
] | null | null | null |
cops/constr_flow.py
|
FilipKlaesson/cops
|
67d2e5dd4534b3f3eec95b6cfda9d4c9c1746ef0
|
[
"BSD-3-Clause"
] | null | null | null |
cops/constr_flow.py
|
FilipKlaesson/cops
|
67d2e5dd4534b3f3eec95b6cfda9d4c9c1746ef0
|
[
"BSD-3-Clause"
] | null | null | null |
from itertools import product
import numpy as np
import scipy.sparse as sp
from cops.optimization_wrappers import Constraint
def generate_flow_bridge_constraints(problem):
c_48 = _dynamic_constraint_48(problem)
c_49 = _dynamic_constraint_49(problem)
c_50 = _dynamic_constraint_50(problem)
return c_48 & c_49 & c_50
def generate_flow_connectivity_constraints(problem):
c_52_53 = _dynamic_constraint_52_53(problem)
return c_52_53
def generate_flow_master_constraints(problem):
c_48 = _dynamic_constraint_48_m(problem)
c_49 = _dynamic_constraint_49_m(problem)
c_54 = _dynamic_constraint_54(problem)
c_55 = _dynamic_constraint_55(problem)
c_58 = _dynamic_constraint_58(problem)
c_59 = _dynamic_constraint_outflow_bound(problem)
return c_48 & c_49 & c_54 & c_55 & c_58 & c_59
##########################################################
##########################################################
def _dynamic_constraint_48(problem):
# Constructing A_iq and b_iq for inequality (48) as sp.coo matrix
A_iq_row = []
A_iq_col = []
A_iq_data = []
N = len(problem.graph.agents)
constraint_idx = 0
for t, b, (v1, v2) in product(
range(problem.T + 1), range(problem.num_min_src_snk), problem.graph.conn_edges()
):
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_fbar_idx(b, v1, v2, t))
A_iq_data.append(1)
for r in problem.graph.agents:
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_z_idx(r, v1, t))
A_iq_data.append(-N)
constraint_idx += 1
for t, b, (v1, v2) in product(
range(problem.T + 1), range(problem.num_min_src_snk), problem.graph.conn_edges()
):
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_fbar_idx(b, v1, v2, t))
A_iq_data.append(1)
for r in problem.graph.agents:
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_z_idx(r, v2, t))
A_iq_data.append(-N)
constraint_idx += 1
A_iq_48 = sp.coo_matrix(
(A_iq_data, (A_iq_row, A_iq_col)), shape=(constraint_idx, problem.num_vars)
)
return Constraint(A_iq=A_iq_48, b_iq=np.zeros(constraint_idx))
def _dynamic_constraint_48_m(problem):
# Constructing A_iq and b_iq for inequality (48) for master as sp.coo matrix
A_iq_row = []
A_iq_col = []
A_iq_data = []
N = len(problem.graph.agents)
constraint_idx = 0
for t, (v1, v2) in product(range(problem.T + 1), problem.graph.conn_edges()):
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_mbar_idx(v1, v2, t))
A_iq_data.append(1)
for r in problem.graph.agents:
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_z_idx(r, v1, t))
A_iq_data.append(-N)
constraint_idx += 1
for t, (v1, v2) in product(range(problem.T + 1), problem.graph.conn_edges()):
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_mbar_idx(v1, v2, t))
A_iq_data.append(1)
for r in problem.graph.agents:
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_z_idx(r, v2, t))
A_iq_data.append(-N)
constraint_idx += 1
A_iq_48 = sp.coo_matrix(
(A_iq_data, (A_iq_row, A_iq_col)), shape=(constraint_idx, problem.num_vars)
)
return Constraint(A_iq=A_iq_48, b_iq=np.zeros(constraint_idx))
def _dynamic_constraint_49(problem):
# Constructing A_iq and b_iq for inequality (49) as sp.coo matrix
A_iq_row = []
A_iq_col = []
A_iq_data = []
N = len(problem.graph.agents)
constraint_idx = 0
for t, b, (v1, v2) in product(
range(problem.T), range(problem.num_min_src_snk), problem.graph.tran_edges()
):
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_f_idx(b, v1, v2, t))
A_iq_data.append(1)
for r in problem.graph.agents:
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_xf_idx(r, v1, v2, t))
A_iq_data.append(-N)
constraint_idx += 1
A_iq_49 = sp.coo_matrix(
(A_iq_data, (A_iq_row, A_iq_col)), shape=(constraint_idx, problem.num_vars)
)
return Constraint(A_iq=A_iq_49, b_iq=np.zeros(constraint_idx))
def _dynamic_constraint_49_m(problem):
# Constructing A_iq and b_iq for inequality (49) as sp.coo matrix
A_iq_row = []
A_iq_col = []
A_iq_data = []
N = len(problem.graph.agents)
constraint_idx = 0
for t, (v1, v2) in product(range(problem.T), problem.graph.tran_edges()):
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_m_idx(v1, v2, t))
A_iq_data.append(1)
for r in problem.graph.agents:
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_xf_idx(r, v1, v2, t))
A_iq_data.append(-N)
constraint_idx += 1
A_iq_49 = sp.coo_matrix(
(A_iq_data, (A_iq_row, A_iq_col)), shape=(constraint_idx, problem.num_vars)
)
return Constraint(A_iq=A_iq_49, b_iq=np.zeros(constraint_idx))
def _dynamic_constraint_50(problem):
"""constraint on z, y"""
A_iq_row = []
A_iq_col = []
A_iq_data = []
frontier_nodes = filter(
lambda v: "frontiers" in problem.graph.nodes[v]
and problem.graph.nodes[v]["frontiers"] != 0,
problem.graph.nodes,
)
constraint_idx = 0
for v, k in product(problem.graph.nodes, range(1, problem.num_r + 1)):
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_y_idx(v, k))
A_iq_data.append(1)
for r in problem.graph.agents:
if v in frontier_nodes:
if r in problem.eagents:
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_z_idx(r, v, problem.T))
A_iq_data.append(-1 / k)
else:
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_z_idx(r, v, problem.T))
A_iq_data.append(-1 / k)
constraint_idx += 1
A_iq_50 = sp.coo_matrix(
(A_iq_data, (A_iq_row, A_iq_col)), shape=(constraint_idx, problem.num_vars)
)
return Constraint(A_iq=A_iq_50, b_iq=np.zeros(constraint_idx))
def _dynamic_constraint_52_53(problem):
# Constructing A_eq and b_eq for equality (52,53) as sp.coo matrix
A_eq_row = []
A_eq_col = []
A_eq_data = []
constraint_idx = 0
for t, v, (b, b_r) in product(
range(problem.T + 1), problem.graph.nodes, enumerate(problem.min_src_snk)
):
if t > 0:
for edge in problem.graph.tran_in_edges(v):
A_eq_row.append(constraint_idx)
A_eq_col.append(problem.get_f_idx(b, edge[0], edge[1], t - 1))
A_eq_data.append(1)
for edge in problem.graph.conn_in_edges(v):
A_eq_row.append(constraint_idx)
A_eq_col.append(problem.get_fbar_idx(b, edge[0], edge[1], t))
A_eq_data.append(1)
if t < problem.T:
for edge in problem.graph.tran_out_edges(v):
A_eq_row.append(constraint_idx)
A_eq_col.append(problem.get_f_idx(b, edge[0], edge[1], t))
A_eq_data.append(-1)
for edge in problem.graph.conn_out_edges(v):
A_eq_row.append(constraint_idx)
A_eq_col.append(problem.get_fbar_idx(b, edge[0], edge[1], t))
A_eq_data.append(-1)
if problem.always_src or len(problem.src) <= len(problem.snk):
# case (52)
if t == 0:
A_eq_row.append(constraint_idx)
A_eq_col.append(problem.get_z_idx(b_r, v, t))
A_eq_data.append(len(problem.snk))
elif t == problem.T:
for r in problem.snk:
A_eq_row.append(constraint_idx)
A_eq_col.append(problem.get_z_idx(r, v, t))
A_eq_data.append(-1)
else:
# case (53)
if t == 0:
for r in problem.src:
A_eq_row.append(constraint_idx)
A_eq_col.append(problem.get_z_idx(r, v, t))
A_eq_data.append(1)
elif t == problem.T:
A_eq_row.append(constraint_idx)
A_eq_col.append(problem.get_z_idx(b_r, v, t))
A_eq_data.append(-len(problem.src))
constraint_idx += 1
A_eq_52 = sp.coo_matrix(
(A_eq_data, (A_eq_row, A_eq_col)), shape=(constraint_idx, problem.num_vars)
)
return Constraint(A_eq=A_eq_52, b_eq=np.zeros(constraint_idx))
def _dynamic_constraint_54(problem):
# Constructing A_iq and b_iq for inequality (54) as sp.coo matrix
A_iq_row = []
A_iq_col = []
A_iq_data = []
b_iq_54 = []
v0 = [problem.graph.agents[r] for r in problem.master]
constraint_idx = 0
for t, v in product(range(problem.T + 1), problem.graph.nodes):
if t > 0:
for edge in problem.graph.tran_in_edges(v):
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_m_idx(edge[0], edge[1], t - 1))
A_iq_data.append(-1)
for edge in problem.graph.conn_in_edges(v):
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_mbar_idx(edge[0], edge[1], t))
A_iq_data.append(-1)
if t < problem.T:
for edge in problem.graph.tran_out_edges(v):
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_m_idx(edge[0], edge[1], t))
A_iq_data.append(1)
for edge in problem.graph.conn_out_edges(v):
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_mbar_idx(edge[0], edge[1], t))
A_iq_data.append(1)
if t == 0 and v in v0:
b_iq_54.append(len(problem.graph))
else:
b_iq_54.append(0)
constraint_idx += 1
A_iq_54 = sp.coo_matrix(
(A_iq_data, (A_iq_row, A_iq_col)), shape=(constraint_idx, problem.num_vars)
)
return Constraint(A_iq=A_iq_54, b_iq=b_iq_54)
def _dynamic_constraint_55(problem):
# Constructing A_iq and b_iq for inequality (55) as sp.coo matrix
A_iq_row = []
A_iq_col = []
A_iq_data = []
b_iq_55 = []
constraint_idx = 0
m_v0 = [problem.graph.agents[r] for r in problem.master]
for t, r in product(range(problem.T + 1), problem.graph.agents):
v0 = problem.graph.agents[r]
if r not in problem.master and v0 not in m_v0:
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_z_idx(r, v0, t))
A_iq_data.append(-1)
for tau in range(t):
# z_t is locked unless info arrived at some point before t-1
if tau > 0:
for edge in problem.graph.tran_in_edges(v0):
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_m_idx(edge[0], edge[1], tau - 1))
A_iq_data.append(-1)
for edge in problem.graph.conn_in_edges(v0):
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_mbar_idx(edge[0], edge[1], tau))
A_iq_data.append(-1)
if tau < problem.T:
for edge in problem.graph.tran_out_edges(v0):
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_m_idx(edge[0], edge[1], tau))
A_iq_data.append(1)
for edge in problem.graph.conn_out_edges(v0):
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_mbar_idx(edge[0], edge[1], tau))
A_iq_data.append(1)
b_iq_55.append(-1)
constraint_idx += 1
A_iq_55 = sp.coo_matrix(
(A_iq_data, (A_iq_row, A_iq_col)), shape=(constraint_idx, problem.num_vars)
)
return Constraint(A_iq=A_iq_55, b_iq=b_iq_55)
def _dynamic_constraint_58(problem):
# Constructing A_iq and b_iq for inequality (58) as sp.coo matrix
A_iq_row = []
A_iq_col = []
A_iq_data = []
m_v0 = [problem.graph.agents[r] for r in problem.master]
constraint_idx = 0
for v, k in product(problem.graph.nodes, range(1, problem.num_r + 1)):
if v in m_v0:
continue
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_y_idx(v, k))
A_iq_data.append(1)
for tau in range(problem.T + 1):
if tau > 0:
for edge in problem.graph.tran_in_edges(v):
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_m_idx(edge[0], edge[1], tau - 1))
A_iq_data.append(-1)
for edge in problem.graph.conn_in_edges(v):
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_mbar_idx(edge[0], edge[1], tau))
A_iq_data.append(-1)
if tau < problem.T:
for edge in problem.graph.tran_out_edges(v):
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_m_idx(edge[0], edge[1], tau))
A_iq_data.append(1)
for edge in problem.graph.conn_out_edges(v):
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_mbar_idx(edge[0], edge[1], tau))
A_iq_data.append(1)
constraint_idx += 1
A_iq_58 = sp.coo_matrix(
(A_iq_data, (A_iq_row, A_iq_col)), shape=(constraint_idx, problem.num_vars)
)
return Constraint(A_iq=A_iq_58, b_iq=np.zeros(constraint_idx))
def _dynamic_constraint_outflow_bound(problem):
# Constructing A_iq and b_iq
A_iq_row = []
A_iq_col = []
A_iq_data = []
N = len(problem.graph.agents)
m_v0 = [problem.graph.agents[r] for r in problem.master]
constraint_idx = 0
for r, (b, _), t in product(
problem.graph.agents, enumerate(problem.min_src_snk), range(problem.T + 1)
):
v0 = problem.graph.agents[r]
if v0 not in m_v0:
for tau in range(t + 1):
if tau > 0:
for edge in problem.graph.tran_in_edges(v0):
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_m_idx(edge[0], edge[1], tau - 1))
A_iq_data.append(-N)
for edge in problem.graph.conn_in_edges(v0):
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_mbar_idx(edge[0], edge[1], tau))
A_iq_data.append(-N)
if tau < problem.T:
for edge in problem.graph.tran_out_edges(v0):
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_m_idx(edge[0], edge[1], tau))
A_iq_data.append(N)
for edge in problem.graph.conn_out_edges(v0):
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_mbar_idx(edge[0], edge[1], tau))
A_iq_data.append(N)
for edge in problem.graph.conn_out_edges(v0):
A_iq_row.append(constraint_idx)
A_iq_col.append(problem.get_fbar_idx(b, edge[0], edge[1], t))
A_iq_data.append(1)
constraint_idx += 1
A_iq = sp.coo_matrix(
(A_iq_data, (A_iq_row, A_iq_col)), shape=(constraint_idx, problem.num_vars)
)
return Constraint(A_iq=A_iq, b_iq=np.zeros(constraint_idx))
| 34.215054
| 88
| 0.59208
| 2,473
| 15,910
| 3.475536
| 0.045289
| 0.064572
| 0.0363
| 0.107504
| 0.880512
| 0.854101
| 0.834439
| 0.806166
| 0.803374
| 0.768703
| 0
| 0.029794
| 0.293275
| 15,910
| 464
| 89
| 34.288793
| 0.734614
| 0.039912
| 0
| 0.719198
| 1
| 0
| 0.001189
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037249
| false
| 0
| 0.011461
| 0
| 0.08596
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1bcb1c84919f2df3e00cad31572aaa27457fa701
| 40,160
|
py
|
Python
|
lambdas/jumpcloud/jcapiv1/apis/systems_api.py
|
zimingd/aws-infra
|
89922f6d2a148b14119a8b8164dd4b06d465d94e
|
[
"Apache-2.0"
] | null | null | null |
lambdas/jumpcloud/jcapiv1/apis/systems_api.py
|
zimingd/aws-infra
|
89922f6d2a148b14119a8b8164dd4b06d465d94e
|
[
"Apache-2.0"
] | null | null | null |
lambdas/jumpcloud/jcapiv1/apis/systems_api.py
|
zimingd/aws-infra
|
89922f6d2a148b14119a8b8164dd4b06d465d94e
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
JumpCloud APIs
V1 & V2 versions of JumpCloud's API. The previous version of JumpCloud's API. This set of endpoints allows JumpCloud customers to manage commands, systems, & system users.
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class SystemsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def systems_delete(self, id, content_type, accept, **kwargs):
"""
Delete a System
Delete a system record by its id. This command will cause the system to uninstall the JumpCloud agent from itself, which can take about a minute. If the system is not connected to JumpCloud, the system record will simply be removed.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.systems_delete(id, content_type, accept, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: (required)
:param str content_type: (required)
:param str accept: (required)
:param str date: Current date header for the System Context API
:param str authorization: Authorization header for the System Context API
:return: System
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.systems_delete_with_http_info(id, content_type, accept, **kwargs)
else:
(data) = self.systems_delete_with_http_info(id, content_type, accept, **kwargs)
return data
def systems_delete_with_http_info(self, id, content_type, accept, **kwargs):
"""
Delete a System
Delete a system record by its id. This command will cause the system to uninstall the JumpCloud agent from itself, which can take about a minute. If the system is not connected to JumpCloud, the system record will simply be removed.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.systems_delete_with_http_info(id, content_type, accept, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: (required)
:param str content_type: (required)
:param str accept: (required)
:param str date: Current date header for the System Context API
:param str authorization: Authorization header for the System Context API
:return: System
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'content_type', 'accept', 'date', 'authorization']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method systems_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `systems_delete`")
# verify the required parameter 'content_type' is set
if ('content_type' not in params) or (params['content_type'] is None):
raise ValueError("Missing the required parameter `content_type` when calling `systems_delete`")
# verify the required parameter 'accept' is set
if ('accept' not in params) or (params['accept'] is None):
raise ValueError("Missing the required parameter `accept` when calling `systems_delete`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
if 'content_type' in params:
header_params['Content-Type'] = params['content_type']
if 'accept' in params:
header_params['Accept'] = params['accept']
if 'date' in params:
header_params['Date'] = params['date']
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json; charset=utf-8'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['x-api-key']
return self.api_client.call_api('/systems/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='System',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def systems_get(self, id, content_type, accept, **kwargs):
"""
List an individual system
Returns an individual system.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.systems_get(id, content_type, accept, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: (required)
:param str content_type: (required)
:param str accept: (required)
:param str fields: The comma separated fields included in the returned records. If omitted the default list of fields will be returned.
:param int limit: The number of records to return at once.
:param int skip: The offset into the records to return.
:param str sort: The comma separated fields used to sort the collection. Default sort is ascending, prefix with `-` to sort descending.
:param str date: Current date header for the System Context API
:param str authorization: Authorization header for the System Context API
:return: System
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.systems_get_with_http_info(id, content_type, accept, **kwargs)
else:
(data) = self.systems_get_with_http_info(id, content_type, accept, **kwargs)
return data
def systems_get_with_http_info(self, id, content_type, accept, **kwargs):
"""
List an individual system
Returns an individual system.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.systems_get_with_http_info(id, content_type, accept, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: (required)
:param str content_type: (required)
:param str accept: (required)
:param str fields: The comma separated fields included in the returned records. If omitted the default list of fields will be returned.
:param int limit: The number of records to return at once.
:param int skip: The offset into the records to return.
:param str sort: The comma separated fields used to sort the collection. Default sort is ascending, prefix with `-` to sort descending.
:param str date: Current date header for the System Context API
:param str authorization: Authorization header for the System Context API
:return: System
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'content_type', 'accept', 'fields', 'limit', 'skip', 'sort', 'date', 'authorization']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method systems_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `systems_get`")
# verify the required parameter 'content_type' is set
if ('content_type' not in params) or (params['content_type'] is None):
raise ValueError("Missing the required parameter `content_type` when calling `systems_get`")
# verify the required parameter 'accept' is set
if ('accept' not in params) or (params['accept'] is None):
raise ValueError("Missing the required parameter `accept` when calling `systems_get`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'skip' in params:
query_params.append(('skip', params['skip']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
header_params = {}
if 'content_type' in params:
header_params['Content-Type'] = params['content_type']
if 'accept' in params:
header_params['Accept'] = params['accept']
if 'date' in params:
header_params['Date'] = params['date']
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json; charset=utf-8'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['x-api-key']
return self.api_client.call_api('/systems/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='System',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def systems_list(self, content_type, accept, **kwargs):
"""
List All Systems
Returns all Systems.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.systems_list(content_type, accept, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str content_type: (required)
:param str accept: (required)
:param str fields: The comma separated fields included in the returned records. If omitted the default list of fields will be returned.
:param int limit: The number of records to return at once.
:param int skip: The offset into the records to return.
:param str sort: The comma separated fields used to sort the collection. Default sort is ascending, prefix with `-` to sort descending.
:return: Systemslist
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.systems_list_with_http_info(content_type, accept, **kwargs)
else:
(data) = self.systems_list_with_http_info(content_type, accept, **kwargs)
return data
def systems_list_with_http_info(self, content_type, accept, **kwargs):
"""
List All Systems
Returns all Systems.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.systems_list_with_http_info(content_type, accept, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str content_type: (required)
:param str accept: (required)
:param str fields: The comma separated fields included in the returned records. If omitted the default list of fields will be returned.
:param int limit: The number of records to return at once.
:param int skip: The offset into the records to return.
:param str sort: The comma separated fields used to sort the collection. Default sort is ascending, prefix with `-` to sort descending.
:return: Systemslist
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['content_type', 'accept', 'fields', 'limit', 'skip', 'sort']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method systems_list" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'content_type' is set
if ('content_type' not in params) or (params['content_type'] is None):
raise ValueError("Missing the required parameter `content_type` when calling `systems_list`")
# verify the required parameter 'accept' is set
if ('accept' not in params) or (params['accept'] is None):
raise ValueError("Missing the required parameter `accept` when calling `systems_list`")
collection_formats = {}
path_params = {}
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'skip' in params:
query_params.append(('skip', params['skip']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
header_params = {}
if 'content_type' in params:
header_params['Content-Type'] = params['content_type']
if 'accept' in params:
header_params['Accept'] = params['accept']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json; charset=utf-8'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['x-api-key']
return self.api_client.call_api('/systems', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Systemslist',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def systems_put(self, id, content_type, accept, **kwargs):
"""
Update a system
Update a system record by its id and return the modified system record in single record format.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.systems_put(id, content_type, accept, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: (required)
:param str content_type: (required)
:param str accept: (required)
:param Systemput body:
:param str date: Current date header for the System Context API
:param str authorization: Authorization header for the System Context API
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.systems_put_with_http_info(id, content_type, accept, **kwargs)
else:
(data) = self.systems_put_with_http_info(id, content_type, accept, **kwargs)
return data
def systems_put_with_http_info(self, id, content_type, accept, **kwargs):
"""
Update a system
Update a system record by its id and return the modified system record in single record format.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.systems_put_with_http_info(id, content_type, accept, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: (required)
:param str content_type: (required)
:param str accept: (required)
:param Systemput body:
:param str date: Current date header for the System Context API
:param str authorization: Authorization header for the System Context API
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'content_type', 'accept', 'body', 'date', 'authorization']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method systems_put" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `systems_put`")
# verify the required parameter 'content_type' is set
if ('content_type' not in params) or (params['content_type'] is None):
raise ValueError("Missing the required parameter `content_type` when calling `systems_put`")
# verify the required parameter 'accept' is set
if ('accept' not in params) or (params['accept'] is None):
raise ValueError("Missing the required parameter `accept` when calling `systems_put`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
if 'content_type' in params:
header_params['Content-Type'] = params['content_type']
if 'accept' in params:
header_params['Accept'] = params['accept']
if 'date' in params:
header_params['Date'] = params['date']
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json; charset=utf-8'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['x-api-key']
return self.api_client.call_api('/systems/{id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def systems_systemusers_binding_list(self, id, content_type, accept, **kwargs):
"""
List system user bindings
List system user bindings for a specific system in a system and user binding format. ### Example #### List system user bindings for specific system ``` curl \\ -H 'Content-Type: application/json' \\ -H \"x-api-key: [YOUR_API_KEY_HERE]\" \\ \"https://console.jumpcloud.com/api/systems/[SYSTEM_ID_HERE]/systemusers\" ```
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.systems_systemusers_binding_list(id, content_type, accept, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: (required)
:param str content_type: (required)
:param str accept: (required)
:param str fields: The comma separated fields included in the returned records. If omitted the default list of fields will be returned.
:param int limit: The number of records to return at once.
:param int skip: The offset into the records to return.
:param str sort: The comma separated fields used to sort the collection. Default sort is ascending, prefix with `-` to sort descending.
:return: Systemuserbinding
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.systems_systemusers_binding_list_with_http_info(id, content_type, accept, **kwargs)
else:
(data) = self.systems_systemusers_binding_list_with_http_info(id, content_type, accept, **kwargs)
return data
def systems_systemusers_binding_list_with_http_info(self, id, content_type, accept, **kwargs):
"""
List system user bindings
List system user bindings for a specific system in a system and user binding format. ### Example #### List system user bindings for specific system ``` curl \\ -H 'Content-Type: application/json' \\ -H \"x-api-key: [YOUR_API_KEY_HERE]\" \\ \"https://console.jumpcloud.com/api/systems/[SYSTEM_ID_HERE]/systemusers\" ```
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.systems_systemusers_binding_list_with_http_info(id, content_type, accept, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: (required)
:param str content_type: (required)
:param str accept: (required)
:param str fields: The comma separated fields included in the returned records. If omitted the default list of fields will be returned.
:param int limit: The number of records to return at once.
:param int skip: The offset into the records to return.
:param str sort: The comma separated fields used to sort the collection. Default sort is ascending, prefix with `-` to sort descending.
:return: Systemuserbinding
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'content_type', 'accept', 'fields', 'limit', 'skip', 'sort']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method systems_systemusers_binding_list" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `systems_systemusers_binding_list`")
# verify the required parameter 'content_type' is set
if ('content_type' not in params) or (params['content_type'] is None):
raise ValueError("Missing the required parameter `content_type` when calling `systems_systemusers_binding_list`")
# verify the required parameter 'accept' is set
if ('accept' not in params) or (params['accept'] is None):
raise ValueError("Missing the required parameter `accept` when calling `systems_systemusers_binding_list`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'skip' in params:
query_params.append(('skip', params['skip']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
header_params = {}
if 'content_type' in params:
header_params['Content-Type'] = params['content_type']
if 'accept' in params:
header_params['Accept'] = params['accept']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json; charset=utf-8'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['x-api-key']
return self.api_client.call_api('/systems/{id}/systemusers', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Systemuserbinding',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def systems_systemusers_binding_put(self, id, content_type, accept, **kwargs):
"""
Update a system's or user's binding
Adds or removes a user binding for a system. This endpoint is only used for users still using JumpCloud Tags. If you are using JumpCloud Groups, please refer to the documentation found [here](https://docs.jumpcloud.com/2.0/systems/manage-associations-of-a-system). ### Example #### Add (or remove) a system user to (from) a system ``` curl \\ -d '{ \"add\": [\"[SYSTEM_USER_ID_TO_ADD_HERE]\"], \"remove\": [\"[SYSTEM_USER_ID_TO_REMOVE_HERE]\"] }' \\ -X PUT \\ -H 'Content-Type: application/json' \\ -H 'Accept: application/json' \\ -H \"x-api-key: [YOUR_API_KEY_HERE]\" \\ \"https://console.jumpcloud.com/api/systems/[SYSTEM_ID_HERE]/systemusers\" ```
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.systems_systemusers_binding_put(id, content_type, accept, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: (required)
:param str content_type: (required)
:param str accept: (required)
:param Systemuserbindingsput body:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.systems_systemusers_binding_put_with_http_info(id, content_type, accept, **kwargs)
else:
(data) = self.systems_systemusers_binding_put_with_http_info(id, content_type, accept, **kwargs)
return data
def systems_systemusers_binding_put_with_http_info(self, id, content_type, accept, **kwargs):
"""
Update a system's or user's binding
Adds or removes a user binding for a system. This endpoint is only used for users still using JumpCloud Tags. If you are using JumpCloud Groups, please refer to the documentation found [here](https://docs.jumpcloud.com/2.0/systems/manage-associations-of-a-system). ### Example #### Add (or remove) a system user to (from) a system ``` curl \\ -d '{ \"add\": [\"[SYSTEM_USER_ID_TO_ADD_HERE]\"], \"remove\": [\"[SYSTEM_USER_ID_TO_REMOVE_HERE]\"] }' \\ -X PUT \\ -H 'Content-Type: application/json' \\ -H 'Accept: application/json' \\ -H \"x-api-key: [YOUR_API_KEY_HERE]\" \\ \"https://console.jumpcloud.com/api/systems/[SYSTEM_ID_HERE]/systemusers\" ```
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.systems_systemusers_binding_put_with_http_info(id, content_type, accept, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: (required)
:param str content_type: (required)
:param str accept: (required)
:param Systemuserbindingsput body:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'content_type', 'accept', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method systems_systemusers_binding_put" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `systems_systemusers_binding_put`")
# verify the required parameter 'content_type' is set
if ('content_type' not in params) or (params['content_type'] is None):
raise ValueError("Missing the required parameter `content_type` when calling `systems_systemusers_binding_put`")
# verify the required parameter 'accept' is set
if ('accept' not in params) or (params['accept'] is None):
raise ValueError("Missing the required parameter `accept` when calling `systems_systemusers_binding_put`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
if 'content_type' in params:
header_params['Content-Type'] = params['content_type']
if 'accept' in params:
header_params['Accept'] = params['accept']
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json; charset=utf-8'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['x-api-key']
return self.api_client.call_api('/systems/{id}/systemusers', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
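For orientation, here is a minimal usage sketch of the binding endpoints defined above, mirroring the docstring examples. It assumes `api` is an already-configured instance of the generated API class that owns these methods, and the system ID is a placeholder; the PUT variant additionally takes an optional `Systemuserbindingsput` body whose model is not shown here, so only the list call is sketched. Nothing below is part of the generated module itself.

```
from pprint import pprint

def callback_function(response):
    # Receives the deserialized Systemuserbinding result; supplying a callback
    # switches the call into its asynchronous form.
    pprint(response)

# Synchronous: returns the Systemuserbinding result directly.
bindings = api.systems_systemusers_binding_list(
    '[SYSTEM_ID_HERE]', 'application/json', 'application/json',
    limit=10, skip=0)

# Asynchronous: returns the request thread immediately (per the docstring above).
thread = api.systems_systemusers_binding_list(
    '[SYSTEM_ID_HERE]', 'application/json', 'application/json',
    callback=callback_function)
thread.join()
```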
| 48.095808
| 668
| 0.595194
| 4,454
| 40,160
| 5.191064
| 0.05366
| 0.056139
| 0.030881
| 0.028762
| 0.971498
| 0.966827
| 0.966654
| 0.963194
| 0.960685
| 0.960166
| 0
| 0.000619
| 0.316384
| 40,160
| 834
| 669
| 48.153477
| 0.841553
| 0.383441
| 0
| 0.800971
| 0
| 0
| 0.197468
| 0.031542
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031553
| false
| 0
| 0.01699
| 0
| 0.09466
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1bd40555974c24fba3c13f308af4869926161c52
| 7,726
|
py
|
Python
|
tests/tools/assigner/test_partition_operations.py
|
akashvacher/kafka-tools
|
0d98bbefc1105851b7b7203de4f6c68d9c097730
|
[
"Apache-2.0"
] | 578
|
2016-05-05T05:18:15.000Z
|
2022-03-23T07:18:07.000Z
|
tests/tools/assigner/test_partition_operations.py
|
akashvacher/kafka-tools
|
0d98bbefc1105851b7b7203de4f6c68d9c097730
|
[
"Apache-2.0"
] | 94
|
2016-04-29T23:25:38.000Z
|
2022-02-07T17:16:16.000Z
|
tests/tools/assigner/test_partition_operations.py
|
akashvacher/kafka-tools
|
0d98bbefc1105851b7b7203de4f6c68d9c097730
|
[
"Apache-2.0"
] | 150
|
2016-04-29T16:33:20.000Z
|
2022-03-14T10:05:48.000Z
|
import unittest
from kafka.tools.exceptions import ReplicaNotFoundException, ClusterConsistencyException
from kafka.tools.models.broker import Broker
from kafka.tools.models.topic import Topic
from kafka.tools.models.cluster import Cluster
class PartitionOperationTests(unittest.TestCase):
def setUp(self):
self.cluster = Cluster()
self.cluster.add_broker(Broker("brokerhost1.example.com", id=1))
self.cluster.add_broker(Broker("brokerhost2.example.com", id=2))
self.cluster.add_broker(Broker("brokerhost3.example.com", id=3))
self.cluster.add_topic(Topic("testTopic1", 2))
self.cluster.add_topic(Topic("testTopic2", 2))
def add_topics(self):
partition = self.cluster.topics['testTopic1'].partitions[0]
partition.add_replica(self.cluster.brokers[1], 0)
partition.add_replica(self.cluster.brokers[2], 1)
partition = self.cluster.topics['testTopic1'].partitions[1]
partition.add_replica(self.cluster.brokers[2], 0)
partition.add_replica(self.cluster.brokers[1], 1)
partition = self.cluster.topics['testTopic2'].partitions[0]
partition.add_replica(self.cluster.brokers[2], 0)
partition.add_replica(self.cluster.brokers[1], 1)
partition = self.cluster.topics['testTopic2'].partitions[1]
partition.add_replica(self.cluster.brokers[1], 0)
partition.add_replica(self.cluster.brokers[2], 1)
def test_partition_add_broker_partition(self):
partition = self.cluster.topics['testTopic1'].partitions[0]
partition._add_broker_partition(0, self.cluster.brokers[1])
assert self.cluster.brokers[1].partitions[0] == [partition]
def test_partition_add_broker_partition_two(self):
partition = self.cluster.topics['testTopic1'].partitions[0]
partition._add_broker_partition(0, self.cluster.brokers[1])
partition2 = self.cluster.topics['testTopic2'].partitions[1]
partition2._add_broker_partition(0, self.cluster.brokers[1])
assert self.cluster.brokers[1].partitions[0] == [partition, partition2]
def test_partition_add_replica(self):
partition = self.cluster.topics['testTopic1'].partitions[0]
partition.add_replica(self.cluster.brokers[1], 0)
assert self.cluster.brokers[1].partitions[0] == [partition]
assert self.cluster.topics['testTopic1'].partitions[0].replicas == [self.cluster.brokers[1]]
def test_partition_add_replica_two(self):
partition = self.cluster.topics['testTopic1'].partitions[0]
partition.add_replica(self.cluster.brokers[1], 0)
partition.add_replica(self.cluster.brokers[2], 1)
assert self.cluster.brokers[1].partitions[0] == [partition]
assert self.cluster.brokers[2].partitions[1] == [partition]
assert self.cluster.topics['testTopic1'].partitions[0].replicas == [self.cluster.brokers[1], self.cluster.brokers[2]]
def test_partition_remove_broker_partition(self):
partition = self.cluster.topics['testTopic1'].partitions[0]
partition.add_replica(self.cluster.brokers[1], 0)
partition._remove_broker_partition(self.cluster.brokers[1])
assert self.cluster.brokers[1].partitions[0] == []
def test_partition_remove_broker_partition_two(self):
partition = self.cluster.topics['testTopic1'].partitions[0]
partition.add_replica(self.cluster.brokers[1], 0)
partition2 = self.cluster.topics['testTopic2'].partitions[1]
partition2.add_replica(self.cluster.brokers[1], 0)
partition._remove_broker_partition(self.cluster.brokers[1])
assert self.cluster.brokers[1].partitions[0] == [partition2]
def test_partition_remove_replica(self):
partition = self.cluster.topics['testTopic1'].partitions[0]
partition.add_replica(self.cluster.brokers[1], 0)
partition.remove_replica(self.cluster.brokers[1])
assert self.cluster.brokers[1].partitions[0] == []
assert self.cluster.topics['testTopic1'].partitions[0].replicas == []
def test_partition_remove_replica_single(self):
partition = self.cluster.topics['testTopic1'].partitions[0]
partition.add_replica(self.cluster.brokers[1], 0)
partition.add_replica(self.cluster.brokers[2], 1)
partition.remove_replica(self.cluster.brokers[1])
assert self.cluster.brokers[1].partitions[0] == []
assert self.cluster.brokers[2].partitions[1] == [partition]
assert self.cluster.topics['testTopic1'].partitions[0].replicas == [self.cluster.brokers[2]]
def test_partition_remove_replica_nonexistent(self):
self.assertRaises(ReplicaNotFoundException, self.cluster.topics['testTopic1'].partitions[0].remove_replica, self.cluster.brokers[1])
def test_partition_swap_replicas(self):
partition = self.cluster.topics['testTopic1'].partitions[0]
partition.add_replica(self.cluster.brokers[1], 0)
partition.swap_replicas(self.cluster.brokers[1], self.cluster.brokers[2])
assert self.cluster.brokers[2].partitions[0] == [partition]
assert self.cluster.topics['testTopic1'].partitions[0].replicas == [self.cluster.brokers[2]]
def test_partition_swap_replicas_nonexistent(self):
self.assertRaises(ReplicaNotFoundException,
self.cluster.topics['testTopic1'].partitions[0].swap_replicas,
self.cluster.brokers[1],
self.cluster.brokers[2])
def test_partition_swap_replica_positions(self):
partition = self.cluster.topics['testTopic1'].partitions[0]
partition.add_replica(self.cluster.brokers[1], 0)
partition.add_replica(self.cluster.brokers[2], 1)
partition.swap_replica_positions(self.cluster.brokers[1], self.cluster.brokers[2])
assert self.cluster.brokers[2].partitions[0] == [partition]
assert self.cluster.brokers[1].partitions[1] == [partition]
assert self.cluster.topics['testTopic1'].partitions[0].replicas == [self.cluster.brokers[2], self.cluster.brokers[1]]
def test_partition_swap_replica_positions_nonexistent(self):
partition = self.cluster.topics['testTopic1'].partitions[0]
partition.add_replica(self.cluster.brokers[1], 0)
partition.add_replica(self.cluster.brokers[2], 1)
self.assertRaises(ReplicaNotFoundException, partition.swap_replica_positions, self.cluster.brokers[1], self.cluster.brokers[3])
def test_cluster_clone(self):
# Should have a consistent cluster state
self.add_topics()
newcluster = self.cluster.clone()
assert self.cluster is not newcluster
for bid in newcluster.brokers:
assert newcluster.brokers[bid] == self.cluster.brokers[bid]
for tname in newcluster.topics:
assert newcluster.topics[tname] == self.cluster.topics[tname]
for partition in newcluster.partitions([]):
assert partition == self.cluster.topics[partition.topic.name].partitions[partition.num]
assert partition.replicas == self.cluster.topics[partition.topic.name].partitions[partition.num].replicas
def test_cluster_changed_partitions(self):
self.add_topics()
newcluster = self.cluster.clone()
newcluster.topics['testTopic1'].partitions[0].replicas.reverse()
difference = self.cluster.changed_partitions(newcluster)
assert difference == [newcluster.topics['testTopic1'].partitions[0]]
def test_cluster_changed_partitions_inconsistent(self):
self.add_topics()
badcluster = Cluster()
self.assertRaises(ClusterConsistencyException, badcluster.changed_partitions, self.cluster)
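The fixtures above double as a compact reference for the cluster model API. Purely as an illustration (not part of the test suite), the sketch below uses the same `kafka.tools.models` classes to build a small cluster, assign replicas, and diff it against a modified clone:

```
from kafka.tools.models.broker import Broker
from kafka.tools.models.topic import Topic
from kafka.tools.models.cluster import Cluster

# Build a minimal cluster: two brokers and one topic with two partitions.
cluster = Cluster()
cluster.add_broker(Broker("brokerhost1.example.com", id=1))
cluster.add_broker(Broker("brokerhost2.example.com", id=2))
cluster.add_topic(Topic("demoTopic", 2))

# Assign replicas: partition 0 led by broker 1, partition 1 led by broker 2.
p0 = cluster.topics["demoTopic"].partitions[0]
p0.add_replica(cluster.brokers[1], 0)
p0.add_replica(cluster.brokers[2], 1)
p1 = cluster.topics["demoTopic"].partitions[1]
p1.add_replica(cluster.brokers[2], 0)
p1.add_replica(cluster.brokers[1], 1)

# Clone, reorder one replica list, and ask the original what changed.
new_cluster = cluster.clone()
new_cluster.topics["demoTopic"].partitions[0].replicas.reverse()
changed = cluster.changed_partitions(new_cluster)
assert changed == [new_cluster.topics["demoTopic"].partitions[0]]
```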
| 53.282759
| 140
| 0.707481
| 913
| 7,726
| 5.857612
| 0.073384
| 0.201571
| 0.198579
| 0.135004
| 0.816006
| 0.74626
| 0.721391
| 0.702506
| 0.675206
| 0.632199
| 0
| 0.025752
| 0.165674
| 7,726
| 144
| 141
| 53.652778
| 0.803909
| 0.004918
| 0
| 0.479339
| 0
| 0
| 0.046708
| 0.008977
| 0
| 0
| 0
| 0
| 0.239669
| 1
| 0.14876
| false
| 0
| 0.041322
| 0
| 0.198347
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9482c67f35c09c58366f7af0a4736a69b5357d49
| 1,120
|
py
|
Python
|
src/events.py
|
JuliaLang/pull-request-state-machine
|
d0ab68e1cce32ca60559511e394d3de3a5a1a5a8
|
[
"MIT"
] | 3
|
2020-12-10T00:41:08.000Z
|
2021-09-01T22:29:18.000Z
|
src/events.py
|
JuliaLang/pull-request-state-machine
|
d0ab68e1cce32ca60559511e394d3de3a5a1a5a8
|
[
"MIT"
] | 15
|
2020-12-04T18:50:55.000Z
|
2020-12-09T21:37:21.000Z
|
src/events.py
|
JuliaLang/pull-request-state-machine
|
d0ab68e1cce32ca60559511e394d3de3a5a1a5a8
|
[
"MIT"
] | 4
|
2021-04-26T12:52:43.000Z
|
2021-09-01T22:29:33.000Z
|
import github
def on_pr_closed(state, event, action, payload):
print(f'on_pr_closed: {state}, {event}, {action}')
return state
def on_pr_comment(state, event, action, payload):
print(f'on_pr_comment: {state}, {event}, {action}')
return state
def on_pr_opened(state, event, action, payload):
print(f'on_pr_opened: {state}, {event}, {action}')
return state
def on_pr_reopened(state, event, action, payload):
print(f'on_pr_reopened: {state}, {event}, {action}')
return state
def on_pr_review_dismissed(state, event, action, payload):
print(f'on_pr_review_dismissed: {state}, {event}, {action}')
return state
def on_pr_review_submitted(state, event, action, payload):
print(f'on_pr_review_submitted: {state}, {event}, {action}')
return state
def on_pr_synchronize(state, event, action, payload):
print(f'on_pr_synchronize: {state}, {event}, {action}')
return state
def on_status(state, event, action, payload):
# TODO: use the commit SHA to look up the corresponding pull request number
print(f'on_status: {state}, {event}, {action}')
return state
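Every handler above follows the same protocol: it takes the current state together with the GitHub event name, action, and payload, and returns the (possibly updated) state. A hypothetical dispatch table, not part of this module and only an illustration of how such handlers could be wired to webhook deliveries, might look like:

```
# Hypothetical mapping from (event, action) to one of the handlers above.
DISPATCH = {
    ("pull_request", "opened"): on_pr_opened,
    ("pull_request", "closed"): on_pr_closed,
    ("pull_request", "reopened"): on_pr_reopened,
    ("pull_request", "synchronize"): on_pr_synchronize,
    ("pull_request_review", "submitted"): on_pr_review_submitted,
    ("pull_request_review", "dismissed"): on_pr_review_dismissed,
    ("issue_comment", "created"): on_pr_comment,
    ("status", None): on_status,   # status events carry no action field
}

def handle(state, event, action, payload):
    """Route one webhook delivery to its handler; unknown events leave state unchanged."""
    handler = DISPATCH.get((event, action), lambda s, e, a, p: s)
    return handler(state, event, action, payload)
```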
| 32
| 79
| 0.701786
| 161
| 1,120
| 4.670807
| 0.204969
| 0.212766
| 0.340426
| 0.244681
| 0.882979
| 0.807181
| 0.700798
| 0.610372
| 0.210106
| 0
| 0
| 0
| 0.160714
| 1,120
| 34
| 80
| 32.941176
| 0.8
| 0.065179
| 0
| 0.32
| 0
| 0
| 0.330144
| 0.044019
| 0
| 0
| 0
| 0.029412
| 0
| 1
| 0.32
| false
| 0
| 0.04
| 0
| 0.68
| 0.32
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 9
|
846b65a631531285944971b6501acc15d4fa1623
| 37
|
py
|
Python
|
actions_upload_testing/mymod.py
|
havakv/test_actions
|
3299eb20b35ae935d877f0cdb551235d0bd54b0a
|
[
"BSD-2-Clause"
] | null | null | null |
actions_upload_testing/mymod.py
|
havakv/test_actions
|
3299eb20b35ae935d877f0cdb551235d0bd54b0a
|
[
"BSD-2-Clause"
] | 1
|
2019-10-17T14:36:37.000Z
|
2019-10-17T14:36:37.000Z
|
actions_upload_testing/mymod.py
|
havakv/test_actions
|
3299eb20b35ae935d877f0cdb551235d0bd54b0a
|
[
"BSD-2-Clause"
] | null | null | null |
def make_list():
return [1] * 5
| 9.25
| 18
| 0.540541
| 6
| 37
| 3.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 0.297297
| 37
| 3
| 19
| 12.333333
| 0.653846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
84babd671f3314d0372fcb0d2b4c529076900fe9
| 10,982
|
py
|
Python
|
test/test_array.py
|
cw-tan/deft
|
abb4d23fa0bb53031c13daef9942bceba4afd655
|
[
"MIT"
] | 1
|
2019-08-29T23:30:38.000Z
|
2019-08-29T23:30:38.000Z
|
test/test_array.py
|
cw-tan/deft
|
abb4d23fa0bb53031c13daef9942bceba4afd655
|
[
"MIT"
] | 3
|
2022-02-24T20:00:40.000Z
|
2022-03-07T12:05:27.000Z
|
test/test_array.py
|
cw-tan/deft
|
abb4d23fa0bb53031c13daef9942bceba4afd655
|
[
"MIT"
] | 1
|
2019-04-05T15:07:36.000Z
|
2019-04-05T15:07:36.000Z
|
import numpy as np
import os
import sys
import unittest
sys.path.append(os.path.join(
os.path.dirname(os.path.abspath(__file__)), '../build/'))
import pydeft as deft
import tools_for_tests as tools
class TestArray(unittest.TestCase):
def test_shape_size_strides_unravel(self):
# ----- Double3D -----
# create numpy array and deft array
shape = np.array([2,3,4], dtype='int')
a1 = np.linspace(0.0, 20.0, shape.prod()).reshape(shape)
arr1 = deft.Double3D(shape)
# test shape, size, and strides
self.assertTrue(arr1.shape() == list(a1.shape))
self.assertEqual(arr1.size(), a1.size)
self.assertTrue(
arr1.strides() == [int(s/a1.itemsize) for s in a1.strides])
for i in range(a1.size):
self.assertSequenceEqual(
arr1.unravel_index(i), np.unravel_index(i,shape))
# ----- Complex3D -----
# create numpy array and deft array
shape = np.array([2,3,4], dtype='int')
a1 = np.linspace(0.0, 20.0, shape.prod()).reshape(shape)
arr1 = deft.Complex3D(shape)
# test shape, size, and strides
self.assertTrue(arr1.shape() == list(a1.shape))
self.assertEqual(arr1.size(), a1.size)
self.assertTrue(
arr1.strides() == [int(s/a1.itemsize) for s in a1.strides])
for i in range(a1.size):
self.assertSequenceEqual(
arr1.unravel_index(i), np.unravel_index(i,shape))
def test_getters_and_setters(self):
# ----- Double3D -----
# create two deft arrays and two numpy arrays
shape = (2, 3, 4)
(arr1, arr2) = (deft.Double3D(shape), deft.Double3D(shape))
a1 = np.linspace(
0.0, 20.0, arr1.size(), dtype=arr1[...].dtype).reshape(shape)
a2 = 1.0 + np.indices(shape, dtype=arr2[...].dtype).sum(axis=0)
# test single index __getitem__ and __setitem__
for u in range(np.prod(shape)):
arr1[u] = a1.flatten()[u]
arr2[u] = arr1[u]
self.assertTrue(np.allclose(arr1[...], arr2[...]))
# test multi-index __getitem__ and __setitem__
for u in range(shape[0]):
for v in range(shape[1]):
for w in range(shape[2]):
arr2[u,v,w] = a2[u,v,w]
arr1[u,v,w] = arr2[u,v,w]
self.assertTrue(np.allclose(arr1[...], arr2[...]))
# test spliced __getitem__ and __setitem__
arr1[1,:,2] = a1[1,:,2]
self.assertFalse(np.allclose(arr1[...], arr2[...]))
self.assertTrue(np.allclose(arr1[1,:,2], a1[1,:,2]))
# ----- Complex3D -----
# create two deft arrays and two numpy arrays
shape = (2, 3, 4)
(arr1, arr2) = (deft.Complex3D(shape), deft.Complex3D(shape))
a1 = np.linspace(
0.0, 20.0, arr1.size(), dtype=arr1[...].dtype).reshape(shape)
a2 = 1.0 + np.indices(shape, dtype=arr2[...].dtype).sum(axis=0)
# test single index __getitem__ and __setitem__
for u in range(np.prod(shape)):
arr1[u] = a1.flatten()[u]
arr2[u] = arr1[u]
self.assertTrue(np.allclose(arr1[...], arr2[...]))
# test multi-index __getitem__ and __setitem__
for u in range(shape[0]):
for v in range(shape[1]):
for w in range(shape[2]):
arr2[u,v,w] = a2[u,v,w]
arr1[u,v,w] = arr2[u,v,w]
self.assertTrue(np.allclose(arr1[...], arr2[...]))
# test spliced __getitem__ and __setitem__
arr1[1,:,2] = a1[1,:,2]
self.assertFalse(np.allclose(arr1[...], arr2[...]))
self.assertTrue(np.allclose(arr1[1,:,2], a1[1,:,2]))
def test_arithmetic_assignments(self):
# ----- Double3D -----
# create two deft arrays and two numpy arrays
shape = (2, 3, 4)
(arr1, arr2) = (deft.Double3D(shape), deft.Double3D(shape))
a1 = np.linspace(
0.0, 20.0, arr1.size(), dtype=arr1[...].dtype).reshape(shape)
a2 = 1.0 + np.indices(shape, dtype=arr2[...].dtype).sum(axis=0)
# test += operator
(arr1[...], arr2[...]) = (a1, a2)
arr1 += 2.0
self.assertTrue(np.allclose(arr1[...], a1+2.0))
arr1 += arr2
self.assertTrue(np.allclose(arr1[...], a1+2.0+a2))
# test -= operator
(arr1[...], arr2[...]) = (a1, a2)
arr1 -= 2.0
self.assertTrue(np.allclose(arr1[...], a1-2.0))
arr1 -= arr2
self.assertTrue(np.allclose(arr1[...], a1-2.0-a2))
# test *= operator
(arr1[...], arr2[...]) = (a1, a2)
arr1 *= 2.0
self.assertTrue(np.allclose(arr1[...], a1*2.0))
arr1 *= arr2
self.assertTrue(np.allclose(arr1[...], a1*2.0*a2))
# test /= operator
(arr1[...], arr2[...]) = (a1, a2)
arr1 /= 2.0
self.assertTrue(np.allclose(arr1[...], a1/2.0))
arr1 /= arr2
self.assertTrue(np.allclose(arr1[...], a1/2.0/a2))
# ----- Complex3D -----
# create two deft arrays and two numpy arrays
shape = (2, 3, 4)
(arr1, arr2) = (deft.Complex3D(shape), deft.Complex3D(shape))
a1 = np.linspace(
0.0, 20.0, arr1.size(), dtype=arr1[...].dtype).reshape(shape)
a2 = 1.0 + np.indices(shape, dtype=arr2[...].dtype).sum(axis=0)
# test += operator
(arr1[...], arr2[...]) = (a1, a2)
arr1 += 2.0
self.assertTrue(np.allclose(arr1[...], a1+2.0))
arr1 += arr2
self.assertTrue(np.allclose(arr1[...], a1+2.0+a2))
# test -= operator
(arr1[...], arr2[...]) = (a1, a2)
arr1 -= 2.0
self.assertTrue(np.allclose(arr1[...], a1-2.0))
arr1 -= arr2
self.assertTrue(np.allclose(arr1[...], a1-2.0-a2))
# test *= operator
(arr1[...], arr2[...]) = (a1, a2)
arr1 *= 2.0
self.assertTrue(np.allclose(arr1[...], a1*2.0))
arr1 *= arr2
self.assertTrue(np.allclose(arr1[...], a1*2.0*a2))
# test /= operator
(arr1[...], arr2[...]) = (a1, a2)
arr1 /= 2.0
self.assertTrue(np.allclose(arr1[...], a1/2.0))
arr1 /= arr2
self.assertTrue(np.allclose(arr1[...], a1/2.0/a2))
def test_elementwise_math(self):
# ----- Double3D -----
# create numpy array and deft array
shape = (2, 3, 4)
arr1 = deft.Double3D(shape)
a1 = np.linspace(
0.0, 20.0, arr1.size(), dtype=arr1[...].dtype).reshape(shape)
# test sqrt
arr1[...] = a1
arr1.compute_sqrt()
self.assertTrue(np.allclose(arr1[...], np.sqrt(a1)))
# test pow
arr1[...] = a1
arr1.compute_pow(1.0/3.0)
self.assertTrue(np.allclose(arr1[...], a1**(1.0/3.0)))
# ----- Complex3D -----
# create numpy array and deft array
shape = (2, 3, 4)
arr1 = deft.Complex3D(shape)
a1 = np.linspace(
0.0, 20.0, arr1.size(), dtype=arr1[...].dtype).reshape(shape)
# test sqrt
arr1[...] = a1
arr1.compute_sqrt()
self.assertTrue(np.allclose(arr1[...], np.sqrt(a1)))
# test pow
arr1[...] = a1
arr1.compute_pow(1.0/3.0)
self.assertTrue(np.allclose(arr1[...], a1**(1.0/3.0)))
def test_negation_addition_subtraction_multiplication_division(self):
# ----- Double3D -----
# create two deft arrays and two numpy arrays
shape = (2, 3, 4)
(arr1, arr2) = (deft.Double3D(shape), deft.Double3D(shape))
a1 = np.linspace(
0.0, 20.0, arr1.size(), dtype=arr1[...].dtype).reshape(shape)
a2 = 1.0 + np.indices(shape, dtype=arr2[...].dtype).sum(axis=0)
# test unary - operator
arr1[...] = a1
arr1 = -arr1
self.assertTrue(np.allclose(arr1[...], -a1))
# test + operator
(arr1[...], arr2[...]) = (a1, a2)
arr3 = arr1 + arr2
self.assertTrue(np.allclose(arr3[...], a1+a2))
arr3 = arr1 + 2.0
self.assertTrue(np.allclose(arr3[...], a1+2.0))
arr3 = 3.0 + arr2
self.assertTrue(np.allclose(arr3[...], 3.0+a2))
# test - operator
(arr1[...], arr2[...]) = (a1, a2)
arr3 = arr1 - arr2
self.assertTrue(np.allclose(arr3[...], a1-a2))
arr3 = arr1 - 2.0
self.assertTrue(np.allclose(arr3[...], a1-2.0))
arr3 = 3.0 - arr2
self.assertTrue(np.allclose(arr3[...], 3.0-a2))
# test * operator
(arr1[...], arr2[...]) = (a1, a2)
arr3 = arr1 * arr2
self.assertTrue(np.allclose(arr3[...], a1*a2))
arr3 = arr1 * 2.0
self.assertTrue(np.allclose(arr3[...], a1*2.0))
arr3 = 3.0 * arr2
self.assertTrue(np.allclose(arr3[...], 3.0*a2))
# test / operator
(arr1[...], arr2[...]) = (a1, a2)
arr3 = arr1 / arr2
self.assertTrue(np.allclose(arr3[...], a1/a2))
arr3 = arr1 / 2.0
self.assertTrue(np.allclose(arr3[...], a1/2.0))
arr3 = 3.0 / arr2
self.assertTrue(np.allclose(arr3[...], 3.0/a2))
# ----- Complex3D -----
# create two deft arrays and two numpy arrays
shape = (2, 3, 4)
(arr1, arr2) = (deft.Complex3D(shape), deft.Complex3D(shape))
a1 = np.linspace(
0.0, 20.0, arr1.size(), dtype=arr1[...].dtype).reshape(shape)
a2 = 1.0 + np.indices(shape, dtype=arr2[...].dtype).sum(axis=0)
# test unary - operator
arr1[...] = a1
arr1 = -arr1
self.assertTrue(np.allclose(arr1[...], -a1))
# test + operator
(arr1[...], arr2[...]) = (a1, a2)
arr3 = arr1 + arr2
self.assertTrue(np.allclose(arr3[...], a1+a2))
arr3 = arr1 + 2.0
self.assertTrue(np.allclose(arr3[...], a1+2.0))
arr3 = 3.0 + arr2
self.assertTrue(np.allclose(arr3[...], 3.0+a2))
# test - operator
(arr1[...], arr2[...]) = (a1, a2)
arr3 = arr1 - arr2
self.assertTrue(np.allclose(arr3[...], a1-a2))
arr3 = arr1 - 2.0
self.assertTrue(np.allclose(arr3[...], a1-2.0))
arr3 = 3.0 - arr2
self.assertTrue(np.allclose(arr3[...], 3.0-a2))
# test * operator
(arr1[...], arr2[...]) = (a1, a2)
arr3 = arr1 * arr2
self.assertTrue(np.allclose(arr3[...], a1*a2))
arr3 = arr1 * 2.0
self.assertTrue(np.allclose(arr3[...], a1*2.0))
arr3 = 3.0 * arr2
self.assertTrue(np.allclose(arr3[...], 3.0*a2))
# test / operator
(arr1[...], arr2[...]) = (a1, a2)
arr3 = arr1 / arr2
self.assertTrue(np.allclose(arr3[...], a1/a2))
arr3 = arr1 / 2.0
self.assertTrue(np.allclose(arr3[...], a1/2.0))
arr3 = 3.0 / arr2
self.assertTrue(np.allclose(arr3[...], 3.0/a2))
if __name__ == '__main__':
unittest.main()
| 38.669014
| 77
| 0.512566
| 1,414
| 10,982
| 3.920085
| 0.067185
| 0.146491
| 0.150099
| 0.225149
| 0.931986
| 0.931986
| 0.931986
| 0.931986
| 0.928739
| 0.924409
| 0
| 0.084051
| 0.291477
| 10,982
| 283
| 78
| 38.805654
| 0.628325
| 0.116919
| 0
| 0.925234
| 0
| 0
| 0.002384
| 0
| 0
| 0
| 0
| 0
| 0.28972
| 1
| 0.023364
| false
| 0
| 0.028037
| 0
| 0.056075
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
84ef9ba2d8e99f25cdf9471566c251a28d93d78d
| 172
|
wsgi
|
Python
|
pyshacl_webservice/app.wsgi
|
CSIRO-enviro-informatics/pyshacl-webservice
|
f9067fb9e2b95f0a93cec71cf00570a86312fd49
|
[
"Apache-2.0"
] | null | null | null |
pyshacl_webservice/app.wsgi
|
CSIRO-enviro-informatics/pyshacl-webservice
|
f9067fb9e2b95f0a93cec71cf00570a86312fd49
|
[
"Apache-2.0"
] | 1
|
2018-09-17T03:02:40.000Z
|
2018-09-17T03:02:40.000Z
|
pyshacl_webservice/app.wsgi
|
CSIRO-enviro-informatics/pyshacl-webservice
|
f9067fb9e2b95f0a93cec71cf00570a86312fd49
|
[
"Apache-2.0"
] | 1
|
2018-09-10T01:55:09.000Z
|
2018-09-10T01:55:09.000Z
|
import sys
sys.path.insert(0, '/var/www/pyshacl-webservice')
sys.path.insert(0, '/var/www/pyshacl-webservice/pyshacl_webservice')
from flask_app import app as application
| 28.666667
| 68
| 0.790698
| 27
| 172
| 4.962963
| 0.518519
| 0.380597
| 0.19403
| 0.208955
| 0.552239
| 0.552239
| 0.552239
| 0.552239
| 0
| 0
| 0
| 0.012579
| 0.075581
| 172
| 5
| 69
| 34.4
| 0.830189
| 0
| 0
| 0
| 0
| 0
| 0.424419
| 0.424419
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
ca3d52ee46b298ab84139bb6b923e2f34d53bc83
| 6,171
|
py
|
Python
|
recipes/Python/115419_adduserloginpasswduserdel_not_posix_password_db_/recipe-115419.py
|
tdiprima/code
|
61a74f5f93da087d27c70b2efe779ac6bd2a3b4f
|
[
"MIT"
] | 2,023
|
2017-07-29T09:34:46.000Z
|
2022-03-24T08:00:45.000Z
|
recipes/Python/115419_adduserloginpasswduserdel_not_posix_password_db_/recipe-115419.py
|
unhacker/code
|
73b09edc1b9850c557a79296655f140ce5e853db
|
[
"MIT"
] | 32
|
2017-09-02T17:20:08.000Z
|
2022-02-11T17:49:37.000Z
|
recipes/Python/115419_adduserloginpasswduserdel_not_posix_password_db_/recipe-115419.py
|
unhacker/code
|
73b09edc1b9850c557a79296655f140ce5e853db
|
[
"MIT"
] | 780
|
2017-07-28T19:23:28.000Z
|
2022-03-25T20:39:41.000Z
|
# These are multiple .py files that can be used
# to access the password file.
# adduser.py adds a login/password to the database
# login.py attempts to login using a supplied l/p
# passwd.py allows the user to change his password
# userdel.py allows a user to delete himself/herself
#
# #----cut here----# is where the file ends
# adduser.py
#
# Copyright (c) 2001 Nelson Rush. All rights reserved.
#
# add user to the password database
#
from os import *
from struct import *
from binascii import *
from getpass import *
from strop import *
import md5
import sys
if access("passwords",O_RDWR):
fd = open("passwords",O_BINARY|O_RDWR)
newfile = 0
else:
fd = open("passwords",O_BINARY|O_RDWR|O_CREAT)
newfile = 1
md5sum = md5.new()
fmt = "40s 256s 30s 32s h"
print "Please enter the following information to add a user:\n\n"
user_name = raw_input("name: ")
user_home = raw_input("home: ")
user_login = raw_input("login: ")
user_password = getpass("password: ")
md5sum.update(user_password)
user_passhash = hexlify(md5sum.digest())
user_attempts = 0
user_sizeof = calcsize(fmt)
user_info = pack(fmt,user_name,user_home,user_login,user_passhash,user_attempts)
user_data = user_info
if newfile:
print "Adding user. . ."
write(fd,user_info)
close(fd)
sys.exit(1)
while len(user_data) == user_sizeof:
user_data = read(fd,user_sizeof)
if len(user_data) == user_sizeof:
(u_name,u_home,u_login,u_passhash,u_attempts) = unpack(fmt,user_data)
if find(u_login,user_login) == 0:
print "User already exists in database."
close(fd)
sys.exit(-1)
print "Adding user. . ."
write(fd,user_info)
close(fd)
#----cut here----#
# login.py
#
# Copyright (c) 2001 Nelson Rush. All rights reserved.
#
# login system which takes l/p and verifies access
#
from os import *
from struct import *
from binascii import *
from strop import *
from getpass import *
import md5
import sys
if access("passwords",O_RDONLY):
fd = open("passwords",O_BINARY|O_RDONLY)
else:
print "No password database exists."
sys.exit(-1)
md5sum = md5.new()
fmt = "40s 256s 30s 32s h"
user_name = "Nothing"
user_home = "Nothing"
user_login = raw_input("login: ")
user_password = getpass("password: ")
md5sum.update(user_password)
user_passhash = hexlify(md5sum.digest())
user_attempts = 0
user_sizeof = calcsize(fmt)
user_data = pack(fmt,user_name,user_home,user_login,user_passhash,user_attempts)
while len(user_data) == user_sizeof:
user_data = read(fd,user_sizeof)
if len(user_data) == user_sizeof:
(u_name,u_home,u_login,u_passhash,u_attempts) = unpack(fmt,user_data)
if find(u_login,user_login) == 0 and u_passhash == user_passhash:
print "Access Granted!"
close(fd)
sys.exit(1)
print "Invalid login or password."
close(fd)
sys.exit(-1)
#----cut here----#
# passwd.py
#
# Copyright (c) 2001 Nelson Rush. All rights reserved.
#
# changes a password
#
SEEK_BEG = 0
SEEK_CUR = 1
SEEK_END = 2
from os import *
from struct import *
from binascii import *
from strop import *
from getpass import *
import md5
import sys
if access("passwords",O_RDWR):
fd = open("passwords",O_BINARY|O_RDWR)
else:
print "No password database exists."
sys.exit(-1)
md5sum = md5.new()
fmt = "40s 256s 30s 32s h"
user_name = "Nothing"
user_home = "Nothing"
user_login = raw_input("login: ")
user_password = getpass("OLD password: ")
md5sum.update(user_password)
user_passhash = hexlify(md5sum.digest())
user_attempts = 0
user_sizeof = calcsize(fmt)
user_data = pack(fmt,user_name,user_home,user_login,user_passhash,user_attempts)
while len(user_data) == user_sizeof:
user_data = read(fd,user_sizeof)
if len(user_data) == user_sizeof:
(u_name,u_home,u_login,u_passhash,u_attempts) = unpack(fmt,user_data)
if find(u_login,user_login) == 0 and u_passhash == user_passhash:
new_password = getpass("NEW password: ")
newmd5sum = md5.new(new_password)
new_passhash = hexlify(newmd5sum.digest())
user_info = pack(fmt,u_name,u_home,u_login,new_passhash,u_attempts)
lseek(fd,-user_sizeof,SEEK_CUR)
write(fd,user_info)
close(fd)
print "Password changed."
sys.exit(1)
print "Invalid login or password."
close(fd)
sys.exit(-1)
#----cut here----#
# userdel.py
#
# Copyright (c) 2001 Nelson Rush. All rights reserved.
#
# deletes a user in the password database
#
SEEK_BEG = 0
SEEK_CUR = 1
SEEK_END = 2
import os
from struct import *
from binascii import *
from strop import *
from getpass import *
import md5
import sys
if os.access("passwords",os.O_RDWR):
fd = os.open("passwords",os.O_BINARY|os.O_RDWR)
else:
print "No password database exists."
sys.exit(-1)
md5sum = md5.new()
fmt = "40s 256s 30s 32s h"
(fmode, fino, fdev, fnlink, fuid, fgid, fsize, fatime, fmtime, fctime) = os.fstat(fd)
user_name = "Nothing"
user_home = "Nothing"
user_login = raw_input("login: ")
user_password = getpass("password: ")
md5sum.update(user_password)
user_passhash = hexlify(md5sum.digest())
user_attempts = 0
user_sizeof = calcsize(fmt)
user_data = pack(fmt,user_name,user_home,user_login,user_passhash,user_attempts)
while len(user_data) == user_sizeof:
user_data = os.read(fd,user_sizeof)
if len(user_data) == user_sizeof:
(u_name,u_home,u_login,u_passhash,u_attempts) = unpack(fmt,user_data)
if find(u_login,user_login) == 0 and u_passhash == user_passhash:
curpos = os.lseek(fd,0,SEEK_CUR)
if curpos == fsize:
os.close(fd)
f = open("passwords","ab+")
f.truncate(fsize - user_sizeof)
f.close()
print "User deleted."
sys.exit(1)
buf = os.read(fd,fsize - curpos)
os.lseek(fd,curpos - user_sizeof,SEEK_BEG)
os.write(fd,buf)
os.close(fd)
f = open("passwords","ab+")
f.truncate(fsize - user_sizeof)
f.close()
print "User deleted."
sys.exit(1)
print "Invalid login or password."
os.close(fd)
sys.exit(-1)
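All four scripts above share the same fixed-width record format, "40s 256s 30s 32s h" (name, home, login, hex MD5 password hash, attempt count). Below is a small sketch of how one such record is packed and unpacked; note it is written for Python 3 (bytes literals, hashlib) whereas the recipe itself is Python 2, so it is illustrative only:

```
import struct
import hashlib
from binascii import hexlify

FMT = "40s 256s 30s 32s h"
RECORD_SIZE = struct.calcsize(FMT)      # every record in "passwords" has this fixed size

# Build one record the same way adduser.py does (MD5 digest stored as 32 hex bytes).
passhash = hexlify(hashlib.md5(b"secret").digest())
record = struct.pack(FMT, b"Alice Example", b"/home/alice", b"alice", passhash, 0)
assert len(record) == RECORD_SIZE

# Reading it back mirrors the unpack() loop used by login.py / passwd.py / userdel.py.
name, home, login, stored_hash, attempts = struct.unpack(FMT, record)
print(login.rstrip(b"\x00"), attempts)
```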
| 28.836449
| 85
| 0.675255
| 926
| 6,171
| 4.320734
| 0.159827
| 0.049988
| 0.023994
| 0.029993
| 0.747063
| 0.741315
| 0.72057
| 0.713822
| 0.707073
| 0.633592
| 0
| 0.02119
| 0.204667
| 6,171
| 213
| 86
| 28.971831
| 0.79401
| 0.121374
| 0
| 0.818713
| 0
| 0
| 0.122282
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.28655
| 0.163743
| null | null | 0.081871
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
ca5245db64fafa59333995d26b3ac67cbb8072a1
| 10,944
|
py
|
Python
|
gnuradio-3.7.13.4/gr-filter/python/filter/qa_freq_xlating_fir_filter.py
|
v1259397/cosmic-gnuradio
|
64c149520ac6a7d44179c3f4a38f38add45dd5dc
|
[
"BSD-3-Clause"
] | 1
|
2021-03-09T07:32:37.000Z
|
2021-03-09T07:32:37.000Z
|
gnuradio-3.7.13.4/gr-filter/python/filter/qa_freq_xlating_fir_filter.py
|
v1259397/cosmic-gnuradio
|
64c149520ac6a7d44179c3f4a38f38add45dd5dc
|
[
"BSD-3-Clause"
] | null | null | null |
gnuradio-3.7.13.4/gr-filter/python/filter/qa_freq_xlating_fir_filter.py
|
v1259397/cosmic-gnuradio
|
64c149520ac6a7d44179c3f4a38f38add45dd5dc
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2008,2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from gnuradio import gr, gr_unittest, filter, blocks
import cmath, math
def fir_filter(x, taps, decim=1):
y = []
x2 = (len(taps)-1)*[0,] + x
for i in range(0, len(x), decim):
yi = 0
for j in range(len(taps)):
yi += taps[len(taps)-1-j] * x2[i+j]
y.append(yi)
return y
def sig_source_s(samp_rate, freq, amp, N):
t = map(lambda x: float(x)/samp_rate, xrange(N))
y = map(lambda x: int(100*math.sin(2.*math.pi*freq*x)), t)
return y
def sig_source_c(samp_rate, freq, amp, N):
t = map(lambda x: float(x)/samp_rate, xrange(N))
y = map(lambda x: math.cos(2.*math.pi*freq*x) + \
1j*math.sin(2.*math.pi*freq*x), t)
return y
def mix(lo, data):
y = [lo_i*data_i for lo_i, data_i in zip(lo, data)]
return y
class test_freq_xlating_filter(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block ()
def tearDown(self):
self.tb = None
def generate_ccf_source(self):
self.fs = fs = 1
self.fc = fc = 0.3
self.bw = bw = 0.1
self.taps = filter.firdes.low_pass(1, fs, bw, bw/4)
times = xrange(100)
self.src_data = map(lambda t: cmath.exp(-2j*cmath.pi*fc/fs*(t/100.0)), times)
def generate_ccc_source(self):
self.fs = fs = 1
self.fc = fc = 0.3
self.bw = bw = 0.1
self.taps = filter.firdes.complex_band_pass(1, fs, -bw/2, bw/2, bw/4)
times = xrange(100)
self.src_data = map(lambda t: cmath.exp(-2j*cmath.pi*fc/fs*(t/100.0)), times)
def generate_fcf_source(self):
self.fs = fs = 1
self.fc = fc = 0.3
self.bw = bw = 0.1
self.taps = filter.firdes.low_pass(1, fs, bw, bw/4)
times = xrange(100)
self.src_data = map(lambda t: math.sin(2*cmath.pi*fc/fs*(t/100.0)), times)
def generate_fcc_source(self):
self.fs = fs = 1
self.fc = fc = 0.3
self.bw = bw = 0.1
self.taps = filter.firdes.complex_band_pass(1, fs, -bw/2, bw/2, bw/4)
times = xrange(100)
self.src_data = map(lambda t: math.sin(2*cmath.pi*fc/fs*(t/100.0)), times)
def generate_scf_source(self):
self.fs = fs = 1
self.fc = fc = 0.3
self.bw = bw = 0.12
self.taps = filter.firdes.low_pass(1, fs, bw, bw/4)
times = xrange(100)
self.src_data = map(lambda t: int(100*math.sin(2*cmath.pi*fc/fs*(t/100.0))), times)
def generate_scc_source(self):
self.fs = fs = 1
self.fc = fc = 0.3
self.bw = bw = 0.12
self.taps = filter.firdes.complex_band_pass(1, fs, -bw/2, bw/2, bw/4)
times = xrange(100)
self.src_data = map(lambda t: int(100*math.sin(2*cmath.pi*fc/fs*(t/100.0))), times)
def test_fir_filter_ccf_001(self):
self.generate_ccf_source()
decim = 1
lo = sig_source_c(self.fs, -self.fc, 1, len(self.src_data))
despun = mix(lo, self.src_data)
expected_data = fir_filter(despun, self.taps, decim)
src = blocks.vector_source_c(self.src_data)
op = filter.freq_xlating_fir_filter_ccf(decim, self.taps, self.fc, self.fs)
dst = blocks.vector_sink_c()
self.tb.connect(src, op, dst)
self.tb.run()
result_data = dst.data()
self.assertComplexTuplesAlmostEqual(expected_data, result_data, 5)
def test_fir_filter_ccf_002(self):
self.generate_ccf_source()
decim = 4
lo = sig_source_c(self.fs, -self.fc, 1, len(self.src_data))
despun = mix(lo, self.src_data)
expected_data = fir_filter(despun, self.taps, decim)
src = blocks.vector_source_c(self.src_data)
op = filter.freq_xlating_fir_filter_ccf(decim, self.taps, self.fc, self.fs)
dst = blocks.vector_sink_c()
self.tb.connect(src, op, dst)
self.tb.run()
result_data = dst.data()
self.assertComplexTuplesAlmostEqual(expected_data, result_data, 5)
def test_fir_filter_ccc_001(self):
self.generate_ccc_source()
decim = 1
lo = sig_source_c(self.fs, -self.fc, 1, len(self.src_data))
despun = mix(lo, self.src_data)
expected_data = fir_filter(despun, self.taps, decim)
src = blocks.vector_source_c(self.src_data)
op = filter.freq_xlating_fir_filter_ccc(decim, self.taps, self.fc, self.fs)
dst = blocks.vector_sink_c()
self.tb.connect(src, op, dst)
self.tb.run()
result_data = dst.data()
self.assertComplexTuplesAlmostEqual(expected_data, result_data, 5)
def test_fir_filter_ccc_002(self):
self.generate_ccc_source()
decim = 4
lo = sig_source_c(self.fs, -self.fc, 1, len(self.src_data))
despun = mix(lo, self.src_data)
expected_data = fir_filter(despun, self.taps, decim)
src = blocks.vector_source_c(self.src_data)
op = filter.freq_xlating_fir_filter_ccc(decim, self.taps, self.fc, self.fs)
dst = blocks.vector_sink_c()
self.tb.connect(src, op, dst)
self.tb.run()
result_data = dst.data()
self.assertComplexTuplesAlmostEqual(expected_data, result_data, 5)
def test_fir_filter_fcf_001(self):
self.generate_fcf_source()
decim = 1
lo = sig_source_c(self.fs, -self.fc, 1, len(self.src_data))
despun = mix(lo, self.src_data)
expected_data = fir_filter(despun, self.taps, decim)
src = blocks.vector_source_f(self.src_data)
op = filter.freq_xlating_fir_filter_fcf(decim, self.taps, self.fc, self.fs)
dst = blocks.vector_sink_c()
self.tb.connect(src, op, dst)
self.tb.run()
result_data = dst.data()
self.assertComplexTuplesAlmostEqual(expected_data, result_data, 5)
def test_fir_filter_fcf_002(self):
self.generate_fcf_source()
decim = 4
lo = sig_source_c(self.fs, -self.fc, 1, len(self.src_data))
despun = mix(lo, self.src_data)
expected_data = fir_filter(despun, self.taps, decim)
src = blocks.vector_source_f(self.src_data)
op = filter.freq_xlating_fir_filter_fcf(decim, self.taps, self.fc, self.fs)
dst = blocks.vector_sink_c()
self.tb.connect(src, op, dst)
self.tb.run()
result_data = dst.data()
self.assertComplexTuplesAlmostEqual(expected_data, result_data, 5)
def test_fir_filter_fcc_001(self):
self.generate_fcc_source()
decim = 1
lo = sig_source_c(self.fs, -self.fc, 1, len(self.src_data))
despun = mix(lo, self.src_data)
expected_data = fir_filter(despun, self.taps, decim)
src = blocks.vector_source_f(self.src_data)
op = filter.freq_xlating_fir_filter_fcc(decim, self.taps, self.fc, self.fs)
dst = blocks.vector_sink_c()
self.tb.connect(src, op, dst)
self.tb.run()
result_data = dst.data()
self.assertComplexTuplesAlmostEqual(expected_data, result_data, 5)
def test_fir_filter_fcc_002(self):
self.generate_fcc_source()
decim = 4
lo = sig_source_c(self.fs, -self.fc, 1, len(self.src_data))
despun = mix(lo, self.src_data)
expected_data = fir_filter(despun, self.taps, decim)
src = blocks.vector_source_f(self.src_data)
op = filter.freq_xlating_fir_filter_fcc(decim, self.taps, self.fc, self.fs)
dst = blocks.vector_sink_c()
self.tb.connect(src, op, dst)
self.tb.run()
result_data = dst.data()
self.assertComplexTuplesAlmostEqual(expected_data, result_data, 5)
def test_fir_filter_scf_001(self):
self.generate_scf_source()
decim = 1
lo = sig_source_c(self.fs, -self.fc, 1, len(self.src_data))
despun = mix(lo, self.src_data)
expected_data = fir_filter(despun, self.taps, decim)
src = blocks.vector_source_s(self.src_data)
op = filter.freq_xlating_fir_filter_scf(decim, self.taps, self.fc, self.fs)
dst = blocks.vector_sink_c()
self.tb.connect(src, op, dst)
self.tb.run()
result_data = dst.data()
self.assertComplexTuplesAlmostEqual(expected_data, result_data, 4)
def test_fir_filter_scf_002(self):
self.generate_scf_source()
decim = 4
lo = sig_source_c(self.fs, -self.fc, 1, len(self.src_data))
despun = mix(lo, self.src_data)
expected_data = fir_filter(despun, self.taps, decim)
src = blocks.vector_source_s(self.src_data)
op = filter.freq_xlating_fir_filter_scf(decim, self.taps, self.fc, self.fs)
dst = blocks.vector_sink_c()
self.tb.connect(src, op, dst)
self.tb.run()
result_data = dst.data()
self.assertComplexTuplesAlmostEqual(expected_data, result_data, 4)
def test_fir_filter_scc_001(self):
self.generate_scc_source()
decim = 1
lo = sig_source_c(self.fs, -self.fc, 1, len(self.src_data))
despun = mix(lo, self.src_data)
expected_data = fir_filter(despun, self.taps, decim)
src = blocks.vector_source_s(self.src_data)
op = filter.freq_xlating_fir_filter_scc(decim, self.taps, self.fc, self.fs)
dst = blocks.vector_sink_c()
self.tb.connect(src, op, dst)
self.tb.run()
result_data = dst.data()
self.assertComplexTuplesAlmostEqual(expected_data, result_data, 4)
def test_fir_filter_scc_002(self):
self.generate_scc_source()
decim = 4
lo = sig_source_c(self.fs, -self.fc, 1, len(self.src_data))
despun = mix(lo, self.src_data)
expected_data = fir_filter(despun, self.taps, decim)
src = blocks.vector_source_s(self.src_data)
op = filter.freq_xlating_fir_filter_scc(decim, self.taps, self.fc, self.fs)
dst = blocks.vector_sink_c()
self.tb.connect(src, op, dst)
self.tb.run()
result_data = dst.data()
self.assertComplexTuplesAlmostEqual(expected_data, result_data, 4)
if __name__ == '__main__':
gr_unittest.run(test_freq_xlating_filter, "test_freq_xlating_filter.xml")
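For reference, the pure-Python fir_filter() helper at the top of this file should be expressible with NumPy as a full linear convolution truncated to the input length and then decimated. This equivalence sketch is not part of the original test, and NumPy is not otherwise imported here:

```
import numpy as np

def fir_filter_np(x, taps, decim=1):
    # Full convolution gives y[i] = sum_k taps[k] * x[i-k]; keep the first len(x)
    # outputs and take every decim-th one, matching fir_filter() above.
    return np.convolve(x, taps)[:len(x)][::decim]
```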
| 36
| 91
| 0.634412
| 1,680
| 10,944
| 3.925
| 0.110119
| 0.044586
| 0.070064
| 0.029117
| 0.850925
| 0.836215
| 0.793145
| 0.793145
| 0.793145
| 0.793145
| 0
| 0.0265
| 0.244883
| 10,944
| 303
| 92
| 36.118812
| 0.771418
| 0.070267
| 0
| 0.815789
| 0
| 0
| 0.003545
| 0.002758
| 0
| 0
| 0
| 0
| 0.052632
| 1
| 0.105263
| false
| 0.026316
| 0.008772
| 0
| 0.135965
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ca8f84861eb12427c8d5920213a20bca77bb3bfc
| 115
|
py
|
Python
|
model/rnn/nn/__int__.py
|
tiagopms/fast-conversational-banking
|
b9d3ddfe3adb78522fafab91c2d20495db063dda
|
[
"MIT"
] | 2
|
2018-03-06T13:00:33.000Z
|
2018-05-29T00:27:01.000Z
|
model/rnn/nn/__int__.py
|
tiagopms/fast-conversational-banking
|
b9d3ddfe3adb78522fafab91c2d20495db063dda
|
[
"MIT"
] | null | null | null |
model/rnn/nn/__int__.py
|
tiagopms/fast-conversational-banking
|
b9d3ddfe3adb78522fafab91c2d20495db063dda
|
[
"MIT"
] | null | null | null |
from model.rnn.nn.gru_module import EncoderRNN, DecoderRNN
from model.rnn.nn.attn_gru_module import AttnDecoderRNN
| 38.333333
| 58
| 0.86087
| 18
| 115
| 5.333333
| 0.611111
| 0.1875
| 0.25
| 0.291667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078261
| 115
| 2
| 59
| 57.5
| 0.90566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
04ba4174d01b71f5a89be0cf9c9830458b288872
| 161
|
py
|
Python
|
platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/bobcat/calculators/calc_frame_coding.py
|
lmnotran/gecko_sdk
|
2e82050dc8823c9fe0e8908c1b2666fb83056230
|
[
"Zlib"
] | 82
|
2016-06-29T17:24:43.000Z
|
2021-04-16T06:49:17.000Z
|
platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/bobcat/calculators/calc_frame_coding.py
|
lmnotran/gecko_sdk
|
2e82050dc8823c9fe0e8908c1b2666fb83056230
|
[
"Zlib"
] | 6
|
2022-01-12T18:22:08.000Z
|
2022-03-25T10:19:27.000Z
|
platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/bobcat/calculators/calc_frame_coding.py
|
lmnotran/gecko_sdk
|
2e82050dc8823c9fe0e8908c1b2666fb83056230
|
[
"Zlib"
] | 56
|
2016-08-02T10:50:50.000Z
|
2021-07-19T08:57:34.000Z
|
from pyradioconfig.parts.ocelot.calculators.calc_frame_coding import CALC_Frame_Coding_Ocelot
class Calc_Frame_Coding_Bobcat(CALC_Frame_Coding_Ocelot):
pass
| 40.25
| 93
| 0.888199
| 23
| 161
| 5.73913
| 0.521739
| 0.272727
| 0.454545
| 0.318182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068323
| 161
| 4
| 94
| 40.25
| 0.88
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 9
|
04cb51a92446fd8428f7167613f8b0663fb396cd
| 13,200
|
py
|
Python
|
deeplabcut/utils/frameselectiontools.py
|
serre-lab/deeplabcut_mgh
|
aa37b104ba4967932528d4f79665648474f51112
|
[
"MIT"
] | null | null | null |
deeplabcut/utils/frameselectiontools.py
|
serre-lab/deeplabcut_mgh
|
aa37b104ba4967932528d4f79665648474f51112
|
[
"MIT"
] | 1
|
2019-12-15T00:37:15.000Z
|
2019-12-15T00:37:15.000Z
|
deeplabcut/utils/frameselectiontools.py
|
kalpitthakkar/deeplabcut_mgh_pose
|
8fa4a59f422ff0357552e290230838239edcfe1b
|
[
"MIT"
] | null | null | null |
"""
DeepLabCut2.0 Toolbox (deeplabcut.org)
© A. & M. Mathis Labs
https://github.com/AlexEMG/DeepLabCut
Please see AUTHORS for contributors.
https://github.com/AlexEMG/DeepLabCut/blob/master/AUTHORS
Licensed under GNU Lesser General Public License v3.0
"""
import numpy as np
import math
from skimage import io
from skimage.util import img_as_ubyte
from sklearn.cluster import MiniBatchKMeans
import cv2
from tqdm import tqdm
def UniformFrames(clip,numframes2pick,start,stop,Index=None):
''' Temporally uniform sampling of frames in the interval (start, stop).
The visual content of the video is irrelevant for this method. This code is fast and sufficient (to extract distinct frames)
when behavioral videos naturally cover many states.
The variable Index allows passing a subindex for the frames.
'''
print("Uniformly extracting of frames from", round(start*clip.duration,2)," seconds to", round(stop*clip.duration,2), " seconds.")
if Index is None:
if start==0:
frames2pick = np.random.choice(math.ceil(clip.duration * clip.fps * stop), size=numframes2pick, replace = False)
else:
frames2pick = np.random.choice(range(math.floor(start*clip.duration * clip.fps),math.ceil(clip.duration * clip.fps * stop)), size=numframes2pick, replace = False)
return frames2pick
else:
startindex=int(np.floor(clip.fps*clip.duration*start))
stopindex=int(np.ceil(clip.fps*clip.duration*stop))
Index=np.array(Index,dtype=np.int)
Index=Index[(Index>startindex)*(Index<stopindex)] #crop to range!
if len(Index)>=numframes2pick:
return list(np.random.permutation(Index)[:numframes2pick])
else:
return list(Index)
#uses openCV
def UniformFramescv2(cap,numframes2pick,start,stop,Index=None):
''' Temporally uniform sampling of frames in the interval (start, stop).
The visual content of the video is irrelevant for this method. This code is fast and sufficient (to extract distinct frames)
when behavioral videos naturally cover many states.
The variable Index allows passing a subindex for the frames.
'''
nframes = int(cap.get(7))
print("Uniformly extracting of frames from", round(start*nframes*1./cap.get(5),2)," seconds to", round(stop*nframes*1./cap.get(5),2), " seconds.")
sframe = math.floor(start * nframes)
eframe = math.ceil(stop * nframes)
if len(range(sframe, eframe)) < numframes2pick:
numframes2pick = len(range(sframe, eframe))
print("You asked for more frames than available for extraction. Extracting {} frames instead.".format(numframes2pick))
if Index is None:
if start==0:
frames2pick = np.random.choice(math.ceil(nframes * stop), size=numframes2pick, replace = False)
else:
frames2pick = np.random.choice(range(math.floor(nframes * start),math.ceil(nframes * stop)), size=numframes2pick, replace = False)
return frames2pick
else:
startindex=int(np.floor(nframes*start))
stopindex=int(np.ceil(nframes*stop))
Index=np.array(Index,dtype=np.int)
Index=Index[(Index>startindex)*(Index<stopindex)] #crop to range!
if len(Index)>=numframes2pick:
return list(np.random.permutation(Index)[:numframes2pick])
else:
return list(Index)
def KmeansbasedFrameselection(clip,numframes2pick,start,stop,Index=None,step=1,resizewidth=30,batchsize=100,max_iter=50,color=False):
''' This code downsamples the video to a width of resizewidth.
The video is extracted as a numpy array, which is then clustered with kmeans, whereby each frame is treated as a vector.
Frames from different clusters are then selected for labeling. This procedure makes sure that the frames "look different",
i.e. different postures etc. On large videos this code is slow.
Consider not extracting the frames from the whole video but rather setting start and stop to a period around interesting behavior.
Note: this method can return fewer images than numframes2pick.'''
print("Kmeans-quantization based extracting of frames from", round(start*clip.duration,2)," seconds to", round(stop*clip.duration,2), " seconds.")
startindex=int(np.floor(clip.fps*clip.duration*start))
stopindex=int(np.ceil(clip.fps*clip.duration*stop))
if Index is None:
Index=np.arange(startindex,stopindex,step)
else:
Index=np.array(Index)
Index=Index[(Index>startindex)*(Index<stopindex)] #crop to range!
nframes=len(Index)
if batchsize>nframes:
batchsize=int(nframes/2)
if len(Index)>=numframes2pick-1:
clipresized=clip.resize(width=resizewidth)
ny, nx = clipresized.size
frame0=img_as_ubyte(clip.get_frame(0))
if np.ndim(frame0)==3:
ncolors=np.shape(frame0)[2]
else:
ncolors=1
print("Extracting and downsampling...",nframes, " frames from the video.")
if color and ncolors>1:
DATA=np.zeros((nframes,nx*3,ny))
for counter,index in tqdm(enumerate(Index)):
image=img_as_ubyte(clipresized.get_frame(index * 1. / clipresized.fps))
DATA[counter,:,:] = np.vstack([image[:,:,0],image[:,:,1],image[:,:,2]])
else:
DATA=np.zeros((nframes,nx,ny))
for counter,index in tqdm(enumerate(Index)):
if ncolors==1:
DATA[counter,:,:] = img_as_ubyte(clipresized.get_frame(index * 1. / clipresized.fps))
else: #attention: averages over color channels to keep size small / perhaps you want to use color information?
DATA[counter,:,:] = img_as_ubyte(np.array(np.mean(clipresized.get_frame(index * 1. / clipresized.fps),2),dtype=np.uint8))
print("Kmeans clustering ... (this might take a while)")
data = DATA - DATA.mean(axis=0)
data=data.reshape(nframes,-1) #stacking
kmeans=MiniBatchKMeans(n_clusters=numframes2pick, tol=1e-3, batch_size=batchsize,max_iter=max_iter)
kmeans.fit(data)
frames2pick=[]
for clusterid in range(numframes2pick): #pick one frame per cluster
clusterids=np.where(clusterid==kmeans.labels_)[0]
numimagesofcluster=len(clusterids)
if numimagesofcluster>0:
frames2pick.append(Index[clusterids[np.random.randint(numimagesofcluster)]])
clipresized.close()
del clipresized
return list(np.array(frames2pick))
else:
return list(Index)
def KmeansbasedFrameselectioncv2(cap,numframes2pick,start,stop,crop,coords,Index=None,step=1,resizewidth=30,batchsize=100,max_iter=50,color=False):
''' This code downsamples the video to a width of resizewidth.
The video is extracted as a numpy array, which is then clustered with kmeans, whereby each frame is treated as a vector.
Frames from different clusters are then selected for labeling. This procedure makes sure that the frames "look different",
i.e. different postures etc. On large videos this code is slow.
Consider not extracting the frames from the whole video but rather setting start and stop to a period around interesting behavior.
Note: this method can return fewer images than numframes2pick.
Attention: the flow of commands was optimized for speed rather than readability, which is why it might appear tedious and repetitive.'''
nframes=cap.get(7)
ny=int(cap.get(4))
nx=int(cap.get(3))
ratio=resizewidth*1./nx
if ratio>1:
raise Exception("Choise of resizewidth actually upsamples!")
print("Kmeans-quantization based extracting of frames from", round(start*nframes*1./cap.get(5),2)," seconds to", round(stop*nframes*1./cap.get(5),2), " seconds.")
startindex=int(np.floor(nframes*start))
stopindex=int(np.ceil(nframes*stop))
if Index is None:
Index=np.arange(startindex,stopindex,step)
else:
Index=np.array(Index)
Index=Index[(Index>startindex)*(Index<stopindex)] #crop to range!
nframes=len(Index)
if batchsize>nframes:
batchsize=int(nframes/2)
allocated=False
if len(Index)>=numframes2pick-1:
if np.mean(np.diff(Index))>1: #then non-consecutive indices are present, thus cap.set is required (which slows everything down!)
print("Extracting and downsampling...",nframes, " frames from the video.")
if color:
for counter,index in tqdm(enumerate(Index)):
cap.set(1,index) #extract a particular frame
ret, frame = cap.read()
if ret:
if crop:
frame=frame[int(coords[2]):int(coords[3]),int(coords[0]):int(coords[1]),:]
#image=img_as_ubyte(cv2.resize(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB),None,fx=ratio,fy=ratio))
image=img_as_ubyte(cv2.resize(frame,None,fx=ratio,fy=ratio,interpolation=cv2.INTER_NEAREST)) #color trafo not necessary; lack thereof improves speed.
if not allocated: #'DATA' not in locals(): #allocate memory in first pass
DATA=np.empty((nframes,np.shape(image)[0],np.shape(image)[1]*3))
allocated=True
DATA[counter,:,:] = np.hstack([image[:,:,0],image[:,:,1],image[:,:,2]])
else:
for counter,index in tqdm(enumerate(Index)):
cap.set(1,index) #extract a particular frame
ret, frame = cap.read()
if ret:
if crop:
frame=frame[int(coords[2]):int(coords[3]),int(coords[0]):int(coords[1]),:]
#image=img_as_ubyte(cv2.resize(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB),None,fx=ratio,fy=ratio))
image=img_as_ubyte(cv2.resize(frame,None,fx=ratio,fy=ratio,interpolation=cv2.INTER_NEAREST)) #color conversion is not necessary; skipping it improves speed.
if not allocated: #'DATA' not in locals(): #allocate memory in first pass
DATA=np.empty((nframes,np.shape(image)[0],np.shape(image)[1]))
allocated=True
DATA[counter,:,:] = np.mean(image,2)
else:
print("Extracting and downsampling...",nframes, " frames from the video.")
if color:
for counter,index in tqdm(enumerate(Index)):
ret, frame = cap.read()
if ret:
if crop:
frame=frame[int(coords[2]):int(coords[3]),int(coords[0]):int(coords[1]),:]
#image=img_as_ubyte(cv2.resize(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB),None,fx=ratio,fy=ratio))
image=img_as_ubyte(cv2.resize(frame,None,fx=ratio,fy=ratio,interpolation=cv2.INTER_NEAREST)) #color conversion is not necessary; skipping it improves speed.
if not allocated: #'DATA' not in locals(): #allocate memory in first pass
DATA=np.empty((nframes,np.shape(image)[0],np.shape(image)[1]*3))
allocated=True
DATA[counter,:,:] = np.hstack([image[:,:,0],image[:,:,1],image[:,:,2]])
else:
for counter,index in tqdm(enumerate(Index)):
ret, frame = cap.read()
if ret:
if crop:
frame=frame[int(coords[2]):int(coords[3]),int(coords[0]):int(coords[1]),:]
#image=img_as_ubyte(cv2.resize(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB),None,fx=ratio,fy=ratio))
image=img_as_ubyte(cv2.resize(frame,None,fx=ratio,fy=ratio,interpolation=cv2.INTER_NEAREST)) #color conversion is not necessary; skipping it improves speed.
if not allocated: #'DATA' not in locals(): #allocate memory in first pass
DATA=np.empty((nframes,np.shape(image)[0],np.shape(image)[1]))
allocated=True
DATA[counter,:,:] = np.mean(image,2)
print("Kmeans clustering ... (this might take a while)")
data = DATA - DATA.mean(axis=0)
data=data.reshape(nframes,-1) #stacking
kmeans=MiniBatchKMeans(n_clusters=numframes2pick, tol=1e-3, batch_size=batchsize,max_iter=max_iter)
kmeans.fit(data)
frames2pick=[]
for clusterid in range(numframes2pick): #pick one frame per cluster
clusterids=np.where(clusterid==kmeans.labels_)[0]
numimagesofcluster=len(clusterids)
if numimagesofcluster>0:
frames2pick.append(Index[clusterids[np.random.randint(numimagesofcluster)]])
#cap.release() is deliberately not called here; the capture object is still used in frame_extraction!
return list(np.array(frames2pick))
else:
return list(Index)
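# Illustrative usage sketch (not part of the original module): it shows how the
# cv2-based k-means frame selector defined above might be called. The video path,
# crop settings and all numeric values below are placeholder assumptions.
if __name__ == "__main__":
    import cv2
    cap = cv2.VideoCapture("video.avi")  # placeholder path
    frames2pick = KmeansbasedFrameselectioncv2(
        cap,
        numframes2pick=20,        # how many visually distinct frames to return
        start=0.25, stop=0.75,    # restrict clustering to the middle half of the video
        crop=False, coords=None,  # no cropping, so coords is ignored
        step=10,                  # consider only every 10th frame to keep clustering fast
        resizewidth=30,
        color=False,
    )
    cap.release()  # the selector deliberately does not release the capture itself (see note above)
    print("Selected frame indices:", frames2pick)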
| 52.380952
| 174
| 0.626818
| 1,668
| 13,200
| 4.930456
| 0.178058
| 0.01751
| 0.015807
| 0.016415
| 0.831226
| 0.808123
| 0.804353
| 0.796936
| 0.781858
| 0.770428
| 0
| 0.018085
| 0.258561
| 13,200
| 251
| 175
| 52.589641
| 0.822111
| 0.255
| 0
| 0.727778
| 0
| 0
| 0.065404
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022222
| false
| 0
| 0.038889
| 0
| 0.116667
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
04ff7cb60dd5347a448eb4e32c084bda88fd3e5f
| 77
|
py
|
Python
|
__init__.py
|
sam-lb/python-grapher
|
657c423fa6e1f2260988749807db9a5beaf1fef2
|
[
"MIT"
] | 2
|
2019-08-21T15:02:51.000Z
|
2019-09-03T00:26:48.000Z
|
__init__.py
|
sam-lb/mathgraph3d
|
657c423fa6e1f2260988749807db9a5beaf1fef2
|
[
"MIT"
] | 6
|
2019-07-28T21:28:11.000Z
|
2019-11-05T12:08:23.000Z
|
__init__.py
|
sam-lb/mathgraph3d
|
657c423fa6e1f2260988749807db9a5beaf1fef2
|
[
"MIT"
] | null | null | null |
from mathgraph3D.core.plot import *
from mathgraph3D.core.functions import *
| 25.666667
| 40
| 0.818182
| 10
| 77
| 6.3
| 0.6
| 0.47619
| 0.603175
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028986
| 0.103896
| 77
| 2
| 41
| 38.5
| 0.884058
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b6dcb63b8c2f0927f8055f2b27330233c3755a73
| 61,979
|
py
|
Python
|
examples/census/categ-models/model_500.py
|
mariusvniekerk/impyla
|
db04ac4887e104c4b4423f2c6a597f93e316453d
|
[
"Apache-2.0"
] | null | null | null |
examples/census/categ-models/model_500.py
|
mariusvniekerk/impyla
|
db04ac4887e104c4b4423f2c6a597f93e316453d
|
[
"Apache-2.0"
] | null | null | null |
examples/census/categ-models/model_500.py
|
mariusvniekerk/impyla
|
db04ac4887e104c4b4423f2c6a597f93e316453d
|
[
"Apache-2.0"
] | null | null | null |
def predict_income(impala_function_context, age, workclass, final_weight, education, education_num, marital_status, occupation, relationship, race, sex, hours_per_week, native_country, income):
""" Predictor for income from model/5360311dffa04466f60007dc
https://archive.ics.uci.edu/ml/machine-learning-databases/adult/
"""
if (marital_status is None):
return -7811984082516642400
if (marital_status == -7213454403760958791):
if (education_num is None):
return -7811984082516642400
if (education_num > 12):
if (hours_per_week is None):
return -671483940756762216
if (hours_per_week > 31):
if (age is None):
return -671483940756762216
if (age > 28):
if (education_num > 13):
if (age > 58):
if (education_num > 14):
if (workclass is None):
return -671483940756762216
if (workclass == -857656620414700721):
return -7811984082516642400
if (workclass != -857656620414700721):
if (occupation is None):
return -671483940756762216
if (occupation == -5484833051640498835):
return -7811984082516642400
if (occupation != -5484833051640498835):
return -671483940756762216
if (education_num <= 14):
if (hours_per_week > 36):
if (workclass is None):
return -671483940756762216
if (workclass == -7197995106135439896):
return -671483940756762216
if (workclass != -7197995106135439896):
return -671483940756762216
if (hours_per_week <= 36):
return -7811984082516642400
if (age <= 58):
if (age > 38):
if (education_num > 14):
if (hours_per_week > 49):
return -671483940756762216
if (hours_per_week <= 49):
return -671483940756762216
if (education_num <= 14):
if (workclass is None):
return -671483940756762216
if (workclass == -1136074064918994416):
return -671483940756762216
if (workclass != -1136074064918994416):
return -671483940756762216
if (age <= 38):
if (occupation is None):
return -671483940756762216
if (occupation == 3088227676756162338):
return -7811984082516642400
if (occupation != 3088227676756162338):
if (hours_per_week > 42):
return -671483940756762216
if (hours_per_week <= 42):
return -671483940756762216
if (education_num <= 13):
if (occupation is None):
return -671483940756762216
if (occupation == -6990906632015037778):
if (workclass is None):
return -671483940756762216
if (workclass == -1136074064918994416):
if (final_weight is None):
return -671483940756762216
if (final_weight > 90244):
if (age > 48):
return -671483940756762216
if (age <= 48):
return -7811984082516642400
if (final_weight <= 90244):
return -7811984082516642400
if (workclass != -1136074064918994416):
if (hours_per_week > 67):
if (hours_per_week > 73):
return -671483940756762216
if (hours_per_week <= 73):
return -7811984082516642400
if (hours_per_week <= 67):
if (race is None):
return -671483940756762216
if (race == 3939476748445039507):
return -7811984082516642400
if (race != 3939476748445039507):
return -671483940756762216
if (occupation != -6990906632015037778):
if (relationship is None):
return -671483940756762216
if (relationship == 8744150760759310329):
return -7811984082516642400
if (relationship != 8744150760759310329):
if (race is None):
return -671483940756762216
if (race == 3939476748445039507):
return -7811984082516642400
if (race != 3939476748445039507):
if (final_weight is None):
return -671483940756762216
if (final_weight > 121061):
return -671483940756762216
if (final_weight <= 121061):
return -671483940756762216
if (age <= 28):
if (age > 24):
if (occupation is None):
return -7811984082516642400
if (occupation == 4779842868628447834):
return -671483940756762216
if (occupation != 4779842868628447834):
if (hours_per_week > 41):
if (hours_per_week > 46):
if (education_num > 14):
return -7811984082516642400
if (education_num <= 14):
return -7811984082516642400
if (hours_per_week <= 46):
if (occupation == 5332362397248960598):
return -7811984082516642400
if (occupation != 5332362397248960598):
return -671483940756762216
if (hours_per_week <= 41):
if (final_weight is None):
return -7811984082516642400
if (final_weight > 159383):
if (final_weight > 260996):
return -671483940756762216
if (final_weight <= 260996):
return -7811984082516642400
if (final_weight <= 159383):
if (final_weight > 100631):
return -671483940756762216
if (final_weight <= 100631):
return -7811984082516642400
if (age <= 24):
if (final_weight is None):
return -7811984082516642400
if (final_weight > 492053):
return -671483940756762216
if (final_weight <= 492053):
return -7811984082516642400
if (hours_per_week <= 31):
if (sex is None):
return -7811984082516642400
if (sex == 6306819796163687131):
if (age is None):
return -7811984082516642400
if (age > 29):
if (age > 62):
if (age > 78):
if (hours_per_week > 9):
return -671483940756762216
if (hours_per_week <= 9):
return -7811984082516642400
if (age <= 78):
if (hours_per_week > 13):
if (race is None):
return -7811984082516642400
if (race == -1569537633132385766):
return -7811984082516642400
if (race != -1569537633132385766):
return -671483940756762216
if (hours_per_week <= 13):
if (occupation is None):
return -7811984082516642400
if (occupation == -6990906632015037778):
return -671483940756762216
if (occupation != -6990906632015037778):
return -7811984082516642400
if (age <= 62):
if (hours_per_week > 12):
if (workclass is None):
return -671483940756762216
if (workclass == -4284295320506787287):
return -7811984082516642400
if (workclass != -4284295320506787287):
if (hours_per_week > 21):
return -7811984082516642400
if (hours_per_week <= 21):
return -671483940756762216
if (hours_per_week <= 12):
if (hours_per_week > 2):
if (education_num > 14):
return -7811984082516642400
if (education_num <= 14):
return -7811984082516642400
if (hours_per_week <= 2):
return -671483940756762216
if (age <= 29):
return -7811984082516642400
if (sex != 6306819796163687131):
if (final_weight is None):
return -671483940756762216
if (final_weight > 264521):
if (hours_per_week > 7):
return -7811984082516642400
if (hours_per_week <= 7):
return -671483940756762216
if (final_weight <= 264521):
if (age is None):
return -671483940756762216
if (age > 26):
if (workclass is None):
return -671483940756762216
if (workclass == -1136074064918994416):
if (hours_per_week > 26):
return -671483940756762216
if (hours_per_week <= 26):
return -7811984082516642400
if (workclass != -1136074064918994416):
if (final_weight > 36352):
if (occupation is None):
return -671483940756762216
if (occupation == 8618684898378336489):
return -7811984082516642400
if (occupation != 8618684898378336489):
return -671483940756762216
if (final_weight <= 36352):
return -7811984082516642400
if (age <= 26):
return -7811984082516642400
if (education_num <= 12):
if (education_num > 8):
if (age is None):
return -7811984082516642400
if (age > 35):
if (hours_per_week is None):
return -7811984082516642400
if (hours_per_week > 33):
if (education_num > 9):
if (occupation is None):
return -671483940756762216
if (occupation == 3088227676756162338):
if (hours_per_week > 71):
return -7811984082516642400
if (hours_per_week <= 71):
if (final_weight is None):
return -7811984082516642400
if (final_weight > 182378):
return -7811984082516642400
if (final_weight <= 182378):
return -7811984082516642400
if (occupation != 3088227676756162338):
if (occupation == 8618684898378336489):
if (age > 40):
return -7811984082516642400
if (age <= 40):
return -7811984082516642400
if (occupation != 8618684898378336489):
if (occupation == -6990906632015037778):
return -671483940756762216
if (occupation != -6990906632015037778):
return -671483940756762216
if (education_num <= 9):
if (occupation is None):
return -7811984082516642400
if (occupation == -6990906632015037778):
if (workclass is None):
return -671483940756762216
if (workclass == -1136074064918994416):
if (final_weight is None):
return -7811984082516642400
if (final_weight > 92214):
return -7811984082516642400
if (final_weight <= 92214):
return -671483940756762216
if (workclass != -1136074064918994416):
if (final_weight is None):
return -671483940756762216
if (final_weight > 189527):
return -671483940756762216
if (final_weight <= 189527):
return -671483940756762216
if (occupation != -6990906632015037778):
if (occupation == 8618684898378336489):
if (sex is None):
return -7811984082516642400
if (sex == 6306819796163687131):
return -7811984082516642400
if (sex != 6306819796163687131):
return -7811984082516642400
if (occupation != 8618684898378336489):
if (occupation == 3088227676756162338):
return -7811984082516642400
if (occupation != 3088227676756162338):
return -7811984082516642400
if (hours_per_week <= 33):
if (workclass is None):
return -7811984082516642400
if (workclass == -7197995106135439896):
if (age > 54):
if (final_weight is None):
return -671483940756762216
if (final_weight > 181769):
if (hours_per_week > 27):
return -671483940756762216
if (hours_per_week <= 27):
return -671483940756762216
if (final_weight <= 181769):
if (sex is None):
return -7811984082516642400
if (sex == 6306819796163687131):
return -7811984082516642400
if (sex != 6306819796163687131):
return -671483940756762216
if (age <= 54):
return -7811984082516642400
if (workclass != -7197995106135439896):
if (relationship is None):
return -7811984082516642400
if (relationship == -7487827120114232249):
if (age > 59):
return -7811984082516642400
if (age <= 59):
if (education_num > 9):
return -671483940756762216
if (education_num <= 9):
return -7811984082516642400
if (relationship != -7487827120114232249):
if (occupation is None):
return -7811984082516642400
if (occupation == 4779842868628447834):
if (workclass == -1136074064918994416):
return -7811984082516642400
if (workclass != -1136074064918994416):
return -671483940756762216
if (occupation != 4779842868628447834):
if (final_weight is None):
return -7811984082516642400
if (final_weight > 286449):
return -7811984082516642400
if (final_weight <= 286449):
return -7811984082516642400
if (age <= 35):
if (age > 24):
if (occupation is None):
return -7811984082516642400
if (occupation == -6990906632015037778):
if (age > 27):
if (workclass is None):
return -7811984082516642400
if (workclass == -1136074064918994416):
if (final_weight is None):
return -7811984082516642400
if (final_weight > 237985):
return -7811984082516642400
if (final_weight <= 237985):
return -7811984082516642400
if (workclass != -1136074064918994416):
if (age > 32):
return -671483940756762216
if (age <= 32):
return -7811984082516642400
if (age <= 27):
if (final_weight is None):
return -7811984082516642400
if (final_weight > 162313):
if (final_weight > 190463):
return -7811984082516642400
if (final_weight <= 190463):
return -671483940756762216
if (final_weight <= 162313):
return -7811984082516642400
if (occupation != -6990906632015037778):
if (occupation == 3088227676756162338):
if (education_num > 10):
if (hours_per_week is None):
return -7811984082516642400
if (hours_per_week > 57):
return -7811984082516642400
if (hours_per_week <= 57):
return -7811984082516642400
if (education_num <= 10):
return -7811984082516642400
if (occupation != 3088227676756162338):
if (hours_per_week is None):
return -7811984082516642400
if (hours_per_week > 46):
if (age > 31):
return -7811984082516642400
if (age <= 31):
return -7811984082516642400
if (hours_per_week <= 46):
if (occupation == 1581590029918088140):
return -7811984082516642400
if (occupation != 1581590029918088140):
return -7811984082516642400
if (age <= 24):
if (hours_per_week is None):
return -7811984082516642400
if (hours_per_week > 45):
if (final_weight is None):
return -7811984082516642400
if (final_weight > 79991):
if (race is None):
return -7811984082516642400
if (race == -681598405395175136):
return -671483940756762216
if (race != -681598405395175136):
if (workclass is None):
return -7811984082516642400
if (workclass == -4284295320506787287):
return -671483940756762216
if (workclass != -4284295320506787287):
return -7811984082516642400
if (final_weight <= 79991):
if (education_num > 9):
return -671483940756762216
if (education_num <= 9):
return -7811984082516642400
if (hours_per_week <= 45):
if (occupation is None):
return -7811984082516642400
if (occupation == 1581590029918088140):
if (hours_per_week > 38):
return -671483940756762216
if (hours_per_week <= 38):
return -7811984082516642400
if (occupation != 1581590029918088140):
if (occupation == 5332362397248960598):
if (sex is None):
return -7811984082516642400
if (sex == 6306819796163687131):
return -7811984082516642400
if (sex != 6306819796163687131):
return -7811984082516642400
if (occupation != 5332362397248960598):
if (occupation == -6951104699562914960):
return -7811984082516642400
if (occupation != -6951104699562914960):
return -7811984082516642400
if (education_num <= 8):
if (age is None):
return -7811984082516642400
if (age > 36):
if (hours_per_week is None):
return -7811984082516642400
if (hours_per_week > 22):
if (education_num > 5):
if (age > 53):
if (occupation is None):
return -7811984082516642400
if (occupation == 2812191937831880778):
if (final_weight is None):
return -7811984082516642400
if (final_weight > 89485):
return -7811984082516642400
if (final_weight <= 89485):
return -7811984082516642400
if (occupation != 2812191937831880778):
if (final_weight is None):
return -7811984082516642400
if (final_weight > 162184):
return -7811984082516642400
if (final_weight <= 162184):
return -7811984082516642400
if (age <= 53):
if (occupation is None):
return -7811984082516642400
if (occupation == -5484833051640498835):
if (hours_per_week > 52):
return -7811984082516642400
if (hours_per_week <= 52):
return -671483940756762216
if (occupation != -5484833051640498835):
if (workclass is None):
return -7811984082516642400
if (workclass == -857656620414700721):
return -7811984082516642400
if (workclass != -857656620414700721):
return -7811984082516642400
if (education_num <= 5):
if (workclass is None):
return -7811984082516642400
if (workclass == 8585012838816931822):
if (occupation is None):
return -7811984082516642400
if (occupation == -6990906632015037778):
if (hours_per_week > 46):
return -7811984082516642400
if (hours_per_week <= 46):
return -671483940756762216
if (occupation != -6990906632015037778):
if (occupation == -5484833051640498835):
return -7811984082516642400
if (occupation != -5484833051640498835):
return -7811984082516642400
if (workclass != 8585012838816931822):
if (hours_per_week > 55):
if (workclass == -1136074064918994416):
return -671483940756762216
if (workclass != -1136074064918994416):
return -7811984082516642400
if (hours_per_week <= 55):
if (workclass == -7197995106135439896):
return -671483940756762216
if (workclass != -7197995106135439896):
return -7811984082516642400
if (hours_per_week <= 22):
return -7811984082516642400
if (age <= 36):
if (workclass is None):
return -7811984082516642400
if (workclass == 8585012838816931822):
if (age > 35):
if (occupation is None):
return -7811984082516642400
if (occupation == -5484833051640498835):
return -671483940756762216
if (occupation != -5484833051640498835):
if (education_num > 3):
if (final_weight is None):
return -7811984082516642400
if (final_weight > 185266):
return -7811984082516642400
if (final_weight <= 185266):
return -7811984082516642400
if (education_num <= 3):
return -671483940756762216
if (age <= 35):
if (hours_per_week is None):
return -7811984082516642400
if (hours_per_week > 67):
if (hours_per_week > 83):
return -7811984082516642400
if (hours_per_week <= 83):
return -671483940756762216
if (hours_per_week <= 67):
if (occupation is None):
return -7811984082516642400
if (occupation == 5332362397248960598):
if (final_weight is None):
return -7811984082516642400
if (final_weight > 219946):
return -7811984082516642400
if (final_weight <= 219946):
return -7811984082516642400
if (occupation != 5332362397248960598):
if (final_weight is None):
return -7811984082516642400
if (final_weight > 145325):
return -7811984082516642400
if (final_weight <= 145325):
return -7811984082516642400
if (workclass != 8585012838816931822):
if (occupation is None):
return -7811984082516642400
if (occupation == -8227066636055033186):
return -671483940756762216
if (occupation != -8227066636055033186):
if (age > 29):
return -7811984082516642400
if (age <= 29):
if (relationship is None):
return -7811984082516642400
if (relationship == -408487193273916322):
if (final_weight is None):
return -7811984082516642400
if (final_weight > 149463):
return -7811984082516642400
if (final_weight <= 149463):
return -7811984082516642400
if (relationship != -408487193273916322):
return -7811984082516642400
if (marital_status != -7213454403760958791):
if (education_num is None):
return -7811984082516642400
if (education_num > 12):
if (age is None):
return -7811984082516642400
if (age > 27):
if (hours_per_week is None):
return -7811984082516642400
if (hours_per_week > 43):
if (occupation is None):
return -7811984082516642400
if (occupation == -6990906632015037778):
if (age > 41):
if (final_weight is None):
return -671483940756762216
if (final_weight > 160393):
if (hours_per_week > 58):
if (education_num > 13):
return -671483940756762216
if (education_num <= 13):
return -7811984082516642400
if (hours_per_week <= 58):
if (race is None):
return -671483940756762216
if (race == -681598405395175136):
return -7811984082516642400
if (race != -681598405395175136):
return -671483940756762216
if (final_weight <= 160393):
if (hours_per_week > 47):
if (final_weight > 51818):
return -671483940756762216
if (final_weight <= 51818):
return -671483940756762216
if (hours_per_week <= 47):
if (age > 49):
return -671483940756762216
if (age <= 49):
return -7811984082516642400
if (age <= 41):
if (final_weight is None):
return -7811984082516642400
if (final_weight > 307855):
return -7811984082516642400
if (final_weight <= 307855):
if (workclass is None):
return -7811984082516642400
if (workclass == -1136074064918994416):
return -7811984082516642400
if (workclass != -1136074064918994416):
if (marital_status == 7568786824864426784):
return -671483940756762216
if (marital_status != 7568786824864426784):
return -671483940756762216
if (occupation != -6990906632015037778):
if (education_num > 14):
if (age > 32):
if (age > 52):
if (marital_status == -5485661916787442206):
return -671483940756762216
if (marital_status != -5485661916787442206):
return -7811984082516642400
if (age <= 52):
if (hours_per_week > 52):
return -671483940756762216
if (hours_per_week <= 52):
return -671483940756762216
if (age <= 32):
if (age > 29):
return -7811984082516642400
if (age <= 29):
if (marital_status == -2843050270188924016):
return -7811984082516642400
if (marital_status != -2843050270188924016):
return -671483940756762216
if (education_num <= 14):
if (sex is None):
return -7811984082516642400
if (sex == 6306819796163687131):
if (hours_per_week > 55):
if (occupation == -5484833051640498835):
return -7811984082516642400
if (occupation != -5484833051640498835):
return -7811984082516642400
if (hours_per_week <= 55):
if (workclass is None):
return -7811984082516642400
if (workclass == -4284295320506787287):
return -7811984082516642400
if (workclass != -4284295320506787287):
return -7811984082516642400
if (sex != 6306819796163687131):
if (final_weight is None):
return -7811984082516642400
if (final_weight > 151124):
if (final_weight > 158605):
return -7811984082516642400
if (final_weight <= 158605):
return -671483940756762216
if (final_weight <= 151124):
if (workclass is None):
return -7811984082516642400
if (workclass == 8161495398349361779):
return -671483940756762216
if (workclass != 8161495398349361779):
return -7811984082516642400
if (hours_per_week <= 43):
if (education_num > 14):
if (age > 32):
if (sex is None):
return -671483940756762216
if (sex == 6306819796163687131):
if (hours_per_week > 21):
if (final_weight is None):
return -671483940756762216
if (final_weight > 107803):
return -671483940756762216
if (final_weight <= 107803):
return -671483940756762216
if (hours_per_week <= 21):
if (marital_status == -5485661916787442206):
return -671483940756762216
if (marital_status != -5485661916787442206):
return -7811984082516642400
if (sex != 6306819796163687131):
if (marital_status == -2843050270188924016):
if (final_weight is None):
return -671483940756762216
if (final_weight > 386027):
return -7811984082516642400
if (final_weight <= 386027):
return -671483940756762216
if (marital_status != -2843050270188924016):
if (final_weight is None):
return -7811984082516642400
if (final_weight > 170081):
return -7811984082516642400
if (final_weight <= 170081):
return -7811984082516642400
if (age <= 32):
return -7811984082516642400
if (education_num <= 14):
if (age > 45):
if (hours_per_week > 31):
if (relationship is None):
return -7811984082516642400
if (relationship == 5722155880036500383):
if (workclass is None):
return -7811984082516642400
if (workclass == 8161495398349361779):
return -7811984082516642400
if (workclass != 8161495398349361779):
return -7811984082516642400
if (relationship != 5722155880036500383):
if (age > 49):
return -7811984082516642400
if (age <= 49):
return -7811984082516642400
if (hours_per_week <= 31):
if (marital_status == -8271725530730535226):
return -7811984082516642400
if (marital_status != -8271725530730535226):
if (occupation is None):
return -7811984082516642400
if (occupation == -8005258492814722552):
return -671483940756762216
if (occupation != -8005258492814722552):
return -7811984082516642400
if (age <= 45):
if (hours_per_week > 34):
if (workclass is None):
return -7811984082516642400
if (workclass == -4284295320506787287):
return -7811984082516642400
if (workclass != -4284295320506787287):
if (workclass == 8161495398349361779):
return -7811984082516642400
if (workclass != 8161495398349361779):
return -7811984082516642400
if (hours_per_week <= 34):
if (final_weight is None):
return -7811984082516642400
if (final_weight > 391238):
return -671483940756762216
if (final_weight <= 391238):
return -7811984082516642400
if (age <= 27):
if (hours_per_week is None):
return -7811984082516642400
if (hours_per_week > 38):
if (relationship is None):
return -7811984082516642400
if (relationship == -7487827120114232249):
return -671483940756762216
if (relationship != -7487827120114232249):
if (hours_per_week > 77):
if (final_weight is None):
return -7811984082516642400
if (final_weight > 156075):
return -671483940756762216
if (final_weight <= 156075):
return -7811984082516642400
if (hours_per_week <= 77):
if (race is None):
return -7811984082516642400
if (race == -681598405395175136):
if (hours_per_week > 41):
return -7811984082516642400
if (hours_per_week <= 41):
return -671483940756762216
if (race != -681598405395175136):
if (workclass is None):
return -7811984082516642400
if (workclass == -1136074064918994416):
if (relationship == 5722155880036500383):
return -7811984082516642400
if (relationship != 5722155880036500383):
return -671483940756762216
if (workclass != -1136074064918994416):
if (workclass == 8585012838816931822):
return -7811984082516642400
if (workclass != 8585012838816931822):
return -7811984082516642400
if (hours_per_week <= 38):
return -7811984082516642400
if (education_num <= 12):
if (age is None):
return -7811984082516642400
if (age > 31):
if (hours_per_week is None):
return -7811984082516642400
if (hours_per_week > 41):
if (education_num > 5):
if (age > 53):
if (occupation is None):
return -7811984082516642400
if (occupation == 5332362397248960598):
return -7811984082516642400
if (occupation != 5332362397248960598):
if (hours_per_week > 67):
return -7811984082516642400
if (hours_per_week <= 67):
if (marital_status == 7568786824864426784):
return -671483940756762216
if (marital_status != 7568786824864426784):
return -7811984082516642400
if (age <= 53):
if (relationship is None):
return -7811984082516642400
if (relationship == 5722155880036500383):
if (education is None):
return -7811984082516642400
if (education == 4595982442865070163):
if (hours_per_week > 47):
return -7811984082516642400
if (hours_per_week <= 47):
return -7811984082516642400
if (education != 4595982442865070163):
if (workclass is None):
return -7811984082516642400
if (workclass == -1136074064918994416):
return -7811984082516642400
if (workclass != -1136074064918994416):
return -7811984082516642400
if (relationship != 5722155880036500383):
if (age > 39):
if (age > 45):
return -7811984082516642400
if (age <= 45):
return -7811984082516642400
if (age <= 39):
return -7811984082516642400
if (education_num <= 5):
return -7811984082516642400
if (hours_per_week <= 41):
if (occupation is None):
return -7811984082516642400
if (occupation == 8618684898378336489):
if (relationship is None):
return -7811984082516642400
if (relationship == -7487827120114232249):
if (hours_per_week > 32):
return -671483940756762216
if (hours_per_week <= 32):
return -7811984082516642400
if (relationship != -7487827120114232249):
if (age > 59):
if (workclass is None):
return -7811984082516642400
if (workclass == 8585012838816931822):
return -7811984082516642400
if (workclass != 8585012838816931822):
if (education is None):
return -7811984082516642400
if (education == -1620783280160849416):
return -671483940756762216
if (education != -1620783280160849416):
return -7811984082516642400
if (age <= 59):
return -7811984082516642400
if (occupation != 8618684898378336489):
if (occupation == -8227066636055033186):
if (relationship is None):
return -7811984082516642400
if (relationship == -7729121122090457494):
if (sex is None):
return -7811984082516642400
if (sex == 6306819796163687131):
if (age > 37):
return -7811984082516642400
if (age <= 37):
return -7811984082516642400
if (sex != 6306819796163687131):
return -7811984082516642400
if (relationship != -7729121122090457494):
return -7811984082516642400
if (occupation != -8227066636055033186):
if (relationship is None):
return -7811984082516642400
if (relationship == 5722155880036500383):
if (occupation == 5332362397248960598):
if (marital_status == -1035125786006291861):
return -7811984082516642400
if (marital_status != -1035125786006291861):
return -7811984082516642400
if (occupation != 5332362397248960598):
if (workclass is None):
return -7811984082516642400
if (workclass == -7197995106135439896):
return -7811984082516642400
if (workclass != -7197995106135439896):
return -7811984082516642400
if (relationship != 5722155880036500383):
if (occupation == 1581590029918088140):
if (workclass is None):
return -7811984082516642400
if (workclass == 8161495398349361779):
return -7811984082516642400
if (workclass != 8161495398349361779):
return -7811984082516642400
if (occupation != 1581590029918088140):
if (relationship == -7487827120114232249):
return -7811984082516642400
if (relationship != -7487827120114232249):
return -7811984082516642400
if (age <= 31):
if (age > 21):
if (hours_per_week is None):
return -7811984082516642400
if (hours_per_week > 41):
if (workclass is None):
return -7811984082516642400
if (workclass == 8585012838816931822):
if (relationship is None):
return -7811984082516642400
if (relationship == 5722155880036500383):
if (occupation is None):
return -7811984082516642400
if (occupation == -6990906632015037778):
if (marital_status == -2843050270188924016):
return -7811984082516642400
if (marital_status != -2843050270188924016):
return -671483940756762216
if (occupation != -6990906632015037778):
if (education is None):
return -7811984082516642400
if (education == -8844931991724242570):
return -7811984082516642400
if (education != -8844931991724242570):
return -7811984082516642400
if (relationship != 5722155880036500383):
return -7811984082516642400
if (workclass != 8585012838816931822):
if (sex is None):
return -7811984082516642400
if (sex == 6306819796163687131):
if (hours_per_week > 49):
if (occupation is None):
return -7811984082516642400
if (occupation == -6990906632015037778):
return -671483940756762216
if (occupation != -6990906632015037778):
return -7811984082516642400
if (hours_per_week <= 49):
if (education_num > 8):
return -7811984082516642400
if (education_num <= 8):
return -7811984082516642400
if (sex != 6306819796163687131):
return -7811984082516642400
if (hours_per_week <= 41):
if (education_num > 9):
if (hours_per_week > 29):
if (relationship is None):
return -7811984082516642400
if (relationship == -7487827120114232249):
return -671483940756762216
if (relationship != -7487827120114232249):
if (occupation is None):
return -7811984082516642400
if (occupation == -3959269231467008119):
return -7811984082516642400
if (occupation != -3959269231467008119):
return -7811984082516642400
if (hours_per_week <= 29):
return -7811984082516642400
if (education_num <= 9):
if (age > 27):
if (final_weight is None):
return -7811984082516642400
if (final_weight > 94030):
if (final_weight > 334106):
return -7811984082516642400
if (final_weight <= 334106):
return -7811984082516642400
if (final_weight <= 94030):
if (marital_status == -8271725530730535226):
return -7811984082516642400
if (marital_status != -8271725530730535226):
return -7811984082516642400
if (age <= 27):
return -7811984082516642400
if (age <= 21):
if (education is None):
return -7811984082516642400
if (education == -3305009427453673313):
if (occupation is None):
return -7811984082516642400
if (occupation == 8618684898378336489):
if (hours_per_week is None):
return -7811984082516642400
if (hours_per_week > 50):
return -671483940756762216
if (hours_per_week <= 50):
return -7811984082516642400
if (occupation != 8618684898378336489):
return -7811984082516642400
if (education != -3305009427453673313):
return -7811984082516642400
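# Illustrative sanity check (not part of the generated file): the tree above is
# apparently meant to be registered as an Impala UDF, but it can also be called
# directly in Python. The context argument and every feature value below are
# placeholder assumptions; categorical features are hashed codes in the tree,
# so None is passed where the real codes are unknown.
if __name__ == "__main__":
    label = predict_income(
        None,                     # impala_function_context (unused by the pure-Python tree)
        age=40, workclass=None, final_weight=120000, education=None,
        education_num=13, marital_status=None, occupation=None,
        relationship=None, race=None, sex=None, hours_per_week=45,
        native_country=None, income=None,
    )
    print(label)                  # with marital_status=None this prints -7811984082516642400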
| 61.183613
| 193
| 0.368948
| 3,186
| 61,979
| 7.056811
| 0.052103
| 0.300227
| 0.323044
| 0.066005
| 0.95014
| 0.899569
| 0.770449
| 0.707646
| 0.574656
| 0.441133
| 0
| 0.46193
| 0.578099
| 61,979
| 1,012
| 194
| 61.244071
| 0.397874
| 0.001968
| 0
| 0.821429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.000992
| false
| 0
| 0
| 0
| 0.376984
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
8e19548b6f05340452170fd6e66fb0e0fc218880
| 412
|
py
|
Python
|
tests/test_select_rules/rules_to_test_select_rules_function.py
|
AlexLitvino/pyASSA
|
8ce0925ee20431cc38d218d1b7cb05dee2f3f938
|
[
"Apache-2.0"
] | null | null | null |
tests/test_select_rules/rules_to_test_select_rules_function.py
|
AlexLitvino/pyASSA
|
8ce0925ee20431cc38d218d1b7cb05dee2f3f938
|
[
"Apache-2.0"
] | null | null | null |
tests/test_select_rules/rules_to_test_select_rules_function.py
|
AlexLitvino/pyASSA
|
8ce0925ee20431cc38d218d1b7cb05dee2f3f938
|
[
"Apache-2.0"
] | null | null | null |
def rule_error_test1(**kwargs):
pass
def rule_error_test2(**kwargs):
pass
def rule_error_test3(**kwargs):
pass
def rule_warning_test1(**kwargs):
pass
def rule_warning_test2(**kwargs):
pass
def rule_warning_test3(**kwargs):
pass
def rule_notcategorized_test1(**kwargs):
pass
def rule_notcategorized_test2(**kwargs):
pass
def rule_notcategorized_test3(**kwargs):
pass
| 14.206897
| 40
| 0.713592
| 54
| 412
| 5.111111
| 0.185185
| 0.228261
| 0.376812
| 0.492754
| 0.902174
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026549
| 0.177184
| 412
| 29
| 41
| 14.206897
| 0.787611
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
6d0dffc19fb690c9ccc61158097c5b9af7b4a88b
| 130
|
py
|
Python
|
src/txCascil/client/authentication_controllers/__init__.py
|
DanSeraf/spyd
|
af893b7f9c67785613b25754eb2cf150523a9fe4
|
[
"Zlib"
] | null | null | null |
src/txCascil/client/authentication_controllers/__init__.py
|
DanSeraf/spyd
|
af893b7f9c67785613b25754eb2cf150523a9fe4
|
[
"Zlib"
] | null | null | null |
src/txCascil/client/authentication_controllers/__init__.py
|
DanSeraf/spyd
|
af893b7f9c67785613b25754eb2cf150523a9fe4
|
[
"Zlib"
] | null | null | null |
from txCascil.utils.import_all import import_all
import_all(__file__, 'txCascil.client.authentication_controllers', ['__init__'])
| 43.333333
| 80
| 0.838462
| 16
| 130
| 6.0625
| 0.625
| 0.278351
| 0.309278
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.053846
| 130
| 2
| 81
| 65
| 0.788618
| 0
| 0
| 0
| 0
| 0
| 0.384615
| 0.323077
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
6d25134fafda1c1a25a956740bef72c84add214d
| 6,200
|
py
|
Python
|
kelvin/tests/test_ccsd.py
|
MoleOrbitalHybridAnalyst/kelvin
|
99538f8360975e2f80941446d8fbf2e848f74cf9
|
[
"MIT"
] | 1
|
2021-08-05T15:53:46.000Z
|
2021-08-05T15:53:46.000Z
|
kelvin/tests/test_ccsd.py
|
MoleOrbitalHybridAnalyst/kelvin
|
99538f8360975e2f80941446d8fbf2e848f74cf9
|
[
"MIT"
] | null | null | null |
kelvin/tests/test_ccsd.py
|
MoleOrbitalHybridAnalyst/kelvin
|
99538f8360975e2f80941446d8fbf2e848f74cf9
|
[
"MIT"
] | 1
|
2022-01-13T18:41:06.000Z
|
2022-01-13T18:41:06.000Z
|
import unittest
from pyscf import gto, scf, cc
from pyscf.pbc import cc as pbc_cc
from kelvin.ccsd import ccsd
from kelvin.scf_system import SCFSystem
def get_ccsd_gen(m):
mycc = cc.CCSD(m)
mycc.conv_tol = 1e-12
mycc.run()
sys = SCFSystem(m, 0.0, 0.0, orbtype='g')
ccsd0 = ccsd(sys, iprint=0, max_iter=44, econv=1e-11)
Etot, Ecc = ccsd0.run()
return (mycc.e_corr, Ecc)
def get_ccsd(m):
mycc = cc.CCSD(m)
mycc.conv_tol = 1e-12
mycc.run()
sys = SCFSystem(m, 0.0, 0.0)
ccsd0 = ccsd(sys, iprint=0, max_iter=44, econv=1e-11)
Etot, Ecc = ccsd0.run()
return (mycc.e_corr, Ecc)
class CCSDTest(unittest.TestCase):
def setUp(self):
self.thresh = 1e-10
def test_Be_sto3g_gen(self):
mol = gto.M(
verbose=0,
atom='Be 0 0 0',
basis='sto-3G')
m = scf.RHF(mol)
m.conv_tol = 1e-12
m.scf()
res = get_ccsd_gen(m)
diff = abs(res[1] - res[0])
error = "Expected: {} Actual: {}".format(res[0], res[1])
self.assertTrue(diff < self.thresh, error)
m = scf.UHF(mol)
def test_N2p_631G_gen(self):
mol = gto.M(
verbose=0,
atom='N 0 0 0; N 0 0 1.1',
basis='6-31G',
charge=1,
spin=1)
m = scf.UHF(mol)
m.conv_tol = 1e-12
m.scf()
res = get_ccsd_gen(m)
diff = abs(res[1] - res[0])
error = "Expected: {} Actual: {}".format(res[0], res[1])
self.assertTrue(diff < self.thresh, error)
def test_Be_sto3g(self):
mol = gto.M(
verbose=0,
atom='Be 0 0 0',
basis='sto-3G')
m = scf.RHF(mol)
m.conv_tol = 1e-12
m.scf()
res = get_ccsd(m)
diff = abs(res[1] - res[0])
error = "Expected: {} Actual: {}".format(res[0], res[1])
self.assertTrue(diff < self.thresh, error)
m = scf.UHF(mol)
def test_N2p_631G(self):
mol = gto.M(
verbose=0,
atom='N 0 0 0; N 0 0 1.1',
basis='6-31G',
charge=1,
spin=1)
m = scf.UHF(mol)
m.conv_tol = 1e-13
m.scf()
res = get_ccsd(m)
diff = abs(res[1] - res[0])
error = "Expected: {} Actual: {}".format(res[0], res[1])
self.assertTrue(diff < self.thresh, error)
@unittest.skip("Skipped for time")
def test_diamond_g(self):
from pyscf.pbc import gto, scf
cell = gto.Cell()
cell.a = '''
3.5668 0 0
0 3.5668 0
0 0 3.5668'''
cell.atom = '''C 0. 0. 0.
C 0.8917 0.8917 0.8917
C 1.7834 1.7834 0.
C 2.6751 2.6751 0.8917
C 1.7834 0. 1.7834
C 2.6751 0.8917 2.6751
C 0. 1.7834 1.7834
C 0.8917 2.6751 2.6751'''
cell.basis = 'gth-szv'
cell.pseudo = 'gth-pade'
cell.verbose = 0
cell.build()
mf = scf.RHF(cell, exxdiv=None)
mf.conv_tol_grad = 1e-8
mf.conv_tol = 1e-12
mf.kernel()
mycc = cc.CCSD(mf)
mycc.conv_tol = 1e-11
mycc.conv_tol_normt = 1e-9
Ecc = mycc.kernel()
sys = SCFSystem(mf, 0.0, 0.0, orbtype='g')
ccsd0 = ccsd(sys, iprint=0, max_iter=100, econv=1e-11, damp=0.0)
Etot, Ecc2 = ccsd0.run()
diff = abs(Ecc[0] - Ecc2)
self.assertTrue(diff < self.thresh)
@unittest.skip("Skipped for time")
def test_diamond_u(self):
from pyscf.pbc import gto, scf
cell = gto.Cell()
cell.a = '''
3.5668 0 0
0 3.5668 0
0 0 3.5668'''
cell.atom = '''C 0. 0. 0.
C 0.8917 0.8917 0.8917
C 1.7834 1.7834 0.
C 2.6751 2.6751 0.8917
C 1.7834 0. 1.7834
C 2.6751 0.8917 2.6751
C 0. 1.7834 1.7834
C 0.8917 2.6751 2.6751'''
cell.basis = 'gth-szv'
cell.pseudo = 'gth-pade'
cell.verbose = 0
cell.build()
mf = scf.RHF(cell, exxdiv=None)
mf.conv_tol_grad = 1e-8
mf.conv_tol = 1e-12
mf.kernel()
mycc = cc.CCSD(mf)
mycc.conv_tol = 1e-11
mycc.conv_tol_normt = 1e-9
Ecc = mycc.kernel()
sys = SCFSystem(mf, 0.0, 0.0)
ccsd0 = ccsd(sys, iprint=0, max_iter=100, econv=1e-11, damp=0.0)
Etot, Ecc2 = ccsd0.run()
diff = abs(Ecc[0] - Ecc2)
self.assertTrue(diff < self.thresh)
@unittest.skip("Skipped for time")
def test_diamond_uk(self):
from pyscf.pbc import gto, scf
cell = gto.Cell()
cell.a = '''
3.5668 0 0
0 3.5668 0
0 0 3.5668'''
cell.atom = '''C 0. 0. 0.
C 0.8917 0.8917 0.8917
C 1.7834 1.7834 0.
C 2.6751 2.6751 0.8917
C 1.7834 0. 1.7834
C 2.6751 0.8917 2.6751
C 0. 1.7834 1.7834
C 0.8917 2.6751 2.6751'''
cell.basis = 'gth-szv'
cell.pseudo = 'gth-pade'
cell.verbose = 4
cell.build()
kpt = cell.make_kpts((1, 1, 1), scaled_center=(0, 0, 1./3.))
mf = scf.RHF(cell, kpt=kpt, exxdiv=None)
mf.conv_tol_grad = 1e-8
mf.conv_tol = 1e-12
mf.kernel()
mycc = pbc_cc.CCSD(mf)
mycc.conv_tol = 1e-11
mycc.conv_tol_normt = 1e-9
Ecc = mycc.kernel()
sys = SCFSystem(mf, 0.0, 0.0)
ccsd0 = ccsd(sys, iprint=1, max_iter=100, econv=1e-11, damp=0.0)
Etot, Ecc2 = ccsd0.run()
diff = abs(Ecc[0] - Ecc2)
self.assertTrue(diff < self.thresh)
if __name__ == '__main__':
unittest.main()
| 30.845771
| 72
| 0.462097
| 900
| 6,200
| 3.106667
| 0.127778
| 0.033619
| 0.024678
| 0.031474
| 0.884835
| 0.884835
| 0.884835
| 0.884835
| 0.868383
| 0.868383
| 0
| 0.150368
| 0.407903
| 6,200
| 200
| 73
| 31
| 0.611278
| 0
| 0
| 0.844444
| 0
| 0
| 0.266613
| 0
| 0
| 0
| 0
| 0
| 0.038889
| 1
| 0.055556
| false
| 0
| 0.044444
| 0
| 0.116667
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b63298a1cc9e686c3ba8c94eeaa70cba69df1ce7
| 3,689
|
py
|
Python
|
conf.py
|
edulima19c3/phishings2021
|
62cb67da3e1d1221c4e8eb60701a959a347415fa
|
[
"MIT"
] | 102
|
2019-09-10T09:54:57.000Z
|
2022-03-09T10:24:44.000Z
|
conf.py
|
edulima19c3/phishings2021
|
62cb67da3e1d1221c4e8eb60701a959a347415fa
|
[
"MIT"
] | 1
|
2020-09-30T08:51:50.000Z
|
2020-09-30T08:51:50.000Z
|
conf.py
|
edulima19c3/phishings2021
|
62cb67da3e1d1221c4e8eb60701a959a347415fa
|
[
"MIT"
] | 14
|
2019-12-06T12:32:05.000Z
|
2022-03-30T08:17:59.000Z
|
import sys
from utx import *
def conf():
os.system("clear")
if pt == True:
print ("Configuração\n")
print (" [1] Mudar idioma")
print (" [2] Voltar ao menu\n")
configura = input("Selecione uma opção: ")
if configura == "1" or configura == "01":
os.system("clear")
print (" [1] Português")
print (" [2] English")
print (" [3] Español\n")
dec = input("Selecione: ")
if dec == "1" or dec == "01":
f = open("modulos/idioma.txt", "r")
filedata = f.read()
f.close()
repor = filedata.replace('pt', 'pt')
f = open("modulos/idioma.txt", "w")
f.write(repor)
f.close()
restart_program()
if dec == "2" or dec == "02":
f = open("modulos/idioma.txt", "r")
filedata = f.read()
f.close()
repor = filedata.replace("pt", "en")
f = open("modulos/idioma.txt", "w")
f.write(repor)
f.close()
restart_program()
if dec == "3" or dec == "03":
f = open("modulos/idioma.txt", "r")
filedata = f.read()
f.close()
repor = filedata.replace("pt", "es")
f = open("modulos/idioma.txt", "w")
f.write(repor)
f.close()
restart_program()
if configura == "2" or configura == "02":
restart_program()
if es == True:
print ("Configuración\n")
print (" [1] Cambiar el idioma")
print (" [2] Regresar al menú\n")
configura = input("Seleccione una opción: ")
if configura == "1" or configura == "01":
os.system("clear")
print (" [1] Português")
print (" [2] English")
print (" [3] Español\n")
dec = input("Selecione: ")
if dec == "1" or dec == "01":
f = open("modulos/idioma.txt", "r")
filedata = f.read()
f.close()
repor = filedata.replace('es', 'pt')
f = open("modulos/idioma.txt", "w")
f.write(repor)
f.close()
restart_program()
if dec == "2" or dec == "02":
f = open("modulos/idioma.txt", "r")
filedata = f.read()
f.close()
repor = filedata.replace("es", "en")
f = open("modulos/idioma.txt", "w")
f.write(repor)
f.close()
restart_program()
if dec == "3" or dec == "03":
f = open("modulos/idioma.txt", "r")
filedata = f.read()
f.close()
repor = filedata.replace("es", "es")
f = open("modulos/idioma.txt", "w")
f.write(repor)
f.close()
restart_program()
if configura == "2" or configura == "02":
restart_program()
if en == True:
print ("Settings\n")
print (" [1] Change language")
print (" [2] Back to menu\n")
configura = input("Choose an option: ")
if configura == "1" or configura == "01":
os.system("clear")
print (" [1] Português")
print (" [2] English")
print (" [3] Español\n")
dec = input("Selecione: ")
if dec == "1" or dec == "01":
f = open("modulos/idioma.txt", "r")
filedata = f.read()
f.close()
repor = filedata.replace('en', 'pt')
f = open("modulos/idioma.txt", "w")
f.write(repor)
f.close()
restart_program()
if dec == "2" or dec == "02":
f = open("modulos/idioma.txt", "r")
filedata = f.read()
f.close()
repor = filedata.replace("en", "en")
f = open("modulos/idioma.txt", "w")
f.write(repor)
f.close()
restart_program()
if dec == "3" or dec == "03":
f = open("modulos/idioma.txt", "r")
filedata = f.read()
f.close()
repor = filedata.replace("en", "es")
f = open("modulos/idioma.txt", "w")
f.write(repor)
f.close()
restart_program()
if configura == "2" or configura == "02":
restart_program()
| 21.958333
| 47
| 0.521279
| 479
| 3,689
| 3.989562
| 0.14405
| 0.047096
| 0.11303
| 0.169545
| 0.832548
| 0.832548
| 0.832548
| 0.832548
| 0.832548
| 0.832548
| 0
| 0.022727
| 0.284359
| 3,689
| 167
| 48
| 22.08982
| 0.701136
| 0
| 0
| 0.782258
| 0
| 0
| 0.234575
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008065
| false
| 0
| 0.016129
| 0
| 0.024194
| 0.145161
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b660e5ba30150d956933324648cb6f5a4193f215
| 12,504
|
py
|
Python
|
tests/test_prettify.py
|
alvistack/daveoncode-python-string-utils
|
78929d88d90b1f90cb4837528ed955166bf0f559
|
[
"MIT"
] | 3
|
2020-08-20T10:27:13.000Z
|
2021-11-02T20:28:16.000Z
|
tests/test_prettify.py
|
alvistack/daveoncode-python-string-utils
|
78929d88d90b1f90cb4837528ed955166bf0f559
|
[
"MIT"
] | null | null | null |
tests/test_prettify.py
|
alvistack/daveoncode-python-string-utils
|
78929d88d90b1f90cb4837528ed955166bf0f559
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from string_utils import prettify, is_email
class PrettifyTestCase(TestCase):
def test_cannot_handle_non_string_objects(self):
with self.assertRaises(TypeError) as raised:
# noinspection PyTypeChecker
prettify(None)
self.assertEqual(str(raised.exception), 'Expected "str", received "NoneType"')
with self.assertRaises(TypeError) as raised:
# noinspection PyTypeChecker
prettify(False)
self.assertEqual(str(raised.exception), 'Expected "str", received "bool"')
with self.assertRaises(TypeError) as raised:
# noinspection PyTypeChecker
prettify(0)
self.assertEqual(str(raised.exception), 'Expected "str", received "int"')
with self.assertRaises(TypeError) as raised:
# noinspection PyTypeChecker
prettify([])
self.assertEqual(str(raised.exception), 'Expected "str", received "list"')
with self.assertRaises(TypeError) as raised:
# noinspection PyTypeChecker
prettify({'a': 1})
self.assertEqual(str(raised.exception), 'Expected "str", received "dict"')
def test_should_return_empty_string_from_empty_string_or_space_only_string(self):
self.assertEqual('', prettify(''))
self.assertEqual('', prettify(' '))
def test_should_uppercase_first_letter(self):
self.assertEqual('Hello world', prettify('hello world'))
def test_should_strip_string(self):
self.assertEqual('Hello world', prettify(' hello world '))
def test_should_strip_empty_lines(self):
self.assertEqual('Hello world', prettify('''
hello world
'''))
def test_should_replace_multiple_brackets_with_single_ones(self):
self.assertEqual('(foo)', prettify('((foo)'))
self.assertEqual('(foo)', prettify('(foo))'))
self.assertEqual('(foo)', prettify('((foo))'))
self.assertEqual('(foo)', prettify('((((((((foo)))'))
self.assertEqual('[foo]', prettify('[[foo]'))
self.assertEqual('[foo]', prettify('[foo]]'))
self.assertEqual('[foo]', prettify('[[foo]]'))
self.assertEqual('[foo]', prettify('[[[[[[[[foo]]]'))
self.assertEqual('{foo}', prettify('{{foo}'))
self.assertEqual('{foo}', prettify('{foo}}'))
self.assertEqual('{foo}', prettify('{{foo}}'))
self.assertEqual('{foo}', prettify('{{{{{{{{foo}}}'))
def test_should_remove_internal_spaces_in_brackets(self):
self.assertEqual('(foo)', prettify('( foo)'))
self.assertEqual('(foo)', prettify('(foo )'))
self.assertEqual('(foo)', prettify('( foo )'))
def test_should_add_spaces_outside_brackets(self):
self.assertEqual('Boo (bar) baz', prettify('boo(bar)baz'))
def test_should_not_add_right_space_after_bracket_if_followed_by_punctuation(self):
self.assertEqual('Foo (bar)? Yes!', prettify('Foo(bar)? Yes!'))
self.assertEqual('Foo (bar): Yes!', prettify('Foo(bar): Yes!'))
self.assertEqual('Foo (bar). Yes!', prettify('Foo(bar). Yes!'))
self.assertEqual('Foo (bar); yes!', prettify('Foo(bar); yes!'))
self.assertEqual('Foo (bar), yes!', prettify('Foo(bar), yes!'))
def test_should_replace_multiple_commas_with_single_ones(self):
self.assertEqual('Hello, world', prettify('Hello,,, world'))
self.assertEqual('Hello, world, banana', prettify('Hello,,, world,, banana'))
def test_should_replace_multiple_colons_with_single_ones(self):
self.assertEqual('Hello: world', prettify('Hello::: world'))
self.assertEqual('Hello: world: banana', prettify('Hello::: world:: banana'))
def test_should_replace_multiple_semicolons_with_single_ones(self):
self.assertEqual('Hello; world', prettify('Hello;;; world'))
self.assertEqual('Hello; world; banana', prettify('Hello;;; world;; banana'))
def test_should_replace_multiple_double_quotes_with_single_ones(self):
self.assertEqual('"hello" world', prettify('""hello"" world'))
self.assertEqual('"hello" world', prettify('""hello" world'))
self.assertEqual('"hello" world', prettify('"hello"" world'))
self.assertEqual('"hello" world', prettify('""""""hello""""" world'))
def test_should_add_spaces_for_double_quotes(self):
self.assertEqual('Foo "bar" baz', prettify('foo"bar"baz'))
self.assertEqual('Foo "bar" baz', prettify('foo"bar" baz'))
self.assertEqual('Foo "bar" baz', prettify('foo "bar"baz'))
def test_should_trim_spaces_inside_double_quotes(self):
self.assertEqual('Foo "bar" baz', prettify('foo " bar " baz'))
self.assertEqual('Foo "bar" baz', prettify('foo "bar " baz'))
self.assertEqual('Foo "bar" baz', prettify('foo " bar" baz'))
def test_should_not_add_right_space_after_double_quotes_if_followed_by_punctuation(self):
self.assertEqual('Foo "bar"? Yes!', prettify('Foo"bar"? Yes!'))
self.assertEqual('Foo "bar": Yes!', prettify('Foo"bar": Yes!'))
self.assertEqual('Foo "bar". Yes!', prettify('Foo"bar". Yes!'))
self.assertEqual('Foo "bar"; yes!', prettify('Foo"bar"; yes!'))
self.assertEqual('Foo "bar", yes!', prettify('Foo"bar", yes!'))
def test_should_replace_multiple_single_quotes_with_single_ones(self):
self.assertEqual('Dave\'s job', prettify("Dave''s job"))
self.assertEqual("'destiny'", prettify("'''destiny'''"))
def test_should_fix_saxon_genitive_spaces(self):
self.assertEqual("Dave's dog", prettify("Dave' s dog"))
self.assertEqual("Dave's dog", prettify("Dave 's dog"))
self.assertEqual("Dave's dog", prettify("Dave 'sdog"))
def test_should_replace_multiple_percentage_with_single_ones(self):
self.assertEqual('%', prettify('%%%'))
self.assertEqual('A % b % c', prettify('a %% b %%%%%% c'))
def test_should_add_space_after_comma_if_missing(self):
self.assertEqual('One, two, three', prettify('one,two,three'))
def test_should_not_add_right_space_after_dot_for_numbers(self):
self.assertEqual('12,55', prettify('12,55'))
def test_should_remove_space_before_comma(self):
self.assertEqual('One, two, three', prettify('one , two , three'))
def test_should_uppercase_first_letter_after_period(self):
self.assertEqual('Foo. Bar', prettify('Foo. bar'))
def test_should_add_space_after_period_if_missing(self):
self.assertEqual('One. Two. Three', prettify('one.two.three'))
def test_should_not_add_right_space_after_comma_for_numbers(self):
self.assertEqual('12.55', prettify('12.55'))
def test_should_remove_space_before_period(self):
self.assertEqual('One. Two. Three', prettify('one . two . three'))
def test_should_add_space_after_colon_if_missing(self):
self.assertEqual('Test: this', prettify('Test:this'))
def test_should_remove_space_before_colon(self):
self.assertEqual('Test: this', prettify('Test :this'))
self.assertEqual('Test:', prettify('Test :'))
def test_should_add_space_after_semicolon_if_missing(self):
self.assertEqual('Test; this', prettify('Test;this'))
def test_should_remove_space_before_semicolon(self):
self.assertEqual('Test; this', prettify('Test ;this'))
self.assertEqual('Test;', prettify('Test ;'))
def test_should_uppercase_first_letter_after_exclamation(self):
self.assertEqual('Foo! Bar', prettify('Foo! bar'))
def test_should_add_space_after_exclamation_if_missing(self):
self.assertEqual('Test! This', prettify('Test!this'))
def test_should_remove_space_before_exclamation(self):
self.assertEqual('Test! This', prettify('Test !this'))
self.assertEqual('Test!', prettify('Test !'))
def test_should_uppercase_first_letter_after_question(self):
self.assertEqual('Foo? Bar', prettify('Foo? bar'))
def test_should_add_space_after_question_if_missing(self):
self.assertEqual('Test? This', prettify('Test?this'))
def test_should_remove_space_before_question(self):
self.assertEqual('Test? This', prettify('Test ?this'))
self.assertEqual('Test?', prettify('Test ?'))
def test_should_remove_space_before_dot(self):
self.assertEqual('Test. This', prettify('Test . This'))
self.assertEqual('Test.', prettify('Test .'))
def test_should_remove_space_after_number_if_followed_by_percentage(self):
self.assertEqual('100% python', prettify('100 % python'))
self.assertEqual('100%', prettify('100 %'))
def test_should_add_space_after_percentage_if_missing(self):
self.assertEqual('100% python code', prettify('100%python code'))
def test_should_add_spaces_around_plus_if_missing(self):
self.assertEqual('5 + 2', prettify('5 +2'))
self.assertEqual('5 + 2', prettify('5+ 2'))
self.assertEqual('5 + 2', prettify('5+2'))
def test_should_add_spaces_around_minus_if_missing(self):
self.assertEqual('5 - 2', prettify('5 -2'))
self.assertEqual('5 - 2', prettify('5- 2'))
self.assertEqual('5 - 2', prettify('5-2'))
def test_should_add_spaces_around_equal_if_missing(self):
self.assertEqual('5 - 2 = 3', prettify('5 - 2=3'))
self.assertEqual('5 - 2 = 3', prettify('5 - 2 =3'))
self.assertEqual('5 - 2 = 3', prettify('5 - 2= 3'))
def test_should_add_spaces_around_division_if_missing(self):
self.assertEqual('5 / 2 = 2.5', prettify('5/ 2 = 2.5'))
self.assertEqual('5 / 2 = 2.5', prettify('5 /2 = 2.5'))
self.assertEqual('5 / 2 = 2.5', prettify('5/2 = 2.5'))
def test_should_add_spaces_around_multiplication_if_missing(self):
self.assertEqual('5 * 2 = 10', prettify('5* 2 = 10'))
self.assertEqual('5 * 2 = 10', prettify('5 *2 = 10'))
self.assertEqual('5 * 2 = 10', prettify('5*2 = 10'))
def test_triple_dot_preserved(self):
self.assertEqual('Test...', prettify('Test...'))
self.assertEqual('Test... This', prettify('Test...This'))
def test_triple_exclamation_preserved(self):
self.assertEqual('Test!!!', prettify('Test!!!'))
self.assertEqual('Test!!! This', prettify('Test!!!This'))
def test_triple_question_preserved(self):
self.assertEqual('Test???', prettify('Test???'))
self.assertEqual('Test??? This', prettify('Test???This'))
def test_should_prettify_string_as_expected(self):
original = ' unprettified string ,, like this one,will be"prettified" .it\' s awesome!( like python)) '
pretty = 'Unprettified string, like this one, will be "prettified". It\'s awesome! (like python)'
self.assertEqual(pretty, prettify(original))
def test_should_work_as_expected_for_multiple_lines_string(self):
original = '''
unprettified string ,,
like this one,will be"prettified"
.it' s awesome!( like python))
'''
pretty = 'Unprettified string, like this one, will be "prettified". It\'s awesome! (like python)'
self.assertEqual(pretty, prettify(original))
def test_does_not_try_to_format_email(self):
email = 'my.email_name@gmail.com'
self.assertTrue(is_email(email))
self.assertEqual(email, prettify(email))
self.assertEqual('This is the email: {}'.format(email), prettify('this is the email : {}'.format(email)))
multiple_emails = ['mail.one@gmail.com', 'mail.two@gmail.com', 'mail.three@gmail.com']
self.assertEqual(prettify(','.join(multiple_emails)), ', '.join(multiple_emails))
def test_does_not_try_to_format_url(self):
url = 'https://www.mysite.com/path/page.php?query=foo'
self.assertEqual(url, prettify(url))
self.assertEqual('This is the url: {}'.format(url), prettify('this is the url : {}'.format(url)))
multiple_urls = ['http://www.site1.com', 'http://foo.com', 'https://www.something.it']
self.assertEqual(prettify(','.join(multiple_urls)), ', '.join(multiple_urls))
def test_does_not_try_to_format_ip(self):
ip = '127.0.0.1'
self.assertEqual(ip, prettify(ip))
self.assertEqual('This is the ip: {}'.format(ip), prettify('this is the ip : {}'.format(ip)))
multiple_ip = ['255.255.10.1', '255.255.10.2', '255.255.10.3']
self.assertEqual(prettify(' '.join(multiple_ip)), ' '.join(multiple_ip))
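A minimal standalone sketch of the behaviour pinned down by the assertions above; the import is an assumption, since the module under test is only named earlier in the file.

from string_utils import prettify, is_email  # assumed import path; adjust to the module under test

# punctuation spacing and sentence case are normalised
assert prettify('Test !this') == 'Test! This'
# e-mail addresses are recognised and left untouched
assert is_email('my.email_name@gmail.com')
assert prettify('my.email_name@gmail.com') == 'my.email_name@gmail.com'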
| 45.140794
| 113
| 0.654591
| 1,546
| 12,504
| 5.070505
| 0.108668
| 0.223881
| 0.111494
| 0.050899
| 0.827146
| 0.79079
| 0.734277
| 0.711698
| 0.67853
| 0.633882
| 0
| 0.015649
| 0.182342
| 12,504
| 276
| 114
| 45.304348
| 0.751076
| 0.010717
| 0
| 0.261307
| 0
| 0.025126
| 0.230508
| 0.00186
| 0
| 0
| 0
| 0
| 0.61809
| 1
| 0.261307
| false
| 0
| 0.01005
| 0
| 0.276382
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b68517ced604fb003aba133693610266205a8a6c
| 151
|
py
|
Python
|
Handlers/__init__.py
|
EmilPi/PuzzleLib
|
31aa0fab3b5e9472b9b9871ca52e4d94ea683fa9
|
[
"Apache-2.0"
] | 52
|
2020-02-28T20:40:15.000Z
|
2021-08-25T05:35:17.000Z
|
Handlers/__init__.py
|
EmilPi/PuzzleLib
|
31aa0fab3b5e9472b9b9871ca52e4d94ea683fa9
|
[
"Apache-2.0"
] | 2
|
2021-02-14T15:57:03.000Z
|
2021-10-05T12:21:34.000Z
|
Handlers/__init__.py
|
EmilPi/PuzzleLib
|
31aa0fab3b5e9472b9b9871ca52e4d94ea683fa9
|
[
"Apache-2.0"
] | 8
|
2020-02-28T20:40:11.000Z
|
2020-07-09T13:27:23.000Z
|
from PuzzleLib.Handlers.Calculator import Calculator
from PuzzleLib.Handlers.Trainer import Trainer
from PuzzleLib.Handlers.Validator import Validator
| 37.75
| 52
| 0.880795
| 18
| 151
| 7.388889
| 0.388889
| 0.293233
| 0.473684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07947
| 151
| 3
| 53
| 50.333333
| 0.956835
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b6ac8e2875179cca07121085090b7b0d54f38098
| 60
|
py
|
Python
|
data/__init__.py
|
MichaelHopwood/PyTorch-BayesianCNN
|
655c10ed72d87f9c0125b15566a014963e2572e7
|
[
"MIT"
] | 1,027
|
2018-08-17T01:53:38.000Z
|
2022-03-28T21:43:42.000Z
|
data/__init__.py
|
MichaelHopwood/PyTorch-BayesianCNN
|
655c10ed72d87f9c0125b15566a014963e2572e7
|
[
"MIT"
] | 59
|
2018-11-21T15:20:50.000Z
|
2022-03-16T08:34:10.000Z
|
data/__init__.py
|
MichaelHopwood/PyTorch-BayesianCNN
|
655c10ed72d87f9c0125b15566a014963e2572e7
|
[
"MIT"
] | 269
|
2018-09-10T01:29:40.000Z
|
2022-03-15T07:31:46.000Z
|
from .data import getDataset
from .data import getDataloader
| 30
| 31
| 0.85
| 8
| 60
| 6.375
| 0.625
| 0.313725
| 0.54902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116667
| 60
| 2
| 31
| 30
| 0.962264
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
fcc0f24701d1e5135787b0eb49149716b7bb1225
| 9,366
|
py
|
Python
|
tests/python/test_recommend.py
|
iscoe/dragonfly
|
f81f47d990dd5ef1c7cf1bedee8a2d7f9fb84cd3
|
[
"Apache-2.0"
] | 6
|
2018-09-12T19:38:13.000Z
|
2021-10-02T17:23:47.000Z
|
tests/python/test_recommend.py
|
iscoe/dragonfly
|
f81f47d990dd5ef1c7cf1bedee8a2d7f9fb84cd3
|
[
"Apache-2.0"
] | 49
|
2018-05-23T14:38:23.000Z
|
2020-10-29T18:12:39.000Z
|
tests/python/test_recommend.py
|
iscoe/dragonfly
|
f81f47d990dd5ef1c7cf1bedee8a2d7f9fb84cd3
|
[
"Apache-2.0"
] | 3
|
2018-05-20T20:04:09.000Z
|
2021-08-10T06:38:16.000Z
|
import unittest
import collections
import shutil
import tempfile
from dragonfly.recommend import Recommender, TaggedTokenFrequencies
class RecommenderTest(unittest.TestCase):
def test_prepare_words(self):
test_string = 'Test one two\t three\r\nfour'
self.assertEqual(['test', 'one', 'two', 'three', 'four'], Recommender._prepare_words(test_string))
class TaggedTokenFrequenciesTest(unittest.TestCase):
# 5-sentence document with 6 unique tagged tokens
DATA1 = {'filename': 'IL3_NW_020005_20150728_G004007SS.conll.txt', 'tokens': [{'token': 'ميۇنخېندىكى', 'tag': 'O'}, {'token': 'ئۇيغۇرلار', 'tag': 'O'}, {'token': 'خىتاي', 'tag': 'B-GPE'}, {'token': 'كونسۇلخانىسى', 'tag': 'O'}, {'token': 'ئالدىدا', 'tag': 'O'}, {'token': 'يەكەن', 'tag': 'B-GPE'}, {'token': 'ۋەقەسىنىڭ', 'tag': 'O'}, {'token': 'بىر', 'tag': 'O'}, {'token': 'يىللىقى', 'tag': 'O'}, {'token': 'مۇناسىۋىتى', 'tag': 'O'}, {'token': 'بىلەن', 'tag': 'O'}, {'token': 'نامايىش', 'tag': 'O'}, {'token': 'قىلدى', 'tag': 'O'}, {}, {'token': 'د', 'tag': 'O'}, {'token': 'ئۇ', 'tag': 'O'}, {'token': 'ق', 'tag': 'O'}, {'token': 'نىڭ', 'tag': 'O'}, {'token': 'چاقىرىقى', 'tag': 'O'}, {'token': '،', 'tag': 'O'}, {'token': 'ياۋرۇپا', 'tag': 'B-LOC'}, {'token': 'شەرقىي', 'tag': 'B-GPE'}, {'token': 'تۈركىستان', 'tag': 'I-GPE'}, {'token': 'بىرلىكى', 'tag': 'O'}, {'token': 'تەشكىلاتىنىڭ', 'tag': 'O'}, {'token': 'ئورۇنلاشتۇرۇشى', 'tag': 'O'}, {'token': 'بىلەن', 'tag': 'O'}, {'token': '28', 'tag': 'O'}, {'token': '-', 'tag': 'O'}, {'token': 'ئىيۇل', 'tag': 'O'}, {'token': 'كۈنى', 'tag': 'O'}, {'token': 'گېرمانىيىنىڭ', 'tag': 'O'}, {'token': 'ميۇنخېن', 'tag': 'B-GPE'}, {'token': 'شەھىرىدىكى', 'tag': 'O'}, {'token': 'خىتاي', 'tag': 'B-GPE'}, {'token': 'كونسۇلخانىسى', 'tag': 'O'}, {'token': 'ئالدىدا', 'tag': 'O'}, {'token': 'يەكەن', 'tag': 'B-GPE'}, {'token': 'ۋەقەسىنىڭ', 'tag': 'O'}, {'token': 'بىر', 'tag': 'O'}, {'token': 'يىللىقى', 'tag': 'O'}, {'token': 'مۇناسىۋىتى', 'tag': 'O'}, {'token': 'بىلەن', 'tag': 'O'}, {'token': 'نامايىش', 'tag': 'O'}, {'token': 'ئېلىپ', 'tag': 'O'}, {'token': 'بېرىلدى', 'tag': 'O'}, {'token': '.', 'tag': 'O'}, {}, {'token': 'بۇ', 'tag': 'O'}, {'token': 'نامايىشقا', 'tag': 'O'}, {'token': 'ميۇنخېن', 'tag': 'B-GPE'}, {'token': 'شەھىرى', 'tag': 'O'}, {'token': 'ۋە', 'tag': 'O'}, {'token': 'ئەتراپ', 'tag': 'O'}, {'token': 'رايونلاردىكى', 'tag': 'O'}, {'token': 'ئۇيغۇر', 'tag': 'O'}, {'token': 'جامائىتىدىن', 'tag': 'O'}, {'token': 'باشقا', 'tag': 'O'}, {'token': '،', 'tag': 'O'}, {'token': 'تۈرك', 'tag': 'O'}, {'token': 'تەشكىلاتلىرى', 'tag': 'O'}, {'token': 'ۋەكىللىرى', 'tag': 'O'}, {'token': 'ھەمدە', 'tag': 'O'}, {'token': 'بىر', 'tag': 'O'}, {'token': 'قىسىم', 'tag': 'O'}, {'token': 'كىشىلىك', 'tag': 'O'}, {'token': 'ھوقۇق', 'tag': 'O'}, {'token': 'تەشكىلاتلىرىنىڭ', 'tag': 'O'}, {'token': 'ئەزالىرىمۇ', 'tag': 'O'}, {'token': 'قاتناشتى', 'tag': 'O'}, {'token': '.', 'tag': 'O'}, {}, {'token': 'نامايىش', 'tag': 'O'}, {'token': 'سائەت', 'tag': 'O'}, {'token': '14:00', 'tag': 'O'}, {'token': 'دىن', 'tag': 'O'}, {'token': '16:00', 'tag': 'O'}, {'token': 'گىچە', 'tag': 'O'}, {'token': 'ئىككى', 'tag': 'O'}, {'token': 'سائەت', 'tag': 'O'}, {'token': 'داۋاملاشتى', 'tag': 'O'}, {'token': '.', 'tag': 'O'}, {}, {'token': 'نامايىشچىلار', 'tag': 'O'}, {'token': 'يەكەن', 'tag': 'B-GPE'}, {'token': 'ۋەقەسىدە', 'tag': 'O'}, {'token': 'قەتلى', 'tag': 'O'}, {'token': 'قىلىنغان', 'tag': 'O'}, {'token': 'شېھىتلارنى', 'tag': 'O'}, {'token': 'ئەسلەپ', 'tag': 'O'}, {'token': '،', 'tag': 'O'}, {'token': 'خىتاينىڭ', 'tag': 'O'}, {'token': 'قانلىق', 'tag': 'O'}, {'token': 'قىرغىنچىلىقىنى', 'tag': 'O'}, {'token': 'ئەيىبلىدى', 'tag': 'O'}, {'token': '.', 'tag': 'O'}]}
# same document as above, with one instance of a tag that appeared 3 times removed and a new tag added;
# this also removes a sentence from the set of tagged sentences
DATA2 = {'filename': 'IL3_NW_020005_20150728_G004007SS.conll.txt', 'tokens': [{'token': 'ميۇنخېندىكى', 'tag': 'O'}, {'token': 'ئۇيغۇرلار', 'tag': 'O'}, {'token': 'خىتاي', 'tag': 'B-GPE'}, {'token': 'كونسۇلخانىسى', 'tag': 'O'}, {'token': 'ئالدىدا', 'tag': 'O'}, {'token': 'يەكەن', 'tag': 'B-GPE'}, {'token': 'ۋەقەسىنىڭ', 'tag': 'O'}, {'token': 'بىر', 'tag': 'O'}, {'token': 'يىللىقى', 'tag': 'O'}, {'token': 'مۇناسىۋىتى', 'tag': 'O'}, {'token': 'بىلەن', 'tag': 'O'}, {'token': 'نامايىش', 'tag': 'O'}, {'token': 'قىلدى', 'tag': 'O'}, {}, {'token': 'د', 'tag': 'O'}, {'token': 'ئۇ', 'tag': 'O'}, {'token': 'ق', 'tag': 'O'}, {'token': 'نىڭ', 'tag': 'O'}, {'token': 'چاقىرىقى', 'tag': 'O'}, {'token': '،', 'tag': 'O'}, {'token': 'ياۋرۇپا', 'tag': 'B-LOC'}, {'token': 'شەرقىي', 'tag': 'B-GPE'}, {'token': 'تۈركىستان', 'tag': 'I-GPE'}, {'token': 'بىرلىكى', 'tag': 'O'}, {'token': 'تەشكىلاتىنىڭ', 'tag': 'O'}, {'token': 'ئورۇنلاشتۇرۇشى', 'tag': 'O'}, {'token': 'بىلەن', 'tag': 'O'}, {'token': '28', 'tag': 'O'}, {'token': '-', 'tag': 'O'}, {'token': 'ئىيۇل', 'tag': 'O'}, {'token': 'كۈنى', 'tag': 'O'}, {'token': 'گېرمانىيىنىڭ', 'tag': 'O'}, {'token': 'ميۇنخېن', 'tag': 'B-GPE'}, {'token': 'شەھىرىدىكى', 'tag': 'O'}, {'token': 'خىتاي', 'tag': 'B-GPE'}, {'token': 'كونسۇلخانىسى', 'tag': 'O'}, {'token': 'ئالدىدا', 'tag': 'O'}, {'token': 'يەكەن', 'tag': 'B-GPE'}, {'token': 'ۋەقەسىنىڭ', 'tag': 'O'}, {'token': 'بىر', 'tag': 'O'}, {'token': 'يىللىقى', 'tag': 'O'}, {'token': 'مۇناسىۋىتى', 'tag': 'O'}, {'token': 'بىلەن', 'tag': 'O'}, {'token': 'نامايىش', 'tag': 'O'}, {'token': 'ئېلىپ', 'tag': 'O'}, {'token': 'بېرىلدى', 'tag': 'O'}, {'token': '.', 'tag': 'O'}, {}, {'token': 'بۇ', 'tag': 'O'}, {'token': 'نامايىشقا', 'tag': 'O'}, {'token': 'ميۇنخېن', 'tag': 'B-GPE'}, {'token': 'شەھىرى', 'tag': 'O'}, {'token': 'ۋە', 'tag': 'O'}, {'token': 'ئەتراپ', 'tag': 'O'}, {'token': 'رايونلاردىكى', 'tag': 'O'}, {'token': 'ئۇيغۇر', 'tag': 'B-PER'}, {'token': 'جامائىتىدىن', 'tag': 'O'}, {'token': 'باشقا', 'tag': 'O'}, {'token': '،', 'tag': 'O'}, {'token': 'تۈرك', 'tag': 'O'}, {'token': 'تەشكىلاتلىرى', 'tag': 'O'}, {'token': 'ۋەكىللىرى', 'tag': 'O'}, {'token': 'ھەمدە', 'tag': 'O'}, {'token': 'بىر', 'tag': 'O'}, {'token': 'قىسىم', 'tag': 'O'}, {'token': 'كىشىلىك', 'tag': 'O'}, {'token': 'ھوقۇق', 'tag': 'O'}, {'token': 'تەشكىلاتلىرىنىڭ', 'tag': 'O'}, {'token': 'ئەزالىرىمۇ', 'tag': 'O'}, {'token': 'قاتناشتى', 'tag': 'O'}, {'token': '.', 'tag': 'O'}, {}, {'token': 'نامايىش', 'tag': 'O'}, {'token': 'سائەت', 'tag': 'O'}, {'token': '14:00', 'tag': 'O'}, {'token': 'دىن', 'tag': 'O'}, {'token': '16:00', 'tag': 'O'}, {'token': 'گىچە', 'tag': 'O'}, {'token': 'ئىككى', 'tag': 'O'}, {'token': 'سائەت', 'tag': 'O'}, {'token': 'داۋاملاشتى', 'tag': 'O'}, {'token': '.', 'tag': 'O'}, {}, {'token': 'نامايىشچىلار', 'tag': 'O'}, {'token': 'يەكەن', 'tag': 'O'}, {'token': 'ۋەقەسىدە', 'tag': 'O'}, {'token': 'قەتلى', 'tag': 'O'}, {'token': 'قىلىنغان', 'tag': 'O'}, {'token': 'شېھىتلارنى', 'tag': 'O'}, {'token': 'ئەسلەپ', 'tag': 'O'}, {'token': '،', 'tag': 'O'}, {'token': 'خىتاينىڭ', 'tag': 'O'}, {'token': 'قانلىق', 'tag': 'O'}, {'token': 'قىرغىنچىلىقىنى', 'tag': 'O'}, {'token': 'ئەيىبلىدى', 'tag': 'O'}, {'token': '.', 'tag': 'O'}]}
# one sentence document with a new tagged token
DATA3 = {'filename': 'IL3_SN_000370_20160127_G0T0004NM.conll.txt', 'tokens': [{'token': 'خىتاينىڭ', 'tag': 'B-GPE'}, {'token': 'چاغان', 'tag': 'O'}, {'token': 'بايرىمىنى', 'tag': 'O'}, {'token': 'ئۇيغۇرلارنى', 'tag': 'O'}, {'token': 'ئۇسۇلغا', 'tag': 'O'}, {'token': 'سېلىش', 'tag': 'O'}, {'token': 'بىلەن', 'tag': 'O'}, {'token': 'تەبرىكلىمەكتە', 'tag': 'O'}, {'token': '.', 'tag': 'O'}, {'token': 'شەرقىي', 'tag': 'B-GPE'}, {'token': 'تۈركىستان', 'tag': 'I-GPE'}, {'token': 'تەشۋىقات', 'tag': 'O'}, {'token': 'مەركىزى', 'tag': 'O'}, {'token': 'https://t.co/PRzzEUIwsp', 'tag': 'O'}]}
def setUp(self):
self.test_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.test_dir)
def test_first_update(self):
freq = TaggedTokenFrequencies(self.test_dir)
corpus_data = freq.update(self.DATA1)
self.assertEqual(6, len(corpus_data.tagged_counts))
tags = collections.Counter({'يەكەن': 3, 'خىتاي': 2, 'ميۇنخېن': 2, 'ياۋرۇپا': 1, 'شەرقىي': 1, 'تۈركىستان': 1})
self.assertEqual(tags, corpus_data.tagged_counts)
self.assertEqual(3, corpus_data.counts['.'])
def test_update_on_same_doc(self):
freq = TaggedTokenFrequencies(self.test_dir)
freq.update(self.DATA1)
corpus_data = freq.update(self.DATA2)
self.assertEqual(7, len(corpus_data.tagged_counts))
tags = collections.Counter({'خىتاي': 2, 'يەكەن': 2, 'ميۇنخېن': 2, 'ياۋرۇپا': 1, 'شەرقىي': 1, 'تۈركىستان': 1, 'ئۇيغۇر': 1})
self.assertEqual(tags, corpus_data.tagged_counts)
self.assertEqual(2, corpus_data.counts['.'])
def test_update_on_new_doc(self):
freq = TaggedTokenFrequencies(self.test_dir)
freq.update(self.DATA1)
corpus_data = freq.update(self.DATA3)
self.assertEqual(7, len(corpus_data.tagged_counts))
self.assertEqual(4, corpus_data.counts['.'])
def test_update_on_new_doc_with_no_tags(self):
freq = TaggedTokenFrequencies(self.test_dir)
freq.update(self.DATA1)
corpus_data = freq.update({'filename': 'test', 'tokens': [{'token': 'a', 'tag': 'O'}, {'token': '.', 'tag': 'O'}]})
self.assertEqual(6, len(corpus_data.tagged_counts))
self.assertEqual(3, corpus_data.counts['.'])
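A brief usage sketch of the frequency tracker exercised above; the document shape (a dict with 'filename' and a 'tokens' list of {'token', 'tag'} entries) comes from the fixtures, while the toy values and directory are illustrative.

import tempfile
from dragonfly.recommend import TaggedTokenFrequencies

freq = TaggedTokenFrequencies(tempfile.mkdtemp())
doc = {'filename': 'toy.conll.txt',
       'tokens': [{'token': 'London', 'tag': 'B-GPE'}, {'token': '.', 'tag': 'O'}]}
corpus_data = freq.update(doc)
print(corpus_data.tagged_counts)  # expected: Counter({'London': 1}); plain token counts live in corpus_data.counts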
| 158.745763
| 3,211
| 0.522849
| 1,141
| 9,366
| 4.243646
| 0.156004
| 0.146221
| 0.321561
| 0.042131
| 0.810615
| 0.802974
| 0.791822
| 0.780876
| 0.741636
| 0.713135
| 0
| 0.014217
| 0.136344
| 9,366
| 58
| 3,212
| 161.482759
| 0.583632
| 0.023596
| 0
| 0.340909
| 0
| 0
| 0.384136
| 0.013786
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.159091
| false
| 0
| 0.113636
| 0
| 0.386364
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
fcfd6b62a79d10016b85ea60c5113a0269fe6d60
| 11,442
|
py
|
Python
|
tests/resources/common_resource/test_list.py
|
Arfey/aiohttp_admin2
|
2b3782389ec9e25809635811b76ef8111b27d8ba
|
[
"MIT"
] | 12
|
2021-10-15T11:48:12.000Z
|
2022-03-24T07:31:43.000Z
|
tests/resources/common_resource/test_list.py
|
Arfey/aiohttp_admin2
|
2b3782389ec9e25809635811b76ef8111b27d8ba
|
[
"MIT"
] | 2
|
2021-12-29T16:31:05.000Z
|
2021-12-30T00:50:40.000Z
|
tests/resources/common_resource/test_list.py
|
Arfey/aiohttp_admin2
|
2b3782389ec9e25809635811b76ef8111b27d8ba
|
[
"MIT"
] | null | null | null |
import pytest
from aiohttp_admin2.resources.exceptions import (
ClientException,
BadParameters,
)
from aiohttp_admin2.resources.types import FilterTuple
from .utils import generate_fake_instance
@pytest.mark.asyncio
async def test_list_order(resource):
"""
This test checks that sorting works correctly in the get_list method of the resource.
1. Check the default order
2. Check ascending order
3. Ordering with cursor pagination raises an error
"""
await generate_fake_instance(resource, 10)
# 1. Check default order
list_objects = await resource.get_list()
assert len(list_objects.instances) == 10
# 2. Check desc order
list_objects_second = await resource.get_list(order_by='id')
compare_list = zip(
list_objects.instances,
reversed(list_objects_second.instances),
)
for a, b in compare_list:
assert a.get_pk() == b.get_pk()
# 3. Ordering with cursor pagination raises an error
with pytest.raises(ClientException):
await resource.get_list(order_by='val', cursor=1)
@pytest.mark.asyncio
@pytest.mark.parametrize("ordering", ("id", "-id"))
async def test_list_page_pagination(resource, ordering):
"""
This test checks that page pagination works correctly in the get_list method
of the resource, combined with ordering.
1. Check that page pagination works correctly
- Check that has_next and has_prev are set correctly
- Check that the count value is correct
2. Check page pagination when the last page holds a remainder
"""
instance_count = 9
await generate_fake_instance(resource, instance_count)
# 1. Check that page pagination works correctly
full_list_objects = \
await resource.get_list(limit=instance_count, order_by=ordering)
full_list_objects_ids = [i.get_pk() for i in full_list_objects.instances]
assert len(full_list_objects_ids) == instance_count
# page 1
list_objects = await resource.get_list(limit=3, order_by=ordering)
list_objects_ids = [i.get_pk() for i in list_objects.instances]
assert len(list_objects_ids) == 3
assert set(full_list_objects_ids[:3]) == set(list_objects_ids)
# Check that has_next and has_prev are set correctly
assert list_objects.has_next
assert not list_objects.has_prev
assert list_objects.count == instance_count
# page 2
list_objects = await resource.get_list(limit=3, page=2, order_by=ordering)
list_objects_ids = [i.get_pk() for i in list_objects.instances]
assert len(list_objects_ids) == 3
assert set(full_list_objects_ids[3:6]) == set(list_objects_ids)
# Check that has_next and has_prev are set correctly
assert list_objects.has_next
assert list_objects.has_prev
assert list_objects.count == instance_count
# page 3
list_objects = await resource.get_list(limit=3, page=3, order_by=ordering)
list_objects_ids = [i.get_pk() for i in list_objects.instances]
assert len(list_objects_ids) == 3
assert set(full_list_objects_ids[6:9]) == set(list_objects_ids)
# Check that has_next and has_prev are set correctly
assert not list_objects.has_next
assert list_objects.has_prev
assert list_objects.count == instance_count
# 2. Check page pagination when the last page holds a remainder
await generate_fake_instance(resource, 1)
# page 4
list_objects = await resource.get_list(limit=3, page=4, order_by=ordering)
list_objects_ids = [i.get_pk() for i in list_objects.instances]
assert len(list_objects_ids) == 1
# Check that has_next and has_prev are set correctly
assert not list_objects.has_next
assert list_objects.has_prev
@pytest.mark.asyncio
async def test_list_page_pagination_parameters_error(resource):
"""
This test checks the errors raised when bad arguments are passed.
1. limit must be greater than zero
2. cursor can't be used together with page
3. page must be greater than zero
"""
# 1. limit must be greater than zero
with pytest.raises(BadParameters):
await resource.get_list(limit=0)
# 2. cursor can't be used together with page
instances = await generate_fake_instance(resource, 1)
with pytest.raises(BadParameters):
await resource.get_list(page=2, cursor=instances[0].get_pk())
# 3. page must be greater than zero
with pytest.raises(BadParameters):
await resource.get_list(page=0)
@pytest.mark.asyncio
@pytest.mark.parametrize("ordering", ("id", "-id"))
async def test_list_cursor_pagination(resource, ordering):
"""
This test checks that cursor pagination works correctly in the get_list
method of the resource, combined with ordering.
1. Check that cursor pagination works correctly
- Check that has_next and has_prev are set correctly
- Check that the count value is correct
"""
instance_count = 5
await generate_fake_instance(resource, instance_count)
# 1. Check that cursor pagination works correctly
full_list_objects = \
await resource.get_list(limit=instance_count, order_by=ordering)
full_list_objects_ids = [i.get_pk() for i in full_list_objects.instances]
assert len(full_list_objects_ids) == instance_count
# page 1
list_objects = await resource.get_list(
cursor=full_list_objects_ids[0],
limit=3,
order_by=ordering,
)
list_objects_ids = [i.get_pk() for i in list_objects.instances]
assert len(list_objects_ids) == 3
assert set(full_list_objects_ids[1:4]) == set(list_objects_ids)
assert list_objects.has_next
assert list_objects.has_prev
assert list_objects.count is None
# page 2
list_objects = await resource.get_list(
cursor=full_list_objects_ids[3],
limit=3,
order_by=ordering,
)
list_objects_ids = [i.get_pk() for i in list_objects.instances]
assert len(list_objects_ids) == 1
assert set(full_list_objects_ids[4:7]) == set(list_objects_ids)
assert not list_objects.has_next
assert list_objects.has_prev
assert list_objects.count is None
@pytest.mark.asyncio
async def test_filter_api_for_get_list(resource):
"""
This test checks that the filter API works correctly in the get_list method
of the resource, combined with ordering.
1. Check that a single filter works together with ordering
2. Check that two filters work together with ordering
"""
# 1. A single filter combined with ordering
instances = await generate_fake_instance(resource, 10)
full_list_objects_ids = [i.get_pk() for i in instances]
# desc
list_objects = await resource.get_list(
filters=[
FilterTuple('id', full_list_objects_ids[0], "gt"),
],
limit=len(full_list_objects_ids),
)
list_objects_ids = [i.get_pk() for i in list_objects.instances]
assert set(list_objects_ids) == set(full_list_objects_ids[1:])
# asc
list_objects = await resource.get_list(
filters=[
FilterTuple('id', full_list_objects_ids[-1], "lt"),
],
limit=len(full_list_objects_ids),
order_by='id'
)
asc_list_objects_ids = [i.get_pk() for i in list_objects.instances]
for x, y in zip(list_objects_ids[1:], reversed(asc_list_objects_ids[1:])):
assert x == y
# 2. Two filters combined with ordering
list_objects = await resource.get_list(
filters=[
FilterTuple('id', full_list_objects_ids[0], "gt"),
FilterTuple('id', full_list_objects_ids[2], "lt"),
],
limit=len(full_list_objects_ids),
)
list_objects_ids = [i.get_pk() for i in list_objects.instances]
assert len(list_objects_ids) == 1
assert list_objects_ids[0] == full_list_objects_ids[1]
@pytest.mark.asyncio
async def test_common_filters_for_get_list(resource):
"""
This test checks that the common filters work correctly in the get_list
method of the resource.
Filters covered:
eq, ne, lt, lte, gt, gte, in, nin, like
"""
instances = await generate_fake_instance(resource, 10)
full_list_objects_ids = [i.get_pk() for i in instances]
# eq
list_objects = await resource.get_list(
filters=[
FilterTuple('id', full_list_objects_ids[0], "eq"),
],
limit=len(full_list_objects_ids),
)
list_objects_ids = [i.get_pk() for i in list_objects.instances]
assert len(list_objects_ids) == 1
assert list_objects_ids[0] == full_list_objects_ids[0]
# ne
list_objects = await resource.get_list(
filters=[
FilterTuple('id', full_list_objects_ids[0], "ne"),
],
limit=len(full_list_objects_ids),
)
list_objects_ids = [i.get_pk() for i in list_objects.instances]
assert len(list_objects_ids) == len(full_list_objects_ids) - 1
assert full_list_objects_ids[0] not in list_objects_ids
# gt
list_objects = await resource.get_list(
filters=[
FilterTuple('id', full_list_objects_ids[-2], "gt"),
],
limit=len(full_list_objects_ids),
)
list_objects_ids = [i.get_pk() for i in list_objects.instances]
assert len(list_objects_ids) == 1
assert list_objects_ids[0] == full_list_objects_ids[-1]
# gte
list_objects = await resource.get_list(
filters=[
FilterTuple('id', full_list_objects_ids[-2], "gte"),
],
limit=len(full_list_objects_ids),
)
list_objects_ids = [i.get_pk() for i in list_objects.instances]
assert len(list_objects_ids) == 2
assert set(list_objects_ids) == set(full_list_objects_ids[-2:])
# lt
list_objects = await resource.get_list(
filters=[
FilterTuple('id', full_list_objects_ids[1], "lt"),
],
limit=len(full_list_objects_ids),
)
list_objects_ids = [i.get_pk() for i in list_objects.instances]
assert len(list_objects_ids) == 1
assert list_objects_ids[0] == full_list_objects_ids[0]
# lte
list_objects = await resource.get_list(
filters=[
FilterTuple('id', full_list_objects_ids[1], "lte"),
],
limit=len(full_list_objects_ids),
)
list_objects_ids = [i.get_pk() for i in list_objects.instances]
assert len(list_objects_ids) == 2
assert set(list_objects_ids) == set(full_list_objects_ids[:2])
# in
ids = full_list_objects_ids[1], full_list_objects_ids[0]
list_objects = await resource.get_list(
filters=[
FilterTuple('id', ids, "in"),
],
limit=len(full_list_objects_ids),
)
list_objects_ids = [i.get_pk() for i in list_objects.instances]
assert len(list_objects_ids) == 2
assert set(list_objects_ids) == set(ids)
# nin
list_objects = await resource.get_list(
filters=[
FilterTuple('id', ids, "nin"),
],
limit=len(full_list_objects_ids),
)
list_objects_ids = [i.get_pk() for i in list_objects.instances]
assert len(list_objects_ids) == len(full_list_objects_ids) - 2
assert not (set(list_objects_ids) & set(ids))
# like
list_objects = await resource.get_list(
filters=[
FilterTuple('val', instances[0].data.val, "like"),
],
limit=len(full_list_objects_ids),
)
list_objects_ids = [i.get_pk() for i in list_objects.instances]
assert len(list_objects_ids) == 1
assert list_objects_ids[0] == instances[0].get_pk()
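For reference, a compact sketch of driving the same filter and pagination API outside the test fixtures; `resource` is assumed to be the same kind of resource object the fixture provides, and the helper name is illustrative.

from aiohttp_admin2.resources.types import FilterTuple

async def ids_greater_than(resource, pk, limit=3):
    # first page of rows with id > pk, ordered ascending by id
    page = await resource.get_list(
        filters=[FilterTuple('id', pk, 'gt')],
        order_by='id',
        limit=limit,
    )
    return [instance.get_pk() for instance in page.instances], page.has_next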
| 31.00813
| 80
| 0.681
| 1,642
| 11,442
| 4.464677
| 0.0743
| 0.241577
| 0.18906
| 0.1154
| 0.885145
| 0.863457
| 0.811895
| 0.762515
| 0.745737
| 0.669213
| 0
| 0.01265
| 0.226184
| 11,442
| 368
| 81
| 31.092391
| 0.815338
| 0.063013
| 0
| 0.604651
| 1
| 0
| 0.009839
| 0
| 0
| 0
| 0
| 0
| 0.251163
| 1
| 0
| false
| 0
| 0.018605
| 0
| 0.018605
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
1e3f7a6adf725042f98d2b68576535514e19dcc6
| 22,270
|
py
|
Python
|
isi_sdk/apis/snapshot_snapshots_api.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
isi_sdk/apis/snapshot_snapshots_api.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
isi_sdk/apis/snapshot_snapshots_api.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
SnapshotSnapshotsApi.py
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class SnapshotSnapshotsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_snapshot_lock(self, snapshot_lock, sid, **kwargs):
"""
Create a new lock on this snapshot.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_snapshot_lock(snapshot_lock, sid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param SnapshotLockCreateParams snapshot_lock: (required)
:param str sid: (required)
:return: CreateSnapshotLockResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['snapshot_lock', 'sid']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_snapshot_lock" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'snapshot_lock' is set
if ('snapshot_lock' not in params) or (params['snapshot_lock'] is None):
raise ValueError("Missing the required parameter `snapshot_lock` when calling `create_snapshot_lock`")
# verify the required parameter 'sid' is set
if ('sid' not in params) or (params['sid'] is None):
raise ValueError("Missing the required parameter `sid` when calling `create_snapshot_lock`")
resource_path = '/platform/1/snapshot/snapshots/{Sid}/locks'.replace('{format}', 'json')
path_params = {}
if 'sid' in params:
path_params['Sid'] = params['sid']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'snapshot_lock' in params:
body_params = params['snapshot_lock']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CreateSnapshotLockResponse',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def delete_snapshot_lock(self, snapshot_lock_id, sid, **kwargs):
"""
Delete the snapshot lock.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_snapshot_lock(snapshot_lock_id, sid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str snapshot_lock_id: Delete the snapshot lock. (required)
:param str sid: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['snapshot_lock_id', 'sid']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_snapshot_lock" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'snapshot_lock_id' is set
if ('snapshot_lock_id' not in params) or (params['snapshot_lock_id'] is None):
raise ValueError("Missing the required parameter `snapshot_lock_id` when calling `delete_snapshot_lock`")
# verify the required parameter 'sid' is set
if ('sid' not in params) or (params['sid'] is None):
raise ValueError("Missing the required parameter `sid` when calling `delete_snapshot_lock`")
resource_path = '/platform/1/snapshot/snapshots/{Sid}/locks/{SnapshotLockId}'.replace('{format}', 'json')
path_params = {}
if 'snapshot_lock_id' in params:
path_params['SnapshotLockId'] = params['snapshot_lock_id']
if 'sid' in params:
path_params['Sid'] = params['sid']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def delete_snapshot_locks(self, sid, **kwargs):
"""
Delete all locks. Will try to drain count of recursively held locks so that the snapshot can be deleted.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_snapshot_locks(sid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str sid: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['sid']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_snapshot_locks" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'sid' is set
if ('sid' not in params) or (params['sid'] is None):
raise ValueError("Missing the required parameter `sid` when calling `delete_snapshot_locks`")
resource_path = '/platform/1/snapshot/snapshots/{Sid}/locks'.replace('{format}', 'json')
path_params = {}
if 'sid' in params:
path_params['Sid'] = params['sid']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_snapshot_lock(self, snapshot_lock_id, sid, **kwargs):
"""
Retrieve lock information.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_snapshot_lock(snapshot_lock_id, sid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str snapshot_lock_id: Retrieve lock information. (required)
:param str sid: (required)
:return: SnapshotLocks
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['snapshot_lock_id', 'sid']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_snapshot_lock" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'snapshot_lock_id' is set
if ('snapshot_lock_id' not in params) or (params['snapshot_lock_id'] is None):
raise ValueError("Missing the required parameter `snapshot_lock_id` when calling `get_snapshot_lock`")
# verify the required parameter 'sid' is set
if ('sid' not in params) or (params['sid'] is None):
raise ValueError("Missing the required parameter `sid` when calling `get_snapshot_lock`")
resource_path = '/platform/1/snapshot/snapshots/{Sid}/locks/{SnapshotLockId}'.replace('{format}', 'json')
path_params = {}
if 'snapshot_lock_id' in params:
path_params['SnapshotLockId'] = params['snapshot_lock_id']
if 'sid' in params:
path_params['Sid'] = params['sid']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SnapshotLocks',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def list_snapshot_locks(self, sid, **kwargs):
"""
List all locks.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_snapshot_locks(sid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str sid: (required)
:param str sort: The field that will be used for sorting. Choices are id, expires, and comment. Default is id.
:param int limit: Return no more than this many results at once (see resume).
:param str dir: The direction of the sort.
:param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
:return: SnapshotLocksExtended
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['sid', 'sort', 'limit', 'dir', 'resume']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_snapshot_locks" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'sid' is set
if ('sid' not in params) or (params['sid'] is None):
raise ValueError("Missing the required parameter `sid` when calling `list_snapshot_locks`")
if 'limit' in params and params['limit'] < 1.0:
raise ValueError("Invalid value for parameter `limit` when calling `list_snapshot_locks`, must be a value greater than or equal to `1.0`")
resource_path = '/platform/1/snapshot/snapshots/{Sid}/locks'.replace('{format}', 'json')
path_params = {}
if 'sid' in params:
path_params['Sid'] = params['sid']
query_params = {}
if 'sort' in params:
query_params['sort'] = params['sort']
if 'limit' in params:
query_params['limit'] = params['limit']
if 'dir' in params:
query_params['dir'] = params['dir']
if 'resume' in params:
query_params['resume'] = params['resume']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SnapshotLocksExtended',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def update_snapshot_lock(self, snapshot_lock, snapshot_lock_id, sid, **kwargs):
"""
Modify lock. All input fields are optional, but one or more must be supplied.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_snapshot_lock(snapshot_lock, snapshot_lock_id, sid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param SnapshotLock snapshot_lock: (required)
:param str snapshot_lock_id: Modify lock. All input fields are optional, but one or more must be supplied. (required)
:param str sid: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['snapshot_lock', 'snapshot_lock_id', 'sid']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_snapshot_lock" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'snapshot_lock' is set
if ('snapshot_lock' not in params) or (params['snapshot_lock'] is None):
raise ValueError("Missing the required parameter `snapshot_lock` when calling `update_snapshot_lock`")
# verify the required parameter 'snapshot_lock_id' is set
if ('snapshot_lock_id' not in params) or (params['snapshot_lock_id'] is None):
raise ValueError("Missing the required parameter `snapshot_lock_id` when calling `update_snapshot_lock`")
# verify the required parameter 'sid' is set
if ('sid' not in params) or (params['sid'] is None):
raise ValueError("Missing the required parameter `sid` when calling `update_snapshot_lock`")
resource_path = '/platform/1/snapshot/snapshots/{Sid}/locks/{SnapshotLockId}'.replace('{format}', 'json')
path_params = {}
if 'snapshot_lock_id' in params:
path_params['SnapshotLockId'] = params['snapshot_lock_id']
if 'sid' in params:
path_params['Sid'] = params['sid']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'snapshot_lock' in params:
body_params = params['snapshot_lock']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
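A short sketch of calling this generated client both synchronously and via the callback pattern its docstrings describe; the import path mirrors the file location, client construction follows the class's own no-argument fallback, and the snapshot id is an illustrative placeholder (host and credential configuration are not shown).

from isi_sdk.api_client import ApiClient
from isi_sdk.apis.snapshot_snapshots_api import SnapshotSnapshotsApi

api = SnapshotSnapshotsApi(ApiClient())

# synchronous call: blocks until a SnapshotLocksExtended response is returned
locks = api.list_snapshot_locks('23', limit=10, sort='expires')

# asynchronous call: returns the request thread and invokes the callback with the response
def on_response(response):
    print(response)

thread = api.list_snapshot_locks('23', callback=on_response)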
| 39.767857
| 175
| 0.559901
| 2,294
| 22,270
| 5.255449
| 0.109416
| 0.072661
| 0.034837
| 0.022893
| 0.834439
| 0.812956
| 0.807814
| 0.807814
| 0.797279
| 0.795289
| 0
| 0.001463
| 0.355276
| 22,270
| 559
| 176
| 39.838998
| 0.838209
| 0.275707
| 0
| 0.780822
| 0
| 0.003425
| 0.200382
| 0.03893
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023973
| false
| 0
| 0.023973
| 0
| 0.071918
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1e5aac9150872fdb6c9ad65913273dab404183bd
| 276
|
py
|
Python
|
tcpwave_client/__init__.py
|
TCPWAVE/tims-python-client
|
fb4f61b5467d2ad7f1170f1c17fc4e3d4b4d759d
|
[
"Apache-2.0"
] | 1
|
2020-04-14T09:15:44.000Z
|
2020-04-14T09:15:44.000Z
|
tcpwave_client/__init__.py
|
TCPWAVE/tims-python-client
|
fb4f61b5467d2ad7f1170f1c17fc4e3d4b4d759d
|
[
"Apache-2.0"
] | null | null | null |
tcpwave_client/__init__.py
|
TCPWAVE/tims-python-client
|
fb4f61b5467d2ad7f1170f1c17fc4e3d4b4d759d
|
[
"Apache-2.0"
] | null | null | null |
from tcpwave_client.exceptions import IPAMException
from tcpwave_client.exceptions import APICallFailedException
from tcpwave_client.exceptions import UnsupportedMethodException
from tcpwave_client.connector import Connector
from tcpwave_client.networks import NetworkManager
| 46
| 64
| 0.90942
| 30
| 276
| 8.2
| 0.366667
| 0.223577
| 0.345528
| 0.329268
| 0.402439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072464
| 276
| 5
| 65
| 55.2
| 0.960938
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
1e880f25228f9cac42ac8d201107bcbc52709283
| 37
|
py
|
Python
|
services/__init__.py
|
hirusha-adi/GifGang
|
7b49767d9c0321844d7fc70a55a288d72c48acb5
|
[
"MIT"
] | null | null | null |
services/__init__.py
|
hirusha-adi/GifGang
|
7b49767d9c0321844d7fc70a55a288d72c48acb5
|
[
"MIT"
] | null | null | null |
services/__init__.py
|
hirusha-adi/GifGang
|
7b49767d9c0321844d7fc70a55a288d72c48acb5
|
[
"MIT"
] | null | null | null |
from . import sfw
from . import nsfw
| 12.333333
| 18
| 0.72973
| 6
| 37
| 4.5
| 0.666667
| 0.740741
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.216216
| 37
| 2
| 19
| 18.5
| 0.931034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
1ee38a5e6bb3de059ea2e8d62ac4318b5f6ab47f
| 165
|
py
|
Python
|
7KYU/generate_pairs.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | 4
|
2021-07-17T22:48:03.000Z
|
2022-03-25T14:10:58.000Z
|
7KYU/generate_pairs.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | null | null | null |
7KYU/generate_pairs.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | 3
|
2021-06-14T14:18:16.000Z
|
2022-03-16T06:02:02.000Z
|
from itertools import combinations_with_replacement
def generate_pairs(n: int) -> list:
return [list(i) for i in combinations_with_replacement(range(n+1), 2)]
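For illustration, combinations_with_replacement over range(n + 1) yields every non-decreasing pair from 0 to n inclusive:

print(generate_pairs(2))
# [[0, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 2]]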
| 27.5
| 74
| 0.769697
| 25
| 165
| 4.88
| 0.76
| 0.262295
| 0.442623
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013986
| 0.133333
| 165
| 5
| 75
| 33
| 0.839161
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 9
|
94904f3da1ced4cfd6f0756bc280c7f82b1614e7
| 5,323
|
py
|
Python
|
NeurASP-master/examples/offset_sudoku/trainer.py
|
Damzwan/deepproblog
|
56bcf5208e79c17510b5d288068fabc6cd64f3cf
|
[
"Apache-2.0"
] | 2
|
2022-02-25T10:09:59.000Z
|
2022-02-26T20:29:56.000Z
|
NeurASP-master/examples/offset_sudoku/trainer.py
|
Damzwan/deepproblog
|
56bcf5208e79c17510b5d288068fabc6cd64f3cf
|
[
"Apache-2.0"
] | null | null | null |
NeurASP-master/examples/offset_sudoku/trainer.py
|
Damzwan/deepproblog
|
56bcf5208e79c17510b5d288068fabc6cd64f3cf
|
[
"Apache-2.0"
] | null | null | null |
from dataGen import to_onehot
def Train_Test(model, train_loader, validation_loader, opt, criterion, epochs):
print("Training...")
opt.zero_grad()
acc_hist_train=[]
acc_hist_val=[]
loss_hist=[]
sample_hist=[]
whole_board_acc=[]
whole_acc_weighted_hist=[]
for epoch in range(epochs):
model.train()
acc=0
correct=0.
total=0.
for i_batch,batch in enumerate(train_loader):
opt.zero_grad()
x=batch[0].cuda()
labels=batch[1].float()
labels_acc=labels.view(-1,81)
labels=to_onehot(labels,len(labels)).cuda()
outputs_acc=model(x)
outputs=outputs_acc.view(len(labels),-1)
outputs_acc=outputs_acc.max(2)[1].float() # take max over the probabilities
loss = criterion(outputs,labels)
loss.backward()
labels_acc=labels_acc.cuda()
acc=float((outputs_acc==labels_acc).sum())/outputs_acc.numel()
acc_hist_train.append(acc)
true_acc=0.
for l,o in zip(labels_acc,outputs_acc):
if (l.tolist()==o.tolist()):
true_acc+=1
true_acc/=len(labels_acc)
if (epoch+1)%100==0:
print('epoch {0} out of {1}'.format(epoch+1,epochs))
print('Training set (single batch): Loss = {0} grid_cell_acc = {1} whole_board_acc={2}'.format(loss,acc,true_acc))
loss_hist.append(loss.data.item())
opt.step()
model.eval()
acc=0
correct=0.
total=0.
whole_acc_weighted=0.
whole_val_denom=0.
if (epoch+1)%100==0:
for i_batch,batch in enumerate(validation_loader):
opt.zero_grad()
x=batch[0].cuda()
labels=batch[1].float()
labels_acc=labels.view(-1,81)
labels=to_onehot(labels,len(labels)).cuda()
outputs_acc=model(x)
outputs=outputs_acc.view(len(labels),-1)
outputs_acc=outputs_acc.max(2)[1].float()
loss = criterion(outputs,labels)
labels_acc=labels_acc.cuda()
acc=float((outputs_acc==labels_acc).sum())/outputs_acc.numel()
acc_hist_val.append(acc)
true_acc=0.
whole_val_denom+=len(batch[0])
for l,o in zip(labels_acc,outputs_acc):
if (l.tolist()==o.tolist()):
true_acc+=1
true_acc/=len(labels_acc)
whole_acc_weighted+=true_acc*len(batch[0])
whole_board_acc.append(true_acc)
loss = criterion(outputs,labels)
whole_acc_weighted_hist.append(whole_acc_weighted/whole_val_denom);
print('validation set : Loss = {0} grid_cell_acc = {1} whole_board_acc, is {2}'.format(loss, acc, whole_acc_weighted/whole_val_denom))
return model
def Test(model, train_loader, validation_loader, opt, criterion, epochs):
print("Testing...")
opt.zero_grad()
acc_hist_train=[]
acc_hist_val=[]
loss_hist=[]
sample_hist=[]
whole_board_acc=[]
whole_acc_weighted_hist=[]
for epoch in range(epochs):
model.eval()
acc=0
correct=0.
total=0.
whole_acc_weighted=0.
whole_val_denom=0.
if (epoch+1)%100==0:
for i_batch,batch in enumerate(validation_loader):
opt.zero_grad()
x=batch[0].cuda()
labels=batch[1].float()
labels_acc=labels.view(-1,81)
labels=to_onehot(labels,len(labels)).cuda()
outputs_acc=model(x)
outputs=outputs_acc.view(len(labels),-1)
outputs_acc=outputs_acc.max(2)[1].float()
loss = criterion(outputs,labels)
labels_acc=labels_acc.cuda()
acc=float((outputs_acc==labels_acc).sum())/outputs_acc.numel()
acc_hist_val.append(acc)
true_acc=0.
whole_val_denom+=len(batch[0])
for l,o in zip(labels_acc,outputs_acc):
if (l.tolist()==o.tolist()):
true_acc+=1
true_acc/=len(labels_acc)
whole_acc_weighted+=true_acc*len(batch[0])
whole_board_acc.append(true_acc)
loss = criterion(outputs,labels)
whole_acc_weighted_hist.append(whole_acc_weighted/whole_val_denom);
print('validation set : Loss = {0} grid_cell_acc = {1} whole_board_acc, is {2}'.format(loss, acc, whole_acc_weighted/whole_val_denom))
| 34.564935
| 163
| 0.493707
| 598
| 5,323
| 4.133779
| 0.128763
| 0.084951
| 0.07767
| 0.046117
| 0.895631
| 0.884304
| 0.877427
| 0.866909
| 0.866909
| 0.854773
| 0
| 0.023082
| 0.397708
| 5,323
| 154
| 164
| 34.564935
| 0.747973
| 0.005824
| 0
| 0.875
| 0
| 0
| 0.057816
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017857
| false
| 0
| 0.008929
| 0
| 0.035714
| 0.053571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
949d8d33aecbba1618f080745415e3370e75046e
| 24,469
|
py
|
Python
|
mbuild/tests/test_rigid.py
|
tcmoore3/mbuild
|
820e53821caa79dcce5642f4a6bf724e0e384900
|
[
"MIT"
] | null | null | null |
mbuild/tests/test_rigid.py
|
tcmoore3/mbuild
|
820e53821caa79dcce5642f4a6bf724e0e384900
|
[
"MIT"
] | null | null | null |
mbuild/tests/test_rigid.py
|
tcmoore3/mbuild
|
820e53821caa79dcce5642f4a6bf724e0e384900
|
[
"MIT"
] | null | null | null |
import pytest
import numpy as np
import mbuild as mb
from mbuild.tests.base_test import BaseTest
from mbuild.utils.io import get_fn
class TestRigid(BaseTest):
def test_load_rigid(self, rigid_benzene):
assert rigid_benzene.contains_rigid is True
assert rigid_benzene[0].contains_rigid is False
assert rigid_benzene.rigid_id is None
assert rigid_benzene.max_rigid_id is 0
assert len(list(rigid_benzene.rigid_particles(rigid_id=0))) == 12
def test_load_nonrigid(self, benzene):
assert benzene.contains_rigid is False
assert benzene.rigid_id is None
assert benzene.max_rigid_id is None
assert len(list(benzene.rigid_particles())) == 0
def test_rigid_from_parts(self, rigid_ch):
benzene = mb.Compound()
benzene.add(rigid_ch)
current = rigid_ch
for _ in range(5):
ch_new = mb.clone(rigid_ch)
mb.force_overlap(move_this=ch_new,
from_positions=ch_new['a'],
to_positions=current['b'])
current = ch_new
benzene.add(ch_new, reset_rigid_ids=False)
carbons = [p for p in benzene.particles_by_name('C')]
benzene.add_bond((carbons[0],carbons[-1]))
assert benzene.contains_rigid is True
assert benzene.rigid_id is None
assert benzene.max_rigid_id is 0
assert benzene.children[0].contains_rigid == True
assert benzene.children[0].rigid_id is None
assert len(list(benzene.rigid_particles(rigid_id=0))) == 12
def test_rigid_from_parts2(self, rigid_ch):
benzene = mb.Compound()
benzene.add(rigid_ch, reset_rigid_ids=False)
current = rigid_ch
for _ in range(5):
ch_new = mb.clone(rigid_ch)
mb.force_overlap(move_this=ch_new,
from_positions=ch_new['a'],
to_positions=current['b'])
current = ch_new
benzene.add(ch_new, reset_rigid_ids=False)
carbons = [p for p in benzene.particles_by_name('C')]
benzene.add_bond((carbons[0],carbons[-1]))
assert benzene.contains_rigid is True
assert benzene.rigid_id is None
assert benzene.max_rigid_id is 0
assert benzene.children[0].contains_rigid == True
assert benzene.children[0].rigid_id is None
assert len(list(benzene.rigid_particles(rigid_id=0))) == 12
def test_rigid_from_parts3(self, rigid_ch):
benzene = mb.Compound()
benzene.add(rigid_ch)
current = rigid_ch
for _ in range(5):
ch_new = mb.clone(rigid_ch)
mb.force_overlap(move_this=ch_new,
from_positions=ch_new['a'],
to_positions=current['b'])
current = ch_new
benzene.add(ch_new)
carbons = [p for p in benzene.particles_by_name('C')]
benzene.add_bond((carbons[0],carbons[-1]))
assert benzene.contains_rigid is True
assert benzene.rigid_id is None
assert benzene.max_rigid_id is 5
assert benzene.children[0].contains_rigid == True
assert benzene.children[0].rigid_id is None
for rigid_id in range(6):
assert len(list(benzene.rigid_particles(rigid_id=rigid_id))) == 2
def test_nonrigid_from_parts(self, benzene_from_parts):
assert benzene_from_parts.contains_rigid is False
assert benzene_from_parts.rigid_id is None
assert benzene_from_parts.max_rigid_id is None
assert len(list(benzene_from_parts.rigid_particles())) == 0
def test_label_rigid_bodies_single_partial(self, benzene):
benzene.label_rigid_bodies(rigid_particles='C')
assert benzene.contains_rigid == True
assert benzene.rigid_id is None
assert benzene.max_rigid_id is 0
assert len(list(benzene.rigid_particles())) == 6
assert len(list(benzene.rigid_particles(rigid_id=0))) == 6
def test_save_non_sequential_rigid_ids(self, benzene):
n_benzenes = 10
filled = mb.fill_box(benzene,
n_compounds=n_benzenes,
box=[0, 0, 0, 4, 4, 4])
filled.label_rigid_bodies(discrete_bodies='Benzene')
filled.children[0]._increment_rigid_ids(increment=3)
with pytest.warns(UserWarning):
filled.save('benzene-box.hoomdxml')
def test_increment_rigid_id(self, rigid_benzene):
compound = mb.Compound()
rigid_benzene2 = mb.clone(rigid_benzene)
compound.add(rigid_benzene)
compound.add(rigid_benzene2)
assert rigid_benzene.contains_rigid is True
assert rigid_benzene.rigid_id is None
assert rigid_benzene.max_rigid_id is 0
assert rigid_benzene2.contains_rigid is True
assert rigid_benzene2.rigid_id is None
assert rigid_benzene2.max_rigid_id is 1
assert compound.contains_rigid is True
assert compound.rigid_id is None
assert compound.max_rigid_id is 1
assert len(list(compound.rigid_particles(rigid_id=0))) == 12
assert len(list(compound.rigid_particles(rigid_id=1))) == 12
def test_increment_rigid_id_partial(self, benzene):
compound = mb.Compound()
benzene.label_rigid_bodies(rigid_particles='C')
benzene2 = mb.clone(benzene)
compound.add(benzene)
compound.add(benzene2)
assert benzene.contains_rigid is True
assert benzene.rigid_id is None
assert benzene.max_rigid_id is 0
assert benzene2.contains_rigid is True
assert benzene2.rigid_id is None
assert benzene2.max_rigid_id is 1
assert compound.contains_rigid is True
assert compound.rigid_id is None
assert compound.max_rigid_id is 1
assert len(list(compound.rigid_particles())) == 12
assert len(list(compound.rigid_particles(rigid_id=0))) == 6
assert len(list(compound.rigid_particles(rigid_id=1))) == 6
def test_turn_into_rigid_after_add(self, benzene):
compound = mb.Compound()
benzene2 = mb.clone(benzene)
compound.add(benzene)
compound.add(benzene2)
benzene.label_rigid_bodies()
assert benzene.contains_rigid is True
assert benzene.rigid_id is None
assert benzene.max_rigid_id is 0
assert benzene2.contains_rigid is False
assert benzene2.rigid_id is None
assert benzene2.max_rigid_id is None
assert compound.contains_rigid is True
assert compound.rigid_id is None
assert compound.max_rigid_id is 0
assert len(list(compound.rigid_particles())) == 12
assert len(list(compound.rigid_particles(rigid_id=0))) == 12
assert len(list(benzene.rigid_particles(rigid_id=0))) == 12
assert len(list(benzene2.rigid_particles())) == 0
def test_turn_into_rigid_after_add_multi(self, benzene):
compound = mb.Compound()
benzene2 = mb.clone(benzene)
compound.add(benzene)
compound.add(benzene2)
benzene.label_rigid_bodies()
benzene2.label_rigid_bodies()
assert benzene.contains_rigid is True
assert benzene.rigid_id is None
assert benzene.max_rigid_id is 0
assert benzene2.contains_rigid is True
assert benzene2.rigid_id is None
assert benzene2.max_rigid_id is 1
assert compound.contains_rigid is True
assert compound.rigid_id is None
assert compound.max_rigid_id is 1
assert len(list(compound.rigid_particles())) == 24
assert len(list(compound.rigid_particles(rigid_id=0))) == 12
assert len(list(compound.rigid_particles(rigid_id=1))) == 12
assert len(list(benzene.rigid_particles(rigid_id=0))) == 12
assert len(list(benzene2.rigid_particles(rigid_id=1))) == 12
def test_turn_into_rigid_after_add_parent(self, benzene):
compound = mb.Compound()
benzene2 = mb.clone(benzene)
compound.add(benzene)
compound.add(benzene2)
compound.label_rigid_bodies()
assert benzene.contains_rigid is True
assert benzene.rigid_id is None
assert benzene.max_rigid_id is 0
assert benzene2.contains_rigid is True
assert benzene2.rigid_id is None
assert benzene2.max_rigid_id is 0
assert compound.contains_rigid is True
assert compound.rigid_id is None
assert compound.max_rigid_id is 0
assert len(list(compound.rigid_particles())) == 24
assert len(list(compound.rigid_particles(rigid_id=0))) == 24
assert len(list(benzene.rigid_particles(rigid_id=0))) == 12
assert len(list(benzene2.rigid_particles(rigid_id=0))) == 12
def test_label_rigid_bodies_multi(self, benzene):
compound = mb.Compound()
benzene2 = mb.clone(benzene)
compound.add(benzene)
compound.add(benzene2)
compound.label_rigid_bodies(discrete_bodies='Benzene')
assert benzene.contains_rigid is True
assert benzene.rigid_id is None
assert benzene.max_rigid_id is 0
assert benzene2.contains_rigid is True
assert benzene2.rigid_id is None
assert benzene2.max_rigid_id is 1
assert compound.contains_rigid is True
assert compound.rigid_id is None
assert compound.max_rigid_id is 1
assert len(list(compound.rigid_particles())) == 24
assert len(list(compound.rigid_particles(rigid_id=0))) == 12
assert len(list(compound.rigid_particles(rigid_id=1))) == 12
assert len(list(benzene.rigid_particles(rigid_id=0))) == 12
assert len(list(benzene2.rigid_particles(rigid_id=1))) == 12
def test_label_rigid_bodies_multi_partial(self, benzene):
compound = mb.Compound()
benzene2 = mb.clone(benzene)
compound.add(benzene)
compound.add(benzene2)
compound.label_rigid_bodies(discrete_bodies='Benzene', rigid_particles='C')
assert benzene.contains_rigid is True
assert benzene.rigid_id is None
assert benzene.max_rigid_id is 0
assert benzene2.contains_rigid is True
assert benzene2.rigid_id is None
assert benzene2.max_rigid_id is 1
assert compound.contains_rigid is True
assert compound.rigid_id is None
assert compound.max_rigid_id is 1
assert len(list(compound.rigid_particles())) == 12
assert len(list(compound.rigid_particles(rigid_id=0))) == 6
assert len(list(compound.rigid_particles(rigid_id=1))) == 6
assert len(list(benzene.rigid_particles(rigid_id=0))) == 6
assert len(list(benzene2.rigid_particles(rigid_id=1))) == 6
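# The fill_box tests below check that rigid-body labels survive packing: each of the
# n_benzenes copies should come out with its own rigid_id and the expected particle count.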
def test_fill_box_rigid(self, rigid_benzene):
n_benzenes = 10
filled = mb.fill_box(rigid_benzene,
n_compounds=n_benzenes,
box=[0, 0, 0, 4, 4, 4])
assert filled.contains_rigid is True
assert filled.rigid_id is None
assert filled.max_rigid_id == n_benzenes - 1
assert len(list(filled.rigid_particles())) == n_benzenes * rigid_benzene.n_particles
def test_fill_box_semi_rigid(self, benzene):
n_benzenes = 10
benzene.label_rigid_bodies(rigid_particles='C')
filled = mb.fill_box(benzene,
n_compounds=n_benzenes,
box=[0, 0, 0, 4, 4, 4])
assert filled.contains_rigid is True
assert filled.rigid_id is None
assert filled.max_rigid_id == n_benzenes - 1
assert len(list(filled.rigid_particles())) == n_benzenes * 6
def test_label_rigid_bodies_after_fill(self, benzene):
n_benzenes = 10
filled = mb.fill_box(benzene,
n_compounds=n_benzenes,
box=[0, 0, 0, 4, 4, 4])
filled.label_rigid_bodies(discrete_bodies='Benzene')
assert filled.contains_rigid is True
assert filled.rigid_id is None
assert filled.max_rigid_id == n_benzenes - 1
assert len(list(filled.rigid_particles())) == n_benzenes * benzene.n_particles
def test_label_rigid_bodies_list(self, benzene):
n_benzenes = 10
filled = mb.fill_box(benzene,
n_compounds=n_benzenes,
box=[0, 0, 0, 4, 4, 4])
filled.children[0].name = 'Benzene0'
filled.children[1].name = 'Benzene1'
filled.label_rigid_bodies(discrete_bodies=['Benzene0', 'Benzene1'])
assert filled.contains_rigid is True
assert filled.rigid_id is None
assert filled.max_rigid_id == 1
assert len(list(filled.rigid_particles())) == 2 * benzene.n_particles
def test_label_rigid_bodies_list_particle_list(self, benzene):
n_benzenes = 10
filled = mb.fill_box(benzene,
n_compounds=n_benzenes,
box=[0, 0, 0, 4, 4, 4])
filled.children[0].name = 'Benzene0'
filled.children[1].name = 'Benzene1'
filled.label_rigid_bodies(discrete_bodies=['Benzene0', 'Benzene1'],
rigid_particles=['C', 'H'])
assert filled.contains_rigid is True
assert filled.rigid_id is None
assert filled.max_rigid_id == 1
assert len(list(filled.rigid_particles())) == 2 * benzene.n_particles
def test_label_rigid_bodies_duplicate_warn(self, rigid_benzene):
with pytest.warns(UserWarning):
n_benzenes = 10
filled = mb.fill_box(rigid_benzene,
n_compounds=n_benzenes,
box=[0, 0, 0, 4, 4, 4])
filled.label_rigid_bodies(discrete_bodies='Benzene')
def test_label_semi_rigid_bodies_after_fill(self, benzene):
n_benzenes = 10
filled = mb.fill_box(benzene,
n_compounds=n_benzenes,
box=[0, 0, 0, 4, 4, 4])
filled.label_rigid_bodies(discrete_bodies='Benzene', rigid_particles='C')
assert filled.max_rigid_id == n_benzenes - 1
assert len(list(filled.rigid_particles())) == n_benzenes * 6
def test_create_semi_rigid_bodies_hierarchy(self, benzene_from_parts):
n_benzenes = 10
filled = mb.fill_box(benzene_from_parts,
n_compounds=n_benzenes,
box=[0, 0, 0, 4, 4, 4])
filled.name = 'Benzene box'
filled2 = mb.clone(filled)
compound = mb.Compound(subcompounds=[filled, filled2])
compound.label_rigid_bodies(discrete_bodies='Benzene box')
assert compound.max_rigid_id == 1
assert filled.max_rigid_id == 0
assert filled2.max_rigid_id == 1
assert len(list(compound.rigid_particles())) == n_benzenes * 2 * 12
compound.unlabel_rigid_bodies()
compound.label_rigid_bodies(discrete_bodies='Benzene', rigid_particles='C')
assert compound.max_rigid_id == (n_benzenes*2) - 1
assert filled.max_rigid_id == n_benzenes - 1
assert filled2.max_rigid_id == (n_benzenes*2) - 1
assert len(list(compound.rigid_particles())) == n_benzenes * 2 * 6
assert len(list(filled.rigid_particles())) == n_benzenes * 6
assert len(list(filled2.rigid_particles())) == n_benzenes * 6
compound.unlabel_rigid_bodies()
compound.label_rigid_bodies(discrete_bodies='CH')
assert compound.max_rigid_id == (n_benzenes*2*6) - 1
assert filled.max_rigid_id == (n_benzenes*6) - 1
assert filled2.max_rigid_id == (n_benzenes*2*6) - 1
assert len(list(compound.rigid_particles())) == n_benzenes * 2 * 12
assert len(list(filled.rigid_particles())) == n_benzenes * 12
assert len(list(filled2.rigid_particles())) == n_benzenes * 12
def test_create_semi_rigid_bodies_filled_clone(self, benzene_from_parts):
n_benzenes = 10
filled = mb.fill_box(benzene_from_parts,
n_compounds=n_benzenes,
box=[0, 0, 0, 4, 4, 4])
filled.label_rigid_bodies(discrete_bodies='Benzene', rigid_particles='C')
filled2 = mb.clone(filled)
filled.add(filled2)
assert filled.max_rigid_id == (n_benzenes*2) - 1
assert len(list(filled.rigid_particles())) == n_benzenes * 2 * 6
for rigid_id in range(n_benzenes * 2):
assert len(list(filled.rigid_particles(rigid_id=rigid_id))) == 6
def test_create_semi_rigid_bodies_filled_no_increment(self, benzene_from_parts):
n_benzenes = 10
filled = mb.fill_box(benzene_from_parts,
n_compounds=n_benzenes,
box=[0, 0, 0, 4, 4, 4])
filled.label_rigid_bodies(discrete_bodies='Benzene', rigid_particles='C')
filled2 = mb.clone(filled)
filled.add(filled2, reset_rigid_ids=False)
assert filled.max_rigid_id == n_benzenes - 1
assert len(list(filled.rigid_particles())) == n_benzenes * 2 * 6
for rigid_id in range(n_benzenes):
assert len(list(filled.rigid_particles(rigid_id=rigid_id))) == 12
def test_delete_body(self, rigid_benzene):
n_benzenes = 10
filled = mb.fill_box(rigid_benzene,
n_compounds=n_benzenes,
box=[0, 0, 0, 4, 4, 4])
filled.remove(filled.children[0])
assert filled.max_rigid_id == n_benzenes - 2
assert len(list(filled.rigid_particles())) == (n_benzenes - 1) * rigid_benzene.n_particles
def test_delete_body_particle_by_particle(self, rigid_benzene):
n_benzenes = 10
filled = mb.fill_box(rigid_benzene,
n_compounds=n_benzenes,
box=[0, 0, 0, 4, 4, 4])
for particle in filled.children[0].particles():
filled.remove(particle)
assert filled.max_rigid_id == n_benzenes - 2
assert len(list(filled.rigid_particles())) == (n_benzenes - 1) * rigid_benzene.n_particles
def test_delete_body_multiple(self, rigid_benzene):
n_benzenes = 10
filled = mb.fill_box(rigid_benzene,
n_compounds=n_benzenes,
box=[0, 0, 0, 4, 4, 4])
filled.remove([filled.children[0], filled.children[1]])
assert filled.max_rigid_id == n_benzenes - 3
assert len(list(filled.rigid_particles())) == (n_benzenes - 2) * rigid_benzene.n_particles
def test_delete_body_all(self, rigid_benzene):
n_benzenes = 10
filled = mb.fill_box(rigid_benzene,
n_compounds=n_benzenes,
box=[0, 0, 0, 4, 4, 4])
for i, child in enumerate(filled.children[:-1]):
filled.remove(child)
assert filled.max_rigid_id == n_benzenes - 1 - (i + 1)
assert len(list(filled.rigid_particles())) == (n_benzenes - (i + 1)) * rigid_benzene.n_particles
assert filled.contains_rigid is True
filled.remove(filled.children[0])
assert filled.contains_rigid is False
assert filled.max_rigid_id is None
def test_delete_body_semi_rigid(self, benzene):
n_benzenes = 10
filled = mb.fill_box(benzene,
n_compounds=n_benzenes,
box=[0, 0, 0, 4, 4, 4])
filled.label_rigid_bodies(discrete_bodies='Benzene', rigid_particles='C')
filled.remove(filled.children[0])
assert filled.max_rigid_id == n_benzenes - 2
assert len(list(filled.rigid_particles())) == (n_benzenes - 1) * 6
def test_rigid_with_subcompounds1(self, rigid_benzene):
compound = mb.Compound(subcompounds=rigid_benzene)
assert compound.contains_rigid is True
assert compound.rigid_id is None
assert compound.max_rigid_id is 0
assert rigid_benzene.contains_rigid is True
assert rigid_benzene.rigid_id is None
assert rigid_benzene.max_rigid_id is 0
assert len(list(compound.rigid_particles())) == 12
assert len(list(compound.rigid_particles(rigid_id=0))) == 12
def test_rigid_with_subcompounds2(self, rigid_benzene):
rigid_benzene2 = mb.clone(rigid_benzene)
compound = mb.Compound(subcompounds=[rigid_benzene, rigid_benzene2])
assert compound.max_rigid_id is 1
assert rigid_benzene.max_rigid_id is 0
assert rigid_benzene2.max_rigid_id is 1
assert len(list(compound.rigid_particles())) == 24
assert len(list(compound.rigid_particles(rigid_id=0))) == 12
assert len(list(compound.rigid_particles(rigid_id=1))) == 12
def test_rigid_with_subcompounds3(self, benzene):
benzene.label_rigid_bodies(rigid_particles='C')
compound = mb.Compound(subcompounds=benzene)
assert compound.contains_rigid is True
assert compound.rigid_id is None
assert compound.max_rigid_id is 0
assert benzene.contains_rigid is True
assert benzene.rigid_id is None
assert benzene.max_rigid_id is 0
assert len(list(compound.rigid_particles())) == 6
assert len(list(compound.rigid_particles(rigid_id=0))) == 6
def test_rigid_with_subcompounds4(self, benzene):
benzene.label_rigid_bodies(rigid_particles='C')
benzene2 = mb.clone(benzene)
compound = mb.Compound(subcompounds=[benzene, benzene2])
assert compound.contains_rigid is True
assert compound.rigid_id is None
assert compound.max_rigid_id is 1
assert benzene.contains_rigid is True
assert benzene.rigid_id is None
assert benzene.max_rigid_id is 0
assert benzene2.contains_rigid is True
assert benzene2.rigid_id is None
assert benzene2.max_rigid_id is 1
assert len(list(compound.rigid_particles())) == 12
assert len(list(compound.rigid_particles(rigid_id=0))) == 6
assert len(list(compound.rigid_particles(rigid_id=1))) == 6
def test_rigid_with_subcompounds5(self, rigid_benzene):
rigid_benzene2 = mb.clone(rigid_benzene)
double = mb.Compound(subcompounds=[rigid_benzene, rigid_benzene2])
double2 = mb.clone(double)
compound = mb.Compound(subcompounds=[double, double2])
assert compound.max_rigid_id is 3
assert len(list(compound.rigid_particles())) == 48
for rigid_id in range(4):
assert len(list(compound.rigid_particles(rigid_id=rigid_id))) == 12
def test_set_rigid_not_particle(self, benzene_from_parts):
benzene_from_parts.label_rigid_bodies(rigid_particles=['C','H'])
assert benzene_from_parts.contains_rigid is True
assert benzene_from_parts.rigid_id is None
assert benzene_from_parts.max_rigid_id is 0
assert len(list(benzene_from_parts.rigid_particles())) == 12
assert len(list(benzene_from_parts.rigid_particles(rigid_id=0))) == 12
def test_manual_set_rigid_id(self, benzene):
benzene[0].rigid_id = 0
assert benzene.contains_rigid is True
assert benzene[0].contains_rigid is False
assert benzene.max_rigid_id is 0
assert len(list(benzene.rigid_particles())) == 1
def test_manual_set_rigid_id_error(self, benzene):
with pytest.raises(AttributeError):
benzene.rigid_id = 0
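# In the two tests below, reset_rigid_ids=False preserves the manually assigned rigid_id
# of 0 on both single-particle compounds, so the parent reports one rigid body with two particles.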
def test_build_from_single_particle(self):
compound = mb.Compound()
compound.rigid_id = 0
atom = mb.Compound(name='atom')
atom.rigid_id = 0
atom2 = mb.clone(atom)
compound.add([atom, atom2], reset_rigid_ids=False)
assert compound.contains_rigid is True
assert compound.rigid_id is None
assert compound.max_rigid_id is 0
assert len(list(compound.rigid_particles())) == 2
def test_build_from_single_particle2(self):
compound = mb.Compound()
compound.rigid_id = 0
atom = mb.Compound(name='atom')
atom.rigid_id = 0
atom2 = mb.clone(atom)
compound.add(atom)
compound.add(atom2, reset_rigid_ids=False)
assert compound.contains_rigid is True
assert compound.rigid_id is None
assert compound.max_rigid_id is 0
assert len(list(compound.rigid_particles())) == 2
| 42.407279
| 108
| 0.645878
| 3,256
| 24,469
| 4.589681
| 0.042383
| 0.077757
| 0.054202
| 0.043496
| 0.924317
| 0.898354
| 0.867037
| 0.826151
| 0.786938
| 0.735345
| 0
| 0.026292
| 0.264784
| 24,469
| 576
| 109
| 42.480903
| 0.804391
| 0
| 0
| 0.711382
| 0
| 0
| 0.008582
| 0
| 0
| 0
| 0
| 0
| 0.48374
| 1
| 0.081301
| false
| 0
| 0.010163
| 0
| 0.093496
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 94bf830b353ce4937104bff7a6221ba172acc4b3
| 1,608
| py
| Python
| Day 3/ml.py
| RedoC-github/Gifted-Information-2021
| 90a3cd1100d2d1407083a42a2afdffe521e21f76
| ["MIT"] | null | null | null
| Day 3/ml.py
| RedoC-github/Gifted-Information-2021
| 90a3cd1100d2d1407083a42a2afdffe521e21f76
| ["MIT"] | null | null | null
| Day 3/ml.py
| RedoC-github/Gifted-Information-2021
| 90a3cd1100d2d1407083a42a2afdffe521e21f76
| ["MIT"] | null | null | null |
# ML
import tensorflow.compat.v1 as tf  # TF1-style graph/session API
tf.disable_v2_behavior()
# hyperparameter
learning_rate = 0.01
epochs = 100
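# LinearRegression fits y = w*x + b to (datax, datay) with per-sample gradient descent
# on a squared-error cost and returns the learned weight and bias.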
def LinearRegression(datax, datay):
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
def model(w_, x_, b_):
return tf.add(tf.multiply(x_, w_), b_)
w = tf.Variable(0.0, name="Weight")
b = tf.Variable(0.0, name="Bias")
model_y = model(w, X, b)
cost = (tf.pow(Y-model_y, 2))
train = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# session
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
for epoch in range(epochs):
for (x, y) in zip(datax, datay):
sess.run(train, feed_dict={X: x, Y: y})
w_val = sess.run(w)
b_val = sess.run(b)
sess.close()
return w_val, b_val
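# LogRegression is the same training loop but fits the logarithmic model y = w*log(x) + b.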
def LogRegression(datax, datay):
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
def model(w_, x_, b_):
return tf.add(tf.multiply(tf.log(x_), w_), b_)
w = tf.Variable(0.0, name="Weight")
b = tf.Variable(0.0, name="Bias")
model_y = model(w, X, b)
cost = (tf.pow(Y-model_y, 2))
train = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# session
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
for epoch in range(epochs):
for (x, y) in zip(datax, datay):
sess.run(train, feed_dict={X: x, Y: y})
w_val = sess.run(w)
b_val = sess.run(b)
sess.close()
return w_val, b_val
| 25.125
| 75
| 0.61194
| 248
| 1,608
| 3.826613
| 0.237903
| 0.059009
| 0.063224
| 0.092729
| 0.8451
| 0.8451
| 0.8451
| 0.8451
| 0.8451
| 0.8451
| 0
| 0.021364
| 0.243159
| 1,608
| 64
| 76
| 25.125
| 0.758422
| 0.020522
| 0
| 0.8
| 0
| 0
| 0.012731
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088889
| false
| 0
| 0.044444
| 0.044444
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 94e3aa8b811be7b8efd5e9d9f5a96cbc4254b37a
| 108
| py
| Python
| pydestruct/data/export.py
| FilippoC/-deep-syntactic-dependency-parsing-release
| 30e2571ea930c2fd81559f5a2a971e3738cc6d39
| ["MIT"] | null | null | null
| pydestruct/data/export.py
| FilippoC/-deep-syntactic-dependency-parsing-release
| 30e2571ea930c2fd81559f5a2a971e3738cc6d39
| ["MIT"] | null | null | null
| pydestruct/data/export.py
| FilippoC/-deep-syntactic-dependency-parsing-release
| 30e2571ea930c2fd81559f5a2a971e3738cc6d39
| ["MIT"] | null | null | null |
import ctypes
import cpp_disc_span_parser
def read(path):
return cpp_disc_span_parser.read_data(path)
| 15.428571
| 47
| 0.814815
| 18
| 108
| 4.5
| 0.611111
| 0.17284
| 0.271605
| 0.419753
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12963
| 108
| 6
| 48
| 18
| 0.861702
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 7
| a22620eefc7b9a03016850d2cea65d79718cae76
| 657
| py
| Python
| ENV/lib/python3.6/site-packages/venusian/tests/fixtures/lifting5.py
| captain-c00keys/pyramid-stocks
| 0acf3363a6a7ee61cd41b855f43c9d6f9582ae6a
| ["MIT"] | null | null | null
| ENV/lib/python3.6/site-packages/venusian/tests/fixtures/lifting5.py
| captain-c00keys/pyramid-stocks
| 0acf3363a6a7ee61cd41b855f43c9d6f9582ae6a
| ["MIT"] | null | null | null
| ENV/lib/python3.6/site-packages/venusian/tests/fixtures/lifting5.py
| captain-c00keys/pyramid-stocks
| 0acf3363a6a7ee61cd41b855f43c9d6f9582ae6a
| ["MIT"] | null | null | null |
from venusian import lift
from venusian.tests.fixtures import decorator
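# Test fixture for venusian's @lift(): Super2 and Sub re-decorate a subset of the
# inherited methods, exercising how decorations are lifted along the class hierarchy.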
class Super1(object): # pragma: no cover
@decorator()
def classname(self): pass
@decorator()
def boo(self): pass
@decorator()
def ram(self): pass
@decorator()
def jump(self): pass
@lift()
class Super2(Super1): # pragma: no cover
@decorator()
def boo(self): pass
@decorator()
def hiss(self): pass
@decorator()
def jump(self): pass
@lift()
class Sub(Super2): # pragma: no cover
@decorator()
def boo(self): pass
@decorator()
def hiss(self): pass
@decorator()
def smack(self): pass
| 16.846154
| 45
| 0.607306
| 80
| 657
| 4.9875
| 0.3
| 0.300752
| 0.298246
| 0.350877
| 0.666667
| 0.60401
| 0.60401
| 0.516291
| 0.516291
| 0.360902
| 0
| 0.008299
| 0.266362
| 657
| 38
| 46
| 17.289474
| 0.819502
| 0.076104
| 0
| 0.703704
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.37037
| false
| 0.37037
| 0.074074
| 0
| 0.555556
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 7
| bf5ef343ddb820399d39338db3e05678f700ad83
| 104
| py
| Python
| klv/__init__.py
| cdusold/python3-klv
| 0f27cec891cef24184c1c683f8ef36d58decab75
| ["MIT"] | 1
| 2017-08-03T23:08:50.000Z
| 2017-08-03T23:08:50.000Z
| klv/__init__.py
| cdusold/python3-klv
| 0f27cec891cef24184c1c683f8ef36d58decab75
| ["MIT"] | null | null | null
| klv/__init__.py
| cdusold/python3-klv
| 0f27cec891cef24184c1c683f8ef36d58decab75
| ["MIT"] | null | null | null |
from .klv import decode_ber
from .klv import encode_ber
from .klv import encode
from .klv import decode
| 20.8
| 27
| 0.807692
| 18
| 104
| 4.555556
| 0.333333
| 0.341463
| 0.634146
| 0.463415
| 0.536585
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 104
| 4
| 28
| 26
| 0.931818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 8
| bfbfd64fd944cae4818c9ac723787134c7c9a36a
| 8,728
| py
| Python
| tests/test_na_building.py
| isambard-uob/isambard
| febb9d48cb16d75be7cf4252b5c36bad57897d58
| ["MIT"] | 13
| 2018-06-18T14:34:27.000Z
| 2021-11-27T19:49:07.000Z
| tests/test_na_building.py
| isambard-uob/isambard
| febb9d48cb16d75be7cf4252b5c36bad57897d58
| ["MIT"] | 14
| 2018-06-18T11:47:39.000Z
| 2022-03-18T17:34:40.000Z
| tests/test_na_building.py
| isambard-uob/isambard
| febb9d48cb16d75be7cf4252b5c36bad57897d58
| ["MIT"] | 5
| 2018-06-19T16:01:07.000Z
| 2021-07-10T14:23:25.000Z |
import unittest
import ampal
from hypothesis import given, settings
from hypothesis.strategies import integers, floats, tuples, text
from numpy import allclose
import isambard
import isambard.specifications
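# count_bonds gives the expected number of covalent bonds for a strand: a per-base
# lookup plus one backbone bond per linkage; the -3 in the phos=False case presumably
# accounts for the missing terminal phosphate.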
def count_bonds(sequence, phos=False):
bond_counts = {
'G': 24,
'C': 20,
'A': 23,
'T': 21
}
if not phos:
return sum([bond_counts[x] for x in sequence]) + (len(sequence) - 1) - 3
else:
return sum([bond_counts[x] for x in sequence]) + (len(sequence) - 1)
class TestSingleStrandHelix(unittest.TestCase):
@given(text('ATGC'))
@settings(max_examples=50)
def test_hna_random_seq(self, sequence):
"""Test straight building using random DNA sequences."""
if not sequence:
with self.assertRaises(ValueError):
isambard.specifications.NucleicAcidStrand(sequence)
else:
ssnh = isambard.specifications.NucleicAcidStrand(sequence)
self.assertEqual(len(ssnh), len(sequence))
ideal_bonds = count_bonds(sequence)
found_bonds = len(ampal.interactions.find_covalent_bonds(ssnh))
self.assertEqual(ideal_bonds, found_bonds)
@given(text('ATGC'))
@settings(max_examples=50)
def test_hna_bond_numbers_w_phos(self, sequence):
if not sequence:
with self.assertRaises(ValueError):
isambard.specifications.NucleicAcidStrand(sequence)
else:
ideal_bonds = count_bonds(sequence, phos=True)
ssnh = isambard.specifications.NucleicAcidStrand(
sequence, phos_3_prime=True)
found_bonds = len(ampal.interactions.find_covalent_bonds(ssnh))
self.assertEqual(ideal_bonds, found_bonds)
@given(tuples(*[integers(min_value=-10000, max_value=10000) for _ in range(3)]),
tuples(*[integers(min_value=-10000, max_value=10000) for _ in range(3)]))
@settings(max_examples=50)
def test_hna_start_end_int(self, start, end):
"""Test the from_start_and_end class method of the SingleStrandedHelix using ints."""
sequence = 'GAGATATACACA'
if start == end:
with self.assertRaises(ValueError):
isambard.specifications.NucleicAcidStrand.from_start_and_end(
start, end, sequence)
else:
ssnh = isambard.specifications.NucleicAcidStrand.from_start_and_end(
start, end, sequence)
self.assertEqual(len(ssnh), 12)
@given(tuples(*[floats(min_value=-10000, max_value=10000) for _ in range(3)]),
tuples(*[floats(min_value=-10000, max_value=10000) for _ in range(3)]))
@settings(max_examples=50)
def test_hna_start_end_floats(self, start, end):
"""Test the from_start_and_end class method of the SingleStrandedHelix using floats."""
sequence = 'GAGATATACACA'
if allclose(start, end):
with self.assertRaises(ValueError):
isambard.specifications.NucleicAcidStrand.from_start_and_end(
start, end, sequence)
else:
ssnh = isambard.specifications.NucleicAcidStrand.from_start_and_end(
start, end, sequence)
self.assertEqual(len(ssnh), 12)
@given(text('ATGC'),
tuples(*[integers(min_value=-10000, max_value=10000)
for _ in range(3)]),
tuples(*[integers(min_value=-10000, max_value=10000) for _ in range(3)]))
@settings(max_examples=50)
def test_hna_start_end_ran_seq(self, sequence, start, end):
"""Test SingleStrandHelix with random sequence, start and end."""
if allclose(start, end) or not sequence:
with self.assertRaises(ValueError):
isambard.specifications.NucleicAcidStrand.from_start_and_end(
start, end, sequence)
else:
ssnh = isambard.specifications.NucleicAcidStrand.from_start_and_end(
start, end, sequence)
self.assertEqual(len(ssnh), len(sequence))
ideal_bonds = count_bonds(sequence)
found_bonds = len(ampal.interactions.find_covalent_bonds(ssnh))
self.assertEqual(ideal_bonds, found_bonds)
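# Duplex tests: the antisense strand is generated from the input sequence, so bond
# counts are checked per strand and for the assembled duplex as a whole.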
class TestDNADuplex(unittest.TestCase):
@given(text('ATGC'))
@settings(max_examples=50)
def test_hna_random_seq(self, sequence):
"""Test straight duplex building using random DNA sequences."""
if not sequence:
with self.assertRaises(ValueError):
isambard.specifications.DNADuplex.from_sequence(sequence)
else:
dd = isambard.specifications.DNADuplex.from_sequence(
sequence)
self.assertEqual(len(dd), 2)
self.assertEqual(len(dd[0]), len(sequence))
self.assertEqual(len(dd[1]), len(sequence))
ideal_bonds_s1 = count_bonds(sequence)
ideal_bonds_s2 = count_bonds(
isambard.specifications.nucleic_acid_duplex.generate_antisense_sequence(sequence))
found_bonds_s1 = len(
ampal.interactions.find_covalent_bonds(dd[0]))
found_bonds_s2 = len(
ampal.interactions.find_covalent_bonds(dd[1]))
found_bonds_dd = len(
ampal.interactions.find_covalent_bonds(dd))
self.assertEqual(ideal_bonds_s1, found_bonds_s1)
self.assertEqual(ideal_bonds_s2, found_bonds_s2)
self.assertEqual(ideal_bonds_s1 + ideal_bonds_s2, found_bonds_dd)
@given(tuples(*[integers(min_value=-10000, max_value=10000) for _ in range(3)]),
tuples(*[integers(min_value=-10000, max_value=10000) for _ in range(3)]))
@settings(max_examples=50)
def test_dna_duplex_ints(self, start, end):
"""Test DNADuplex with random int start and end."""
sequence = 'GAGATATACACA'
if allclose(start, end):
with self.assertRaises(ValueError):
isambard.specifications.DNADuplex.from_start_and_end(
start, end, sequence)
else:
dd = isambard.specifications.DNADuplex.from_start_and_end(
start, end, sequence)
self.assertEqual(len(dd), 2)
self.assertEqual(len(dd[0]), 12)
self.assertEqual(len(dd[1]), 12)
@given(tuples(*[floats(min_value=-10000, max_value=10000) for _ in range(3)]),
tuples(*[floats(min_value=-10000, max_value=10000) for _ in range(3)]))
@settings(max_examples=50)
def test_dna_duplex_floats(self, start, end):
"""Test DNADuplex with random float start and end."""
sequence = 'GAGATATACACA'
if allclose(start, end):
with self.assertRaises(ValueError):
isambard.specifications.DNADuplex.from_start_and_end(
start, end, sequence)
else:
dd = isambard.specifications.DNADuplex.from_start_and_end(
start, end, sequence)
self.assertEqual(len(dd), 2)
self.assertEqual(len(dd[0]), 12)
self.assertEqual(len(dd[1]), 12)
@given(text('ATGC'),
tuples(*[integers(min_value=-10000, max_value=10000)
for _ in range(3)]),
tuples(*[integers(min_value=-10000, max_value=10000) for _ in range(3)]))
@settings(max_examples=50)
def test_dna_duplex_start_end_ran_seq(self, sequence, start, end):
"""Test SingleStrandHelix with random sequence, start and end."""
if allclose(start, end) or not sequence:
with self.assertRaises(ValueError):
isambard.specifications.DNADuplex.from_start_and_end(
start, end, sequence)
else:
dd = isambard.specifications.DNADuplex.from_start_and_end(
start, end, sequence)
self.assertEqual(len(dd), 2)
self.assertEqual(len(dd[0]), len(sequence))
self.assertEqual(len(dd[1]), len(sequence))
ideal_bonds_s1 = count_bonds(sequence)
ideal_bonds_s2 = count_bonds(
isambard.specifications.nucleic_acid_duplex.generate_antisense_sequence(sequence))
found_bonds_s1 = len(
ampal.interactions.find_covalent_bonds(dd[0]))
found_bonds_s2 = len(
ampal.interactions.find_covalent_bonds(dd[1]))
found_bonds_dd = len(
ampal.interactions.find_covalent_bonds(dd))
self.assertEqual(ideal_bonds_s1, found_bonds_s1)
self.assertEqual(ideal_bonds_s2, found_bonds_s2)
self.assertEqual(ideal_bonds_s1 + ideal_bonds_s2, found_bonds_dd)
__author__ = 'Christopher W. Wood'
| 43.207921
| 98
| 0.633478
| 1,001
| 8,728
| 5.295704
| 0.108891
| 0.042256
| 0.037351
| 0.039615
| 0.922279
| 0.905112
| 0.893982
| 0.871534
| 0.871534
| 0.871534
| 0
| 0.032731
| 0.264895
| 8,728
| 201
| 99
| 43.422886
| 0.793485
| 0.055454
| 0
| 0.794118
| 0
| 0
| 0.011094
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.058824
| false
| 0
| 0.041176
| 0
| 0.123529
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 44ce96920cebc7612fc98141250302f8dcac85b8
| 127
| py
| Python
| apgl/util/__init__.py
| mathemaphysics/APGL
| 6ca7c176e04017feeae00c4cee069fd126df0fbc
| ["BSD-3-Clause"] | 13
| 2015-02-19T14:39:09.000Z
| 2021-04-12T01:22:32.000Z
| apgl/util/__init__.py
| mathemaphysics/APGL
| 6ca7c176e04017feeae00c4cee069fd126df0fbc
| ["BSD-3-Clause"] | 1
| 2020-07-29T07:09:33.000Z
| 2020-07-29T07:09:33.000Z
| apgl/util/__init__.py
| mathemaphysics/APGL
| 6ca7c176e04017feeae00c4cee069fd126df0fbc
| ["BSD-3-Clause"] | 7
| 2015-03-16T07:26:49.000Z
| 2021-01-12T06:57:27.000Z |
from apgl.util.Parameter import Parameter
from apgl.util.PathDefaults import PathDefaults
from apgl.util.Util import Util
| 25.4
| 48
| 0.818898
| 18
| 127
| 5.777778
| 0.333333
| 0.230769
| 0.346154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133858
| 127
| 4
| 49
| 31.75
| 0.945455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 7
| 44f203b0ffe66552838e9469ae775e5138fa4d54
| 1,337
| py
| Python
| main.py
| MasoonZhang/FasterRConvMixer
| a7a17d00f716a28a5b301088053e00840c222524
| ["MIT"] | null | null | null
| main.py
| MasoonZhang/FasterRConvMixer
| a7a17d00f716a28a5b301088053e00840c222524
| ["MIT"] | null | null | null
| main.py
| MasoonZhang/FasterRConvMixer
| a7a17d00f716a28a5b301088053e00840c222524
| ["MIT"] | null | null | null |
from utils.callbacks import LossHistory
loss_history = LossHistory("logs/")
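# Concatenate the per-epoch training and validation losses from three earlier runs
# and replay them through LossHistory.append_loss().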
file = open('logs/loss_2022_01_31_09_56_19/epoch_loss_2022_01_31_09_56_19.txt')
loss = []
while 1:
line = file.readline()
if not line:
break
loss.append(float(line))
file.close()
print(len(loss))
file = open('logs/loss_2022_01_31_09_56_19/epoch_val_loss_2022_01_31_09_56_19.txt')
loss1 = []
while 1:
line = file.readline()
if not line:
break
loss1.append(float(line))
print(len(loss1))
file = open('logs/loss_2022_01_31_22_02_37/epoch_loss_2022_01_31_22_02_37.txt')
while 1:
line = file.readline()
if not line:
break
loss.append(float(line))
file.close()
print(len(loss))
file = open('logs/loss_2022_01_31_22_02_37/epoch_val_loss_2022_01_31_22_02_37.txt')
while 1:
line = file.readline()
if not line:
break
loss1.append(float(line))
print(len(loss1))
file = open('logs/loss_2022_02_01_09_32_26/epoch_loss_2022_02_01_09_32_26.txt')
while 1:
line = file.readline()
if not line:
break
loss.append(float(line))
file.close()
print(len(loss))
file = open('logs/loss_2022_02_01_09_32_26/epoch_val_loss_2022_02_01_09_32_26.txt')
while 1:
line = file.readline()
if not line:
break
loss1.append(float(line))
print(len(loss1))
for i in range(100):
print(i)
loss_history.append_loss(loss[i], loss1[i])
| 23.45614
| 83
| 0.737472
| 245
| 1,337
| 3.681633
| 0.171429
| 0.10643
| 0.088692
| 0.10643
| 0.861419
| 0.861419
| 0.854767
| 0.854767
| 0.808204
| 0.808204
| 0
| 0.15962
| 0.133134
| 1,337
| 57
| 84
| 23.45614
| 0.618637
| 0
| 0
| 0.75
| 0
| 0
| 0.299701
| 0.295964
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.019231
| 0
| 0.019231
| 0.134615
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 784e390a22d18e0c755eb956e373a9e996fbb751
| 12,642
| py
| Python
| AICity-track1-MTMC/Img_model/feature_validation.py
| yrims/AIC19
| 551a909380d473a65dac70b06466725f536a140c
| ["MIT"] | 8
| 2019-06-03T08:22:12.000Z
| 2021-11-04T07:43:52.000Z
| AICity-track1-MTMC/Img_model/feature_validation.py
| yrims/AIC19
| 551a909380d473a65dac70b06466725f536a140c
| ["MIT"] | 3
| 2019-07-09T15:55:22.000Z
| 2020-03-16T07:53:14.000Z
| AICity-track1-MTMC/Img_model/feature_validation.py
| yrims/AIC19
| 551a909380d473a65dac70b06466725f536a140c
| ["MIT"] | 5
| 2019-06-01T03:54:18.000Z
| 2021-03-03T02:21:04.000Z |
import os
import sys
import time
import math
import numpy as np
from PIL import Image
from random import sample
from scipy.spatial.distance import cdist
import argparse
TRACKLET = 'GroundTruth'
parser = argparse.ArgumentParser(description='reid')
parser.add_argument('--data_path',
default="D:/users/linjian/workspace/AICity19/dataset_track1/%s/bbox_img_resize"%TRACKLET,
help='path of dataset')
parser.add_argument('--feature_save_path',
default="D:/users/linjian/workspace/AICity19/dataset_track1/%s/bbox_img_feature"%TRACKLET,
help='path to feature saving')
parser.add_argument('--scene',
default='S1',
help='which scene')
opt = parser.parse_args()
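# Feature_validation groups saved per-crop feature vectors by (camera id, track id),
# samples select_n of them per identity, and reports the mean/std pairwise distance
# for same-identity versus different-identity pairs as a sanity check on the features.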
class Feature_validation():
def __init__(self):
self.path = os.path.join(opt.feature_save_path, opt.scene+'_res152')
self.feature_dict = {}
self.f_len = len(os.listdir(self.path))
self.select_n = 3
self.key_len = 0
def classify_feature(self):
all_feature_file = os.listdir(self.path)
for i in all_feature_file:
key = '%s_%s'%(i[0:5],i[24:29]) # camid, sctid
# key = '%s'%(i[24:29]) # only sctid for GroundTruth
if key not in self.feature_dict.keys():
self.feature_dict[key] = [i]
else:
self.feature_dict[key].append(i)
self.key_len = len(self.feature_dict.keys())
def feature_select(self):
for key in self.feature_dict.keys():
if len(self.feature_dict[key]) < self.select_n:
sp = sample(self.feature_dict[key], 1) * self.select_n
else:
sp = sample(self.feature_dict[key], (self.select_n))
print('key:',key,'values:',sp)
self.feature_dict[key] = sp
def cal_diff_id_feature_dist(self, fs): # fs is feature selected
d = []
c = 0
iters = 0
for key_a in fs.keys():
for n in range(self.select_n):
p = os.path.join(self.path, fs[key_a][n])
if n == 0:
fa = np.reshape(np.load(p), (1, 2048))
else:
f_ = np.reshape(np.load(p), (1, 2048))
fa = np.append(fa, f_, axis=0)
for key_b in fs.keys():
if key_b != key_a:
for n in range(self.select_n):
p = os.path.join(self.path, fs[key_b][n])
if n == 0:
fb = np.reshape(np.load(p), (1, 2048))
else:
f_ = np.reshape(np.load(p), (1, 2048))
fb = np.append(fb, f_, axis=0)
for i in range(self.select_n):
Y = cdist(fa[np.newaxis,i], fb[np.newaxis,i], 'braycurtis')
d.append(Y)
c += 1
iters += 1
loss = np.mean(d)
std = np.std(d)
print('\riter: %d loss(diff): %f std: %f'%(iters, loss, std), end='')
def cal_diff_id_feature_dist_GT(self, fs): # fs is feature selected
d = []
c = 0
iters = 0
for key_a in fs.keys():
for n in range(self.select_n):
p = os.path.join(self.path, fs[key_a][n])
if n == 0:
fa = np.reshape(np.load(p), (1, 2048))
else:
f_ = np.reshape(np.load(p), (1, 2048))
fa = np.append(fa, f_, axis=0)
for key_b in fs.keys():
if (key_a[0:5] != key_b[0:5]) and (key_a[6:11] != key_b[6:11]):
for n in range(self.select_n):
p = os.path.join(self.path, fs[key_b][n])
if n == 0:
fb = np.reshape(np.load(p), (1, 2048))
else:
f_ = np.reshape(np.load(p), (1, 2048))
fb = np.append(fb, f_, axis=0)
for i in range(self.select_n):
Y = cdist(fa[np.newaxis,i], fb[np.newaxis,i], 'cosine')
d.append(Y)
c += 1
iters += 1
loss = np.mean(d)
std = np.std(d)
print('\riter: %d loss(diff): %f std: %f'%(iters, loss, std), end='')
def cal_same_id_feature_dist(self, fsa, fsb):
d = []
c = 0
iters = 0
for key_a in fsa.keys():
for n in range(self.select_n):
pa = os.path.join(self.path, fsa[key_a][n])
pb = os.path.join(self.path, fsb[key_a][n])
# print('pa:',pa)
# print('pb:',pb)
if n == 0:
fa = np.reshape(np.load(pa), (1,2048))
fb = np.reshape(np.load(pb), (1,2048))
else:
fa_ = np.reshape(np.load(pa), (1,2048))
fa = np.append(fa, fa_, axis=0)
fb_ = np.reshape(np.load(pb), (1,2048))
fb = np.append(fb, fb_, axis=0)
for i in range(self.select_n):
# print('fa.shape', fa[np.newaxis,i].shape)
# print('fb.shape', fb[np.newaxis,i].shape)
Y = cdist(fa[np.newaxis,i], fb[np.newaxis,i], 'cosine')
d.append(Y)
c += 1
iters += 1
loss = np.mean(d)
std = np.std(d)
print('\riter: %d loss(same): %f std: %f'%(iters, loss, std), end='')
print('')
def cal_same_id_feature_dist_GT(self, fsa, fsb):
d = []
c = 0
iters = 0
for key_a in fsa.keys():
for key_b in fsb.keys():
if (key_a[0:5] != key_b[0:5]) and (key_a[6:11] == key_b[6:11]):
for n in range(self.select_n):
pa = os.path.join(self.path, fsa[key_a][n])
pb = os.path.join(self.path, fsb[key_b][n])
# print('pa:',pa)
# print('pb:',pb)
if n == 0:
fa = np.reshape(np.load(pa), (1,2048))
fb = np.reshape(np.load(pb), (1,2048))
else:
fa_ = np.reshape(np.load(pa), (1,2048))
fa = np.append(fa, fa_, axis=0)
fb_ = np.reshape(np.load(pb), (1,2048))
fb = np.append(fb, fb_, axis=0)
for i in range(self.select_n):
# print('fa.shape', fa[np.newaxis,i].shape)
# print('fb.shape', fb[np.newaxis,i].shape)
Y = cdist(fa[np.newaxis,i], fb[np.newaxis,i], 'cosine')
d.append(Y)
c += 1
iters += 1
loss = np.mean(d)
std = np.std(d)
print('\riter: %d loss(same): %f std: %f'%(iters, loss, std), end='')
print('')
class Feature_validation_his():
def __init__(self):
self.path = os.path.join(opt.data_path, opt.scene)
self.feature_dict = {}
self.f_len = len(os.listdir(self.path))
self.select_n = 3
self.key_len = 0
def classify_feature(self):
all_feature_file = os.listdir(self.path)
for i in all_feature_file:
# key = '%s_%s'%(i[0:5],i[24:29]) # camid, sctid
key = '%s'%(i[24:29]) # only sctid for GroundTruth
if key not in self.feature_dict.keys():
self.feature_dict[key] = [i]
else:
self.feature_dict[key].append(i)
self.key_len = len(self.feature_dict.keys())
print('key_len:', self.key_len)
def feature_select(self):
for key in self.feature_dict.keys():
if len(self.feature_dict[key]) < self.select_n:
sp = sample(self.feature_dict[key], 1) * self.select_n
else:
sp = sample(self.feature_dict[key], (self.select_n))
print('key:',key,'values:',sp)
self.feature_dict[key] = sp
def cal_diff_id_feature_dist_GT(self, fs): # fs is feature selected
d = []
c = 0
iters = 0
for key_a in fs.keys():
for n in range(self.select_n):
p = os.path.join(self.path, fs[key_a][n])
if n == 0:
fa = Image.open(p)
fa = fa.histogram()
fa = np.reshape(np.array(fa), (1, 768))
else:
f_ = Image.open(p)
f_ = f_.histogram()
f_ = np.reshape(np.array(f_), (1, 768))
fa = np.append(fa, f_, axis=0)
for key_b in fs.keys():
if (key_a[0:5] != key_b[0:5]) and (key_a[6:11] != key_b[6:11]):
for n in range(self.select_n):
p = os.path.join(self.path, fs[key_b][n])
if n == 0:
fb = Image.open(p)
fb = fb.histogram()
fb = np.reshape(np.array(fb), (1, 768))
else:
f_ = Image.open(p)
f_ = f_.histogram()
f_ = np.reshape(np.array(f_), (1, 768))
fb = np.append(fb, f_, axis=0)
for i in range(self.select_n):
Y = cdist(fa[np.newaxis,i], fb[np.newaxis,i], 'braycurtis')
d.append(Y)
c += 1
iters += 1
loss = np.mean(d)
std = np.std(d)
print('iter: %d loss(diff): %f std: %f'%(iters, loss, std), end='\r')
def cal_same_id_feature_dist_GT(self, fsa, fsb):
d = []
c = 0
iters = 0
for key_a in fsa.keys():
for key_b in fsb.keys():
if (key_a[0:5] != key_b[0:5]) and (key_a[6:11] == key_b[6:11]):
for n in range(self.select_n):
pa = os.path.join(self.path, fsa[key_a][n])
pb = os.path.join(self.path, fsb[key_b][n])
# print('pa:',pa)
# print('pb:',pb)
if n == 0:
fa = Image.open(pa)
fa = fa.histogram()
fa = np.reshape(np.array(fa), (1, 768))
fb = Image.open(pb)
fb = fb.histogram()
fb = np.reshape(np.array(fb), (1, 768))
# fa = np.reshape(np.load(pa), (1,2048))
# fb = np.reshape(np.load(pb), (1,2048))
else:
f_ = Image.open(pa)
f_ = f_.histogram()
f_ = np.reshape(np.array(f_), (1, 768))
fa = np.append(fa, f_, axis=0)
f_ = Image.open(pb)
f_ = f_.histogram()
f_ = np.reshape(np.array(f_), (1, 768))
fb = np.append(fb, f_, axis=0)
for i in range(self.select_n):
# print('fa.shape', fa[np.newaxis,i].shape)
# print('fb.shape', fb[np.newaxis,i].shape)
Y = cdist(fa[np.newaxis,i], fb[np.newaxis,i], 'braycurtis')
d.append(Y)
c += 1
iters += 1
loss = np.mean(d)
std = np.std(d)
print('iter: %d loss(same): %f std: %f'%(iters, loss, std), end='\r')
print('')
fv = Feature_validation()
fv_ = Feature_validation()
fv.classify_feature()
fv_.classify_feature()
fv.feature_select()
fv_.feature_select()
fv.cal_same_id_feature_dist_GT(fv.feature_dict, fv_.feature_dict)
fv.cal_diff_id_feature_dist_GT(fv.feature_dict)
'''
for j in fv.feature_dict.keys():
print('key:',j)
for k in fv.feature_dict[j]:
print('value:',k)
'''
| 39.018519
| 110
| 0.430312
| 1,612
| 12,642
| 3.233871
| 0.085608
| 0.044888
| 0.054863
| 0.051794
| 0.863994
| 0.858047
| 0.848648
| 0.837905
| 0.837522
| 0.825245
| 0
| 0.033886
| 0.432764
| 12,642
| 323
| 111
| 39.139319
| 0.693069
| 0.049834
| 0
| 0.815385
| 0
| 0
| 0.044738
| 0.011711
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046154
| false
| 0
| 0.034615
| 0
| 0.088462
| 0.046154
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 786655104c6ff5e6c05a7e83a3772261b56ca191
| 314
| py
| Python
| SBaaS_rnasequencing/stage01_rnasequencing_genesCountTable_execute.py
| dmccloskey/SBaaS_rnasequencing
| 521ad0b671b0bca02e9cebfc1b372f2265955418
| ["MIT"] | null | null | null
| SBaaS_rnasequencing/stage01_rnasequencing_genesCountTable_execute.py
| dmccloskey/SBaaS_rnasequencing
| 521ad0b671b0bca02e9cebfc1b372f2265955418
| ["MIT"] | null | null | null
| SBaaS_rnasequencing/stage01_rnasequencing_genesCountTable_execute.py
| dmccloskey/SBaaS_rnasequencing
| 521ad0b671b0bca02e9cebfc1b372f2265955418
| ["MIT"] | null | null | null |
from copy import copy
#sbaas
from .stage01_rnasequencing_genesCountTable_io import stage01_rnasequencing_genesCountTable_io
#sbaas models
from .stage01_rnasequencing_genesCountTable_postgresql_models import *
class stage01_rnasequencing_genesCountTable_execute(stage01_rnasequencing_genesCountTable_io):
pass
| 39.25
| 94
| 0.901274
| 34
| 314
| 7.852941
| 0.382353
| 0.374532
| 0.655431
| 0.41573
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034247
| 0.070064
| 314
| 8
| 95
| 39.25
| 0.880137
| 0.05414
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.6
| 0
| 0.8
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 8
| 78a7485bc7c370968ea590e9a598448f5ec3aaf3
| 2,924
| py
| Python
| AlteMatrix/converter/octal.py
| Ir0n-c0d3X/AlteMatrix
| 9479ddeec9839b88d8f7079d00fd62f3ee47157d
| ["MIT"] | 10
| 2021-09-19T13:55:58.000Z
| 2022-01-16T02:15:28.000Z
| AlteMatrix/converter/octal.py
| Ir0n-c0d3X/AlteMatrix
| 9479ddeec9839b88d8f7079d00fd62f3ee47157d
| ["MIT"] | null | null | null
| AlteMatrix/converter/octal.py
| Ir0n-c0d3X/AlteMatrix
| 9479ddeec9839b88d8f7079d00fd62f3ee47157d
| ["MIT"] | 2
| 2021-09-19T23:51:51.000Z
| 2022-01-16T02:15:42.000Z |
# NOTE: All functions are in pairs of the main function and an optional "silent" function
# which would simply return a value instead of printing
# list of invalid octal figures
inv = ['8','9']
# octal to binary function
def octal_to_binary(data: str):
print("Octal value:",data)
for digit in data:
if digit in inv:
raise ValueError("Non-octal number entered!")
try:
bnr = int(data,8)
bnr = bin(bnr).replace('0b', '')
except ValueError:
raise ValueError("Non-octal number entered!")
print("Binary value: ",bnr)
def octal_to_binary_s(data: str):
for digit in data:
if digit in inv:
raise ValueError("Non-octal number entered!")
try:
bnr = int(data,8)
bnr = bin(bnr).replace('0b', '')
except ValueError:
raise ValueError("Non-octal number entered!")
return bnr
# octal to decimal function
def octal_to_decimal(data: str):
print("Octal value:",data)
decimal = 0
for i in data:
if i in inv:
raise ValueError("Non-octal number entered!")
try:
decimal = decimal*8 + int(i)
except ValueError:
raise ValueError("Non-octal number entered!")
print("Decimal value:",decimal)
def octal_to_decimal_s(data: str):
decimal = 0
for i in data:
if i in inv:
raise ValueError("Non-octal number entered!")
try:
decimal = decimal*8 + int(i)
except ValueError:
raise ValueError("Non-octal number entered!")
return decimal
# octal to hexadecimal function
def octal_to_hexadecimal(data: str):
print("Octal value:",data)
for digit in data:
if digit in inv:
raise ValueError("Non-octal number entered!")
try:
hxd = int(data,8)
hxd = hex(hxd).replace('0x', '')
except ValueError:
raise ValueError("Non-octal number entered!")
print("Hexadecimal value: ",hxd)
def octal_to_hexadecimal_s(data: str):
for digit in data:
if digit in inv:
raise ValueError("Non-octal number entered!")
try:
hxd = int(data,8)
hxd = hex(hxd).replace('0x', '')
except ValueError:
raise ValueError("Non-octal number entered!")
return hxd
# octal to text function
def octal_to_text(data: str):
data = data.split()
l = []
for i in data:
try:
z = int(i, 8)
l.append(chr(z))
except ValueError:
raise ValueError("Non-octal number entered!")
print("ASCII value:",''.join(l))
def octal_to_text_s(data: str):
data = data.split()
l = []
for i in data:
try:
z = int(i, 8)
l.append(chr(z))
except ValueError:
raise ValueError("Non-octal number entered!")
return ''.join(l)
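# A minimal usage sketch (hypothetical inputs, not part of the original module); the
# "_s" variants return their result instead of printing it:
# octal_to_binary_s("17")                 # -> '1111'
# octal_to_decimal_s("17")                # -> 15
# octal_to_hexadecimal_s("17")            # -> 'f'
# octal_to_text_s("110 145 154 154 157")  # -> 'Hello'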
| 30.778947
| 90
| 0.575581
| 376
| 2,924
| 4.422872
| 0.175532
| 0.126278
| 0.151533
| 0.193626
| 0.721587
| 0.721587
| 0.705953
| 0.705953
| 0.705953
| 0.679495
| 0
| 0.007988
| 0.314979
| 2,924
| 95
| 91
| 30.778947
| 0.822267
| 0.094391
| 0
| 0.797619
| 0
| 0
| 0.172283
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0
| 0
| 0.142857
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 154d0f36444f4c35ea2d37c139ab13a68ab1d315
| 427
| py
| Python
| ratart.py
| ashbys01/Rapid-Antigen-Test-Alert
| af0c0ee800c3a4d2700c7cca5ed642a8adff68d0
| ["MIT"] | null | null | null
| ratart.py
| ashbys01/Rapid-Antigen-Test-Alert
| af0c0ee800c3a4d2700c7cca5ed642a8adff68d0
| ["MIT"] | null | null | null
| ratart.py
| ashbys01/Rapid-Antigen-Test-Alert
| af0c0ee800c3a4d2700c7cca5ed642a8adff68d0
| ["MIT"] | null | null | null |
def theRat():
print(" (\,/)")
print(" oo '''//, _")
print(" ,/_;~, \, / '")
print(" \"' \ ( \ !")
print(" ',| \ |__.'")
print(" '~ '~----''")
def ratFound():
print(" (\,;,/)")
print(" (o o)\//,")
print(" \ / \,")
print(" `+'( ( \ )")
print(" // \ |_./")
print(" '~' '~----'")
| 28.466667
| 39
| 0.208431
| 19
| 427
| 4.421053
| 0.315789
| 0.952381
| 0.714286
| 0.47619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.468384
| 427
| 15
| 40
| 28.466667
| 0.370044
| 0
| 0
| 0
| 0
| 0
| 0.468599
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| true
| 0
| 0
| 0
| 0.142857
| 0.857143
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 7
| 1564a7b17101d6b111b4c6486e252a88690a2ee6
| 4,957
| py
| Python
| images_into_array/images_into_array.py
| sujitmandal/Convert-Images-Into-Array
| 279071a70e51edbd5cec765366caac6a58fe77b5
| ["MIT"] | null | null | null
| images_into_array/images_into_array.py
| sujitmandal/Convert-Images-Into-Array
| 279071a70e51edbd5cec765366caac6a58fe77b5
| ["MIT"] | null | null | null
| images_into_array/images_into_array.py
| sujitmandal/Convert-Images-Into-Array
| 279071a70e51edbd5cec765366caac6a58fe77b5
| ["MIT"] | null | null | null |
import os
import cv2
import numpy as np
from tqdm import tqdm
from random import shuffle
#Github: https://github.com/sujitmandal
#This program is created by Sujit Mandal
"""
Github: https://github.com/sujitmandal
This program is created by Sujit Mandal
LinkedIn : https://www.linkedin.com/in/sujit-mandal-91215013a/
"""
#Document : https://docs.opencv.org/3.4/de/d25/imgproc_color_conversions.html
#images_path = input('Enter Image Folder Path : ') #Path of the images folder
#image_height = int(input('Enter The Image Size [32, 64, 128] : '))
#image_width = int(input('Enter The Image Size [32, 64, 128] : '))
images_path = ('') #Path of the images folder
image_height = 32 #[32, 64, 128]
image_width = 32 #[32, 64, 128]
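# Each helper below reads every image in images_path, optionally converts the colour
# space, resizes to (image_height, image_width), shuffles, and returns the stacked
# numpy array with the extra list dimension removed.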
# NORMAL (no colour-space conversion)
def images(images_path, image_height, image_width):
imges_list = []
for image in tqdm(os.listdir(images_path)):
path = os.path.join(images_path, image)
image = cv2.imread(path)
image = cv2.resize(image , (image_height, image_width))
imges_list.append([np.array(image)])
shuffle(imges_list)
#Convert List Into Array
array_image = np.array(imges_list)
#Remove the extra dimension
images = array_image[:,0,:,:]
return(images)
#RGB ↔ GRAY
def rgb_gray(images_path, image_height, image_width):
imges_list = []
for image in tqdm(os.listdir(images_path)):
path = os.path.join(images_path, image)
image = cv2.imread(path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
image = cv2.resize(image , (image_height, image_width))
imges_list.append([np.array(image)])
shuffle(imges_list)
#Convert List Into Array
array_image = np.array(imges_list)
#Remove the extra dimension
images = array_image[:,0,:,:]
return(images)
#RGB ↔ CIE L*a*b*
def rgb_lab(images_path, image_height, image_width):
imges_list = []
for image in tqdm(os.listdir(images_path)):
path = os.path.join(images_path, image)
image = cv2.imread(path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2Lab)
image = cv2.resize(image , (image_height, image_width))
imges_list.append([np.array(image)])
shuffle(imges_list)
#Convert List Into Array
array_image = np.array(imges_list)
#Remove the extra dimension
images = array_image[:,0,:,:]
return(images)
#RGB ↔ HSV
def rgb_hsv(images_path, image_height, image_width):
imges_list = []
for image in tqdm(os.listdir(images_path)):
path = os.path.join(images_path, image)
image = cv2.imread(path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
image = cv2.resize(image , (image_height, image_width))
imges_list.append([np.array(image)])
shuffle(imges_list)
#Convert List Into Array
array_image = np.array(imges_list)
#Remove the extra dimension
images = array_image[:,0,:,:]
return(images)
#RGB ↔ YCrCb JPEG (or YCC)
def rgb_ycrcb(images_path, image_height, image_width):
imges_list = []
for image in tqdm(os.listdir(images_path)):
path = os.path.join(images_path, image)
image = cv2.imread(path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2YCrCb)
image = cv2.resize(image , (image_height, image_width))
imges_list.append([np.array(image)])
shuffle(imges_list)
#Convert List Into Array
array_image = np.array(imges_list)
#Remove the extra dimension
images = array_image[:,0,:,:]
return(images)
#RGB ↔ HLS
def rgb_hls(images_path, image_height, image_width):
imges_list = []
for image in tqdm(os.listdir(images_path)):
path = os.path.join(images_path, image)
image = cv2.imread(path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2HLS)
image = cv2.resize(image , (image_height, image_width))
imges_list.append([np.array(image)])
shuffle(imges_list)
#Convert List Into Array
array_image = np.array(imges_list)
#Remove the extra dimension
images = array_image[:,0,:,:]
return(images)
#RGB ↔ CIE L*u*v*
def rgb_luv(images_path, image_height, image_width):
imges_list = []
for image in tqdm(os.listdir(images_path)):
path = os.path.join(images_path, image)
image = cv2.imread(path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2Luv)
image = cv2.resize(image , (image_height, image_width))
imges_list.append([np.array(image)])
shuffle(imges_list)
#Convert List Into Array
array_image = np.array(imges_list)
#Remove the extra dimension
images = array_image[:,0,:,:]
return(images)
if __name__ == "__main__":
images(images_path, image_height, image_width)
rgb_gray(images_path, image_height, image_width)
rgb_lab(images_path, image_height, image_width)
rgb_hsv(images_path, image_height, image_width)
rgb_ycrcb(images_path, image_height, image_width)
rgb_hls(images_path, image_height, image_width)
rgb_luv(images_path, image_height, image_width)
| 28.164773
| 77
| 0.674198
| 697
| 4,957
| 4.593974
| 0.137733
| 0.093691
| 0.098376
| 0.137726
| 0.893816
| 0.885384
| 0.885384
| 0.871955
| 0.767021
| 0.747033
| 0
| 0.021314
| 0.204963
| 4,957
| 176
| 78
| 28.164773
| 0.789647
| 0.158967
| 0
| 0.707071
| 0
| 0
| 0.002006
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070707
| false
| 0
| 0.050505
| 0
| 0.121212
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 15a43c849544c721030f030d55ea58faffddaf8f
| 115
| py
| Python
| nd/io/rasterio_.py
| elfmanryan/geo
| d83ce1d994c0a8a7fc461c22f8fd86e30216eefc
| ["MIT"] | null | null | null
| nd/io/rasterio_.py
| elfmanryan/geo
| d83ce1d994c0a8a7fc461c22f8fd86e30216eefc
| ["MIT"] | null | null | null
| nd/io/rasterio_.py
| elfmanryan/geo
| d83ce1d994c0a8a7fc461c22f8fd86e30216eefc
| ["MIT"] | null | null | null |
import xarray as xr
def open_rasterio(path, *args, **kwargs):
return xr.open_rasterio(path, *args, **kwargs)
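# Thin wrapper around xarray's rasterio reader, e.g. (hypothetical path):
# da = open_rasterio("scene.tif")  # returns an xarray.DataArray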
| 19.166667
| 50
| 0.704348
| 17
| 115
| 4.647059
| 0.647059
| 0.303797
| 0.405063
| 0.506329
| 0.658228
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156522
| 115
| 5
| 51
| 23
| 0.814433
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 9
| ec6d27db57136768280bc829191219f4bb8d4c7b
| 29,649
| py
| Python
| operations/fleet_management/migrations/0001_initial.py
| kaizer88/emps
| 2669b32c46befcf1a19390fb25013817e6b00980
| ["MIT"] | null | null | null
| operations/fleet_management/migrations/0001_initial.py
| kaizer88/emps
| 2669b32c46befcf1a19390fb25013817e6b00980
| ["MIT"] | null | null | null
| operations/fleet_management/migrations/0001_initial.py
| kaizer88/emps
| 2669b32c46befcf1a19390fb25013817e6b00980
| ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-06 11:31
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Branding',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('changed_at', models.DateTimeField(auto_now=True, null=True)),
('deleted', models.BooleanField(db_index=True, default=False)),
('supplier_name', models.CharField(max_length=255)),
('installer_name', models.CharField(max_length=255)),
('installation_date', models.DateTimeField(blank=True, null=True)),
],
options={
'default_permissions': [],
'abstract': False,
},
),
migrations.CreateModel(
name='FinanceDetail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('changed_at', models.DateTimeField(auto_now=True, null=True)),
('deleted', models.BooleanField(db_index=True, default=False)),
('financier', models.CharField(max_length=255)),
('contract_number', models.CharField(max_length=255)),
('contract_term_months', models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('deposit', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0)])),
('installment', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0)])),
('settlement_amount', models.IntegerField(blank=True, default=0, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('settlement_amount_date', models.DateTimeField(blank=True, null=True)),
],
options={
'default_permissions': [],
'abstract': False,
},
),
migrations.CreateModel(
name='FuelCard',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('changed_at', models.DateTimeField(auto_now=True, null=True)),
('deleted', models.BooleanField(db_index=True, default=False)),
('supplier', models.CharField(choices=[(b'absa', b'ABSA'), (b'edenred', b'EDENRED')], max_length=255)),
('card_type', models.CharField(choices=[(b'fuel', b'Fuel Only'), (b'fuel_oil', b'Fuel & Oil'), (b'fuel_oil_toll', b'Fuel, Oil & Toll'), (b'fuel_oil_etag', b'Fuel, Oil & eTag')], max_length=255)),
('card_number', models.CharField(max_length=255)),
],
options={
'default_permissions': [],
'abstract': False,
},
),
migrations.CreateModel(
name='FuelCardUsage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('changed_at', models.DateTimeField(auto_now=True, null=True)),
('deleted', models.BooleanField(db_index=True, default=False)),
('date_used', models.DateTimeField()),
('amount', models.IntegerField()),
],
options={
'default_permissions': [],
'abstract': False,
},
),
migrations.CreateModel(
name='HistoricalBranding',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_at', models.DateTimeField(blank=True, editable=False, null=True)),
('changed_at', models.DateTimeField(blank=True, editable=False, null=True)),
('deleted', models.BooleanField(db_index=True, default=False)),
('supplier_name', models.CharField(max_length=255)),
('installer_name', models.CharField(max_length=255)),
('installation_date', models.DateTimeField(blank=True, null=True)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
],
options={
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
'verbose_name': 'historical branding',
},
),
migrations.CreateModel(
name='HistoricalFinanceDetail',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_at', models.DateTimeField(blank=True, editable=False, null=True)),
('changed_at', models.DateTimeField(blank=True, editable=False, null=True)),
('deleted', models.BooleanField(db_index=True, default=False)),
('financier', models.CharField(max_length=255)),
('contract_number', models.CharField(max_length=255)),
('contract_term_months', models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('deposit', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0)])),
('installment', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0)])),
('settlement_amount', models.IntegerField(blank=True, default=0, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('settlement_amount_date', models.DateTimeField(blank=True, null=True)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
],
options={
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
'verbose_name': 'historical finance detail',
},
),
migrations.CreateModel(
name='HistoricalFuelCard',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_at', models.DateTimeField(blank=True, editable=False, null=True)),
('changed_at', models.DateTimeField(blank=True, editable=False, null=True)),
('deleted', models.BooleanField(db_index=True, default=False)),
('supplier', models.CharField(choices=[(b'absa', b'ABSA'), (b'edenred', b'EDENRED')], max_length=255)),
('card_type', models.CharField(choices=[(b'fuel', b'Fuel Only'), (b'fuel_oil', b'Fuel & Oil'), (b'fuel_oil_toll', b'Fuel, Oil & Toll'), (b'fuel_oil_etag', b'Fuel, Oil & eTag')], max_length=255)),
('card_number', models.CharField(max_length=255)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
],
options={
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
'verbose_name': 'historical fuel card',
},
),
migrations.CreateModel(
name='HistoricalInsurance',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_at', models.DateTimeField(blank=True, editable=False, null=True)),
('changed_at', models.DateTimeField(blank=True, editable=False, null=True)),
('deleted', models.BooleanField(db_index=True, default=False)),
('broker_name', models.CharField(blank=True, max_length=255, null=True)),
('insurance_type', models.CharField(choices=[(b'insurance', b'Insurance'), (b'shortfall', b'Shortfall Cover')], default=b'insurance', max_length=255)),
('insured_amount', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0)])),
('installment', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0)])),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
],
options={
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
'verbose_name': 'historical insurance',
},
),
migrations.CreateModel(
name='HistoricalPurchaseDetail',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_at', models.DateTimeField(blank=True, editable=False, null=True)),
('changed_at', models.DateTimeField(blank=True, editable=False, null=True)),
('deleted', models.BooleanField(db_index=True, default=False)),
('dealership', models.CharField(blank=True, max_length=255, null=True)),
('invoice_number', models.CharField(max_length=255)),
('purchase_amount', models.IntegerField(validators=[django.core.validators.MinValueValidator(0)])),
('purchase_date', models.DateTimeField(blank=True, null=True)),
('purchase_type', models.CharField(choices=[(b'cash', b'Cash'), (b'hp', b'HP - Hire Purchase'), (b'lease', b'Lease'), (b'rental', b'Rental')], max_length=50)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
],
options={
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
'verbose_name': 'historical purchase detail',
},
),
migrations.CreateModel(
name='HistoricalVehicle',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_at', models.DateTimeField(blank=True, editable=False, null=True)),
('changed_at', models.DateTimeField(blank=True, editable=False, null=True)),
('deleted', models.BooleanField(db_index=True, default=False)),
('ownership', models.CharField(choices=[(b'emerald', b'Emerald'), (b'private', b'Private')], default=b'emerald', max_length=50)),
('division', models.CharField(choices=[(b'sales', b'Sales'), (b'marketing', b'Marketing'), (b'other', b'Other')], default=b'sales', max_length=120)),
('status_at_create', models.CharField(choices=[(b'new', b'New'), (b'used', b'Used')], default=b'new', max_length=50)),
('make', models.CharField(max_length=255)),
('model', models.CharField(max_length=255)),
('year_model', models.IntegerField(blank=True, null=True)),
('registration_number', models.CharField(max_length=255)),
('registration_date', models.DateTimeField(blank=True, null=True)),
('licence_disk_expiry', models.DateTimeField(blank=True, null=True)),
('vin_number', models.CharField(max_length=255)),
('engine_number', models.CharField(max_length=255)),
('colour', models.CharField(blank=True, max_length=255, null=True)),
('transmission', models.CharField(choices=[(b'automatic', b'Automatic'), (b'manual', b'Manual')], default=b'manual', max_length=50)),
('fuel_type', models.CharField(choices=[(b'petrol', b'Petrol'), (b'diesel', b'Diesel')], default=b'petrol', max_length=120)),
('engine_capacity', models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('tank_capacity', models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('delivery_mileage', models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('has_aircon', models.NullBooleanField()),
('has_radio', models.NullBooleanField()),
('has_bluetooth', models.NullBooleanField()),
('has_jack', models.NullBooleanField()),
('has_spanner', models.NullBooleanField()),
('has_triangle', models.NullBooleanField()),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
],
options={
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
'verbose_name': 'historical vehicle',
},
),
migrations.CreateModel(
name='HistoricalVehicleMaintenance',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_at', models.DateTimeField(blank=True, editable=False, null=True)),
('changed_at', models.DateTimeField(blank=True, editable=False, null=True)),
('deleted', models.BooleanField(db_index=True, default=False)),
('plan_type', models.CharField(choices=[(b'service_plan', b'Service Plan'), (b'maintenance_plan', b'Maintenance Plan'), (b'warranty', b'Warranty')], max_length=50)),
('end_date', models.DateTimeField(blank=True, null=True)),
('end_mileage', models.IntegerField(blank=True, default=0, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
],
options={
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
'verbose_name': 'historical vehicle maintenance',
},
),
migrations.CreateModel(
name='HistoricalVehicleTyre',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_at', models.DateTimeField(blank=True, editable=False, null=True)),
('changed_at', models.DateTimeField(blank=True, editable=False, null=True)),
('deleted', models.BooleanField(db_index=True, default=False)),
('make', models.CharField(max_length=255)),
('size', models.CharField(max_length=255)),
('position', models.CharField(choices=[(b'fr', b'Front Right'), (b'fl', b'Front Left'), (b'rr', b'Rear Right'), (b'rl', b'Rear Left'), (b'spare', b'Spare')], max_length=50)),
('serial_number', models.CharField(blank=True, max_length=255, null=True)),
('mileage_at_replacement', models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('in_use', models.BooleanField(default=False)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
],
options={
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
'verbose_name': 'historical vehicle tyre',
},
),
migrations.CreateModel(
name='Incident',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('changed_at', models.DateTimeField(auto_now=True, null=True)),
('deleted', models.BooleanField(db_index=True, default=False)),
('incident_date', models.DateTimeField()),
('incident_type', models.CharField(choices=[(b'windscreen', b'Windscreen'), (b'vehicle theft', b'Vehicle Theft'), (b'smash and grab', b'Smash And Grab'), (b'road accident', b'Road Accident'), (b'traffic fine', b'Traffic Fine'), (b'tracker warning', b'Tracker Warning')], max_length=255)),
('description', models.CharField(max_length=255)),
('resolved', models.BooleanField(default=False)),
('cost', models.IntegerField(blank=True, null=True)),
],
options={
'default_permissions': [],
'abstract': False,
},
),
migrations.CreateModel(
name='Insurance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('changed_at', models.DateTimeField(auto_now=True, null=True)),
('deleted', models.BooleanField(db_index=True, default=False)),
('broker_name', models.CharField(blank=True, max_length=255, null=True)),
('insurance_type', models.CharField(choices=[(b'insurance', b'Insurance'), (b'shortfall', b'Shortfall Cover')], default=b'insurance', max_length=255)),
('insured_amount', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0)])),
('installment', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0)])),
],
options={
'default_permissions': [],
'abstract': False,
},
),
migrations.CreateModel(
name='PurchaseDetail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('changed_at', models.DateTimeField(auto_now=True, null=True)),
('deleted', models.BooleanField(db_index=True, default=False)),
('dealership', models.CharField(blank=True, max_length=255, null=True)),
('invoice_number', models.CharField(max_length=255)),
('purchase_amount', models.IntegerField(validators=[django.core.validators.MinValueValidator(0)])),
('purchase_date', models.DateTimeField(blank=True, null=True)),
('purchase_type', models.CharField(choices=[(b'cash', b'Cash'), (b'hp', b'HP - Hire Purchase'), (b'lease', b'Lease'), (b'rental', b'Rental')], max_length=50)),
],
options={
'default_permissions': [],
'abstract': False,
},
),
migrations.CreateModel(
name='Tracker',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('changed_at', models.DateTimeField(auto_now=True, null=True)),
('deleted', models.BooleanField(db_index=True, default=False)),
('installation_type', models.CharField(choices=[(b'new', b'New'), (b'reinstall', b'Re-Install')], default=b'new', max_length=255)),
('installation_date', models.DateTimeField()),
('dealership_name', models.CharField(max_length=255)),
('purchase_type', models.CharField(choices=[(b'cash', b'Cash'), (b'rental', b'Rental')], default=b'cash', max_length=255)),
('contract_term_months', models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('deposit', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0)])),
('installment', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0)])),
('settlement_amount', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0)])),
('settlement_amount_date', models.DateTimeField(blank=True, null=True)),
('previous_vehicle_reg_number', models.CharField(blank=True, max_length=255, null=True)),
('active', models.BooleanField(default=False)),
],
options={
'default_permissions': [],
'abstract': False,
},
),
migrations.CreateModel(
name='Vehicle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('changed_at', models.DateTimeField(auto_now=True, null=True)),
('deleted', models.BooleanField(db_index=True, default=False)),
('ownership', models.CharField(choices=[(b'emerald', b'Emerald'), (b'private', b'Private')], default=b'emerald', max_length=50)),
('division', models.CharField(choices=[(b'sales', b'Sales'), (b'marketing', b'Marketing'), (b'other', b'Other')], default=b'sales', max_length=120)),
('status_at_create', models.CharField(choices=[(b'new', b'New'), (b'used', b'Used')], default=b'new', max_length=50)),
('make', models.CharField(max_length=255)),
('model', models.CharField(max_length=255)),
('year_model', models.IntegerField(blank=True, null=True)),
('registration_number', models.CharField(max_length=255)),
('registration_date', models.DateTimeField(blank=True, null=True)),
('licence_disk_expiry', models.DateTimeField(blank=True, null=True)),
('vin_number', models.CharField(max_length=255)),
('engine_number', models.CharField(max_length=255)),
('colour', models.CharField(blank=True, max_length=255, null=True)),
('transmission', models.CharField(choices=[(b'automatic', b'Automatic'), (b'manual', b'Manual')], default=b'manual', max_length=50)),
('fuel_type', models.CharField(choices=[(b'petrol', b'Petrol'), (b'diesel', b'Diesel')], default=b'petrol', max_length=120)),
('engine_capacity', models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('tank_capacity', models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('delivery_mileage', models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('has_aircon', models.NullBooleanField()),
('has_radio', models.NullBooleanField()),
('has_bluetooth', models.NullBooleanField()),
('has_jack', models.NullBooleanField()),
('has_spanner', models.NullBooleanField()),
('has_triangle', models.NullBooleanField()),
],
options={
'default_permissions': [],
'abstract': False,
},
),
migrations.CreateModel(
name='VehicleDocument',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('changed_at', models.DateTimeField(auto_now=True, null=True)),
('deleted', models.BooleanField(db_index=True, default=False)),
],
options={
'default_permissions': [],
'abstract': False,
},
),
migrations.CreateModel(
name='VehicleDriver',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('changed_at', models.DateTimeField(auto_now=True, null=True)),
('deleted', models.BooleanField(db_index=True, default=False)),
('start_date', models.DateTimeField()),
('end_date', models.DateTimeField(blank=True, null=True)),
],
options={
'default_permissions': [],
'abstract': False,
},
),
migrations.CreateModel(
name='VehicleMaintenance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('changed_at', models.DateTimeField(auto_now=True, null=True)),
('deleted', models.BooleanField(db_index=True, default=False)),
('plan_type', models.CharField(choices=[(b'service_plan', b'Service Plan'), (b'maintenance_plan', b'Maintenance Plan'), (b'warranty', b'Warranty')], max_length=50)),
('end_date', models.DateTimeField(blank=True, null=True)),
('end_mileage', models.IntegerField(blank=True, default=0, null=True, validators=[django.core.validators.MinValueValidator(0)])),
],
options={
'default_permissions': [],
'abstract': False,
},
),
migrations.CreateModel(
name='VehicleTyre',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('changed_at', models.DateTimeField(auto_now=True, null=True)),
('deleted', models.BooleanField(db_index=True, default=False)),
('make', models.CharField(max_length=255)),
('size', models.CharField(max_length=255)),
('position', models.CharField(choices=[(b'fr', b'Front Right'), (b'fl', b'Front Left'), (b'rr', b'Rear Right'), (b'rl', b'Rear Left'), (b'spare', b'Spare')], max_length=50)),
('serial_number', models.CharField(blank=True, max_length=255, null=True)),
('mileage_at_replacement', models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('in_use', models.BooleanField(default=False)),
],
options={
'default_permissions': [],
'abstract': False,
},
),
]
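    # Minimal sketch of applying this initial migration (assumes a configured
    # Django project; '<app_label>' stands in for the app that owns these
    # models):
    #   python manage.py migrate <app_label>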
ec6e699a83f67d50c9c59a80ae9d69f5c22538c3 | 24,029 | py | Python | empyric/collection/sourcemeters.py | dmerthe/expyriments | 33e94921c978b3bbe658cffaf0335d4d5a0bf87c | ["MIT"]
import os
import re
import datetime
import warnings
import numpy as np
import pandas as pd
import numbers
from empyric.adapters import *
from empyric.collection.instrument import *
class Keithley2400(Instrument):
"""
Keithley 2400 Sourcemeter, a 20 W power supply and picoammeter
"""
name = 'Keithley2400'
supported_adapters = (
(LinuxGPIB, {}),
(PrologixGPIB, {}),
(VISAGPIB, {})
)
# Available knobs
knobs = (
'voltage',
'fast voltages',
'current',
'voltage range',
'voltage limit',
'current range',
'current limit',
'nplc',
'delay',
'output',
'source',
'meter',
'source delay'
)
presets = {
'source': 'voltage',
'meter': 'current',
        'voltage': 0,
'output': 'ON',
'voltage range': 200,
'current range': 100e-3,
'nplc': 1,
        'source delay': 0,
}
postsets = {
'voltage': 0,
'output': 'OFF'
}
# Available meters
meters = (
'voltage',
'current',
'fast currents'
)
fast_voltages = None
current_ranges = (1e-6, 10e-6, 100e-6, 1e-3, 10e-3, 100e-3, 1, 'AUTO') # allowed current range settings
voltage_ranges = (0.2, 2, 20, 200, 'AUTO') # allowed voltage range settings
ovp_levels = (20, 40, 60, 80, 100, 120, 140, 160, 210) # over-voltage protection level settings
@setter
def set_source(self, variable):
if variable not in ['voltage', 'current']:
raise ValueError('Source must be either "current" or "voltage"')
self.write(':SOUR:CLE:AUTO OFF') # disable auto output-off
self.set_output('OFF')
if variable == 'voltage':
self.write(':SOUR:FUNC VOLT')
self.current = None
if variable == 'current':
self.write(':SOUR:FUNC CURR')
self.voltage = None
@setter
def set_meter(self, variable):
if variable not in ['voltage', 'current']:
            raise ValueError('Meter must be either "current" or "voltage"')
if variable == 'voltage':
self.write(':SENS:FUNC "VOLT"')
self.write(':FORM:ELEM VOLT')
if variable == 'current':
self.write(':SENS:FUNC "CURR"')
self.write(':FORM:ELEM CURR')
@setter
def set_output(self, output):
if output in [0, 'OFF', 'off']:
self.write(':OUTP OFF')
self.query(':OUTP?') # for some reason, this is needed to ensure output off
elif output in [1, 'ON', 'on']:
self.write(':OUTP ON')
else:
            raise ValueError(f'Output setting {output} not recognized!')
@measurer
def measure_voltage(self):
if self.meter != 'voltage':
self.set_meter('voltage')
self.set_output('ON')
def validator(response):
            match = re.match(r'.\d\.\d+E.\d\d', response)
return bool(match)
return float(self.query(':READ?', validator=validator))
@measurer
def measure_current(self):
if self.meter != 'current':
self.set_meter('current')
self.set_output('ON')
self.write(':TRIG:COUN 1')
def validator(response):
            match = re.match(r'.\d\.\d+E.\d\d', response)
return bool(match)
return float(self.query(':READ?', validator=validator))
@setter
def set_voltage(self, voltage):
if self.source != 'voltage':
            warnings.warn('Switching source mode to voltage!')
self.set_source('voltage')
self.set_output('ON')
self.write(':SOUR:VOLT:LEV %.2E' % voltage)
@setter
def set_current(self, current):
if self.source != 'current':
            warnings.warn('Switching source mode to current!')
self.set_source('current')
self.set_output('ON')
self.write(':SOUR:CURR:LEV %.2E' % current)
@setter
def set_voltage_range(self, voltage_range):
if voltage_range in self.voltage_ranges:
if self.source == 'voltage':
self.write(':SOUR:VOLT:RANGE %.2E' % voltage_range)
else:
if voltage_range == 'AUTO':
self.write(':SENS:VOLT:RANGE AUTO')
else:
self.write(':SENS:VOLT:RANGE %.2E' % voltage_range)
else:
first_line = f'Given voltage range {voltage_range} is not a valid value for {self.name}\n'
second_line = f'Valid values are {self.voltage_ranges}'
raise ValueError(first_line + second_line)
@setter
def set_voltage_limit(self, voltage_limit):
if self.source == 'voltage':
if int(voltage_limit) in self.ovp_levels:
self.write(':SOUR:VOLT:PROT %d' % int(voltage_limit))
else:
first_line = f'{self.name} is sourcing voltage, but given voltage limit {voltage_limit} is not a valid value\n'
second_line = f'Valid values are {self.ovp_levels}'
raise ValueError(first_line + second_line)
else:
self.write(':SENS:VOLT:PROT %.2E' % voltage_limit)
@setter
def set_current_range(self, current_range):
if current_range in self.current_ranges:
if self.source == 'current':
self.write(':SOUR:CURR:RANGE %.2E' % current_range)
else:
if current_range == 'AUTO':
self.write(':SENS:CURR:RANGE AUTO')
else:
self.write(':SENS:CURR:RANGE %.2E' % current_range)
else:
first_line = f'Given current range {current_range} is not a valid value for {self.name}\n'
second_line = f'Valid values are {self.current_ranges}'
raise ValueError(first_line + second_line)
@setter
def set_current_limit(self, current_limit):
self.write(':SENS:CURR:PROT %.2E' % current_limit)
@setter
def set_nplc(self, nplc):
if self.meter == 'current':
self.write(':SENS:CURR:NPLC %.2E' % nplc)
elif self.meter == 'voltage':
self.write(':SENS:VOLT:NPLC %.2E' % nplc)
@setter
def set_delay(self, delay):
self.adapter.delay = delay
@setter
def set_fast_voltages(self, voltages):
self.fast_voltages = voltages
# import fast voltages, if specified as a path
        if isinstance(self.fast_voltages, str):  # can be specified as a path
try:
fast_voltage_data = pd.read_csv(self.fast_voltages)
except FileNotFoundError:
# probably in an experiment data directory; try going up a level
working_subdir = os.getcwd()
os.chdir('..')
fast_voltage_data = pd.read_csv(self.fast_voltages)
os.chdir(working_subdir)
columns = fast_voltage_data.columns
self.fast_voltages = fast_voltage_data[columns[0]].astype(float).values
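    # Note: only the first column of the CSV is used as the voltage list; the
    # chdir fallback above assumes the file sits one directory above the
    # current experiment data directory.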
@measurer
def measure_fast_currents(self):
if self.source != 'voltage':
self.set_source('voltage')
self.set_output('ON')
        if self.fast_voltages is None or len(self.fast_voltages) == 0:
            raise ValueError('Fast IV sweep voltages have not been set!')
self.write(':SOUR:VOLT:MODE LIST')
list_length = len(self.fast_voltages)
if list_length >= 100: # can only take 100 voltages at a time
sub_lists = [self.fast_voltages[i*100:(i+1)*100] for i in range(list_length // 100)]
else:
sub_lists = []
if list_length % 100 > 0:
sub_lists.append(self.fast_voltages[-(list_length % 100):])
current_list = []
normal_timeout = self.adapter.timeout
self.adapter.timeout = None # the response times can be long
for voltage_list in sub_lists:
voltage_str = ', '.join(['%.4E' % voltage for voltage in voltage_list])
self.write(':SOUR:LIST:VOLT ' + voltage_str)
self.write(':TRIG:COUN %d' % len(voltage_list))
raw_response = self.query(':READ?').strip()
current_list += [float(current_str) for current_str in raw_response.split(',')]
self.adapter.timeout = normal_timeout # put it back
self.write(':SOUR:VOLT:MODE FIX')
self.write(':TRIG:COUN 1')
return np.array(current_list)
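    # Sketch of a fast IV sweep using the list-mode batching above
    # (hypothetical voltage ramp; currents are returned in the same order
    # as the voltages):
    #   smu.set_fast_voltages([0.0, 0.5, 1.0, 1.5, 2.0])
    #   currents = smu.measure_fast_currents()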
@setter
def set_source_delay(self, delay):
self.write(':SOUR:DEL %.4E' % delay)
class Keithley2460(Instrument):
"""
Keithley 2460 Sourcemeter, a 100 W power supply and picoammeter
"""
name = 'Keithley2460'
supported_adapters = (
(LinuxGPIB, {}),
(PrologixGPIB, {}),
(VISAGPIB, {})
)
# Available knobs
knobs = (
'voltage',
'fast voltages',
'current',
'voltage range',
'voltage limit',
'current range',
'current limit',
'nplc',
'delay',
'output',
'source',
'meter',
'remote sense',
'source delay'
)
presets = {
'source': 'voltage',
'meter': 'current',
        'voltage': 0,
'output': 'ON',
'nplc': 1,
'source delay': 0,
'remote sense': 'OFF'
}
postsets = {
'voltage': 0,
'output': 'OFF'
}
# Available meters
meters = (
'voltage',
'current',
'fast currents'
)
fast_voltages = None
current_ranges = (1e-6, 10e-6, 100e-6, 1e-3, 10e-3, 100e-3, 1, 4, 5, 7)
voltage_ranges = (0.2, 2, 7, 10, 20, 100)
ovp_levels = (2, 5, 10, 20, 40, 60, 80, 100, 120, 140, 160, 180)
@setter
def set_source(self, variable):
if variable not in ['voltage', 'current']:
raise ValueError('Source must be either "current" or "voltage"')
self.set_output('OFF')
if variable == 'voltage':
self.write('SOUR:FUNC VOLT')
self.current = None
if variable == 'current':
self.write('SOUR:FUNC CURR')
self.voltage = None
@setter
def set_meter(self, variable):
if variable == 'voltage':
self.write('SENS:FUNC "VOLT"')
self.write('DISP:VOLT:DIG 5')
elif variable == 'current':
self.write('SENS:FUNC "CURR"')
self.write('DISP:CURR:DIG 5')
else:
            raise ValueError('Meter must be either "current" or "voltage"')
@setter
def set_output(self, output):
if output in [0, 'OFF', 'off']:
self.write(':OUTP OFF')
elif output in [1, 'ON', 'on']:
self.write(':OUTP ON')
else:
raise ValueError(f'Output setting {output} not recognized!')
@measurer
def measure_voltage(self):
if self.meter != 'voltage':
self.set_meter('voltage')
if self.output == 'ON':
return float(self.query('READ?').strip())
else:
return np.nan
@measurer
def measure_current(self):
if self.meter != 'current':
self.set_meter('current')
if self.output == 'ON':
return float(self.query('READ?').strip())
else:
return 0
@setter
def set_voltage(self, voltage):
if self.source != 'voltage':
            warnings.warn('Switching sourcing mode to voltage!')
            self.set_source('voltage')
        self.set_output('ON')  # output is automatically shut off when the source mode is changed
self.write('SOUR:VOLT:LEV %.4E' % voltage)
@setter
def set_current(self, current):
if self.source != 'current':
            warnings.warn('Switching sourcing mode to current!')
            self.set_source('current')
        self.set_output('ON')  # output is automatically shut off when the source mode is changed
self.write('SOUR:CURR:LEV %.4E' % current)
@setter
def set_voltage_range(self, voltage_range):
if voltage_range in self.voltage_ranges:
if self.source == 'voltage':
self.write(':SOUR:VOLT:RANGE %.2E' % voltage_range)
else:
if voltage_range == 'AUTO':
self.write(':SENS:VOLT:RANGE:AUTO ON')
else:
self.write(':SENS:VOLT:RANGE %.2E' % voltage_range)
else:
first_line = f'Given voltage range {voltage_range} is not a valid value for {self.name}\n'
second_line = f'Valid values are {self.voltage_ranges}'
raise ValueError(first_line + second_line)
@setter
def set_voltage_limit(self, voltage_limit):
if self.source == 'voltage':
if int(voltage_limit) in self.ovp_levels:
self.write(':SOUR:VOLT:PROT PROT%d' % int(voltage_limit))
else:
first_line = f'{self.name} is sourcing voltage, but given voltage limit {voltage_limit} is not a valid value\n'
second_line = f'Valid values are {self.ovp_levels}'
raise ValueError(first_line + second_line)
else:
self.write(':SOUR:CURR:VLIM %.2E' % voltage_limit)
@setter
def set_current_range(self, current_range):
if current_range in self.current_ranges:
if self.source == 'current':
self.write(':SOUR:CURR:RANGE %.2E' % current_range)
else:
if current_range == 'AUTO':
self.write(':SENS:CURR:RANGE:AUTO ON')
else:
self.write(':SENS:CURR:RANGE %.2E' % current_range)
else:
first_line = f'Given current range {current_range} is not a valid value for {self.name}\n'
second_line = f'Valid values are {self.current_ranges}'
raise ValueError(first_line + second_line)
@setter
def set_current_limit(self, current_limit):
self.write(':SOUR:VOLT:ILIM %.2E' % current_limit)
@setter
def set_nplc(self, nplc):
if self.meter == 'current':
self.write('CURR:NPLC %.2E' % nplc)
elif self.meter == 'voltage':
self.write('VOLT:NPLC %.2E' % nplc)
@setter
def set_delay(self, delay):
self.adapter.delay = delay
@setter
def set_fast_voltages(self, voltages):
self.fast_voltages = voltages
# import fast voltages, if specified as a path
        if isinstance(self.fast_voltages, str):  # can be specified as a path
try:
fast_voltage_data = pd.read_csv(self.fast_voltages)
except FileNotFoundError:
# probably in an experiment data directory; try going up a level
working_subdir = os.getcwd()
os.chdir('..')
fast_voltage_data = pd.read_csv(self.fast_voltages)
os.chdir(working_subdir)
columns = fast_voltage_data.columns
self.fast_voltages = fast_voltage_data[columns[0]].astype(float).values
@measurer
def measure_fast_currents(self):
        if self.fast_voltages is None or len(self.fast_voltages) == 0:
            raise ValueError('Fast IV sweep voltages have not been set!')
list_length = len(self.fast_voltages)
if list_length >= 100:
sub_lists = [self.fast_voltages[i*100:(i+1)*100] for i in range(list_length // 100)]
else:
sub_lists = []
if list_length % 100 > 0:
sub_lists.append(self.fast_voltages[-(list_length % 100):])
current_list = []
normal_timeout = self.adapter.timeout
self.adapter.timeout = None # the response times can be long
start = datetime.datetime.now()
for voltage_list in sub_lists:
voltage_str = ', '.join(['%.4E' % voltage for voltage in voltage_list])
self.write('SOUR:LIST:VOLT ' + voltage_str)
self.write('SOUR:SWE:VOLT:LIST 1, %.2e' % self.source_delay)
self.write('INIT')
self.write('*WAI')
raw_response = self.query('TRAC:DATA? 1, %d, "defbuffer1", SOUR, READ' % len(voltage_list)).strip()
current_list += [float(current_str) for current_str in raw_response.split(',')[1::2]]
self.adapter.timeout = normal_timeout # put it back
end = datetime.datetime.now()
return np.array(current_list)
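    # In the loop above, the TRAC:DATA? query returns alternating source and
    # reading values for each point, so the [1::2] slice keeps only the
    # measured currents.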
@setter
def set_source_delay(self, delay):
if self.source == 'voltage':
self.write('SOUR:VOLT:DEL %.4e' % delay)
else:
self.write('SOUR:CURR:DEL %.4e' % delay)
@setter
def set_remote_sense(self, state):
        if state in [1, True, 'ON', 'on', '1']:
            self.write('VOLT:RSEN ON')
        else:
            self.write('VOLT:RSEN OFF')
self.set_output('ON')
@getter
def get_remote_sense(self):
if int(self.query('VOLT:RSEN?').strip()):
return 'ON'
else:
return 'OFF'
class Keithley2651A(Instrument):
"""
Keithley 2651A High Power (200 W) Sourcemeter
"""
name = 'Keithley2651A'
supported_adapters = (
(LinuxGPIB, {}),
(PrologixGPIB, {}),
(VISAGPIB, {})
)
# Available knobs
knobs = (
'voltage',
'fast voltages',
'current',
'voltage range',
'voltage limit',
'current range',
'current limit',
'nplc',
'output',
'source',
'meter',
'source delay'
)
presets = {
'voltage range': 40,
'current range': 5,
        'voltage': 0,
'output': 'ON',
'nplc': 1,
'source': 'voltage',
'meter': 'current',
        'source delay': 0
}
postsets = {
'voltage': 0,
'output': 'OFF'
}
# Available meters
meters = (
'voltage',
'current',
'fast currents'
)
fast_voltages = None
@setter
def set_source(self, variable):
if variable == 'voltage':
self.write('smua.source.func = smua.OUTPUT_DCVOLTS')
elif variable == 'current':
self.write('smua.source.func = smua.OUTPUT_DCAMPS')
else:
raise ValueError('source must be either "current" or "voltage"')
@setter
    def set_meter(self, variable):
self.write('display.screen = display.SMUA')
if variable == 'current':
self.write('display.smua.measure.func = display.MEASURE_DCAMPS')
if variable == 'voltage':
self.write('display.smua.measure.func = display.MEASURE_DCVOLTS')
# This sourcemeter does not require specifying the meter before taking a measurement
@setter
def set_output(self, output):
if output in [0, 'OFF', 'off']:
self.write('smua.source.output = smua.OUTPUT_OFF')
elif output in [1, 'ON', 'on']:
self.write('smua.source.output = smua.OUTPUT_ON')
else:
            raise ValueError(f'Output setting {output} not recognized!')
@measurer
def measure_voltage(self):
if self.meter != 'voltage':
self.set_meter('voltage')
if self.output == 'ON':
return float(self.query('print(smua.measure.v())').strip())
else:
return np.nan
@measurer
def measure_current(self):
if self.meter != 'current':
self.set_meter('current')
if self.output == 'ON':
return float(self.query('print(smua.measure.i())').strip())
else:
return 0
    @setter
    def set_voltage(self, voltage):
        if self.source != 'voltage':
            warnings.warn('Switching sourcing mode!')
            self.set_source('voltage')
        self.set_output('ON')  # output is automatically shut off when the source mode is changed
self.write(f'smua.source.levelv = {voltage}')
@setter
def set_current(self, current):
if self.source != 'current':
            warnings.warn('Switching sourcing mode!')
            self.set_source('current')
        self.set_output('ON')  # output is automatically shut off when the source mode is changed
self.write(f'smua.source.leveli = {current}')
@setter
def set_voltage_range(self, voltage_range):
if voltage_range == 'auto':
self.write('smua.source.autorangev = smua.AUTORANGE_ON')
else:
self.write(f'smua.source.rangev = {voltage_range}')
@setter
def set_voltage_limit(self, voltage_limit):
self.write(f'smua.source.limitv = {voltage_limit}')
@setter
def set_current_range(self, current_range):
if current_range == 'auto':
self.write('smua.source.autorangei = smua.AUTORANGE_ON')
else:
self.write(f'smua.source.rangei = {current_range}')
@setter
    def set_current_limit(self, current_limit):
self.write(f'smua.source.limiti = {current_limit}')
@setter
def set_nplc(self, nplc):
self.write(f'smua.measure.nplc = {nplc}')
@setter
def set_fast_voltages(self, voltages):
self.fast_voltages = voltages
# import fast voltages, if specified as a path
        if isinstance(self.fast_voltages, str):  # can be specified as a path
try:
fast_voltage_data = pd.read_csv(self.fast_voltages)
except FileNotFoundError:
# probably in an experiment data directory; try going up a level
working_subdir = os.getcwd()
os.chdir('..')
fast_voltage_data = pd.read_csv(self.fast_voltages)
os.chdir(working_subdir)
columns = fast_voltage_data.columns
self.fast_voltages = fast_voltage_data[columns[0]].astype(float).values
@measurer
def measure_fast_currents(self):
        if self.fast_voltages is None or len(self.fast_voltages) == 0:
            raise ValueError('Fast IV sweep voltages have not been set!')
voltage_lists = []
current_list = []
list_length = 100 # maximum number of voltages to sweep at a time
for i in range(len(self.fast_voltages) // list_length):
voltage_lists.append(self.fast_voltages[i*list_length:(i+1)*list_length])
remainder = len(self.fast_voltages) % list_length
if remainder:
voltage_lists.append(self.fast_voltages[-remainder:])
        normal_timeout = self.adapter.timeout
        self.adapter.timeout = 60  # give it up to a minute to do the sweep
start = datetime.datetime.now()
for voltage_list in voltage_lists:
voltage_string = ', '.join([f'{voltage}' for voltage in voltage_list])
self.write('vlist = {%s}' % voltage_string)
self.write(f'SweepVListMeasureI(smua, vlist, 0.01, {len(voltage_list)})')
raw_response = self.query(f'printbuffer(1, {len(voltage_list)}, smua.nvbuffer1)').strip()
current_list += [float(current_str) for current_str in raw_response.split(',')]
self.set_voltage(voltage_list[-1]) # hold last voltage until next sub-sweep
end = datetime.datetime.now()
        self.adapter.timeout = normal_timeout  # put it back
self.write('display.screen = display.SMUA')
self.write('display.smua.measure.func = display.MEASURE_DCAMPS')
return np.array(current_list)
@setter
    def set_source_delay(self, delay):
self.write(f'smua.source.delay = {delay}')
ecdf9fa40e482644a8b19c95883a66f9748605fb | 251 | py | Python | 04/01/class_method/combine.py | pylangstudy/201709 | 53d868786d7327a83bfa7f4149549c6f9855a6c6 | ["CC0-1.0"] | 32 stars (2017-09-01 to 2017-10-01)
import datetime
import time
# print(datetime.datetime.combine(datetime.date.today(), time.time()))  # TypeError: combine() argument 2 must be datetime.time, not float
print(datetime.datetime.combine(datetime.date.today(), datetime.datetime.now().time()))
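# datetime.combine() pairs a date with a datetime.time; time.time() returns a
# float epoch timestamp, hence the TypeError noted above. Illustrative output:
#   2017-09-30 09:15:42.123456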
01a46b85c05a1b25d1c8031ddf705db3c2b20146 | 89,499 | py | Python | 4.0/Controller/Controller.py | betosardinha/SONDA-Validation | eb48d8267dd90f5e315662e9b2e305a69af7328e | ["Apache-2.0"]
import os
import numpy as np
from tqdm import tqdm
from Loader import Loader
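# Overview (inferred from the validation routines below): level 1 applies
# BSRN-style physical limits to each solar and meteorological variable and
# writes a quality code for every value into loader.code -- 9 = valid,
# 552 = failed a limit test, and 3333 / -5555 / -6999 are instrument flags
# carried through from the raw data.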
class Controller:
def __init__(self, input1=None, input2=None):
# Constant values used to get solar geometry data
self.d0 = 0.006918
self.dc1 = 0.399912
self.dc2 = 0.006758
self.dc3 = 0.002697
self.ds1 = 0.070257
self.ds2 = 0.000907
self.ds3 = 0.001480
self.et0 = 0.000075
self.tc1 = 0.001868
self.tc2 = 0.014615
self.ts1 = 0.032077
self.ts2 = 0.040890
self.e1 = 1.000110
self.e2 = 0.034221
self.e3 = 0.001280
self.e4 = 0.000719
self.e5 = 0.000077
# Threshold values used to qualify solar and meteorological data - level 1
self.HUMI_MX = 100
self.HUMI_MI = 0
self.PRES_MX = None
self.PRES_MI = None
self.TEMP_MX = None
self.TEMP_MI = None
self.PREC_MX = None
self.PREC_MI = 0
self.WS10_MX = 25
self.WS10_MI = 0
self.WD10_MX = 360
self.WD10_MI = 0
# BSRN criteria for solar data qualification
self.LWDN_MX = 700
self.LWDN_MI = 40
self.GLOBAL_MX = None
self.GLOBAL_MI = -4
self.DIFUSE_MX = None
self.DIFUSE_MI = -4
self.DIRECT_MX = None
self.DIRECT_MI = -4
self.PAR_MX = None
self.PAR_MI = -4
self.LUX_MX = None
self.LUX_MI = -4
# Variables used to validate meteorological data - levels 2 and 3
self.temp_max = 0
self.temp_min = 999
self.temp1h = 59
self.temp12h = 719
self.variation_temp1h = None
self.variation_temp12h = None
self.pres_max = 0
self.pres_min = 999
self.pres3h = 179
self.variation_pres3h = None
self.prec_max = 0
self.prec_min = 999
self.prec1h = 59
self.prec24h = 1439
self.variation_prec1h = None
self.variation_prec24h = None
self.ws10_max = 0
self.ws10_min = 999
self.ws103h = 179
self.ws1012h = 719
self.variation_ws103h = None
self.variation_ws1012h = None
self.wd10_max = 0
self.wd10_min = 999
self.wd103h = 179
self.wd1018h = 1079
self.variation_wd103h = None
self.variation_wd1018h = None
# Variables used to get solar geometry data
self.e0 = None
self.u0 = None # Cosine of solar zenith angle
self.zenith_angle = None # Zenith angle
self.rtoa = None # Solar Irradiation at the top of atmosphere
self.sa = None
self.day_angle = None # Diary angle
self.dec = None # Declination angle
self.eqtime = None # Equation time
self.tcorr = None # Time correction
self.hour_angle = None # Hour angle
# Other variables
self.CDR = np.pi / 180
self.rows = None # total file lines
self.cont = 0 # Count rows number
self.num = None # Measurement time in minutes
self.dia_jul = None # Day number
self.horacorr = None # Time correction considering longitude data for the measurement site
self.div = None # Measurement time in decimal hours
self.i = None
self.cont_std = 0
self.pb = None
self.dialog = None
self.title = None
# Variables used to count meteorological data valid - level 2 and 3
self.contTempValid = 0
self.contPresValid = 0
self.contPrecValid = 0
self.contWspdValid = 0
self.contWdirValid = 0
# Variables used to save the last valid meteorological data - level 2 and 3
self.lastTempValid = None
self.lastPresValid = None
self.lastPrecValid = None
self.lastWs10Valid = None
self.lastWd10Valid = None
# Variables for clear sky model
self.Isc = 1367.0
self.Wo = 0.95
self.Fc = 0.84
self.Iglob = None
self.porc = None
self.kt = None
self.kn = None
self.loader = None
if input1 is not None:
self.loader = Loader.Loader()
self.loader.buildsMatrixData(input1)
self.loader.buildsMatrixCode(input1)
if input2 is not None:
self.title = input2
def progressBar(self):
        self.pb = tqdm(total=self.rows*4, desc="Validation - " + self.title)
def validate(self, latitude, longitude, station, month):
self.rows = self.loader.getRows() - 1
self.progressBar()
for i in range(self.rows+1):
            # Assignment statements repeated at each validation level - perhaps unnecessary
            self.num = self.loader.data[i][3]
            self.div = self.num / 60  # Measurement time in UTC hours
self.dia_jul = int(self.loader.data[i][2])
# Calculating astronomical data
self.day_angle = (2 * np.pi / 365.25 * self.dia_jul)
self.dec = (self.d0 - self.dc1 * np.cos(self.day_angle) + self.ds1 * np.sin(self.day_angle) - self.dc2 * np.cos(2 * self.day_angle) + self.ds2 * np.sin(2 * self.day_angle) - self.dc3 * np.cos(3 * self.day_angle) + self.ds3 * np.sin(3 * self.day_angle))
self.eqtime = (self.et0 + self.tc1 * np.cos(self.day_angle) - self.ts1 * np.sin(self.day_angle) - self.tc2 * np.cos(2 * self.day_angle) - self.ts2 * np.sin(2 * self.day_angle)) * 229.18
self.tcorr = (self.eqtime + 4 * (longitude - 0)) / 60
self.horacorr = self.tcorr + self.div # Local time obtained from utc time
self.hour_angle = (12.00 - self.horacorr) * 15
self.e0 = self.e1 + self.e2 * np.cos(self.day_angle) + self.e3 * np.sin(self.day_angle) + self.e4 * np.cos(2 * self.day_angle) + self.e5 * np.sin(2 * self.day_angle)
self.u0 = np.sin(self.dec) * np.sin(latitude * self.CDR) + np.cos(self.dec) * np.cos(latitude * self.CDR) * np.cos(self.hour_angle * self.CDR)
self.zenith_angle = np.arccos(self.u0) * 180 / np.pi
self.sa = 1368 * self.e0
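            # The Fourier series above for the declination and the equation of
            # time match Spencer's (1971) expansions; 229.18 = 24*60/(2*pi)
            # converts the equation of time from radians to minutes, and
            # 1368 W/m^2 is the solar constant used for the top-of-atmosphere
            # irradiance self.sa.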
# Start level 1
# Routine to check the misalignment of the tracker
if self.zenith_angle < 87:
if self.loader.data[i][4] != 3333 and self.loader.data[i][4] != -5555 and self.loader.data[i][4] != -6999:
if self.loader.data[i][28] != 3333 and self.loader.data[i][28] != -5555 and self.loader.data[i][28] != -6999:
if self.loader.data[i][4] > 50:
self.rtoa = self.sa * self.u0
self.kt = self.loader.data[i][4] / self.rtoa
self.kn = self.loader.data[i][28] / self.loader.data[i][4]
if self.kt >= 0.50:
if self.kn > 0.30:
self.loader.code[i][8] = 9
self.loader.code[i][28] = 9
else:
self.loader.code[i][8] = 552
self.loader.code[i][28] = 552
elif self.kt >= 0.40 and self.kt < 0.50:
if self.kn > 0.10:
self.loader.code[i][8] = 9
self.loader.code[i][28] = 9
else:
self.loader.code[i][8] = 552
self.loader.code[i][28] = 552
# End of routine to check the misalignment of the tracker
# ----------------------------------------------------------------------------------------------------------------------
# Start of the routine validation: Global Radiation (W/m²) level 1
if self.loader.data[i][4] != 3333:
if self.loader.data[i][4] != -5555:
if self.loader.data[i][4] != -6999:
if self.loader.data[i][5] != 0:
if self.u0 > 0:
self.GLOBAL_MX = (self.sa * 1.5 * (self.u0**1.2) + 100)
else:
self.GLOBAL_MX = 100
if self.loader.data[i][4] > self.GLOBAL_MI and self.loader.data[i][4] < self.GLOBAL_MX:
self.loader.code[i][4] = 9
else:
self.loader.code[i][4] = 552
else:
if self.zenith_angle > 90:
if self.u0 > 0:
self.GLOBAL_MX = (self.sa * 1.5 * (self.u0 ** 1.2) + 100)
else:
self.GLOBAL_MX = 100
if self.loader.data[i][4] > self.GLOBAL_MI and self.loader.data[i][4] < self.GLOBAL_MX:
self.loader.code[i][4] = 9
else:
self.loader.code[i][4] = 552
else:
self.cont_std += 1
if self.loader.data[i][4] != self.loader.data[i-1][4] and self.loader.data[i][4] != self.loader.data[i+1][4]:
if self.u0 > 0:
self.GLOBAL_MX = (self.sa * 1.5 * (self.u0 ** 1.2) + 100)
else:
self.GLOBAL_MX = 100
if self.loader.data[i][4] > self.GLOBAL_MI and self.loader.data[i][4] < self.GLOBAL_MX:
self.loader.code[i][4] = 9
else:
self.loader.code[i][4] = 552
else:
self.loader.code[i][4] = 552
else:
self.loader.code[i][4] = -6999
else:
self.loader.code[i][4] = -5555
else:
self.loader.code[i][4] = 3333
# End of the routine validation: Global Radiation (W/m²) level 1
# ----------------------------------------------------------------------------------------------------------------------
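            # The upper bound used above is BSRN's "physically possible" limit
            # for global radiation, GLOBAL_MX = 1.5 * Sa * u0**1.2 + 100 W/m^2,
            # with Sa the distance-corrected solar constant and u0 the cosine
            # of the solar zenith angle; the diffuse, PAR, and lux routines
            # below scale the same functional form.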
# Start of the routine validation: Diffuse Radiation (W/m²) level 1
if self.loader.data[i][8] != 3333:
if self.loader.data[i][8] != -5555:
if self.loader.data[i][8] != -6999:
if self.loader.code[i][8] != 552:
if self.loader.data[i][9] != 0:
if self.u0 > 0:
self.DIFUSE_MX = (self.sa * 0.95 * (self.u0**1.2) + 50)
else:
self.DIFUSE_MX = 50
if self.loader.data[i][8] > self.DIFUSE_MI and self.loader.data[i][8] < self.DIFUSE_MX:
self.loader.code[i][8] = 9
else:
self.loader.code[i][8] = 552
else:
if self.zenith_angle > 90:
if self.u0 > 0:
self.DIFUSE_MX = (self.sa * 0.95 * (self.u0**1.2) + 50)
else:
self.DIFUSE_MX = 50
if self.loader.data[i][8] > self.DIFUSE_MI and self.loader.data[i][8] < self.DIFUSE_MX:
self.loader.code[i][8] = 9
else:
self.loader.code[i][8] = 552
else:
self.cont_std += 1
if self.loader.data[i][8] != self.loader.data[i-1][8] and self.loader.data[i][8] != self.loader.data[i+1][8]:
if self.u0 > 0:
self.DIFUSE_MX = (self.sa * 0.95 * (self.u0 ** 1.2) + 50)
else:
self.DIFUSE_MX = 50
if self.loader.data[i][8] > self.DIFUSE_MI and self.loader.data[i][8] < self.DIFUSE_MX:
self.loader.code[i][8] = 9
else:
self.loader.code[i][8] = 552
else:
self.loader.code[i][8] = 552
else:
self.loader.code[i][8] = -6999
else:
self.loader.code[i][8] = -5555
else:
self.loader.code[i][8] = 3333
# End of the routine validation: Diffuse Radiation (W/m²) level 1
# ----------------------------------------------------------------------------------------------------------------------
            # Start of the routine validation: PAR Radiation (µmol s⁻¹ m⁻²) level 1
if self.loader.data[i][12] != 3333:
if self.loader.data[i][12] != -5555:
if self.loader.data[i][12] != -6999:
if self.loader.data[i][13] != 0:
if self.u0 > 0:
self.PAR_MX = 2.07 * (self.sa * 1.5 * (self.u0**1.2) + 100)
else:
self.PAR_MX = 2.07 * 100
if self.loader.data[i][12] > self.PAR_MI and self.loader.data[i][12] < self.PAR_MX:
self.loader.code[i][12] = 9
else:
self.loader.code[i][12] = 552
else:
if self.zenith_angle > 90:
if self.u0 > 0:
self.PAR_MX = 2.07 * (self.sa * 1.5 * (self.u0 ** 1.2) + 100)
else:
self.PAR_MX = 2.07 * 100
if self.loader.data[i][12] > self.PAR_MI and self.loader.data[i][12] < self.PAR_MX:
self.loader.code[i][12] = 9
else:
self.loader.code[i][12] = 552
else:
self.cont_std += 1
if self.loader.data[i][12] != self.loader.data[i-1][12] and self.loader.data[i][12] != self.loader.data[i+1][12]:
if self.u0 > 0:
self.PAR_MX = 2.07 * (self.sa * 1.5 * (self.u0 ** 1.2) + 100)
else:
self.PAR_MX = 2.07 * 100
if self.loader.data[i][12] > self.PAR_MI and self.loader.data[i][12] < self.PAR_MX:
self.loader.code[i][12] = 9
else:
self.loader.code[i][12] = 552
else:
self.loader.code[i][12] = 552
else:
self.loader.code[i][12] = -6999
else:
self.loader.code[i][12] = -5555
else:
self.loader.code[i][12] = 3333
            # End of the routine validation: PAR Radiation (µmol s⁻¹ m⁻²) level 1
# ----------------------------------------------------------------------------------------------------------------------
# Start of the routine validation: Lux Radiation (kLux) level 1
if self.loader.data[i][16] != 3333:
if self.loader.data[i][16] != -5555:
if self.loader.data[i][16] != -6999:
if self.loader.data[i][17] != 0:
if self.u0 > 0:
self.LUX_MX = 0.115 * (self.sa * 1.5 * (self.u0**1.2) + 100)
else:
self.LUX_MX = 0.115 * 100
if self.loader.data[i][16] > self.LUX_MI and self.loader.data[i][16] < self.LUX_MX:
self.loader.code[i][16] = 9
else:
self.loader.code[i][16] = 552
else:
if self.zenith_angle > 90:
if self.u0 > 0:
self.LUX_MX = 0.115 * (self.sa * 1.5 * (self.u0 ** 1.2) + 100)
else:
self.LUX_MX = 0.115 * 100
if self.loader.data[i][16] > self.LUX_MI and self.loader.data[i][16] < self.LUX_MX:
self.loader.code[i][16] = 9
else:
self.loader.code[i][16] = 552
else:
self.cont_std += 1
if self.loader.data[i][16] != self.loader.data[i-1][16] and self.loader.data[i][16] != self.loader.data[i+1][16]:
if self.u0 > 0:
self.LUX_MX = 0.115 * (self.sa * 1.5 * (self.u0 ** 1.2) + 100)
else:
self.LUX_MX = 0.115 * 100
if self.loader.data[i][16] > self.LUX_MI and self.loader.data[i][16] < self.LUX_MX:
self.loader.code[i][16] = 9
else:
self.loader.code[i][16] = 552
else:
self.loader.code[i][16] = 552
else:
self.loader.code[i][16] = -6999
else:
self.loader.code[i][16] = -5555
else:
self.loader.code[i][16] = 3333
# End of the routine validation: Lux Radiation (kLux) level 1
# ----------------------------------------------------------------------------------------------------------------------
# Start of the routine validation: Air Temperature (°C) level 1
if self.loader.data[i][20] != 3333:
if self.loader.data[i][20] != -5555:
self.TEMP_MX = self.loader.getTempMax(os.path.abspath(".")+ os.path.sep + "." + "limits" + os.path.sep + "temp.max", station, month)
self.TEMP_MI = self.loader.getTempMin(os.path.abspath(".")+ os.path.sep + "." + "limits" + os.path.sep + "temp.min", station, month)
if self.loader.data[i][20] > self.TEMP_MI and self.loader.data[i][20] < self.TEMP_MX:
self.loader.code[i][20] = 9
else:
self.loader.code[i][20] = 552
else:
self.loader.code[i][20] = -5555
else:
self.loader.code[i][20] = 3333
# End of the routine validation: Air Temperature (°C) level 1
# ----------------------------------------------------------------------------------------------------------------------
# Start of the routine validation: Relative Air Humidity (%) level 1
if self.loader.data[i][21] != 3333:
if self.loader.data[i][21] != -5555:
if self.loader.data[i][21] > self.HUMI_MI and self.loader.data[i][21] <= self.HUMI_MX:
self.loader.code[i][21] = 9
else:
self.loader.code[i][21] = 552
else:
self.loader.code[i][21] = -5555
else:
self.loader.code[i][21] = 3333
# End of the routine validation: Relative Air Humidity (%) level 1
# ----------------------------------------------------------------------------------------------------------------------
# Start of the routine validation: Atmospheric Pressure (mbar) level 1
if self.loader.data[i][22] != 3333:
if self.loader.data[i][22] != -5555:
self.PRES_MX = self.loader.getPresMax(os.path.abspath(".")+ os.path.sep + "." +"limits" + os.path.sep + "pres.max", station)
self.PRES_MI = self.loader.getPresMin(os.path.abspath(".")+ os.path.sep + "." + "limits" + os.path.sep + "pres.min", station)
if self.loader.data[i][22] > self.PRES_MI and self.loader.data[i][22] < self.PRES_MX:
self.loader.code[i][22] = 9
else:
self.loader.code[i][22] = 552
else:
self.loader.code[i][22] = -5555
else:
self.loader.code[i][22] = 3333
# End of the routine validation: Atmospheric Pressure (mbar) level 1
# ----------------------------------------------------------------------------------------------------------------------
# Start of the routine validation: Accumulated Precipitation (mm) level 1
if self.loader.data[i][23] != 3333:
if self.loader.data[i][23] != -5555:
self.PREC_MX = self.loader.getPrecMax(os.path.abspath(".")+ os.path.sep + "." + "limits" + os.path.sep + "prec.max", station, month)
if self.loader.data[i][23] >= self.PREC_MI and self.loader.data[i][23] < self.PREC_MX:
self.loader.code[i][23] = 9
else:
self.loader.code[i][23] = 552
else:
self.loader.code[i][23] = -5555
else:
self.loader.code[i][23] = 3333
# End of the routine validation: Accumulated Precipitation (mm) level 1
# ----------------------------------------------------------------------------------------------------------------------
# Start of the routine validation: Wind Speed 10m (m/s) level 1
if self.loader.data[i][24] != 3333:
if self.loader.data[i][24] != -5555:
if self.loader.data[i][24] > self.WS10_MI and self.loader.data[i][24] < self.WS10_MX:
self.loader.code[i][24] = 9
else:
self.loader.code[i][24] = 552
else:
self.loader.code[i][24] = -5555
else:
self.loader.code[i][24] = 3333
# End of the routine validation: Wind Speed 10m (m/s) level 1
# ----------------------------------------------------------------------------------------------------------------------
# Start of the routine validation: Wind Direction 10m (°) level 1
if self.loader.data[i][25] != 3333:
if self.loader.data[i][25] != -5555:
if self.loader.data[i][26] != 0:
if self.loader.data[i][25] > self.WD10_MI and self.loader.data[i][25] < self.WD10_MX:
self.loader.code[i][25] = 9
else:
self.loader.code[i][25] = 552
else:
self.loader.code[i][25] = 552
else:
self.loader.code[i][25] = -5555
else:
self.loader.code[i][25] = 3333
# End of the routine validation: Wind Direction 10m (°) level 1
# ----------------------------------------------------------------------------------------------------------------------
# Start of the routine validation: Direct Radiation (W/m²) level 1
if self.loader.data[i][28] != 3333:
if self.loader.data[i][28] != -5555:
if self.loader.data[i][28] != -6999:
if self.loader.code[i][28] != 552:
if self.loader.data[i][29] != 0:
if self.u0 > 0:
self.DIRECT_MX = self.sa
else:
self.DIRECT_MX = 50
if self.loader.data[i][28] > self.DIRECT_MI and self.loader.data[i][28] < self.DIRECT_MX:
self.loader.code[i][28] = 9
else:
self.loader.code[i][28] = 552
else:
if self.zenith_angle > 90:
if self.u0 > 0:
self.DIRECT_MX = self.sa
else:
self.DIRECT_MX = 50
if self.loader.data[i][28] > self.DIRECT_MI and self.loader.data[i][28] < self.DIRECT_MX:
self.loader.code[i][28] = 9
else:
self.loader.code[i][28] = 552
else:
self.cont_std+=1
if self.loader.data[i][28] != self.loader.data[i-1][28] and self.loader.data[i][28] != self.loader.data[i+1][28]:
if self.u0 > 0:
self.DIRECT_MX = self.sa
else:
self.DIRECT_MX = 50
if self.loader.data[i][28] > self.DIRECT_MI and self.loader.data[i][28] < self.DIRECT_MX:
self.loader.code[i][28] = 9
else:
self.loader.code[i][28] = 552
else:
self.loader.code[i][28] = 552
else:
self.loader.code[i][28] = -6999
else:
self.loader.code[i][28] = -5555
else:
self.loader.code[i][28] = 3333
# End of the routine validation: Direct Radiation (W/m²) level 1
# ----------------------------------------------------------------------------------------------------------------------
# Start of the routine validation: Long Wave Radiation (W/m²) level 1
if self.loader.data[i][32] != 3333:
if self.loader.data[i][32] != -5555:
if self.loader.data[i][32] != -6999:
if self.loader.data[i][33] != 0:
if self.loader.data[i][32] > self.LWDN_MI and self.loader.data[i][32] < self.LWDN_MX:
self.loader.code[i][32] = 9
else:
self.loader.code[i][32] = 552
else:
if self.zenith_angle > 90:
if self.loader.data[i][32] > self.LWDN_MI and self.loader.data[i][32] < self.LWDN_MX:
self.loader.code[i][32] = 9
else:
self.loader.code[i][32] = 552
else:
self.cont_std+=1
if self.loader.data[i][32] != self.loader.data[i-1][32] and self.loader.data[i][32] != self.loader.data[i+1][32]:
if self.loader.data[i][32] > self.LWDN_MI and self.loader.data[i][32] < self.LWDN_MX:
self.loader.code[i][32] = 9
else:
self.loader.code[i][32] = 552
else:
self.loader.code[i][32] = 552
else:
self.loader.code[i][32] = -6999
else:
self.loader.code[i][32] = -5555
else:
self.loader.code[i][32] = 3333
self.pb.update(1)
# End of the routine validation: Long Wave Radiation (W/m²) level 1
# ----------------------------------------------------------------------------------------------------------------------
# End of loop level 1
# Start level 2
for i in range(self.rows + 1):
self.num = self.loader.data[i][3]
self.div = self.num / 60
self.dia_jul = int(self.loader.data[i][2])
# Calculating astronomical data
self.day_angle = (2 * np.pi / 365.25 * self.dia_jul)
self.dec = (self.d0 - self.dc1 * np.cos(self.day_angle) + self.ds1 * np.sin(self.day_angle) - self.dc2 * np.cos(2*self.day_angle) + self.ds2 * np.sin(2*self.day_angle) - self.dc3 * np.cos(3*self.day_angle) + self.ds3 * np.sin(3*self.day_angle))
self.eqtime = (self.et0 + self.tc1 * np.cos(self.day_angle) - self.ts1 *np.sin(self.day_angle) - self.tc2 * np.cos(2*self.day_angle) - self.ts2 * np.sin(2*self.day_angle)) * 229.18
self.tcorr = (self.eqtime + 4 * (longitude - 0)) / 60
self.horacorr = self.tcorr + self.div
self.hour_angle = (12.00 - self.horacorr) * 15
self.e0 = self.e1 + self.e2 * np.cos(self.day_angle) + self.e3 * np.sin(self.day_angle) + self.e4 * np.cos(2*self.day_angle) + self.e5 * np.sin(2*self.day_angle)
self.u0 = np.sin(self.dec) * np.sin(latitude * self.CDR) + np.cos(self.dec) * np.cos(latitude * self.CDR) * np.cos(self.hour_angle * self.CDR)
self.zenith_angle = (np.arccos(self.u0)) * 180 / np.pi
self.sa = 1368 * self.e0
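# (u0 is the cosine of the solar zenith angle and sa is the extraterrestrial irradiance at normal incidence,
# corrected for the Sun-Earth distance; both quantities feed the radiation limits computed below.)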
# BSRN criteria used to qualify solar data as RARE events
self.GLOBAL_MI = -2
self.DIFUSE_MI = -2
self.DIRECT_MI = -2
self.PAR_MI = -2
self.LUX_MI = -2
self.LWDN_MX = 500
self.LWDN_MI = 60
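# (These bounds correspond to the BSRN "extremely rare" limits: the short-wave channels are allowed small
# negative night-time offsets down to -2, and downwelling long-wave is expected between 60 and 500 W/m².)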
# Variables used to validate meteorological data - level 2
totalTemp1h_1 = self.rows - self.temp1h
totalTemp1h_2 = self.rows - self.temp1h + 1
totalPres3h_1 = self.rows - self.pres3h
totalPres3h_2 = self.rows - self.pres3h + 1
totalPrec1h_1 = self.rows - self.prec1h
totalPrec1h_2 = self.rows - self.prec1h + 1
totalWs103h_1 = self.rows - self.ws103h
totalWs103h_2 = self.rows - self.ws103h + 1
totalWd103h_1 = self.rows - self.wd103h
totalWd103h_2 = self.rows - self.wd103h + 1
# Start of the routine validation: Global Radiation (W/m²) level 2
if self.loader.code[i][4] != 3333 and self.loader.code[i][4] != -5555 and self.loader.code[i][4] != -6999 and self.loader.code[i][4] != 552:
if self.u0 > 0:
self.GLOBAL_MX = (self.sa * 1.2 * (self.u0**1.2) + 50)
else:
self.GLOBAL_MX = 50
if self.loader.data[i][4] > self.GLOBAL_MI and self.loader.data[i][4] < self.GLOBAL_MX:
self.loader.code[i][4] = 99
else:
self.loader.code[i][4] = 29
# End of the routine validation: Global Radiation (W/m²) level 2
# ----------------------------------------------------------------------------------------------------------------------
# Start of the routine validation: Diffuse Radiation (W/m²) level 2
if self.loader.code[i][8] != 3333 and self.loader.code[i][8] != -5555 and self.loader.code[i][8] != -6999 and self.loader.code[i][8] != 552:
if self.u0 > 0:
self.DIFUSE_MX = (self.sa * 0.75 * (self.u0**1.2) + 30)
else:
self.DIFUSE_MX = 30
if self.loader.data[i][8] > self.DIFUSE_MI and self.loader.data[i][8] < self.DIFUSE_MX:
self.loader.code[i][8] = 99
else:
self.loader.code[i][8] = 29
# End of the routine validation: Diffuse Radiation (W/m²) level 2
# ----------------------------------------------------------------------------------------------------------------------
# Start of the routine validation: PAR Radiation (µmol s⁻¹ m⁻²) level 2
if self.loader.code[i][12] != 3333 and self.loader.code[i][12] != -5555 and self.loader.code[i][12] != -6999 and self.loader.code[i][12] != 552:
if self.u0 > 0:
self.PAR_MX = 2.07 * (self.sa * 1.2 * (self.u0**1.2) + 50)
else:
self.PAR_MX = 2.07 * 50
if self.loader.data[i][12] > self.PAR_MI and self.loader.data[i][12] < self.PAR_MX:
self.loader.code[i][12] = 99
else:
self.loader.code[i][12] = 29
# End of the routine validation: PAR Radiation (µmol s⁻¹ m⁻²) level 2
# ----------------------------------------------------------------------------------------------------------------------
# Start of the routine validation: Lux Radiation (kLux) level 2
if self.loader.code[i][16] != 3333 and self.loader.code[i][16] != -5555 and self.loader.code[i][16] != -6999 and self.loader.code[i][16] != 552:
if self.u0 > 0:
self.LUX_MX = 0.115 * (self.sa * 1.2 * (self.u0**1.2) + 50)
else:
self.LUX_MX = 0.115 * 50
if self.loader.data[i][16] > self.LUX_MI and self.loader.data[i][16] < self.LUX_MX:
self.loader.code[i][16] = 99
else:
self.loader.code[i][16] = 29
# End of the routine validation: Lux Radiation (kLux) level 2
# ----------------------------------------------------------------------------------------------------------------------
# Start of the routine validation: Air Temperature (°C) level 2
if self.loader.code[i][20] != 3333 and self.loader.code[i][20] != -5555 and self.loader.code[i][20] != 552:
if i <= totalTemp1h_1:
j = 0
while j <= self.temp1h:
if self.loader.code[i+j][20] != 3333 and self.loader.code[i+j][20] != 552:
self.contTempValid += 1
if self.loader.data[i+j][20] > self.temp_max:
self.temp_max = self.loader.data[i+j][20]
if self.loader.data[i+j][20] < self.temp_min:
self.temp_min = self.loader.data[i+j][20]
self.variation_temp1h = self.temp_max - self.temp_min
j += 1
if self.contTempValid >= 40:
if self.variation_temp1h < 5:
self.loader.code[i][20] = 99
self.lastTempValid = self.loader.code[i][20]
else:
self.loader.code[i][20] = 529
else:
l = 0
while l <= self.temp1h:
if self.loader.code[i+l][20] != 3333 and self.loader.code[i+l][20] != 552:
if i == 0:
self.loader.code[i][20] = 559
else:
self.loader.code[i][20] = self.lastTempValid
l += 1
self.contTempValid = 0
self.temp_max = 0
self.temp_min = 999
if i >= totalTemp1h_2:
self.loader.code[i][20] = self.loader.code[totalTemp1h_1][20]
# End of the routine validation: Air Temperature (°C) level 2
# ----------------------------------------------------------------------------------------------------------------------
# Start of the routine validation: Atmospheric Pressure (mbar) level 2
if self.loader.code[i][22] != 3333 and self.loader.code[i][22] != -5555 and self.loader.code[i][22] != 552:
if i <= totalPres3h_1:
j = 0
while j <= self.pres3h:
if self.loader.code[i+j][22] != 3333 and self.loader.code[i+j][22] != 552:
self.contPresValid += 1
if self.loader.data[i+j][22] > self.pres_max:
self.pres_max = self.loader.data[i+j][22]
if self.loader.data[i+j][22] < self.pres_min:
self.pres_min = self.loader.data[i+j][22]
self.variation_pres3h = self.pres_max - self.pres_min
j += 1
if self.contPresValid >= 40:
if self.variation_pres3h < 6:
self.loader.code[i][22] = 99
self.lastPresValid = self.loader.code[i][22]
else:
self.loader.code[i][22] = 529
else:
l = 0
while l <= self.pres3h:
if self.loader.code[i+l][22] != 3333 and self.loader.code[i+l][22] != 552:
if i == 0:
self.loader.code[i][22] = 559
else:
self.loader.code[i][22] = self.lastPresValid
l += 1
self.contPresValid = 0
self.pres_max = 0
self.pres_min = 999
if i >= totalPres3h_2:
self.loader.code[i][22] = self.loader.code[totalPres3h_1][22]
# End of the routine validation: Atmospheric Pressure (mbar) level 2
# ----------------------------------------------------------------------------------------------------------------------
# Start of the routine validation: Accumulated Precipitation (mm) level 2
if self.loader.code[i][23] != 3333 and self.loader.code[i][23] != -5555 and self.loader.code[i][23] != 552:
if i <= totalPrec1h_1:
j = 0
while j <= self.prec1h:
if self.loader.code[i+j][23] != 3333 and self.loader.code[i+j][23] != 552:
self.contPrecValid += 1
if self.loader.data[i+j][23] > self.prec_max:
self.prec_max = self.loader.data[i+j][23]
if self.loader.data[i+j][23] < self.prec_min:
self.prec_min = self.loader.data[i+j][23]
self.variation_prec1h = self.prec_max - self.prec_min
j += 1
if self.contPrecValid >= 40:
if self.variation_prec1h < 25:
self.loader.code[i][23] = 99
self.lastPrecValid = self.loader.code[i][23]
else:
self.loader.code[i][23] = 529
else:
l = 0
while l <= self.prec1h:
if self.loader.code[i+l][23] != 3333 and self.loader.code[i+l][23] != 552:
if i == 0:
self.loader.code[i][23] = 559
else:
self.loader.code[i][23] = self.lastPrecValid
l += 1
self.contPrecValid = 0
self.prec_max = 0
self.prec_min = 999
if i >= totalPrec1h_2:
self.loader.code[i][23] = self.loader.code[totalPrec1h_1][23]
# End of the routine validation: Accumulated Precipitation (mm) level 2
# ----------------------------------------------------------------------------------------------------------------------
# Start of the routine validation: Wind Speed 10m (m/s) level 2
if self.loader.code[i][24] != 3333 and self.loader.code[i][24] != -5555 and self.loader.code[i][24] != 552:
if i <= totalWs103h_1:
j = 0
while j <= self.ws103h:
if self.loader.code[i+j][24] != 3333 and self.loader.code[i+j][24] != 552:
self.contWspdValid += 1
if self.loader.data[i+j][24] > self.ws10_max:
self.ws10_max = self.loader.data[i+j][24]
if self.loader.data[i+j][24] < self.ws10_min:
self.ws10_min = self.loader.data[i+j][24]
self.variation_ws103h = self.ws10_max - self.ws10_min
j += 1
if self.contWspdValid >= 40:
if self.variation_ws103h > 0.1:
self.loader.code[i][24] = 99
self.lastWs10Valid = self.loader.code[i][24]
else:
self.loader.code[i][24] = 529
else:
l = 0
while l <= self.ws103h:
if self.loader.code[i+l][24] != 3333 and self.loader.code[i+l][24] != 552:
if i == 0:
self.loader.code[i][24] = 559
else:
self.loader.code[i][24] = self.lastWs10Valid
l += 1
self.contWspdValid = 0
self.ws10_max = 0
self.ws10_min = 999
if i >= totalWs103h_2:
self.loader.code[i][24] = self.loader.code[totalWs103h_1][24]
# End of the routine validation: Wind Speed 10m (m/s) level 2
# ----------------------------------------------------------------------------------------------------------------------
# Start of the routine validation: Wind Direction 10m (°) level 2
if self.loader.code[i][25] != 3333 and self.loader.code[i][25] != -5555 and self.loader.code[i][25] != 552:
if i <= totalWd103h_1:
j = 0
while j <= self.wd103h:
if self.loader.code[i+j][25] != 3333 and self.loader.code[i+j][25] != 552:
self.contWdirValid += 1
if self.loader.data[i+j][25] > self.wd10_max:
self.wd10_max = self.loader.data[i+j][25]
if self.loader.data[i+j][25] < self.wd10_min:
self.wd10_min = self.loader.data[i+j][25]
self.variation_wd103h = self.wd10_max - self.wd10_min
j += 1
if self.contWdirValid >= 40:
if self.variation_wd103h > 1:
self.loader.code[i][25] = 99
self.lastWd10Valid = self.loader.code[i][25]
else:
self.loader.code[i][25] = 529
else:
l = 0
while l <= self.wd103h:
if self.loader.code[i+l][25] != 3333 and self.loader.code[i+l][25] != 552:
if i == 0:
self.loader.code[i][25] = 559
else:
self.loader.code[i][25] = self.lastWd10Valid
l += 1
self.contWdirValid = 0
self.wd10_max = 0
self.wd10_min = 999
if i >= totalWd103h_2:
self.loader.code[i][25] = self.loader.code[totalWd103h_1][25]
# End of the routine validation: Wind Direction 10m (°) level 2
# ----------------------------------------------------------------------------------------------------------------------
# Start of the routine validation: Direct Radiation (W/m²) level 2
if self.loader.code[i][28] != 3333 and self.loader.code[i][28] != -5555 and self.loader.code[i][28] != -6999 and self.loader.code[i][28] != 552:
if self.u0 > 0:
self.DIRECT_MX = (self.sa * 0.95 * (self.u0 ** 0.2) + 10)
else:
self.DIRECT_MX = 10
if self.loader.data[i][28] > self.DIRECT_MI and self.loader.data[i][28] < self.DIRECT_MX:
self.loader.code[i][28] = 99
else:
self.loader.code[i][28] = 29
# End of the routine validation: Direct Radiation (W/m²) level 2
# ----------------------------------------------------------------------------------------------------------------------
# Start of the routine validation: Long Wave Radiation (W/m²) level 2
if self.loader.code[i][32] != 3333 and self.loader.code[i][32] != -5555 and self.loader.code[i][32] != -6999 and self.loader.code[i][32] != 552:
if self.loader.data[i][32] > self.LWDN_MI and self.loader.data[i][32] < self.LWDN_MX:
self.loader.code[i][32] = 99
else:
self.loader.code[i][32] = 29
self.pb.update(1)
# End of the routine validation: Long Wave Radiation (W/m²) level 2
# ----------------------------------------------------------------------------------------------------------------------
# End of loop level 2
# Start level 3
for i in range(self.rows + 1):
self.num = self.loader.data[i][3]
self.div = self.num / 60
self.dia_jul = int(self.loader.data[i][2])
# Calculating astronomical geometry data
self.day_angle = (2 * np.pi / 365.25 * self.dia_jul)
self.dec = (self.d0 - self.dc1 * np.cos(self.day_angle) + self.ds1 * np.sin(self.day_angle) - self.dc2 * np.cos(2*self.day_angle) + self.ds2 * np.sin(2*self.day_angle) - self.dc3 * np.cos(3*self.day_angle) + self.ds3 * np.sin(3*self.day_angle))
self.eqtime = (self.et0 + self.tc1 * np.cos(self.day_angle) - self.ts1 * np.sin(self.day_angle) - self.tc2 * np.cos(2*self.day_angle) - self.ts2 * np.sin(2*self.day_angle)) * 229.18
self.tcorr = (self.eqtime + 4*(longitude-0)) / 60
self.horacorr = self.tcorr + self.div
self.hour_angle = (12.00 - self.horacorr) * 15
self.e0 = self.e1 + self.e2 * np.cos(self.day_angle) + self.e3 * np.sin(self.day_angle) + self.e4 * np.cos(2*self.day_angle) + self.e5 * np.sin(2*self.day_angle)
self.u0 = np.sin(self.dec) * np.sin(latitude * self.CDR) + np.cos(self.dec) * np.cos(latitude*self.CDR) * np.cos(self.hour_angle * self.CDR)
self.zenith_angle = (np.arccos(self.u0)) * 180 / np.pi
self.sa = 1368 * self.e0
# Variables used to validate radiation Global and Diffuse - level 3
difSw = 0
sumSw = 0
divSw = 0
# Variables used to validate radiation Direct - level 3
direct_h = None
direct_n = None
direct_p = None
# Variables used to validate radiation long wave - level 3
sigma = 5.67E-8
temp = None
temp_a = None
temp_b = None
# Variables used in the comparison of global radiation with par and lux - level 3
lux_global = None
par_global = None
lux_par = None
par_lux = None
mat_desvio = np.zeros(4)
mat_limite = np.zeros(4)
mat_lppl = None
# Variables used to validate meteorological data - level 3
totalTemp12h_1 = self.rows - self.temp12h
totalTemp12h_2 = self.rows - self.temp12h + 1
totalPrec24h_1 = self.rows - self.prec24h
totalPrec24h_2 = self.rows - self.prec24h + 1
totalWs1012h_1 = self.rows - self.ws1012h
totalWs1012h_2 = self.rows - self.ws1012h + 1
totalWd1018h_1 = self.rows - self.wd1018h
totalWd1018h_2 = self.rows - self.wd1018h + 1
# Start of the routine validation: Comparison of global radiation with par and lux level 3
if self.loader.code[i][4] != 3333 and self.loader.code[i][4] != -5555 and self.loader.code[i][4] != -6999 and self.loader.code[i][4] != 552 and self.loader.code[i][4] != 29:
if self.loader.code[i][12] != 3333 and self.loader.code[i][12] != -5555 and self.loader.code[i][12] != -6999 and self.loader.code[i][12] != 552 and self.loader.code[i][12] != 29:
if self.loader.code[i][16] != 3333 and self.loader.code[i][16] != -5555 and self.loader.code[i][16] != -6999 and self.loader.code[i][16] != 552 and self.loader.code[i][16] != 29:
if self.zenith_angle < 90:
lux_global = 0.115 * self.loader.data[i][4]
par_global = 2.07 * self.loader.data[i][4]
# There is some dependence on the photosynthetic photon flux, and a factor of 18 is not always the best choice.
# In this version (3.5) these conversion factors were recalculated by Prof. Enio.
lux_par = self.loader.data[i][12] / 20.83
par_lux = 20.83 * self.loader.data[i][16]
if self.loader.data[i][12] == 0:
self.loader.data[i][12] = 0.001
if self.loader.data[i][16] == 0:
self.loader.data[i][16] = 0.001
# LUX - LUX < -GLO
mat_desvio[0] = (np.abs(self.loader.data[i][16] - lux_global) / self.loader.data[i][16]) * 100
# LUX - LUX<-PAR
mat_desvio[2] = (np.abs(self.loader.data[i][16] - lux_par) / self.loader.data[i][16]) * 100
# PAR - PAR < -GLO
mat_desvio[1] = (np.abs(self.loader.data[i][12] - par_global) / self.loader.data[i][12]) * 100
# PAR - PAR < -LUX
mat_desvio[3] = (np.abs(self.loader.data[i][12] - par_lux) / self.loader.data[i][12]) * 100
if self.zenith_angle < 80:
# LUX<-GLO
if mat_desvio[0] < 9.5: # Standard 0
mat_limite[0] = 0
elif mat_desvio[0] < 33.5: # Standard 1
mat_limite[0] = 1
else: # Standard 2
mat_limite[0] = 2
# PAR<-GLO
if mat_desvio[1] < 12: # Standard 0
mat_limite[1] = 0
elif mat_desvio[1] < 34: # Standard 1
mat_limite[1] = 1
else: # Standard 2
mat_limite[1] = 2
# LUX<-PAR
if mat_desvio[2] < 7.25: # Standard 0
mat_limite[2] = 0
elif mat_desvio[2] < 15.5: # Standard 1
mat_limite[2] = 1
else: # Standard 2
mat_limite[2] = 2
# PAR<-LUX
if mat_desvio[3] < 6.75: # Standard 0
mat_limite[3] = 0
elif mat_desvio[3] < 13.5: # Standard 1
mat_limite[3] = 1
else: # Standard 2
mat_limite[3] = 2
elif self.zenith_angle <= 88:
# LUX<-GLO
if mat_desvio[0] < 16.5: # Standard 0
mat_limite[0] = 0
elif mat_desvio[1] < 65: # Standard 1 (original author's note: should this index be 0, i.e. mat_desvio[0]?)
mat_limite[0] = 1
else: # Standard 2
mat_limite[0] = 2
# PAR<-GLO
if mat_desvio[1] < 17: # Standard 0
mat_limite[1] = 0
elif mat_desvio[1] < 66.5: # Standard 1
mat_limite[1] = 1
else: # Standard 2
mat_limite[1] = 2
# LUX<-PAR
if mat_desvio[2] < 11.75: # Standard 0
mat_limite[2] = 0
elif mat_desvio[2] < 23.5: # Standard 1
mat_limite[2] = 1
else: # Standard 2
mat_limite[2] = 2
# PAR<-LUX
if mat_desvio[3] < 10.5: # Standard 0
mat_limite[3] = 0
elif mat_desvio[3] < 19: # Standard 1
mat_limite[3] = 1
else: # Standard 2
mat_limite[3] = 2
elif self.zenith_angle > 80 and self.zenith_angle <= 90:
# LUX<-GLO
if mat_desvio[0] < 45.5: # Standard 0
mat_limite[0] = 0
elif mat_desvio[0] < 100: # Standard 1
mat_limite[0] = 1
else: # Standard 2
mat_limite[0] = 2
# PAR<-GLO
if mat_desvio[1] < 51.25: # Standard 0
mat_limite[1] = 0
elif mat_desvio[1] < 99.25: # Standard 1
mat_limite[1] = 1
else: # Standard 2
mat_limite[1] = 2
# LUX<-PAR
if mat_desvio[2] < 17.75: # Standard 0
mat_limite[2] = 0
elif mat_desvio[2] < 43.5: # Standard 1
mat_limite[2] = 1
else: # Standard 2
mat_limite[2] = 2
# PAR<-LUX
if mat_desvio[3] < 15.5: # Standard 0
mat_limite[3] = 0
elif mat_desvio[3] < 30.5: # Standard 1
mat_limite[3] = 1
else: # Standard 2
mat_limite[3] = 2
if mat_limite[2] >= mat_limite[3]:
mat_lppl = mat_limite[2]
else:
mat_lppl = mat_limite[3]
# TODO: reduce the number of comparisons below
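# Editor's sketch (suggestion only, not part of the original routine): the 27-branch cascade below is
# equivalent to a table lookup keyed by (mat_limite[1], mat_lppl, mat_limite[0]), mapping to the pair of
# codes written to columns 16 (LUX) and 12 (PAR). For example, using the first few cases below:
#     lux_par_codes = {(0, 0, 0): (999, 999), (0, 0, 1): (999, 999), (0, 0, 2): (299, 299),
#                      (0, 1, 0): (999, 999), ...}
#     key = (int(mat_limite[1]), int(mat_lppl), int(mat_limite[0]))
#     self.loader.code[i][16], self.loader.code[i][12] = lux_par_codes[key]
# The explicit comparisons are kept below to preserve the original behaviour.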
if ((mat_limite[1] == 0) and (mat_lppl == 0) and (mat_limite[0] == 0)):
self.loader.code[i][16] = 999
self.loader.code[i][12] = 999
if ((mat_limite[1] == 0) and (mat_lppl == 0) and (mat_limite[0] == 1)):
self.loader.code[i][16] = 999
self.loader.code[i][12] = 999
if ((mat_limite[1] == 0) and (mat_lppl == 0) and (mat_limite[0] == 2)):
self.loader.code[i][16] = 299
self.loader.code[i][12] = 299
if ((mat_limite[1] == 0) and (mat_lppl == 1) and (mat_limite[0] == 0)):
self.loader.code[i][16] = 999
self.loader.code[i][12] = 999
if ((mat_limite[1] == 0) and (mat_lppl == 1) and (mat_limite[0] == 1)):
self.loader.code[i][16] = 299
self.loader.code[i][12] = 999
if ((mat_limite[1] == 0) and (mat_lppl == 1) and (mat_limite[0] == 2)):
self.loader.code[i][16] = 299
self.loader.code[i][12] = 999
if ((mat_limite[1] == 0) and (mat_lppl == 2) and (mat_limite[0] == 0)):
self.loader.code[i][16] = 299
self.loader.code[i][12] = 299
if ((mat_limite[1] == 0) and (mat_lppl == 2) and (mat_limite[0] == 1)):
self.loader.code[i][16] = 299
self.loader.code[i][12] = 999
if ((mat_limite[1] == 0) and (mat_lppl == 2) and (mat_limite[0] == 2)):
self.loader.code[i][16] = 299
self.loader.code[i][12] = 999
if ((mat_limite[1] == 1) and (mat_lppl == 0) and (mat_limite[0] == 0)):
self.loader.code[i][16] = 999
self.loader.code[i][12] = 999
if ((mat_limite[1] == 1) and (mat_lppl == 0) and (mat_limite[0] == 1)):
self.loader.code[i][16] = 999
self.loader.code[i][12] = 999
if ((mat_limite[1] == 1) and (mat_lppl == 0) and (mat_limite[0] == 2)):
self.loader.code[i][16] = 999
self.loader.code[i][12] = 999
if ((mat_limite[1] == 1) and (mat_lppl == 1) and (mat_limite[0] == 0)):
self.loader.code[i][16] = 999
self.loader.code[i][12] = 299
if ((mat_limite[1] == 1) and (mat_lppl == 1) and (mat_limite[0] == 1)):
self.loader.code[i][16] = 299
self.loader.code[i][12] = 299
if ((mat_limite[1] == 1) and (mat_lppl == 1) and (mat_limite[0] == 2)):
self.loader.code[i][16] = 299
self.loader.code[i][12] = 299
if ((mat_limite[1] == 1) and (mat_lppl == 2) and (mat_limite[0] == 0)):
self.loader.code[i][16] = 999
self.loader.code[i][12] = 299
if ((mat_limite[1] == 1) and (mat_lppl == 2) and (mat_limite[0] == 1)):
self.loader.code[i][16] = 299
self.loader.code[i][12] = 299
if ((mat_limite[1] == 1) and (mat_lppl == 2) and (mat_limite[0] == 2)):
self.loader.code[i][16] = 299
self.loader.code[i][12] = 299
if ((mat_limite[1] == 2) and (mat_lppl == 0) and (mat_limite[0] == 0)):
self.loader.code[i][16] = 299
self.loader.code[i][12] = 299
if ((mat_limite[1] == 2) and (mat_lppl == 0) and (mat_limite[0] == 1)):
self.loader.code[i][16] = 999
self.loader.code[i][12] = 999
if ((mat_limite[1] == 2) and (mat_lppl == 0) and (mat_limite[0] == 2)):
self.loader.code[i][16] = 999
self.loader.code[i][12] = 999
if ((mat_limite[1] == 2) and (mat_lppl == 1) and (mat_limite[0] == 0)):
self.loader.code[i][16] = 999
self.loader.code[i][12] = 299
if ((mat_limite[1] == 2) and (mat_lppl == 1) and (mat_limite[0] == 1)):
self.loader.code[i][16] = 299
self.loader.code[i][12] = 299
if ((mat_limite[1] == 2) and (mat_lppl == 1) and (mat_limite[0] == 2)):
self.loader.code[i][16] = 299
self.loader.code[i][12] = 299
if ((mat_limite[1] == 2) and (mat_lppl == 2) and (mat_limite[0] == 0)):
self.loader.code[i][16] = 999
self.loader.code[i][12] = 299
if ((mat_limite[1] == 2) and (mat_lppl == 2) and (mat_limite[0] == 1)):
self.loader.code[i][16] = 299
self.loader.code[i][12] = 299
if ((mat_limite[1] == 2) and (mat_lppl == 2) and (mat_limite[0] == 2)):
self.loader.code[i][16] = 299
self.loader.code[i][12] = 299
else:
if self.loader.code[i][16] == 99:
self.loader.code[i][16] = 599
if self.loader.code[i][12] == 99:
self.loader.code[i][12] = 599
else:
if self.loader.code[i][16] == 99:
self.loader.code[i][16] = 599
if self.loader.code[i][16] == 29:
self.loader.code[i][16] = 529
if self.loader.code[i][12] == 99:
self.loader.code[i][12] = 599
else:
if self.loader.code[i][12] == 99:
self.loader.code[i][12] = 599
if self.loader.code[i][12] == 29:
self.loader.code[i][12] = 529
if self.loader.code[i][16] == 99:
self.loader.code[i][16] = 599
else:
if self.loader.code[i][16] == 99:
self.loader.code[i][16] = 599
if self.loader.code[i][16] == 29:
self.loader.code[i][16] = 529
if self.loader.code[i][12] == 99:
self.loader.code[i][12] = 599
if self.loader.code[i][12] == 29:
self.loader.code[i][12] = 529
# End of the routine validation: Comparison of global radiation with par and lux level 3
# ----------------------------------------------------------------------------------------------------------------------
# Start of the routine validation: Global Radiation (W/m²) level 3
if self.loader.code[i][4] != 3333 and self.loader.code[i][4] != -5555 and self.loader.code[i][4] != -6999 and self.loader.code[i][4] != 552 and self.loader.code[i][4] != 29:
if self.loader.code[i][8] != 3333 and self.loader.code[i][8] != -5555 and self.loader.code[i][8] != -6999 and self.loader.code[i][8] != 552 and self.loader.code[i][8] != 29 and self.loader.code[i][28] != 3333 and self.loader.code[i][28] != -5555 and self.loader.code[i][28] != -6999 and self.loader.code[i][28] != 552 and self.loader.code[i][28] != 29:
if self.loader.data[i][4] >= 50:
if self.zenith_angle >= 75:
if self.loader.data[i][8] / self.loader.data[i][4] < 1.10:
self.loader.code[i][4] = 999
else:
self.loader.code[i][4] = 299
else:
if self.loader.data[i][8] / self.loader.data[i][4] < 1.05:
self.loader.code[i][4] = 999
else:
self.loader.code[i][4] = 299
else:
if self.loader.data[i][4] < 50:
if self.loader.data[i][8] - self.loader.data[i][4] < 15:
self.loader.code[i][4] = 999
else:
self.loader.code[i][4] = 299
else:
self.loader.code[i][4] = 599
# sumSw = self.loader.data[i][8] + (self.loader.data[i][28] * self.u0)
#
# # ////////////////////// TEST BSRN LEVEL 3 ////////////////////////
# if sumSw > 50:
# if self.zenith_angle < 75:
# if divSw > 0.90 and divSw < 1.10:
# self.loader.code[i][4] = 999
# else:
# self.loader.code[i][4] = 299
#
# if self.zenith_angle > 75 and self.zenith_angle < 93:
# if divSw > 0.85 and divSw < 1.15:
# self.loader.code[i][4] = 999
# else:
# self.loader.code[i][4] = 299
# else:
# self.loader.code[i][4] = 599
# ////////////////////// TEST BSRN LEVEL 3 ////////////////////////
# ////////////////////// If DIFFUSE and DIRECT are BAD - CHECK PAR AND LUX
else:
if self.loader.code[i][12] != 529 and self.loader.code[i][12] != 299 and self.loader.code[i][12] != 552 and self.loader.code[i][12] != 29 and self.loader.code[i][16] != 529 and self.loader.code[i][16] != 299 and self.loader.code[i][16] != 552 and self.loader.code[i][16] != 29:
self.loader.code[i][4] = 999
else:
self.loader.code[i][4] = 599
else:
if self.loader.code[i][4] == 29:
self.loader.code[i][4] = 529
# End of the routine validation: Global Radiation (W/m²) level 3
# ----------------------------------------------------------------------------------------------------------------------
# Start of the routine validation: Diffuse Radiation (W/m²) level 3
if self.loader.code[i][8] != 3333 and self.loader.code[i][8] != -5555 and self.loader.code[i][8] != -6999 and self.loader.code[i][8] != 552 and self.loader.code[i][8] != 29:
if self.loader.code[i][4] != 3333 and self.loader.code[i][4] != -5555 and self.loader.code[i][4] != -6999 and self.loader.code[i][4] != 552 and self.loader.code[i][4] != 529:
if self.loader.data[i][4] > 50:
difSw = self.loader.data[i][8] / self.loader.data[i][4]
if self.zenith_angle < 75:
if difSw < 1.05:
self.loader.code[i][8] = 999
else:
self.loader.code[i][8] = 299
if self.zenith_angle > 75 and self.zenith_angle < 93:
if difSw < 1.10:
self.loader.code[i][8] = 999
else:
self.loader.code[i][8] = 299
else:
self.loader.code[i][8] = 599
else:
if self.loader.code[i][8] == 99:
self.loader.code[i][8] = 599
else:
if self.loader.code[i][8] == 29:
self.loader.code[i][8] = 529
# End of the routine validation: Diffuse Radiation (W/m²) level 3
# ----------------------------------------------------------------------------------------------------------------------
# Start of the routine validation: Air Temperature (°C) level 3
if self.loader.code[i][20] != 3333 and self.loader.code[i][20] != -5555 and self.loader.code[i][20] != 552 and self.loader.code[i][20] != 529:
if i <= totalTemp12h_1:
j = 0
while j <= self.temp12h:
if self.loader.code[i+j][20] != 3333 and self.loader.code[i+j][20] != 552 and self.loader.code[i+j][20] != 529:
self.contTempValid += 1
if self.loader.data[i+j][20] > self.temp_max:
self.temp_max = self.loader.data[i+j][20]
if self.loader.data[i+j][20] < self.temp_min:
self.temp_min = self.loader.data[i+j][20]
self.variation_temp12h = self.temp_max - self.temp_min
j += 1
if self.contTempValid >= 40:
if self.variation_temp12h > 0.5:
self.loader.code[i][20] = 999
self.lastTempValid = self.loader.code[i][20]
else:
self.loader.code[i][20] = 299
else:
l = 0
while l <= self.temp12h:
if self.loader.code[i+l][20] != 3333 and self.loader.code[i+l][20] != 552 and self.loader.code[i+l][20] != 529:
if i == 0:
self.loader.code[i][20] = 559
else:
self.loader.code[i][20] = self.lastTempValid
l += 1
self.contTempValid = 0
self.temp_max = 0
self.temp_min = 999
if i >= totalTemp12h_2:
self.loader.code[i][20] = self.loader.code[totalTemp12h_1][20]
# End of the routine validation: Air Temperature (°C) level 3
# ----------------------------------------------------------------------------------------------------------------------
# Start of the routine validation: Accumulated Precipitation (mm) level 3
if self.loader.code[i][23] != 3333 and self.loader.code[i][23] != -5555 and self.loader.code[i][23] != 552 and self.loader.code[i][23] != 529:
if i <= totalPrec24h_1:
j = 0
while j <= self.prec24h:
if self.loader.code[i+j][23] != 3333 and self.loader.code[i+j][23] != 552 and self.loader.code[i+j][23] != 529:
self.contPrecValid += 1
if self.loader.data[i+j][23] > self.prec_max:
self.prec_max = self.loader.data[i+j][23]
if self.loader.data[i+j][23] < self.prec_min:
self.prec_min = self.loader.data[i+j][23]
self.variation_prec24h = self.prec_max - self.prec_min
j += 1
if self.contPrecValid >= 40:
if self.variation_prec24h < 100:
self.loader.code[i][23] = 999
self.lastPrecValid = self.loader.code[i][23]
else:
self.loader.code[i][23] = 299
else:
l = 0
while l <= self.prec24h:
if self.loader.code[i+l][23] != 3333 and self.loader.code[i+l][23] != 552 and self.loader.code[i+l][23] != 529:
if i == 0:
self.loader.code[i][23] = 559
else:
self.loader.code[i][23] = self.lastPrecValid
l += 1
self.contPrecValid = 0
self.prec_max = 0
self.prec_min = 999
if i >= totalPrec24h_2:
self.loader.code[i][23] = self.loader.code[totalPrec24h_1][23]
# End of the routine validation: Accumulated Precipitation (mm) level 3
# ----------------------------------------------------------------------------------------------------------------------
# Start of the routine validation: Wind Speed 10m (m/s) level 3
if self.loader.code[i][24] != 3333 and self.loader.code[i][24] != -5555 and self.loader.code[i][24] != 552 and self.loader.code[i][24] != 529:
if i <= totalWs1012h_1:
j = 0
while j <= self.ws1012h:
if self.loader.code[i+j][24] != 3333 and self.loader.code[i+j][24] != 552 and self.loader.code[i+j][24] != 529:
self.contWspdValid += 1
if self.loader.data[i+j][24] > self.ws10_max:
self.ws10_max = self.loader.data[i+j][24]
if self.loader.data[i+j][24] < self.ws10_min:
self.ws10_min = self.loader.data[i+j][24]
self.variation_ws1012h = self.ws10_max - self.ws10_min
j += 1
if self.contWspdValid >= 40:
if self.variation_ws1012h > 0.5:
self.loader.code[i][24] = 999
self.lastWs10Valid = self.loader.code[i][24]
else:
self.loader.code[i][24] = 299
else:
l = 0
while l <= self.ws1012h:
if self.loader.code[i+l][24] != 3333 and self.loader.code[i+l][24] != 552 and self.loader.code[i+l][24] != 529:
if i == 0:
self.loader.code[i][24] = 559
else:
self.loader.code[i][24] = self.lastWs10Valid
l += 1
self.contWspdValid = 0
self.ws10_max = 0
self.ws10_min = 999
if i >= totalWs1012h_2:
self.loader.code[i][24] = self.loader.code[totalWs1012h_1][24]
# End of the routine validation: Wind Speed 10m (m/s) level 3
# ----------------------------------------------------------------------------------------------------------------------
# Start of the routine validation: Wind Direction 10m (°) level 3
if self.loader.code[i][25] != 3333 and self.loader.code[i][25] != -5555 and self.loader.code[i][25] != 552 and self.loader.code[i][25] != 529:
if i <= totalWd1018h_1:
j = 0
while j <= self.wd1018h:
if self.loader.code[i+j][25] != 3333 and self.loader.code[i+j][25] != 552 and self.loader.code[i+j][25] != 529:
self.contWdirValid += 1
if self.loader.data[i+j][25] > self.wd10_max:
self.wd10_max = self.loader.data[i+j][25]
if self.loader.data[i+j][25] < self.wd10_min:
self.wd10_min = self.loader.data[i+j][25]
self.variation_wd1018h = self.wd10_max - self.wd10_min
j += 1
if self.contWdirValid >= 40:
if self.variation_wd1018h > 10:
self.loader.code[i][25] = 999
self.lastWd10Valid = self.loader.code[i][25]
else:
self.loader.code[i][25] = 299
else:
l = 0
while l <= self.wd1018h:
if self.loader.code[i+l][25] != 3333 and self.loader.code[i+l][25] != 552 and self.loader.code[i+l][25] != 529:
if i == 0:
self.loader.code[i][25] = 559
else:
self.loader.code[i][25] = self.lastWd10Valid
l += 1
self.contWdirValid = 0
self.wd10_max = 0
self.wd10_min = 999
if i >= totalWd1018h_2:
self.loader.code[i][25] = self.loader.code[totalWd1018h_1][25]
# End of the routine validation: Wind Direction 10m (°) level 3
# ----------------------------------------------------------------------------------------------------------------------
# Start of the routine validation: Direct Radiation (W/m²) level 3
if self.loader.code[i][28] != 3333 and self.loader.code[i][28] != -5555 and self.loader.code[i][28] != -6999 and self.loader.code[i][28] != 552 and self.loader.code[i][28] != 29:
if self.loader.code[i][8] != 3333 and self.loader.code[i][8] != -5555 and self.loader.code[i][8] != -6999 and self.loader.code[i][8] != 552 and self.loader.code[i][8] != 529:
if self.loader.code[i][4] != 3333 and self.loader.code[i][4] != -5555 and self.loader.code[i][4] != -6999 and self.loader.code[i][4] != 552 and self.loader.code[i][4] != 529:
direct_h = self.loader.data[i][4] - self.loader.data[i][8]
direct_n = (self.loader.data[i][28] * self.u0) - 50
direct_p = (self.loader.data[i][28] * self.u0) + 50
if direct_n <= direct_h and direct_h <= direct_p:
self.loader.code[i][28] = 999
else:
self.loader.code[i][28] = 299
else:
if self.loader.code[i][28] == 99:
self.loader.code[i][28] = 599
else:
if self.loader.code[i][28] == 99:
self.loader.code[i][28] = 599
else:
if self.loader.code[i][28] == 29:
self.loader.code[i][28] = 529
# End of the routine validation: Direct Radiation (W/m²) level 3
# ----------------------------------------------------------------------------------------------------------------------
# Start of the routine validation: Long Wave Radiation (W/m²) level 3
if self.loader.code[i][32] != 3333 and self.loader.code[i][32] != -5555 and self.loader.code[i][32] != -6999 and self.loader.code[i][32] != 552 and self.loader.code[i][32] != 29:
if self.loader.code[i][20] != 3333 and self.loader.code[i][20] != -5555 and self.loader.code[i][20] != 552 and self.loader.code[i][20] != 529:
temp = self.loader.data[i][20] + 273.15
temp_a = 0.4 * sigma * (temp ** 4)
temp_b = sigma * (temp ** 4) + 25
if temp_a < self.loader.data[i][32] and self.loader.data[i][32] < temp_b:
self.loader.code[i][32] = 999
else:
self.loader.code[i][32] = 299
else:
if self.loader.code[i][32] == 99:
self.loader.code[i][32] = 599
else:
if self.loader.code[i][32] == 29:
self.loader.code[i][32] = 529
self.pb.update(1)
# End of the routine validation: Long Wave Radiation (W/m²) level 3
# ----------------------------------------------------------------------------------------------------------------------
# End of loop level 3
# Start level 4
# Define vertical ozone layer thickness value (cm)
# Iqbal average table (5.3.2)
if latitude >= -10:
L = 0.24
elif latitude < -10 and latitude >= -20:
L = 0.25
elif latitude < -20 and latitude >= -30:
L = 0.28
elif latitude < -30 and latitude >= -40:
L = 0.30
else:
# Assumed fallback for latitudes south of -40 (value reused from the last bin above) so L is always defined
L = 0.30
# Start level 4
np.seterr(all='ignore')
for i in range(self.rows + 1):
self.num = self.loader.data[i][3]
self.div = self.num / 60
self.dia_jul = int(self.loader.data[i][2])
# Calculating astronomical geometry data
self.day_angle = (2 * np.pi / 365.25 * self.dia_jul)
self.dec = (self.d0 - self.dc1 * np.cos(self.day_angle) + self.ds1 * np.sin(
self.day_angle) - self.dc2 * np.cos(2 * self.day_angle) + self.ds2 * np.sin(
2 * self.day_angle) - self.dc3 * np.cos(3 * self.day_angle) + self.ds3 * np.sin(3 * self.day_angle))
self.eqtime = (self.et0 + self.tc1 * np.cos(self.day_angle) - self.ts1 * np.sin(
self.day_angle) - self.tc2 * np.cos(2 * self.day_angle) - self.ts2 * np.sin(
2 * self.day_angle)) * 229.18
self.tcorr = (self.eqtime + 4 * (longitude - 0)) / 60
self.horacorr = self.tcorr + self.div
self.hour_angle = (12.00 - self.horacorr) * 15
self.e0 = self.e1 + self.e2 * np.cos(self.day_angle) + self.e3 * np.sin(
self.day_angle) + self.e4 * np.cos(2 * self.day_angle) + self.e5 * np.sin(2 * self.day_angle)
self.u0 = np.sin(self.dec) * np.sin(latitude * self.CDR) + np.cos(self.dec) * np.cos(
latitude * self.CDR) * np.cos(self.hour_angle * self.CDR)
self.zenith_angle = (np.arccos(self.u0)) * 180 / np.pi
self.sa = 1368 * self.e0
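# The block below estimates clear-sky irradiance with a Bird & Hulstrom-style parameterisation (Rayleigh,
# ozone, mixed-gas, water-vapour and aerosol transmittances) and then checks the measured global
# irradiance against that clear-sky envelope (level 4).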
if self.loader.code[i][22] == 99 and self.loader.code[i][20] == 999 and self.loader.code[i][21] == 9:
# Relative optical air mass (self.u0 already holds the cosine of the solar zenith angle)
Mr = 1 / self.u0
# Pressure-corrected optical air mass
Ma = Mr * (self.loader.data[i][22] / 1013.25)
# Ozone relative optical path length
U3 = L * Mr
# Saturation vapour pressure (air temperature converted to Kelvin)
Ps = np.exp(26.23 - (5416 / (self.loader.data[i][20] + 273.15)))
# Precipitable water in terms of relative humidity
Pw = 0.493 * (self.loader.data[i][21] / 100) * (Ps / (self.loader.data[i][20] + 273.15))
# Pressure-corrected relative optical path length of precipitable water
U1 = Pw * Mr
# Transmittance by Rayleigh scattering
Tr = np.exp(-0.0903*(Ma**0.84)*(1.0 + Ma - (Ma**1.01)))
# Transmittance by ozone
To = 1-(0.1611*U3*((1.0+139.48*U3)**(-0.3035))-0.002715*U3*((1.0+0.44*U3+0.0003*(U3**2))**(-1)))
# Transmittance by uniformely mixed gases
Tg = np.exp(-0.0127*Ma**0.26)
# Transmittance by water vapor
Tw = 1-2.4959*U1*((((1.0+79.034*U1)**0.6828)+(6.385*U1))**(-1))
# Aerosol optical thickness - Kah(0.38)=0.087 and Kah(0.5)=0.069
Ka = (0.2758*0.087) + (0.35*0.069)
# Aerosol transmittance
Ta = np.exp(-(Ka**0.873)*(1.0+Ka-(Ka**0.7088))*(Ma**0.9108))
# Eccentricity correction factor of the Earth's orbit
Eo = 1+(0.033*np.cos(((2*np.pi*self.dia_jul)/365)))
# Direct normal irradiance
Irn = Eo*0.9751*self.Isc*Tr*To*Tg*Tw*Ta
# Extraterrestrial irradiance on a horizontal surface
Io = Eo*self.Isc*self.u0
# Total beam irradiance on a horizontal surface
Ib = Irn*self.u0
# Transmittance of direct radiation due to aerosol absorptance
Taa = 1-(1-self.Wo)*(1-Ma+(Ma**1.06)*(1-Ta))
# Ratio Ta/Taa
Tas = Ta/Taa
# Rayleigh-scattered diffuse irradiance
Idr = Eo*0.79*self.Isc*self.u0*To*Tg*Tw*Taa*0.5*((1-Tr)/(1-Ma+(Ma**1.02)))
# Aerosol-scattered diffuse irradiance
Ida = Eo*0.79*self.Isc*self.u0*To*Tg*Tw*Taa*(self.Fc*(1-Tas))/(1-Ma+(Ma**1.02))
# Albedo for cloudless sky
Pa = 0.0685+(1-self.Fc)*(1-Tas)
# Ground albedo
Pg = 0.2
# Diffuse irradiance produced by multiple reflections between ground and atmosphere
Idm = (Irn*self.u0+Idr+Ida)*Pg*Pa/(1-Pg*Pa)
# Total diffuse irradiance on a horizontal surface
Idiff = Idr+Ida+Idm
# Total global irradiance on a horizontal surface
self.Iglob = Ib + Idiff
self.porc = (self.loader.data[i][4] * 100) / self.Iglob
# Validation for global irradiance
if self.loader.code[i][4] != 3333 and self.loader.code[i][4] != -5555 and self.loader.code[i][4] != -6999 and self.loader.code[i][4] != 552 and self.loader.code[i][4] != 529 and self.loader.code[i][4] != 299:
if self.loader.data[i][4]:
if self.Iglob > 1367 or Idiff <= 0:
self.loader.code[i][4] += 2000
else:
porc_10 = self.Iglob * 0.1
if self.loader.data[i][4] <= (self.Iglob + porc_10):
self.loader.code[i][4] += 9000
else:
self.loader.code[i][4] += 2000
elif self.loader.code[i][4] == 552 or self.loader.code[i][4] == 529 or self.loader.code[i][4] == 299:
self.loader.code[i][4] += 5000
elif self.loader.code[i][4] != 3333 and self.loader.code[i][4] != -5555 and self.loader.code[i][4] != -6999:
self.loader.code[i][4] += 5000
# Write global irradiance
self.loader.clearSky[i][0] = self.Iglob
self.loader.clearSky[i][1] = self.loader.data[i][4]
self.loader.clearSky[i][2] = self.porc
self.Iglob = None
self.porc = None
# Sum count
self.pb.update(1)
self.cont += 1
self.pb.close()
return self.loader.code, self.loader.clearSky
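# Flag summary (as assigned above): 3333, -5555 and -6999 are sentinel values propagated from the input
# (-6999 is used for the radiation channels at night); 9 / 99 / 999 mark data accepted at levels 1 / 2 / 3;
# 552 / 29 / 299 mark data rejected at levels 1 / 2 / 3; 529, 559 and 599 mark values that could not be fully
# evaluated. At level 4, which only adjusts the global irradiance column, +9000 is added when the measured
# value is consistent with the modelled clear-sky envelope, +2000 when it is not, and +5000 when the value had
# already been rejected at an earlier level.
# A quick way to tally the flags afterwards (sketch, assuming `codes` holds the returned per-record flag rows):
#     from collections import Counter
#     flag_counts = Counter(flag for row in codes for flag in row)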
# ----------------------------------------------------------------------------------------------------------------------
# Next file: tests/test_pagination.py (repository: BBooijLiewes/django-binder, commit b5bf0aad14657fd57d575f9a0ef21468533f64a7, license: MIT)
# ----------------------------------------------------------------------------------------------------------------------
from django.test import TestCase, Client
from django.contrib.auth.models import User
from binder.json import jsonloads
from .testapp.models import Animal, ContactPerson, Zoo, Caretaker
from .testapp.views import AnimalView
class CustomDefaultLimit:
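"""Context manager that temporarily overrides a view class's default page size (limit_default)."""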
def __init__(self, cls, limit):
self.cls = cls
self.new_limit = limit
def __enter__(self):
self.old_limit = self.cls.limit_default
self.cls.limit_default = self.new_limit
def __exit__(self, *args, **kwargs):
self.cls.limit_default = self.old_limit
class CustomMaxLimit:
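"""Context manager that temporarily overrides a view class's maximum allowed page size (limit_max)."""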
def __init__(self, cls, limit):
self.cls = cls
self.new_limit = limit
def __enter__(self):
self.old_limit = self.cls.limit_max
self.cls.limit_max = self.new_limit
def __exit__(self, *args, **kwargs):
self.cls.limit_max = self.old_limit
class TestPagination(TestCase):
def setUp(self):
super().setUp()
u = User(username='testuser', is_active=True, is_superuser=True)
u.set_password('test')
u.save()
self.client = Client()
r = self.client.login(username='testuser', password='test')
self.assertTrue(r)
self.gaia = Zoo(name='GaiaZOO') # 3
self.gaia.save()
self.wildlands = Zoo(name='Wildlands Adventure Zoo Emmen') # 4
self.wildlands.full_clean()
self.wildlands.save()
self.artis = Zoo(name='Artis') # 1
self.artis.full_clean()
self.artis.save()
self.harderwijk = Zoo(name='Dolfinarium Harderwijk') # 2
self.harderwijk.full_clean()
self.harderwijk.save()
self.donald = Animal(name='Donald Duck', zoo=self.wildlands) # 1
self.donald.save()
self.mickey = Animal(name='Mickey Mouse', zoo=self.gaia) # 2
self.mickey.save()
self.pluto = Animal(name='Pluto', zoo=self.artis) # 4
self.pluto.save()
self.minnie = Animal(name='Minnie Mouse', zoo=self.gaia) # 3
self.minnie.save()
self.scrooge = Animal(name='Scrooge McDuck', zoo=self.artis) # 5
self.scrooge.save()
self.director = ContactPerson(name='Director') # 2
self.director.save()
self.gaia.contacts.add(self.director)
self.wildlands.contacts.add(self.director)
self.janitor = ContactPerson(name='Janitor') # 3
self.janitor.save()
self.gaia.contacts.add(self.janitor)
self.cleaning_lady = ContactPerson(name='Cleaning lady') # 1
self.cleaning_lady.save()
self.gaia.contacts.add(self.cleaning_lady)
self.caretaker1 = Caretaker(name='Caretaker 1') # 1 (Ran out of inspiration)
self.caretaker1.save()
self.caretaker2 = Caretaker(name='Caretaker 2') # 2
self.caretaker2.save()
self.caretaker1.animals.add(self.donald)
self.caretaker1.animals.add(self.mickey)
self.caretaker1.animals.add(self.scrooge)
self.caretaker2.animals.add(self.pluto)
self.caretaker2.animals.add(self.minnie)
def test_limit_parsing(self):
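# Invalid 'limit' values are rejected (Binder reports request errors with status 418 here), while '0' and 'none' are accepted.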
response = self.client.get('/animal/', data={'order_by': 'name', 'limit': 'haha'})
self.assertEqual(response.status_code, 418)
response = self.client.get('/animal/', data={'order_by': 'name', 'limit': '-1'})
self.assertEqual(response.status_code, 418)
response = self.client.get('/animal/', data={'order_by': 'name', 'limit': '0'})
self.assertEqual(response.status_code, 200)
response = self.client.get('/animal/', data={'order_by': 'name', 'limit': 'none'})
self.assertEqual(response.status_code, 200)
response = self.client.get('/animal/', data={'order_by': 'name', 'limit': 'nope'})
self.assertEqual(response.status_code, 418)
response = self.client.get('/animal/', data={'order_by': 'name', 'limit': ''})
self.assertEqual(response.status_code, 418)
def test_basic_limit_offset(self):
response = self.client.get('/animal/', data={'order_by': 'name'})
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
self.assertEqual(5, data['meta']['total_records'])
self.assertEqual(5, len(data['data']))
response = self.client.get('/animal/', data={'limit': 1, 'order_by': 'name'})
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
self.assertEqual(5, data['meta']['total_records'])
self.assertEqual(1, len(data['data']))
self.assertEqual(self.donald.id, data['data'][0]['id'])
response = self.client.get('/animal/', data={'limit': 1, 'offset': 1, 'order_by': 'name'})
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
self.assertEqual(5, data['meta']['total_records'])
self.assertEqual(1, len(data['data']))
self.assertEqual(self.mickey.id, data['data'][0]['id'])
response = self.client.get('/animal/', data={'limit': 2, 'offset': 1, 'order_by': 'name'})
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
self.assertEqual(5, data['meta']['total_records'])
self.assertEqual(2, len(data['data']))
self.assertEqual(self.mickey.id, data['data'][0]['id'])
self.assertEqual(self.minnie.id, data['data'][1]['id'])
response = self.client.get('/animal/', data={'limit': 2, 'offset': 100, 'order_by': 'name'})
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
self.assertEqual(5, data['meta']['total_records'])
self.assertEqual(0, len(data['data']))
def test_basic_limit_offset_honors_custom_default(self):
with CustomDefaultLimit(AnimalView, 2):
response = self.client.get('/animal/', data={'order_by': 'name'})
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
self.assertEqual(5, data['meta']['total_records'])
self.assertEqual(2, len(data['data']))
self.assertEqual(self.donald.id, data['data'][0]['id'])
self.assertEqual(self.mickey.id, data['data'][1]['id'])
response = self.client.get('/animal/', data={'order_by': 'name', 'limit': 'none'})
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
self.assertEqual(5, data['meta']['total_records'])
self.assertEqual(5, len(data['data']))
def test_basic_limit_offset_honors_custom_maximum(self):
with CustomMaxLimit(AnimalView, 3):
# This is inconsistent with the default value, and Binder fails
response = self.client.get('/animal/', data={'order_by': 'name'})
self.assertEqual(response.status_code, 418)
data = jsonloads(response.content)
response = self.client.get('/animal/', data={'order_by': 'name', 'limit': 2})
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
self.assertEqual(5, data['meta']['total_records'])
self.assertEqual(2, len(data['data']))
self.assertEqual(self.donald.id, data['data'][0]['id'])
self.assertEqual(self.mickey.id, data['data'][1]['id'])
with CustomDefaultLimit(AnimalView, 2):
response = self.client.get('/animal/', data={'order_by': 'name'})
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
self.assertEqual(5, data['meta']['total_records'])
self.assertEqual(2, len(data['data']))
self.assertEqual(self.donald.id, data['data'][0]['id'])
self.assertEqual(self.mickey.id, data['data'][1]['id'])
response = self.client.get('/animal/', data={'order_by': 'name', 'offset': 3})
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
self.assertEqual(5, data['meta']['total_records'])
self.assertEqual(2, len(data['data']))
self.assertEqual(self.pluto.id, data['data'][0]['id'])
self.assertEqual(self.scrooge.id, data['data'][1]['id'])
response = self.client.get('/animal/', data={'order_by': 'name', 'limit': 'none'})
self.assertEqual(response.status_code, 418)
response = self.client.get('/animal/', data={'order_by': 'name', 'limit': 4})
self.assertEqual(response.status_code, 418)
response = self.client.get('/animal/', data={'order_by': 'name', 'limit': 3})
self.assertEqual(response.status_code, 200)
def test_limit_offset_using_with(self):
response = self.client.get('/zoo/', data={'order_by': 'name', 'limit': 2, 'with': 'animals'})
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
self.assertEqual(4, data['meta']['total_records'])
self.assertEqual(2, len(data['data']))
self.assertEqual(self.artis.id, data['data'][0]['id'])
self.assertEqual(self.harderwijk.id, data['data'][1]['id'])
self.assertEqual('animal', data['with_mapping']['animals'])
self.assertEqual('zoo', data['with_related_name_mapping']['animals'])
self.assertEqual({self.pluto.id, self.scrooge.id}, set(data['data'][0]['animals']))
self.assertEqual([], data['data'][1]['animals'])
response = self.client.get('/zoo/', data={'order_by': 'name', 'limit': 2, 'offset': 1, 'with': 'animals'})
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
self.assertEqual(4, data['meta']['total_records'])
self.assertEqual(2, len(data['data']))
self.assertEqual(self.harderwijk.id, data['data'][0]['id'])
self.assertEqual(self.gaia.id, data['data'][1]['id'])
self.assertEqual('animal', data['with_mapping']['animals'])
self.assertEqual('zoo', data['with_related_name_mapping']['animals'])
self.assertEqual([], data['data'][0]['animals'])
self.assertEqual({self.mickey.id, self.minnie.id}, set(data['data'][1]['animals']))
def test_limit_offset_filtering(self):
response = self.client.get('/zoo/', data={'order_by': 'name', 'limit': 2, '.name:not': 'GaiaZOO'})
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
self.assertEqual(3, data['meta']['total_records'])
self.assertEqual(2, len(data['data']))
self.assertEqual(self.artis.id, data['data'][0]['id'])
self.assertEqual(self.harderwijk.id, data['data'][1]['id'])
response = self.client.get('/zoo/', data={'order_by': 'name', 'limit': 2, 'offset': 1, '.name:not': 'GaiaZOO'})
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
self.assertEqual(3, data['meta']['total_records'])
self.assertEqual(2, len(data['data']))
self.assertEqual(self.harderwijk.id, data['data'][0]['id'])
self.assertEqual(self.wildlands.id, data['data'][1]['id'])
def test_limit_offset_related_filtering(self):
response = self.client.get('/contact_person/', data={'order_by': 'name', 'limit': 2, '.zoos.name': 'Wildlands Adventure Zoo Emmen'})
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
self.assertEqual(1, data['meta']['total_records'])
self.assertEqual(1, len(data['data']))
self.assertEqual(self.director.id, data['data'][0]['id'])
response = self.client.get('/contact_person/', data={'order_by': 'name', 'limit': 2, 'offset': 1, '.zoos.name': 'Wildlands Adventure Zoo Emmen'})
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
self.assertEqual(1, data['meta']['total_records'])
self.assertEqual(0, len(data['data']))
response = self.client.get('/contact_person/', data={'order_by': 'name', 'limit': 2, 'offset': 1, '.zoos.name': 'GaiaZOO'})
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
self.assertEqual(3, data['meta']['total_records'])
self.assertEqual(2, len(data['data']))
self.assertEqual(self.director.id, data['data'][0]['id'])
self.assertEqual(self.janitor.id, data['data'][1]['id'])
# Same set, but deeper filtering
response = self.client.get('/contact_person/', data={'order_by': 'name', 'limit': 2, 'offset': 1, '.zoos.animals.name': 'Mickey Mouse'})
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
self.assertEqual(3, data['meta']['total_records'])
self.assertEqual(2, len(data['data']))
self.assertEqual(self.director.id, data['data'][0]['id'])
self.assertEqual(self.janitor.id, data['data'][1]['id'])
def test_limit_offset_filtering_on_annotations(self):
response = self.client.get('/caretaker/', data={'order_by': 'name', 'limit': 1, 'offset': 0, '.animal_count:gt': 1})
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
self.assertEqual(2, data['meta']['total_records'])
self.assertEqual(1, len(data['data']))
self.assertEqual(self.caretaker1.id, data['data'][0]['id'])
response = self.client.get('/caretaker/', data={'order_by': 'name', 'limit': 1, 'offset': 1, '.animal_count:gt': 1})
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
self.assertEqual(2, data['meta']['total_records'])
self.assertEqual(1, len(data['data']))
self.assertEqual(self.caretaker2.id, data['data'][0]['id'])
response = self.client.get('/caretaker/', data={'order_by': 'name', 'limit': 1, 'offset': 0, '.animal_count:gt': 2})
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
self.assertEqual(1, data['meta']['total_records'])
self.assertEqual(1, len(data['data']))
self.assertEqual(self.caretaker1.id, data['data'][0]['id'])
# ----------------------------------------------------------------------------------------------------------------------
# Next file: multitasking_transformers/heads/__init__.py (repository: NLPatVCU/multitasking_transformers, commit 3245518a6cb3748916214233ce77965384df72f9, license: MIT)
# ----------------------------------------------------------------------------------------------------------------------
from .heads import TransformerHead, SubwordClassificationHead, CLSClassificationHead, CLSRegressionHead, MaskedLMHead
from .heads import TransformerHeadConfig
# ----------------------------------------------------------------------------------------------------------------------
# Next file: test_asnake.py (repository: martin-sucha/pyconsk-2018-snakepit-bot, commit dda6b32d24babbbeacc1856628cbf61899e06496, license: MIT)
# ----------------------------------------------------------------------------------------------------------------------
from collections import deque
from typing import Tuple, List
from asnake import GameState, Snake, MyRobotSnake, DIR_DOWN, DIR_RIGHT, DIR_UP, GAME_CHARS, XY
from snakepit.robot_snake import World
def parse_world(lines: List[str]) -> Tuple[List[List[Tuple[str, int]]], XY]:
"""Parse the world lines to world data and world size"""
size_y = len(lines)
if len(lines[0]) % 2 != 0:
raise ValueError('Odd character count in line')
size_x = len(lines[0])//2
rows = []
for line in lines:
if len(line) % 2 != 0:
raise ValueError('Odd character count in line')
if len(line) != size_x * 2:
raise ValueError('Lines of different length')
row = []
for x in range(size_x):
char = line[2*x]
color_char = line[2*x + 1]
if color_char.isdigit():
color = int(color_char)
else:
color = 0
row.append((char, color))
rows.append(row)
return rows, XY(size_x, size_y)
REVERSE_CHARS = {v: k for k, v in GAME_CHARS.items()}
def serialize_world(state: GameState) -> List[str]:
"""Serialize world data to lines"""
def describe(x, y):
char, color = state.world_get(XY(x, y))
return '{}{}'.format(REVERSE_CHARS[char], ' ' if color < 1 or color > 9 else str(color))
return [''.join(describe(x, y) for x in range(state.world_size.x)) for y in range(state.world_size.y)]
def test_parse_world():
world, world_size = parse_world([
' ',
' $1*1@1',
' ',
' ',
])
assert world_size.x == 4
assert world_size.y == 4
assert len(world) == 4
assert world == [
[(' ', 0), (' ', 0), (' ', 0), (' ', 0)],
[(' ', 0), ('$', 1), ('*', 1), ('@', 1)],
[(' ', 0), (' ', 0), (' ', 0), (' ', 0)],
[(' ', 0), (' ', 0), (' ', 0), (' ', 0)],
]
def test_serialize_world():
lines = serialize_world(GameState([
[(' ', 0), (' ', 0), (' ', 0), (' ', 0)],
[(' ', 0), ('$', 1), ('*', 1), ('@', 1)],
[(' ', 0), (' ', 0), (' ', 0), (' ', 0)],
[(' ', 0), (' ', 0), (' ', 0), (' ', 0)],
], XY(4, 4), {}, 0))
assert lines == [
' ',
' $1*1@1',
' ',
' ',
]
def test_decode_encode():
def check(value):
assert value == GameState._decode_value(GameState._encode_value(value))
for char in GAME_CHARS.values():
for color in range(8):
check((char, color))
def test_observe_state_changes_first():
world, world_size = parse_world([
' ',
' $1*1@1',
' ',
' ',
])
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
game_state = robot.observe_state_changes(None, robot.world, 1)
assert game_state.world_size == XY(4, 4)
my_snake = game_state.my_snake
assert my_snake is not None
assert my_snake.color == 1
assert my_snake.length == 3
assert list(my_snake.head_history) == [XY(2, 1), XY(1, 1)]
assert my_snake.grow_uncertain
assert my_snake.grow == 0
assert my_snake.alive
assert my_snake.score == 0
assert my_snake.head_pos == XY(3, 1)
assert my_snake.tail_pos == XY(1, 1)
def test_observe_state_changes_nontraceable():
world, world_size = parse_world([
' ',
' @1*1*1',
' *1*1*1',
' *1*1$1',
])
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
game_state = robot.observe_state_changes(None, robot.world, 1)
assert game_state.world_size == XY(4, 4)
my_snake = game_state.my_snake
assert my_snake is not None
assert my_snake.color == 1
assert my_snake.length == 9
assert list(my_snake.head_history) == []
assert my_snake.grow_uncertain
assert my_snake.grow == 0
assert my_snake.alive
assert my_snake.score == 0
assert my_snake.head_pos == XY(1, 1)
assert my_snake.tail_pos == XY(3, 3)
def test_observe_state_changes_nontraceable2():
world, world_size = parse_world([
'@1*1 ',
' *1*1*1',
' *1*1*1',
' *1*1$1',
])
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
game_state = robot.observe_state_changes(None, robot.world, 1)
assert game_state.world_size == XY(4, 4)
my_snake = game_state.my_snake
assert my_snake is not None
assert my_snake.color == 1
assert my_snake.length == 11
assert list(my_snake.head_history) == [XY(1, 0), XY(1, 1)]
assert my_snake.grow_uncertain
assert my_snake.grow == 0
assert my_snake.alive
assert my_snake.score == 0
assert my_snake.head_pos == XY(0, 0)
assert my_snake.tail_pos == XY(3, 3)
def test_observe_state_changes_grow_uncertain():
old_world, old_world_size = parse_world([
' ',
' $1*1@1',
' ',
' ',
])
snake1 = Snake(True, XY(3, 1), XY(1, 1), 1)
snake1.length = 3
snake1.head_history = deque([XY(2, 1), XY(1, 1)])
snake1.grow_uncertain = True
snake1.grow = 1
snake1.score = 5
old_state = GameState(old_world, old_world_size, {1: snake1}, 0)
world, world_size = parse_world([
' ',
' $1*1*1',
' @1',
' ',
])
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
game_state = robot.observe_state_changes(old_state, robot.world, 1)
assert game_state.world_size == XY(4, 4)
my_snake = game_state.my_snake
assert my_snake is not None
assert my_snake.color == 1
assert my_snake.length == 4
assert list(my_snake.head_history) == [XY(3, 1), XY(2, 1), XY(1, 1)]
assert my_snake.grow_uncertain
assert my_snake.grow == 0
assert my_snake.alive
assert my_snake.score == 5
assert my_snake.head_pos == XY(3, 2)
assert my_snake.tail_pos == XY(1, 1)
def test_observe_state_changes_stop_growing():
old_world, old_world_size = parse_world([
' ',
' $1*1@1',
' ',
' ',
])
snake1 = Snake(True, XY(3, 1), XY(1, 1), 1)
snake1.length = 3
snake1.head_history = deque([XY(2, 1), XY(1, 1)])
snake1.grow_uncertain = True
snake1.grow = 0
snake1.score = 5
old_state = GameState(old_world, old_world_size, {1: snake1}, 0)
world, world_size = parse_world([
' ',
' $1*1',
' @1',
' ',
])
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
game_state = robot.observe_state_changes(old_state, robot.world, 1)
assert game_state.world_size == XY(4, 4)
my_snake = game_state.my_snake
assert my_snake is not None
assert my_snake.color == 1
assert my_snake.head_pos == XY(3, 2)
assert my_snake.tail_pos == XY(2, 1)
assert my_snake.length == 3
assert list(my_snake.head_history) == [XY(3, 1), XY(2, 1)]
assert not my_snake.grow_uncertain
assert my_snake.grow == 0
assert my_snake.alive
assert my_snake.score == 5
def test_observe_state_changes_eat():
old_world, old_world_size = parse_world([
' 84',
' $1*1@1',
' ',
' ',
])
snake1 = Snake(True, XY(3, 1), XY(1, 1), 1)
snake1.length = 3
snake1.head_history = deque([XY(2, 1), XY(1, 1)])
snake1.grow_uncertain = True
snake1.grow = 0
snake1.score = 5
old_state = GameState(old_world, old_world_size, {1: snake1}, 0)
world, world_size = parse_world([
' @1',
' $1*1',
' ',
' ',
])
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
game_state = robot.observe_state_changes(old_state, robot.world, 1)
assert game_state.world_size == XY(4, 4)
my_snake = game_state.my_snake
assert my_snake is not None
assert my_snake.color == 1
assert my_snake.head_pos == XY(3, 0)
assert my_snake.tail_pos == XY(2, 1)
assert my_snake.length == 3
assert list(my_snake.head_history) == [XY(3, 1), XY(2, 1)]
assert not my_snake.grow_uncertain
assert my_snake.grow == 8
assert my_snake.alive
assert my_snake.score == 13
def test_observe_state_changes_missed_frame():
old_world, old_world_size = parse_world([
' ',
' $1*1@1',
' ',
' ',
])
snake1 = Snake(True, XY(3, 1), XY(1, 1), 1)
snake1.length = 3
snake1.head_history = deque([XY(2, 1), XY(1, 1)])
snake1.grow_uncertain = True
snake1.grow = 0
snake1.score = 5
old_state = GameState(old_world, old_world_size, {1: snake1}, 0)
world, world_size = parse_world([
' ',
' $1',
' *1',
' @1',
])
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
game_state = robot.observe_state_changes(old_state, robot.world, 1)
assert game_state.world_size == XY(4, 4)
my_snake = game_state.my_snake
assert my_snake is not None
assert my_snake.color == 1
assert my_snake.head_pos == XY(3, 3)
assert my_snake.tail_pos == XY(3, 1)
assert my_snake.length == 3
assert list(my_snake.head_history) == [XY(3, 2), XY(3, 1)]
assert my_snake.grow_uncertain
assert my_snake.grow == 0
assert my_snake.alive
assert my_snake.score == 5
def test_observe_state_changes_missed_frame2():
old_world, old_world_size = parse_world([
' ',
' $1*1@1',
' ',
' ',
])
snake1 = Snake(True, XY(3, 1), XY(1, 1), 1)
snake1.length = 3
snake1.head_history = deque([XY(2, 1), XY(1, 1)])
snake1.grow_uncertain = True
snake1.grow = 0
snake1.score = 5
old_state = GameState(old_world, old_world_size, {1: snake1}, 0)
world, world_size = parse_world([
' ',
' $1*1*1',
' *1',
' @1',
])
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
game_state = robot.observe_state_changes(old_state, robot.world, 1)
assert game_state.world_size == XY(4, 4)
my_snake = game_state.my_snake
assert my_snake is not None
assert my_snake.color == 1
assert my_snake.head_pos == XY(3, 3)
assert my_snake.tail_pos == XY(1, 1)
assert my_snake.length == 5
assert list(my_snake.head_history) == [XY(3, 2), XY(3, 1), XY(2, 1), XY(1, 1)]
assert my_snake.grow_uncertain
assert my_snake.grow == 0
assert my_snake.alive
assert my_snake.score == 5
def test_observe_state_changes_appear():
old_world, old_world_size = parse_world([
' ',
' $1*1@1',
' ',
' ',
])
snake1 = Snake(True, XY(3, 1), XY(1, 1), 1)
snake1.length = 3
snake1.head_history = deque([XY(2, 1), XY(1, 1)])
snake1.grow_uncertain = True
snake1.grow = 1
snake1.score = 5
old_state = GameState(old_world, old_world_size, {1: snake1}, 0)
world, world_size = parse_world([
'$2 ',
'*2$1*1*1',
'@2 @1',
' ',
])
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
game_state = robot.observe_state_changes(old_state, robot.world, 1)
assert game_state.world_size == XY(4, 4)
my_snake = game_state.my_snake
assert my_snake is not None
assert my_snake.color == 1
assert my_snake.length == 4
assert list(my_snake.head_history) == [XY(3, 1), XY(2, 1), XY(1, 1)]
assert my_snake.grow_uncertain
assert my_snake.grow == 0
assert my_snake.alive
assert my_snake.score == 5
assert my_snake.head_pos == XY(3, 2)
assert my_snake.tail_pos == XY(1, 1)
assert game_state.enemy_snake is not None
assert game_state.enemy_snake.color == 2
snake2 = game_state.snakes_by_color[2]
assert snake2.color == 2
assert snake2.length == 3
assert list(snake2.head_history) == [XY(0, 1), XY(0, 0)]
assert snake2.grow_uncertain
assert snake2.grow == 0
assert snake2.alive
assert snake2.score == 0
assert snake2.head_pos == XY(0, 2)
assert snake2.tail_pos == XY(0, 0)
def test_observe_state_changes_die():
old_world, old_world_size = parse_world([
'$2 ',
'*2$1*1@1',
'@2 ',
' ',
])
old_snake1 = Snake(True, XY(3, 1), XY(1, 1), 1)
old_snake1.length = 3
old_snake1.head_history = deque([XY(2, 1), XY(1, 1)])
old_snake1.grow_uncertain = True
old_snake1.grow = 1
old_snake1.score = 5
old_snake2 = Snake(True, XY(0, 2), XY(0, 0), 2)
old_snake2.length = 3
old_snake2.head_history = deque([XY(0, 1), XY(0, 0)])
old_snake2.grow_uncertain = True
old_snake2.grow = 0
old_snake2.score = 9
old_state = GameState(old_world, old_world_size, {1: old_snake1, 2: old_snake2}, 0)
world, world_size = parse_world([
'% ',
'+ $1*1*1',
'x @1',
' ',
])
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
game_state = robot.observe_state_changes(old_state, robot.world, 1)
assert game_state.world_size == XY(4, 4)
my_snake = game_state.my_snake
assert my_snake is not None
assert my_snake.color == 1
assert my_snake.length == 4
assert list(my_snake.head_history) == [XY(3, 1), XY(2, 1), XY(1, 1)]
assert my_snake.grow_uncertain
assert my_snake.grow == 0
assert my_snake.alive
assert my_snake.score == 5
assert my_snake.head_pos == XY(3, 2)
assert my_snake.tail_pos == XY(1, 1)
snake2 = game_state.snakes_by_color[2]
assert not snake2.alive
assert snake2.score == 9
assert snake2.color == 2
assert snake2.length == 3
assert list(snake2.head_history) == [XY(0, 1), XY(0, 0)]
assert snake2.grow_uncertain
assert snake2.grow == 0
assert snake2.head_pos == XY(0, 2)
assert snake2.tail_pos == XY(0, 0)
def test_advance_game_simple_move():
world, world_size = parse_world([
' ',
' $1*1@1',
' ',
' ',
])
snake1 = Snake(True, XY(3, 1), XY(1, 1), 1)
snake1.grow = 0
snake1.grow_uncertain = False
snake1.length = 3
snake1.head_history = deque([XY(2, 1), XY(1, 1)])
game_state = GameState(world, world_size, {1: snake1}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_DOWN})
assert not uncertainty
assert serialize_world(new_state) == [
' ',
' $1*1',
' @1',
' ',
]
new_snake1 = new_state.snakes_by_color[1]
assert new_snake1.alive
assert list(new_snake1.head_history) == [XY(3, 1), XY(2, 1)]
assert new_snake1.length == 3
assert new_snake1.grow == 0
assert not new_snake1.grow_uncertain
def test_advance_game_simple_grow():
world, world_size = parse_world([
' ',
'  $1*1@1',
' ',
' ',
])
snake1 = Snake(True, XY(3, 1), XY(1, 1), 1)
snake1.grow = 2
snake1.grow_uncertain = False
snake1.length = 3
snake1.head_history = deque([XY(2, 1), XY(1, 1)])
game_state = GameState(world, world_size, {1: snake1}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_DOWN})
assert not uncertainty
assert serialize_world(new_state) == [
' ',
' $1*1*1',
' @1',
' ',
]
new_snake1 = new_state.snakes_by_color[1]
assert new_snake1.alive
assert list(new_snake1.head_history) == [XY(3, 1), XY(2, 1), XY(1, 1)]
assert new_snake1.length == 4
assert new_snake1.grow == 1
assert not new_snake1.grow_uncertain
def test_advance_game_simple_eat():
world, world_size = parse_world([
' ',
' $1*1@1',
' 21',
' ',
])
snake1 = Snake(True, XY(3, 1), XY(1, 1), 1)
snake1.grow = 0
snake1.grow_uncertain = False
snake1.length = 3
snake1.score = 4
snake1.head_history = deque([XY(2, 1), XY(1, 1)])
game_state = GameState(world, world_size, {1: snake1}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_DOWN})
assert not uncertainty
assert serialize_world(new_state) == [
' ',
' $1*1',
' @1',
' ',
]
new_snake1 = new_state.snakes_by_color[1]
assert new_snake1.alive
assert list(new_snake1.head_history) == [XY(3, 1), XY(2, 1)]
assert new_snake1.length == 3
assert new_snake1.grow == 2
assert new_snake1.score == 4 + 2
assert not new_snake1.grow_uncertain
def test_advance_game_simple_eat_growing():
world, world_size = parse_world([
' ',
' $1*1@1',
' 21',
' ',
])
snake1 = Snake(True, XY(3, 1), XY(1, 1), 1)
snake1.grow = 2
snake1.grow_uncertain = False
snake1.length = 3
snake1.score = 4
snake1.head_history = deque([XY(2, 1), XY(1, 1)])
game_state = GameState(world, world_size, {1: snake1}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_DOWN})
assert not uncertainty
assert serialize_world(new_state) == [
' ',
' $1*1*1',
' @1',
' ',
]
new_snake1 = new_state.snakes_by_color[1]
assert new_snake1.alive
assert list(new_snake1.head_history) == [XY(3, 1), XY(2, 1), XY(1, 1)]
assert new_snake1.length == 4
assert new_snake1.grow == 3
assert new_snake1.score == 4 + 2
assert not new_snake1.grow_uncertain
def test_advance_game_simple_crash_wall():
world, world_size = parse_world([
' ',
' $1*1@1',
' ',
' ',
])
snake1 = Snake(True, XY(3, 1), XY(1, 1), 1)
snake1.grow = 0
snake1.grow_uncertain = False
snake1.length = 3
snake1.score = 4
snake1.head_history = deque([XY(2, 1), XY(1, 1)])
game_state = GameState(world, world_size, {1: snake1}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_RIGHT})
assert not uncertainty
assert serialize_world(new_state) == [
' ',
' % + x ',
' ',
' ',
]
new_snake1 = new_state.snakes_by_color[1]
assert not new_snake1.alive
assert list(new_snake1.head_history) == [XY(2, 1), XY(1, 1)]
assert new_snake1.length == 3
assert new_snake1.grow == 0
assert new_snake1.score == 4
assert not new_snake1.grow_uncertain
def test_advance_game_simple_crash_dead_tail():
world, world_size = parse_world([
' ',
' $1*1@1',
' % ',
' x + ',
])
snake1 = Snake(True, XY(3, 1), XY(1, 1), 1)
snake1.grow = 0
snake1.grow_uncertain = False
snake1.length = 3
snake1.score = 4
snake1.head_history = deque([XY(2, 1), XY(1, 1)])
game_state = GameState(world, world_size, {1: snake1}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_DOWN})
assert not uncertainty
assert serialize_world(new_state) == [
' ',
' % + x ',
' % ',
' x + ',
]
new_snake1 = new_state.snakes_by_color[1]
assert not new_snake1.alive
assert list(new_snake1.head_history) == [XY(2, 1), XY(1, 1)]
assert new_snake1.length == 3
assert new_snake1.grow == 0
assert new_snake1.score == 4
assert not new_snake1.grow_uncertain
def test_advance_game_simple_crash_dead_body():
world, world_size = parse_world([
' ',
' $1*1@1',
' % + ',
' x + ',
])
snake1 = Snake(True, XY(3, 1), XY(1, 1), 1)
snake1.grow = 0
snake1.grow_uncertain = False
snake1.length = 3
snake1.score = 4
snake1.head_history = deque([XY(2, 1), XY(1, 1)])
game_state = GameState(world, world_size, {1: snake1}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_DOWN})
assert not uncertainty
assert serialize_world(new_state) == [
' ',
' % + x ',
' % + ',
' x + ',
]
new_snake1 = new_state.snakes_by_color[1]
assert not new_snake1.alive
assert list(new_snake1.head_history) == [XY(2, 1), XY(1, 1)]
assert new_snake1.length == 3
assert new_snake1.grow == 0
assert new_snake1.score == 4
assert not new_snake1.grow_uncertain
def test_advance_game_simple_crash_dead_head():
world, world_size = parse_world([
' ',
' $1*1@1',
' % + x ',
' ',
])
snake1 = Snake(True, XY(3, 1), XY(1, 1), 1)
snake1.grow = 0
snake1.grow_uncertain = False
snake1.length = 3
snake1.score = 4
snake1.head_history = deque([XY(2, 1), XY(1, 1)])
game_state = GameState(world, world_size, {1: snake1}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_DOWN})
assert not uncertainty
assert serialize_world(new_state) == [
' ',
' % + x ',
' % + x ',
' ',
]
new_snake1 = new_state.snakes_by_color[1]
assert not new_snake1.alive
assert list(new_snake1.head_history) == [XY(2, 1), XY(1, 1)]
assert new_snake1.length == 3
assert new_snake1.grow == 0
assert new_snake1.score == 4
assert not new_snake1.grow_uncertain
def test_advance_game_simple_suicide():
world, world_size = parse_world([
' ',
' $1*1*1',
' @1*1',
' ',
])
snake1 = Snake(True, XY(2, 2), XY(1, 1), 1)
snake1.grow = 0
snake1.grow_uncertain = False
snake1.length = 5
snake1.score = 4
snake1.head_history = deque([XY(3, 2), XY(3, 1), XY(2, 1), XY(1, 1)])
game_state = GameState(world, world_size, {1: snake1}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_UP})
assert not uncertainty
assert serialize_world(new_state) == [
' ',
' % + + ',
' x + ',
' ',
]
new_snake1 = new_state.snakes_by_color[1]
assert not new_snake1.alive
assert list(new_snake1.head_history) == [XY(3, 2), XY(3, 1), XY(2, 1), XY(1, 1)]
assert new_snake1.length == 5
assert new_snake1.grow == 0
assert new_snake1.score == 4
assert not new_snake1.grow_uncertain
def test_advance_game_simple_tail_grow_suicide():
world, world_size = parse_world([
' ',
' $1*1*1',
' @1*1*1',
' ',
])
snake1 = Snake(True, XY(1, 2), XY(1, 1), 1)
snake1.grow = 1
snake1.grow_uncertain = False
snake1.length = 6
snake1.score = 4
snake1.head_history = deque([XY(2, 2), XY(3, 2), XY(3, 1), XY(2, 1), XY(1, 1)])
game_state = GameState(world, world_size, {1: snake1}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_UP})
assert not uncertainty
assert serialize_world(new_state) == [
' ',
' % + + ',
' x + + ',
' ',
]
new_snake1 = new_state.snakes_by_color[1]
assert not new_snake1.alive
assert list(new_snake1.head_history) == [XY(2, 2), XY(3, 2), XY(3, 1), XY(2, 1),
XY(1, 1)]
assert new_snake1.length == 6
assert new_snake1.grow == 1
assert new_snake1.score == 4
assert not new_snake1.grow_uncertain
def test_advance_game_simple_tail_chase():
world, world_size = parse_world([
' ',
' $1*1*1',
' @1*1*1',
' ',
])
snake1 = Snake(True, XY(1, 2), XY(1, 1), 1)
snake1.grow = 0
snake1.grow_uncertain = False
snake1.length = 6
snake1.score = 4
snake1.head_history = deque([XY(2, 2), XY(3, 2), XY(3, 1), XY(2, 1), XY(1, 1)])
game_state = GameState(world, world_size, {1: snake1}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_UP})
assert not uncertainty
assert serialize_world(new_state) == [
' ',
' @1$1*1',
' *1*1*1',
' ',
]
new_snake1 = new_state.snakes_by_color[1]
assert new_snake1.alive
assert list(new_snake1.head_history) == [XY(1, 2), XY(2, 2), XY(3, 2), XY(3, 1),
XY(2, 1)]
assert new_snake1.length == 6
assert new_snake1.grow == 0
assert new_snake1.score == 4
assert not new_snake1.grow_uncertain
def test_advance_game_double_move():
world, world_size = parse_world([
' ',
' $1*1@1',
' $2*2@2',
' ',
])
snake1 = Snake(True, XY(3, 1), XY(1, 1), 1)
snake1.grow = 0
snake1.grow_uncertain = False
snake1.length = 3
snake1.score = 4
snake1.head_history = deque([XY(2, 1), XY(1, 1)])
snake2 = Snake(True, XY(3, 2), XY(1, 2), 2)
snake2.grow = 0
snake2.grow_uncertain = False
snake2.length = 3
snake2.score = 6
snake2.head_history = deque([XY(2, 2), XY(1, 2)])
game_state = GameState(world, world_size, {1: snake1, 2: snake2}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_UP, 2: DIR_DOWN})
assert not uncertainty
assert serialize_world(new_state) == [
' @1',
' $1*1',
' $2*2',
' @2',
]
new_snake1 = new_state.snakes_by_color[1]
assert new_snake1.alive
assert list(new_snake1.head_history) == [XY(3, 1), XY(2, 1)]
assert new_snake1.length == 3
assert new_snake1.grow == 0
assert new_snake1.score == 4
assert not new_snake1.grow_uncertain
new_snake2 = new_state.snakes_by_color[2]
assert new_snake2.alive
assert list(new_snake2.head_history) == [XY(3, 2), XY(2, 2)]
assert new_snake2.length == 3
assert new_snake2.grow == 0
assert new_snake2.score == 6
assert not new_snake2.grow_uncertain
def test_advance_game_double_grow_one():
world, world_size = parse_world([
' ',
' $1*1@1',
' $2*2@2',
' ',
])
snake1 = Snake(True, XY(3, 1), XY(1, 1), 1)
snake1.grow = 2
snake1.grow_uncertain = False
snake1.length = 3
snake1.score = 4
snake1.head_history = deque([XY(2, 1), XY(1, 1)])
snake2 = Snake(True, XY(3, 2), XY(1, 2), 2)
snake2.grow = 0
snake2.grow_uncertain = False
snake2.length = 3
snake2.score = 6
snake2.head_history = deque([XY(2, 2), XY(1, 2)])
game_state = GameState(world, world_size, {1: snake1, 2: snake2}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_UP, 2: DIR_DOWN})
assert not uncertainty
assert serialize_world(new_state) == [
' @1',
' $1*1*1',
' $2*2',
' @2',
]
new_snake1 = new_state.snakes_by_color[1]
assert new_snake1.alive
assert list(new_snake1.head_history) == [XY(3, 1), XY(2, 1), XY(1, 1)]
assert new_snake1.length == 4
assert new_snake1.grow == 1
assert new_snake1.score == 4
assert not new_snake1.grow_uncertain
new_snake2 = new_state.snakes_by_color[2]
assert new_snake2.alive
assert list(new_snake2.head_history) == [XY(3, 2), XY(2, 2)]
assert new_snake2.length == 3
assert new_snake2.grow == 0
assert new_snake2.score == 6
assert not new_snake2.grow_uncertain
def test_advance_game_double_grow_two():
world, world_size = parse_world([
' ',
' $1*1@1',
' $2*2@2',
' ',
])
snake1 = Snake(True, XY(3, 1), XY(1, 1), 1)
snake1.grow = 0
snake1.grow_uncertain = False
snake1.length = 3
snake1.score = 4
snake1.head_history = deque([XY(2, 1), XY(1, 1)])
snake2 = Snake(True, XY(3, 2), XY(1, 2), 2)
snake2.grow = 1
snake2.grow_uncertain = False
snake2.length = 3
snake2.score = 6
snake2.head_history = deque([XY(2, 2), XY(1, 2)])
game_state = GameState(world, world_size, {1: snake1, 2: snake2}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_UP, 2: DIR_DOWN})
assert not uncertainty
assert serialize_world(new_state) == [
' @1',
' $1*1',
' $2*2*2',
' @2',
]
new_snake1 = new_state.snakes_by_color[1]
assert new_snake1.alive
assert list(new_snake1.head_history) == [XY(3, 1), XY(2, 1)]
assert new_snake1.length == 3
assert new_snake1.grow == 0
assert new_snake1.score == 4
assert not new_snake1.grow_uncertain
new_snake2 = new_state.snakes_by_color[2]
assert new_snake2.alive
assert list(new_snake2.head_history) == [XY(3, 2), XY(2, 2), XY(1, 2)]
assert new_snake2.length == 4
assert new_snake2.grow == 0
assert new_snake2.score == 6
assert not new_snake2.grow_uncertain
def test_advance_game_double_grow_both():
world, world_size = parse_world([
' ',
' $1*1@1',
' $2*2@2',
' ',
])
snake1 = Snake(True, XY(3, 1), XY(1, 1), 1)
snake1.grow = 2
snake1.grow_uncertain = False
snake1.length = 3
snake1.score = 4
snake1.head_history = deque([XY(2, 1), XY(1, 1)])
snake2 = Snake(True, XY(3, 2), XY(1, 2), 2)
snake2.grow = 1
snake2.grow_uncertain = False
snake2.length = 3
snake2.score = 6
snake2.head_history = deque([XY(2, 2), XY(1, 2)])
game_state = GameState(world, world_size, {1: snake1, 2: snake2}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_UP, 2: DIR_DOWN})
assert not uncertainty
assert serialize_world(new_state) == [
' @1',
' $1*1*1',
' $2*2*2',
' @2',
]
new_snake1 = new_state.snakes_by_color[1]
assert new_snake1.alive
assert list(new_snake1.head_history) == [XY(3, 1), XY(2, 1), XY(1, 1)]
assert new_snake1.length == 4
assert new_snake1.grow == 1
assert new_snake1.score == 4
assert not new_snake1.grow_uncertain
new_snake2 = new_state.snakes_by_color[2]
assert new_snake2.alive
assert list(new_snake2.head_history) == [XY(3, 2), XY(2, 2), XY(1, 2)]
assert new_snake2.length == 4
assert new_snake2.grow == 0
assert new_snake2.score == 6
assert not new_snake2.grow_uncertain
def test_advance_game_double_crash_to_dying_body():
world, world_size = parse_world([
' ',
' $1*1@1',
'$2*2@2 ',
' ',
])
snake1 = Snake(True, XY(3, 1), XY(1, 1), 1)
snake1.grow = 0
snake1.grow_uncertain = False
snake1.length = 3
snake1.score = 4
snake1.head_history = deque([XY(2, 1), XY(1, 1)])
snake2 = Snake(True, XY(2, 2), XY(0, 2), 2)
snake2.grow = 0
snake2.grow_uncertain = False
snake2.length = 3
snake2.score = 6
snake2.head_history = deque([XY(1, 2), XY(0, 2)])
game_state = GameState(world, world_size, {1: snake1, 2: snake2}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_RIGHT, 2: DIR_UP})
assert not uncertainty
assert serialize_world(new_state) == [
' ',
' % + x ',
'% + x ',
' ',
]
new_snake1 = new_state.snakes_by_color[1]
assert not new_snake1.alive
assert list(new_snake1.head_history) == [XY(2, 1), XY(1, 1)]
assert new_snake1.length == 3
assert new_snake1.grow == 0
assert new_snake1.score == 4
assert not new_snake1.grow_uncertain
new_snake2 = new_state.snakes_by_color[2]
assert not new_snake2.alive
assert list(new_snake2.head_history) == [XY(1, 2), XY(0, 2)]
assert new_snake2.length == 3
assert new_snake2.grow == 0
assert new_snake2.score == 6
assert not new_snake2.grow_uncertain
def test_advance_game_double_crash_to_dying_body2():
world, world_size = parse_world([
' ',
'$1*1@1 ',
' $2*2@2',
' ',
])
snake1 = Snake(True, XY(2, 1), XY(0, 1), 1)
snake1.grow = 0
snake1.grow_uncertain = False
snake1.length = 3
snake1.score = 4
snake1.head_history = deque([XY(1, 1), XY(0, 1)])
snake2 = Snake(True, XY(3, 2), XY(1, 2), 2)
snake2.grow = 0
snake2.grow_uncertain = False
snake2.length = 3
snake2.score = 6
snake2.head_history = deque([XY(2, 2), XY(1, 2)])
game_state = GameState(world, world_size, {1: snake1, 2: snake2}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_DOWN, 2: DIR_RIGHT})
assert not uncertainty
assert serialize_world(new_state) == [
' ',
'% + x ',
' % + x ',
' ',
]
new_snake1 = new_state.snakes_by_color[1]
assert not new_snake1.alive
assert list(new_snake1.head_history) == [XY(1, 1), XY(0, 1)]
assert new_snake1.length == 3
assert new_snake1.grow == 0
assert new_snake1.score == 4
assert not new_snake1.grow_uncertain
new_snake2 = new_state.snakes_by_color[2]
assert not new_snake2.alive
assert list(new_snake2.head_history) == [XY(2, 2), XY(1, 2)]
assert new_snake2.length == 3
assert new_snake2.grow == 0
assert new_snake2.score == 6
assert not new_snake2.grow_uncertain
def test_advance_game_double_crash_to_dying_tail():
world, world_size = parse_world([
' ',
' $1*1@1',
'*2@2 ',
'$2 ',
])
snake1 = Snake(True, XY(3, 1), XY(1, 1), 1)
snake1.grow = 0
snake1.grow_uncertain = False
snake1.length = 3
snake1.score = 4
snake1.head_history = deque([XY(2, 1), XY(1, 1)])
snake2 = Snake(True, XY(1, 2), XY(0, 3), 2)
snake2.grow = 0
snake2.grow_uncertain = False
snake2.length = 3
snake2.score = 6
snake2.head_history = deque([XY(0, 2), XY(0, 3)])
game_state = GameState(world, world_size, {1: snake1, 2: snake2}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_RIGHT, 2: DIR_UP})
assert not uncertainty
assert serialize_world(new_state) == [
' ',
' % + x ',
'+ x ',
'% ',
]
new_snake1 = new_state.snakes_by_color[1]
assert not new_snake1.alive
assert list(new_snake1.head_history) == [XY(2, 1), XY(1, 1)]
assert new_snake1.length == 3
assert new_snake1.grow == 0
assert new_snake1.score == 4
assert not new_snake1.grow_uncertain
new_snake2 = new_state.snakes_by_color[2]
assert not new_snake2.alive
assert list(new_snake2.head_history) == [XY(0, 2), XY(0, 3)]
assert new_snake2.length == 3
assert new_snake2.grow == 0
assert new_snake2.score == 6
assert not new_snake2.grow_uncertain
def test_advance_game_double_crash_to_dying_tail2():
world, world_size = parse_world([
'$1 ',
'*1@1 ',
' $2*2@2',
' ',
])
snake1 = Snake(True, XY(1, 1), XY(0, 0), 1)
snake1.grow = 0
snake1.grow_uncertain = False
snake1.length = 3
snake1.score = 4
snake1.head_history = deque([XY(0, 1), XY(0, 0)])
snake2 = Snake(True, XY(3, 2), XY(1, 2), 2)
snake2.grow = 0
snake2.grow_uncertain = False
snake2.length = 3
snake2.score = 6
snake2.head_history = deque([XY(2, 2), XY(1, 2)])
game_state = GameState(world, world_size, {1: snake1, 2: snake2}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_DOWN, 2: DIR_RIGHT})
assert not uncertainty
assert serialize_world(new_state) == [
'% ',
'+ x ',
' % + x ',
' ',
]
new_snake1 = new_state.snakes_by_color[1]
assert not new_snake1.alive
assert list(new_snake1.head_history) == [XY(0, 1), XY(0, 0)]
assert new_snake1.length == 3
assert new_snake1.grow == 0
assert new_snake1.score == 4
assert not new_snake1.grow_uncertain
new_snake2 = new_state.snakes_by_color[2]
assert not new_snake2.alive
assert list(new_snake2.head_history) == [XY(2, 2), XY(1, 2)]
assert new_snake2.length == 3
assert new_snake2.grow == 0
assert new_snake2.score == 6
assert not new_snake2.grow_uncertain
def test_advance_game_double_crash_to_dying_head():
world, world_size = parse_world([
' ',
' $1*1@1',
' $2*2@2',
' ',
])
snake1 = Snake(True, XY(3, 1), XY(1, 1), 1)
snake1.grow = 0
snake1.grow_uncertain = False
snake1.length = 3
snake1.score = 4
snake1.head_history = deque([XY(2, 1), XY(1, 1)])
snake2 = Snake(True, XY(3, 2), XY(1, 2), 2)
snake2.grow = 0
snake2.grow_uncertain = False
snake2.length = 3
snake2.score = 6
snake2.head_history = deque([XY(2, 2), XY(1, 2)])
game_state = GameState(world, world_size, {1: snake1, 2: snake2}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_RIGHT, 2: DIR_UP})
assert not uncertainty
assert serialize_world(new_state) == [
' ',
' % + x ',
' % + x ',
' ',
]
new_snake1 = new_state.snakes_by_color[1]
assert not new_snake1.alive
assert list(new_snake1.head_history) == [XY(2, 1), XY(1, 1)]
assert new_snake1.length == 3
assert new_snake1.grow == 0
assert new_snake1.score == 4
assert not new_snake1.grow_uncertain
new_snake2 = new_state.snakes_by_color[2]
assert not new_snake2.alive
assert list(new_snake2.head_history) == [XY(2, 2), XY(1, 2)]
assert new_snake2.length == 3
assert new_snake2.grow == 0
assert new_snake2.score == 6
assert not new_snake2.grow_uncertain
def test_advance_game_double_crash_to_dying_head2():
world, world_size = parse_world([
' ',
' $1*1@1',
' $2*2@2',
' ',
])
snake1 = Snake(True, XY(3, 1), XY(1, 1), 1)
snake1.grow = 0
snake1.grow_uncertain = False
snake1.length = 3
snake1.score = 4
snake1.head_history = deque([XY(2, 1), XY(1, 1)])
snake2 = Snake(True, XY(3, 2), XY(1, 2), 2)
snake2.grow = 0
snake2.grow_uncertain = False
snake2.length = 3
snake2.score = 6
snake2.head_history = deque([XY(2, 2), XY(1, 2)])
game_state = GameState(world, world_size, {1: snake1, 2: snake2}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_DOWN, 2: DIR_RIGHT})
assert not uncertainty
assert serialize_world(new_state) == [
' ',
' % + x ',
' % + x ',
' ',
]
new_snake1 = new_state.snakes_by_color[1]
assert not new_snake1.alive
assert list(new_snake1.head_history) == [XY(2, 1), XY(1, 1)]
assert new_snake1.length == 3
assert new_snake1.grow == 0
assert new_snake1.score == 4
assert not new_snake1.grow_uncertain
new_snake2 = new_state.snakes_by_color[2]
assert not new_snake2.alive
assert list(new_snake2.head_history) == [XY(2, 2), XY(1, 2)]
assert new_snake2.length == 3
assert new_snake2.grow == 0
assert new_snake2.score == 6
assert not new_snake2.grow_uncertain
def test_advance_game_double_frontal_crash():
world, world_size = parse_world([
' ',
' $1*1@1',
' $2*2@2',
' ',
])
snake1 = Snake(True, XY(3, 1), XY(1, 1), 1)
snake1.grow = 0
snake1.grow_uncertain = False
snake1.length = 3
snake1.score = 4
snake1.head_history = deque([XY(2, 1), XY(1, 1)])
snake2 = Snake(True, XY(3, 2), XY(1, 2), 2)
snake2.grow = 0
snake2.grow_uncertain = False
snake2.length = 3
snake2.score = 6
snake2.head_history = deque([XY(2, 2), XY(1, 2)])
game_state = GameState(world, world_size, {1: snake1, 2: snake2}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_DOWN, 2: DIR_UP})
assert not uncertainty
assert serialize_world(new_state) == [
' ',
' % + x ',
' % + x ',
' ',
]
new_snake1 = new_state.snakes_by_color[1]
assert not new_snake1.alive
assert list(new_snake1.head_history) == [XY(2, 1), XY(1, 1)]
assert new_snake1.length == 3
assert new_snake1.grow == 0
assert new_snake1.score == 4
assert not new_snake1.grow_uncertain
new_snake2 = new_state.snakes_by_color[2]
assert not new_snake2.alive
assert list(new_snake2.head_history) == [XY(2, 2), XY(1, 2)]
assert new_snake2.length == 3
assert new_snake2.grow == 0
assert new_snake2.score == 6
assert not new_snake2.grow_uncertain
def test_advance_game_double_frontal_crash2():
world, world_size = parse_world([
' ',
' $2*2@2',
' $1*1@1',
' ',
])
snake1 = Snake(True, XY(3, 2), XY(1, 2), 1)
snake1.grow = 0
snake1.grow_uncertain = False
snake1.length = 3
snake1.score = 4
snake1.head_history = deque([XY(2, 2), XY(1, 2)])
snake2 = Snake(True, XY(3, 1), XY(1, 1), 2)
snake2.grow = 0
snake2.grow_uncertain = False
snake2.length = 3
snake2.score = 6
snake2.head_history = deque([XY(2, 1), XY(1, 1)])
game_state = GameState(world, world_size, {1: snake1, 2: snake2}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_UP, 2: DIR_DOWN})
assert not uncertainty
assert serialize_world(new_state) == [
' ',
' % + x ',
' % + x ',
' ',
]
new_snake1 = new_state.snakes_by_color[1]
assert not new_snake1.alive
assert list(new_snake1.head_history) == [XY(2, 2), XY(1, 2)]
assert new_snake1.length == 3
assert new_snake1.grow == 0
assert new_snake1.score == 4
assert not new_snake1.grow_uncertain
new_snake2 = new_state.snakes_by_color[2]
assert not new_snake2.alive
assert list(new_snake2.head_history) == [XY(2, 1), XY(1, 1)]
assert new_snake2.length == 3
assert new_snake2.grow == 0
assert new_snake2.score == 6
assert not new_snake2.grow_uncertain
def test_advance_game_double_frontal_crash3():
world, world_size = parse_world([
' $1*1@1',
' ',
' $2*2@2',
' ',
])
snake1 = Snake(True, XY(3, 0), XY(1, 0), 1)
snake1.grow = 0
snake1.grow_uncertain = False
snake1.length = 3
snake1.score = 4
snake1.head_history = deque([XY(2, 0), XY(1, 0)])
snake2 = Snake(True, XY(3, 2), XY(1, 2), 2)
snake2.grow = 0
snake2.grow_uncertain = False
snake2.length = 3
snake2.score = 6
snake2.head_history = deque([XY(2, 2), XY(1, 2)])
game_state = GameState(world, world_size, {1: snake1, 2: snake2}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_DOWN, 2: DIR_UP})
assert not uncertainty
assert serialize_world(new_state) == [
' % + ',
' x ',
' % + ',
' ',
]
new_snake1 = new_state.snakes_by_color[1]
assert not new_snake1.alive
assert list(new_snake1.head_history) == [XY(3, 0), XY(2, 0)]
assert new_snake1.length == 3
assert new_snake1.grow == 0
assert new_snake1.score == 4
assert not new_snake1.grow_uncertain
new_snake2 = new_state.snakes_by_color[2]
assert not new_snake2.alive
assert list(new_snake2.head_history) == [XY(3, 2), XY(2, 2)]
assert new_snake2.length == 3
assert new_snake2.grow == 0
assert new_snake2.score == 6
assert not new_snake2.grow_uncertain
def test_advance_game_double_frontal_crash3_eat():
world, world_size = parse_world([
' $1*1@1',
' 23',
' $2*2@2',
' ',
])
snake1 = Snake(True, XY(3, 0), XY(1, 0), 1)
snake1.grow = 0
snake1.grow_uncertain = False
snake1.length = 3
snake1.score = 4
snake1.head_history = deque([XY(2, 0), XY(1, 0)])
snake2 = Snake(True, XY(3, 2), XY(1, 2), 2)
snake2.grow = 0
snake2.grow_uncertain = False
snake2.length = 3
snake2.score = 6
snake2.head_history = deque([XY(2, 2), XY(1, 2)])
game_state = GameState(world, world_size, {1: snake1, 2: snake2}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_DOWN, 2: DIR_UP})
assert not uncertainty
assert serialize_world(new_state) == [
' % + ',
' x ',
' % + ',
' ',
]
new_snake1 = new_state.snakes_by_color[1]
assert not new_snake1.alive
assert list(new_snake1.head_history) == [XY(3, 0), XY(2, 0)]
assert new_snake1.length == 3
assert new_snake1.grow == 0
assert new_snake1.score == 4
assert not new_snake1.grow_uncertain
new_snake2 = new_state.snakes_by_color[2]
assert not new_snake2.alive
assert list(new_snake2.head_history) == [XY(3, 2), XY(2, 2)]
assert new_snake2.length == 3
assert new_snake2.grow == 0
assert new_snake2.score == 6
assert not new_snake2.grow_uncertain
def test_advance_game_double_tail_grow_kill():
world, world_size = parse_world([
' @1*1$1',
' $2 ',
' *2*2@2',
' ',
])
snake1 = Snake(True, XY(1, 0), XY(3, 0), 1)
snake1.grow = 0
snake1.grow_uncertain = False
snake1.length = 3
snake1.score = 4
snake1.head_history = deque([XY(2, 0), XY(3, 0)])
snake2 = Snake(True, XY(3, 2), XY(1, 1), 2)
snake2.grow = 1
snake2.grow_uncertain = False
snake2.length = 4
snake2.score = 6
snake2.head_history = deque([XY(2, 2), XY(1, 2), XY(1, 1)])
game_state = GameState(world, world_size, {1: snake1, 2: snake2}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_DOWN, 2: DIR_DOWN})
assert not uncertainty
assert serialize_world(new_state) == [
' x + % ',
' $2 ',
' *2*2*2',
' @2',
]
new_snake1 = new_state.snakes_by_color[1]
assert not new_snake1.alive
assert list(new_snake1.head_history) == [XY(2, 0), XY(3, 0)]
assert new_snake1.length == 3
assert new_snake1.grow == 0
assert new_snake1.score == 4
assert not new_snake1.grow_uncertain
new_snake2 = new_state.snakes_by_color[2]
assert new_snake2.alive
assert list(new_snake2.head_history) == [XY(3, 2), XY(2, 2), XY(1, 2), XY(1, 1)]
assert new_snake2.length == 5
assert new_snake2.grow == 0
assert new_snake2.score == 1006
assert not new_snake2.grow_uncertain
def test_advance_game_double_tail_chase():
world, world_size = parse_world([
' @1*1$1',
' $2 ',
' *2*2@2',
' ',
])
snake1 = Snake(True, XY(1, 0), XY(3, 0), 1)
snake1.grow = 0
snake1.grow_uncertain = False
snake1.length = 3
snake1.score = 4
snake1.head_history = deque([XY(2, 0), XY(3, 0)])
snake2 = Snake(True, XY(3, 2), XY(1, 1), 2)
snake2.grow = 0
snake2.grow_uncertain = False
snake2.length = 4
snake2.score = 6
snake2.head_history = deque([XY(2, 2), XY(1, 2), XY(1, 1)])
game_state = GameState(world, world_size, {1: snake1, 2: snake2}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_DOWN, 2: DIR_DOWN})
assert not uncertainty
assert serialize_world(new_state) == [
' *1$1 ',
' @1 ',
' $2*2*2',
' @2',
]
new_snake1 = new_state.snakes_by_color[1]
assert new_snake1.alive
assert list(new_snake1.head_history) == [XY(1, 0), XY(2, 0)]
assert new_snake1.length == 3
assert new_snake1.grow == 0
assert new_snake1.score == 4
assert not new_snake1.grow_uncertain
new_snake2 = new_state.snakes_by_color[2]
assert new_snake2.alive
assert list(new_snake2.head_history) == [XY(3, 2), XY(2, 2), XY(1, 2)]
assert new_snake2.length == 4
assert new_snake2.grow == 0
assert new_snake2.score == 6
assert not new_snake2.grow_uncertain
def test_advance_game_double_tail_chase_loop():
world, world_size = parse_world([
' @1*1*1',
' $2 $1',
' *2*2@2',
' ',
])
snake1 = Snake(True, XY(1, 0), XY(3, 1), 1)
snake1.grow = 0
snake1.grow_uncertain = False
snake1.length = 4
snake1.score = 4
snake1.head_history = deque([XY(2, 0), XY(3, 0), XY(3, 1)])
snake2 = Snake(True, XY(3, 2), XY(1, 1), 2)
snake2.grow = 0
snake2.grow_uncertain = False
snake2.length = 4
snake2.score = 6
snake2.head_history = deque([XY(2, 2), XY(1, 2), XY(1, 1)])
game_state = GameState(world, world_size, {1: snake1, 2: snake2}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_DOWN, 2: DIR_UP})
assert not uncertainty
assert serialize_world(new_state) == [
' *1*1$1',
' @1 @2',
' $2*2*2',
' ',
]
new_snake1 = new_state.snakes_by_color[1]
assert new_snake1.alive
assert list(new_snake1.head_history) == [XY(1, 0), XY(2, 0), XY(3, 0)]
assert new_snake1.length == 4
assert new_snake1.grow == 0
assert new_snake1.score == 4
assert not new_snake1.grow_uncertain
new_snake2 = new_state.snakes_by_color[2]
assert new_snake2.alive
assert list(new_snake2.head_history) == [XY(3, 2), XY(2, 2), XY(1, 2)]
assert new_snake2.length == 4
assert new_snake2.grow == 0
assert new_snake2.score == 6
assert not new_snake2.grow_uncertain
def test_advance_game_double_tail_chase_frontal_crash():
world, world_size = parse_world([
' @1*1*1',
'@2$1 *1',
'*2*1*1*1',
'$2 ',
])
snake1 = Snake(True, XY(1, 0), XY(1, 1), 1)
snake1.grow = 0
snake1.grow_uncertain = False
snake1.length = 8
snake1.score = 4
snake1.head_history = deque([XY(2, 0), XY(3, 0), XY(3, 1), XY(3, 2),
XY(2, 2), XY(1, 2), XY(1, 1)])
snake2 = Snake(True, XY(0, 1), XY(0, 3), 2)
snake2.grow = 0
snake2.grow_uncertain = False
snake2.length = 3
snake2.score = 6
snake2.head_history = deque([XY(0, 2), XY(0, 3)])
game_state = GameState(world, world_size, {1: snake1, 2: snake2}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_DOWN, 2: DIR_RIGHT})
assert not uncertainty
assert serialize_world(new_state) == [
' + + + ',
'+ x + ',
'% % + + ',
' ',
]
new_snake1 = new_state.snakes_by_color[1]
assert not new_snake1.alive
assert list(new_snake1.head_history) == [XY(1, 0), XY(2, 0), XY(3, 0), XY(3, 1),
XY(3, 2), XY(2, 2), XY(1, 2)]
assert new_snake1.length == 8
assert new_snake1.grow == 0
assert new_snake1.score == 4
assert not new_snake1.grow_uncertain
new_snake2 = new_state.snakes_by_color[2]
assert not new_snake2.alive
assert list(new_snake2.head_history) == [XY(0, 1), XY(0, 2)]
assert new_snake2.length == 3
assert new_snake2.grow == 0
assert new_snake2.score == 6
assert not new_snake2.grow_uncertain
def test_advance_game_double_tail_chase_frontal_crash_grow():
world, world_size = parse_world([
' @1*1*1',
'@2$1 *1',
'*2*1*1*1',
'$2 ',
])
snake1 = Snake(True, XY(1, 0), XY(1, 1), 1)
snake1.grow = 1
snake1.grow_uncertain = False
snake1.length = 8
snake1.score = 4
snake1.head_history = deque([XY(2, 0), XY(3, 0), XY(3, 1), XY(3, 2),
XY(2, 2), XY(1, 2), XY(1, 1)])
snake2 = Snake(True, XY(0, 1), XY(0, 3), 2)
snake2.grow = 0
snake2.grow_uncertain = False
snake2.length = 3
snake2.score = 6
snake2.head_history = deque([XY(0, 2), XY(0, 3)])
game_state = GameState(world, world_size, {1: snake1, 2: snake2}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_DOWN, 2: DIR_RIGHT})
assert not uncertainty
assert serialize_world(new_state) == [
' x + + ',
'x % + ',
'+ + + + ',
'% ',
]
new_snake1 = new_state.snakes_by_color[1]
assert not new_snake1.alive
assert list(new_snake1.head_history) == [XY(2, 0), XY(3, 0), XY(3, 1), XY(3, 2),
XY(2, 2), XY(1, 2), XY(1, 1)]
assert new_snake1.length == 8
assert new_snake1.grow == 1
assert new_snake1.score == 4
assert not new_snake1.grow_uncertain
new_snake2 = new_state.snakes_by_color[2]
assert not new_snake2.alive
assert list(new_snake2.head_history) == [XY(0, 2), XY(0, 3)]
assert new_snake2.length == 3
assert new_snake2.grow == 0
assert new_snake2.score == 6
assert not new_snake2.grow_uncertain
def test_advance_game_double_body_kill():
world, world_size = parse_world([
' @1*1$1',
'$2*2@2 ',
' ',
' ',
])
snake1 = Snake(True, XY(1, 0), XY(3, 0), 1)
snake1.grow = 0
snake1.grow_uncertain = False
snake1.length = 3
snake1.score = 4
snake1.head_history = deque([XY(2, 0), XY(3, 0)])
snake2 = Snake(True, XY(2, 1), XY(0, 1), 2)
snake2.grow = 0
snake2.grow_uncertain = False
snake2.length = 3
snake2.score = 6
snake2.head_history = deque([XY(1, 1), XY(0, 1)])
game_state = GameState(world, world_size, {1: snake1, 2: snake2}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_DOWN, 2: DIR_DOWN})
assert not uncertainty
assert serialize_world(new_state) == [
' x + % ',
' $2*2 ',
' @2 ',
' ',
]
new_snake1 = new_state.snakes_by_color[1]
assert not new_snake1.alive
assert list(new_snake1.head_history) == [XY(2, 0), XY(3, 0)]
assert new_snake1.length == 3
assert new_snake1.grow == 0
assert new_snake1.score == 4
assert not new_snake1.grow_uncertain
new_snake2 = new_state.snakes_by_color[2]
assert new_snake2.alive
assert list(new_snake2.head_history) == [XY(2, 1), XY(1, 1)]
assert new_snake2.length == 3
assert new_snake2.grow == 0
assert new_snake2.score == 1006
assert not new_snake2.grow_uncertain
def test_advance_game_double_body_kill2():
world, world_size = parse_world([
' @1*1$1',
'*2@2 ',
'$2 ',
' ',
])
snake1 = Snake(True, XY(1, 0), XY(3, 0), 1)
snake1.grow = 0
snake1.grow_uncertain = False
snake1.length = 3
snake1.score = 4
snake1.head_history = deque([XY(2, 0), XY(3, 0)])
snake2 = Snake(True, XY(1, 1), XY(0, 2), 2)
snake2.grow = 0
snake2.grow_uncertain = False
snake2.length = 3
snake2.score = 6
snake2.head_history = deque([XY(0, 1), XY(0, 2)])
game_state = GameState(world, world_size, {1: snake1, 2: snake2}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_DOWN, 2: DIR_DOWN})
assert not uncertainty
assert serialize_world(new_state) == [
' x + % ',
'$2*2 ',
' @2 ',
' ',
]
new_snake1 = new_state.snakes_by_color[1]
assert not new_snake1.alive
assert list(new_snake1.head_history) == [XY(2, 0), XY(3, 0)]
assert new_snake1.length == 3
assert new_snake1.grow == 0
assert new_snake1.score == 4
assert not new_snake1.grow_uncertain
new_snake2 = new_state.snakes_by_color[2]
assert new_snake2.alive
assert list(new_snake2.head_history) == [XY(1, 1), XY(0, 1)]
assert new_snake2.length == 3
assert new_snake2.grow == 0
assert new_snake2.score == 1006
assert not new_snake2.grow_uncertain
def test_advance_game_double_mutual_body_kill():
world, world_size = parse_world([
'$1*1*1 ',
' @2@1 ',
' *2*2$2',
' ',
])
snake1 = Snake(True, XY(2, 1), XY(0, 0), 1)
snake1.grow = 0
snake1.grow_uncertain = False
snake1.length = 4
snake1.score = 4
snake1.head_history = deque([XY(2, 0), XY(1, 0), XY(0, 0)])
snake2 = Snake(True, XY(1, 1), XY(3, 2), 2)
snake2.grow = 0
snake2.grow_uncertain = False
snake2.length = 4
snake2.score = 6
snake2.head_history = deque([XY(1, 2), XY(2, 2), XY(3, 2)])
game_state = GameState(world, world_size, {1: snake1, 2: snake2}, 0)
robot = MyRobotSnake(World(world_size.x, world_size.y, world))
new_state, uncertainty = robot.advance_game(game_state, {1: DIR_DOWN, 2: DIR_UP})
assert not uncertainty
assert serialize_world(new_state) == [
'% + + ',
' x x ',
' + + % ',
' ',
]
new_snake1 = new_state.snakes_by_color[1]
assert not new_snake1.alive
assert list(new_snake1.head_history) == [XY(2, 0), XY(1, 0), XY(0, 0)]
assert new_snake1.length == 4
assert new_snake1.grow == 0
assert new_snake1.score == 4
assert not new_snake1.grow_uncertain
new_snake2 = new_state.snakes_by_color[2]
assert not new_snake2.alive
assert list(new_snake2.head_history) == [XY(1, 2), XY(2, 2), XY(3, 2)]
assert new_snake2.length == 4
assert new_snake2.grow == 0
assert new_snake2.score == 6
assert not new_snake2.grow_uncertain
| 30.611856
| 106
| 0.58629
| 8,595
| 59,387
| 3.838743
| 0.017103
| 0.018367
| 0.011638
| 0.011366
| 0.95872
| 0.951385
| 0.949173
| 0.945172
| 0.937261
| 0.927563
| 0
| 0.067625
| 0.274909
| 59,387
| 1,939
| 107
| 30.627643
| 0.69859
| 0.001347
| 0
| 0.864397
| 0
| 0
| 0.047828
| 0
| 0
| 0
| 0
| 0
| 0.316607
| 1
| 0.029869
| false
| 0
| 0.002389
| 0
| 0.03405
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bf1b45abaf7c30ff140a162fd7a40d49f38f6d9e
| 93
|
py
|
Python
|
config/__init__.py
|
LandRegistry/matching-alpha
|
2755c28f819d0768fd3c6e413d888ff6cd8a71cc
|
[
"MIT"
] | 1
|
2017-02-16T18:56:32.000Z
|
2017-02-16T18:56:32.000Z
|
config/__init__.py
|
LandRegistry-Attic/matching-alpha
|
2755c28f819d0768fd3c6e413d888ff6cd8a71cc
|
[
"MIT"
] | 1
|
2015-01-21T12:30:52.000Z
|
2015-01-21T12:30:52.000Z
|
config/__init__.py
|
LandRegistry-Attic/matching-alpha
|
2755c28f819d0768fd3c6e413d888ff6cd8a71cc
|
[
"MIT"
] | 1
|
2021-04-11T06:06:27.000Z
|
2021-04-11T06:06:27.000Z
|
from config import Config
from config import DevelopmentConfig
from config import TestConfig
| 23.25
| 36
| 0.870968
| 12
| 93
| 6.75
| 0.416667
| 0.37037
| 0.592593
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 93
| 3
| 37
| 31
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
bf2b8fb82cb4a8d54d3c688e39ac3e45e5082ac4
| 34,820
|
py
|
Python
|
src/pre_filter.py
|
ivdpol/QuantifierComplexity
|
754cb76a7f2bb61458a12395082edb95e8dc9c5c
|
[
"MIT"
] | null | null | null |
src/pre_filter.py
|
ivdpol/QuantifierComplexity
|
754cb76a7f2bb61458a12395082edb95e8dc9c5c
|
[
"MIT"
] | null | null | null |
src/pre_filter.py
|
ivdpol/QuantifierComplexity
|
754cb76a7f2bb61458a12395082edb95e8dc9c5c
|
[
"MIT"
] | 1
|
2022-03-25T02:42:58.000Z
|
2022-03-25T02:42:58.000Z
|
'''
This file is part of QuantifierComplexity.
'''
def pre_filter(lang_gen, expr: tuple):
'''Filter out expressions based on syntax.
Return whether the given expression has the same meaning as a shorter
equivalent expression, based on several syntactic checks. If the function
returns True, the expression can safely be disregarded. If the function
returns False, there might still be a shorter equivalent expression that
the (incomplete) syntactic check does not detect.
'''
# Manually add a tautology to the language. Other tautologies
# are filtered out.
if expr == (">", "1", "0"):
return False
# Manually add a falsehood to the language. Other falsehoods
# are filtered out.
if expr == (">", "0", "0"):
return False
return "filter" in pre_filter_recursive(lang_gen, expr)
def pre_filter_recursive(lang_gen, expr: tuple):
'''Recursive auxiliary function for pre_filter().
Return a list of labels that carry some information about the meaning of
the given expression (based on the labels of subexpressions). These labels
are then used to detect whether an expression can safely be filtered out
because there is a shorter equivalent expression.
The following labels with the following meaning are used:
- "filter": the expression can safely be filtered out
- "subsetOfA": the expression has as meaning a subset of the set A
- "subsetOfB": the expression has as meaning a subset of the set B
- "supersetOfA": the expression has as meaning a superset of the set A
- "supersetOfB": the expression has as meaning a superset of the set B
- "primary": the expression has as meaning one of the 'primary' sets (i.e.
sets that can be formed from A and B using basic set operations)
- "isDigit": the expression is a number constant
- "atmostsingleton": the expression has as meaning a set of size at most 1
- "combinedset": the expression has as main operator one of the set
theoretic operators (union, intersection, difference)
'''
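# Illustrative label outputs, derived from the cases handled below (assuming a
# suitable lang_gen):
#     pre_filter_recursive(lang_gen, "A")                 -> ["subsetOfA", "supersetOfA", "primary"]
#     pre_filter_recursive(lang_gen, "3")                 -> ["isDigit"]
#     pre_filter_recursive(lang_gen, ("diff", "A", "B"))  -> ["primary", "subsetOfA"]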
# For atomic expressions, return list of appropriate labels.
if expr == "A":
return ["subsetOfA", "supersetOfA", "primary"]
if expr == "B":
return ["subsetOfB", "supersetOfB", "primary"]
if str(expr).isdigit():
return ["isDigit"]
main_operator = expr[0]
# Filter out some handpicked duplicates with smaller equivalent
# expressions.
if expr == ('>', ('card', ('union', 'A', 'B')), '0'):
return ["filter"]
# For primary areas (except the empty set), return list of
# appropriate labels.
if expr == ("union", "A", "B"):
return ["primary", "supersetOfA", "supersetOfB"]
if expr == ("diff", "A", "B"):
return ["primary", "subsetOfA"]
if "intersection" in lang_gen.operators:
if expr == ("intersection", "A", "B"):
return ["primary", "subsetOfA", "subsetOfB"]
else:
if expr == ("diff", "A", ("diff", "A", "B")):
return ["primary", "subsetOfA", "subsetOfB"]
if expr == ("diff", "B", "A"):
return ["primary", "subsetOfB"]
if expr == ("union", ("diff", "A", "B"), ("diff", "B", "A")):
return ["primary"]
# Recursively compute the labels of subexpressions.
info_tuple = tuple(map(pre_filter_recursive, [lang_gen] * 2, expr[1:]))
# If any subexpression contains the label "filter" (meaning the
# expression should be filtered), return the list with just the
# label "filter".
for info in info_tuple:
if "filter" in info:
return ["filter"]
# Comparisons with two constant numbers are always either true
# or false, so we can filter them.
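# For example, (">", "3", "1") is always true and ("=", "0", "2") is always
# false, so neither can occur in a minimal non-trivial expression.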
if len(expr) >= 3 and str(expr[1]).isdigit() and str(expr[2]).isdigit():
return ["filter"]
# Filter expressions of the form (0 > X), because equivalent
# to falsum.
if main_operator == ">" and str(expr[1]) == "0":
return ["filter"]
# Filter expressions of the form (X > max_model_size), because
# equivalent to falsum.
if main_operator == ">" and str(expr[2]) == str(lang_gen.max_model_size):
return ["filter"]
# If the two subexpressions are the same, for these operators,
# we can filter the entire expression.
if (len(expr) >= 3 and main_operator in [
# A \ A is the empty set, which is never present in minimal
# expressions, therefore filtered.
"subset", "union", "intersection", "diff", ">", "="
] and str(expr[1]) == str(expr[2])):
return ["filter"]
# For expressions formed with the = operator:
if main_operator == "=":
# For number comparisons with "=", only allow constants on
# the right hand side.
if str(expr[1]).isdigit():
return ["filter"]
# For number comparisons with "=", the cardinality of a
# singleton (with constant) and a constant, there always
# exists another shorter expression.
if (expr[1][0] == "card" and
# expr[1][1][0] == "index" and
"atmostsingleton" in pre_filter_recursive(
lang_gen,
expr[1][1]
) and
str(expr[1][1][1]).isdigit() and
str(expr[2]).isdigit()):
return ["filter"]
# Forbid expressions of the form |i(...)| = 2+, because
# equivalent to falsum.
if (expr[1][0] == "card" and
# expr[1][1][0] == "index" and
"atmostsingleton" in pre_filter_recursive(
lang_gen,
expr[1][1]
) and
str(expr[2]).isdigit() and
int(str(expr[2])) >= 2):
return ["filter"]
# For expressions formed with the > operator:
if main_operator == ">":
# For number comparisons with ">", forbid "1" on the left
# hand side (because that's equivalent to "= 0").
if expr[1] == "1":
return ["filter"]
# For number comparisons with ">", the cardinality of a
# singleton (with constant) and a constant, there always
# exists another shorter expression.
if (expr[1][0] == "card" and
expr[1][1][0] == "index" and
str(expr[1][1][1]).isdigit() and
str(expr[2]).isdigit()):
return ["filter"]
if (expr[2][0] == "card" and
expr[2][1][0] == "index" and
str(expr[2][1][1]).isdigit() and
str(expr[1]).isdigit()):
return ["filter"]
# Forbid expressions of the form |i(...)| > 1+, because
# equivalent to falsum.
if (expr[1][0] == "card" and
# expr[1][1][0] == "index" and
"atmostsingleton" in pre_filter_recursive(
lang_gen,
expr[1][1]
) and
str(expr[2]).isdigit() and
int(str(expr[2])) >= 1):
return ["filter"]
# Forbid expressions of the form 2+ > |i(...)|, because
# equivalent to tautology.
if (expr[2][0] == "card" and
# expr[2][1][0] == "index" and
"atmostsingleton" in pre_filter_recursive(
lang_gen,
expr[2][1]
) and
str(expr[1]).isdigit() and
int(str(expr[1])) >= 2):
return ["filter"]
# For number comparisons with ">", forbid (max_model_size-1)
# on the right hand side (because that's equivalent to
# "= max_model_size").
if main_operator == ">":
if expr[2] == str(lang_gen.max_model_size-1):
return ["filter"]
# We can filter out some redundant Boolean combinations.
if main_operator == "not":
# Double negations are equivalent to the unnegated expression.
if str(expr[1]).startswith("('not"):
return ["filter"]
# Negated comparisons against a constant have an unnegated
# equivalent (e.g. not(X > c) amounts to c+1 > X).
if (str(expr[1]).startswith("('>") and
(str(expr[1][1]).isdigit() or
str(expr[1][2]).isdigit())):
return ["filter"]
if (main_operator == "and" and
str(expr[1]).startswith("('not") and
str(expr[2]).startswith("('not")):
return ["filter"]
if (main_operator == "or" and
str(expr[1]).startswith("('not") and
str(expr[2]).startswith("('not")):
return ["filter"]
# For expressions formed with the subset operator:
if main_operator == "subset":
# A subset of A is always contained in any superset of A,
# so such expressions are trivially true (and thus we can
# filter them out); likewise for B below.
if ("subsetOfA" in info_tuple[0] and
"supersetOfA" in info_tuple[1]):
return ["filter"]
if ("subsetOfB" in info_tuple[0] and
"supersetOfB" in info_tuple[1]):
return ["filter"]
# Filter out expressions of the form X ⊆ i(2+,Y) where X is a
# superset of A and Y is a subset of A, because equivalent
# to falsum. (The index expression is the right-hand side of
# the subset, i.e. expr[2].)
if ("supersetOfA" in info_tuple[0] and
expr[2][0] == "index" and
str(expr[2][1]).isdigit() and
int(str(expr[2][1])) >= 2 and
"subsetOfA" in pre_filter_recursive(lang_gen, expr[2][2])):
return ["filter"]
if ("supersetOfB" in info_tuple[0] and
expr[2][0] == "index" and
str(expr[2][1]).isdigit() and
int(str(expr[2][1])) >= 2 and
"subsetOfB" in pre_filter_recursive(lang_gen, expr[2][2])):
return ["filter"]
# Filter out expressions of the form: X ⊆ index(c,X) for
# a constant c, because equivalent to |X| < 1, |X| < 2, or
# to falsum (depending on the value of c).
if (expr[2][0] == "index" and
str(expr[2][1]).isdigit() and
expr[1] == expr[2][2]):
return ["filter"]
# Filter out expressions of the form (X \ Y) ⊆ Z,
# where Y is a subset of A and Z is a superset of A,
# (because equivalent to X ⊆ Z), and similarly for B.
if (expr[1][0] == "diff" and
"subsetOfA" in pre_filter_recursive(lang_gen, expr[1][2]) and
"supersetOfA" in info_tuple[1]):
return ["filter"]
if (expr[1][0] == "diff" and
"subsetOfB" in pre_filter_recursive(lang_gen, expr[1][2]) and
"supersetOfB" in info_tuple[1]):
return ["filter"]
# Filter out expressions of the form (X ∪ Y) ⊆ Z,
# where Y is a subset of A and Z is a superset of A,
# (because equivalent to X ⊆ Z), and similarly for B.
if (expr[1][0] == "union" and
"subsetOfA" in pre_filter_recursive(lang_gen, expr[1][2]) and
"supersetOfA" in info_tuple[1]):
return ["filter"]
if (expr[1][0] == "union" and
"subsetOfA" in pre_filter_recursive(lang_gen, expr[1][1]) and
"supersetOfA" in info_tuple[1]):
return ["filter"]
if (expr[1][0] == "union" and
"subsetOfB" in pre_filter_recursive(lang_gen, expr[1][2]) and
"supersetOfB" in info_tuple[1]):
return ["filter"]
if (expr[1][0] == "union" and
"subsetOfB" in pre_filter_recursive(lang_gen, expr[1][1]) and
"supersetOfB" in info_tuple[1]):
return ["filter"]
# Filter out expressions of the form X ⊆ (Y ∪ Z),
# where X is a superset of A and Y is a subset of A,
# (because equivalent to X ⊆ Z), and similarly for B.
if (expr[2][0] == "union" and
"subsetOfA" in pre_filter_recursive(lang_gen, expr[2][1]) and
"supersetOfA" in info_tuple[0]):
return ["filter"]
if (expr[2][0] == "union" and
"subsetOfA" in pre_filter_recursive(lang_gen, expr[2][2]) and
"supersetOfA" in info_tuple[0]):
return ["filter"]
if (expr[2][0] == "union" and
"subsetOfB" in pre_filter_recursive(lang_gen, expr[2][1]) and
"supersetOfB" in info_tuple[0]):
return ["filter"]
if (expr[2][0] == "union" and
"subsetOfB" in pre_filter_recursive(lang_gen, expr[2][2]) and
"supersetOfB" in info_tuple[0]):
return ["filter"]
# Filter out expressions of the form X ⊆ (X ∩ Y)
# (because equivalent to X ⊆ Y).
if (expr[2][0] == "intersection" and
expr[1] == expr[2][1]):
return ["filter"]
if (expr[2][0] == "intersection" and
expr[1] == expr[2][2]):
return ["filter"]
# Filter out expressions of the form X ⊆ (X ∪ Y)
# (because equivalent to T).
if (expr[2][0] == "union" and
expr[1] == expr[2][1]):
return ["filter"]
if (expr[2][0] == "union" and
expr[1] == expr[2][2]):
return ["filter"]
# Filter out expressions of the form X ⊆ (Y \ X)
# (because equivalent to |X| = 0).
if (expr[2][0] == "diff" and
expr[1] == expr[2][2]):
return ["filter"]
# Filter out expressions of the form (X ∩ Y) ⊆ X
# (because equivalent to T).
if (expr[1][0] == "intersection" and
expr[2] == expr[1][1]):
return ["filter"]
if (expr[1][0] == "intersection" and
expr[2] == expr[1][2]):
return ["filter"]
# Filter out expressions of the form (X ∪ Y) ⊆ X
# (because equivalent to Y ⊆ X).
if (expr[1][0] == "union" and
expr[2] == expr[1][1]):
return ["filter"]
if (expr[1][0] == "union" and
expr[2] == expr[1][2]):
return ["filter"]
# Filter out expressions of the form (X \ Y) ⊆ X
# (because equivalent to T).
if (expr[1][0] == "diff" and
expr[2] == expr[1][1]):
return ["filter"]
# Filter out expressions of the form (Y \ X) ⊆ X
# (because equivalent to |Y| = 0).
if (expr[1][0] == "diff" and
expr[2] == expr[1][2]):
return ["filter"]
# Filter out expressions of the form (X \ Y) ⊆ (X \ Z)
# because equivalent to Z ⊆ Y.
if (expr[1][0] == "diff" and
expr[2][0] == "diff" and
expr[1][1] == expr[2][1]):
return ["filter"]
# Filter out expressions of the form (X ∪ Y) ⊆ A
# where X is a subset of A
# (because equivalent to Y ⊆ A),
# and similarly for B.
if (expr[1][0] == "union" and
expr[2] == "A" and
"subsetOfA" in pre_filter_recursive(lang_gen, expr[1][1])):
return ["filter"]
if (expr[1][0] == "union" and
expr[2] == "A" and
"subsetOfA" in pre_filter_recursive(lang_gen, expr[1][2])):
return ["filter"]
if (expr[1][0] == "union" and
expr[2] == "B" and
"subsetOfB" in pre_filter_recursive(lang_gen, expr[1][1])):
return ["filter"]
if (expr[1][0] == "union" and
expr[2] == "B" and
"subsetOfB" in pre_filter_recursive(lang_gen, expr[1][2])):
return ["filter"]
# Filter out expressions of the form index(j,X) ⊆ X
# (because equivalent to T).
if (expr[1][0] == "index" and
expr[2] == expr[1][2]):
return ["filter"]
# For expressions formed with the union operator:
if main_operator == "union":
# Label them with "combinedset" if they are not filtered out.
info = ["combinedset"]
# Filter out expressions that are equivalent to (A ∪ B)
if ("subsetOfA" in info_tuple[0] and
"supersetOfA" in info_tuple[0] and
"subsetOfB" in info_tuple[1] and
"supersetOfB" in info_tuple[1]):
return ["filter"]
if ("subsetOfA" in info_tuple[1] and
"supersetOfA" in info_tuple[1] and
"subsetOfB" in info_tuple[0] and
"supersetOfB" in info_tuple[0]):
return ["filter"]
# Filter out expressions of the form (X ∪ Y) where X is a
# superset of A and Y is a subset of A (because equivalent
# to X, which absorbs Y); and similarly for B.
if "supersetOfA" in info_tuple[0] and "subsetOfA" in info_tuple[1]:
return ["filter"]
if "supersetOfA" in info_tuple[1] and "subsetOfA" in info_tuple[0]:
return ["filter"]
if "supersetOfB" in info_tuple[0] and "subsetOfB" in info_tuple[1]:
return ["filter"]
if "supersetOfB" in info_tuple[1] and "subsetOfB" in info_tuple[0]:
return ["filter"]
# Add the labels "subsetOfA"/"supersetOfA"/etc as needed
if "supersetOfA" in info_tuple[0]:
info.append("supersetOfA")
if "supersetOfA" in info_tuple[1]:
info.append("supersetOfA")
if "supersetOfB" in info_tuple[0]:
info.append("supersetOfB")
if "supersetOfB" in info_tuple[1]:
info.append("supersetOfB")
if "subsetOfA" in info_tuple[0] and "subsetOfA" in info_tuple[1]:
info.append("subsetOfA")
if "subsetOfB" in info_tuple[0] and "subsetOfB" in info_tuple[1]:
info.append("subsetOfB")
# Adding a subset of A to a superset of A just gives the
# superset; those cases (and the B analogues) were already
# filtered out above, before the labels were appended.
# We only allow primary sets on the left hand side
if "primary" in info_tuple[1]:
return ["filter"]
# For joining combinedsets with singletons, only allow one order
# (where the singleton is on the right hand side)
if ("singleton" in info_tuple[0] and
"combinedset" in info_tuple[1]):
return ["filter"]
# For joining combinedsets with combinedsets, or joining
# singletons with singletons, only allow them in alphabetic
# order (to remove duplicates).
if ("combinedset" in info_tuple[0] and
"combinedset" in info_tuple[1] and
str(expr[1]) > str(expr[2])):
return ["filter"]
if ("singleton" in info_tuple[0] and
"singleton" in info_tuple[1] and
str(expr[1]) > str(expr[2])):
return ["filter"]
# Forbid right-nested unions of the form (X ∪ (Y ∪ Z)),
# because equivalent to the left-nested ((X ∪ Y) ∪ Z).
if expr[2][0] == "union":
return ["filter"]
# Forbid expressions of the form (X ∪ Y ∪ Z)
# where one is a subset of A and another is a superset of A
# (and similarly for B).
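# (Note: the expr[2][0] == "union" cases below can no longer
# fire, because right-nested unions were already filtered out
# just above.)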
if ("subsetOfA" in info_tuple[0] and
expr[2][0] == "union" and
"supersetOfA" in pre_filter_recursive(lang_gen, expr[2][1])):
return ["filter"]
if ("subsetOfA" in info_tuple[0] and
expr[2][0] == "union" and
"supersetOfA" in pre_filter_recursive(lang_gen, expr[2][2])):
return ["filter"]
if ("supersetOfA" in info_tuple[0] and
expr[2][0] == "union" and
"subsetOfA" in pre_filter_recursive(lang_gen, expr[2][1])):
return ["filter"]
if ("supersetOfA" in info_tuple[0] and
expr[2][0] == "union" and
"subsetOfA" in pre_filter_recursive(lang_gen, expr[2][2])):
return ["filter"]
if (expr[1][0] == "union" and
"subsetOfA" in pre_filter_recursive(lang_gen, expr[1][1]) and
"supersetOfA" in info_tuple[1]):
return ["filter"]
if (expr[1][0] == "union" and
"subsetOfA" in pre_filter_recursive(lang_gen, expr[1][2]) and
"supersetOfA" in info_tuple[1]):
return ["filter"]
if (expr[1][0] == "union" and
"supersetOfA" in pre_filter_recursive(lang_gen, expr[1][1]) and
"subsetOfA" in info_tuple[1]):
return ["filter"]
if (expr[1][0] == "union" and
"supersetOfA" in pre_filter_recursive(lang_gen, expr[1][2]) and
"subsetOfA" in info_tuple[1]):
return ["filter"]
# The same checks as above, now for B:
if ("subsetOfB" in info_tuple[0] and
expr[2][0] == "union" and
"supersetOfB" in pre_filter_recursive(lang_gen, expr[2][1])):
return ["filter"]
if ("subsetOfB" in info_tuple[0] and
expr[2][0] == "union" and
"supersetOfB" in pre_filter_recursive(lang_gen, expr[2][2])):
return ["filter"]
if ("supersetOfB" in info_tuple[0] and
expr[2][0] == "union" and
"subsetOfB" in pre_filter_recursive(lang_gen, expr[2][1])):
return ["filter"]
if ("supersetOfB" in info_tuple[0] and
expr[2][0] == "union" and
"subsetOfB" in pre_filter_recursive(lang_gen, expr[2][2])):
return ["filter"]
if (expr[1][0] == "union" and
"subsetOfB" in pre_filter_recursive(lang_gen, expr[1][1]) and
"supersetOfB" in info_tuple[1]):
return ["filter"]
if (expr[1][0] == "union" and
"subsetOfB" in pre_filter_recursive(lang_gen, expr[1][2]) and
"supersetOfB" in info_tuple[1]):
return ["filter"]
if (expr[1][0] == "union" and
"supersetOfB" in pre_filter_recursive(lang_gen, expr[1][1]) and
"subsetOfB" in info_tuple[1]):
return ["filter"]
if (expr[1][0] == "union" and
"supersetOfB" in pre_filter_recursive(lang_gen, expr[1][2]) and
"subsetOfB" in info_tuple[1]):
return ["filter"]
# Forbid expressions of the form X ∪ (Y \ Z) where X is a
# superset of A and Z is a subset of A (then Z ⊆ X, so this is
# equivalent to the shorter X ∪ Y); similarly for B.
if (expr[2][0] == "diff" and
"supersetOfA" in info_tuple[0] and
"subsetOfA" in pre_filter_recursive(lang_gen, expr[2][2])):
return ["filter"]
if (expr[1][0] == "diff" and
"supersetOfA" in info_tuple[1] and
"subsetOfA" in pre_filter_recursive(lang_gen, expr[1][2])):
return ["filter"]
if (expr[2][0] == "diff" and
"supersetOfB" in info_tuple[0] and
"subsetOfB" in pre_filter_recursive(lang_gen, expr[2][2])):
return ["filter"]
if (expr[1][0] == "diff" and
"supersetOfB" in info_tuple[1] and
"subsetOfB" in pre_filter_recursive(lang_gen, expr[1][2])):
return ["filter"]
return info
# For expressions formed with the intersection operator:
if main_operator == "intersection":
# Label them with "combinedset" if they are not filtered out
info = ["combinedset"]
# Add the labels "subsetOfA"/"supersetOfA"/etc as needed
if "subsetOfA" in info_tuple[0]:
info.append("subsetOfA")
if "subsetOfA" in info_tuple[1]:
info.append("subsetOfA")
if "subsetOfB" in info_tuple[0]:
info.append("subsetOfB")
if "subsetOfB" in info_tuple[1]:
info.append("subsetOfB")
if "supersetOfA" in info_tuple[0] and "supersetOfA" in info_tuple[1]:
info.append("supersetOfA")
if "supersetOfB" in info_tuple[0] and "supersetOfB" in info_tuple[1]:
info.append("supersetOfB")
if "atmostsingleton" in info_tuple[0]:
info.append("atmostsingleton")
if "atmostsingleton" in info_tuple[1]:
info.append("atmostsingleton")
# Intersecting a subset of A with a superset of A will just give the
# subset, so we can filter these expressions (similarly for B)
if "supersetOfA" in info_tuple[0] and "subsetOfA" in info_tuple[1]:
return ["filter"]
if "supersetOfA" in info_tuple[1] and "subsetOfA" in info_tuple[0]:
return ["filter"]
if "supersetOfB" in info_tuple[0] and "subsetOfB" in info_tuple[1]:
return ["filter"]
if "supersetOfB" in info_tuple[1] and "subsetOfB" in info_tuple[0]:
return ["filter"]
# We only allow primary sets on the left hand side.
if "primary" in info_tuple[1]:
return ["filter"]
# For intersecting combinedsets with singletons, only allow one
# order (where the singleton is on the right hand side)
if ("singleton" in info_tuple[0] and
"combinedset" in info_tuple[1]):
return ["filter"]
# For intersecting combinedsets with combinedsets, or
# intersecting singletons with singletons, only allow them
# in alphabetic order (to remove duplicates).
if ("combinedset" in info_tuple[0] and
"combinedset" in info_tuple[1] and
str(expr[1]) > str(expr[2])):
return ["filter"]
if ("singleton" in info_tuple[0] and
"singleton" in info_tuple[1] and
str(expr[1]) > str(expr[2])):
return ["filter"]
# Forbid expressions of the form (X ∩ (Y ∩ Z)),
# because equivalent to ((X ∩ Y) ∩ Z).
if expr[2][0] == "intersection":
return ["filter"]
# Forbid expressions of the form (X ∩ Y ∩ Z)
# where one is a subset of A and another is a superset of A
# (and similarly for B).
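# (Note: the expr[2][0] == "intersection" cases below can no
# longer fire, because right-nested intersections were already
# filtered out just above.)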
if ("subsetOfA" in info_tuple[0] and
expr[2][0] == "intersection" and
"supersetOfA" in pre_filter_recursive(lang_gen, expr[2][1])):
return ["filter"]
if ("subsetOfA" in info_tuple[0] and
expr[2][0] == "intersection" and
"supersetOfA" in pre_filter_recursive(lang_gen, expr[2][2])):
return ["filter"]
if ("supersetOfA" in info_tuple[0] and
expr[2][0] == "intersection" and
"subsetOfA" in pre_filter_recursive(lang_gen, expr[2][1])):
return ["filter"]
if ("supersetOfA" in info_tuple[0] and
expr[2][0] == "intersection" and
"subsetOfA" in pre_filter_recursive(lang_gen, expr[2][2])):
return ["filter"]
if (expr[1][0] == "intersection" and
"subsetOfA" in pre_filter_recursive(lang_gen, expr[1][1]) and
"supersetOfA" in info_tuple[1]):
return ["filter"]
if (expr[1][0] == "intersection" and
"subsetOfA" in pre_filter_recursive(lang_gen, expr[1][2]) and
"supersetOfA" in info_tuple[1]):
return ["filter"]
if (expr[1][0] == "intersection" and
"supersetOfA" in pre_filter_recursive(lang_gen, expr[1][1]) and
"subsetOfA" in info_tuple[1]):
return ["filter"]
if (expr[1][0] == "intersection" and
"supersetOfA" in pre_filter_recursive(lang_gen, expr[1][2]) and
"subsetOfA" in info_tuple[1]):
return ["filter"]
# The same checks as above, now for B:
if ("subsetOfB" in info_tuple[0] and
expr[2][0] == "intersection" and
"supersetOfB" in pre_filter_recursive(lang_gen, expr[2][1])):
return ["filter"]
if ("subsetOfB" in info_tuple[0] and
expr[2][0] == "intersection" and
"supersetOfB" in pre_filter_recursive(lang_gen, expr[2][2])):
return ["filter"]
if ("supersetOfB" in info_tuple[0] and
expr[2][0] == "intersection" and
"subsetOfB" in pre_filter_recursive(lang_gen, expr[2][1])):
return ["filter"]
if ("supersetOfB" in info_tuple[0] and
expr[2][0] == "intersection" and
"subsetOfB" in pre_filter_recursive(lang_gen, expr[2][2])):
return ["filter"]
if (expr[1][0] == "intersection" and
"subsetOfB" in pre_filter_recursive(lang_gen, expr[1][1]) and
"supersetOfB" in info_tuple[1]):
return ["filter"]
if (expr[1][0] == "intersection" and
"subsetOfB" in pre_filter_recursive(lang_gen, expr[1][2]) and
"supersetOfB" in info_tuple[1]):
return ["filter"]
if (expr[1][0] == "intersection" and
"supersetOfB" in pre_filter_recursive(lang_gen, expr[1][1]) and
"subsetOfB" in info_tuple[1]):
return ["filter"]
if (expr[1][0] == "intersection" and
"supersetOfB" in pre_filter_recursive(lang_gen, expr[1][2]) and
"subsetOfB" in info_tuple[1]):
return ["filter"]
# Forbid expressions of the form i(c,X) ∩ i(c',X) for
# constants c != c'.
if (expr[1][0] == "index" and
str(expr[1][1]).isdigit() and
expr[2][0] == "index" and
str(expr[2][1]).isdigit() and
str(expr[1][1]) != str(expr[2][1]) and
expr[1][2] == expr[2][2]):
return ["filter"]
# Forbid expressions of the form X ∩ (Y \ Z) where X is a
# subset of A and Z is a superset of A (then X ⊆ Z, so the
# result is always the empty set); similarly for B.
if (expr[2][0] == "diff" and
"subsetOfA" in info_tuple[0] and
"supersetOfA" in pre_filter_recursive(lang_gen, expr[2][2])):
return ["filter"]
if (expr[1][0] == "diff" and
"subsetOfA" in info_tuple[1] and
"supersetOfA" in pre_filter_recursive(lang_gen, expr[1][2])):
return ["filter"]
if (expr[2][0] == "diff" and
"subsetOfB" in info_tuple[0] and
"supersetOfB" in pre_filter_recursive(lang_gen, expr[2][2])):
return ["filter"]
if (expr[1][0] == "diff" and
"subsetOfB" in info_tuple[1] and
"supersetOfB" in pre_filter_recursive(lang_gen, expr[1][2])):
return ["filter"]
return info
# For expressions formed with the difference operator:
if main_operator == "diff":
# Label them with "combinedset" if they are not filtered out
info = ["combinedset"]
# Add the labels "subsetOfA"/"atmostsingleton"/etc as needed
if "subsetOfA" in info_tuple[0]:
info.append("subsetOfA")
if "subsetOfB" in info_tuple[0]:
info.append("subsetOfB")
if "atmostsingleton" in info_tuple[0]:
info.append("atmostsingleton")
# A subset of A minus a superset of A is always empty, so we
# can filter it out (similarly for B)
if ("subsetOfA" in info_tuple[0] and
"supersetOfA" in info_tuple[1]):
return ["filter"]
if ("subsetOfB" in info_tuple[0] and
"supersetOfB" in info_tuple[1]):
return ["filter"]
# Forbid subtracting a primary set from another primary set
# (the result is again a primary set, which we already have).
if ("primary" in info_tuple[0] and
"primary" in info_tuple[1]):
return ["filter"]
# Filter out expressions of the form (X \ Y) \ Z where
# Y is a subset of A and Z is a superset of A, because
# equivalent to (X \ Z), and similarly for B.
if (expr[1][0] == "diff" and
"subsetOfA" in pre_filter_recursive(lang_gen, expr[1][2]) and
"supersetOfA" in info_tuple[1]):
return ["filter"]
if (expr[1][0] == "diff" and
"subsetOfB" in pre_filter_recursive(lang_gen, expr[1][2]) and
"supersetOfB" in info_tuple[1]):
return ["filter"]
# Forbid expressions of the form i(c,X) \ i(c',X) for
# constants c != c'.
if (expr[1][0] == "index" and
str(expr[1][1]).isdigit() and
expr[2][0] == "index" and
str(expr[2][1]).isdigit() and
str(expr[1][1]) != str(expr[2][1]) and
expr[1][2] == expr[2][2]):
return ["filter"]
# Forbid expressions of the form (X \ Y) \ X, because
# equivalent to the empty set.
if (expr[1][0] == "diff" and expr[1][1] == expr[2]):
return ["filter"]
# Forbid expressions of the form X \ (Y \ X), because
# equivalent to X.
if (expr[2][0] == "diff" and expr[2][2] == expr[1]):
return ["filter"]
# Forbid expressions of the form X \ (X \ Y), because
# equivalent to X ∩ Y.
if "intersection" in lang_gen.operators:
if (expr[2][0] == "diff" and expr[2][1] == expr[1]):
return ["filter"]
return info
# For expressions formed with the index operator:
if main_operator == "index":
info = ["singleton", "atmostsingleton"]
# Forbid taking the 0'th element of anything: index(0, set)
# always yields the empty set, which is never part of any
# shortest expression with 'unique meaning'.
if str(expr[1]) == "0":
return ["filter"]
# Forbid taking the first element of something that is at
# most a singleton (because it will just give the same set again).
if ("atmostsingleton" in info_tuple[1] and expr[1] == "1"):
return ["filter"]
# Forbid taking the c'th element of an index expression
# (itself at most a singleton) for any constant c, because
# the result is either the empty set or that singleton itself.
if (str(expr[1]).isdigit() and
expr[2][0] == "index"):
return ["filter"]
# Add the labels "subsetOfA" and "subsetOfB" as needed
if "subsetOfA" in info_tuple[1]:
info.append("subsetOfA")
if "subsetOfB" in info_tuple[1]:
info.append("subsetOfB")
return info
# If none of the cases applies, return the empty list.
return []
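# A minimal usage sketch (not part of the original module): `lang_gen` is
# assumed to be any object exposing the attributes used above, namely
# `operators` (a collection of operator names) and `max_model_size` (an
# int); the SimpleNamespace below is only for illustration.
#
# from types import SimpleNamespace
# lang_gen = SimpleNamespace(operators=["union", "diff", "intersection"],
#                            max_model_size=8)
# pre_filter_recursive(lang_gen, "A")                  # ["subsetOfA", "supersetOfA", "primary"]
# pre_filter_recursive(lang_gen, ("union", "A", "B"))  # ["primary", "supersetOfA", "supersetOfB"]
# pre_filter_recursive(lang_gen, ("diff", "A", "A"))   # ["filter"] (X \ X is trivial)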
| 42.411693
| 79
| 0.537565
| 4,478
| 34,820
| 4.109424
| 0.051362
| 0.038583
| 0.080698
| 0.044995
| 0.820346
| 0.793609
| 0.759102
| 0.736442
| 0.703619
| 0.671286
| 0
| 0.026963
| 0.332165
| 34,820
| 820
| 80
| 42.463415
| 0.762363
| 0.25359
| 0
| 0.825137
| 0
| 0
| 0.15516
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.003643
| false
| 0
| 0
| 0
| 0.265938
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
174a83312345752785578170f70f20fa9aac21bc
| 548
|
py
|
Python
|
granule_ingester/granule_ingester/processors/__init__.py
|
kevinmarlis/incubator-sdap-ingester
|
7ee17fdf16201c499f7bd35cf398844f2c70f046
|
[
"Apache-2.0"
] | null | null | null |
granule_ingester/granule_ingester/processors/__init__.py
|
kevinmarlis/incubator-sdap-ingester
|
7ee17fdf16201c499f7bd35cf398844f2c70f046
|
[
"Apache-2.0"
] | 1
|
2021-05-03T22:13:11.000Z
|
2021-05-03T22:13:11.000Z
|
granule_ingester/granule_ingester/processors/__init__.py
|
kevinmarlis/incubator-sdap-ingester
|
7ee17fdf16201c499f7bd35cf398844f2c70f046
|
[
"Apache-2.0"
] | null | null | null |
from granule_ingester.processors.EmptyTileFilter import EmptyTileFilter
from granule_ingester.processors.GenerateTileId import GenerateTileId
from granule_ingester.processors.TileProcessor import TileProcessor
from granule_ingester.processors.TileSummarizingProcessor import TileSummarizingProcessor
from granule_ingester.processors.kelvintocelsius import KelvinToCelsius
from granule_ingester.processors.Subtract180FromLongitude import Subtract180FromLongitude
from granule_ingester.processors.ForceAscendingLatitude import ForceAscendingLatitude
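# A usage sketch (not part of the original file): since this __init__
# re-exports the processor classes, downstream code can import them at
# package level, e.g.
# from granule_ingester.processors import KelvinToCelsius, GenerateTileId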
| 68.5
| 89
| 0.923358
| 49
| 548
| 10.183673
| 0.244898
| 0.154309
| 0.266533
| 0.406814
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011538
| 0.051095
| 548
| 7
| 90
| 78.285714
| 0.948077
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
17939849db52ee8405fbea2c2f1aaec7ada35eb0
| 179
|
py
|
Python
|
knx_stack/encode/layer/application/a_group_value_write/__init__.py
|
majamassarini/knx-stack
|
11a9baac6b7600649b5fbca43c93b200b23676b4
|
[
"MIT"
] | 2
|
2021-07-28T07:42:28.000Z
|
2022-01-25T18:56:05.000Z
|
knx_stack/encode/layer/application/a_group_value_write/__init__.py
|
majamassarini/knx-stack
|
11a9baac6b7600649b5fbca43c93b200b23676b4
|
[
"MIT"
] | 6
|
2021-07-25T21:36:01.000Z
|
2022-02-20T21:11:31.000Z
|
knx_stack/encode/layer/application/a_group_value_write/__init__.py
|
majamassarini/knx-stack
|
11a9baac6b7600649b5fbca43c93b200b23676b4
|
[
"MIT"
] | null | null | null |
from knx_stack.encode.layer.application.a_group_value_write.encode import (
al_encode as encode,
)
from knx_stack.encode.layer.application.a_group_value_write import req, ind
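# A usage sketch (not part of the original file): the alias in the first
# import exposes `al_encode` under the shorter name `encode`, so callers
# can write e.g.
# from knx_stack.encode.layer.application.a_group_value_write import encode, req, ind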
| 35.8
| 75
| 0.832402
| 29
| 179
| 4.827586
| 0.517241
| 0.1
| 0.171429
| 0.257143
| 0.714286
| 0.714286
| 0.714286
| 0.714286
| 0.714286
| 0.714286
| 0
| 0
| 0.094972
| 179
| 4
| 76
| 44.75
| 0.864198
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 10
|
bd69f463b4e52655034ac38b210b0ad82a17bb2c
| 167
|
py
|
Python
|
python1901.py
|
denniszj/python1901
|
2f1b13f60b992a3120c5066dc61e94237a0c490e
|
[
"Apache-2.0"
] | null | null | null |
python1901.py
|
denniszj/python1901
|
2f1b13f60b992a3120c5066dc61e94237a0c490e
|
[
"Apache-2.0"
] | 1
|
2019-03-05T06:21:15.000Z
|
2019-03-05T06:21:15.000Z
|
python1901.py
|
denniszj/python1901
|
2f1b13f60b992a3120c5066dc61e94237a0c490e
|
[
"Apache-2.0"
] | null | null | null |
print("hello python01")
print("hello python01")
print("hello python01")
print("hello python01")
print("hello python01")
print("hello python01")
print("hello python01")
| 23.857143
| 23
| 0.754491
| 21
| 167
| 6
| 0.142857
| 0.555556
| 1
| 1.095238
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0.090909
| 0.077844
| 167
| 7
| 24
| 23.857143
| 0.727273
| 0
| 0
| 1
| 0
| 0
| 0.583333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 14
|
bda1f4b695e2658645048d8cff681315af9e2bb8
| 4,274
|
py
|
Python
|
setup.py
|
parzingis/corpy
|
638dedb3eaa619046d3c3fb9652f9e82800a8557
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
parzingis/corpy
|
638dedb3eaa619046d3c3fb9652f9e82800a8557
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
parzingis/corpy
|
638dedb3eaa619046d3c3fb9652f9e82800a8557
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
ORR API Documentation
The main ORR documentation is located at: https://mmisw.org/orrdoc/ __Please note__: - The ORR API is approaching a stable version but is still work in progress. Please [let us know](https://github.com/mmisw/mmiorr-docs/issues) if you have any questions or suggestions. - Besides the documentation itself, this page lets you directly exercise and test the API. Click on any operation header below to learn more details about it, and see a \"Try it out\" button. - You can click on the \"Authorize\" button at the top right of this page (or the `!` icon under the particular operation) to retrieve an authentication token corresponding to your ORR instance credentials (username and password). Once authorized, the authentication token will be automatically included in the corresponding request. You will be able to not only perform the basic `GET` operations, but also see expanded responses according to your access privileges as well as perform other operations. - The \"Try it out\" button will also show the corresponding API call that you can submit from the command line using [`curl`](https://curl.haxx.se/). - This API includes administrative operations related with the triple store. The SPARQL endpoint itself (located at `http://cor.esipfed.org/sparql` for the MMI ORR instance) is not described here. (General SPARQL information can be found [here](https://en.wikipedia.org/wiki/SPARQL), and regarding the current service used by the ORR to support the SPARQL interface [here](http://franz.com/agraph/support/documentation/current/http-protocol.html).) - Actual requests from this page are against the specific endpoint at `http://cor.esipfed.org/ont`.
OpenAPI spec version: v0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import sys
from setuptools import setup, find_packages
NAME = "swagger-client"
VERSION = "1.0.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["urllib3 >= 1.15", "six >= 1.10", "certifi", "python-dateutil"]
setup(
name=NAME,
version=VERSION,
description="ORR API Documentation",
author_email="",
url="",
keywords=["Swagger", "ORR API Documentation"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""\
The main ORR documentation is located at: https://mmisw.org/orrdoc/ __Please note__: - The ORR API is approaching a stable version but is still work in progress. Please [let us know](https://github.com/mmisw/mmiorr-docs/issues) if you have any questions or suggestions. - Besides the documentation itself, this page lets you directly exercise and test the API. Click on any operation header below to learn more details about it, and see a \"Try it out\" button. - You can click on the \"Authorize\" button at the top right of this page (or the `!` icon under the particular operation) to retrieve an authentication token corresponding to your ORR instance credentials (username and password). Once authorized, the authentication token will be automatically included in the corresponding request. You will be able to not only perform the basic `GET` operations, but also see expanded responses according to your access privileges as well as perform other operations. - The \"Try it out\" button will also show the corresponding API call that you can submit from the command line using [`curl`](https://curl.haxx.se/). - This API includes administrative operations related with the triple store. The SPARQL endpoint itself (located at `http://cor.esipfed.org/sparql` for the MMI ORR instance) is not described here. (General SPARQL information can be found [here](https://en.wikipedia.org/wiki/SPARQL), and regarding the current service used by the ORR to support the SPARQL interface [here](http://franz.com/agraph/support/documentation/current/http-protocol.html).) - Actual requests from this page are against the specific endpoint at `http://cor.esipfed.org/ont`.
"""
)
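# An install sketch (an assumption, not part of the generated file): in
# addition to the `python setup.py install` route noted in the comments
# above, a package with this setup.py can normally be installed from its
# root directory with:
# pip install .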
| 101.761905
| 1,790
| 0.747309
| 637
| 4,274
| 4.990581
| 0.312402
| 0.015099
| 0.010066
| 0.02139
| 0.814092
| 0.797735
| 0.761246
| 0.761246
| 0.761246
| 0.761246
| 0
| 0.008954
| 0.163781
| 4,274
| 41
| 1,791
| 104.243902
| 0.880526
| 0.458587
| 0
| 0
| 0
| 0.052632
| 0.841249
| 0.010114
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.052632
| 0.105263
| 0
| 0.105263
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 10
|
bdb1735c1de637351ea2314dc21fa1fd4bb98426
| 47
|
py
|
Python
|
image_analysis/__init__.py
|
idkidkaaa/streamlit-image-editor
|
4584c3ebbe272673a3086409ef093e9d3df9e485
|
[
"MIT"
] | 2
|
2021-12-08T08:57:38.000Z
|
2022-02-07T16:16:20.000Z
|
image_analysis/__init__.py
|
idkidkaaa/streamlit-image-editor
|
4584c3ebbe272673a3086409ef093e9d3df9e485
|
[
"MIT"
] | 1
|
2021-12-08T17:55:22.000Z
|
2021-12-08T17:55:22.000Z
|
image_analysis/__init__.py
|
idkidkaaa/streamlit-image-editor
|
4584c3ebbe272673a3086409ef093e9d3df9e485
|
[
"MIT"
] | 8
|
2021-11-28T10:57:25.000Z
|
2021-12-15T16:04:27.000Z
|
from .display_analysis import display_analysis
| 23.5
| 46
| 0.893617
| 6
| 47
| 6.666667
| 0.666667
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 47
| 1
| 47
| 47
| 0.930233
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
bdef476d1338ac4576cb18d84cd6b938b3001487
| 723
|
py
|
Python
|
sub-action.py
|
minoplhy/filters-development
|
0a80c1c181454cf13acaa2a095a6ff445d96ab1a
|
[
"MIT"
] | 5
|
2021-08-15T15:06:55.000Z
|
2021-12-28T18:56:02.000Z
|
sub-action.py
|
minoplhy/filters-development
|
0a80c1c181454cf13acaa2a095a6ff445d96ab1a
|
[
"MIT"
] | 4
|
2021-07-31T16:12:19.000Z
|
2022-03-20T09:22:03.000Z
|
sub-action.py
|
minoplhy/filters-development
|
0a80c1c181454cf13acaa2a095a6ff445d96ab1a
|
[
"MIT"
] | null | null | null |
import os
import sys
sys.path.append('/filters-maker')
import build_make
os.makedirs('/gh-releases',exist_ok=True)
build_make.filepath_mass_changer('Allowlist' ,'/reprwiki/Private-build/Allowlist' ,'/gh-releases','txt')
build_make.filepath_mass_changer('Veneto' ,'/reprwiki/Private-build/veneto' ,'/gh-releases','txt')
build_make.filepath_mass_changer('ucate' ,'/reprwiki/Private-build/ucate' ,'/gh-releases','txt')
build_make.filepath_mass_changer('Allowlist' ,'/reprwiki/Private-build/Allowlist' ,'/gh-releases','conf')
build_make.filepath_mass_changer('Veneto' ,'/reprwiki/Private-build/veneto' ,'/gh-releases','conf')
build_make.filepath_mass_changer('ucate' ,'/reprwiki/Private-build/ucate' ,'/gh-releases','conf')
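# Note (not part of the original script): `build_make` is a local module
# loaded from /filters-maker, so its API is not shown here. Judging from
# the calls above, a plausible (hypothetical) signature is
# filepath_mass_changer(name, source_dir, output_dir, extension),
# building e.g. '/gh-releases/Allowlist.txt' from the files under
# '/reprwiki/Private-build/Allowlist'.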
| 48.2
| 105
| 0.770401
| 97
| 723
| 5.536082
| 0.257732
| 0.117318
| 0.189944
| 0.234637
| 0.828678
| 0.828678
| 0.828678
| 0.828678
| 0.79702
| 0.79702
| 0
| 0
| 0.040111
| 723
| 14
| 106
| 51.642857
| 0.773775
| 0
| 0
| 0
| 0
| 0
| 0.474412
| 0.254495
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.272727
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
da4c4b7c2b6691e65036d53a5e21bc3544fabcbe
| 157
|
py
|
Python
|
generated-libraries/python/netapp/vscan/privileged_user.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | 2
|
2017-03-28T15:31:26.000Z
|
2018-08-16T22:15:18.000Z
|
generated-libraries/python/netapp/vscan/privileged_user.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
generated-libraries/python/netapp/vscan/privileged_user.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
class PrivilegedUser(basestring):
"""
Privileged user
"""
@staticmethod
def get_api_name():
return "privileged-user"
| 15.7
| 34
| 0.573248
| 13
| 157
| 6.769231
| 0.846154
| 0.318182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.324841
| 157
| 9
| 35
| 17.444444
| 0.830189
| 0.095541
| 0
| 0
| 0
| 0
| 0.119048
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
e550d358dd73ded0b103bbf9defb0a70f6e06700
| 6,733
|
py
|
Python
|
vouchers.py
|
joydeep1701/GSTBull
|
e7c0e82442b19d40e99d4aac99a6996626ee7318
|
[
"MIT"
] | null | null | null |
vouchers.py
|
joydeep1701/GSTBull
|
e7c0e82442b19d40e99d4aac99a6996626ee7318
|
[
"MIT"
] | null | null | null |
vouchers.py
|
joydeep1701/GSTBull
|
e7c0e82442b19d40e99d4aac99a6996626ee7318
|
[
"MIT"
] | null | null | null |
from flask import redirect, render_template, request, url_for, flash
from sql import *
import ledgers
db = SQL("sqlite:///watchdog.db")
def getTaxrates():
rates = db.execute("""SELECT * FROM taxrates""")
return rates
def getSalesVoucherByInvNo(inv_no, company_id):
master_table = str(company_id) + '_master_sales'
secondary_table = str(company_id) + '_secondary_sales'
ledger_table = str(company_id) + '_ledgers'
rows = db.execute("""SELECT * FROM :table
INNER JOIN (SELECT id AS l_id,name,gstin FROM :ledger_table) ON l_id=ledger_id
WHERE inv_no = :inv_no""",
table=master_table,ledger_table=ledger_table,inv_no=inv_no)
if rows:
master_id = rows[0]['id']
secondary_data = db.execute("""SELECT * FROM :table WHERE master_id=:master_id""",
table=secondary_table, master_id=master_id)
voucher_data = dict(rows[0])
voucher_data['tax_data'] = secondary_data
return voucher_data
return []
def getSalesVoucherByMonth(month, year, company_id):
view_table = str(company_id) + '_sales_view'
ledger_table = str(company_id) + '_ledgers'
rows = db.execute("""SELECT * FROM :table
INNER JOIN (SELECT id AS l_id,name FROM :ledger_table) ON l_id=ledger_id
WHERE month=:month AND year=:year GROUP BY master_id
ORDER BY inv_no ASC""", table=view_table,ledger_table=ledger_table,
month=month, year=year)
return rows
def createSalesVoucher(request, company_id):
master_table = str(company_id) + '_master_sales'
secondary_table = str(company_id) + '_secondary_sales'
ledger_id = request.get('ledger_id')
ledger_data = ledgers.getLedgerById(ledger_id, company_id)
inv_no = request.get('inv_no')
date = request.get('date').split('-')
if len(date[0]) < 2:
day = '0' + date[0]
else:
day = date[0]
month = date[1]
year = date[2]
pos = ledger_data[0]['place_of_supply']
un_reg = ledger_data[0]['unregistered']
comp = ledger_data[0]['composition']
sez = ledger_data[0]['sez']
roundoff = request.get('roundoff')
request = dict(request)
# Check for duplicates
if len(getSalesVoucherByInvNo(inv_no, company_id)) > 0:
flash('Invoice Number must be unique','red')
return
# insert in master table
row_id = db.execute("""INSERT INTO :table
(ledger_id, day, month, year, inv_no, pos, comp, un_reg, sez, roundoff)
VALUES
(:ledger_id, :day, :month, :year, :inv_no, :pos, :comp, :un_reg, :sez, :roundoff)""",
table=master_table, ledger_id=ledger_id, day=day, month=month,year=year,
inv_no=inv_no, pos=pos, comp=comp, un_reg=un_reg, sez=sez, roundoff=roundoff)
if row_id is None:
flash('Server Error','red')
return
for rate,amount in zip(request.get('rate'),request.get('amount')):
db.execute("""INSERT INTO :table
(master_id, rate, amount) VALUES (:master_id, :rate, :amount)
""",table=secondary_table,master_id=row_id,rate=rate,amount=amount)
flash('Invoice Added','yellow')
def deleteSalesVoucher(voucher_id, company_id):
master_table = str(company_id) + '_master_sales'
secondary_table = str(company_id) + '_secondary_sales'
db.execute("""DELETE FROM :table WHERE id=:id""",table=master_table,
id=voucher_id)
db.execute("""DELETE FROM :table WHERE master_id=:id""",table= secondary_table,
id=voucher_id)
def getPurchaseVoucherByInvNo(inv_no, company_id):
master_table = str(company_id) + '_master_purchase'
secondary_table = str(company_id) + '_secondary_purchase'
ledger_table = str(company_id) + '_ledgers'
rows = db.execute("""SELECT * FROM :table
INNER JOIN (SELECT id AS l_id,name,gstin FROM :ledger_table) ON l_id=ledger_id
WHERE inv_no = :inv_no""",
table=master_table,ledger_table=ledger_table,inv_no=inv_no)
if rows:
master_id = rows[0]['id']
secondary_data = db.execute("""SELECT * FROM :table WHERE master_id=:master_id""",
table=secondary_table, master_id=master_id)
voucher_data = dict(rows[0])
voucher_data['tax_data'] = secondary_data
return voucher_data
return []
def getPurchaseVoucherByMonth(month, year, company_id):
view_table = str(company_id) + '_purchase_view'
ledger_table = str(company_id) + '_ledgers'
rows = db.execute("""SELECT * FROM :table
INNER JOIN (SELECT id AS l_id,name FROM :ledger_table) ON l_id=ledger_id
WHERE month=:month AND year=:year GROUP BY master_id
ORDER BY inv_no ASC""", table=view_table,ledger_table=ledger_table,
month=month, year=year)
return rows
def createPurchaseVoucher(request, company_id):
master_table = str(company_id) + '_master_purchase'
secondary_table = str(company_id) + '_secondary_purchase'
ledger_id = request.get('ledger_id')
ledger_data = ledgers.getLedgerById(ledger_id, company_id)
inv_no = request.get('inv_no')
date = request.get('date').split('-')
if len(date[0]) < 2:
day = '0' + date[0]
else:
day = date[0]
month = date[1]
year = date[2]
pos = ledger_data[0]['place_of_supply']
un_reg = ledger_data[0]['unregistered']
comp = ledger_data[0]['composition']
sez = ledger_data[0]['sez']
roundoff = request.get('roundoff')
request = dict(request)
# insert in master table
row_id = db.execute("""INSERT INTO :table
(ledger_id, day, month, year, inv_no, pos, comp, un_reg, sez, roundoff)
VALUES
(:ledger_id, :day, :month, :year, :inv_no, :pos, :comp, :un_reg, :sez, :roundoff)""",
table=master_table, ledger_id=ledger_id, day=day, month=month,year=year,
inv_no=inv_no, pos=pos, comp=comp, un_reg=un_reg, sez=sez, roundoff=roundoff)
if row_id is None:
flash('Server Error','red')
return
for rate,amount in zip(request.get('rate'),request.get('amount')):
db.execute("""INSERT INTO :table
(master_id, rate, amount) VALUES (:master_id, :rate, :amount)
""",table=secondary_table,master_id=row_id,rate=rate,amount=amount)
flash('Invoice Added','yellow')
def deletePurchaseVoucher(voucher_id, company_id):
master_table = str(company_id) + '_master_purchase'
secondary_table = str(company_id) + '_secondary_purchase'
db.execute("""DELETE FROM :table WHERE id=:id""",table=master_table,
id=voucher_id)
db.execute("""DELETE FROM :table WHERE master_id=:id""",table= secondary_table,
id=voucher_id)
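# A minimal calling sketch (an assumption, not part of this module; in the
# surrounding app these helpers are presumably called from Flask views,
# passing the posted form as the dict-like `request` argument):
#
# voucher = getSalesVoucherByInvNo('INV-001', company_id=1)
# monthly = getSalesVoucherByMonth('04', '2018', company_id=1)
# deleteSalesVoucher(voucher_id=voucher['id'], company_id=1)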
| 38.695402
| 97
| 0.652161
| 917
| 6,733
| 4.541985
| 0.116685
| 0.062665
| 0.064826
| 0.073469
| 0.906122
| 0.892197
| 0.892197
| 0.892197
| 0.892197
| 0.870108
| 0
| 0.00514
| 0.219813
| 6,733
| 173
| 98
| 38.919075
| 0.78774
| 0.009802
| 0
| 0.864286
| 0
| 0.028571
| 0.319976
| 0.003152
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064286
| false
| 0
| 0.021429
| 0
| 0.157143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e55c65c3b346354be8d04e79275d783643c9df4f
| 71,674
|
py
|
Python
|
octavia/tests/unit/controller/worker/v2/test_controller_worker.py
|
johnsom/octavia
|
41c628a084002017d2003926cf0e25ba3ffeee0c
|
[
"Apache-2.0"
] | null | null | null |
octavia/tests/unit/controller/worker/v2/test_controller_worker.py
|
johnsom/octavia
|
41c628a084002017d2003926cf0e25ba3ffeee0c
|
[
"Apache-2.0"
] | null | null | null |
octavia/tests/unit/controller/worker/v2/test_controller_worker.py
|
johnsom/octavia
|
41c628a084002017d2003926cf0e25ba3ffeee0c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from unittest import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils
from octavia.api.drivers import utils as provider_utils
from octavia.common import constants
from octavia.common import data_models
from octavia.controller.worker.v2 import controller_worker
from octavia.controller.worker.v2.flows import flow_utils
import octavia.tests.unit.base as base
AMP_ID = uuidutils.generate_uuid()
LB_ID = uuidutils.generate_uuid()
LISTENER_ID = uuidutils.generate_uuid()
POOL_ID = uuidutils.generate_uuid()
PROJECT_ID = uuidutils.generate_uuid()
HM_ID = uuidutils.generate_uuid()
MEMBER_ID = uuidutils.generate_uuid()
COMPUTE_ID = uuidutils.generate_uuid()
L7POLICY_ID = uuidutils.generate_uuid()
L7RULE_ID = uuidutils.generate_uuid()
PROJECT_ID = uuidutils.generate_uuid()
LISTENER_ID = uuidutils.generate_uuid()
HEALTH_UPDATE_DICT = {'delay': 1, 'timeout': 2}
LISTENER_UPDATE_DICT = {'name': 'test', 'description': 'test2'}
MEMBER_UPDATE_DICT = {'weight': 1, 'ip_address': '10.0.0.0'}
POOL_UPDATE_DICT = {'name': 'test', 'description': 'test2'}
L7POLICY_UPDATE_DICT = {'action': constants.L7POLICY_ACTION_REJECT}
L7RULE_UPDATE_DICT = {
'type': constants.L7RULE_TYPE_PATH,
'compare_type': constants.L7RULE_COMPARE_TYPE_STARTS_WITH,
'value': '/api'}
_db_amphora_mock = mock.MagicMock()
_amphora_mock = {
constants.ID: AMP_ID,
constants.LOAD_BALANCER_ID: LB_ID,
}
_flow_mock = mock.MagicMock()
_db_health_mon_mock = mock.MagicMock()
_health_mon_mock = {
constants.HEALTHMONITOR_ID: HM_ID,
constants.POOL_ID: POOL_ID
}
_vip_mock = mock.MagicMock()
_listener_mock = mock.MagicMock()
_db_load_balancer_mock = mock.MagicMock()
_load_balancer_mock = {
constants.LOADBALANCER_ID: LB_ID
}
_member_mock = mock.MagicMock()
_pool_mock = {constants.POOL_ID: POOL_ID}
_db_pool_mock = mock.MagicMock()
_db_pool_mock.load_balancer = _db_load_balancer_mock
_member_mock.pool = _db_pool_mock
_l7policy_mock = mock.MagicMock()
_l7policy_mock.id = L7POLICY_ID
_l7policy_mock.to_dict.return_value = {constants.ID: L7POLICY_ID}
_l7rule_mock = mock.MagicMock()
_create_map_flow_mock = mock.MagicMock()
_db_amphora_mock.load_balancer_id = LB_ID
_db_amphora_mock.id = AMP_ID
_db_session = mock.MagicMock()
CONF = cfg.CONF
class TestException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
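# Note on the mock.patch stack below: patch decorators are applied
# bottom-up, so each test method receives the class-level mocks in
# reverse decorator order (mock_api_get_session first, mock_amp_repo_get
# last), preceded by any mocks from the method's own decorators.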
@mock.patch('octavia.db.repositories.AmphoraRepository.get',
return_value=_db_amphora_mock)
@mock.patch('octavia.db.repositories.HealthMonitorRepository.get',
return_value=_db_health_mon_mock)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get',
return_value=_db_load_balancer_mock)
@mock.patch('octavia.db.repositories.ListenerRepository.get',
return_value=_listener_mock)
@mock.patch('octavia.db.repositories.L7PolicyRepository.get',
return_value=_l7policy_mock)
@mock.patch('octavia.db.repositories.L7RuleRepository.get',
return_value=_l7rule_mock)
@mock.patch('octavia.db.repositories.MemberRepository.get',
return_value=_member_mock)
@mock.patch('octavia.db.repositories.PoolRepository.get',
return_value=_db_pool_mock)
@mock.patch('octavia.common.base_taskflow.TaskFlowServiceController',
return_value=_flow_mock)
@mock.patch('taskflow.listeners.logging.DynamicLoggingListener')
@mock.patch('octavia.db.api.get_session', return_value=_db_session)
class TestControllerWorker(base.TestCase):
def setUp(self):
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
_db_pool_mock.listeners = [_listener_mock]
_db_pool_mock.load_balancer = _db_load_balancer_mock
_db_health_mon_mock.pool = _db_pool_mock
_db_load_balancer_mock.amphorae = _db_amphora_mock
_db_load_balancer_mock.vip = _vip_mock
_db_load_balancer_mock.id = LB_ID
_db_load_balancer_mock.project_id = PROJECT_ID
_listener_mock.load_balancer = _db_load_balancer_mock
_listener_mock.id = LISTENER_ID
_listener_mock.to_dict.return_value = {
constants.ID: LISTENER_ID, constants.LOAD_BALANCER_ID: LB_ID}
self.ref_listener_dict = {constants.LISTENER_ID: LISTENER_ID,
constants.LOADBALANCER_ID: LB_ID}
_member_mock.pool = _db_pool_mock
_l7policy_mock.listener = _listener_mock
_l7rule_mock.l7policy = _l7policy_mock
_db_load_balancer_mock.listeners = [_listener_mock]
_db_load_balancer_mock.to_dict.return_value = {'id': LB_ID}
fetch_mock = mock.MagicMock()
_flow_mock.driver.persistence = fetch_mock
_db_pool_mock.id = POOL_ID
_db_health_mon_mock.pool_id = POOL_ID
_db_health_mon_mock.id = HM_ID
_db_health_mon_mock.to_dict.return_value = {
'id': HM_ID,
constants.POOL_ID: POOL_ID
}
super(TestControllerWorker, self).setUp()
@mock.patch('octavia.controller.worker.v2.flows.'
'amphora_flows.AmphoraFlows.get_create_amphora_flow',
return_value='TEST')
def test_create_amphora(self,
mock_get_create_amp_flow,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
_flow_mock.reset_mock()
cw = controller_worker.ControllerWorker()
cw.create_amphora()
(cw.services_controller.run_poster.
assert_called_once_with(
flow_utils.get_create_amphora_flow,
wait=True,
store={constants.BUILD_TYPE_PRIORITY:
constants.LB_CREATE_SPARES_POOL_PRIORITY,
constants.FLAVOR: None,
constants.AVAILABILITY_ZONE: None}))
@mock.patch('octavia.db.repositories.AvailabilityZoneRepository.'
'get_availability_zone_metadata_dict')
@mock.patch('octavia.controller.worker.v2.flows.'
'amphora_flows.AmphoraFlows.get_create_amphora_flow',
return_value='TEST')
def test_create_amphora_with_az(self,
mock_get_create_amp_flow,
mock_get_az_metadata,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
_flow_mock.reset_mock()
az = 'fake_az'
az_data = {constants.COMPUTE_ZONE: az}
mock_get_az_metadata.return_value = az_data
cw = controller_worker.ControllerWorker()
cw.create_amphora(availability_zone=az)
mock_get_az_metadata.assert_called_once_with(_db_session, az)
(cw.services_controller.run_poster.
assert_called_once_with(
flow_utils.get_create_amphora_flow,
wait=True,
store={constants.BUILD_TYPE_PRIORITY:
constants.LB_CREATE_SPARES_POOL_PRIORITY,
constants.FLAVOR: None,
constants.AVAILABILITY_ZONE: az_data}))
@mock.patch('octavia.controller.worker.v2.flows.'
'amphora_flows.AmphoraFlows.get_delete_amphora_flow',
return_value='TEST')
def test_delete_amphora(self,
mock_get_delete_amp_flow,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
_flow_mock.reset_mock()
cw = controller_worker.ControllerWorker()
cw.delete_amphora(AMP_ID)
mock_amp_repo_get.assert_called_once_with(
_db_session,
id=AMP_ID)
mock_amp_repo_get.return_value = _db_amphora_mock
(cw.services_controller.run_poster.
assert_called_once_with(
flow_utils.get_delete_amphora_flow,
store={constants.AMPHORA: _db_amphora_mock.to_dict()}))
@mock.patch('octavia.controller.worker.v2.flows.'
'health_monitor_flows.HealthMonitorFlows.'
'get_create_health_monitor_flow',
return_value=_flow_mock)
def test_create_health_monitor(self,
mock_get_create_hm_flow,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
_flow_mock.reset_mock()
cw = controller_worker.ControllerWorker()
cw.create_health_monitor(_health_mon_mock)
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
_db_load_balancer_mock).to_dict()
mock_health_mon_repo_get.return_value = _db_health_mon_mock
(cw.services_controller.run_poster.
assert_called_once_with(flow_utils.get_create_health_monitor_flow,
store={constants.HEALTH_MON:
_health_mon_mock,
constants.LISTENERS:
[self.ref_listener_dict],
constants.LOADBALANCER_ID:
LB_ID,
constants.LOADBALANCER:
provider_lb,
constants.POOL_ID:
POOL_ID}))
def test_delete_health_monitor(self,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
_flow_mock.reset_mock()
cw = controller_worker.ControllerWorker()
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
_db_load_balancer_mock).to_dict()
cw.delete_health_monitor(_health_mon_mock)
mock_health_mon_repo_get.return_value = _db_health_mon_mock
(cw.services_controller.run_poster.
assert_called_once_with(flow_utils.get_delete_health_monitor_flow,
store={constants.HEALTH_MON:
_health_mon_mock,
constants.LISTENERS:
[self.ref_listener_dict],
constants.LOADBALANCER_ID:
LB_ID,
constants.LOADBALANCER:
provider_lb,
constants.POOL_ID:
POOL_ID,
constants.PROJECT_ID: PROJECT_ID}))
def test_update_health_monitor(self,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
_flow_mock.reset_mock()
mock_health_mon_repo_get.return_value = _db_health_mon_mock
_db_health_mon_mock.provisioning_status = constants.PENDING_UPDATE
cw = controller_worker.ControllerWorker()
cw.update_health_monitor(_health_mon_mock,
HEALTH_UPDATE_DICT)
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
_db_load_balancer_mock).to_dict()
(cw.services_controller.run_poster.
assert_called_once_with(flow_utils.get_update_health_monitor_flow,
store={constants.HEALTH_MON:
_health_mon_mock,
constants.POOL_ID: POOL_ID,
constants.LOADBALANCER_ID:
LB_ID,
constants.LISTENERS:
[self.ref_listener_dict],
constants.LOADBALANCER:
provider_lb,
constants.UPDATE_DICT:
HEALTH_UPDATE_DICT}))
def test_create_listener(self,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
_flow_mock.reset_mock()
cw = controller_worker.ControllerWorker()
listener_dict = {constants.LISTENER_ID: LISTENER_ID,
constants.LOADBALANCER_ID: LB_ID}
cw.create_listener(listener_dict)
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
_db_load_balancer_mock).to_dict()
(cw.services_controller.run_poster.
assert_called_once_with(
flow_utils.get_create_listener_flow, store={
constants.LOADBALANCER: provider_lb,
constants.LOADBALANCER_ID: LB_ID,
constants.LISTENERS: [listener_dict]}))
def test_delete_listener(self,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
_flow_mock.reset_mock()
listener_dict = {constants.LISTENER_ID: LISTENER_ID,
constants.LOADBALANCER_ID: LB_ID}
cw = controller_worker.ControllerWorker()
cw.delete_listener(listener_dict)
(cw.services_controller.run_poster.
assert_called_once_with(
flow_utils.get_delete_listener_flow,
store={constants.LISTENER: self.ref_listener_dict,
constants.LOADBALANCER_ID: LB_ID,
constants.PROJECT_ID: PROJECT_ID}))
def test_update_listener(self,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
_flow_mock.reset_mock()
_listener_mock.provisioning_status = constants.PENDING_UPDATE
listener_dict = {constants.LISTENER_ID: LISTENER_ID,
constants.LOADBALANCER_ID: LB_ID}
cw = controller_worker.ControllerWorker()
cw.update_listener(listener_dict, LISTENER_UPDATE_DICT)
(cw.services_controller.run_poster.
assert_called_once_with(flow_utils.get_update_listener_flow,
store={constants.LISTENER: listener_dict,
constants.UPDATE_DICT:
LISTENER_UPDATE_DICT,
constants.LOADBALANCER_ID: LB_ID,
constants.LISTENERS:
[listener_dict]}))
def test_create_load_balancer_single(
self,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
# Test the code path with an SINGLE topology
self.conf.config(group="controller_worker",
loadbalancer_topology=constants.TOPOLOGY_SINGLE)
_flow_mock.reset_mock()
store = {
constants.LOADBALANCER_ID: LB_ID,
'update_dict': {'topology': constants.TOPOLOGY_SINGLE},
constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY,
constants.FLAVOR: None,
constants.AVAILABILITY_ZONE: None,
}
lb_mock = mock.MagicMock()
lb_mock.listeners = []
lb_mock.topology = constants.TOPOLOGY_SINGLE
mock_lb_repo_get.side_effect = [None, None, None, lb_mock]
cw = controller_worker.ControllerWorker()
cw.create_load_balancer(_load_balancer_mock)
cw.services_controller.run_poster.assert_called_with(
flow_utils.get_create_load_balancer_flow,
constants.TOPOLOGY_SINGLE, listeners=[], store=store)
self.assertEqual(4, mock_lb_repo_get.call_count)
def test_create_load_balancer_active_standby(
self,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
self.conf.config(
group="controller_worker",
loadbalancer_topology=constants.TOPOLOGY_ACTIVE_STANDBY)
_flow_mock.reset_mock()
store = {
constants.LOADBALANCER_ID: LB_ID,
'update_dict': {'topology': constants.TOPOLOGY_ACTIVE_STANDBY},
constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY,
constants.FLAVOR: None,
constants.AVAILABILITY_ZONE: None,
}
setattr(mock_lb_repo_get.return_value, 'topology',
constants.TOPOLOGY_ACTIVE_STANDBY)
setattr(mock_lb_repo_get.return_value, 'listeners', [])
cw = controller_worker.ControllerWorker()
cw.create_load_balancer(_load_balancer_mock)
cw.services_controller.run_poster.assert_called_with(
flow_utils.get_create_load_balancer_flow,
constants.TOPOLOGY_ACTIVE_STANDBY, listeners=[], store=store)
def test_create_load_balancer_full_graph_single(
self,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
self.conf.config(
group="controller_worker",
loadbalancer_topology=constants.TOPOLOGY_SINGLE)
listeners = [data_models.Listener(id='listener1'),
data_models.Listener(id='listener2')]
dict_listeners = [listener.to_dict() for listener in
provider_utils.db_listeners_to_provider_listeners(
listeners)]
lb = data_models.LoadBalancer(id=LB_ID, listeners=listeners,
topology=constants.TOPOLOGY_SINGLE)
mock_lb_repo_get.return_value = lb
store = {
constants.LOADBALANCER_ID: LB_ID,
'update_dict': {'topology': constants.TOPOLOGY_SINGLE},
constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY,
constants.FLAVOR: None,
constants.AVAILABILITY_ZONE: None,
}
cw = controller_worker.ControllerWorker()
cw.create_load_balancer(_load_balancer_mock)
cw.services_controller.run_poster.assert_called_with(
flow_utils.get_create_load_balancer_flow,
constants.TOPOLOGY_SINGLE, listeners=dict_listeners, store=store)
def test_create_load_balancer_full_graph_active_standby(
self,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
self.conf.config(
group="controller_worker",
loadbalancer_topology=constants.TOPOLOGY_ACTIVE_STANDBY)
listeners = [data_models.Listener(id='listener1'),
data_models.Listener(id='listener2')]
dict_listeners = [listener.to_dict() for listener in
provider_utils.db_listeners_to_provider_listeners(
listeners)]
lb = data_models.LoadBalancer(
id=LB_ID, listeners=listeners,
topology=constants.TOPOLOGY_ACTIVE_STANDBY)
mock_lb_repo_get.return_value = lb
store = {
constants.LOADBALANCER_ID: LB_ID,
'update_dict': {'topology': constants.TOPOLOGY_ACTIVE_STANDBY},
constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY,
constants.FLAVOR: None,
constants.AVAILABILITY_ZONE: None,
}
cw = controller_worker.ControllerWorker()
cw.create_load_balancer(_load_balancer_mock)
cw.services_controller.run_poster.assert_called_with(
flow_utils.get_create_load_balancer_flow,
constants.TOPOLOGY_ACTIVE_STANDBY, listeners=dict_listeners,
store=store)
def test_delete_load_balancer_without_cascade(self,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
_flow_mock.reset_mock()
cw = controller_worker.ControllerWorker()
cw.delete_load_balancer(_load_balancer_mock, cascade=False)
mock_lb_repo_get.assert_called_once_with(
_db_session,
id=LB_ID)
(cw.services_controller.run_poster.
assert_called_once_with(
flow_utils.get_delete_load_balancer_flow,
_load_balancer_mock,
store={constants.LOADBALANCER: _load_balancer_mock,
constants.SERVER_GROUP_ID:
_db_load_balancer_mock.server_group_id,
constants.PROJECT_ID: _db_load_balancer_mock.project_id,
}))
def test_delete_load_balancer_with_cascade(self,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
_flow_mock.reset_mock()
cw = controller_worker.ControllerWorker()
cw.delete_load_balancer(_load_balancer_mock, cascade=True)
mock_lb_repo_get.assert_called_once_with(
_db_session,
id=LB_ID)
list_name = 'listener_%s' % _listener_mock.id
(cw.services_controller.run_poster.
assert_called_once_with(
flow_utils.get_cascade_delete_load_balancer_flow,
_load_balancer_mock,
store={constants.LOADBALANCER: _load_balancer_mock,
list_name: self.ref_listener_dict,
constants.LOADBALANCER_ID: LB_ID,
constants.SERVER_GROUP_ID:
_db_load_balancer_mock.server_group_id,
constants.PROJECT_ID: _db_load_balancer_mock.project_id,
})
)
@mock.patch('octavia.db.repositories.ListenerRepository.get_all',
return_value=([_listener_mock], None))
def test_update_load_balancer(self,
mock_listener_repo_get_all,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
_flow_mock.reset_mock()
_db_load_balancer_mock.provisioning_status = constants.PENDING_UPDATE
cw = controller_worker.ControllerWorker()
change = 'TEST2'
cw.update_load_balancer(_load_balancer_mock, change)
(cw.services_controller.run_poster.
assert_called_once_with(flow_utils.get_update_load_balancer_flow,
store={constants.UPDATE_DICT: change,
constants.LOADBALANCER:
_load_balancer_mock,
constants.LOADBALANCER_ID:
_db_load_balancer_mock.id,
}))
@mock.patch('octavia.controller.worker.v2.flows.'
'member_flows.MemberFlows.get_create_member_flow',
return_value=_flow_mock)
@mock.patch('octavia.db.repositories.AvailabilityZoneRepository.'
'get_availability_zone_metadata_dict')
def test_create_member(self,
mock_get_az_metadata_dict,
mock_get_create_member_flow,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
_flow_mock.reset_mock()
mock_get_az_metadata_dict.return_value = {}
mock_member_repo_get.side_effect = [None, _member_mock]
_member = _member_mock.to_dict()
cw = controller_worker.ControllerWorker()
cw.create_member(_member)
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
_db_load_balancer_mock).to_dict()
(cw.services_controller.run_poster.
assert_called_once_with(flow_utils.get_create_member_flow,
store={constants.MEMBER: _member,
constants.LISTENERS:
[self.ref_listener_dict],
constants.LOADBALANCER_ID:
LB_ID,
constants.LOADBALANCER:
provider_lb,
constants.POOL_ID:
POOL_ID,
constants.AVAILABILITY_ZONE: {}}))
@mock.patch('octavia.controller.worker.v2.flows.'
'member_flows.MemberFlows.get_delete_member_flow',
return_value=_flow_mock)
@mock.patch('octavia.db.repositories.AvailabilityZoneRepository.'
'get_availability_zone_metadata_dict')
def test_delete_member(self,
mock_get_az_metadata_dict,
mock_get_delete_member_flow,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
_flow_mock.reset_mock()
_member = _member_mock.to_dict()
mock_get_az_metadata_dict.return_value = {}
cw = controller_worker.ControllerWorker()
cw.delete_member(_member)
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
_db_load_balancer_mock).to_dict()
(cw.services_controller.run_poster.
assert_called_once_with(
flow_utils.get_delete_member_flow,
store={constants.MEMBER: _member,
constants.LISTENERS: [self.ref_listener_dict],
constants.LOADBALANCER_ID: LB_ID,
constants.LOADBALANCER: provider_lb,
constants.POOL_ID: POOL_ID,
constants.PROJECT_ID: PROJECT_ID,
constants.AVAILABILITY_ZONE: {}}))
@mock.patch('octavia.controller.worker.v2.flows.'
'member_flows.MemberFlows.get_update_member_flow',
return_value=_flow_mock)
@mock.patch('octavia.db.repositories.AvailabilityZoneRepository.'
'get_availability_zone_metadata_dict')
def test_update_member(self,
mock_get_az_metadata_dict,
mock_get_update_member_flow,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
_flow_mock.reset_mock()
_member = _member_mock.to_dict()
_member[constants.PROVISIONING_STATUS] = constants.PENDING_UPDATE
mock_get_az_metadata_dict.return_value = {}
cw = controller_worker.ControllerWorker()
cw.update_member(_member, MEMBER_UPDATE_DICT)
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
_db_load_balancer_mock).to_dict()
(cw.services_controller.run_poster.
assert_called_once_with(flow_utils.get_update_member_flow,
store={constants.MEMBER: _member,
constants.LISTENERS:
[self.ref_listener_dict],
constants.LOADBALANCER:
provider_lb,
constants.POOL_ID:
POOL_ID,
constants.LOADBALANCER_ID:
LB_ID,
constants.UPDATE_DICT:
MEMBER_UPDATE_DICT,
constants.AVAILABILITY_ZONE: {}}))
@mock.patch('octavia.controller.worker.v2.flows.'
'member_flows.MemberFlows.get_batch_update_members_flow',
return_value=_flow_mock)
@mock.patch('octavia.db.repositories.AvailabilityZoneRepository.'
'get_availability_zone_metadata_dict')
def test_batch_update_members(self,
mock_get_az_metadata_dict,
mock_get_batch_update_members_flow,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
_flow_mock.reset_mock()
mock_get_az_metadata_dict.return_value = {}
cw = controller_worker.ControllerWorker()
old_member = mock.MagicMock()
old_member.to_dict.return_value = {'id': 9,
constants.POOL_ID: 'testtest'}
mock_member_repo_get.side_effect = [_member_mock, old_member]
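# Two sequential repo lookups: the member being updated resolves first,
# then the old member queued for deletion.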
cw.batch_update_members([{constants.MEMBER_ID: 9,
constants.POOL_ID: 'testtest'}],
[{constants.MEMBER_ID: 11}],
[MEMBER_UPDATE_DICT])
provider_m = provider_utils.db_member_to_provider_member(_member_mock)
old_provider_m = provider_utils.db_member_to_provider_member(
old_member).to_dict()
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
_db_load_balancer_mock).to_dict()
(cw.services_controller.run_poster.
assert_called_once_with(
flow_utils.get_batch_update_members_flow,
[old_provider_m],
[{'member_id': 11}],
[(provider_m.to_dict(), MEMBER_UPDATE_DICT)],
store={constants.LISTENERS: [self.ref_listener_dict],
constants.LOADBALANCER_ID: LB_ID,
constants.LOADBALANCER: provider_lb,
constants.POOL_ID: POOL_ID,
constants.PROJECT_ID: PROJECT_ID,
constants.AVAILABILITY_ZONE: {}}))
def test_create_pool(self,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
_flow_mock.reset_mock()
mock_pool_repo_get.return_value = _db_pool_mock
cw = controller_worker.ControllerWorker()
cw.create_pool(_pool_mock)
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
_db_load_balancer_mock).to_dict()
(cw.services_controller.run_poster.
assert_called_once_with(flow_utils.get_create_pool_flow,
store={constants.POOL_ID: POOL_ID,
constants.LOADBALANCER_ID:
LB_ID,
constants.LISTENERS:
[self.ref_listener_dict],
constants.LOADBALANCER:
provider_lb}))
self.assertEqual(1, mock_pool_repo_get.call_count)
def test_delete_pool(self,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
_flow_mock.reset_mock()
_db_pool_mock.project_id = PROJECT_ID
cw = controller_worker.ControllerWorker()
cw.delete_pool(_pool_mock)
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
_db_load_balancer_mock).to_dict()
(cw.services_controller.run_poster.
assert_called_once_with(flow_utils.get_delete_pool_flow,
store={constants.POOL_ID: POOL_ID,
constants.LOADBALANCER_ID:
LB_ID,
constants.LISTENERS:
[self.ref_listener_dict],
constants.LOADBALANCER:
provider_lb,
constants.PROJECT_ID: PROJECT_ID}))
def test_update_pool(self,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
_flow_mock.reset_mock()
_db_pool_mock.provisioning_status = constants.PENDING_UPDATE
mock_pool_repo_get.return_value = _db_pool_mock
cw = controller_worker.ControllerWorker()
cw.update_pool(_pool_mock, POOL_UPDATE_DICT)
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
_db_load_balancer_mock).to_dict()
(cw.services_controller.run_poster.
assert_called_once_with(flow_utils.get_update_pool_flow,
store={constants.POOL_ID: POOL_ID,
constants.LISTENERS:
[self.ref_listener_dict],
constants.LOADBALANCER_ID:
LB_ID,
constants.LOADBALANCER:
provider_lb,
constants.UPDATE_DICT:
POOL_UPDATE_DICT}))
def test_create_l7policy(self,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
_flow_mock.reset_mock()
cw = controller_worker.ControllerWorker()
l7policy_mock = {
constants.L7POLICY_ID: L7POLICY_ID,
constants.LISTENER_ID: LISTENER_ID
}
cw.create_l7policy(l7policy_mock)
(cw.services_controller.run_poster.
assert_called_once_with(flow_utils.get_create_l7policy_flow,
store={constants.L7POLICY: l7policy_mock,
constants.LISTENERS:
[self.ref_listener_dict],
constants.LOADBALANCER_ID: LB_ID}))
def test_delete_l7policy(self,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
_flow_mock.reset_mock()
cw = controller_worker.ControllerWorker()
l7policy_mock = {
constants.L7POLICY_ID: L7POLICY_ID,
constants.LISTENER_ID: LISTENER_ID
}
cw.delete_l7policy(l7policy_mock)
(cw.services_controller.run_poster.
assert_called_once_with(flow_utils.get_delete_l7policy_flow,
store={constants.L7POLICY: l7policy_mock,
constants.LISTENERS:
[self.ref_listener_dict],
constants.LOADBALANCER_ID:
LB_ID}))
def test_update_l7policy(self,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
_flow_mock.reset_mock()
mock_listener_repo_get.return_value = _listener_mock
_l7policy_mock.provisioning_status = constants.PENDING_UPDATE
cw = controller_worker.ControllerWorker()
l7policy_mock = {
constants.L7POLICY_ID: L7POLICY_ID,
constants.LISTENER_ID: LISTENER_ID
}
cw.update_l7policy(l7policy_mock, L7POLICY_UPDATE_DICT)
(cw.services_controller.run_poster.
assert_called_once_with(flow_utils.get_update_l7policy_flow,
store={constants.L7POLICY: l7policy_mock,
constants.LISTENERS:
[self.ref_listener_dict],
constants.LOADBALANCER_ID:
LB_ID,
constants.UPDATE_DICT:
L7POLICY_UPDATE_DICT}))
def test_create_l7rule(self,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
_flow_mock.reset_mock()
mock_l7policy_repo_get.return_value = _l7policy_mock
cw = controller_worker.ControllerWorker()
cw.create_l7rule(_l7rule_mock.to_dict())
l7_policy = provider_utils.db_l7policy_to_provider_l7policy(
_l7policy_mock)
(cw.services_controller.run_poster.
assert_called_once_with(flow_utils.get_create_l7rule_flow,
store={constants.L7RULE:
_l7rule_mock.to_dict(),
constants.L7POLICY:
l7_policy.to_dict(),
constants.L7POLICY_ID: L7POLICY_ID,
constants.LOADBALANCER_ID: LB_ID,
constants.LISTENERS:
[self.ref_listener_dict]
}))
def test_delete_l7rule(self,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
_flow_mock.reset_mock()
cw = controller_worker.ControllerWorker()
cw.delete_l7rule(_l7rule_mock.to_dict())
l7_policy = provider_utils.db_l7policy_to_provider_l7policy(
_l7policy_mock)
(cw.services_controller.run_poster.
assert_called_once_with(flow_utils.get_delete_l7rule_flow,
store={
constants.L7RULE:
_l7rule_mock.to_dict(),
constants.L7POLICY:
l7_policy.to_dict(),
constants.L7POLICY_ID: L7POLICY_ID,
constants.LISTENERS:
[self.ref_listener_dict],
constants.LOADBALANCER_ID: LB_ID,
}))
def test_update_l7rule(self,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
_flow_mock.reset_mock()
_l7rule_mock.provisioning_status = constants.PENDING_UPDATE
cw = controller_worker.ControllerWorker()
cw.update_l7rule(_l7rule_mock.to_dict(), L7RULE_UPDATE_DICT)
l7_policy = provider_utils.db_l7policy_to_provider_l7policy(
_l7policy_mock)
(cw.services_controller.run_poster.
assert_called_once_with(flow_utils.get_update_l7rule_flow,
store={
constants.L7RULE:
_l7rule_mock.to_dict(),
constants.L7POLICY:
l7_policy.to_dict(),
constants.L7POLICY_ID: L7POLICY_ID,
constants.LOADBALANCER_ID: LB_ID,
constants.LISTENERS:
[self.ref_listener_dict],
constants.UPDATE_DICT:
L7RULE_UPDATE_DICT}))
@mock.patch('octavia.db.repositories.AvailabilityZoneRepository.'
'get_availability_zone_metadata_dict', return_value={})
@mock.patch('octavia.db.repositories.FlavorRepository.'
'get_flavor_metadata_dict', return_value={})
@mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
def test_failover_amphora(self,
mock_update,
mock_get_flavor_meta,
mock_get_az_meta,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
_flow_mock.reset_mock()
_db_amphora_mock.reset_mock()
mock_amp_repo_get.return_value = _db_amphora_mock
cw = controller_worker.ControllerWorker()
mock_lb_repo_get.return_value = _db_load_balancer_mock
cw.failover_amphora(AMP_ID)
(cw.services_controller.run_poster.
assert_called_once_with(
flow_utils.get_failover_flow,
role=_db_amphora_mock.role,
load_balancer={},
wait=True,
store={constants.FAILED_AMPHORA: _db_amphora_mock.to_dict(),
constants.LOADBALANCER_ID:
_db_amphora_mock.load_balancer_id,
constants.BUILD_TYPE_PRIORITY:
constants.LB_CREATE_FAILOVER_PRIORITY,
constants.FLAVOR: {},
constants.AVAILABILITY_ZONE: {}
}))
mock_update.assert_called_with(_db_session, LB_ID,
provisioning_status=constants.ACTIVE)
@mock.patch('octavia.controller.worker.v2.controller_worker.'
'ControllerWorker._perform_amphora_failover')
def test_failover_amp_missing_amp(self,
mock_perform_amp_failover,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
mock_amp_repo_get.return_value = None
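# With no amphora record found, failover must be a no-op.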
cw = controller_worker.ControllerWorker()
cw.failover_amphora(AMP_ID)
mock_perform_amp_failover.assert_not_called()
@mock.patch('octavia.controller.worker.v2.controller_worker.'
'ControllerWorker._perform_amphora_failover')
@mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
def test_failover_amp_flow_exception(self,
mock_update,
mock_perform_amp_failover,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
mock_perform_amp_failover.side_effect = TestException('boom')
cw = controller_worker.ControllerWorker()
self.assertRaises(TestException, cw.failover_amphora, AMP_ID)
mock_update.assert_called_with(_db_session, LB_ID,
provisioning_status=constants.ERROR)
@mock.patch('octavia.controller.worker.v2.controller_worker.'
'ControllerWorker._perform_amphora_failover')
@mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
def test_failover_amp_no_lb(self,
mock_lb_update,
mock_perform_amp_failover,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
amphora = mock.MagicMock()
amphora.load_balancer_id = None
mock_amp_repo_get.return_value = amphora
cw = controller_worker.ControllerWorker()
cw.failover_amphora(AMP_ID)
mock_lb_update.assert_not_called()
mock_perform_amp_failover.assert_called_once_with(
amphora, constants.LB_CREATE_FAILOVER_PRIORITY)
@mock.patch(
'octavia.db.repositories.AmphoraRepository.get_lb_for_amphora',
return_value=None)
def test_failover_spare_amphora(self,
mock_get_lb_for_amphora,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
_flow_mock.reset_mock()
# simulate a spare amphora (amphora not attached to any load_balancer)
mock_amphora = mock.MagicMock()
mock_amphora.id = AMP_ID
mock_amphora.status = constants.AMPHORA_READY
mock_amphora.load_balancer_id = None
mock_amphora.role = constants.ROLE_STANDALONE
cw = controller_worker.ControllerWorker()
cw._perform_amphora_failover(mock_amphora,
constants.LB_CREATE_FAILOVER_PRIORITY)
(cw.services_controller.run_poster.
assert_called_once_with(
flow_utils.get_failover_flow,
role=constants.ROLE_STANDALONE, load_balancer=None,
wait=True,
store={constants.FAILED_AMPHORA: mock_amphora.to_dict(),
constants.LOADBALANCER_ID: None,
constants.BUILD_TYPE_PRIORITY:
constants.LB_CREATE_FAILOVER_PRIORITY,
constants.FLAVOR: {},
constants.AVAILABILITY_ZONE: {}
}))
@mock.patch('octavia.db.repositories.AmphoraHealthRepository.delete')
def test_failover_deleted_amphora(self,
mock_delete,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
mock_taskflow_load.reset_mock()
mock_amphora = mock.MagicMock()
mock_amphora.id = AMP_ID
mock_amphora.status = constants.DELETED
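# A DELETED amphora only needs its health record purged; no flow is loaded.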
cw = controller_worker.ControllerWorker()
cw._perform_amphora_failover(mock_amphora, 10)
mock_delete.assert_called_with(_db_session, amphora_id=AMP_ID)
mock_taskflow_load.assert_not_called()
@mock.patch('octavia.controller.worker.v2.'
'controller_worker.ControllerWorker._perform_amphora_failover')
@mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
def test_failover_loadbalancer(self,
mock_update,
mock_perform,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
_amphora_mock2 = mock.MagicMock()
_amphora_mock3 = mock.MagicMock()
_amphora_mock3.status = constants.DELETED
_db_load_balancer_mock.amphorae = [
_db_amphora_mock, _amphora_mock2, _amphora_mock3]
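# The DELETED amphora must be skipped; only the two live amphorae fail over.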
cw = controller_worker.ControllerWorker()
cw.failover_loadbalancer('123')
mock_perform.assert_called_with(
_amphora_mock2,
constants.LB_CREATE_ADMIN_FAILOVER_PRIORITY)
mock_update.assert_called_with(_db_session, '123',
provisioning_status=constants.ACTIVE)
mock_perform.reset_mock()
_db_load_balancer_mock.amphorae = [
_db_amphora_mock, _amphora_mock2, _amphora_mock3]
_amphora_mock2.role = constants.ROLE_BACKUP
cw.failover_loadbalancer('123')
# Because _amphora_mock2 was failed over first, _db_amphora_mock is now
# the last amphora to be failed over.
mock_perform.assert_called_with(
_db_amphora_mock, constants.LB_CREATE_ADMIN_FAILOVER_PRIORITY)
mock_update.assert_called_with(_db_session, '123',
provisioning_status=constants.ACTIVE)
mock_perform.side_effect = OverflowError()
self.assertRaises(OverflowError, cw.failover_loadbalancer, 123)
mock_update.assert_called_with(_db_session, 123,
provisioning_status=constants.ERROR)
@mock.patch('octavia.db.repositories.AvailabilityZoneRepository.'
'get_availability_zone_metadata_dict', return_value={})
@mock.patch('octavia.db.repositories.FlavorRepository.'
'get_flavor_metadata_dict', return_value={})
@mock.patch(
'octavia.db.repositories.AmphoraRepository.get_lb_for_amphora',
return_value=_db_load_balancer_mock)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
def test_failover_amphora_anti_affinity(self,
mock_update,
mock_get_lb_for_amphora,
mock_get_flavor_meta,
mock_get_az_meta,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
self.conf.config(group="nova", enable_anti_affinity=True)
_flow_mock.reset_mock()
_db_load_balancer_mock.server_group_id = "123"
cw = controller_worker.ControllerWorker()
cw.failover_amphora(AMP_ID)
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
_db_load_balancer_mock).to_dict()
(cw.services_controller.run_poster.
assert_called_once_with(
flow_utils.get_failover_flow,
role=_db_amphora_mock.role,
load_balancer=provider_lb,
wait=True,
store={constants.FAILED_AMPHORA: _db_amphora_mock.to_dict(),
constants.LOADBALANCER_ID:
_db_amphora_mock.load_balancer_id,
constants.BUILD_TYPE_PRIORITY:
constants.LB_CREATE_FAILOVER_PRIORITY,
constants.SERVER_GROUP_ID: "123",
constants.FLAVOR: {},
constants.AVAILABILITY_ZONE: {}
}))
mock_update.assert_called_with(_db_session, LB_ID,
provisioning_status=constants.ACTIVE)
def test_amphora_cert_rotation(self,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
_flow_mock.reset_mock()
cw = controller_worker.ControllerWorker()
mock_amp_repo_get.return_value = _db_amphora_mock
cw.amphora_cert_rotation(AMP_ID)
(cw.services_controller.run_poster.
assert_called_once_with(flow_utils.cert_rotate_amphora_flow,
store={constants.AMPHORA:
_db_amphora_mock.to_dict(),
constants.AMPHORA_ID:
_amphora_mock[constants.ID]}))
@mock.patch('octavia.db.repositories.FlavorRepository.'
'get_flavor_metadata_dict')
@mock.patch('octavia.db.repositories.AmphoraRepository.get_lb_for_amphora')
def test_update_amphora_agent_config(self,
mock_get_lb_for_amp,
mock_flavor_meta,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
_flow_mock.reset_mock()
mock_lb = mock.MagicMock()
mock_lb.flavor_id = 'vanilla'
mock_get_lb_for_amp.return_value = mock_lb
mock_flavor_meta.return_value = {'test': 'dict'}
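# With a flavor id set on the LB, the flavor metadata should be resolved
# and passed to the flow store.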
cw = controller_worker.ControllerWorker()
cw.update_amphora_agent_config(AMP_ID)
mock_amp_repo_get.assert_called_once_with(_db_session, id=AMP_ID)
mock_get_lb_for_amp.assert_called_once_with(_db_session, AMP_ID)
mock_flavor_meta.assert_called_once_with(_db_session, 'vanilla')
(cw.services_controller.run_poster.
assert_called_once_with(flow_utils.update_amphora_config_flow,
store={constants.AMPHORA:
_db_amphora_mock.to_dict(),
constants.FLAVOR: {'test': 'dict'}}))
# Test with no flavor
_flow_mock.reset_mock()
mock_amp_repo_get.reset_mock()
mock_get_lb_for_amp.reset_mock()
mock_flavor_meta.reset_mock()
mock_lb.flavor_id = None
cw.update_amphora_agent_config(AMP_ID)
mock_amp_repo_get.assert_called_once_with(_db_session, id=AMP_ID)
mock_get_lb_for_amp.assert_called_once_with(_db_session, AMP_ID)
mock_flavor_meta.assert_not_called()
(cw.services_controller.run_poster.
assert_called_once_with(flow_utils.update_amphora_config_flow,
store={constants.AMPHORA:
_db_amphora_mock.to_dict(),
constants.FLAVOR: {}}))
| 46.451069 | 79 | 0.524025 | 6,578 | 71,674 | 5.152022 | 0.043022 | 0.070434 | 0.089584 | 0.018413 | 0.866096 | 0.824314 | 0.784686 | 0.759221 | 0.7473 | 0.727943 | 0 | 0.006219 | 0.423473 | 71,674 | 1,542 | 80 | 46.481193 | 0.813925 | 0.010869 | 0 | 0.73131 | 0 | 0 | 0.046956 | 0.040832 | 0 | 0 | 0 | 0 | 0.045892 | 1 | 0.031088 | false | 0 | 0.007402 | 0.00074 | 0.040711 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
da99a3e21850d4eeb5bb06f6e9f1f2bb48a3d7b4 | 194 | py | Python | Models/BM25/src/__init__.py | MDoid10111/EMNLP2020 | 97e4da06abc72873a4830cfa53c035a27eb3975b | ["MIT"] | 42 | 2020-10-13T19:47:37.000Z | 2022-03-26T09:56:46.000Z | Models/BM25/src/__init__.py | MDoid10111/EMNLP2020 | 97e4da06abc72873a4830cfa53c035a27eb3975b | ["MIT"] | null | null | null | Models/BM25/src/__init__.py | MDoid10111/EMNLP2020 | 97e4da06abc72873a4830cfa53c035a27eb3975b | ["MIT"] | 5 | 2020-11-30T14:48:44.000Z | 2022-02-19T17:18:21.000Z |
__author__ = 'Nick Hirakawa'
from .invdx import *
from .parse import *
from .query import *
from .rank import *
from .query import QueryProcessor
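# Note: __all__ lists submodule names, so star-importing this package
# exposes the modules themselves rather than the classes pulled in above.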
__all__ = ['invdx', 'parse', 'query', 'rank']
| 19.4 | 45 | 0.701031 | 24 | 194 | 5.333333 | 0.458333 | 0.3125 | 0.234375 | 0.328125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.164948 | 194 | 9 | 46 | 21.555556 | 0.790123 | 0 | 0 | 0 | 0 | 0 | 0.164948 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.714286 | 0 | 0.714286 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
dab887377d640dc0c9508a2fea2413bfd5443319 | 7,412 | py | Python | Round711/C_Planar_Reflections.py | tqa236/codeforces | 81ad7bdb7786455f83d48d59a8884f62ded66caf | ["MIT"] | null | null | null | Round711/C_Planar_Reflections.py | tqa236/codeforces | 81ad7bdb7786455f83d48d59a8884f62ded66caf | ["MIT"] | null | null | null | Round711/C_Planar_Reflections.py | tqa236/codeforces | 81ad7bdb7786455f83d48d59a8884f62ded66caf | ["MIT"] | null | null | null |
#!/usr/bin/env python
import os
import sys
from io import BytesIO, IOBase
import math
from collections import Counter
sys.setrecursionlimit(10 ** 9)
def func(n, k, max_val):
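# Bottom-up DP, modulo 1e9 + 7: DP[i][j] combines the straight-through
# term DP[i - 1][j] with the reflected term DP[max_val - i][j - 1]; the
# answer for a query (n, k) is DP[n][k - 1].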
ROWS = max_val
DP = [[None] * k for i in range(ROWS + 1)]
for i in range(ROWS + 1):
DP[i][0] = 1
for i in range(1, k):
DP[0][i] = 1
# print(DP)
# print(n, k, DP[n][k])
for j in range(1, k):
for i in range(1, max_val + 1):
# print(i, j)
DP[i][j] = (DP[i - 1][j] + DP[max_val - i][j - 1]) % (10 ** 9 + 7)
# if DP[n][k] is not None:
# return DP[n][k]
# if DP[n - 1][k] is None:
# DP[n - 1][k] = func(n - 1, k, max_val, DP)
# # print(max_val, n)
# if DP[max_val - n][k - 1] is None:
# DP[max_val - n][k - 1] = func(max_val - n, k - 1, max_val, DP)
# DP[n][k] = DP[n - 1][k] + DP[max_val - n][k - 1]
return DP[n][k - 1]
# def func(n, k, max_val, DP):
# ROWS = max_val
# DP = [[None] * ROWS for i in range(ROWS + 1)]
# for i in range(ROWS + 1):
# DP[i][0] = 1
# for i in range(1, ROWS):
# DP[0][i] = 1
# # print(DP)
# print(n, k, DP[n][k])
# if DP[n][k] is not None:
# return DP[n][k]
# if DP[n - 1][k] is None:
# DP[n - 1][k] = func(n - 1, k, max_val, DP)
# # print(max_val, n)
# if DP[max_val - n][k - 1] is None:
# DP[max_val - n][k - 1] = func(max_val - n, k - 1, max_val, DP)
# DP[n][k] = DP[n - 1][k] + DP[max_val - n][k - 1]
# return DP[n][k]
def main():
num_test = int(parse_input())
result = []
for _ in range(num_test):
# print(DP)
n, k = [int(i) for i in parse_input().split()]
# print(n, k)
result.append(func(n, k, n) % (10 ** 9 + 7))
print("\n".join(map(str, result)))
# region fastio
# BUFSIZE = 8192
# class FastIO(IOBase):
# newlines = 0
# def __init__(self, file):
# self._fd = file.fileno()
# self.buffer = BytesIO()
# self.writable = "x" in file.mode or "r" not in file.mode
# self.write = self.buffer.write if self.writable else None
# def read(self):
# while True:
# b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
# if not b:
# break
# ptr = self.buffer.tell()
# self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
# self.newlines = 0
# return self.buffer.read()
# def readline(self):
# while self.newlines == 0:
# b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
# self.newlines = b.count(b"\n") + (not b)
# ptr = self.buffer.tell()
# self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
# self.newlines -= 1
# return self.buffer.readline()
# def flush(self):
# if self.writable:
# os.write(self._fd, self.buffer.getvalue())
# self.buffer.truncate(0), self.buffer.seek(0)
# class IOWrapper(IOBase):
# def __init__(self, file):
# self.buffer = FastIO(file)
# self.flush = self.buffer.flush
# self.writable = self.buffer.writable
# self.write = lambda s: self.buffer.write(s.encode("ascii"))
# self.read = lambda: self.buffer.read().decode("ascii")
# self.readline = lambda: self.buffer.readline().decode("ascii")
# sys.stdin, sys.stdout = IOWrapper(sys.stdin), IOWrapper(sys.stdout)
parse_input = lambda: sys.stdin.readline().rstrip("\r\n")
# endregion
if __name__ == "__main__":
main()
# #!/usr/bin/env python
# import os
# import sys
# from io import BytesIO, IOBase
# import math
# from collections import Counter
# sys.setrecursionlimit(10 ** 9)
# max_val = 1001
# k = 1001
# ROWS = max_val
# DP = [[None] * k for i in range(ROWS + 1)]
# for i in range(ROWS + 1):
# DP[i][0] = 1
# for i in range(1, k):
# DP[0][i] = 1
# # print(DP)
# # print(n, k, DP[n][k])
# for j in range(1, k):
# for i in range(1, max_val + 1):
# # print(i, j)
# DP[i][j] = (DP[i - 1][j] + DP[max_val - i][j - 1]) % (10 ** 9 + 7)
# def func(n, k, max_val):
# # if DP[n][k] is not None:
# # return DP[n][k]
# # if DP[n - 1][k] is None:
# # DP[n - 1][k] = func(n - 1, k, max_val, DP)
# # # print(max_val, n)
# # if DP[max_val - n][k - 1] is None:
# # DP[max_val - n][k - 1] = func(max_val - n, k - 1, max_val, DP)
# # DP[n][k] = DP[n - 1][k] + DP[max_val - n][k - 1]
# return DP[n][k - 1]
# # def func(n, k, max_val, DP):
# # ROWS = max_val
# # DP = [[None] * ROWS for i in range(ROWS + 1)]
# # for i in range(ROWS + 1):
# # DP[i][0] = 1
# # for i in range(1, ROWS):
# # DP[0][i] = 1
# # # print(DP)
# # print(n, k, DP[n][k])
# # if DP[n][k] is not None:
# # return DP[n][k]
# # if DP[n - 1][k] is None:
# # DP[n - 1][k] = func(n - 1, k, max_val, DP)
# # # print(max_val, n)
# # if DP[max_val - n][k - 1] is None:
# # DP[max_val - n][k - 1] = func(max_val - n, k - 1, max_val, DP)
# # DP[n][k] = DP[n - 1][k] + DP[max_val - n][k - 1]
# # return DP[n][k]
# def main():
# num_test = int(parse_input())
# result = []
# for _ in range(num_test):
# # print(DP)
# n, k = [int(i) for i in parse_input().split()]
# # print(n, k)
# result.append(func(n, k, n))
# print("\n".join(map(str, result)))
# # region fastio
# # BUFSIZE = 8192
# # class FastIO(IOBase):
# # newlines = 0
# # def __init__(self, file):
# # self._fd = file.fileno()
# # self.buffer = BytesIO()
# # self.writable = "x" in file.mode or "r" not in file.mode
# # self.write = self.buffer.write if self.writable else None
# # def read(self):
# # while True:
# # b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
# # if not b:
# # break
# # ptr = self.buffer.tell()
# # self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
# # self.newlines = 0
# # return self.buffer.read()
# # def readline(self):
# # while self.newlines == 0:
# # b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
# # self.newlines = b.count(b"\n") + (not b)
# # ptr = self.buffer.tell()
# # self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
# # self.newlines -= 1
# # return self.buffer.readline()
# # def flush(self):
# # if self.writable:
# # os.write(self._fd, self.buffer.getvalue())
# # self.buffer.truncate(0), self.buffer.seek(0)
# # class IOWrapper(IOBase):
# # def __init__(self, file):
# # self.buffer = FastIO(file)
# # self.flush = self.buffer.flush
# # self.writable = self.buffer.writable
# # self.write = lambda s: self.buffer.write(s.encode("ascii"))
# # self.read = lambda: self.buffer.read().decode("ascii")
# # self.readline = lambda: self.buffer.readline().decode("ascii")
# # sys.stdin, sys.stdout = IOWrapper(sys.stdin), IOWrapper(sys.stdout)
# parse_input = lambda: sys.stdin.readline().rstrip("\r\n")
# # endregion
# if __name__ == "__main__":
# main()
| 29.52988 | 83 | 0.511063 | 1,148 | 7,412 | 3.211672 | 0.086237 | 0.027122 | 0.023868 | 0.034717 | 0.994847 | 0.994847 | 0.98671 | 0.98671 | 0.98671 | 0.98671 | 0 | 0.024282 | 0.299919 | 7,412 | 250 | 84 | 29.648 | 0.686259 | 0.816244 | 0 | 0 | 0 | 0 | 0.012601 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.185185 | 0 | 0.296296 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
97149eab301e18cd898a20e6adf6f02a60ffb927 | 128 | py | Python | stock/utils.py | xelaxela13/stock | ef7df50194be340e7faff915e9de4e3b1ade4eca | ["MIT"] | 1 | 2019-02-19T08:43:51.000Z | 2019-02-19T08:43:51.000Z | stock/utils.py | xelaxela13/stock | ef7df50194be340e7faff915e9de4e3b1ade4eca | ["MIT"] | 6 | 2021-03-19T02:07:34.000Z | 2022-02-10T08:27:57.000Z | stock/utils.py | xelaxela13/stock | ef7df50194be340e7faff915e9de4e3b1ade4eca | ["MIT"] | null | null | null |
def float_format(value):
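# Render a number with exactly two decimal places, e.g. 1.5 -> '1.50'.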
return '{0:.2f}'.format(value)
def generate_cache_key(prefix, key):
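# Join a prefix and key into a namespaced cache key, e.g. 'stock_42'.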
return f'{prefix}_{key}'
| 18.285714 | 36 | 0.679688 | 19 | 128 | 4.368421 | 0.631579 | 0.26506 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018349 | 0.148438 | 128 | 6 | 37 | 21.333333 | 0.743119 | 0 | 0 | 0 | 1 | 0 | 0.164063 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0 | 0.5 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 7 |
97166ecc665702a8749ce9765aa335b701f9a1e0 | 44,566 | py | Python | vplexapi-7.0.0/vplexapi/api/data_migration_api.py | dell/python-vplex | 02c5df5e7f9ed61a13a2838f21ca6467a25dd392 | ["Apache-2.0"] | 3 | 2020-12-01T11:22:13.000Z | 2021-02-16T17:38:42.000Z | vplexapi-7.0.0/vplexapi/api/data_migration_api.py | dell/python-vplex | 02c5df5e7f9ed61a13a2838f21ca6467a25dd392 | ["Apache-2.0"] | null | null | null | vplexapi-7.0.0/vplexapi/api/data_migration_api.py | dell/python-vplex | 02c5df5e7f9ed61a13a2838f21ca6467a25dd392 | ["Apache-2.0"] | 3 | 2021-01-01T21:07:55.000Z | 2021-02-20T07:07:40.000Z |
# coding: utf-8
"""
VPlex REST API
A definition for the next-gen VPlex API # noqa: E501
OpenAPI spec version: 0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from vplexapi.api_client import ApiClient
class DataMigrationApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_device_migration(self, device_migration_payload, **kwargs): # noqa: E501
"""Create a new DeviceMigration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_device_migration(device_migration_payload, async_req=True)
>>> result = thread.get()
:param async_req bool
:param DeviceMigrationPayload device_migration_payload: (required)
:return: DeviceMigration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_device_migration_with_http_info(device_migration_payload, **kwargs) # noqa: E501
else:
(data) = self.create_device_migration_with_http_info(device_migration_payload, **kwargs) # noqa: E501
return data
def create_device_migration_with_http_info(self, device_migration_payload, **kwargs): # noqa: E501
"""Create a new DeviceMigration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_device_migration_with_http_info(device_migration_payload, async_req=True)
>>> result = thread.get()
:param async_req bool
:param DeviceMigrationPayload device_migration_payload: (required)
:return: DeviceMigration
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['device_migration_payload'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_device_migration" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'device_migration_payload' is set
if ('device_migration_payload' not in params or
params['device_migration_payload'] is None):
raise ValueError("Missing the required parameter `device_migration_payload` when calling `create_device_migration`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'device_migration_payload' in params:
body_params = params['device_migration_payload']
# Authentication setting
auth_settings = ['basicAuth', 'jwtAuth'] # noqa: E501
return self.api_client.call_api(
'/data_migrations/device_migrations', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DeviceMigration', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_extent_migration(self, extent_migration_payload, **kwargs): # noqa: E501
"""Create a new ExtentMigration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_extent_migration(extent_migration_payload, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ExtentMigrationPayload extent_migration_payload: (required)
:return: ExtentMigration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_extent_migration_with_http_info(extent_migration_payload, **kwargs) # noqa: E501
else:
(data) = self.create_extent_migration_with_http_info(extent_migration_payload, **kwargs) # noqa: E501
return data
def create_extent_migration_with_http_info(self, extent_migration_payload, **kwargs): # noqa: E501
"""Create a new ExtentMigration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_extent_migration_with_http_info(extent_migration_payload, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ExtentMigrationPayload extent_migration_payload: (required)
:return: ExtentMigration
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['extent_migration_payload'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_extent_migration" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'extent_migration_payload' is set
if ('extent_migration_payload' not in params or
params['extent_migration_payload'] is None):
raise ValueError("Missing the required parameter `extent_migration_payload` when calling `create_extent_migration`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'extent_migration_payload' in params:
body_params = params['extent_migration_payload']
# Authentication setting
auth_settings = ['basicAuth', 'jwtAuth'] # noqa: E501
return self.api_client.call_api(
'/data_migrations/extent_migrations', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ExtentMigration', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_device_migration(self, name, **kwargs): # noqa: E501
"""Deletes a single canceled or committed DeviceMigration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_device_migration(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: The name of a specific instance of the resource (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_device_migration_with_http_info(name, **kwargs) # noqa: E501
else:
(data) = self.delete_device_migration_with_http_info(name, **kwargs) # noqa: E501
return data
def delete_device_migration_with_http_info(self, name, **kwargs): # noqa: E501
"""Deletes a single canceled or committed DeviceMigration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_device_migration_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: The name of a specific instance of the resource (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_device_migration" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params or
params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_device_migration`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['basicAuth', 'jwtAuth'] # noqa: E501
return self.api_client.call_api(
'/data_migrations/device_migrations/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_extent_migration(self, name, **kwargs): # noqa: E501
"""Deletes a single canceled or committed ExtentMigration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_extent_migration(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: The name of a specific instance of the resource (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_extent_migration_with_http_info(name, **kwargs) # noqa: E501
else:
(data) = self.delete_extent_migration_with_http_info(name, **kwargs) # noqa: E501
return data
def delete_extent_migration_with_http_info(self, name, **kwargs): # noqa: E501
"""Deletes a single canceled or committed ExtentMigration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_extent_migration_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: The name of a specific instance of the resource (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_extent_migration" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params or
params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_extent_migration`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['basicAuth', 'jwtAuth'] # noqa: E501
return self.api_client.call_api(
'/data_migrations/extent_migrations/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_device_migration(self, name, **kwargs): # noqa: E501
"""Returns a single DeviceMigration object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_device_migration(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: The name of a specific instance of the resource (required)
:param str fields: Select which fields are included in the response. 'name' is always included. See FieldSelectionExpression for details.
:return: DeviceMigration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_device_migration_with_http_info(name, **kwargs) # noqa: E501
else:
(data) = self.get_device_migration_with_http_info(name, **kwargs) # noqa: E501
return data
def get_device_migration_with_http_info(self, name, **kwargs): # noqa: E501
"""Returns a single DeviceMigration object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_device_migration_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: The name of a specific instance of the resource (required)
:param str fields: Select which fields are included in the response. 'name' is always included. See FieldSelectionExpression for details.
:return: DeviceMigration
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_device_migration" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params or
params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `get_device_migration`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name'] # noqa: E501
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['basicAuth', 'jwtAuth'] # noqa: E501
return self.api_client.call_api(
'/data_migrations/device_migrations/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DeviceMigration', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_device_migrations(self, **kwargs): # noqa: E501
"""Returns a list of device migrations # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_device_migrations(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: Filter results by name. See LexicalQueryExpression for details.
:param str to_cluster: Filter results by to_cluster. See LexicalQueryExpression for details.
:param str from_cluster: Filter results by from_cluster. See LexicalQueryExpression for details.
:param int offset: Index of the first element to include in paginated results.<br> <b>'limit' must also be specified.</b>
:param int limit: <p>Maximum number of elements to include in paginated results.<br> <b>'offset' must also be specified.<b>
:param str sort_by: Specify the field priority order and direction for sorting. See SortingOrderExpression for details.
:param str fields: Select which fields are included in the response. 'name' is always included. See FieldSelectionExpression for details.
:return: list[DeviceMigration]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_device_migrations_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_device_migrations_with_http_info(**kwargs) # noqa: E501
return data
def get_device_migrations_with_http_info(self, **kwargs): # noqa: E501
"""Returns a list of device migrations # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_device_migrations_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: Filter results by name. See LexicalQueryExpression for details.
:param str to_cluster: Filter results by to_cluster. See LexicalQueryExpression for details.
:param str from_cluster: Filter results by from_cluster. See LexicalQueryExpression for details.
:param int offset: Index of the first element to include in paginated results.<br> <b>'limit' must also be specified.</b>
:param int limit: Maximum number of elements to include in paginated results.<br> <b>'offset' must also be specified.</b>
:param str sort_by: Specify the field priority order and direction for sorting. See SortingOrderExpression for details.
:param str fields: Select which fields are included in the response. 'name' is always included. See FieldSelectionExpression for details.
:return: list[DeviceMigration]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'to_cluster', 'from_cluster', 'offset', 'limit', 'sort_by', 'fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_device_migrations" % key
)
params[key] = val
del params['kwargs']
if 'offset' in params and params['offset'] < 0: # noqa: E501
raise ValueError("Invalid value for parameter `offset` when calling `get_device_migrations`, must be a value greater than or equal to `0`") # noqa: E501
if 'limit' in params and params['limit'] > 100: # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_device_migrations`, must be a value less than or equal to `100`") # noqa: E501
if 'limit' in params and params['limit'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_device_migrations`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'name' in params:
query_params.append(('name', params['name'])) # noqa: E501
if 'to_cluster' in params:
query_params.append(('to_cluster', params['to_cluster'])) # noqa: E501
if 'from_cluster' in params:
query_params.append(('from_cluster', params['from_cluster'])) # noqa: E501
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'sort_by' in params:
query_params.append(('sort_by', params['sort_by'])) # noqa: E501
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['basicAuth', 'jwtAuth'] # noqa: E501
return self.api_client.call_api(
'/data_migrations/device_migrations', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[DeviceMigration]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
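# Hedged usage sketch (not part of the generated client): listing device
# migrations with pagination. Per the docstring above, 'offset' and 'limit'
# must be supplied together and 'limit' must lie in [1, 100]; the parameter
# names come from this class, while the concrete values are illustrative
# placeholders.
#
# >>> migrations = api.get_device_migrations(offset=0, limit=50)
# >>> thread = api.get_device_migrations(async_req=True, offset=0, limit=50)
# >>> migrations = thread.get()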
def get_extent_migration(self, name, **kwargs): # noqa: E501
"""Returns a single ExtentMigration object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_extent_migration(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: The name of a specific instance of the resource (required)
:param str fields: Select which fields are included in the response. 'name' is always included. See FieldSelectionExpression for details.
:return: ExtentMigration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_extent_migration_with_http_info(name, **kwargs) # noqa: E501
else:
(data) = self.get_extent_migration_with_http_info(name, **kwargs) # noqa: E501
return data
def get_extent_migration_with_http_info(self, name, **kwargs): # noqa: E501
"""Returns a single ExtentMigration object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_extent_migration_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: The name of a specific instance of the resource (required)
:param str fields: Select which fields are included in the response. 'name' is always included. See FieldSelectionExpression for details.
:return: ExtentMigration
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_extent_migration" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params or
params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `get_extent_migration`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name'] # noqa: E501
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['basicAuth', 'jwtAuth'] # noqa: E501
return self.api_client.call_api(
'/data_migrations/extent_migrations/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ExtentMigration', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_extent_migrations(self, **kwargs): # noqa: E501
"""Returns a list of extent migrations # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_extent_migrations(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: Filter results by name. See LexicalQueryExpression for details.
:param str to_cluster: Filter results by to_cluster. See LexicalQueryExpression for details.
:param str from_cluster: Filter results by from_cluster. See LexicalQueryExpression for details.
:param int offset: Index of the first element to include in paginated results.<br> <b>'limit' must also be specified.</b>
:param int limit: Maximum number of elements to include in paginated results.<br> <b>'offset' must also be specified.</b>
:param str sort_by: Specify the field priority order and direction for sorting. See SortingOrderExpression for details.
:param str fields: Select which fields are included in the response. 'name' is always included. See FieldSelectionExpression for details.
:return: list[ExtentMigration]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_extent_migrations_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_extent_migrations_with_http_info(**kwargs) # noqa: E501
return data
def get_extent_migrations_with_http_info(self, **kwargs): # noqa: E501
"""Returns a list of extent migrations # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_extent_migrations_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: Filter results by name. See LexicalQueryExpression for details.
:param str to_cluster: Filter results by to_cluster. See LexicalQueryExpression for details.
:param str from_cluster: Filter results by from_cluster. See LexicalQueryExpression for details.
:param int offset: Index of the first element to include in paginated results.<br> <b>'limit' must also be specified.</b>
:param int limit: Maximum number of elements to include in paginated results.<br> <b>'offset' must also be specified.</b>
:param str sort_by: Specify the field priority order and direction for sorting. See SortingOrderExpression for details.
:param str fields: Select which fields are included in the response. 'name' is always included. See FieldSelectionExpression for details.
:return: list[ExtentMigration]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'to_cluster', 'from_cluster', 'offset', 'limit', 'sort_by', 'fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_extent_migrations" % key
)
params[key] = val
del params['kwargs']
if 'offset' in params and params['offset'] < 0: # noqa: E501
raise ValueError("Invalid value for parameter `offset` when calling `get_extent_migrations`, must be a value greater than or equal to `0`") # noqa: E501
if 'limit' in params and params['limit'] > 100: # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_extent_migrations`, must be a value less than or equal to `100`") # noqa: E501
if 'limit' in params and params['limit'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_extent_migrations`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'name' in params:
query_params.append(('name', params['name'])) # noqa: E501
if 'to_cluster' in params:
query_params.append(('to_cluster', params['to_cluster'])) # noqa: E501
if 'from_cluster' in params:
query_params.append(('from_cluster', params['from_cluster'])) # noqa: E501
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'sort_by' in params:
query_params.append(('sort_by', params['sort_by'])) # noqa: E501
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['basicAuth', 'jwtAuth'] # noqa: E501
return self.api_client.call_api(
'/data_migrations/extent_migrations', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[ExtentMigration]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_device_migration(self, name, device_migration_patch_payload, **kwargs): # noqa: E501
"""Update attributes on a DeviceMigration # noqa: E501
Settable attributes: 'name', 'transfer_size' and 'status'(replace) # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_device_migration(name, device_migration_patch_payload, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: The name of a specific instance of the resource (required)
:param list[JsonPatchOp] device_migration_patch_payload: (required)
:return: DeviceMigration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_device_migration_with_http_info(name, device_migration_patch_payload, **kwargs) # noqa: E501
else:
(data) = self.patch_device_migration_with_http_info(name, device_migration_patch_payload, **kwargs) # noqa: E501
return data
def patch_device_migration_with_http_info(self, name, device_migration_patch_payload, **kwargs): # noqa: E501
"""Update attributes on a DeviceMigration # noqa: E501
Settable attributes: 'name', 'transfer_size' and 'status'(replace) # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_device_migration_with_http_info(name, device_migration_patch_payload, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: The name of a specific instance of the resource (required)
:param list[JsonPatchOp] device_migration_patch_payload: (required)
:return: DeviceMigration
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'device_migration_patch_payload'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_device_migration" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params or
params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_device_migration`") # noqa: E501
# verify the required parameter 'device_migration_patch_payload' is set
if ('device_migration_patch_payload' not in params or
params['device_migration_patch_payload'] is None):
raise ValueError("Missing the required parameter `device_migration_patch_payload` when calling `patch_device_migration`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'device_migration_patch_payload' in params:
body_params = params['device_migration_patch_payload']
# Authentication setting
auth_settings = ['basicAuth', 'jwtAuth'] # noqa: E501
return self.api_client.call_api(
'/data_migrations/device_migrations/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DeviceMigration', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
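# Hedged usage sketch (not part of the generated client): patching a
# DeviceMigration. The docstring above lists the settable attributes
# ('name', 'transfer_size' and 'status'); the payload below assumes
# JsonPatchOp follows the usual RFC 6902 op/path/value shape, and all
# concrete values are illustrative placeholders.
#
# >>> patch = [{'op': 'replace', 'path': '/transfer_size', 'value': 1048576}]
# >>> updated = api.patch_device_migration('my-migration', patch)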
def patch_extent_migration(self, name, extent_migration_patch_payload, **kwargs): # noqa: E501
"""Update attributes on a ExtentMigration # noqa: E501
Settable attributes: 'name', 'transfer_size' and 'status'(replace) # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_extent_migration(name, extent_migration_patch_payload, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: The name of a specific instance of the resource (required)
:param list[JsonPatchOp] extent_migration_patch_payload: (required)
:return: ExtentMigration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_extent_migration_with_http_info(name, extent_migration_patch_payload, **kwargs) # noqa: E501
else:
(data) = self.patch_extent_migration_with_http_info(name, extent_migration_patch_payload, **kwargs) # noqa: E501
return data
def patch_extent_migration_with_http_info(self, name, extent_migration_patch_payload, **kwargs): # noqa: E501
"""Update attributes on a ExtentMigration # noqa: E501
Settable attributes: 'name', 'transfer_size' and 'status'(replace) # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_extent_migration_with_http_info(name, extent_migration_patch_payload, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: The name of a specific instance of the resource (required)
:param list[JsonPatchOp] extent_migration_patch_payload: (required)
:return: ExtentMigration
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'extent_migration_patch_payload'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_extent_migration" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params or
params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_extent_migration`") # noqa: E501
# verify the required parameter 'extent_migration_patch_payload' is set
if ('extent_migration_patch_payload' not in params or
params['extent_migration_patch_payload'] is None):
raise ValueError("Missing the required parameter `extent_migration_patch_payload` when calling `patch_extent_migration`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'extent_migration_patch_payload' in params:
body_params = params['extent_migration_patch_payload']
# Authentication setting
auth_settings = ['basicAuth', 'jwtAuth'] # noqa: E501
return self.api_client.call_api(
'/data_migrations/extent_migrations/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ExtentMigration', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 44.344279
| 165
| 0.63351
| 5,175
| 44,566
| 5.217778
| 0.042512
| 0.041182
| 0.020739
| 0.026665
| 0.981224
| 0.97952
| 0.973446
| 0.959818
| 0.9473
| 0.943004
| 0
| 0.013877
| 0.28046
| 44,566
| 1,004
| 166
| 44.388446
| 0.828172
| 0.362182
| 0
| 0.788427
| 1
| 0.01085
| 0.223747
| 0.087745
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037975
| false
| 0
| 0.007233
| 0
| 0.101266
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
977a13b5e65a2dfeb1ac66c8c25c6930b63c7651
| 142
|
py
|
Python
|
src/sage/finance/time_series.py
|
UCD4IDS/sage
|
43474c96d533fd396fe29fe0782d44dc7f5164f7
|
[
"BSL-1.0"
] | 1,742
|
2015-01-04T07:06:13.000Z
|
2022-03-30T11:32:52.000Z
|
src/sage/finance/time_series.py
|
UCD4IDS/sage
|
43474c96d533fd396fe29fe0782d44dc7f5164f7
|
[
"BSL-1.0"
] | 66
|
2015-03-19T19:17:24.000Z
|
2022-03-16T11:59:30.000Z
|
src/sage/finance/time_series.py
|
UCD4IDS/sage
|
43474c96d533fd396fe29fe0782d44dc7f5164f7
|
[
"BSL-1.0"
] | 495
|
2015-01-10T10:23:18.000Z
|
2022-03-24T22:06:11.000Z
|
from sage.misc.lazy_import import lazy_import
lazy_import('sage.stats.time_series', ('TimeSeries', 'autoregressive_fit'), deprecation=32427)
| 35.5
| 94
| 0.809859
| 19
| 142
| 5.789474
| 0.684211
| 0.272727
| 0.290909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037594
| 0.06338
| 142
| 3
| 95
| 47.333333
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0.352113
| 0.15493
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
97ac1ccb83ec3dd1bbabd526f788d7e18b2750bd
| 15,246
|
py
|
Python
|
Proyecto/ai/compiled_krb/bc_action_rules_bc.py
|
arnel-sanchez/IA-Sim-Com
|
22023342f20202b260caa759af9cce71d803663e
|
[
"MIT"
] | 2
|
2021-11-20T23:35:20.000Z
|
2021-12-10T17:45:56.000Z
|
Proyecto/ai/compiled_krb/bc_action_rules_bc.py
|
arnel-sanchez/IA-Sim-Com
|
22023342f20202b260caa759af9cce71d803663e
|
[
"MIT"
] | null | null | null |
Proyecto/ai/compiled_krb/bc_action_rules_bc.py
|
arnel-sanchez/IA-Sim-Com
|
22023342f20202b260caa759af9cce71d803663e
|
[
"MIT"
] | 1
|
2022-02-11T07:24:50.000Z
|
2022-02-11T07:24:50.000Z
|
# bc_action_rules_bc.py
from pyke import contexts, pattern, bc_rule
pyke_version = '1.1.1'
compiler_version = 1
def speed_up(rule, arg_patterns, arg_context):
engine = rule.rule_base.engine
patterns = rule.goal_arg_patterns()
if len(arg_patterns) == len(patterns):
context = contexts.bc_context(rule)
try:
if all(map(lambda pat, arg:
pat.match_pattern(context, context,
arg, arg_context),
patterns,
arg_patterns)):
rule.rule_base.num_bc_rules_matched += 1
with engine.prove('action_facts', 'speed', context,
(rule.pattern(0),)) \
as gen_1:
for x_1 in gen_1:
assert x_1 is None, \
"bc_action_rules.speed_up: got unexpected plan from when clause 1"
rule.rule_base.num_bc_rule_successes += 1
yield
rule.rule_base.num_bc_rule_failures += 1
finally:
context.done()
def keep_speed(rule, arg_patterns, arg_context):
engine = rule.rule_base.engine
patterns = rule.goal_arg_patterns()
if len(arg_patterns) == len(patterns):
context = contexts.bc_context(rule)
try:
if all(map(lambda pat, arg:
pat.match_pattern(context, context,
arg, arg_context),
patterns,
arg_patterns)):
rule.rule_base.num_bc_rules_matched += 1
with engine.prove('action_facts', 'speed', context,
(rule.pattern(0),)) \
as gen_1:
for x_1 in gen_1:
assert x_1 is None, \
"bc_action_rules.keep_speed: got unexpected plan from when clause 1"
rule.rule_base.num_bc_rule_successes += 1
yield
rule.rule_base.num_bc_rule_failures += 1
finally:
context.done()
def brake(rule, arg_patterns, arg_context):
engine = rule.rule_base.engine
patterns = rule.goal_arg_patterns()
if len(arg_patterns) == len(patterns):
context = contexts.bc_context(rule)
try:
if all(map(lambda pat, arg:
pat.match_pattern(context, context,
arg, arg_context),
patterns,
arg_patterns)):
rule.rule_base.num_bc_rules_matched += 1
with engine.prove('action_facts', 'speed', context,
(rule.pattern(0),)) \
as gen_1:
for x_1 in gen_1:
assert x_1 is None, \
"bc_action_rules.brake: got unexpected plan from when clause 1"
rule.rule_base.num_bc_rule_successes += 1
yield
rule.rule_base.num_bc_rule_failures += 1
finally:
context.done()
def turn(rule, arg_patterns, arg_context):
engine = rule.rule_base.engine
patterns = rule.goal_arg_patterns()
if len(arg_patterns) == len(patterns):
context = contexts.bc_context(rule)
try:
if all(map(lambda pat, arg:
pat.match_pattern(context, context,
arg, arg_context),
patterns,
arg_patterns)):
rule.rule_base.num_bc_rules_matched += 1
with engine.prove('action_facts', 'section', context,
(rule.pattern(0),)) \
as gen_1:
for x_1 in gen_1:
assert x_1 is None, \
"bc_action_rules.turn: got unexpected plan from when clause 1"
rule.rule_base.num_bc_rule_successes += 1
yield
rule.rule_base.num_bc_rule_failures += 1
finally:
context.done()
def pits_1(rule, arg_patterns, arg_context):
engine = rule.rule_base.engine
patterns = rule.goal_arg_patterns()
if len(arg_patterns) == len(patterns):
context = contexts.bc_context(rule)
try:
if all(map(lambda pat, arg:
pat.match_pattern(context, context,
arg, arg_context),
patterns,
arg_patterns)):
rule.rule_base.num_bc_rules_matched += 1
with engine.prove('action_facts', 'slick_tires', context,
(rule.pattern(0),)) \
as gen_1:
for x_1 in gen_1:
assert x_1 is None, \
"bc_action_rules.pits_1: got unexpected plan from when clause 1"
with engine.prove('action_facts', 'weather', context,
(rule.pattern(1),)) \
as gen_2:
for x_2 in gen_2:
assert x_2 is None, \
"bc_action_rules.pits_1: got unexpected plan from when clause 2"
rule.rule_base.num_bc_rule_successes += 1
yield
rule.rule_base.num_bc_rule_failures += 1
finally:
context.done()
def pits_2(rule, arg_patterns, arg_context):
engine = rule.rule_base.engine
patterns = rule.goal_arg_patterns()
if len(arg_patterns) == len(patterns):
context = contexts.bc_context(rule)
try:
if all(map(lambda pat, arg:
pat.match_pattern(context, context,
arg, arg_context),
patterns,
arg_patterns)):
rule.rule_base.num_bc_rules_matched += 1
with engine.prove('action_facts', 'slick_tires', context,
(rule.pattern(0),)) \
as gen_1:
for x_1 in gen_1:
assert x_1 is None, \
"bc_action_rules.pits_2: got unexpected plan from when clause 1"
with engine.prove('action_facts', 'weather', context,
(rule.pattern(1),)) \
as gen_2:
for x_2 in gen_2:
assert x_2 is None, \
"bc_action_rules.pits_2: got unexpected plan from when clause 2"
if context.lookup_data('ans_1') != "Rainy":
with engine.prove('action_facts', 'humidity', context,
(rule.pattern(2),)) \
as gen_4:
for x_4 in gen_4:
assert x_4 is None, \
"bc_action_rules.pits_2: got unexpected plan from when clause 4"
if context.lookup_data('ans_2') <= 6:
rule.rule_base.num_bc_rule_successes += 1
yield
rule.rule_base.num_bc_rule_failures += 1
finally:
context.done()
def attack_1(rule, arg_patterns, arg_context):
engine = rule.rule_base.engine
patterns = rule.goal_arg_patterns()
if len(arg_patterns) == len(patterns):
context = contexts.bc_context(rule)
try:
if all(map(lambda pat, arg:
pat.match_pattern(context, context,
arg, arg_context),
patterns,
arg_patterns)):
rule.rule_base.num_bc_rules_matched += 1
with engine.prove('action_facts', 'section', context,
(rule.pattern(0),)) \
as gen_1:
for x_1 in gen_1:
assert x_1 is None, \
"bc_action_rules.attack_1: got unexpected plan from when clause 1"
with engine.prove('action_facts', 'nearest_forward', context,
(rule.pattern(1),)) \
as gen_2:
for x_2 in gen_2:
assert x_2 is None, \
"bc_action_rules.attack_1: got unexpected plan from when clause 2"
if context.lookup_data('ans') <= 0.5:
rule.rule_base.num_bc_rule_successes += 1
yield
rule.rule_base.num_bc_rule_failures += 1
finally:
context.done()
def attack_2(rule, arg_patterns, arg_context):
engine = rule.rule_base.engine
patterns = rule.goal_arg_patterns()
if len(arg_patterns) == len(patterns):
context = contexts.bc_context(rule)
try:
if all(map(lambda pat, arg:
pat.match_pattern(context, context,
arg, arg_context),
patterns,
arg_patterns)):
rule.rule_base.num_bc_rules_matched += 1
with engine.prove('action_facts', 'section', context,
(rule.pattern(0),)) \
as gen_1:
for x_1 in gen_1:
assert x_1 is None, \
"bc_action_rules.attack_2: got unexpected plan from when clause 1"
with engine.prove('action_facts', 'nearest_forward', context,
(rule.pattern(1),)) \
as gen_2:
for x_2 in gen_2:
assert x_2 is None, \
"bc_action_rules.attack_2: got unexpected plan from when clause 2"
if context.lookup_data('ans') <= 1:
rule.rule_base.num_bc_rule_successes += 1
yield
rule.rule_base.num_bc_rule_failures += 1
finally:
context.done()
def defend_1(rule, arg_patterns, arg_context):
engine = rule.rule_base.engine
patterns = rule.goal_arg_patterns()
if len(arg_patterns) == len(patterns):
context = contexts.bc_context(rule)
try:
if all(map(lambda pat, arg:
pat.match_pattern(context, context,
arg, arg_context),
patterns,
arg_patterns)):
rule.rule_base.num_bc_rules_matched += 1
with engine.prove('action_facts', 'section', context,
(rule.pattern(0),)) \
as gen_1:
for x_1 in gen_1:
assert x_1 is None, \
"bc_action_rules.defend_1: got unexpected plan from when clause 1"
with engine.prove('action_facts', 'nearest_behind', context,
(rule.pattern(1),)) \
as gen_2:
for x_2 in gen_2:
assert x_2 is None, \
"bc_action_rules.defend_1: got unexpected plan from when clause 2"
if context.lookup_data('ans') >= -0.5:
rule.rule_base.num_bc_rule_successes += 1
yield
rule.rule_base.num_bc_rule_failures += 1
finally:
context.done()
def defend_2(rule, arg_patterns, arg_context):
engine = rule.rule_base.engine
patterns = rule.goal_arg_patterns()
if len(arg_patterns) == len(patterns):
context = contexts.bc_context(rule)
try:
if all(map(lambda pat, arg:
pat.match_pattern(context, context,
arg, arg_context),
patterns,
arg_patterns)):
rule.rule_base.num_bc_rules_matched += 1
with engine.prove('action_facts', 'section', context,
(rule.pattern(0),)) \
as gen_1:
for x_1 in gen_1:
assert x_1 is None, \
"bc_action_rules.defend_2: got unexpected plan from when clause 1"
with engine.prove('action_facts', 'nearest_behind', context,
(rule.pattern(1),)) \
as gen_2:
for x_2 in gen_2:
assert x_2 is None, \
"bc_action_rules.defend_2: got unexpected plan from when clause 2"
if context.lookup_data('ans') >= -1:
rule.rule_base.num_bc_rule_successes += 1
yield
rule.rule_base.num_bc_rule_failures += 1
finally:
context.done()
def populate(engine):
This_rule_base = engine.get_create('bc_action_rules')
bc_rule.bc_rule('speed_up', This_rule_base, 'select_action',
speed_up, None,
(pattern.pattern_literal('SpeedUp'),),
(),
(pattern.pattern_literal("Lower"),))
bc_rule.bc_rule('keep_speed', This_rule_base, 'select_action',
keep_speed, None,
(pattern.pattern_literal('KeepSpeed'),),
(),
(pattern.pattern_literal("Same"),))
bc_rule.bc_rule('brake', This_rule_base, 'select_action',
brake, None,
(pattern.pattern_literal('Brake'),),
(),
(pattern.pattern_literal("Higher"),))
bc_rule.bc_rule('turn', This_rule_base, 'select_action',
turn, None,
(pattern.pattern_literal('Turn'),),
(),
(pattern.pattern_literal("Curve"),))
bc_rule.bc_rule('pits_1', This_rule_base, 'select_action',
pits_1, None,
(pattern.pattern_literal('Pits'),),
(),
(pattern.pattern_literal(True),
pattern.pattern_literal("Rainy"),))
bc_rule.bc_rule('pits_2', This_rule_base, 'select_action',
pits_2, None,
(pattern.pattern_literal('Pits'),),
(),
(pattern.pattern_literal(False),
contexts.variable('ans_1'),
contexts.variable('ans_2'),))
bc_rule.bc_rule('attack_1', This_rule_base, 'select_action',
attack_1, None,
(pattern.pattern_literal('Attack'),),
(),
(pattern.pattern_literal("Curve"),
contexts.variable('ans'),))
bc_rule.bc_rule('attack_2', This_rule_base, 'select_action',
attack_2, None,
(pattern.pattern_literal('Attack'),),
(),
(pattern.pattern_literal("Straight"),
contexts.variable('ans'),))
bc_rule.bc_rule('defend_1', This_rule_base, 'select_action',
defend_1, None,
(pattern.pattern_literal('Defend'),),
(),
(pattern.pattern_literal("Curve"),
contexts.variable('ans'),))
bc_rule.bc_rule('defend_2', This_rule_base, 'select_action',
defend_2, None,
(pattern.pattern_literal('Defend'),),
(),
(pattern.pattern_literal("Straight"),
contexts.variable('ans'),))
Krb_filename = '..\\bc_action_rules.krb'
Krb_lineno_map = (
((14, 18), (5, 5)),
((20, 25), (7, 7)),
((38, 42), (11, 11)),
((44, 49), (13, 13)),
((62, 66), (17, 17)),
((68, 73), (19, 19)),
((86, 90), (23, 23)),
((92, 97), (25, 25)),
((110, 114), (29, 29)),
((116, 121), (31, 31)),
((122, 127), (32, 32)),
((140, 144), (36, 36)),
((146, 151), (38, 38)),
((152, 157), (39, 39)),
((158, 158), (40, 40)),
((159, 164), (41, 41)),
((165, 165), (42, 42)),
((178, 182), (46, 46)),
((184, 189), (48, 48)),
((190, 195), (49, 49)),
((196, 196), (50, 50)),
((209, 213), (54, 54)),
((215, 220), (56, 56)),
((221, 226), (57, 57)),
((227, 227), (58, 58)),
((240, 244), (62, 62)),
((246, 251), (64, 64)),
((252, 257), (65, 65)),
((258, 258), (66, 66)),
((271, 275), (70, 70)),
((277, 282), (72, 72)),
((283, 288), (73, 73)),
((289, 289), (74, 74)),
)
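# Hedged usage sketch (not part of the generated rule file): loading and
# querying this backward-chaining rule base through Pyke. The calls below
# follow the standard pyke.knowledge_engine API as I understand it; the
# asserted fact value is an illustrative placeholder that would normally
# come from the simulation.
#
# from pyke import knowledge_engine
#
# engine = knowledge_engine.engine(__file__)
# engine.assert_('action_facts', 'speed', ('Lower',))
# engine.activate('bc_action_rules')
# with engine.prove_goal('bc_action_rules.select_action($action)') as gen:
#     for vars, plan in gen:
#         print(vars['action'])  # expected to bind 'SpeedUp' for this fact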
| 37.831266
| 88
| 0.525187
| 1,817
| 15,246
| 4.141992
| 0.099064
| 0.054212
| 0.063779
| 0.059793
| 0.867127
| 0.839756
| 0.815573
| 0.810922
| 0.769732
| 0.769732
| 0
| 0.050031
| 0.360226
| 15,246
| 402
| 89
| 37.925373
| 0.72155
| 0.001377
| 0
| 0.702128
| 0
| 0
| 0.120213
| 0.028444
| 0
| 0
| 0
| 0
| 0.045213
| 1
| 0.029255
| false
| 0
| 0.00266
| 0
| 0.031915
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
97cf7f0cf11ff152db0c3622c3aa686713c9e4b7
| 21,471
|
py
|
Python
|
sdk/python/pulumi_pagerduty/user_contact_method.py
|
pulumi/pulumi-pagerduty
|
1c08849cda3d5fccf5eb9f615dc004b1f8f90555
|
[
"ECL-2.0",
"Apache-2.0"
] | 5
|
2020-05-27T08:18:35.000Z
|
2021-07-31T08:40:03.000Z
|
sdk/python/pulumi_pagerduty/user_contact_method.py
|
pulumi/pulumi-pagerduty
|
1c08849cda3d5fccf5eb9f615dc004b1f8f90555
|
[
"ECL-2.0",
"Apache-2.0"
] | 48
|
2020-05-26T10:59:40.000Z
|
2022-03-31T15:41:54.000Z
|
sdk/python/pulumi_pagerduty/user_contact_method.py
|
pulumi/pulumi-pagerduty
|
1c08849cda3d5fccf5eb9f615dc004b1f8f90555
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-05-26T17:51:56.000Z
|
2020-05-26T17:51:56.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['UserContactMethodArgs', 'UserContactMethod']
@pulumi.input_type
class UserContactMethodArgs:
def __init__(__self__, *,
address: pulumi.Input[str],
label: pulumi.Input[str],
type: pulumi.Input[str],
user_id: pulumi.Input[str],
country_code: Optional[pulumi.Input[int]] = None,
send_short_email: Optional[pulumi.Input[bool]] = None):
"""
The set of arguments for constructing a UserContactMethod resource.
:param pulumi.Input[str] address: The "address" to deliver to: `email`, `phone number`, etc., depending on the type.
:param pulumi.Input[str] label: The label (e.g., "Work", "Mobile", etc.).
:param pulumi.Input[str] type: The contact method type. May be (`email_contact_method`, `phone_contact_method`, `sms_contact_method`, `push_notification_contact_method`).
:param pulumi.Input[str] user_id: The ID of the user.
:param pulumi.Input[int] country_code: The 1-to-3 digit country calling code. Required when using `phone_contact_method` or `sms_contact_method`.
:param pulumi.Input[bool] send_short_email: Send an abbreviated email message instead of the standard email output.
"""
pulumi.set(__self__, "address", address)
pulumi.set(__self__, "label", label)
pulumi.set(__self__, "type", type)
pulumi.set(__self__, "user_id", user_id)
if country_code is not None:
pulumi.set(__self__, "country_code", country_code)
if send_short_email is not None:
pulumi.set(__self__, "send_short_email", send_short_email)
@property
@pulumi.getter
def address(self) -> pulumi.Input[str]:
"""
The "address" to deliver to: `email`, `phone number`, etc., depending on the type.
"""
return pulumi.get(self, "address")
@address.setter
def address(self, value: pulumi.Input[str]):
pulumi.set(self, "address", value)
@property
@pulumi.getter
def label(self) -> pulumi.Input[str]:
"""
The label (e.g., "Work", "Mobile", etc.).
"""
return pulumi.get(self, "label")
@label.setter
def label(self, value: pulumi.Input[str]):
pulumi.set(self, "label", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
The contact method type. May be (`email_contact_method`, `phone_contact_method`, `sms_contact_method`, `push_notification_contact_method`).
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="userId")
def user_id(self) -> pulumi.Input[str]:
"""
The ID of the user.
"""
return pulumi.get(self, "user_id")
@user_id.setter
def user_id(self, value: pulumi.Input[str]):
pulumi.set(self, "user_id", value)
@property
@pulumi.getter(name="countryCode")
def country_code(self) -> Optional[pulumi.Input[int]]:
"""
The 1-to-3 digit country calling code. Required when using `phone_contact_method` or `sms_contact_method`.
"""
return pulumi.get(self, "country_code")
@country_code.setter
def country_code(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "country_code", value)
@property
@pulumi.getter(name="sendShortEmail")
def send_short_email(self) -> Optional[pulumi.Input[bool]]:
"""
Send an abbreviated email message instead of the standard email output.
"""
return pulumi.get(self, "send_short_email")
@send_short_email.setter
def send_short_email(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "send_short_email", value)
@pulumi.input_type
class _UserContactMethodState:
def __init__(__self__, *,
address: Optional[pulumi.Input[str]] = None,
blacklisted: Optional[pulumi.Input[bool]] = None,
country_code: Optional[pulumi.Input[int]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
label: Optional[pulumi.Input[str]] = None,
send_short_email: Optional[pulumi.Input[bool]] = None,
type: Optional[pulumi.Input[str]] = None,
user_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering UserContactMethod resources.
:param pulumi.Input[str] address: The "address" to deliver to: `email`, `phone number`, etc., depending on the type.
:param pulumi.Input[bool] blacklisted: If true, this phone has been blacklisted by PagerDuty and no messages will be sent to it.
:param pulumi.Input[int] country_code: The 1-to-3 digit country calling code. Required when using `phone_contact_method` or `sms_contact_method`.
:param pulumi.Input[bool] enabled: If true, this phone is capable of receiving SMS messages.
:param pulumi.Input[str] label: The label (e.g., "Work", "Mobile", etc.).
:param pulumi.Input[bool] send_short_email: Send an abbreviated email message instead of the standard email output.
:param pulumi.Input[str] type: The contact method type. May be (`email_contact_method`, `phone_contact_method`, `sms_contact_method`, `push_notification_contact_method`).
:param pulumi.Input[str] user_id: The ID of the user.
"""
if address is not None:
pulumi.set(__self__, "address", address)
if blacklisted is not None:
pulumi.set(__self__, "blacklisted", blacklisted)
if country_code is not None:
pulumi.set(__self__, "country_code", country_code)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if label is not None:
pulumi.set(__self__, "label", label)
if send_short_email is not None:
pulumi.set(__self__, "send_short_email", send_short_email)
if type is not None:
pulumi.set(__self__, "type", type)
if user_id is not None:
pulumi.set(__self__, "user_id", user_id)
@property
@pulumi.getter
def address(self) -> Optional[pulumi.Input[str]]:
"""
The "address" to deliver to: `email`, `phone number`, etc., depending on the type.
"""
return pulumi.get(self, "address")
@address.setter
def address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "address", value)
@property
@pulumi.getter
def blacklisted(self) -> Optional[pulumi.Input[bool]]:
"""
If true, this phone has been blacklisted by PagerDuty and no messages will be sent to it.
"""
return pulumi.get(self, "blacklisted")
@blacklisted.setter
def blacklisted(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "blacklisted", value)
@property
@pulumi.getter(name="countryCode")
def country_code(self) -> Optional[pulumi.Input[int]]:
"""
The 1-to-3 digit country calling code. Required when using `phone_contact_method` or `sms_contact_method`.
"""
return pulumi.get(self, "country_code")
@country_code.setter
def country_code(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "country_code", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
If true, this phone is capable of receiving SMS messages.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter
def label(self) -> Optional[pulumi.Input[str]]:
"""
The label (e.g., "Work", "Mobile", etc.).
"""
return pulumi.get(self, "label")
@label.setter
def label(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "label", value)
@property
@pulumi.getter(name="sendShortEmail")
def send_short_email(self) -> Optional[pulumi.Input[bool]]:
"""
Send an abbreviated email message instead of the standard email output.
"""
return pulumi.get(self, "send_short_email")
@send_short_email.setter
def send_short_email(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "send_short_email", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
The contact method type. May be (`email_contact_method`, `phone_contact_method`, `sms_contact_method`, `push_notification_contact_method`).
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="userId")
def user_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the user.
"""
return pulumi.get(self, "user_id")
@user_id.setter
def user_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_id", value)
class UserContactMethod(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
address: Optional[pulumi.Input[str]] = None,
country_code: Optional[pulumi.Input[int]] = None,
label: Optional[pulumi.Input[str]] = None,
send_short_email: Optional[pulumi.Input[bool]] = None,
type: Optional[pulumi.Input[str]] = None,
user_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
A [contact method](https://developer.pagerduty.com/api-reference/reference/REST/openapiv3.json/paths/~1users~1%7Bid%7D~1contact_methods/get) is a contact method for a PagerDuty user (email, phone or SMS).
## Example Usage
```python
import pulumi
import pulumi_pagerduty as pagerduty
example = pagerduty.User("example",
email="125.greenholt.earline@graham.name",
teams=[pagerduty_team["example"]["id"]])
email = pagerduty.UserContactMethod("email",
user_id=example.id,
type="email_contact_method",
address="foo@bar.com",
label="Work")
phone = pagerduty.UserContactMethod("phone",
user_id=example.id,
type="phone_contact_method",
country_code=1,
address="2025550199",
label="Work")
sms = pagerduty.UserContactMethod("sms",
user_id=example.id,
type="sms_contact_method",
country_code=1,
address="2025550199",
label="Work")
```
## Import
Contact methods can be imported using the `user_id` and the `id`, e.g.
```sh
$ pulumi import pagerduty:index/userContactMethod:UserContactMethod main PLBP09X:PLBP09X
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] address: The "address" to deliver to: `email`, `phone number`, etc., depending on the type.
:param pulumi.Input[int] country_code: The 1-to-3 digit country calling code. Required when using `phone_contact_method` or `sms_contact_method`.
:param pulumi.Input[str] label: The label (e.g., "Work", "Mobile", etc.).
:param pulumi.Input[bool] send_short_email: Send an abbreviated email message instead of the standard email output.
:param pulumi.Input[str] type: The contact method type. May be (`email_contact_method`, `phone_contact_method`, `sms_contact_method`, `push_notification_contact_method`).
:param pulumi.Input[str] user_id: The ID of the user.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: UserContactMethodArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A [contact method](https://developer.pagerduty.com/api-reference/reference/REST/openapiv3.json/paths/~1users~1%7Bid%7D~1contact_methods/get) is a contact method for a PagerDuty user (email, phone or SMS).
## Example Usage
```python
import pulumi
import pulumi_pagerduty as pagerduty
example = pagerduty.User("example",
email="125.greenholt.earline@graham.name",
teams=[pagerduty_team["example"]["id"]])
email = pagerduty.UserContactMethod("email",
user_id=example.id,
type="email_contact_method",
address="foo@bar.com",
label="Work")
phone = pagerduty.UserContactMethod("phone",
user_id=example.id,
type="phone_contact_method",
country_code=1,
address="2025550199",
label="Work")
sms = pagerduty.UserContactMethod("sms",
user_id=example.id,
type="sms_contact_method",
country_code=1,
address="2025550199",
label="Work")
```
## Import
Contact methods can be imported using the `user_id` and the `id`, e.g.
```sh
$ pulumi import pagerduty:index/userContactMethod:UserContactMethod main PLBP09X:PLBP09X
```
:param str resource_name: The name of the resource.
:param UserContactMethodArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(UserContactMethodArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
address: Optional[pulumi.Input[str]] = None,
country_code: Optional[pulumi.Input[int]] = None,
label: Optional[pulumi.Input[str]] = None,
send_short_email: Optional[pulumi.Input[bool]] = None,
type: Optional[pulumi.Input[str]] = None,
user_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = UserContactMethodArgs.__new__(UserContactMethodArgs)
if address is None and not opts.urn:
raise TypeError("Missing required property 'address'")
__props__.__dict__["address"] = address
__props__.__dict__["country_code"] = country_code
if label is None and not opts.urn:
raise TypeError("Missing required property 'label'")
__props__.__dict__["label"] = label
__props__.__dict__["send_short_email"] = send_short_email
if type is None and not opts.urn:
raise TypeError("Missing required property 'type'")
__props__.__dict__["type"] = type
if user_id is None and not opts.urn:
raise TypeError("Missing required property 'user_id'")
__props__.__dict__["user_id"] = user_id
__props__.__dict__["blacklisted"] = None
__props__.__dict__["enabled"] = None
super(UserContactMethod, __self__).__init__(
'pagerduty:index/userContactMethod:UserContactMethod',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
address: Optional[pulumi.Input[str]] = None,
blacklisted: Optional[pulumi.Input[bool]] = None,
country_code: Optional[pulumi.Input[int]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
label: Optional[pulumi.Input[str]] = None,
send_short_email: Optional[pulumi.Input[bool]] = None,
type: Optional[pulumi.Input[str]] = None,
user_id: Optional[pulumi.Input[str]] = None) -> 'UserContactMethod':
"""
Get an existing UserContactMethod resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] address: The "address" to deliver to: `email`, `phone number`, etc., depending on the type.
:param pulumi.Input[bool] blacklisted: If true, this phone has been blacklisted by PagerDuty and no messages will be sent to it.
:param pulumi.Input[int] country_code: The 1-to-3 digit country calling code. Required when using `phone_contact_method` or `sms_contact_method`.
:param pulumi.Input[bool] enabled: If true, this phone is capable of receiving SMS messages.
:param pulumi.Input[str] label: The label (e.g., "Work", "Mobile", etc.).
:param pulumi.Input[bool] send_short_email: Send an abbreviated email message instead of the standard email output.
:param pulumi.Input[str] type: The contact method type. May be (`email_contact_method`, `phone_contact_method`, `sms_contact_method`, `push_notification_contact_method`).
:param pulumi.Input[str] user_id: The ID of the user.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _UserContactMethodState.__new__(_UserContactMethodState)
__props__.__dict__["address"] = address
__props__.__dict__["blacklisted"] = blacklisted
__props__.__dict__["country_code"] = country_code
__props__.__dict__["enabled"] = enabled
__props__.__dict__["label"] = label
__props__.__dict__["send_short_email"] = send_short_email
__props__.__dict__["type"] = type
__props__.__dict__["user_id"] = user_id
return UserContactMethod(resource_name, opts=opts, __props__=__props__)
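# Hedged usage sketch (not documented here by the provider): adopting an
# already-existing contact method into Pulumi state with `get`. The
# `user_id:id` format mirrors the import example in the class docstring;
# the concrete ids are placeholders.
#
# existing = UserContactMethod.get("imported-contact", id="PLBP09X:PLBP09X")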
@property
@pulumi.getter
def address(self) -> pulumi.Output[str]:
"""
The "address" to deliver to: `email`, `phone number`, etc., depending on the type.
"""
return pulumi.get(self, "address")
@property
@pulumi.getter
def blacklisted(self) -> pulumi.Output[bool]:
"""
If true, this phone has been blacklisted by PagerDuty and no messages will be sent to it.
"""
return pulumi.get(self, "blacklisted")
@property
@pulumi.getter(name="countryCode")
def country_code(self) -> pulumi.Output[Optional[int]]:
"""
The 1-to-3 digit country calling code. Required when using `phone_contact_method` or `sms_contact_method`.
"""
return pulumi.get(self, "country_code")
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[bool]:
"""
If true, this phone is capable of receiving SMS messages.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def label(self) -> pulumi.Output[str]:
"""
The label (e.g., "Work", "Mobile", etc.).
"""
return pulumi.get(self, "label")
@property
@pulumi.getter(name="sendShortEmail")
def send_short_email(self) -> pulumi.Output[Optional[bool]]:
"""
Send an abbreviated email message instead of the standard email output.
"""
return pulumi.get(self, "send_short_email")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The contact method type. May be (`email_contact_method`, `phone_contact_method`, `sms_contact_method`, `push_notification_contact_method`).
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="userId")
def user_id(self) -> pulumi.Output[str]:
"""
The ID of the user.
"""
return pulumi.get(self, "user_id")
| 41.691262
| 212
| 0.628615
| 2,536
| 21,471
| 5.095426
| 0.081625
| 0.080019
| 0.058505
| 0.040861
| 0.839266
| 0.817598
| 0.782077
| 0.7512
| 0.744776
| 0.717613
| 0
| 0.005336
| 0.258022
| 21,471
| 514
| 213
| 41.772374
| 0.805787
| 0.372176
| 0
| 0.635688
| 1
| 0
| 0.08759
| 0.005961
| 0
| 0
| 0
| 0
| 0
| 1
| 0.159851
| false
| 0.003717
| 0.018587
| 0
| 0.275093
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c1015a13d728f5c9661eb733aa546974ce2c50d4
| 4,564
|
py
|
Python
|
resources/imports.py
|
axonepro/sdk-ooti
|
146ba758f571352d02daa56349e8b3affd8ca5a9
|
[
"Unlicense"
] | 1
|
2021-03-13T16:04:54.000Z
|
2021-03-13T16:04:54.000Z
|
resources/imports.py
|
axonepro/sdk-ooti
|
146ba758f571352d02daa56349e8b3affd8ca5a9
|
[
"Unlicense"
] | 7
|
2021-07-21T12:42:39.000Z
|
2022-01-06T10:34:04.000Z
|
resources/imports.py
|
axonepro/sdk-ooti
|
146ba758f571352d02daa56349e8b3affd8ca5a9
|
[
"Unlicense"
] | 2
|
2021-06-22T08:10:48.000Z
|
2021-09-01T09:16:41.000Z
|
import requests
import json
from .helper import Helper
"""
- ERROR 403 : GET v1/imports/counts/
- ERROR 400 ("Type is required"): GET & POST v1/imports/import/{org_pk}/
- POST on v1/imports/{id}/map-columns/ ?
"""
class Imports(Helper):
def __init__(self, base_url, org_pk, teams_pk, access_token, _csrf_token, headers, pagination):
super().__init__(base_url, org_pk, teams_pk, access_token, _csrf_token, headers, pagination)
def get_imports_count(self): # Error 403
""" Get the number of imports """
route = 'v1/imports/counts/'
response = self.process_request(requests, 'GET', self.base_url, route, self.headers, None, None)
return self.process_response(response)
def get_exports_list(self, page=1):
""" Get the list of exports """
route = 'v1/imports/export/list/{0}/?page_size={1}&page={2}'.format(self.org_pk, self.pagination, page)
response = self.process_request(requests, 'GET', self.base_url, route, self.headers, None, None)
return self.process_response(response, True)
def create_export(self, data):
""" Get imports list
Keywords arguments
data -- data of the new import to be created:
{
"orguser": orguser_pk,
"team": team_pk,
"project": project_id,
"type": "string",
"items": 0,
"started_processing_at": "string",
"ended_processing_at": "string",
"include_documents": true
}
"""
route = 'v1/imports/export/list/{0}/'.format(self.org_pk)
response = self.process_request(requests, 'POST', self.base_url, route, self.headers, None, json.dumps(data))
return self.process_response(response)
def get_export_details(self, export_pk):
""" Get the export details
Keywords arguments:
export_pk -- pk of the export
"""
route = 'v1/imports/export/{0}/'.format(export_pk)
response = self.process_request(requests, 'GET', self.base_url, route, self.headers, None, None)
return self.process_response(response)
def delete_export(self, export_pk):
""" Delete the export
Keywords arguments:
export_pk -- pk of the export
"""
route = 'v1/imports/export/{0}/'.format(export_pk)
response = self.process_request(requests, 'DELETE', self.base_url, route, self.headers, None, None)
return self.process_response(response)
# TODO GET on /api/v1/imports/import/{org_pk}/
# TODO POST on /api/v1/imports/import/{org_pk}/
def get_imports_list(self, page=1):
""" Get the list of imports """
route = 'v1/imports/list/{0}/?page_size={1}&page={2}'.format(self.org_pk, self.pagination, page)
response = self.process_request(requests, 'GET', self.base_url, route, self.headers, None, None)
return self.process_response(response, True)
def create_import(self, data):
""" Create a new import
Keywords arguments:
data -- data of the new import to be created:
{
"data": {
}
"type":
}
"""
route = 'v1/imports/list/{0}/'.format(self.org_pk)
response = self.process_request(requests, 'POST', self.base_url, route, self.headers, None, json.dumps(data))
return self.process_response(response)
def get_import_details(self, id):
""" Get the import details
Keywords arguments:
id -- id of the import
"""
route = 'v1/imports/{0}/'.format(id)
response = self.process_request(requests, 'GET', self.base_url, route, self.headers, None, None)
return self.process_response(response)
def update_import_details(self, id, data):
""" Update the import details
Keywords arguments:
id -- id of the import
data -- content of the update
"""
route = 'v1/imports/{0}/'.format(id)
response = self.process_request(requests, 'PATCH', self.base_url, route, self.headers, None, json.dumps(data))
return self.process_response(response)
def delete_import(self, id):
""" Delete the import
Keywords arguments:
id -- id of the import
"""
route = 'v1/imports/{0}/'.format(id)
response = self.process_request(requests, 'DELETE', self.base_url, route, self.headers, None, None)
return self.process_response(response)
# TODO POST on /api/v1/imports/{id}/map-columns/
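# Hedged usage sketch (not part of the SDK): constructing the Imports helper
# and calling a couple of its methods. The constructor arguments follow the
# signature above; every value is an illustrative placeholder.
#
# imports_api = Imports(base_url, org_pk, teams_pk, access_token,
#                       _csrf_token, headers, pagination)
# first_page = imports_api.get_imports_list(page=1)
# details = imports_api.get_export_details(export_pk)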
| 33.558824
| 118
| 0.618536
| 574
| 4,564
| 4.763066
| 0.144599
| 0.080468
| 0.044258
| 0.095099
| 0.779444
| 0.738844
| 0.714704
| 0.690929
| 0.672641
| 0.672641
| 0
| 0.012041
| 0.253944
| 4,564
| 136
| 119
| 33.558824
| 0.790896
| 0.225679
| 0
| 0.521739
| 0
| 0
| 0.095381
| 0.054503
| 0
| 0
| 0
| 0.014706
| 0
| 1
| 0.23913
| false
| 0
| 0.434783
| 0
| 0.913043
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
c149f3d3a39effbcc657eada0edcaf5f130224fe
| 7,488
|
py
|
Python
|
rtamt/spec/stl/dense_time/offline.py
|
quinn-thibeault/rtamt
|
7258bd64176a113bd28dc749ccbeb204ca818370
|
[
"BSD-3-Clause"
] | null | null | null |
rtamt/spec/stl/dense_time/offline.py
|
quinn-thibeault/rtamt
|
7258bd64176a113bd28dc749ccbeb204ca818370
|
[
"BSD-3-Clause"
] | null | null | null |
rtamt/spec/stl/dense_time/offline.py
|
quinn-thibeault/rtamt
|
7258bd64176a113bd28dc749ccbeb204ca818370
|
[
"BSD-3-Clause"
] | 1
|
2022-01-28T15:59:05.000Z
|
2022-01-28T15:59:05.000Z
|
import operator
from rtamt.spec.stl.discrete_time.visitor import STLVisitor
class STLCTOffline(STLVisitor):
def __init__(self, spec):
self.spec = spec
def offline(self, element, args):
sample = self.visit(element, args)
out_sample = self.spec.var_object_dict[self.spec.out_var]
if self.spec.out_var_field:
setattr(out_sample, self.spec.out_var_field, sample)
else:
out_sample = sample
return out_sample
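# Each visit* method below recursively evaluates the operand subtree(s) and
# delegates the actual offline (dense-time) evaluation to the corresponding
# element.node.offline(...) call: unary operators pass a single sample,
# binary operators pass two.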
def visitPrevious(self, element, args):
in_sample_1 = self.visit(element.children[0], args)
in_sample_2 = self.visit(element.children[1], args)
out_sample = element.node.offline(in_sample_1, in_sample_2)
return out_sample
def visitNext(self, element, args):
in_sample_1 = self.visit(element.children[0], args)
in_sample_2 = self.visit(element.children[1], args)
out_sample = element.node.offline(in_sample_1, in_sample_2)
return out_sample
def visitPredicate(self, element, args):
in_sample_1 = self.visit(element.children[0], args)
in_sample_2 = self.visit(element.children[1], args)
out_sample = element.node.offline(in_sample_1, in_sample_2)
return out_sample
def visitVariable(self, element, args):
var = self.spec.var_object_dict[element.var]
if element.field:
value = operator.attrgetter(element.field)(var)
else:
value = var
return value
def visitConstant(self, element, args):
out_sample = element.node.offline()
return out_sample
def visitAbs(self, element, args):
in_sample = self.visit(element.children[0], args)
out_sample = element.node.offline(in_sample)
return out_sample
def visitAddition(self, element, args):
in_sample_1 = self.visit(element.children[0], args)
in_sample_2 = self.visit(element.children[1], args)
out_sample = element.node.offline(in_sample_1, in_sample_2)
return out_sample
def visitSubtraction(self, element, args):
in_sample_1 = self.visit(element.children[0], args)
in_sample_2 = self.visit(element.children[1], args)
out_sample = element.node.offline(in_sample_1, in_sample_2)
return out_sample
def visitMultiplication(self, element, args):
in_sample_1 = self.visit(element.children[0], args)
in_sample_2 = self.visit(element.children[1], args)
out_sample = element.node.offline(in_sample_1, in_sample_2)
return out_sample
def visitDivision(self, element, args):
in_sample_1 = self.visit(element.children[0], args)
in_sample_2 = self.visit(element.children[1], args)
out_sample = element.node.offline(in_sample_1, in_sample_2)
return out_sample
def visitNot(self, element, args):
in_sample = self.visit(element.children[0], args)
out_sample = element.node.offline(in_sample)
return out_sample
def visitAnd(self, element, args):
in_sample_1 = self.visit(element.children[0], args)
in_sample_2 = self.visit(element.children[1], args)
out_sample = element.node.offline(in_sample_1, in_sample_2)
return out_sample
def visitOr(self, element, args):
in_sample_1 = self.visit(element.children[0], args)
in_sample_2 = self.visit(element.children[1], args)
out_sample = element.node.offline(in_sample_1, in_sample_2)
return out_sample
def visitImplies(self, element, args):
in_sample_1 = self.visit(element.children[0], args)
in_sample_2 = self.visit(element.children[1], args)
out_sample = element.node.offline(in_sample_1, in_sample_2)
return out_sample
def visitIff(self, element, args):
in_sample_1 = self.visit(element.children[0], args)
in_sample_2 = self.visit(element.children[1], args)
out_sample = element.node.offline(in_sample_1, in_sample_2)
return out_sample
def visitXor(self, element, args):
in_sample_1 = self.visit(element.children[0], args)
in_sample_2 = self.visit(element.children[1], args)
out_sample = element.node.offline(in_sample_1, in_sample_2)
return out_sample
def visitEventually(self, element, args):
in_sample = self.visit(element.children[0], args)
out_sample = element.node.offline(in_sample)
return out_sample
def visitTimedEventually(self, element, args):
in_sample = self.visit(element.children[0], args)
out_sample = element.node.offline(in_sample)
return out_sample
def visitAlways(self, element, args):
in_sample = self.visit(element.children[0], args)
out_sample = element.node.offline(in_sample)
return out_sample
def visitTimedAlways(self, element, args):
in_sample = self.visit(element.children[0], args)
out_sample = element.node.offline(in_sample)
return out_sample
def visitUntil(self, element, args):
in_sample_1 = self.visit(element.children[0], args)
in_sample_2 = self.visit(element.children[1], args)
out_sample = element.node.offline(in_sample_1, in_sample_2)
return out_sample
def visitTimedUntil(self, element, args):
in_sample_1 = self.visit(element.children[0], args)
in_sample_2 = self.visit(element.children[1], args)
out_sample = element.node.offline(in_sample_1, in_sample_2)
return out_sample
def visitOnce(self, element, args):
in_sample = self.visit(element.children[0], args)
out_sample = element.node.offline(in_sample)
return out_sample
def visitTimedOnce(self, element, args):
in_sample = self.visit(element.children[0], args)
out_sample = element.node.offline(in_sample)
return out_sample
def visitHistorically(self, element, args):
in_sample = self.visit(element.children[0], args)
out_sample = element.node.offline(in_sample)
return out_sample
def visitTimedHistorically(self, element, args):
in_sample = self.visit(element.children[0], args)
out_sample = element.node.offline(in_sample)
return out_sample
def visitSince(self, element, args):
in_sample_1 = self.visit(element.children[0], args)
in_sample_2 = self.visit(element.children[1], args)
out_sample = element.node.offline(in_sample_1, in_sample_2)
return out_sample
def visitTimedSince(self, element, args):
in_sample_1 = self.visit(element.children[0], args)
in_sample_2 = self.visit(element.children[1], args)
out_sample = element.node.offline(in_sample_1, in_sample_2)
return out_sample
def visitTimedPrecedes(self, element, args):
in_sample_1 = self.visit(element.children[0], args)
in_sample_2 = self.visit(element.children[1], args)
out_sample = element.node.offline(in_sample_1, in_sample_2)
return out_sample
def visitRise(self, element, args):
in_sample = self.visit(element.children[0], args)
out_sample = element.node.offline(in_sample)
return out_sample
def visitFall(self, element, args):
in_sample = self.visit(element.children[0], args)
out_sample = element.node.offline(in_sample)
return out_sample
def visitDefault(self, element, args):
return None
| 37.818182
| 67
| 0.673745
| 1,016
| 7,488
| 4.731299
| 0.067913
| 0.15311
| 0.156439
| 0.229665
| 0.834824
| 0.813189
| 0.80674
| 0.80674
| 0.80674
| 0.80674
| 0
| 0.019662
| 0.225694
| 7,488
| 198
| 68
| 37.818182
| 0.809417
| 0
| 0
| 0.683544
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.21519
| false
| 0
| 0.012658
| 0.006329
| 0.443038
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c14d2439a432ce8b2c9686488c623b64871efb64
| 13,687
|
py
|
Python
|
tests/scanner/audit/role_rules_engine_test.py
|
aarontp/forseti-security
|
6d03c14114468ff6170846392b7d14a0619fa9f0
|
[
"Apache-2.0"
] | 921
|
2017-03-09T01:01:24.000Z
|
2019-04-16T11:38:25.000Z
|
tests/scanner/audit/role_rules_engine_test.py
|
aarontp/forseti-security
|
6d03c14114468ff6170846392b7d14a0619fa9f0
|
[
"Apache-2.0"
] | 1,996
|
2017-03-03T22:07:50.000Z
|
2019-04-17T00:02:28.000Z
|
tests/scanner/audit/role_rules_engine_test.py
|
aarontp/forseti-security
|
6d03c14114468ff6170846392b7d14a0619fa9f0
|
[
"Apache-2.0"
] | 241
|
2017-03-09T01:00:04.000Z
|
2019-04-15T18:53:35.000Z
|
# Copyright 2018 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the RoleRulesEngine."""
import unittest.mock as mock
import tempfile
import unittest
import yaml
from google.cloud.forseti.scanner.audit.errors import InvalidRulesSchemaError
from google.cloud.forseti.scanner.audit import role_rules_engine as rre
from google.cloud.forseti.scanner.scanners import role_scanner as rrs
from tests.scanner.test_data import fake_role_scanner_data as frsd
from tests.unittest_utils import get_datafile_path
from tests.unittest_utils import ForsetiTestCase
def get_rules_engine_with_rule(rule):
"""Create a rule engine based on a yaml file string"""
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(rule.encode())
f.flush()
rules_engine = rre.RoleRulesEngine(
rules_file_path=f.name)
rules_engine.build_rule_book()
return rules_engine
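# Note on the helper above: NamedTemporaryFile gives the engine a real path to
# load from, and flush() ensures the YAML is on disk before build_rule_book()
# reads it.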
class RoleRulesEngineTest(ForsetiTestCase):
"""Tests for the RoleRulesEngine."""
def setUp(self):
"""Set up."""
def test_invalid_rule_with_no_name(self):
"""Test that a rule without name cannot be created"""
yaml_str_invalid_rule="""
rules:
- role_name: "forsetiBigqueryViewer"
permissions:
- "bigquery.datasets.get"
- "bigquery.tables.get"
- "bigquery.tables.list"
resource:
- type: project
resource_ids: ['*']
"""
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(yaml_str_invalid_rule.encode())
f.flush()
rules_local_path = get_datafile_path(__file__, f.name)
with self.assertRaises(InvalidRulesSchemaError):
self.scanner = rrs.RoleScanner(
{}, {}, mock.MagicMock(), '', '', rules_local_path)
def test_invalid_rule_with_no_role_name(self):
"""Test that a rule without role_name cannot be created"""
yaml_str_invalid_rule="""
rules:
- name: "forsetiBigqueryViewer rule"
permissions:
- "bigquery.datasets.get"
- "bigquery.tables.get"
- "bigquery.tables.list"
resource:
- type: project
resource_ids: ['*']
"""
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(yaml_str_invalid_rule.encode())
f.flush()
rules_local_path = get_datafile_path(__file__, f.name)
with self.assertRaises(InvalidRulesSchemaError):
self.scanner = rrs.RoleScanner(
{}, {}, mock.MagicMock(), '', '', rules_local_path)
def test_invalid_rule_with_no_permissions(self):
"""Test that a rule without permissions cannot be created"""
yaml_str_invalid_rule="""
rules:
- role_name: "forsetiBigqueryViewer"
name: "forsetiBigqueryViewer rule"
resource:
- type: project
resource_ids: ['*']
"""
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(yaml_str_invalid_rule.encode())
f.flush()
rules_local_path = get_datafile_path(__file__, f.name)
with self.assertRaises(InvalidRulesSchemaError):
self.scanner = rrs.RoleScanner(
{}, {}, mock.MagicMock(), '', '', rules_local_path)
def test_invalid_rule_with_no_resource(self):
"""Test that a rule without resource cannot be created"""
yaml_str_invalid_rule="""
rules:
- role_name: "forsetiBigqueryViewer"
name: "forsetiBigqueryViewer rule"
permissions:
- "bigquery.datasets.get"
- "bigquery.tables.get"
- "bigquery.tables.list"
"""
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(yaml_str_invalid_rule.encode())
f.flush()
rules_local_path = get_datafile_path(__file__, f.name)
with self.assertRaises(InvalidRulesSchemaError):
self.scanner = rrs.RoleScanner(
{}, {}, mock.MagicMock(), '', '', rules_local_path)
def test_invalid_rule_with_no_resource_type(self):
"""Test that a rule without resource:type cannot be created"""
yaml_str_invalid_rule="""
rules:
- role_name: "forsetiBigqueryViewer"
name: "forsetiBigqueryViewer rule"
permissions:
- "bigquery.datasets.get"
- "bigquery.tables.get"
- "bigquery.tables.list"
resource:
- resource_ids: ['*']
"""
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(yaml_str_invalid_rule.encode())
f.flush()
rules_local_path = get_datafile_path(__file__, f.name)
with self.assertRaises(InvalidRulesSchemaError):
self.scanner = rrs.RoleScanner(
{}, {}, mock.MagicMock(), '', '', rules_local_path)
def test_invalid_rule_with_no_resource_id(self):
"""Test that a rule without resource:resource_ids cannot be created"""
yaml_str_invalid_rule="""
rules:
- role_name: "forsetiBigqueryViewer"
name: "forsetiBigqueryViewer rule"
permissions:
- "bigquery.datasets.get"
- "bigquery.tables.get"
- "bigquery.tables.list"
resource:
- type: project
"""
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(yaml_str_invalid_rule.encode())
f.flush()
rules_local_path = get_datafile_path(__file__, f.name)
with self.assertRaises(InvalidRulesSchemaError):
self.scanner = rrs.RoleScanner(
{}, {}, mock.MagicMock(), '', '', rules_local_path)
yaml_str_multiple_rules_on_projects = """
rules:
- role_name: "forsetiBigqueryViewer"
name: "forsetiBigqueryViewer rule"
permissions:
- "bigquery.datasets.get"
- "bigquery.tables.get"
- "bigquery.tables.list"
resource:
- type: project
resource_ids: ['*']
- role_name: "forsetiCloudsqlViewer"
name: "forsetiCloudsqlViewer rule backupRuns"
permissions:
- "cloudsql.backupRuns.get"
- "cloudsql.backupRuns.list"
resource:
- type: project
resource_ids: ['def-project-1']
- role_name: "forsetiCloudsqlViewer"
name: "forsetiCloudsqlViewer rule databases"
permissions:
- "cloudsql.databases.get"
- "cloudsql.databases.list"
resource:
- type: project
resource_ids: ['def-project-2']
"""
def test_no_violation_for_rules_on_wildcard(self):
"""Role is a correct forsetiBigqueryViewer that should have no violation."""
rules_engine = get_rules_engine_with_rule(RoleRulesEngineTest.yaml_str_multiple_rules_on_projects)
self.assertTrue(1 <= len(rules_engine.rule_book.rules_map))
data_creater = frsd.FakeRoleDataCreater('forsetiBigqueryViewer',
["bigquery.datasets.get",
"bigquery.tables.get",
"bigquery.tables.list"], frsd.PROJECT1)
fake_role = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_role))
self.assertEqual(got_violations, [])
def test_violations_for_rules_on_wildcard(self):
"""Role is a incorrect forsetiBigqueryViewer that should have violations."""
rules_engine = get_rules_engine_with_rule(RoleRulesEngineTest.yaml_str_multiple_rules_on_projects)
self.assertTrue(1 <= len(rules_engine.rule_book.rules_map))
data_creater = frsd.FakeRoleDataCreater('forsetiBigqueryViewer',
["bigquery.datasets.get",
"bigquery.tables.list"], frsd.PROJECT1)
fake_role = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_role))
self.assertEqual(got_violations, [frsd.generate_violation(fake_role, 0, 'forsetiBigqueryViewer rule')])
def test_no_violation_for_rules(self):
"""Role is a correct forsetiCloudsqlViewer(project 1) that should have no violation."""
rules_engine = get_rules_engine_with_rule(RoleRulesEngineTest.yaml_str_multiple_rules_on_projects)
self.assertTrue(1 <= len(rules_engine.rule_book.rules_map))
data_creater = frsd.FakeRoleDataCreater('forsetiCloudsqlViewer',
["cloudsql.backupRuns.get",
"cloudsql.backupRuns.list"], frsd.PROJECT1)
fake_role = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_role))
self.assertEqual(got_violations, [])
def test_violations_for_rules(self):
"""Role is a incorrect forsetiCloudsqlViewer(project 1) that should have violations."""
rules_engine = get_rules_engine_with_rule(RoleRulesEngineTest.yaml_str_multiple_rules_on_projects)
self.assertTrue(1 <= len(rules_engine.rule_book.rules_map))
data_creater = frsd.FakeRoleDataCreater('forsetiCloudsqlViewer',
["cloudsql.databases.get",
"cloudsql.databases.list"], frsd.PROJECT1)
fake_role = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_role))
self.assertEqual(got_violations, [frsd.generate_violation(fake_role, 1, 'forsetiCloudsqlViewer rule backupRuns')])
yaml_str_multiple_rules_on_organizations = """
rules:
- role_name: "forsetiBigqueryViewer"
name: "forsetiBigqueryViewer rule"
permissions:
- "bigquery.datasets.get"
- "bigquery.tables.get"
- "bigquery.tables.list"
resource:
- type: organization
resource_ids: ['123456']
"""
def test_no_violation_for_rules_on_org(self):
"""Role is a correct forsetiBigqueryViewer that should have no violation."""
rules_engine = get_rules_engine_with_rule(RoleRulesEngineTest.yaml_str_multiple_rules_on_organizations)
self.assertTrue(1 <= len(rules_engine.rule_book.rules_map))
data_creater = frsd.FakeRoleDataCreater('forsetiBigqueryViewer',
["bigquery.datasets.get",
"bigquery.tables.get",
"bigquery.tables.list"], frsd.PROJECT1)
fake_role = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_role))
self.assertEqual(got_violations, [])
def test_violations_for_rules_on_org(self):
"""Role is a incorrect forsetiBigqueryViewer that should have violations."""
rules_engine = get_rules_engine_with_rule(RoleRulesEngineTest.yaml_str_multiple_rules_on_organizations)
self.assertTrue(1 <= len(rules_engine.rule_book.rules_map))
data_creater = frsd.FakeRoleDataCreater('forsetiBigqueryViewer',
["bigquery.datasets.get",
"bigquery.tables.list"], frsd.PROJECT1)
fake_role = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_role))
self.assertEqual(got_violations, [frsd.generate_violation(fake_role, 0, 'forsetiBigqueryViewer rule')])
yaml_str_multiple_resource_ids_rules = """
rules:
- role_name: "forsetiBigqueryViewer"
name: "forsetiBigqueryViewer rule"
permissions:
- "bigquery.datasets.get"
- "bigquery.tables.get"
- "bigquery.tables.list"
resource:
- type: project
resource_ids: ['def-project-1', 'def-project-2']
"""
def test_no_violation_for_rules_with_multi_resource_ids(self):
"""Role is a correct forsetiBigqueryViewer that should have no violation."""
rules_engine = get_rules_engine_with_rule(RoleRulesEngineTest.yaml_str_multiple_resource_ids_rules)
self.assertTrue(1 <= len(rules_engine.rule_book.rules_map))
data_creater = frsd.FakeRoleDataCreater('forsetiBigqueryViewer',
["bigquery.datasets.get",
"bigquery.tables.get",
"bigquery.tables.list"], frsd.PROJECT1)
fake_role = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_role))
self.assertEqual(got_violations, [])
def test_role_has_no_rule(self):
"""Test the role that is not covered by any rule."""
rules_engine = get_rules_engine_with_rule(RoleRulesEngineTest.yaml_str_multiple_resource_ids_rules)
self.assertTrue(1 <= len(rules_engine.rule_book.rules_map))
data_creater = frsd.FakeRoleDataCreater('someRoleName',
["bigquery.someres.get",
"bigquery.someres.list"], frsd.PROJECT1)
fake_role = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_role))
self.assertEqual(got_violations, [])
if __name__ == '__main__':
unittest.main()
| 39.787791
| 122
| 0.650763
| 1,462
| 13,687
| 5.80643
| 0.117647
| 0.047944
| 0.048062
| 0.041348
| 0.853929
| 0.836965
| 0.76982
| 0.749794
| 0.741783
| 0.715279
| 0
| 0.003799
| 0.249872
| 13,687
| 343
| 123
| 39.90379
| 0.823025
| 0.116753
| 0
| 0.775194
| 0
| 0
| 0.265226
| 0.125647
| 0
| 0
| 0
| 0
| 0.085271
| 1
| 0.062016
| false
| 0
| 0.03876
| 0
| 0.120155
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c177ef4aef49a6224ed53585ba0e8701ee295049
| 7,450
|
py
|
Python
|
python-opcua/opcua/server/standard_address_space/standard_address_space_part19.py
|
ssriblo/ionic-smarthome-test-1
|
060bc247e0b8295d6cd869d90b364756515cfc19
|
[
"MIT"
] | 1
|
2020-12-18T15:18:19.000Z
|
2020-12-18T15:18:19.000Z
|
python-opcua/opcua/server/standard_address_space/standard_address_space_part19.py
|
ssriblo/ionic-smarthome-test-1
|
060bc247e0b8295d6cd869d90b364756515cfc19
|
[
"MIT"
] | 42
|
2020-08-20T04:01:12.000Z
|
2021-01-09T18:50:21.000Z
|
python-opcua/opcua/server/standard_address_space/standard_address_space_part19.py
|
ssriblo/ionic-smarthome-test-1
|
060bc247e0b8295d6cd869d90b364756515cfc19
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
DO NOT EDIT THIS FILE!
It is automatically generated from opcfoundation.org schemas.
Date:2020-06-19 17:31:10.404212
"""
import datetime
from dateutil.tz import tzutc
from opcua import ua
from opcua.ua import NodeId, QualifiedName, NumericNodeId, StringNodeId, GuidNodeId
from opcua.ua import NodeClass, LocalizedText
def create_standard_address_space_Part19(server):
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(19077, 0)
node.BrowseName = QualifiedName('MultiStateDictionaryEntryDiscreteBaseType', 0)
node.NodeClass = NodeClass.VariableType
node.ParentNodeId = NumericNodeId(11238, 0)
node.ReferenceTypeId = NumericNodeId(45, 0)
attrs = ua.VariableTypeAttributes()
attrs.DisplayName = LocalizedText("MultiStateDictionaryEntryDiscreteBaseType")
attrs.DisplayName = LocalizedText("MultiStateDictionaryEntryDiscreteBaseType")
attrs.DataType = ua.NodeId(ua.ObjectIds.Number)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(19077, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(19082, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(19077, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(19083, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(45, 0)
ref.SourceNodeId = NumericNodeId(19077, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11238, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(19082, 0)
node.BrowseName = QualifiedName('EnumDictionaryEntries', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(19077, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("EnumDictionaryEntries")
attrs.DataType = ua.NodeId(ua.ObjectIds.NodeId)
attrs.ValueRank = 2
attrs.ArrayDimensions = [0, 0]
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(19082, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(19082, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(19082, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(19077, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(19083, 0)
node.BrowseName = QualifiedName('ValueAsDictionaryEntries', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(19077, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("ValueAsDictionaryEntries")
attrs.DataType = ua.NodeId(ua.ObjectIds.NodeId)
attrs.ValueRank = 1
attrs.ArrayDimensions = [0]
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(19083, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(19083, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(80, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(19083, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(19077, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(19084, 0)
node.BrowseName = QualifiedName('MultiStateDictionaryEntryDiscreteType', 0)
node.NodeClass = NodeClass.VariableType
node.ParentNodeId = NumericNodeId(19077, 0)
node.ReferenceTypeId = NumericNodeId(45, 0)
attrs = ua.VariableTypeAttributes()
attrs.DisplayName = LocalizedText("MultiStateDictionaryEntryDiscreteType")
attrs.DisplayName = LocalizedText("MultiStateDictionaryEntryDiscreteType")
attrs.DataType = ua.NodeId(ua.ObjectIds.Number)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(19084, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(19090, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(45, 0)
ref.SourceNodeId = NumericNodeId(19084, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(19077, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(19090, 0)
node.BrowseName = QualifiedName('ValueAsDictionaryEntries', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(19084, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("ValueAsDictionaryEntries")
attrs.DataType = ua.NodeId(ua.ObjectIds.NodeId)
attrs.ValueRank = 1
attrs.ArrayDimensions = [0]
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(19090, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(19090, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(19090, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(19084, 0)
refs.append(ref)
server.add_references(refs)
| 37.437186
| 83
| 0.720268
| 762
| 7,450
| 7.023622
| 0.127297
| 0.020927
| 0.057549
| 0.065396
| 0.888079
| 0.848094
| 0.838191
| 0.838191
| 0.806428
| 0.796712
| 0
| 0.051094
| 0.177718
| 7,450
| 198
| 84
| 37.626263
| 0.82256
| 0.018792
| 0
| 0.862637
| 1
| 0
| 0.050952
| 0.050952
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005495
| false
| 0
| 0.027473
| 0
| 0.032967
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
a9b8c81a8c9f7d7d411ab14f9733291279674378
| 19,134
|
py
|
Python
|
NTK/models.py
|
hendrydong/NTK-and-MF-examples
|
054bd28bc8da84b6db43fce2b9fafc5c8f8f592a
|
[
"MIT"
] | 12
|
2021-01-03T02:18:49.000Z
|
2022-03-10T21:38:35.000Z
|
NTK/models.py
|
hendrydong/NTK-and-MF-examples
|
054bd28bc8da84b6db43fce2b9fafc5c8f8f592a
|
[
"MIT"
] | null | null | null |
NTK/models.py
|
hendrydong/NTK-and-MF-examples
|
054bd28bc8da84b6db43fce2b9fafc5c8f8f592a
|
[
"MIT"
] | 3
|
2021-09-18T09:01:26.000Z
|
2022-02-18T01:57:54.000Z
|
import torch
from torch import optim, nn
from torchvision import datasets, transforms
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
import random
import copy
from ntk import ntk_v3
# Standard strategy: eta_theta = eta_u * alpha/m
def train_standard(train_loader, test_loader, h_dim, alpha, train_epoch, lr = 1, m = 0, SEED = 2020, print_result=True):
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED) # if you are using multi-GPU.
np.random.seed(SEED) # Numpy module.
random.seed(SEED) # Python random module.
torch.manual_seed(SEED)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
loss_full = []
acc_full = []
net1 = nn.Sequential(nn.Linear(784, h_dim, bias=False).cuda(),nn.Tanh())
torch.nn.init.normal_(net1[0].weight,mean=0.0, std=1.0)
net2 = nn.Linear(h_dim,10, bias=False).cuda()
torch.nn.init.normal_(net2.weight,mean=0.0, std=1.0*alpha/h_dim)
theta0 = list(net1.parameters())[0].detach().cpu().numpy().copy()
u0 = list(net2.parameters())[0].detach().cpu().numpy().copy()
net = nn.Sequential(net1,net2)
Loss = nn.CrossEntropyLoss()
coeff = 1
eta = lr
op = optim.SGD(net.parameters(), lr = eta, momentum = m)
relative_change = []
for epoch in range(train_epoch):
loss_train = []
for x_, y_ in train_loader:
# train discriminator D
x_ = x_.view(-1, 28 * 28).cuda()
y_ = y_.cuda()
y_pred = net(x_)*coeff
#print(y_pred.shape)
loss = Loss(y_pred,y_)
op.zero_grad()
loss.backward()
op.step()
loss_train+=[loss.item()]
acc = []
loss_test = []
for x_, y_ in test_loader:
x_ = x_.view(-1, 28 * 28).cuda()
y_ = y_.cuda()
y_pred = net(x_)*coeff
loss = Loss(y_pred,y_)
loss_test+=[loss.item()]
acc += [torch.argmax(y_pred,1)==y_]
acc = torch.mean(torch.cat(acc).float())
acc_full.append(acc.item())
loss_full.append((np.mean(loss_train[-10:]),np.mean(loss_test)))
theta1 = list(net1.parameters())[0].detach().cpu().numpy().copy()
u1 = list(net2.parameters())[0].detach().cpu().numpy().copy()
dtheta = (np.mean(np.sum((theta1-theta0)**2,1)**0.5))
du = (np.mean(np.sum((u1-u0)**2,0)**0.5))/alpha*h_dim
if print_result:
print('epoch %d'%epoch,'loss (train,test):%.2e;%.2e'%loss_full[-1],'acc:%.6f'%acc.item())
print('dtheta:',dtheta)
print('du:',du)
relative_change.append((dtheta,du))
return relative_change,acc_full,loss_full
# Standard strategy: eta_theta = eta_u * alpha/m
def train_standard_2(train_loader, test_loader, h_dim, alpha, train_epoch, lr = 1, m = 0, SEED = 2020, print_result=True):
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED) # if you are using multi-GPU.
np.random.seed(SEED) # Numpy module.
random.seed(SEED) # Python random module.
torch.manual_seed(SEED)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
loss_full = []
acc_full = []
net1 = nn.Sequential(nn.Linear(784, h_dim, bias=False).cuda(),nn.Tanh())
torch.nn.init.normal_(net1[0].weight,mean=0.0, std=1.0/np.sqrt(h_dim))
net2 = nn.Linear(h_dim,10, bias=False).cuda()
torch.nn.init.normal_(net2.weight,mean=0.0, std=1.0*alpha/h_dim)
theta0 = list(net1.parameters())[0].detach().cpu().numpy().copy()
u0 = list(net2.parameters())[0].detach().cpu().numpy().copy()
net = nn.Sequential(net1,net2)
Loss = nn.CrossEntropyLoss()
coeff = 1
eta = lr
op = optim.SGD(net.parameters(), lr = eta, momentum = m)
relative_change = []
for epoch in range(train_epoch):
loss_train = []
for x_, y_ in train_loader:
# train discriminator D
x_ = x_.view(-1, 28 * 28).cuda()
y_ = y_.cuda()
y_pred = net(x_)*coeff
#print(y_pred.shape)
loss = Loss(y_pred,y_)
op.zero_grad()
loss.backward()
op.step()
loss_train+=[loss.item()]
acc = []
loss_test = []
for x_, y_ in test_loader:
x_ = x_.view(-1, 28 * 28).cuda()
y_ = y_.cuda()
y_pred = net(x_)*coeff
loss = Loss(y_pred,y_)
loss_test+=[loss.item()]
acc += [torch.argmax(y_pred,1)==y_]
acc = torch.mean(torch.cat(acc).float())
acc_full.append(acc.item())
loss_full.append((np.mean(loss_train[-10:]),np.mean(loss_test)))
theta1 = list(net1.parameters())[0].detach().cpu().numpy().copy()
u1 = list(net2.parameters())[0].detach().cpu().numpy().copy()
dtheta = (np.mean(np.sum((theta1-theta0)**2,1)**0.5))
du = (np.mean(np.sum((u1-u0)**2,0)**0.5))/alpha*h_dim
if print_result:
print('epoch %d'%epoch,'loss (train,test):%.2e;%.2e'%loss_full[-1],'acc:%.6f'%acc.item())
print('dtheta:',dtheta)
print('du:',du)
relative_change.append((dtheta,du))
return relative_change,acc_full,loss_full
# Theoretical strategy (1): eta_theta = eta_u
def train(train_loader,test_loader,h_dim,alpha,train_epoch,lr = 1, m = 0, SEED = 2020,print_result=True):
print('Seed:',SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED) # if you are using multi-GPU.
np.random.seed(SEED) # Numpy module.
random.seed(SEED) # Python random module.
torch.manual_seed(SEED)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
loss_full = []
acc_full = []
net1 = nn.Sequential(nn.Linear(784, h_dim, bias=False).cuda(),nn.Tanh())
torch.nn.init.normal_(net1[0].weight,mean=0.0, std=1.0)
net2 = nn.Linear(h_dim,10, bias=False).cuda()
torch.nn.init.normal_(net2.weight,mean=0.0, std=1.0)
theta0 = list(net1.parameters())[0].detach().cpu().numpy().copy()
u0 = list(net2.parameters())[0].detach().cpu().numpy().copy()
net = nn.Sequential(net1,net2)
Loss = nn.CrossEntropyLoss()
coeff = alpha / h_dim
eta = h_dim / alpha * lr
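# coeff = alpha / h_dim scales the network output, and eta = h_dim / alpha * lr
# scales the shared learning rate the other way to compensate; this is the
# "Theoretical strategy (1): eta_theta = eta_u" named in the comment above.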
op = optim.SGD(net.parameters(),lr = eta, momentum = m)
relative_change = []
for epoch in range(train_epoch):
loss_train = []
for x_, y_ in train_loader:
# train discriminator D
x_ = x_.view(-1, 28 * 28).cuda()
y_ = y_.cuda()
y_pred = net(x_)*coeff
#print(y_pred.shape)
loss = Loss(y_pred,y_)
op.zero_grad()
loss.backward()
op.step()
loss_train+=[loss.item()]
acc = []
loss_test = []
for x_, y_ in test_loader:
x_ = x_.view(-1, 28 * 28).cuda()
y_ = y_.cuda()
y_pred = net(x_)*coeff
loss = Loss(y_pred,y_)
loss_test+=[loss.item()]
acc += [torch.argmax(y_pred,1)==y_]
acc = torch.mean(torch.cat(acc).float())
acc_full.append(acc.item())
loss_full.append((np.mean(loss_train[-10:]),np.mean(loss_test)))
#print(epoch,loss.item(),loss_full[-1],acc.item())
theta1 = list(net1.parameters())[0].detach().cpu().numpy().copy()
u1 = list(net2.parameters())[0].detach().cpu().numpy().copy()
dtheta = np.mean(np.sum((theta1-theta0)**2,1)**0.5)
du = (np.mean(np.sum((u1-u0)**2,0)**0.5))
if print_result:
print('epoch %d'%epoch,'loss (train,test):%.2e;%.2e'%loss_full[-1],'acc:%.6f'%acc.item())
print('dtheta:',dtheta)
print('du:',du)
relative_change.append((dtheta,du))
return relative_change,acc_full,loss_full
# Theoretical strategy (2): eta_theta = eta_u * alpha
def train_2(train_loader,test_loader, h_dim,alpha,train_epoch, lr = 1,m = 0, SEED = 2020,print_result=True):
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED) # if you are using multi-GPU.
np.random.seed(SEED) # Numpy module.
random.seed(SEED) # Python random module.
torch.manual_seed(SEED)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
loss_full = []
acc_full = []
coeff = alpha/h_dim
net1 = nn.Sequential(nn.Linear(784, h_dim, bias=False).cuda(),nn.Tanh())
torch.nn.init.normal_(net1[0].weight,mean=0.0, std=1.0)
net2 = nn.Linear(h_dim,10, bias=False).cuda()
torch.nn.init.normal_(net2.weight,mean=0.0, std=1.0*alpha)
#torch.nn.init.normal_(net2.bias,mean=0.0, std=1.0*alpha)
theta0 = list(net1.parameters())[0].detach().cpu().numpy().copy()
u0 = list(net2.parameters())[0].detach().cpu().numpy().copy()
net = nn.Sequential(net1,net2)
Loss = nn.CrossEntropyLoss()
eta = lr*h_dim
op = optim.SGD(net.parameters(),lr = eta, momentum = m)
relative_change = []
for epoch in range(train_epoch):
loss_train = []
for x_, y_ in train_loader:
# train discriminator D
x_ = x_.view(-1, 28 * 28).cuda()
y_ = y_.cuda()
y_pred = net(x_)/h_dim
#print(y_pred.shape)
loss = Loss(y_pred,y_)
op.zero_grad()
loss.backward()
op.step()
loss_train+=[loss.item()]
acc = []
loss_test = []
for x_, y_ in test_loader:
x_ = x_.view(-1, 28 * 28).cuda()
y_ = y_.cuda()
y_pred = net(x_)/h_dim
loss = Loss(y_pred,y_)
loss_test+=[loss.item()]
acc += [torch.argmax(y_pred,1)==y_]
acc = torch.mean(torch.cat(acc).float())
acc_full.append(acc.item())
loss_full.append((np.mean(loss_train[-10:]),np.mean(loss_test)))
#print(epoch,loss.item(),loss_full[-1],acc.item())
theta1 = list(net1.parameters())[0].detach().cpu().numpy().copy()
u1 = list(net2.parameters())[0].detach().cpu().numpy().copy()
dtheta = (np.mean(np.sum((theta1-theta0)**2,1)**0.5))
du = (np.mean(np.sum((u1-u0)**2,0)**0.5))/alpha
if print_result:
print('epoch %d'%epoch,'loss (train,test):%.2e;%.2e'%loss_full[-1],'acc:%.6f'%acc.item())
print('dtheta:',dtheta)
print('du:',du)
relative_change.append((dtheta,du))
return relative_change,acc_full,loss_full
# Save Linearization during the training process
def train_ntk(train_loader, test_loader, h_dim, alpha, train_epoch, lr = 1, m = 0, SEED = 2020,print_result=True):
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED) # if you are using multi-GPU.
np.random.seed(SEED) # Numpy module.
random.seed(SEED) # Python random module.
torch.manual_seed(SEED)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
loss_full = []
acc_full = []
net1 = nn.Sequential(nn.Linear(28*28, h_dim, bias=False).cuda(),nn.Tanh())
torch.nn.init.normal_(net1[0].weight)
net2 = nn.Linear(h_dim,10, bias=False).cuda()
torch.nn.init.normal_(net2.weight,mean=0.0, std=1.0*alpha/h_dim)
#torch.nn.init.normal_(net2.bias,mean=0.0, std=1.0*alpha/h_dim)
theta0 = list(net1.parameters())[0].detach().cpu().numpy().copy()
u0 = list(net2.parameters())[0].detach().cpu().numpy().copy()
net = nn.Sequential(net1,net2)
net1_init = copy.deepcopy(net1)
net2_init = copy.deepcopy(net2)
net_init = nn.Sequential(net1_init,net2_init)
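# net_init is a frozen deep copy of the network at initialization; ntk_v3(net,
# net_init, x_) below uses it to evaluate the linearized (NTK) predictor
# alongside the trained network, per the "Save Linearization" comment above.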
Loss = nn.CrossEntropyLoss()
coeff = 1#alpha/h_dim
eta = lr
op = optim.SGD(net.parameters(),lr = eta,momentum=m)
relative_change = []
for epoch in range(train_epoch):
#if epoch==int(0.3*train_epoch) and lr >0.1:
# lr = 0.1
# op = optim.SGD(net.parameters(),lr = 0.1,momentum=m)
#if epoch==int(0.6*train_epoch) and lr >0.01:
# op = optim.SGD(net.parameters(),lr = 0.01,momentum=m)
loss_train = []
loss_train_ntk = []
for x_, y_ in train_loader:
x_ = x_.view(-1, 28 * 28).cuda()
y_ = y_.cuda()
y_pred = net(x_)
#y_pred = net(X_tr)
#print(y_pred.shape)
#print(y_pred.dtype)
#print((Y_tr).dtype)
loss = Loss(y_pred,y_)
op.zero_grad()
loss.backward()
op.step()
j = 0
for x_, y_ in train_loader:
if j==10:
break
x_ = x_.view(-1, 28 * 28).cuda()
y_ = y_.cuda()
y_pred = net(x_)
loss = Loss(y_pred,y_)
y_tr_ntk = ntk_v3(net,net_init,x_)
loss_ntk_tr = Loss(y_tr_ntk,y_)
loss_train+=[loss.item()]
loss_train_ntk+=[loss_ntk_tr.item()]
j+=1
acc = []
loss_test = []
#for x_, y_ in test_loader:
#x_ = x_.view(-1, 28 * 28).cuda()
#y_ = y_.cuda()
loss_ntk = []
acc_ntk = []
for x_, y_ in test_loader:
x_ = x_.view(-1, 28 * 28).cuda()
y_ = y_.cuda()
y_pred = net(x_)
loss = Loss(y_pred,y_)
loss_test+=[loss.item()]
y_te_ntk = ntk_v3(net,net_init,x_)
loss_ntk_te = Loss(y_te_ntk,y_)
loss_ntk += [loss_ntk_te.item()]
acc += [torch.argmax(y_pred,1)==y_]
acc_ntk +=[torch.argmax(y_te_ntk,1)==y_]
loss_ntk_te = np.mean(loss_ntk)
acc_ntk_mean = torch.mean(torch.cat(acc_ntk).float())
acc = torch.mean(torch.cat(acc).float())
acc_full.append((acc.item(),acc_ntk_mean.item()))
loss_full.append((np.mean(loss_train),np.mean(loss_test),np.mean(loss_train_ntk),loss_ntk_te))
#print(epoch,loss.item(),loss_full[-1],acc.item())
theta1 = list(net1.parameters())[0].detach().cpu().numpy().copy()
u1 = list(net2.parameters())[0].detach().cpu().numpy().copy()
dtheta = (np.mean(np.sum((theta1-theta0)**2,1)**0.5))
du = (np.mean(np.sum((u1-u0)**2,0)**0.5))/alpha*h_dim
if print_result:
print('epoch %d'%epoch,'loss (train,test):%.2e;%.2e'%loss_full[-1][:2],'acc:%.6f'%acc.item())
print('dtheta:',dtheta)
print('du:',du)
relative_change.append((dtheta,du))
#print(epoch,loss.item(),loss_full[-1],acc.item())
#print('ntk',loss_ntk_tr.item(),loss_ntk_te,acc_ntk_mean.item())
return relative_change,acc_full,loss_full
# Save Linearization during the training process
def train_ntk_theta(train_loader, test_loader, h_dim, alpha, train_epoch, lr = 1, m = 0, SEED = 2020,print_result=True):
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED) # if you are using multi-GPU.
np.random.seed(SEED) # Numpy module.
random.seed(SEED) # Python random module.
torch.manual_seed(SEED)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
loss_full = []
acc_full = []
net1 = nn.Sequential(nn.Linear(28*28, h_dim, bias=False).cuda(),nn.Tanh())
torch.nn.init.normal_(net1[0].weight,mean=0.0, std=1.0/28)
net2 = nn.Linear(h_dim,10, bias=False).cuda()
torch.nn.init.normal_(net2.weight,mean=0.0, std=1.0*alpha/h_dim)
#torch.nn.init.normal_(net2.bias,mean=0.0, std=1.0*alpha/h_dim)
theta0 = list(net1.parameters())[0].detach().cpu().numpy().copy()
u0 = list(net2.parameters())[0].detach().cpu().numpy().copy()
net = nn.Sequential(net1,net2)
net1_init = copy.deepcopy(net1)
net2_init = copy.deepcopy(net2)
net_init = nn.Sequential(net1_init,net2_init)
Loss = nn.CrossEntropyLoss()
coeff = 1#alpha/h_dim
eta = lr
op = optim.SGD(net.parameters(),lr = eta,momentum=m)
relative_change = []
for epoch in range(train_epoch):
#if epoch==int(0.3*train_epoch) and lr >0.1:
# lr = 0.1
# op = optim.SGD(net.parameters(),lr = 0.1,momentum=m)
#if epoch==int(0.6*train_epoch) and lr >0.01:
# op = optim.SGD(net.parameters(),lr = 0.01,momentum=m)
loss_train = []
loss_train_ntk = []
for x_, y_ in train_loader:
x_ = x_.view(-1, 28 * 28).cuda()
y_ = y_.cuda()
y_pred = net(x_)
#y_pred = net(X_tr)
#print(y_pred.shape)
#print(y_pred.dtype)
#print((Y_tr).dtype)
loss = Loss(y_pred,y_)
op.zero_grad()
loss.backward()
op.step()
j = 0
for x_, y_ in train_loader:
if j==10:
break
x_ = x_.view(-1, 28 * 28).cuda()
y_ = y_.cuda()
y_pred = net(x_)
loss = Loss(y_pred,y_)
y_tr_ntk = ntk_v3(net,net_init,x_)
loss_ntk_tr = Loss(y_tr_ntk,y_)
loss_train+=[loss.item()]
loss_train_ntk+=[loss_ntk_tr.item()]
j+=1
acc = []
loss_test = []
#for x_, y_ in test_loader:
#x_ = x_.view(-1, 28 * 28).cuda()
#y_ = y_.cuda()
loss_ntk = []
acc_ntk = []
for x_, y_ in test_loader:
x_ = x_.view(-1, 28 * 28).cuda()
y_ = y_.cuda()
y_pred = net(x_)
loss = Loss(y_pred,y_)
loss_test+=[loss.item()]
y_te_ntk = ntk_v3(net,net_init,x_)
loss_ntk_te = Loss(y_te_ntk,y_)
loss_ntk += [loss_ntk_te.item()]
acc += [torch.argmax(y_pred,1)==y_]
acc_ntk +=[torch.argmax(y_te_ntk,1)==y_]
loss_ntk_te = np.mean(loss_ntk)
acc_ntk_mean = torch.mean(torch.cat(acc_ntk).float())
acc = torch.mean(torch.cat(acc).float())
acc_full.append((acc.item(),acc_ntk_mean.item()))
loss_full.append((np.mean(loss_train),np.mean(loss_test),np.mean(loss_train_ntk),loss_ntk_te))
#print(epoch,loss.item(),loss_full[-1],acc.item())
theta1 = list(net1.parameters())[0].detach().cpu().numpy().copy()
u1 = list(net2.parameters())[0].detach().cpu().numpy().copy()
dtheta = (np.mean(np.sum((theta1-theta0)**2,1)**0.5))
du = (np.mean(np.sum((u1-u0)**2,0)**0.5))/alpha*h_dim
if print_result:
print('epoch %d'%epoch,'loss (train,test):%.2e;%.2e'%loss_full[-1][:2],'acc:%.6f'%acc.item())
print('dtheta:',dtheta)
print('du:',du)
relative_change.append((dtheta,du))
#print(epoch,loss.item(),loss_full[-1],acc.item())
#print('ntk',loss_ntk_tr.item(),loss_ntk_te,acc_ntk_mean.item())
return relative_change,acc_full,loss_full
| 38.576613
| 122
| 0.569928
| 2,791
| 19,134
| 3.70799
| 0.049086
| 0.021258
| 0.039424
| 0.046381
| 0.972075
| 0.970432
| 0.970432
| 0.970432
| 0.970432
| 0.970432
| 0
| 0.035972
| 0.261942
| 19,134
| 495
| 123
| 38.654545
| 0.696856
| 0.113515
| 0
| 0.93401
| 0
| 0
| 0.019121
| 0.007814
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015228
| false
| 0
| 0.022843
| 0
| 0.053299
| 0.07868
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a9ceb5e7e057ca5d1de748841c9920610a09ba2d
| 82
|
py
|
Python
|
baseline/tf/classify/__init__.py
|
sagnik/baseline
|
8d75616e04c1cca509dbebbb6d08ad7e1a7b9f88
|
[
"Apache-2.0"
] | 241
|
2016-04-25T20:02:31.000Z
|
2019-09-03T05:44:09.000Z
|
baseline/tf/classify/__init__.py
|
sagnik/baseline
|
8d75616e04c1cca509dbebbb6d08ad7e1a7b9f88
|
[
"Apache-2.0"
] | 131
|
2019-10-12T10:53:17.000Z
|
2021-12-03T19:52:47.000Z
|
baseline/tf/classify/__init__.py
|
sagnik/baseline
|
8d75616e04c1cca509dbebbb6d08ad7e1a7b9f88
|
[
"Apache-2.0"
] | 75
|
2016-06-28T01:18:58.000Z
|
2019-08-29T06:47:22.000Z
|
from baseline.tf.classify.train import *
from baseline.tf.classify.model import *
| 27.333333
| 40
| 0.804878
| 12
| 82
| 5.5
| 0.583333
| 0.363636
| 0.424242
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 82
| 2
| 41
| 41
| 0.891892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
a9ee1b33162305f394c7c27e9e30fd43070ce496
| 12,999
|
py
|
Python
|
tests/messagebroker_pika/test_pika_blocking_connection_consume_generator.py
|
newrelic/newrelic-python-agen
|
4f292ec1219c0daffc5721a7b3a245b97d0f83ba
|
[
"Apache-2.0"
] | 92
|
2020-06-12T17:53:23.000Z
|
2022-03-01T11:13:21.000Z
|
tests/messagebroker_pika/test_pika_blocking_connection_consume_generator.py
|
newrelic/newrelic-python-agen
|
4f292ec1219c0daffc5721a7b3a245b97d0f83ba
|
[
"Apache-2.0"
] | 347
|
2020-07-10T00:10:19.000Z
|
2022-03-31T17:58:56.000Z
|
tests/messagebroker_pika/test_pika_blocking_connection_consume_generator.py
|
newrelic/newrelic-python-agen
|
4f292ec1219c0daffc5721a7b3a245b97d0f83ba
|
[
"Apache-2.0"
] | 58
|
2020-06-17T13:51:57.000Z
|
2022-03-06T14:26:53.000Z
|
# Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pika
from newrelic.api.background_task import background_task
from conftest import QUEUE, EXCHANGE, CORRELATION_ID, REPLY_TO, HEADERS, BODY
from testing_support.fixtures import (validate_transaction_metrics,
validate_tt_collector_json)
from testing_support.db_settings import rabbitmq_settings
DB_SETTINGS = rabbitmq_settings()[0]
_message_broker_tt_params = {
'queue_name': QUEUE,
'routing_key': QUEUE,
'correlation_id': CORRELATION_ID,
'reply_to': REPLY_TO,
'headers': HEADERS.copy(),
}
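# Expected message-broker attributes; the validate_tt_collector_json fixture
# applied to the tests below presumably checks these against the attributes
# recorded on each transaction trace.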
_test_blocking_connection_consume_metrics = [
('MessageBroker/RabbitMQ/Exchange/Produce/Named/%s' % EXCHANGE, None),
('MessageBroker/RabbitMQ/Exchange/Consume/Named/%s' % EXCHANGE, None),
('MessageBroker/RabbitMQ/Exchange/Consume/Named/Unknown', None),
]
@validate_transaction_metrics(
('test_pika_blocking_connection_consume_generator:'
'test_blocking_connection_consume_break'),
scoped_metrics=_test_blocking_connection_consume_metrics,
rollup_metrics=_test_blocking_connection_consume_metrics,
background_task=True)
@validate_tt_collector_json(message_broker_params=_message_broker_tt_params)
@background_task()
def test_blocking_connection_consume_break(producer):
with pika.BlockingConnection(
pika.ConnectionParameters(DB_SETTINGS['host'])) as connection:
channel = connection.channel()
for method_frame, properties, body in channel.consume(QUEUE):
assert hasattr(method_frame, '_nr_start_time')
assert body == BODY
break
@validate_transaction_metrics(
('test_pika_blocking_connection_consume_generator:'
'test_blocking_connection_consume_connection_close'),
scoped_metrics=_test_blocking_connection_consume_metrics,
rollup_metrics=_test_blocking_connection_consume_metrics,
background_task=True)
@validate_tt_collector_json(message_broker_params=_message_broker_tt_params)
@background_task()
def test_blocking_connection_consume_connection_close(producer):
connection = pika.BlockingConnection(
pika.ConnectionParameters(DB_SETTINGS['host']))
channel = connection.channel()
try:
for method_frame, properties, body in channel.consume(QUEUE):
assert hasattr(method_frame, '_nr_start_time')
assert body == BODY
channel.close()
connection.close()
except pika.exceptions.ConnectionClosed:
pass
except:
connection.close()
@validate_transaction_metrics(
('test_pika_blocking_connection_consume_generator:'
'test_blocking_connection_consume_timeout'),
scoped_metrics=_test_blocking_connection_consume_metrics,
rollup_metrics=_test_blocking_connection_consume_metrics,
background_task=True)
@validate_tt_collector_json(message_broker_params=_message_broker_tt_params)
@background_task()
def test_blocking_connection_consume_timeout(producer):
with pika.BlockingConnection(
pika.ConnectionParameters(DB_SETTINGS['host'])) as connection:
channel = connection.channel()
for result in channel.consume(QUEUE, inactivity_timeout=0.01):
# result is None if there is a timeout
if result and any(result):
method_frame, properties, body = result
channel.basic_ack(method_frame.delivery_tag)
assert hasattr(method_frame, '_nr_start_time')
assert body == BODY
else:
# timeout hit!
break
@validate_transaction_metrics(
('test_pika_blocking_connection_consume_generator:'
'test_blocking_connection_consume_exception_in_for_loop'),
scoped_metrics=_test_blocking_connection_consume_metrics,
rollup_metrics=_test_blocking_connection_consume_metrics,
background_task=True)
@validate_tt_collector_json(message_broker_params=_message_broker_tt_params)
@background_task()
def test_blocking_connection_consume_exception_in_for_loop(producer):
with pika.BlockingConnection(
pika.ConnectionParameters(DB_SETTINGS['host'])) as connection:
channel = connection.channel()
try:
# We should still create the metric in this case even if there is
# an exception
for result in channel.consume(QUEUE):
1 / 0
except ZeroDivisionError:
# Expected error
pass
except Exception as e:
assert False, 'Wrong exception was raised: %s' % e
else:
assert False, 'No exception was raised!'
_test_blocking_connection_consume_empty_metrics = [
('MessageBroker/RabbitMQ/Exchange/Produce/Named/%s' % EXCHANGE, None),
('MessageBroker/RabbitMQ/Exchange/Consume/Named/%s' % EXCHANGE, None),
('MessageBroker/RabbitMQ/Exchange/Consume/Named/Unknown', None),
]
@validate_transaction_metrics(
('test_pika_blocking_connection_consume_generator:'
'test_blocking_connection_consume_exception_in_generator'),
scoped_metrics=_test_blocking_connection_consume_empty_metrics,
rollup_metrics=_test_blocking_connection_consume_empty_metrics,
background_task=True)
@validate_tt_collector_json(message_broker_params=_message_broker_tt_params)
@background_task()
def test_blocking_connection_consume_exception_in_generator():
with pika.BlockingConnection(
pika.ConnectionParameters(DB_SETTINGS['host'])) as connection:
channel = connection.channel()
try:
# Since the pytest fixture is not used, the QUEUE will not exist
for result in channel.consume(QUEUE):
pass
except pika.exceptions.ChannelClosed:
# Expected error
pass
except Exception as e:
assert False, 'Wrong exception was raised: %s' % e
else:
assert False, 'No exception was raised!'
_test_blocking_connection_consume_many_metrics = [
('MessageBroker/RabbitMQ/Exchange/Produce/Named/%s' % EXCHANGE, None),
('MessageBroker/RabbitMQ/Exchange/Consume/Named/%s' % EXCHANGE, None),
('MessageBroker/RabbitMQ/Exchange/Consume/Named/Unknown', None),
]
@validate_transaction_metrics(
('test_pika_blocking_connection_consume_generator:'
'test_blocking_connection_consume_many'),
scoped_metrics=_test_blocking_connection_consume_many_metrics,
rollup_metrics=_test_blocking_connection_consume_many_metrics,
background_task=True)
@validate_tt_collector_json(message_broker_params=_message_broker_tt_params)
@background_task()
def test_blocking_connection_consume_many(produce_five):
with pika.BlockingConnection(
pika.ConnectionParameters(DB_SETTINGS['host'])) as connection:
channel = connection.channel()
consumed = 0
for result in channel.consume(QUEUE, inactivity_timeout=0.01):
if result and any(result):
consumed += 1
else:
assert consumed == 5
break
@validate_transaction_metrics(
('test_pika_blocking_connection_consume_generator:'
'test_blocking_connection_consume_using_methods'),
scoped_metrics=_test_blocking_connection_consume_metrics,
rollup_metrics=_test_blocking_connection_consume_metrics,
background_task=True)
@validate_tt_collector_json(message_broker_params=_message_broker_tt_params)
@background_task()
def test_blocking_connection_consume_using_methods(producer):
with pika.BlockingConnection(
pika.ConnectionParameters(DB_SETTINGS['host'])) as connection:
channel = connection.channel()
consumer = channel.consume(QUEUE, inactivity_timeout=0.01)
method, properties, body = next(consumer)
assert hasattr(method, '_nr_start_time')
assert body == BODY
result = next(consumer)
assert result is None or not any(result)
try:
consumer.throw(ZeroDivisionError)
except ZeroDivisionError:
# This is expected
pass
else:
# this is not
assert False, 'No exception was raised!'
result = consumer.close()
assert result is None
@validate_transaction_metrics(
'Named/%s' % EXCHANGE,
scoped_metrics=_test_blocking_connection_consume_metrics,
rollup_metrics=_test_blocking_connection_consume_metrics,
background_task=True,
group='Message/RabbitMQ/Exchange')
@validate_tt_collector_json(message_broker_params=_message_broker_tt_params)
def test_blocking_connection_consume_outside_txn(producer):
with pika.BlockingConnection(
pika.ConnectionParameters(DB_SETTINGS['host'])) as connection:
channel = connection.channel()
consumer = channel.consume(QUEUE)
try:
for method_frame, properties, body in consumer:
assert hasattr(method_frame, '_nr_start_time')
assert body == BODY
break
finally:
# Required for PyPy compatibility, see http://pypy.org/compat.html
consumer.close()
def test_blocking_connection_consume_many_outside_txn(produce_five):
@validate_transaction_metrics(
'Named/%s' % EXCHANGE,
scoped_metrics=_test_blocking_connection_consume_metrics,
rollup_metrics=_test_blocking_connection_consume_metrics,
background_task=True,
group='Message/RabbitMQ/Exchange')
@validate_tt_collector_json(
message_broker_params=_message_broker_tt_params)
def consume_it(consumer, up_next=None):
if up_next is None:
method_frame, properties, body = next(consumer)
else:
method_frame, properties, body = up_next
assert hasattr(method_frame, '_nr_start_time')
assert body == BODY
return next(consumer)
with pika.BlockingConnection(
pika.ConnectionParameters(DB_SETTINGS['host'])) as connection:
channel = connection.channel()
consumer = channel.consume(QUEUE)
up_next = None
for i in range(6):
try:
up_next = consume_it(consumer, up_next=up_next)
except StopIteration:
pass
finally:
consumer.close()
@validate_transaction_metrics(
'Named/%s' % EXCHANGE,
scoped_metrics=_test_blocking_connection_consume_metrics,
rollup_metrics=_test_blocking_connection_consume_metrics,
background_task=True,
group='Message/RabbitMQ/Exchange')
@validate_tt_collector_json(message_broker_params=_message_broker_tt_params)
def test_blocking_connection_consume_using_methods_outside_txn(producer):
with pika.BlockingConnection(
pika.ConnectionParameters(DB_SETTINGS['host'])) as connection:
channel = connection.channel()
consumer = channel.consume(QUEUE, inactivity_timeout=0.01)
method, properties, body = next(consumer)
assert hasattr(method, '_nr_start_time')
assert body == BODY
result = next(consumer)
assert result is None or not any(result)
try:
consumer.throw(ZeroDivisionError)
except ZeroDivisionError:
# This is expected
pass
else:
# this is not
assert False, 'No exception was raised!'
result = consumer.close()
assert result is None
@validate_transaction_metrics(
('test_pika_blocking_connection_consume_generator:'
'test_blocking_connection_consume_exception_on_creation'),
scoped_metrics=_test_blocking_connection_consume_empty_metrics,
rollup_metrics=_test_blocking_connection_consume_empty_metrics,
background_task=True)
@background_task()
def test_blocking_connection_consume_exception_on_creation():
with pika.BlockingConnection(
pika.ConnectionParameters(DB_SETTINGS['host'])) as connection:
channel = connection.channel()
try:
channel.consume(kittens=True)
except TypeError:
# this is expected
pass
else:
# this is not
assert False, 'TypeError was not raised'
| 37.787791
| 78
| 0.699438
| 1,409
| 12,999
| 6.089425
| 0.139815
| 0.109091
| 0.151515
| 0.148718
| 0.808858
| 0.792191
| 0.773893
| 0.744639
| 0.736597
| 0.736597
| 0
| 0.002691
| 0.228018
| 12,999
| 343
| 79
| 37.897959
| 0.852317
| 0.071159
| 0
| 0.742424
| 0
| 0
| 0.13905
| 0.106176
| 0
| 0
| 0
| 0
| 0.098485
| 1
| 0.045455
| false
| 0.030303
| 0.018939
| 0
| 0.068182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0 | 7 | e7015490ddcb9762787e86f37ecbb963516178f3 | 31,966 | py | Python | sdk/python/pulumi_sakuracloud/provider.py | sacloud/pulumi-sakuracloud | 3eff14c6ec8ef4ad6422e0cdf15585df67eb4d6e | ["ECL-2.0", "Apache-2.0"] | 6 | 2019-12-07T07:46:05.000Z | 2020-12-19T02:41:42.000Z | sdk/python/pulumi_sakuracloud/provider.py | sacloud/pulumi-sakuracloud | 3eff14c6ec8ef4ad6422e0cdf15585df67eb4d6e | ["ECL-2.0", "Apache-2.0"] | 5 | 2019-09-11T04:41:06.000Z | 2021-10-19T07:50:34.000Z | sdk/python/pulumi_sakuracloud/provider.py | sacloud/pulumi-sakuracloud | 3eff14c6ec8ef4ad6422e0cdf15585df67eb4d6e | ["ECL-2.0", "Apache-2.0"] | 2 | 2019-09-08T05:38:16.000Z | 2021-06-24T01:32:47.000Z |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['ProviderArgs', 'Provider']
@pulumi.input_type
class ProviderArgs:
def __init__(__self__, *,
accept_language: Optional[pulumi.Input[str]] = None,
api_request_rate_limit: Optional[pulumi.Input[int]] = None,
api_request_timeout: Optional[pulumi.Input[int]] = None,
api_root_url: Optional[pulumi.Input[str]] = None,
default_zone: Optional[pulumi.Input[str]] = None,
fake_mode: Optional[pulumi.Input[str]] = None,
fake_store_path: Optional[pulumi.Input[str]] = None,
profile: Optional[pulumi.Input[str]] = None,
retry_max: Optional[pulumi.Input[int]] = None,
retry_wait_max: Optional[pulumi.Input[int]] = None,
retry_wait_min: Optional[pulumi.Input[int]] = None,
secret: Optional[pulumi.Input[str]] = None,
token: Optional[pulumi.Input[str]] = None,
trace: Optional[pulumi.Input[str]] = None,
zone: Optional[pulumi.Input[str]] = None,
zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a Provider resource.
:param pulumi.Input[str] accept_language: The value of AcceptLanguage header used when calling SakuraCloud API. It can also be sourced from the
`SAKURACLOUD_ACCEPT_LANGUAGE` environment variables, or via a shared credentials file if `profile` is specified
:param pulumi.Input[int] api_request_rate_limit: The maximum number of SakuraCloud API calls per second. It can also be sourced from the `SAKURACLOUD_RATE_LIMIT`
environment variables, or via a shared credentials file if `profile` is specified. Default:`10`
:param pulumi.Input[int] api_request_timeout: The timeout seconds for each SakuraCloud API call. It can also be sourced from the `SAKURACLOUD_API_REQUEST_TIMEOUT`
environment variables, or via a shared credentials file if `profile` is specified. Default:`300`
:param pulumi.Input[str] api_root_url: The root URL of SakuraCloud API. It can also be sourced from the `SAKURACLOUD_API_ROOT_URL` environment variables, or
via a shared credentials file if `profile` is specified. Default:`https://secure.sakura.ad.jp/cloud/zone`
:param pulumi.Input[str] default_zone: The name of zone to use as default for global resources. It must be provided, but it can also be sourced from the
`SAKURACLOUD_DEFAULT_ZONE` environment variables, or via a shared credentials file if `profile` is specified
:param pulumi.Input[str] fake_mode: The flag to enable faking of SakuraCloud API calls. It is for debugging or developing the provider. It can also be sourced
from the `FAKE_MODE` environment variables, or via a shared credentials file if `profile` is specified
:param pulumi.Input[str] fake_store_path: The file path used by the SakuraCloud API fake driver for storing fake data. It is for debugging or developing the
provider. It can also be sourced from the `FAKE_STORE_PATH` environment variables, or via a shared credentials file if
`profile` is specified
:param pulumi.Input[str] profile: The profile name of your SakuraCloud account. Default:`default`
:param pulumi.Input[int] retry_max: The maximum number of API call retries used when SakuraCloud API returns status code `423` or `503`. It can also be
sourced from the `SAKURACLOUD_RETRY_MAX` environment variables, or via a shared credentials file if `profile` is
specified. Default:`100`
:param pulumi.Input[int] retry_wait_max: The maximum wait interval(in seconds) for retrying API call used when SakuraCloud API returns status code `423` or
`503`. It can also be sourced from the `SAKURACLOUD_RETRY_WAIT_MAX` environment variables, or via a shared credentials
file if `profile` is specified
:param pulumi.Input[int] retry_wait_min: The minimum wait interval(in seconds) for retrying API call used when SakuraCloud API returns status code `423` or
`503`. It can also be sourced from the `SAKURACLOUD_RETRY_WAIT_MIN` environment variables, or via a shared credentials
file if `profile` is specified
:param pulumi.Input[str] secret: The API secret of your SakuraCloud account. It must be provided, but it can also be sourced from the
`SAKURACLOUD_ACCESS_TOKEN_SECRET` environment variables, or via a shared credentials file if `profile` is specified
:param pulumi.Input[str] token: The API token of your SakuraCloud account. It must be provided, but it can also be sourced from the
`SAKURACLOUD_ACCESS_TOKEN` environment variables, or via a shared credentials file if `profile` is specified
:param pulumi.Input[str] trace: The flag to enable trace log output. It can also be sourced from the `SAKURACLOUD_TRACE` environment variables, or via a
shared credentials file if `profile` is specified
:param pulumi.Input[str] zone: The name of zone to use as default. It must be provided, but it can also be sourced from the `SAKURACLOUD_ZONE`
environment variables, or via a shared credentials file if `profile` is specified
:param pulumi.Input[Sequence[pulumi.Input[str]]] zones: A list of available SakuraCloud zone names. It can also be sourced via a shared credentials file if `profile` is
specified. Default:[`is1a`, `is1b`, `tk1a`, `tk1v`]
"""
if accept_language is not None:
pulumi.set(__self__, "accept_language", accept_language)
if api_request_rate_limit is not None:
pulumi.set(__self__, "api_request_rate_limit", api_request_rate_limit)
if api_request_timeout is not None:
pulumi.set(__self__, "api_request_timeout", api_request_timeout)
if api_root_url is not None:
pulumi.set(__self__, "api_root_url", api_root_url)
if default_zone is not None:
pulumi.set(__self__, "default_zone", default_zone)
if fake_mode is not None:
pulumi.set(__self__, "fake_mode", fake_mode)
if fake_store_path is not None:
pulumi.set(__self__, "fake_store_path", fake_store_path)
if profile is None:
profile = (_utilities.get_env('SAKURACLOUD_PROFILE') or 'default')
if profile is not None:
pulumi.set(__self__, "profile", profile)
if retry_max is not None:
pulumi.set(__self__, "retry_max", retry_max)
if retry_wait_max is not None:
pulumi.set(__self__, "retry_wait_max", retry_wait_max)
if retry_wait_min is not None:
pulumi.set(__self__, "retry_wait_min", retry_wait_min)
if secret is None:
secret = (_utilities.get_env('SAKURACLOUD_ACCESS_TOKEN_SECRET') or '')
if secret is not None:
pulumi.set(__self__, "secret", secret)
if token is None:
token = (_utilities.get_env('SAKURACLOUD_ACCESS_TOKEN') or '')
if token is not None:
pulumi.set(__self__, "token", token)
if trace is not None:
pulumi.set(__self__, "trace", trace)
if zone is None:
zone = (_utilities.get_env('SAKURACLOUD_ZONE') or 'is1b')
if zone is not None:
pulumi.set(__self__, "zone", zone)
if zones is not None:
pulumi.set(__self__, "zones", zones)
@property
@pulumi.getter(name="acceptLanguage")
def accept_language(self) -> Optional[pulumi.Input[str]]:
"""
The value of AcceptLanguage header used when calling SakuraCloud API. It can also be sourced from the
`SAKURACLOUD_ACCEPT_LANGUAGE` environment variables, or via a shared credentials file if `profile` is specified
"""
return pulumi.get(self, "accept_language")
@accept_language.setter
def accept_language(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "accept_language", value)
@property
@pulumi.getter(name="apiRequestRateLimit")
def api_request_rate_limit(self) -> Optional[pulumi.Input[int]]:
"""
The maximum number of SakuraCloud API calls per second. It can also be sourced from the `SAKURACLOUD_RATE_LIMIT`
environment variables, or via a shared credentials file if `profile` is specified. Default:`10`
"""
return pulumi.get(self, "api_request_rate_limit")
@api_request_rate_limit.setter
def api_request_rate_limit(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "api_request_rate_limit", value)
@property
@pulumi.getter(name="apiRequestTimeout")
def api_request_timeout(self) -> Optional[pulumi.Input[int]]:
"""
The timeout seconds for each SakuraCloud API call. It can also be sourced from the `SAKURACLOUD_API_REQUEST_TIMEOUT`
environment variables, or via a shared credentials file if `profile` is specified. Default:`300`
"""
return pulumi.get(self, "api_request_timeout")
@api_request_timeout.setter
def api_request_timeout(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "api_request_timeout", value)
@property
@pulumi.getter(name="apiRootUrl")
def api_root_url(self) -> Optional[pulumi.Input[str]]:
"""
The root URL of SakuraCloud API. It can also be sourced from the `SAKURACLOUD_API_ROOT_URL` environment variables, or
via a shared credentials file if `profile` is specified. Default:`https://secure.sakura.ad.jp/cloud/zone`
"""
return pulumi.get(self, "api_root_url")
@api_root_url.setter
def api_root_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_root_url", value)
@property
@pulumi.getter(name="defaultZone")
def default_zone(self) -> Optional[pulumi.Input[str]]:
"""
The name of zone to use as default for global resources. It must be provided, but it can also be sourced from the
`SAKURACLOUD_DEFAULT_ZONE` environment variables, or via a shared credentials file if `profile` is specified
"""
return pulumi.get(self, "default_zone")
@default_zone.setter
def default_zone(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "default_zone", value)
@property
@pulumi.getter(name="fakeMode")
def fake_mode(self) -> Optional[pulumi.Input[str]]:
"""
The flag to enable faking of SakuraCloud API calls. It is for debugging or developing the provider. It can also be sourced
from the `FAKE_MODE` environment variables, or via a shared credentials file if `profile` is specified
"""
return pulumi.get(self, "fake_mode")
@fake_mode.setter
def fake_mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fake_mode", value)
@property
@pulumi.getter(name="fakeStorePath")
def fake_store_path(self) -> Optional[pulumi.Input[str]]:
"""
The file path used by the SakuraCloud API fake driver for storing fake data. It is for debugging or developing the
provider. It can also be sourced from the `FAKE_STORE_PATH` environment variables, or via a shared credentials file if
`profile` is specified
"""
return pulumi.get(self, "fake_store_path")
@fake_store_path.setter
def fake_store_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fake_store_path", value)
@property
@pulumi.getter
def profile(self) -> Optional[pulumi.Input[str]]:
"""
The profile name of your SakuraCloud account. Default:`default`
"""
return pulumi.get(self, "profile")
@profile.setter
def profile(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "profile", value)
@property
@pulumi.getter(name="retryMax")
def retry_max(self) -> Optional[pulumi.Input[int]]:
"""
The maximum number of API call retries used when SakuraCloud API returns status code `423` or `503`. It can also be
sourced from the `SAKURACLOUD_RETRY_MAX` environment variables, or via a shared credentials file if `profile` is
specified. Default:`100`
"""
return pulumi.get(self, "retry_max")
@retry_max.setter
def retry_max(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "retry_max", value)
@property
@pulumi.getter(name="retryWaitMax")
def retry_wait_max(self) -> Optional[pulumi.Input[int]]:
"""
The maximum wait interval(in seconds) for retrying API call used when SakuraCloud API returns status code `423` or
`503`. It can also be sourced from the `SAKURACLOUD_RETRY_WAIT_MAX` environment variables, or via a shared credentials
file if `profile` is specified
"""
return pulumi.get(self, "retry_wait_max")
@retry_wait_max.setter
def retry_wait_max(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "retry_wait_max", value)
@property
@pulumi.getter(name="retryWaitMin")
def retry_wait_min(self) -> Optional[pulumi.Input[int]]:
"""
The minimum wait interval(in seconds) for retrying API call used when SakuraCloud API returns status code `423` or
`503`. It can also be sourced from the `SAKURACLOUD_RETRY_WAIT_MIN` environment variables, or via a shared credentials
file if `profile` is specified
"""
return pulumi.get(self, "retry_wait_min")
@retry_wait_min.setter
def retry_wait_min(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "retry_wait_min", value)
@property
@pulumi.getter
def secret(self) -> Optional[pulumi.Input[str]]:
"""
The API secret of your SakuraCloud account. It must be provided, but it can also be sourced from the
`SAKURACLOUD_ACCESS_TOKEN_SECRET` environment variables, or via a shared credentials file if `profile` is specified
"""
return pulumi.get(self, "secret")
@secret.setter
def secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "secret", value)
@property
@pulumi.getter
def token(self) -> Optional[pulumi.Input[str]]:
"""
The API token of your SakuraCloud account. It must be provided, but it can also be sourced from the
`SAKURACLOUD_ACCESS_TOKEN` environment variables, or via a shared credentials file if `profile` is specified
"""
return pulumi.get(self, "token")
@token.setter
def token(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "token", value)
@property
@pulumi.getter
def trace(self) -> Optional[pulumi.Input[str]]:
"""
The flag to enable trace log output. It can also be sourced from the `SAKURACLOUD_TRACE` environment variables, or via a
shared credentials file if `profile` is specified
"""
return pulumi.get(self, "trace")
@trace.setter
def trace(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "trace", value)
@property
@pulumi.getter
def zone(self) -> Optional[pulumi.Input[str]]:
"""
The name of zone to use as default. It must be provided, but it can also be sourced from the `SAKURACLOUD_ZONE`
environment variables, or via a shared credentials file if `profile` is specified
"""
return pulumi.get(self, "zone")
@zone.setter
def zone(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "zone", value)
@property
@pulumi.getter
def zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of available SakuraCloud zone names. It can also be sourced via a shared credentials file if `profile` is
specified. Default:[`is1a`, `is1b`, `tk1a`, `tk1v`]
"""
return pulumi.get(self, "zones")
@zones.setter
def zones(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "zones", value)
class Provider(pulumi.ProviderResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
accept_language: Optional[pulumi.Input[str]] = None,
api_request_rate_limit: Optional[pulumi.Input[int]] = None,
api_request_timeout: Optional[pulumi.Input[int]] = None,
api_root_url: Optional[pulumi.Input[str]] = None,
default_zone: Optional[pulumi.Input[str]] = None,
fake_mode: Optional[pulumi.Input[str]] = None,
fake_store_path: Optional[pulumi.Input[str]] = None,
profile: Optional[pulumi.Input[str]] = None,
retry_max: Optional[pulumi.Input[int]] = None,
retry_wait_max: Optional[pulumi.Input[int]] = None,
retry_wait_min: Optional[pulumi.Input[int]] = None,
secret: Optional[pulumi.Input[str]] = None,
token: Optional[pulumi.Input[str]] = None,
trace: Optional[pulumi.Input[str]] = None,
zone: Optional[pulumi.Input[str]] = None,
zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
"""
The provider type for the sakuracloud package. By default, resources use package-wide configuration
settings, however an explicit `Provider` instance may be created and passed during resource
construction to achieve fine-grained programmatic control over provider settings. See the
[documentation](https://www.pulumi.com/docs/reference/programming-model/#providers) for more information.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] accept_language: The value of AcceptLanguage header used when calling SakuraCloud API. It can also be sourced from the
`SAKURACLOUD_ACCEPT_LANGUAGE` environment variables, or via a shared credentials file if `profile` is specified
:param pulumi.Input[int] api_request_rate_limit: The maximum number of SakuraCloud API calls per second. It can also be sourced from the `SAKURACLOUD_RATE_LIMIT`
environment variables, or via a shared credentials file if `profile` is specified. Default:`10`
:param pulumi.Input[int] api_request_timeout: The timeout seconds for each SakuraCloud API call. It can also be sourced from the `SAKURACLOUD_API_REQUEST_TIMEOUT`
environment variables, or via a shared credentials file if `profile` is specified. Default:`300`
:param pulumi.Input[str] api_root_url: The root URL of SakuraCloud API. It can also be sourced from the `SAKURACLOUD_API_ROOT_URL` environment variables, or
via a shared credentials file if `profile` is specified. Default:`https://secure.sakura.ad.jp/cloud/zone`
:param pulumi.Input[str] default_zone: The name of zone to use as default for global resources. It must be provided, but it can also be sourced from the
`SAKURACLOUD_DEFAULT_ZONE` environment variables, or via a shared credentials file if `profile` is specified
:param pulumi.Input[str] fake_mode: The flag to enable faking of SakuraCloud API calls. It is for debugging or developing the provider. It can also be sourced
from the `FAKE_MODE` environment variables, or via a shared credentials file if `profile` is specified
:param pulumi.Input[str] fake_store_path: The file path used by the SakuraCloud API fake driver for storing fake data. It is for debugging or developing the
provider. It can also be sourced from the `FAKE_STORE_PATH` environment variables, or via a shared credentials file if
`profile` is specified
:param pulumi.Input[str] profile: The profile name of your SakuraCloud account. Default:`default`
:param pulumi.Input[int] retry_max: The maximum number of API call retries used when SakuraCloud API returns status code `423` or `503`. It can also be
sourced from the `SAKURACLOUD_RETRY_MAX` environment variables, or via a shared credentials file if `profile` is
specified. Default:`100`
:param pulumi.Input[int] retry_wait_max: The maximum wait interval(in seconds) for retrying API call used when SakuraCloud API returns status code `423` or
`503`. It can also be sourced from the `SAKURACLOUD_RETRY_WAIT_MAX` environment variables, or via a shared credentials
file if `profile` is specified
:param pulumi.Input[int] retry_wait_min: The minimum wait interval(in seconds) for retrying API call used when SakuraCloud API returns status code `423` or
`503`. It can also be sourced from the `SAKURACLOUD_RETRY_WAIT_MIN` environment variables, or via a shared credentials
file if `profile` is specified
:param pulumi.Input[str] secret: The API secret of your SakuraCloud account. It must be provided, but it can also be sourced from the
`SAKURACLOUD_ACCESS_TOKEN_SECRET` environment variables, or via a shared credentials file if `profile` is specified
:param pulumi.Input[str] token: The API token of your SakuraCloud account. It must be provided, but it can also be sourced from the
`SAKURACLOUD_ACCESS_TOKEN` environment variables, or via a shared credentials file if `profile` is specified
:param pulumi.Input[str] trace: The flag to enable trace log output. It can also be sourced from the `SAKURACLOUD_TRACE` environment variables, or via a
shared credentials file if `profile` is specified
:param pulumi.Input[str] zone: The name of zone to use as default. It must be provided, but it can also be sourced from the `SAKURACLOUD_ZONE`
environment variables, or via a shared credentials file if `profile` is specified
:param pulumi.Input[Sequence[pulumi.Input[str]]] zones: A list of available SakuraCloud zone names. It can also be sourced via a shared credentials file if `profile` is
specified. Default:[`is1a`, `is1b`, `tk1a`, `tk1v`]
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[ProviderArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The provider type for the sakuracloud package. By default, resources use package-wide configuration
settings, however an explicit `Provider` instance may be created and passed during resource
construction to achieve fine-grained programmatic control over provider settings. See the
[documentation](https://www.pulumi.com/docs/reference/programming-model/#providers) for more information.
:param str resource_name: The name of the resource.
:param ProviderArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ProviderArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
accept_language: Optional[pulumi.Input[str]] = None,
api_request_rate_limit: Optional[pulumi.Input[int]] = None,
api_request_timeout: Optional[pulumi.Input[int]] = None,
api_root_url: Optional[pulumi.Input[str]] = None,
default_zone: Optional[pulumi.Input[str]] = None,
fake_mode: Optional[pulumi.Input[str]] = None,
fake_store_path: Optional[pulumi.Input[str]] = None,
profile: Optional[pulumi.Input[str]] = None,
retry_max: Optional[pulumi.Input[int]] = None,
retry_wait_max: Optional[pulumi.Input[int]] = None,
retry_wait_min: Optional[pulumi.Input[int]] = None,
secret: Optional[pulumi.Input[str]] = None,
token: Optional[pulumi.Input[str]] = None,
trace: Optional[pulumi.Input[str]] = None,
zone: Optional[pulumi.Input[str]] = None,
zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ProviderArgs.__new__(ProviderArgs)
__props__.__dict__["accept_language"] = accept_language
__props__.__dict__["api_request_rate_limit"] = pulumi.Output.from_input(api_request_rate_limit).apply(pulumi.runtime.to_json) if api_request_rate_limit is not None else None
__props__.__dict__["api_request_timeout"] = pulumi.Output.from_input(api_request_timeout).apply(pulumi.runtime.to_json) if api_request_timeout is not None else None
__props__.__dict__["api_root_url"] = api_root_url
__props__.__dict__["default_zone"] = default_zone
__props__.__dict__["fake_mode"] = fake_mode
__props__.__dict__["fake_store_path"] = fake_store_path
if profile is None:
profile = (_utilities.get_env('SAKURACLOUD_PROFILE') or 'default')
__props__.__dict__["profile"] = profile
__props__.__dict__["retry_max"] = pulumi.Output.from_input(retry_max).apply(pulumi.runtime.to_json) if retry_max is not None else None
__props__.__dict__["retry_wait_max"] = pulumi.Output.from_input(retry_wait_max).apply(pulumi.runtime.to_json) if retry_wait_max is not None else None
__props__.__dict__["retry_wait_min"] = pulumi.Output.from_input(retry_wait_min).apply(pulumi.runtime.to_json) if retry_wait_min is not None else None
if secret is None:
secret = (_utilities.get_env('SAKURACLOUD_ACCESS_TOKEN_SECRET') or '')
__props__.__dict__["secret"] = secret
if token is None:
token = (_utilities.get_env('SAKURACLOUD_ACCESS_TOKEN') or '')
__props__.__dict__["token"] = token
__props__.__dict__["trace"] = trace
if zone is None:
zone = (_utilities.get_env('SAKURACLOUD_ZONE') or 'is1b')
__props__.__dict__["zone"] = zone
__props__.__dict__["zones"] = pulumi.Output.from_input(zones).apply(pulumi.runtime.to_json) if zones is not None else None
super(Provider, __self__).__init__(
'sakuracloud',
resource_name,
__props__,
opts)
@property
@pulumi.getter(name="acceptLanguage")
def accept_language(self) -> pulumi.Output[Optional[str]]:
"""
The value of AcceptLanguage header used when calling SakuraCloud API. It can also be sourced from the
`SAKURACLOUD_ACCEPT_LANGUAGE` environment variables, or via a shared credentials file if `profile` is specified
"""
return pulumi.get(self, "accept_language")
@property
@pulumi.getter(name="apiRootUrl")
def api_root_url(self) -> pulumi.Output[Optional[str]]:
"""
The root URL of SakuraCloud API. It can also be sourced from the `SAKURACLOUD_API_ROOT_URL` environment variables, or
via a shared credentials file if `profile` is specified. Default:`https://secure.sakura.ad.jp/cloud/zone`
"""
return pulumi.get(self, "api_root_url")
@property
@pulumi.getter(name="defaultZone")
def default_zone(self) -> pulumi.Output[Optional[str]]:
"""
The name of zone to use as default for global resources. It must be provided, but it can also be sourced from the
`SAKURACLOUD_DEFAULT_ZONE` environment variables, or via a shared credentials file if `profile` is specified
"""
return pulumi.get(self, "default_zone")
@property
@pulumi.getter(name="fakeMode")
def fake_mode(self) -> pulumi.Output[Optional[str]]:
"""
The flag to enable faking of SakuraCloud API calls. It is for debugging or developing the provider. It can also be sourced
from the `FAKE_MODE` environment variables, or via a shared credentials file if `profile` is specified
"""
return pulumi.get(self, "fake_mode")
@property
@pulumi.getter(name="fakeStorePath")
def fake_store_path(self) -> pulumi.Output[Optional[str]]:
"""
The file path used by the SakuraCloud API fake driver for storing fake data. It is for debugging or developing the
provider. It can also be sourced from the `FAKE_STORE_PATH` environment variables, or via a shared credentials file if
`profile` is specified
"""
return pulumi.get(self, "fake_store_path")
@property
@pulumi.getter
def profile(self) -> pulumi.Output[Optional[str]]:
"""
The profile name of your SakuraCloud account. Default:`default`
"""
return pulumi.get(self, "profile")
@property
@pulumi.getter
def secret(self) -> pulumi.Output[Optional[str]]:
"""
The API secret of your SakuraCloud account. It must be provided, but it can also be sourced from the
`SAKURACLOUD_ACCESS_TOKEN_SECRET` environment variables, or via a shared credentials file if `profile` is specified
"""
return pulumi.get(self, "secret")
@property
@pulumi.getter
def token(self) -> pulumi.Output[Optional[str]]:
"""
The API token of your SakuraCloud account. It must be provided, but it can also be sourced from the
`SAKURACLOUD_ACCESS_TOKEN` environment variables, or via a shared credentials file if `profile` is specified
"""
return pulumi.get(self, "token")
@property
@pulumi.getter
def trace(self) -> pulumi.Output[Optional[str]]:
"""
The flag to enable trace log output. It can also be sourced from the `SAKURACLOUD_TRACE` environment variables, or via a
shared credentials file if `profile` is specified
"""
return pulumi.get(self, "trace")
@property
@pulumi.getter
def zone(self) -> pulumi.Output[Optional[str]]:
"""
The name of zone to use as default. It must be provided, but it can also be sourced from the `SAKURACLOUD_ZONE`
environment variables, or via a shared credentials file if `profile` is specified
"""
return pulumi.get(self, "zone")
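As the constructor docstring notes, resources normally use the package-wide configuration, but an explicit Provider can be created and passed through ResourceOptions for per-resource control. A minimal sketch under that assumption; the credential values are placeholders and the Server resource name is only illustrative (any pulumi_sakuracloud resource would be passed the provider the same way):

import pulumi
import pulumi_sakuracloud as sakuracloud

# Explicit provider instance overriding the package-wide settings.
# Token/secret/zone values are placeholders; they could equally come from
# the SAKURACLOUD_* environment variables described above.
tk1a = sakuracloud.Provider(
    'tk1a-provider',
    zone='tk1a',
    token='YOUR_ACCESS_TOKEN',
    secret='YOUR_ACCESS_TOKEN_SECRET')

# Pass the provider via ResourceOptions so this resource uses it instead of
# the default configuration ('Server' and its arguments are illustrative).
server = sakuracloud.Server(
    'example-server',
    opts=pulumi.ResourceOptions(provider=tk1a))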
| 56.677305
| 185
| 0.671401
| 4,183
| 31,966
| 4.955295
| 0.055463
| 0.063682
| 0.073331
| 0.028657
| 0.911569
| 0.883008
| 0.834379
| 0.804757
| 0.779043
| 0.741027
| 0
| 0.003837
| 0.241788
| 31,966
| 563
| 186
| 56.777975
| 0.851384
| 0.467465
| 0
| 0.459807
| 1
| 0
| 0.087384
| 0.012797
| 0
| 0
| 0
| 0
| 0
| 1
| 0.151125
| false
| 0.003215
| 0.016077
| 0
| 0.257235
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0 | 7 | e745c1dc9119cca285f7bf13f5c0a5a6f505e50e | 13,213 | py | Python | tests/conftest.py | jlant/gagepy | 049ca393de5ac2e819773d481a5b4329fd82ea04 | ["CC0-1.0"] | null | null | null | tests/conftest.py | jlant/gagepy | 049ca393de5ac2e819773d481a5b4329fd82ea04 | ["CC0-1.0"] | null | null | null | tests/conftest.py | jlant/gagepy | 049ca393de5ac2e819773d481a5b4329fd82ea04 | ["CC0-1.0"] | 1 | 2019-05-17T18:22:31.000Z | 2019-05-17T18:22:31.000Z |
# -*- coding: utf-8 -*-
"""
conftest
~~~~~~~~
Configuration for tests. Contains fixtures, i.e. StringIO representations of
test data files.
:copyright: 2015 by Jeremiah Lant, see AUTHORS
:license: United States Geological Survey (USGS), see LICENSE file
"""
import pytest
import numpy as np
from datetime import datetime, timedelta
from textwrap import dedent
@pytest.fixture(scope="module")
def dates_daily():
dates = np.array([
datetime(2015, 8, 1, 0, 0),
datetime(2015, 8, 2, 0, 0),
datetime(2015, 8, 3, 0, 0),
datetime(2015, 8, 4, 0, 0),
datetime(2015, 8, 5, 0, 0)])
return dates
@pytest.fixture(scope="module")
def dates_instantaneous():
dates = np.array([
datetime(2015, 8, 1, 0, 0),
datetime(2015, 8, 1, 0, 15),
datetime(2015, 8, 1, 0, 30),
datetime(2015, 8, 1, 0, 45),
datetime(2015, 8, 1, 1, 0)])
return dates
@pytest.fixture(scope="module")
def dates_and_values():
dates = np.array([datetime(2015, 1, 1, 0, 0) + timedelta(i) for i in range(11)])
values = np.array([i for i in range(11)])
return dates, values
@pytest.fixture(scope="module")
def dates_shorter():
return np.array([datetime(2015, 1, 3, 0, 0) + timedelta(i) for i in range(11)])
@pytest.fixture(scope="module")
def dates_longer():
return np.array([datetime(2014, 12, 1, 0, 0) + timedelta(i) for i in range(180)])
@pytest.fixture(scope="module")
def usgs_gage_summary_single_parameter():
s = dedent(
"""
Quick Summary
-------------
Gage name: {{ name }}
Start date: {{ start_date }}
End date: {{ end_date }}
Number of dates/values: {{ num_vals }}
Number of parameters: {{ num_params }}
Parameters
----------
{% for parameter in parameters %}
{{ parameter.name }} ({{ parameter.units }})
Mean: {{ parameter.mean() }}
Max: {{ parameter.max() }} on {{ parameter.max_date() }}
Min: {{ parameter.min() }} on {{ parameter.min_date() }}
{% endfor %}
"""
)
return s
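# Illustrative note (not part of the original conftest): the summary text above
# uses Jinja2-style placeholders, so a test receiving this fixture could render
# it with jinja2, for example:
#
#     from jinja2 import Template
#     text = Template(usgs_gage_summary_single_parameter).render(
#         name='KENTUCKY RIVER AT LOCK 2 AT LOCKPORT, KY',
#         start_date='2015-08-01', end_date='2015-08-05',
#         num_vals=5, num_params=1, parameters=[])
#
# The renderer and the example values here are assumptions for illustration.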
@pytest.fixture(scope="module")
def daily_value_file_single_parameter():
return \
"""
# ---------------------------------- WARNING ----------------------------------------
# The data you have obtained from this automated U.S. Geological Survey database
# have not received Director's approval and as such are provisional and subject to
# revision. The data are released on the condition that neither the USGS nor the
# United States Government may be held liable for any damages resulting from its use.
# Additional info: http://waterdata.usgs.gov/nwis/help/?provisional
#
# File-format description: http://waterdata.usgs.gov/nwis/?tab_delimited_format_info
# Automated-retrieval info: http://waterdata.usgs.gov/nwis/?automated_retrieval_info
#
# Contact: gs-w_support_nwisweb@usgs.gov
# retrieved: 2015-08-02 22:08:51 EDT (sdww01)
#
# Data for the following 1 site(s) are contained in this file
# USGS 03290500 KENTUCKY RIVER AT LOCK 2 AT LOCKPORT, KY
# -----------------------------------------------------------------------------------
#
# Data provided for site 03290500
# DD parameter statistic Description
# 06 00060 00003 Discharge, cubic feet per second (Mean)
#
# Data-value qualification codes included in this output:
# A Approved for publication -- Processing and review completed.
# P Provisional data subject to revision.
# e Value has been estimated.
#
agency_cd site_no datetime 06_00060_00003 06_00060_00003_cd
5s 15s 20d 14n 10s
USGS 03290500 2015-08-01 100 A
USGS 03290500 2015-08-02 110 A
USGS 03290500 2015-08-03 105 A
USGS 03290500 2015-08-04 107 A
USGS 03290500 2015-08-05 112 A
"""
@pytest.fixture(scope="module")
def instantaneous_value_file_single_parameter():
return \
"""
# ---------------------------------- WARNING ----------------------------------------
# The data you have obtained from this automated U.S. Geological Survey database
# have not received Director's approval and as such are provisional and subject to
# revision. The data are released on the condition that neither the USGS nor the
# United States Government may be held liable for any damages resulting from its use.
# Additional info: http://nwis.waterdata.usgs.gov/ca/nwis/?provisional
#
# File-format description: http://nwis.waterdata.usgs.gov/nwis/?tab_delimited_format_info
# Automated-retrieval info: http://nwis.waterdata.usgs.gov/nwis/?automated_retrieval_info
#
# Contact: gs-w_support_nwisweb@usgs.gov
# retrieved: 2015-08-13 17:19:26 EDT (nadww01)
#
# Data for the following 1 site(s) are contained in this file
# USGS 11143000 BIG SUR R NR BIG SUR CA
# -----------------------------------------------------------------------------------
#
# Data provided for site 11143000
# DD parameter Description
# 03 00065 Gage height, feet
#
# Data-value qualification codes included in this output:
# A Approved for publication -- Processing and review completed.
# P Provisional data subject to revision.
#
agency_cd site_no datetime tz_cd 03_00065 03_00065_cd
5s 15s 20d 6s 14n 10s
USGS 11143000 2015-08-01 00:00 PST 5.0 A
USGS 11143000 2015-08-01 00:15 PST 10.0 A
USGS 11143000 2015-08-01 00:30 PST 15.0 A
USGS 11143000 2015-08-01 00:45 PST 4.5 A
USGS 11143000 2015-08-01 01:00 PST 5.5 A
"""
@pytest.fixture(scope="module")
def instantaneous_value_file_multi_parameter():
return \
"""
# ---------------------------------- WARNING ----------------------------------------
# The data you have obtained from this automated U.S. Geological Survey database
# have not received Director's approval and as such are provisional and subject to
# revision. The data are released on the condition that neither the USGS nor the
# United States Government may be held liable for any damages resulting from its use.
# Additional info: http://nwis.waterdata.usgs.gov/ky/nwis/?provisional
#
# File-format description: http://nwis.waterdata.usgs.gov/nwis/?tab_delimited_format_info
# Automated-retrieval info: http://nwis.waterdata.usgs.gov/nwis/?automated_retrieval_info
#
# Contact: gs-w_support_nwisweb@usgs.gov
# retrieved: 2015-08-11 08:40:40 EDT (nadww01)
#
# Data for the following 1 site(s) are contained in this file
# USGS 03401385 DAVIS BRANCH AT HIGHWAY 988 NEAR MIDDLESBORO, KY
# -----------------------------------------------------------------------------------
#
# Data provided for site 03401385
# DD parameter Description
# 02 00065 Gage height, feet
# 03 00010 Temperature, water, degrees Celsius
# 04 00300 Dissolved oxygen, water, unfiltered, milligrams per liter
#
# Data-value qualification codes included in this output:
# Eqp Equipment malfunction
# P Provisional data subject to revision.
# ~ Value is a system interpolated value.
#
agency_cd site_no datetime tz_cd 02_00065 02_00065_cd 03_00010 03_00010_cd 04_00300 04_00300_cd
5s 15s 20d 6s 14n 10s 14n 10s 14n 10s 14n 10s 14n 10s 14n 10s
USGS 03401385 2015-08-01 00:00 EDT 1.0 P 5.0 P 2.0 P
USGS 03401385 2015-08-01 00:15 EDT 2.0 P 10.0 P 1.25 P
USGS 03401385 2015-08-01 00:30 EDT 3.0 P 15.0 P 1.20 P
USGS 03401385 2015-08-01 00:45 EDT 4.0 P 20.0 P 0.50 P
USGS 03401385 2015-08-01 01:00 EDT 5.0 P 25.0 P 0.75 P
"""
@pytest.fixture(scope="module")
def daily_value_file_single_parameter_bad_formatting():
return \
"""
# ---------------------------------- WARNING ----------------------------------------
# The data you have obtained from this automated U.S. Geological Survey database
# have not received Director's approval and as such are provisional and subject to
# revision. The data are released on the condition that neither the USGS nor the
# United States Government may be held liable for any damages resulting from its use.
# Additional info: http://waterdata.usgs.gov/nwis/help/?provisional
#
# File-format description: http://waterdata.usgs.gov/nwis/?tab_delimited_format_info
# Automated-retrieval info: http://waterdata.usgs.gov/nwis/?automated_retrieval_info
#
# Contact: gs-w_support_nwisweb@usgs.gov
# retrieved: 2015-08-02 22:08:51 EDT (sdww01)
#
# Data for the following 1 site(s) are contained in this file
# USGS 03290500 KENTUCKY RIVER AT LOCK 2 AT LOCKPORT, KY
# -----------------------------------------------------------------------------------
#
# Data provided for site 03290500
# DD parameter statistic Description
# 06 00060 00003 Discharge, cubic feet per second (Mean)
#
# Data-value qualification codes included in this output:
# A Approved for publication -- Processing and review completed.
# P Provisional data subject to revision.
# e Value has been estimated.
#
agency_cd site_no datetime 06_00060_00003 06_00060_00003_cd
5s 15s 20d 14n 10s
USGS 03290500 2015-08-01 10_ A
USGS 03290500 2015-08-02 20* A
USGS 03290500 2015-08-03 $30 A
USGS 03290500 2015-08-04 #40_ A
USGS 03290500 2015-08-05 ~50& A
"""
@pytest.fixture(scope="module")
def instantaneous_value_file_single_parameter_missing_data():
return \
"""
# ---------------------------------- WARNING ----------------------------------------
# The data you have obtained from this automated U.S. Geological Survey database
# have not received Director's approval and as such are provisional and subject to
# revision. The data are released on the condition that neither the USGS nor the
# United States Government may be held liable for any damages resulting from its use.
# Additional info: http://nwis.waterdata.usgs.gov/ca/nwis/?provisional
#
# File-format description: http://nwis.waterdata.usgs.gov/nwis/?tab_delimited_format_info
# Automated-retrieval info: http://nwis.waterdata.usgs.gov/nwis/?automated_retrieval_info
#
# Contact: gs-w_support_nwisweb@usgs.gov
# retrieved: 2015-08-13 17:19:26 EDT (nadww01)
#
# Data for the following 1 site(s) are contained in this file
# USGS 11143000 BIG SUR R NR BIG SUR CA
# -----------------------------------------------------------------------------------
#
# Data provided for site 11143000
# DD parameter Description
# 03 00065 Gage height, feet
#
# Data-value qualification codes included in this output:
# A Approved for publication -- Processing and review completed.
# P Provisional data subject to revision.
#
agency_cd site_no datetime tz_cd 03_00065 03_00065_cd
5s 15s 20d 6s 14n 10s
USGS 11143000 2015-08-01 00:00 PST 5.0 A
USGS 11143000 2015-08-01 00:15 PST 10.0 A
USGS 11143000 2015-08-01 00:30 PST A
USGS 11143000 2015-08-01 00:45 PST A
USGS 11143000 2015-08-01 01:00 PST 5.5 A
"""
@pytest.fixture(scope="module")
def instantaneous_value_file_single_parameter_bad_characters():
return \
"""
# ---------------------------------- WARNING ----------------------------------------
# The data you have obtained from this automated U.S. Geological Survey database
# have not received Director's approval and as such are provisional and subject to
# revision. The data are released on the condition that neither the USGS nor the
# United States Government may be held liable for any damages resulting from its use.
# Additional info: http://nwis.waterdata.usgs.gov/ca/nwis/?provisional
#
# File-format description: http://nwis.waterdata.usgs.gov/nwis/?tab_delimited_format_info
# Automated-retrieval info: http://nwis.waterdata.usgs.gov/nwis/?automated_retrieval_info
#
# Contact: gs-w_support_nwisweb@usgs.gov
# retrieved: 2015-08-13 17:19:26 EDT (nadww01)
#
# Data for the following 1 site(s) are contained in this file
# USGS 11143000 BIG SUR R NR BIG SUR CA
# -----------------------------------------------------------------------------------
#
# Data provided for site 11143000
# DD parameter Description
# 03 00065 Gage height, feet
#
# Data-value qualification codes included in this output:
# A Approved for publication -- Processing and review completed.
# P Provisional data subject to revision.
#
agency_cd site_no datetime tz_cd 03_00065 03_00065_cd
5s 15s 20d 6s 14n 10s
USGS 11143000 2015-08-01 00:00 PST Ice A
USGS 11143000 2015-08-01 00:15 PST Ice A
USGS 11143000 2015-08-01 00:30 PST * A
USGS 11143000 2015-08-01 00:45 PST * A
USGS 11143000 2015-08-01 01:00 PST 50.0 A
"""
| 40.780864
| 99
| 0.617952
| 1,796
| 13,213
| 4.471604
| 0.141425
| 0.026896
| 0.021915
| 0.019923
| 0.859046
| 0.835637
| 0.791558
| 0.778234
| 0.772258
| 0.744241
| 0
| 0.122385
| 0.229471
| 13,213
| 323
| 100
| 40.907121
| 0.666437
| 0.018921
| 0
| 0.436364
| 0
| 0
| 0.037133
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.218182
| false
| 0
| 0.072727
| 0.145455
| 0.490909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0 | 8 | e765e668c1e3acbb83935a5a55e53a137d4ea4fc | 215 | py | Python | EstruturaSequencial/01_Alo_mundo.py | FlavioFMBorges/exercicios_Python_Brasil | aa28afcb0d8a1f4b732e146a151ee25bb3ee25fc | ["MIT"] | null | null | null | EstruturaSequencial/01_Alo_mundo.py | FlavioFMBorges/exercicios_Python_Brasil | aa28afcb0d8a1f4b732e146a151ee25bb3ee25fc | ["MIT"] | 5 | 2021-08-16T00:26:31.000Z | 2021-09-03T15:21:01.000Z | EstruturaSequencial/01_Alo_mundo.py | FlavioFMBorges/exercicios_Python_Brasil | aa28afcb0d8a1f4b732e146a151ee25bb3ee25fc | ["MIT"] | null | null | null |
def alo_mundo():
"""
https://wiki.python.org.br/ListaDeExercicios
Write a program that shows the message "Alo mundo" on the screen.
:return: String
"""
return 'Alo Mundo'
print(alo_mundo())
| 23.888889
| 63
| 0.651163
| 29
| 215
| 4.758621
| 0.724138
| 0.231884
| 0.188406
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.213953
| 215
| 9
| 64
| 23.888889
| 0.816568
| 0.55814
| 0
| 0
| 0
| 0
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0 | 7 | e78cec25a9fec1d769033ae86be6c3b1be6db7dc | 78,834 | py | Python | Step3/01_dwarf_exp_disassembler_v3.py | Aterwyn/SSTIC2019 | d7fcddd5b223663910ec35aa1d419f0bc636e701 | ["MIT"] | null | null | null | Step3/01_dwarf_exp_disassembler_v3.py | Aterwyn/SSTIC2019 | d7fcddd5b223663910ec35aa1d419f0bc636e701 | ["MIT"] | null | null | null | Step3/01_dwarf_exp_disassembler_v3.py | Aterwyn/SSTIC2019 | d7fcddd5b223663910ec35aa1d419f0bc636e701 | ["MIT"] | null | null | null |
to_decode = [111,8,168,34,6,8,8,34,6,18,6,22,8,8,34,18,6,22,8,8,34,18,6,22,8,8,34,18,6,22,8,8,34,148,1,40,68,0,21,3,21,3,47,73,0,14,36,28,142,142,166,2,131,101,39,22,14,71,83,46,97,241,100,117,220,39,34,22,14,19,198,110,168,116,155,198,217,39,34,22,14,213,174,106,231,54,11,133,101,39,34,40,12,0,14,152,48,64,0,0,0,0,0,47,255,127,14,184,48,64,0,0,0,0,0,47,255,127,48,23,48,21,5,21,5,47,51,0,21,4,21,4,47,202,0,23,23,19,23,23,19,49,21,2,21,2,47,30,0,21,7,21,7,47,181,0,23,23,21,3,22,21,5,49,34,18,52,28,40,204,255,19,21,3,21,3,47,122,255,52,23,18,14,255,255,255,255,0,0,0,0,26,22,14,0,0,0,0,255,255,255,255,26,8,32,37,23,23,18,14,255,255,255,255,0,0,0,0,26,22,14,0,0,0,0,255,255,255,255,26,8,32,37,18,23,21,4,34,39,12,255,255,255,255,26,22,21,3,26,22,18,21,4,22,28,12,255,255,255,255,26,22,21,3,8,255,26,8,2,36,14,180,6,64,0,0,0,0,0,34,148,4,34,12,255,255,255,255,26,18,21,4,8,8,37,39,23,23,22,8,32,36,33,23,22,8,32,36,33,22,23,23,19,23,23,19,23,23,8,1,28,18,40,111,255,19,21,2,40,3,0,47,50,255,47,68,255,22,48,23,21,2,21,5,21,5,23,23,23,48,23,21,2,47,160,1,23,23,18,21,4,28,40,3,0,47,5,0,49,34,47,233,255,19,23,23,19,22,21,3,48,22,18,14,255,255,255,255,0,0,0,0,26,22,14,0,0,0,0,255,255,255,255,26,8,32,37,47,245,0,21,4,18,14,255,255,255,255,0,0,0,0,26,22,14,0,0,0,0,255,255,255,255,26,8,32,37,47,112,0,8,32,36,33,39,23,23,19,18,23,49,22,18,14,255,255,255,255,0,0,0,0,26,22,14,0,0,0,0,255,255,255,255,26,8,32,37,47,175,0,21,4,18,14,255,255,255,255,0,0,0,0,26,22,14,0,0,0,0,255,255,255,255,26,8,32,37,47,95,0,8,32,36,33,39,23,23,19,23,23,49,34,23,22,21,2,63,28,40,65,255,23,23,19,23,23,19,23,23,19,22,23,23,40,3,0,47,98,254,47,116,254,21,2,39,22,21,3,39,18,8,4,36,22,8,28,37,33,12,255,255,255,255,26,21,2,39,22,18,8,18,37,22,8,14,36,33,12,255,255,255,255,26,21,3,39,23,23,19,23,23,19,47,91,255,22,21,2,39,18,8,26,36,22,8,6,37,33,12,255,255,255,255,26,22,21,3,39,21,2,39,18,8,14,37,22,8,18,36,33,12,255,255,255,255,26,23,23,19,23,23,19,47,111,255,18,23,48,18,50,30,52,30,14,72,6,64,0,0,0,0,0,34,148,4,22,23,34,12,255,255,255,255,26,22,23,39,21,1,50,30,49,34,52,30,14,72,6,64,0,0,0,0,0,34,6,22,18,23,33,21,3,39,8,32,36,33,23,23,19,18,14,255,255,255,255,0,0,0,0,26,22,14,0,0,0,0,255,255,255,255,26,8,32,37,23,21,2,23,23,49,34,18,54,28,40,156,255,19,19,22,23,23,40,3,0,47,153,254,47,220,254,23,22,18,14,255,255,255,255,0,0,0,0,26,22,14,0,0,0,0,255,255,255,255,26,8,32,37,23,23,18,14,255,255,255,255,0,0,0,0,26,22,14,0,0,0,0,255,255,255,255,26,8,32,37,21,4,52,30,14,120,6,64,0,0,0,0,0,34,148,4,21,4,39,21,3,12,50,101,120,69,34,12,255,255,255,255,26,18,21,4,39,21,3,18,8,4,36,22,8,28,37,33,12,255,255,255,255,26,23,23,18,21,4,22,28,12,255,255,255,255,26,21,3,12,0,0,0,128,26,40,8,0,12,73,101,36,23,47,5,0,12,23,50,101,132,47,197,4,21,2,39,21,3,39,21,4,21,4,8,32,36,33,23,8,32,36,33,23,23,19,23,23,19,23,23,19,23,23,19,23,23,19,23,23,19,23,23,19,23,23,19,22,23,23,19,47,154,253,150,150,150,150,150,150,150,150,150,150,150,150,150,150,150,150,150,150,150,150,150,150,150,150,150,150,150,150,150,150,0,0,0,0,0,0,0,0,222,221,157,72,241,144,121,6,169,116,191,149,231,30,148,119,227,128,109,14,139,175,237,45,66,205,146,251,192,103,232,208,251,163,179,242,71,206,57,108,224,153,79,231,33,242,36,90,234,143,55,214,196,168,60,226,188,177,227,132,191,16,94,206,218,100,179,162,240,80,242,65,64,112,233,15,102,82,192,28,75,126,248,22,183,38,94,81,203,141,164,238,228,87,179,98,65,32,189,57,122,56,205,114,139,172,122,243,155,179,99,89,221,90,247,48,188,236,63,16,122,46,57,0,223,122,223,53,19,189,154,225,92,179,248,218,20,130,121,248,5,35,12,179,233,128,121,6,64,217,0,105
,111,135,94,3,20,112,133,163,98,225,200,86,86,143,116,233,9,228,212,145,9,90,199,220,47,245,101,172,7,221,113,133,246,220,158,1,235,249,206,81,125,177,177,30,111,68,190,10,254,124,39,59,188,105,56,132,152,162,62,178,81,111,41,124,114,153,121,205,100,10,24,98,213,82,192,10,5,98,7,240,147,49,24,19,148,203,8,185,60,205,244,75,138,212,229,222,116,154,79,246,208,21,74,214,52,212,202,178,59,1,233,100,210,193,108,244,219,233,120,157,22,146,120,17,230,137,86,51,219,126,76,7,133,97,235,230,11,23,32,208,21,170,4,243,158,182,242,188,234,210,179,78,92,77,141,215,196,191,210,126,243,240,235,88,63,28,89,139,107,31,4,211,136,174,92,0,92,111,105,186,222,142,108,193,39,187,188,154,119,141,215,86,32,62,91,118,45,33,55,207,207,45,46,25,6,40,175,140,91,87,154,188,206,33,100,119,235,185,127,82,64,67,248,105,215,123,188,173,41,195,242,115,127,138,127,115,228,211,1,227,178,190,126,5,88,184,89,88,121,25,196,44,57,166,105,236,35,213,176,83,50,245,162,57,93,227,41,139,129,206,226,68,109,161,16,204,88,255,217,68,20,44,16,119,125,129,87,251,200,200,247,60,138,134,34,18,209,213,115,65,50,238,41,53,142,245,157,122,172,37,53,81,172,75,149,129,245,204,83,206,40,135,22,121,248,96,214,162,206,201,12,243,118,156,184,240,169,179,159,8,168,219,25,201,195,77,158,31,12,78,89,162,120,225,255,52,251,20,68,176,58,179,31,211,120,2,77,24,154,106,129,44,242,242,147,185,28,96,216,228,222,238,226,73,225,12,213,156,119,30,62,192,158,134,1,169,80,222,121,117,171,196,106,114,64,72,208,56,112,18,190,234,45,129,64,140,183,118,73,232,173,4,43,23,196,6,102,117,145,132,37,102,248,190,160,181,243,92,204,107,148,174,53,165,170,135,122,201,246,168,3,145,95,158,59,204,31,190,32,187,151,239,207,255,22,79,149,144,166,225,26,80,205,137,213,110,43,176,38,104,99,242,95,86,155,54,140,142,122,190,144,105,64,184,37,53,187,215,71,24,199,64,90,53,16,159,87,163,174,236,237,233,177,122,51,208,186,229,85,99,85,83,151,136,207,243,192,94,61,33,214,160,135,147,56,117,240,22,2,228,224,204,128,217,124,198,136,108,156,65,157,130,27,69,246,59,250,123,240,17,78,21,193,196,235,53,7,189,157,223,248,156,117,190,87,228,24,189,166,99,211,127,231,239,99,27,66,131,45,7,131,127,97,15,148,68,247,205,189,248,202,2,200,97,168,249,48,10,55,59,240,127,169,197,108,162,13,87,14,225,22,12,234,149,2,107,94,160,132,83,29,200,5,219,133,119,95,76,200,146,23,70,88,5,141,254,188,130,218,161,158,85,176,205,213,79,237,31,135,157,57,85,111,221,38,239,209,78,196,19,104,254,213,113,250,28,234,58,97,213,140,155,28,15,93,196,202,43,65,15,208,101,216,224,155,104,0,17,176,104,128,210,91,99,75,93,77,149,121,127,136,114,117,122,2,206,102,28,240,252,211,27,106,0,142,28,154,25,37,238,214,135,8,159,142,147,77,29,161,216,129,77,154,43,229,210,245,182,90,50,92,209,193,175,234,100,28,182,51,253,87,189,193,67,72,240,184,55,242,124,186,92,208,12,129,114,84,244,254,171,186,132,99,167,64,20,134,216,55,88,222,54,241,3,106,15,161,143,212,16,47,236,131,88,155,12,192,168,164,254,143,97,6,162,93,160,122,233,185,255,129,103,55,138,121,180,86,49,205,94,175,228,111,224,217,135,89,212,212,180,37,125,154,235,170,254,223,89,83,245,139,220,58,60,206,109,14,151,98,33,157,146,201,232,244,155,58,108,146,83,218,69,176,231,238,156,235,212,104,63,79,67,41,205,18,247,77,14,154,198,168,177,70,15,25,28,60,135,69,43,201,223,175,70,63,136,232,97,199,24,145,151,177,145,249,112,77,96,130,31,143,244,139,193,255,244,39,179,8,117,154,81,13,155,97,250,144,20,141,38,194,55,126,86,145,118,160,37,192,89,67,66,83,12,50,19,253,66,247,239,186,69,185,72,17,231,168,207,25,181,95,143,165,50,115,43,124,118,170,16,192,12,159,104,113,165,28,12,255,255
,255,255,26,12,24,0,3,255,34,12,255,255,255,255,26,18,8,2,37,28,12,255,255,255,255,26,18,8,6,36,22,8,26,37,33,12,255,255,255,255,26,18,18,8,2,37,22,8,30,36,33,12,255,255,255,255,26,39,18,8,9,36,34,12,255,255,255,255,26,18,8,7,36,28,12,255,255,255,255,26,18,18,8,5,36,22,8,27,37,33,12,255,255,255,255,26,34,12,255,255,255,255,26,18,18,8,9,36,22,8,23,37,33,12,255,255,255,255,26,39,18,8,3,36,39,18,18,8,5,36,22,8,27,37,33,12,255,255,255,255,26,34,12,255,255,255,255,26,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,34,12,255,255,255,255,26,18,8,3,37,39,18,18,8,2,36,22,8,30,37,33,12,255,255,255,255,26,28,12,255,255,255,255,26,18,8,9,36,34,12,255,255,255,255,26,18,18,8,9,36,22,8,23,37,33,12,255,255,255,255,26,39,18,8,3,37,39,18,8,3,37,22,8,29,36,33,12,255,255,255,255,26,18,8,6,36,22,8,26,37,33,12,255,255,255,255,26,18,8,9,36,34,12,255,255,255,255,26,18,8,3,37,22,8,29,36,33,12,255,255,255,255,26,18,8,6,36,22,8,26,37,33,12,255,255,255,255,26,18,8,3,37,39,18,18,8,9,36,22,8,23,37,33,12,255,255,255,255,26,39,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,28,12,255,255,255,255,26,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,34,12,255,255,255,255,26,12,159,104,113,165,28,12,255,255,255,255,26,18,8,10,37,34,12,255,255,255,255,26,18,8,3,37,22,8,29,36,33,12,255,255,255,255,26,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,28,12,255,255,255,255,26,18,18,8,5,36,22,8,27,37,33,12,255,255,255,255,26,34,12,255,255,255,255,26,12,24,0,3,255,34,12,255,255,255,255,26,12,23,85,16,210,39,18,18,8,5,36,22,8,27,37,33,12,255,255,255,255,26,34,12,255,255,255,255,26,12,23,85,16,210,39,18,8,6,36,22,8,26,37,33,12,255,255,255,255,26,12,159,104,113,165,28,12,255,255,255,255,26,18,8,6,36,22,8,26,37,33,12,255,255,255,255,26,18,8,6,36,22,8,26,37,33,12,255,255,255,255,26,18,8,2,37,28,12,255,255,255,255,26,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,34,12,255,255,255,255,26,12,159,104,113,165,28,12,255,255,255,255,26,18,8,9,36,34,12,255,255,255,255,26,18,8,2,37,28,12,255,255,255,255,26,18,8,7,36,28,12,255,255,255,255,26,18,8,3,36,39,12,159,104,113,165,28,12,255,255,255,255,26,18,8,10,37,34,12,255,255,255,255,26,12,159,104,113,165,28,12,255,255,255,255,26,18,18,8,2,36,22,8,30,37,33,12,255,255,255,255,26,28,12,255,255,255,255,26,18,54,22,12,23,85,16,210,39,18,18,8,9,36,22,8,23,37,33,12,255,255,255,255,26,39,18,18,8,2,36,22,8,30,37,33,12,255,255,255,255,26,28,12,255,255,255,255,26,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,34,12,255,255,255,255,26,18,8,10,37,34,12,255,255,255,255,26,18,8,3,37,39,18,8,3,37,22,8,29,36,33,12,255,255,255,255,26,18,8,6,36,22,8,26,37,33,12,255,255,255,255,26,12,159,104,113,165,28,12,255,255,255,255,26,18,18,8,5,36,22,8,27,37,33,12,255,255,255,255,26,34,12,255,255,255,255,26,12,159,104,113,165,28,12,255,255,255,255,26,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,34,12,255,255,255,255,26,12,159,104,113,165,28,12,255,255,255,255,26,12,24,0,3,255,34,12,255,255,255,255,26,18,8,3,37,39,12,24,0,3,255,34,12,255,255,255,255,26,18,8,3,37,22,8,29,36,33,12,255,255,255,255,26,18,8,3,36,39,18,8,10,37,34,12,255,255,255,255,26,18,8,3,36,39,22,49,28,18,40,242,254,19,39,18,8,2,37,28,12,255,255,255,255,26,12,159,104,113,165,28,12,255,255,255,255,26,12,24,0,3,255,34,12,255,255,255,255,26,18,8,7,36,28,12,255,255,255,255,26,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,28,12,255,255,255,255,26,18,8,6,36,22,8,26,37,33,12,255,255,255,255,26,12,159,104,113,165,28,12,255,255,255,255,26,18,18,8,5,36,22,8,27,37,33,12,255,255,255,255,26,34,12,255,255,255,255,26,18,8,7,36,28,12,255,255,255,255,26,18,18,8,6,37,22,
8,26,36,33,12,255,255,255,255,26,34,12,255,255,255,255,26,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,28,12,255,255,255,255,26,18,8,6,36,22,8,26,37,33,12,255,255,255,255,26,18,18,8,5,36,22,8,27,37,33,12,255,255,255,255,26,34,12,255,255,255,255,26,18,8,3,37,22,8,29,36,33,12,255,255,255,255,26,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,34,12,255,255,255,255,26,18,8,9,36,34,12,255,255,255,255,26,18,8,3,36,39,18,8,6,36,22,8,26,37,33,12,255,255,255,255,26,12,23,85,16,210,39,18,18,8,9,36,22,8,23,37,33,12,255,255,255,255,26,39,18,8,9,36,34,12,255,255,255,255,26,18,8,10,37,34,12,255,255,255,255,26,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,28,12,255,255,255,255,26,18,8,6,36,22,8,26,37,33,12,255,255,255,255,26,12,24,0,3,255,34,12,255,255,255,255,26,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,34,12,255,255,255,255,26,12,159,104,113,165,28,12,255,255,255,255,26,18,8,3,36,39,18,18,8,5,36,22,8,27,37,33,12,255,255,255,255,26,34,12,255,255,255,255,26,18,18,8,2,37,22,8,30,36,33,12,255,255,255,255,26,39,18,18,8,2,37,22,8,30,36,33,12,255,255,255,255,26,39,18,8,6,36,22,8,26,37,33,12,255,255,255,255,26,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,28,12,255,255,255,255,26,18,8,3,37,39,18,8,2,37,28,12,255,255,255,255,26,12,24,0,3,255,34,12,255,255,255,255,26,18,8,2,37,28,12,255,255,255,255,26,18,18,8,2,36,22,8,30,37,33,12,255,255,255,255,26,28,12,255,255,255,255,26,18,8,10,37,34,12,255,255,255,255,26,18,8,2,37,28,12,255,255,255,255,26,18,8,7,36,28,12,255,255,255,255,26,18,18,8,9,36,22,8,23,37,33,12,255,255,255,255,26,39,18,8,10,37,34,12,255,255,255,255,26,18,8,9,36,34,12,255,255,255,255,26,12,23,85,16,210,39,18,18,8,9,36,22,8,23,37,33,12,255,255,255,255,26,39,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,28,12,255,255,255,255,26,12,159,104,113,165,28,12,255,255,255,255,26,18,8,3,37,22,8,29,36,33,12,255,255,255,255,26,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,34,12,255,255,255,255,26,18,8,6,36,22,8,26,37,33,12,255,255,255,255,26,18,18,8,9,36,22,8,23,37,33,12,255,255,255,255,26,39,12,23,85,16,210,39,18,8,3,36,39,18,8,6,36,22,8,26,37,33,12,255,255,255,255,26,18,8,7,36,28,12,255,255,255,255,26,18,18,8,2,36,22,8,30,37,33,12,255,255,255,255,26,28,12,255,255,255,255,26,18,18,8,9,36,22,8,23,37,33,12,255,255,255,255,26,39,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,34,12,255,255,255,255,26,18,8,2,37,28,12,255,255,255,255,26,18,8,3,36,39,18,8,3,37,39,18,8,3,37,39,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,28,12,255,255,255,255,26,18,8,3,37,22,8,29,36,33,12,255,255,255,255,26,18,8,3,36,39,18,8,6,36,22,8,26,37,33,12,255,255,255,255,26,18,8,2,37,28,12,255,255,255,255,26,18,18,8,9,36,22,8,23,37,33,12,255,255,255,255,26,39,12,159,104,113,165,28,12,255,255,255,255,26,18,18,8,2,36,22,8,30,37,33,12,255,255,255,255,26,28,12,255,255,255,255,26,18,8,2,37,28,12,255,255,255,255,26,18,8,10,37,34,12,255,255,255,255,26,18,18,8,2,37,22,8,30,36,33,12,255,255,255,255,26,39,18,8,9,36,34,12,255,255,255,255,26,18,8,7,36,28,12,255,255,255,255,26,18,8,6,36,22,8,26,37,33,12,255,255,255,255,26,18,8,6,36,22,8,26,37,33,12,255,255,255,255,26,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,34,12,255,255,255,255,26,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,34,12,255,255,255,255,26,18,18,8,9,36,22,8,23,37,33,12,255,255,255,255,26,39,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,34,12,255,255,255,255,26,18,8,3,36,39,18,8,9,36,34,12,255,255,255,255,26,18,8,3,36,39,18,8,3,37,39,18,8,9,36,34,12,255,255,255,255,26,18,18,8,5,36,22,8,27,37,33,12,255,255,255,255,26,34,12,255,255,255,255,26,18,8,6,36,22,8
,26,37,33,12,255,255,255,255,26,18,8,3,37,39,18,8,9,36,34,12,255,255,255,255,26,18,18,8,2,37,22,8,30,36,33,12,255,255,255,255,26,39,12,23,85,16,210,39,18,8,3,37,39,18,8,3,36,39,12,24,0,3,255,34,12,255,255,255,255,26,18,18,8,5,36,22,8,27,37,33,12,255,255,255,255,26,34,12,255,255,255,255,26,18,8,6,36,22,8,26,37,33,12,255,255,255,255,26,18,8,2,37,28,12,255,255,255,255,26,18,8,10,37,34,12,255,255,255,255,26,18,8,10,37,34,12,255,255,255,255,26,12,159,104,113,165,28,12,255,255,255,255,26,18,18,8,5,36,22,8,27,37,33,12,255,255,255,255,26,34,12,255,255,255,255,26,18,8,6,36,22,8,26,37,33,12,255,255,255,255,26,18,18,8,2,36,22,8,30,37,33,12,255,255,255,255,26,28,12,255,255,255,255,26,18,18,8,5,36,22,8,27,37,33,12,255,255,255,255,26,34,12,255,255,255,255,26,12,23,85,16,210,39,18,8,7,36,28,12,255,255,255,255,26,18,8,6,36,22,8,26,37,33,12,255,255,255,255,26,18,18,8,5,36,22,8,27,37,33,12,255,255,255,255,26,34,12,255,255,255,255,26,18,8,7,36,28,12,255,255,255,255,26,18,8,3,37,39,12,23,85,16,210,39,18,8,3,37,39,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,28,12,255,255,255,255,26,18,8,3,37,22,8,29,36,33,12,255,255,255,255,26,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,34,12,255,255,255,255,26,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,28,12,255,255,255,255,26,12,23,85,16,210,39,18,18,8,2,37,22,8,30,36,33,12,255,255,255,255,26,39,12,159,104,113,165,28,12,255,255,255,255,26,18,8,10,37,34,12,255,255,255,255,26,18,8,3,36,39,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,28,12,255,255,255,255,26,18,8,6,36,22,8,26,37,33,12,255,255,255,255,26,18,18,8,9,36,22,8,23,37,33,12,255,255,255,255,26,39,18,8,2,37,28,12,255,255,255,255,26,18,8,3,37,39,18,18,8,2,36,22,8,30,37,33,12,255,255,255,255,26,28,12,255,255,255,255,26,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,34,12,255,255,255,255,26,12,24,0,3,255,34,12,255,255,255,255,26,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,34,12,255,255,255,255,26,18,18,8,5,36,22,8,27,37,33,12,255,255,255,255,26,34,12,255,255,255,255,26,12,24,0,3,255,34,12,255,255,255,255,26,18,18,8,2,36,22,8,30,37,33,12,255,255,255,255,26,28,12,255,255,255,255,26,18,8,3,37,22,8,29,36,33,12,255,255,255,255,26,18,8,7,36,28,12,255,255,255,255,26,18,8,7,36,28,12,255,255,255,255,26,12,24,0,3,255,34,12,255,255,255,255,26,18,8,9,36,34,12,255,255,255,255,26,18,8,10,37,34,12,255,255,255,255,26,18,18,8,2,37,22,8,30,36,33,12,255,255,255,255,26,39,18,8,7,36,28,12,255,255,255,255,26,18,8,9,36,34,12,255,255,255,255,26,18,18,8,2,37,22,8,30,36,33,12,255,255,255,255,26,39,18,8,9,36,34,12,255,255,255,255,26,12,23,85,16,210,39,18,8,3,36,39,18,8,2,37,28,12,255,255,255,255,26,18,18,8,2,37,22,8,30,36,33,12,255,255,255,255,26,39,18,8,10,37,34,12,255,255,255,255,26,18,8,2,37,28,12,255,255,255,255,26,18,8,9,36,34,12,255,255,255,255,26,12,24,0,3,255,34,12,255,255,255,255,26,18,8,2,37,28,12,255,255,255,255,26,18,8,7,36,28,12,255,255,255,255,26,18,18,8,5,36,22,8,27,37,33,12,255,255,255,255,26,34,12,255,255,255,255,26,18,18,8,9,36,22,8,23,37,33,12,255,255,255,255,26,39,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,34,12,255,255,255,255,26,18,8,3,36,39,12,24,0,3,255,34,12,255,255,255,255,26,12,159,104,113,165,28,12,255,255,255,255,26,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,34,12,255,255,255,255,26,18,8,7,36,28,12,255,255,255,255,26,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,34,12,255,255,255,255,26,18,18,8,2,37,22,8,30,36,33,12,255,255,255,255,26,39,18,8,9,36,34,12,255,255,255,255,26,18,18,8,9,36,22,8,23,37,33,12,255,255,255,255,26,39,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,34,12,255,
255,255,255,26,18,18,8,2,37,22,8,30,36,33,12,255,255,255,255,26,39,18,8,2,37,28,12,255,255,255,255,26,18,18,8,9,36,22,8,23,37,33,12,255,255,255,255,26,39,18,8,3,36,39,18,8,3,36,39,12,159,104,113,165,28,12,255,255,255,255,26,12,24,0,3,255,34,12,255,255,255,255,26,18,8,3,36,39,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,28,12,255,255,255,255,26,18,8,9,36,34,12,255,255,255,255,26,12,23,85,16,210,39,18,8,3,37,22,8,29,36,33,12,255,255,255,255,26,18,18,8,2,36,22,8,30,37,33,12,255,255,255,255,26,28,12,255,255,255,255,26,12,159,104,113,165,28,12,255,255,255,255,26,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,34,12,255,255,255,255,26,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,34,12,255,255,255,255,26,18,8,7,36,28,12,255,255,255,255,26,18,8,3,36,39,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,34,12,255,255,255,255,26,18,18,8,2,36,22,8,30,37,33,12,255,255,255,255,26,28,12,255,255,255,255,26,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,34,12,255,255,255,255,26,18,8,7,36,28,12,255,255,255,255,26,18,8,3,36,39,12,23,85,16,210,39,18,8,2,37,28,12,255,255,255,255,26,12,23,85,16,210,39,12,159,104,113,165,28,12,255,255,255,255,26,18,18,8,6,37,22,8,26,36,33,12,255,255,255,255,26,34,12,255,255,255,255,26,12,23,85,16,210,39,18,8,7,36,28,12,255,255,255,255,26,18,18,8,2,37,22,8,30,36,33,12,255,255,255,255,26,39,47,109,236,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
to_decode_hex = [0x6f,0x08,0xa8,0x22,0x06,0x08,0x08,0x22,0x06,0x12,0x06,0x16,0x08,0x08,0x22,0x12,0x06,0x16,0x08,0x08,0x22,0x12,0x06,0x16,0x08,0x08,0x22,0x12,0x06,0x16,0x08,0x08,0x22,0x94,0x01,0x28,0x44,0x00,0x15,0x03,0x15,0x03,0x2f,0x49,0x00,0x0e,0x24,0x1c,0x8e,0x8e,0xa6,0x02,0x83,0x65,0x27,0x16,0x0e,0x47,0x53,0x2e,0x61,0xf1,0x64,0x75,0xdc,0x27,0x22,0x16,0x0e,0x13,0xc6,0x6e,0xa8,0x74,0x9b,0xc6,0xd9,0x27,0x22,0x16,0x0e,0xd5,0xae,0x6a,0xe7,0x36,0x0b,0x85,0x65,0x27,0x22,0x28,0x0c,0x00,0x0e,0x98,0x30,0x40,0x00,0x00,0x00,0x00,0x00,0x2f,0xff,0x7f,0x0e,0xb8,0x30,0x40,0x00,0x00,0x00,0x00,0x00,0x2f,0xff,0x7f,0x30,0x17,0x30,0x15,0x05,0x15,0x05,0x2f,0x33,0x00,0x15,0x04,0x15,0x04,0x2f,0xca,0x00,0x17,0x17,0x13,0x17,0x17,0x13,0x31,0x15,0x02,0x15,0x02,0x2f,0x1e,0x00,0x15,0x07,0x15,0x07,0x2f,0xb5,0x00,0x17,0x17,0x15,0x03,0x16,0x15,0x05,0x31,0x22,0x12,0x34,0x1c,0x28,0xcc,0xff,0x13,0x15,0x03,0x15,0x03,0x2f,0x7a,0xff,0x34,0x17,0x12,0x0e,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x1a,0x16,0x0e,0x00,0x00,0x00,0x00,0xff,0xff,0xff,0xff,0x1a,0x08,0x20,0x25,0x17,0x17,0x12,0x0e,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x1a,0x16,0x0e,0x00,0x00,0x00,0x00,0xff,0xff,0xff,0xff,0x1a,0x08,0x20,0x25,0x12,0x17,0x15,0x04,0x22,0x27,0x0c,0xff,0xff,0xff,0xff,0x1a,0x16,0x15,0x03,0x1a,0x16,0x12,0x15,0x04,0x16,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x16,0x15,0x03,0x08,0xff,0x1a,0x08,0x02,0x24,0x0e,0xb4,0x06,0x40,0x00,0x00,0x00,0x00,0x00,0x22,0x94,0x04,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x15,0x04,0x08,0x08,0x25,0x27,0x17,0x17,0x16,0x08,0x20,0x24,0x21,0x17,0x16,0x08,0x20,0x24,0x21,0x16,0x17,0x17,0x13,0x17,0x17,0x13,0x17,0x17,0x08,0x01,0x1c,0x12,0x28,0x6f,0xff,0x13,0x15,0x02,0x28,0x03,0x00,0x2f,0x32,0xff,0x2f,0x44,0xff,0x16,0x30,0x17,0x15,0x02,0x15,0x05,0x15,0x05,0x17,0x17,0x17,0x30,0x17,0x15,0x02,0x2f,0xa0,0x01,0x17,0x17,0x12,0x15,0x04,0x1c,0x28,0x03,0x00,0x2f,0x05,0x00,0x31,0x22,0x2f,0xe9,0xff,0x13,0x17,0x17,0x13,0x16,0x15,0x03,0x30,0x16,0x12,0x0e,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x1a,0x16,0x0e,0x00,0x00,0x00,0x00,0xff,0xff,0xff,0xff,0x1a,0x08,0x20,0x25,0x2f,0xf5,0x00,0x15,0x04,0x12,0x0e,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x1a,0x16,0x0e,0x00,0x00,0x00,0x00,0xff,0xff,0xff,0xff,0x1a,0x08,0x20,0x25,0x2f,0x70,0x00,0x08,0x20,0x24,0x21,0x27,0x17,0x17,0x13,0x12,0x17,0x31,0x16,0x12,0x0e,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x1a,0x16,0x0e,0x00,0x00,0x00,0x00,0xff,0xff,0xff,0xff,0x1a,0x08,0x20,0x25,0x2f,0xaf,0x00,0x15,0x04,0x12,0x0e,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x1a,0x16,0x0e,0x00,0x00,0x00,0x00,0xff,0xff,0xff,0xff,0x1a,0x08,0x20,0x25,0x2f,0x5f,0x00,0x08,0x20,0x24,0x21,0x27,0x17,0x17,0x13,0x17,0x17,0x31,0x22,0x17,0x16,0x15,0x02,0x3f,0x1c,0x28,0x41,0xff,0x17,0x17,0x13,0x17,0x17,0x13,0x17,0x17,0x13,0x16,0x17,0x17,0x28,0x03,0x00,0x2f,0x62,0xfe,0x2f,0x74,0xfe,0x15,0x02,0x27,0x16,0x15,0x03,0x27,0x12,0x08,0x04,0x24,0x16,0x08,0x1c,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x15,0x02,0x27,0x16,0x12,0x08,0x12,0x25,0x16,0x08,0x0e,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x15,0x03,0x27,0x17,0x17,0x13,0x17,0x17,0x13,0x2f,0x5b,0xff,0x16,0x15,0x02,0x27,0x12,0x08,0x1a,0x24,0x16,0x08,0x06,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x16,0x15,0x03,0x27,0x15,0x02,0x27,0x12,0x08,0x0e,0x25,0x16,0x08,0x12,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x17,0x17,0x13,0x17,0x17,0x13,0x2f,0x6f,0xff,0x12,0x17,0x30,0x12,0x32,0x1e,0x34,0x1e,0x0e,0x48,0x06,0x40,0x00,0x00,0x00,0x00,0x00,0x22,0x94,0x04,0x16,0x17,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x16,0x17,0x27,0x15,0x01,0x32,0x1e,0x31,0x22,0x34,0x1e,0x0e,0x48,0x06,0x40,0x00,0x00,0x00,0x00,0x00,0x22,0x06,0x16,0x1
2,0x17,0x21,0x15,0x03,0x27,0x08,0x20,0x24,0x21,0x17,0x17,0x13,0x12,0x0e,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x1a,0x16,0x0e,0x00,0x00,0x00,0x00,0xff,0xff,0xff,0xff,0x1a,0x08,0x20,0x25,0x17,0x15,0x02,0x17,0x17,0x31,0x22,0x12,0x36,0x1c,0x28,0x9c,0xff,0x13,0x13,0x16,0x17,0x17,0x28,0x03,0x00,0x2f,0x99,0xfe,0x2f,0xdc,0xfe,0x17,0x16,0x12,0x0e,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x1a,0x16,0x0e,0x00,0x00,0x00,0x00,0xff,0xff,0xff,0xff,0x1a,0x08,0x20,0x25,0x17,0x17,0x12,0x0e,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x1a,0x16,0x0e,0x00,0x00,0x00,0x00,0xff,0xff,0xff,0xff,0x1a,0x08,0x20,0x25,0x15,0x04,0x34,0x1e,0x0e,0x78,0x06,0x40,0x00,0x00,0x00,0x00,0x00,0x22,0x94,0x04,0x15,0x04,0x27,0x15,0x03,0x0c,0x32,0x65,0x78,0x45,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x15,0x04,0x27,0x15,0x03,0x12,0x08,0x04,0x24,0x16,0x08,0x1c,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x17,0x17,0x12,0x15,0x04,0x16,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x15,0x03,0x0c,0x00,0x00,0x00,0x80,0x1a,0x28,0x08,0x00,0x0c,0x49,0x65,0x24,0x17,0x2f,0x05,0x00,0x0c,0x17,0x32,0x65,0x84,0x2f,0xc5,0x04,0x15,0x02,0x27,0x15,0x03,0x27,0x15,0x04,0x15,0x04,0x08,0x20,0x24,0x21,0x17,0x08,0x20,0x24,0x21,0x17,0x17,0x13,0x17,0x17,0x13,0x17,0x17,0x13,0x17,0x17,0x13,0x17,0x17,0x13,0x17,0x17,0x13,0x17,0x17,0x13,0x17,0x17,0x13,0x16,0x17,0x17,0x13,0x2f,0x9a,0xfd,0x96,0x96,0x96,0x96,0x96,0x96,0x96,0x96,0x96,0x96,0x96,0x96,0x96,0x96,0x96,0x96,0x96,0x96,0x96,0x96,0x96,0x96,0x96,0x96,0x96,0x96,0x96,0x96,0x96,0x96,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xde,0xdd,0x9d,0x48,0xf1,0x90,0x79,0x06,0xa9,0x74,0xbf,0x95,0xe7,0x1e,0x94,0x77,0xe3,0x80,0x6d,0x0e,0x8b,0xaf,0xed,0x2d,0x42,0xcd,0x92,0xfb,0xc0,0x67,0xe8,0xd0,0xfb,0xa3,0xb3,0xf2,0x47,0xce,0x39,0x6c,0xe0,0x99,0x4f,0xe7,0x21,0xf2,0x24,0x5a,0xea,0x8f,0x37,0xd6,0xc4,0xa8,0x3c,0xe2,0xbc,0xb1,0xe3,0x84,0xbf,0x10,0x5e,0xce,0xda,0x64,0xb3,0xa2,0xf0,0x50,0xf2,0x41,0x40,0x70,0xe9,0x0f,0x66,0x52,0xc0,0x1c,0x4b,0x7e,0xf8,0x16,0xb7,0x26,0x5e,0x51,0xcb,0x8d,0xa4,0xee,0xe4,0x57,0xb3,0x62,0x41,0x20,0xbd,0x39,0x7a,0x38,0xcd,0x72,0x8b,0xac,0x7a,0xf3,0x9b,0xb3,0x63,0x59,0xdd,0x5a,0xf7,0x30,0xbc,0xec,0x3f,0x10,0x7a,0x2e,0x39,0x00,0xdf,0x7a,0xdf,0x35,0x13,0xbd,0x9a,0xe1,0x5c,0xb3,0xf8,0xda,0x14,0x82,0x79,0xf8,0x05,0x23,0x0c,0xb3,0xe9,0x80,0x79,0x06,0x40,0xd9,0x00,0x69,0x6f,0x87,0x5e,0x03,0x14,0x70,0x85,0xa3,0x62,0xe1,0xc8,0x56,0x56,0x8f,0x74,0xe9,0x09,0xe4,0xd4,0x91,0x09,0x5a,0xc7,0xdc,0x2f,0xf5,0x65,0xac,0x07,0xdd,0x71,0x85,0xf6,0xdc,0x9e,0x01,0xeb,0xf9,0xce,0x51,0x7d,0xb1,0xb1,0x1e,0x6f,0x44,0xbe,0x0a,0xfe,0x7c,0x27,0x3b,0xbc,0x69,0x38,0x84,0x98,0xa2,0x3e,0xb2,0x51,0x6f,0x29,0x7c,0x72,0x99,0x79,0xcd,0x64,0x0a,0x18,0x62,0xd5,0x52,0xc0,0x0a,0x05,0x62,0x07,0xf0,0x93,0x31,0x18,0x13,0x94,0xcb,0x08,0xb9,0x3c,0xcd,0xf4,0x4b,0x8a,0xd4,0xe5,0xde,0x74,0x9a,0x4f,0xf6,0xd0,0x15,0x4a,0xd6,0x34,0xd4,0xca,0xb2,0x3b,0x01,0xe9,0x64,0xd2,0xc1,0x6c,0xf4,0xdb,0xe9,0x78,0x9d,0x16,0x92,0x78,0x11,0xe6,0x89,0x56,0x33,0xdb,0x7e,0x4c,0x07,0x85,0x61,0xeb,0xe6,0x0b,0x17,0x20,0xd0,0x15,0xaa,0x04,0xf3,0x9e,0xb6,0xf2,0xbc,0xea,0xd2,0xb3,0x4e,0x5c,0x4d,0x8d,0xd7,0xc4,0xbf,0xd2,0x7e,0xf3,0xf0,0xeb,0x58,0x3f,0x1c,0x59,0x8b,0x6b,0x1f,0x04,0xd3,0x88,0xae,0x5c,0x00,0x5c,0x6f,0x69,0xba,0xde,0x8e,0x6c,0xc1,0x27,0xbb,0xbc,0x9a,0x77,0x8d,0xd7,0x56,0x20,0x3e,0x5b,0x76,0x2d,0x21,0x37,0xcf,0xcf,0x2d,0x2e,0x19,0x06,0x28,0xaf,0x8c,0x5b,0x57,0x9a,0xbc,0xce,0x21,0x64,0x77,0xeb,0xb9,0x7f,0x52,0x40,0x43,0xf8,0x69,0xd7,0x7b,0xbc,0xad,0x29,0xc3,0xf2,0x73,0x7f,0x8a,0x7f,0x73,0xe4,0xd3,0x01,0xe3,0xb2,0xbe,0x7e,0x05,0x58,0xb8,0x59,0x58,0x79,0x19,0xc4,0x2c,0x39,0xa6,0x69,0xec,0x23,0xd5,0xb
0,0x53,0x32,0xf5,0xa2,0x39,0x5d,0xe3,0x29,0x8b,0x81,0xce,0xe2,0x44,0x6d,0xa1,0x10,0xcc,0x58,0xff,0xd9,0x44,0x14,0x2c,0x10,0x77,0x7d,0x81,0x57,0xfb,0xc8,0xc8,0xf7,0x3c,0x8a,0x86,0x22,0x12,0xd1,0xd5,0x73,0x41,0x32,0xee,0x29,0x35,0x8e,0xf5,0x9d,0x7a,0xac,0x25,0x35,0x51,0xac,0x4b,0x95,0x81,0xf5,0xcc,0x53,0xce,0x28,0x87,0x16,0x79,0xf8,0x60,0xd6,0xa2,0xce,0xc9,0x0c,0xf3,0x76,0x9c,0xb8,0xf0,0xa9,0xb3,0x9f,0x08,0xa8,0xdb,0x19,0xc9,0xc3,0x4d,0x9e,0x1f,0x0c,0x4e,0x59,0xa2,0x78,0xe1,0xff,0x34,0xfb,0x14,0x44,0xb0,0x3a,0xb3,0x1f,0xd3,0x78,0x02,0x4d,0x18,0x9a,0x6a,0x81,0x2c,0xf2,0xf2,0x93,0xb9,0x1c,0x60,0xd8,0xe4,0xde,0xee,0xe2,0x49,0xe1,0x0c,0xd5,0x9c,0x77,0x1e,0x3e,0xc0,0x9e,0x86,0x01,0xa9,0x50,0xde,0x79,0x75,0xab,0xc4,0x6a,0x72,0x40,0x48,0xd0,0x38,0x70,0x12,0xbe,0xea,0x2d,0x81,0x40,0x8c,0xb7,0x76,0x49,0xe8,0xad,0x04,0x2b,0x17,0xc4,0x06,0x66,0x75,0x91,0x84,0x25,0x66,0xf8,0xbe,0xa0,0xb5,0xf3,0x5c,0xcc,0x6b,0x94,0xae,0x35,0xa5,0xaa,0x87,0x7a,0xc9,0xf6,0xa8,0x03,0x91,0x5f,0x9e,0x3b,0xcc,0x1f,0xbe,0x20,0xbb,0x97,0xef,0xcf,0xff,0x16,0x4f,0x95,0x90,0xa6,0xe1,0x1a,0x50,0xcd,0x89,0xd5,0x6e,0x2b,0xb0,0x26,0x68,0x63,0xf2,0x5f,0x56,0x9b,0x36,0x8c,0x8e,0x7a,0xbe,0x90,0x69,0x40,0xb8,0x25,0x35,0xbb,0xd7,0x47,0x18,0xc7,0x40,0x5a,0x35,0x10,0x9f,0x57,0xa3,0xae,0xec,0xed,0xe9,0xb1,0x7a,0x33,0xd0,0xba,0xe5,0x55,0x63,0x55,0x53,0x97,0x88,0xcf,0xf3,0xc0,0x5e,0x3d,0x21,0xd6,0xa0,0x87,0x93,0x38,0x75,0xf0,0x16,0x02,0xe4,0xe0,0xcc,0x80,0xd9,0x7c,0xc6,0x88,0x6c,0x9c,0x41,0x9d,0x82,0x1b,0x45,0xf6,0x3b,0xfa,0x7b,0xf0,0x11,0x4e,0x15,0xc1,0xc4,0xeb,0x35,0x07,0xbd,0x9d,0xdf,0xf8,0x9c,0x75,0xbe,0x57,0xe4,0x18,0xbd,0xa6,0x63,0xd3,0x7f,0xe7,0xef,0x63,0x1b,0x42,0x83,0x2d,0x07,0x83,0x7f,0x61,0x0f,0x94,0x44,0xf7,0xcd,0xbd,0xf8,0xca,0x02,0xc8,0x61,0xa8,0xf9,0x30,0x0a,0x37,0x3b,0xf0,0x7f,0xa9,0xc5,0x6c,0xa2,0x0d,0x57,0x0e,0xe1,0x16,0x0c,0xea,0x95,0x02,0x6b,0x5e,0xa0,0x84,0x53,0x1d,0xc8,0x05,0xdb,0x85,0x77,0x5f,0x4c,0xc8,0x92,0x17,0x46,0x58,0x05,0x8d,0xfe,0xbc,0x82,0xda,0xa1,0x9e,0x55,0xb0,0xcd,0xd5,0x4f,0xed,0x1f,0x87,0x9d,0x39,0x55,0x6f,0xdd,0x26,0xef,0xd1,0x4e,0xc4,0x13,0x68,0xfe,0xd5,0x71,0xfa,0x1c,0xea,0x3a,0x61,0xd5,0x8c,0x9b,0x1c,0x0f,0x5d,0xc4,0xca,0x2b,0x41,0x0f,0xd0,0x65,0xd8,0xe0,0x9b,0x68,0x00,0x11,0xb0,0x68,0x80,0xd2,0x5b,0x63,0x4b,0x5d,0x4d,0x95,0x79,0x7f,0x88,0x72,0x75,0x7a,0x02,0xce,0x66,0x1c,0xf0,0xfc,0xd3,0x1b,0x6a,0x00,0x8e,0x1c,0x9a,0x19,0x25,0xee,0xd6,0x87,0x08,0x9f,0x8e,0x93,0x4d,0x1d,0xa1,0xd8,0x81,0x4d,0x9a,0x2b,0xe5,0xd2,0xf5,0xb6,0x5a,0x32,0x5c,0xd1,0xc1,0xaf,0xea,0x64,0x1c,0xb6,0x33,0xfd,0x57,0xbd,0xc1,0x43,0x48,0xf0,0xb8,0x37,0xf2,0x7c,0xba,0x5c,0xd0,0x0c,0x81,0x72,0x54,0xf4,0xfe,0xab,0xba,0x84,0x63,0xa7,0x40,0x14,0x86,0xd8,0x37,0x58,0xde,0x36,0xf1,0x03,0x6a,0x0f,0xa1,0x8f,0xd4,0x10,0x2f,0xec,0x83,0x58,0x9b,0x0c,0xc0,0xa8,0xa4,0xfe,0x8f,0x61,0x06,0xa2,0x5d,0xa0,0x7a,0xe9,0xb9,0xff,0x81,0x67,0x37,0x8a,0x79,0xb4,0x56,0x31,0xcd,0x5e,0xaf,0xe4,0x6f,0xe0,0xd9,0x87,0x59,0xd4,0xd4,0xb4,0x25,0x7d,0x9a,0xeb,0xaa,0xfe,0xdf,0x59,0x53,0xf5,0x8b,0xdc,0x3a,0x3c,0xce,0x6d,0x0e,0x97,0x62,0x21,0x9d,0x92,0xc9,0xe8,0xf4,0x9b,0x3a,0x6c,0x92,0x53,0xda,0x45,0xb0,0xe7,0xee,0x9c,0xeb,0xd4,0x68,0x3f,0x4f,0x43,0x29,0xcd,0x12,0xf7,0x4d,0x0e,0x9a,0xc6,0xa8,0xb1,0x46,0x0f,0x19,0x1c,0x3c,0x87,0x45,0x2b,0xc9,0xdf,0xaf,0x46,0x3f,0x88,0xe8,0x61,0xc7,0x18,0x91,0x97,0xb1,0x91,0xf9,0x70,0x4d,0x60,0x82,0x1f,0x8f,0xf4,0x8b,0xc1,0xff,0xf4,0x27,0xb3,0x08,0x75,0x9a,0x51,0x0d,0x9b,0x61,0xfa,0x90,0x14,0x8d,0x26,0xc2,0x37,0x7e,0x56,0x91,0x76,0xa0,0x25,0xc0,0x59,0x43,0x42,0x53,0x0c,0x32,0x13,0xfd,0x42,0xf7,0xef,0xba,0x45,0xb9,0x48,0x11,0xe7,0xa8,0xcf,0x19,0xb
5,0x5f,0x8f,0xa5,0x32,0x73,0x2b,0x7c,0x76,0xaa,0x10,0xc0,0x0c,0x9f,0x68,0x71,0xa5,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x18,0x00,0x03,0xff,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x02,0x25,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x06,0x24,0x16,0x08,0x1a,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x02,0x25,0x16,0x08,0x1e,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x27,0x12,0x08,0x09,0x24,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x07,0x24,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x05,0x24,0x16,0x08,0x1b,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x09,0x24,0x16,0x08,0x17,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x27,0x12,0x08,0x03,0x24,0x27,0x12,0x12,0x08,0x05,0x24,0x16,0x08,0x1b,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x03,0x25,0x27,0x12,0x12,0x08,0x02,0x24,0x16,0x08,0x1e,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x09,0x24,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x09,0x24,0x16,0x08,0x17,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x27,0x12,0x08,0x03,0x25,0x27,0x12,0x08,0x03,0x25,0x16,0x08,0x1d,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x06,0x24,0x16,0x08,0x1a,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x09,0x24,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x03,0x25,0x16,0x08,0x1d,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x06,0x24,0x16,0x08,0x1a,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x03,0x25,0x27,0x12,0x12,0x08,0x09,0x24,0x16,0x08,0x17,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x27,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x9f,0x68,0x71,0xa5,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x0a,0x25,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x03,0x25,0x16,0x08,0x1d,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x05,0x24,0x16,0x08,0x1b,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x18,0x00,0x03,0xff,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x17,0x55,0x10,0xd2,0x27,0x12,0x12,0x08,0x05,0x24,0x16,0x08,0x1b,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x17,0x55,0x10,0xd2,0x27,0x12,0x08,0x06,0x24,0x16,0x08,0x1a,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x9f,0x68,0x71,0xa5,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x06,0x24,0x16,0x08,0x1a,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x06,0x24,0x16,0x08,0x1a,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x02,0x25,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x9f,0x68,0x71,0xa5,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x09,0x24,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x02,0x25,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x07,0x24,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x03,0x24,0x27,0x0c,0x9f,0x68,0x71,0xa5,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x0a,0x25,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x9f,0x68,0x71,0xa5,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x02,0x24,0x16,0x08,0x1e,0x25,0x21,0x0c,0xff,0xf
f,0xff,0xff,0x1a,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x36,0x16,0x0c,0x17,0x55,0x10,0xd2,0x27,0x12,0x12,0x08,0x09,0x24,0x16,0x08,0x17,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x27,0x12,0x12,0x08,0x02,0x24,0x16,0x08,0x1e,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x0a,0x25,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x03,0x25,0x27,0x12,0x08,0x03,0x25,0x16,0x08,0x1d,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x06,0x24,0x16,0x08,0x1a,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x9f,0x68,0x71,0xa5,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x05,0x24,0x16,0x08,0x1b,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x9f,0x68,0x71,0xa5,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x9f,0x68,0x71,0xa5,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x18,0x00,0x03,0xff,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x03,0x25,0x27,0x0c,0x18,0x00,0x03,0xff,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x03,0x25,0x16,0x08,0x1d,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x03,0x24,0x27,0x12,0x08,0x0a,0x25,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x03,0x24,0x27,0x16,0x31,0x1c,0x12,0x28,0xf2,0xfe,0x13,0x27,0x12,0x08,0x02,0x25,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x9f,0x68,0x71,0xa5,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x18,0x00,0x03,0xff,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x07,0x24,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x06,0x24,0x16,0x08,0x1a,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x9f,0x68,0x71,0xa5,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x05,0x24,0x16,0x08,0x1b,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x07,0x24,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x06,0x24,0x16,0x08,0x1a,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x05,0x24,0x16,0x08,0x1b,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x03,0x25,0x16,0x08,0x1d,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x09,0x24,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x03,0x24,0x27,0x12,0x08,0x06,0x24,0x16,0x08,0x1a,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x17,0x55,0x10,0xd2,0x27,0x12,0x12,0x08,0x09,0x24,0x16,0x08,0x17,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x27,0x12,0x08,0x09,0x24,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x0a,0x25,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x06,0x24,0x16,0x08,0x1a,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x18,0x00,0x03,0xff,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x9f,0x68,0x71,0xa5,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x03,0x24,0x27,0x12,0x12,0x08,0x05,0x24,0x16,0x08,0x1b,0x2
5,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x02,0x25,0x16,0x08,0x1e,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x27,0x12,0x12,0x08,0x02,0x25,0x16,0x08,0x1e,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x27,0x12,0x08,0x06,0x24,0x16,0x08,0x1a,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x03,0x25,0x27,0x12,0x08,0x02,0x25,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x18,0x00,0x03,0xff,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x02,0x25,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x02,0x24,0x16,0x08,0x1e,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x0a,0x25,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x02,0x25,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x07,0x24,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x09,0x24,0x16,0x08,0x17,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x27,0x12,0x08,0x0a,0x25,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x09,0x24,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x17,0x55,0x10,0xd2,0x27,0x12,0x12,0x08,0x09,0x24,0x16,0x08,0x17,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x27,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x9f,0x68,0x71,0xa5,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x03,0x25,0x16,0x08,0x1d,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x06,0x24,0x16,0x08,0x1a,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x09,0x24,0x16,0x08,0x17,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x27,0x0c,0x17,0x55,0x10,0xd2,0x27,0x12,0x08,0x03,0x24,0x27,0x12,0x08,0x06,0x24,0x16,0x08,0x1a,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x07,0x24,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x02,0x24,0x16,0x08,0x1e,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x09,0x24,0x16,0x08,0x17,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x27,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x02,0x25,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x03,0x24,0x27,0x12,0x08,0x03,0x25,0x27,0x12,0x08,0x03,0x25,0x27,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x03,0x25,0x16,0x08,0x1d,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x03,0x24,0x27,0x12,0x08,0x06,0x24,0x16,0x08,0x1a,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x02,0x25,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x09,0x24,0x16,0x08,0x17,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x27,0x0c,0x9f,0x68,0x71,0xa5,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x02,0x24,0x16,0x08,0x1e,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x02,0x25,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x0a,0x25,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x02,0x25,0x16,0x08,0x1e,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x27,0x12,0x08,0x09,0x24,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x07,0x24,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x06,0x24,0x16,0x08,0x1a,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x06,0x24,0x16,0x08,0x1a,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x1
2,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x09,0x24,0x16,0x08,0x17,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x27,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x03,0x24,0x27,0x12,0x08,0x09,0x24,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x03,0x24,0x27,0x12,0x08,0x03,0x25,0x27,0x12,0x08,0x09,0x24,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x05,0x24,0x16,0x08,0x1b,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x06,0x24,0x16,0x08,0x1a,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x03,0x25,0x27,0x12,0x08,0x09,0x24,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x02,0x25,0x16,0x08,0x1e,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x27,0x0c,0x17,0x55,0x10,0xd2,0x27,0x12,0x08,0x03,0x25,0x27,0x12,0x08,0x03,0x24,0x27,0x0c,0x18,0x00,0x03,0xff,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x05,0x24,0x16,0x08,0x1b,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x06,0x24,0x16,0x08,0x1a,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x02,0x25,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x0a,0x25,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x0a,0x25,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x9f,0x68,0x71,0xa5,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x05,0x24,0x16,0x08,0x1b,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x06,0x24,0x16,0x08,0x1a,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x02,0x24,0x16,0x08,0x1e,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x05,0x24,0x16,0x08,0x1b,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x17,0x55,0x10,0xd2,0x27,0x12,0x08,0x07,0x24,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x06,0x24,0x16,0x08,0x1a,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x05,0x24,0x16,0x08,0x1b,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x07,0x24,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x03,0x25,0x27,0x0c,0x17,0x55,0x10,0xd2,0x27,0x12,0x08,0x03,0x25,0x27,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x03,0x25,0x16,0x08,0x1d,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x17,0x55,0x10,0xd2,0x27,0x12,0x12,0x08,0x02,0x25,0x16,0x08,0x1e,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x27,0x0c,0x9f,0x68,0x71,0xa5,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x0a,0x25,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x03,0x24,0x27,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x06,0x24,0x16,0x08,0x1a,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x09,0x24,0x16,0x08,0x17,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x27,0x12,0x08,0x02,0x25,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x03,0x25,0x27,0x12,0x12,0x08,0x02,0x24,0x16,0x08,0x1e,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x18,0x00,0x03,0xff,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x1
2,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x05,0x24,0x16,0x08,0x1b,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x18,0x00,0x03,0xff,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x02,0x24,0x16,0x08,0x1e,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x03,0x25,0x16,0x08,0x1d,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x07,0x24,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x07,0x24,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x18,0x00,0x03,0xff,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x09,0x24,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x0a,0x25,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x02,0x25,0x16,0x08,0x1e,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x27,0x12,0x08,0x07,0x24,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x09,0x24,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x02,0x25,0x16,0x08,0x1e,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x27,0x12,0x08,0x09,0x24,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x17,0x55,0x10,0xd2,0x27,0x12,0x08,0x03,0x24,0x27,0x12,0x08,0x02,0x25,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x02,0x25,0x16,0x08,0x1e,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x27,0x12,0x08,0x0a,0x25,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x02,0x25,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x09,0x24,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x18,0x00,0x03,0xff,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x02,0x25,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x07,0x24,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x05,0x24,0x16,0x08,0x1b,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x09,0x24,0x16,0x08,0x17,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x27,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x03,0x24,0x27,0x0c,0x18,0x00,0x03,0xff,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x9f,0x68,0x71,0xa5,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x07,0x24,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x02,0x25,0x16,0x08,0x1e,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x27,0x12,0x08,0x09,0x24,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x09,0x24,0x16,0x08,0x17,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x27,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x02,0x25,0x16,0x08,0x1e,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x27,0x12,0x08,0x02,0x25,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x09,0x24,0x16,0x08,0x17,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x27,0x12,0x08,0x03,0x24,0x27,0x12,0x08,0x03,0x24,0x27,0x0c,0x9f,0x68,0x71,0xa5,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x18,0x00,0x03,0xff,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x03,0x24,0x27,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x09,0x24,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x17,0x55,0x10,0xd2,0x27,0x12,0x08,0x03,0x25,0x16,0x08,0x1d,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x02,0x24,0x16,0x08,0x1e,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x9f,0x68,0x71,0xa5,0x1c,0x0c,0xf
f,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x07,0x24,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x03,0x24,0x27,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x02,0x24,0x16,0x08,0x1e,0x25,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x07,0x24,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x08,0x03,0x24,0x27,0x0c,0x17,0x55,0x10,0xd2,0x27,0x12,0x08,0x02,0x25,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x17,0x55,0x10,0xd2,0x27,0x0c,0x9f,0x68,0x71,0xa5,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x06,0x25,0x16,0x08,0x1a,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x22,0x0c,0xff,0xff,0xff,0xff,0x1a,0x0c,0x17,0x55,0x10,0xd2,0x27,0x12,0x08,0x07,0x24,0x1c,0x0c,0xff,0xff,0xff,0xff,0x1a,0x12,0x12,0x08,0x02,0x25,0x16,0x08,0x1e,0x24,0x21,0x0c,0xff,0xff,0xff,0xff,0x1a,0x27,0x2f,0x6d,0xec,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0
0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0
0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0
0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0
0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00]
#to_decode=[0x49,0x6B,0xB6,0xEE,0x06,0x05,0xEC,0x29,0x45,0xD5,0x4C]
dwarf_op = {
0x3: "addr",
0x6: "deref",
0x8: "const1u",
0x9: "const1s",
0xa: "const2u",
0xb: "const2s",
0xc: "const4u",
0xd: "const4s",
0xe: "const8u",
0xf: "const8s",
0x10: "constu",
0x11: "consts",
0x12: "dup",
0x13: "drop",
0x14: "over",
0x15: "pick",
0x16: "swap",
0x17: "rot",
0x18: "xderef",
0x19: "abs",
0x1a: "and",
0x1b: "div",
0x1c: "minus",
0x1d: "mod",
0x1e: "mul",
0x1f: "neg",
0x20: "not",
0x21: "or",
0x22: "plus",
0x23: "plus_uconst",
0x24: "shl",
0x25: "shr",
0x26: "shra",
0x27: "xor",
0x28: "bra",
0x29: "eq",
0x2a: "ge",
0x2b: "gt",
0x2c: "le",
0x2d: "lt",
0x2e: "ne",
0x2f: "skip",
0x30: "lit",
0x50: "reg",
0x70: "breg",
0x90: "regx",
0x91: "fbreg",
0x92: "bregx",
0x93: "piece",
0x94: "deref_size",
0x95: "xderef_size",
0x96: "nop",
0x97: "push_object_address",
0x98: "call2",
0x99: "call4",
0x9a: "call_ref",
0x9b: "form_tls_address",
0x9c: "call_frame_cfa",
0x9d: "bit_piece",
0x9e: "implicit_value",
0x9f: "stack_value",
0xa0: "implicit_pointer",
0xa1: "addrx",
0xa2: "constx",
0xa3: "entry_value",
0xa4: "const_type",
0xa5: "regval_type",
0xa6: "deref_type",
0xa7: "xderef_type",
0xa8: "convert",
0xa9: "reinterpret",
0xe0: "lo_user",
0xff: "hi_user"
}
arch_size = 4 # operand size in bytes of DW_OP_addr on this (32-bit) target
# Sentinel values (arbitrary, still TO CHECK) marking operands whose size is
# variable and must be decoded from the stream as ULEB128/SLEB128
ULEB128 = 0xAA
SLEB128 = 0xBB
dwarf_nb_operands = {
0x3: [1, arch_size],
0x8: [1,1],
0x9: [1,1],
0xa: [1,2],
0xb: [1,2],
0xc: [1,4],
0xd: [1,4],
0xe: [1,8],
0xf: [1,8],
0x10: [1,ULEB128],
0x11: [1,SLEB128],
0x15: [1,1],
0x23: [1,ULEB128],
0x28: [1,2],
0x2f: [1,2],
0x70: [1,SLEB128], #until 0x8f
0x90: [1,ULEB128],
0x91: [1,SLEB128],
0x92: [2,ULEB128, SLEB128],
0x93: [1,ULEB128],
0x94: [1,1],
0x95: [1,1],
0x98: [1,2],
0x99: [1,4],
0x9a: [1,4], #or 8
0x9d: [2,ULEB128,ULEB128],
0x9e: [2,ULEB128],
0xa0: [2,4, SLEB128], # or 8
0xa1: [1,ULEB128],
0xa2: [1,ULEB128],
0xa3: [2,ULEB128,0], #0 defines the block of that size
0xa4: [3,ULEB128, 1, 0],
0xa5: [2,ULEB128, ULEB128],
0xa6: [2,1, ULEB128],
0xa7: [2,1, ULEB128],
0xa8: [1,ULEB128],
0xa9: [1,ULEB128]
}
#lit0 + litteral for 0x30 to 0x4f
#reg0 + regnum for 0x50 to 0x6f
#breg0 + regnum for 0x70 to 0x8f
#nb_operands from 0x70 to 0x8f: 1
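# Illustration (not part of the original table): the byte 0x77 falls in the
# 0x70-0x8f "breg" range, so it is reported as DW_OP_breg7 and, per the 0x70
# entry above, is followed by a single SLEB128 operand (the register offset).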
# uleb128/sleb128 take the list of remaining opcode bytes
# and return (decoded value, number of bytes consumed)
def uleb128(op_list):
result = 0
shift = 0
i = 0
while(True):
byte = op_list[i]
result |= (0x7F & byte) << shift
if (0x80 & byte ==0):
break
else:
shift += 7
i+=1
return (result, i+1)
def sleb128(op_list):
result = 0
shift = 0
i = 0
while(True):
byte = op_list[i]
result |= (0x7F & byte) << shift
shift += 7
if (0x80 & byte == 0):
break
else:
i+=1
if (0x40 & byte):
result |= -1 << 7*(i+1)
return (result, i+1)
'''
print(uleb128([2,1,1,1,1]))
print(uleb128([127,1,1,1,1]))
print(uleb128([0x80,1,1,1,1]))
print(uleb128([0x81,1,1,1,1]))
print(uleb128([57+0x80,100,1,1,1]))
print("")
print(sleb128([2,1,1,1,1]))
print(sleb128([0x7e,1,1,1,1]))
print(sleb128([127+0x80,0,1,1,1]))
print(sleb128([0x81,0x7f,1,1,1]))
print(sleb128([0x80,1,1,1,1]))
print(sleb128([0x80,0x7f,1,1,1]))
print(sleb128([0x81,1,1,1,1]))
print(sleb128([0x7F+0x80,0x7e,1,1,1]))
'''
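# Worked example (sketch, values not taken from to_decode): the ULEB128 bytes
# [0xE5, 0x8E, 0x26] decode to 624485 over 3 bytes, and the SLEB128 bytes
# [0x9B, 0xF1, 0x59] decode to -624485 over 3 bytes:
#   uleb128([0xE5, 0x8E, 0x26])  -> (624485, 3)
#   sleb128([0x9B, 0xF1, 0x59])  -> (-624485, 3)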
global debug
debug = False
def print_debug(s):
global debug
if debug:
print(s)
def convert_little_endian(s):
    #reverse the byte order of a hex string, e.g. "003043" -> "433000"
length = int(len(s)/2)
temp_str = ""
for i in range(length):
temp_str = s[i*2:i*2+2] + temp_str
return temp_str
def convert_two_bytes_signed(s):
num = int(s,16)
#check if MSbit is set:
MSB = num >> 15
if not MSB:
return num
else:
#need to negate
return (((num ^ 0xFFFF) + 1)&0xFFFF)* -1
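# Examples (sketch): convert_little_endian("efbe") -> "beef";
# convert_two_bytes_signed("0010") -> 16; convert_two_bytes_signed("fffe") -> -2.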
print(len(to_decode))
cursor = 0
print("|gnu.hash_@| offset | [opcode] description")
while (cursor < len(to_decode)):
    #this offset range is treated as raw data: dump it as little-endian 32-bit values
    if (cursor >= 0x3e8) and (cursor < 0x85d):
val4 = "".join(["%02x" % i for i in to_decode[cursor:cursor+4]])
new_val = convert_little_endian(val4)
print("|0x%06x| 0x%04x | value: 0x%s" % (cursor+0x400258, cursor, new_val))
cursor+=4
else:
opcode = to_decode[cursor]
opcode_suffix = ""
#print("debug: " + str(opcode))
if opcode >= 0x30 and opcode <= 0x4f:
opcode_suffix = str(opcode-0x30)
opcode = 0x30
elif opcode >= 0x50 and opcode <= 0x6f:
opcode_suffix = str(opcode-0x50)
opcode = 0x50
elif opcode >= 0x70 and opcode <= 0x8f:
opcode_suffix = str(opcode-0x70)
opcode = 0x70
if opcode not in dwarf_op.keys():
print("|0x%06x| 0x%04x | [ 0x%02x ] unknown opcode" % (cursor+0x400258, cursor, opcode))
cursor += 1
else:
print_debug("%02x" % opcode)
operation_str = "|0x%06x| 0x%04x | [ 0x%02x ] " % ((cursor+0x400258), cursor, to_decode[cursor]) + "DW_OP_"+dwarf_op[opcode]+opcode_suffix
cursor+=1 #opcode read
operands_str = ""
if opcode in dwarf_nb_operands.keys():
nb_operand = dwarf_nb_operands[opcode][0]
print_debug("\tdebug, nb_operand: " + str(nb_operand))
for i in range(nb_operand):
size_operand = dwarf_nb_operands[opcode][1+i]
if size_operand == 0:
print_debug("Warning, size_operand is 0")
elif size_operand == ULEB128:
result, offset = uleb128(to_decode[cursor:])
size_operand = offset
elif size_operand == SLEB128:
result, offset = sleb128(to_decode[cursor:])
size_operand = offset
print_debug("\tdebug: size_operand: " + str(size_operand))
operand_value = convert_little_endian("".join(["%02x" % i for i in to_decode[cursor:cursor+size_operand]]))
operands_str += " 0x" + operand_value
print_debug("\tdebug, operands_str:" + convert_little_endian(operands_str))
cursor+=size_operand
#if opcode==0x2f: #DW_OP_skip
# cursor += int(operands_str,16)
# print(cursor)
print("%s %s" % (operation_str, operands_str))
print_debug(cursor)
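# Output format illustration (hypothetical bytes, not taken from to_decode):
# if the two bytes [0x77, 0x08] sat at stream offset 0x0008, the loop above
# would print:
#   |0x400260| 0x0008 | [ 0x77 ] DW_OP_breg7  0x08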
| 289.830882
| 46,003
| 0.721656
| 19,311
| 78,834
| 2.940293
| 0.035886
| 0.118105
| 0.175678
| 0.232335
| 0.779306
| 0.773653
| 0.765498
| 0.761589
| 0.757115
| 0.751303
| 0
| 0.527747
| 0.030799
| 78,834
| 271
| 46,004
| 290.900369
| 0.215389
| 0.006165
| 0
| 0.153488
| 0
| 0
| 0.009109
| 0
| 0
| 1
| 0.48097
| 0
| 0
| 1
| 0.023256
| false
| 0
| 0
| 0
| 0.046512
| 0.060465
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
e7b37a01518fdb207261398d2fcbe89dbce512a1
| 49,642
|
py
|
Python
|
nexus_api_python_client/api/search_api.py
|
simonebruzzechesse/nexus-api-python-client
|
eaa1098dbd8778f6f3bda948268953b742f2ab64
|
[
"MIT"
] | 1
|
2021-11-14T12:43:38.000Z
|
2021-11-14T12:43:38.000Z
|
nexus_api_python_client/api/search_api.py
|
simonebruzzechesse/nexus-api-python-client
|
eaa1098dbd8778f6f3bda948268953b742f2ab64
|
[
"MIT"
] | null | null | null |
nexus_api_python_client/api/search_api.py
|
simonebruzzechesse/nexus-api-python-client
|
eaa1098dbd8778f6f3bda948268953b742f2ab64
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Nexus Repository Manager REST API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 3.20.1-01
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from nexus_api_python_client.api_client import ApiClient
from nexus_api_python_client.exceptions import (
ApiTypeError,
ApiValueError
)
class SearchApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def search(self, **kwargs): # noqa: E501
"""Search components # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
        :param str continuation_token: A token returned by a prior request. If present, the next page of results is returned
:param str sort: The field to sort the results against, if left empty, a sort based on match weight will be used.
:param str direction: The direction to sort records in, defaults to ascending ('asc') for all sort fields, except version, which defaults to descending ('desc')
:param int timeout: How long to wait for search results in seconds. If this value is not provided, the system default timeout will be used.
:param str q: Query by keyword
:param str repository: Repository name
:param str format: Query by format
:param str group: Component group
:param str name: Component name
:param str version: Component version
:param str md5: Specific MD5 hash of component's asset
:param str sha1: Specific SHA-1 hash of component's asset
:param str sha256: Specific SHA-256 hash of component's asset
:param str sha512: Specific SHA-512 hash of component's asset
:param str prerelease: Prerelease version flag
:param str docker_image_name: Docker image name
:param str docker_image_tag: Docker image tag
:param str docker_layer_id: Docker layer ID
:param str docker_content_digest: Docker content digest
:param str maven_group_id: Maven groupId
:param str maven_artifact_id: Maven artifactId
:param str maven_base_version: Maven base version
:param str maven_extension: Maven extension of component's asset
:param str maven_classifier: Maven classifier of component's asset
:param str npm_scope: NPM scope
:param str nuget_id: Nuget id
:param str nuget_tags: Nuget tags
:param str pypi_classifiers: PyPi classifiers
:param str pypi_description: PyPi description
:param str pypi_keywords: PyPi keywords
:param str pypi_summary: PyPi summary
:param str rubygems_description: RubyGems description
:param str rubygems_platform: RubyGems platform
:param str rubygems_summary: RubyGems summary
:param str yum_architecture: Yum architecture
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: PageComponentXO
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.search_with_http_info(**kwargs) # noqa: E501
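    # Usage sketch (illustrative, not part of the generated client), assuming a
    # reachable Nexus server, a hypothetical 'maven-central' repository, and
    # that PageComponentXO exposes `items` and `continuation_token` as in the
    # Nexus REST API model:
    #
    #   from nexus_api_python_client.api_client import ApiClient
    #   api = SearchApi(ApiClient())
    #   page = api.search(repository='maven-central', q='junit')
    #   for component in page.items:
    #       print(component)
    #   # pass continuation_token=page.continuation_token to fetch the next page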
def search_with_http_info(self, **kwargs): # noqa: E501
"""Search components # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
        :param str continuation_token: A token returned by a prior request. If present, the next page of results is returned
:param str sort: The field to sort the results against, if left empty, a sort based on match weight will be used.
:param str direction: The direction to sort records in, defaults to ascending ('asc') for all sort fields, except version, which defaults to descending ('desc')
:param int timeout: How long to wait for search results in seconds. If this value is not provided, the system default timeout will be used.
:param str q: Query by keyword
:param str repository: Repository name
:param str format: Query by format
:param str group: Component group
:param str name: Component name
:param str version: Component version
:param str md5: Specific MD5 hash of component's asset
:param str sha1: Specific SHA-1 hash of component's asset
:param str sha256: Specific SHA-256 hash of component's asset
:param str sha512: Specific SHA-512 hash of component's asset
:param str prerelease: Prerelease version flag
:param str docker_image_name: Docker image name
:param str docker_image_tag: Docker image tag
:param str docker_layer_id: Docker layer ID
:param str docker_content_digest: Docker content digest
:param str maven_group_id: Maven groupId
:param str maven_artifact_id: Maven artifactId
:param str maven_base_version: Maven base version
:param str maven_extension: Maven extension of component's asset
:param str maven_classifier: Maven classifier of component's asset
:param str npm_scope: NPM scope
:param str nuget_id: Nuget id
:param str nuget_tags: Nuget tags
:param str pypi_classifiers: PyPi classifiers
:param str pypi_description: PyPi description
:param str pypi_keywords: PyPi keywords
:param str pypi_summary: PyPi summary
:param str rubygems_description: RubyGems description
:param str rubygems_platform: RubyGems platform
:param str rubygems_summary: RubyGems summary
:param str yum_architecture: Yum architecture
        :param _return_http_data_only: return the response data only, without
                                       the status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(PageComponentXO, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['continuation_token', 'sort', 'direction', 'timeout', 'q', 'repository', 'format', 'group', 'name', 'version', 'md5', 'sha1', 'sha256', 'sha512', 'prerelease', 'docker_image_name', 'docker_image_tag', 'docker_layer_id', 'docker_content_digest', 'maven_group_id', 'maven_artifact_id', 'maven_base_version', 'maven_extension', 'maven_classifier', 'npm_scope', 'nuget_id', 'nuget_tags', 'pypi_classifiers', 'pypi_description', 'pypi_keywords', 'pypi_summary', 'rubygems_description', 'rubygems_platform', 'rubygems_summary', 'yum_architecture'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method search" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'continuation_token' in local_var_params and local_var_params['continuation_token'] is not None: # noqa: E501
query_params.append(('continuationToken', local_var_params['continuation_token'])) # noqa: E501
if 'sort' in local_var_params and local_var_params['sort'] is not None: # noqa: E501
query_params.append(('sort', local_var_params['sort'])) # noqa: E501
if 'direction' in local_var_params and local_var_params['direction'] is not None: # noqa: E501
query_params.append(('direction', local_var_params['direction'])) # noqa: E501
if 'timeout' in local_var_params and local_var_params['timeout'] is not None: # noqa: E501
query_params.append(('timeout', local_var_params['timeout'])) # noqa: E501
if 'q' in local_var_params and local_var_params['q'] is not None: # noqa: E501
query_params.append(('q', local_var_params['q'])) # noqa: E501
if 'repository' in local_var_params and local_var_params['repository'] is not None: # noqa: E501
query_params.append(('repository', local_var_params['repository'])) # noqa: E501
if 'format' in local_var_params and local_var_params['format'] is not None: # noqa: E501
query_params.append(('format', local_var_params['format'])) # noqa: E501
if 'group' in local_var_params and local_var_params['group'] is not None: # noqa: E501
query_params.append(('group', local_var_params['group'])) # noqa: E501
if 'name' in local_var_params and local_var_params['name'] is not None: # noqa: E501
query_params.append(('name', local_var_params['name'])) # noqa: E501
if 'version' in local_var_params and local_var_params['version'] is not None: # noqa: E501
query_params.append(('version', local_var_params['version'])) # noqa: E501
if 'md5' in local_var_params and local_var_params['md5'] is not None: # noqa: E501
query_params.append(('md5', local_var_params['md5'])) # noqa: E501
if 'sha1' in local_var_params and local_var_params['sha1'] is not None: # noqa: E501
query_params.append(('sha1', local_var_params['sha1'])) # noqa: E501
if 'sha256' in local_var_params and local_var_params['sha256'] is not None: # noqa: E501
query_params.append(('sha256', local_var_params['sha256'])) # noqa: E501
if 'sha512' in local_var_params and local_var_params['sha512'] is not None: # noqa: E501
query_params.append(('sha512', local_var_params['sha512'])) # noqa: E501
if 'prerelease' in local_var_params and local_var_params['prerelease'] is not None: # noqa: E501
query_params.append(('prerelease', local_var_params['prerelease'])) # noqa: E501
if 'docker_image_name' in local_var_params and local_var_params['docker_image_name'] is not None: # noqa: E501
query_params.append(('docker.imageName', local_var_params['docker_image_name'])) # noqa: E501
if 'docker_image_tag' in local_var_params and local_var_params['docker_image_tag'] is not None: # noqa: E501
query_params.append(('docker.imageTag', local_var_params['docker_image_tag'])) # noqa: E501
if 'docker_layer_id' in local_var_params and local_var_params['docker_layer_id'] is not None: # noqa: E501
query_params.append(('docker.layerId', local_var_params['docker_layer_id'])) # noqa: E501
if 'docker_content_digest' in local_var_params and local_var_params['docker_content_digest'] is not None: # noqa: E501
query_params.append(('docker.contentDigest', local_var_params['docker_content_digest'])) # noqa: E501
if 'maven_group_id' in local_var_params and local_var_params['maven_group_id'] is not None: # noqa: E501
query_params.append(('maven.groupId', local_var_params['maven_group_id'])) # noqa: E501
if 'maven_artifact_id' in local_var_params and local_var_params['maven_artifact_id'] is not None: # noqa: E501
query_params.append(('maven.artifactId', local_var_params['maven_artifact_id'])) # noqa: E501
if 'maven_base_version' in local_var_params and local_var_params['maven_base_version'] is not None: # noqa: E501
query_params.append(('maven.baseVersion', local_var_params['maven_base_version'])) # noqa: E501
if 'maven_extension' in local_var_params and local_var_params['maven_extension'] is not None: # noqa: E501
query_params.append(('maven.extension', local_var_params['maven_extension'])) # noqa: E501
if 'maven_classifier' in local_var_params and local_var_params['maven_classifier'] is not None: # noqa: E501
query_params.append(('maven.classifier', local_var_params['maven_classifier'])) # noqa: E501
if 'npm_scope' in local_var_params and local_var_params['npm_scope'] is not None: # noqa: E501
query_params.append(('npm.scope', local_var_params['npm_scope'])) # noqa: E501
if 'nuget_id' in local_var_params and local_var_params['nuget_id'] is not None: # noqa: E501
query_params.append(('nuget.id', local_var_params['nuget_id'])) # noqa: E501
if 'nuget_tags' in local_var_params and local_var_params['nuget_tags'] is not None: # noqa: E501
query_params.append(('nuget.tags', local_var_params['nuget_tags'])) # noqa: E501
if 'pypi_classifiers' in local_var_params and local_var_params['pypi_classifiers'] is not None: # noqa: E501
query_params.append(('pypi.classifiers', local_var_params['pypi_classifiers'])) # noqa: E501
if 'pypi_description' in local_var_params and local_var_params['pypi_description'] is not None: # noqa: E501
query_params.append(('pypi.description', local_var_params['pypi_description'])) # noqa: E501
if 'pypi_keywords' in local_var_params and local_var_params['pypi_keywords'] is not None: # noqa: E501
query_params.append(('pypi.keywords', local_var_params['pypi_keywords'])) # noqa: E501
if 'pypi_summary' in local_var_params and local_var_params['pypi_summary'] is not None: # noqa: E501
query_params.append(('pypi.summary', local_var_params['pypi_summary'])) # noqa: E501
if 'rubygems_description' in local_var_params and local_var_params['rubygems_description'] is not None: # noqa: E501
query_params.append(('rubygems.description', local_var_params['rubygems_description'])) # noqa: E501
if 'rubygems_platform' in local_var_params and local_var_params['rubygems_platform'] is not None: # noqa: E501
query_params.append(('rubygems.platform', local_var_params['rubygems_platform'])) # noqa: E501
if 'rubygems_summary' in local_var_params and local_var_params['rubygems_summary'] is not None: # noqa: E501
query_params.append(('rubygems.summary', local_var_params['rubygems_summary'])) # noqa: E501
if 'yum_architecture' in local_var_params and local_var_params['yum_architecture'] is not None: # noqa: E501
query_params.append(('yum.architecture', local_var_params['yum_architecture'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v1/search', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageComponentXO', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def search_and_download_assets(self, **kwargs): # noqa: E501
"""Search and download asset # noqa: E501
Returns a 302 Found with location header field set to download URL. Unless a sort parameter is supplied, the search must return a single asset to receive download URL. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_and_download_assets(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str sort: The field to sort the results against, if left empty and more than 1 result is returned, the request will fail.
:param str direction: The direction to sort records in, defaults to ascending ('asc') for all sort fields, except version, which defaults to descending ('desc')
:param int timeout: How long to wait for search results in seconds. If this value is not provided, the system default timeout will be used.
:param str q: Query by keyword
:param str repository: Repository name
:param str format: Query by format
:param str group: Component group
:param str name: Component name
:param str version: Component version
:param str md5: Specific MD5 hash of component's asset
:param str sha1: Specific SHA-1 hash of component's asset
:param str sha256: Specific SHA-256 hash of component's asset
:param str sha512: Specific SHA-512 hash of component's asset
:param str prerelease: Prerelease version flag
:param str docker_image_name: Docker image name
:param str docker_image_tag: Docker image tag
:param str docker_layer_id: Docker layer ID
:param str docker_content_digest: Docker content digest
:param str maven_group_id: Maven groupId
:param str maven_artifact_id: Maven artifactId
:param str maven_base_version: Maven base version
:param str maven_extension: Maven extension of component's asset
:param str maven_classifier: Maven classifier of component's asset
:param str npm_scope: NPM scope
:param str nuget_id: Nuget id
:param str nuget_tags: Nuget tags
:param str pypi_classifiers: PyPi classifiers
:param str pypi_description: PyPi description
:param str pypi_keywords: PyPi keywords
:param str pypi_summary: PyPi summary
:param str rubygems_description: RubyGems description
:param str rubygems_platform: RubyGems platform
:param str rubygems_summary: RubyGems summary
:param str yum_architecture: Yum architecture
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.search_and_download_assets_with_http_info(**kwargs) # noqa: E501
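    # Usage sketch (hypothetical repository and Maven coordinates): ask for the
    # download URL of a single asset; per the docstring above, the server
    # replies 302 Found with the URL in the Location header.
    #
    #   api.search_and_download_assets(repository='maven-central',
    #                                  maven_group_id='junit',
    #                                  maven_artifact_id='junit',
    #                                  sort='version')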
def search_and_download_assets_with_http_info(self, **kwargs): # noqa: E501
"""Search and download asset # noqa: E501
Returns a 302 Found with location header field set to download URL. Unless a sort parameter is supplied, the search must return a single asset to receive download URL. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_and_download_assets_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str sort: The field to sort the results against, if left empty and more than 1 result is returned, the request will fail.
:param str direction: The direction to sort records in, defaults to ascending ('asc') for all sort fields, except version, which defaults to descending ('desc')
:param int timeout: How long to wait for search results in seconds. If this value is not provided, the system default timeout will be used.
:param str q: Query by keyword
:param str repository: Repository name
:param str format: Query by format
:param str group: Component group
:param str name: Component name
:param str version: Component version
:param str md5: Specific MD5 hash of component's asset
:param str sha1: Specific SHA-1 hash of component's asset
:param str sha256: Specific SHA-256 hash of component's asset
:param str sha512: Specific SHA-512 hash of component's asset
:param str prerelease: Prerelease version flag
:param str docker_image_name: Docker image name
:param str docker_image_tag: Docker image tag
:param str docker_layer_id: Docker layer ID
:param str docker_content_digest: Docker content digest
:param str maven_group_id: Maven groupId
:param str maven_artifact_id: Maven artifactId
:param str maven_base_version: Maven base version
:param str maven_extension: Maven extension of component's asset
:param str maven_classifier: Maven classifier of component's asset
:param str npm_scope: NPM scope
:param str nuget_id: Nuget id
:param str nuget_tags: Nuget tags
:param str pypi_classifiers: PyPi classifiers
:param str pypi_description: PyPi description
:param str pypi_keywords: PyPi keywords
:param str pypi_summary: PyPi summary
:param str rubygems_description: RubyGems description
:param str rubygems_platform: RubyGems platform
:param str rubygems_summary: RubyGems summary
:param str yum_architecture: Yum architecture
        :param _return_http_data_only: return the response data only, without
                                       the status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['sort', 'direction', 'timeout', 'q', 'repository', 'format', 'group', 'name', 'version', 'md5', 'sha1', 'sha256', 'sha512', 'prerelease', 'docker_image_name', 'docker_image_tag', 'docker_layer_id', 'docker_content_digest', 'maven_group_id', 'maven_artifact_id', 'maven_base_version', 'maven_extension', 'maven_classifier', 'npm_scope', 'nuget_id', 'nuget_tags', 'pypi_classifiers', 'pypi_description', 'pypi_keywords', 'pypi_summary', 'rubygems_description', 'rubygems_platform', 'rubygems_summary', 'yum_architecture'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method search_and_download_assets" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'sort' in local_var_params and local_var_params['sort'] is not None: # noqa: E501
query_params.append(('sort', local_var_params['sort'])) # noqa: E501
if 'direction' in local_var_params and local_var_params['direction'] is not None: # noqa: E501
query_params.append(('direction', local_var_params['direction'])) # noqa: E501
if 'timeout' in local_var_params and local_var_params['timeout'] is not None: # noqa: E501
query_params.append(('timeout', local_var_params['timeout'])) # noqa: E501
if 'q' in local_var_params and local_var_params['q'] is not None: # noqa: E501
query_params.append(('q', local_var_params['q'])) # noqa: E501
if 'repository' in local_var_params and local_var_params['repository'] is not None: # noqa: E501
query_params.append(('repository', local_var_params['repository'])) # noqa: E501
if 'format' in local_var_params and local_var_params['format'] is not None: # noqa: E501
query_params.append(('format', local_var_params['format'])) # noqa: E501
if 'group' in local_var_params and local_var_params['group'] is not None: # noqa: E501
query_params.append(('group', local_var_params['group'])) # noqa: E501
if 'name' in local_var_params and local_var_params['name'] is not None: # noqa: E501
query_params.append(('name', local_var_params['name'])) # noqa: E501
if 'version' in local_var_params and local_var_params['version'] is not None: # noqa: E501
query_params.append(('version', local_var_params['version'])) # noqa: E501
if 'md5' in local_var_params and local_var_params['md5'] is not None: # noqa: E501
query_params.append(('md5', local_var_params['md5'])) # noqa: E501
if 'sha1' in local_var_params and local_var_params['sha1'] is not None: # noqa: E501
query_params.append(('sha1', local_var_params['sha1'])) # noqa: E501
if 'sha256' in local_var_params and local_var_params['sha256'] is not None: # noqa: E501
query_params.append(('sha256', local_var_params['sha256'])) # noqa: E501
if 'sha512' in local_var_params and local_var_params['sha512'] is not None: # noqa: E501
query_params.append(('sha512', local_var_params['sha512'])) # noqa: E501
if 'prerelease' in local_var_params and local_var_params['prerelease'] is not None: # noqa: E501
query_params.append(('prerelease', local_var_params['prerelease'])) # noqa: E501
if 'docker_image_name' in local_var_params and local_var_params['docker_image_name'] is not None: # noqa: E501
query_params.append(('docker.imageName', local_var_params['docker_image_name'])) # noqa: E501
if 'docker_image_tag' in local_var_params and local_var_params['docker_image_tag'] is not None: # noqa: E501
query_params.append(('docker.imageTag', local_var_params['docker_image_tag'])) # noqa: E501
if 'docker_layer_id' in local_var_params and local_var_params['docker_layer_id'] is not None: # noqa: E501
query_params.append(('docker.layerId', local_var_params['docker_layer_id'])) # noqa: E501
if 'docker_content_digest' in local_var_params and local_var_params['docker_content_digest'] is not None: # noqa: E501
query_params.append(('docker.contentDigest', local_var_params['docker_content_digest'])) # noqa: E501
if 'maven_group_id' in local_var_params and local_var_params['maven_group_id'] is not None: # noqa: E501
query_params.append(('maven.groupId', local_var_params['maven_group_id'])) # noqa: E501
if 'maven_artifact_id' in local_var_params and local_var_params['maven_artifact_id'] is not None: # noqa: E501
query_params.append(('maven.artifactId', local_var_params['maven_artifact_id'])) # noqa: E501
if 'maven_base_version' in local_var_params and local_var_params['maven_base_version'] is not None: # noqa: E501
query_params.append(('maven.baseVersion', local_var_params['maven_base_version'])) # noqa: E501
if 'maven_extension' in local_var_params and local_var_params['maven_extension'] is not None: # noqa: E501
query_params.append(('maven.extension', local_var_params['maven_extension'])) # noqa: E501
if 'maven_classifier' in local_var_params and local_var_params['maven_classifier'] is not None: # noqa: E501
query_params.append(('maven.classifier', local_var_params['maven_classifier'])) # noqa: E501
if 'npm_scope' in local_var_params and local_var_params['npm_scope'] is not None: # noqa: E501
query_params.append(('npm.scope', local_var_params['npm_scope'])) # noqa: E501
if 'nuget_id' in local_var_params and local_var_params['nuget_id'] is not None: # noqa: E501
query_params.append(('nuget.id', local_var_params['nuget_id'])) # noqa: E501
if 'nuget_tags' in local_var_params and local_var_params['nuget_tags'] is not None: # noqa: E501
query_params.append(('nuget.tags', local_var_params['nuget_tags'])) # noqa: E501
if 'pypi_classifiers' in local_var_params and local_var_params['pypi_classifiers'] is not None: # noqa: E501
query_params.append(('pypi.classifiers', local_var_params['pypi_classifiers'])) # noqa: E501
if 'pypi_description' in local_var_params and local_var_params['pypi_description'] is not None: # noqa: E501
query_params.append(('pypi.description', local_var_params['pypi_description'])) # noqa: E501
if 'pypi_keywords' in local_var_params and local_var_params['pypi_keywords'] is not None: # noqa: E501
query_params.append(('pypi.keywords', local_var_params['pypi_keywords'])) # noqa: E501
if 'pypi_summary' in local_var_params and local_var_params['pypi_summary'] is not None: # noqa: E501
query_params.append(('pypi.summary', local_var_params['pypi_summary'])) # noqa: E501
if 'rubygems_description' in local_var_params and local_var_params['rubygems_description'] is not None: # noqa: E501
query_params.append(('rubygems.description', local_var_params['rubygems_description'])) # noqa: E501
if 'rubygems_platform' in local_var_params and local_var_params['rubygems_platform'] is not None: # noqa: E501
query_params.append(('rubygems.platform', local_var_params['rubygems_platform'])) # noqa: E501
if 'rubygems_summary' in local_var_params and local_var_params['rubygems_summary'] is not None: # noqa: E501
query_params.append(('rubygems.summary', local_var_params['rubygems_summary'])) # noqa: E501
if 'yum_architecture' in local_var_params and local_var_params['yum_architecture'] is not None: # noqa: E501
query_params.append(('yum.architecture', local_var_params['yum_architecture'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v1/search/assets/download', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def search_assets(self, **kwargs): # noqa: E501
"""Search assets # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_assets(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
        :param str continuation_token: A token returned by a prior request. If present, the next page of results is returned
:param str sort: The field to sort the results against, if left empty, a sort based on match weight will be used.
:param str direction: The direction to sort records in, defaults to ascending ('asc') for all sort fields, except version, which defaults to descending ('desc')
:param int timeout: How long to wait for search results in seconds. If this value is not provided, the system default timeout will be used.
:param str q: Query by keyword
:param str repository: Repository name
:param str format: Query by format
:param str group: Component group
:param str name: Component name
:param str version: Component version
:param str md5: Specific MD5 hash of component's asset
:param str sha1: Specific SHA-1 hash of component's asset
:param str sha256: Specific SHA-256 hash of component's asset
:param str sha512: Specific SHA-512 hash of component's asset
:param str prerelease: Prerelease version flag
:param str docker_image_name: Docker image name
:param str docker_image_tag: Docker image tag
:param str docker_layer_id: Docker layer ID
:param str docker_content_digest: Docker content digest
:param str maven_group_id: Maven groupId
:param str maven_artifact_id: Maven artifactId
:param str maven_base_version: Maven base version
:param str maven_extension: Maven extension of component's asset
:param str maven_classifier: Maven classifier of component's asset
:param str npm_scope: NPM scope
:param str nuget_id: Nuget id
:param str nuget_tags: Nuget tags
:param str pypi_classifiers: PyPi classifiers
:param str pypi_description: PyPi description
:param str pypi_keywords: PyPi keywords
:param str pypi_summary: PyPi summary
:param str rubygems_description: RubyGems description
:param str rubygems_platform: RubyGems platform
:param str rubygems_summary: RubyGems summary
:param str yum_architecture: Yum architecture
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: PageAssetXO
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.search_assets_with_http_info(**kwargs) # noqa: E501
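    # Usage sketch: page through every matching asset of a hypothetical
    # repository, assuming PageAssetXO exposes `items` and `continuation_token`:
    #
    #   token = None
    #   while True:
    #       page = api.search_assets(repository='my-repo', continuation_token=token)
    #       for asset in page.items:
    #           print(asset)
    #       token = page.continuation_token
    #       if token is None:
    #           break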
def search_assets_with_http_info(self, **kwargs): # noqa: E501
"""Search assets # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_assets_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
        :param str continuation_token: A token returned by a prior request. If present, the next page of results is returned
:param str sort: The field to sort the results against, if left empty, a sort based on match weight will be used.
:param str direction: The direction to sort records in, defaults to ascending ('asc') for all sort fields, except version, which defaults to descending ('desc')
:param int timeout: How long to wait for search results in seconds. If this value is not provided, the system default timeout will be used.
:param str q: Query by keyword
:param str repository: Repository name
:param str format: Query by format
:param str group: Component group
:param str name: Component name
:param str version: Component version
:param str md5: Specific MD5 hash of component's asset
:param str sha1: Specific SHA-1 hash of component's asset
:param str sha256: Specific SHA-256 hash of component's asset
:param str sha512: Specific SHA-512 hash of component's asset
:param str prerelease: Prerelease version flag
:param str docker_image_name: Docker image name
:param str docker_image_tag: Docker image tag
:param str docker_layer_id: Docker layer ID
:param str docker_content_digest: Docker content digest
:param str maven_group_id: Maven groupId
:param str maven_artifact_id: Maven artifactId
:param str maven_base_version: Maven base version
:param str maven_extension: Maven extension of component's asset
:param str maven_classifier: Maven classifier of component's asset
:param str npm_scope: NPM scope
:param str nuget_id: Nuget id
:param str nuget_tags: Nuget tags
:param str pypi_classifiers: PyPi classifiers
:param str pypi_description: PyPi description
:param str pypi_keywords: PyPi keywords
:param str pypi_summary: PyPi summary
:param str rubygems_description: RubyGems description
:param str rubygems_platform: RubyGems platform
:param str rubygems_summary: RubyGems summary
:param str yum_architecture: Yum architecture
        :param _return_http_data_only: return the response data only, without
                                       the status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(PageAssetXO, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['continuation_token', 'sort', 'direction', 'timeout', 'q', 'repository', 'format', 'group', 'name', 'version', 'md5', 'sha1', 'sha256', 'sha512', 'prerelease', 'docker_image_name', 'docker_image_tag', 'docker_layer_id', 'docker_content_digest', 'maven_group_id', 'maven_artifact_id', 'maven_base_version', 'maven_extension', 'maven_classifier', 'npm_scope', 'nuget_id', 'nuget_tags', 'pypi_classifiers', 'pypi_description', 'pypi_keywords', 'pypi_summary', 'rubygems_description', 'rubygems_platform', 'rubygems_summary', 'yum_architecture'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method search_assets" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'continuation_token' in local_var_params and local_var_params['continuation_token'] is not None: # noqa: E501
query_params.append(('continuationToken', local_var_params['continuation_token'])) # noqa: E501
if 'sort' in local_var_params and local_var_params['sort'] is not None: # noqa: E501
query_params.append(('sort', local_var_params['sort'])) # noqa: E501
if 'direction' in local_var_params and local_var_params['direction'] is not None: # noqa: E501
query_params.append(('direction', local_var_params['direction'])) # noqa: E501
if 'timeout' in local_var_params and local_var_params['timeout'] is not None: # noqa: E501
query_params.append(('timeout', local_var_params['timeout'])) # noqa: E501
if 'q' in local_var_params and local_var_params['q'] is not None: # noqa: E501
query_params.append(('q', local_var_params['q'])) # noqa: E501
if 'repository' in local_var_params and local_var_params['repository'] is not None: # noqa: E501
query_params.append(('repository', local_var_params['repository'])) # noqa: E501
if 'format' in local_var_params and local_var_params['format'] is not None: # noqa: E501
query_params.append(('format', local_var_params['format'])) # noqa: E501
if 'group' in local_var_params and local_var_params['group'] is not None: # noqa: E501
query_params.append(('group', local_var_params['group'])) # noqa: E501
if 'name' in local_var_params and local_var_params['name'] is not None: # noqa: E501
query_params.append(('name', local_var_params['name'])) # noqa: E501
if 'version' in local_var_params and local_var_params['version'] is not None: # noqa: E501
query_params.append(('version', local_var_params['version'])) # noqa: E501
if 'md5' in local_var_params and local_var_params['md5'] is not None: # noqa: E501
query_params.append(('md5', local_var_params['md5'])) # noqa: E501
if 'sha1' in local_var_params and local_var_params['sha1'] is not None: # noqa: E501
query_params.append(('sha1', local_var_params['sha1'])) # noqa: E501
if 'sha256' in local_var_params and local_var_params['sha256'] is not None: # noqa: E501
query_params.append(('sha256', local_var_params['sha256'])) # noqa: E501
if 'sha512' in local_var_params and local_var_params['sha512'] is not None: # noqa: E501
query_params.append(('sha512', local_var_params['sha512'])) # noqa: E501
if 'prerelease' in local_var_params and local_var_params['prerelease'] is not None: # noqa: E501
query_params.append(('prerelease', local_var_params['prerelease'])) # noqa: E501
if 'docker_image_name' in local_var_params and local_var_params['docker_image_name'] is not None: # noqa: E501
query_params.append(('docker.imageName', local_var_params['docker_image_name'])) # noqa: E501
if 'docker_image_tag' in local_var_params and local_var_params['docker_image_tag'] is not None: # noqa: E501
query_params.append(('docker.imageTag', local_var_params['docker_image_tag'])) # noqa: E501
if 'docker_layer_id' in local_var_params and local_var_params['docker_layer_id'] is not None: # noqa: E501
query_params.append(('docker.layerId', local_var_params['docker_layer_id'])) # noqa: E501
if 'docker_content_digest' in local_var_params and local_var_params['docker_content_digest'] is not None: # noqa: E501
query_params.append(('docker.contentDigest', local_var_params['docker_content_digest'])) # noqa: E501
if 'maven_group_id' in local_var_params and local_var_params['maven_group_id'] is not None: # noqa: E501
query_params.append(('maven.groupId', local_var_params['maven_group_id'])) # noqa: E501
if 'maven_artifact_id' in local_var_params and local_var_params['maven_artifact_id'] is not None: # noqa: E501
query_params.append(('maven.artifactId', local_var_params['maven_artifact_id'])) # noqa: E501
if 'maven_base_version' in local_var_params and local_var_params['maven_base_version'] is not None: # noqa: E501
query_params.append(('maven.baseVersion', local_var_params['maven_base_version'])) # noqa: E501
if 'maven_extension' in local_var_params and local_var_params['maven_extension'] is not None: # noqa: E501
query_params.append(('maven.extension', local_var_params['maven_extension'])) # noqa: E501
if 'maven_classifier' in local_var_params and local_var_params['maven_classifier'] is not None: # noqa: E501
query_params.append(('maven.classifier', local_var_params['maven_classifier'])) # noqa: E501
if 'npm_scope' in local_var_params and local_var_params['npm_scope'] is not None: # noqa: E501
query_params.append(('npm.scope', local_var_params['npm_scope'])) # noqa: E501
if 'nuget_id' in local_var_params and local_var_params['nuget_id'] is not None: # noqa: E501
query_params.append(('nuget.id', local_var_params['nuget_id'])) # noqa: E501
if 'nuget_tags' in local_var_params and local_var_params['nuget_tags'] is not None: # noqa: E501
query_params.append(('nuget.tags', local_var_params['nuget_tags'])) # noqa: E501
if 'pypi_classifiers' in local_var_params and local_var_params['pypi_classifiers'] is not None: # noqa: E501
query_params.append(('pypi.classifiers', local_var_params['pypi_classifiers'])) # noqa: E501
if 'pypi_description' in local_var_params and local_var_params['pypi_description'] is not None: # noqa: E501
query_params.append(('pypi.description', local_var_params['pypi_description'])) # noqa: E501
if 'pypi_keywords' in local_var_params and local_var_params['pypi_keywords'] is not None: # noqa: E501
query_params.append(('pypi.keywords', local_var_params['pypi_keywords'])) # noqa: E501
if 'pypi_summary' in local_var_params and local_var_params['pypi_summary'] is not None: # noqa: E501
query_params.append(('pypi.summary', local_var_params['pypi_summary'])) # noqa: E501
if 'rubygems_description' in local_var_params and local_var_params['rubygems_description'] is not None: # noqa: E501
query_params.append(('rubygems.description', local_var_params['rubygems_description'])) # noqa: E501
if 'rubygems_platform' in local_var_params and local_var_params['rubygems_platform'] is not None: # noqa: E501
query_params.append(('rubygems.platform', local_var_params['rubygems_platform'])) # noqa: E501
if 'rubygems_summary' in local_var_params and local_var_params['rubygems_summary'] is not None: # noqa: E501
query_params.append(('rubygems.summary', local_var_params['rubygems_summary'])) # noqa: E501
if 'yum_architecture' in local_var_params and local_var_params['yum_architecture'] is not None: # noqa: E501
query_params.append(('yum.architecture', local_var_params['yum_architecture'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v1/search/assets', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageAssetXO', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
| 66.544236
| 577
| 0.673442
| 6,538
| 49,642
| 4.869685
| 0.040685
| 0.085935
| 0.147748
| 0.052265
| 0.978768
| 0.976914
| 0.976537
| 0.975846
| 0.97437
| 0.970664
| 0
| 0.025586
| 0.24024
| 49,642
| 745
| 578
| 66.633557
| 0.818565
| 0.412171
| 0
| 0.899425
| 1
| 0
| 0.254996
| 0.019038
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020115
| false
| 0
| 0.014368
| 0
| 0.054598
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
822bf0e00fd5d840ecb30e747d51dee248517f63
| 1,701
|
py
|
Python
|
src/genie/libs/parser/nxos/tests/ShowInterfaceSwitchport/cli/equal/golden_output_1_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 204
|
2018-06-27T00:55:27.000Z
|
2022-03-06T21:12:18.000Z
|
src/genie/libs/parser/nxos/tests/ShowInterfaceSwitchport/cli/equal/golden_output_1_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 468
|
2018-06-19T00:33:18.000Z
|
2022-03-31T23:23:35.000Z
|
src/genie/libs/parser/nxos/tests/ShowInterfaceSwitchport/cli/equal/golden_output_1_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 309
|
2019-01-16T20:21:07.000Z
|
2022-03-30T12:56:41.000Z
|
expected_output = {
'Ethernet2/2':
{'access_vlan': 1,
'access_vlan_mode': 'default',
'admin_priv_vlan_primary_host_assoc': 'none',
'admin_priv_vlan_primary_mapping': 'none',
'admin_priv_vlan_secondary_host_assoc': 'none',
'admin_priv_vlan_secondary_mapping': 'none',
'admin_priv_vlan_trunk_encapsulation': 'dot1q',
'admin_priv_vlan_trunk_native_vlan': 'none',
'admin_priv_vlan_trunk_normal_vlans': 'none',
'admin_priv_vlan_trunk_private_vlans': 'none',
'native_vlan': 1,
'native_vlan_mode': 'default',
'operational_private_vlan': 'none',
'switchport_mode': 'trunk',
'switchport_monitor': 'Not enabled',
'switchport_status': 'enabled',
'switchport_enable': True,
'trunk_vlans': '100,300'},
'Ethernet2/3':
{'access_vlan': 100,
'access_vlan_mode': 'Vlan not created',
'admin_priv_vlan_primary_host_assoc': 'none',
'admin_priv_vlan_primary_mapping': 'none',
'admin_priv_vlan_secondary_host_assoc': 'none',
'admin_priv_vlan_secondary_mapping': 'none',
'admin_priv_vlan_trunk_encapsulation': 'dot1q',
'admin_priv_vlan_trunk_native_vlan': 'none',
'admin_priv_vlan_trunk_normal_vlans': 'none',
'admin_priv_vlan_trunk_private_vlans': 'none',
'native_vlan': 1,
'native_vlan_mode': 'default',
'operational_private_vlan': 'none',
'switchport_mode': 'access',
'switchport_monitor': 'Not enabled',
'switchport_status': 'enabled',
'switchport_enable': True,
'trunk_vlans': '1-4094'}}
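# (The dict above is the golden/expected parse of `show interface switchport`
# for Ethernet2/2 and Ethernet2/3, as consumed by the genieparser unit tests.)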
| 40.5
| 56
| 0.627866
| 184
| 1,701
| 5.255435
| 0.201087
| 0.148914
| 0.215098
| 0.210962
| 0.866598
| 0.866598
| 0.866598
| 0.866598
| 0.866598
| 0.866598
| 0
| 0.017857
| 0.242798
| 1,701
| 41
| 57
| 41.487805
| 0.732919
| 0
| 0
| 0.717949
| 0
| 0
| 0.616245
| 0.347263
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
68bd0233209ffca4e3e240fc199391860b7b94dc
| 21,990
|
py
|
Python
|
tests/test_discourse_category_processor.py
|
pombredanne/measure
|
17bdca2fd579f3090ba3b191a58af28b8fc395e4
|
[
"MIT"
] | 16
|
2017-04-04T05:01:53.000Z
|
2021-10-18T09:44:48.000Z
|
tests/test_discourse_category_processor.py
|
pombredanne/measure
|
17bdca2fd579f3090ba3b191a58af28b8fc395e4
|
[
"MIT"
] | 44
|
2017-04-04T05:07:03.000Z
|
2020-03-10T05:06:15.000Z
|
tests/test_discourse_category_processor.py
|
pombredanne/measure
|
17bdca2fd579f3090ba3b191a58af28b8fc395e4
|
[
"MIT"
] | 5
|
2017-04-08T00:23:39.000Z
|
2018-07-04T00:57:52.000Z
|
import dateutil
import os
import unittest
import requests_mock
from datapackage_pipelines.utilities.lib_test_helpers import (
mock_processor_test
)
import datapackage_pipelines_measure.processors
import logging
log = logging.getLogger(__name__)
REPORT_RESPONSE = {
"report": {
"data": [
{"x": "2017-07-05", "y": 1},
{"x": "2017-07-06", "y": 2},
{"x": "2017-07-07", "y": 3},
{"x": "2017-07-09", "y": 4},
{"x": "2017-07-10", "y": 5},
{"x": "2017-07-11", "y": 6}
]
}
}
RESTRICTED_REPORT_RESPONSE = {
"report": {
"data": [
{"x": "2017-07-09", "y": 4},
{"x": "2017-07-10", "y": 5},
{"x": "2017-07-11", "y": 6}
]
}
}
SITE_RESPONSE = {
"categories": [
{
"id": 1,
"name": "Top One",
"slug": "top-one",
"has_children": True,
},
{
"id": 2,
"name": "Top Two",
"slug": "top-two",
"has_children": True,
},
{
"id": 3,
"name": "Top Three",
"slug": "top-three",
"has_children": False,
},
{
"id": 4,
"name": "Child One",
"slug": "child-one",
"parent_category_id": 1,
"has_children": False,
},
{
"id": 5,
"name": "Child Two",
"slug": "child-two",
"has_children": False,
"parent_category_id": 1
},
{
"id": 6,
"name": "Child Three",
"slug": "child-three",
"has_children": False,
"parent_category_id": 2
}
]
}
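# For reference, SITE_RESPONSE above encodes this category tree:
#   Top One (id 1)   -> Child One (id 4), Child Two (id 5)
#   Top Two (id 2)   -> Child Three (id 6)
#   Top Three (id 3) -> no children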
class TestDiscourseCategoriesProcessor_NoChildren(unittest.TestCase):
'''Tests for Discourse Category processor when no child treatment has been
defined.'''
@requests_mock.Mocker()
def test_add_discourse_category_resource_no_latest(self, m):
'''No latest data in forum-categories table, so populate with
historical data where possible.'''
# Mock API responses
m.get('https://discourse.example.com/site.json', json=SITE_RESPONSE)
m.get('https://discourse.example.com/admin/reports/topics.json?category_id=1', # noqa
json=REPORT_RESPONSE)
m.get('https://discourse.example.com/admin/reports/posts.json?category_id=1', # noqa
json=REPORT_RESPONSE)
# input arguments used by our mock `ingest`
datapackage = {
'name': 'my-datapackage',
'project': 'my-project',
'resources': [] # nothing here
}
params = {
'domain': 'discourse.example.com',
'category': {
'name': 'top-one'
}
}
# Path to the processor we want to test
processor_dir = \
os.path.dirname(datapackage_pipelines_measure.processors.__file__)
processor_path = os.path.join(processor_dir,
'add_discourse_category_resource.py')
        # Trigger the processor with our mock `ingest` and capture what it
        # returns to `spew`.
spew_args, _ = mock_processor_test(processor_path,
(params, datapackage, iter([])))
spew_dp = spew_args[0]
spew_res_iter = spew_args[1]
# Asserts for the datapackage
dp_resources = spew_dp['resources']
assert len(dp_resources) == 1
assert dp_resources[0]['name'] == 'discourse-example-com'
field_names = \
[field['name'] for field in dp_resources[0]['schema']['fields']]
assert field_names == ['domain', 'category', 'source', 'date',
'new_topics', 'new_posts']
# Asserts for the res_iter
spew_res_iter_contents = list(spew_res_iter)
assert len(list(spew_res_iter_contents)) == 1
rows = list(spew_res_iter_contents)[0]
# six days of data
assert len(rows) == 6
assert rows[0] == {
'new_topics': 1,
'new_posts': 1,
'category': 'top-one',
'date': dateutil.parser.parse('2017-07-05').date(),
'source': 'discourse',
'domain': 'discourse.example.com'
}
assert rows[len(rows) - 1] == {
'new_topics': 6,
'new_posts': 6,
'category': 'top-one',
'date': dateutil.parser.parse('2017-07-11').date(),
'source': 'discourse',
'domain': 'discourse.example.com'
}
@requests_mock.Mocker()
def test_add_discourse_category_resource_with_latest(self, m):
        '''Latest data in forum-categories table, so populate with historical
        data up to the latest where possible.'''
# Mock API responses
m.get('https://discourse.example.com/site.json', json=SITE_RESPONSE)
m.get('https://discourse.example.com/admin/reports/topics.json?category_id=1', # noqa
json=RESTRICTED_REPORT_RESPONSE)
m.get('https://discourse.example.com/admin/reports/posts.json?category_id=1', # noqa
json=RESTRICTED_REPORT_RESPONSE)
# input arguments used by our mock `ingest`
datapackage = {
'name': 'my-datapackage',
'project': 'my-project',
'resources': [{
'name': 'latest-project-entries',
'schema': {
'fields': []
}
}]
}
params = {
'domain': 'discourse.example.com',
'category': {
'name': 'top-one'
}
}
def latest_entries_res():
yield {
'category': 'top-one',
'domain': 'discourse.example.com',
'new_posts': 1,
'new_topics': 1,
'date': dateutil.parser.parse('2017-07-09').date(),
'source': 'discourse'
}
# Path to the processor we want to test
processor_dir = \
os.path.dirname(datapackage_pipelines_measure.processors.__file__)
processor_path = os.path.join(processor_dir,
'add_discourse_category_resource.py')
# Trigger the processor with our mock `ingest` and capture what it
# returns to `spew`.
spew_args, _ = mock_processor_test(processor_path,
(params, datapackage,
iter([latest_entries_res()])))
spew_dp = spew_args[0]
spew_res_iter = spew_args[1]
# Asserts for the datapackage
dp_resources = spew_dp['resources']
assert len(dp_resources) == 2
assert dp_resources[0]['name'] == 'latest-project-entries'
assert dp_resources[1]['name'] == 'discourse-example-com'
# Asserts for the res_iter
spew_res_iter_contents = list(spew_res_iter)
assert len(list(spew_res_iter_contents)) == 2
rows = list(spew_res_iter_contents)[1]
# three days of data
assert len(rows) == 3
assert rows[0] == {
'new_topics': 4,
'new_posts': 4,
'category': 'top-one',
'date': dateutil.parser.parse('2017-07-09').date(),
'source': 'discourse',
'domain': 'discourse.example.com'
}
assert rows[1] == {
'new_topics': 5,
'new_posts': 5,
'category': 'top-one',
'date': dateutil.parser.parse('2017-07-10').date(),
'source': 'discourse',
'domain': 'discourse.example.com'
}
class TestDiscourseCategoriesProcessor_ChildrenAggregate(unittest.TestCase):
'''Tests for Discourse Category processor when child treatment is
`aggregate`.'''
@requests_mock.Mocker()
def test_add_discourse_category_resource_no_latest(self, m):
'''No latest data in forum-categories table, so populate with
historical data where possible.'''
# Mock API responses
m.get('https://discourse.example.com/site.json', json=SITE_RESPONSE)
m.get('https://discourse.example.com/admin/reports/topics.json?category_id=1', # noqa
json=REPORT_RESPONSE)
m.get('https://discourse.example.com/admin/reports/posts.json?category_id=1', # noqa
json=REPORT_RESPONSE)
m.get('https://discourse.example.com/admin/reports/topics.json?category_id=4', # noqa
json=REPORT_RESPONSE)
m.get('https://discourse.example.com/admin/reports/posts.json?category_id=4', # noqa
json=REPORT_RESPONSE)
m.get('https://discourse.example.com/admin/reports/topics.json?category_id=5', # noqa
json=REPORT_RESPONSE)
m.get('https://discourse.example.com/admin/reports/posts.json?category_id=5', # noqa
json=REPORT_RESPONSE)
# input arguments used by our mock `ingest`
datapackage = {
'name': 'my-datapackage',
'project': 'my-project',
'resources': [] # nothing here
}
params = {
'domain': 'discourse.example.com',
'category': {
'name': 'top-one',
'children': 'aggregate'
}
}
# Path to the processor we want to test
processor_dir = \
os.path.dirname(datapackage_pipelines_measure.processors.__file__)
processor_path = os.path.join(processor_dir,
'add_discourse_category_resource.py')
# Trigger the processor with our mock `ingest` and capture what it
# returns to `spew`.
spew_args, _ = mock_processor_test(processor_path,
(params, datapackage, iter([])))
spew_dp = spew_args[0]
spew_res_iter = spew_args[1]
# Asserts for the datapackage
dp_resources = spew_dp['resources']
assert len(dp_resources) == 1
assert dp_resources[0]['name'] == 'discourse-example-com'
field_names = \
[field['name'] for field in dp_resources[0]['schema']['fields']]
assert field_names == ['domain', 'category', 'source', 'date',
'new_topics', 'new_posts']
# Asserts for the res_iter
spew_res_iter_contents = list(spew_res_iter)
assert len(list(spew_res_iter_contents)) == 1
rows = list(spew_res_iter_contents)[0]
# six days of data
assert len(rows) == 6
assert rows[0] == {
'new_topics': 3,
'new_posts': 3,
'category': 'top-one',
'date': dateutil.parser.parse('2017-07-05').date(),
'source': 'discourse',
'domain': 'discourse.example.com'
}
assert rows[len(rows) - 1] == {
'new_topics': 18,
'new_posts': 18,
'category': 'top-one',
'date': dateutil.parser.parse('2017-07-11').date(),
'source': 'discourse',
'domain': 'discourse.example.com'
}
@requests_mock.Mocker()
def test_add_discourse_category_resource_with_latest(self, m):
'''Latest data exists in the forum-categories table, so populate with
historical data up to the latest entry where possible.'''
# Mock API responses
m.get('https://discourse.example.com/site.json', json=SITE_RESPONSE)
m.get('https://discourse.example.com/admin/reports/topics.json?category_id=1', # noqa
json=RESTRICTED_REPORT_RESPONSE)
m.get('https://discourse.example.com/admin/reports/posts.json?category_id=1', # noqa
json=RESTRICTED_REPORT_RESPONSE)
m.get('https://discourse.example.com/admin/reports/topics.json?category_id=4', # noqa
json=RESTRICTED_REPORT_RESPONSE)
m.get('https://discourse.example.com/admin/reports/posts.json?category_id=4', # noqa
json=RESTRICTED_REPORT_RESPONSE)
m.get('https://discourse.example.com/admin/reports/topics.json?category_id=5', # noqa
json=RESTRICTED_REPORT_RESPONSE)
m.get('https://discourse.example.com/admin/reports/posts.json?category_id=5', # noqa
json=RESTRICTED_REPORT_RESPONSE)
# input arguments used by our mock `ingest`
datapackage = {
'name': 'my-datapackage',
'project': 'my-project',
'resources': [{
'name': 'latest-project-entries',
'schema': {
'fields': []
}
}]
}
params = {
'domain': 'discourse.example.com',
'category': {
'name': 'top-one',
'children': 'aggregate'
}
}
def latest_entries_res():
yield {
'category': 'top-one',
'domain': 'discourse.example.com',
'new_posts': 1,
'new_topics': 1,
'date': dateutil.parser.parse('2017-07-09').date(),
'source': 'discourse'
}
# Path to the processor we want to test
processor_dir = \
os.path.dirname(datapackage_pipelines_measure.processors.__file__)
processor_path = os.path.join(processor_dir,
'add_discourse_category_resource.py')
# Trigger the processor with our mock `ingest` and capture what it
# returns to `spew`.
spew_args, _ = mock_processor_test(processor_path,
(params, datapackage,
iter([latest_entries_res()])))
spew_dp = spew_args[0]
spew_res_iter = spew_args[1]
# Asserts for the datapackage
dp_resources = spew_dp['resources']
assert len(dp_resources) == 2
assert dp_resources[0]['name'] == 'latest-project-entries'
assert dp_resources[1]['name'] == 'discourse-example-com'
# Asserts for the res_iter
spew_res_iter_contents = list(spew_res_iter)
assert len(list(spew_res_iter_contents)) == 2
rows = list(spew_res_iter_contents)[1]
# three days of data
assert len(rows) == 3
assert rows[0] == {
'new_topics': 12,
'new_posts': 12,
'category': 'top-one',
'date': dateutil.parser.parse('2017-07-09').date(),
'source': 'discourse',
'domain': 'discourse.example.com'
}
assert rows[1] == {
'new_topics': 15,
'new_posts': 15,
'category': 'top-one',
'date': dateutil.parser.parse('2017-07-10').date(),
'source': 'discourse',
'domain': 'discourse.example.com'
}
class TestDiscourseCategoriesProcessor_ChildrenExpand(unittest.TestCase):
'''Tests for Discourse Category processor when child treatment is
`expand`.'''
@requests_mock.Mocker()
def test_add_discourse_category_resource_no_latest(self, m):
'''No latest data in forum-categories table, so populate with
historical data where possible.'''
# Mock API responses
m.get('https://discourse.example.com/site.json', json=SITE_RESPONSE)
m.get('https://discourse.example.com/admin/reports/topics.json?category_id=1', # noqa
json=REPORT_RESPONSE)
m.get('https://discourse.example.com/admin/reports/posts.json?category_id=1', # noqa
json=REPORT_RESPONSE)
m.get('https://discourse.example.com/admin/reports/topics.json?category_id=4', # noqa
json=REPORT_RESPONSE)
m.get('https://discourse.example.com/admin/reports/posts.json?category_id=4', # noqa
json=REPORT_RESPONSE)
m.get('https://discourse.example.com/admin/reports/topics.json?category_id=5', # noqa
json=REPORT_RESPONSE)
m.get('https://discourse.example.com/admin/reports/posts.json?category_id=5', # noqa
json=REPORT_RESPONSE)
# input arguments used by our mock `ingest`
datapackage = {
'name': 'my-datapackage',
'project': 'my-project',
'resources': [] # nothing here
}
params = {
'domain': 'discourse.example.com',
'category': {
'name': 'top-one',
'children': 'expand'
}
}
# Path to the processor we want to test
processor_dir = \
os.path.dirname(datapackage_pipelines_measure.processors.__file__)
processor_path = os.path.join(processor_dir,
'add_discourse_category_resource.py')
# Trigger the processor with our mock `ingest` and capture what it
# returns to `spew`.
spew_args, _ = mock_processor_test(processor_path,
(params, datapackage, iter([])))
spew_dp = spew_args[0]
spew_res_iter = spew_args[1]
# Asserts for the datapackage
dp_resources = spew_dp['resources']
assert len(dp_resources) == 1
assert dp_resources[0]['name'] == 'discourse-example-com'
field_names = \
[field['name'] for field in dp_resources[0]['schema']['fields']]
assert field_names == ['domain', 'category', 'source', 'date',
'new_topics', 'new_posts']
# Asserts for the res_iter
spew_res_iter_contents = list(spew_res_iter)
assert len(list(spew_res_iter_contents)) == 1
rows = list(spew_res_iter_contents)[0]
# six days of data for three categories
assert len(rows) == 18
categories = [(r['category'], r['new_posts']) for r in rows]
for i in range(0, 6):
assert categories[i] == ('top-one', i + 1)
for i in range(0, 6):
assert categories[i + 6] == ('child-one', i + 1)
for i in range(0, 6):
assert categories[i + 12] == ('child-two', i + 1)
@requests_mock.Mocker()
def test_add_discourse_category_resource_with_latest(self, m):
'''Latest data exists in the forum-categories table, so populate with
historical data up to the latest entry where possible.'''
# Mock API responses
m.get('https://discourse.example.com/site.json', json=SITE_RESPONSE)
m.get('https://discourse.example.com/admin/reports/topics.json?category_id=1', # noqa
json=RESTRICTED_REPORT_RESPONSE)
m.get('https://discourse.example.com/admin/reports/posts.json?category_id=1', # noqa
json=RESTRICTED_REPORT_RESPONSE)
m.get('https://discourse.example.com/admin/reports/topics.json?category_id=4', # noqa
json=RESTRICTED_REPORT_RESPONSE)
m.get('https://discourse.example.com/admin/reports/posts.json?category_id=4', # noqa
json=RESTRICTED_REPORT_RESPONSE)
m.get('https://discourse.example.com/admin/reports/topics.json?category_id=5', # noqa
json=RESTRICTED_REPORT_RESPONSE)
m.get('https://discourse.example.com/admin/reports/posts.json?category_id=5', # noqa
json=RESTRICTED_REPORT_RESPONSE)
# input arguments used by our mock `ingest`
datapackage = {
'name': 'my-datapackage',
'project': 'my-project',
'resources': [{
'name': 'latest-project-entries',
'schema': {
'fields': []
}
}]
}
params = {
'domain': 'discourse.example.com',
'category': {
'name': 'top-one',
'children': 'expand'
}
}
def latest_entries_res():
yield {
'category': 'top-one',
'domain': 'discourse.example.com',
'new_posts': 1,
'new_topics': 1,
'date': dateutil.parser.parse('2017-07-09').date(),
'source': 'discourse'
}
# Path to the processor we want to test
processor_dir = \
os.path.dirname(datapackage_pipelines_measure.processors.__file__)
processor_path = os.path.join(processor_dir,
'add_discourse_category_resource.py')
# Trigger the processor with our mock `ingest` and capture what it
# returns to `spew`.
spew_args, _ = mock_processor_test(processor_path,
(params, datapackage,
iter([latest_entries_res()])))
spew_dp = spew_args[0]
spew_res_iter = spew_args[1]
# Asserts for the datapackage
dp_resources = spew_dp['resources']
assert len(dp_resources) == 2
assert dp_resources[0]['name'] == 'latest-project-entries'
assert dp_resources[1]['name'] == 'discourse-example-com'
# Asserts for the res_iter
spew_res_iter_contents = list(spew_res_iter)
assert len(list(spew_res_iter_contents)) == 2
rows = list(spew_res_iter_contents)[1]
# three days of data for three categories
assert len(rows) == 9
categories = [(r['category'], r['new_posts']) for r in rows]
for i in range(0, 3):
assert categories[i] == ('top-one', i + 4)
for i in range(0, 3):
assert categories[i + 3] == ('child-one', i + 4)
for i in range(0, 3):
assert categories[i + 6] == ('child-two', i + 4)
| 37.718696
| 94
| 0.543884
| 2,387
| 21,990
| 4.829912
| 0.068287
| 0.079105
| 0.093937
| 0.053084
| 0.936248
| 0.935207
| 0.929135
| 0.924365
| 0.919681
| 0.911181
| 0
| 0.02129
| 0.329286
| 21,990
| 582
| 95
| 37.783505
| 0.760391
| 0.117508
| 0
| 0.743875
| 0
| 0
| 0.261835
| 0.042607
| 0
| 0
| 0
| 0
| 0.097996
| 1
| 0.020045
| false
| 0
| 0.01559
| 0
| 0.042316
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ec13b9e605d69d13348357f8d5967b27803753d3
| 6,407
|
py
|
Python
|
loldib/getratings/models/NA/na_akali/na_akali_bot.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_akali/na_akali_bot.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_akali/na_akali_bot.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
from getratings.models.ratings import Ratings
class NA_Akali_Bot_Aatrox(Ratings):
pass
class NA_Akali_Bot_Ahri(Ratings):
pass
class NA_Akali_Bot_Akali(Ratings):
pass
class NA_Akali_Bot_Alistar(Ratings):
pass
class NA_Akali_Bot_Amumu(Ratings):
pass
class NA_Akali_Bot_Anivia(Ratings):
pass
class NA_Akali_Bot_Annie(Ratings):
pass
class NA_Akali_Bot_Ashe(Ratings):
pass
class NA_Akali_Bot_AurelionSol(Ratings):
pass
class NA_Akali_Bot_Azir(Ratings):
pass
class NA_Akali_Bot_Bard(Ratings):
pass
class NA_Akali_Bot_Blitzcrank(Ratings):
pass
class NA_Akali_Bot_Brand(Ratings):
pass
class NA_Akali_Bot_Braum(Ratings):
pass
class NA_Akali_Bot_Caitlyn(Ratings):
pass
class NA_Akali_Bot_Camille(Ratings):
pass
class NA_Akali_Bot_Cassiopeia(Ratings):
pass
class NA_Akali_Bot_Chogath(Ratings):
pass
class NA_Akali_Bot_Corki(Ratings):
pass
class NA_Akali_Bot_Darius(Ratings):
pass
class NA_Akali_Bot_Diana(Ratings):
pass
class NA_Akali_Bot_Draven(Ratings):
pass
class NA_Akali_Bot_DrMundo(Ratings):
pass
class NA_Akali_Bot_Ekko(Ratings):
pass
class NA_Akali_Bot_Elise(Ratings):
pass
class NA_Akali_Bot_Evelynn(Ratings):
pass
class NA_Akali_Bot_Ezreal(Ratings):
pass
class NA_Akali_Bot_Fiddlesticks(Ratings):
pass
class NA_Akali_Bot_Fiora(Ratings):
pass
class NA_Akali_Bot_Fizz(Ratings):
pass
class NA_Akali_Bot_Galio(Ratings):
pass
class NA_Akali_Bot_Gangplank(Ratings):
pass
class NA_Akali_Bot_Garen(Ratings):
pass
class NA_Akali_Bot_Gnar(Ratings):
pass
class NA_Akali_Bot_Gragas(Ratings):
pass
class NA_Akali_Bot_Graves(Ratings):
pass
class NA_Akali_Bot_Hecarim(Ratings):
pass
class NA_Akali_Bot_Heimerdinger(Ratings):
pass
class NA_Akali_Bot_Illaoi(Ratings):
pass
class NA_Akali_Bot_Irelia(Ratings):
pass
class NA_Akali_Bot_Ivern(Ratings):
pass
class NA_Akali_Bot_Janna(Ratings):
pass
class NA_Akali_Bot_JarvanIV(Ratings):
pass
class NA_Akali_Bot_Jax(Ratings):
pass
class NA_Akali_Bot_Jayce(Ratings):
pass
class NA_Akali_Bot_Jhin(Ratings):
pass
class NA_Akali_Bot_Jinx(Ratings):
pass
class NA_Akali_Bot_Kalista(Ratings):
pass
class NA_Akali_Bot_Karma(Ratings):
pass
class NA_Akali_Bot_Karthus(Ratings):
pass
class NA_Akali_Bot_Kassadin(Ratings):
pass
class NA_Akali_Bot_Katarina(Ratings):
pass
class NA_Akali_Bot_Kayle(Ratings):
pass
class NA_Akali_Bot_Kayn(Ratings):
pass
class NA_Akali_Bot_Kennen(Ratings):
pass
class NA_Akali_Bot_Khazix(Ratings):
pass
class NA_Akali_Bot_Kindred(Ratings):
pass
class NA_Akali_Bot_Kled(Ratings):
pass
class NA_Akali_Bot_KogMaw(Ratings):
pass
class NA_Akali_Bot_Leblanc(Ratings):
pass
class NA_Akali_Bot_LeeSin(Ratings):
pass
class NA_Akali_Bot_Leona(Ratings):
pass
class NA_Akali_Bot_Lissandra(Ratings):
pass
class NA_Akali_Bot_Lucian(Ratings):
pass
class NA_Akali_Bot_Lulu(Ratings):
pass
class NA_Akali_Bot_Lux(Ratings):
pass
class NA_Akali_Bot_Malphite(Ratings):
pass
class NA_Akali_Bot_Malzahar(Ratings):
pass
class NA_Akali_Bot_Maokai(Ratings):
pass
class NA_Akali_Bot_MasterYi(Ratings):
pass
class NA_Akali_Bot_MissFortune(Ratings):
pass
class NA_Akali_Bot_MonkeyKing(Ratings):
pass
class NA_Akali_Bot_Mordekaiser(Ratings):
pass
class NA_Akali_Bot_Morgana(Ratings):
pass
class NA_Akali_Bot_Nami(Ratings):
pass
class NA_Akali_Bot_Nasus(Ratings):
pass
class NA_Akali_Bot_Nautilus(Ratings):
pass
class NA_Akali_Bot_Nidalee(Ratings):
pass
class NA_Akali_Bot_Nocturne(Ratings):
pass
class NA_Akali_Bot_Nunu(Ratings):
pass
class NA_Akali_Bot_Olaf(Ratings):
pass
class NA_Akali_Bot_Orianna(Ratings):
pass
class NA_Akali_Bot_Ornn(Ratings):
pass
class NA_Akali_Bot_Pantheon(Ratings):
pass
class NA_Akali_Bot_Poppy(Ratings):
pass
class NA_Akali_Bot_Quinn(Ratings):
pass
class NA_Akali_Bot_Rakan(Ratings):
pass
class NA_Akali_Bot_Rammus(Ratings):
pass
class NA_Akali_Bot_RekSai(Ratings):
pass
class NA_Akali_Bot_Renekton(Ratings):
pass
class NA_Akali_Bot_Rengar(Ratings):
pass
class NA_Akali_Bot_Riven(Ratings):
pass
class NA_Akali_Bot_Rumble(Ratings):
pass
class NA_Akali_Bot_Ryze(Ratings):
pass
class NA_Akali_Bot_Sejuani(Ratings):
pass
class NA_Akali_Bot_Shaco(Ratings):
pass
class NA_Akali_Bot_Shen(Ratings):
pass
class NA_Akali_Bot_Shyvana(Ratings):
pass
class NA_Akali_Bot_Singed(Ratings):
pass
class NA_Akali_Bot_Sion(Ratings):
pass
class NA_Akali_Bot_Sivir(Ratings):
pass
class NA_Akali_Bot_Skarner(Ratings):
pass
class NA_Akali_Bot_Sona(Ratings):
pass
class NA_Akali_Bot_Soraka(Ratings):
pass
class NA_Akali_Bot_Swain(Ratings):
pass
class NA_Akali_Bot_Syndra(Ratings):
pass
class NA_Akali_Bot_TahmKench(Ratings):
pass
class NA_Akali_Bot_Taliyah(Ratings):
pass
class NA_Akali_Bot_Talon(Ratings):
pass
class NA_Akali_Bot_Taric(Ratings):
pass
class NA_Akali_Bot_Teemo(Ratings):
pass
class NA_Akali_Bot_Thresh(Ratings):
pass
class NA_Akali_Bot_Tristana(Ratings):
pass
class NA_Akali_Bot_Trundle(Ratings):
pass
class NA_Akali_Bot_Tryndamere(Ratings):
pass
class NA_Akali_Bot_TwistedFate(Ratings):
pass
class NA_Akali_Bot_Twitch(Ratings):
pass
class NA_Akali_Bot_Udyr(Ratings):
pass
class NA_Akali_Bot_Urgot(Ratings):
pass
class NA_Akali_Bot_Varus(Ratings):
pass
class NA_Akali_Bot_Vayne(Ratings):
pass
class NA_Akali_Bot_Veigar(Ratings):
pass
class NA_Akali_Bot_Velkoz(Ratings):
pass
class NA_Akali_Bot_Vi(Ratings):
pass
class NA_Akali_Bot_Viktor(Ratings):
pass
class NA_Akali_Bot_Vladimir(Ratings):
pass
class NA_Akali_Bot_Volibear(Ratings):
pass
class NA_Akali_Bot_Warwick(Ratings):
pass
class NA_Akali_Bot_Xayah(Ratings):
pass
class NA_Akali_Bot_Xerath(Ratings):
pass
class NA_Akali_Bot_XinZhao(Ratings):
pass
class NA_Akali_Bot_Yasuo(Ratings):
pass
class NA_Akali_Bot_Yorick(Ratings):
pass
class NA_Akali_Bot_Zac(Ratings):
pass
class NA_Akali_Bot_Zed(Ratings):
pass
class NA_Akali_Bot_Ziggs(Ratings):
pass
class NA_Akali_Bot_Zilean(Ratings):
pass
class NA_Akali_Bot_Zyra(Ratings):
pass
| 15.364508
| 46
| 0.761667
| 972
| 6,407
| 4.59465
| 0.151235
| 0.216301
| 0.370802
| 0.463502
| 0.797582
| 0.797582
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173404
| 6,407
| 416
| 47
| 15.401442
| 0.843278
| 0
| 0
| 0.498195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.498195
| 0.00361
| 0
| 0.501805
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
ec14b2321e4f5f5125a5b9d0a5ff054614c535cd
| 185
|
py
|
Python
|
reset.py
|
AmitJoki/SJournal
|
f491141cae9065f030c0ab6b3aebf483537bd2b1
|
[
"MIT"
] | null | null | null |
reset.py
|
AmitJoki/SJournal
|
f491141cae9065f030c0ab6b3aebf483537bd2b1
|
[
"MIT"
] | null | null | null |
reset.py
|
AmitJoki/SJournal
|
f491141cae9065f030c0ab6b3aebf483537bd2b1
|
[
"MIT"
] | null | null | null |
import model
from collections import defaultdict
import all
def reset():
model.save_obj(defaultdict(int, all.all_words), 'data')
model.save_obj(defaultdict(int, {}), 'errors')
| 23.125
| 59
| 0.735135
| 25
| 185
| 5.32
| 0.56
| 0.135338
| 0.180451
| 0.345865
| 0.390977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135135
| 185
| 7
| 60
| 26.428571
| 0.83125
| 0
| 0
| 0
| 0
| 0
| 0.054054
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| true
| 0
| 0.5
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ec1d795dbad52241367819ca7144795e7c495570
| 196
|
py
|
Python
|
docs/examples/__init__.py
|
nimish/xsdata
|
7afe2781b66982428cc1731f53c065086acd35c1
|
[
"MIT"
] | null | null | null |
docs/examples/__init__.py
|
nimish/xsdata
|
7afe2781b66982428cc1731f53c065086acd35c1
|
[
"MIT"
] | null | null | null |
docs/examples/__init__.py
|
nimish/xsdata
|
7afe2781b66982428cc1731f53c065086acd35c1
|
[
"MIT"
] | null | null | null |
from examples.primer import Items
from examples.primer import PurchaseOrderType
from examples.primer import Usaddress
from examples.primer import Comment
from examples.primer import PurchaseOrder
| 32.666667
| 45
| 0.872449
| 25
| 196
| 6.84
| 0.36
| 0.350877
| 0.526316
| 0.701754
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102041
| 196
| 5
| 46
| 39.2
| 0.971591
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
ec34fae9b5d7999d7a43a2a8b06bfd1eee1b8494
| 190
|
py
|
Python
|
darwindb/stores/__init__.py
|
fasteroute/darwin-db
|
6fb4585c43b4e85c4e5a634243b63bda6ff91c33
|
[
"Apache-2.0"
] | 3
|
2016-11-08T15:15:50.000Z
|
2019-03-09T10:57:39.000Z
|
darwindb/stores/__init__.py
|
fasteroute/darwin-db
|
6fb4585c43b4e85c4e5a634243b63bda6ff91c33
|
[
"Apache-2.0"
] | 1
|
2015-09-30T20:19:03.000Z
|
2015-11-02T15:44:15.000Z
|
darwindb/stores/__init__.py
|
fasteroute/darwin-db
|
6fb4585c43b4e85c4e5a634243b63bda6ff91c33
|
[
"Apache-2.0"
] | 2
|
2015-10-30T23:42:56.000Z
|
2020-03-02T20:20:50.000Z
|
from darwindb.stores.BaseStore import BaseStore
from darwindb.stores.PostgresStore import Store as PostgresStore
from darwindb.stores.PostgresStore import Connection as PostgresConnection
| 31.666667
| 74
| 0.873684
| 22
| 190
| 7.545455
| 0.454545
| 0.216867
| 0.325301
| 0.373494
| 0.445783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094737
| 190
| 5
| 75
| 38
| 0.965116
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
ec4f5c1b80f845679da8231bd0dc2db6691f55f3
| 75,557
|
py
|
Python
|
phase2_stuff/.ipynb_checkpoints/phot-checkpoint.py
|
davidjwilson/pceb
|
259cf4b18b51b7163d6ce84ab150c5f65f8cfdec
|
[
"MIT"
] | null | null | null |
phase2_stuff/.ipynb_checkpoints/phot-checkpoint.py
|
davidjwilson/pceb
|
259cf4b18b51b7163d6ce84ab150c5f65f8cfdec
|
[
"MIT"
] | null | null | null |
phase2_stuff/.ipynb_checkpoints/phot-checkpoint.py
|
davidjwilson/pceb
|
259cf4b18b51b7163d6ce84ab150c5f65f8cfdec
|
[
"MIT"
] | null | null | null |
"""Module for useful photometric transformations
add_DM -- compute apparent magnitudes for a given distance and reddening
"""
import reddenings
from numpy import *
from astLib import astSED
from pylab import *
def add_DM(M, passband, survey, dist, ebv, law="Fitzpatrick", tot_to_sel=3.1):
"""Shift to cluster distance modulus (m-M)
Inputs:
M: intrinsic magnitude
passband, survey: allowed passbands and surveys are defined in reddenings.compute_extinction()
ebv: E(B-V)
dist: distance in pc
law: either Fitzpatrick (1999) or Cardelli et al (1989) laws
tot_to_sel: total-to-selective extinction ratio R_V (default 3.1)"""
Am = reddenings.compute_extinction(passband, survey, law=law, tot_to_sel=tot_to_sel, ebv=ebv)
m = M + 5*log10(dist) - 5 + Am
return m
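# Worked example of the distance-modulus relation implemented above
# (hypothetical numbers; no call into the reddenings module is needed):
# M = 10.0 at dist = 100 pc with extinction Am = 0.2 gives
# m = 10.0 + 5*log10(100) - 5 + 0.2 = 15.2
assert abs((10.0 + 5*log10(100) - 5 + 0.2) - 15.2) < 1e-9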
def vega2AB(mag,err,passband):
Vega = astSED.VEGA
b1 = astSED.Passband('/storage/astro2/phsmav/data2/filters/poss/poss1_b.dat')
b2 = astSED.Passband('/storage/astro2/phsmav/data2/filters/poss/poss2_b.dat')
r1 = astSED.Passband('/storage/astro2/phsmav/data2/filters/poss/poss1_r.dat')
r2 = astSED.Passband('/storage/astro2/phsmav/data2/filters/poss/poss2_r.dat')
if passband=='b1':
f0 = Vega.calcFlux(b1)
flux,eflux = f0*10**(-0.4*mag),-0.4*f0*err*10**(-0.4*mag)
ab = []
for j in range(0, len(flux)):
ab.append(astSED.flux2Mag(flux[j],eflux[j],b1))
return array(ab)
elif passband=='r1':
f0 = Vega.calcFlux(r1)
flux,eflux = f0*10**(-0.4*mag),-0.4*f0*err*10**(-0.4*mag)
ab = []
for j in range(0, len(flux)):
ab.append(astSED.flux2Mag(flux[j],eflux[j],r1))
return array(ab)
elif passband=='b2':
f0 = Vega.calcFlux(b2)
flux,eflux = f0*10**(-0.4*mag),-0.4*f0*err*10**(-0.4*mag)
ab = []
for j in range(0, len(flux)):
ab.append(astSED.flux2Mag(flux[j],eflux[j],b2))
return array(ab)
elif passband=='r2':
f0 = Vega.calcFlux(r2)
flux,eflux = f0*10**(-0.4*mag),-0.4*f0*err*10**(-0.4*mag)
ab = []
for j in range(0, len(flux)):
ab.append(astSED.flux2Mag(flux[j],eflux[j],r2))
return array(ab)
def average_errors(color, err_color, mag, err_mag, m1,m2,dm):
r = arange(m1,m2+dm,dm)
i = 0
C,err_C,M,err_M = [], [], [], []
while i<len(r)-1:
c = color[logical_and(mag>=r[i],mag<=r[i+1])]
c = mean(c[where(isnan(c)==False)])
#m = color[logical_and(mag>=r[i],mag<=r[i+1])]
#m = mean(m[where(isnan(m)==False)])
m = r[i]+dm/2.
delta_c = err_color[logical_and(mag>=r[i],mag<=r[i+1])]
delta_c = mean(delta_c[where(isnan(delta_c)==False)])
delta_m = err_mag[logical_and(mag>=r[i],mag<=r[i+1])]
delta_m = mean(delta_m[where(isnan(delta_m)==False)])
C.append(c),err_C.append(delta_c),M.append(m),err_M.append(delta_m)
i = i +1
C,err_C,M,err_M = array(C),array(err_C),array(M),array(err_M)
return C, err_C, M, err_M
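# Minimal sketch of the binning above using hypothetical arrays: values with
# mag in [10.0, 10.5] are averaged into a bin centred at 10.25, the next bin
# is centred at 10.75, and so on.
_c = array([1.0, 2.0, 3.0])
_e = array([0.1, 0.1, 0.1])
_m = array([10.1, 10.4, 10.6])
_C, _eC, _M, _eM = average_errors(_c, _e, _m, _e, 10.0, 11.0, 0.5)
assert list(_M) == [10.25, 10.75] and list(_C) == [1.5, 3.0]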
def reduced_proper_motion(mag, e_mag, PM, e_PM):
""" Reduced proper motions in the input Band
PM: the total proper motion in arcsec/yr"""
Hm = mag + 5*log10(PM) + 5
e_Hm = sqrt(e_mag**2+(5.*e_PM/(log(10)*PM))**2)
return Hm, e_Hm
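# Quick sanity check of the relation above (hypothetical values): a star with
# mag = 15.0 and a total proper motion of 0.1 arcsec/yr has
# H = 15.0 + 5*log10(0.1) + 5 = 15.0.
assert abs(reduced_proper_motion(15.0, 0.0, 0.1, 0.0)[0] - 15.0) < 1e-9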
def return_fluxes(mag, mag_err, passband, survey="vphas",units='erg/cm^2/s/A'):
"""Transforming magnitudes into fluxes.
Available surveys, passbands:
iphas: r, i, ha
atlas, atlas-AB: u,g,r,i,z
gaia: gp, bp, rp
tycho, hipparcos: hp, bt, vt
vphas: u, g, r, i, ha
sdss: u,g,r,i,z
panstarrs: g,r,i,y,z
galex: fuv, nuv
apass: b,v,g,r,i
2mass: j,h,k
ukidss: y,z,j,h,k
wise: w1,w2,w3,w4
spitzer: i1,i2,i3,i4
Johnson: B,V
Bessell: B,V
ctio: B,V,R,I
Output: tuple(flux,flux_error,wavelength)
"""
vega = astSED.VegaSED()
if survey=="iphas":
if passband=="r":
v_r = vega.calcFlux(reddenings.passbands.r_band_iphas)
f,e_f = v_r*10**(-mag/2.5),abs(v_r*10**(-(mag+mag_err)/2.5)-v_r*10**(-mag/2.5))
lambda_f = reddenings.passbands.r_band_iphas.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="i":
v_i = vega.calcFlux(reddenings.passbands.i_band_iphas)
f,e_f = v_i*10**(-mag/2.5),abs(v_i*10**(-(mag+mag_err)/2.5)-v_i*10**(-mag/2.5))
lambda_f = reddenings.passbands.i_band_iphas.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="ha":
v_ha = vega.calcFlux(reddenings.passbands.h_band_iphas)
f,e_f = v_ha*10**(-mag/2.5),abs(v_ha*10**(-(mag+mag_err)/2.5)-v_ha*10**(-mag/2.5))
lambda_f = reddenings.passbands.h_band_iphas.effectiveWavelength()
return f,e_f,lambda_f
elif survey=="atlas":
if passband=="u":
v_u = vega.calcFlux(reddenings.passbands.u_band_atlas)
f,e_f = v_u*10**(-mag/2.5),abs(v_u*10**(-(mag+mag_err)/2.5)-v_u*10**(-mag/2.5))
lambda_f = reddenings.passbands.u_band_atlas.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="g":
v_g = vega.calcFlux(reddenings.passbands.g_band_atlas)
f,e_f = v_g*10**(-mag/2.5),abs(v_g*10**(-(mag+mag_err)/2.5)-v_g*10**(-mag/2.5))
lambda_f = reddenings.passbands.g_band_atlas.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="r":
v_r = vega.calcFlux(reddenings.passbands.r_band_atlas)
f,e_f = v_r*10**(-mag/2.5),abs(v_r*10**(-(mag+mag_err)/2.5)-v_r*10**(-mag/2.5))
lambda_f = reddenings.passbands.r_band_atlas.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="i":
v_i = vega.calcFlux(reddenings.passbands.i_band_atlas)
f,e_f = v_i*10**(-mag/2.5),abs(v_i*10**(-(mag+mag_err)/2.5)-v_i*10**(-mag/2.5))
lambda_f = reddenings.passbands.i_band_atlas.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="z":
v_z = vega.calcFlux(reddenings.passbands.z_band_atlas)
f,e_f = v_z*10**(-mag/2.5),abs(v_z*10**(-(mag+mag_err)/2.5)-v_z*10**(-mag/2.5))
lambda_f = reddenings.passbands.z_band_atlas.effectiveWavelength()
return f,e_f,lambda_f
elif survey=="gaia":
if passband=="gp":
v_gp = vega.calcFlux(reddenings.passbands.Gp_band_gaia)
f,e_f = v_gp*10**(-mag/2.5),abs(v_gp*10**(-(mag+mag_err)/2.5)-v_gp*10**(-mag/2.5))
lambda_f = reddenings.passbands.Gp_band_gaia.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="bp":
v_bp = vega.calcFlux(reddenings.passbands.Bp_band_gaia)
f,e_f = v_bp*10**(-mag/2.5),abs(v_bp*10**(-(mag+mag_err)/2.5)-v_bp*10**(-mag/2.5))
lambda_f = reddenings.passbands.Bp_band_gaia.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="rp":
v_rp = vega.calcFlux(reddenings.passbands.Rp_band_gaia)
f,e_f = v_rp*10**(-mag/2.5),abs(v_rp*10**(-(mag+mag_err)/2.5)-v_rp*10**(-mag/2.5))
lambda_f = reddenings.passbands.Rp_band_gaia.effectiveWavelength()
return f,e_f,lambda_f
elif survey=="tycho" or survey=="hipparcos":
if passband=="hp":
v_hp = vega.calcFlux(reddenings.passbands.Hp_band_tycho)
f,e_f = v_hp*10**(-mag/2.5),abs(v_hp*10**(-(mag+mag_err)/2.5)-v_hp*10**(-mag/2.5))
lambda_f = reddenings.passbands.Hp_band_tycho.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="bt":
v_bt = vega.calcFlux(reddenings.passbands.Bt_band_tycho)
f,e_f = v_bt*10**(-mag/2.5),abs(v_bt*10**(-(mag+mag_err)/2.5)-v_bt*10**(-mag/2.5))
lambda_f = reddenings.passbands.Bt_band_tycho.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="vt":
v_vt = vega.calcFlux(reddenings.passbands.Vt_band_tycho)
f,e_f = v_vt*10**(-mag/2.5),abs(v_vt*10**(-(mag+mag_err)/2.5)-v_vt*10**(-mag/2.5))
lambda_f = reddenings.passbands.Vt_band_tycho.effectiveWavelength()
return f,e_f,lambda_f
elif survey=="atlas-AB":
if passband=="u":
f,e_f = astSED.mag2Flux(mag,mag_err,reddenings.passbands.u_band_atlas)
lambda_f = reddenings.passbands.u_band_atlas.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="g":
f,e_f = astSED.mag2Flux(mag,mag_err,reddenings.passbands.g_band_atlas)
lambda_f = reddenings.passbands.g_band_atlas.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="r":
f,e_f = astSED.mag2Flux(mag,mag_err,reddenings.passbands.r_band_atlas)
lambda_f = reddenings.passbands.r_band_atlas.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="i":
f,e_f = astSED.mag2Flux(mag,mag_err,reddenings.passbands.i_band_atlas)
lambda_f = reddenings.passbands.i_band_atlas.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="z":
f,e_f = astSED.mag2Flux(mag,mag_err,reddenings.passbands.z_band_atlas)
lambda_f = reddenings.passbands.z_band_atlas.effectiveWavelength()
return f,e_f,lambda_f
elif survey=="vphas":
if passband=="u":
v_u = vega.calcFlux(reddenings.passbands.u_band_vphas)
f,e_f = v_u*10**(-mag/2.5),abs(v_u*10**(-(mag+mag_err)/2.5)-v_u*10**(-mag/2.5))
lambda_f = reddenings.passbands.u_band_vphas.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="g":
v_g = vega.calcFlux(reddenings.passbands.g_band_vphas)
f,e_f = v_g*10**(-mag/2.5),abs(v_g*10**(-(mag+mag_err)/2.5)-v_g*10**(-mag/2.5))
lambda_f = reddenings.passbands.g_band_vphas.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="r":
v_r = vega.calcFlux(reddenings.passbands.r_band_vphas)
f,e_f = v_r*10**(-mag/2.5),abs(v_r*10**(-(mag+mag_err)/2.5)-v_r*10**(-mag/2.5))
lambda_f = reddenings.passbands.r_band_vphas.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="i":
v_i = vega.calcFlux(reddenings.passbands.i_band_vphas)
f,e_f = v_i*10**(-mag/2.5),abs(v_i*10**(-(mag+mag_err)/2.5)-v_i*10**(-mag/2.5))
lambda_f = reddenings.passbands.i_band_vphas.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="ha":
v_ha = vega.calcFlux(reddenings.passbands.h_band_vphas)
f,e_f = v_ha*10**(-mag/2.5),abs(v_ha*10**(-(mag+mag_err)/2.5)-v_ha*10**(-mag/2.5))
lambda_f = reddenings.passbands.h_band_vphas.effectiveWavelength()
return f,e_f,lambda_f
elif survey=="sdss":
if passband=="u":
f,e_f = astSED.mag2Flux(mag,mag_err,reddenings.passbands.u_band_sdss)
lambda_f = reddenings.passbands.u_band_sdss.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="g":
f,e_f = astSED.mag2Flux(mag,mag_err,reddenings.passbands.g_band_sdss)
lambda_f = reddenings.passbands.g_band_sdss.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="r":
f,e_f = astSED.mag2Flux(mag,mag_err,reddenings.passbands.r_band_sdss)
lambda_f = reddenings.passbands.r_band_sdss.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="i":
f,e_f = astSED.mag2Flux(mag,mag_err,reddenings.passbands.i_band_sdss)
lambda_f = reddenings.passbands.i_band_sdss.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="z":
f,e_f = astSED.mag2Flux(mag,mag_err,reddenings.passbands.z_band_sdss)
lambda_f = reddenings.passbands.z_band_sdss.effectiveWavelength()
return f,e_f,lambda_f
elif survey=="panstarrs":
if passband=="g":
f,e_f = astSED.mag2Flux(mag,mag_err,reddenings.passbands.g_band_ps1)
lambda_f = reddenings.passbands.g_band_ps1.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="r":
f,e_f = astSED.mag2Flux(mag,mag_err,reddenings.passbands.r_band_ps1)
lambda_f = reddenings.passbands.r_band_ps1.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="i":
f,e_f = astSED.mag2Flux(mag,mag_err,reddenings.passbands.i_band_ps1)
lambda_f = reddenings.passbands.i_band_ps1.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="y":
f,e_f = astSED.mag2Flux(mag,mag_err,reddenings.passbands.y_band_ps1)
lambda_f = reddenings.passbands.y_band_ps1.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="z":
f,e_f = astSED.mag2Flux(mag,mag_err,reddenings.passbands.z_band_ps1)
lambda_f = reddenings.passbands.z_band_ps1.effectiveWavelength()
return f,e_f,lambda_f
elif survey=="galex":
if passband=="fuv":
f,e_f = astSED.mag2Flux(mag,mag_err,reddenings.passbands.fuv_band_galex)
lambda_f = reddenings.passbands.fuv_band_galex.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="nuv":
f,e_f = astSED.mag2Flux(mag,mag_err,reddenings.passbands.nuv_band_galex)
lambda_f = reddenings.passbands.nuv_band_galex.effectiveWavelength()
return f,e_f,lambda_f
elif survey=="apass":
if passband=="b":
v_b = vega.calcFlux(reddenings.passbands.B_band_apass)
f,e_f = v_b*10**(-mag/2.5),abs(v_b*10**(-(mag+mag_err)/2.5)-v_b*10**(-mag/2.5))
lambda_f = reddenings.passbands.B_band_apass.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="v":
v_v = vega.calcFlux(reddenings.passbands.V_band_apass)
f,e_f = v_v*10**(-mag/2.5),abs(v_v*10**(-(mag+mag_err)/2.5)-v_v*10**(-mag/2.5))
lambda_f = reddenings.passbands.V_band_apass.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="g":
f,e_f = astSED.mag2Flux(mag,mag_err,reddenings.passbands.g_band_apass)
lambda_f = reddenings.passbands.g_band_apass.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="r":
f,e_f = astSED.mag2Flux(mag,mag_err,reddenings.passbands.r_band_apass)
lambda_f = reddenings.passbands.r_band_apass.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="i":
f,e_f = astSED.mag2Flux(mag,mag_err,reddenings.passbands.i_band_apass)
lambda_f = reddenings.passbands.i_band_apass.effectiveWavelength()
return f,e_f,lambda_f
elif survey=="2mass":
if passband=="j":
v_j = vega.calcFlux(reddenings.passbands.j_band_2mass)
f,e_f = v_j*10**(-mag/2.5),abs(v_j*10**(-(mag+mag_err)/2.5)-v_j*10**(-mag/2.5))
lambda_f = reddenings.passbands.j_band_2mass.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="h":
v_h = vega.calcFlux(reddenings.passbands.h_band_2mass)
f,e_f = v_h*10**(-mag/2.5),abs(v_h*10**(-(mag+mag_err)/2.5)-v_h*10**(-mag/2.5))
lambda_f = reddenings.passbands.h_band_2mass.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="k":
v_k = vega.calcFlux(reddenings.passbands.k_band_2mass)
f,e_f = v_k*10**(-mag/2.5),abs(v_k*10**(-(mag+mag_err)/2.5)-v_k*10**(-mag/2.5))
lambda_f = reddenings.passbands.k_band_2mass.effectiveWavelength()
return f,e_f,lambda_f
elif survey=="ukidss":
if passband=="y":
v_y = vega.calcFlux(reddenings.passbands.y_band_ukidss)
f,e_f = v_y*10**(-mag/2.5),abs(v_y*10**(-(mag+mag_err)/2.5)-v_y*10**(-mag/2.5))
lambda_f = reddenings.passbands.y_band_ukidss.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="z":
v_z = vega.calcFlux(reddenings.passbands.z_band_ukidss)
f,e_f = v_z*10**(-mag/2.5),abs(v_z*10**(-(mag+mag_err)/2.5)-v_z*10**(-mag/2.5))
lambda_f = reddenings.passbands.z_band_ukidss.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="j":
v_j = vega.calcFlux(reddenings.passbands.j_band_ukidss)
f,e_f = v_j*10**(-mag/2.5),abs(v_j*10**(-(mag+mag_err)/2.5)-v_j*10**(-mag/2.5))
lambda_f = reddenings.passbands.j_band_ukidss.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="h":
v_h = vega.calcFlux(reddenings.passbands.h_band_ukidss)
f,e_f = v_h*10**(-mag/2.5),abs(v_h*10**(-(mag+mag_err)/2.5)-v_h*10**(-mag/2.5))
lambda_f = reddenings.passbands.h_band_ukidss.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="k":
v_k = vega.calcFlux(reddenings.passbands.k_band_ukidss)
f,e_f = v_k*10**(-mag/2.5),abs(v_k*10**(-(mag+mag_err)/2.5)-v_k*10**(-mag/2.5))
lambda_f = reddenings.passbands.k_band_ukidss.effectiveWavelength()
return f,e_f,lambda_f
elif survey=="wise":
if passband=="w1":
v_w1 = vega.calcFlux(reddenings.passbands.w1_band_wise)
f,e_f = v_w1*10**(-mag/2.5),abs(v_w1*10**(-(mag+mag_err)/2.5)-v_w1*10**(-mag/2.5))
lambda_f = reddenings.passbands.w1_band_wise.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="w2":
v_w2 = vega.calcFlux(reddenings.passbands.w2_band_wise)
f,e_f = v_w2*10**(-mag/2.5),abs(v_w2*10**(-(mag+mag_err)/2.5)-v_w2*10**(-mag/2.5))
lambda_f = reddenings.passbands.w2_band_wise.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="w3":
v_w3 = vega.calcFlux(reddenings.passbands.w3_band_wise)
f,e_f = v_w3*10**(-mag/2.5),abs(v_w3*10**(-(mag+mag_err)/2.5)-v_w3*10**(-mag/2.5))
lambda_f = reddenings.passbands.w3_band_wise.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="w4":
v_w4 = vega.calcFlux(reddenings.passbands.w4_band_wise)
f,e_f = v_w4*10**(-mag/2.5),abs(v_w4*10**(-(mag+mag_err)/2.5)-v_w4*10**(-mag/2.5))
lambda_f = reddenings.passbands.w4_band_wise.effectiveWavelength()
return f,e_f,lambda_f
elif survey=="spitzer":
if passband=="i1":
v_i1 = vega.calcFlux(reddenings.passbands.i1_band_spitzer)
f,e_f = v_i1*10**(-mag/2.5),abs(v_i1*10**(-(mag+mag_err)/2.5)-v_i1*10**(-mag/2.5))
lambda_f = reddenings.passbands.i1_band_spitzer.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="i2":
v_i2 = vega.calcFlux(reddenings.passbands.i2_band_spitzer)
f,e_f = v_i2*10**(-mag/2.5),abs(v_i2*10**(-(mag+mag_err)/2.5)-v_i2*10**(-mag/2.5))
lambda_f = reddenings.passbands.i2_band_spitzer.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="i3":
v_i3 = vega.calcFlux(reddenings.passbands.i3_band_spitzer)
f,e_f = v_i3*10**(-mag/2.5),abs(v_i3*10**(-(mag+mag_err)/2.5)-v_i3*10**(-mag/2.5))
lambda_f = reddenings.passbands.i3_band_spitzer.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="i4":
v_i4 = vega.calcFlux(reddenings.passbands.i4_band_spitzer)
f,e_f = v_i4*10**(-mag/2.5),abs(v_i4*10**(-(mag+mag_err)/2.5)-v_i4*10**(-mag/2.5))
lambda_f = reddenings.passbands.i4_band_spitzer.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="m1":
v_m1 = vega.calcFlux(reddenings.passbands.m1_band_spitzer)
f,e_f = v_m1*10**(-mag/2.5),abs(v_m1*10**(-(mag+mag_err)/2.5)-v_m1*10**(-mag/2.5))
lambda_f = reddenings.passbands.m1_band_spitzer.effectiveWavelength()
return f,e_f,lambda_f
elif survey=="Johnson":
if passband=="V":
v_v = vega.calcFlux(reddenings.passbands.V_band_johnson)
f,e_f = v_v*10**(-mag/2.5),abs(v_v*10**(-(mag+mag_err)/2.5)-v_v*10**(-mag/2.5))
lambda_f = reddenings.passbands.V_band_johnson.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="B":
v_b = vega.calcFlux(reddenings.passbands.B_band_johnson)
f,e_f = v_b*10**(-mag/2.5),abs(v_b*10**(-(mag+mag_err)/2.5)-v_b*10**(-mag/2.5))
lambda_f = reddenings.passbands.B_band_johnson.effectiveWavelength()
return f,e_f,lambda_f
elif survey=="Bessell":
if passband=="V":
v_v = vega.calcFlux(reddenings.passbands.V_band_bessell)
f,e_f = v_v*10**(-mag/2.5),abs(v_v*10**(-(mag+mag_err)/2.5)-v_v*10**(-mag/2.5))
lambda_f = reddenings.passbands.V_band_bessell.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="B":
v_b = vega.calcFlux(reddenings.passbands.B_band_bessell)
f,e_f = v_b*10**(-mag/2.5),abs(v_b*10**(-(mag+mag_err)/2.5)-v_b*10**(-mag/2.5))
lambda_f = reddenings.passbands.B_band_bessell.effectiveWavelength()
return f,e_f,lambda_f
elif survey=="ctio":
if passband=="V":
v_v = vega.calcFlux(reddenings.passbands.V_band_ctio)
f,e_f = v_v*10**(-mag/2.5),abs(v_v*10**(-(mag+mag_err)/2.5)-v_v*10**(-mag/2.5))
lambda_f = reddenings.passbands.V_band_ctio.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="B":
v_b = vega.calcFlux(reddenings.passbands.B_band_ctio)
f,e_f = v_b*10**(-mag/2.5),abs(v_b*10**(-(mag+mag_err)/2.5)-v_b*10**(-mag/2.5))
lambda_f = reddenings.passbands.B_band_ctio.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="R":
v_b = vega.calcFlux(reddenings.passbands.R_band_ctio)
f,e_f = v_b*10**(-mag/2.5),abs(v_b*10**(-(mag+mag_err)/2.5)-v_b*10**(-mag/2.5))
lambda_f = reddenings.passbands.R_band_ctio.effectiveWavelength()
return f,e_f,lambda_f
elif passband=="I":
v_b = vega.calcFlux(reddenings.passbands.I_band_ctio)
f,e_f = v_b*10**(-mag/2.5),abs(v_b*10**(-(mag+mag_err)/2.5)-v_b*10**(-mag/2.5))
lambda_f = reddenings.passbands.I_band_ctio.effectiveWavelength()
return f,e_f,lambda_f
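# Sketch of the Vega-based conversion repeated in the branches above, using a
# hypothetical zero-point flux so no survey filter files are required:
# f = f0 * 10**(-mag/2.5), with the error taken as a finite difference.
_f0, _mag, _mag_err = 1.0, 2.5, 0.1
_f = _f0 * 10**(-_mag/2.5)
_e_f = abs(_f0 * 10**(-(_mag + _mag_err)/2.5) - _f)
assert abs(_f - 0.1) < 1e-12 and _e_f > 0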
def galex_mag2flux(mag):
""" This gives the flux in cnts/sec
Note: the magnitude must not include the zeropoint, i.e.
mag = m - zp, where zp=20.08 for the NUV and zp=18.82 for the FUV """
flux = 10**(-mag*0.4)
return flux
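# Example: a zeropoint-subtracted magnitude of 2.5 corresponds to
# 10**(-0.4*2.5) = 0.1 counts/sec.
assert abs(galex_mag2flux(2.5) - 0.1) < 1e-12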
def aper_corr_galex(mag, band, aperture=3):
""" Aperture correction for GALEX magnitudes
from Morrissey et al. (2007)"""
if band=="nuv":
if aperture==1:
m = mag - 2.09
return m
elif aperture==2:
m = mag - 1.33
return m
elif aperture==3:
m = mag - 0.59
return m
elif aperture==4:
m = mag - 0.23
return m
elif aperture==5:
m = mag - 0.13
return m
elif aperture==6:
m = mag - 0.09
return m
elif aperture==7:
m = mag - 0.07
return m
elif band=="fuv":
if aperture==1:
m = mag - 1.65
return m
elif aperture==2:
m = mag - 0.96
return m
elif aperture==3:
m = mag - 0.36
return m
elif aperture==4:
m = mag - 0.15
return m
elif aperture==5:
m = mag - 0.10
return m
elif aperture==6:
m = mag - 0.09
return m
elif aperture==7:
m = mag - 0.07
return m
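# Example using the Morrissey et al. (2007) values encoded above: an NUV
# magnitude of 20.0 measured in the default aperture (3) is corrected to 19.41.
assert abs(aper_corr_galex(20.0, "nuv") - 19.41) < 1e-9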
def zp_galex(mag, band):
""" Zero points
from Morrissey et al. (2007)"""
if band=="nuv":
m = 20.08 + mag
return m
elif band=="fuv":
m = 18.82 + mag
return m
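# Example: adding the NUV zero point back onto a zeropoint-subtracted
# magnitude of 2.5 gives 20.08 + 2.5 = 22.58.
assert abs(zp_galex(2.5, "nuv") - 22.58) < 1e-9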
def nonlinearity_galex(mag, band, zp=False):
""" zp==False: Nonlinearity correction for GALEX magnitudes
from Morrissey et al. (2007)
zp==True: Nonlinearity correction for GALEX magnitudes
from
"""
if zp==False:
if band=="nuv":
C0,C1,C2 = -0.314,1.365,-0.103
log_MR = -0.4*mag
if type(log_MR) == ndarray:
#log_MR[where(log_MR<0.)] = nan
log_PR_plus = log_MR.copy()
log_PR_plus[where(10**log_MR<100)] = (-C1 + sqrt(C1**2 - 4*C2*(C0-log_MR[where(10**log_MR<100)])))/(2*C2)
else:
if 10**log_MR<100:
log_PR_plus = (-C1 + sqrt(C1**2 - 4*C2*(C0-log_MR)))/(2*C2)
else:
log_PR_plus = log_MR
mag_corr = -2.5*log_PR_plus
return mag_corr
elif band=="fuv":
C0,C1,C2 = -0.531,1.696,-0.225
log_MR = -0.4*mag
if type(log_MR) == ndarray:
#log_MR[where(log_MR<0.)] = nan
log_PR_plus = log_MR.copy()
log_PR_plus[where(10**log_MR<100)] = (-C1 + sqrt(C1**2 - 4*C2*(C0-log_MR[where(10**log_MR<100)])))/(2*C2)
else:
if 10**log_MR<100:
log_PR_plus = (-C1 + sqrt(C1**2 - 4*C2*(C0-log_MR)))/(2*C2)
else:
log_PR_plus = log_MR
mag_corr = -2.5*log_PR_plus
return mag_corr
if zp==True:
if band=="nuv":
C0,C1,C2 = 2.634,26.316,-245.329
mag_corr = C0+sqrt(C1*mag+C2)
return mag_corr
elif band=="fuv":
C0,C1,C2 = 5.371,20.00,-210.200
mag_corr = C0+sqrt(C1*mag+C2)
return mag_corr
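# Hedged sketch of the zp==False branch above: for sources in the
# 10**log_MR < 100 regime it inverts the quadratic
# log10(MR) = C0 + C1*log10(PR) + C2*log10(PR)**2 relating the measured and
# predicted count rates (Morrissey et al. 2007) and returns the corresponding
# corrected magnitude. Plugging the recovered root back into the forward
# relation reproduces the input (hypothetical NUV example):
_C0, _C1, _C2 = -0.314, 1.365, -0.103
_log_MR = -0.4 * 15.0  # satisfies 10**_log_MR < 100, so the correction applies
_log_PR = (-_C1 + sqrt(_C1**2 - 4*_C2*(_C0 - _log_MR))) / (2*_C2)
assert abs(_C0 + _C1*_log_PR + _C2*_log_PR**2 - _log_MR) < 1e-9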
def load_intrinsic_magnitudes(tipo="MS", modelli="pickles98", classe="V", logg=None):
""" This functions loads the absolute magnitudes for
main sequence stars or white dwarfs.
Allowed inputs:
tipo: MS, DA, DB, BB
modelli: pickles98, koester (MLalpha=0.8, mass-radius relation by Bergeron)
classe: V, IV, III, I, None
logg: 5.25-9.5 (DA white dwarfs), 7-9 (DB white dwarfs)
main sequence models are stored here:
/storage/astro2/phsmav/data/model_atmospheres/spectral_libraries/pickles/uvk/
white dwarf models are stored here:
/storage/astro2/phsmav/data2/model_atmospheres/DA/DA_SDSS_2014/
/storage/astro2/phsmav/data2/model_atmospheres/DB/data/sdss/
"""
if tipo=="MS" and modelli=="pickles98":
if classe=="V":
P = csv2rec('/storage/astro2/phsmav/data2/models/ms_colours/pickles/magnitudes_BC/pickles_magnitudes_0.00.csv')
P = P[where(P['lumclass']==5)]
elif classe=="IV":
P = csv2rec('/storage/astro2/phsmav/data2/models/ms_colours/pickles/magnitudes_BC/pickles_magnitudes_0.00.csv')
P = P[where(P['lumclass']==4)]
elif classe=="III":
P = csv2rec('/storage/astro2/phsmav/data2/models/ms_colours/pickles/magnitudes_BC/pickles_magnitudes_0.00.csv')
P = P[where(P['lumclass']==3)]
elif classe=="I":
P = csv2rec('/storage/astro2/phsmav/data2/models/ms_colours/pickles/magnitudes_BC/pickles_magnitudes_0.00.csv')
P = P[where(P['lumclass']==1)]
elif classe is None:
P = csv2rec('/storage/astro2/phsmav/data2/models/ms_colours/pickles/magnitudes_BC/pickles_magnitudes_0.00.csv')
elif tipo=="DA":
P = csv2rec('/storage/astro2/phsmav/data2/models/cooling_models/da_koester10_magnitudes.dat')
if logg is not None:
P= P[where(P['logg']==logg)]
elif tipo=="DB":
P = csv2rec('/storage/astro2/phsmav/data2/models/cooling_models/db_koester10_magnitudes.dat')
if logg is not None:
P= P[where(P['logg']==logg)]
elif tipo=="BB":
P = csv2rec('/storage/astro2/phsmav/data2/models/cooling_models/black-body_magnitudes.dat')
if logg is not None:
P= P[where(P['logg']==logg)]
return P
def galex_poss_intrinsic(color1,color2,classe,color='red'):
""" Function plotting MS,DA sequences in the GALEX/APASS/SDSS color-color diagrams """
#logg650 = loadtxt('/storage/astro2/phsmav/data2/galex/tracks/DA/log_g_650_Av0.txt')
logg700 = loadtxt('/storage/astro2/phsmav/data2/ppmxl/tracks/DA/log_g_700_Av0.txt')
logg750 = loadtxt('/storage/astro2/phsmav/data2/ppmxl/tracks/DA/log_g_750_Av0.txt')
logg800 = loadtxt('/storage/astro2/phsmav/data2/ppmxl/tracks/DA/log_g_800_Av0.txt')
logg850 = loadtxt('/storage/astro2/phsmav/data2/ppmxl/tracks/DA/log_g_850_Av0.txt')
logg900 = loadtxt('/storage/astro2/phsmav/data2/ppmxl/tracks/DA/log_g_900_Av0.txt')
# The MS and ZZ Ceti tracks are used by the classe=="MS"/"ZZ" branches below,
# so they need to be loaded alongside the DA tracks.
MS = loadtxt('/storage/astro2/phsmav/data2/ppmxl/tracks/MS/MS_Av0.txt',usecols=[1,2,3,4,5,6,7])
ZZlogg700 = loadtxt('/storage/astro2/phsmav/data2/ppmxl/tracks/ZZCeti/log_g_7.00_Av0.txt')
ZZlogg750 = loadtxt('/storage/astro2/phsmav/data2/ppmxl/tracks/ZZCeti/log_g_7.50_Av0.txt')
ZZlogg800 = loadtxt('/storage/astro2/phsmav/data2/ppmxl/tracks/ZZCeti/log_g_8.00_Av0.txt')
ZZlogg850 = loadtxt('/storage/astro2/phsmav/data2/ppmxl/tracks/ZZCeti/log_g_8.50_Av0.txt')
ZZlogg900 = loadtxt('/storage/astro2/phsmav/data2/ppmxl/tracks/ZZCeti/log_g_9.00_Av0.txt')
if color1=="fuv-b1" and color2=="fuv-nuv" and classe=="WD":
#plot(logg650[:,3],logg650[:,1], color=color,zorder=5)
plot(logg700[:,2],logg700[:,1], color=color,zorder=5)
plot(logg750[:,2],logg750[:,1], color=color,zorder=5)
plot(logg800[:,2],logg800[:,1], color=color,zorder=5)
plot(logg850[:,2],logg850[:,1], color=color,zorder=5)
plot(logg900[:,2],logg900[:,1], color=color,zorder=5)
elif color1=="fuv-b2" and color2=="fuv-nuv" and classe=="WD":
#plot(logg650[:,3],logg650[:,1], color=color,zorder=5)
plot(logg700[:,3],logg700[:,1], color=color,zorder=5)
plot(logg750[:,3],logg750[:,1], color=color,zorder=5)
plot(logg800[:,3],logg800[:,1], color=color,zorder=5)
plot(logg850[:,3],logg850[:,1], color=color,zorder=5)
plot(logg900[:,3],logg900[:,1], color=color,zorder=5)
elif color1=="fuv-r1" and color2=="fuv-nuv" and classe=="WD":
#plot(logg650[:,3],logg650[:,1], color=color,zorder=5)
plot(logg700[:,4],logg700[:,1], color=color,zorder=5)
plot(logg750[:,4],logg750[:,1], color=color,zorder=5)
plot(logg800[:,4],logg800[:,1], color=color,zorder=5)
plot(logg850[:,4],logg850[:,1], color=color,zorder=5)
plot(logg900[:,4],logg900[:,1], color=color,zorder=5)
elif color1=="fuv-r2" and color2=="fuv-nuv" and classe=="WD":
#plot(logg650[:,3],logg650[:,1], color=color,zorder=5)
plot(logg700[:,5],logg700[:,1], color=color,zorder=5)
plot(logg750[:,5],logg750[:,1], color=color,zorder=5)
plot(logg800[:,5],logg800[:,1], color=color,zorder=5)
plot(logg850[:,5],logg850[:,1], color=color,zorder=5)
plot(logg900[:,5],logg900[:,1], color=color,zorder=5)
if color1=="nuv-b1" and color2=="fuv-nuv" and classe=="WD":
#plot(logg650[:,3],logg650[:,1], color=color,zorder=5)
plot(logg700[:,6],logg700[:,1], color=color,zorder=5)
plot(logg750[:,6],logg750[:,1], color=color,zorder=5)
plot(logg800[:,6],logg800[:,1], color=color,zorder=5)
plot(logg850[:,6],logg850[:,1], color=color,zorder=5)
plot(logg900[:,6],logg900[:,1], color=color,zorder=5)
elif color1=="nuv-b2" and color2=="fuv-nuv" and classe=="WD":
#plot(logg650[:,3],logg650[:,1], color=color,zorder=5)
plot(logg700[:,7],logg700[:,1], color=color,zorder=5)
plot(logg750[:,7],logg750[:,1], color=color,zorder=5)
plot(logg800[:,7],logg800[:,1], color=color,zorder=5)
plot(logg850[:,7],logg850[:,1], color=color,zorder=5)
plot(logg900[:,7],logg900[:,1], color=color,zorder=5)
elif color1=="nuv-r1" and color2=="fuv-nuv" and classe=="WD":
#plot(logg650[:,3],logg650[:,1], color=color,zorder=5)
plot(logg700[:,8],logg700[:,1], color=color,zorder=5)
plot(logg750[:,8],logg750[:,1], color=color,zorder=5)
plot(logg800[:,8],logg800[:,1], color=color,zorder=5)
plot(logg850[:,8],logg850[:,1], color=color,zorder=5)
plot(logg900[:,8],logg900[:,1], color=color,zorder=5)
elif color1=="nuv-r2" and color2=="fuv-nuv" and classe=="WD":
#plot(logg650[:,3],logg650[:,1], color=color,zorder=5)
plot(logg700[:,9],logg700[:,1], color=color,zorder=5)
plot(logg750[:,9],logg750[:,1], color=color,zorder=5)
plot(logg800[:,9],logg800[:,1], color=color,zorder=5)
plot(logg850[:,9],logg850[:,1], color=color,zorder=5)
plot(logg900[:,9],logg900[:,1], color=color,zorder=5)
elif color1=="b1-r1" and color2=="fuv-nuv" and classe=="WD":
#plot(logg650[:,3],logg650[:,1], color=color,zorder=5)
plot(logg700[:,10],logg700[:,1], color=color,zorder=5)
plot(logg750[:,10],logg750[:,1], color=color,zorder=5)
plot(logg800[:,10],logg800[:,1], color=color,zorder=5)
plot(logg850[:,10],logg850[:,1], color=color,zorder=5)
plot(logg900[:,10],logg900[:,1], color=color,zorder=5)
elif color1=="b2-r2" and color2=="fuv-nuv" and classe=="WD":
#plot(logg650[:,3],logg650[:,1], color=color,zorder=5)
plot(logg700[:,11],logg700[:,1], color=color,zorder=5)
plot(logg750[:,11],logg750[:,1], color=color,zorder=5)
plot(logg800[:,11],logg800[:,1], color=color,zorder=5)
plot(logg850[:,11],logg850[:,1], color=color,zorder=5)
plot(logg900[:,11],logg900[:,1], color=color,zorder=5)
if color1=="nuv-r" and color2=="fuv-nuv" and classe=="ZZ":
#plot(logg650[:,3],logg650[:,1], color=color,zorder=5)
plot(ZZlogg700[:,3],ZZlogg700[:,1], color=color,zorder=5)
plot(ZZlogg750[:,3],ZZlogg750[:,1], color=color,zorder=5)
plot(ZZlogg800[:,3],ZZlogg800[:,1], color=color,zorder=5)
plot(ZZlogg850[:,3],ZZlogg850[:,1], color=color,zorder=5)
plot(ZZlogg900[:,3],ZZlogg900[:,1], color=color,zorder=5)
elif color1=="g-i" and color2=="nuv-g" and classe=="ZZ":
#plot(logg650[:,4],logg650[:,2], color=color,zorder=5)
plot(ZZlogg700[:,4],ZZlogg700[:,2], color=color,zorder=5)
plot(ZZlogg750[:,4],ZZlogg750[:,2], color=color,zorder=5)
plot(ZZlogg800[:,4],ZZlogg800[:,2], color=color,zorder=5)
plot(ZZlogg850[:,4],ZZlogg850[:,2], color=color,zorder=5)
plot(ZZlogg900[:,4],ZZlogg900[:,2], color=color,zorder=5)
elif color1=="g-r" and color2=="nuv-g" and classe=="ZZ":
#plot(logg650[:,5],logg650[:,2], color=color,zorder=5)
plot(ZZlogg700[:,5],ZZlogg700[:,2], color=color,zorder=5)
plot(ZZlogg750[:,5],ZZlogg750[:,2], color=color,zorder=5)
plot(ZZlogg800[:,5],ZZlogg800[:,2], color=color,zorder=5)
plot(ZZlogg850[:,5],ZZlogg850[:,2], color=color,zorder=5)
plot(ZZlogg900[:,5],ZZlogg900[:,2], color=color,zorder=5)
elif color1=="nuv-r" and color2=="fuv-nuv" and classe=="MS":
plot(MS[:,3],MS[:,1], color=color,zorder=5, ls='dashed')
elif color1=="g-r" and color2=="nuv-g" and classe=="MS":
plot(MS[:,5],MS[:,2], color=color,zorder=5, ls='dashed')
elif color1=="g-i" and color2=="nuv-g" and classe=="MS":
plot(MS[:,4],MS[:,2], color=color,zorder=5, ls='dashed')
def vista_intrinsic(color1,color2,classe='MS',reddening=[0,0],dist=10,color='red',ls='solid'):
""" Vista intrinsic colours of Pickles 1998 stars. Only main sequence and giants for now"""
P = csv2rec('/storage/astro2/phsmav/data2/models/ms_colours/pickles/magnitudes/pickles_magnitudes_vista_0.00.csv')
Av = P[where(P['lumclass']==5)]
Aiii = P[where(P['lumclass']==3)]
if color1=="j-h" and color2=="z-y" and classe=="MS":
plot(Av['j']-Av['h']+reddening[0],Av['z']-Av['y']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
elif color1=="h-k" and color2=="j-h" and classe=="MS":
plot(Av['h']-Av['k']+reddening[0],Av['j']-Av['h']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
elif color1=="j-h" and color2=="j" and classe=="MS":
plot(Av['j']-Av['h']+reddening[0],Av['j']+reddening[1]+5*log(dist)-5, color=color,zorder=5,lw=0.3,ls=ls)
elif color1=="h-k" and color2=="h" and classe=="MS":
plot(Av['h']-Av['k']+reddening[0],Av['h']+reddening[1]+5*log(dist)-5, color=color,zorder=5,lw=0.3,ls=ls)
elif color1=="j-k" and color2=="j" and classe=="MS":
plot(Av['j']-Av['k']+reddening[0],Av['j']+reddening[1]+5*log(dist)-5, color=color,zorder=5,lw=0.3,ls=ls)
if color1=="j-h" and color2=="z-y" and classe=="GB":
plot(Aiii['j']-Aiii['h']+reddening[0],Aiii['z']-Aiii['y']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
elif color1=="h-k" and color2=="j-h" and classe=="GB":
plot(Aiii['h']-Aiii['k']+reddening[0],Aiii['j']-Aiii['h']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
elif color1=="j-h" and color2=="j" and classe=="GB":
plot(Aiii['j']-Aiii['h']+reddening[0],Aiii['j']+reddening[1]+5*log(dist)-5, color=color,zorder=5,lw=0.3,ls=ls)
elif color1=="h-k" and color2=="h" and classe=="GB":
plot(Aiii['h']-Aiii['k']+reddening[0],Aiii['h']+reddening[1]+5*log(dist)-5, color=color,zorder=5,lw=0.3,ls=ls)
elif color1=="j-k" and color2=="j" and classe=="GB":
plot(Aiii['j']-Aiii['k']+reddening[0],Aiii['j']+reddening[1]+5*log(dist)-5, color=color,zorder=5,lw=0.3,ls=ls)
def vphas_intrinsic(color1,color2,classe,reddening=[0,0],dist=10,color='red',ls='solid'):
""" Function plotting MS,DA sequences in the VPHAS+ color-color diagrams
reddening = [E(color1),E(color2)]"""
DA = csv2rec('/storage/astro2/phsmav/data2/models/cooling_models/da_koester10_magnitudes.dat')
logg700 = DA[where(DA['logg']==7.00)]
logg750 = DA[where(DA['logg']==7.50)]
logg800 = DA[where(DA['logg']==8.00)]
logg850 = DA[where(DA['logg']==8.50)]
logg900 = DA[where(DA['logg']==9.00)]
DB = csv2rec('/storage/astro2/phsmav/data2/models/cooling_models/db_koester10_magnitudes.dat')
DBlogg700 = DB[where(DB['logg']==7.00)]
DBlogg750 = DB[where(DB['logg']==7.50)]
DBlogg800 = DB[where(DB['logg']==8.00)]
DBlogg850 = DB[where(DB['logg']==8.50)]
DBlogg900 = DB[where(DB['logg']==9.00)]
MS = load_intrinsic_magnitudes()
if color1=="g-r" and color2=="u-g" and classe=="DA":
plot(logg700['g_vphas']-logg700['r_vphas']+reddening[0],logg700['u_vphas']-logg700['g_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(logg750['g_vphas']-logg750['r_vphas']+reddening[0],logg750['u_vphas']-logg750['g_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(logg800['g_vphas']-logg800['r_vphas']+reddening[0],logg800['u_vphas']-logg800['g_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(logg850['g_vphas']-logg850['r_vphas']+reddening[0],logg850['u_vphas']-logg850['g_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(logg900['g_vphas']-logg900['r_vphas']+reddening[0],logg900['u_vphas']-logg900['g_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
elif color1=="r-i" and color2=="g-r" and classe=="DA":
plot(logg700['r_vphas']-logg700['i_vphas']+reddening[0],logg700['g_vphas']-logg700['r_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(logg750['r_vphas']-logg750['i_vphas']+reddening[0],logg750['g_vphas']-logg750['r_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(logg800['r_vphas']-logg800['i_vphas']+reddening[0],logg800['g_vphas']-logg800['r_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(logg850['r_vphas']-logg850['i_vphas']+reddening[0],logg850['g_vphas']-logg850['r_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(logg900['r_vphas']-logg900['i_vphas']+reddening[0],logg900['g_vphas']-logg900['r_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
elif color1=="r-i" and color2=="r-Ha" and classe=="DA":
plot(logg700['r_vphas']-logg700['i_vphas']+reddening[0],logg700['r_vphas']-logg700['ha_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(logg750['r_vphas']-logg750['i_vphas']+reddening[0],logg750['r_vphas']-logg750['ha_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(logg800['r_vphas']-logg800['i_vphas']+reddening[0],logg800['r_vphas']-logg800['ha_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(logg850['r_vphas']-logg850['i_vphas']+reddening[0],logg850['r_vphas']-logg850['ha_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(logg900['r_vphas']-logg900['i_vphas']+reddening[0],logg900['r_vphas']-logg900['ha_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
elif color1=="g-r" and color2=="g" and classe=="DA":
plot(logg700['g_vphas']-logg700['r_vphas']+reddening[0],logg700['g_vphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3,ls=ls)
plot(logg750['g_vphas']-logg750['r_vphas']+reddening[0],logg750['g_vphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3,ls=ls)
plot(logg800['g_vphas']-logg800['r_vphas']+reddening[0],logg800['g_vphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3,ls=ls)
plot(logg850['g_vphas']-logg850['r_vphas']+reddening[0],logg850['g_vphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3,ls=ls)
plot(logg900['g_vphas']-logg900['r_vphas']+reddening[0],logg900['g_vphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3,ls=ls)
elif color1=="r-i" and color2=="r" and classe=="DA":
plot(logg700['r_vphas']-logg700['i_vphas']+reddening[0],logg700['r_vphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3,ls=ls)
plot(logg750['r_vphas']-logg750['i_vphas']+reddening[0],logg750['r_vphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3,ls=ls)
plot(logg800['r_vphas']-logg800['i_vphas']+reddening[0],logg800['r_vphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3,ls=ls)
plot(logg850['r_vphas']-logg850['i_vphas']+reddening[0],logg850['r_vphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3,ls=ls)
plot(logg900['r_vphas']-logg900['i_vphas']+reddening[0],logg900['r_vphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3,ls=ls)
if color1=="g-r" and color2=="u-g" and classe=="DB":
plot(DBlogg700['g_vphas']-DBlogg700['r_vphas']+reddening[0],DBlogg700['u_vphas']-DBlogg700['g_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(DBlogg750['g_vphas']-DBlogg750['r_vphas']+reddening[0],DBlogg750['u_vphas']-DBlogg750['g_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(DBlogg800['g_vphas']-DBlogg800['r_vphas']+reddening[0],DBlogg800['u_vphas']-DBlogg800['g_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(DBlogg850['g_vphas']-DBlogg850['r_vphas']+reddening[0],DBlogg850['u_vphas']-DBlogg850['g_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(DBlogg900['g_vphas']-DBlogg900['r_vphas']+reddening[0],DBlogg900['u_vphas']-DBlogg900['g_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
elif color1=="r-i" and color2=="g-r" and classe=="DB":
plot(DBlogg700['r_vphas']-DBlogg700['i_vphas']+reddening[0],DBlogg700['g_vphas']-DBlogg700['r_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(DBlogg750['r_vphas']-DBlogg750['i_vphas']+reddening[0],DBlogg750['g_vphas']-DBlogg750['r_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(DBlogg800['r_vphas']-DBlogg800['i_vphas']+reddening[0],DBlogg800['g_vphas']-DBlogg800['r_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(DBlogg850['r_vphas']-DBlogg850['i_vphas']+reddening[0],DBlogg850['g_vphas']-DBlogg850['r_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(DBlogg900['r_vphas']-DBlogg900['i_vphas']+reddening[0],DBlogg900['g_vphas']-DBlogg900['r_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
elif color1=="r-i" and color2=="r-Ha" and classe=="DB":
plot(DBlogg700['r_vphas']-DBlogg700['i_vphas']+reddening[0],DBlogg700['r_vphas']-DBlogg700['ha_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(DBlogg750['r_vphas']-DBlogg750['i_vphas']+reddening[0],DBlogg750['r_vphas']-DBlogg750['ha_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(DBlogg800['r_vphas']-DBlogg800['i_vphas']+reddening[0],DBlogg800['r_vphas']-DBlogg800['ha_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(DBlogg850['r_vphas']-DBlogg850['i_vphas']+reddening[0],DBlogg850['r_vphas']-DBlogg850['ha_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(DBlogg900['r_vphas']-DBlogg900['i_vphas']+reddening[0],DBlogg900['r_vphas']-DBlogg900['ha_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
if color1=="g-r" and color2=="u-g" and classe=="MS":
plot(MS['g_vphas']-MS['r_vphas']+reddening[0],MS['u_vphas']-MS['g_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
elif color1=="r-i" and color2=="g-r" and classe=="MS":
plot(MS['r_vphas']-MS['i_vphas']+reddening[0],MS['g_vphas']-MS['r_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
elif color1=="r-i" and color2=="r-Ha" and classe=="MS":
plot(MS['r_vphas']-MS['i_vphas']+reddening[0],MS['r_vphas']-MS['ha_vphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
elif color1=="g-r" and color2=="g" and classe=="MS":
plot(MS['g_vphas']-MS['r_vphas']+reddening[0],MS['g_vphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3,ls=ls)
elif color1=="r-i" and color2=="r" and classe=="MS":
plot(MS['r_vphas']-MS['i_vphas']+reddening[0],MS['r_vphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3,ls=ls)
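# Usage sketch (hypothetical reddening values, added for illustration): overplot the
# reddened DA cooling tracks and the main sequence on a VPHAS+ (r-i, r-Ha) colour-colour
# diagram, assuming E(r-i)=0.05 and E(r-Ha)=0.02:
#
#   vphas_intrinsic('r-i', 'r-Ha', 'DA', reddening=[0.05, 0.02])
#   vphas_intrinsic('r-i', 'r-Ha', 'MS', reddening=[0.05, 0.02], color='blue', ls='dashed')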
def iphas_intrinsic(color1,color2,classe,reddening=[0,0],dist=10,color='red',ls='solid'):
""" Function plotting MS,DA sequences in the VPHAS+ color-color diagrams
reddening = [E(color1),E(color2)]"""
DA = csv2rec('/storage/astro2/phsmav/data2/models/cooling_models/da_koester10_magnitudes.dat')
logg700 = DA[where(DA['logg']==7.00)]
logg750 = DA[where(DA['logg']==7.50)]
logg800 = DA[where(DA['logg']==8.00)]
logg850 = DA[where(DA['logg']==8.50)]
logg900 = DA[where(DA['logg']==9.00)]
DB = csv2rec('/storage/astro2/phsmav/data2/models/cooling_models/db_koester10_magnitudes.dat')
DBlogg700 = DB[where(DB['logg']==7.00)]
DBlogg750 = DB[where(DB['logg']==7.50)]
DBlogg800 = DB[where(DB['logg']==8.00)]
DBlogg850 = DB[where(DB['logg']==8.50)]
DBlogg900 = DB[where(DB['logg']==9.00)]
MS = loadtxt('/storage/astro2/phsmav/data2/iphas/tracks/iphas.txt',usecols=[1,2])
xA0 = arange(0.029, 2.9, 0.05)
yA0= -0.009+0.330*xA0-0.0455*xA0**2
if color1=="r-i" and color2=="r-Ha" and classe=="DA":
plot(logg700['r_iphas']-logg700['i_iphas']+reddening[0],logg700['r_iphas']-logg700['ha_iphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(logg750['r_iphas']-logg750['i_iphas']+reddening[0],logg750['r_iphas']-logg750['ha_iphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(logg800['r_iphas']-logg800['i_iphas']+reddening[0],logg800['r_iphas']-logg800['ha_iphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(logg850['r_iphas']-logg850['i_iphas']+reddening[0],logg850['r_iphas']-logg850['ha_iphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(logg900['r_iphas']-logg900['i_iphas']+reddening[0],logg900['r_iphas']-logg900['ha_iphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
elif color1=="r-i" and color2=="r" and classe=="DA":
plot(logg700['r_iphas']-logg700['i_iphas']+reddening[0],logg700['r_iphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3,ls=ls)
plot(logg750['r_iphas']-logg750['i_iphas']+reddening[0],logg750['r_iphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3,ls=ls)
plot(logg800['r_iphas']-logg800['i_iphas']+reddening[0],logg800['r_iphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3,ls=ls)
plot(logg850['r_iphas']-logg850['i_iphas']+reddening[0],logg850['r_iphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3,ls=ls)
plot(logg900['r_iphas']-logg900['i_iphas']+reddening[0],logg900['r_iphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3,ls=ls)
if color1=="r-i" and color2=="r-Ha" and classe=="DB":
plot(DBlogg700['r_iphas']-DBlogg700['i_iphas']+reddening[0],DBlogg700['r_iphas']-DBlogg700['ha_iphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(DBlogg750['r_iphas']-DBlogg750['i_iphas']+reddening[0],DBlogg750['r_iphas']-DBlogg750['ha_iphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(DBlogg800['r_iphas']-DBlogg800['i_iphas']+reddening[0],DBlogg800['r_iphas']-DBlogg800['ha_iphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(DBlogg850['r_iphas']-DBlogg850['i_iphas']+reddening[0],DBlogg850['r_iphas']-DBlogg850['ha_iphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(DBlogg900['r_iphas']-DBlogg900['i_iphas']+reddening[0],DBlogg900['r_iphas']-DBlogg900['ha_iphas']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
elif color1=="r-i" and color2=="r" and classe=="DB":
plot(DBlogg700['r_iphas']-DBlogg700['i_iphas']+reddening[0],DBlogg700['r_iphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3,ls=ls)
plot(DBlogg750['r_iphas']-DBlogg750['i_iphas']+reddening[0],DBlogg750['r_iphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3,ls=ls)
plot(DBlogg800['r_iphas']-DBlogg800['i_iphas']+reddening[0],DBlogg800['r_iphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3,ls=ls)
plot(DBlogg850['r_iphas']-DBlogg850['i_iphas']+reddening[0],DBlogg850['r_iphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3,ls=ls)
plot(DBlogg900['r_iphas']-DBlogg900['i_iphas']+reddening[0],DBlogg900['r_iphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3,ls=ls)
if color1=="r-i" and color2=="r-Ha" and classe=="MS":
plot(MS[:,0]+reddening[0],MS[:,1]+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
if color1=="r-i" and color2=="r-Ha" and classe=="A0":
plot(xA0,yA0, color=color,zorder=5,lw=0.3,ls='dashed')
def uvex_intrinsic(color1,color2,classe,reddening=[0,0],dist=10,color='red',ls='solid'):
""" Function plotting MS,DA sequences in the VPHAS+ color-color diagrams
reddening = [E(color1),E(color2)]"""
KDA = csv2rec('/storage/astro2/phsmav/data2/models/cooling_models/da_koester_uvex')
KDB = csv2rec('/storage/astro2/phsmav/data2/models/cooling_models/db_koester_uvex')
BDA = csv2rec('/storage/astro2/phsmav/data2/models/cooling_models/da_bergeron_uvex')
if color1=="r-i" and color2=="r-Ha" and classe=="DA":
plot(KDA['ri']+reddening[0],KDA['rha']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(BDA['ri']+reddening[0],BDA['rha']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
elif color1=="g-r" and color2=="u-g" and classe=="DA":
plot(KDA['gr']+reddening[0],KDA['ug']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(BDA['gr']+reddening[0],BDA['ug']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
elif color1=="g-r" and color2=="He-r" and classe=="DA":
plot(KDA['gr']+reddening[0],KDA['her']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
plot(BDA['gr']+reddening[0],BDA['her']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
elif color1=="r-i" and color2=="r-Ha" and classe=="DB":
plot(KDB['ri']+reddening[0],KDB['rha']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
elif color1=="g-r" and color2=="u-g" and classe=="DA":
plot(KDB['gr']+reddening[0],KDB['ug']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
elif color1=="g-r" and color2=="He-r" and classe=="DA":
plot(KDB['gr']+reddening[0],KDB['her']+reddening[1], color=color,zorder=5,lw=0.3,ls=ls)
def atlas_intrinsic(color1,color2,classe,reddening=[0,0],dist=10,color='red'):
""" Function plotting MS,DA sequences in the ATLAS color-color diagrams
reddening = [E(color1),E(color2)]"""
DA = csv2rec('/storage/astro2/phsmav/data2/models/cooling_models/da_koester10_magnitudes.dat')
logg700 = DA[where(DA['logg']==7.00)]
logg750 = DA[where(DA['logg']==7.50)]
logg800 = DA[where(DA['logg']==8.00)]
logg850 = DA[where(DA['logg']==8.50)]
logg900 = DA[where(DA['logg']==9.00)]
if color1=="g-r" and color2=="u-g" and classe=="DA":
plot(logg700['g_vphas']-logg700['r_vphas']+reddening[0],logg700['u_vphas']-logg700['g_vphas']+reddening[1], color=color,zorder=5,lw=0.3)
plot(logg750['g_vphas']-logg750['r_vphas']+reddening[0],logg750['u_vphas']-logg750['g_vphas']+reddening[1], color=color,zorder=5,lw=0.3)
plot(logg800['g_vphas']-logg800['r_vphas']+reddening[0],logg800['u_vphas']-logg800['g_vphas']+reddening[1], color=color,zorder=5,lw=0.3)
plot(logg850['g_vphas']-logg850['r_vphas']+reddening[0],logg850['u_vphas']-logg850['g_vphas']+reddening[1], color=color,zorder=5,lw=0.3)
plot(logg900['g_vphas']-logg900['r_vphas']+reddening[0],logg900['u_vphas']-logg900['g_vphas']+reddening[1], color=color,zorder=5,lw=0.3)
elif color1=="r-i" and color2=="g-r" and classe=="DA":
plot(logg700['r_vphas']-logg700['i_vphas']+reddening[0],logg700['g_vphas']-logg700['r_vphas']+reddening[1], color=color,zorder=5,lw=0.3)
plot(logg750['r_vphas']-logg750['i_vphas']+reddening[0],logg750['g_vphas']-logg750['r_vphas']+reddening[1], color=color,zorder=5,lw=0.3)
plot(logg800['r_vphas']-logg800['i_vphas']+reddening[0],logg800['g_vphas']-logg800['r_vphas']+reddening[1], color=color,zorder=5,lw=0.3)
plot(logg850['r_vphas']-logg850['i_vphas']+reddening[0],logg850['g_vphas']-logg850['r_vphas']+reddening[1], color=color,zorder=5,lw=0.3)
plot(logg900['r_vphas']-logg900['i_vphas']+reddening[0],logg900['g_vphas']-logg900['r_vphas']+reddening[1], color=color,zorder=5,lw=0.3)
elif color1=="i-z" and color2=="r-i" and classe=="DA":
plot(logg700['i_vphas']-logg700['z_atlas']+reddening[0],logg700['r_vphas']-logg700['i_vphas']+reddening[1], color=color,zorder=5,lw=0.3)
plot(logg750['i_vphas']-logg750['z_atlas']+reddening[0],logg750['r_vphas']-logg750['i_vphas']+reddening[1], color=color,zorder=5,lw=0.3)
plot(logg800['i_vphas']-logg800['z_atlas']+reddening[0],logg800['r_vphas']-logg800['i_vphas']+reddening[1], color=color,zorder=5,lw=0.3)
plot(logg850['i_vphas']-logg850['z_atlas']+reddening[0],logg850['r_vphas']-logg850['i_vphas']+reddening[1], color=color,zorder=5,lw=0.3)
plot(logg900['i_vphas']-logg900['z_atlas']+reddening[0],logg900['r_vphas']-logg900['i_vphas']+reddening[1], color=color,zorder=5,lw=0.3)
elif color1=="g-r" and color2=="g" and classe=="DA":
plot(logg700['g_vphas']-logg700['r_vphas']+reddening[0],logg700['g_vphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3)
plot(logg750['g_vphas']-logg750['r_vphas']+reddening[0],logg750['g_vphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3)
plot(logg800['g_vphas']-logg800['r_vphas']+reddening[0],logg800['g_vphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3)
plot(logg850['g_vphas']-logg850['r_vphas']+reddening[0],logg850['g_vphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3)
plot(logg900['g_vphas']-logg900['r_vphas']+reddening[0],logg900['g_vphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3)
elif color1=="r-i" and color2=="r" and classe=="DA":
plot(logg700['r_vphas']-logg700['i_vphas']+reddening[0],logg700['r_vphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3)
plot(logg750['r_vphas']-logg750['i_vphas']+reddening[0],logg750['r_vphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3)
plot(logg800['r_vphas']-logg800['i_vphas']+reddening[0],logg800['r_vphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3)
plot(logg850['r_vphas']-logg850['i_vphas']+reddening[0],logg850['r_vphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3)
plot(logg900['r_vphas']-logg900['i_vphas']+reddening[0],logg900['r_vphas']+reddening[1]+5*log10(dist)-5, color=color,zorder=5,lw=0.3)
elif color1=="g-r" and color2=="Hg" and classe=="DA":
plot(logg800['g_vphas']-logg800['r_vphas'],logg800['r_vphas']+ 5*log10(20)-3.38, color=color,zorder=5,lw=0.3)
plot(logg800['g_vphas']-logg800['r_vphas'],logg800['r_vphas']+ 5*log10(40)-3.38, color=color,zorder=5,lw=0.3)
plot(logg800['g_vphas']-logg800['r_vphas'],logg800['r_vphas']+5*log10(150)-3.38, color=color,zorder=5,lw=0.3)
elif color1=="g-i" and color2=="Hg" and classe=="DA":
plot(logg800['g_vphas']-logg800['i_vphas'],logg800['r_vphas']+ 5*log10(20)-3.38, color=color,zorder=5,lw=0.3)
#plot(logg800['g_vphas']-logg800['i_vphas'],logg800['r_vphas']+ 5*log10(40)-3.38, color=color,zorder=5,lw=0.3)
#plot(logg800['g_vphas']-logg800['i_vphas'],logg800['r_vphas']+5*log10(150)-3.38, color=color,zorder=5,lw=0.3)
elif color1=="g-z" and color2=="Hg" and classe=="DA":
plot(logg800['g_vphas']-logg800['z_atlas'],logg800['r_vphas']+ 5*log10(20)-3.38, color=color,zorder=5,lw=0.3)
plot(logg800['g_vphas']-logg800['z_atlas'],logg800['r_vphas']+ 5*log10(40)-3.38, color=color,zorder=5,lw=0.3)
plot(logg800['g_vphas']-logg800['z_atlas'],logg800['r_vphas']+5*log10(150)-3.38, color=color,zorder=5,lw=0.3)
def intrinsic_colours(color1,color2,classe,diagram='ccd',reddening=[0,0],dist=10,color='red'):
"""Colours are defined as lists, i.e.:
['r_apass','i_apass'], or ['g_apass'],
or reduced proper motion: mag + 5 +5*log10(Vt)-3.38
Available bands: galex, sdss, apass, vphas,
atlas, iphas, 2mass, ukidss, wise
classe: DA, DB, MS,III
diagram: ccd, cmd, rpm
reddening: [ebv1,ebv2], or [ebv1,A_lambda]
dist: pc
"""
if classe=="DA":
M = csv2rec('/storage/astro2/phsmav/data2/models/cooling_models/da_koester10_magnitudes.dat')
M = M[where(M['teff']<100000.)]
logg700 = M[where(M['logg']==7.00)]
logg750 = M[where(M['logg']==7.50)]
logg800 = M[where(M['logg']==8.00)]
logg850 = M[where(M['logg']==8.50)]
logg900 = M[where(M['logg']==9.00)]
if diagram=="ccd":
plot(logg700[color1[0]]-logg700[color1[1]]+reddening[0],logg700[color2[0]]-logg700[color2[1]]+reddening[1], color=color,zorder=2,lw=0.3)
plot(logg750[color1[0]]-logg750[color1[1]]+reddening[0],logg750[color2[0]]-logg750[color2[1]]+reddening[1], color=color,zorder=2,lw=0.3)
plot(logg800[color1[0]]-logg800[color1[1]]+reddening[0],logg800[color2[0]]-logg800[color2[1]]+reddening[1], color=color,zorder=2,lw=0.3)
plot(logg850[color1[0]]-logg850[color1[1]]+reddening[0],logg850[color2[0]]-logg850[color2[1]]+reddening[1], color=color,zorder=2,lw=0.3)
plot(logg900[color1[0]]-logg900[color1[1]]+reddening[0],logg900[color2[0]]-logg900[color2[1]]+reddening[1], color=color,zorder=2,lw=0.3)
elif diagram=="cmd":
plot(logg700[color1[0]]-logg700[color1[1]]+reddening[0],logg700[color2[0]]+reddening[1]+5*log10(dist)-5, color=color,zorder=2,lw=0.3)
plot(logg750[color1[0]]-logg750[color1[1]]+reddening[0],logg750[color2[0]]+reddening[1]+5*log10(dist)-5, color=color,zorder=2,lw=0.3)
plot(logg800[color1[0]]-logg800[color1[1]]+reddening[0],logg800[color2[0]]+reddening[1]+5*log10(dist)-5, color=color,zorder=2,lw=0.3)
plot(logg850[color1[0]]-logg850[color1[1]]+reddening[0],logg850[color2[0]]+reddening[1]+5*log10(dist)-5, color=color,zorder=2,lw=0.3)
plot(logg900[color1[0]]-logg900[color1[1]]+reddening[0],logg900[color2[0]]+reddening[1]+5*log10(dist)-5, color=color,zorder=2,lw=0.3)
elif diagram=="rpm":
plot(logg800[color1[0]]-logg800[color1[1]],logg800[color2[0]]+5*log10(20)-3.38, color=color,zorder=2,lw=0.3)
plot(logg800[color1[0]]-logg800[color1[1]],logg800[color2[0]]+5*log10(40)-3.38, color=color,zorder=2,lw=0.3)
plot(logg800[color1[0]]-logg800[color1[1]],logg800[color2[0]]+5*log10(150)-3.38, color=color,zorder=2,lw=0.3)
elif classe=="DB":
M = csv2rec('/storage/astro2/phsmav/data2/models/cooling_models/db_koester10_magnitudes.dat')
logg700 = M[where(M['logg']==7.00)]
logg750 = M[where(M['logg']==7.50)]
logg800 = M[where(M['logg']==8.00)]
logg850 = M[where(M['logg']==8.50)]
if diagram=="ccd":
plot(logg700[color1[0]]-logg700[color1[1]]+reddening[0],logg700[color2[0]]-logg700[color2[1]]+reddening[1], color=color,zorder=2,lw=0.3)
plot(logg750[color1[0]]-logg750[color1[1]]+reddening[0],logg750[color2[0]]-logg750[color2[1]]+reddening[1], color=color,zorder=2,lw=0.3)
plot(logg800[color1[0]]-logg800[color1[1]]+reddening[0],logg800[color2[0]]-logg800[color2[1]]+reddening[1], color=color,zorder=2,lw=0.3)
plot(logg850[color1[0]]-logg850[color1[1]]+reddening[0],logg850[color2[0]]-logg850[color2[1]]+reddening[1], color=color,zorder=2,lw=0.3)
elif diagram=="cmd":
plot(logg700[color1[0]]-logg700[color1[1]]+reddening[0],logg700[color2[0]]+reddening[1]+5*log10(dist)-5, color=color,zorder=2,lw=0.3)
plot(logg750[color1[0]]-logg750[color1[1]]+reddening[0],logg750[color2[0]]+reddening[1]+5*log10(dist)-5, color=color,zorder=2,lw=0.3)
plot(logg800[color1[0]]-logg800[color1[1]]+reddening[0],logg800[color2[0]]+reddening[1]+5*log10(dist)-5, color=color,zorder=2,lw=0.3)
plot(logg850[color1[0]]-logg850[color1[1]]+reddening[0],logg850[color2[0]]+reddening[1]+5*log10(dist)-5, color=color,zorder=2,lw=0.3)
elif diagram=="rpm":
plot(logg800[color1[0]]-logg800[color1[1]],logg800[color2[0]]+5*log10(20)-3.38, color=color,zorder=2,lw=0.3)
plot(logg800[color1[0]]-logg800[color1[1]],logg800[color2[0]]+5*log10(40)-3.38, color=color,zorder=2,lw=0.3)
plot(logg800[color1[0]]-logg800[color1[1]],logg800[color2[0]]+5*log10(150)-3.38, color=color,zorder=2,lw=0.3)
elif classe=="MS":
M = load_intrinsic_magnitudes()
if diagram=="ccd":
plot(M[color1[0]]-M[color1[1]]+reddening[0],M[color2[0]]-M[color2[1]]+reddening[1], color=color,zorder=2,lw=0.3)
elif diagram=="cmd":
plot(M[color1[0]]-M[color1[1]]+reddening[0],M[color2[0]]+reddening[1]+5*log10(dist)-5, color=color,zorder=2,lw=0.3)
elif classe=="IV":
M = load_intrinsic_magnitudes(classe="IV")
if diagram=="ccd":
plot(M[color1[0]]-M[color1[1]]+reddening[0],M[color2[0]]-M[color2[1]]+reddening[1], color=color,zorder=2,lw=0.3)
elif diagram=="cmd":
plot(M[color1[0]]-M[color1[1]]+reddening[0],M[color2[0]]+reddening[1]+5*log10(dist)-5, color=color,zorder=2,lw=0.3)
elif classe=="III":
M = load_intrinsic_magnitudes(classe="III")
if diagram=="ccd":
plot(M[color1[0]]-M[color1[1]]+reddening[0],M[color2[0]]-M[color2[1]]+reddening[1], color=color,zorder=2,lw=0.3)
elif diagram=="cmd":
plot(M[color1[0]]-M[color1[1]]+reddening[0],M[color2[0]]+reddening[1]+5*log10(dist)-5, color=color,zorder=2,lw=0.3)
elif classe=="I":
M = load_intrinsic_magnitudes(classe="I")
if diagram=="ccd":
plot(M[color1[0]]-M[color1[1]]+reddening[0],M[color2[0]]-M[color2[1]]+reddening[1], color=color,zorder=2,lw=0.3)
elif diagram=="cmd":
plot(M[color1[0]]-M[color1[1]]+reddening[0],M[color2[0]]+reddening[1]+5*log10(dist)-5, color=color,zorder=2,lw=0.3)
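# Usage sketch (hypothetical values, added for illustration): plot the DA cooling tracks
# and the main sequence in an APASS (g-r, g) colour-magnitude diagram at 100 pc, assuming
# E(g-r)=0.1 and A_g=0.3:
#
#   intrinsic_colours(['g_apass','r_apass'], ['g_apass'], 'DA', diagram='cmd',
#                     reddening=[0.1, 0.3], dist=100)
#   intrinsic_colours(['g_apass','r_apass'], ['g_apass'], 'MS', diagram='cmd',
#                     reddening=[0.1, 0.3], dist=100)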
def galex_apass_intrinsic(color1,color2,classe,color='red'):
""" Function plotting MS,DA sequences in the GALEX/APASS/SDSS color-color diagrams
classe: MS, DA
"""
#logg650 = loadtxt('/storage/astro2/phsmav/data2/galex/tracks/DA/log_g_650_Av0.txt')
DA = csv2rec('/storage/astro2/phsmav/data2/models/cooling_models/da_koester10_magnitudes.dat')
logg700 = DA[where(DA['logg']==7.00)]
logg750 = DA[where(DA['logg']==7.50)]
logg800 = DA[where(DA['logg']==8.00)]
logg850 = DA[where(DA['logg']==8.50)]
logg900 = DA[where(DA['logg']==9.00)]
MS = load_intrinsic_magnitudes()
#MS = loadtxt('/storage/astro2/phsmav/data2/galex/tracks/MS/MS_Av0.txt',usecols=[1,2,3,4,5,6,7])
# ZZ Ceti tracks (same files as loaded in galex_sdss_intrinsic below); without these the
# classe=="ZZ" branches further down would fail with a NameError
ZZlogg700 = loadtxt('/storage/astro2/phsmav/data2/galex/tracks/ZZCeti/log_g_7.00_Av0.txt')
ZZlogg750 = loadtxt('/storage/astro2/phsmav/data2/galex/tracks/ZZCeti/log_g_7.50_Av0.txt')
ZZlogg800 = loadtxt('/storage/astro2/phsmav/data2/galex/tracks/ZZCeti/log_g_8.00_Av0.txt')
ZZlogg850 = loadtxt('/storage/astro2/phsmav/data2/galex/tracks/ZZCeti/log_g_8.50_Av0.txt')
ZZlogg900 = loadtxt('/storage/astro2/phsmav/data2/galex/tracks/ZZCeti/log_g_9.00_Av0.txt')
if color1=="nuv-r" and color2=="fuv-nuv" and classe=="DA":
plot(logg700['nuv']-logg700['r_apass'],logg700['fuv']-logg700['nuv'], color=color,zorder=5,lw=0.3)
plot(logg750['nuv']-logg750['r_apass'],logg750['fuv']-logg750['nuv'], color=color,zorder=5,lw=0.3)
plot(logg800['nuv']-logg800['r_apass'],logg800['fuv']-logg800['nuv'], color=color,zorder=5,lw=0.3)
plot(logg850['nuv']-logg850['r_apass'],logg850['fuv']-logg850['nuv'], color=color,zorder=5,lw=0.3)
plot(logg900['nuv']-logg900['r_apass'],logg900['fuv']-logg900['nuv'], color=color,zorder=5,lw=0.3)
elif color1=="nuv-g" and color2=="fuv-nuv" and classe=="DA":
plot(logg700['nuv']-logg700['g_apass'],logg700['fuv']-logg700['nuv'], color=color,zorder=5,lw=0.3)
plot(logg750['nuv']-logg750['g_apass'],logg750['fuv']-logg750['nuv'], color=color,zorder=5,lw=0.3)
plot(logg800['nuv']-logg800['g_apass'],logg800['fuv']-logg800['nuv'], color=color,zorder=5,lw=0.3)
plot(logg850['nuv']-logg850['g_apass'],logg850['fuv']-logg850['nuv'], color=color,zorder=5,lw=0.3)
plot(logg900['nuv']-logg900['g_apass'],logg900['fuv']-logg900['nuv'], color=color,zorder=5,lw=0.3)
elif color1=="g-i" and color2=="nuv-g" and classe=="DA":
plot(logg700['g_apass']-logg700['i_apass'],logg700['nuv']-logg700['g_apass'], color=color,zorder=5,lw=0.3)
plot(logg750['g_apass']-logg750['i_apass'],logg750['nuv']-logg750['g_apass'], color=color,zorder=5,lw=0.3)
plot(logg800['g_apass']-logg800['i_apass'],logg800['nuv']-logg800['g_apass'], color=color,zorder=5,lw=0.3)
plot(logg850['g_apass']-logg850['i_apass'],logg850['nuv']-logg850['g_apass'], color=color,zorder=5,lw=0.3)
plot(logg900['g_apass']-logg900['i_apass'],logg900['nuv']-logg900['g_apass'], color=color,zorder=5,lw=0.3)
elif color1=="r-i" and color2=="nuv-r" and classe=="DA":
plot(logg700['r_apass']-logg700['i_apass'],logg700['nuv']-logg700['r_apass'], color=color,zorder=5,lw=0.3)
plot(logg750['r_apass']-logg750['i_apass'],logg750['nuv']-logg750['r_apass'], color=color,zorder=5,lw=0.3)
plot(logg800['r_apass']-logg800['i_apass'],logg800['nuv']-logg800['r_apass'], color=color,zorder=5,lw=0.3)
plot(logg850['r_apass']-logg850['i_apass'],logg850['nuv']-logg850['r_apass'], color=color,zorder=5,lw=0.3)
plot(logg900['r_apass']-logg900['i_apass'],logg900['nuv']-logg900['r_apass'], color=color,zorder=5,lw=0.3)
elif color1=="g-r" and color2=="nuv-g" and classe=="DA":
plot(logg700['g_apass']-logg700['r_apass'],logg700['nuv']-logg700['g_apass'], color=color,zorder=5,lw=0.3)
plot(logg750['g_apass']-logg750['r_apass'],logg750['nuv']-logg750['g_apass'], color=color,zorder=5,lw=0.3)
plot(logg800['g_apass']-logg800['r_apass'],logg800['nuv']-logg800['g_apass'], color=color,zorder=5,lw=0.3)
plot(logg850['g_apass']-logg850['r_apass'],logg850['nuv']-logg850['g_apass'], color=color,zorder=5,lw=0.3)
plot(logg900['g_apass']-logg900['r_apass'],logg900['nuv']-logg900['g_apass'], color=color,zorder=5,lw=0.3)
elif color1=="b-v" and color2=="nuv-v" and classe=="DA":
plot(logg700['b_apass']-logg700['v_apass'],logg700['nuv']-logg700['v_apass'], color=color,zorder=5,lw=0.3)
plot(logg750['b_apass']-logg750['v_apass'],logg750['nuv']-logg750['v_apass'], color=color,zorder=5,lw=0.3)
plot(logg800['b_apass']-logg800['v_apass'],logg800['nuv']-logg800['v_apass'], color=color,zorder=5,lw=0.3)
plot(logg850['b_apass']-logg850['v_apass'],logg850['nuv']-logg850['v_apass'], color=color,zorder=5,lw=0.3)
plot(logg900['b_apass']-logg900['v_apass'],logg900['nuv']-logg900['v_apass'], color=color,zorder=5,lw=0.3)
elif color1=="nuv-v" and color2=="fuv-nuv" and classe=="DA":
plot(logg700['nuv']-logg700['v_apass'],logg700['fuv']-logg700['nuv'], color=color,zorder=5,lw=0.3)
plot(logg750['nuv']-logg750['v_apass'],logg750['fuv']-logg750['nuv'], color=color,zorder=5,lw=0.3)
plot(logg800['nuv']-logg800['v_apass'],logg800['fuv']-logg800['nuv'], color=color,zorder=5,lw=0.3)
plot(logg850['nuv']-logg850['v_apass'],logg850['fuv']-logg850['nuv'], color=color,zorder=5,lw=0.3)
plot(logg900['nuv']-logg900['v_apass'],logg900['fuv']-logg900['nuv'], color=color,zorder=5,lw=0.3)
elif color1=="g-h" and color2=="nuv-g" and classe=="DA":
plot(logg700['g_apass']-logg700['h_2mass'],logg700['nuv']-logg700['g_apass'], color=color,zorder=5,lw=0.3)
plot(logg750['g_apass']-logg750['h_2mass'],logg750['nuv']-logg750['g_apass'], color=color,zorder=5,lw=0.3)
plot(logg800['g_apass']-logg800['h_2mass'],logg800['nuv']-logg800['g_apass'], color=color,zorder=5,lw=0.3)
plot(logg850['g_apass']-logg850['h_2mass'],logg850['nuv']-logg850['g_apass'], color=color,zorder=5,lw=0.3)
plot(logg900['g_apass']-logg900['h_2mass'],logg900['nuv']-logg900['g_apass'], color=color,zorder=5,lw=0.3)
# reduced proper motions #
elif color1=="nuv-g" and color2=="Hg" and classe=="DA":
plot(logg800['nuv']-logg800['g_apass'],logg800['g_apass']+ 5*log10(20)-3.38, color=color,zorder=5,lw=0.3)
plot(logg800['nuv']-logg800['g_apass'],logg800['g_apass']+ 5*log10(40)-3.38, color=color,zorder=5,lw=0.3)
plot(logg800['nuv']-logg800['g_apass'],logg800['g_apass']+5*log10(150)-3.38, color=color,zorder=5,lw=0.3)
elif color1=="nuv-r" and color2=="Hr" and classe=="DA":
plot(logg800['nuv']-logg800['r_apass'],logg800['r_apass']+ 5*log10(20)-3.38, color=color,zorder=5,lw=0.3)
plot(logg800['nuv']-logg800['r_apass'],logg800['r_apass']+ 5*log10(40)-3.38, color=color,zorder=5,lw=0.3)
plot(logg800['nuv']-logg800['r_apass'],logg800['r_apass']+5*log10(150)-3.38, color=color,zorder=5,lw=0.3)
elif color1=="g-r" and color2=="Hg" and classe=="DA":
plot(logg800['g_apass']-logg800['r_apass'],logg800['g_apass']+ 5*log10(20)-3.38, color=color,zorder=5,lw=0.3)
plot(logg800['g_apass']-logg800['r_apass'],logg800['g_apass']+ 5*log10(40)-3.38, color=color,zorder=5,lw=0.3)
plot(logg800['g_apass']-logg800['r_apass'],logg800['g_apass']+5*log10(150)-3.38, color=color,zorder=5,lw=0.3)
elif color1=="g-i" and color2=="Hg" and classe=="DA":
plot(logg800['g_apass']-logg800['i_apass'],logg800['g_apass']+ 5*log10(20)-3.38, color=color,zorder=5,lw=0.3)
plot(logg800['g_apass']-logg800['i_apass'],logg800['g_apass']+ 5*log10(40)-3.38, color=color,zorder=5,lw=0.3)
plot(logg800['g_apass']-logg800['i_apass'],logg800['g_apass']+5*log10(150)-3.38, color=color,zorder=5,lw=0.3)
elif color1=="g-h" and color2=="Hg" and classe=="DA":
plot(logg800['g_apass']-logg800['h_2mass'],logg800['g_apass']+ 5*log10(20)-3.38, color=color,zorder=5,lw=0.3)
plot(logg800['g_apass']-logg800['h_2mass'],logg800['g_apass']+ 5*log10(40)-3.38, color=color,zorder=5,lw=0.3)
plot(logg800['g_apass']-logg800['h_2mass'],logg800['g_apass']+5*log10(150)-3.38, color=color,zorder=5,lw=0.3)
elif color1=="b-v" and color2=="Hv" and classe=="DA":
plot(logg800['b_apass']-logg800['v_apass'],logg800['v_apass']+ 5*log10(20)-3.38, color=color,zorder=5,lw=0.3)
plot(logg800['b_apass']-logg800['v_apass'],logg800['v_apass']+ 5*log10(40)-3.38, color=color,zorder=5,lw=0.3)
plot(logg800['b_apass']-logg800['v_apass'],logg800['v_apass']+5*log10(150)-3.38, color=color,zorder=5,lw=0.3)
elif color1=="nuv-r" and color2=="fuv-nuv" and classe=="ZZ":
#plot(logg650[:,3],logg650[:,1], color=color,zorder=5)
plot(ZZlogg700[:,3],ZZlogg700[:,1], color=color,zorder=5)
plot(ZZlogg750[:,3],ZZlogg750[:,1], color=color,zorder=5)
plot(ZZlogg800[:,3],ZZlogg800[:,1], color=color,zorder=5)
plot(ZZlogg850[:,3],ZZlogg850[:,1], color=color,zorder=5)
plot(ZZlogg900[:,3],ZZlogg900[:,1], color=color,zorder=5)
elif color1=="nuv-g" and color2=="fuv-nuv" and classe=="ZZ":
#plot(logg650[:,3],logg650[:,1], color=color,zorder=5)
plot(ZZlogg700[:,2],ZZlogg700[:,1], color=color,zorder=5)
plot(ZZlogg750[:,2],ZZlogg750[:,1], color=color,zorder=5)
plot(ZZlogg800[:,2],ZZlogg800[:,1], color=color,zorder=5)
plot(ZZlogg850[:,2],ZZlogg850[:,1], color=color,zorder=5)
plot(ZZlogg900[:,2],ZZlogg900[:,1], color=color,zorder=5)
elif color1=="g-i" and color2=="nuv-g" and classe=="ZZ":
#plot(logg650[:,4],logg650[:,2], color=color,zorder=5)
plot(ZZlogg700[:,4],ZZlogg700[:,2], color=color,zorder=5)
plot(ZZlogg750[:,4],ZZlogg750[:,2], color=color,zorder=5)
plot(ZZlogg800[:,4],ZZlogg800[:,2], color=color,zorder=5)
plot(ZZlogg850[:,4],ZZlogg850[:,2], color=color,zorder=5)
plot(ZZlogg900[:,4],ZZlogg900[:,2], color=color,zorder=5)
elif color1=="r-i" and color2=="nuv-r" and classe=="ZZ":
#plot(ZZlogg650[:,4],ZZlogg650[:,2], color=color,zorder=5)
plot(ZZlogg700[:,6],ZZlogg700[:,3], color=color,zorder=5)
plot(ZZlogg750[:,6],ZZlogg750[:,3], color=color,zorder=5)
plot(ZZlogg800[:,6],ZZlogg800[:,3], color=color,zorder=5)
plot(ZZlogg850[:,6],ZZlogg850[:,3], color=color,zorder=5)
plot(ZZlogg900[:,6],ZZlogg900[:,3], color=color,zorder=5)
elif color1=="g-r" and color2=="nuv-g" and classe=="ZZ":
#plot(logg650[:,5],logg650[:,2], color=color,zorder=5)
plot(ZZlogg700[:,5],ZZlogg700[:,2], color=color,zorder=5)
plot(ZZlogg750[:,5],ZZlogg750[:,2], color=color,zorder=5)
plot(ZZlogg800[:,5],ZZlogg800[:,2], color=color,zorder=5)
plot(ZZlogg850[:,5],ZZlogg850[:,2], color=color,zorder=5)
plot(ZZlogg900[:,5],ZZlogg900[:,2], color=color,zorder=5)
# main sequence #
elif color1=="nuv-r" and color2=="fuv-nuv" and classe=="MS":
plot(MS['nuv']-MS['r_apass'],MS['fuv']-MS['nuv'], color=color,zorder=5, ls='dashed',lw=0.3)
elif color1=="nuv-g" and color2=="fuv-nuv" and classe=="MS":
plot(MS['nuv']-MS['g_apass'],MS['fuv']-MS['nuv'], color=color,zorder=5, ls='dashed',lw=0.3)
elif color1=="g-r" and color2=="nuv-g" and classe=="MS":
plot(MS['g_apass']-MS['r_apass'],MS['nuv']-MS['g_apass'], color=color,zorder=5, ls='dashed',lw=0.3)
elif color1=="g-i" and color2=="nuv-g" and classe=="MS":
plot(MS['g_apass']-MS['i_apass'],MS['nuv']-MS['g_apass'], color=color,zorder=5, ls='dashed',lw=0.3)
elif color1=="nuv-v" and color2=="fuv-nuv" and classe=="MS":
plot(MS['nuv']-MS['v_apass'],MS['fuv']-MS['nuv'], color=color,zorder=5, ls='dashed',lw=0.3)
elif color1=="b-v" and color2=="nuv-v" and classe=="MS":
plot(MS['b_apass']-MS['v_apass'],MS['nuv']-MS['v_apass'], color=color,zorder=5, ls='dashed',lw=0.3)
elif color1=="g-h" and color2=="nuv-g" and classe=="MS":
plot(MS['g_apass']-MS['h_2mass'],MS['nuv']-MS['g_apass'], color=color,zorder=5, ls='dashed',lw=0.3)
def galex_sdss_intrinsic(color1,color2,classe,color='red'):
""" Function plotting MS,DA sequences in the GALEX/APASS/SDSS color-color diagrams """
#logg650 = loadtxt('/storage/astro2/phsmav/data2/galex/tracks/DA/log_g_650_Av0.txt')
logg700 = loadtxt('/storage/astro2/phsmav/data2/galex/tracks/DA/log_g_700_Av0.txt')
logg750 = loadtxt('/storage/astro2/phsmav/data2/galex/tracks/DA/log_g_750_Av0.txt')
logg800 = loadtxt('/storage/astro2/phsmav/data2/galex/tracks/DA/log_g_800_Av0.txt')
logg850 = loadtxt('/storage/astro2/phsmav/data2/galex/tracks/DA/log_g_850_Av0.txt')
logg900 = loadtxt('/storage/astro2/phsmav/data2/galex/tracks/DA/log_g_900_Av0.txt')
MS = loadtxt('/storage/astro2/phsmav/data2/galex/tracks/MS/MS_Av0.txt',usecols=[1,2,3,4,5,6,7])
ZZlogg700 = loadtxt('/storage/astro2/phsmav/data2/galex/tracks/ZZCeti/log_g_7.00_Av0.txt')
ZZlogg750 = loadtxt('/storage/astro2/phsmav/data2/galex/tracks/ZZCeti/log_g_7.50_Av0.txt')
ZZlogg800 = loadtxt('/storage/astro2/phsmav/data2/galex/tracks/ZZCeti/log_g_8.00_Av0.txt')
ZZlogg850 = loadtxt('/storage/astro2/phsmav/data2/galex/tracks/ZZCeti/log_g_8.50_Av0.txt')
ZZlogg900 = loadtxt('/storage/astro2/phsmav/data2/galex/tracks/ZZCeti/log_g_9.00_Av0.txt')
if color1=="nuv-r" and color2=="fuv-nuv" and classe=="WD":
#plot(logg650[:,3],logg650[:,1], color=color,zorder=5)
plot(logg700[:,3],logg700[:,1], color=color,zorder=5)
plot(logg750[:,3],logg750[:,1], color=color,zorder=5)
plot(logg800[:,3],logg800[:,1], color=color,zorder=5)
plot(logg850[:,3],logg850[:,1], color=color,zorder=5)
plot(logg900[:,3],logg900[:,1], color=color,zorder=5)
elif color1=="g-r" and color2=="u-g" and classe=="WD":
#plot(logg650[:,3],logg650[:,1], color=color,zorder=5)
plot(logg700[:,5],logg700[:,7], color=color,zorder=5)
plot(logg750[:,5],logg750[:,7], color=color,zorder=5)
plot(logg800[:,5],logg800[:,7], color=color,zorder=5)
plot(logg850[:,5],logg850[:,7], color=color,zorder=5)
plot(logg900[:,5],logg900[:,7], color=color,zorder=5)
elif color1=="nuv-g" and color2=="fuv-nuv" and classe=="WD":
#plot(logg650[:,3],logg650[:,1], color=color,zorder=5)
plot(logg700[:,2],logg700[:,1], color=color,zorder=5)
plot(logg750[:,2],logg750[:,1], color=color,zorder=5)
plot(logg800[:,2],logg800[:,1], color=color,zorder=5)
plot(logg850[:,2],logg850[:,1], color=color,zorder=5)
plot(logg900[:,2],logg900[:,1], color=color,zorder=5)
elif color1=="g-i" and color2=="nuv-g" and classe=="WD":
#plot(logg650[:,4],logg650[:,2], color=color,zorder=5)
plot(logg700[:,4],logg700[:,2], color=color,zorder=5)
plot(logg750[:,4],logg750[:,2], color=color,zorder=5)
plot(logg800[:,4],logg800[:,2], color=color,zorder=5)
plot(logg850[:,4],logg850[:,2], color=color,zorder=5)
plot(logg900[:,4],logg900[:,2], color=color,zorder=5)
elif color1=="g-r" and color2=="nuv-g" and classe=="WD":
#plot(logg650[:,5],logg650[:,2], color=color,zorder=5)
plot(logg700[:,5],logg700[:,2], color=color,zorder=5)
plot(logg750[:,5],logg750[:,2], color=color,zorder=5)
plot(logg800[:,5],logg800[:,2], color=color,zorder=5)
plot(logg850[:,5],logg850[:,2], color=color,zorder=5)
plot(logg900[:,5],logg900[:,2], color=color,zorder=5)
if color1=="nuv-r" and color2=="fuv-nuv" and classe=="ZZ":
#plot(logg650[:,3],logg650[:,1], color=color,zorder=5)
plot(ZZlogg700[:,3],ZZlogg700[:,1], color=color,zorder=5)
plot(ZZlogg750[:,3],ZZlogg750[:,1], color=color,zorder=5)
plot(ZZlogg800[:,3],ZZlogg800[:,1], color=color,zorder=5)
plot(ZZlogg850[:,3],ZZlogg850[:,1], color=color,zorder=5)
plot(ZZlogg900[:,3],ZZlogg900[:,1], color=color,zorder=5)
elif color1=="nuv-g" and color2=="fuv-nuv" and classe=="ZZ":
#plot(logg650[:,3],logg650[:,1], color=color,zorder=5)
plot(ZZlogg700[:,2],ZZlogg700[:,1], color=color,zorder=5)
plot(ZZlogg750[:,2],ZZlogg750[:,1], color=color,zorder=5)
plot(ZZlogg800[:,2],ZZlogg800[:,1], color=color,zorder=5)
plot(ZZlogg850[:,2],ZZlogg850[:,1], color=color,zorder=5)
plot(ZZlogg900[:,2],ZZlogg900[:,1], color=color,zorder=5)
elif color1=="g-r" and color2=="u-g" and classe=="ZZ":
#plot(logg650[:,3],logg650[:,1], color=color,zorder=5)
plot(ZZlogg700[:,5],ZZlogg700[:,7], color=color,zorder=5)
plot(ZZlogg750[:,5],ZZlogg750[:,7], color=color,zorder=5)
plot(ZZlogg800[:,5],ZZlogg800[:,7], color=color,zorder=5)
plot(ZZlogg850[:,5],ZZlogg850[:,7], color=color,zorder=5)
plot(ZZlogg900[:,5],ZZlogg900[:,7], color=color,zorder=5)
elif color1=="g-i" and color2=="nuv-g" and classe=="ZZ":
#plot(logg650[:,4],logg650[:,2], color=color,zorder=5)
plot(ZZlogg700[:,4],ZZlogg700[:,2], color=color,zorder=5)
plot(ZZlogg750[:,4],ZZlogg750[:,2], color=color,zorder=5)
plot(ZZlogg800[:,4],ZZlogg800[:,2], color=color,zorder=5)
plot(ZZlogg850[:,4],ZZlogg850[:,2], color=color,zorder=5)
plot(ZZlogg900[:,4],ZZlogg900[:,2], color=color,zorder=5)
elif color1=="g-r" and color2=="nuv-g" and classe=="ZZ":
#plot(logg650[:,5],logg650[:,2], color=color,zorder=5)
plot(ZZlogg700[:,5],ZZlogg700[:,2], color=color,zorder=5)
plot(ZZlogg750[:,5],ZZlogg750[:,2], color=color,zorder=5)
plot(ZZlogg800[:,5],ZZlogg800[:,2], color=color,zorder=5)
plot(ZZlogg850[:,5],ZZlogg850[:,2], color=color,zorder=5)
plot(ZZlogg900[:,5],ZZlogg900[:,2], color=color,zorder=5)
def ZZCeti_box(plane="ugr"):
"""ZZ Ceti selection box in SDSS colou-colour-plane"""
p1 = polyfit([-0.04,-0.15],[-0.7,0.8],deg=1)
p2 = polyfit([-0.04,-0.27],[-0.7,0.8],deg=1)
x = arange(-0.27,-0.05,0.01)
y1 = polyval(p1,x)
y2 = polyval(p2,x)
plot(x[y1<=0.8],y1[y1<=0.8], ls='dashed',color='m')
plot(x[y2<=0.8],y2[y2<=0.8], ls='dashed',color='m')
| 58.845016
| 153
| 0.685906
| 13,296
| 75,557
| 3.779708
| 0.031288
| 0.078599
| 0.12353
| 0.120426
| 0.912586
| 0.897443
| 0.864471
| 0.844473
| 0.791603
| 0.770232
| 0
| 0.102014
| 0.088728
| 75,557
| 1,283
| 154
| 58.890881
| 0.627874
| 0.036727
| 0
| 0.420907
| 0
| 0.00555
| 0.121115
| 0.04183
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.256244
| 0.0037
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
6b654c66283442b9b061c05d357b0884f2c5425f
| 3,234
|
py
|
Python
|
circle_ci_test_stable/gui_test/calculator/calculator.py
|
JE-Chen/Python_JEAutoControl
|
477bf9612e28e9ab6d0a8e269db2f699e50a3744
|
[
"MIT"
] | 9
|
2020-10-12T06:33:36.000Z
|
2021-09-13T07:07:36.000Z
|
circle_ci_test_stable/gui_test/calculator/calculator.py
|
JE-Chen/Python_JEAutoControl
|
477bf9612e28e9ab6d0a8e269db2f699e50a3744
|
[
"MIT"
] | null | null | null |
circle_ci_test_stable/gui_test/calculator/calculator.py
|
JE-Chen/Python_JEAutoControl
|
477bf9612e28e9ab6d0a8e269db2f699e50a3744
|
[
"MIT"
] | null | null | null |
import os
import subprocess
from time import sleep
from je_auto_control import locate_and_click
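# GUI smoke test (description added for clarity): launch the Windows calculator, then
# locate each button template image on screen and left-click it, keying in
# "1 + 5 =" followed by "+ 2 + 3 + 4 + 6 + 7 + 8 + 9 =".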
subprocess.Popen("calc", stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, shell=True)
sleep(3)
locate_and_click(
os.getcwd() + "/circle_ci_test_dev/test_source/1.png",
mouse_keycode="mouse_left",
detect_threshold=0.9,
draw_image=False
)
locate_and_click(
os.getcwd() + "/circle_ci_test_dev/test_source/plus.png",
mouse_keycode="mouse_left",
detect_threshold=0.9,
draw_image=False
)
locate_and_click(
os.getcwd() + "/circle_ci_test_dev/test_source/5.png",
mouse_keycode="mouse_left",
detect_threshold=0.9,
draw_image=False
)
locate_and_click(
os.getcwd() + "/circle_ci_test_dev/test_source/equal.png",
mouse_keycode="mouse_left",
detect_threshold=0.9,
draw_image=False
)
locate_and_click(
os.getcwd() + "/circle_ci_test_dev/test_source/plus.png",
mouse_keycode="mouse_left",
detect_threshold=0.9,
draw_image=False
)
locate_and_click(
os.getcwd() + "/circle_ci_test_dev/test_source/2.png",
mouse_keycode="mouse_left",
detect_threshold=0.9,
draw_image=False
)
locate_and_click(
os.getcwd() + "/circle_ci_test_dev/test_source/plus.png",
mouse_keycode="mouse_left",
detect_threshold=0.9,
draw_image=False
)
locate_and_click(
os.getcwd() + "/circle_ci_test_dev/test_source/3.png",
mouse_keycode="mouse_left",
detect_threshold=0.9,
draw_image=False
)
locate_and_click(
os.getcwd() + "/circle_ci_test_dev/test_source/plus.png",
mouse_keycode="mouse_left",
detect_threshold=0.9,
draw_image=False
)
locate_and_click(
os.getcwd() + "/circle_ci_test_dev/test_source/4.png",
mouse_keycode="mouse_left",
detect_threshold=0.9,
draw_image=False
)
locate_and_click(
os.getcwd() + "/circle_ci_test_dev/test_source/plus.png",
mouse_keycode="mouse_left",
detect_threshold=0.9,
draw_image=False
)
locate_and_click(
os.getcwd() + "/circle_ci_test_dev/test_source/6.png",
mouse_keycode="mouse_left",
detect_threshold=0.9,
draw_image=False
)
locate_and_click(
os.getcwd() + "/circle_ci_test_dev/test_source/plus.png",
mouse_keycode="mouse_left",
detect_threshold=0.9,
draw_image=False
)
locate_and_click(
os.getcwd() + "/circle_ci_test_dev/test_source/7.png",
mouse_keycode="mouse_left",
detect_threshold=0.9,
draw_image=False
)
locate_and_click(
os.getcwd() + "/circle_ci_test_dev/test_source/plus.png",
mouse_keycode="mouse_left",
detect_threshold=0.9,
draw_image=False
)
locate_and_click(
os.getcwd() + "/circle_ci_test_dev/test_source/8.png",
mouse_keycode="mouse_left",
detect_threshold=0.9,
draw_image=False
)
locate_and_click(
os.getcwd() + "/circle_ci_test_dev/test_source/plus.png",
mouse_keycode="mouse_left",
detect_threshold=0.9,
draw_image=False
)
locate_and_click(
os.getcwd() + "/circle_ci_test_dev/test_source/9.png",
mouse_keycode="mouse_left",
detect_threshold=0.9,
draw_image=False
)
locate_and_click(
os.getcwd() + "/circle_ci_test_dev/test_source/equal.png",
mouse_keycode="mouse_left",
detect_threshold=0.9,
draw_image=False
)
| 25.872
| 90
| 0.72449
| 485
| 3,234
| 4.430928
| 0.094845
| 0.08376
| 0.130293
| 0.141461
| 0.921359
| 0.921359
| 0.921359
| 0.921359
| 0.921359
| 0.921359
| 0
| 0.01741
| 0.147495
| 3,234
| 124
| 91
| 26.080645
| 0.76206
| 0
| 0
| 0.716667
| 0
| 0
| 0.28726
| 0.227273
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.033333
| 0
| 0.033333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6bb34c4cdcccae2c5bc89334c5a6a0d50a149ae1
| 2,697
|
py
|
Python
|
tests/test_root_finding.py
|
rocketscience0/cs207-FinalProject
|
bb2a38bc2ca341c55cf544d316318798b42efde7
|
[
"MIT"
] | 1
|
2019-11-12T18:03:52.000Z
|
2019-11-12T18:03:52.000Z
|
tests/test_root_finding.py
|
rocketscience0/cs207-FinalProject
|
bb2a38bc2ca341c55cf544d316318798b42efde7
|
[
"MIT"
] | 3
|
2019-11-19T20:45:05.000Z
|
2019-12-10T14:33:21.000Z
|
tests/test_root_finding.py
|
rocketscience0/cs207-FinalProject
|
bb2a38bc2ca341c55cf544d316318798b42efde7
|
[
"MIT"
] | null | null | null |
"""Tests for the root-finding module
"""
import sys
import pytest
sys.path.append('..')
import numpy as np
from autodiff import operations, root_finding
from autodiff.structures import Number, Array
# def func_array(x):
# return x[0] ** 2 * (x[1] + 2)
def func_bowl(x):
return (x[0] - 1) ** 2 + (x[1] - 1) ** 2
# def func_2d(x):
# return 1 - 4 * x[0] + 2 * x[0] ** 2 - 2 * x[1] ** 3, -4 + x[0] ** 4 + 4 * x[1] + 4 * x[1] ** 4
def func_scalar(x):
return 2 * (x - 1) ** 2
def func_2d(x):
return (x[0] - 1, x[1] - 1)
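# Note (added for clarity): the three test functions above are constructed so that
# Newton's method should converge to x = 1 in the scalar case and to x = (1, 1) in the
# vector cases, which is what the assertions below verify with pytest.approx.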
def test_newtons_method_rn_to_rn():
initial_guess = Array((Number(-0.1), Number(-1)))
xstar, _ = root_finding.newtons_method(
func_2d,
initial_guess,
verbose=True,
)
assert xstar[0].val == pytest.approx(1)
assert xstar[1].val == pytest.approx(1)
def test_newtons_method_vector():
initial_guess = Array((Number(-0.1), Number(-1)))
xstar, _ = root_finding.newtons_method(func_bowl, initial_guess, tolerance=1e-6)
assert xstar[0].val == pytest.approx(1, abs=1e-3)
assert xstar[1].val == pytest.approx(1, abs=1e-3)
def test_newtons_method_scalar():
initial_guess = Number(2)
xstar, _ = root_finding.newtons_method(func_scalar, initial_guess, tolerance=1e-10)
assert xstar.val == pytest.approx(1, abs=1e-3)
def test_newtons_method_vector_show_fxn_rn_to_rn():
initial_guess = Array((Number(-0.1), Number(-1)))
xstar, _, _ = root_finding.newtons_method(func_2d, initial_guess, tolerance=1e-6, show_fxn=True)
assert xstar[0].val == pytest.approx(1, abs=1e-3)
assert xstar[1].val == pytest.approx(1, abs=1e-3)
def test_newtons_method_vector_show_fxn_rn_to_r1():
initial_guess = Array((Number(-0.1), Number(-1)))
xstar, _, _ = root_finding.newtons_method(func_bowl, initial_guess, tolerance=1e-6, show_fxn=True)
assert xstar[0].val == pytest.approx(1, abs=1e-3)
assert xstar[1].val == pytest.approx(1, abs=1e-3)
def test_newtons_method_scalar_show_fxn():
initial_guess = Number(2)
xstar, _, _ = root_finding.newtons_method(func_scalar, initial_guess, show_fxn=True)
assert xstar.val == pytest.approx(1, abs=1e-3)
def test_newtons_method_vector_verbose():
initial_guess = Array((Number(-0.1), Number(-1)))
xstar, _ = root_finding.newtons_method(func_bowl, initial_guess, tolerance=1e-6, verbose=True)
assert xstar[0].val == pytest.approx(1, abs=1e-3)
assert xstar[1].val == pytest.approx(1, abs=1e-3)
def test_newtons_method_scalar_verbose():
initial_guess = Number(2)
xstar, _ = root_finding.newtons_method(func_scalar, initial_guess, verbose=True)
assert xstar.val == pytest.approx(1, abs=1e-3)
| 33.7125
| 102
| 0.675936
| 434
| 2,697
| 3.970046
| 0.124424
| 0.12072
| 0.113175
| 0.12072
| 0.839814
| 0.800929
| 0.789321
| 0.752176
| 0.752176
| 0.725479
| 0
| 0.050424
| 0.169077
| 2,697
| 79
| 103
| 34.139241
| 0.718429
| 0.074898
| 0
| 0.388889
| 0
| 0
| 0.000805
| 0
| 0
| 0
| 0
| 0
| 0.240741
| 1
| 0.203704
| false
| 0
| 0.111111
| 0.055556
| 0.37037
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6bc42b6415b21f7403e9813a7cff609d90b9cf0a
| 8,426
|
py
|
Python
|
osm_pla/test/test_mznPlacementConductor.py
|
TCSOSM-20/PLA
|
de4f7820a050a7ed18aa93a298c5a59e379e752b
|
[
"Apache-2.0"
] | null | null | null |
osm_pla/test/test_mznPlacementConductor.py
|
TCSOSM-20/PLA
|
de4f7820a050a7ed18aa93a298c5a59e379e752b
|
[
"Apache-2.0"
] | null | null | null |
osm_pla/test/test_mznPlacementConductor.py
|
TCSOSM-20/PLA
|
de4f7820a050a7ed18aa93a298c5a59e379e752b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 ArctosLabs Scandinavia AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
# from collections import Counter
from unittest import TestCase, mock
# import osm_pla
from osm_pla.placement.mznplacement import MznPlacementConductor, MznModelGenerator
test_mzn_model = """
% This minizinc model is generated using
% C:/Users/LG/PycharmProjects/dynamic_jijna2_mzn/osm_pla/placement/mznplacement.py
% at 2019-10-24 11:12:02.058905.
%This is the NETWORK RESOURCE MODEL
enum Vims = {
vimaaaaaaaa_38f5_438d_b8ee_3f93b3531f87,
vimbbbbbbbb_38f5_438d_b8ee_3f93b3531f87,
vimcccccccc_ed84_4e49_b5df_a9d117bd731f,
vimdddddddd_ed84_4e49_b5df_a9d117bd731f,
vimeeeeeeee_38f5_438d_b8ee_3f93b3531f87}; % The vim-accounts
array[Vims, Vims] of int: trp_link_latency = [|0,50,100,150,200,
|0,0,100,150,200,
|0,0,0,150,200,
|0,0,0,0,200,
|0,0,0,0,0,
|]; % Transport link latency between data centers
array[Vims, Vims] of int: trp_link_jitter = [|0,50,100,150,200,
|0,0,100,150,200,
|0,0,0,150,200,
|0,0,0,0,200,
|0,0,0,0,0,
|]; % Transport link jitter between data centers
array[Vims, Vims] of int: trp_link_price_list = [|0,5,6,6,7,
|0,0,6,6,7,
|0,0,0,6,7,
|0,0,0,0,7,
|0,0,0,0,0,
|]; % Transport link price list
array[Vims] of int: vim_price_list_1 = [500,51,52,53,54];
array[Vims] of int: vim_price_list_2 = [20,21,22,23,24];
array[Vims] of int: vim_price_list_3 = [70,71,72,73,74];
array[Vims] of int: vim_price_list_4 = [40,41,42,43,44];
% This is the NETWORK BASIC LOAD MODEL (CONSUMED)
% NOTE. This is not applicable in OSM Release 7
% This is the SERVICE CONSUMPTION MODEL
% These are the variables, i.e. which DC to select for each VNF
var Vims: VNF1;
var Vims: VNF2;
var Vims: VNF3;
var Vims: VNF4;
% These are the set of rules for selecting DCs to VNFs
constraint trp_link_latency[VNF1, VNF2] <= 150;
constraint trp_link_latency[VNF2, VNF3] <= 140;
constraint trp_link_latency[VNF3, VNF4] <= 130;
constraint trp_link_jitter[VNF1, VNF2] <= 30;
constraint trp_link_jitter[VNF2, VNF3] <= 30;
constraint trp_link_jitter[VNF3, VNF4] <= 30;
% Calculate the cost for VNFs and cost for transport link and total cost
var int: used_transport_cost =trp_link_price_list[VNF1, VNF2]+
trp_link_price_list[VNF2, VNF3]+
trp_link_price_list[VNF3, VNF4];
var int: used_vim_cost =vim_price_list_1[VNF1]+
vim_price_list_2[VNF2]+
vim_price_list_3[VNF3]+
vim_price_list_4[VNF4];
var int: total_cost = used_transport_cost + used_vim_cost;
solve minimize total_cost;
"""
test_mzn_model_w_pinning = """
% This minizinc model is generated using
% C:/Users/LG/PycharmProjects/dynamic_jijna2_mzn/osm_pla/placement/mznplacement.py
% at 2019-10-24 11:12:02.058905.
%This is the NETWORK RESOURCE MODEL
enum Vims = {
vimaaaaaaaa_38f5_438d_b8ee_3f93b3531f87,
vimbbbbbbbb_38f5_438d_b8ee_3f93b3531f87,
vimcccccccc_ed84_4e49_b5df_a9d117bd731f,
vimdddddddd_ed84_4e49_b5df_a9d117bd731f,
vimeeeeeeee_38f5_438d_b8ee_3f93b3531f87}; % The vim-accounts
array[Vims, Vims] of int: trp_link_latency = [|0,50,100,150,200,
|0,0,100,150,200,
|0,0,0,150,200,
|0,0,0,0,200,
|0,0,0,0,0,
|]; % Transport link latency between data centers
array[Vims, Vims] of int: trp_link_jitter = [|0,50,100,150,200,
|0,0,100,150,200,
|0,0,0,150,200,
|0,0,0,0,200,
|0,0,0,0,0,
|]; % Transport link jitter between data centers
array[Vims, Vims] of int: trp_link_price_list = [|0,5,6,6,7,
|0,0,6,6,7,
|0,0,0,6,7,
|0,0,0,0,7,
|0,0,0,0,0,
|]; % Transport link price list
array[Vims] of int: vim_price_list_1 = [500,51,52,53,54];
array[Vims] of int: vim_price_list_2 = [20,21,22,23,24];
array[Vims] of int: vim_price_list_3 = [70,71,72,73,74];
array[Vims] of int: vim_price_list_4 = [40,41,42,43,44];
% This is the NETWORK BASIC LOAD MODEL (CONSUMED)
% NOTE. This is not applicable in OSM Release 7
% This is the SERVICE CONSUMPTION MODEL
% These are the variables, i.e. which DC to select for each VNF
Vims: VNF1 = vimbbbbbbbb_38f5_438d_b8ee_3f93b3531f87;
var Vims: VNF2;
Vims: VNF3 = vimbbbbbbbb_38f5_438d_b8ee_3f93b3531f87;
var Vims: VNF4;
% These are the set of rules for selecting DCs to VNFs
constraint trp_link_latency[VNF1, VNF2] <= 150;
constraint trp_link_latency[VNF2, VNF3] <= 140;
constraint trp_link_latency[VNF3, VNF4] <= 130;
constraint trp_link_jitter[VNF1, VNF2] <= 30;
constraint trp_link_jitter[VNF2, VNF3] <= 30;
constraint trp_link_jitter[VNF3, VNF4] <= 30;
% Calculate the cost for VNFs and cost for transport link and total cost
var int: used_transport_cost =trp_link_price_list[VNF1, VNF2]+
trp_link_price_list[VNF2, VNF3]+
trp_link_price_list[VNF3, VNF4];
var int: used_vim_cost =vim_price_list_1[VNF1]+
vim_price_list_2[VNF2]+
vim_price_list_3[VNF3]+
vim_price_list_4[VNF4];
var int: total_cost = used_transport_cost + used_vim_cost;
solve minimize total_cost;
"""
test_mzn_unsatisfiable_model = """
var 1..2: item1;
var 1..2: item2;
constraint item1 + item2 == 5;
solve satisfy;
"""
class TestMznPlacementConductor(TestCase):
def test__run_placement_model(self):
expected_result = [{'vimAccountId': 'bbbbbbbb-38f5-438d-b8ee-3f93b3531f87', 'member-vnf-index': '1'},
{'vimAccountId': 'aaaaaaaa-38f5-438d-b8ee-3f93b3531f87', 'member-vnf-index': '2'},
{'vimAccountId': 'aaaaaaaa-38f5-438d-b8ee-3f93b3531f87', 'member-vnf-index': '3'},
{'vimAccountId': 'aaaaaaaa-38f5-438d-b8ee-3f93b3531f87', 'member-vnf-index': '4'}]
mpc = MznPlacementConductor(logging.getLogger(__name__))
placement = mpc._run_placement_model(mzn_model=test_mzn_model, ns_desc={})
# sort the result to ease assert with expected result
sorted_placement = sorted(placement, key=lambda k: k['member-vnf-index'])
self.assertEqual(expected_result, sorted_placement, 'Faulty syntax or content')
def test__run_placement_model_w_pinning(self):
expected_result = [{'vimAccountId': 'bbbbbbbb-38f5-438d-b8ee-3f93b3531f87', 'member-vnf-index': '1'},
{'vimAccountId': 'bbbbbbbb-38f5-438d-b8ee-3f93b3531f87', 'member-vnf-index': '2'},
{'vimAccountId': 'bbbbbbbb-38f5-438d-b8ee-3f93b3531f87', 'member-vnf-index': '3'},
{'vimAccountId': 'aaaaaaaa-38f5-438d-b8ee-3f93b3531f87', 'member-vnf-index': '4'}]
ns_desc = [{'vnf_price_per_vim': [10, 9, 7, 8], 'vnf_id': '2'},
{'vim_account': 'vimbbbbbbbb_38f5_438d_b8ee_3f93b3531f87', 'vnf_price_per_vim': [10, 9, 7, 8],
'vnf_id': '1'},
{'vnf_price_per_vim': [10, 9, 7, 8], 'vnf_id': '4'},
{'vim_account': 'vimbbbbbbbb_38f5_438d_b8ee_3f93b3531f87', 'vnf_price_per_vim': [10, 9, 7, 8],
'vnf_id': '3'}
]
mpc = MznPlacementConductor(logging.getLogger(__name__))
placement = mpc._run_placement_model(mzn_model=test_mzn_model_w_pinning, ns_desc=ns_desc)
# sort the result to ease assert with expected result
sorted_placement = sorted(placement, key=lambda k: k['member-vnf-index'])
self.assertEqual(expected_result, sorted_placement, 'Faulty syntax or content')
def test__run_placement_model_unsatisfiable(self):
mpc = MznPlacementConductor(logging.getLogger(__name__))
self.assertEqual([{}], mpc._run_placement_model(mzn_model=test_mzn_unsatisfiable_model, ns_desc={}),
"Faulty syntax or content for unsatisfiable model")
@mock.patch.object(MznModelGenerator, 'create_model', side_effect=['%model'])
@mock.patch.object(MznPlacementConductor, '_run_placement_model')
def test_do_placement_computation(self, mock_run, mock_create):
mpc = MznPlacementConductor(logging.getLogger(__name__))
dummy_nspd = {'ns_desc': {}}
_ = mpc.do_placement_computation(dummy_nspd)
mock_create.assert_called_with(dummy_nspd)
mock_run.assert_called_with('%model', {})
| 38.474886
| 113
| 0.717185
| 1,305
| 8,426
| 4.398467
| 0.186973
| 0.020906
| 0.018815
| 0.075261
| 0.794599
| 0.771429
| 0.771429
| 0.758014
| 0.751916
| 0.728571
| 0
| 0.118236
| 0.157845
| 8,426
| 218
| 114
| 38.651376
| 0.690671
| 0.0845
| 0
| 0.769697
| 0
| 0.036364
| 0.701884
| 0.208447
| 0
| 0
| 0
| 0
| 0.030303
| 1
| 0.024242
| false
| 0
| 0.018182
| 0
| 0.048485
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6bf8294e6020ae1bffac2e3d6670237ee697ea7c
| 99
|
py
|
Python
|
MCA/Machine Learning/swap.py
|
muhammadmuzzammil1998/CollegeStuff
|
618cec9ebfbfd29a2d1e5a182b90cfb36b38a906
|
[
"MIT"
] | 3
|
2018-03-13T12:34:51.000Z
|
2018-10-02T18:54:22.000Z
|
MCA/Machine Learning/swap.py
|
muhammadmuzzammil1998/CollegeStuff
|
618cec9ebfbfd29a2d1e5a182b90cfb36b38a906
|
[
"MIT"
] | null | null | null |
MCA/Machine Learning/swap.py
|
muhammadmuzzammil1998/CollegeStuff
|
618cec9ebfbfd29a2d1e5a182b90cfb36b38a906
|
[
"MIT"
] | null | null | null |
a, b = 5, 6
print("a = {}, b = {}".format(a, b))
a, b = b, a
print("a = {}, b = {}".format(a, b))
| 16.5
| 36
| 0.383838
| 20
| 99
| 1.9
| 0.3
| 0.315789
| 0.368421
| 0.684211
| 0.789474
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0.027027
| 0.252525
| 99
| 5
| 37
| 19.8
| 0.486486
| 0
| 0
| 0.5
| 0
| 0
| 0.282828
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 1
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 10
|
d45794039dc4fb3ca4c522c668eeb6dee54a2bdc
| 20,469
|
py
|
Python
|
py/primes.py
|
scoraig52/code
|
c9335071266267227b56e48861a4f188d16ca4a4
|
[
"MIT"
] | 2
|
2021-02-18T04:42:40.000Z
|
2021-12-12T00:27:42.000Z
|
py/primes.py
|
akar-0/code
|
be15d79e7c9de107cc66cbdfcb3ae91a799607dd
|
[
"MIT"
] | null | null | null |
py/primes.py
|
akar-0/code
|
be15d79e7c9de107cc66cbdfcb3ae91a799607dd
|
[
"MIT"
] | 1
|
2021-11-20T10:24:09.000Z
|
2021-11-20T10:24:09.000Z
|
# see also https://stackoverflow.com/questions/2211990/how-to-implement-an-efficient-infinite-generator-of-prime-numbers-in-python/10733621#10733621
cycle [2, 4, 2, 4, 6, 2, 6, 4, 2, 4, 6, 6, 2, 6, 4, 2, 6, 4, 6, 8, 4, 2, 4, 2, 4, 8, 6, 4, 6, 2, 4, 6, 2, 6, 6, 4, 2, 4, 6, 2, 6, 4, 2, 4, 2, 10, 2, 10]
48 elements; start from 11 to skip multiples of the primes below 11 (2, 3, 5 and 7)
next:
from itertools import cycle  # cycle() used below comes from itertools

L=[2,3,5,7,11]
p=13
i=iter(cycle((4, 2, 4, 6, 2, 6, 4, 2, 4, 6, 6, 2, 6, 4, 2, 6, 4, 6, 8, 4, 2, 4, 2, 4, 14, 4, 6, 2, 10, 2, 6, 6, 4, 2, 4, 6, 2, 10, 2, 4, 2, 12, 10, 2, 4, 2, 4, 6, 2, 6, 4, 6, 6, 6, 2, 6, 4, 2, 6, 4, 6, 8, 4, 2, 4, 6, 8, 6, 10, 2, 4, 6, 2, 6, 6, 4, 2, 4, 6, 2, 6, 4, 2, 6, 10, 2, 10, 2, 4, 2, 4, 6, 8, 4, 2, 4, 12, 2, 6, 4, 2, 6, 4, 6, 12, 2, 4, 2, 4, 8, 6, 4, 6, 2, 4, 6, 2, 6, 10, 2, 4, 6, 2, 6, 4, 2, 4, 2, 10, 2, 10, 2, 4, 6, 6, 2, 6, 6, 4, 6, 6, 2, 6, 4, 2, 6, 4, 6, 8, 4, 2, 6, 4, 8, 6, 4, 6, 2, 4, 6, 8, 6, 4, 2, 10, 2, 6, 4, 2, 4, 2, 10, 2, 10, 2, 4, 2, 4, 8, 6, 4, 2, 4, 6, 6, 2, 6, 4, 8, 4, 6, 8, 4, 2, 4, 2, 4, 8, 6, 4, 6, 6, 6, 2, 6, 6, 4, 2, 4, 6, 2, 6, 4, 2, 4, 2, 10, 2, 10, 2, 6, 4, 6, 2, 6, 4, 2, 4, 6, 6, 8, 4, 2, 6, 10, 8, 4, 2, 4, 2, 4, 8, 10, 6, 2, 4, 8, 6, 6, 4, 2, 4, 6, 2, 6, 4, 6, 2, 10, 2, 10, 2, 4, 2, 4, 6, 2, 6, 4, 2, 4, 6, 6, 2, 6, 6, 6, 4, 6, 8, 4, 2, 4, 2, 4, 8, 6, 4, 8, 4, 6, 2, 6, 6, 4, 2, 4, 6, 8, 4, 2, 4, 2, 10, 2, 10, 2, 4, 2, 4, 6, 2, 10, 2, 4, 6, 8, 6, 4, 2, 6, 4, 6, 8, 4, 6, 2, 4, 8, 6, 4, 6, 2, 4, 6, 2, 6, 6, 4, 6, 6, 2, 6, 6, 4, 2, 10, 2, 10, 2, 4, 2, 4, 6, 2, 6, 4, 2, 10, 6, 2, 6, 4, 2, 6, 4, 6, 8, 4, 2, 4, 2, 12, 6, 4, 6, 2, 4, 6, 2, 12, 4, 2, 4, 8, 6, 4, 2, 4, 2, 10, 2, 10, 6, 2, 4, 6, 2, 6, 4, 2, 4, 6, 6, 2, 6, 4, 2, 10, 6, 8, 6, 4, 2, 4, 8, 6, 4, 6, 2, 4, 6, 2, 6, 6, 6, 4, 6, 2, 6, 4, 2, 4, 2, 10, 12, 2, 4, 2, 10, 2, 6, 4, 2, 4, 6, 6, 2, 10, 2, 6, 4, 14, 4, 2, 4, 2, 4, 8, 6, 4, 6, 2, 4, 6, 2, 6, 6, 4, 2, 4, 6, 2, 6, 4, 2, 4, 12, 2, 12)))
while len(L) < 50000:
    if is_prime(p):  # is_prime() is assumed to be defined elsewhere in this file
        L.append(p)
    p += next(i)
next (5760 elements):
P=[2,3,5,7,11,13]
i=iter(cycle((2, 4, 6, 2, 6, 4, 2, 4, 6, 6, 2, 6, 4, 2, 6, 4, 6, 8, 4, 2, 4, 2, 4, 14, 4, 6, 2, 10, 2, 6, 6, 4, 6, 6, 2, 10, 2, 4, 2, 12, 12, 4, 2, 4, 6, 2, 10, 6, 6, 6, 2, 6, 4, 2, 6, 4, 14, 4, 2, 4, 6, 8, 6, 10, 2, 4, 6, 2, 6, 6, 6, 4, 6, 2, 6, 4, 8, 10, 2, 10, 2, 4, 2, 4, 6, 8, 4, 2, 4, 12, 8, 4, 2, 6, 4, 6, 12, 2, 4, 2, 12, 6, 4, 6, 6, 6, 2, 6, 10, 2, 4, 6, 2, 6, 6, 4, 2, 10, 2, 10, 2, 4, 6, 6, 2, 6, 6, 4, 6, 8, 6, 4, 2, 6, 4, 6, 8, 4, 2, 6, 4, 8, 6, 4, 8, 4, 6, 8, 10, 2, 10, 2, 6, 4, 2, 4, 2, 10, 2, 10, 2, 4, 2, 4, 14, 4, 2, 4, 6, 6, 2, 6, 4, 8, 10, 8, 4, 2, 4, 6, 8, 6, 4, 6, 6, 6, 2, 6, 6, 4, 2, 4, 6, 2, 10, 2, 4, 2, 10, 2, 10, 2, 6, 4, 8, 6, 4, 2, 4, 6, 6, 8, 4, 2, 6, 10, 8, 4, 2, 6, 4, 8, 10, 6, 2, 4, 8, 6, 6, 4, 2, 4, 6, 2, 6, 4, 6, 2, 10, 12, 2, 4, 2, 4, 6, 2, 6, 4, 2, 4, 12, 2, 6, 6, 10, 6, 8, 4, 2, 4, 2, 4, 8, 6, 12, 4, 6, 2, 12, 4, 2, 4, 6, 8, 4, 2, 4, 2, 12, 10, 2, 4, 2, 4, 6, 2, 10, 2, 4, 6, 8, 6, 4, 2, 6, 4, 6, 8, 4, 6, 2, 4, 8, 6, 4, 6, 2, 4, 6, 2, 6, 6, 4, 6, 6, 8, 6, 4, 2, 10, 2, 10, 2, 4, 2, 10, 2, 6, 4, 2, 10, 6, 2, 6, 4, 2, 6, 4, 6, 8, 6, 4, 2, 12, 10, 6, 2, 4, 6, 2, 12, 4, 2, 4, 8, 6, 4, 2, 4, 2, 10, 2, 10, 6, 2, 4, 6, 2, 6, 4, 2, 10, 6, 2, 6, 4, 12, 6, 8, 6, 4, 2, 4, 8, 6, 4, 6, 2, 4, 6, 8, 6, 6, 4, 6, 2, 6, 4, 2, 4, 2, 10, 12, 2, 4, 12, 2, 6, 4, 2, 4, 6, 6, 2, 12, 6, 4, 18, 2, 4, 2, 4, 8, 6, 4, 6, 2, 4, 8, 6, 6, 4, 2, 4, 6, 2, 6, 4, 2, 4, 12, 2, 12, 6, 4, 6, 2, 6, 4, 6, 6, 6, 2, 6, 4, 2, 6, 4, 6, 8, 4, 2, 4, 2, 4, 14, 4, 6, 2, 10, 2, 6, 6, 4, 2, 10, 2, 10, 2, 4, 14, 10, 2, 4, 2, 4, 6, 2, 6, 10, 6, 6, 2, 10, 2, 6, 4, 6, 8, 4, 2, 4, 6, 8, 6, 10, 2, 4, 6, 2, 6, 6, 4, 2, 4, 6, 2, 6, 4, 2, 6, 10, 2, 10, 6, 2, 4, 6, 8, 4, 2, 4, 12, 2, 6, 4, 2, 6, 4, 6, 12, 2, 4, 2, 4, 8, 6, 4, 6, 2, 10, 2, 6, 10, 6, 6, 2, 6, 4, 2, 4, 2, 10, 2, 12, 4, 6, 6, 2, 12, 4, 6, 6, 2, 6, 4, 2, 6, 4, 14, 4, 2, 6, 4, 8, 6, 4, 6, 2, 4, 6, 8, 6, 6, 10, 2, 6, 4, 6, 2, 10, 2, 10, 2, 4, 2, 4, 8, 6, 4, 2, 4, 6, 6, 8, 4, 8, 4, 6, 8, 4, 2, 4, 2, 12, 6, 4, 6, 6, 6, 2, 6, 6, 4, 2, 4, 6, 2, 6, 6, 4, 2, 10, 2, 10, 2, 6, 4, 6, 2, 6, 4, 2, 4, 6, 14, 4, 2, 6, 10, 8, 4, 2, 4, 2, 4, 8, 10, 8, 4, 8, 6, 10, 2, 4, 6, 2, 6, 4, 6, 2, 10, 2, 10, 2, 4, 2, 4, 6, 8, 4, 2, 4, 6, 6, 2, 6, 6, 6, 10, 8, 4, 2, 4, 6, 8, 6, 4, 8, 4, 6, 2, 6, 6, 4, 2, 4, 6, 12, 2, 4, 2, 10, 2, 10, 2, 4, 2, 4, 8, 10, 2, 4, 6, 8, 6, 4, 2, 6, 4, 6, 8, 4, 8, 4, 8, 6, 4, 6, 2, 4, 6, 2, 6, 6, 4, 6, 6, 2, 6, 6, 4, 2, 10, 12, 2, 4, 2, 4, 6, 2, 6, 4, 2, 16, 2, 6, 4, 2, 10, 6, 8, 4, 2, 4, 2, 12, 6, 10, 2, 4, 6, 2, 12, 4, 2, 4, 8, 6, 4, 2, 4, 2, 12, 10, 6, 2, 4, 6, 2, 6, 4, 2, 4, 6, 6, 2, 6, 4, 2, 10, 6, 8, 10, 2, 4, 8, 6, 4, 6, 2, 4, 6, 2, 6, 6, 6, 4, 6, 8, 4, 2, 4, 2, 10, 12, 2, 4, 2, 10, 2, 6, 4, 2, 4, 6, 6, 2, 10, 2, 6, 4, 14, 6, 4, 2, 4, 8, 10, 6, 2, 4, 6, 2, 6, 6, 4, 2, 4, 8, 6, 4, 2, 4, 12, 2, 12, 4, 2, 4, 6, 2, 6, 4, 2, 10, 6, 2, 6, 4, 8, 4, 6, 8, 4, 2, 4, 2, 4, 14, 4, 6, 2, 10, 8, 6, 4, 2, 4, 6, 2, 10, 2, 4, 2, 12, 10, 2, 4, 6, 6, 2, 6, 4, 6, 6, 6, 2, 6, 6, 6, 4, 6, 12, 2, 4, 6, 8, 6, 10, 2, 4, 8, 6, 6, 4, 2, 4, 6, 2, 6, 4, 2, 6, 10, 2, 10, 2, 6, 4, 6, 8, 4, 6, 12, 2, 6, 4, 2, 6, 4, 6, 12, 2, 4, 2, 4, 14, 4, 6, 2, 4, 6, 2, 6, 10, 2, 10, 2, 6, 4, 2, 4, 12, 2, 10, 2, 4, 6, 6, 2, 6, 6, 4, 6, 6, 2, 10, 2, 6, 4, 6, 8, 4, 2, 6, 4, 8, 6, 4, 6, 2, 4, 6, 8, 6, 4, 2, 10, 2, 6, 4, 2, 6, 10, 2, 10, 6, 2, 4, 8, 6, 4, 2, 4, 6, 6, 2, 6, 4, 8, 4, 6, 8, 4, 2, 4, 2, 4, 8, 6, 4, 6, 12, 2, 6, 6, 4, 6, 6, 2, 6, 4, 2, 4, 2, 10, 2, 12, 6, 4, 6, 2, 10, 2, 4, 6, 6, 8, 4, 2, 6, 18, 4, 2, 4, 2, 4, 8, 10, 6, 2, 4, 8, 6, 6, 6, 4, 6, 2, 6, 4, 6, 2, 10, 2, 10, 2, 4, 2, 4, 6, 2, 6, 
4, 2, 4, 6, 6, 8, 6, 6, 4, 6, 8, 4, 2, 4, 2, 12, 6, 4, 12, 6, 2, 6, 6, 4, 2, 4, 6, 8, 6, 4, 2, 10, 2, 10, 2, 4, 2, 4, 6, 2, 10, 2, 4, 6, 8, 6, 4, 2, 6, 4, 6, 8, 4, 6, 2, 4, 8, 6, 4, 8, 4, 6, 2, 6, 10, 6, 6, 2, 6, 6, 4, 2, 10, 2, 10, 2, 4, 2, 4, 6, 8, 4, 2, 10, 6, 2, 6, 4, 2, 6, 10, 8, 4, 2, 4, 14, 6, 4, 6, 2, 4, 6, 2, 12, 4, 2, 4, 8, 10, 2, 4, 2, 10, 2, 10, 6, 2, 4, 8, 6, 4, 2, 4, 6, 6, 2, 6, 4, 2, 10, 6, 8, 6, 6, 4, 8, 6, 4, 6, 2, 4, 6, 2, 6, 6, 6, 4, 6, 2, 6, 4, 2, 4, 2, 10, 12, 2, 4, 2, 10, 2, 6, 4, 2, 4, 12, 2, 10, 2, 10, 14, 4, 2, 4, 2, 4, 8, 6, 10, 2, 4, 6, 2, 12, 4, 2, 4, 6, 2, 6, 4, 2, 4, 14, 12, 4, 2, 4, 6, 2, 6, 4, 2, 4, 6, 6, 2, 6, 4, 2, 6, 4, 6, 8, 4, 6, 2, 4, 14, 4, 6, 2, 10, 2, 6, 6, 4, 2, 4, 6, 12, 2, 4, 2, 12, 10, 2, 4, 2, 10, 2, 6, 4, 6, 6, 6, 2, 6, 4, 2, 6, 4, 6, 8, 6, 4, 6, 8, 16, 2, 4, 6, 2, 6, 6, 4, 2, 4, 8, 6, 4, 2, 6, 10, 2, 10, 2, 4, 2, 4, 6, 8, 4, 2, 16, 2, 6, 4, 8, 4, 6, 12, 2, 4, 2, 4, 8, 6, 4, 6, 2, 4, 6, 8, 10, 2, 4, 6, 2, 6, 4, 2, 4, 2, 10, 2, 10, 2, 4, 6, 6, 2, 6, 6, 4, 6, 6, 2, 6, 6, 6, 4, 6, 12, 2, 6, 4, 8, 6, 4, 6, 2, 4, 14, 6, 4, 2, 10, 2, 6, 4, 2, 4, 2, 10, 2, 10, 2, 6, 4, 8, 6, 4, 6, 6, 6, 2, 6, 4, 8, 4, 6, 8, 4, 2, 4, 2, 4, 14, 4, 6, 6, 6, 2, 6, 6, 4, 2, 10, 2, 6, 4, 2, 4, 12, 2, 10, 2, 6, 4, 6, 2, 6, 6, 4, 6, 6, 12, 2, 6, 10, 8, 4, 2, 4, 2, 4, 8, 10, 6, 2, 4, 8, 6, 6, 4, 2, 4, 6, 2, 6, 4, 8, 10, 2, 10, 6, 2, 4, 6, 2, 6, 4, 2, 4, 6, 6, 2, 6, 6, 6, 4, 6, 8, 4, 2, 4, 2, 4, 8, 6, 4, 8, 10, 2, 6, 6, 4, 6, 6, 8, 4, 2, 4, 2, 10, 2, 12, 4, 2, 4, 6, 2, 10, 2, 4, 6, 8, 6, 4, 2, 6, 4, 14, 4, 6, 2, 4, 8, 6, 4, 6, 2, 4, 6, 2, 6, 6, 10, 6, 2, 6, 10, 2, 10, 2, 10, 2, 4, 2, 4, 6, 2, 6, 4, 2, 10, 6, 8, 4, 2, 6, 4, 6, 8, 4, 2, 4, 2, 12, 6, 4, 6, 6, 6, 2, 12, 4, 2, 4, 8, 6, 6, 4, 2, 10, 2, 10, 6, 2, 4, 6, 2, 6, 4, 2, 4, 6, 8, 6, 4, 2, 10, 6, 8, 6, 4, 2, 4, 8, 6, 4, 8, 4, 6, 2, 6, 12, 4, 6, 2, 6, 4, 2, 4, 2, 10, 12, 2, 4, 2, 10, 8, 4, 2, 4, 6, 6, 2, 10, 2, 6, 18, 4, 2, 4, 6, 8, 6, 4, 6, 2, 4, 6, 2, 6, 6, 4, 2, 4, 6, 2, 10, 2, 4, 12, 2, 12, 4, 2, 4, 8, 6, 4, 2, 4, 6, 6, 2, 6, 4, 2, 6, 4, 6, 8, 4, 2, 6, 4, 14, 4, 6, 2, 10, 2, 6, 6, 4, 2, 4, 6, 2, 10, 2, 4, 2, 22, 2, 4, 2, 4, 6, 2, 6, 4, 6, 12, 2, 6, 4, 2, 10, 6, 8, 4, 2, 4, 6, 8, 6, 10, 2, 4, 6, 2, 12, 4, 2, 4, 6, 2, 6, 4, 2, 6, 12, 10, 2, 4, 2, 4, 6, 8, 4, 2, 4, 12, 2, 6, 4, 2, 6, 4, 6, 12, 6, 2, 4, 8, 6, 4, 6, 2, 4, 6, 2, 6, 10, 2, 4, 6, 8, 4, 2, 4, 2, 10, 2, 10, 2, 4, 12, 2, 6, 6, 4, 6, 6, 2, 6, 4, 2, 6, 4, 6, 8, 6, 6, 4, 8, 10, 6, 2, 4, 6, 8, 6, 4, 2, 12, 6, 4, 2, 4, 2, 10, 2, 10, 2, 4, 2, 4, 8, 6, 4, 2, 10, 6, 2, 6, 4, 8, 4, 6, 8, 4, 2, 4, 2, 4, 8, 6, 4, 6, 6, 6, 8, 6, 4, 2, 4, 6, 2, 6, 4, 2, 4, 2, 10, 2, 10, 2, 10, 6, 2, 6, 4, 2, 4, 6, 6, 8, 6, 6, 10, 12, 2, 4, 2, 4, 8, 10, 6, 2, 4, 8, 6, 6, 4, 2, 4, 6, 2, 6, 4, 6, 2, 10, 2, 10, 2, 6, 4, 6, 2, 6, 4, 6, 6, 6, 2, 6, 6, 6, 4, 6, 8, 4, 2, 4, 2, 4, 14, 4, 8, 4, 6, 2, 6, 6, 4, 2, 10, 8, 4, 2, 4, 12, 2, 10, 2, 4, 2, 4, 6, 2, 12, 4, 6, 8, 10, 2, 6, 4, 6, 8, 4, 6, 2, 4, 8, 6, 4, 6, 2, 4, 6, 2, 6, 6, 4, 6, 6, 2, 6, 6, 6, 10, 2, 10, 6, 2, 4, 6, 2, 6, 4, 2, 10, 6, 2, 6, 4, 2, 6, 4, 6, 8, 4, 2, 4, 2, 12, 6, 4, 6, 2, 10, 2, 12, 4, 6, 8, 6, 4, 2, 4, 2, 10, 2, 16, 2, 4, 6, 2, 10, 2, 4, 6, 6, 2, 6, 4, 2, 10, 14, 6, 4, 2, 4, 8, 6, 4, 6, 2, 4, 6, 2, 6, 6, 6, 4, 6, 2, 6, 4, 6, 2, 10, 12, 2, 4, 2, 10, 2, 6, 4, 2, 4, 6, 6, 12, 2, 6, 4, 14, 4, 2, 4, 2, 12, 6, 4, 6, 6, 6, 2, 6, 6, 4, 2, 4, 6, 2, 6, 6, 4, 12, 2, 12, 4, 2, 4, 6, 2, 6, 4, 2, 4, 6, 8, 6, 4, 2, 6, 4, 6, 8, 4, 2, 4, 2, 4, 14, 4, 8, 10, 2, 6, 10, 2, 4, 6, 2, 10, 2, 4, 2, 12, 10, 2, 4, 2, 4, 6, 8, 4, 6, 6, 6, 2, 6, 4, 2, 6, 10, 8, 
4, 2, 4, 6, 8, 6, 10, 2, 4, 6, 2, 6, 6, 4, 2, 4, 6, 2, 10, 2, 6, 10, 2, 10, 2, 4, 2, 4, 14, 4, 2, 4, 12, 2, 6, 4, 2, 6, 4, 6, 12, 2, 6, 4, 8, 6, 4, 6, 2, 4, 6, 2, 6, 10, 2, 4, 6, 2, 6, 4, 2, 4, 2, 10, 12, 2, 4, 6, 6, 2, 6, 6, 4, 12, 2, 6, 4, 2, 10, 6, 8, 4, 2, 6, 4, 8, 6, 10, 2, 4, 6, 14, 4, 2, 10, 2, 6, 4, 2, 4, 2, 12, 10, 2, 4, 2, 4, 8, 6, 4, 2, 4, 6, 6, 2, 6, 4, 8, 4, 6, 8, 4, 6, 2, 4, 8, 6, 4, 6, 6, 6, 2, 6, 6, 4, 2, 4, 6, 8, 4, 2, 4, 2, 10, 2, 10, 2, 6, 10, 2, 6, 4, 2, 4, 6, 6, 8, 4, 2, 6, 10, 8, 6, 4, 2, 4, 8, 10, 6, 2, 4, 8, 6, 6, 4, 2, 4, 8, 6, 4, 6, 2, 10, 2, 10, 2, 4, 2, 4, 6, 2, 6, 4, 2, 10, 6, 2, 6, 12, 4, 6, 8, 4, 2, 4, 2, 4, 8, 6, 4, 8, 4, 6, 8, 6, 4, 2, 4, 6, 8, 4, 2, 4, 2, 10, 2, 10, 2, 4, 6, 6, 2, 10, 2, 4, 6, 8, 6, 6, 6, 4, 6, 12, 6, 2, 4, 8, 6, 4, 6, 2, 4, 8, 6, 6, 4, 6, 6, 2, 6, 6, 4, 2, 10, 2, 10, 2, 6, 4, 6, 2, 6, 4, 12, 6, 2, 6, 4, 2, 6, 4, 6, 8, 4, 2, 4, 2, 18, 4, 6, 2, 4, 6, 2, 12, 4, 2, 12, 6, 4, 2, 4, 12, 2, 10, 6, 2, 4, 6, 2, 6, 6, 4, 6, 6, 2, 10, 2, 10, 6, 8, 6, 4, 2, 4, 8, 6, 4, 6, 2, 4, 6, 2, 6, 6, 6, 4, 6, 2, 6, 4, 2, 6, 10, 12, 6, 2, 10, 2, 6, 4, 2, 4, 6, 6, 2, 10, 2, 6, 4, 14, 4, 2, 4, 2, 4, 8, 6, 4, 6, 2, 10, 2, 6, 6, 4, 6, 6, 2, 6, 4, 2, 4, 12, 2, 12, 4, 2, 4, 6, 2, 10, 2, 4, 6, 6, 2, 6, 4, 2, 6, 4, 14, 4, 2, 4, 2, 4, 14, 4, 6, 2, 10, 2, 6, 6, 6, 4, 6, 2, 10, 6, 2, 12, 10, 2, 4, 2, 4, 6, 2, 6, 4, 6, 6, 6, 8, 4, 2, 6, 4, 6, 8, 4, 2, 4, 14, 6, 10, 6, 6, 2, 6, 6, 4, 2, 4, 6, 2, 6, 6, 6, 10, 2, 10, 2, 4, 2, 4, 6, 8, 4, 2, 4, 14, 6, 4, 2, 6, 4, 6, 12, 2, 4, 2, 4, 8, 6, 4, 8, 4, 6, 2, 6, 10, 2, 4, 6, 2, 6, 4, 2, 4, 2, 10, 2, 10, 2, 4, 6, 6, 8, 6, 4, 6, 6, 2, 6, 4, 2, 6, 10, 8, 4, 2, 10, 8, 6, 4, 6, 2, 4, 6, 8, 6, 4, 2, 10, 2, 10, 2, 4, 2, 10, 2, 10, 2, 4, 2, 4, 8, 6, 4, 2, 4, 6, 6, 2, 6, 4, 8, 4, 6, 8, 4, 2, 6, 4, 8, 6, 4, 6, 6, 6, 2, 6, 6, 4, 2, 4, 6, 2, 6, 4, 2, 4, 2, 10, 12, 2, 6, 4, 6, 2, 6, 4, 2, 4, 12, 8, 4, 2, 16, 8, 4, 2, 4, 2, 4, 8, 16, 2, 4, 8, 12, 4, 2, 4, 6, 2, 6, 4, 6, 2, 12, 10, 2, 4, 2, 4, 6, 2, 6, 4, 2, 4, 6, 6, 2, 6, 6, 6, 4, 6, 8, 4, 6, 2, 4, 8, 6, 4, 8, 4, 6, 2, 6, 6, 4, 2, 4, 6, 8, 4, 2, 4, 2, 10, 2, 10, 2, 4, 2, 10, 2, 10, 2, 4, 6, 8, 6, 4, 2, 6, 4, 6, 8, 10, 2, 4, 8, 10, 6, 2, 4, 6, 2, 6, 6, 4, 6, 8, 6, 6, 4, 2, 10, 2, 10, 2, 4, 2, 4, 6, 2, 6, 4, 2, 10, 6, 2, 6, 4, 8, 4, 6, 8, 4, 2, 4, 2, 12, 6, 4, 6, 2, 4, 6, 14, 4, 2, 4, 8, 6, 4, 2, 4, 2, 10, 2, 10, 6, 6, 6, 2, 6, 4, 2, 4, 6, 6, 2, 6, 6, 10, 6, 14, 4, 2, 4, 8, 6, 4, 6, 2, 4, 8, 6, 6, 6, 4, 6, 2, 6, 4, 2, 4, 2, 10, 12, 2, 6, 10, 2, 6, 4, 6, 6, 6, 2, 10, 2, 6, 4, 14, 4, 2, 4, 2, 4, 14, 4, 6, 2, 4, 6, 2, 6, 6, 4, 2, 10, 2, 6, 4, 2, 4, 12, 2, 12, 4, 2, 4, 6, 2, 6, 6, 4, 6, 6, 2, 10, 2, 6, 4, 6, 8, 4, 2, 4, 2, 4, 14, 4, 6, 2, 10, 2, 6, 6, 4, 2, 4, 6, 2, 10, 2, 6, 12, 10, 6, 2, 4, 6, 2, 6, 4, 6, 6, 6, 2, 6, 4, 2, 6, 4, 6, 8, 4, 2, 4, 6, 8, 6, 10, 2, 10, 2, 6, 6, 4, 6, 6, 2, 6, 4, 2, 6, 10, 2, 12, 4, 2, 4, 6, 12, 2, 4, 12, 2, 6, 4, 2, 6, 4, 18, 2, 4, 2, 4, 8, 6, 4, 6, 2, 4, 6, 2, 6, 12, 4, 6, 2, 6, 4, 6, 2, 10, 2, 10, 2, 4, 6, 6, 2, 6, 6, 4, 6, 6, 8, 4, 2, 6, 4, 6, 8, 4, 2, 6, 12, 6, 4, 6, 6, 6, 8, 6, 4, 2, 10, 2, 6, 6, 4, 2, 10, 2, 10, 2, 4, 2, 4, 8, 6, 4, 2, 4, 6, 8, 6, 4, 8, 4, 6, 8, 4, 2, 4, 2, 4, 8, 6, 4, 12, 6, 2, 6, 10, 2, 4, 6, 2, 6, 4, 2, 4, 2, 10, 2, 10, 2, 6, 4, 6, 8, 4, 2, 4, 6, 6, 8, 4, 2, 6, 10, 8, 4, 2, 4, 6, 8, 10, 6, 2, 4, 8, 6, 6, 4, 2, 4, 6, 2, 10, 6, 2, 10, 2, 10, 2, 4, 2, 4, 8, 6, 4, 2, 4, 6, 6, 2, 6, 6, 6, 4, 6, 8, 4, 2, 6, 4, 8, 6, 4, 8, 4, 6, 2, 6, 6, 4, 2, 4, 6, 8, 4, 2, 4, 2, 10, 12, 2, 4, 2, 4, 6, 2, 10, 2, 4, 14, 6, 4, 2, 10, 6, 8, 4, 6, 2, 4, 8, 6, 10, 2, 4, 6, 2, 
12, 4, 6, 6, 2, 6, 6, 4, 2, 12, 10, 2, 4, 2, 4, 6, 2, 6, 4, 2, 10, 6, 2, 6, 4, 2, 6, 4, 6, 8, 4, 6, 2, 12, 6, 4, 6, 2, 4, 6, 2, 12, 4, 2, 4, 14, 4, 2, 4, 2, 10, 2, 10, 6, 2, 10, 2, 6, 4, 2, 4, 6, 6, 2, 6, 4, 2, 10, 6, 8, 6, 4, 2, 4, 8, 10, 6, 2, 4, 6, 2, 6, 6, 6, 4, 8, 6, 4, 2, 4, 2, 10, 12, 2, 4, 2, 10, 2, 6, 4, 2, 10, 6, 2, 10, 8, 4, 14, 4, 2, 4, 2, 4, 8, 6, 4, 6, 2, 4, 6, 8, 6, 4, 2, 4, 6, 2, 6, 4, 2, 4, 12, 2, 12, 4, 6, 6, 2, 6, 4, 2, 4, 6, 6, 2, 6, 6, 6, 4, 6, 12, 2, 4, 2, 4, 14, 4, 6, 2, 12, 6, 6, 4, 2, 4, 6, 2, 10, 2, 4, 2, 12, 10, 2, 6, 4, 6, 2, 6, 4, 6, 6, 6, 2, 6, 4, 2, 6, 4, 6, 8, 4, 2, 4, 6, 14, 10, 2, 4, 6, 2, 6, 6, 4, 2, 10, 2, 6, 4, 2, 16, 2, 10, 2, 4, 2, 4, 6, 8, 6, 4, 12, 2, 10, 2, 6, 4, 6, 12, 2, 4, 2, 4, 8, 6, 4, 6, 2, 4, 6, 2, 6, 10, 2, 4, 6, 2, 6, 4, 2, 6, 10, 2, 10, 6, 6, 6, 2, 6, 6, 4, 6, 6, 2, 6, 4, 2, 6, 4, 6, 8, 4, 2, 6, 4, 8, 6, 4, 6, 2, 10, 8, 6, 4, 12, 2, 6, 4, 2, 4, 2, 10, 2, 12, 4, 2, 4, 8, 10, 2, 4, 6, 6, 2, 6, 4, 8, 4, 14, 4, 2, 4, 2, 4, 8, 6, 4, 6, 6, 6, 2, 6, 6, 6, 4, 6, 2, 6, 4, 6, 2, 10, 2, 10, 2, 6, 4, 6, 2, 6, 4, 2, 4, 6, 6, 8, 4, 2, 6, 10, 8, 4, 2, 4, 2, 12, 10, 6, 6, 8, 6, 6, 4, 2, 4, 6, 2, 6, 10, 2, 10, 2, 10, 2, 4, 2, 4, 6, 2, 6, 4, 2, 4, 6, 8, 6, 6, 6, 4, 6, 8, 4, 2, 4, 2, 4, 8, 6, 4, 8, 4, 6, 2, 6, 10, 2, 4, 6, 8, 4, 2, 4, 2, 10, 2, 10, 2, 4, 2, 4, 6, 12, 2, 4, 6, 8, 6, 4, 2, 6, 10, 8, 4, 6, 6, 8, 6, 4, 6, 2, 4, 6, 2, 6, 6, 4, 6, 6, 2, 12, 4, 2, 10, 2, 10, 2, 4, 2, 4, 8, 6, 4, 2, 10, 6, 2, 6, 4, 2, 6, 4, 6, 8, 4, 2, 6, 12, 6, 4, 6, 2, 4, 6, 2, 12, 4, 2, 4, 8, 6, 4, 2, 4, 2, 10, 12, 6, 2, 4, 6, 2, 6, 4, 2, 4, 12, 2, 6, 4, 2, 10, 6, 8, 6, 4, 2, 4, 8, 6, 10, 2, 4, 6, 2, 12, 6, 4, 6, 2, 6, 4, 2, 4, 2, 22, 2, 4, 2, 10, 2, 6, 4, 2, 4, 6, 6, 2, 10, 2, 6, 4, 14, 4, 6, 2, 4, 8, 6, 4, 6, 2, 4, 6, 2, 6, 6, 4, 2, 4, 6, 8, 4, 2, 4, 12, 2, 12, 4, 2, 10, 2, 6, 4, 2, 4, 6, 6, 2, 6, 4, 2, 6, 4, 6, 8, 6, 4, 2, 4, 18, 6, 2, 10, 2, 6, 6, 4, 2, 4, 8, 10, 2, 4, 2, 12, 10, 2, 4, 2, 4, 6, 2, 6, 4, 12, 6, 2, 6, 4, 8, 4, 6, 8, 4, 2, 4, 6, 8, 6, 10, 2, 4, 6, 8, 6, 4, 2, 4, 6, 2, 6, 4, 2, 6, 10, 2, 10, 2, 4, 6, 6, 8, 4, 2, 4, 12, 2, 6, 6, 6, 4, 6, 12, 2, 4, 2, 4, 8, 6, 4, 6, 2, 4, 8, 6, 10, 2, 4, 6, 2, 6, 4, 2, 4, 2, 10, 2, 10, 2, 10, 6, 2, 6, 10, 6, 6, 2, 6, 4, 2, 6, 4, 6, 8, 4, 2, 6, 4, 14, 4, 6, 2, 4, 6, 8, 6, 4, 2, 10, 2, 6, 4, 2, 4, 12, 2, 10, 2, 4, 2, 4, 8, 6, 6, 4, 6, 6, 2, 10, 8, 4, 6, 8, 4, 2, 4, 2, 4, 8, 6, 4, 6, 6, 6, 2, 6, 6, 4, 2, 4, 6, 2, 6, 4, 2, 6, 10, 2, 10, 8, 4, 6, 2, 6, 4, 2, 4, 6, 6, 8, 4, 2, 6, 10, 8, 4, 2, 4, 2, 4, 8, 10, 6, 2, 12, 6, 6, 4, 6, 6, 2, 6, 4, 6, 2, 10, 2, 12, 4, 2, 4, 6, 2, 10, 2, 4, 6, 6, 2, 6, 6, 6, 4, 14, 4, 2, 4, 2, 4, 8, 6, 4, 8, 4, 6, 2, 6, 6, 6, 4, 6, 8, 4, 6, 2, 10, 2, 10, 2, 4, 2, 4, 6, 2, 10, 2, 4, 6, 14, 4, 2, 6, 4, 6, 8, 4, 6, 2, 12, 6, 4, 6, 6, 6, 2, 6, 6, 4, 6, 6, 2, 6, 6, 4, 2, 10, 2, 10, 2, 4, 2, 4, 6, 2, 6, 4, 2, 10, 8, 6, 4, 2, 6, 4, 6, 8, 4, 2, 4, 2, 12, 6, 4, 8, 4, 6, 2, 16, 2, 4, 8, 6, 4, 2, 4, 2, 10, 2, 10, 6, 2, 4, 6, 8, 4, 2, 4, 6, 6, 2, 6, 4, 2, 16, 8, 6, 4, 6, 8, 6, 4, 6, 2, 4, 6, 2, 6, 6, 6, 4, 6, 2, 10, 2, 4, 2, 10, 12, 2, 4, 2, 12, 6, 4, 2, 4, 6, 6, 2, 10, 2, 6, 4, 14, 4, 2, 6, 4, 8, 6, 4, 6, 2, 4, 6, 2, 6, 6, 4, 2, 4, 6, 2, 6, 4, 2, 4, 12, 14, 4, 2, 4, 6, 2, 6, 4, 2, 4, 12, 2, 6, 4, 2, 10, 6, 8, 4, 2, 4, 2, 4, 14, 10, 2, 10, 2, 12, 4, 2, 4, 6, 2, 10, 2, 4, 2, 12, 10, 2, 4, 2, 4, 6, 2, 6, 4, 6, 6, 6, 2, 6, 4, 2, 6, 4, 6, 8, 4, 6, 6, 8, 6, 10, 2, 4, 6, 2, 6, 6, 4, 2, 4, 6, 8, 4, 2, 6, 10, 2, 10, 2, 4, 2, 10, 8, 4, 2, 4, 12, 2, 6, 4, 2, 6, 4, 6, 14, 4, 2, 4, 8, 10, 6, 2, 4, 6, 2, 6, 10, 2, 4, 8, 6, 4, 2, 4, 2, 
10, 2, 10, 2, 4, 6, 6, 2, 6, 6, 10, 6, 2, 6, 4, 8, 4, 6, 8, 4, 2, 6, 4, 8, 6, 4, 6, 2, 4, 6, 8, 6, 4, 2, 10, 2, 6, 4, 2, 4, 2, 10, 2, 10, 2, 4, 6, 8, 6, 4, 2, 4, 6, 6, 2, 6, 12, 4, 6, 12, 2, 4, 2, 4, 8, 6, 4, 6, 6, 8, 6, 6, 4, 2, 4, 6, 2, 6, 4, 2, 4, 2, 10, 2, 10, 2, 6, 4, 6, 2, 6, 4, 6, 6, 6, 8, 4, 2, 6, 10, 8, 4, 2, 4, 2, 4, 18, 6, 2, 4, 8, 6, 6, 4, 2, 10, 2, 6, 4, 6, 12, 2, 10, 2, 4, 2, 4, 6, 2, 6, 6, 4, 6, 6, 2, 12, 6, 4, 6, 8, 4, 2, 4, 2, 4, 8, 6, 4, 8, 4, 6, 2, 6, 6, 4, 2, 4, 6, 8, 4, 2, 6, 10, 2, 10, 6, 2, 4, 6, 2, 10, 2, 4, 6, 8, 6, 4, 2, 6, 4, 6, 8, 4, 6, 2, 4, 8, 6, 4, 6, 2, 10, 2, 6, 6, 4, 6, 6, 2, 6, 6, 4, 2, 10, 2, 12, 4, 2, 4, 6, 2, 10, 2, 10, 6, 2, 6, 4, 2, 6, 4, 14, 4, 2, 4, 2, 12, 6, 4, 6, 2, 4, 6, 2, 12, 6, 4, 8, 6, 4, 6, 2, 10, 2, 10, 6, 2, 4, 6, 2, 6, 4, 2, 4, 6, 6, 8, 4, 2, 10, 6, 8, 6, 4, 2, 12, 6, 4, 6, 6, 6, 2, 6, 6, 6, 4, 6, 2, 6, 6, 4, 2, 10, 12, 2, 4, 2, 10, 2, 6, 4, 2, 4, 6, 8, 10, 2, 6, 4, 14, 4, 2, 4, 2, 4, 8, 6, 4, 8, 4, 6, 2, 6, 10, 2, 4, 6, 2, 6, 4, 2, 4, 12, 2, 12, 4, 2, 4, 6, 8, 4, 2, 4, 6, 6, 2, 6, 4, 2, 6, 10, 8, 4, 2, 4, 6, 14, 4, 6, 2, 10, 2, 6, 6, 4, 2, 4, 6, 2, 10, 2, 4, 2, 12, 10, 2, 4, 2, 4, 8, 6, 4, 6, 6, 6, 2, 6, 4, 2, 6, 4, 6, 8, 4, 2, 10, 8, 6, 10, 2, 4, 6, 2, 6, 6, 4, 2, 4, 6, 2, 6, 4, 2, 6, 10, 12, 2, 4, 2, 4, 6, 8, 4, 2, 4, 12, 2, 6, 4, 2, 10, 6, 12, 2, 4, 2, 4, 8, 6, 10, 2, 4, 6, 2, 16, 2, 4, 6, 2, 6, 4, 2, 4, 2, 12, 10, 2, 4, 6, 6, 2, 6, 6, 4, 6, 6, 2, 6, 4, 2, 6, 4, 6, 8, 4, 8, 4, 8, 6, 4, 6, 2, 4, 6, 8, 6, 4, 2, 10, 8, 4, 2, 4, 2, 10, 2, 10, 2, 4, 2, 12, 6, 4, 2, 4, 6, 6, 2, 6, 4, 8, 4, 6, 8, 6, 4, 2, 4, 8, 10, 6, 6, 6, 2, 6, 6, 4, 2, 4, 8, 6, 4, 2, 4, 2, 10, 2, 10, 2, 6, 4, 6, 2, 6, 4, 2, 10, 6, 8, 4, 8, 10, 8, 4, 2, 4, 2, 4, 8, 10, 6, 2, 4, 14, 6, 4, 2, 4, 6, 2, 6, 4, 6, 2, 10, 2, 10, 2, 4, 6, 6, 2, 6, 4, 2, 4, 6, 6, 2, 6, 6, 6, 4, 6, 12, 2, 4, 2, 4, 8, 6, 4, 8, 4, 8, 6, 6, 4, 2, 4, 6, 8, 4, 2, 4, 2, 10, 2, 10, 2, 6, 4, 6, 2, 10, 6, 6, 8, 6, 4, 2, 6, 4, 6, 8, 4, 6, 2, 4, 14, 4, 6, 2, 4, 6, 2, 6, 6, 4, 12, 2, 6, 6, 4, 12, 2, 10, 2, 4, 2, 4, 6, 2, 6, 6, 10, 6, 2, 10, 2, 6, 4, 6, 8, 4, 2, 4, 2, 12, 6, 4, 6, 2, 4, 6, 2, 12, 4, 2, 4, 8, 6, 4, 2, 6, 10, 2, 10, 6, 2, 4, 6, 2, 6, 4, 2, 4, 6, 6, 2, 6, 4, 2, 10, 6, 8, 6, 4, 2, 4, 8, 6, 4, 6, 2, 10, 2, 6, 6, 10, 6, 2, 6, 4, 2, 4, 2, 10, 14, 4, 2, 10, 2, 10, 2, 4, 6, 6, 2, 10, 2, 6, 4, 14, 4, 2, 4, 2, 4, 8, 6, 4, 6, 2, 4, 6, 2, 6, 6, 6, 4, 6, 2, 6, 4, 6, 12, 2, 12, 4, 2, 4, 6, 2, 6, 4, 2, 4, 6, 6, 8, 4, 2, 6, 4, 6, 8, 4, 2, 4, 2, 18, 4, 6, 12, 2, 6, 6, 4, 2, 4, 6, 2, 12, 4, 2, 12, 10, 2, 4, 2, 4, 6, 2, 6, 4, 6, 6, 8, 6, 4, 2, 6, 4, 6, 8, 4, 2, 4, 6, 8, 6, 12, 4, 6, 2, 6, 10, 2, 4, 6, 2, 6, 4, 2, 6, 10, 2, 10, 2, 4, 2, 4, 6, 8, 4, 2, 4, 12, 2, 6, 4, 2, 6, 10, 12, 2, 4, 6, 8, 6, 4, 6, 2, 4, 6, 2, 6, 10, 2, 4, 6, 2, 10, 2, 4, 2, 10, 2, 10, 2, 4, 6, 8, 6, 6, 4, 6, 6, 2, 6, 4, 2, 6, 4, 6, 8, 4, 2, 6, 4, 8, 6, 4, 6, 2, 4, 6, 8, 6, 4, 2, 10, 2, 6, 4, 2, 4, 2, 10, 12, 2, 4, 2, 4, 8, 6, 4, 2, 4, 12, 2, 6, 4, 12, 6, 8, 4, 2, 4, 2, 4, 8, 6, 10, 6, 6, 2, 12, 4, 2, 4, 6, 2, 6, 4, 2, 4, 2, 12, 10, 2, 6, 4, 6, 2, 6, 4, 2, 4, 6, 6, 8, 4, 2, 6, 10, 8, 4, 6, 2, 4, 8, 10, 6, 2, 4, 8, 6, 6, 4, 2, 4, 6, 8, 4, 6, 2, 10, 2, 10, 2, 4, 2, 10, 2, 6, 4, 2, 4, 6, 6, 2, 6, 6, 6, 4, 6, 8, 6, 4, 2, 4, 8, 10, 8, 4, 6, 2, 6, 6, 4, 2, 4, 14, 4, 2, 4, 2, 10, 2, 10, 2, 4, 2, 4, 6, 2, 10, 2, 10, 8, 6, 4, 8, 4, 6, 8, 4, 6, 2, 4, 8, 6, 4, 6, 2, 4, 6, 8, 6, 4, 6, 6, 2, 6, 6, 4, 2, 10, 2, 10, 2, 4, 6, 6, 2, 6, 4, 2, 10, 6, 2, 6, 6, 6, 4, 6, 12, 2, 4, 2, 12, 6, 4, 6, 2, 4, 8, 12, 4, 2, 4, 8, 6, 4, 2, 4, 2, 10, 2, 10, 8, 4, 6, 2, 6, 4, 6, 
6, 6, 2, 6, 4, 2, 10, 6, 8, 6, 4, 2, 4, 14, 4, 6, 2, 4, 6, 2, 6, 6, 6, 10, 2, 6, 4, 2, 4, 12, 12, 2, 4, 2, 10, 2, 6, 6, 4, 6, 6, 2, 10, 2, 6, 4, 14, 4, 2, 4, 2, 4, 8, 6, 4, 6, 2, 4, 6, 2, 6, 6, 4, 2, 4, 6, 2, 6, 4, 2, 16, 2, 16)))
p=17
while p <= 999983:
    if is_prime(p):
        P.append(p)
    p += next(i)
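
# --- illustrative sketch, not part of the original file ---
# The gap lists above look like wheel-factorization cycles: the 48-gap cycle
# quoted near the top matches the wheel modulo 2*3*5*7 = 210 (48 residues
# coprime to 210), the cycle used with p=13 appears to be the next level
# (modulo 2*3*5*7*11 = 2310), and "next (5760 elements)" matches the wheel
# modulo 2*3*5*7*11*13 = 30030 (5760 coprime residues). The hypothetical
# helper below regenerates such a cycle for any set of base primes.
from math import prod  # Python 3.8+

def wheel_gaps(base_primes, start):
    """Gaps between consecutive integers coprime to every base prime,
    covering one full wheel period and beginning at `start`."""
    m = prod(base_primes)
    residues = [r for r in range(start, start + m + 1)
                if all(r % p for p in base_primes)]
    return [b - a for a, b in zip(residues, residues[1:])]

gaps = wheel_gaps([2, 3, 5, 7], start=11)
assert len(gaps) == 48 and sum(gaps) == 210
assert gaps[:8] == [2, 4, 2, 4, 6, 2, 6, 4]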
-----
n = 1000000
c = n % 6 > 1
n = {0: n, 1: n-1, 2: n+4, 3: n+3, 4: n+2, 5: n+1}[n % 6]
L = [True] * (n // 3)
L[0] = False
for i in range(int(n ** .5) // 3 + 1):
    if L[i]:
        j = (3*i+1) | 1
        h = j * j
        L[h//3::2*j] = [False] * ((n//6 - 1 - h//6)//j + 1)
        L[(h + 4*j - 2*j*(i % 2))//3::2*j] = [False] * (1 + (n//6 - 1 - (h + 4*j - 2*j*(i % 2))//6)//j)
P = [2, 3] + [(3*i+1) | 1 for i in filter(lambda x: L[x], range(1, n//3 - c))]
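
# --- illustrative sketch, not part of the original file ---
# In the sieve above only integers coprime to 2 and 3 get a flag: index k maps
# to the number (3*k + 1) | 1, i.e. 5, 7, 11, 13, 17, 19, ..., which is why
# n // 3 booleans suffice. A quick self-contained check of that index mapping:
def coprime_to_6(limit):
    # naive reference: numbers >= 5 below `limit` divisible by neither 2 nor 3
    return [x for x in range(5, limit) if x % 2 and x % 3]

mapped = [(3 * k + 1) | 1 for k in range(1, 100)]
assert mapped == coprime_to_6(mapped[-1] + 1)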
| 499.243902
| 18,001
| 0.367092
| 6,487
| 20,469
| 1.158008
| 0.011407
| 0.189563
| 0.170128
| 0.095314
| 0.942758
| 0.935304
| 0.926251
| 0.907215
| 0.875
| 0.833733
| 0
| 0.509402
| 0.314085
| 20,469
| 40
| 18,002
| 511.725
| 0.025641
| 0.007084
| 0
| 0.068966
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|