**Schema**

| field | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
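The dump itself does not say how this table is stored or loaded; below is a minimal pandas sketch, assuming the rows live in a Parquet file (the path `data.parquet` is hypothetical, not given by this dump).

```python
# Minimal sketch, assuming the rows above sit in a Parquet file.
# "data.parquet" is a hypothetical path, not given by this dump.
import pandas as pd

df = pd.read_parquet("data.parquet")

# Raw per-file statistics carry the *_quality_signal suffix; each one
# has an unsuffixed companion column (see the records below).
signal_cols = [c for c in df.columns if c.endswith("_quality_signal")]
flag_cols = [c.removesuffix("_quality_signal") for c in signal_cols]

# Example: keep only files where no filter column fired at all.
clean = df[df["hits"] == 0]
print(len(df), len(clean), len(signal_cols))
```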
**Record 1**

| field | value |
|---|---|
| hexsha | 1e9c781b419e835c7eced196ef491e5128f9bbdb |
| size | 62 |
| ext | py |
| lang | Python |

| | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | ACM-Solution/powerof2.py | ACM-Solution/powerof2.py | ACM-Solution/powerof2.py |
| repo_name | wasi0013/Python-CodeBase | wasi0013/Python-CodeBase | wasi0013/Python-CodeBase |
| repo_head_hexsha | 4a7a36395162f68f84ded9085fa34cc7c9b19233 | 4a7a36395162f68f84ded9085fa34cc7c9b19233 | 4a7a36395162f68f84ded9085fa34cc7c9b19233 |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | 2 | 1 | 1 |
| event_min_datetime | 2016-04-26T15:40:40.000Z | 2016-04-26T15:44:15.000Z | 2018-10-02T16:12:19.000Z |
| event_max_datetime | 2018-07-18T10:16:42.000Z | 2016-04-29T14:44:40.000Z | 2018-10-02T16:12:19.000Z |

content:

```python
exec("x=0;N=int(input());print(2**(sum()+1));"*int(input()))
```

| field | value |
|---|---|
| avg_line_length | 31 |
| max_line_length | 61 |
| alphanum_fraction | 0.532258 |

| signal | *_quality_signal | filter |
|---|---|---|
| qsc_code_num_words | 12 | 1 |
| qsc_code_num_chars | 62 | 0 |
| qsc_code_mean_word_length | 2.75 | 0 |
| qsc_code_frac_words_unique | 0.833333 | null |
| qsc_code_frac_chars_top_2grams | 0.484848 | 1 |
| qsc_code_frac_chars_top_3grams | 0 | 0 |
| qsc_code_frac_chars_top_4grams | 0 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0 | 0 |
| qsc_code_frac_chars_dupe_7grams | 0 | 0 |
| qsc_code_frac_chars_dupe_8grams | 0 | 0 |
| qsc_code_frac_chars_dupe_9grams | 0 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0 | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.05 | 0 |
| qsc_code_frac_chars_whitespace | 0.032258 | 0 |
| qsc_code_size_file_byte | 62 | 0 |
| qsc_code_num_lines | 1 | 1 |
| qsc_code_num_chars_line_max | 62 | 0 |
| qsc_code_num_chars_line_mean | 62 | 0 |
| qsc_code_frac_chars_alphabet | 0.5 | 0 |
| qsc_code_frac_chars_comments | 0 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.639344 | 1 |
| qsc_code_frac_chars_long_word_length | 0.639344 | 1 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 | 0 |
| qsc_codepython_cate_var_zero | true | 1 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0 | 0 |
| qsc_codepython_frac_lines_print | 1 | 1 |

| field | value |
|---|---|
| effective | 0 |
| hits | 7 |
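A pattern worth noting: every `qsc_*_quality_signal` column is paired with an unsuffixed `qsc_*` column holding 0, 1, or null, and the seven set flags in this record (`qsc_code_num_words`, `qsc_code_frac_chars_top_2grams`, `qsc_code_num_lines`, `qsc_code_frac_chars_string_length`, `qsc_code_frac_chars_long_word_length`, `qsc_codepython_cate_var_zero`, `qsc_codepython_frac_lines_print`) match `hits` = 7. That is consistent with `hits` counting triggered filters, though the dump does not state it; a sketch under that assumption:

```python
# Sketch: recompute `hits` as the number of set filter columns.
# Assumes the unsuffixed qsc_* columns are binary filter outcomes;
# the dump suggests this (flag count == hits) but does not say so.
def recompute_hits(row: dict) -> int:
    flags = (v for k, v in row.items()
             if k.startswith("qsc_") and not k.endswith("_quality_signal"))
    return sum(1 for v in flags if v == 1)
```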
**Record 2**

| field | value |
|---|---|
| hexsha | 94ab75228b05a2963c183365296413528d246a91 |
| size | 92 |
| ext | py |
| lang | Python |

| | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | parameters_8000.py | parameters_8000.py | parameters_8000.py |
| repo_name | ayusharora99/E-CollegeBooks | ayusharora99/E-CollegeBooks | ayusharora99/E-CollegeBooks |
| repo_head_hexsha | bc9897215a9ed4d17e372a0371318967e3d480eb | bc9897215a9ed4d17e372a0371318967e3d480eb | bc9897215a9ed4d17e372a0371318967e3d480eb |
| repo_licenses | ["BSD-3-Clause"] | ["BSD-3-Clause"] | ["BSD-3-Clause"] |
| count | 2 | null | 3 |
| event_min_datetime | 2018-12-08T23:59:12.000Z | null | 2018-12-08T23:59:17.000Z |
| event_max_datetime | 2019-02-13T23:04:36.000Z | null | 2019-02-13T23:04:38.000Z |

content:

```python
password="pbkdf2(1000,20,sha512)$ba0f5757f7b448ec$d9c2e5887e65f45311047c8ff518f200ee11b98f"
```

| field | value |
|---|---|
| avg_line_length | 46 |
| max_line_length | 91 |
| alphanum_fraction | 0.891304 |

| signal | *_quality_signal | filter |
|---|---|---|
| qsc_code_num_words | 7 | 1 |
| qsc_code_num_chars | 92 | 0 |
| qsc_code_mean_word_length | 11.714286 | 1 |
| qsc_code_frac_words_unique | 1 | null |
| qsc_code_frac_chars_top_2grams | 0 | 0 |
| qsc_code_frac_chars_top_3grams | 0 | 0 |
| qsc_code_frac_chars_top_4grams | 0 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0 | 0 |
| qsc_code_frac_chars_dupe_7grams | 0 | 0 |
| qsc_code_frac_chars_dupe_8grams | 0 | 0 |
| qsc_code_frac_chars_dupe_9grams | 0 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0 | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.505495 | 1 |
| qsc_code_frac_chars_whitespace | 0.01087 | 0 |
| qsc_code_size_file_byte | 92 | 0 |
| qsc_code_num_lines | 1 | 1 |
| qsc_code_num_chars_line_max | 92 | 0 |
| qsc_code_num_chars_line_mean | 92 | 0 |
| qsc_code_frac_chars_alphabet | 0.395604 | 1 |
| qsc_code_frac_chars_comments | 0 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.869565 | 1 |
| qsc_code_frac_chars_long_word_length | 0.869565 | 1 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 | 0 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 1 | 1 |
| qsc_codepython_frac_lines_import | 0 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |

| field | value |
|---|---|
| effective | 0 |
| hits | 8 |
**Record 3**

| field | value |
|---|---|
| hexsha | 94ac6044959d983d47c287f915e389ed8d95187d |
| size | 328 |
| ext | py |
| lang | Python |

| | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | src/omfgp/cli.py | src/omfgp/cli.py | src/omfgp/cli.py |
| repo_name | miketlk/omfgp | miketlk/omfgp | miketlk/omfgp |
| repo_head_hexsha | 6e5a0f52f2688d81bde3e5169a37311c9517fe1d | 6e5a0f52f2688d81bde3e5169a37311c9517fe1d | 6e5a0f52f2688d81bde3e5169a37311c9517fe1d |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | null | null | 1 |
| event_min_datetime | null | null | 2021-08-16T10:19:52.000Z |
| event_max_datetime | null | null | 2021-08-16T10:19:52.000Z |

content:

```python
DEFAULT_KEY = {
    "ENC": b"\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4A\x4B\x4C\x4D\x4E\x4F",
    "MAC": b"\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4A\x4B\x4C\x4D\x4E\x4F",
    "DEK": b"\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4A\x4B\x4C\x4D\x4E\x4F",
}

def list(key=DEFAULT_KEY):
    """Lists all applets"""
    print(key)
```

| field | value |
|---|---|
| avg_line_length | 36.444444 |
| max_line_length | 79 |
| alphanum_fraction | 0.643293 |

| signal | *_quality_signal | filter |
|---|---|---|
| qsc_code_num_words | 66 | 0 |
| qsc_code_num_chars | 328 | 0 |
| qsc_code_mean_word_length | 3.166667 | 0 |
| qsc_code_frac_words_unique | 0.424242 | null |
| qsc_code_frac_chars_top_2grams | 0.057416 | 0 |
| qsc_code_frac_chars_top_3grams | 0.100478 | 0 |
| qsc_code_frac_chars_top_4grams | 0.143541 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0.703349 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0.703349 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.703349 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.703349 | 1 |
| qsc_code_frac_chars_dupe_9grams | 0.703349 | 1 |
| qsc_code_frac_chars_dupe_10grams | 0.703349 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.267123 | 1 |
| qsc_code_frac_chars_whitespace | 0.109756 | 0 |
| qsc_code_size_file_byte | 328 | 0 |
| qsc_code_num_lines | 9 | 1 |
| qsc_code_num_chars_line_max | 80 | 0 |
| qsc_code_num_chars_line_mean | 36.444444 | 0 |
| qsc_code_frac_chars_alphabet | 0.44863 | 1 |
| qsc_code_frac_chars_comments | 0.051829 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0.428571 | 1 |
| qsc_code_frac_chars_string_length | 0.656863 | 1 |
| qsc_code_frac_chars_long_word_length | 0.627451 | 1 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 1 | 1 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0.142857 | 0 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.142857 | 0 |
| qsc_codepython_frac_lines_print | 0.142857 | 0 |

| field | value |
|---|---|
| effective | 0 |
| hits | 12 |
**Record 4**

| field | value |
|---|---|
| hexsha | 94f3b18944a11c75445cef1a85dda665ee609119 |
| size | 46,746 |
| ext | py |
| lang | Python |

| | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | lib/jnpr/healthbot/swagger/api/organization_api.py | lib/jnpr/healthbot/swagger/api/organization_api.py | lib/jnpr/healthbot/swagger/api/organization_api.py |
| repo_name | Juniper/healthbot-py-client | Juniper/healthbot-py-client | Juniper/healthbot-py-client |
| repo_head_hexsha | 49f0884b5d01ac8430aa7ed4c9acb4e7a2b717a6 | 49f0884b5d01ac8430aa7ed4c9acb4e7a2b717a6 | 49f0884b5d01ac8430aa7ed4c9acb4e7a2b717a6 |
| repo_licenses | ["Apache-2.0"] | ["Apache-2.0"] | ["Apache-2.0"] |
| count | 10 | 5 | 4 |
| event_min_datetime | 2019-10-23T12:54:37.000Z | 2019-09-30T04:29:25.000Z | 2019-09-30T01:17:48.000Z |
| event_max_datetime | 2022-02-07T19:24:30.000Z | 2022-02-16T12:21:06.000Z | 2020-08-25T07:27:54.000Z |

content:

```python
# coding: utf-8
"""
Paragon Insights APIs
API interface for PI application # noqa: E501
OpenAPI spec version: 4.0.0
Contact: healthbot-feedback@juniper.net
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from jnpr.healthbot.swagger.api_client import ApiClient
class OrganizationApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_healthbot_organization_site_edge_edge_by_id(self, organization_name, site_name, edge_name, edge, **kwargs): # noqa: E501
"""Create edge by ID # noqa: E501
Create operation of resource: edge # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_healthbot_organization_site_edge_edge_by_id(organization_name, site_name, edge_name, edge, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str organization_name: ID of organization-name (required)
:param str site_name: ID of site-name (required)
:param str edge_name: ID of edge-name (required)
:param EdgeSchema edge: edgebody object (required)
:param str x_iam_token: authentication header object
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_healthbot_organization_site_edge_edge_by_id_with_http_info(organization_name, site_name, edge_name, edge, **kwargs) # noqa: E501
else:
(data) = self.create_healthbot_organization_site_edge_edge_by_id_with_http_info(organization_name, site_name, edge_name, edge, **kwargs) # noqa: E501
return data
def create_healthbot_organization_site_edge_edge_by_id_with_http_info(self, organization_name, site_name, edge_name, edge, **kwargs): # noqa: E501
"""Create edge by ID # noqa: E501
Create operation of resource: edge # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_healthbot_organization_site_edge_edge_by_id_with_http_info(organization_name, site_name, edge_name, edge, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str organization_name: ID of organization-name (required)
:param str site_name: ID of site-name (required)
:param str edge_name: ID of edge-name (required)
:param EdgeSchema edge: edgebody object (required)
:param str x_iam_token: authentication header object
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['organization_name', 'site_name', 'edge_name', 'edge', 'x_iam_token'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_healthbot_organization_site_edge_edge_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'organization_name' is set
if ('organization_name' not in params or
params['organization_name'] is None):
raise ValueError("Missing the required parameter `organization_name` when calling `create_healthbot_organization_site_edge_edge_by_id`") # noqa: E501
# verify the required parameter 'site_name' is set
if ('site_name' not in params or
params['site_name'] is None):
raise ValueError("Missing the required parameter `site_name` when calling `create_healthbot_organization_site_edge_edge_by_id`") # noqa: E501
# verify the required parameter 'edge_name' is set
if ('edge_name' not in params or
params['edge_name'] is None):
raise ValueError("Missing the required parameter `edge_name` when calling `create_healthbot_organization_site_edge_edge_by_id`") # noqa: E501
# verify the required parameter 'edge' is set
if ('edge' not in params or
params['edge'] is None):
raise ValueError("Missing the required parameter `edge` when calling `create_healthbot_organization_site_edge_edge_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'organization_name' in params:
path_params['organization_name'] = params['organization_name'] # noqa: E501
if 'site_name' in params:
path_params['site_name'] = params['site_name'] # noqa: E501
if 'edge_name' in params:
path_params['edge_name'] = params['edge_name'] # noqa: E501
query_params = []
header_params = {}
if 'x_iam_token' in params:
header_params['x-iam-token'] = params['x_iam_token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'edge' in params:
body_params = params['edge']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/config/organization/{organization_name}/site/{site_name}/edge/{edge_name}/', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_healthbot_organization_site_site_by_id(self, organization_name, site_name, site, **kwargs): # noqa: E501
"""Create site by ID # noqa: E501
Create operation of resource: site # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_healthbot_organization_site_site_by_id(organization_name, site_name, site, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str organization_name: ID of organization-name (required)
:param str site_name: ID of site-name (required)
:param SiteSchema site: sitebody object (required)
:param str x_iam_token: authentication header object
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_healthbot_organization_site_site_by_id_with_http_info(organization_name, site_name, site, **kwargs) # noqa: E501
else:
(data) = self.create_healthbot_organization_site_site_by_id_with_http_info(organization_name, site_name, site, **kwargs) # noqa: E501
return data
def create_healthbot_organization_site_site_by_id_with_http_info(self, organization_name, site_name, site, **kwargs): # noqa: E501
"""Create site by ID # noqa: E501
Create operation of resource: site # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_healthbot_organization_site_site_by_id_with_http_info(organization_name, site_name, site, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str organization_name: ID of organization-name (required)
:param str site_name: ID of site-name (required)
:param SiteSchema site: sitebody object (required)
:param str x_iam_token: authentication header object
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['organization_name', 'site_name', 'site', 'x_iam_token'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_healthbot_organization_site_site_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'organization_name' is set
if ('organization_name' not in params or
params['organization_name'] is None):
raise ValueError("Missing the required parameter `organization_name` when calling `create_healthbot_organization_site_site_by_id`") # noqa: E501
# verify the required parameter 'site_name' is set
if ('site_name' not in params or
params['site_name'] is None):
raise ValueError("Missing the required parameter `site_name` when calling `create_healthbot_organization_site_site_by_id`") # noqa: E501
# verify the required parameter 'site' is set
if ('site' not in params or
params['site'] is None):
raise ValueError("Missing the required parameter `site` when calling `create_healthbot_organization_site_site_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'organization_name' in params:
path_params['organization_name'] = params['organization_name'] # noqa: E501
if 'site_name' in params:
path_params['site_name'] = params['site_name'] # noqa: E501
query_params = []
header_params = {}
if 'x_iam_token' in params:
header_params['x-iam-token'] = params['x_iam_token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'site' in params:
body_params = params['site']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/config/organization/{organization_name}/site/{site_name}/', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_healthbot_organization_site_edge_edge_by_id(self, organization_name, site_name, edge_name, **kwargs): # noqa: E501
"""Delete edge by ID # noqa: E501
Delete operation of resource: edge # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_healthbot_organization_site_edge_edge_by_id(organization_name, site_name, edge_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str organization_name: ID of organization-name (required)
:param str site_name: ID of site-name (required)
:param str edge_name: ID of edge-name (required)
:param str x_iam_token: authentication header object
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_healthbot_organization_site_edge_edge_by_id_with_http_info(organization_name, site_name, edge_name, **kwargs) # noqa: E501
else:
(data) = self.delete_healthbot_organization_site_edge_edge_by_id_with_http_info(organization_name, site_name, edge_name, **kwargs) # noqa: E501
return data
def delete_healthbot_organization_site_edge_edge_by_id_with_http_info(self, organization_name, site_name, edge_name, **kwargs): # noqa: E501
"""Delete edge by ID # noqa: E501
Delete operation of resource: edge # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_healthbot_organization_site_edge_edge_by_id_with_http_info(organization_name, site_name, edge_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str organization_name: ID of organization-name (required)
:param str site_name: ID of site-name (required)
:param str edge_name: ID of edge-name (required)
:param str x_iam_token: authentication header object
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['organization_name', 'site_name', 'edge_name', 'x_iam_token'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_healthbot_organization_site_edge_edge_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'organization_name' is set
if ('organization_name' not in params or
params['organization_name'] is None):
raise ValueError("Missing the required parameter `organization_name` when calling `delete_healthbot_organization_site_edge_edge_by_id`") # noqa: E501
# verify the required parameter 'site_name' is set
if ('site_name' not in params or
params['site_name'] is None):
raise ValueError("Missing the required parameter `site_name` when calling `delete_healthbot_organization_site_edge_edge_by_id`") # noqa: E501
# verify the required parameter 'edge_name' is set
if ('edge_name' not in params or
params['edge_name'] is None):
raise ValueError("Missing the required parameter `edge_name` when calling `delete_healthbot_organization_site_edge_edge_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'organization_name' in params:
path_params['organization_name'] = params['organization_name'] # noqa: E501
if 'site_name' in params:
path_params['site_name'] = params['site_name'] # noqa: E501
if 'edge_name' in params:
path_params['edge_name'] = params['edge_name'] # noqa: E501
query_params = []
header_params = {}
if 'x_iam_token' in params:
header_params['x-iam-token'] = params['x_iam_token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/config/organization/{organization_name}/site/{site_name}/edge/{edge_name}/', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_healthbot_organization_site_site_by_id(self, organization_name, site_name, **kwargs): # noqa: E501
"""Delete site by ID # noqa: E501
Delete operation of resource: site # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_healthbot_organization_site_site_by_id(organization_name, site_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str organization_name: ID of organization-name (required)
:param str site_name: ID of site-name (required)
:param str x_iam_token: authentication header object
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_healthbot_organization_site_site_by_id_with_http_info(organization_name, site_name, **kwargs) # noqa: E501
else:
(data) = self.delete_healthbot_organization_site_site_by_id_with_http_info(organization_name, site_name, **kwargs) # noqa: E501
return data
def delete_healthbot_organization_site_site_by_id_with_http_info(self, organization_name, site_name, **kwargs): # noqa: E501
"""Delete site by ID # noqa: E501
Delete operation of resource: site # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_healthbot_organization_site_site_by_id_with_http_info(organization_name, site_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str organization_name: ID of organization-name (required)
:param str site_name: ID of site-name (required)
:param str x_iam_token: authentication header object
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['organization_name', 'site_name', 'x_iam_token'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_healthbot_organization_site_site_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'organization_name' is set
if ('organization_name' not in params or
params['organization_name'] is None):
raise ValueError("Missing the required parameter `organization_name` when calling `delete_healthbot_organization_site_site_by_id`") # noqa: E501
# verify the required parameter 'site_name' is set
if ('site_name' not in params or
params['site_name'] is None):
raise ValueError("Missing the required parameter `site_name` when calling `delete_healthbot_organization_site_site_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'organization_name' in params:
path_params['organization_name'] = params['organization_name'] # noqa: E501
if 'site_name' in params:
path_params['site_name'] = params['site_name'] # noqa: E501
query_params = []
header_params = {}
if 'x_iam_token' in params:
header_params['x-iam-token'] = params['x_iam_token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/config/organization/{organization_name}/site/{site_name}/', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def retrieve_healthbot_organization_site_edge_edge_by_id(self, organization_name, site_name, edge_name, **kwargs): # noqa: E501
"""Retrieve edge by ID # noqa: E501
Retrieve operation of resource: edge # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.retrieve_healthbot_organization_site_edge_edge_by_id(organization_name, site_name, edge_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str organization_name: ID of organization-name (required)
:param str site_name: ID of site-name (required)
:param str edge_name: ID of edge-name (required)
:param str x_iam_token: authentication header object
:param bool working: true queries undeployed configuration
:return: EdgeSchema
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.retrieve_healthbot_organization_site_edge_edge_by_id_with_http_info(organization_name, site_name, edge_name, **kwargs) # noqa: E501
else:
(data) = self.retrieve_healthbot_organization_site_edge_edge_by_id_with_http_info(organization_name, site_name, edge_name, **kwargs) # noqa: E501
return data
def retrieve_healthbot_organization_site_edge_edge_by_id_with_http_info(self, organization_name, site_name, edge_name, **kwargs): # noqa: E501
"""Retrieve edge by ID # noqa: E501
Retrieve operation of resource: edge # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.retrieve_healthbot_organization_site_edge_edge_by_id_with_http_info(organization_name, site_name, edge_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str organization_name: ID of organization-name (required)
:param str site_name: ID of site-name (required)
:param str edge_name: ID of edge-name (required)
:param str x_iam_token: authentication header object
:param bool working: true queries undeployed configuration
:return: EdgeSchema
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['organization_name', 'site_name', 'edge_name', 'x_iam_token', 'working'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method retrieve_healthbot_organization_site_edge_edge_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'organization_name' is set
if ('organization_name' not in params or
params['organization_name'] is None):
raise ValueError("Missing the required parameter `organization_name` when calling `retrieve_healthbot_organization_site_edge_edge_by_id`") # noqa: E501
# verify the required parameter 'site_name' is set
if ('site_name' not in params or
params['site_name'] is None):
raise ValueError("Missing the required parameter `site_name` when calling `retrieve_healthbot_organization_site_edge_edge_by_id`") # noqa: E501
# verify the required parameter 'edge_name' is set
if ('edge_name' not in params or
params['edge_name'] is None):
raise ValueError("Missing the required parameter `edge_name` when calling `retrieve_healthbot_organization_site_edge_edge_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'organization_name' in params:
path_params['organization_name'] = params['organization_name'] # noqa: E501
if 'site_name' in params:
path_params['site_name'] = params['site_name'] # noqa: E501
if 'edge_name' in params:
path_params['edge_name'] = params['edge_name'] # noqa: E501
query_params = []
if 'working' in params:
query_params.append(('working', params['working'])) # noqa: E501
header_params = {}
if 'x_iam_token' in params:
header_params['x-iam-token'] = params['x_iam_token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/config/organization/{organization_name}/site/{site_name}/edge/{edge_name}/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EdgeSchema', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def retrieve_healthbot_organization_site_site_by_id(self, organization_name, site_name, **kwargs): # noqa: E501
"""Retrieve site by ID # noqa: E501
Retrieve operation of resource: site # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.retrieve_healthbot_organization_site_site_by_id(organization_name, site_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str organization_name: ID of organization-name (required)
:param str site_name: ID of site-name (required)
:param str x_iam_token: authentication header object
:param bool working: true queries undeployed configuration
:return: SiteSchema
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.retrieve_healthbot_organization_site_site_by_id_with_http_info(organization_name, site_name, **kwargs) # noqa: E501
else:
(data) = self.retrieve_healthbot_organization_site_site_by_id_with_http_info(organization_name, site_name, **kwargs) # noqa: E501
return data
def retrieve_healthbot_organization_site_site_by_id_with_http_info(self, organization_name, site_name, **kwargs): # noqa: E501
"""Retrieve site by ID # noqa: E501
Retrieve operation of resource: site # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.retrieve_healthbot_organization_site_site_by_id_with_http_info(organization_name, site_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str organization_name: ID of organization-name (required)
:param str site_name: ID of site-name (required)
:param str x_iam_token: authentication header object
:param bool working: true queries undeployed configuration
:return: SiteSchema
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['organization_name', 'site_name', 'x_iam_token', 'working'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method retrieve_healthbot_organization_site_site_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'organization_name' is set
if ('organization_name' not in params or
params['organization_name'] is None):
raise ValueError("Missing the required parameter `organization_name` when calling `retrieve_healthbot_organization_site_site_by_id`") # noqa: E501
# verify the required parameter 'site_name' is set
if ('site_name' not in params or
params['site_name'] is None):
raise ValueError("Missing the required parameter `site_name` when calling `retrieve_healthbot_organization_site_site_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'organization_name' in params:
path_params['organization_name'] = params['organization_name'] # noqa: E501
if 'site_name' in params:
path_params['site_name'] = params['site_name'] # noqa: E501
query_params = []
if 'working' in params:
query_params.append(('working', params['working'])) # noqa: E501
header_params = {}
if 'x_iam_token' in params:
header_params['x-iam-token'] = params['x_iam_token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/config/organization/{organization_name}/site/{site_name}/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SiteSchema', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_healthbot_organization_site_edge_edge_by_id(self, organization_name, site_name, edge_name, edge, **kwargs): # noqa: E501
"""Update edge by ID # noqa: E501
Update operation of resource: edge # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_healthbot_organization_site_edge_edge_by_id(organization_name, site_name, edge_name, edge, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str organization_name: ID of organization-name (required)
:param str site_name: ID of site-name (required)
:param str edge_name: ID of edge-name (required)
:param EdgeSchema edge: edgebody object (required)
:param str x_iam_token: authentication header object
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_healthbot_organization_site_edge_edge_by_id_with_http_info(organization_name, site_name, edge_name, edge, **kwargs) # noqa: E501
else:
(data) = self.update_healthbot_organization_site_edge_edge_by_id_with_http_info(organization_name, site_name, edge_name, edge, **kwargs) # noqa: E501
return data
def update_healthbot_organization_site_edge_edge_by_id_with_http_info(self, organization_name, site_name, edge_name, edge, **kwargs): # noqa: E501
"""Update edge by ID # noqa: E501
Update operation of resource: edge # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_healthbot_organization_site_edge_edge_by_id_with_http_info(organization_name, site_name, edge_name, edge, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str organization_name: ID of organization-name (required)
:param str site_name: ID of site-name (required)
:param str edge_name: ID of edge-name (required)
:param EdgeSchema edge: edgebody object (required)
:param str x_iam_token: authentication header object
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['organization_name', 'site_name', 'edge_name', 'edge', 'x_iam_token'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_healthbot_organization_site_edge_edge_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'organization_name' is set
if ('organization_name' not in params or
params['organization_name'] is None):
raise ValueError("Missing the required parameter `organization_name` when calling `update_healthbot_organization_site_edge_edge_by_id`") # noqa: E501
# verify the required parameter 'site_name' is set
if ('site_name' not in params or
params['site_name'] is None):
raise ValueError("Missing the required parameter `site_name` when calling `update_healthbot_organization_site_edge_edge_by_id`") # noqa: E501
# verify the required parameter 'edge_name' is set
if ('edge_name' not in params or
params['edge_name'] is None):
raise ValueError("Missing the required parameter `edge_name` when calling `update_healthbot_organization_site_edge_edge_by_id`") # noqa: E501
# verify the required parameter 'edge' is set
if ('edge' not in params or
params['edge'] is None):
raise ValueError("Missing the required parameter `edge` when calling `update_healthbot_organization_site_edge_edge_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'organization_name' in params:
path_params['organization_name'] = params['organization_name'] # noqa: E501
if 'site_name' in params:
path_params['site_name'] = params['site_name'] # noqa: E501
if 'edge_name' in params:
path_params['edge_name'] = params['edge_name'] # noqa: E501
query_params = []
header_params = {}
if 'x_iam_token' in params:
header_params['x-iam-token'] = params['x_iam_token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'edge' in params:
body_params = params['edge']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/config/organization/{organization_name}/site/{site_name}/edge/{edge_name}/', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_healthbot_organization_site_site_by_id(self, organization_name, site_name, site, **kwargs): # noqa: E501
"""Update site by ID # noqa: E501
Update operation of resource: site # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_healthbot_organization_site_site_by_id(organization_name, site_name, site, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str organization_name: ID of organization-name (required)
:param str site_name: ID of site-name (required)
:param SiteSchema site: sitebody object (required)
:param str x_iam_token: authentication header object
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_healthbot_organization_site_site_by_id_with_http_info(organization_name, site_name, site, **kwargs) # noqa: E501
else:
(data) = self.update_healthbot_organization_site_site_by_id_with_http_info(organization_name, site_name, site, **kwargs) # noqa: E501
return data
def update_healthbot_organization_site_site_by_id_with_http_info(self, organization_name, site_name, site, **kwargs): # noqa: E501
"""Update site by ID # noqa: E501
Update operation of resource: site # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_healthbot_organization_site_site_by_id_with_http_info(organization_name, site_name, site, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str organization_name: ID of organization-name (required)
:param str site_name: ID of site-name (required)
:param SiteSchema site: sitebody object (required)
:param str x_iam_token: authentication header object
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['organization_name', 'site_name', 'site', 'x_iam_token'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_healthbot_organization_site_site_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'organization_name' is set
if ('organization_name' not in params or
params['organization_name'] is None):
raise ValueError("Missing the required parameter `organization_name` when calling `update_healthbot_organization_site_site_by_id`") # noqa: E501
# verify the required parameter 'site_name' is set
if ('site_name' not in params or
params['site_name'] is None):
raise ValueError("Missing the required parameter `site_name` when calling `update_healthbot_organization_site_site_by_id`") # noqa: E501
# verify the required parameter 'site' is set
if ('site' not in params or
params['site'] is None):
raise ValueError("Missing the required parameter `site` when calling `update_healthbot_organization_site_site_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'organization_name' in params:
path_params['organization_name'] = params['organization_name'] # noqa: E501
if 'site_name' in params:
path_params['site_name'] = params['site_name'] # noqa: E501
query_params = []
header_params = {}
if 'x_iam_token' in params:
header_params['x-iam-token'] = params['x_iam_token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'site' in params:
body_params = params['site']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/config/organization/{organization_name}/site/{site_name}/', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
```

| field | value |
|---|---|
| avg_line_length | 46.980905 |
| max_line_length | 164 |
| alphanum_fraction | 0.648013 |

| signal | *_quality_signal | filter |
|---|---|---|
| qsc_code_num_words | 5,652 | 0 |
| qsc_code_num_chars | 46,746 | 0 |
| qsc_code_mean_word_length | 5.058209 | 0 |
| qsc_code_frac_words_unique | 0.030786 | null |
| qsc_code_frac_chars_top_2grams | 0.046731 | 0 |
| qsc_code_frac_chars_top_3grams | 0.069957 | 0 |
| qsc_code_frac_chars_top_4grams | 0.047011 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0.982896 | 1 |
| qsc_code_frac_chars_dupe_6grams | 0.982896 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.982896 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.980237 | 1 |
| qsc_code_frac_chars_dupe_9grams | 0.980027 | 1 |
| qsc_code_frac_chars_dupe_10grams | 0.979712 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.014873 | 0 |
| qsc_code_frac_chars_whitespace | 0.266461 | 0 |
| qsc_code_size_file_byte | 46,746 | 0 |
| qsc_code_num_lines | 994 | 0 |
| qsc_code_num_chars_line_max | 165 | 0 |
| qsc_code_num_chars_line_mean | 47.028169 | 0 |
| qsc_code_frac_chars_alphabet | 0.818868 | 0 |
| qsc_code_frac_chars_comments | 0.323835 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0.824399 | 1 |
| qsc_code_cate_autogen | 1 | 1 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.260303 | 0 |
| qsc_code_frac_chars_long_word_length | 0.090026 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0.031423 | 0 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0.007394 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.085028 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |

| field | value |
|---|---|
| effective | 0 |
| hits | 8 |
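The near-saturated duplication signals in this record (`qsc_code_frac_chars_dupe_5grams` through `dupe_10grams` around 0.98, alongside `qsc_code_cate_autogen` = 1) are what swagger-codegen boilerplate should produce: the same parameter-validation block repeats for every endpoint. One plausible reading of such a signal is the fraction of characters covered by word n-grams that occur more than once; a sketch of that definition follows (the exact formula behind this table is not shown):

```python
from collections import Counter

# Sketch of a "fraction of characters in duplicated word n-grams" signal.
# One plausible definition; not necessarily the one used for this table.
def frac_chars_dupe_ngrams(text: str, n: int = 5) -> float:
    words = text.split()
    if len(words) < n:
        return 0.0
    grams = [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
    counts = Counter(grams)
    covered = [False] * len(words)  # word positions inside a repeated n-gram
    for i, gram in enumerate(grams):
        if counts[gram] > 1:
            for j in range(i, i + n):
                covered[j] = True
    total = sum(len(w) for w in words)
    dupe = sum(len(w) for w, hit in zip(words, covered) if hit)
    return dupe / total if total else 0.0
```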
**Record 5**

| field | value |
|---|---|
| hexsha | bf5d47be37cf903e8580c7840db1fffd47cb12b1 |
| size | 86 |
| ext | py |
| lang | Python |

| | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | berth/builder/backends/__init__.py | berth/builder/backends/__init__.py | berth/builder/backends/__init__.py |
| repo_name | joealcorn/berth.cc | joealcorn/berth.cc | joealcorn/berth.cc |
| repo_head_hexsha | 9cba1355d49705a13ae58cfdffa26ee6a3fb9e31 | 9cba1355d49705a13ae58cfdffa26ee6a3fb9e31 | 9cba1355d49705a13ae58cfdffa26ee6a3fb9e31 |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | null | null | null |
| event_min_datetime | null | null | null |
| event_max_datetime | null | null | null |

content:

```python
from berth.builder.backends.base import *
from berth.builder.backends.sphinx import *
```

| field | value |
|---|---|
| avg_line_length | 28.666667 |
| max_line_length | 43 |
| alphanum_fraction | 0.813953 |

| signal | *_quality_signal | filter |
|---|---|---|
| qsc_code_num_words | 12 | 1 |
| qsc_code_num_chars | 86 | 0 |
| qsc_code_mean_word_length | 5.833333 | 0 |
| qsc_code_frac_words_unique | 0.583333 | null |
| qsc_code_frac_chars_top_2grams | 0.257143 | 1 |
| qsc_code_frac_chars_top_3grams | 0.457143 | 1 |
| qsc_code_frac_chars_top_4grams | 0.685714 | 1 |
| qsc_code_frac_chars_dupe_5grams | 0 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0 | 0 |
| qsc_code_frac_chars_dupe_7grams | 0 | 0 |
| qsc_code_frac_chars_dupe_8grams | 0 | 0 |
| qsc_code_frac_chars_dupe_9grams | 0 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0 | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0 | 0 |
| qsc_code_frac_chars_whitespace | 0.093023 | 0 |
| qsc_code_size_file_byte | 86 | 0 |
| qsc_code_num_lines | 2 | 1 |
| qsc_code_num_chars_line_max | 44 | 0 |
| qsc_code_num_chars_line_mean | 43 | 0 |
| qsc_code_frac_chars_alphabet | 0.897436 | 0 |
| qsc_code_frac_chars_comments | 0 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 | 0 |
| qsc_codepython_cate_var_zero | true | 1 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 1 | 1 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 1 | 1 |
| qsc_codepython_frac_lines_print | 0 | 0 |

| field | value |
|---|---|
| effective | 0 |
| hits | 8 |
**Record 6**

| field | value |
|---|---|
| hexsha | 44c253f81c7a3412e5a62fbd60011c97050af7d2 |
| size | 2,720 |
| ext | py |
| lang | Python |

| | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | tests/test_year_2014.py | tests/test_year_2014.py | tests/test_year_2014.py |
| repo_name | l0pht511/jpholiday | l0pht511/jpholiday | l0pht511/jpholiday |
| repo_head_hexsha | 083145737b61fad3420c066968c4329d17dc3baf | 083145737b61fad3420c066968c4329d17dc3baf | 083145737b61fad3420c066968c4329d17dc3baf |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | 179 | 17 | 17 |
| event_min_datetime | 2017-10-05T12:41:10.000Z | 2018-10-23T00:51:13.000Z | 2018-10-19T11:13:07.000Z |
| event_max_datetime | 2022-03-24T22:18:25.000Z | 2021-11-22T11:40:06.000Z | 2022-01-29T08:05:56.000Z |

content:

```python
# coding: utf-8
import datetime
import unittest

import jpholiday


class TestYear2014(unittest.TestCase):
    def test_holiday(self):
        """
        2014年祝日
        """
        self.assertEqual(jpholiday.is_holiday_name(datetime.date(2014, 1, 1)), '元日')
        self.assertEqual(jpholiday.is_holiday_name(datetime.date(2014, 1, 13)), '成人の日')
        self.assertEqual(jpholiday.is_holiday_name(datetime.date(2014, 2, 11)), '建国記念の日')
        self.assertEqual(jpholiday.is_holiday_name(datetime.date(2014, 3, 21)), '春分の日')
        self.assertEqual(jpholiday.is_holiday_name(datetime.date(2014, 4, 29)), '昭和の日')
        self.assertEqual(jpholiday.is_holiday_name(datetime.date(2014, 5, 3)), '憲法記念日')
        self.assertEqual(jpholiday.is_holiday_name(datetime.date(2014, 5, 4)), 'みどりの日')
        self.assertEqual(jpholiday.is_holiday_name(datetime.date(2014, 5, 5)), 'こどもの日')
        self.assertEqual(jpholiday.is_holiday_name(datetime.date(2014, 5, 6)), 'みどりの日 振替休日')
        self.assertEqual(jpholiday.is_holiday_name(datetime.date(2014, 7, 21)), '海の日')
        self.assertEqual(jpholiday.is_holiday_name(datetime.date(2014, 9, 15)), '敬老の日')
        self.assertEqual(jpholiday.is_holiday_name(datetime.date(2014, 9, 23)), '秋分の日')
        self.assertEqual(jpholiday.is_holiday_name(datetime.date(2014, 10, 13)), '体育の日')
        self.assertEqual(jpholiday.is_holiday_name(datetime.date(2014, 11, 3)), '文化の日')
        self.assertEqual(jpholiday.is_holiday_name(datetime.date(2014, 11, 23)), '勤労感謝の日')
        self.assertEqual(jpholiday.is_holiday_name(datetime.date(2014, 11, 24)), '勤労感謝の日 振替休日')
        self.assertEqual(jpholiday.is_holiday_name(datetime.date(2014, 12, 23)), '天皇誕生日')

    def test_count_month(self):
        """
        2014年月祝日数
        """
        self.assertEqual(len(jpholiday.month_holidays(2014, 1)), 2)
        self.assertEqual(len(jpholiday.month_holidays(2014, 2)), 1)
        self.assertEqual(len(jpholiday.month_holidays(2014, 3)), 1)
        self.assertEqual(len(jpholiday.month_holidays(2014, 4)), 1)
        self.assertEqual(len(jpholiday.month_holidays(2014, 5)), 4)
        self.assertEqual(len(jpholiday.month_holidays(2014, 6)), 0)
        self.assertEqual(len(jpholiday.month_holidays(2014, 7)), 1)
        self.assertEqual(len(jpholiday.month_holidays(2014, 8)), 0)
        self.assertEqual(len(jpholiday.month_holidays(2014, 9)), 2)
        self.assertEqual(len(jpholiday.month_holidays(2014, 10)), 1)
        self.assertEqual(len(jpholiday.month_holidays(2014, 11)), 3)
        self.assertEqual(len(jpholiday.month_holidays(2014, 12)), 1)

    def test_count_year(self):
        """
        2014年祝日数
        """
        self.assertEqual(len(jpholiday.year_holidays(2014)), 17)
```

| field | value |
|---|---|
| avg_line_length | 51.320755 |
| max_line_length | 95 |
| alphanum_fraction | 0.682353 |

| signal | *_quality_signal | filter |
|---|---|---|
| qsc_code_num_words | 352 | 0 |
| qsc_code_num_chars | 2,720 | 0 |
| qsc_code_mean_word_length | 5.125 | 0 |
| qsc_code_frac_words_unique | 0.176136 | null |
| qsc_code_frac_chars_top_2grams | 0.249446 | 1 |
| qsc_code_frac_chars_top_3grams | 0.226164 | 1 |
| qsc_code_frac_chars_top_4grams | 0.245011 | 1 |
| qsc_code_frac_chars_dupe_5grams | 0.809313 | 1 |
| qsc_code_frac_chars_dupe_6grams | 0.809313 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.809313 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.736142 | 1 |
| qsc_code_frac_chars_dupe_9grams | 0.511641 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0.394124 | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.095702 | 0 |
| qsc_code_frac_chars_whitespace | 0.170221 | 0 |
| qsc_code_size_file_byte | 2,720 | 0 |
| qsc_code_num_lines | 52 | 0 |
| qsc_code_num_chars_line_max | 96 | 0 |
| qsc_code_num_chars_line_mean | 52.307692 | 0 |
| qsc_code_frac_chars_alphabet | 0.703589 | 0 |
| qsc_code_frac_chars_comments | 0.015074 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.032963 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0.810811 | 1 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0.081081 | 0 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0.081081 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.189189 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |

| field | value |
|---|---|
| effective | 0 |
| hits | 8 |
**Record 7**

| field | value |
|---|---|
| hexsha | 7852458867e37b272e2b2586e35216cc3c5811b9 |
| size | 137 |
| ext | py |
| lang | Python |

| | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | module.py | module.py | module.py |
| repo_name | supperhappie/web_scraper | supperhappie/web_scraper | supperhappie/web_scraper |
| repo_head_hexsha | 334cc877d06fe0083f52fca84de7fb906a0ff5f7 | 334cc877d06fe0083f52fca84de7fb906a0ff5f7 | 334cc877d06fe0083f52fca84de7fb906a0ff5f7 |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | null | null | null |
| event_min_datetime | null | null | null |
| event_max_datetime | null | null | null |

content:

```python
from math import *
print(ceil(4.2))
print(sum([0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1]))
print(fsum([0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1]))
```

| field | value |
|---|---|
| avg_line_length | 27.4 |
| max_line_length | 50 |
| alphanum_fraction | 0.569343 |

| signal | *_quality_signal | filter |
|---|---|---|
| qsc_code_num_words | 47 | 0 |
| qsc_code_num_chars | 137 | 0 |
| qsc_code_mean_word_length | 1.659574 | 1 |
| qsc_code_frac_words_unique | 0.234043 | null |
| qsc_code_frac_chars_top_2grams | 0.461538 | 1 |
| qsc_code_frac_chars_top_3grams | 0.615385 | 1 |
| qsc_code_frac_chars_top_4grams | 0.820513 | 1 |
| qsc_code_frac_chars_dupe_5grams | 0.461538 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0.461538 | 0 |
| qsc_code_frac_chars_dupe_7grams | 0.461538 | 0 |
| qsc_code_frac_chars_dupe_8grams | 0.461538 | 0 |
| qsc_code_frac_chars_dupe_9grams | 0.461538 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0.461538 | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.292308 | 1 |
| qsc_code_frac_chars_whitespace | 0.051095 | 0 |
| qsc_code_size_file_byte | 137 | 0 |
| qsc_code_num_lines | 5 | 1 |
| qsc_code_num_chars_line_max | 50 | 0 |
| qsc_code_num_chars_line_mean | 27.4 | 0 |
| qsc_code_frac_chars_alphabet | 0.307692 | 1 |
| qsc_code_frac_chars_comments | 0 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 | 0 |
| qsc_codepython_cate_var_zero | true | 1 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0.25 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.25 | 0 |
| qsc_codepython_frac_lines_print | 0.75 | 1 |

| field | value |
|---|---|
| effective | 0 |
| hits | 9 |
**Record 8**

| field | value |
|---|---|
| hexsha | 78758a785c0289c32fa159acab530e62fdfcc65f |
| size | 24,304 |
| ext | py |
| lang | Python |

| | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | mayan/apps/documents/tests/test_document_file_views.py | mayan/apps/documents/tests/test_document_file_views.py | mayan/apps/documents/tests/test_document_file_views.py |
| repo_name | nattangwiwat/Mayan-EDMS-recitation | nattangwiwat/Mayan-EDMS-recitation | nattangwiwat/Mayan-EDMS-recitation |
| repo_head_hexsha | fcf16afb56eae812fb99144d65ae1ae6749de0b7 | fcf16afb56eae812fb99144d65ae1ae6749de0b7 | fcf16afb56eae812fb99144d65ae1ae6749de0b7 |
| repo_licenses | ["Apache-2.0"] | ["Apache-2.0"] | ["Apache-2.0"] |
| count | 343 | 191 | 257 |
| event_min_datetime | 2015-01-05T14:19:35.000Z | 2015-01-03T00:48:19.000Z | 2019-05-14T10:26:37.000Z |
| event_max_datetime | 2018-12-10T19:07:48.000Z | 2018-11-30T09:10:25.000Z | 2022-03-30T03:37:36.000Z |

content:

```python
from mayan.apps.converter.layers import layer_saved_transformations
from mayan.apps.converter.permissions import (
permission_transformation_delete, permission_transformation_edit
)
from mayan.apps.converter.tests.mixins import LayerTestMixin
from mayan.apps.documents.tests.literals import TEST_MULTI_PAGE_TIFF
from mayan.apps.file_caching.events import event_cache_partition_purged
from mayan.apps.file_caching.models import CachePartitionFile
from mayan.apps.file_caching.permissions import permission_cache_partition_purge
from mayan.apps.file_caching.tests.mixins import CachePartitionViewTestMixin
from ..events import (
event_document_file_deleted, event_document_file_downloaded,
event_document_file_edited,
)
from ..permissions import (
permission_document_file_delete, permission_document_file_download,
permission_document_file_edit, permission_document_file_print,
permission_document_file_view
)
from .base import GenericDocumentViewTestCase
from .mixins.document_file_mixins import (
DocumentFileTestMixin, DocumentFileTransformationTestMixin,
DocumentFileTransformationViewTestMixin, DocumentFileViewTestMixin
)
class DocumentFileViewTestCase(
DocumentFileTestMixin, DocumentFileViewTestMixin,
GenericDocumentViewTestCase
):
def test_document_file_delete_no_permission(self):
first_file = self.test_document.file_latest
self._upload_new_file()
test_document_file_count = self.test_document.files.count()
self._clear_events()
response = self._request_test_document_file_delete_view(
document_file=first_file
)
self.assertEqual(response.status_code, 404)
self.assertEqual(
self.test_document.files.count(), test_document_file_count
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_delete_with_access(self):
first_file = self.test_document.file_latest
self._upload_new_file()
self.grant_access(
obj=self.test_document,
permission=permission_document_file_delete
)
test_document_file_count = self.test_document.files.count()
self._clear_events()
response = self._request_test_document_file_delete_view(
document_file=first_file
)
self.assertEqual(response.status_code, 302)
self.assertEqual(
self.test_document.files.count(), test_document_file_count - 1
)
events = self._get_test_events()
self.assertEqual(events.count(), 1)
self.assertEqual(events[0].action_object, None)
self.assertEqual(events[0].actor, self._test_case_user)
self.assertEqual(events[0].target, self.test_document)
self.assertEqual(events[0].verb, event_document_file_deleted.id)
def test_trashed_document_file_delete_with_access(self):
first_file = self.test_document.file_latest
self._upload_new_file()
self.grant_access(
obj=self.test_document,
permission=permission_document_file_delete
)
test_document_file_count = self.test_document.files.count()
self.test_document.delete()
self._clear_events()
response = self._request_test_document_file_delete_view(
document_file=first_file
)
self.assertEqual(response.status_code, 404)
self.assertEqual(
self.test_document.files.count(), test_document_file_count
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_delete_multiple_no_permission(self):
self._upload_new_file()
test_document_file_count = self.test_document.files.count()
self._clear_events()
response = self._request_test_document_file_delete_multiple_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(
self.test_document.files.count(), test_document_file_count
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_delete_multiple_with_access(self):
self._upload_new_file()
self.grant_access(
obj=self.test_document,
permission=permission_document_file_delete
)
test_document_file_count = self.test_document.files.count()
self._clear_events()
response = self._request_test_document_file_delete_multiple_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(
self.test_document.files.count(), test_document_file_count - 1
)
events = self._get_test_events()
self.assertEqual(events.count(), 1)
self.assertEqual(events[0].action_object, None)
self.assertEqual(events[0].actor, self._test_case_user)
self.assertEqual(events[0].target, self.test_document)
self.assertEqual(events[0].verb, event_document_file_deleted.id)
def test_document_file_edit_view_no_permission(self):
document_file_comment = self.test_document_file.comment
self._clear_events()
response = self._request_test_document_file_edit_view()
self.assertEqual(response.status_code, 404)
self.test_document_file.refresh_from_db()
self.assertEqual(
self.test_document_file.comment,
document_file_comment
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_edit_view_with_access(self):
self.grant_access(
obj=self.test_document_file,
permission=permission_document_file_edit
)
document_file_comment = self.test_document_file.comment
document_file_filename = self.test_document_file.filename
self._clear_events()
response = self._request_test_document_file_edit_view()
self.assertEqual(response.status_code, 302)
self.test_document_file.refresh_from_db()
self.assertNotEqual(
self.test_document_file.comment,
document_file_comment
)
self.assertNotEqual(
self.test_document_file.filename,
document_file_filename
)
events = self._get_test_events()
self.assertEqual(events.count(), 1)
self.assertEqual(events[0].action_object, self.test_document)
self.assertEqual(events[0].actor, self._test_case_user)
self.assertEqual(events[0].target, self.test_document_file)
self.assertEqual(events[0].verb, event_document_file_edited.id)
def test_trashed_document_file_edit_view_with_access(self):
self.grant_access(
obj=self.test_document_file,
permission=permission_document_file_edit
)
document_file_comment = self.test_document_file.comment
document_file_filename = self.test_document_file.filename
self.test_document.delete()
self._clear_events()
response = self._request_test_document_file_edit_view()
self.assertEqual(response.status_code, 404)
self.test_document_file.refresh_from_db()
self.assertEqual(
self.test_document_file.comment,
document_file_comment
)
self.assertEqual(
self.test_document_file.filename,
document_file_filename
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_list_no_permission(self):
self._clear_events()
response = self._request_test_document_file_list_view()
self.assertEqual(response.status_code, 404)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_list_with_access(self):
self.grant_access(
obj=self.test_document,
permission=permission_document_file_view
)
self._clear_events()
response = self._request_test_document_file_list_view()
self.assertContains(
response=response, status_code=200,
text=str(self.test_document_file)
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_trashed_document_file_list_with_access(self):
self.grant_access(
obj=self.test_document,
permission=permission_document_file_view
)
self.test_document.delete()
self._clear_events()
response = self._request_test_document_file_list_view()
self.assertEqual(response.status_code, 404)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_print_form_view_no_permission(self):
self._clear_events()
response = self._request_test_document_file_print_form_view()
self.assertEqual(response.status_code, 404)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_print_form_view_with_access(self):
self.grant_access(
obj=self.test_document_file,
permission=permission_document_file_print
)
self._clear_events()
response = self._request_test_document_file_print_form_view()
self.assertEqual(response.status_code, 200)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_trashed_document_file_print_form_view_with_access(self):
self.grant_access(
obj=self.test_document_file,
permission=permission_document_file_print
)
self.test_document.delete()
self._clear_events()
response = self._request_test_document_file_print_form_view()
self.assertEqual(response.status_code, 404)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_print_view_no_permission(self):
self._clear_events()
response = self._request_test_document_file_print_view()
self.assertEqual(response.status_code, 404)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_print_view_with_access(self):
self.grant_access(
obj=self.test_document_file,
permission=permission_document_file_print
)
self._clear_events()
response = self._request_test_document_file_print_view()
self.assertEqual(response.status_code, 200)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_trashed_document_file_print_view_with_access(self):
self.grant_access(
obj=self.test_document_file,
permission=permission_document_file_print
)
self.test_document.delete()
self._clear_events()
response = self._request_test_document_file_print_view()
self.assertEqual(response.status_code, 404)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_properties_view_no_permission(self):
self._clear_events()
response = self._request_test_document_file_properties_view()
self.assertEqual(response.status_code, 404)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_properties_view_with_access(self):
self.grant_access(
obj=self.test_document_file,
permission=permission_document_file_view
)
self._clear_events()
response = self._request_test_document_file_properties_view()
self.assertContains(
response=response, text=self.test_document_file.filename,
status_code=200
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_trashed_document_file_properties_view_with_access(self):
self.grant_access(
obj=self.test_document_file,
permission=permission_document_file_view
)
self.test_document.delete()
self._clear_events()
response = self._request_test_document_file_properties_view()
self.assertEqual(response.status_code, 404)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
class DocumentFileDownloadViewTestCase(
DocumentFileViewTestMixin, GenericDocumentViewTestCase
):
def test_document_file_download_view_no_permission(self):
self._clear_events()
response = self._request_test_document_file_download_view()
self.assertEqual(response.status_code, 404)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_download_view_with_permission(self):
# Set the expected_content_types for
# common.tests.mixins.ContentTypeCheckMixin
self.expected_content_types = (
self.test_document.file_latest.mimetype,
)
self.grant_access(
obj=self.test_document,
permission=permission_document_file_download
)
self._clear_events()
response = self._request_test_document_file_download_view()
self.assertEqual(response.status_code, 200)
with self.test_document.file_latest.open() as file_object:
self.assert_download_response(
response=response, content=file_object.read(),
filename=self.test_document.file_latest.filename,
mime_type=self.test_document.file_latest.mimetype
)
events = self._get_test_events()
self.assertEqual(events.count(), 1)
self.assertEqual(events[0].action_object, self.test_document)
self.assertEqual(events[0].actor, self._test_case_user)
self.assertEqual(events[0].target, self.test_document_file)
self.assertEqual(events[0].verb, event_document_file_downloaded.id)
def test_trashed_document_file_download_view_with_permission(self):
self.grant_access(
obj=self.test_document,
permission=permission_document_file_download
)
self.test_document.delete()
self._clear_events()
response = self._request_test_document_file_download_view()
self.assertEqual(response.status_code, 404)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
class DocumentFileTransformationViewTestCase(
LayerTestMixin, DocumentFileTransformationTestMixin,
DocumentFileTransformationViewTestMixin, GenericDocumentViewTestCase
):
test_document_filename = TEST_MULTI_PAGE_TIFF
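    # Presumably a multi-page TIFF is used so the clone tests below can verify
    # that transformations are copied from pages.first() to pages.last().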
def test_document_file_transformations_clear_view_no_permission(self):
self._create_document_file_transformation()
transformation_count = layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.first()
).count()
self._clear_events()
response = self._request_test_document_file_transformations_clear_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(
layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.first()
).count(), transformation_count
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_transformations_clear_view_with_access(self):
self._create_document_file_transformation()
transformation_count = layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.first()
).count()
self.grant_access(
obj=self.test_document_file,
permission=permission_transformation_delete
)
self._clear_events()
response = self._request_test_document_file_transformations_clear_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(
layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.first()
).count(), transformation_count - 1
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_trashed_document_file_transformations_clear_view_with_access(self):
self._create_document_file_transformation()
transformation_count = layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.first()
).count()
self.grant_access(
obj=self.test_document_file,
permission=permission_transformation_delete
)
self.test_document.delete()
self._clear_events()
response = self._request_test_document_file_transformations_clear_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(
layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.first()
).count(), transformation_count
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_multiple_transformations_clear_view_no_permission(self):
self._create_document_file_transformation()
transformation_count = layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.first()
).count()
self._clear_events()
response = self._request_test_document_file_multiple_transformations_clear_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(
layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.first()
).count(), transformation_count
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_multiple_transformations_clear_view_with_access(self):
self._create_document_file_transformation()
transformation_count = layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.first()
).count()
self.grant_access(
obj=self.test_document_file,
permission=permission_document_file_view
)
self.grant_access(
obj=self.test_document_file,
permission=permission_transformation_delete
)
self._clear_events()
response = self._request_test_document_file_multiple_transformations_clear_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(
layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.first()
).count(), transformation_count - 1,
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_transformations_clone_view_no_permission(self):
self._create_document_file_transformation()
page_first_transformation_count = layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.first()
).count()
page_last_transformation_count = layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.last()
).count()
self._clear_events()
response = self._request_test_document_file_transformations_clone_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(
layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.first()
).count(), page_first_transformation_count
)
self.assertEqual(
layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.last()
).count(), page_last_transformation_count
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_transformations_clone_view_with_access(self):
self._create_document_file_transformation()
page_first_transformation_count = layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.first()
).count()
page_last_transformation_count = layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.last()
).count()
self.grant_access(
obj=self.test_document_file,
permission=permission_transformation_edit
)
self._clear_events()
response = self._request_test_document_file_transformations_clone_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(
layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.first()
).count(), page_first_transformation_count
)
self.assertEqual(
layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.last()
).count(), page_last_transformation_count + 1
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
class DocumentFileCachePurgeViewTestCase(
CachePartitionViewTestMixin, GenericDocumentViewTestCase
):
def test_document_file_cache_purge_no_permission(self):
self.test_object = self.test_document_file
self._inject_test_object_content_type()
self.test_document_file.file_pages.first().generate_image()
test_document_file_cache_partitions = self.test_document_file.get_cache_partitions()
cache_partition_file_count = CachePartitionFile.objects.filter(
partition__in=test_document_file_cache_partitions
).count()
self._clear_events()
response = self._request_test_object_file_cache_partition_purge_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(
CachePartitionFile.objects.filter(
partition__in=test_document_file_cache_partitions
).count(), cache_partition_file_count
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_cache_purge_with_access(self):
self.test_object = self.test_document_file
self._inject_test_object_content_type()
self.grant_access(
obj=self.test_document_file,
permission=permission_cache_partition_purge
)
self.test_document_file.file_pages.first().generate_image()
test_document_file_cache_partitions = self.test_document_file.get_cache_partitions()
cache_partition_file_count = CachePartitionFile.objects.filter(
partition__in=test_document_file_cache_partitions
).count()
self._clear_events()
cache_partitions = self.test_document_file.get_cache_partitions()
response = self._request_test_object_file_cache_partition_purge_view()
self.assertEqual(response.status_code, 302)
self.assertNotEqual(
CachePartitionFile.objects.filter(
partition__in=test_document_file_cache_partitions
).count(), cache_partition_file_count
)
events = self._get_test_events()
self.assertEqual(events.count(), 2)
self.assertEqual(events[0].action_object, self.test_document_file)
self.assertEqual(events[0].actor, self._test_case_user)
self.assertEqual(events[0].target, cache_partitions[0])
self.assertEqual(events[0].verb, event_cache_partition_purged.id)
self.assertEqual(events[1].action_object, self.test_document_file)
self.assertEqual(events[1].actor, self._test_case_user)
self.assertEqual(events[1].target, cache_partitions[1])
self.assertEqual(events[1].verb, event_cache_partition_purged.id)
| 33.615491
| 94
| 0.697704
| 2,687
| 24,304
| 5.852624
| 0.046892
| 0.146509
| 0.137352
| 0.082666
| 0.895714
| 0.877337
| 0.849421
| 0.843698
| 0.835114
| 0.818962
| 0
| 0.008451
| 0.225889
| 24,304
| 722
| 95
| 33.66205
| 0.827416
| 0.003127
| 0
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.208333
| 1
| 0.060606
| false
| 0
| 0.022727
| 0
| 0.092803
| 0.032197
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
159e0993e20c2d1a01b6aa2da9021b6c99e89aa2
| 4,417
|
py
|
Python
|
xndtools/kernel_generator/tests/test_test_array.py
|
xnd-project/xndtools
|
9478f31954091d861ce538ba278f7f888e23d19b
|
[
"BSD-3-Clause"
] | 3
|
2019-11-12T16:01:26.000Z
|
2020-06-27T19:27:27.000Z
|
xndtools/kernel_generator/tests/test_test_array.py
|
xnd-project/xndtools
|
9478f31954091d861ce538ba278f7f888e23d19b
|
[
"BSD-3-Clause"
] | 4
|
2018-04-25T17:12:43.000Z
|
2018-08-23T18:17:24.000Z
|
xndtools/kernel_generator/tests/test_test_array.py
|
xnd-project/xndtools
|
9478f31954091d861ce538ba278f7f888e23d19b
|
[
"BSD-3-Clause"
] | 6
|
2018-05-04T08:10:40.000Z
|
2019-03-19T10:00:21.000Z
|
import pytest
from xndtools.kernel_generator.utils import NormalizedTypeMap
from xnd import xnd
import test_array as m
long_t = NormalizedTypeMap()('long')
def assert_equal(x, y):
assert x == y and x.dtype == y.dtype
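# Helper asserting that two xnd containers match in both value and dtype.
# Each test below exercises the C kernel and the Xnd kernel (via the strided
# slice a[1::2]); the F and Strided kernel cases are left as TODO.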
def test_array_range_input():
# C kernel
a = xnd([1, 2, 3], dtype=long_t)
r = m.test_array_range_input(a)
assert_equal(r, xnd(6, type=long_t))
assert_equal(a,
xnd([0, 1, 2], dtype=long_t)) # because `a` matches exactly
# F kernel
# TODO
# Xnd kernel
a = xnd([1, 2, 3, 4, 5, 6, 7], dtype=long_t)
x = a[1::2]
r = m.test_array_range_input(x)
assert_equal(r, xnd(12, type=long_t))
assert_equal(x, xnd([2, 4, 6], dtype=long_t))
assert_equal(a, xnd([1, 2, 3, 4, 5, 6, 7], dtype=long_t))
# Strided kernel
# TODO
def test_array_range_inplace():
# C kernel
a = xnd([1, 2, 3], dtype=long_t)
r = m.test_array_range_inplace(a)
assert_equal(r, xnd(6, type=long_t))
assert_equal(a, xnd([0, 1, 2], dtype=long_t))
# F kernel
# TODO
# Xnd kernel
a = xnd([1, 2, 3, 4, 5, 6, 7], dtype=long_t)
x = a[1::2]
assert_equal(x, xnd([2, 4, 6], dtype=long_t))
r = m.test_array_range_inplace(x)
assert_equal(r, xnd(12, type=long_t))
assert_equal(x, xnd([0, 1, 2], dtype=long_t))
assert_equal(a, xnd([1, 0, 3, 1, 5, 2, 7], dtype=long_t))
# Strided kernel
# TODO
def test_array_range_inout():
# C kernel
a = xnd([1, 2, 3], dtype=long_t)
r = m.test_array_range_inout(a)
assert_equal(r, xnd(6, type=long_t))
assert_equal(a, xnd([0, 1, 2], dtype=long_t))
# F kernel
# TODO
# Xnd kernel
a = xnd([1, 2, 3, 4, 5, 6, 7], dtype=long_t)
x = a[1::2]
assert_equal(x, xnd([2, 4, 6], dtype=long_t))
with pytest.raises(ValueError, match=r'.* must be C-contiguous .*'):
r = m.test_array_range_inout(x)
# Strided kernel
# TODO
def test_array_range_input_output():
# C kernel
a = xnd([1, 2, 3], dtype=long_t)
o, r = m.test_array_range_input_output(a)
assert_equal(r, xnd(6, type=long_t))
assert_equal(o, xnd([0, 1, 2], dtype=long_t))
assert_equal(a, xnd([1, 2, 3], dtype=long_t))
# F kernel
# TODO
# Xnd kernel
a = xnd([1, 2, 3, 4, 5, 6, 7], dtype=long_t)
x = a[1::2]
assert_equal(x, xnd([2, 4, 6], dtype=long_t))
o, r = m.test_array_range_input_output(x)
assert_equal(r, xnd(12, type=long_t))
assert_equal(o, xnd([0, 1, 2], dtype=long_t))
assert_equal(x, xnd([2, 4, 6], dtype=long_t))
assert_equal(a, xnd([1, 2, 3, 4, 5, 6, 7], dtype=long_t))
# Strided kernel
# TODO
def test_array_range_inplace_output():
# C kernel
a = xnd([1, 2, 3], dtype=long_t)
o, r = m.test_array_range_inplace_output(a)
assert_equal(r, xnd(6, type=long_t))
assert_equal(o, xnd([0, 1, 2], dtype=long_t))
assert_equal(a, xnd([0, 1, 2], dtype=long_t))
# F kernel
# TODO
# Xnd kernel
a = xnd([1, 2, 3, 4, 5, 6, 7], dtype=long_t)
x = a[1::2]
assert_equal(x, xnd([2, 4, 6], dtype=long_t))
o, r = m.test_array_range_inplace_output(x)
assert_equal(r, xnd(12, type=long_t))
assert_equal(o, xnd([0, 1, 2], dtype=long_t))
assert_equal(x, xnd([0, 1, 2], dtype=long_t))
assert_equal(a, xnd([1, 0, 3, 1, 5, 2, 7], dtype=long_t))
# Strided kernel
# TODO
def test_array_range_inout_output():
# C kernel
a = xnd([1, 2, 3], dtype=long_t)
o, r = m.test_array_range_inout_output(a)
assert_equal(r, xnd(6, type=long_t))
assert_equal(o, xnd([0, 1, 2], dtype=long_t))
assert_equal(a, xnd([0, 1, 2], dtype=long_t))
# F kernel
# TODO
# Xnd kernel
a = xnd([1, 2, 3, 4, 5, 6, 7], dtype=long_t)
x = a[1::2]
assert_equal(x, xnd([2, 4, 6], dtype=long_t))
with pytest.raises(ValueError, match=r'.* must be C-contiguous .*'):
o, r = m.test_array_range_inout_output(x)
# Strided kernel
# TODO
def test_array_range_output():
# using C, F, or Xnd kernel if defined
o, r = m.test_array_range_output(xnd(3, type=long_t))
assert_equal(r, xnd(0, type=long_t)) # could be random
assert_equal(o, xnd([0, 1, 2], dtype=long_t))
def test_array_range_hide():
# using C, F, or Xnd kernel if defined
r = m.test_array_range_hide(xnd(3, type=long_t))
assert r.type == xnd(0, type=long_t).type
# r value is random
| 26.769697
| 77
| 0.600634
| 811
| 4,417
| 3.066584
| 0.077682
| 0.104544
| 0.148774
| 0.128669
| 0.894652
| 0.864897
| 0.821874
| 0.821874
| 0.758745
| 0.746683
| 0
| 0.053582
| 0.235228
| 4,417
| 164
| 78
| 26.932927
| 0.682652
| 0.103917
| 0
| 0.609195
| 0
| 0
| 0.0143
| 0
| 0
| 0
| 0
| 0.006098
| 0.448276
| 1
| 0.103448
| false
| 0
| 0.045977
| 0
| 0.149425
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
eca386f51bbe9df459d3def8a91f74e99caeab92
| 28
|
py
|
Python
|
test_data/src-mini-project/src/mini/example.py
|
ajeetraina/pycograph
|
a1bcf8e0f62a605a798e82373ec2279add83cf16
|
[
"BSD-3-Clause"
] | 346
|
2016-02-22T20:21:10.000Z
|
2022-01-27T20:55:53.000Z
|
Language Skills/Python/Unit 4/2-Taking a Vacation/Review of Functions/1-Before we begin.py
|
vpstudios/Codecademy-Exercise-Answers
|
ebd0ee8197a8001465636f52c69592ea6745aa0c
|
[
"MIT"
] | 55
|
2016-04-07T13:58:44.000Z
|
2020-06-25T12:20:24.000Z
|
Language Skills/Python/Unit 4/2-Taking a Vacation/Review of Functions/1-Before we begin.py
|
vpstudios/Codecademy-Exercise-Answers
|
ebd0ee8197a8001465636f52c69592ea6745aa0c
|
[
"MIT"
] | 477
|
2016-02-21T06:17:02.000Z
|
2021-12-22T10:08:01.000Z
|
def answer():
return 42
| 9.333333
| 13
| 0.607143
| 4
| 28
| 4.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 0.285714
| 28
| 2
| 14
| 14
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
ecabd3f4bdd9f064663b14463a5ce941ba9d7e6b
| 20,935
|
py
|
Python
|
abeja/notebook/api/client.py
|
abeja-inc/abeja-platform-sdk
|
97cfc99b11ffc1fccb3f527435277bc89e18b8c3
|
[
"Apache-2.0"
] | 2
|
2020-10-20T18:38:16.000Z
|
2020-10-20T20:12:35.000Z
|
abeja/notebook/api/client.py
|
abeja-inc/abeja-platform-sdk
|
97cfc99b11ffc1fccb3f527435277bc89e18b8c3
|
[
"Apache-2.0"
] | 30
|
2020-04-07T01:15:47.000Z
|
2020-11-18T03:25:19.000Z
|
abeja/notebook/api/client.py
|
abeja-inc/abeja-platform-sdk
|
97cfc99b11ffc1fccb3f527435277bc89e18b8c3
|
[
"Apache-2.0"
] | null | null | null |
from typing import Optional
from abeja.common.api_client import BaseAPIClient
from abeja.notebook.types import InstanceType, ImageType, NotebookType
class APIClient(BaseAPIClient):
"""A Low-Level client for Notebook API
.. code-block:: python
from abeja.notebook import APIClient
api_client = APIClient()
"""
def create_notebook(
self,
organization_id: str,
job_definition_name: str,
instance_type: Optional[str] = None,
image: Optional[str] = None,
notebook_type: Optional[str] = None) -> dict:
"""create a notebook.
API reference: POST /organizations/{organization_id}/training/definitions/{job_definition_name}/notebooks
Request Syntax:
.. code-block:: python
organization_id = "1410000000000"
job_definition_name = "test_job_definition"
instance_type = 'cpu-1'
image = 'abeja-inc/all-cpu:19.10'
notebook_type = 'lab'
response = api_client.create_notebook(
organization_id, job_definition_name,
instance_type, image, notebook_type
)
Params:
- **organization_id** (str): organization id
- **job_definition_name** (str): training job definition name
- **instance_type** (str): **[optional]** instance type (ex. cpu-1)
- **image** (str): **[optional]** runtime environment (ex. abeja-inc/all-cpu:19.10)
- **notebook_type** (str): **[optional]** notebook type (notebook or lab)
Return type:
dict
Returns:
Response Syntax:
.. code-block:: python
{
"job_definition_id": "1234567890123",
"training_notebook_id": "1410000000000",
"name": "notebook-3",
"description": None,
"status": "Pending",
"status_message": None,
"instance_type": "cpu-1",
"image": "abeja-inc/all-cpu:18.10",
"creator": {
"updated_at": "2018-01-04T03:02:12Z",
"role": "admin",
"is_registered": True,
"id": "1122334455660",
"email": "test@abeja.asia",
"display_name": None,
"created_at": "2017-05-26T01:38:46Z"
},
"created_at": "2018-06-07T04:42:34.913644Z",
"modified_at": "2018-06-07T04:42:34.913726Z"
}
Raises:
- NotFound
- BadRequest
- Unauthorized: Authentication failed
- InternalServerError
"""
params = {}
if instance_type is not None and InstanceType.to_enum(instance_type):
params['instance_type'] = instance_type
if image is not None and ImageType.to_enum(image):
params['image'] = image
if notebook_type is not None and NotebookType.to_enum(notebook_type):
params['notebook_type'] = notebook_type
path = '/organizations/{}/training/definitions/{}/notebooks'.format(
organization_id, job_definition_name)
return self._connection.api_request(
method='POST', path=path, json=params)
def get_notebooks(
self,
organization_id: str,
job_definition_name: str) -> dict:
"""get notebooks.
API reference: GET /organizations/{organization_id}/training/definitions/{job_definition_name}/notebooks
Request Syntax:
.. code-block:: python
organization_id = "1410000000000"
job_definition_name = "test_job_definition"
response = api_client.get_notebooks(
organization_id, job_definition_name
)
Params:
- **organization_id** (str): organization id
- **job_definition_name** (str): training job definition name
Return type:
dict
Returns:
Response Syntax:
.. code-block:: python
{
"total": 1,
"offset": 0,
"limit": 10,
"entries": [
{
"job_definition_id": "1234567890123",
"training_notebook_id": "1410000000000",
"name": "notebook-3",
"description": None,
"status": "Pending",
"status_message": None,
"instance_type": "cpu-1",
"image": "abeja-inc/all-cpu:18.10",
"creator": {
"updated_at": "2018-01-04T03:02:12Z",
"role": "admin",
"is_registered": True,
"id": "1122334455660",
"email": "test@abeja.asia",
"display_name": None,
"created_at": "2017-05-26T01:38:46Z"
},
"created_at": "2018-06-07T04:42:34.913644Z",
"modified_at": "2018-06-07T04:42:34.913726Z"
}
]
}
Raises:
- NotFound
- Unauthorized: Authentication failed
- InternalServerError
"""
path = '/organizations/{}/training/definitions/{}/notebooks'.format(
organization_id, job_definition_name)
return self._connection.api_request(method='GET', path=path)
def get_notebook(
self,
organization_id: str,
job_definition_name: str,
            notebook_id: Optional[str] = None) -> dict:
"""get a notebook.
API reference: GET /organizations/{organization_id}/training/definitions/{job_definition_name}/notebooks/{notebook_id}
Request Syntax:
.. code-block:: python
organization_id = "1410000000000"
job_definition_name = "test_job_definition"
notebook_id = "1230000000000"
response = api_client.get_notebook(
organization_id, job_definition_name, notebook_id
)
Params:
- **organization_id** (str): organization id
- **job_definition_name** (str): training job definition name
- **notebook_id** (str): notebook id
Return type:
dict
Returns:
Response Syntax:
.. code-block:: python
{
"job_definition_id": "1234567890123",
"training_notebook_id": "1410000000000",
"name": "notebook-3",
"description": None,
"status": "Pending",
"status_message": None,
"instance_type": "cpu-1",
"image": "abeja-inc/all-cpu:18.10",
"creator": {
"updated_at": "2018-01-04T03:02:12Z",
"role": "admin",
"is_registered": True,
"id": "1122334455660",
"email": "test@abeja.asia",
"display_name": None,
"created_at": "2017-05-26T01:38:46Z"
},
"created_at": "2018-06-07T04:42:34.913644Z",
"modified_at": "2018-06-07T04:42:34.913726Z"
}
Raises:
- NotFound
- Unauthorized: Authentication failed
- InternalServerError
"""
path = '/organizations/{}/training/definitions/{}/notebooks/{}'.format(
organization_id, job_definition_name, notebook_id)
return self._connection.api_request(method='GET', path=path)
def update_notebook(
self,
organization_id: str,
job_definition_name: str,
notebook_id: str,
instance_type: Optional[str] = None,
image: Optional[str] = None,
notebook_type: Optional[str] = None) -> dict:
"""update a notebook.
API reference: PUT /organizations/{organization_id}/training/definitions/{job_definition_name}/notebooks/{notebook_id}
Request Syntax:
.. code-block:: python
organization_id = "1410000000000"
job_definition_name = "test_job_definition"
notebook_id = "1230000000000"
instance_type = 'cpu-1'
image = 'abeja-inc/all-cpu:19.10'
response = api_client.update_notebook(
organization_id, job_definition_name, notebook_id,
instance_type=instance_type, image=image
)
Params:
- **organization_id** (str): organization id
- **job_definition_name** (str): training job definition name
- **notebook_id** (str): notebook id
- **instance_type** (str): **[optional]** instance type (ex. cpu-1)
- **image** (str): **[optional]** runtime environment (ex. abeja-inc/all-cpu:19.10)
- **notebook_type** (str): **[optional]** notebook type (notebook or lab)
Return type:
dict
Returns:
Response Syntax:
.. code-block:: python
{
"job_definition_id": "1234567890123",
"training_notebook_id": 0,
"name": "notebook-3",
"description": None,
"status": "Pending",
"status_message": None,
"instance_type": "cpu-1",
"image": "abeja-inc/all-cpu:18.10",
"creator": {
"updated_at": "2018-01-04T03:02:12Z",
"role": "admin",
"is_registered": True,
"id": "1122334455660",
"email": "test@abeja.asia",
"display_name": None,
"created_at": "2017-05-26T01:38:46Z"
},
"created_at": "2018-06-07T04:42:34.913644Z",
"modified_at": "2018-06-07T04:42:34.913726Z"
}
Raises:
- NotFound
- BadRequest
- Unauthorized: Authentication failed
- InternalServerError
"""
params = {}
if instance_type is not None and InstanceType.to_enum(instance_type):
params['instance_type'] = instance_type
if image is not None and ImageType.to_enum(image):
params['image'] = image
if notebook_type is not None and NotebookType.to_enum(notebook_type):
params['notebook_type'] = notebook_type
path = '/organizations/{}/training/definitions/{}/notebooks/{}'.format(
organization_id, job_definition_name, notebook_id)
return self._connection.api_request(
method='PUT', path=path, json=params)
def delete_notebook(
self,
organization_id: str,
job_definition_name: str,
notebook_id: str) -> dict:
"""delete a notebook.
API reference: DELETE /organizations/{organization_id}/training/definitions/{job_definition_name}/notebooks/{notebook_id}
Request Syntax:
.. code-block:: python
organization_id = "1410000000000"
job_definition_name = "test_job_definition"
notebook_id = "1230000000000"
response = api_client.delete_notebook(
organization_id, job_definition_name, notebook_id
)
Params:
- **organization_id** (str): organization id
- **job_definition_name** (str): training job definition name
- **notebook_id** (str): notebook id
Return type:
dict
Returns:
Response Syntax:
.. code-block:: python
{
"value": {
"message": "1111111111111 deleted"
}
}
Raises:
- NotFound
- Unauthorized: Authentication failed
- InternalServerError
"""
path = '/organizations/{}/training/definitions/{}/notebooks/{}'.format(
organization_id, job_definition_name, notebook_id)
return self._connection.api_request(method='DELETE', path=path)
def start_notebook(
self,
organization_id: str,
job_definition_name: str,
notebook_id: str,
notebook_type: Optional[str] = None) -> dict:
"""start a notebook.
API reference: POST /organizations/{organization_id}/training/definitions/{job_definition_name}/notebooks/{notebook_id}/start
Request Syntax:
.. code-block:: python
organization_id = "1410000000000"
job_definition_name = "test_job_definition"
notebook_id = "1230000000000"
response = api_client.start_notebook(
organization_id, job_definition_name, notebook_id
)
Params:
- **organization_id** (str): organization id
- **job_definition_name** (str): training job definition name
- **notebook_id** (str): notebook id
- **notebook_type** (str): **[optional]** notebook type (notebook or lab)
Return type:
dict
Returns:
Response Syntax:
.. code-block:: python
{
"job_definition_id": "1234567890123",
"training_notebook_id": 0,
"name": "notebook-3",
"description": None,
"status": "Pending",
"status_message": None,
"instance_type": "cpu-1",
"image": "abeja-inc/all-cpu:18.10",
"creator": {
"updated_at": "2018-01-04T03:02:12Z",
"role": "admin",
"is_registered": True,
"id": "1122334455660",
"email": "test@abeja.asia",
"display_name": None,
"created_at": "2017-05-26T01:38:46Z"
},
"created_at": "2018-06-07T04:42:34.913644Z",
"modified_at": "2018-06-07T04:42:34.913726Z"
}
Raises:
- NotFound
- Unauthorized: Authentication failed
- InternalServerError
"""
params = {}
if notebook_type is not None and NotebookType.to_enum(notebook_type):
params['notebook_type'] = notebook_type
path = '/organizations/{}/training/definitions/{}/notebooks/{}/start'.format(
organization_id, job_definition_name, notebook_id)
return self._connection.api_request(
method='POST', path=path, json=params)
def stop_notebook(
self,
organization_id: str,
job_definition_name: str,
notebook_id: str) -> dict:
"""stop a notebook.
API reference: POST /organizations/{organization_id}/training/definitions/{job_definition_name}/notebooks/{notebook_id}/stop
Request Syntax:
.. code-block:: python
organization_id = "1410000000000"
job_definition_name = "test_job_definition"
notebook_id = "1230000000000"
response = api_client.stop_notebook(
organization_id, job_definition_name, notebook_id
)
Params:
- **organization_id** (str): organization id
- **job_definition_name** (str): training job definition name
- **notebook_id** (str): notebook id
Return type:
dict
Returns:
Response Syntax:
.. code-block:: python
{
"job_definition_id": "1234567890123",
"training_notebook_id": 0,
"name": "notebook-3",
"description": None,
"status": "Pending",
"status_message": None,
"instance_type": "cpu-1",
"image": "abeja-inc/all-cpu:18.10",
"creator": {
"updated_at": "2018-01-04T03:02:12Z",
"role": "admin",
"is_registered": True,
"id": "1122334455660",
"email": "test@abeja.asia",
"display_name": None,
"created_at": "2017-05-26T01:38:46Z"
},
"created_at": "2018-06-07T04:42:34.913644Z",
"modified_at": "2018-06-07T04:42:34.913726Z"
}
Raises:
- NotFound
- Unauthorized: Authentication failed
- InternalServerError
"""
path = '/organizations/{}/training/definitions/{}/notebooks/{}/stop'.format(
organization_id, job_definition_name, notebook_id)
return self._connection.api_request(method='POST', path=path, json={})
def get_notebook_recent_logs(
self,
organization_id: str,
job_definition_name: str,
notebook_id: str,
            next_forward_token: Optional[str] = None,
            next_backward_token: Optional[str] = None,
) -> dict:
"""get recent logs of the notebook.
API reference: GET /organizations/{organization_id}/training/definitions/{job_definition_name}/notebooks/{notebook_id}/recentlogs
Request Syntax:
.. code-block:: python
organization_id = "1410000000000"
job_definition_name = "test_job_definition"
notebook_id = "1230000000000"
response = api_client.get_notebook_recent_logs(
organization_id, job_definition_name, notebook_id
)
Params:
- **organization_id** (str): organization id
- **job_definition_name** (str): training job definition name
- **notebook_id** (str): notebook id
- **next_forward_token** (str): **[optional]** token for the next page of logs
            - **next_backward_token** (str): **[optional]** token for the previous page of logs
Return type:
dict
Returns:
Response Syntax:
.. code-block:: python
{
"events": [
{
"message": "start executing model with abeja-runtime-python36 (version: 0.X.X)",
"timestamp": "2019-10-16T00:00:00.000Z"
}
],
"next_backward_token": "...",
"next_forward_token": "..."
}
Raises:
- NotFound
- Unauthorized: Authentication failed
- InternalServerError
"""
params = {}
if next_forward_token:
params['next_forward_token'] = next_forward_token
if next_backward_token:
params['next_backward_token'] = next_backward_token
path = '/organizations/{}/training/definitions/{}/notebooks/{}/recentlogs'.format(
organization_id, job_definition_name, notebook_id)
return self._connection.api_request(
method='GET', path=path, params=params)
| 39.5
| 137
| 0.485933
| 1,758
| 20,935
| 5.569397
| 0.0876
| 0.092943
| 0.097232
| 0.066183
| 0.889797
| 0.878664
| 0.871515
| 0.865182
| 0.846696
| 0.831784
| 0
| 0.071597
| 0.412228
| 20,935
| 529
| 138
| 39.574669
| 0.724096
| 0.636207
| 0
| 0.72549
| 0
| 0
| 0.123508
| 0.093783
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078431
| false
| 0
| 0.029412
| 0
| 0.196078
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ecb49bb21b35339cb73ab865c201f6495e6361cb
| 29
|
py
|
Python
|
taskwiki/__init__.py
|
PtitCaius/taskwiki
|
8c4da3a744fd1aa22bfb0369658cecc925e77fd0
|
[
"MIT"
] | 465
|
2015-03-27T09:42:18.000Z
|
2020-07-18T20:35:19.000Z
|
taskwiki/__init__.py
|
PtitCaius/taskwiki
|
8c4da3a744fd1aa22bfb0369658cecc925e77fd0
|
[
"MIT"
] | 272
|
2015-01-10T20:38:02.000Z
|
2020-07-16T12:55:15.000Z
|
taskwiki/__init__.py
|
PtitCaius/taskwiki
|
8c4da3a744fd1aa22bfb0369658cecc925e77fd0
|
[
"MIT"
] | 66
|
2015-03-21T16:33:39.000Z
|
2020-07-12T09:20:29.000Z
|
# (c) 2014-2015, Tomas Babej
| 14.5
| 28
| 0.655172
| 5
| 29
| 3.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0.172414
| 29
| 1
| 29
| 29
| 0.458333
| 0.896552
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ecbc2b714802eef938dc23390f77656b110ad0d7
| 33,775
|
py
|
Python
|
admin/tools/crawler_db/PS3-PHPtoMYSQL.py
|
gorpoorko/TCXS-Project-Store-Web-PlayStation3-V3
|
58bf3ba08723d775e8b42e6dfa16ea5b79594d26
|
[
"MIT"
] | null | null | null |
admin/tools/crawler_db/PS3-PHPtoMYSQL.py
|
gorpoorko/TCXS-Project-Store-Web-PlayStation3-V3
|
58bf3ba08723d775e8b42e6dfa16ea5b79594d26
|
[
"MIT"
] | null | null | null |
admin/tools/crawler_db/PS3-PHPtoMYSQL.py
|
gorpoorko/TCXS-Project-Store-Web-PlayStation3-V3
|
58bf3ba08723d775e8b42e6dfa16ea5b79594d26
|
[
"MIT"
] | null | null | null |
import os
import pathlib
import sqlite3
from bs4 import BeautifulSoup
import pymysql.cursors
from datetime import datetime
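# Overall flow: parse the scraped store page (base.html) with BeautifulSoup,
# extract the title, description, content id, cover image and download links,
# then insert a single row into the playstation_ps3 table.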
# connect to the database
conexao = pymysql.connect(host = 'localhost',
user = 'root',
password = '',
db = 'tcxs_store',
charset = 'utf8mb4',
cursorclass = pymysql.cursors.DictCursor)
# initial variables: read and parse the scraped page
dados = open('base.html', 'r', encoding="utf-8").read()
dados = BeautifulSoup(dados, 'html5lib')
key_titulo = dados.find_all('h2', {'class':'titulo_jogo'})
key_desc = dados.find_all('p', {'class':'textoJogo'})
key_contentid = dados.find_all('a', href=True)
key_imagem = dados.find_all('img',{'class':'caixa_imagem'})
key_links = dados.find_all('a', href=True)
titulos = []
for titulo in key_titulo:
titulo = str(titulo).split('"titulo_jogo">')[1].split('</h')[0].replace("'","").replace('</h2>','').replace(':','')
titulos.append(titulo)
#print(titulo)
descricoes = []
for desc in key_desc:
desc = str(desc).split('textoJogo">')[1].replace('</p>','')
descricoes.append(desc)
#print(desc)
ids = []
invalidar = ['index.php','psp.php','ps1.php','ps2.php','ps3.php','emuladores.php','https://tcxsproject.com.br/doadores/','https://tcxsproject.com.br/dev/ps3xploit.com/']
for id in key_contentid:
id = id['href']
if id in invalidar:
pass
else:
try:
id = id.split('/')[5].split('.pkg')[0]
ids.append(id)
#print(id)
        except IndexError:
            # links without a '.pkg' path segment have no content id
            id = 'FALTA CONTENT_ID'
ids.append(id)
#print(id)
imagens = []
for imagem in key_imagem:
imagem = str(imagem).split('ps3/')[1].split('"/>')[0].replace('" width="170','')
imagens.append(imagem)
print(imagem)
links = []
invalidar = ['index.php','psp.php','ps1.php','ps2.php','ps3.php','emuladores.php','https://tcxsproject.com.br/doadores/','https://tcxsproject.com.br/dev/ps3xploit.com/']
for link in key_links:
link = link['href']
if link in invalidar:
        #print(f'Skipping {link}')
pass
else:
links.append(link)
        #print(f'saving {link}')
print(len(titulos), len(descricoes), len(imagens), len(links))
dicionario_jogos = list(zip(list(titulos), list(imagens), list(links)))
#print(dicionario_jogos)
now = datetime.now()
hoje = now.strftime('%Y-%m-%d %H:%M:%S')
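# The if-blocks below differ only in how many real links precede the '---'
# padding. A minimal sketch of a single equivalent code path (hypothetical
# helper, not called anywhere in this script) that pads the link list and
# uses parameterized queries instead of f-string interpolation, which also
# avoids the SQL injection risk of the original statements:
def insert_game_row(conexao, titulo, descricao, content_id, imagem, cadastro, links):
    valores = (links + ['---'] * 30)[:30]  # pad/trim to exactly 30 link columns
    colunas = ','.join(f'link{i}' for i in range(1, 31))
    marcadores = ','.join(['%s'] * 35)  # 5 fixed columns + 30 link columns
    sql = (f'INSERT INTO playstation_ps3 (titulo,descricao,content_id,imagem,'
           f'cadastro,{colunas}) VALUES ({marcadores})')
    with conexao.cursor() as cursor:
        cursor.execute(sql, [titulo, descricao, content_id, imagem, cadastro] + valores)
    conexao.commit()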
if len(links) == 30:
print('==== 30 LINKS ENCONTRADOS ======')
print(f'Titulo: {titulos[0]}')
print(f'Descrição: {descricoes[0]}')
print(f'ContentID: {ids[0]}')
print(f'Link:{links[0:]}')
with conexao.cursor() as cursor:
tabela = f"""INSERT INTO playstation_ps3 (titulo,descricao,content_id,imagem,cadastro,
link1,link2,link3,link4,link5,link6,link7,link8,link9,link10,link11,link12,link13,link14,
link15,link16,link17,link18,link19,link20,link21,link22,link23,link24,link25,link26,link27,
link28,link29,link30) VALUES ('{titulos[0]}','{descricoes[0]}','{ids[0]}','{imagens[0]}','{hoje}',
'{links[0]}','{links[1]}','{links[2]}','{links[3]}','{links[4]}','{links[5]}','{links[6]}',
'{links[7]}','{links[8]}','{links[9]}','{links[10]}','{links[11]}','{links[12]}','{links[13]}',
'{links[14]}','{links[15]}','{links[16]}','{links[17]}','{links[18]}','{links[19]}','{links[20]}',
'{links[21]}','{links[22]}','{links[23]}','{links[24]}','{links[25]}','{links[26]}','{links[27]}',
'{links[28]}','{links[29]}') """
cursor.execute(tabela)
conexao.commit()
conexao.close()
if len(links) == 29:
print('==== 29 LINKS ENCONTRADOS ======')
print(f'Titulo: {titulos[0]}')
print(f'Descrição: {descricoes[0]}')
print(f'ContentID: {ids[0]}')
print(f'Link:{links[0:]}')
with conexao.cursor() as cursor:
tabela = f"""INSERT INTO playstation_ps3 (titulo,descricao,content_id,imagem,cadastro,
link1,link2,link3,link4,link5,link6,link7,link8,link9,link10,link11,link12,link13,link14,
link15,link16,link17,link18,link19,link20,link21,link22,link23,link24,link25,link26,link27,
link28,link29,link30) VALUES ('{titulos[0]}','{descricoes[0]}','{ids[0]}','{imagens[0]}','{hoje}',
'{links[0]}','{links[1]}','{links[2]}','{links[3]}','{links[4]}','{links[5]}','{links[6]}',
'{links[7]}','{links[8]}','{links[9]}','{links[10]}','{links[11]}','{links[12]}','{links[13]}',
'{links[14]}','{links[15]}','{links[16]}','{links[17]}','{links[18]}','{links[19]}','{links[20]}',
'{links[21]}','{links[22]}','{links[23]}','{links[24]}','{links[25]}','{links[26]}','{links[27]}',
'{links[28]}','---') """
cursor.execute(tabela)
conexao.commit()
conexao.close()
if len(links) == 28:
print('==== 28 LINKS ENCONTRADOS ======')
print(f'Titulo: {titulos[0]}')
print(f'Descrição: {descricoes[0]}')
print(f'ContentID: {ids[0]}')
print(f'Link:{links[0:]}')
with conexao.cursor() as cursor:
tabela = f"""INSERT INTO playstation_ps3 (titulo,descricao,content_id,imagem,cadastro,
link1,link2,link3,link4,link5,link6,link7,link8,link9,link10,link11,link12,link13,link14,
link15,link16,link17,link18,link19,link20,link21,link22,link23,link24,link25,link26,link27,
link28,link29,link30) VALUES ('{titulos[0]}','{descricoes[0]}','{ids[0]}','{imagens[0]}','{hoje}',
'{links[0]}','{links[1]}','{links[2]}','{links[3]}','{links[4]}','{links[5]}','{links[6]}',
'{links[7]}','{links[8]}','{links[9]}','{links[10]}','{links[11]}','{links[12]}','{links[13]}',
'{links[14]}','{links[15]}','{links[16]}','{links[17]}','{links[18]}','{links[19]}','{links[20]}',
'{links[21]}','{links[22]}','{links[23]}','{links[24]}','{links[25]}','{links[26]}','{links[27]}',
'---','---') """
cursor.execute(tabela)
conexao.commit()
conexao.close()
if len(links) == 27:
print('==== 27 LINKS ENCONTRADOS ======')
print(f'Titulo: {titulos[0]}')
print(f'Descrição: {descricoes[0]}')
print(f'ContentID: {ids[0]}')
print(f'Link:{links[0:]}')
with conexao.cursor() as cursor:
tabela = f"""INSERT INTO playstation_ps3 (titulo,descricao,content_id,imagem,cadastro,
link1,link2,link3,link4,link5,link6,link7,link8,link9,link10,link11,link12,link13,link14,
link15,link16,link17,link18,link19,link20,link21,link22,link23,link24,link25,link26,link27,
link28,link29,link30) VALUES ('{titulos[0]}','{descricoes[0]}','{ids[0]}','{imagens[0]}','{hoje}',
'{links[0]}','{links[1]}','{links[2]}','{links[3]}','{links[4]}','{links[5]}','{links[6]}',
'{links[7]}','{links[8]}','{links[9]}','{links[10]}','{links[11]}','{links[12]}','{links[13]}',
'{links[14]}','{links[15]}','{links[16]}','{links[17]}','{links[18]}','{links[19]}','{links[20]}',
'{links[21]}','{links[22]}','{links[23]}','{links[24]}','{links[25]}','{links[26]}','---',
'---','---') """
cursor.execute(tabela)
conexao.commit()
conexao.close()
if len(links) == 26:
print('==== 26 LINKS ENCONTRADOS ======')
print(f'Titulo: {titulos[0]}')
print(f'Descrição: {descricoes[0]}')
print(f'ContentID: {ids[0]}')
print(f'Link:{links[0:]}')
with conexao.cursor() as cursor:
tabela = f"""INSERT INTO playstation_ps3 (titulo,descricao,content_id,imagem,cadastro,
link1,link2,link3,link4,link5,link6,link7,link8,link9,link10,link11,link12,link13,link14,
link15,link16,link17,link18,link19,link20,link21,link22,link23,link24,link25,link26,link27,
link28,link29,link30) VALUES ('{titulos[0]}','{descricoes[0]}','{ids[0]}','{imagens[0]}','{hoje}',
'{links[0]}','{links[1]}','{links[2]}','{links[3]}','{links[4]}','{links[5]}','{links[6]}',
'{links[7]}','{links[8]}','{links[9]}','{links[10]}','{links[11]}','{links[12]}','{links[13]}',
'{links[14]}','{links[15]}','{links[16]}','{links[17]}','{links[18]}','{links[19]}','{links[20]}',
'{links[21]}','{links[22]}','{links[23]}','{links[24]}','{links[25]}','---','---',
'---','---') """
cursor.execute(tabela)
conexao.commit()
conexao.close()
if len(links) == 25:
print('==== 25 LINKS ENCONTRADOS ======')
print(f'Titulo: {titulos[0]}')
print(f'Descrição: {descricoes[0]}')
print(f'ContentID: {ids[0]}')
print(f'Link:{links[0:]}')
with conexao.cursor() as cursor:
tabela = f"""INSERT INTO playstation_ps3 (titulo,descricao,content_id,imagem,cadastro,
link1,link2,link3,link4,link5,link6,link7,link8,link9,link10,link11,link12,link13,link14,
link15,link16,link17,link18,link19,link20,link21,link22,link23,link24,link25,link26,link27,
link28,link29,link30) VALUES ('{titulos[0]}','{descricoes[0]}','{ids[0]}','{imagens[0]}','{hoje}',
'{links[0]}','{links[1]}','{links[2]}','{links[3]}','{links[4]}','{links[5]}','{links[6]}',
'{links[7]}','{links[8]}','{links[9]}','{links[10]}','{links[11]}','{links[12]}','{links[13]}',
'{links[14]}','{links[15]}','{links[16]}','{links[17]}','{links[18]}','{links[19]}','{links[20]}',
'{links[21]}','{links[22]}','{links[23]}','{links[24]}','---','---','---',
'---','---') """
cursor.execute(tabela)
conexao.commit()
conexao.close()
if len(links) == 24:
print('==== 24 LINKS ENCONTRADOS ======')
print(f'Titulo: {titulos[0]}')
print(f'Descrição: {descricoes[0]}')
print(f'ContentID: {ids[0]}')
print(f'Link:{links[0:]}')
with conexao.cursor() as cursor:
tabela = f"""INSERT INTO playstation_ps3 (titulo,descricao,content_id,imagem,cadastro,
link1,link2,link3,link4,link5,link6,link7,link8,link9,link10,link11,link12,link13,link14,
link15,link16,link17,link18,link19,link20,link21,link22,link23,link24,link25,link26,link27,
link28,link29,link30) VALUES ('{titulos[0]}','{descricoes[0]}','{ids[0]}','{imagens[0]}','{hoje}',
'{links[0]}','{links[1]}','{links[2]}','{links[3]}','{links[4]}','{links[5]}','{links[6]}',
'{links[7]}','{links[8]}','{links[9]}','{links[10]}','{links[11]}','{links[12]}','{links[13]}',
'{links[14]}','{links[15]}','{links[16]}','{links[17]}','{links[18]}','{links[19]}','{links[20]}',
'{links[21]}','{links[22]}','{links[23]}','---','---','---','---',
'---','---') """
cursor.execute(tabela)
conexao.commit()
conexao.close()
if len(links) == 23:
print('==== 23 LINKS ENCONTRADOS ======')
print(f'Titulo: {titulos[0]}')
print(f'Descrição: {descricoes[0]}')
print(f'ContentID: {ids[0]}')
print(f'Link:{links[0:]}')
with conexao.cursor() as cursor:
tabela = f"""INSERT INTO playstation_ps3 (titulo,descricao,content_id,imagem,cadastro,
link1,link2,link3,link4,link5,link6,link7,link8,link9,link10,link11,link12,link13,link14,
link15,link16,link17,link18,link19,link20,link21,link22,link23,link24,link25,link26,link27,
link28,link29,link30) VALUES ('{titulos[0]}','{descricoes[0]}','{ids[0]}','{imagens[0]}','{hoje}',
'{links[0]}','{links[1]}','{links[2]}','{links[3]}','{links[4]}','{links[5]}','{links[6]}',
'{links[7]}','{links[8]}','{links[9]}','{links[10]}','{links[11]}','{links[12]}','{links[13]}',
'{links[14]}','{links[15]}','{links[16]}','{links[17]}','{links[18]}','{links[19]}','{links[20]}',
'{links[21]}','{links[22]}','---','---','---','---','---',
'---','---') """
cursor.execute(tabela)
conexao.commit()
conexao.close()
if len(links) == 22:
print('==== 22 LINKS ENCONTRADOS ======')
print(f'Titulo: {titulos[0]}')
print(f'Descrição: {descricoes[0]}')
print(f'ContentID: {ids[0]}')
print(f'Link:{links[0:]}')
with conexao.cursor() as cursor:
tabela = f"""INSERT INTO playstation_ps3 (titulo,descricao,content_id,imagem,cadastro,
link1,link2,link3,link4,link5,link6,link7,link8,link9,link10,link11,link12,link13,link14,
link15,link16,link17,link18,link19,link20,link21,link22,link23,link24,link25,link26,link27,
link28,link29,link30) VALUES ('{titulos[0]}','{descricoes[0]}','{ids[0]}','{imagens[0]}','{hoje}',
'{links[0]}','{links[1]}','{links[2]}','{links[3]}','{links[4]}','{links[5]}','{links[6]}',
'{links[7]}','{links[8]}','{links[9]}','{links[10]}','{links[11]}','{links[12]}','{links[13]}',
'{links[14]}','{links[15]}','{links[16]}','{links[17]}','{links[18]}','{links[19]}','{links[20]}',
'{links[21]}','---','---','---','---','---','---',
'---','---') """
cursor.execute(tabela)
conexao.commit()
conexao.close()
if len(links) == 21:
print('==== 21 LINKS ENCONTRADOS ======')
print(f'Titulo: {titulos[0]}')
print(f'Descrição: {descricoes[0]}')
print(f'ContentID: {ids[0]}')
print(f'Link:{links[0:]}')
with conexao.cursor() as cursor:
tabela = f"""INSERT INTO playstation_ps3 (titulo,descricao,content_id,imagem,cadastro,
link1,link2,link3,link4,link5,link6,link7,link8,link9,link10,link11,link12,link13,link14,
link15,link16,link17,link18,link19,link20,link21,link22,link23,link24,link25,link26,link27,
link28,link29,link30) VALUES ('{titulos[0]}','{descricoes[0]}','{ids[0]}','{imagens[0]}','{hoje}',
'{links[0]}','{links[1]}','{links[2]}','{links[3]}','{links[4]}','{links[5]}','{links[6]}',
'{links[7]}','{links[8]}','{links[9]}','{links[10]}','{links[11]}','{links[12]}','{links[13]}',
'{links[14]}','{links[15]}','{links[16]}','{links[17]}','{links[18]}','{links[19]}','{links[20]}',
'---','---','---','---','---','---','---',
'---','---') """
cursor.execute(tabela)
conexao.commit()
conexao.close()
if len(links) == 20:
print('==== 20 LINKS ENCONTRADOS ======')
print(f'Titulo: {titulos[0]}')
print(f'Descrição: {descricoes[0]}')
print(f'ContentID: {ids[0]}')
print(f'Link:{links[0:]}')
with conexao.cursor() as cursor:
tabela = f"""INSERT INTO playstation_ps3 (titulo,descricao,content_id,imagem,cadastro,
link1,link2,link3,link4,link5,link6,link7,link8,link9,link10,link11,link12,link13,link14,
link15,link16,link17,link18,link19,link20,link21,link22,link23,link24,link25,link26,link27,
link28,link29,link30) VALUES ('{titulos[0]}','{descricoes[0]}','{ids[0]}','{imagens[0]}','{hoje}',
'{links[0]}','{links[1]}','{links[2]}','{links[3]}','{links[4]}','{links[5]}','{links[6]}',
'{links[7]}','{links[8]}','{links[9]}','{links[10]}','{links[11]}','{links[12]}','{links[13]}',
'{links[14]}','{links[15]}','{links[16]}','{links[17]}','{links[18]}','{links[19]}','---',
'---','---','---','---','---','---','---',
'---','---') """
cursor.execute(tabela)
conexao.commit()
conexao.close()
if len(links) == 19:
print('==== 19 LINKS ENCONTRADOS ======')
print(f'Titulo: {titulos[0]}')
print(f'Descrição: {descricoes[0]}')
print(f'ContentID: {ids[0]}')
print(f'Link:{links[0:]}')
with conexao.cursor() as cursor:
tabela = f"""INSERT INTO playstation_ps3 (titulo,descricao,content_id,imagem,cadastro,
link1,link2,link3,link4,link5,link6,link7,link8,link9,link10,link11,link12,link13,link14,
link15,link16,link17,link18,link19,link20,link21,link22,link23,link24,link25,link26,link27,
link28,link29,link30) VALUES ('{titulos[0]}','{descricoes[0]}','{ids[0]}','{imagens[0]}','{hoje}',
'{links[0]}','{links[1]}','{links[2]}','{links[3]}','{links[4]}','{links[5]}','{links[6]}',
'{links[7]}','{links[8]}','{links[9]}','{links[10]}','{links[11]}','{links[12]}','{links[13]}',
'{links[14]}','{links[15]}','{links[16]}','{links[17]}','{links[18]}','---','---',
'---','---','---','---','---','---','---',
'---','---') """
cursor.execute(tabela)
conexao.commit()
conexao.close()
if len(links) == 18:
    print('==== 18 LINKS ENCONTRADOS ======')
print(f'Titulo: {titulos[0]}')
print(f'Descrição: {descricoes[0]}')
print(f'ContentID: {ids[0]}')
print(f'Link:{links[0:]}')
with conexao.cursor() as cursor:
tabela = f"""INSERT INTO playstation_ps3 (titulo,descricao,content_id,imagem,cadastro,
link1,link2,link3,link4,link5,link6,link7,link8,link9,link10,link11,link12,link13,link14,
link15,link16,link17,link18,link19,link20,link21,link22,link23,link24,link25,link26,link27,
link28,link29,link30) VALUES ('{titulos[0]}','{descricoes[0]}','{ids[0]}','{imagens[0]}','{hoje}',
'{links[0]}','{links[1]}','{links[2]}','{links[3]}','{links[4]}','{links[5]}','{links[6]}',
'{links[7]}','{links[8]}','{links[9]}','{links[10]}','{links[11]}','{links[12]}','{links[13]}',
'{links[14]}','{links[15]}','{links[16]}','{links[17]}','---','---','---',
'---','---','---','---','---','---','---',
'---','---') """
cursor.execute(tabela)
conexao.commit()
conexao.close()
if len(links) == 17:
print('==== 17 LINKS ENCONTRADOS ======')
print(f'Titulo: {titulos[0]}')
print(f'Descrição: {descricoes[0]}')
print(f'ContentID: {ids[0]}')
print(f'Link:{links[0:]}')
with conexao.cursor() as cursor:
tabela = f"""INSERT INTO playstation_ps3 (titulo,descricao,content_id,imagem,cadastro,
link1,link2,link3,link4,link5,link6,link7,link8,link9,link10,link11,link12,link13,link14,
link15,link16,link17,link18,link19,link20,link21,link22,link23,link24,link25,link26,link27,
link28,link29,link30) VALUES ('{titulos[0]}','{descricoes[0]}','{ids[0]}','{imagens[0]}','{hoje}',
'{links[0]}','{links[1]}','{links[2]}','{links[3]}','{links[4]}','{links[5]}','{links[6]}',
'{links[7]}','{links[8]}','{links[9]}','{links[10]}','{links[11]}','{links[12]}','{links[13]}',
'{links[14]}','{links[15]}','{links[16]}','---','---','---','---',
'---','---','---','---','---','---','---',
'---','---') """
cursor.execute(tabela)
conexao.commit()
conexao.close()
if len(links) == 16:
print('==== 16 LINKS ENCONTRADOS ======')
print(f'Titulo: {titulos[0]}')
print(f'Descrição: {descricoes[0]}')
print(f'ContentID: {ids[0]}')
print(f'Link:{links[0:]}')
with conexao.cursor() as cursor:
tabela = f"""INSERT INTO playstation_ps3 (titulo,descricao,content_id,imagem,cadastro,
link1,link2,link3,link4,link5,link6,link7,link8,link9,link10,link11,link12,link13,link14,
link15,link16,link17,link18,link19,link20,link21,link22,link23,link24,link25,link26,link27,
link28,link29,link30) VALUES ('{titulos[0]}','{descricoes[0]}','{ids[0]}','{imagens[0]}','{hoje}',
'{links[0]}','{links[1]}','{links[2]}','{links[3]}','{links[4]}','{links[5]}','{links[6]}',
'{links[7]}','{links[8]}','{links[9]}','{links[10]}','{links[11]}','{links[12]}','{links[13]}',
'{links[14]}','{links[15]}','---','---','---','---','---',
'---','---','---','---','---','---','---',
'---','---') """
cursor.execute(tabela)
conexao.commit()
conexao.close()
if len(links) == 15:
print('==== 15 LINKS ENCONTRADOS ======')
print(f'Titulo: {titulos[0]}')
print(f'Descrição: {descricoes[0]}')
print(f'ContentID: {ids[0]}')
print(f'Link:{links[0:]}')
with conexao.cursor() as cursor:
tabela = f"""INSERT INTO playstation_ps3 (titulo,descricao,content_id,imagem,cadastro,
link1,link2,link3,link4,link5,link6,link7,link8,link9,link10,link11,link12,link13,link14,
link15,link16,link17,link18,link19,link20,link21,link22,link23,link24,link25,link26,link27,
link28,link29,link30) VALUES ('{titulos[0]}','{descricoes[0]}','{ids[0]}','{imagens[0]}','{hoje}',
'{links[0]}','{links[1]}','{links[2]}','{links[3]}','{links[4]}','{links[5]}','{links[6]}',
'{links[7]}','{links[8]}','{links[9]}','{links[10]}','{links[11]}','{links[12]}','{links[13]}',
'{links[14]}','---','---','---','---','---','---',
'---','---','---','---','---','---','---',
'---','---') """
cursor.execute(tabela)
conexao.commit()
conexao.close()
if len(links) == 14:
print('==== 14 LINKS ENCONTRADOS ======')
print(f'Titulo: {titulos[0]}')
print(f'Descrição: {descricoes[0]}')
print(f'ContentID: {ids[0]}')
print(f'Link:{links[0:]}')
with conexao.cursor() as cursor:
tabela = f"""INSERT INTO playstation_ps3 (titulo,descricao,content_id,imagem,cadastro,
link1,link2,link3,link4,link5,link6,link7,link8,link9,link10,link11,link12,link13,link14,
link15,link16,link17,link18,link19,link20,link21,link22,link23,link24,link25,link26,link27,
link28,link29,link30) VALUES ('{titulos[0]}','{descricoes[0]}','{ids[0]}','{imagens[0]}','{hoje}',
'{links[0]}','{links[1]}','{links[2]}','{links[3]}','{links[4]}','{links[5]}','{links[6]}',
'{links[7]}','{links[8]}','{links[9]}','{links[10]}','{links[11]}','{links[12]}','{links[13]}',
'---','---','---','---','---','---','---',
'---','---','---','---','---','---','---',
'---','---') """
cursor.execute(tabela)
conexao.commit()
conexao.close()
if len(links) == 13:
print('==== 13 LINKS ENCONTRADOS ======')
print(f'Titulo: {titulos[0]}')
print(f'Descrição: {descricoes[0]}')
print(f'ContentID: {ids[0]}')
print(f'Link:{links[0:]}')
with conexao.cursor() as cursor:
tabela = f"""INSERT INTO playstation_ps3 (titulo,descricao,content_id,imagem,cadastro,
link1,link2,link3,link4,link5,link6,link7,link8,link9,link10,link11,link12,link13,link14,
link15,link16,link17,link18,link19,link20,link21,link22,link23,link24,link25,link26,link27,
link28,link29,link30) VALUES ('{titulos[0]}','{descricoes[0]}','{ids[0]}','{imagens[0]}','{hoje}',
'{links[0]}','{links[1]}','{links[2]}','{links[3]}','{links[4]}','{links[5]}','{links[6]}',
'{links[7]}','{links[8]}','{links[9]}','{links[10]}','{links[11]}','{links[12]}','---',
'---','---','---','---','---','---','---',
'---','---','---','---','---','---','---',
'---','---') """
cursor.execute(tabela)
conexao.commit()
conexao.close()
if len(links) == 12:
print('==== 12 LINKS ENCONTRADOS ======')
print(f'Titulo: {titulos[0]}')
print(f'Descrição: {descricoes[0]}')
print(f'ContentID: {ids[0]}')
print(f'Link:{links[0:]}')
with conexao.cursor() as cursor:
tabela = f"""INSERT INTO playstation_ps3 (titulo,descricao,content_id,imagem,cadastro,
link1,link2,link3,link4,link5,link6,link7,link8,link9,link10,link11,link12,link13,link14,
link15,link16,link17,link18,link19,link20,link21,link22,link23,link24,link25,link26,link27,
link28,link29,link30) VALUES ('{titulos[0]}','{descricoes[0]}','{ids[0]}','{imagens[0]}','{hoje}',
'{links[0]}','{links[1]}','{links[2]}','{links[3]}','{links[4]}','{links[5]}','{links[6]}',
'{links[7]}','{links[8]}','{links[9]}','{links[10]}','{links[11]}','---','---',
'---','---','---','---','---','---','---',
'---','---','---','---','---','---','---',
'---','---') """
cursor.execute(tabela)
conexao.commit()
conexao.close()
if len(links) == 11:
print('==== 11 LINKS ENCONTRADOS ======')
print(f'Titulo: {titulos[0]}')
print(f'Descrição: {descricoes[0]}')
print(f'ContentID: {ids[0]}')
print(f'Link:{links[0:]}')
with conexao.cursor() as cursor:
tabela = f"""INSERT INTO playstation_ps3 (titulo,descricao,content_id,imagem,cadastro,
link1,link2,link3,link4,link5,link6,link7,link8,link9,link10,link11,link12,link13,link14,
link15,link16,link17,link18,link19,link20,link21,link22,link23,link24,link25,link26,link27,
link28,link29,link30) VALUES ('{titulos[0]}','{descricoes[0]}','{ids[0]}','{imagens[0]}','{hoje}',
'{links[0]}','{links[1]}','{links[2]}','{links[3]}','{links[4]}','{links[5]}','{links[6]}',
'{links[7]}','{links[8]}','{links[9]}','{links[10]}','---','---','---',
'---','---','---','---','---','---','---',
'---','---','---','---','---','---','---',
'---','---') """
cursor.execute(tabela)
conexao.commit()
conexao.close()
if len(links) == 10:
print('==== 10 LINKS ENCONTRADOS ======')
print(f'Titulo: {titulos[0]}')
print(f'Descrição: {descricoes[0]}')
print(f'ContentID: {ids[0]}')
print(f'Link:{links[0:]}')
with conexao.cursor() as cursor:
tabela = f"""INSERT INTO playstation_ps3 (titulo,descricao,content_id,imagem,cadastro,
link1,link2,link3,link4,link5,link6,link7,link8,link9,link10,link11,link12,link13,link14,
link15,link16,link17,link18,link19,link20,link21,link22,link23,link24,link25,link26,link27,
link28,link29,link30) VALUES ('{titulos[0]}','{descricoes[0]}','{ids[0]}','{imagens[0]}','{hoje}',
'{links[0]}','{links[1]}','{links[2]}','{links[3]}','{links[4]}','{links[5]}','{links[6]}',
'{links[7]}','{links[8]}','{links[9]}','---','---','---','---',
'---','---','---','---','---','---','---',
'---','---','---','---','---','---','---',
'---','---') """
cursor.execute(tabela)
conexao.commit()
conexao.close()
if len(links) == 9:
print('==== 9 LINKS ENCONTRADOS ======')
print(f'Titulo: {titulos[0]}')
print(f'Descrição: {descricoes[0]}')
print(f'ContentID: {ids[0]}')
print(f'Link:{links[0:]}')
with conexao.cursor() as cursor:
tabela = f"""INSERT INTO playstation_ps3 (titulo,descricao,content_id,imagem,cadastro,
link1,link2,link3,link4,link5,link6,link7,link8,link9,link10,link11,link12,link13,link14,
link15,link16,link17,link18,link19,link20,link21,link22,link23,link24,link25,link26,link27,
link28,link29,link30) VALUES ('{titulos[0]}','{descricoes[0]}','{ids[0]}','{imagens[0]}','{hoje}',
'{links[0]}','{links[1]}','{links[2]}','{links[3]}','{links[4]}','{links[5]}','{links[6]}',
'{links[7]}','{links[8]}','---','---','---','---','---',
'---','---','---','---','---','---','---',
'---','---','---','---','---','---','---',
'---','---') """
cursor.execute(tabela)
conexao.commit()
conexao.close()
if len(links) == 8:
print('==== 8 LINKS ENCONTRADOS ======')
print(f'Titulo: {titulos[0]}')
print(f'Descrição: {descricoes[0]}')
print(f'ContentID: {ids[0]}')
print(f'Link:{links[0:]}')
with conexao.cursor() as cursor:
tabela = f"""INSERT INTO playstation_ps3 (titulo,descricao,content_id,imagem,cadastro,
link1,link2,link3,link4,link5,link6,link7,link8,link9,link10,link11,link12,link13,link14,
link15,link16,link17,link18,link19,link20,link21,link22,link23,link24,link25,link26,link27,
link28,link29,link30) VALUES ('{titulos[0]}','{descricoes[0]}','{ids[0]}','{imagens[0]}','{hoje}',
'{links[0]}','{links[1]}','{links[2]}','{links[3]}','{links[4]}','{links[5]}','{links[6]}',
'{links[7]}','---','---','---','---','---','---',
'---','---','---','---','---','---','---',
'---','---','---','---','---','---','---',
'---','---') """
cursor.execute(tabela)
conexao.commit()
conexao.close()
if len(links) == 7:
print('==== 7 LINKS ENCONTRADOS ======')
print(f'Titulo: {titulos[0]}')
print(f'Descrição: {descricoes[0]}')
print(f'ContentID: {ids[0]}')
print(f'Link:{links[0:]}')
with conexao.cursor() as cursor:
tabela = f"""INSERT INTO playstation_ps3 (titulo,descricao,content_id,imagem,cadastro,
link1,link2,link3,link4,link5,link6,link7,link8,link9,link10,link11,link12,link13,link14,
link15,link16,link17,link18,link19,link20,link21,link22,link23,link24,link25,link26,link27,
link28,link29,link30) VALUES ('{titulos[0]}','{descricoes[0]}','{ids[0]}','{imagens[0]}','{hoje}',
'{links[0]}','{links[1]}','{links[2]}','{links[3]}','{links[4]}','{links[5]}','{links[6]}',
'---','---','---','---','---','---','---',
'---','---','---','---','---','---','---',
'---','---','---','---','---','---','---',
'---','---') """
cursor.execute(tabela)
conexao.commit()
conexao.close()
if len(links) == 6:
print('==== 6 LINKS ENCONTRADOS ======')
print(f'Titulo: {titulos[0]}')
print(f'Descrição: {descricoes[0]}')
print(f'ContentID: {ids[0]}')
print(f'Link:{links[0:]}')
with conexao.cursor() as cursor:
tabela = f"""INSERT INTO playstation_ps3 (titulo,descricao,content_id,imagem,cadastro,
link1,link2,link3,link4,link5,link6,link7,link8,link9,link10,link11,link12,link13,link14,
link15,link16,link17,link18,link19,link20,link21,link22,link23,link24,link25,link26,link27,
link28,link29,link30) VALUES ('{titulos[0]}','{descricoes[0]}','{ids[0]}','{imagens[0]}','{hoje}',
'{links[0]}','{links[1]}','{links[2]}','{links[3]}','{links[4]}','{links[5]}','---',
'---','---','---','---','---','---','---',
'---','---','---','---','---','---','---',
'---','---','---','---','---','---','---',
'---','---') """
cursor.execute(tabela)
conexao.commit()
conexao.close()
if len(links) == 5:
print('==== 5 LINKS ENCONTRADOS ======')
print(f'Titulo: {titulos[0]}')
print(f'Descrição: {descricoes[0]}')
print(f'ContentID: {ids[0]}')
print(f'Link:{links[0:]}')
with conexao.cursor() as cursor:
tabela = f"""INSERT INTO playstation_ps3 (titulo,descricao,content_id,imagem,cadastro,
link1,link2,link3,link4,link5,link6,link7,link8,link9,link10,link11,link12,link13,link14,
link15,link16,link17,link18,link19,link20,link21,link22,link23,link24,link25,link26,link27,
link28,link29,link30) VALUES ('{titulos[0]}','{descricoes[0]}','{ids[0]}','{imagens[0]}','{hoje}',
'{links[0]}','{links[1]}','{links[2]}','{links[3]}','{links[4]}','---','---',
'---','---','---','---','---','---','---',
'---','---','---','---','---','---','---',
'---','---','---','---','---','---','---',
'---','---') """
cursor.execute(tabela)
conexao.commit()
conexao.close()
if len(links) == 4:
print('==== 4 LINKS ENCONTRADOS ======')
print(f'Titulo: {titulos[0]}')
print(f'Descrição: {descricoes[0]}')
print(f'ContentID: {ids[0]}')
print(f'Link:{links[0:]}')
with conexao.cursor() as cursor:
tabela = f"""INSERT INTO playstation_ps3 (titulo,descricao,content_id,imagem,cadastro,
link1,link2,link3,link4,link5,link6,link7,link8,link9,link10,link11,link12,link13,link14,
link15,link16,link17,link18,link19,link20,link21,link22,link23,link24,link25,link26,link27,
link28,link29,link30) VALUES ('{titulos[0]}','{descricoes[0]}','{ids[0]}','{imagens[0]}','{hoje}',
'{links[0]}','{links[1]}','{links[2]}','{links[3]}','---','---','---',
'---','---','---','---','---','---','---',
'---','---','---','---','---','---','---',
'---','---','---','---','---','---','---',
'---','---') """
cursor.execute(tabela)
conexao.commit()
conexao.close()
if len(links) == 3:
print('==== 3 LINKS ENCONTRADOS ======')
print(f'Titulo: {titulos[0]}')
print(f'Descrição: {descricoes[0]}')
print(f'ContentID: {ids[0]}')
print(f'Link:{links[0:]}')
with conexao.cursor() as cursor:
tabela = f"""INSERT INTO playstation_ps3 (titulo,descricao,content_id,imagem,cadastro,
link1,link2,link3,link4,link5,link6,link7,link8,link9,link10,link11,link12,link13,link14,
link15,link16,link17,link18,link19,link20,link21,link22,link23,link24,link25,link26,link27,
link28,link29,link30) VALUES ('{titulos[0]}','{descricoes[0]}','{ids[0]}','{imagens[0]}','{hoje}',
'{links[0]}','{links[1]}','{links[2]}','---','---','---','---',
'---','---','---','---','---','---','---',
'---','---','---','---','---','---','---',
'---','---','---','---','---','---','---',
'---','---') """
cursor.execute(tabela)
conexao.commit()
conexao.close()
if len(links) == 2:
print('==== 2 LINKS ENCONTRADOS ======')
print(f'Titulo: {titulos[0]}')
print(f'Descrição: {descricoes[0]}')
print(f'ContentID: {ids[0]}')
print(f'Link:{links[0:]}')
with conexao.cursor() as cursor:
tabela = f"""INSERT INTO playstation_ps3 (titulo,descricao,content_id,imagem,cadastro,
link1,link2,link3,link4,link5,link6,link7,link8,link9,link10,link11,link12,link13,link14,
link15,link16,link17,link18,link19,link20,link21,link22,link23,link24,link25,link26,link27,
link28,link29,link30) VALUES ('{titulos[0]}','{descricoes[0]}','{ids[0]}','{imagens[0]}','{hoje}',
'{links[0]}','{links[1]}','---','---','---','---','---',
'---','---','---','---','---','---','---',
'---','---','---','---','---','---','---',
'---','---','---','---','---','---','---',
'---','---') """
cursor.execute(tabela)
conexao.commit()
conexao.close()
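# A minimal connection sketch (an assumption: the script never shows how
# `conexao` is created; pymysql is one driver whose cursors accept the `%s`
# placeholders used above, and the credentials below are placeholders):
# import pymysql
# conexao = pymysql.connect(host='localhost', user='usuario',
#                           password='senha', database='jogos')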
# ---------------------------------------------------------------------------
# Source file: largescale/src/neuron/neuron/__init__.py (Python, 188 bytes)
# Repository: cosmozhang-lab/motion-illusion-model @ 32a5ccab920095818b220642bae491429ff71f27
# License: MIT | blob 01d12b115e20541f808963ebd17fccca6ce7f41b
# ---------------------------------------------------------------------------
# Package: largescale.src.neuron.neuron
# Explicit relative imports (Python 3 compatible; assumes the sibling module
# neuron.py inside this package provides these names).
from .neuron import NeuronGroup
from .neuron import T_EXCITATORY, T_INHIBITORY, T_EXC, T_E, T_INH, T_I
from .neuron import T_ON, T_OFF, T_O, T_F
# ---------------------------------------------------------------------------
# Source file: AlignmentPracticableIORepa.py (Python, 43,415 bytes)
# Repository: caiks/AlignmentRepaPy @ 7b67e5e1ed7a40fc0c9588b92d72536b12edaf11
# License: MIT | blob 01e8d97a7e29b1604d1107707c9dd60bfb163ffc
# ---------------------------------------------------------------------------
from AlignmentPracticableRepa import *
import logging
from timeit import default_timer as timer
from sys import stdout
# logging.basicConfig(format='%(asctime)s : %(name)s : %(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=logging.INFO)
# logging.basicConfig(format='%(message)s', level=logging.INFO)
logging.basicConfig(format='%(message)s')
layerer_log = logging.getLogger('layerer')
layerer_log.setLevel(logging.INFO)
tupler_log = logging.getLogger('tupler')
tupler_log.setLevel(logging.INFO)
parter_log = logging.getLogger('parter')
parter_log.setLevel(logging.INFO)
roller_log = logging.getLogger('roller')
roller_log.setLevel(logging.INFO)
applier_log = logging.getLogger('applier')
applier_log.setLevel(logging.INFO)
dervarser_log = logging.getLogger('dervarser')
dervarser_log.setLevel(logging.INFO)
decomper_log = logging.getLogger('decomper')
decomper_log.setLevel(logging.INFO)
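# Each pipeline stage gets its own logger above, so one stage can be
# quietened without touching the others, e.g.:
# logging.getLogger('tupler').setLevel(logging.WARNING)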
# parametersSystemsLayererMaxRollByMExcludedSelfHighestIORepa_u ::
# Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer ->
# System -> Set.Set Variable -> HistoryRepa -> HistogramRepaRed -> HistoryRepa -> HistogramRepaRed -> Integer ->
# IO (System, Fud, [(Set.Set Variable, Double)])
def parametersSystemsLayererMaxRollByMExcludedSelfHighestIORepa_u(wmax,lmax,xmax,omax,bmax,mmax,umax,pmax,uu,vv,xx,xxp,xxrr,xxrrp,f):
repaRounding = 1e-6
def sgl(x):
return sset([x])
def maxr(mm):
if len(mm) > 0:
return list(sset([b for (_,b) in mm]))[-1:][0]
return 0
uvars = systemsSetVar
cart = systemsSetVarsSetStateCartesian_u
lluu = listsSystem_u
uunion = pairSystemsUnion
sunion = pairStatesUnionLeft
ssgl = stateSingleton
llaa = listsHistogram_u
hhvvr = historyRepasVectorVar
apvvr = histogramRepaRedsVectorVar
hrhx = historyRepasRed
def unit(qq):
return llaa([(ss,1) for ss in qq])
tttr = systemsTransformsTransformRepa_u
apply = historyRepasListTransformRepasApply_u
trans = histogramsSetVarsTransform_u
ttpp = transformsPartition
und = transformsUnderlying
qqff = setTransformsFud_u
ffqq = fudsSetTransform
def funion(ff,gg):
return qqff(ffqq(ff) | ffqq(gg))
def buildfftup(uu,vv,ff,hh,hhp,hhrr,hhrrp):
return parametersSystemsBuilderTupleNoSumlayerMultiEffectiveRepa_ui(xmax,omax,bmax,mmax,uu,vv,ff,hh,hhp,hhrr,hhrrp)
def parter(uu,kk,bb,y1):
return parametersSystemsPartitionerMaxRollByMRepa_ui(mmax,umax,pmax,uu,kk,bb,y1)
def roller(qq):
return parametersRollerMaximumRollExcludedSelfRepa_i(qq)
def buildffdervar(uu,vv,ff,xx,xxp,xxrr,xxrrp):
(x1,s1) = parametersSystemsBuilderDerivedVarsHighestNoSumlayerRepa_ui(wmax,omax,uu,vv,ff,xx,xxp,xxrr,xxrrp)
return ([(kk,a) for ((kk,_,_),a) in x1],s1)
def layer(vv,uu,ff,mm,xx,xxp,xxrr,xxrrp,f,l):
if l > lmax:
return (uu,ff,mm)
layerer_log.info(">>> layer\tfud: %d\tlayer: %d" % (f,l))
t1 = timer()
tupler_log.info(">>> tupler")
tupler_log.info("substrate cardinality: %d" % len(vv))
tupler_log.info("fud cardinality: %d" % len(ffqq(ff)))
stdout.flush()
(x2,s2) = buildfftup(uu,vv,ff,xx,xxp,xxrr,xxrrp)
if len(x2) > 0:
tupler_log.info("tuple cardinality: %d" % len(x2))
tupler_log.info("max tuple algn: %.2f" % max([b for (a,b) in x2]))
else:
tupler_log.info("no tuples")
t2 = timer()
tupler_log.info("tupler\tsearched: %d\trate: %.2f" % (s2,s2/(t2-t1)))
tupler_log.info("<<< tupler %.3fs" % (t2-t1))
parter_log.info(">>> parter")
stdout.flush()
y3 = [parter(uu,kk,bb,y1) for ((kk,bb),y1) in x2]
x3 = [x for (ll,_) in y3 for x in ll]
s3 = sum([s for (_,s) in y3])
if len(x3) > 0:
parter_log.info("partitions cardinality: %d" % len(x3))
else:
parter_log.info("no tuple partitions")
t3 = timer()
parter_log.info("parter\tsearched: %d\trate: %.2f" % (s3,s3/(t3-t2)))
parter_log.info("<<< parter %.3fs" % (t3-t2))
roller_log.info(">>> roller")
stdout.flush()
y4 = [roller(qq) for qq in x3]
x4 = [x for (ll,_) in y4 for x in ll]
s4 = sum([s for (_,s) in y4])
if len(x4) > 0:
roller_log.info("roll cardinality: %d" % len(x4))
else:
roller_log.info("no rolls")
t4 = timer()
roller_log.info("roller\tsearched: %d\trate: %.2f" % (s4,s4/(t4-t3)))
roller_log.info("<<< roller %.3fs" % (t4-t3))
applier_log.info(">>> application")
stdout.flush()
ll0 = []
for (yy,pp) in x4:
for (jj,p) in zip(yy,pp):
if max(p) + 1 < len(p):
ii = list(zip(cart(uu,jj),p))
ll0.append(ii)
ll = []
for (b,ii) in enumerate(ll0):
w = VarPair((VarPair((VarInt(f),VarInt(l))),VarInt(b+1)))
ww = sset([ValInt(u) for (_,u) in ii])
tt = trans(unit([sunion(ss,ssgl(w,ValInt(u))) for (ss,u) in ii]),sgl(w))
ll.append((tt,(w,ww)))
ll1 = []
for (tt,(w,ww)) in ll:
if all([len(ww) != len(ww1) or und(tt) != und(tt1) or ttpp(tt) != ttpp(tt1) for (tt1,(w1,ww1)) in ll if w > w1]):
ll1.append((tt,(w,ww)))
if len(ll1) > 0:
hh = qqff(sset([tt for (tt,_) in ll1]))
uu1 = uunion(uu,lluu([(w,ww) for (_,(w,ww)) in ll1]))
ffr = [tttr(uu1,tt) for (tt,_) in ll1]
xx1 = apply(xx,ffr)
xxp1 = hrhx(xx1)
xxrr1 = apply(xxrr,ffr)
xxrrp1 = hrhx(xxrr1)
gg = funion(ff,hh)
applier_log.info("fud cardinality: %d" % len(ffqq(gg)))
t5 = timer()
applier_log.info("<<< application %.3fs" % (t5-t4))
dervarser_log.info( ">>> dervarser")
stdout.flush()
(mm1,s5) = buildffdervar(uu1,vv,gg,xx1,xxp1,xxrr1,xxrrp1)
if len(mm1) > 0:
dervarser_log.info("der vars algn density: %.2f" % maxr(mm1))
else:
dervarser_log.info("no der vars sets")
t6 = timer()
dervarser_log.info("dervarser\tsearched: %d\trate: %.2f" % (s5,s5/(t6-t5)))
dervarser_log.info("<<< dervarser %.3fs" % (t6-t5))
layerer_log.info( "<<< layer %.3fs" % (t6-t1))
stdout.flush()
if l <= lmax and (len(mm) == 0 or maxr(mm1) > maxr(mm) + repaRounding):
(ffr,ll0,ll,ll1) = (None,None,None,None)
(x2,x3,x4) = (None,None,None)
return layer(vv,uu1,gg,mm1,xx1,xxp1,xxrr1,xxrrp1,f,l+1)
else:
t5 = timer()
applier_log.info("<<< application %.3fs" % (t5-t4))
layerer_log.info( "<<< layer %.3fs" % (t5-t1))
stdout.flush()
return (uu,ff,mm)
layerer_log.info(">>> layerer")
t1 = timer()
x1 = layer(vv,uu,fudEmpty(),[],xx,xxp,xxrr,xxrrp,f,1)
t2 = timer()
layerer_log.info("<<< layerer %.3fs" % (t2-t1))
stdout.flush()
return x1
# parametersSystemsHistoryRepasDecomperMaxRollByMExcludedSelfHighestFmaxIORepa ::
# Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer ->
# Integer -> Integer ->
# System -> Set.Set Variable -> HistoryRepa ->
# IO (Maybe (System, DecompFud))
def parametersSystemsHistoryRepasDecomperMaxRollByMExcludedSelfHighestFmaxIORepa(wmax,lmax,xmax,omax,bmax,mmax,umax,pmax,fmax,mult,seed,uu,vv,aa):
repaRounding = 1e-6
dom = relationsDomain
def maxd(mm):
if len(mm) > 0:
return list(sset([(b,a) for (a,b) in mm]))[-1]
return (0,sset())
def tsgl(r):
return sdict([(r,sdict())])
uvars = systemsSetVar
acard = histogramsCardinality
trim = histogramsTrim
aall = histogramsList
def red(aa,vv):
return setVarsHistogramsReduce(vv,aa)
def unit(ss):
return setStatesHistogramUnit(sset([ss]))
aahh = histogramsHistory
hhhr = systemsHistoriesHistoryRepa
def vars(hr):
return sset(historyRepasVectorVar(hr))
size = historyRepasSize
rraa = systemsHistogramRepasHistogram
hrhx = historyRepasRed
def hrhrred(hr,vv):
return setVarsHistoryRepasHistoryRepaReduced(vv,hr)
def hrred(hr,vv):
return setVarsHistoryRepasReduce(1,vv,hr)
def reduce(uu,ww,hh):
return rraa(uu,hrred(hh,ww))
def select(uu,ss,hh):
return historyRepasHistoryRepasHistoryRepaSelection_u(hhhr(uu,aahh(unit(ss))),hh)
hrconcat = vectorHistoryRepasConcat_u
hrshuffle = historyRepasShuffle_u
ffqq = fudsSetTransform
fder = fudsDerived
tttr = systemsTransformsTransformRepa_u
def apply(uu,ff,hh):
return historyRepasListTransformRepasApply(hh,[tttr(uu,tt) for tt in ffqq(ff)])
depends = fudsSetVarsDepends
zzdf = treePairStateFudsDecompFud
dfzz = decompFudsTreePairStateFud
def zztrim(df):
pp = []
for ll in treesPaths(df):
(_,ff) = ll[-1]
if len(ff) == 0:
pp.append(ll[:-1])
else:
pp.append(ll)
return pathsTree(pp)
def layerer(uu,xx,f):
decomper_log.info(">>> repa shuffle")
stdout.flush()
t1 = timer()
z = size(xx)
xxrr = hrconcat([hrshuffle(xx,seed+i*z) for i in range(1,mult+1)])
t2 = timer()
decomper_log.info("<<< repa shuffle %.3fs" % (t2-t1))
decomper_log.info(">>> repa perimeters")
stdout.flush()
t1 = timer()
xxp = hrhx(xx)
xxrrp = hrhx(xxrr)
t2 = timer()
decomper_log.info("<<< repa perimeters %.3fs" % (t2-t1))
return parametersSystemsLayererMaxRollByMExcludedSelfHighestIORepa_u(wmax,lmax,xmax,omax,bmax,mmax,umax,pmax,uu,vv,xx,xxp,xxrr,xxrrp,f)
def decomp(uu,zz,qq,f):
if len(zz) == 0:
(uur,ffr,nnr) = layerer(uu,aa,f)
if len(ffr) == 0 or len(nnr) == 0:
return (uu, decompFudEmpty())
(ar,kkr) = maxd(nnr)
if ar <= repaRounding:
return (uu, decompFudEmpty())
decomper_log.info(">>> slicing")
stdout.flush()
t3 = timer()
ffr1 = depends(ffr,kkr)
decomper_log.info("dependent fud cardinality : %d" % len(ffqq(ffr1)))
aar = apply(uur,ffr1,aa)
aa1 = trim(reduce(uur,fder(ffr1),aar))
decomper_log.info("derived cardinality : %d" % acard(aa1))
zzr = tsgl((stateEmpty(),ffr1))
qq[(stateEmpty(),ffr1)] = (aar,aa1)
(ffr,nnr,kkr) = (None,None,None)
t4 = timer()
decomper_log.info("<<< slicing %.3fs" % (t4-t3))
stdout.flush()
return decomp(uur,zzr,qq,f+1)
if fmax > 0 and f > fmax:
return (uu,zzdf(zztrim(zz)))
decomper_log.info(">>> slice selection")
stdout.flush()
t1 = timer()
mm = []
for (nn,yy) in treesPlaces(zz):
(rr,ff) = nn[-1]
if len(ff) > 0:
(bb,bb1) = qq[(rr,ff)]
tt = dom(treesRoots(yy))
for (ss,a) in aall(red(bb1,fder(ff))):
if a > 0 and ss not in tt:
mm.append((a,(nn,ss,bb)))
decomper_log.info("slices: %d" % len(mm))
if len(mm) == 0:
t2 = timer()
decomper_log.info("<<< slice selection %.3fs" % (t2-t1))
stdout.flush()
return (uu,zzdf(zztrim(zz)))
mm.sort(key = lambda x: x[0])
(a,(nn,ss,bb)) = mm[-1]
cc = hrhrred(select(uu,ss,bb),vars(aa))
decomper_log.info("decomp path length: %d" % len(nn))
decomper_log.info("slice size: %d" % a)
t2 = timer()
decomper_log.info("<<< slice selection %.3fs" % (t2-t1))
stdout.flush()
(uuc,ffc,nnc) = layerer(uu,cc,f)
decomper_log.info(">>> slicing")
stdout.flush()
t3 = timer()
(ac,kkc) = maxd(nnc)
ffc1 = fudEmpty()
if ac > repaRounding:
ffc1 = depends(ffc,kkc)
decomper_log.info("dependent fud cardinality : %d" % len(ffqq(ffc1)))
ccc = apply(uuc,ffc1,cc)
cc1 = trim(reduce(uuc,fder(ffc1),ccc))
decomper_log.info("derived cardinality : %d" % acard(cc1))
qq[(ss,ffc1)] = (ccc,cc1)
zzc = pathsTree(treesPaths(zz) + [nn+[(ss,ffc1)]])
(mm,cc,ffc,nnc,kkc) = (None,None,None,None,None)
t4 = timer()
decomper_log.info("<<< slicing %.3fs" % (t4-t3))
stdout.flush()
return decomp(uuc,zzc,qq,f+1)
if wmax < 0 or lmax < 0 or xmax < 0 or omax < 0 or bmax < 0 or mmax < 1 or umax < 0 or pmax < 0:
return None
if size(aa) == 0 or mult < 1:
return None
if not (vars(aa).issubset(uvars(uu)) and vv.issubset(vars(aa))):
return None
decomper_log.info(">>> decomper")
t1 = timer()
x1 = decomp(uu,emptyTree(),sdict(),1)
decomper_log.info("nodes: %d" % len(treesNodes(dfzz(x1[1]))))
t2 = timer()
decomper_log.info("<<< decomper repa %.3fs" % (t2 - t1))
stdout.flush()
return x1
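# A hypothetical invocation sketch (everything here is an assumption for
# illustration: the system `uu`, substrate variables `vv` and HistoryRepa
# `aa` would be constructed elsewhere from data, and the parameter values
# are arbitrary):
# result = parametersSystemsHistoryRepasDecomperMaxRollByMExcludedSelfHighestFmaxIORepa(
#     wmax=8, lmax=8, xmax=128, omax=10, bmax=30, mmax=3, umax=128, pmax=1,
#     fmax=16, mult=1, seed=5, uu=uu, vv=vv, aa=aa)
# if result is not None:
#     (uu1, df) = result  # the enlarged system and the decomposition fud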
# parametersSystemsLayererLevelMaxRollByMExcludedSelfHighestIORepa_u ::
# Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer ->
# System -> Set.Set Variable -> Fud ->
# HistoryRepa -> HistogramRepaRed -> HistoryRepa -> HistogramRepaRed -> Integer -> Integer ->
# IO (System, Fud, [(Set.Set Variable, Double)])
def parametersSystemsLayererLevelMaxRollByMExcludedSelfHighestIORepa_u(wmax,lmax,xmax,omax,bmax,mmax,umax,pmax,uu,vvg,ffg,xx,xxp,xxrr,xxrrp,f,g):
repaRounding = 1e-6
def sgl(x):
return sset([x])
def maxr(mm):
if len(mm) > 0:
return list(sset([b for (_,b) in mm]))[-1:][0]
return 0
uvars = systemsSetVar
cart = systemsSetVarsSetStateCartesian_u
lluu = listsSystem_u
uunion = pairSystemsUnion
sunion = pairStatesUnionLeft
ssgl = stateSingleton
llaa = listsHistogram_u
hhvvr = historyRepasVectorVar
apvvr = histogramRepaRedsVectorVar
hrhx = historyRepasRed
def unit(qq):
return llaa([(ss,1) for ss in qq])
tttr = systemsTransformsTransformRepa_u
apply = historyRepasListTransformRepasApply_u
trans = histogramsSetVarsTransform_u
ttpp = transformsPartition
und = transformsUnderlying
qqff = setTransformsFud_u
ffqq = fudsSetTransform
def fder(ff):
und = transformsUnderlying
vv = set()
for (aa,ww) in ff:
vv |= ww
for tt in ff:
vv -= und(tt)
return vv
def fund(ff):
und = transformsUnderlying
vv = set()
for tt in ff:
vv |= und(tt)
for (aa,ww) in ff:
vv -= ww
return vv
def depends(ff,vv):
und = transformsUnderlying
dd = dict([(v,(xx,ww)) for (xx,ww) in ff for v in ww])
yy = set(dd.keys())
def deps(uu,xx):
ff = []
for w in uu & yy - xx:
tt = dd[w]
ff.append(tt)
zz = xx.copy()
zz.add(w)
ff = ff + deps(und(tt),zz)
return ff
return set(deps(vv,set()))
def funion(ff,gg):
return qqff(ffqq(ff) | ffqq(gg))
def buildfftup(uu,vvg,ffg,ff,hh,hhp,hhrr,hhrrp):
return parametersSystemsBuilderTupleLevelNoSumlayerMultiEffectiveRepa_ui(xmax,omax,bmax,mmax,uu,vvg,ffg,ff,hh,hhp,hhrr,hhrrp)
def parter(uu,kk,bb,y1):
return parametersSystemsPartitionerMaxRollByMRepa_ui(mmax,umax,pmax,uu,kk,bb,y1)
def roller(qq):
return parametersRollerMaximumRollExcludedSelfRepa_i(qq)
def buildffdervar(uu,vv,ffg,ff,xx,xxp,xxrr,xxrrp):
(x1,s1) = parametersSystemsBuilderDerivedVarsLevelHighestNoSumlayerRepa_ui(wmax,omax,uu,vv,ffg,ff,xx,xxp,xxrr,xxrrp)
return ([(kk,a) for ((kk,_,_),a) in x1],s1)
def layer(uu,ff,mm,xx,xxp,xxrr,xxrrp,l):
if l > lmax:
return (uu,ff,mm)
layerer_log.info(">>> layer\tfud: %d\tlevel node: %d\tlayer: %d" % (f,g,l))
t1 = timer()
tupler_log.info(">>> tupler")
tupler_log.info("level substrate cardinality: %d" % len(vvg))
tupler_log.info("level fud derived cardinality: %d" % len(fder(ffg)))
tupler_log.info("fud cardinality: %d" % len(ffqq(ff)))
tupler_log.info("level excluded fud cardinality: %d" % len(ffqq(ff)-ffqq(ffg)))
stdout.flush()
(x2,s2) = buildfftup(uu,vvg,ffg,ff,xx,xxp,xxrr,xxrrp)
if len(x2) > 0:
tupler_log.info("tuple cardinality: %d" % len(x2))
tupler_log.info("max tuple algn: %.2f" % max([b for (a,b) in x2]))
else:
tupler_log.info("no tuples")
t2 = timer()
tupler_log.info("tupler\tsearched: %d\trate: %.2f" % (s2,s2/(t2-t1)))
tupler_log.info("<<< tupler %.3fs" % (t2-t1))
parter_log.info(">>> parter")
stdout.flush()
y3 = [parter(uu,kk,bb,y1) for ((kk,bb),y1) in x2]
x3 = [x for (ll,_) in y3 for x in ll]
s3 = sum([s for (_,s) in y3])
if len(x3) > 0:
parter_log.info("partitions cardinality: %d" % len(x3))
else:
parter_log.info("no tuple partitions")
t3 = timer()
parter_log.info("parter\tsearched: %d\trate: %.2f" % (s3,s3/(t3-t2)))
parter_log.info("<<< parter %.3fs" % (t3-t2))
roller_log.info(">>> roller")
stdout.flush()
y4 = [roller(qq) for qq in x3]
x4 = [x for (ll,_) in y4 for x in ll]
s4 = sum([s for (_,s) in y4])
if len(x4) > 0:
roller_log.info("roll cardinality: %d" % len(x4))
else:
roller_log.info("no rolls")
t4 = timer()
roller_log.info("roller\tsearched: %d\trate: %.2f" % (s4,s4/(t4-t3)))
roller_log.info("<<< roller %.3fs" % (t4-t3))
applier_log.info(">>> application")
stdout.flush()
ll0 = []
for (yy,pp) in x4:
for (jj,p) in zip(yy,pp):
if max(p) + 1 < len(p):
ii = list(zip(cart(uu,jj),p))
ll0.append(ii)
ll = []
for (b,ii) in enumerate(ll0):
w = VarPair((VarPair((VarPair((VarInt(f),VarInt(g))),VarInt(l))),VarInt(b+1)))
ww = sset([ValInt(u) for (_,u) in ii])
tt = trans(unit([sunion(ss,ssgl(w,ValInt(u))) for (ss,u) in ii]),sgl(w))
ll.append((tt,(w,ww)))
ll1 = []
for (tt,(w,ww)) in ll:
if all([len(ww) != len(ww1) or und(tt) != und(tt1) or ttpp(tt) != ttpp(tt1) for (tt1,(w1,ww1)) in ll if w > w1]):
ll1.append((tt,(w,ww)))
if len(ll1) > 0:
hh = qqff(sset([tt for (tt,_) in ll1]))
uu1 = uunion(uu,lluu([(w,ww) for (_,(w,ww)) in ll1]))
ffr = [tttr(uu1,tt) for (tt,_) in ll1]
xx1 = apply(xx,ffr)
xxp1 = hrhx(xx1)
xxrr1 = apply(xxrr,ffr)
xxrrp1 = hrhx(xxrr1)
gg = funion(funion(ff,hh),depends(ffg,fund(hh)))
applier_log.info("fud cardinality: %d" % len(ffqq(gg)))
t5 = timer()
applier_log.info("<<< application %.3fs" % (t5-t4))
dervarser_log.info( ">>> dervarser")
stdout.flush()
(mm1,s5) = buildffdervar(uu1,vvg,ffg,gg,xx1,xxp1,xxrr1,xxrrp1)
if len(mm1) > 0:
dervarser_log.info("der vars algn density: %.2f" % maxr(mm1))
else:
dervarser_log.info("no der vars sets")
t6 = timer()
dervarser_log.info("dervarser\tsearched: %d\trate: %.2f" % (s5,s5/(t6-t5)))
dervarser_log.info("<<< dervarser %.3fs" % (t6-t5))
layerer_log.info( "<<< layer %.3fs" % (t6-t1))
stdout.flush()
if l <= lmax and (len(mm) == 0 or maxr(mm1) > maxr(mm) + repaRounding):
(ffr,ll0,ll,ll1) = (None,None,None,None)
(x2,x3,x4) = (None,None,None)
return layer(uu1,gg,mm1,xx1,xxp1,xxrr1,xxrrp1,l+1)
else:
t5 = timer()
applier_log.info("<<< application %.3fs" % (t5-t4))
layerer_log.info( "<<< layer %.3fs" % (t5-t1))
stdout.flush()
return (uu,ff,mm)
layerer_log.info(">>> layerer")
t1 = timer()
x1 = layer(uu,fudEmpty(),[],xx,xxp,xxrr,xxrrp,1)
t2 = timer()
layerer_log.info("<<< layerer %.3fs" % (t2-t1))
stdout.flush()
return x1
# parametersSystemsLayererLevelMaxRollByMExcludedSelfHighestIORepa_u_1 ::
# Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer ->
# System -> Set.Set Variable -> Fud ->
# HistoryRepa -> HistogramRepaRed -> HistoryRepa -> HistogramRepaRed -> Integer -> Integer ->
# IO (System, Fud, [(Set.Set Variable, Double)])
def parametersSystemsLayererLevelMaxRollByMExcludedSelfHighestIORepa_u_1(wmax,lmax,xmax,omax,bmax,mmax,umax,pmax,uu,vvg,ffg,xx,xxp,xxrr,xxrrp,f,g):
repaRounding = 1e-6
def sgl(x):
return sset([x])
def maxr(mm):
if len(mm) > 0:
return list(sset([b for (_,b) in mm]))[-1:][0]
return 0
uvars = systemsSetVar
cart = systemsSetVarsSetStateCartesian_u
lluu = listsSystem_u
uunion = pairSystemsUnion
sunion = pairStatesUnionLeft
ssgl = stateSingleton
llaa = listsHistogram_u
hhvvr = historyRepasVectorVar
apvvr = histogramRepaRedsVectorVar
hrhx = historyRepasRed
def unit(qq):
return llaa([(ss,1) for ss in qq])
tttr = systemsTransformsTransformRepa_u
apply = historyRepasListTransformRepasApply_u
trans = histogramsSetVarsTransform_u
ttpp = transformsPartition
und = transformsUnderlying
qqff = setTransformsFud_u
ffqq = fudsSetTransform
fund = fudsUnderlying
fder = fudsDerived
depends = fudsSetVarsDepends
def funion(ff,gg):
return qqff(ffqq(ff) | ffqq(gg))
def buildfftup(uu,vvg,ffg,ff,hh,hhp,hhrr,hhrrp):
return parametersSystemsBuilderTupleLevelNoSumlayerMultiEffectiveRepa_ui(xmax,omax,bmax,mmax,uu,vvg,ffg,ff,hh,hhp,hhrr,hhrrp)
def parter(uu,kk,bb,y1):
return parametersSystemsPartitionerMaxRollByMRepa_ui(mmax,umax,pmax,uu,kk,bb,y1)
def roller(qq):
return parametersRollerMaximumRollExcludedSelfRepa_i(qq)
def buildffdervar(uu,vv,ffg,ff,xx,xxp,xxrr,xxrrp):
(x1,s1) = parametersSystemsBuilderDerivedVarsLevelHighestNoSumlayerRepa_ui(wmax,omax,uu,vv,ffg,ff,xx,xxp,xxrr,xxrrp)
return ([(kk,a) for ((kk,_,_),a) in x1],s1)
def layer(uu,ff,mm,xx,xxp,xxrr,xxrrp,l):
if l > lmax:
return (uu,ff,mm)
layerer_log.info(">>> layer\tfud: %d\tlevel node: %d\tlayer: %d" % (f,g,l))
t1 = timer()
tupler_log.info(">>> tupler")
tupler_log.info("level substrate cardinality: %d" % len(vvg))
tupler_log.info("level fud derived cardinality: %d" % len(fder(ffg)))
tupler_log.info("fud cardinality: %d" % len(ffqq(ff)))
tupler_log.info("level excluded fud cardinality: %d" % len(ffqq(ff)-ffqq(ffg)))
stdout.flush()
(x2,s2) = buildfftup(uu,vvg,ffg,ff,xx,xxp,xxrr,xxrrp)
if len(x2) > 0:
tupler_log.info("tuple cardinality: %d" % len(x2))
tupler_log.info("max tuple algn: %.2f" % max([b for (a,b) in x2]))
else:
tupler_log.info("no tuples")
t2 = timer()
tupler_log.info("tupler\tsearched: %d\trate: %.2f" % (s2,s2/(t2-t1)))
tupler_log.info("<<< tupler %.3fs" % (t2-t1))
parter_log.info(">>> parter")
stdout.flush()
y3 = [parter(uu,kk,bb,y1) for ((kk,bb),y1) in x2]
x3 = [x for (ll,_) in y3 for x in ll]
s3 = sum([s for (_,s) in y3])
if len(x3) > 0:
parter_log.info("partitions cardinality: %d" % len(x3))
else:
parter_log.info("no tuple partitions")
t3 = timer()
parter_log.info("parter\tsearched: %d\trate: %.2f" % (s3,s3/(t3-t2)))
parter_log.info("<<< parter %.3fs" % (t3-t2))
roller_log.info(">>> roller")
stdout.flush()
y4 = [roller(qq) for qq in x3]
x4 = [x for (ll,_) in y4 for x in ll]
s4 = sum([s for (_,s) in y4])
if len(x4) > 0:
roller_log.info("roll cardinality: %d" % len(x4))
else:
roller_log.info("no rolls")
t4 = timer()
roller_log.info("roller\tsearched: %d\trate: %.2f" % (s4,s4/(t4-t3)))
roller_log.info("<<< roller %.3fs" % (t4-t3))
applier_log.info(">>> application")
stdout.flush()
ll0 = []
for (yy,pp) in x4:
for (jj,p) in zip(yy,pp):
if max(p) + 1 < len(p):
ii = list(zip(cart(uu,jj),p))
ll0.append(ii)
ll = []
for (b,ii) in enumerate(ll0):
w = VarPair((VarPair((VarPair((VarInt(f),VarInt(g))),VarInt(l))),VarInt(b+1)))
ww = sset([ValInt(u) for (_,u) in ii])
tt = trans(unit([sunion(ss,ssgl(w,ValInt(u))) for (ss,u) in ii]),sgl(w))
ll.append((tt,(w,ww)))
ll1 = []
for (tt,(w,ww)) in ll:
if all([len(ww) != len(ww1) or und(tt) != und(tt1) or ttpp(tt) != ttpp(tt1) for (tt1,(w1,ww1)) in ll if w > w1]):
ll1.append((tt,(w,ww)))
if len(ll1) > 0:
hh = qqff(sset([tt for (tt,_) in ll1]))
uu1 = uunion(uu,lluu([(w,ww) for (_,(w,ww)) in ll1]))
ffr = [tttr(uu1,tt) for (tt,_) in ll1]
xx1 = apply(xx,ffr)
xxp1 = hrhx(xx1)
xxrr1 = apply(xxrr,ffr)
xxrrp1 = hrhx(xxrr1)
gg = funion(funion(ff,hh),depends(ffg,fund(hh)))
applier_log.info("fud cardinality: %d" % len(ffqq(gg)))
t5 = timer()
applier_log.info("<<< application %.3fs" % (t5-t4))
dervarser_log.info( ">>> dervarser")
stdout.flush()
(mm1,s5) = buildffdervar(uu1,vvg,ffg,gg,xx1,xxp1,xxrr1,xxrrp1)
if len(mm1) > 0:
dervarser_log.info("der vars algn density: %.2f" % maxr(mm1))
else:
dervarser_log.info("no der vars sets")
t6 = timer()
dervarser_log.info("dervarser\tsearched: %d\trate: %.2f" % (s5,s5/(t6-t5)))
dervarser_log.info("<<< dervarser %.3fs" % (t6-t5))
layerer_log.info( "<<< layer %.3fs" % (t6-t1))
stdout.flush()
if l <= lmax and (len(mm) == 0 or maxr(mm1) > maxr(mm) + repaRounding):
(ffr,ll0,ll,ll1) = (None,None,None,None)
(x2,x3,x4) = (None,None,None)
return layer(uu1,gg,mm1,xx1,xxp1,xxrr1,xxrrp1,l+1)
else:
t5 = timer()
applier_log.info("<<< application %.3fs" % (t5-t4))
layerer_log.info( "<<< layer %.3fs" % (t5-t1))
stdout.flush()
return (uu,ff,mm)
layerer_log.info(">>> layerer")
t1 = timer()
x1 = layer(uu,fudEmpty(),[],xx,xxp,xxrr,xxrrp,1)
t2 = timer()
layerer_log.info("<<< layerer %.3fs" % (t2-t1))
stdout.flush()
return x1
# parametersSystemsHistoryRepasDecomperLevelMaxRollByMExcludedSelfHighestFmaxIORepa ::
# Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer ->
# Integer -> Integer ->
# System -> Tree (Integer, Set.Set Variable, Fud) -> HistoryRepa ->
# IO (Maybe (System, DecompFud))
def parametersSystemsHistoryRepasDecomperLevelMaxRollByMExcludedSelfHighestFmaxIORepa(wmax,lmax,xmax,omax,bmax,mmax,umax,pmax,fmax,mult,seed,uu,zzg,aa):
repaRounding = 1e-6
dom = relationsDomain
def maxd(mm):
if len(mm) > 0:
return list(sset([(b,a) for (a,b) in mm]))[-1]
return (0,sset())
def tsgl(r):
return sdict([(r,sdict())])
uvars = systemsSetVar
acard = histogramsCardinality
trim = histogramsTrim
aall = histogramsList
def red(aa,vv):
return setVarsHistogramsReduce(vv,aa)
def unit(ss):
return setStatesHistogramUnit(sset([ss]))
qqff = setTransformsFud_u
ffqq = fudsSetTransform
def fder(ff):
und = transformsUnderlying
vv = set()
for (aa,ww) in ff:
vv |= ww
for tt in ff:
vv -= und(tt)
return vv
def fvars(ff):
vars = histogramsSetVar
vv = set()
for (aa,ww) in ff:
vv |= vars(aa)
return vv
def fund(ff):
und = transformsUnderlying
vv = set()
for tt in ff:
vv |= und(tt)
for (aa,ww) in ff:
vv -= ww
return vv
def funion(ff,gg):
return qqff(ffqq(ff) | ffqq(gg))
aahh = histogramsHistory
hhhr = systemsHistoriesHistoryRepa
def vars(hr):
return sset(historyRepasVectorVar(hr))
size = historyRepasSize
rraa = systemsHistogramRepasHistogram
hrhx = historyRepasRed
def hrhrred(hr,vv):
return setVarsHistoryRepasHistoryRepaReduced(vv,hr)
def hrred(hr,vv):
return setVarsHistoryRepasReduce(1,vv,hr)
def reduce(uu,ww,hh):
return rraa(uu,hrred(hh,ww))
def select(uu,ss,hh):
return historyRepasHistoryRepasHistoryRepaSelection_u(hhhr(uu,aahh(unit(ss))),hh)
hrconcat = vectorHistoryRepasConcat_u
hrshuffle = historyRepasShuffle_u
ffqq = fudsSetTransform
tttr = systemsTransformsTransformRepa_u
def ltrsort(uu,ff,hr):
vars = historyRepasVectorVar
return listVariablesListTransformRepasSort(vars(hr),[tttr(uu,tt) for tt in ffqq(ff)])
ltrmul = historyRepasListTransformRepasApply_u
def apply(uu,ff,hr):
return historyRepasListTransformRepasApply(hr,[tttr(uu,tt) for tt in ffqq(ff)])
depends = fudsSetVarsDepends
zzdf = treePairStateFudsDecompFud
dfzz = decompFudsTreePairStateFud
def zztrim(df):
pp = []
for ll in treesPaths(df):
(_,ff) = ll[-1]
if len(ff) == 0:
pp.append(ll[:-1])
else:
pp.append(ll)
return pathsTree(pp)
def okLevel(zzg):
for (wmaxg,vvg,ffg) in treesElements(zzg):
if wmaxg < 0:
return False
if not vvg.issubset(vars(aa)):
return False
if not fvars(ffg).issubset(uvars(uu)):
return False
if not fund(ffg).issubset(vars(aa)):
return False
return True
def layerer(wmax,uu,vvg,ffg,xx,f,g):
decomper_log.info(">>> repa shuffle")
stdout.flush()
t1 = timer()
z = size(xx)
xxrr = hrconcat([hrshuffle(xx,seed+i*z) for i in range(1,mult+1)])
t2 = timer()
decomper_log.info("<<< repa shuffle %.3fs" % (t2-t1))
decomper_log.info(">>> repa perimeters")
stdout.flush()
t1 = timer()
vv1 = fder(ffg) | vvg
frg = ltrsort(uu,ffg,xx)
xx1 = hrhrred(ltrmul(xx,frg),vv1)
xxp = hrhx(xx1)
xxrr1 = hrhrred(ltrmul(xxrr,frg),vv1)
xxrrp = hrhx(xxrr1)
t2 = timer()
decomper_log.info("<<< repa perimeters %.3fs" % (t2-t1))
return parametersSystemsLayererLevelMaxRollByMExcludedSelfHighestIORepa_u(wmax,lmax,xmax,omax,bmax,mmax,umax,pmax,uu,vvg,ffg,xx1,xxp,xxrr1,xxrrp,f,g)
def level(uu,aa,ttg,f,g):
(uu0,ff0,g0) = (uu,fudEmpty(),g)
for ((wmaxg,vvg,ffg),xxg) in ttg.items():
(uuh,ffh,gh) = level(uu0,aa,xxg,f,g0)
(uu1,gg,nn) = layerer(wmaxg,uuh,vvg,funion(ffg,ffh),aa,f,gh)
(a,kk) = maxd(nn)
gg1 = fudEmpty()
if a > repaRounding:
gg1 = depends(gg,kk)
(uu0,ff0,g0) = (uu1,funion(ff0,gg1),gh+1)
return (uu0,ff0,g0)
def decomp(uu,zz,qq,f):
if len(zz) == 0:
(uur,ffr,_) = level(uu,aa,zzg,f,1)
if len(ffr) == 0:
return (uu, decompFudEmpty())
decomper_log.info(">>> slicing")
stdout.flush()
t3 = timer()
decomper_log.info("dependent fud cardinality : %d" % len(ffqq(ffr)))
aar = apply(uur,ffr,aa)
wwr = sset(fder(ffr))
aa1 = trim(reduce(uur,wwr,aar))
decomper_log.info("derived cardinality : %d" % acard(red(aa1,wwr)))
zzr = tsgl((stateEmpty(),ffr))
qq[(stateEmpty(),ffr)] = (aar,aa1)
(ffr,nnr,kkr) = (None,None,None)
t4 = timer()
decomper_log.info("<<< slicing %.3fs" % (t4-t3))
stdout.flush()
return decomp(uur,zzr,qq,f+1)
if fmax > 0 and f > fmax:
return (uu,zzdf(zztrim(zz)))
decomper_log.info(">>> slice selection")
stdout.flush()
t1 = timer()
mm = []
for (nn,yy) in treesPlaces(zz):
(rr,ff) = nn[-1]
if len(ff) > 0:
(bb,bb1) = qq[(rr,ff)]
tt = dom(treesRoots(yy))
for (ss,a) in aall(red(bb1,fder(ff))):
if a > 0 and ss not in tt:
mm.append((a,(nn,ss,bb)))
decomper_log.info("slices: %d" % len(mm))
if len(mm) == 0:
t2 = timer()
decomper_log.info("<<< slice selection %.3fs" % (t2-t1))
stdout.flush()
return (uu,zzdf(zztrim(zz)))
mm.sort(key = lambda x: x[0])
(a,(nn,ss,bb)) = mm[-1]
cc = hrhrred(select(uu,ss,bb),vars(aa))
decomper_log.info("decomp path length: %d" % len(nn))
decomper_log.info("slice size: %d" % a)
t2 = timer()
decomper_log.info("<<< slice selection %.3fs" % (t2-t1))
stdout.flush()
(uuc,ffc,_) = level(uu,cc,zzg,f,1)
decomper_log.info(">>> slicing")
stdout.flush()
t3 = timer()
decomper_log.info("dependent fud cardinality : %d" % len(ffqq(ffc)))
wwc = sset(fder(ffc))
ccc = apply(uuc,ffc,cc)
cc1 = trim(reduce(uuc,wwc,ccc))
decomper_log.info("derived cardinality : %d" % acard(red(cc1,wwc)))
qq[(ss,ffc)] = (ccc,cc1)
zzc = pathsTree(treesPaths(zz) + [nn+[(ss,ffc)]])
(mm,cc,ffc,nnc,kkc) = (None,None,None,None,None)
t4 = timer()
decomper_log.info("<<< slicing %.3fs" % (t4-t3))
stdout.flush()
return decomp(uuc,zzc,qq,f+1)
if wmax < 0 or lmax < 0 or xmax < 0 or omax < 0 or bmax < 0 or mmax < 1 or umax < 0 or pmax < 0:
return None
if size(aa) == 0 or mult < 1:
return None
if not vars(aa).issubset(uvars(uu)):
return None
if not okLevel(zzg):
return None
decomper_log.info(">>> decomper")
t1 = timer()
x1 = decomp(uu,emptyTree(),sdict(),1)
decomper_log.info("nodes: %d" % len(treesNodes(dfzz(x1[1]))))
t2 = timer()
decomper_log.info("<<< decomper repa %.3fs" % (t2 - t1))
stdout.flush()
return x1
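# As above, a hypothetical sketch for the level variant (assumption: `zzg`
# is a one-node level tree in the same sdict-of-sdict convention that tsgl
# uses, pairing a wmax, a substrate and a fud at each node):
# zzg = sdict([((wmax, vv, fudEmpty()), sdict())])
# result = parametersSystemsHistoryRepasDecomperLevelMaxRollByMExcludedSelfHighestFmaxIORepa(
#     wmax, lmax, xmax, omax, bmax, mmax, umax, pmax, fmax, mult, seed, uu, zzg, aa)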
# parametersSystemsHistoryRepasDecomperLevelMaxRollByMExcludedSelfHighestFmaxIORepa_1 ::
# Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer ->
# Integer -> Integer ->
# System -> Tree (Integer, Set.Set Variable, Fud) -> HistoryRepa ->
# IO (Maybe (System, DecompFud))
def parametersSystemsHistoryRepasDecomperLevelMaxRollByMExcludedSelfHighestFmaxIORepa_1(wmax,lmax,xmax,omax,bmax,mmax,umax,pmax,fmax,mult,seed,uu,zzg,aa):
repaRounding = 1e-6
dom = relationsDomain
def maxd(mm):
if len(mm) > 0:
return list(sset([(b,a) for (a,b) in mm]))[-1]
return (0,sset())
def tsgl(r):
return sdict([(r,sdict())])
uvars = systemsSetVar
acard = histogramsCardinality
trim = histogramsTrim
aall = histogramsList
def red(aa,vv):
return setVarsHistogramsReduce(vv,aa)
def unit(ss):
return setStatesHistogramUnit(sset([ss]))
qqff = setTransformsFud_u
ffqq = fudsSetTransform
fvars = fudsVars
fder = fudsDerived
fund = fudsUnderlying
def funion(ff,gg):
return qqff(ffqq(ff) | ffqq(gg))
aahh = histogramsHistory
hhhr = systemsHistoriesHistoryRepa
def vars(hr):
return sset(historyRepasVectorVar(hr))
size = historyRepasSize
rraa = systemsHistogramRepasHistogram
hrhx = historyRepasRed
def hrhrred(hr,vv):
return setVarsHistoryRepasHistoryRepaReduced(vv,hr)
def hrred(hr,vv):
return setVarsHistoryRepasReduce(1,vv,hr)
def reduce(uu,ww,hh):
return rraa(uu,hrred(hh,ww))
def select(uu,ss,hh):
return historyRepasHistoryRepasHistoryRepaSelection_u(hhhr(uu,aahh(unit(ss))),hh)
hrconcat = vectorHistoryRepasConcat_u
hrshuffle = historyRepasShuffle_u
ffqq = fudsSetTransform
fder = fudsDerived
tttr = systemsTransformsTransformRepa_u
def apply(uu,ff,hh):
return historyRepasListTransformRepasApply(hh,[tttr(uu,tt) for tt in ffqq(ff)])
depends = fudsSetVarsDepends
zzdf = treePairStateFudsDecompFud
dfzz = decompFudsTreePairStateFud
def zztrim(df):
pp = []
for ll in treesPaths(df):
(_,ff) = ll[-1]
if len(ff) == 0:
pp.append(ll[:-1])
else:
pp.append(ll)
return pathsTree(pp)
def okLevel(zzg):
for (wmaxg,vvg,ffg) in treesElements(zzg):
if wmaxg < 0:
return False
if not vvg.issubset(vars(aa)):
return False
if not fvars(ffg).issubset(uvars(uu)):
return False
if not fund(ffg).issubset(vars(aa)):
return False
return True
def layerer(wmax,uu,vvg,ffg,xx,f,g):
decomper_log.info(">>> repa shuffle")
stdout.flush()
t1 = timer()
z = size(xx)
xxrr = hrconcat([hrshuffle(xx,seed+i*z) for i in range(1,mult+1)])
t2 = timer()
decomper_log.info("<<< repa shuffle %.3fs" % (t2-t1))
decomper_log.info(">>> repa perimeters")
stdout.flush()
t1 = timer()
xx1 = apply(uu,ffg,xx)
xxp = hrhx(xx1)
xxrr1 = apply(uu,ffg,xxrr)
xxrrp = hrhx(xxrr1)
t2 = timer()
decomper_log.info("<<< repa perimeters %.3fs" % (t2-t1))
return parametersSystemsLayererLevelMaxRollByMExcludedSelfHighestIORepa_u(wmax,lmax,xmax,omax,bmax,mmax,umax,pmax,uu,vvg,ffg,xx1,xxp,xxrr1,xxrrp,f,g)
def level(uu,aa,ttg,f,g):
(uu0,ff0,g0) = (uu,fudEmpty(),g)
for ((wmaxg,vvg,ffg),xxg) in ttg.items():
(uuh,ffh,gh) = level(uu0,aa,xxg,f,g0)
(uu1,gg,nn) = layerer(wmaxg,uuh,vvg,funion(ffg,ffh),aa,f,gh)
(a,kk) = maxd(nn)
gg1 = fudEmpty()
if a > repaRounding:
gg1 = depends(gg,kk)
(uu0,ff0,g0) = (uu1,funion(ff0,gg1),gh+1)
return (uu0,ff0,g0)
def decomp(uu,zz,qq,f):
if len(zz) == 0:
(uur,ffr,_) = level(uu,aa,zzg,f,1)
if len(ffr) == 0:
return (uu, decompFudEmpty())
decomper_log.info(">>> slicing")
stdout.flush()
t3 = timer()
decomper_log.info("dependent fud cardinality : %d" % len(ffqq(ffr)))
aar = apply(uur,ffr,aa)
wwr = fder(ffr)
aa1 = trim(reduce(uur,wwr,aar))
decomper_log.info("derived cardinality : %d" % acard(red(aa1,wwr)))
zzr = tsgl((stateEmpty(),ffr))
qq[(stateEmpty(),ffr)] = (aar,aa1)
(ffr,nnr,kkr) = (None,None,None)
t4 = timer()
decomper_log.info("<<< slicing %.3fs" % (t4-t3))
stdout.flush()
return decomp(uur,zzr,qq,f+1)
if fmax > 0 and f > fmax:
return (uu,zzdf(zztrim(zz)))
decomper_log.info(">>> slice selection")
stdout.flush()
t1 = timer()
mm = []
for (nn,yy) in treesPlaces(zz):
(rr,ff) = nn[-1]
if len(ff) > 0:
(bb,bb1) = qq[(rr,ff)]
tt = dom(treesRoots(yy))
for (ss,a) in aall(red(bb1,fder(ff))):
if a > 0 and ss not in tt:
mm.append((a,(nn,ss,bb)))
decomper_log.info("slices: %d" % len(mm))
if len(mm) == 0:
t2 = timer()
decomper_log.info("<<< slice selection %.3fs" % (t2-t1))
stdout.flush()
return (uu,zzdf(zztrim(zz)))
mm.sort(key = lambda x: x[0])
(a,(nn,ss,bb)) = mm[-1]
cc = hrhrred(select(uu,ss,bb),vars(aa))
decomper_log.info("decomp path length: %d" % len(nn))
decomper_log.info("slice size: %d" % a)
t2 = timer()
decomper_log.info("<<< slice selection %.3fs" % (t2-t1))
stdout.flush()
(uuc,ffc,_) = level(uu,cc,zzg,f,1)
decomper_log.info(">>> slicing")
stdout.flush()
t3 = timer()
decomper_log.info("dependent fud cardinality : %d" % len(ffqq(ffc)))
wwc = fder(ffc)
ccc = apply(uuc,ffc,cc)
cc1 = trim(reduce(uuc,wwc,ccc))
decomper_log.info("derived cardinality : %d" % acard(red(cc1,wwc)))
qq[(ss,ffc)] = (ccc,cc1)
zzc = pathsTree(treesPaths(zz) + [nn+[(ss,ffc)]])
(mm,cc,ffc,nnc,kkc) = (None,None,None,None,None)
t4 = timer()
decomper_log.info("<<< slicing %.3fs" % (t4-t3))
stdout.flush()
return decomp(uuc,zzc,qq,f+1)
if wmax < 0 or lmax < 0 or xmax < 0 or omax < 0 or bmax < 0 or mmax < 1 or umax < 0 or pmax < 0:
return None
if size(aa) == 0 or mult < 1:
return None
if not vars(aa).issubset(uvars(uu)):
return None
if not okLevel(zzg):
return None
decomper_log.info(">>> decomper")
t1 = timer()
x1 = decomp(uu,emptyTree(),sdict(),1)
decomper_log.info("nodes: %d" % len(treesNodes(dfzz(x1[1]))))
t2 = timer()
decomper_log.info("<<< decomper repa %.3fs" % (t2 - t1))
stdout.flush()
return x1
# ---------------------------------------------------------------------------
# Source file: pygcn/models.py (Python, 7,123 bytes)
# Repository: coquid/pygcn @ a11788468514cce47bd4262849456895def13714
# License: MIT | blob 17111a7abf53a035de98baa91e71385ab4317ae0
# ---------------------------------------------------------------------------
import torch.nn as nn
import torch.nn.functional as F
from pygcn.layers import GraphConvolution, MyGraphConvolution
class GCN(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout):
super(GCN, self).__init__()
self.gc1 = GraphConvolution(nfeat, nhid)
self.gc2 = GraphConvolution(nhid, nclass)
self.dropout = dropout
def forward(self, x, adj):
x = F.relu(self.gc1(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = self.gc2(x, adj)
return F.log_softmax(x, dim=1)
class MyGCN_v1(nn.Module):
def __init__(self, nfeat, nhid, nout, dropout):
super(MyGCN_v1, self).__init__()
self.gc1 = MyGraphConvolution(nfeat, nhid)
self.gc2 = MyGraphConvolution(nhid, nhid)
self.gc3 = MyGraphConvolution(nhid, nhid)
self.gc4 = MyGraphConvolution(nhid, nhid)
self.gc5 = MyGraphConvolution(nhid, nhid)
self.gc6 = MyGraphConvolution(nhid, nout)
self.dropout = dropout
def forward(self, x, adj):
x = F.relu(self.gc1(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = F.relu(self.gc2(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = F.relu(self.gc3(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = F.relu(self.gc4(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = F.relu(self.gc5(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = self.gc6(x, adj)
return x
class MyGCN_v2(nn.Module):
def __init__(self, nfeat, nhid, nout, dropout):
# nhid is unused in the v2-v6 variants, which hard-code their layer widths;
# the parameter is kept so all variants share one constructor signature.
super(MyGCN_v2, self).__init__()
self.gc1 = MyGraphConvolution(nfeat, 12)
self.gc2 = MyGraphConvolution(12, 10)
self.gc3 = MyGraphConvolution(10, 8)
self.gc4 = MyGraphConvolution(8, 6)
self.gc5 = MyGraphConvolution(6, 4)
self.gc6 = MyGraphConvolution(4, nout)
self.dropout = dropout
def forward(self, x, adj):
x = F.tanhshrink(self.gc1(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = F.tanhshrink(self.gc2(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = F.tanhshrink(self.gc3(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = F.tanhshrink(self.gc4(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = F.tanhshrink(self.gc5(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = self.gc6(x, adj)
return x
class MyGCN_v3(nn.Module):
def __init__(self, nfeat, nhid, nout, dropout):
super(MyGCN_v3, self).__init__()
self.gc1 = MyGraphConvolution(nfeat, 12)
self.gc2 = MyGraphConvolution(12, 10)
self.gc3 = MyGraphConvolution(10, 8)
self.gc4 = MyGraphConvolution(8, 6)
self.gc5 = MyGraphConvolution(6, 4)
self.gc6 = MyGraphConvolution(4, nout)
self.dropout = dropout
def forward(self, x, adj):
# No activation functions in this variant (the bare parentheses apparently
# mark where an activation was removed), and the first dropout is disabled
# with p=0, so gc1's output passes through unchanged.
x = (self.gc1(x, adj))
x = F.dropout(x, p=0, training=self.training)
x = (self.gc2(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = (self.gc3(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = (self.gc4(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = (self.gc5(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = self.gc6(x, adj)
return x
class MyGCN_v4(nn.Module):
def __init__(self, nfeat, nhid, nout, dropout):
super(MyGCN_v4, self).__init__()
self.gc1 = MyGraphConvolution(nfeat, 12)
self.gc2 = MyGraphConvolution(12, 10)
self.gc3 = MyGraphConvolution(10, 8)
self.gc4 = MyGraphConvolution(8, 6)
self.gc5 = MyGraphConvolution(6, 4)
self.gc6 = MyGraphConvolution(4, nout)
self.dropout = dropout
def forward(self, x, adj):
x = (self.gc1(x, adj))
x = F.dropout(x, p=0, training=self.training)
x = F.relu(self.gc2(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = F.tanhshrink(self.gc3(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = F.tanhshrink(self.gc4(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = (self.gc5(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = self.gc6(x, adj)
return x
class MyGCN_v5(nn.Module):
def __init__(self, nfeat, nhid, nout, dropout):
super(MyGCN_v5, self).__init__()
self.gc1 = MyGraphConvolution(nfeat, 12)
self.gc2 = MyGraphConvolution(12, 10)
self.gc3 = MyGraphConvolution(10, 8)
self.gc4 = MyGraphConvolution(8, 6)
self.gc5 = MyGraphConvolution(6, 4)
self.gc6 = MyGraphConvolution(4, nout)
self.dropout = dropout
def forward(self, x, adj):
x = (self.gc1(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = F.relu(self.gc2(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = F.tanhshrink(self.gc3(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = F.tanhshrink(self.gc4(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = (self.gc5(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = self.gc6(x, adj)
return x
class MyGCN_v6(nn.Module):
def __init__(self, nfeat, nhid, nout, dropout):
super(MyGCN_v6, self).__init__()
self.gc1 = MyGraphConvolution(nfeat, 12)
self.gc2 = MyGraphConvolution(12, 11)
self.gc3 = MyGraphConvolution(11, 10)
self.gc4 = MyGraphConvolution(10, 9)
self.gc5 = MyGraphConvolution(9, 8)
self.gc6 = MyGraphConvolution(8, 7)
self.gc7 = MyGraphConvolution(7, 6)
self.gc8 = MyGraphConvolution(6, 5)
self.gc9 = MyGraphConvolution(5, 4)
self.gc10 = MyGraphConvolution(4, nout)
self.dropout = dropout
def forward(self, x, adj):
x = (self.gc1(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = (self.gc2(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = (self.gc3(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = (self.gc4(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = (self.gc5(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = self.gc6(x, adj)
x = F.dropout(x, self.dropout, training=self.training)
x = self.gc7(x, adj)
x = F.dropout(x, self.dropout, training=self.training)
x = self.gc8(x, adj)
x = F.dropout(x, self.dropout, training=self.training)
x = self.gc9(x, adj)
x = F.dropout(x, self.dropout, training=self.training)
x = self.gc10(x, adj)
return x
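# A minimal training-step sketch (assumptions: `features`, `adj`, `labels`
# and `idx_train` are prepared elsewhere, e.g. by the usual pygcn data
# loader, and the dimensions are the standard Cora ones):
# import torch
# import torch.nn.functional as F
# model = GCN(nfeat=1433, nhid=16, nclass=7, dropout=0.5)
# optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)
# model.train()
# optimizer.zero_grad()
# output = model(features, adj)  # log-probabilities (forward ends in log_softmax)
# loss = F.nll_loss(output[idx_train], labels[idx_train])
# loss.backward()
# optimizer.step()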
# ---------------------------------------------------------------------------
# Source file: WN.py (Python, 8,912 bytes)
# Repository: neyudin/wavenetglow @ 3261dd8163709b2204b1c9ba90bc544755439fa5
# License: BSD-3-Clause; 2 stars (2020-01-28 .. 2020-03-03)
# Blob: 17435a67c44ac869cb192970c53155e65fc347af
# ---------------------------------------------------------------------------
import torch
import torch.nn as nn
class WN(torch.nn.Module):
"""
WN block for the affine coupling layer (current version).
"""
def __init__(self, num_channels, mel_channels, n_layers=8, residual_channels=512,
gate_channels=256, skip_channels=256):
"""
Parameters
----------
num_channels : int
Number of x_a channels
mel_channels : int
Number of spectrogram (condition c) channels
----------
Parameters from original paper
----------
        n_layers : int
            The depth of WN (default : 8)
        residual_channels : int
            Number of channels used by residual connections (default : 512)
        gate_channels : int
            Number of filters and gates channels (default : 256)
        skip_channels : int
            Number of channels used by skip connections (default : 256)
"""
super(WN, self).__init__()
self.n_layers = n_layers
self.num_channels = num_channels
self.residual_channels = residual_channels
self.gate_channels = gate_channels
self.skip_channels = skip_channels
self.mel_channels = mel_channels
self.dilations_list = [2**i for i in range(n_layers)]
self.conv_input = nn.Conv1d(in_channels=num_channels, out_channels=residual_channels, kernel_size=1)
self.conv_filter = nn.ModuleList([
torch.nn.utils.weight_norm(nn.Conv1d(
in_channels=residual_channels,
out_channels=gate_channels,
kernel_size=3,
dilation=d,
                padding=d  # "same"-length output for kernel_size=3 with dilation d
), name='weight') for d in self.dilations_list])
self.conv_gate = nn.ModuleList([
torch.nn.utils.weight_norm(nn.Conv1d(
in_channels=residual_channels,
out_channels=gate_channels,
kernel_size=3,
dilation=d,
                padding=d  # "same"-length output for kernel_size=3 with dilation d
), name='weight') for d in self.dilations_list])
self.conv_mel = nn.ModuleList([
torch.nn.utils.weight_norm(nn.Conv1d(
in_channels=mel_channels,
out_channels=gate_channels * 2,
kernel_size=1
), name='weight') for _ in range(len(self.dilations_list))])
self.conv_residual = nn.ModuleList([
torch.nn.utils.weight_norm(nn.Conv1d(
in_channels=gate_channels,
out_channels=residual_channels,
kernel_size=1
), name='weight') for _ in range(len(self.dilations_list) - 1)])
self.conv_skip = nn.ModuleList([
torch.nn.utils.weight_norm(nn.Conv1d(
in_channels=gate_channels,
out_channels=skip_channels,
kernel_size=1
), name='weight') for _ in range(len(self.dilations_list))])
self.conv_out = nn.Conv1d(
in_channels=skip_channels,
out_channels=2 * num_channels, # log s, t
kernel_size=1)
self.conv_out.weight.data.uniform_(-0.0001, 0.0001)
self.conv_out.bias.data.uniform_(-0.0001, 0.0001)
def forward(self, x_a, c):
"""
Parameters
----------
x_a : FloatTensor of size batch_size * num_channels * T
            Unchanged part of the embedding
c : FloatTensor of size batch_size * mel_channels * T
Upsampled mel-spectrogram
"""
        assert x_a.size(2) == c.size(2)  # check that the spectrogram has been upsampled to the waveform length
x_acc = 0
x = self.conv_input(x_a)
for i in range(len(self.dilations_list)):
x_filter = self.conv_filter[i](x)
x_gate = self.conv_gate[i](x)
c_proj = self.conv_mel[i](c)
x_filter = x_filter + c_proj[:, :self.gate_channels]
x_gate = x_gate + c_proj[:, self.gate_channels:]
x_gate = torch.sigmoid(x_gate)
x_filter = torch.tanh(x_filter)
x_filter_gate = x_gate * x_filter
x_skip = self.conv_skip[i](x_filter_gate)
if i != len(self.dilations_list) - 1:
x_res = self.conv_residual[i](x_filter_gate)
x = x + x_res
x_acc = x_acc + x_skip
return self.conv_out(x_acc)
class VanillaWN(torch.nn.Module):
"""
WN block for affine coupling layer.
"""
def __init__(self, num_channels, mel_channels, n_layers=4, residual_channels=128,
gate_channels=64, skip_channels=64, pre_channels=32):
"""
Parameters
----------
num_channels : int
Number of x_a channels
mel_channels : int
Number of spectrogram (condition c) channels
----------
Parameters from original paper
----------
        n_layers : int
            The depth of WN (default : 4)
        residual_channels : int
            Number of channels used by residual connections (default : 128)
        gate_channels : int
            Number of filters and gates channels (default : 64)
        skip_channels : int
            Number of channels used by skip connections (default : 64)
        pre_channels : int
            Number of channels in the final non-linearity (default : 32)
"""
super(VanillaWN, self).__init__()
self.n_layers = n_layers
self.num_channels = num_channels
self.residual_channels = residual_channels
self.gate_channels = gate_channels
self.skip_channels = skip_channels
self.mel_channels = mel_channels
self.dilations_list = [2**i for i in range(n_layers)]
self.conv_input = nn.Conv1d(in_channels=num_channels, out_channels=residual_channels, kernel_size=1)
self.conv_filter = nn.ModuleList([
nn.Conv1d(
in_channels=residual_channels,
out_channels=gate_channels,
kernel_size=3,
dilation=d,
                padding=d  # "same"-length output for kernel_size=3 with dilation d
) for d in self.dilations_list])
self.conv_gate = nn.ModuleList([
nn.Conv1d(
in_channels=residual_channels,
out_channels=gate_channels,
kernel_size=3,
dilation=d,
                padding=d  # "same"-length output for kernel_size=3 with dilation d
) for d in self.dilations_list])
self.conv_mel = nn.ModuleList([
nn.Conv1d(
in_channels=mel_channels,
out_channels=gate_channels * 2,
kernel_size=1
) for _ in range(len(self.dilations_list))])
self.conv_residual = nn.ModuleList([
nn.Conv1d(
in_channels=gate_channels,
out_channels=residual_channels,
kernel_size=1
) for _ in range(len(self.dilations_list) - 1)])
self.conv_skip = nn.ModuleList([
nn.Conv1d(
in_channels=gate_channels,
out_channels=skip_channels,
kernel_size=1
) for _ in range(len(self.dilations_list))])
self.conv_out_1 = nn.Conv1d(
in_channels=skip_channels,
out_channels=pre_channels,
kernel_size=1)
self.conv_out_2 = nn.Conv1d(
in_channels=pre_channels,
out_channels=2 * num_channels,
kernel_size=1)
def forward(self, x_a, c):
"""
Parameters
----------
x_a : FloatTensor of size batch_size * num_channels * T
            Unchanged part of the embedding
c : FloatTensor of size batch_size * mel_channels * T
Upsampled mel-spectrogram
"""
        assert x_a.size(2) == c.size(2)  # check that the spectrogram has been upsampled to the waveform length
x_acc = 0
x = self.conv_input(x_a)
for i in range(len(self.dilations_list)):
x_filter = self.conv_filter[i](x)
x_gate = self.conv_gate[i](x)
c_proj = self.conv_mel[i](c)
x_filter = x_filter + c_proj[:, :self.gate_channels]
x_gate = x_gate + c_proj[:, self.gate_channels:]
x_gate = torch.sigmoid(x_gate)
x_filter = torch.tanh(x_filter)
x_filter_gate = x_gate * x_filter
x_skip = self.conv_skip[i](x_filter_gate)
if i != len(self.dilations_list) - 1:
x_res = self.conv_residual[i](x_filter_gate)
x = x + x_res
x = x * 0.5**0.5
x_acc = x_acc + x_skip
return self.conv_out_2(torch.relu(self.conv_out_1(x_acc)))
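# A minimal shape check for the blocks above (a sketch with illustrative
# channel counts; in the full model these come from the surrounding flow):
if __name__ == '__main__':
    wn = WN(num_channels=4, mel_channels=80)
    x_a = torch.randn(2, 4, 100)  # batch_size * num_channels * T
    c = torch.randn(2, 80, 100)   # upsampled mel-spectrogram with matching T
    assert wn(x_a, c).shape == (2, 8, 100)  # 2 * num_channels: log s and t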
| 36.080972
| 108
| 0.546118
| 1,057
| 8,912
| 4.332072
| 0.109745
| 0.055907
| 0.059402
| 0.058965
| 0.92553
| 0.92553
| 0.89561
| 0.89561
| 0.877703
| 0.841887
| 0
| 0.020448
| 0.363443
| 8,912
| 246
| 109
| 36.227642
| 0.786709
| 0.182338
| 0
| 0.85034
| 0
| 0
| 0.004416
| 0
| 0
| 0
| 0
| 0
| 0.013605
| 1
| 0.027211
| false
| 0
| 0.013605
| 0
| 0.068027
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1757cb3adbb9ba3e3de2859b8f9845a3c7c5f97a
| 12,676
|
py
|
Python
|
utils/yolo.py
|
Cuda-Chen/fish-yolo-grabcut
|
359da92815d49a7d238fe8de7bd51e5de68f0d40
|
[
"MIT"
] | 7
|
2020-01-20T00:33:29.000Z
|
2022-01-01T04:36:06.000Z
|
utils/yolo.py
|
ZurMaD/fish-yolo-grabcut
|
c2570691143df36d528d2b3c115bf2bc29cddfd6
|
[
"MIT"
] | 1
|
2020-01-09T09:18:29.000Z
|
2020-01-16T13:22:43.000Z
|
utils/yolo.py
|
Cuda-Chen/fish-yolo-grabcut
|
359da92815d49a7d238fe8de7bd51e5de68f0d40
|
[
"MIT"
] | 5
|
2020-06-20T01:50:10.000Z
|
2020-12-24T09:13:10.000Z
|
#!/usr/bin/python
import numpy as np
import argparse
import time
import cv2 as cv
import os
def runYOLODetection(args):
# load my fish class labels that my YOLO model was trained on
labelsPath = os.path.sep.join([args["yolo"], "fish.names"])
#labelsPath = os.path.sep.join([args["yolo"], "coco.names"])
LABELS = open(labelsPath).read().strip().split("\n")
# initialize a list of colors to represent each possible class label
np.random.seed(0)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),
dtype="uint8")
print(COLORS)
#COLORS = np.array([255, 0, 0], dtype="uint8")
# derive the paths to the YOLO weights and model configuration
weightsPath = os.path.sep.join([args["yolo"], "fish.weights"])
configPath = os.path.sep.join([args["yolo"], "fish_test.cfg"])
#weightsPath = os.path.sep.join([args["yolo"], "yolov3.weights"])
#configPath = os.path.sep.join([args["yolo"], "yolov3.cfg"])
# load my YOLO object detector trained on my fish dataset (1 class)
print("[INFO] loading YOLO from disk ...")
net = cv.dnn.readNetFromDarknet(configPath, weightsPath)
# load input image and grab its spatial dimensions
image = cv.imread(args["image"])
(H, W) = image.shape[:2]
# determine only the *output* layer names that we need from YOLO
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
# construct a blob from the input image and then perform a forward
# pass of the YOLO object detector, giving us our bounding boxes and
# associated probabilities
# NOTE: (608, 608) is my YOLO input image size. However, using
    # (416, 416) results in a much more accurate result. Pretty interesting.
blob = cv.dnn.blobFromImage(image, 1 / 255.0, (416, 416),
swapRB=True, crop=False)
net.setInput(blob)
start = time.time()
layerOutputs = net.forward(ln)
end = time.time()
# show execution time information of YOLO
print("[INFO] YOLO took {:.6f} seconds.".format(end - start))
    # initialize our lists of detected bounding boxes, confidences, and
# class IDs, respectively
boxes = []
confidences = []
classIDs = []
# loop over each of the layer outputs
for output in layerOutputs:
# loop over each of the detections
for detection in output:
# extract the class ID and confidence (i.e., probability) of
# the current object detection
scores = detection[5:]
classID = np.argmax(scores)
confidence = scores[classID]
# filter out weak predictions by ensuring the detected
            # probability is greater than the minimum probability
if confidence > args["confidence"]:
# scale the bounding box coordinates back relative to the
# size of the image, keeping in mind that YOLO actually
# returns the center (x, y)-coordinates of the bounding
# box followed by the boxes' width and height
box = detection[0:4] * np.array([W, H, W, H])
(centerX, centerY, width, height) = box.astype("int")
# use the center (x, y)-coordinates to derive the top and
# left corner of the bounding box
x = int(centerX - (width / 2))
y = int(centerY - (height / 2))
                # update our list of bounding box coordinates, confidences,
# and class IDs
boxes.append([x, y, int(width), int(height)])
confidences.append(float(confidence))
classIDs.append(classID)
    # apply non-maxima suppression to suppress weak and overlapping bounding
# boxes
idxs = cv.dnn.NMSBoxes(boxes, confidences, args["confidence"],
args["threshold"])
# ensure at least one detection exists
if len(idxs) > 0:
# loop over the indexes we are keeping
for i in idxs.flatten():
# extract the bounding box coordinates
(x, y) = (boxes[i][0], boxes[i][1])
(w, h) = (boxes[i][2], boxes[i][3])
# draw a bounding box rectangle and label on the image
color = [int(c) for c in COLORS[classIDs[i]]]
cv.rectangle(image, (x, y), (x + w, y + h), color, 2)
text = "{}: {:.4f}".format(LABELS[classIDs[i]], confidences[i])
cv.putText(image, text, (x, y - 5), cv.FONT_HERSHEY_SIMPLEX,
0.5, color, 2)
return image
def runYOLOBoundingBoxes(args):
# load my fish class labels that my YOLO model was trained on
labelsPath = os.path.sep.join([args["yolo"], "fish.names"])
#labelsPath = os.path.sep.join([args["yolo"], "coco.names"])
LABELS = open(labelsPath).read().strip().split("\n")
# initialize a list of colors to represent each possible class label
np.random.seed(0)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),
dtype="uint8")
print(COLORS)
#COLORS = np.array([255, 0, 0], dtype="uint8")
# derive the paths to the YOLO weights and model configuration
weightsPath = os.path.sep.join([args["yolo"], "fish.weights"])
configPath = os.path.sep.join([args["yolo"], "fish_test.cfg"])
#weightsPath = os.path.sep.join([args["yolo"], "yolov3.weights"])
#configPath = os.path.sep.join([args["yolo"], "yolov3.cfg"])
# load my YOLO object detector trained on my fish dataset (1 class)
print("[INFO] loading YOLO from disk ...")
net = cv.dnn.readNetFromDarknet(configPath, weightsPath)
# load input image and grab its spatial dimensions
image = cv.imread(args["image"])
(H, W) = image.shape[:2]
# determine only the *output* layer names that we need from YOLO
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
# construct a blob from the input image and then perform a forward
# pass of the YOLO object detector, giving us our bounding boxes and
# associated probabilities
# NOTE: (608, 608) is my YOLO input image size. However, using
    # (416, 416) results in a much more accurate result. Pretty interesting.
blob = cv.dnn.blobFromImage(image, 1 / 255.0, (416, 416),
swapRB=True, crop=False)
net.setInput(blob)
start = time.time()
layerOutputs = net.forward(ln)
end = time.time()
# show execution time information of YOLO
print("[INFO] YOLO took {:.6f} seconds.".format(end - start))
    # initialize our lists of detected bounding boxes, confidences, and
# class IDs, respectively
boxes = []
confidences = []
classIDs = []
# loop over each of the layer outputs
for output in layerOutputs:
# loop over each of the detections
for detection in output:
# extract the class ID and confidence (i.e., probability) of
# the current object detection
scores = detection[5:]
classID = np.argmax(scores)
confidence = scores[classID]
# filter out weak predictions by ensuring the detected
            # probability is greater than the minimum probability
if confidence > args["confidence"]:
# scale the bounding box coordinates back relative to the
# size of the image, keeping in mind that YOLO actually
# returns the center (x, y)-coordinates of the bounding
# box followed by the boxes' width and height
box = detection[0:4] * np.array([W, H, W, H])
(centerX, centerY, width, height) = box.astype("int")
# use the center (x, y)-coordinates to derive the top and
# left corner of the bounding box
x = int(centerX - (width / 2))
y = int(centerY - (height / 2))
                # update our list of bounding box coordinates, confidences,
# and class IDs
boxes.append([x, y, int(width), int(height)])
confidences.append(float(confidence))
classIDs.append(classID)
    # apply non-maxima suppression to suppress weak and overlapping bounding
# boxes
idxs = cv.dnn.NMSBoxes(boxes, confidences, args["confidence"],
args["threshold"])
return image, boxes, idxs
def runYOLOBoundingBoxes_streamlit(image, yolopath, _confidence, _threshold):
# load my fish class labels that my YOLO model was trained on
labelsPath = os.path.sep.join([yolopath, "fish.names"])
LABELS = open(labelsPath).read().strip().split("\n")
# initialize a list of colors to represent each possible class label
np.random.seed(0)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),
dtype="uint8")
print(COLORS)
#COLORS = np.array([255, 0, 0], dtype="uint8")
# derive the paths to the YOLO weights and model configuration
weightsPath = os.path.sep.join([yolopath, "fish.weights"])
configPath = os.path.sep.join([yolopath, "fish_test.cfg"])
# load my YOLO object detector trained on my fish dataset (1 class)
print("[INFO] loading YOLO model ...")
net = cv.dnn.readNetFromDarknet(configPath, weightsPath)
# grab input image's spatial dimensions
(H, W) = image.shape[:2]
# determine only the *output* layer names that we need from YOLO
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
# construct a blob from the input image and then perform a forward
# pass of the YOLO object detector, giving us our bounding boxes and
# associated probabilities
# NOTE: (608, 608) is my YOLO input image size. However, using
    # (416, 416) results in a much more accurate result. Pretty interesting.
blob = cv.dnn.blobFromImage(image, 1 / 255.0, (416, 416),
swapRB=True, crop=False)
net.setInput(blob)
start = time.time()
layerOutputs = net.forward(ln)
end = time.time()
# show execution time information of YOLO
print("[INFO] YOLO took {:.6f} seconds.".format(end - start))
    # initialize our lists of detected bounding boxes, confidences, and
# class IDs, respectively
boxes = []
confidences = []
classIDs = []
# loop over each of the layer outputs
for output in layerOutputs:
# loop over each of the detections
for detection in output:
# extract the class ID and confidence (i.e., probability) of
# the current object detection
scores = detection[5:]
classID = np.argmax(scores)
confidence = scores[classID]
# filter out weak predictions by ensuring the detected
            # probability is greater than the minimum probability
if confidence > _confidence:
# scale the bounding box coordinates back relative to the
# size of the image, keeping in mind that YOLO actually
# returns the center (x, y)-coordinates of the bounding
# box followed by the boxes' width and height
box = detection[0:4] * np.array([W, H, W, H])
(centerX, centerY, width, height) = box.astype("int")
# use the center (x, y)-coordinates to derive the top and
# left corner of the bounding box
x = int(centerX - (width / 2))
y = int(centerY - (height / 2))
                # update our list of bounding box coordinates, confidences,
# and class IDs
boxes.append([x, y, int(width), int(height)])
confidences.append(float(confidence))
classIDs.append(classID)
    # apply non-maxima suppression to suppress weak and overlapping bounding
# boxes
idxs = cv.dnn.NMSBoxes(boxes, confidences, _confidence,
_threshold)
return boxes, idxs
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
help="path to input image")
ap.add_argument("-y", "--yolo", required=True,
help="base path to YOLO directory")
ap.add_argument("-c", "--confidence", type=float, default=0.25,
help="minimum probability to filter weak detections")
ap.add_argument("-t", "--threshold", type=float, default=0.45,
help="threshold when applying non-maxima suppression")
args = vars(ap.parse_args())
image = runYOLODetection(args)
# show the output image
#cv.namedWindow("Image", cv.WINDOW_NORMAL)
#cv.resizeWindow("image", 1920, 1080)
cv.imshow("Image", image)
#cv.imwrite("predictions.jpg", image)
cv.waitKey(0)
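# Portability note (an assumption about newer OpenCV, not part of the original
# script): since OpenCV 4.5.4, getUnconnectedOutLayers() returns flat ints, so
# the `ln[i[0] - 1]` indexing used above fails there. A version-agnostic sketch:
def get_output_layer_names(net):
    ln = net.getLayerNames()
    try:
        return [ln[i - 1] for i in net.getUnconnectedOutLayers()]     # OpenCV >= 4.5.4
    except (TypeError, IndexError):
        return [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]  # older OpenCV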
| 40.758842
| 77
| 0.620227
| 1,637
| 12,676
| 4.787416
| 0.161271
| 0.013398
| 0.017226
| 0.024882
| 0.857343
| 0.857343
| 0.847263
| 0.842925
| 0.842925
| 0.842925
| 0
| 0.018249
| 0.269407
| 12,676
| 310
| 78
| 40.890323
| 0.827988
| 0.415273
| 0
| 0.726667
| 0
| 0
| 0.085159
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02
| false
| 0
| 0.033333
| 0
| 0.073333
| 0.06
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1791323b17a576d78ae9d6ff260342ba69a97bc9
| 919
|
py
|
Python
|
tests/test_half_wildcard.py
|
oqwa/gwh
|
9399a9b0fd5815e81d68a4d52215c273f9c0d49b
|
[
"MIT"
] | null | null | null |
tests/test_half_wildcard.py
|
oqwa/gwh
|
9399a9b0fd5815e81d68a4d52215c273f9c0d49b
|
[
"MIT"
] | null | null | null |
tests/test_half_wildcard.py
|
oqwa/gwh
|
9399a9b0fd5815e81d68a4d52215c273f9c0d49b
|
[
"MIT"
] | null | null | null |
from gwh import *
from tests.utils import *
app = GitWebhook()
app.add_handler(lambda: None, repository=KNOWN_REPO)
app.add_handler(lambda: None, type=KNOWN_TYPE)
def test_bitbucket():
check_bitbucket_webhook(app, KNOWN_TYPE, KNOWN_REPO, "master", hit_expected=True)
check_bitbucket_webhook(app, UNKNOWN_TYPE, KNOWN_REPO, "master", hit_expected=True)
check_bitbucket_webhook(app, KNOWN_TYPE, UNKNOWN_REPO, "master", hit_expected=True)
check_bitbucket_webhook(app, UNKNOWN_TYPE, UNKNOWN_REPO, "master", hit_expected=False)
def test_gitlab():
check_gitlab_webhook(app, KNOWN_TYPE, KNOWN_REPO, "master", hit_expected=True)
check_gitlab_webhook(app, UNKNOWN_TYPE, KNOWN_REPO, "master", hit_expected=True)
check_gitlab_webhook(app, KNOWN_TYPE, UNKNOWN_REPO, "master", hit_expected=True)
check_gitlab_webhook(app, UNKNOWN_TYPE, UNKNOWN_REPO, "master", hit_expected=False)
| 43.761905
| 90
| 0.771491
| 127
| 919
| 5.220472
| 0.212598
| 0.120664
| 0.156863
| 0.253394
| 0.838612
| 0.769231
| 0.731523
| 0.731523
| 0.731523
| 0.731523
| 0
| 0
| 0.124048
| 919
| 20
| 91
| 45.95
| 0.823602
| 0
| 0
| 0
| 0
| 0
| 0.052231
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.133333
| 0
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bd85a5016f4253e00cc44d9424523f4276b499b6
| 128
|
py
|
Python
|
python/testData/completion/heavyStarPropagation/lib/_pkg0/_pkg0_1/_pkg0_1_0/_pkg0_1_0_1/_pkg0_1_0_1_0/_mod0_1_0_1_0_4.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/completion/heavyStarPropagation/lib/_pkg0/_pkg0_1/_pkg0_1_0/_pkg0_1_0_1/_pkg0_1_0_1_0/_mod0_1_0_1_0_4.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/completion/heavyStarPropagation/lib/_pkg0/_pkg0_1/_pkg0_1_0/_pkg0_1_0_1/_pkg0_1_0_1_0/_mod0_1_0_1_0_4.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
name0_1_0_1_0_4_0 = None
name0_1_0_1_0_4_1 = None
name0_1_0_1_0_4_2 = None
name0_1_0_1_0_4_3 = None
name0_1_0_1_0_4_4 = None
| 14.222222
| 24
| 0.820313
| 40
| 128
| 1.875
| 0.175
| 0.266667
| 0.466667
| 0.533333
| 0.88
| 0.88
| 0.746667
| 0
| 0
| 0
| 0
| 0.318182
| 0.140625
| 128
| 9
| 25
| 14.222222
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
bda5541d8519516c1c651c498c62fb368ec8e19e
| 2,796
|
py
|
Python
|
Decorator Foundation.py
|
Enthuisasticpessimist/Small-exercise
|
b169000023c3863f9e30d2cfc7c0f6e228f612f7
|
[
"MIT"
] | null | null | null |
Decorator Foundation.py
|
Enthuisasticpessimist/Small-exercise
|
b169000023c3863f9e30d2cfc7c0f6e228f612f7
|
[
"MIT"
] | null | null | null |
Decorator Foundation.py
|
Enthuisasticpessimist/Small-exercise
|
b169000023c3863f9e30d2cfc7c0f6e228f612f7
|
[
"MIT"
] | null | null | null |
##-----------Non parametric decorator----------------
####initialization
##name = 'a'
##password = '1'
##user_status = False
##
####decorator
##def login(func):
## def inner():
## global name,password,user_status
## if user_status == True:
## pass
## else:
## n = input('name:')
## p = input('password:')
## if n == name and p == password:
## user_status = True
## if user_status:
## func()
## return inner
##
##@login
##def webpage1():
## print('webpage---1')
##@login
##def webpage2():
## print('webpage---2')
##
####webpage1 = login(webpage1)##original method1
####webpage2 = login(webpage2)##original method2
##webpage1()
##webpage2()
##-----------Non parametric decorator----------------
####-----------Parametric decorator--------------------
####initialization
##name = 'a'
##password = '1'
##user_status = False
##
####decorator
##def login(func):
## def inner(*args,**kwargs):##arbitrary parameters can be passed in
## global name,password,user_status
## if user_status == True:
## pass
## else:
## n = input('name:')
## p = input('password:')
## if n == name and p == password:
## user_status = True
## if user_status:
## func(*args,**kwargs)##arbitrary parameters can be passed in
## return inner
##
##@login
##def webpage1(arg):
## print('webpage---1',arg)
##@login
##def webpage2():
## print('webpage---2')
##
####webpage1 = login(webpage1)##original method1
####webpage2 = login(webpage2)##original method2
##webpage1('111')
##webpage2()
####-----------Parametric decorator--------------------
##-----------Multi-layer decorator--------------------
##initialization
name = 'a'
password = '1'
user_status = False
##decorator
def login(auth_type):
def outer(func):
        def inner(*args, **kwargs):  # arbitrary parameters can be passed in
global name,password,user_status
if auth_type == 'qq':
if user_status == True:
pass
else:
n = input('name:')
p = input('password:')
if n == name and p == password:
user_status = True
if user_status:
                    func(*args, **kwargs)  # arbitrary parameters can be passed in
else:
print('auth_type is wrong!')
return inner
return outer
@login('qq')
def webpage1(arg):
print('webpage---1',arg)
@login('weixin')
def webpage2():
print('webpage---2')
##temp = login("qq")##original method1
##webpage1 = temp(webpage1)
webpage1('111')
##-----------Multi-layer decorator--------------------
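# Equivalent manual application of the multi-layer decorator above (a sketch;
# it mirrors what @login('qq') does, without invoking the wrapped page):
def webpage3():
    print('webpage---3')
webpage3 = login('qq')(webpage3)  # login binds auth_type, outer binds func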
| 26.377358
| 79
| 0.508941
| 279
| 2,796
| 5.035842
| 0.189964
| 0.106762
| 0.076868
| 0.082562
| 0.824199
| 0.776512
| 0.776512
| 0.776512
| 0.730249
| 0.730249
| 0
| 0.020049
| 0.268598
| 2,796
| 105
| 80
| 26.628571
| 0.666993
| 0.629828
| 0
| 0.071429
| 0
| 0
| 0.082063
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.178571
| false
| 0.178571
| 0
| 0
| 0.25
| 0.107143
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
bdd6f392de5f9a7829a76f27096203708e20bfc0
| 79
|
py
|
Python
|
2017/12.12/python/lsh-get-github-user.py
|
mksweetlife/study
|
0786a4bd7901ac0d1aa5efdae5b755693eee5cd3
|
[
"MIT"
] | 1
|
2017-10-24T08:19:15.000Z
|
2017-10-24T08:19:15.000Z
|
2017/12.12/python/lsh-get-github-user.py
|
mksweetlife/study
|
0786a4bd7901ac0d1aa5efdae5b755693eee5cd3
|
[
"MIT"
] | 31
|
2017-10-31T11:09:44.000Z
|
2018-12-04T07:47:46.000Z
|
2017/12.12/python/lsh-get-github-user.py
|
mksweetlife/study
|
0786a4bd7901ac0d1aa5efdae5b755693eee5cd3
|
[
"MIT"
] | 5
|
2017-10-26T02:13:08.000Z
|
2018-07-05T04:58:47.000Z
|
def getUser():
return "Sanghak,Lee / http://sanghaklee.tistory.com" #FIXME:
| 39.5
| 64
| 0.696203
| 10
| 79
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126582
| 79
| 2
| 64
| 39.5
| 0.797101
| 0.075949
| 0
| 0
| 0
| 0
| 0.589041
| 0
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
bddd1a20f636344c19e0119a658dd15a4909a0a2
| 2,955
|
py
|
Python
|
match_rcnn/mmdetection/mmdet/pretrained_models/cocopth.py
|
201419/taobao-live-product-recognition
|
1f5de5917b43b2b58f4387a77272fc7c587a1051
|
[
"Apache-2.0"
] | null | null | null |
match_rcnn/mmdetection/mmdet/pretrained_models/cocopth.py
|
201419/taobao-live-product-recognition
|
1f5de5917b43b2b58f4387a77272fc7c587a1051
|
[
"Apache-2.0"
] | null | null | null |
match_rcnn/mmdetection/mmdet/pretrained_models/cocopth.py
|
201419/taobao-live-product-recognition
|
1f5de5917b43b2b58f4387a77272fc7c587a1051
|
[
"Apache-2.0"
] | 1
|
2021-05-14T03:30:29.000Z
|
2021-05-14T03:30:29.000Z
|
import torch
import numpy as np
num_classes = 25
model_coco = torch.load(r"/media/alvinai/Documents/model/faster_rcnn_r50_fpn_1x_20190610-bf0ea559.pth")
# print(model_coco)
# print(model_coco["state_dict"]["rpn_head.rpn_cls.weight"].shape)
# a = model_coco["state_dict"]["rpn_head.rpn_cls.weight"][0]
# model_coco["state_dict"]["rpn_head.rpn_cls.weight"]=np.insert(model_coco["state_dict"]["rpn_head.rpn_cls.weight"], 0, values=a, axis=0)
# print(model_coco["state_dict"]["rpn_head.rpn_cls.weight"].shape)
# b=model_coco["state_dict"]["rpn_head.rpn_cls.bias"][0]
# model_coco["state_dict"]["rpn_head.rpn_cls.bias"] = np.insert(model_coco["state_dict"]["rpn_head.rpn_cls.bias"], 0, values=b, axis=0)
# print(model_coco["state_dict"]["rpn_head.rpn_cls.bias"].shape)
# c= model_coco["state_dict"]["rpn_head.rpn_reg.weight"][0].repeat(4,1,1,1)
# model_coco["state_dict"]["rpn_head.rpn_reg.weight"]=np.insert(model_coco["state_dict"]["rpn_head.rpn_reg.weight"], 0, values=c, axis=0)
# # c= model_coco["state_dict"]["rpn_head.rpn_reg.weight"][1]
# # model_coco["state_dict"]["rpn_head.rpn_reg.weight"]=np.insert(model_coco["state_dict"]["rpn_head.rpn_reg.weight"], 0, values=c, axis=0)
# # c= model_coco["state_dict"]["rpn_head.rpn_reg.weight"][2]
# # model_coco["state_dict"]["rpn_head.rpn_reg.weight"]=np.insert(model_coco["state_dict"]["rpn_head.rpn_reg.weight"], 0, values=c, axis=0)
# # c= model_coco["state_dict"]["rpn_head.rpn_reg.weight"][3]
# # model_coco["state_dict"]["rpn_head.rpn_reg.weight"]=np.insert(model_coco["state_dict"]["rpn_head.rpn_reg.weight"], 0, values=c, axis=0)
# print(model_coco["state_dict"]["rpn_head.rpn_reg.weight"].shape)
# d=model_coco["state_dict"]["rpn_head.rpn_reg.bias"][0].repeat(4,)
# model_coco["state_dict"]["rpn_head.rpn_reg.bias"] = np.insert(model_coco["state_dict"]["rpn_head.rpn_reg.bias"], 0, values=d, axis=0)
# print(model_coco["state_dict"]["rpn_head.rpn_reg.bias"].shape)
# # model_coco["state_dict"]["rpn_head.rpn_reg.weight"] = model_coco["state_dict"]["rpn_head.rpn_reg.weight"].repeat(2,1,1,1)
# # model_coco["state_dict"]["rpn_head.rpn_reg.bias"] = model_coco["state_dict"]["rpn_head.rpn_reg.bias"].repeat(2,)
# weight
model_coco["state_dict"]["bbox_head.fc_cls.weight"] = model_coco["state_dict"]["bbox_head.fc_cls.weight"][
:num_classes, :]
model_coco["state_dict"]["bbox_head.fc_reg.weight"] = model_coco["state_dict"]["bbox_head.fc_reg.weight"][
:num_classes*4, :]
# bias
model_coco["state_dict"]["bbox_head.fc_cls.bias"] = model_coco["state_dict"]["bbox_head.fc_cls.bias"][:num_classes]
model_coco["state_dict"]["bbox_head.fc_reg.bias"] = model_coco["state_dict"]["bbox_head.fc_reg.bias"][:num_classes*4]
# save new model
torch.save(model_coco, r"/media/alvinai/Documents/underwater/model/libra_faster_rcnn_r50_fpn_1x_cls_%d.pth" % num_classes)
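# Quick sanity check on the head surgery above (a sketch; the key names assume
# the same mmdetection checkpoint layout this script already relies on):
assert model_coco["state_dict"]["bbox_head.fc_cls.weight"].shape[0] == num_classes
assert model_coco["state_dict"]["bbox_head.fc_reg.weight"].shape[0] == num_classes * 4
print("trimmed bbox_head to %d classes" % num_classes)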
| 75.769231
| 140
| 0.706937
| 494
| 2,955
| 3.878543
| 0.105263
| 0.192589
| 0.277662
| 0.356994
| 0.845511
| 0.826722
| 0.826722
| 0.826722
| 0.826722
| 0.669102
| 0
| 0.019806
| 0.094416
| 2,955
| 38
| 141
| 77.763158
| 0.696188
| 0.671404
| 0
| 0
| 0
| 0
| 0.458287
| 0.369299
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.181818
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
bdf4b884e53e55033540d679eaf6e95f48c085d7
| 118
|
py
|
Python
|
tests/test_crawler.py
|
Yotamho/nba-analytics
|
13174040198d44aab035de58cf785bce6926958a
|
[
"MIT"
] | null | null | null |
tests/test_crawler.py
|
Yotamho/nba-analytics
|
13174040198d44aab035de58cf785bce6926958a
|
[
"MIT"
] | null | null | null |
tests/test_crawler.py
|
Yotamho/nba-analytics
|
13174040198d44aab035de58cf785bce6926958a
|
[
"MIT"
] | null | null | null |
from nba_analytics.crawler import pbp_for_range
def test_crawler():
    assert pbp_for_range(3, 2008, 2009) is not None
| 19.666667
| 47
| 0.762712
| 19
| 118
| 4.421053
| 0.789474
| 0.142857
| 0.261905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09
| 0.152542
| 118
| 5
| 48
| 23.6
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
bdfb84bba555606efcd2d3ca97385378284beca7
| 8,203
|
py
|
Python
|
tests/test_levdistresult.py
|
ZenulAbidin/bip39validator
|
b78f2db6f46b56b408eef3a51e921e96247a9b46
|
[
"MIT"
] | 3
|
2021-02-11T20:37:56.000Z
|
2021-06-11T03:29:15.000Z
|
tests/test_levdistresult.py
|
ZenulAbidin/bip39validator
|
b78f2db6f46b56b408eef3a51e921e96247a9b46
|
[
"MIT"
] | 4
|
2020-10-04T23:11:08.000Z
|
2020-12-23T00:32:52.000Z
|
tests/test_levdistresult.py
|
ZenulAbidin/bip39validator
|
b78f2db6f46b56b408eef3a51e921e96247a9b46
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from bip39validator import ValidationFailed
from bip39validator.BIP39WordList import BIP39WordList
levdist_gt2 = """brown
brpyt"""
levdist_le2 = """brow
brol"""
# Expected results *must* be in word alphabetical order.
class TestLevDistResult(TestCase):
def test_getwordpairs_eq(self):
bip39 = BIP39WordList("levdist_le2", string=levdist_le2)
try:
res = bip39.test_lev_distance(2)
except ValidationFailed as e:
res = e.status_obj
expected_res = [("brol", "brow")]
self.assertEqual(expected_res, res.getwordpairs_eq(1))
try:
res.getwordpairs_eq(2)
self.fail()
except AssertionError as e:
pass
def test_getlinepairs_eq(self):
bip39 = BIP39WordList("levdist_le2", string=levdist_le2)
try:
res = bip39.test_lev_distance(2)
except ValidationFailed as e:
res = e.status_obj
expected_res = [(2,1)]
self.assertEqual(expected_res, res.getlinepairs_eq(1))
try:
res.getwordpairs_eq(0)
self.fail()
except AssertionError as e:
pass
def test_getwordpairs_lt(self):
bip39 = BIP39WordList("levdist_le2", string=levdist_le2)
try:
res = bip39.test_lev_distance(2)
except ValidationFailed as e:
res = e.status_obj
expected_res = [("brol", "brow")]
self.assertEqual(expected_res, res.getwordpairs_lt(2))
try:
res.getwordpairs_lt(0)
self.fail()
except AssertionError as e:
pass
def test_getlinepairs_lt(self):
bip39 = BIP39WordList("levdist_le2", string=levdist_le2)
try:
res = bip39.test_lev_distance(2)
except ValidationFailed as e:
res = e.status_obj
expected_res = [(2, 1)]
self.assertEqual(expected_res, res.getlinepairs_lt(2))
try:
res.getlinepairs_lt(0)
self.fail()
except AssertionError as e:
pass
def test_getwordpairs_gt(self):
bip39 = BIP39WordList("levdist_gt2", string=levdist_gt2)
try:
res = bip39.test_lev_distance(2)
except ValidationFailed as e:
res = e.status_obj
expected_res = [("brown", "brpyt")]
self.assertEqual(expected_res, res.getwordpairs_gt(2))
try:
res.getwordpairs_gt(0)
self.fail()
except AssertionError as e:
pass
def test_getlinepairs_gt(self):
bip39 = BIP39WordList("levdist_gt2", string=levdist_gt2)
try:
res = bip39.test_lev_distance(2)
except ValidationFailed as e:
res = e.status_obj
expected_res = [(1, 2)]
self.assertEqual(expected_res, res.getlinepairs_gt(2))
try:
res.getlinepairs_gt(0)
self.fail()
except AssertionError as e:
pass
def test_getwordpairs_list(self):
concat = "\n".join([levdist_le2]+["zzyzx"])
bip39 = BIP39WordList("levdist_concat", string=concat)
try:
res = bip39.test_lev_distance(2)
except ValidationFailed as e:
res = e.status_obj
expected_res = [("brol", "brow")]
self.assertEqual(expected_res, res.getwordpairs_list([1,2]))
for t in ["abc", [], ["a"], 0]:
try:
res.getwordpairs_list(t)
self.fail()
except AssertionError as e:
pass
def test_getlinepairs_list(self):
concat = "\n".join([levdist_le2]+["zzyzx"])
bip39 = BIP39WordList("levdist_concat", string=concat)
try:
res = bip39.test_lev_distance(2)
except ValidationFailed as e:
res = e.status_obj
expected_res = [(2, 1)]
self.assertEqual(expected_res, res.getlinepairs_list([1,2]))
for t in ["abc", [], ["a"], 0]:
try:
res.getlinepairs_list(t)
self.fail()
except AssertionError as e:
pass
def test_getdist(self):
bip39 = BIP39WordList("levdist_le2", string=levdist_le2)
try:
res = bip39.test_lev_distance(2)
except ValidationFailed as e:
res = e.status_obj
expected_res = 1
self.assertEqual(expected_res, res.getdist("brow", "brol"))
for t in [(1, "abc"), ("", "abc"), ("ABC", "abc"),
("abc", 1), ("abc", ""), ("abc", "ABC")]:
try:
res.getdist(*t)
self.fail()
except AssertionError as e:
pass
def test_getdist_all(self):
bip39 = BIP39WordList("levdist_le2", string=levdist_le2)
try:
res = bip39.test_lev_distance(2)
except ValidationFailed as e:
res = e.status_obj
expected_res = [(("brol", "brow"), (2, 1), 1)]
self.assertEqual(expected_res, res.getdist_all("brow"))
for t in [1, "", "ABC"]:
try:
res.getdist_all(t)
self.fail()
except AssertionError as e:
pass
def test_getdist_all_eq(self):
bip39 = BIP39WordList("levdist_le2", string=levdist_le2)
try:
res = bip39.test_lev_distance(2)
except ValidationFailed as e:
res = e.status_obj
expected_res = [(("brol", "brow"), (2, 1), 1)]
self.assertEqual(expected_res, res.getdist_all_eq("brow", 1))
for t in [1, "", "ABC"]:
try:
res.getdist_all_eq(t, 1)
self.fail()
except AssertionError as e:
pass
except KeyError as e:
pass
try:
res.getdist_all_eq("abc", 0)
self.fail()
except AssertionError as e:
pass
except KeyError as e:
pass
def test_getdist_all_lt(self):
bip39 = BIP39WordList("levdist_le2", string=levdist_le2)
try:
res = bip39.test_lev_distance(2)
except ValidationFailed as e:
res = e.status_obj
expected_res = [(("brol", "brow"), (2, 1), 1)]
self.assertEqual(expected_res, res.getdist_all_lt("brow", 2))
for t in [1, "", "ABC"]:
try:
res.getdist_all_lt(t, 1)
self.fail()
except AssertionError as e:
pass
except KeyError as e:
pass
try:
res.getdist_all_lt("abc", 0)
self.fail()
except AssertionError as e:
pass
except KeyError as e:
pass
def test_getdist_all_gt(self):
bip39 = BIP39WordList("levdist_gt2", string=levdist_gt2)
try:
res = bip39.test_lev_distance(2)
except ValidationFailed as e:
res = e.status_obj
expected_res = [(("brpyt", "brown"), (2, 1), 3)]
self.assertEqual(expected_res, res.getdist_all_gt("brown", 2))
for t in [1, "", "ABC"]:
try:
res.getdist_all_gt(t, 1)
self.fail()
except AssertionError as e:
pass
try:
res.getdist_all_gt("abc", 0)
self.fail()
except AssertionError as e:
pass
except KeyError as e:
pass
def test_getdist_all_list(self):
concat = "\n".join([levdist_le2]+["zzyzx"])
bip39 = BIP39WordList("concat", string=concat)
try:
res = bip39.test_lev_distance(2)
except ValidationFailed as e:
res = e.status_obj
expected_res = [(("brol", "brow"), (2, 1), 1)]
self.assertEqual(expected_res, res.getdist_all_list("brow", [1]))
for t in [1, "", "ABC"]:
for u in ["abc", [], ["a"], 0]:
try:
res.getdist_all_list(t, u)
self.fail()
except AssertionError as e:
pass
except KeyError as e:
pass
| 33.076613
| 73
| 0.536633
| 922
| 8,203
| 4.590022
| 0.069414
| 0.026229
| 0.038043
| 0.112476
| 0.889887
| 0.884688
| 0.851371
| 0.825142
| 0.825142
| 0.804112
| 0
| 0.036539
| 0.356089
| 8,203
| 247
| 74
| 33.210526
| 0.764672
| 0.006583
| 0
| 0.734783
| 0
| 0
| 0.044311
| 0
| 0
| 0
| 0
| 0
| 0.134783
| 1
| 0.06087
| false
| 0.1
| 0.013043
| 0
| 0.078261
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
da804ba481b451cc0ca78dfe3274c111f94eaf58
| 16,539
|
py
|
Python
|
data-processing/utils/__init__.py
|
mark-andrews/BayesianAccountMemoryText
|
28609a4d3d3924c5082af81359ffc3f78f6eb6da
|
[
"CC-BY-4.0"
] | 2
|
2020-04-10T17:14:19.000Z
|
2020-04-10T17:14:26.000Z
|
data-processing/utils/__init__.py
|
mark-andrews/BayesianAccountMemoryText
|
28609a4d3d3924c5082af81359ffc3f78f6eb6da
|
[
"CC-BY-4.0"
] | 18
|
2020-03-24T17:07:23.000Z
|
2021-12-13T20:01:11.000Z
|
data-processing/utils/__init__.py
|
mark-andrews/BayesianAccountMemoryText
|
28609a4d3d3924c5082af81359ffc3f78f6eb6da
|
[
"CC-BY-4.0"
] | null | null | null |
"""
Some general utils.
"""
##=============================================================================
## Standard library imports
##=============================================================================
#import string
#import re
#import os
#import errno
#import hashlib
#
##================================ End Imports ================================
#
#def deletechars(s, exclude_chars):
# ''' Fast deletion of characters from string.
# It uses a dummy translation table, and so no mapping is applied, and we
# just delete the exclude_chars characters.
# '''
# phony_translate_table = string.maketrans("","")
# return s.translate(phony_translate_table, exclude_chars)
#
#
#def deletepunctuation(s):
# ''' Fast deletion of punctuation from string'''
# return deletechars(s,string.punctuation)
#
#
#def tokenize(text, foldcase=True):
# '''
# A very cheap and easy tokenization.
# First, remove "'s". For example, "dog's" becomes "dog".
# Second, zap utf-8 chars.
# Then, remove all punctuation and, by default, fold upper and lower case words
# and then split by whitespace.
# '''
#
# text = re.sub(r'\'s','', text)
# s = ''.join([s for s in text if s in string.printable])
#
# s = str(s) # Got to convert it to str.
# s = deletepunctuation(s)
#
# if foldcase:
# s = s.lower()
# return s.split()
#
#
#def mkdir_p(path):
# '''
# Make a directory, making parents if necessary.
# Taken verbatim from
# http://stackoverflow.com/a/600612
# '''
# try:
# os.makedirs(path)
# except OSError as exc: # Python >2.5
# if exc.errno == errno.EEXIST and os.path.isdir(path):
# pass
# else: raise
#
#
#def checksum(argument, algorithm='sha256'):
# '''
# Returns the hash checksum of `argument'.
# If `argument' is a name of a file, then perform the checksum on the file.
# Otherwise, the checksum is of the string `argument'.
# By default, it will be the sha1 checksum (and so equivalent to linux's
# sha1sum). Alternatively, the algorithm could be md5 (equivalent to linux's
# md5sum), or else sha224, sha256, sha384, sha512.
# '''
#
# h = hashlib.new(algorithm)
#
# if os.path.exists(argument) and os.path.isfile(argument):
# argument = open(argument,'rb').read()
#
# h.update(argument)
#
# return h.hexdigest()
# I didn't have anywhere better to put these.
hdptm_170617202450_6333_state_checksums = '''
hdptm_170617202450_6333_state_19000.npz 2dadab2c09f54f4d03a1187c8d5db49a8ec0a2bfe7bd5f5630448958ba4f21ac
hdptm_170617202450_6333_state_19010.npz 0edcb069ab3e559f62728d372f98fb5c047ca8a47ede262dad05ef236d29615f
hdptm_170617202450_6333_state_19020.npz 395b4214a753d811f18f24c6665665bdfc201928c7e661294ab0e991b993b1c5
hdptm_170617202450_6333_state_19030.npz 469e9742d4a508c4b34e5283254041ec34b58ebf1f82a15f845949d7367708d5
hdptm_170617202450_6333_state_19040.npz 036d02b36964f24b4a49465769eb51f46ccbc6f52255797b32207c398c6a31f5
hdptm_170617202450_6333_state_19050.npz 74f93e761164f35d6c433809513604bd6c3bf54e68fb00b11437f9fc8f0366d6
hdptm_170617202450_6333_state_19060.npz e9b21a7550a86b55e419a6bde8e38766fd18fe978e3b78e3bff18d7c0d842a85
hdptm_170617202450_6333_state_19070.npz bb1778aa769a72468642f0ac7193f625ddc62c6326cd214794f9c3b88f17ab17
hdptm_170617202450_6333_state_19080.npz d6f49bcf8394679f46068b06c5a8798facd10f4e05e906232dd8feb69b48144a
hdptm_170617202450_6333_state_19090.npz df75378ce0436cc1bdb6e822320d88e9f6a5ffd21dd0b14e0d81270dfdd1601f
hdptm_170617202450_6333_state_19100.npz a8c6a2c9766a1f5933e2864b901469ab316d536b4af4100bb01982d4372d8a87
hdptm_170617202450_6333_state_19110.npz aa4150923a0fa565865311128490c7172ffce4bb6615e3cd09c2ef8285e05f06
hdptm_170617202450_6333_state_19120.npz e0b88f8afa10d0fdf3ca522bdfb983b3b90bd09cc7b43fe6a8382b83a4b124f0
hdptm_170617202450_6333_state_19130.npz 7a742737378a34d4fd6eb0de4821ef589e5b51036ba85d49215d3ca1989eac36
hdptm_170617202450_6333_state_19140.npz af7a17b9cf56ba85c546f0b50378d8e6841ce9be91cb8993dc4262aac4d37be9
hdptm_170617202450_6333_state_19150.npz e6ad504a862dd1d405b55197d68ea57bcc60c24d0c2f520b57d45f0829eddbc7
hdptm_170617202450_6333_state_19160.npz 2756ed729b15107bb87f171a16e9dee50815912c49a24bb08e4dc1deec385afa
hdptm_170617202450_6333_state_19170.npz b1cbbe73548c07adeca9329fb7965d0045e52a5af1a561967381ebea97889dcc
hdptm_170617202450_6333_state_19180.npz 8254e6586d1cdf61911afa73d4cd49df8a72c4951cf978442cc7655d40d1604a
hdptm_170617202450_6333_state_19190.npz 386db58a375862cc0292d9cefc66f3da87c8d5d15d887bbcf2be64603ec20ea0
hdptm_170617202450_6333_state_19200.npz ee7ce427247d69dfdee5c624170620514b7092a578b55247275e15e8ed0fce9b
hdptm_170617202450_6333_state_19210.npz 7dff8c7a9bfc2aaa62a74ae1e161bb5e083c371bb63c26a960d7d295e3ff819b
hdptm_170617202450_6333_state_19220.npz 1bbd70b4ca4385bcb3c07fc3597d1dc332fd1b265befa198a967881e1a27ecaf
hdptm_170617202450_6333_state_19230.npz 8fe014690b637ee9be3d21d975391a180b42931433fc6984a3d7f89ccad30813
hdptm_170617202450_6333_state_19240.npz c7f96995c0d91a92d5c3e7b39b946538ae4acb581604dc3fc0bdcbcdab6a8464
hdptm_170617202450_6333_state_19250.npz e35aeca75a6ddcca4337f6394da7b7a66cbf4e7f59b5fa2196ae7789ca675c12
hdptm_170617202450_6333_state_19260.npz d249f87aeae32583306bf4c23c3a241839dc1f2754ce272cb26f720cf334bc90
hdptm_170617202450_6333_state_19270.npz 9548567eabb7ec4a12d8d59f01a4d295a08ca7685e62771d630ac7127f8b46e7
hdptm_170617202450_6333_state_19280.npz dcc07c85a9c91dadc8066ed97ad41d738b464a11eaa95b645f061a6bc57fd085
hdptm_170617202450_6333_state_19290.npz 8a7bcfb27a9445239c9d38998d30d8db685f7ab92bdb0ba85b72d4e20b6f0bfa
hdptm_170617202450_6333_state_19300.npz 8e34da3bda88e342b0046c809c609e24d7927626f09c8455e97ec54767c063f0
hdptm_170617202450_6333_state_19310.npz 7c948def3ae6b24bcefc5d17a165354d282e74f7dfa661b343bcb236bfe114e8
hdptm_170617202450_6333_state_19320.npz ac74ebcdff39c4a5f32fd6f6dd6c1294f584dce6536b6eaf1dffcbc512dab340
hdptm_170617202450_6333_state_19330.npz 6a02ea121ddf1a6dcdf5616707c0a0c46414ad2ed5219cf4b3f08d0ac4add6ce
hdptm_170617202450_6333_state_19340.npz 11e9ce24716ac4852d87f3043afff8a7624d1384da19fb41ffa50b8dddca6b76
hdptm_170617202450_6333_state_19350.npz 119881f704fd40ed0bf156e66ff801755922e8782f34cb281618da7482bcedca
hdptm_170617202450_6333_state_19360.npz 579c001ca2f4812b98149480698675120cf9f6759214d408ee519edced5365a2
hdptm_170617202450_6333_state_19370.npz 78efe683e79cd6fdc164659658e33e681e9c9f18e1a6e1dcd22e43f59388b998
hdptm_170617202450_6333_state_19380.npz bb78c916ec550dbb1458af9e3fa9e03f30f2ca604adf09953b2bfbb2223dc3d6
hdptm_170617202450_6333_state_19390.npz 09c802096a17cef6554a4d8bcad4e7f4020d67502378d99deb07b6e3846ec211
hdptm_170617202450_6333_state_19400.npz 59e76f0bc6d3fe1ce81131ecc3839c0970196d38c812c487aa97235e20281f77
hdptm_170617202450_6333_state_19410.npz e98c8b229f7af914518eaa522a4b8c39e0534429a20e9eb93c50c501d53a6c98
hdptm_170617202450_6333_state_19420.npz d97e227352d9db0f7c180cfbba291a3d68603796c12eb89669b5d53cda38bdf9
hdptm_170617202450_6333_state_19430.npz 16c023d80ade574fdbab993b191a03b27043d0805aec0f5bac32518e8fbf6f6a
hdptm_170617202450_6333_state_19440.npz 8ef8f6d4d542406475ee5ef7d0551561defef556d7b75fc337a4a588668c45d4
hdptm_170617202450_6333_state_19450.npz b48b4d4ea8f57e2c5bf293673b24a2003b55bbc4615fcd3661ac7acbef35d89c
hdptm_170617202450_6333_state_19460.npz 5998645559a3d936109ddd786e0f2d73667de7a80c435700f06c969bf5d0fca9
hdptm_170617202450_6333_state_19470.npz f729fc10ebd3715af80a7600d3fc6bc4c2d741e11454970351f07abeb18eec0e
hdptm_170617202450_6333_state_19480.npz 665e457a2462cf30aab5d8ab5a79b1dbb4e6f305d42cc2066efd81f35b257706
hdptm_170617202450_6333_state_19490.npz 36e5247839b15d32b8100756d76426a664af22b8413c5e195d40802d42896a84
hdptm_170617202450_6333_state_19500.npz e4613cb454b29ac5e3e5f419079e9e0c0c38295adba129f361f749a948269482
hdptm_170617202450_6333_state_19510.npz 6785bc7506629b87f271d9022eb648033ea61a63b13c42c12f7a11c1bb1f1526
hdptm_170617202450_6333_state_19520.npz 26439ee52c214ec5283a882c4e976d4cbd20d4321a1d6ab57cb4015e7a3a302e
hdptm_170617202450_6333_state_19530.npz 7da0b15f82c93c78a0bded179ca73fab61521d2be5abede01af0ac1b632a370d
hdptm_170617202450_6333_state_19540.npz 1e2d38e7228ec08aec617e4e94eebbec740ade10c7bca43513aa72f04137e1e1
hdptm_170617202450_6333_state_19550.npz 84745e32828efdb8642ef3242a7405304a19d0e99d407f67f20a0a20fa9844ae
hdptm_170617202450_6333_state_19560.npz 6783b0d5ec6a61fc85c49ec95fed53d2e7f1cd8116a6b35e473b218a2e66f83d
hdptm_170617202450_6333_state_19570.npz 3e0afadedf9fe5f0ddb26ec1e147e103cda15fa7c53d7894b4b43006c3368124
hdptm_170617202450_6333_state_19580.npz cdac508bc665928a82d5fbefdaaa1fe40b14b8e6e20f48f3bd5f617f01906807
hdptm_170617202450_6333_state_19590.npz 63178a70c938803757bf3701b9f9fd38f85c42a9a56eb3005dfee4605dedb5ff
hdptm_170617202450_6333_state_19600.npz f3c16b084267c09b54b840012051ad71fc9a152392524bac18da266aeddbdf08
hdptm_170617202450_6333_state_19610.npz f53ce93e6b5ca21d095cdfb485ef1c0a93b8723aed5b3f1d47912f046ea18f50
hdptm_170617202450_6333_state_19620.npz 9b707d7902cd3fc323d6a3bc5794e6328c7c9ab8fc4a1803ab46237598f774a6
hdptm_170617202450_6333_state_19630.npz f788652fc7daad24f8f16a9e33527e9f2282a34c61c49abf297bfd1c1435bc41
hdptm_170617202450_6333_state_19640.npz 0f1353c671f5e5c7c9748318e92b1ef1d50efce57c423f6800ef51b4cd54d474
hdptm_170617202450_6333_state_19650.npz 2f4bfdd72fd02c7c17f2e6f05fcb4922d32e252eacd45fbfdcde2b208e9b226c
hdptm_170617202450_6333_state_19660.npz 887731ab2a189453936610624ea4072ea201de020ad7d0820ac9e50564a4b320
hdptm_170617202450_6333_state_19670.npz 9392437d1a4c024f4f16eee1ed638f7e495eac2743887c0879094ff3cb347927
hdptm_170617202450_6333_state_19680.npz 043a10ae74c4e49f7a9c1e50dae9bf449b7a07a8af8ecda2a2ad32cbe6f359d0
hdptm_170617202450_6333_state_19690.npz 2ee4badeef50880921b8ebb91242ac1decaef17d80d3417aae6d9d9d8059714b
hdptm_170617202450_6333_state_19700.npz 79ad9e6d38f113237870d3a8052044b1ffccf4365d0f3151d1e4c1c29457edff
hdptm_170617202450_6333_state_19710.npz fe033e037a350b1f80afdb94ba08dd90b0b529a36caae0094f73a75cd85c3359
hdptm_170617202450_6333_state_19720.npz 73d77fba3122a661446f5db0a64c77ee960e292ed18ff87ed42ff202e1093a45
hdptm_170617202450_6333_state_19730.npz c4debd707536613aa1430177fb1c03f9b23d1d5a881897c556ea9cfd493c7720
hdptm_170617202450_6333_state_19740.npz a9b47af3f6fe82c51f85472f69e67de99bf7c5cce88fb793f8a140bf1905d835
hdptm_170617202450_6333_state_19750.npz 4ef5fb2a9aae7336f3ca4f51227c725d5d5f16f97916b0f3e17540f27669813a
hdptm_170617202450_6333_state_19760.npz 6571c32082aa5c009d0223b3f8f843980b48e550cb267b51c531af2479d5804c
hdptm_170617202450_6333_state_19770.npz d8ddb87feef83d76a0121fa5c050e7462e74ec003aa1dd3ded4f8a33ec4578b7
hdptm_170617202450_6333_state_19780.npz e0ed56a8f39b623dfbe8aae83e63ea61c62cdef1d74867475dc4fb87debeede2
hdptm_170617202450_6333_state_19790.npz 49c676838b6d69bff4c5697b1ee4268c914fac6ff54a37421bc8c35459a9f419
hdptm_170617202450_6333_state_19800.npz ca044e99eee9ccdc5f606885d8d87c3688d9c0719ebefa47d60c9d814404faf3
hdptm_170617202450_6333_state_19810.npz 038de2608ffdeeb4a4dccc9c71e0d674616f36cf056a95c0a95ee74afa5b6535
hdptm_170617202450_6333_state_19820.npz 72a87a8749d9f82f3ed25ea5df0c8f96efac5c94fc7de8cc07c6ee3d57ef2c39
hdptm_170617202450_6333_state_19830.npz aa52113c9e68d80a02f717739240cca50af6734cd23b92e481ba8a8493a6d26c
hdptm_170617202450_6333_state_19840.npz cae47400e63db6748fcd7b87055623fbc33a4c831a17d9e2ab939dabdee8ade0
hdptm_170617202450_6333_state_19850.npz 245994e5a33e7ab2ef33f0f29d91d12289055f9103f30e29f14b7a95152c4f33
hdptm_170617202450_6333_state_19860.npz d76be92b7e1ea383072743bb57d26c16a16495268ced639acc88c8c9ab682c3a
hdptm_170617202450_6333_state_19870.npz 6dd791cd116efb0c13007f81e5341f15d8e636594b9da256fdb3853f104b9f36
hdptm_170617202450_6333_state_19880.npz c4bbcada19fd5dedc602a4aef92b0def31f1b8c0badda5fee8e6147e443089b6
hdptm_170617202450_6333_state_19890.npz 5f55097c1dab7c441df6d964ca4eb33c13dd50c679306bdb7b6267e324d3a8bb
hdptm_170617202450_6333_state_19900.npz 447ebb530652ee64be1c404dac2b486e9ae9e600cc18a1f58a8cb2a4775a7d75
hdptm_170617202450_6333_state_19910.npz 66d2444d50c556445a90a8a3c44d6f5802296ff3c7467080b9cc6ea65cc7b356
hdptm_170617202450_6333_state_19920.npz 65211ccdb9090dd1714ffd6de5bf7b35dc565adb7a10000722bc2f8ce8d5b845
hdptm_170617202450_6333_state_19930.npz 6df3476774eb256b082b5aeda5e879f780d7a8bd20df936337283da172f51f65
hdptm_170617202450_6333_state_19940.npz 4fae17fd61bb16c83cc3ac647aa7903a8d41a5786dafef43e6cf0423fdabb2c7
hdptm_170617202450_6333_state_19950.npz 44d7a8608e071d01303ed9f060f3c51fd66d4b865d316e020771237929352452
hdptm_170617202450_6333_state_19960.npz f0363ffe99a18209de5e036cc9912a81171c1e451defa412738fed097a5d6e3d
hdptm_170617202450_6333_state_19970.npz 6226b1a68d7d51d0e903aa16b0bef0b1759ee3f8edb7faa1b148255ade2340ee
hdptm_170617202450_6333_state_19980.npz d55545cc6d2464b1a8f9ef740420d93288100872b2242361d841199d3d9054a5
hdptm_170617202450_6333_state_19990.npz 21dd9a492a6a9911553bcd6caf1ea4954aa28fbbb61293747dfe9286ab99050a
hdptm_170617202450_6333_state_20000.npz 4b593d3027d2d76a6186de244b459ceab0d74dacf11bf1db913fbadb90ed110c
'''.strip().splitlines()
hdptm_201117172636_2290_state_checksums = '''
hdptm_201117172636_2290_state_12000.npz 11437bf9bcbeb120200d233822e67f87630780923094ee37c3ad0ba35bf511da
hdptm_201117172636_2290_state_12010.npz d3559444ff4e9098d6ea6216ce2afd2665bd19dd8963825189cb45d9e6c1d64b
hdptm_201117172636_2290_state_12020.npz c1d2e458ee366150869c8be18c897c4c60d96227adf0228b6297bd9df6220e7f
hdptm_201117172636_2290_state_12030.npz fa447583f242ffe45bc441cee34dc9ecb78da33b18a480d98026e70e262981ee
hdptm_201117172636_2290_state_12040.npz 4ee93667dd3662032d00263628c37980d02c6682a0ba75050e2f96a1fb54e612
hdptm_201117172636_2290_state_12050.npz d6fa8eb2340534f90710c81d32b385be158d8b20d96923eaac1fe0f7e8ce958e
hdptm_201117172636_2290_state_12060.npz 52c02cce5f6dead3c57c1bbf849caaa3c25e879905ce713cf0a1adc025182213
hdptm_201117172636_2290_state_12070.npz aac843fc16c7af66aadbb38e589842d37c28369910bad13604615fc4d7ea2c8d
hdptm_201117172636_2290_state_12080.npz 96214e1648a7480c7c635950c978e0109934c5b6390260476f1f02c47bf00249
hdptm_201117172636_2290_state_12090.npz 1814613561f524275a6991f7157c7d17b470d4c6df5801ebd4bb910990f90f17
hdptm_201117172636_2290_state_12100.npz 1fdf6bba764eb3650a530a3b3f8f4378a602463c48edaa741e811ec9f4b40547
hdptm_201117172636_2290_state_12110.npz 55e80b139763c636b9b1df998095867334e1328a35c826fbf19b3cedef921da3
hdptm_201117172636_2290_state_12120.npz 18aac3b341b47d6c0cbf4792ba3eea24189c84ae76bd659e3fa507fc6925a536
hdptm_201117172636_2290_state_12130.npz 29464f260ebd0ee01f0024a3659b958daeabe2ac5971e561e878c6532b3a2713
hdptm_201117172636_2290_state_12140.npz 4a251c321d18b161c9fe159472ccccbe93915a3fdeabd16f3ba72487336bd43a
hdptm_201117172636_2290_state_12150.npz f40711eec89e8074e8446fc504b0b454c0d425dc63add807d541e667e16d1af9
hdptm_201117172636_2290_state_12160.npz 29cb8eee77f9a74e07d3c5d8a2a3d9f074ab70a9564805814531431dc71704f2
hdptm_201117172636_2290_state_12170.npz 45c42a89ea2778aaab542e4125254a84c77d6bd21e0cc521df02c0ee771bd5f2
hdptm_201117172636_2290_state_12180.npz 9cf7d16ffefb92ecd9e5ce39aaac22ff2b71d3877a026c0a0aafcfed3c3a77c3
hdptm_201117172636_2290_state_12190.npz 76ce02b3fdd6883a580edbccbb3604be5ddf65e8fcfd212dc2912229f9800a99
hdptm_201117172636_2290_state_12200.npz d74550abc232d06d1f0bf599875ff666373acb9430c03f0c1cf04611965ed638
hdptm_201117172636_2290_state_12210.npz 62c61f020b996d3ee0e94ceb4be90861d4d75733884cbc16f65d99f2d9a4f671
hdptm_201117172636_2290_state_12220.npz 11fbfb7e29f27d5e14315cfc81082b9c500df2ffbf0a75f36ace77fc612576e4
hdptm_201117172636_2290_state_12230.npz b1fe6e9c3b1ca80e3306533b619bd99b511679a1cf7079c70cb88702595cd0a2
hdptm_201117172636_2290_state_12240.npz 4d9875827548820b7fc2b6489b6ff7b510e3a84f57481ce41ce048db7282982d
hdptm_201117172636_2290_state_12250.npz 8a66908d2d38f67a5ec46ba26ca4177231a6fd552b5eb862a5daea99e9d22cb1
hdptm_201117172636_2290_state_12260.npz 242d0cfa8c666b2bb38fafe1d56bca4a6e74457ed2760cd94a14f455e0e7381d
hdptm_201117172636_2290_state_12270.npz c40d2590ace5c83ca62cd7156a13bfee1dee500320e07a769ea504b7af9c0409
hdptm_201117172636_2290_state_12280.npz 435b2240d78cdbaf58674763756a75f7450d3d5877cecb105653b1f70d52431f
hdptm_201117172636_2290_state_12290.npz 8e5947086206b572de6d36a2b15e218d9d0d1da95b033379bb555b0ae8963491
hdptm_201117172636_2290_state_12300.npz e6b046aa17f9165a7cd7195d77e5cfc71f7b8209b3013a15c0ea77e2dc3b3bd9
hdptm_201117172636_2290_state_12310.npz 5441c7b1a20b03309e5b155aa253830b293c94b6c10b55d5535893e503d8f128
'''.strip().splitlines()
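# A small verifier for the state files listed above, in the spirit of the
# commented-out checksum() helper (a sketch; file locations are an assumption):
def verify_state_files(checksum_lines, directory='.'):
    """Yield (filename, matches) for each '<name> <sha256>' line whose file exists."""
    import hashlib
    import os
    for line in checksum_lines:
        name, expected = line.split()
        path = os.path.join(directory, name)
        if os.path.isfile(path):
            with open(path, 'rb') as f:
                digest = hashlib.sha256(f.read()).hexdigest()
            yield name, digest == expected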
| 73.834821
| 104
| 0.912026
| 1,245
| 16,539
| 11.675502
| 0.345382
| 0.11929
| 0.147358
| 0.182444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.517416
| 0.04704
| 16,539
| 223
| 105
| 74.165919
| 0.40486
| 0.136405
| 0
| 0.014599
| 0
| 0
| 0.984285
| 0.965398
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
da8e99c09aabca3db1bc0e1af11da10e940286d6
| 2,549
|
py
|
Python
|
tests/fixtures/dict_list/docket_list_with_homicide.py
|
SimmonsRitchie/court_docket_scraper
|
f467d59c4ea8dbddb4fd7545dc36656a4b30e46d
|
[
"MIT"
] | 1
|
2021-10-29T20:12:44.000Z
|
2021-10-29T20:12:44.000Z
|
tests/fixtures/dict_list/docket_list_with_homicide.py
|
SimmonsRitchie/court_docket_scraper
|
f467d59c4ea8dbddb4fd7545dc36656a4b30e46d
|
[
"MIT"
] | 2
|
2019-07-19T20:13:16.000Z
|
2019-07-19T20:13:16.000Z
|
tests/fixtures/dict_list/docket_list_with_homicide.py
|
SimmonsRitchie/court_docket_scraper
|
f467d59c4ea8dbddb4fd7545dc36656a4b30e46d
|
[
"MIT"
] | null | null | null |
docket_list = [
{
"county": "Dauphin",
"docketnum": 1,
"case_caption": "Commonwealth V. Smith, John A.",
"arresting_agency": "Harrisburg PD",
"municipality": "Harrisburg",
"defendant": "John A. Smith",
"defendant_race": "white",
"defendant_gender": "Male",
"dob": "01/01/1986",
"filing_date": "03/03/2019",
"charges": "Receiving Stolen Property; Driving W/O A License",
"bail": 25000,
"url": "https://ujsportal.pacourts.us/DocketSheets/MDJReport.ashx?docketNumber=MJ-12302-CR-0000110-2019&dnh=zj8BkxXzkOi23xMzscQ6hw%3d%3d",
},
{
"county": "Dauphin",
"docketnum": 2,
"case_caption": "Commonwealth V. Smith, Duke A.",
"arresting_agency": "Harrisburg PD",
"municipality": "Harrisburg",
"defendant": "Duke A. Smith",
"defendant_race": "white",
"defendant_gender": "Male",
"dob": "01/01/1986",
"filing_date": "03/03/2019",
"charges": "Receiving Stolen Property; Driving W/O A License",
"bail": 25000,
"url": "https://ujsportal.pacourts.us/DocketSheets/MDJReport.ashx?docketNumber=MJ-12302-CR-0000110-2019&dnh=zj8BkxXzkOi23xMzscQ6hw%3d%3d",
},
{
"county": "Dauphin",
"docketnum": 3,
"case_caption": "Commonwealth V. Smith, John A.",
"arresting_agency": "Harrisburg PD",
"municipality": "Harrisburg",
"defendant": "John A. Smith",
"defendant_race": "white",
"defendant_gender": "Male",
"dob": "01/01/1986",
"filing_date": "03/03/2019",
"charges": "Receiving Stolen Property; homicide; Driving W/O A "
"License",
"bail": 25000,
"url": "https://ujsportal.pacourts.us/DocketSheets/MDJReport.ashx?docketNumber=MJ-12302-CR-0000110-2019&dnh=zj8BkxXzkOi23xMzscQ6hw%3d%3d",
},
{
"county": "Dauphin",
"docketnum": 4,
"case_caption": "Commonwealth V. Smith, John A.",
"arresting_agency": "Harrisburg PD",
"municipality": "Harrisburg",
"defendant": "John A. Smith",
"defendant_race": "white",
"defendant_gender": "Male",
"dob": "01/01/1986",
"filing_date": "03/03/2019",
"charges": "Receiving Stolen Property; Driving W/O A License; Murder",
"bail": 25000,
"url": "https://ujsportal.pacourts.us/DocketSheets/MDJReport.ashx?docketNumber=MJ-12302-CR-0000110-2019&dnh=zj8BkxXzkOi23xMzscQ6hw%3d%3d",
}
]
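# Added usage sketch (an assumption based on the fixture's file name, not code
# from the repository): tests presumably pick out dockets whose charges mention
# homicide or murder, which here selects docketnums 3 and 4.
homicide_dockets = [
    d for d in docket_list
    if any(term in d["charges"].lower() for term in ("homicide", "murder"))
]
assert [d["docketnum"] for d in homicide_dockets] == [3, 4]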
| 39.828125
| 146
| 0.582581
| 264
| 2,549
| 5.545455
| 0.234848
| 0.020492
| 0.060109
| 0.065574
| 0.960383
| 0.940574
| 0.940574
| 0.940574
| 0.900273
| 0.900273
| 0
| 0.092098
| 0.250294
| 2,549
| 63
| 147
| 40.460317
| 0.673993
| 0
| 0
| 0.698413
| 0
| 0.063492
| 0.634759
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 16fee0714125b907c565a7460bda1a63c75c9808
| 3,682
| py
| Python
| boundlexx/boundless/migrations/0002_create_item_timeseries.py
| AngellusMortis/boundlexx
| 407f5e38e8e0f067cbcb358787fc9af6a9be9b2a
| [ "MIT" ]
| 1
| 2021-04-23T11:49:50.000Z
| 2021-04-23T11:49:50.000Z
| boundlexx/boundless/migrations/0002_create_item_timeseries.py
| AngellusMortis/boundlexx
| 407f5e38e8e0f067cbcb358787fc9af6a9be9b2a
| [ "MIT" ]
| 1
| 2021-04-17T18:17:12.000Z
| 2021-04-17T18:17:12.000Z
| boundlexx/boundless/migrations/0002_create_item_timeseries.py
| AngellusMortis/boundlexx
| 407f5e38e8e0f067cbcb358787fc9af6a9be9b2a
| [ "MIT" ]
| null | null | null |
# Generated by Django 3.0.8 on 2020-07-21 17:44
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('boundless', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ItemShopStandPrice',
fields=[
('time', models.DateTimeField(auto_now=True, primary_key=True, serialize=False)),
('location_x', models.IntegerField()),
('location_y', models.IntegerField()),
('location_z', models.IntegerField()),
('price', models.DecimalField(decimal_places=2, max_digits=10)),
('item_count', models.IntegerField()),
('beacon_name', models.CharField(db_index=True, max_length=64)),
('guild_tag', models.CharField(max_length=8)),
('shop_activity', models.IntegerField()),
('active', models.BooleanField(db_index=True, default=True)),
('world', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='boundless.World')),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='boundless.Item')),
],
options={
'abstract': False,
'unique_together': {('time', 'world', 'location_x', 'location_y', 'item', 'price', 'item_count')},
},
),
migrations.CreateModel(
name='ItemRequestBasketPrice',
fields=[
('time', models.DateTimeField(auto_now=True, primary_key=True, serialize=False)),
('location_x', models.IntegerField()),
('location_y', models.IntegerField()),
('location_z', models.IntegerField()),
('price', models.DecimalField(decimal_places=2, max_digits=10)),
('item_count', models.IntegerField()),
('beacon_name', models.CharField(db_index=True, max_length=64)),
('guild_tag', models.CharField(max_length=8)),
('shop_activity', models.IntegerField()),
('active', models.BooleanField(db_index=True, default=True)),
('world', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='boundless.World')),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='boundless.Item')),
],
options={
'abstract': False,
'unique_together': {('time', 'world', 'location_x', 'location_y', 'item', 'price', 'item_count')},
},
),
migrations.RunSQL(
"CREATE EXTENSION IF NOT EXISTS timescaledb CASCADE", reverse_sql=migrations.RunSQL.noop
),
migrations.RunSQL(
'ALTER TABLE "boundless_itemshopstandprice" DROP CONSTRAINT "boundless_itemshopstandprice_pkey"', reverse_sql=migrations.RunSQL.noop
),
migrations.RunSQL(
"SELECT create_hypertable('boundless_itemshopstandprice', 'time', chunk_time_interval => 86400000000, migrate_data => true, create_default_indexes => false)", reverse_sql=migrations.RunSQL.noop
),
migrations.RunSQL(
'ALTER TABLE "boundless_itemrequestbasketprice" DROP CONSTRAINT "boundless_itemrequestbasketprice_pkey"', reverse_sql=migrations.RunSQL.noop
),
migrations.RunSQL(
"SELECT create_hypertable('boundless_itemrequestbasketprice', 'time', chunk_time_interval => 86400000000, migrate_data => true, create_default_indexes => false)", reverse_sql=migrations.RunSQL.noop
),
]
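# Added note (commentary, not part of the original migration): TimescaleDB
# requires every unique index on a hypertable to include the partitioning
# column, so each table above first drops the Django-generated primary key and
# only then calls create_hypertable on 'time'. A hypothetical helper that
# factors out the repeated RunSQL pair might look like this:
def hypertable_ops(table, interval=86400000000):
    """Return RunSQL operations turning `table` into a 'time' hypertable."""
    return [
        migrations.RunSQL(
            'ALTER TABLE "{0}" DROP CONSTRAINT "{0}_pkey"'.format(table),
            reverse_sql=migrations.RunSQL.noop,
        ),
        migrations.RunSQL(
            "SELECT create_hypertable('{0}', 'time', "
            "chunk_time_interval => {1}, migrate_data => true, "
            "create_default_indexes => false)".format(table, interval),
            reverse_sql=migrations.RunSQL.noop,
        ),
    ]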
| 51.138889
| 209
| 0.605921
| 348
| 3,682
| 6.215517
| 0.281609
| 0.083218
| 0.032362
| 0.050855
| 0.784096
| 0.784096
| 0.784096
| 0.784096
| 0.784096
| 0.784096
| 0
| 0.0194
| 0.258012
| 3,682
| 71
| 210
| 51.859155
| 0.772328
| 0.012222
| 0
| 0.738462
| 1
| 0
| 0.27923
| 0.084182
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.030769
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| e541c66e26b2dcd462e9c9a22b50ce0b746cca85
| 7,028
| py
| Python
| process_azure_roles.py
| noamsdahan/iam-dataset
| da640ba65906f3f6091c6cbfdfdc0ca03df83f8f
| [ "MIT" ]
| 58
| 2021-06-23T07:12:19.000Z
| 2022-03-26T14:55:00.000Z
| process_azure_roles.py
| noamsdahan/iam-dataset
| da640ba65906f3f6091c6cbfdfdc0ca03df83f8f
| [ "MIT" ]
| 8
| 2021-11-01T15:41:19.000Z
| 2022-02-08T08:04:05.000Z
| process_azure_roles.py
| noamsdahan/iam-dataset
| da640ba65906f3f6091c6cbfdfdc0ca03df83f8f
| [ "MIT" ]
| 4
| 2021-07-31T03:13:12.000Z
| 2022-03-22T08:28:08.000Z
|
import json
import re
result = {
'roles': []
}
raw_roles = []
with open("azure/built-in-roles-raw.json", "r") as f:
raw_roles = json.loads(f.read())
provider_ops = []
with open("azure/provider-operations.json", "r") as f:
provider_ops = json.loads(f.read())
for raw_role in raw_roles:
if raw_role['roleType'] != "BuiltInRole":
continue
permitted_actions = []
permitted_data_actions = []
has_unknown = False
has_external = False
for permission in raw_role['permissions']:
for action in permission['actions']:
matched = False
matchexpression = "^" + action.replace(".", "\\.").replace("*", ".*").replace("?", ".{{1}}") + "$"
for provider in provider_ops:
for operation in provider['operations']:
if not operation['isDataAction'] and re.search(matchexpression.lower(), operation['name'].lower()):
permitted_actions.append({
'name': operation['name'],
'description': operation['description'],
'displayName': operation['displayName'],
'providerName': provider['name'],
'providerDisplayName': provider['displayName']
})
matched = True
for resource_type in provider['resourceTypes']:
for operation in resource_type['operations']:
if not operation['isDataAction'] and re.search(matchexpression.lower(), operation['name'].lower()):
permitted_actions.append({
'name': operation['name'],
'description': operation['description'],
'displayName': operation['displayName'],
'providerName': provider['name'],
'providerDisplayName': provider['displayName']
})
matched = True
if not action.lower().startswith("microsoft."):
has_external = True
if not matched:
has_unknown = True
for permission in raw_role['permissions']:
for action in permission['dataActions']:
matched = False
matchexpression = "^" + action.replace(".", "\\.").replace("*", ".*").replace("?", ".{{1}}") + "$"
for provider in provider_ops:
for operation in provider['operations']:
if operation['isDataAction'] and re.search(matchexpression.lower(), operation['name'].lower()):
permitted_data_actions.append({
'name': operation['name'],
'description': operation['description'],
'displayName': operation['displayName'],
'providerName': provider['name'],
'providerDisplayName': provider['displayName']
})
matched = True
for resource_type in provider['resourceTypes']:
for operation in resource_type['operations']:
if operation['isDataAction'] and re.search(matchexpression.lower(), operation['name'].lower()):
permitted_data_actions.append({
'name': operation['name'],
'description': operation['description'],
'displayName': operation['displayName'],
'providerName': provider['name'],
'providerDisplayName': provider['displayName']
})
matched = True
if not action.lower().startswith("microsoft."):
has_external = True
if not matched:
has_unknown = True
for permission in raw_role['permissions']:
for action in permission['notActions']:
matched = False
matchexpression = "^" + action.replace(".", "\\.").replace("*", ".*").replace("?", ".{{1}}") + "$"
for provider in provider_ops:
for operation in provider['operations']:
if not operation['isDataAction'] and re.search(matchexpression.lower(), operation['name'].lower()):
permitted_actions = list(filter(lambda x: x['name'].lower() != operation['name'].lower(), permitted_actions))
matched = True
for resource_type in provider['resourceTypes']:
for operation in resource_type['operations']:
if not operation['isDataAction'] and re.search(matchexpression.lower(), operation['name'].lower()):
permitted_actions = list(filter(lambda x: x['name'].lower() != operation['name'].lower(), permitted_actions))
matched = True
if not action.lower().startswith("microsoft."):
has_external = True
if not matched:
has_unknown = True
for permission in raw_role['permissions']:
for action in permission['notDataActions']:
matched = False
matchexpression = "^" + action.replace(".", "\\.").replace("*", ".*").replace("?", ".{{1}}") + "$"
for provider in provider_ops:
for operation in provider['operations']:
if operation['isDataAction'] and re.search(matchexpression.lower(), operation['name'].lower()):
permitted_data_actions = list(filter(lambda x: x['name'].lower() != operation['name'].lower(), permitted_data_actions))
matched = True
for resource_type in provider['resourceTypes']:
for operation in resource_type['operations']:
if operation['isDataAction'] and re.search(matchexpression.lower(), operation['name'].lower()):
permitted_data_actions = list(filter(lambda x: x['name'].lower() != operation['name'].lower(), permitted_data_actions))
matched = True
if not action.lower().startswith("microsoft."):
has_external = True
if not matched:
has_unknown = True
result['roles'].append({
'name': raw_role['roleName'],
'description': raw_role['description'],
'permittedActions': permitted_actions,
'permittedDataActions': permitted_data_actions,
'rawPermissions': raw_role['permissions'],
'hasUnknown': has_unknown,
'hasExternal': has_external
})
with open("azure/built-in-roles.json", "w") as f:
f.write(json.dumps(result, indent=2, sort_keys=True))
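# Added refactoring sketch (hypothetical, not in the original script): the
# wildcard-to-regex translation repeated four times above could live in a
# single helper; both the pattern and the operation name are lowercased
# before re.search in the loops above.
def wildcard_to_regex(action):
    """Translate an Azure action pattern: '*' = any run, '?' = one char."""
    return "^" + action.replace(".", "\\.").replace("*", ".*").replace("?", ".") + "$"

# e.g. wildcard_to_regex("Microsoft.Compute/*/read") returns the pattern
# ^Microsoft\.Compute/.*/read$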
| 50.2
| 147
| 0.517075
| 585
| 7,028
| 6.104274
| 0.136752
| 0.058247
| 0.060487
| 0.077289
| 0.8317
| 0.8317
| 0.817698
| 0.817698
| 0.817698
| 0.817698
| 0
| 0.0011
| 0.353159
| 7,028
| 139
| 148
| 50.561151
| 0.784426
| 0
| 0
| 0.726563
| 0
| 0
| 0.157371
| 0.011952
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.039063
| 0
| 0.039063
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| e56946e13d2d2d1c51e541739f896030848cdd8a
| 76,894
| py
| Python
| utils/utils_df_nn.py
| Lifelong-ML/LASEM
| c4ec052c850e37f54bc3e6faf6b988a4c5239f10
| [ "MIT" ]
| 8
| 2021-07-06T14:35:50.000Z
| 2022-03-03T08:45:13.000Z
| utils/utils_df_nn.py
| Lifelong-ML/LASEM
| c4ec052c850e37f54bc3e6faf6b988a4c5239f10
| [ "MIT" ]
| null | null | null |
| utils/utils_df_nn.py
| Lifelong-ML/LASEM
| c4ec052c850e37f54bc3e6faf6b988a4c5239f10
| [ "MIT" ]
| 1
| 2021-07-09T09:26:11.000Z
| 2021-07-09T09:26:11.000Z
|
import numpy as np
import tensorflow as tf
from utils.utils import *
from utils.utils_nn import *
###########################################################
##### functions to generate parameters #####
###########################################################
#### function to generate knowledge-base parameters for ELLA_tensorfactor layer
def new_ELLA_KB_param(shape, layer_number, task_number, reg_type, init_tensor=None, trainable=True):
#kb_name = 'KB_'+str(layer_number)+'_'+str(task_number)
kb_name = 'KB_'+str(layer_number)
if init_tensor is None:
param_to_return = tf.get_variable(name=kb_name, shape=shape, dtype=tf.float32, regularizer=reg_type, trainable=trainable)
elif type(init_tensor) == np.ndarray:
param_to_return = tf.get_variable(name=kb_name, shape=shape, dtype=tf.float32, regularizer=reg_type, initializer=tf.constant_initializer(init_tensor), trainable=trainable)
else:
param_to_return = init_tensor
return param_to_return
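# Usage note (added commentary): new_ELLA_KB_param has three init modes —
# init_tensor=None creates a fresh tf variable, an np.ndarray seeds a new
# variable via constant_initializer, and an existing tensor/variable is
# returned unchanged so multiple tasks can share one knowledge base.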
#### function to generate task-specific parameters for ELLA_tensorfactor layer
def new_ELLA_cnn_deconv_TS_param(shape, layer_number, task_number, reg_type):
ts_w_name, ts_b_name, ts_p_name = 'TS_DeconvW0_'+str(layer_number)+'_'+str(task_number), 'TS_Deconvb0_'+str(layer_number)+'_'+str(task_number), 'TS_Convb0_'+str(layer_number)+'_'+str(task_number)
return [tf.get_variable(name=ts_w_name, shape=shape[0], dtype=tf.float32, regularizer=reg_type), tf.get_variable(name=ts_b_name, shape=shape[1], dtype=tf.float32, regularizer=reg_type), tf.get_variable(name=ts_p_name, shape=shape[2], dtype=tf.float32, regularizer=reg_type)]
#### function to generate task-specific parameters for ELLA_tensorfactor layer
def new_ELLA_cnn_deconv_tensordot_TS_param(shape, layer_number, task_number, reg_type, init_tensor, trainable):
ts_w_name, ts_b_name, ts_k_name, ts_p_name = 'TS_DeconvW0_'+str(layer_number)+'_'+str(task_number), 'TS_Deconvb0_'+str(layer_number)+'_'+str(task_number), 'TS_ConvW1_'+str(layer_number)+'_'+str(task_number), 'TS_Convb0_'+str(layer_number)+'_'+str(task_number)
params_to_return, params_name = [], [ts_w_name, ts_b_name, ts_k_name, ts_p_name]
for i, (t, n) in enumerate(zip(init_tensor, params_name)):
if t is None:
params_to_return.append(tf.get_variable(name=n, shape=shape[i], dtype=tf.float32, regularizer=reg_type if trainable and i<3 else None, trainable=trainable))
elif type(t) == np.ndarray:
params_to_return.append(tf.get_variable(name=n, shape=shape[i], dtype=tf.float32, regularizer=reg_type if trainable and i<3 else None, trainable=trainable, initializer=tf.constant_initializer(t)))
else:
params_to_return.append(t)
return params_to_return
#### function to generate task-specific parameters for ELLA_tensorfactor layer
def new_ELLA_cnn_deconv_tensordot_TS_param2(shape, layer_number, task_number, reg_type):
ts_w_name, ts_b_name, ts_k_name, ts_k_name2, ts_p_name = 'TS_DeconvW0_'+str(layer_number)+'_'+str(task_number), 'TS_Deconvb0_'+str(layer_number)+'_'+str(task_number), 'TS_tdot_W1_'+str(layer_number)+'_'+str(task_number), 'TS_tdot_W2_'+str(layer_number)+'_'+str(task_number), 'TS_tdot_b0_'+str(layer_number)+'_'+str(task_number)
return [tf.get_variable(name=ts_w_name, shape=shape[0], dtype=tf.float32, regularizer=reg_type), tf.get_variable(name=ts_b_name, shape=shape[1], dtype=tf.float32, regularizer=reg_type), tf.get_variable(name=ts_k_name, shape=shape[2], dtype=tf.float32, regularizer=reg_type), tf.get_variable(name=ts_k_name2, shape=shape[3], dtype=tf.float32, regularizer=reg_type), tf.get_variable(name=ts_p_name, shape=shape[4], dtype=tf.float32, regularizer=reg_type)]
###############################################################
##### functions for adding ELLA network (CNN/Deconv ver) #####
###############################################################
#### function to generate convolutional layer with shared knowledge base
#### KB_size : [filter_height(and width), num_of_channel]
#### TS_size : deconv_filter_height(and width)
#### TS_stride_size : [stride_in_height, stride_in_width]
def new_ELLA_cnn_deconv_layer(layer_input, k_size, ch_size, stride_size, KB_size, TS_size, TS_stride_size, layer_num, task_num, activation_fn=tf.nn.relu, para_activation_fn=tf.nn.relu, KB_param=None, TS_param=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_size=None, skip_connect_input=None):
assert (k_size[0] == k_size[1] and k_size[0] == (KB_size[0]-1)*TS_stride_size[0]+1), "CNN kernel size does not match the output size of Deconv from KB"
with tf.name_scope('ELLA_cdnn_KB'):
if KB_param is None:
## KB \in R^{1 \times h \times w \times c}
KB_param = new_ELLA_KB_param([1, KB_size[0], KB_size[0], KB_size[1]], layer_num, task_num, KB_reg_type)
if TS_param is None:
## TS1 : Deconv W \in R^{h \times w \times ch_in*ch_out \times c}
## TS2 : Deconv bias \in R^{ch_out}
TS_param = new_ELLA_cnn_deconv_TS_param([[TS_size, TS_size, ch_size[0]*ch_size[1], KB_size[1]], [1, 1, 1, ch_size[0]*ch_size[1]], [ch_size[1]]], layer_num, task_num, TS_reg_type)
with tf.name_scope('ELLA_cdnn_TS'):
para_tmp = tf.add(tf.nn.conv2d_transpose(KB_param, TS_param[0], [1, k_size[0], k_size[1], ch_size[0]*ch_size[1]], strides=[1, TS_stride_size[0], TS_stride_size[1], 1]), TS_param[1])
if para_activation_fn is not None:
para_tmp = para_activation_fn(para_tmp)
W, b = tf.reshape(para_tmp, k_size+ch_size), TS_param[2]
layer_eqn, _ = new_cnn_layer(layer_input, k_size+ch_size, stride_size=stride_size, activation_fn=activation_fn, weight=W, bias=b, padding_type=padding_type, max_pooling=max_pool, pool_size=pool_size, skip_connect_input=skip_connect_input)
return layer_eqn, [KB_param], TS_param, [W, b]
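# Added shape walk-through (commentary on the code above): the knowledge base
# KB_param [1, KB_h, KB_h, KB_c] is deconvolved by the task-specific filter
# [TS, TS, ch_in*ch_out, KB_c] with the TS strides, giving
# [1, k, k, ch_in*ch_out]; a reshape turns that into the per-task conv kernel
# W [k, k, ch_in, ch_out], and TS_param[2] supplies the bias b.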
#### function to generate network of convolutional layers with shared knowledge base
def new_ELLA_cnn_deconv_net(net_input, k_sizes, ch_sizes, stride_sizes, KB_sizes, TS_sizes, TS_stride_sizes, activation_fn=tf.nn.relu, para_activation_fn=tf.nn.relu, KB_params=None, TS_params=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_sizes=None, dropout=False, dropout_prob=None, flat_output=False, input_size=[0, 0], task_index=0, skip_connections=[]):
_num_TS_param_per_layer = 3
## first element : make new KB&TS / second element : make new TS / third element : not make new para
control_flag = [(KB_params is None and TS_params is None), (not (KB_params is None) and (TS_params is None)), not (KB_params is None or TS_params is None)]
if control_flag[1]:
TS_params = []
elif control_flag[0]:
KB_params, TS_params = [], []
cnn_gen_params=[]
layers_for_skip, next_skip_connect = [net_input], None
with tf.name_scope('ELLA_cdnn_net'):
layers = []
for layer_cnt in range(len(k_sizes)//2):
next_skip_connect = skip_connections.pop(0) if (len(skip_connections) > 0 and next_skip_connect is None) else next_skip_connect
if next_skip_connect is not None:
skip_connect_in, skip_connect_out = next_skip_connect
assert (skip_connect_in > -1 and skip_connect_out > -1), "Given skip connection has error (try connecting non-existing layer)"
else:
skip_connect_in, skip_connect_out = -1, -1
if layer_cnt == skip_connect_out:
processed_skip_connect_input = layers_for_skip[skip_connect_in]
for layer_cnt_tmp in range(skip_connect_in, skip_connect_out):
if max_pool and (pool_sizes[2*layer_cnt_tmp]>1 or pool_sizes[2*layer_cnt_tmp+1]>1):
processed_skip_connect_input = tf.nn.max_pool(processed_skip_connect_input, ksize=[1]+pool_sizes[2*layer_cnt_tmp:2*(layer_cnt_tmp+1)]+[1], strides=[1]+pool_sizes[2*layer_cnt_tmp:2*(layer_cnt_tmp+1)]+[1], padding=padding_type)
else:
processed_skip_connect_input = None
if layer_cnt == 0 and control_flag[0]:
layer_tmp, KB_para_tmp, TS_para_tmp, cnn_gen_para_tmp = new_ELLA_cnn_deconv_layer(net_input, k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_sizes[layer_cnt], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=None, TS_param=None, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)
elif layer_cnt == 0 and control_flag[1]:
layer_tmp, _, TS_para_tmp, cnn_gen_para_tmp = new_ELLA_cnn_deconv_layer(net_input, k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_sizes[layer_cnt], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=KB_params[layer_cnt], TS_param=None, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)
elif layer_cnt == 0 and control_flag[2]:
layer_tmp, _, _, cnn_gen_para_tmp = new_ELLA_cnn_deconv_layer(net_input, k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_sizes[layer_cnt], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=KB_params[layer_cnt], TS_param=TS_params[_num_TS_param_per_layer*layer_cnt:_num_TS_param_per_layer*(layer_cnt+1)], KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)
elif control_flag[0]:
layer_tmp, KB_para_tmp, TS_para_tmp, cnn_gen_para_tmp = new_ELLA_cnn_deconv_layer(layers[layer_cnt-1], k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_sizes[layer_cnt], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=None, TS_param=None, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)
elif control_flag[1]:
layer_tmp, _, TS_para_tmp, cnn_gen_para_tmp = new_ELLA_cnn_deconv_layer(layers[layer_cnt-1], k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_sizes[layer_cnt], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=KB_params[layer_cnt], TS_param=None, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)
elif control_flag[2]:
layer_tmp, _, _, cnn_gen_para_tmp = new_ELLA_cnn_deconv_layer(layers[layer_cnt-1], k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_sizes[layer_cnt], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=KB_params[layer_cnt], TS_param=TS_params[_num_TS_param_per_layer*layer_cnt:_num_TS_param_per_layer*(layer_cnt+1)], KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)
layers.append(layer_tmp)
layers_for_skip.append(layer_tmp)
cnn_gen_params = cnn_gen_params + cnn_gen_para_tmp
if control_flag[1]:
TS_params = TS_params + TS_para_tmp
elif control_flag[0]:
KB_params = KB_params + KB_para_tmp
TS_params = TS_params + TS_para_tmp
if layer_cnt == skip_connect_out:
next_skip_connect = None
#### flattening output
if flat_output:
output_dim = [int(layers[-1].shape[1]*layers[-1].shape[2]*layers[-1].shape[3])]
layers.append(tf.reshape(layers[-1], [-1, output_dim[0]]))
else:
output_dim = layers[-1].shape[1:]
#### add dropout layer
if dropout:
layers.append(tf.nn.dropout(layers[-1], dropout_prob))
return (layers, KB_params, TS_params, cnn_gen_params, output_dim)
#### function to generate network of cnn->ffnn
def new_ELLA_cnn_deconv_fc_net(net_input, k_sizes, ch_sizes, stride_sizes, fc_sizes, cnn_KB_sizes, cnn_TS_sizes, cnn_TS_stride_sizes, cnn_activation_fn=tf.nn.relu, cnn_para_activation_fn=tf.nn.relu, cnn_KB_params=None, cnn_TS_params=None, fc_activation_fn=tf.nn.relu, fc_params=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_sizes=None, dropout=False, dropout_prob=None, input_size=[0, 0], output_type=None, task_index=0, skip_connections=[]):
## add CNN layers
cnn_model, cnn_KB_params, cnn_TS_params, cnn_gen_params, cnn_output_dim = new_ELLA_cnn_deconv_net(net_input, k_sizes, ch_sizes, stride_sizes, cnn_KB_sizes, cnn_TS_sizes, cnn_TS_stride_sizes, activation_fn=cnn_activation_fn, para_activation_fn=cnn_para_activation_fn, KB_params=cnn_KB_params, TS_params=cnn_TS_params, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_sizes=pool_sizes, dropout=dropout, dropout_prob=dropout_prob, flat_output=True, input_size=input_size, task_index=task_index, skip_connections=skip_connections)
## add fc layers
#fc_model, fc_params = new_fc_net(cnn_model[-1], [cnn_output_dim[0]]+fc_sizes, activation_fn=fc_activation_fn, params=fc_params, output_type=output_type, tensorboard_name_scope='fc_net')
fc_model, fc_params = new_fc_net(cnn_model[-1], fc_sizes, activation_fn=fc_activation_fn, params=fc_params, output_type=output_type, tensorboard_name_scope='fc_net')
return (cnn_model+fc_model, cnn_KB_params, cnn_TS_params, cnn_gen_params, fc_params)
###########################################################################
##### functions for adding ELLA network (CNN/Deconv & Tensordot ver) #####
###########################################################################
#### KB_size : [filter_height(and width), num_of_channel]
#### TS_size : [deconv_filter_height(and width), deconv_filter_channel]
#### TS_stride_size : [stride_in_height, stride_in_width]
def new_ELLA_cnn_deconv_tensordot_layer(layer_input, k_size, ch_size, stride_size, KB_size, TS_size, TS_stride_size, layer_num, task_num, activation_fn=tf.nn.relu, para_activation_fn=tf.nn.relu, KB_param=None, TS_param=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_size=None, skip_connect_input=None, highway_connect_type=0, highway_W=None, highway_b=None, trainable=True, trainable_KB=True):
assert (k_size[0] == k_size[1] and k_size[0] == (KB_size[0]-1)*TS_stride_size[0]+1), "CNN kernel size does not match the output size of Deconv from KB"
with tf.name_scope('ELLA_cdnn_KB'):
## KB \in R^{1 \times h \times w \times c}
KB_param = new_ELLA_KB_param([1, KB_size[0], KB_size[0], KB_size[1]], layer_num, task_num, KB_reg_type, KB_param, trainable=trainable_KB)
## TS1 : Deconv W \in R^{h \times w \times kb_c_out \times c}
## TS2 : Deconv bias \in R^{kb_c_out}
## TS3 : tensor W \in R^{kb_c_out \times ch_in \times ch_out}
## TS4 : Conv bias \in R^{ch_out}
TS_param = new_ELLA_cnn_deconv_tensordot_TS_param([[TS_size[0], TS_size[0], TS_size[1], KB_size[1]], [1, 1, 1, TS_size[1]], [TS_size[1], ch_size[0], ch_size[1]], [ch_size[1]]], layer_num, task_num, TS_reg_type, [None, None, None, None] if TS_param is None else TS_param, trainable=trainable)
with tf.name_scope('DFCNN_param_gen'):
para_tmp = tf.add(tf.nn.conv2d_transpose(KB_param, TS_param[0], [1, k_size[0], k_size[1], TS_size[1]], strides=[1, TS_stride_size[0], TS_stride_size[1], 1]), TS_param[1])
para_tmp = tf.reshape(para_tmp, [k_size[0], k_size[1], TS_size[1]])
if para_activation_fn is not None:
para_tmp = para_activation_fn(para_tmp)
W = tf.tensordot(para_tmp, TS_param[2], [[2], [0]])
b = TS_param[3]
## HighwayNet's skip connection
highway_params, gate = [], None
if highway_connect_type > 0:
with tf.name_scope('highway_connection'):
if highway_connect_type == 1:
x = layer_input
if highway_W is None:
highway_W = new_weight([k_size[0], k_size[1], ch_size[0], ch_size[1]])
if highway_b is None:
highway_b = new_bias([ch_size[1]], init_val=-2.0)
gate, _ = new_cnn_layer(x, k_size+ch_size, stride_size=stride_size, activation_fn=None, weight=highway_W, bias=highway_b, padding_type=padding_type, max_pooling=False)
elif highway_connect_type == 2:
x = tf.reshape(layer_input, [-1, int(layer_input.shape[1]*layer_input.shape[2]*layer_input.shape[3])])
if highway_W is None:
highway_W = new_weight([int(x.shape[1]), 1])
if highway_b is None:
highway_b = new_bias([1], init_val=-2.0)
gate = tf.broadcast_to(tf.stack([tf.stack([tf.matmul(x, highway_W) + highway_b], axis=2)], axis=3), layer_input.get_shape())
gate = tf.nn.sigmoid(gate)
highway_params = [highway_W, highway_b]
layer_eqn, _ = new_cnn_layer(layer_input, k_size+ch_size, stride_size=stride_size, activation_fn=activation_fn, weight=W, bias=b, padding_type=padding_type, max_pooling=max_pool, pool_size=pool_size, skip_connect_input=skip_connect_input, highway_connect_type=highway_connect_type, highway_gate=gate)
return layer_eqn, [KB_param], TS_param, [W, b], highway_params
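# Added commentary (an inference from the arguments, not original text):
# highway_connect_type == 1 computes a per-position gate with its own conv
# (highway_W, highway_b), while type 2 computes one scalar gate per example
# and broadcasts it over the feature map; the sigmoid gate is handed to
# new_cnn_layer, which presumably mixes the conv output with the raw input
# HighwayNet-style. The -2.0 bias init pushes the sigmoid toward 0 early in
# training.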
#### function to generate network of convolutional layers with shared knowledge base
def new_ELLA_cnn_deconv_tensordot_net(net_input, k_sizes, ch_sizes, stride_sizes, KB_sizes, TS_sizes, TS_stride_sizes, activation_fn=tf.nn.relu, para_activation_fn=tf.nn.relu, KB_params=None, TS_params=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_sizes=None, dropout=False, dropout_prob=None, flat_output=False, input_size=[0, 0], task_index=0, skip_connections=[]):
_num_TS_param_per_layer = 4
## first element : make new KB&TS / second element : make new TS / third element : not make new para / fourth element : make new KB
control_flag = [(KB_params is None and TS_params is None), (not (KB_params is None) and (TS_params is None)), not (KB_params is None or TS_params is None), ((KB_params is None) and not (TS_params is None))]
if control_flag[1]:
TS_params = []
elif control_flag[3]:
KB_params = []
elif control_flag[0]:
KB_params, TS_params = [], []
cnn_gen_params = []
layers_for_skip, next_skip_connect = [net_input], None
with tf.name_scope('ELLA_cdnn_net'):
layers = []
for layer_cnt in range(len(k_sizes)//2):
next_skip_connect = skip_connections.pop(0) if (len(skip_connections) > 0 and next_skip_connect is None) else next_skip_connect
if next_skip_connect is not None:
skip_connect_in, skip_connect_out = next_skip_connect
assert (skip_connect_in > -1 and skip_connect_out > -1), "Given skip connection has error (try connecting non-existing layer)"
else:
skip_connect_in, skip_connect_out = -1, -1
if layer_cnt == skip_connect_out:
processed_skip_connect_input = layers_for_skip[skip_connect_in]
for layer_cnt_tmp in range(skip_connect_in, skip_connect_out):
if max_pool and (pool_sizes[2*layer_cnt_tmp]>1 or pool_sizes[2*layer_cnt_tmp+1]>1):
processed_skip_connect_input = tf.nn.max_pool(processed_skip_connect_input, ksize=[1]+pool_sizes[2*layer_cnt_tmp:2*(layer_cnt_tmp+1)]+[1], strides=[1]+pool_sizes[2*layer_cnt_tmp:2*(layer_cnt_tmp+1)]+[1], padding=padding_type)
else:
processed_skip_connect_input = None
if layer_cnt == 0 and control_flag[0]:
layer_tmp, KB_para_tmp, TS_para_tmp, cnn_gen_para_tmp, _ = new_ELLA_cnn_deconv_tensordot_layer(net_input, k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=None, TS_param=None, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)
elif layer_cnt == 0 and control_flag[1]:
layer_tmp, _, TS_para_tmp, cnn_gen_para_tmp, _ = new_ELLA_cnn_deconv_tensordot_layer(net_input, k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=KB_params[layer_cnt], TS_param=None, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)
elif layer_cnt == 0 and control_flag[2]:
layer_tmp, _, _, cnn_gen_para_tmp, _ = new_ELLA_cnn_deconv_tensordot_layer(net_input, k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=KB_params[layer_cnt], TS_param=TS_params[_num_TS_param_per_layer*layer_cnt:_num_TS_param_per_layer*(layer_cnt+1)], KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)
elif layer_cnt == 0 and control_flag[3]:
layer_tmp, KB_para_tmp, _, cnn_gen_para_tmp, _ = new_ELLA_cnn_deconv_tensordot_layer(net_input, k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=None, TS_param=TS_params[_num_TS_param_per_layer*layer_cnt:_num_TS_param_per_layer*(layer_cnt+1)], KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)
elif control_flag[0]:
layer_tmp, KB_para_tmp, TS_para_tmp, cnn_gen_para_tmp, _ = new_ELLA_cnn_deconv_tensordot_layer(layers[layer_cnt-1], k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=None, TS_param=None, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)
elif control_flag[1]:
layer_tmp, _, TS_para_tmp, cnn_gen_para_tmp, _ = new_ELLA_cnn_deconv_tensordot_layer(layers[layer_cnt-1], k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=KB_params[layer_cnt], TS_param=None, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)
elif control_flag[2]:
layer_tmp, _, _, cnn_gen_para_tmp, _ = new_ELLA_cnn_deconv_tensordot_layer(layers[layer_cnt-1], k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=KB_params[layer_cnt], TS_param=TS_params[_num_TS_param_per_layer*layer_cnt:_num_TS_param_per_layer*(layer_cnt+1)], KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)
elif control_flag[3]:
layer_tmp, KB_para_tmp, _, cnn_gen_para_tmp, _ = new_ELLA_cnn_deconv_tensordot_layer(layers[layer_cnt-1], k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=None, TS_param=TS_params[_num_TS_param_per_layer*layer_cnt:_num_TS_param_per_layer*(layer_cnt+1)], KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)
layers.append(layer_tmp)
layers_for_skip.append(layer_tmp)
cnn_gen_params = cnn_gen_params + cnn_gen_para_tmp
if control_flag[1]:
TS_params = TS_params + TS_para_tmp
elif control_flag[3]:
KB_params = KB_params + KB_para_tmp
elif control_flag[0]:
KB_params = KB_params + KB_para_tmp
TS_params = TS_params + TS_para_tmp
if layer_cnt == skip_connect_out:
next_skip_connect = None
#### flattening output
if flat_output:
output_dim = [int(layers[-1].shape[1]*layers[-1].shape[2]*layers[-1].shape[3])]
layers.append(tf.reshape(layers[-1], [-1, output_dim[0]]))
else:
output_dim = layers[-1].shape[1:]
#### add dropout layer
if dropout:
layers.append(tf.nn.dropout(layers[-1], dropout_prob))
return (layers, KB_params, TS_params, cnn_gen_params, output_dim)
#### function to generate network of cnn (with shared KB through deconv)-> simple ffnn
def new_ELLA_cnn_deconv_tensordot_fc_net(net_input, k_sizes, ch_sizes, stride_sizes, fc_sizes, cnn_KB_sizes, cnn_TS_sizes, cnn_TS_stride_sizes, cnn_activation_fn=tf.nn.relu, cnn_para_activation_fn=tf.nn.relu, cnn_KB_params=None, cnn_TS_params=None, fc_activation_fn=tf.nn.relu, fc_params=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_sizes=None, dropout=False, dropout_prob=None, input_size=[0, 0], output_type=None, task_index=0, skip_connections=[]):
## add CNN layers
cnn_model, cnn_KB_params, cnn_TS_params, cnn_gen_params, cnn_output_dim = new_ELLA_cnn_deconv_tensordot_net(net_input, k_sizes, ch_sizes, stride_sizes, cnn_KB_sizes, cnn_TS_sizes, cnn_TS_stride_sizes, activation_fn=cnn_activation_fn, para_activation_fn=cnn_para_activation_fn, KB_params=cnn_KB_params, TS_params=cnn_TS_params, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_sizes=pool_sizes, dropout=dropout, dropout_prob=dropout_prob, flat_output=True, input_size=input_size, task_index=task_index, skip_connections=skip_connections)
## add fc layers
#fc_model, fc_params = new_fc_net(cnn_model[-1], [cnn_output_dim[0]]+fc_sizes, activation_fn=fc_activation_fn, params=fc_params, output_type=output_type, tensorboard_name_scope='fc_net')
fc_model, fc_params = new_fc_net(cnn_model[-1], fc_sizes, activation_fn=fc_activation_fn, params=fc_params, output_type=output_type, tensorboard_name_scope='fc_net')
return (cnn_model+fc_model, cnn_KB_params, cnn_TS_params, cnn_gen_params, fc_params)
###########################################################################
##### functions for adding ELLA network (CNN/Deconv & Tensordot ver2) #####
###########################################################################
#### KB_size : [filter_height(and width), num_of_channel0, num_of_channel1]
#### TS_size : [deconv_filter_height(and width), deconv_filter_channel]
#### TS_stride_size : [stride_in_height, stride_in_width]
def new_ELLA_cnn_deconv_tensordot_layer2(layer_input, k_size, ch_size, stride_size, KB_size, TS_size, TS_stride_size, layer_num, task_num, activation_fn=tf.nn.relu, para_activation_fn=tf.nn.relu, KB_param=None, TS_param=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_size=None, skip_connect_input=None):
assert (k_size[0] == k_size[1] and k_size[0] == (KB_size[0]-1)*TS_stride_size[0]+1), "CNN kernel size does not match the output size of Deconv from KB"
with tf.name_scope('ELLA_cdnn_KB'):
if KB_param is None:
## KB \in R^{d \times h \times w \times c}
KB_param = new_ELLA_KB_param([KB_size[1], KB_size[0], KB_size[0], KB_size[2]], layer_num, task_num, KB_reg_type)
if TS_param is None:
## TS1 : Deconv W \in R^{h \times w \times kb_c_out \times c}
## TS2 : Deconv bias \in R^{kb_c_out}
## TS3 : tensor W \in R^{d \times ch_in}
## TS4 : tensor W \in R^{kb_c_out \times ch_out}
## TS5 : Conv bias \in R^{ch_out}
TS_param = new_ELLA_cnn_deconv_tensordot_TS_param2([[TS_size[0], TS_size[0], TS_size[1], KB_size[2]], [1, 1, 1, TS_size[1]], [KB_size[1], ch_size[0]], [TS_size[1], ch_size[1]], [1, 1, 1, ch_size[1]]], layer_num, task_num, TS_reg_type)
with tf.name_scope('ELLA_cdnn_TS'):
para_tmp = tf.add(tf.nn.conv2d_transpose(KB_param, TS_param[0], [KB_size[1], k_size[0], k_size[1], TS_size[1]], strides=[1, TS_stride_size[0], TS_stride_size[1], 1]), TS_param[1])
if para_activation_fn is not None:
para_tmp = para_activation_fn(para_tmp)
para_tmp = tf.tensordot(para_tmp, TS_param[2], [[0], [0]])
W = tf.tensordot(para_tmp, TS_param[3], [[2], [0]])
b = TS_param[4]
layer_eqn, _ = new_cnn_layer(layer_input, k_size+ch_size, stride_size=stride_size, activation_fn=activation_fn, weight=W, bias=b, padding_type=padding_type, max_pooling=max_pool, pool_size=pool_size, skip_connect_input=skip_connect_input)
return layer_eqn, [KB_param], TS_param, [W, b]
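# Added shape note: the deconv output [KB_d, k, k, TS_c] is contracted twice —
# with TS_param[2] ([KB_d, ch_in]) over the leading axis, then with
# TS_param[3] ([TS_c, ch_out]) over the remaining TS_c axis — leaving the conv
# kernel W [k, k, ch_in, ch_out]; TS_param[4] is the broadcast bias b.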
#### function to generate network of convolutional layers with shared knowledge base
def new_ELLA_cnn_deconv_tensordot_net2(net_input, k_sizes, ch_sizes, stride_sizes, KB_sizes, TS_sizes, TS_stride_sizes, activation_fn=tf.nn.relu, para_activation_fn=tf.nn.relu, KB_params=None, TS_params=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_sizes=None, dropout=False, dropout_prob=None, flat_output=False, input_size=[0, 0], task_index=0, skip_connections=[]):
_num_TS_param_per_layer = 5
## first element : make new KB&TS / second element : make new TS / third element : not make new para / fourth element : make new KB
control_flag = [(KB_params is None and TS_params is None), (not (KB_params is None) and (TS_params is None)), not (KB_params is None or TS_params is None), ((KB_params is None) and not (TS_params is None))]
if control_flag[1]:
TS_params = []
elif control_flag[3]:
KB_params = []
elif control_flag[0]:
KB_params, TS_params = [], []
cnn_gen_params = []
layers_for_skip, next_skip_connect = [net_input], None
with tf.name_scope('ELLA_cdnn_net'):
layers = []
for layer_cnt in range(len(k_sizes)//2):
next_skip_connect = skip_connections.pop(0) if (len(skip_connections) > 0 and next_skip_connect is None) else next_skip_connect
if next_skip_connect is not None:
skip_connect_in, skip_connect_out = next_skip_connect
assert (skip_connect_in > -1 and skip_connect_out > -1), "Given skip connection has error (try connecting non-existing layer)"
else:
skip_connect_in, skip_connect_out = -1, -1
if layer_cnt == skip_connect_out:
processed_skip_connect_input = layers_for_skip[skip_connect_in]
for layer_cnt_tmp in range(skip_connect_in, skip_connect_out):
if max_pool and (pool_sizes[2*layer_cnt_tmp]>1 or pool_sizes[2*layer_cnt_tmp+1]>1):
processed_skip_connect_input = tf.nn.max_pool(processed_skip_connect_input, ksize=[1]+pool_sizes[2*layer_cnt_tmp:2*(layer_cnt_tmp+1)]+[1], strides=[1]+pool_sizes[2*layer_cnt_tmp:2*(layer_cnt_tmp+1)]+[1], padding=padding_type)
else:
processed_skip_connect_input = None
if layer_cnt == 0 and control_flag[0]:
layer_tmp, KB_para_tmp, TS_para_tmp, cnn_gen_para_tmp = new_ELLA_cnn_deconv_tensordot_layer2(net_input, k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[3*layer_cnt:3*(layer_cnt+1)], TS_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=None, TS_param=None, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)
elif layer_cnt == 0 and control_flag[1]:
layer_tmp, _, TS_para_tmp, cnn_gen_para_tmp = new_ELLA_cnn_deconv_tensordot_layer2(net_input, k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[3*layer_cnt:3*(layer_cnt+1)], TS_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=KB_params[layer_cnt], TS_param=None, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)
elif layer_cnt == 0 and control_flag[2]:
layer_tmp, _, _, cnn_gen_para_tmp = new_ELLA_cnn_deconv_tensordot_layer2(net_input, k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[3*layer_cnt:3*(layer_cnt+1)], TS_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=KB_params[layer_cnt], TS_param=TS_params[_num_TS_param_per_layer*layer_cnt:_num_TS_param_per_layer*(layer_cnt+1)], KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)
elif layer_cnt == 0 and control_flag[3]:
layer_tmp, KB_para_tmp, _, cnn_gen_para_tmp = new_ELLA_cnn_deconv_tensordot_layer2(net_input, k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[3*layer_cnt:3*(layer_cnt+1)], TS_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=None, TS_param=TS_params[_num_TS_param_per_layer*layer_cnt:_num_TS_param_per_layer*(layer_cnt+1)], KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)
elif control_flag[0]:
layer_tmp, KB_para_tmp, TS_para_tmp, cnn_gen_para_tmp = new_ELLA_cnn_deconv_tensordot_layer2(layers[layer_cnt-1], k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[3*layer_cnt:3*(layer_cnt+1)], TS_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=None, TS_param=None, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)
elif control_flag[1]:
layer_tmp, _, TS_para_tmp, cnn_gen_para_tmp = new_ELLA_cnn_deconv_tensordot_layer2(layers[layer_cnt-1], k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[3*layer_cnt:3*(layer_cnt+1)], TS_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=KB_params[layer_cnt], TS_param=None, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)
elif control_flag[2]:
layer_tmp, _, _, cnn_gen_para_tmp = new_ELLA_cnn_deconv_tensordot_layer2(layers[layer_cnt-1], k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[3*layer_cnt:3*(layer_cnt+1)], TS_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=KB_params[layer_cnt], TS_param=TS_params[_num_TS_param_per_layer*layer_cnt:_num_TS_param_per_layer*(layer_cnt+1)], KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)
elif control_flag[3]:
layer_tmp, KB_para_tmp, _, cnn_gen_para_tmp = new_ELLA_cnn_deconv_tensordot_layer2(layers[layer_cnt-1], k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[3*layer_cnt:3*(layer_cnt+1)], TS_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=None, TS_param=TS_params[_num_TS_param_per_layer*layer_cnt:_num_TS_param_per_layer*(layer_cnt+1)], KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)
layers.append(layer_tmp)
layers_for_skip.append(layer_tmp)
cnn_gen_params = cnn_gen_params + cnn_gen_para_tmp
if control_flag[1]:
TS_params = TS_params + TS_para_tmp
elif control_flag[3]:
KB_params = KB_params + KB_para_tmp
elif control_flag[0]:
KB_params = KB_params + KB_para_tmp
TS_params = TS_params + TS_para_tmp
if layer_cnt == skip_connect_out:
next_skip_connect = None
#### flattening output
if flat_output:
output_dim = [int(layers[-1].shape[1]*layers[-1].shape[2]*layers[-1].shape[3])]
layers.append(tf.reshape(layers[-1], [-1, output_dim[0]]))
else:
output_dim = layers[-1].shape[1:]
#### add dropout layer
if dropout:
layers.append(tf.nn.dropout(layers[-1], dropout_prob))
return (layers, KB_params, TS_params, cnn_gen_params, output_dim)
#### function to generate network of cnn (with shared KB through deconv)-> simple ffnn
def new_ELLA_cnn_deconv_tensordot_fc_net2(net_input, k_sizes, ch_sizes, stride_sizes, fc_sizes, cnn_KB_sizes, cnn_TS_sizes, cnn_TS_stride_sizes, cnn_activation_fn=tf.nn.relu, cnn_para_activation_fn=tf.nn.relu, cnn_KB_params=None, cnn_TS_params=None, fc_activation_fn=tf.nn.relu, fc_params=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_sizes=None, dropout=False, dropout_prob=None, input_size=[0, 0], output_type=None, task_index=0, skip_connections=[]):
## add CNN layers
cnn_model, cnn_KB_params, cnn_TS_params, cnn_gen_params, cnn_output_dim = new_ELLA_cnn_deconv_tensordot_net2(net_input, k_sizes, ch_sizes, stride_sizes, cnn_KB_sizes, cnn_TS_sizes, cnn_TS_stride_sizes, activation_fn=cnn_activation_fn, para_activation_fn=cnn_para_activation_fn, KB_params=cnn_KB_params, TS_params=cnn_TS_params, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_sizes=pool_sizes, dropout=dropout, dropout_prob=dropout_prob, flat_output=True, input_size=input_size, task_index=task_index, skip_connections=skip_connections)
## add fc layers
#fc_model, fc_params = new_fc_net(cnn_model[-1], [cnn_output_dim[0]]+fc_sizes, activation_fn=fc_activation_fn, params=fc_params, output_type=output_type, tensorboard_name_scope='fc_net')
fc_model, fc_params = new_fc_net(cnn_model[-1], fc_sizes, activation_fn=fc_activation_fn, params=fc_params, output_type=output_type, tensorboard_name_scope='fc_net')
return (cnn_model+fc_model, cnn_KB_params, cnn_TS_params, cnn_gen_params, fc_params)
##############################################################################################################
#### functions for Conv-FC nets whose conv layers are freely set to shared across tasks by DeconvFactor ####
##############################################################################################################
def new_ELLA_flexible_cnn_deconv_tensordot_fc_net(net_input, k_sizes, ch_sizes, stride_sizes, fc_sizes, cnn_sharing, cnn_KB_sizes, cnn_TS_sizes, cnn_TS_stride_sizes, cnn_activation_fn=tf.nn.relu, cnn_para_activation_fn=tf.nn.relu, cnn_KB_params=None, cnn_TS_params=None, cnn_params=None, fc_activation_fn=tf.nn.relu, fc_params=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_sizes=None, dropout=False, dropout_prob=None, input_size=[0, 0], output_type=None, task_index=0, skip_connections=[], highway_connect_type=0, cnn_highway_params=None, trainable=True, trainable_KB=True):
_num_TS_param_per_layer = 4
num_conv_layers = [len(k_sizes)//2, len(ch_sizes)-1, len(stride_sizes)//2, len(cnn_sharing), len(cnn_KB_sizes)//2, len(cnn_TS_sizes)//2, len(cnn_TS_stride_sizes)//2]
assert (all([(num_conv_layers[i]==num_conv_layers[i+1]) for i in range(len(num_conv_layers)-1)])), "Parameters related to conv layers are wrong!"
num_conv_layers = num_conv_layers[0]
'''
if cnn_KB_params is not None:
assert (len(cnn_KB_params) == 1), "Given init value of KB (last layer) is wrong!"
if cnn_TS_params is not None:
assert (len(cnn_TS_params) == 4), "Given init value of TS (last layer) is wrong!"
'''
## add CNN layers
## first element : make new KB&TS / second element : make new TS / third element : not make new para / fourth element : make new KB
control_flag = [(cnn_KB_params is None and cnn_TS_params is None), (not (cnn_KB_params is None) and (cnn_TS_params is None)), not (cnn_KB_params is None or cnn_TS_params is None), ((cnn_KB_params is None) and not (cnn_TS_params is None))]
if control_flag[1]:
cnn_TS_params = []
elif control_flag[3]:
cnn_KB_params = []
elif control_flag[0]:
cnn_KB_params, cnn_TS_params = [], []
cnn_gen_params = []
if cnn_params is None:
cnn_params = [None for _ in range(2*num_conv_layers)]
layers_for_skip, next_skip_connect = [net_input], None
with tf.name_scope('Hybrid_DFCNN'):
cnn_model, cnn_params_to_return, cnn_highway_params_to_return = [], [], []
cnn_KB_to_return, cnn_TS_to_return = [], []
for layer_cnt in range(num_conv_layers):
KB_para_tmp, TS_para_tmp, para_tmp = [None], [None for _ in range(_num_TS_param_per_layer)], [None, None]
highway_para_tmp = [None, None] if cnn_highway_params is None else cnn_highway_params[2*layer_cnt:2*(layer_cnt+1)]
cnn_gen_para_tmp = [None, None]
next_skip_connect = skip_connections.pop(0) if (len(skip_connections) > 0 and next_skip_connect is None) else next_skip_connect
if next_skip_connect is not None:
skip_connect_in, skip_connect_out = next_skip_connect
assert (skip_connect_in > -1 and skip_connect_out > -1), "Given skip connection has error (try connecting non-existing layer)"
else:
skip_connect_in, skip_connect_out = -1, -1
if layer_cnt == skip_connect_out:
processed_skip_connect_input = layers_for_skip[skip_connect_in]
for layer_cnt_tmp in range(skip_connect_in, skip_connect_out):
if max_pool and (pool_sizes[2*layer_cnt_tmp]>1 or pool_sizes[2*layer_cnt_tmp+1]>1):
processed_skip_connect_input = tf.nn.max_pool(processed_skip_connect_input, ksize=[1]+pool_sizes[2*layer_cnt_tmp:2*(layer_cnt_tmp+1)]+[1], strides=[1]+pool_sizes[2*layer_cnt_tmp:2*(layer_cnt_tmp+1)]+[1], padding=padding_type)
else:
processed_skip_connect_input = None
            layer_input = net_input if layer_cnt == 0 else cnn_model[layer_cnt-1]
            if cnn_sharing[layer_cnt]:
                ## reuse the KB and/or TS params that were given; the layer creates any that are None
                KB_init = cnn_KB_params[layer_cnt] if (control_flag[1] or control_flag[2]) else None
                TS_init = cnn_TS_params[_num_TS_param_per_layer*layer_cnt:_num_TS_param_per_layer*(layer_cnt+1)] if (control_flag[2] or control_flag[3]) else None
                layer_tmp, KB_para_tmp, TS_para_tmp, cnn_gen_para_tmp, highway_para_tmp = new_ELLA_cnn_deconv_tensordot_layer(layer_input, k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], cnn_KB_sizes[2*layer_cnt:2*(layer_cnt+1)], cnn_TS_sizes[2*layer_cnt:2*(layer_cnt+1)], cnn_TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=cnn_activation_fn, para_activation_fn=cnn_para_activation_fn, KB_param=KB_init, TS_param=TS_init, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input, highway_connect_type=highway_connect_type, highway_W=highway_para_tmp[0], highway_b=highway_para_tmp[1], trainable=trainable, trainable_KB=trainable_KB)
            else:
                layer_tmp, para_tmp = new_cnn_layer(layer_input=layer_input, k_size=k_sizes[2*layer_cnt:2*(layer_cnt+1)]+ch_sizes[layer_cnt:layer_cnt+2], stride_size=[1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], activation_fn=cnn_activation_fn, weight=cnn_params[2*layer_cnt], bias=cnn_params[2*layer_cnt+1], padding_type=padding_type, max_pooling=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input, trainable=trainable)
cnn_model.append(layer_tmp)
layers_for_skip.append(layer_tmp)
cnn_KB_to_return = cnn_KB_to_return + KB_para_tmp
cnn_TS_to_return = cnn_TS_to_return + TS_para_tmp
cnn_params_to_return = cnn_params_to_return + para_tmp
cnn_gen_params = cnn_gen_params + cnn_gen_para_tmp
cnn_highway_params_to_return = cnn_highway_params_to_return + highway_para_tmp
if layer_cnt == skip_connect_out:
next_skip_connect = None
#### flattening output
output_dim = [int(cnn_model[-1].shape[1]*cnn_model[-1].shape[2]*cnn_model[-1].shape[3])]
cnn_model.append(tf.reshape(cnn_model[-1], [-1, output_dim[0]]))
#### add dropout layer
if dropout:
cnn_model.append(tf.nn.dropout(cnn_model[-1], dropout_prob))
## add fc layers
fc_model, fc_params = new_fc_net(cnn_model[-1], fc_sizes, activation_fn=fc_activation_fn, params=fc_params, output_type=output_type, tensorboard_name_scope='fc_net', trainable=trainable)
#return (cnn_model+fc_model, cnn_KB_params, cnn_TS_params, cnn_gen_params, cnn_params_to_return, cnn_highway_params_to_return, fc_params)
return (cnn_model+fc_model, cnn_KB_to_return, cnn_TS_to_return, cnn_gen_params, cnn_params_to_return, cnn_highway_params_to_return, fc_params)
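#### Editorial usage sketch (hedged; not part of the original training code). It wires a
#### toy two-conv/two-fc hybrid DF-CNN for 32x32x3 inputs: layer 0 is shared through the
#### KB/TS deconv factorization, layer 1 stays task-specific. All sizes are illustrative
#### assumptions; KB spatial size 3 with TS stride 1 is picked so the deconv-generated
#### filters can match the 3x3 kernels (mirroring the deconv arithmetic visible in
#### new_darts_dfcnn_layer below).
def _demo_hybrid_dfcnn_fc_net(net_input):
    return new_ELLA_flexible_cnn_deconv_tensordot_fc_net(
        net_input,
        k_sizes=[3, 3, 3, 3],                # two conv layers with 3x3 kernels
        ch_sizes=[3, 32, 64],                # input channels, then per-layer output channels
        stride_sizes=[1, 1, 1, 1],
        fc_sizes=[128, 10],
        cnn_sharing=[True, False],           # DF-CNN sharing only on layer 0
        cnn_KB_sizes=[3, 12, 3, 24],         # KB spatial size / KB channels per layer
        cnn_TS_sizes=[3, 16, 3, 32],         # TS deconv filter size / intermediate channels
        cnn_TS_stride_sizes=[1, 1, 1, 1],
        max_pool=True,
        pool_sizes=[2, 2, 2, 2],
        output_type=None,
        task_index=0)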
#### functions to generate a DARTS-based network for selective sharing on DF-CNN
def new_darts_dfcnn_layer(layer_input, k_size, ch_size, stride_size, KB_size, TS_size, TS_stride_size, layer_num, task_num, activation_fn=tf.nn.relu, para_activation_fn=tf.nn.relu, KB_param=None, TS_param=None, conv_param=None, select_param=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pooling=False, pool_size=None, trainable=True, skip_connect_input=None, name_scope='darts_dfcnn_layer', use_numpy_var_in_graph=False):
with tf.name_scope(name_scope):
## init DF-CNN KB params
if KB_param is None or (type(KB_param) == np.ndarray and not use_numpy_var_in_graph):
KB_param = new_ELLA_KB_param([1, KB_size[0], KB_size[0], KB_size[1]], layer_num, task_num, KB_reg_type, KB_param, trainable=trainable)
## init DF-CNN task-specific mapping params
if TS_param is None or (type(TS_param) == np.ndarray and not use_numpy_var_in_graph):
TS_param = new_ELLA_cnn_deconv_tensordot_TS_param([[TS_size[0], TS_size[0], TS_size[1], KB_size[1]], [1, 1, 1, TS_size[1]], [TS_size[1], ch_size[0], ch_size[1]], [ch_size[1]]], layer_num, task_num, TS_reg_type, [None, None, None, None] if TS_param is None else TS_param, trainable=trainable)
## init task-specific conv params
if conv_param is None:
conv_param = [new_weight(shape=k_size+ch_size, trainable=trainable), new_bias(shape=[ch_size[-1]], trainable=trainable)]
else:
if conv_param[0] is None or (type(conv_param[0]) == np.ndarray and not use_numpy_var_in_graph):
conv_param[0] = new_weight(shape=k_size+ch_size, init_tensor=conv_param[0], trainable=trainable)
if conv_param[1] is None or (type(conv_param[1]) == np.ndarray and not use_numpy_var_in_graph):
conv_param[1] = new_bias(shape=[ch_size[-1]], init_tensor=conv_param[1], trainable=trainable)
## init DARTS-selection params
if select_param is None:
select_param = new_weight(shape=[2], init_tensor=np.zeros(2, dtype=np.float32), trainable=trainable)
elif (type(select_param) == np.ndarray) and not use_numpy_var_in_graph:
select_param = new_weight(shape=[2], init_tensor=select_param, trainable=trainable)
with tf.name_scope('DFCNN_param_gen'):
para_tmp = tf.add(tf.nn.conv2d_transpose(KB_param, TS_param[0], [1, k_size[0], k_size[1], TS_size[1]], strides=[1, TS_stride_size[0], TS_stride_size[1], 1]), TS_param[1])
para_tmp = tf.reshape(para_tmp, [k_size[0], k_size[1], TS_size[1]])
if para_activation_fn is not None:
para_tmp = para_activation_fn(para_tmp)
W = tf.tensordot(para_tmp, TS_param[2], [[2], [0]])
b = TS_param[3]
mixing_weight = tf.reshape(tf.nn.softmax(select_param), [2,1])
shared_conv_layer = tf.nn.conv2d(layer_input, W, strides=stride_size, padding=padding_type) + b
TS_conv_layer = tf.nn.conv2d(layer_input, conv_param[0], strides=stride_size, padding=padding_type) + conv_param[1]
if skip_connect_input is not None:
shape1, shape2 = shared_conv_layer.get_shape().as_list(), skip_connect_input.get_shape().as_list()
            assert (len(shape1) == len(shape2)), "Rank of layer output and skip-connection input do not match!"
            assert (all([(x==y) for (x, y) in zip(shape1, shape2)])), "Shape of layer output and skip-connection input do not match!"
shared_conv_layer = shared_conv_layer + skip_connect_input
TS_conv_layer = TS_conv_layer + skip_connect_input
        if activation_fn is not None:
shared_conv_layer = activation_fn(shared_conv_layer)
TS_conv_layer = activation_fn(TS_conv_layer)
mixed_conv_temp = tf.tensordot(tf.stack([TS_conv_layer, shared_conv_layer], axis=4), mixing_weight, axes=[[4], [0]])
conv_layer = tf.reshape(mixed_conv_temp, mixed_conv_temp.get_shape()[0:-1])
if max_pooling and (pool_size[1] > 1 or pool_size[2] > 1):
layer = tf.nn.max_pool(conv_layer, ksize=pool_size, strides=pool_size, padding=padding_type)
else:
layer = conv_layer
return (layer, [KB_param], TS_param, conv_param, [select_param])
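#### Editorial sketch (hedged): NumPy re-statement of the mixing math above, with
#### hypothetical toy tensors. The two candidate outputs are stacked on a new trailing
#### axis and contracted with softmax(select_param), so training select_param smoothly
#### interpolates between the task-specific conv (index 0) and the shared DF-CNN conv (index 1).
def _demo_darts_mixing():
    ts_out = np.ones([1, 4, 4, 8], dtype=np.float32)            # task-specific branch output
    shared_out = 3.0 * np.ones([1, 4, 4, 8], dtype=np.float32)  # shared-branch output
    select = np.array([0.0, 0.0], dtype=np.float32)             # equal logits -> 0.5/0.5 mix
    mix = np.exp(select) / np.sum(np.exp(select))               # softmax over the two logits
    mixed = np.tensordot(np.stack([ts_out, shared_out], axis=4), mix.reshape(2, 1), axes=[[4], [0]])
    return np.reshape(mixed, mixed.shape[:-1])                  # every entry equals 2.0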
def new_darts_dfcnn_net(net_input, k_sizes, ch_sizes, stride_sizes, dfcnn_KB_sizes, dfcnn_TS_sizes, dfcnn_TS_stride_sizes, activation_fn=tf.nn.relu, dfcnn_TS_activation_fn=tf.nn.relu, dfcnn_KB_params=None, dfcnn_TS_params=None, cnn_TS_params=None, select_params=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_sizes=None, dropout=False, dropout_prob=None, flat_output=False, trainable=True, task_index=0, skip_connections=[], use_numpy_var_in_graph=False):
_num_TS_param_per_layer = 4
num_conv_layers = [len(k_sizes)//2, len(ch_sizes)-1, len(stride_sizes)//2, len(dfcnn_KB_sizes)//2, len(dfcnn_TS_sizes)//2, len(dfcnn_TS_stride_sizes)//2]
    assert (all([(num_conv_layers[i]==num_conv_layers[i+1]) for i in range(len(num_conv_layers)-1)])), "Lengths of the conv-layer parameter lists do not match!"
num_conv_layers = num_conv_layers[0]
    ## control_flag: [0] make new KB & TS / [1] make new TS only (KB given) / [2] reuse both (KB & TS given) / [3] make new KB only (TS given)
control_flag = [(dfcnn_KB_params is None and dfcnn_TS_params is None), (not (dfcnn_KB_params is None) and (dfcnn_TS_params is None)), not (dfcnn_KB_params is None or dfcnn_TS_params is None), ((dfcnn_KB_params is None) and not (dfcnn_TS_params is None))]
if cnn_TS_params is None:
cnn_TS_params = [None for _ in range(2*num_conv_layers)]
else:
assert(len(cnn_TS_params) == 2*num_conv_layers), "Check given parameters!"
if select_params is None:
select_params = [None for _ in range(num_conv_layers)]
layers_for_skip, next_skip_connect = [net_input], None
layers, dfcnn_shared_params_return, dfcnn_TS_params_return, cnn_TS_params_return, select_params_return = [], [], [], [], []
with tf.name_scope('DARTS_DFCNN_net'):
for layer_cnt in range(num_conv_layers):
            next_skip_connect = skip_connections.pop(0) if (len(skip_connections) > 0 and next_skip_connect is None) else next_skip_connect
if next_skip_connect is not None:
skip_connect_in, skip_connect_out = next_skip_connect
                assert (skip_connect_in > -1 and skip_connect_out > -1), "Given skip connection is invalid (it references a non-existent layer)"
else:
skip_connect_in, skip_connect_out = -1, -1
if layer_cnt == skip_connect_out:
processed_skip_connect_input = layers_for_skip[skip_connect_in]
for layer_cnt_tmp in range(skip_connect_in, skip_connect_out):
if max_pool and (pool_sizes[2*layer_cnt_tmp]>1 or pool_sizes[2*layer_cnt_tmp+1]>1):
processed_skip_connect_input = tf.nn.max_pool(processed_skip_connect_input, ksize=[1]+pool_sizes[2*layer_cnt_tmp:2*(layer_cnt_tmp+1)]+[1], strides=[1]+pool_sizes[2*layer_cnt_tmp:2*(layer_cnt_tmp+1)]+[1], padding=padding_type)
else:
processed_skip_connect_input = None
            layer_input = net_input if layer_cnt == 0 else layers[layer_cnt-1]
            ## reuse the KB and/or TS params that were given; the layer creates any that are None
            KB_init = dfcnn_KB_params[layer_cnt] if (control_flag[1] or control_flag[2]) else None
            TS_init = dfcnn_TS_params[_num_TS_param_per_layer*layer_cnt:_num_TS_param_per_layer*(layer_cnt+1)] if (control_flag[2] or control_flag[3]) else None
            layer_tmp, KB_para_tmp, TS_para_tmp, cnn_TS_para_tmp, select_para_tmp = new_darts_dfcnn_layer(layer_input, k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], dfcnn_KB_sizes[2*layer_cnt:2*(layer_cnt+1)], dfcnn_TS_sizes[2*layer_cnt:2*(layer_cnt+1)], dfcnn_TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=dfcnn_TS_activation_fn, KB_param=KB_init, TS_param=TS_init, conv_param=cnn_TS_params[2*layer_cnt:2*(layer_cnt+1)], select_param=select_params[layer_cnt], KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pooling=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], trainable=trainable, skip_connect_input=processed_skip_connect_input, use_numpy_var_in_graph=use_numpy_var_in_graph)
layers.append(layer_tmp)
layers_for_skip.append(layer_tmp)
dfcnn_shared_params_return = dfcnn_shared_params_return + KB_para_tmp
dfcnn_TS_params_return = dfcnn_TS_params_return + TS_para_tmp
cnn_TS_params_return = cnn_TS_params_return + cnn_TS_para_tmp
select_params_return = select_params_return + select_para_tmp
if layer_cnt == skip_connect_out:
next_skip_connect = None
#### flattening output
if flat_output:
output_dim = [int(layers[-1].shape[1]*layers[-1].shape[2]*layers[-1].shape[3])]
layers.append(tf.reshape(layers[-1], [-1, output_dim[0]]))
else:
output_dim = layers[-1].shape[1:]
#### add dropout layer
if dropout:
layers.append(tf.nn.dropout(layers[-1], dropout_prob))
return (layers, dfcnn_shared_params_return, dfcnn_TS_params_return, cnn_TS_params_return, select_params_return, output_dim)
def new_darts_dfcnn_fc_net(net_input, k_sizes, ch_sizes, stride_sizes, fc_sizes, dfcnn_KB_sizes, dfcnn_TS_sizes, dfcnn_TS_stride_sizes, cnn_activation_fn=tf.nn.relu, dfcnn_TS_activation_fn=tf.nn.relu, fc_activation_fn=tf.nn.relu, dfcnn_KB_params=None, dfcnn_TS_params=None, cnn_TS_params=None, select_params=None, fc_params=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_sizes=None, dropout=False, dropout_prob=None, output_type=None, trainable=True, task_index=0, skip_connections=[], use_numpy_var_in_graph=False):
cnn_model, dfcnn_shared_params_return, dfcnn_TS_params_return, cnn_TS_params_return, cnn_select_params_return, cnn_output_dim = new_darts_dfcnn_net(net_input, k_sizes, ch_sizes, stride_sizes, dfcnn_KB_sizes, dfcnn_TS_sizes, dfcnn_TS_stride_sizes, activation_fn=cnn_activation_fn, dfcnn_TS_activation_fn=dfcnn_TS_activation_fn, dfcnn_KB_params=dfcnn_KB_params, dfcnn_TS_params=dfcnn_TS_params, cnn_TS_params=cnn_TS_params, select_params=select_params, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_sizes=pool_sizes, dropout=dropout, dropout_prob=dropout_prob, flat_output=True, trainable=trainable, task_index=task_index, skip_connections=skip_connections, use_numpy_var_in_graph=use_numpy_var_in_graph)
fc_model, fc_params_return = new_fc_net(cnn_model[-1], fc_sizes, activation_fn=fc_activation_fn, params=fc_params, output_type=output_type, use_numpy_var_in_graph=use_numpy_var_in_graph)
return (cnn_model+fc_model, dfcnn_shared_params_return, dfcnn_TS_params_return, cnn_TS_params_return, cnn_select_params_return, fc_params_return)
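#### Editorial post-search sketch (hedged): once training ends, the per-layer architecture
#### choice can be discretized DARTS-style from the returned select parameters.
#### `trained_selects` is a hypothetical list of 2-element logit arrays, in the same
#### [task-specific, shared] order as the tf.stack call inside new_darts_dfcnn_layer.
def _demo_discretize_selection(trained_selects):
    return ['shared' if int(np.argmax(s)) == 1 else 'task-specific' for s in trained_selects]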
| 116.154079 | 1,000 | 0.734986 | 13,174 | 76,894 | 3.849552 | 0.01837 | 0.11973 | 0.087313 | 0.066254 | 0.936566 | 0.925011 | 0.911287 | 0.90403 | 0.892929 | 0.887073 | 0 | 0.023474 | 0.136292 | 76,894 | 661 | 1,001 | 116.329803 | 0.74013 | 0.053801 | 0 | 0.599147 | 0 | 0 | 0.01676 | 0 | 0 | 0 | 0 | 0 | 0.027719 | 1 | 0.036247 | false | 0 | 0.008529 | 0 | 0.081023 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
hexsha: f902e32cb0c6f87b960daa97f2b7828a5ba52e29 | size: 38,435 | ext: py | lang: Python
max_stars_repo_path: ross/tests/test_rubbing.py | max_stars_repo_name: rodrigomoliveira1/ross | max_stars_repo_head_hexsha: 51f9379a8a834e1253b94e70dd9f5324acd8c78e | max_stars_repo_licenses: ["MIT"] | max_stars_count: 1 | stars events: 2021-07-20T04:24:19.000Z to 2021-07-20T04:24:19.000Z
max_issues_repo_path: ross/tests/test_rubbing.py | max_issues_repo_name: rodrigomoliveira1/ross | max_issues_repo_head_hexsha: 51f9379a8a834e1253b94e70dd9f5324acd8c78e | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | issues events: null to null
max_forks_repo_path: ross/tests/test_rubbing.py | max_forks_repo_name: rodrigomoliveira1/ross | max_forks_repo_head_hexsha: 51f9379a8a834e1253b94e70dd9f5324acd8c78e | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | forks events: null to null
import numpy as np
import pytest

import ross as rs
from ross.units import Q_
steel2 = rs.Material(name="Steel", rho=7850, E=2.17e11, Poisson=0.2992610837438423)
# Rotor with 6 DoFs and internal damping: 33 shaft elements, 2 disks and 2 bearings.
i_d = 0
o_d = 0.019
n = 33
# fmt: off
L = np.array(
[0 , 25, 64, 104, 124, 143, 175, 207, 239, 271,
303, 335, 345, 355, 380, 408, 436, 466, 496, 526,
556, 586, 614, 647, 657, 667, 702, 737, 772, 807,
842, 862, 881, 914]
)/ 1000
# fmt: on
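# convert cumulative node positions (already scaled to meters above) into per-element
# lengths, e.g. [0.0, 0.025, 0.064, ...] -> [0.025, 0.039, ...]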
L = [L[i] - L[i - 1] for i in range(1, len(L))]
shaft_elem = [
rs.ShaftElement6DoF(
material=steel2,
L=l,
idl=i_d,
odl=o_d,
idr=i_d,
odr=o_d,
alpha=8.0501,
beta=1.0e-5,
rotary_inertia=True,
shear_effects=True,
)
for l in L
]
Id = 0.003844540885417
Ip = 0.007513248437500
disk0 = rs.DiskElement6DoF(n=12, m=2.6375, Id=Id, Ip=Ip)
disk1 = rs.DiskElement6DoF(n=24, m=2.6375, Id=Id, Ip=Ip)
kxx1 = 4.40e5
kyy1 = 4.6114e5
kzz = 0
cxx1 = 27.4
cyy1 = 2.505
czz = 0
kxx2 = 9.50e5
kyy2 = 1.09e8
cxx2 = 50.4
cyy2 = 100.4553
bearing0 = rs.BearingElement6DoF(
n=4, kxx=kxx1, kyy=kyy1, cxx=cxx1, cyy=cyy1, kzz=kzz, czz=czz
)
bearing1 = rs.BearingElement6DoF(
n=31, kxx=kxx2, kyy=kyy2, cxx=cxx2, cyy=cyy2, kzz=kzz, czz=czz
)
rotor = rs.Rotor(shaft_elem, [disk0, disk1], [bearing0, bearing1])
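# assembled model: 33 shaft elements (nodes 0-33), disks at nodes 12 and 24,
# bearings at nodes 4 and 31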
@pytest.fixture
def rub():
unbalance_magnitudet = np.array([5e-4, 0])
unbalance_phaset = np.array([-np.pi / 2, 0])
rubbing = rotor.run_rubbing(
dt=0.001,
tI=0,
tF=0.5,
deltaRUB=7.95e-5,
kRUB=1.1e6,
cRUB=40,
miRUB=0.3,
posRUB=12,
speed=125.66370614359172,
unbalance_magnitude=unbalance_magnitudet,
unbalance_phase=unbalance_phaset,
print_progress=True,
)
return rubbing
@pytest.fixture
def rub_units():
unbalance_magnitudet = Q_(np.array([0.043398083107259365, 0]), "lb*in")
unbalance_phaset = Q_(np.array([-90.0, 0.0]), "degrees")
rubbing = rotor.run_rubbing(
dt=0.001,
tI=0,
tF=0.5,
deltaRUB=7.95e-5,
kRUB=1.1e6,
cRUB=40,
miRUB=0.3,
posRUB=12,
speed=Q_(1200, "RPM"),
unbalance_magnitude=unbalance_magnitudet,
unbalance_phase=unbalance_phaset,
print_progress=True,
)
return rubbing
def test_rub_parameters(rub):
assert rub.dt == 0.001
assert rub.tI == 0
assert rub.tF == 0.5
assert rub.deltaRUB == 7.95e-5
assert rub.kRUB == 1.1e6
assert rub.cRUB == 40
assert rub.miRUB == 0.3
assert rub.posRUB == 12
assert rub.speed == 125.66370614359172
def test_rub_parameters_units(rub_units):
assert rub_units.dt == 0.001
assert rub_units.tI == 0
assert rub_units.tF == 0.5
assert rub_units.deltaRUB == 7.95e-5
assert rub_units.kRUB == 1.1e6
assert rub_units.cRUB == 40
assert rub_units.miRUB == 0.3
assert rub_units.posRUB == 12
assert rub_units.speed == 125.66370614359172
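# Editorial aside (hedged): `rub` and `rub_units` describe the same physical case.
# Assuming ross.units.Q_ wraps a pint Quantity, the RPM input converts as sketched
# below, which is why both parameter tests expect speed == 125.66370614359172 rad/s.
def _demo_speed_conversion():
    return Q_(1200, "RPM").to("rad/s").magnitude  # 2 * pi * 1200 / 60 ~= 125.6637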
def test_rub_forces(rub):
assert rub.forces_rub[rub.posRUB * 6, :] == pytest.approx(
# fmt: off
np.array([ 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
1.33959978, 2.38449456, 2.49659676, 1.81196092,
0.59693967, -1.62826881, -4.24183226, -6.00328692,
-6.20469077, -4.73542742, -2.72934246, -2.58572889,
-6.09380802, -11.89929423, -16.28700512, -16.40580808,
-12.28949661, -6.4715516 , -1.9219398 , 0.03597126,
0.1483467 , 0.06399221, 0.18405941, 0.44370982,
0.59170921, 0.55028863, 0.61661081, 1.15972902,
1.99145991, 2.2827491 , 1.21484512, 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , -0.16254596,
-1.15328767, -1.31402774, -0.57077859, 0.17589894,
0. , 0. , 0. , 0. ,
1.35177422, 4.63052083, 8.26595768, 10.77229714,
11.33408396, 9.97185414, 7.43725544, 4.8091569 ,
2.90279915, 1.9280518 , 1.60783411, 1.51278525,
1.30371098, 0.82428403, 0.11364267, -1.19150903,
-2.28684891, -2.8371608 , -2.70293006, -1.7191864 ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
-0.36389397, -0.02296953, -0.58251754, -1.26899237,
-1.37833025, -0.83533185, 0. , -1.54209802,
-3.90213044, -5.84909057, -5.54764546, -2.82245046,
0. , 0. , -0.9813435 , -5.02620683,
-7.74948602, -7.28498629, -4.21292376, -0.68113248,
0. , 0. , 0. , 0. ,
0.49651344, 0.94005508, 1.20771384, 1.01602262,
0.58504824, 0.42249176, 0.74712263, 1.21263464,
1.24036134, 0.53226162, 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0.81085807, 2.24845489,
2.79783564, 2.36749471, 1.48390821, 0.91606906,
1.10872277, 1.79762142, 2.27408108, 2.02409761,
1.09722613, 0.03554649, 0. , 0. ,
0. , 0. , 0. , -3.20746741,
-5.67109902, -5.84109266, -3.27138013, 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , -0.94605595,
-2.89024152, -2.82355527, -0.91081202, 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0.29208245, 0.31276202, 0.55407755,
0.65238154, 0.38599931, 0. , 0. ,
0. , 0. , 0. , 0.30206142,
1.24645948, 2.17118657, 2.79095166, 2.92291644,
2.4997066 , 1.57624218, 0.3448333 , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
-0.25292217, 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , -1.16895344, -3.13003066,
-4.31801297, -3.79661585, -1.72270664, 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0.22784948, 0.25813886,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , -1.0783812 , -1.3305057 ,
0.04528174, 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. ])
# fmt: on
)
assert rub.forces_rub[rub.posRUB * 6 + 1, :] == pytest.approx(
# fmt: off
np.array([ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
-2.04052219e+00, -4.37686754e+00, -6.57561714e+00, -8.28614638e+00,
-9.37724617e+00, -9.76009440e+00, -9.25692987e+00, -7.68859694e+00,
-5.19404219e+00, -2.47936767e+00, -6.07948333e-01, -1.39115180e-01,
-2.90615182e-01, 5.97112915e-01, 3.77091904e+00, 7.17958236e+00,
8.50566481e+00, 6.92594573e+00, 3.76719699e+00, 1.38672233e+00,
1.14491558e+00, 2.47008298e+00, 3.68895320e+00, 3.55463037e+00,
2.15169896e+00, 7.16172655e-01, 4.58130638e-01, 1.31722645e+00,
2.08017814e+00, 1.73698314e+00, 3.66421390e-01, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.41608952e+00,
3.91872725e+00, 6.08751103e+00, 6.04879589e+00, 3.34005262e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
1.32257678e+00, 4.72982449e+00, 5.65624377e+00, 3.74610313e+00,
2.79267497e-01, -1.61571947e+00, -2.61760973e+00, -2.76295228e+00,
-2.46754501e+00, -2.27912450e+00, -2.49834149e+00, -3.08123496e+00,
-3.77081696e+00, -4.26182369e+00, -4.32814157e+00, -3.92465771e+00,
-3.18784204e+00, -2.30089569e+00, -1.36585055e+00, -4.14179880e-01,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
-2.27746914e-01, -2.70148968e+00, -3.76415772e+00, -3.18400597e+00,
-1.61071020e+00, -1.55937047e-01, 0.00000000e+00, -3.48135704e-01,
-1.20173785e+00, -1.41164098e+00, -8.22789836e-01, 2.22972608e-02,
0.00000000e+00, 0.00000000e+00, 1.11826215e+00, 3.42190067e+00,
6.43880255e+00, 7.73939551e+00, 6.02642177e+00, 2.15551882e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
1.30262708e+00, 2.86790215e+00, 2.69095371e+00, 1.30596104e+00,
6.27355622e-02, -1.46409693e-01, -1.61596825e-03, 2.02368230e-01,
7.76880971e-02, -1.13943983e-01, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, -1.20894403e-01, -9.67454370e-02,
-3.75553448e-01, -6.69880393e-01, -7.81524379e-01, -8.20393160e-01,
-1.19541101e+00, -2.17352648e+00, -3.38963807e+00, -3.97901667e+00,
-3.24152409e+00, -1.21249778e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, -1.71167570e+00,
-2.63258535e+00, -2.11381589e+00, -7.29071480e-01, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.55809454e-01,
-1.33550998e-01, 1.30622371e-01, 5.14772296e-01, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 5.87400070e-01, 2.52352092e+00, 3.18829790e+00,
2.12154931e+00, 1.96463435e-01, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, -1.71590151e-01,
8.49398637e-02, -3.92130275e-03, -3.10907319e-01, -7.32520442e-01,
-1.04949045e+00, -1.03758926e+00, -5.57304261e-01, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
-2.22986515e-01, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 1.36141783e-01, -1.62089253e-01,
-6.56719131e-02, 3.67462355e-01, 6.11527272e-01, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, -7.23117550e-02, -4.63871628e-02,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 8.90041966e-01, 1.14524591e+00,
4.21896347e-01, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00])
# fmt: on
)
def test_rub_forces_units(rub_units):
assert rub_units.forces_rub[rub_units.posRUB * 6, :] == pytest.approx(
# fmt: off
np.array([ 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
1.33959978, 2.38449456, 2.49659676, 1.81196092,
0.59693967, -1.62826881, -4.24183226, -6.00328692,
-6.20469077, -4.73542742, -2.72934246, -2.58572889,
-6.09380802, -11.89929423, -16.28700512, -16.40580808,
-12.28949661, -6.4715516 , -1.9219398 , 0.03597126,
0.1483467 , 0.06399221, 0.18405941, 0.44370982,
0.59170921, 0.55028863, 0.61661081, 1.15972902,
1.99145991, 2.2827491 , 1.21484512, 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , -0.16254596,
-1.15328767, -1.31402774, -0.57077859, 0.17589894,
0. , 0. , 0. , 0. ,
1.35177422, 4.63052083, 8.26595768, 10.77229714,
11.33408396, 9.97185414, 7.43725544, 4.8091569 ,
2.90279915, 1.9280518 , 1.60783411, 1.51278525,
1.30371098, 0.82428403, 0.11364267, -1.19150903,
-2.28684891, -2.8371608 , -2.70293006, -1.7191864 ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
-0.36389397, -0.02296953, -0.58251754, -1.26899237,
-1.37833025, -0.83533185, 0. , -1.54209802,
-3.90213044, -5.84909057, -5.54764546, -2.82245046,
0. , 0. , -0.9813435 , -5.02620683,
-7.74948602, -7.28498629, -4.21292376, -0.68113248,
0. , 0. , 0. , 0. ,
0.49651344, 0.94005508, 1.20771384, 1.01602262,
0.58504824, 0.42249176, 0.74712263, 1.21263464,
1.24036134, 0.53226162, 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0.81085807, 2.24845489,
2.79783564, 2.36749471, 1.48390821, 0.91606906,
1.10872277, 1.79762142, 2.27408108, 2.02409761,
1.09722613, 0.03554649, 0. , 0. ,
0. , 0. , 0. , -3.20746741,
-5.67109902, -5.84109266, -3.27138013, 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , -0.94605595,
-2.89024152, -2.82355527, -0.91081202, 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0.29208245, 0.31276202, 0.55407755,
0.65238154, 0.38599931, 0. , 0. ,
0. , 0. , 0. , 0.30206142,
1.24645948, 2.17118657, 2.79095166, 2.92291644,
2.4997066 , 1.57624218, 0.3448333 , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
-0.25292217, 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , -1.16895344, -3.13003066,
-4.31801297, -3.79661585, -1.72270664, 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0.22784948, 0.25813886,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. , 0. , -1.0783812 , -1.3305057 ,
0.04528174, 0. , 0. , 0. ,
0. , 0. , 0. , 0. ,
0. ])
# fmt: on
)
assert rub_units.forces_rub[rub_units.posRUB * 6 + 1, :] == pytest.approx(
# fmt: off
np.array([ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
-2.04052219e+00, -4.37686754e+00, -6.57561714e+00, -8.28614638e+00,
-9.37724617e+00, -9.76009440e+00, -9.25692987e+00, -7.68859694e+00,
-5.19404219e+00, -2.47936767e+00, -6.07948333e-01, -1.39115180e-01,
-2.90615182e-01, 5.97112915e-01, 3.77091904e+00, 7.17958236e+00,
8.50566481e+00, 6.92594573e+00, 3.76719699e+00, 1.38672233e+00,
1.14491558e+00, 2.47008298e+00, 3.68895320e+00, 3.55463037e+00,
2.15169896e+00, 7.16172655e-01, 4.58130638e-01, 1.31722645e+00,
2.08017814e+00, 1.73698314e+00, 3.66421390e-01, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.41608952e+00,
3.91872725e+00, 6.08751103e+00, 6.04879589e+00, 3.34005262e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
1.32257678e+00, 4.72982449e+00, 5.65624377e+00, 3.74610313e+00,
2.79267497e-01, -1.61571947e+00, -2.61760973e+00, -2.76295228e+00,
-2.46754501e+00, -2.27912450e+00, -2.49834149e+00, -3.08123496e+00,
-3.77081696e+00, -4.26182369e+00, -4.32814157e+00, -3.92465771e+00,
-3.18784204e+00, -2.30089569e+00, -1.36585055e+00, -4.14179880e-01,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
-2.27746914e-01, -2.70148968e+00, -3.76415772e+00, -3.18400597e+00,
-1.61071020e+00, -1.55937047e-01, 0.00000000e+00, -3.48135704e-01,
-1.20173785e+00, -1.41164098e+00, -8.22789836e-01, 2.22972608e-02,
0.00000000e+00, 0.00000000e+00, 1.11826215e+00, 3.42190067e+00,
6.43880255e+00, 7.73939551e+00, 6.02642177e+00, 2.15551882e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
1.30262708e+00, 2.86790215e+00, 2.69095371e+00, 1.30596104e+00,
6.27355622e-02, -1.46409693e-01, -1.61596825e-03, 2.02368230e-01,
7.76880971e-02, -1.13943983e-01, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, -1.20894403e-01, -9.67454370e-02,
-3.75553448e-01, -6.69880393e-01, -7.81524379e-01, -8.20393160e-01,
-1.19541101e+00, -2.17352648e+00, -3.38963807e+00, -3.97901667e+00,
-3.24152409e+00, -1.21249778e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, -1.71167570e+00,
-2.63258535e+00, -2.11381589e+00, -7.29071480e-01, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.55809454e-01,
-1.33550998e-01, 1.30622371e-01, 5.14772296e-01, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 5.87400070e-01, 2.52352092e+00, 3.18829790e+00,
2.12154931e+00, 1.96463435e-01, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, -1.71590151e-01,
8.49398637e-02, -3.92130275e-03, -3.10907319e-01, -7.32520442e-01,
-1.04949045e+00, -1.03758926e+00, -5.57304261e-01, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
-2.22986515e-01, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 1.36141783e-01, -1.62089253e-01,
-6.56719131e-02, 3.67462355e-01, 6.11527272e-01, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, -7.23117550e-02, -4.63871628e-02,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 8.90041966e-01, 1.14524591e+00,
4.21896347e-01, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00])
# fmt: on
)
| 57.19494 | 92 | 0.452738 | 4,847 | 38,435 | 3.577058 | 0.094285 | 0.431422 | 0.517707 | 0.581382 | 0.92006 | 0.913023 | 0.904487 | 0.900969 | 0.900969 | 0.897912 | 0 | 0.625108 | 0.39732 | 38,435 | 671 | 93 | 57.280179 | 0.123381 | 0.004527 | 0 | 0.847134 | 0 | 0 | 0.000523 | 0 | 0 | 0 | 0 | 0 | 0.036624 | 1 | 0.009554 | false | 0 | 0.014331 | 0 | 0.02707 | 0.003185 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 |
hexsha: f924a3360f6478f383a6de27cd9e66749ac25041 | size: 36,224 | ext: py | lang: Python
max_stars_repo_path: tests/io/test_hdf_writer.py | max_stars_repo_name: hbdeng/pycroscopy | max_stars_repo_head_hexsha: f9a6d273f6a8e6fdda1287cec82cd6da32d9e2a5 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 1 | stars events: 2020-02-13T20:54:47.000Z to 2020-02-13T20:54:47.000Z
max_issues_repo_path: tests/io/test_hdf_writer.py | max_issues_repo_name: Liambcollins/pycroscopy | max_issues_repo_head_hexsha: fd02ac735a1194d2a5687183fafe00368ed8a3ca | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | issues events: null to null
max_forks_repo_path: tests/io/test_hdf_writer.py | max_forks_repo_name: Liambcollins/pycroscopy | max_forks_repo_head_hexsha: fd02ac735a1194d2a5687183fafe00368ed8a3ca | max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | forks events: 2020-03-20T13:19:09.000Z to 2020-03-20T13:19:09.000Z
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 15:07:16 2017
@author: Suhas Somnath
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import unittest
import os
import h5py
import numpy as np
import sys
sys.path.append("../../../pycroscopy/")
from pycroscopy.io.virtual_data import VirtualGroup, VirtualDataset
from pycroscopy.io.hdf_writer import HDFwriter
from pyUSID.io.hdf_utils import get_attr, get_h5_obj_refs # Until an elegant solution presents itself
class TestHDFWriter(unittest.TestCase):
@staticmethod
def __delete_existing_file(file_path):
if os.path.exists(file_path):
os.remove(file_path)
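    # NOTE (editorial, hedged): the bare `h5py.File(file_path)` calls in the tests below
    # rely on the pre-h5py-3 default file mode ('a'); on h5py >= 3 an explicit mode
    # (e.g. mode='a') would be needed, since the default became read-only.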
def test_init_invalid_input(self):
with self.assertRaises(TypeError):
_ = HDFwriter(4)
def test_init_path_non_existant_file_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
writer = HDFwriter(file_path)
self.assertIsInstance(writer, HDFwriter, "writer should be an HDFwriter")
writer.close()
os.remove(file_path)
def test_init_path_existing_file_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
h5_f = h5py.File(file_path)
h5_f.close()
# Existing h5 file
writer = HDFwriter(file_path)
self.assertIsInstance(writer, HDFwriter, "writer should be an HDFwriter")
writer.close()
os.remove(file_path)
def test_init_h5_handle_r_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
h5_f = h5py.File(file_path)
h5_f.close()
h5_f = h5py.File(file_path, mode='r')
# hdf handle but of mode r
with self.assertRaises(TypeError):
_ = HDFwriter(h5_f)
os.remove(file_path)
def test_init_h5_handle_r_plus_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
h5_f = h5py.File(file_path)
h5_f.close()
h5_f = h5py.File(file_path, mode='r+')
# open h5 file handle or mode r+
writer = HDFwriter(h5_f)
self.assertIsInstance(writer, HDFwriter, "writer should be an HDFwriter")
writer.close()
os.remove(file_path)
def test_init_h5_handle_w_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
h5_f = h5py.File(file_path)
h5_f.close()
h5_f = h5py.File(file_path, mode='w')
# open h5 file handle of mode w
writer = HDFwriter(h5_f)
self.assertIsInstance(writer, HDFwriter, "writer should be an HDFwriter")
writer.close()
os.remove(file_path)
def test_init_h5_handle_closed(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
h5_f = h5py.File(file_path)
h5_f.close()
# Existing h5 file but closed
with self.assertRaises(ValueError):
_ = HDFwriter(h5_f)
os.remove(file_path)
def test_simple_dset_write_success_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
dtype = np.uint16
dset_name = 'test'
data = np.random.randint(0, high=15, size=5, dtype=dtype)
microdset = VirtualDataset(dset_name, data)
writer = HDFwriter(h5_f)
h5_d = writer._create_simple_dset(h5_f, microdset)
self.assertIsInstance(h5_d, h5py.Dataset)
self.assertEqual(h5_d.parent, h5_f)
self.assertEqual(h5_d.name, '/' + dset_name)
self.assertEqual(h5_d.shape, data.shape)
self.assertTrue(np.allclose(h5_d[()], data))
self.assertEqual(h5_d.dtype, dtype)
os.remove(file_path)
def test_simple_dset_write_success_more_options_02(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
dset_name = 'test'
data = np.random.rand(16, 1024)
dtype = data.dtype
compression = 'gzip'
chunking = (1, 1024)
microdset = VirtualDataset(dset_name, data, dtype=dtype, compression=compression, chunking=chunking)
writer = HDFwriter(h5_f)
h5_d = writer._create_simple_dset(h5_f, microdset)
self.assertIsInstance(h5_d, h5py.Dataset)
self.assertEqual(h5_d.parent, h5_f)
self.assertEqual(h5_d.name, '/' + dset_name)
self.assertEqual(h5_d.shape, data.shape)
self.assertTrue(np.allclose(h5_d[()], data))
self.assertEqual(h5_d.dtype, dtype)
self.assertEqual(h5_d.compression, compression)
self.assertEqual(h5_d.chunks, chunking)
os.remove(file_path)
def test_simple_dset_write_success_more_options_03(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
dset_name = 'test'
data = np.random.rand(16, 1024)
dtype = np.float16
compression = 'gzip'
chunking = (1, 1024)
microdset = VirtualDataset(dset_name, data, dtype=dtype, compression=compression, chunking=chunking)
writer = HDFwriter(h5_f)
h5_d = writer._create_simple_dset(h5_f, microdset)
self.assertIsInstance(h5_d, h5py.Dataset)
self.assertEqual(h5_d.parent, h5_f)
self.assertEqual(h5_d.name, '/' + dset_name)
self.assertEqual(h5_d.shape, data.shape)
self.assertEqual(h5_d.dtype, dtype)
self.assertEqual(h5_d.compression, compression)
self.assertEqual(h5_d.chunks, chunking)
self.assertTrue(np.all(h5_d[()] - data < 1E-3))
os.remove(file_path)
def test_empty_dset_write_success_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
dset_name = 'test'
maxshape = (16, 1024)
microdset = VirtualDataset(dset_name, None, maxshape=maxshape)
writer = HDFwriter(h5_f)
h5_d = writer._create_empty_dset(h5_f, microdset)
self.assertIsInstance(h5_d, h5py.Dataset)
self.assertEqual(h5_d.parent, h5_f)
self.assertEqual(h5_d.name, '/' + dset_name)
self.assertEqual(h5_d.shape, maxshape)
self.assertEqual(h5_d.maxshape, maxshape)
# dtype is assigned automatically by h5py. Not to be tested here
os.remove(file_path)
def test_empty_dset_write_success_w_options_02(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
dset_name = 'test'
maxshape = (16, 1024)
chunking = (1, 1024)
compression = 'gzip'
dtype = np.float16
microdset = VirtualDataset(dset_name, None, maxshape=maxshape,
dtype=dtype, compression=compression, chunking=chunking)
writer = HDFwriter(h5_f)
h5_d = writer._create_empty_dset(h5_f, microdset)
self.assertIsInstance(h5_d, h5py.Dataset)
self.assertEqual(h5_d.parent, h5_f)
self.assertEqual(h5_d.name, '/' + dset_name)
self.assertEqual(h5_d.dtype, dtype)
self.assertEqual(h5_d.compression, compression)
self.assertEqual(h5_d.chunks, chunking)
self.assertEqual(h5_d.shape, maxshape)
self.assertEqual(h5_d.maxshape, maxshape)
os.remove(file_path)
def test_expandable_dset_write_success_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
dset_name = 'test'
maxshape = (None, 1024)
data = np.random.rand(1, 1024)
microdset = VirtualDataset(dset_name, data, maxshape=maxshape)
writer = HDFwriter(h5_f)
h5_d = writer._create_resizeable_dset(h5_f, microdset)
self.assertIsInstance(h5_d, h5py.Dataset)
self.assertEqual(h5_d.parent, h5_f)
self.assertEqual(h5_d.name, '/' + dset_name)
self.assertEqual(h5_d.shape, data.shape)
self.assertEqual(h5_d.maxshape, maxshape)
self.assertTrue(np.allclose(h5_d[()], data))
# Now test that the dataset can indeed be expanded:
# TODO: add this to the example! (a hedged resize sketch appears at the end of this file)
expansion_axis = 0
h5_d.resize(h5_d.shape[expansion_axis] + 1, axis=expansion_axis)
self.assertEqual(h5_d.shape, (data.shape[0]+1, data.shape[1]))
self.assertEqual(h5_d.maxshape, maxshape)
# Finally, check that the new data is also present in the file
new_data = np.random.rand(1024)
h5_d[1] = new_data
data = np.vstack((np.squeeze(data), new_data))
self.assertTrue(np.allclose(h5_d[()], data))
os.remove(file_path)
# TODO: will have to check to see if the parent is correctly declared for the group
def test_group_create_non_indexed_simple_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
grp_name = 'test'
micro_group = VirtualGroup(grp_name)
writer = HDFwriter(h5_f)
h5_grp = writer._create_group(h5_f, micro_group)
self.assertIsInstance(h5_grp, h5py.Group)
self.assertEqual(h5_grp.parent, h5_f)
self.assertEqual(h5_grp.name, '/' + grp_name)
# self.assertEqual(len(h5_grp.items), 0)
os.remove(file_path)
def test_group_create_indexed_simple_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
grp_name = 'test_'
micro_group = VirtualGroup(grp_name)
writer = HDFwriter(h5_f)
h5_grp = writer._create_group(h5_f, micro_group)
self.assertIsInstance(h5_grp, h5py.Group)
self.assertEqual(h5_grp.parent, h5_f)
self.assertEqual(h5_grp.name, '/' + grp_name + '000')
# self.assertEqual(len(h5_grp.items), 0)
os.remove(file_path)
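# Hedged note (inferred from the assertions in these tests, not from separate
# documentation): a VirtualGroup whose name ends in '_' is treated as indexed,
# and HDFwriter appends a zero-padded counter ('000', '001', ...) on write.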
def test_group_create_root_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
grp_name = ''
micro_group = VirtualGroup(grp_name)
writer = HDFwriter(h5_f)
with self.assertRaises(ValueError):
_ = writer._create_group(h5_f, micro_group)
os.remove(file_path)
def test_group_create_indexed_nested_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
outer_grp_name = 'outer_'
micro_group = VirtualGroup(outer_grp_name)
writer = HDFwriter(h5_f)
h5_outer_grp = writer._create_group(h5_f, micro_group)
self.assertIsInstance(h5_outer_grp, h5py.Group)
self.assertEqual(h5_outer_grp.parent, h5_f)
self.assertEqual(h5_outer_grp.name, '/' + outer_grp_name + '000')
inner_grp_name = 'inner_'
micro_group = VirtualGroup(inner_grp_name)
h5_inner_grp = writer._create_group(h5_outer_grp, micro_group)
self.assertIsInstance(h5_inner_grp, h5py.Group)
self.assertEqual(h5_inner_grp.parent, h5_outer_grp)
self.assertEqual(h5_inner_grp.name, h5_outer_grp.name + '/' + inner_grp_name + '000')
os.remove(file_path)
def test_write_legal_reg_ref_multi_dim_data(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(0, None, 2), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None))}}
writer._write_dset_attributes(h5_dset, attrs.copy())
h5_f.flush()
# two attributes are region references (one per label), plus the 'labels' attribute itself
self.assertEqual(len(h5_dset.attrs), 1 + len(attrs['labels']))
# check if the labels attribute was written:
self.assertTrue(np.all([x in list(attrs['labels'].keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:None:2], data[1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
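# Hedged note (illustrative, not from the original suite): each label above is
# written as an h5py region reference attribute, so indexing the dataset with
# h5_dset.attrs['even_rows'] dereferences that region and returns only the
# selected rows.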
def test_write_legal_reg_ref_multi_dim_data_2nd_dim(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 3)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(None), slice(0, None, 2)),
'odd_rows': (slice(None), slice(1, None, 2))}}
writer._write_dset_attributes(h5_dset, attrs.copy())
h5_f.flush()
# two attributes are region references (one per label), plus the 'labels' attribute itself
self.assertEqual(len(h5_dset.attrs), 1 + len(attrs['labels']))
# check if the labels attribute was written:
self.assertTrue(np.all([x in list(attrs['labels'].keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:, 0:None:2], data[:, 1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_write_legal_reg_ref_one_dim_data(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(0, None, 2)),
'odd_rows': (slice(1, None, 2))}}
writer._write_dset_attributes(h5_dset, attrs.copy())
h5_f.flush()
# two attributes are region references (one per label), plus the 'labels' attribute itself
self.assertEqual(len(h5_dset.attrs), 1 + len(attrs['labels']))
# check if the labels attribute was written:
self.assertTrue(np.all([x in list(attrs['labels'].keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:None:2], data[1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_generate_and_write_reg_ref_legal(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(2, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': ['row_1', 'row_2']}
if sys.version_info.major == 3:
with self.assertWarns(UserWarning):
writer._write_dset_attributes(h5_dset, attrs.copy())
else:
writer._write_dset_attributes(h5_dset, attrs.copy())
h5_f.flush()
# two attributes are region references (one per label), plus the 'labels' attribute itself
self.assertEqual(len(h5_dset.attrs), 1 + len(attrs['labels']))
# check if the labels attribute was written:
self.assertTrue(np.all([x in list(attrs['labels']) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[0], data[1]]
written_data = [h5_dset[h5_dset.attrs['row_1']], h5_dset[h5_dset.attrs['row_2']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(np.squeeze(exp), np.squeeze(act)))
os.remove(file_path)
def test_generate_and_write_reg_ref_illegal(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(3, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
# with self.assertWarns(UserWarning):
writer._write_dset_attributes(h5_dset, {'labels': ['row_1', 'row_2']})
self.assertEqual(len(h5_dset.attrs), 0)
h5_f.flush()
os.remove(file_path)
def test_generate_and_write_reg_ref_illegal_type(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(2, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
# with self.assertWarns(UserWarning):
with self.assertRaises(TypeError):
writer._write_dset_attributes(h5_dset, {'labels': [1, np.arange(3)]})
os.remove(file_path)
def test_write_illegal_reg_ref_too_many_slices(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(0, None, 2), slice(None), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None), slice(None))}}
with self.assertRaises(ValueError):
writer._write_dset_attributes(h5_dset, attrs.copy())
os.remove(file_path)
def test_write_illegal_reg_ref_too_few_slices(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(0, None, 2)),
'odd_rows': (slice(1, None, 2))}}
with self.assertRaises(ValueError):
writer._write_dset_attributes(h5_dset, attrs.copy())
os.remove(file_path)
def test_write_reg_ref_slice_dim_larger_than_data(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(0, 15, 2), slice(None)),
'odd_rows': (slice(1, 15, 2), slice(None))}}
writer._write_dset_attributes(h5_dset, attrs.copy())
h5_f.flush()
# two attributes are region references (one per label), plus the 'labels' attribute itself
self.assertEqual(len(h5_dset.attrs), 1 + len(attrs['labels']))
# check if the labels attribute was written:
self.assertTrue(np.all([x in list(attrs['labels'].keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:None:2], data[1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_write_illegal_reg_ref_not_slice_objs(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(0, None, 2), 15),
'odd_rows': (slice(1, None, 2), 'hello')}}
with self.assertRaises(TypeError):
writer._write_dset_attributes(h5_dset, attrs.copy())
os.remove(file_path)
def test_write_simple_atts_reg_ref_to_dset(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3'],
'labels': {'even_rows': (slice(0, None, 2), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None))}
}
writer._write_dset_attributes(h5_dset, attrs.copy())
reg_ref = attrs.pop('labels')
self.assertEqual(len(h5_dset.attrs), len(attrs) + 1 + len(reg_ref))
for key, expected_val in attrs.items():
self.assertTrue(np.all(get_attr(h5_dset, key) == expected_val))
self.assertTrue(np.all([x in list(reg_ref.keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:None:2], data[1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_write_invalid_input(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
with self.assertRaises(TypeError):
_ = writer.write(np.arange(5))
def test_write_dset_under_root(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3'],
'labels': {'even_rows': (slice(0, None, 2), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None))}
}
micro_dset = VirtualDataset('test', data)
micro_dset.attrs = attrs.copy()
[h5_dset] = writer.write(micro_dset)
self.assertIsInstance(h5_dset, h5py.Dataset)
reg_ref = attrs.pop('labels')
self.assertEqual(len(h5_dset.attrs), len(attrs) + 1 + len(reg_ref))
for key, expected_val in attrs.items():
self.assertTrue(np.all(get_attr(h5_dset, key) == expected_val))
self.assertTrue(np.all([x in list(reg_ref.keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:None:2], data[1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_write_dset_under_existing_group(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
h5_g = writer._create_group(h5_f, VirtualGroup('test_group'))
self.assertIsInstance(h5_g, h5py.Group)
data = np.random.rand(5, 7)
attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3'],
'labels': {'even_rows': (slice(0, None, 2), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None))}
}
micro_dset = VirtualDataset('test', data, parent='/test_group')
micro_dset.attrs = attrs.copy()
[h5_dset] = writer.write(micro_dset)
self.assertIsInstance(h5_dset, h5py.Dataset)
self.assertEqual(h5_dset.parent, h5_g)
reg_ref = attrs.pop('labels')
self.assertEqual(len(h5_dset.attrs), len(attrs) + 1 + len(reg_ref))
for key, expected_val in attrs.items():
self.assertTrue(np.all(get_attr(h5_dset, key) == expected_val))
self.assertTrue(np.all([x in list(reg_ref.keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:None:2], data[1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_write_dset_under_invalid_group(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
with self.assertRaises(KeyError):
_ = writer.write(VirtualDataset('test', np.random.rand(5, 7), parent='/does_not_exist'))
os.remove(file_path)
def test_write_root(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3']}
micro_group = VirtualGroup('')
micro_group.attrs = attrs
writer = HDFwriter(h5_f)
[ret_val] = writer.write(micro_group)
self.assertIsInstance(ret_val, h5py.File)
self.assertEqual(h5_f, ret_val)
for key, expected_val in attrs.items():
self.assertTrue(np.all(get_attr(h5_f, key) == expected_val))
os.remove(file_path)
def test_write_single_group(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3']}
micro_group = VirtualGroup('Test_')
micro_group.attrs = attrs
writer = HDFwriter(h5_f)
[h5_group] = writer.write(micro_group)
for key, expected_val in attrs.items():
self.assertTrue(np.all(get_attr(h5_group, key) == expected_val))
os.remove(file_path)
def test_group_indexing_sequential(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
micro_group_0 = VirtualGroup('Test_', attrs={'att_1': 'string_val', 'att_2': 1.2345})
[h5_group_0] = writer.write(micro_group_0)
_ = writer.write(VirtualGroup('blah'))
self.assertIsInstance(h5_group_0, h5py.Group)
self.assertEqual(h5_group_0.name, '/Test_000')
for key, expected_val in micro_group_0.attrs.items():
self.assertTrue(np.all(get_attr(h5_group_0, key) == expected_val))
micro_group_1 = VirtualGroup('Test_', attrs={'att_3': [1, 2, 3, 4], 'att_4': ['str_1', 'str_2', 'str_3']})
[h5_group_1] = writer.write(micro_group_1)
self.assertIsInstance(h5_group_1, h5py.Group)
self.assertEqual(h5_group_1.name, '/Test_001')
for key, expected_val in micro_group_1.attrs.items():
self.assertTrue(np.all(get_attr(h5_group_1, key) == expected_val))
os.remove(file_path)
def test_group_indexing_simultaneous(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
micro_group_0 = VirtualGroup('Test_', attrs={'att_1': 'string_val', 'att_2': 1.2345})
micro_group_1 = VirtualGroup('Test_', attrs={'att_3': [1, 2, 3, 4], 'att_4': ['str_1', 'str_2', 'str_3']})
root_group = VirtualGroup('', children=[VirtualGroup('blah'), micro_group_0,
VirtualGroup('meh'), micro_group_1])
writer = HDFwriter(h5_f)
h5_refs_list = writer.write(root_group)
[h5_group_1] = get_h5_obj_refs(['Test_001'], h5_refs_list)
[h5_group_0] = get_h5_obj_refs(['Test_000'], h5_refs_list)
self.assertIsInstance(h5_group_0, h5py.Group)
self.assertEqual(h5_group_0.name, '/Test_000')
for key, expected_val in micro_group_0.attrs.items():
self.assertTrue(np.all(get_attr(h5_group_0, key) == expected_val))
self.assertIsInstance(h5_group_1, h5py.Group)
self.assertEqual(h5_group_1.name, '/Test_001')
for key, expected_val in micro_group_1.attrs.items():
self.assertTrue(np.all(get_attr(h5_group_1, key) == expected_val))
os.remove(file_path)
def test_write_simple_tree(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
inner_dset_data = np.random.rand(5, 7)
inner_dset_attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3'],
'labels': {'even_rows': (slice(0, None, 2), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None))}
}
inner_dset = VirtualDataset('inner_dset', inner_dset_data)
inner_dset.attrs = inner_dset_attrs.copy()
attrs_inner_grp = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3']}
inner_group = VirtualGroup('indexed_inner_group_')
inner_group.attrs = attrs_inner_grp
inner_group.add_children(inner_dset)
outer_dset_data = np.random.rand(5, 7)
outer_dset_attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3'],
'labels': {'even_rows': (slice(0, None, 2), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None))}
}
outer_dset = VirtualDataset('test', outer_dset_data, parent='/test_group')
outer_dset.attrs = outer_dset_attrs.copy()
attrs_outer_grp = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3']}
outer_group = VirtualGroup('unindexed_outer_group')
outer_group.attrs = attrs_outer_grp
outer_group.add_children([inner_group, outer_dset])
writer = HDFwriter(h5_f)
h5_refs_list = writer.write(outer_group)
# I don't know of a more elegant way to do this:
[h5_outer_dset] = get_h5_obj_refs([outer_dset.name], h5_refs_list)
[h5_inner_dset] = get_h5_obj_refs([inner_dset.name], h5_refs_list)
[h5_outer_group] = get_h5_obj_refs([outer_group.name], h5_refs_list)
[h5_inner_group] = get_h5_obj_refs(['indexed_inner_group_000'], h5_refs_list)
self.assertIsInstance(h5_outer_dset, h5py.Dataset)
self.assertIsInstance(h5_inner_dset, h5py.Dataset)
self.assertIsInstance(h5_outer_group, h5py.Group)
self.assertIsInstance(h5_inner_group, h5py.Group)
# check assertions for the inner dataset first
self.assertEqual(h5_inner_dset.parent, h5_inner_group)
reg_ref = inner_dset_attrs.pop('labels')
self.assertEqual(len(h5_inner_dset.attrs), len(inner_dset_attrs) + 1 + len(reg_ref))
for key, expected_val in inner_dset_attrs.items():
self.assertTrue(np.all(get_attr(h5_inner_dset, key) == expected_val))
self.assertTrue(np.all([x in list(reg_ref.keys()) for x in get_attr(h5_inner_dset, 'labels')]))
expected_data = [inner_dset_data[:None:2], inner_dset_data[1:None:2]]
written_data = [h5_inner_dset[h5_inner_dset.attrs['even_rows']], h5_inner_dset[h5_inner_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
# check assertions for the inner data group next:
self.assertEqual(h5_inner_group.parent, h5_outer_group)
for key, expected_val in attrs_inner_grp.items():
self.assertTrue(np.all(get_attr(h5_inner_group, key) == expected_val))
# check the outer dataset next:
self.assertEqual(h5_outer_dset.parent, h5_outer_group)
reg_ref = outer_dset_attrs.pop('labels')
self.assertEqual(len(h5_outer_dset.attrs), len(outer_dset_attrs) + 1 + len(reg_ref))
for key, expected_val in outer_dset_attrs.items():
self.assertTrue(np.all(get_attr(h5_outer_dset, key) == expected_val))
self.assertTrue(np.all([x in list(reg_ref.keys()) for x in get_attr(h5_outer_dset, 'labels')]))
expected_data = [outer_dset_data[:None:2], outer_dset_data[1:None:2]]
written_data = [h5_outer_dset[h5_outer_dset.attrs['even_rows']],
h5_outer_dset[h5_outer_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
# Finally check the outer group:
self.assertEqual(h5_outer_group.parent, h5_f)
for key, expected_val in attrs_outer_grp.items():
self.assertTrue(np.all(get_attr(h5_outer_group, key) == expected_val))
os.remove(file_path)
if __name__ == '__main__':
unittest.main()
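# Hedged sketch (not part of the original suite) of the plain-h5py resize
# pattern exercised by test_expandable_dset_write_success_01 above; the file
# name 'example.h5' is illustrative:
#
#     with h5py.File('example.h5', 'w') as f:
#         dset = f.create_dataset('data', shape=(1, 1024), maxshape=(None, 1024))
#         dset.resize(dset.shape[0] + 1, axis=0)  # grow along the unlimited axis
#         dset[1] = np.random.rand(1024)          # fill the newly added row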
| 39.98234
| 124
| 0.593087
| 4,738
| 36,224
| 4.221824
| 0.056986
| 0.059991
| 0.044993
| 0.030395
| 0.849773
| 0.820177
| 0.79708
| 0.763036
| 0.749938
| 0.73584
| 0
| 0.037082
| 0.291271
| 36,224
| 905
| 125
| 40.026519
| 0.742064
| 0.038262
| 0
| 0.703988
| 0
| 0
| 0.053793
| 0.001264
| 0
| 0
| 0
| 0.001105
| 0.226994
| 1
| 0.058282
| false
| 0
| 0.013804
| 0
| 0.07362
| 0.001534
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
00b16f07c262f23c71a57d135003bb5d4c284846
| 2,851
|
py
|
Python
|
CodeForces/EducationalRound73/B.py
|
takaaki82/Java-Lessons
|
c4f11462bf84c091527dde5f25068498bfb2cc49
|
[
"MIT"
] | 1
|
2018-11-25T04:15:45.000Z
|
2018-11-25T04:15:45.000Z
|
CodeForces/EducationalRound73/B.py
|
takaaki82/Java-Lessons
|
c4f11462bf84c091527dde5f25068498bfb2cc49
|
[
"MIT"
] | null | null | null |
CodeForces/EducationalRound73/B.py
|
takaaki82/Java-Lessons
|
c4f11462bf84c091527dde5f25068498bfb2cc49
|
[
"MIT"
] | 2
|
2018-08-08T13:01:14.000Z
|
2018-11-25T12:38:36.000Z
|
# Colour an N x N board with "W"/"B" so that any two cells a knight's move
# apart receive different colours.
N = int(input())
grid = [["*"] * N for _ in range(N)]
grid[0][0] = "W"
# The eight knight-move offsets. (The original spelled these out by hand,
# listed (-1, +2) twice and omitted (-1, -2); the loop below covers all eight.)
KNIGHT_MOVES = ((-2, 1), (-2, -1), (-1, 2), (-1, -2), (1, 2), (1, -2), (2, 1), (2, -1))
for i in range(N):
    for j in range(N):
        if grid[i][j] == "*":
            # Colour an unmarked cell opposite to its left neighbour
            # (or the cell above, in the first column).
            neighbour = grid[i - 1][j] if j == 0 else grid[i][j - 1]
            grid[i][j] = "B" if neighbour == "W" else "W"
        # Pre-colour every still-unmarked knight neighbour with the opposite
        # colour so that no two like-coloured cells attack each other.
        opposite = "B" if grid[i][j] == "W" else "W"
        for di, dj in KNIGHT_MOVES:
            ni, nj = i + di, j + dj
            if 0 <= ni < N and 0 <= nj < N and grid[ni][nj] == "*":
                grid[ni][nj] = opposite
for row in grid:
    print("".join(row))
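# Hedged alternative (an observation about the problem, not part of the
# original submission): a knight move always changes i + j by an odd amount,
# so plain parity colouring also keeps knight-adjacent cells distinct:
#
#     for i in range(N):
#         print("".join("W" if (i + j) % 2 == 0 else "B" for j in range(N)))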
| 38.527027
| 49
| 0.244476
| 435
| 2,851
| 1.6
| 0.043678
| 0.29454
| 0.201149
| 0.195402
| 0.886494
| 0.862069
| 0.862069
| 0.862069
| 0.862069
| 0.862069
| 0
| 0.102465
| 0.544721
| 2,851
| 73
| 50
| 39.054795
| 0.433744
| 0
| 0
| 0.685714
| 0
| 0
| 0.014732
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.028571
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
00d37eaf2e152000bf2c3d503f3ab04f61ff6ea3
| 36
|
py
|
Python
|
frappe/patches/v4_0/remove_index_sitemap.py
|
pawaranand/phr_frappe
|
d997ae7d6fbade4b2c4a2491603d988876dfd67e
|
[
"MIT"
] | 1
|
2022-03-05T16:02:39.000Z
|
2022-03-05T16:02:39.000Z
|
frappe/patches/v4_0/remove_index_sitemap.py
|
pawaranand/phr_frappe
|
d997ae7d6fbade4b2c4a2491603d988876dfd67e
|
[
"MIT"
] | 1
|
2015-07-11T20:52:38.000Z
|
2019-12-06T15:00:58.000Z
|
frappe/patches/v4_0/remove_index_sitemap.py
|
pawaranand/phr_frappe
|
d997ae7d6fbade4b2c4a2491603d988876dfd67e
|
[
"MIT"
] | 2
|
2015-09-05T05:30:23.000Z
|
2018-03-21T19:45:10.000Z
|
import frappe
def execute():
pass
| 7.2
| 14
| 0.722222
| 5
| 36
| 5.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.194444
| 36
| 4
| 15
| 9
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 8
|
da9c410d602b9ec28b6c717a86a0c174065fc8dc
| 36
|
py
|
Python
|
reporter/__init__.py
|
eaingaran/TimeMachine
|
f6199827ffc358dd32f26edd8d68e2dbf7c63a90
|
[
"MIT"
] | null | null | null |
reporter/__init__.py
|
eaingaran/TimeMachine
|
f6199827ffc358dd32f26edd8d68e2dbf7c63a90
|
[
"MIT"
] | null | null | null |
reporter/__init__.py
|
eaingaran/TimeMachine
|
f6199827ffc358dd32f26edd8d68e2dbf7c63a90
|
[
"MIT"
] | null | null | null |
from reporter import GenerateReport
| 18
| 35
| 0.888889
| 4
| 36
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
daaf9ad2bbd982879bb0f1d6c500eb2886d1827f
| 18,405
|
py
|
Python
|
sdk/python/pulumi_buildkite/team.py
|
grapl-security/pulumi-buildkite
|
f801ecb661d82da6b939b13f5520038e3b6e891f
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_buildkite/team.py
|
grapl-security/pulumi-buildkite
|
f801ecb661d82da6b939b13f5520038e3b6e891f
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_buildkite/team.py
|
grapl-security/pulumi-buildkite
|
f801ecb661d82da6b939b13f5520038e3b6e891f
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['TeamArgs', 'Team']
@pulumi.input_type
class TeamArgs:
def __init__(__self__, *,
default_member_role: pulumi.Input[str],
default_team: pulumi.Input[bool],
privacy: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
members_can_create_pipelines: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Team resource.
:param pulumi.Input[str] default_member_role: Default role to assign to a team member.
:param pulumi.Input[bool] default_team: Whether to assign this team to a user by default.
:param pulumi.Input[str] privacy: The privacy level to set the team to.
:param pulumi.Input[str] description: The description to assign to the team.
:param pulumi.Input[bool] members_can_create_pipelines: Whether team members can create pipelines.
:param pulumi.Input[str] name: The name of the team.
"""
pulumi.set(__self__, "default_member_role", default_member_role)
pulumi.set(__self__, "default_team", default_team)
pulumi.set(__self__, "privacy", privacy)
if description is not None:
pulumi.set(__self__, "description", description)
if members_can_create_pipelines is not None:
pulumi.set(__self__, "members_can_create_pipelines", members_can_create_pipelines)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="defaultMemberRole")
def default_member_role(self) -> pulumi.Input[str]:
"""
Default role to assign to a team member.
"""
return pulumi.get(self, "default_member_role")
@default_member_role.setter
def default_member_role(self, value: pulumi.Input[str]):
pulumi.set(self, "default_member_role", value)
@property
@pulumi.getter(name="defaultTeam")
def default_team(self) -> pulumi.Input[bool]:
"""
Whether to assign this team to a user by default.
"""
return pulumi.get(self, "default_team")
@default_team.setter
def default_team(self, value: pulumi.Input[bool]):
pulumi.set(self, "default_team", value)
@property
@pulumi.getter
def privacy(self) -> pulumi.Input[str]:
"""
The privacy level to set the team to.
"""
return pulumi.get(self, "privacy")
@privacy.setter
def privacy(self, value: pulumi.Input[str]):
pulumi.set(self, "privacy", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description to assign to the team.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="membersCanCreatePipelines")
def members_can_create_pipelines(self) -> Optional[pulumi.Input[bool]]:
"""
Whether team members can create pipelines.
"""
return pulumi.get(self, "members_can_create_pipelines")
@members_can_create_pipelines.setter
def members_can_create_pipelines(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "members_can_create_pipelines", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the team.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class _TeamState:
def __init__(__self__, *,
default_member_role: Optional[pulumi.Input[str]] = None,
default_team: Optional[pulumi.Input[bool]] = None,
description: Optional[pulumi.Input[str]] = None,
members_can_create_pipelines: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
privacy: Optional[pulumi.Input[str]] = None,
slug: Optional[pulumi.Input[str]] = None,
uuid: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Team resources.
:param pulumi.Input[str] default_member_role: Default role to assign to a team member.
:param pulumi.Input[bool] default_team: Whether to assign this team to a user by default.
:param pulumi.Input[str] description: The description to assign to the team.
:param pulumi.Input[bool] members_can_create_pipelines: Whether team members can create pipelines.
:param pulumi.Input[str] name: The name of the team.
:param pulumi.Input[str] privacy: The privacy level to set the team to.
:param pulumi.Input[str] slug: The name of the team.
:param pulumi.Input[str] uuid: The UUID for the team.
"""
if default_member_role is not None:
pulumi.set(__self__, "default_member_role", default_member_role)
if default_team is not None:
pulumi.set(__self__, "default_team", default_team)
if description is not None:
pulumi.set(__self__, "description", description)
if members_can_create_pipelines is not None:
pulumi.set(__self__, "members_can_create_pipelines", members_can_create_pipelines)
if name is not None:
pulumi.set(__self__, "name", name)
if privacy is not None:
pulumi.set(__self__, "privacy", privacy)
if slug is not None:
pulumi.set(__self__, "slug", slug)
if uuid is not None:
pulumi.set(__self__, "uuid", uuid)
@property
@pulumi.getter(name="defaultMemberRole")
def default_member_role(self) -> Optional[pulumi.Input[str]]:
"""
Default role to assign to a team member.
"""
return pulumi.get(self, "default_member_role")
@default_member_role.setter
def default_member_role(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "default_member_role", value)
@property
@pulumi.getter(name="defaultTeam")
def default_team(self) -> Optional[pulumi.Input[bool]]:
"""
Whether to assign this team to a user by default.
"""
return pulumi.get(self, "default_team")
@default_team.setter
def default_team(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "default_team", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description to assign to the team.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="membersCanCreatePipelines")
def members_can_create_pipelines(self) -> Optional[pulumi.Input[bool]]:
"""
Whether team members can create pipelines.
"""
return pulumi.get(self, "members_can_create_pipelines")
@members_can_create_pipelines.setter
def members_can_create_pipelines(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "members_can_create_pipelines", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the team.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def privacy(self) -> Optional[pulumi.Input[str]]:
"""
The privacy level to set the team to.
"""
return pulumi.get(self, "privacy")
@privacy.setter
def privacy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "privacy", value)
@property
@pulumi.getter
def slug(self) -> Optional[pulumi.Input[str]]:
"""
The name of the team.
"""
return pulumi.get(self, "slug")
@slug.setter
def slug(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "slug", value)
@property
@pulumi.getter
def uuid(self) -> Optional[pulumi.Input[str]]:
"""
The UUID for the team.
"""
return pulumi.get(self, "uuid")
@uuid.setter
def uuid(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "uuid", value)
class Team(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
default_member_role: Optional[pulumi.Input[str]] = None,
default_team: Optional[pulumi.Input[bool]] = None,
description: Optional[pulumi.Input[str]] = None,
members_can_create_pipelines: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
privacy: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
## # Resource: team
This resource allows you to create and manage teams.
Buildkite Documentation: https://buildkite.com/docs/pipelines/permissions
Note: You must first enable Teams on your organization.
## Example Usage
```python
import pulumi
import pulumi_buildkite as buildkite
team = buildkite.Team("team",
default_member_role="MEMBER",
default_team=True,
privacy="VISIBLE")
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] default_member_role: Default role to assign to a team member.
:param pulumi.Input[bool] default_team: Whether to assign this team to a user by default.
:param pulumi.Input[str] description: The description to assign to the team.
:param pulumi.Input[bool] members_can_create_pipelines: Whether team members can create pipelines.
:param pulumi.Input[str] name: The name of the team.
:param pulumi.Input[str] privacy: The privacy level to set the team to.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: TeamArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## # Resource: team
This resource allows you to create and manage teams.
Buildkite Documentation: https://buildkite.com/docs/pipelines/permissions
Note: You must first enable Teams on your organization.
## Example Usage
```python
import pulumi
import pulumi_buildkite as buildkite
team = buildkite.Team("team",
default_member_role="MEMBER",
default_team=True,
privacy="VISIBLE")
```
:param str resource_name: The name of the resource.
:param TeamArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TeamArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
default_member_role: Optional[pulumi.Input[str]] = None,
default_team: Optional[pulumi.Input[bool]] = None,
description: Optional[pulumi.Input[str]] = None,
members_can_create_pipelines: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
privacy: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.plugin_download_url is None:
opts.plugin_download_url = _utilities.get_plugin_download_url()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = TeamArgs.__new__(TeamArgs)
if default_member_role is None and not opts.urn:
raise TypeError("Missing required property 'default_member_role'")
__props__.__dict__["default_member_role"] = default_member_role
if default_team is None and not opts.urn:
raise TypeError("Missing required property 'default_team'")
__props__.__dict__["default_team"] = default_team
__props__.__dict__["description"] = description
__props__.__dict__["members_can_create_pipelines"] = members_can_create_pipelines
__props__.__dict__["name"] = name
if privacy is None and not opts.urn:
raise TypeError("Missing required property 'privacy'")
__props__.__dict__["privacy"] = privacy
__props__.__dict__["slug"] = None
__props__.__dict__["uuid"] = None
super(Team, __self__).__init__(
'buildkite:index/team:Team',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
default_member_role: Optional[pulumi.Input[str]] = None,
default_team: Optional[pulumi.Input[bool]] = None,
description: Optional[pulumi.Input[str]] = None,
members_can_create_pipelines: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
privacy: Optional[pulumi.Input[str]] = None,
slug: Optional[pulumi.Input[str]] = None,
uuid: Optional[pulumi.Input[str]] = None) -> 'Team':
"""
Get an existing Team resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] default_member_role: Default role to assign to a team member.
:param pulumi.Input[bool] default_team: Whether to assign this team to a user by default.
:param pulumi.Input[str] description: The description to assign to the team.
:param pulumi.Input[bool] members_can_create_pipelines: Whether team members can create pipelines.
:param pulumi.Input[str] name: The name of the team.
:param pulumi.Input[str] privacy: The privacy level to set the team to.
:param pulumi.Input[str] slug: The name of the team.
:param pulumi.Input[str] uuid: The UUID for the team.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _TeamState.__new__(_TeamState)
__props__.__dict__["default_member_role"] = default_member_role
__props__.__dict__["default_team"] = default_team
__props__.__dict__["description"] = description
__props__.__dict__["members_can_create_pipelines"] = members_can_create_pipelines
__props__.__dict__["name"] = name
__props__.__dict__["privacy"] = privacy
__props__.__dict__["slug"] = slug
__props__.__dict__["uuid"] = uuid
return Team(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="defaultMemberRole")
def default_member_role(self) -> pulumi.Output[str]:
"""
Default role to assign to a team member.
"""
return pulumi.get(self, "default_member_role")
@property
@pulumi.getter(name="defaultTeam")
def default_team(self) -> pulumi.Output[bool]:
"""
Whether to assign this team to a user by default.
"""
return pulumi.get(self, "default_team")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
The description to assign to the team.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="membersCanCreatePipelines")
def members_can_create_pipelines(self) -> pulumi.Output[Optional[bool]]:
"""
Whether team members can create pipelines.
"""
return pulumi.get(self, "members_can_create_pipelines")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the team.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def privacy(self) -> pulumi.Output[str]:
"""
The privacy level to set the team to.
"""
return pulumi.get(self, "privacy")
@property
@pulumi.getter
def slug(self) -> pulumi.Output[str]:
"""
The name of the team.
"""
return pulumi.get(self, "slug")
@property
@pulumi.getter
def uuid(self) -> pulumi.Output[str]:
"""
The UUID for the team.
"""
return pulumi.get(self, "uuid")
| 38.34375
| 134
| 0.629394
| 2,157
| 18,405
| 5.122856
| 0.077886
| 0.093575
| 0.08362
| 0.075656
| 0.844344
| 0.823891
| 0.792127
| 0.762986
| 0.753665
| 0.7381
| 0
| 0.000074
| 0.26721
| 18,405
| 479
| 135
| 38.4238
| 0.819233
| 0.249878
| 0
| 0.659259
| 1
| 0
| 0.098952
| 0.029386
| 0
| 0
| 0
| 0
| 0
| 1
| 0.159259
| false
| 0.003704
| 0.018519
| 0
| 0.274074
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
dae7ac093bb2806c2693266b9bf2b6b2ab7584bc
| 9,341
|
py
|
Python
|
whatthefood/tests/test_ops.py
|
lychanl/WhatTheFood
|
94b6eec2c306e7e55b19395cde207d6e6beec7fe
|
[
"MIT"
] | null | null | null |
whatthefood/tests/test_ops.py
|
lychanl/WhatTheFood
|
94b6eec2c306e7e55b19395cde207d6e6beec7fe
|
[
"MIT"
] | null | null | null |
whatthefood/tests/test_ops.py
|
lychanl/WhatTheFood
|
94b6eec2c306e7e55b19395cde207d6e6beec7fe
|
[
"MIT"
] | null | null | null |
import unittest
import numpy as np
import whatthefood.graph as graph
class TestOps(unittest.TestCase):
def test_matmul(self):
x_arr = np.array([[1, 2], [2, 3], [3, 4]])
y_arr = np.array([[1, 2, 3, 4], [4, 5, 6, 7]])
x = graph.Constant(x_arr)
y = graph.Constant(y_arr)
m = graph.Matmul(x, y)
np.testing.assert_array_equal(graph.run(m), np.matmul(x_arr, y_arr))
def test_matmul_grad(self):
x_arr = np.array([[1, 2], [2, 3], [3, 4]])
y_arr = np.array([[1, 2, 3, 4], [4, 5, 6, 7]])
x = graph.Constant(x_arr)
y = graph.Constant(y_arr)
m = graph.Matmul(x, y)
g = graph.Grad(m, [x, y])
mv, (g_x, g_y) = graph.run((m, g))
self.assertSequenceEqual(g_x.shape, x.shape)
self.assertSequenceEqual(g_y.shape, y.shape)
np.testing.assert_array_equal(g_x, np.matmul(np.ones_like(mv), y_arr.T))
np.testing.assert_array_equal(g_y, np.matmul(x_arr.T, np.ones_like(mv)))
def test_matmul_vec(self):
x = graph.Constant([1, 2, 3])
y = graph.Constant([[1, 2], [1, 3], [2, 4]])
m = graph.Matmul(x, y)
np.testing.assert_array_equal([9, 20], graph.run(m))
def test_matmul_vec_grad(self):
x = graph.Constant([1, 2, 3])
y = graph.Constant([[1, 2], [1, 3], [2, 4]])
m = graph.Matmul(x, y)
g = graph.Grad(m, [x, y])
g_x, g_y = graph.run(g)
np.testing.assert_array_equal([3, 4, 6], g_x)
np.testing.assert_array_equal([[1, 1], [2, 2], [3, 3]], g_y)
def test_reduce_sum(self):
x = graph.Constant([[[1], [2]], [[3], [4]], [[5], [6]]])
y1 = graph.ReduceSum(x, axis=0)
y2 = graph.ReduceSum(x, axis=(1, -1))
y3 = graph.ReduceSum(x)
np.testing.assert_array_equal([[9], [12]], graph.run(y1))
np.testing.assert_array_equal([3, 7, 11], graph.run(y2))
self.assertEqual(21, graph.run(y3))
def test_reduce_sum_batched(self):
x_arr = np.array([[[1], [2]], [[3], [4]], [[5], [6]]])
y_arr = np.array([9, 12])
x = graph.Placeholder(shape=(3, 2, 1), batched=True)
y1 = graph.ReduceSum(x, (0, 2), True)
y2 = graph.ReduceSum(x, (0, 2), False)
np.testing.assert_array_equal(y_arr * 3, graph.run(y1, {x: np.array([x_arr, 2 * x_arr])}))
np.testing.assert_array_equal([y_arr, 2 * y_arr], graph.run(y2, {x: np.array([x_arr, 2 * x_arr])}))
def test_reduce_sum_grad(self):
x = graph.Constant([[[1], [2]], [[3], [4]], [[5], [6]]])
y1 = graph.ReduceSum(x, axis=0)
y2 = graph.ReduceSum(x, axis=(1, -1))
y3 = graph.ReduceSum(x)
g1 = graph.Grad(y1, x)
g2 = graph.Grad(y2, x)
g3 = graph.Grad(y3, x)
np.testing.assert_array_equal(np.ones_like(x.value), graph.run(g1))
np.testing.assert_array_equal(np.ones_like(x.value), graph.run(g2))
np.testing.assert_array_equal(np.ones_like(x.value), graph.run(g3))
def test_reduce_sum_grad_batched(self):
x_arr = np.array([[[1], [2]], [[3], [4]], [[5], [6]]])
x = graph.Placeholder(shape=(3, 2, 1), batched=True)
y1 = graph.ReduceSum(x, (0, 2), True)
y2 = graph.ReduceSum(x, (0, 2), False)
g1 = graph.Grad(y1, x)
g2 = graph.Grad(y2, x)
np.testing.assert_array_equal(
[np.ones_like(x_arr), np.ones_like(x_arr)],
graph.run(g1, {x: np.array([x_arr, 2 * x_arr])}))
np.testing.assert_array_equal(
[np.ones_like(x_arr), np.ones_like(x_arr)],
graph.run(g2, {x: np.array([x_arr, 2 * x_arr])}))
def test_reduce_mean(self):
x = graph.Constant([[[1], [2]], [[3], [4]], [[5], [6]]])
y1 = graph.ReduceMean(x, axis=0)
y2 = graph.ReduceMean(x, axis=(1, -1))
y3 = graph.ReduceMean(x)
np.testing.assert_array_equal([[3], [4]], graph.run(y1))
np.testing.assert_array_equal([1.5, 3.5, 5.5], graph.run(y2))
self.assertEqual(3.5, graph.run(y3))
def test_reduce_mean_batched(self):
x_arr = np.array([[[1], [2]], [[3], [4]], [[5], [6]]])
y_arr = np.array([3, 4])
x = graph.Placeholder(shape=(3, 2, 1), batched=True)
y1 = graph.ReduceMean(x, (0, 2), True)
y2 = graph.ReduceMean(x, (0, 2), False)
np.testing.assert_array_equal(y_arr * 1.5, graph.run(y1, {x: np.array([x_arr, 2 * x_arr])}))
np.testing.assert_array_equal([y_arr, 2 * y_arr], graph.run(y2, {x: np.array([x_arr, 2 * x_arr])}))
def test_reduce_mean_grad(self):
x = graph.Constant([[[1], [2]], [[3], [4]], [[5], [6]]])
y1 = graph.ReduceMean(x, axis=0)
y2 = graph.ReduceMean(x, axis=(1, -1))
y3 = graph.ReduceMean(x)
g1 = graph.Grad(y1, x)
g2 = graph.Grad(y2, x)
g3 = graph.Grad(y3, x)
np.testing.assert_array_equal(np.ones_like(x.value) / 3, graph.run(g1))
np.testing.assert_array_equal(np.ones_like(x.value) / 2, graph.run(g2))
np.testing.assert_array_equal(np.ones_like(x.value) / 6, graph.run(g3))
def test_reduce_mean_grad_batched(self):
x_arr = np.array([[[1], [2]], [[3], [4]], [[5], [6]]])
x = graph.Placeholder(shape=(3, 2, 1), batched=True)
y1 = graph.ReduceMean(x, (0, 2), True)
y2 = graph.ReduceMean(x, (0, 2), False)
g1 = graph.Grad(y1, x)
g2 = graph.Grad(y2, x)
np.testing.assert_array_equal(
[np.ones_like(x_arr) / 6, np.ones_like(x_arr) / 6],
graph.run(g1, {x: np.array([x_arr, 2 * x_arr])}))
np.testing.assert_array_equal(
[np.ones_like(x_arr) / 3, np.ones_like(x_arr) / 3],
graph.run(g2, {x: np.array([x_arr, 2 * x_arr])}))
def test_slice(self):
x_arr = [[1, 2, 3], [4, 5, 6]]
x = graph.Constant(x_arr)
y1 = graph.Slice(x, (0, 1), (2, 2))
np.testing.assert_array_equal([[2], [5]], graph.run(y1))
def test_slice_batched(self):
x_arr = np.array([[1, 2, 3], [4, 5, 6]])
x = graph.Placeholder((2, 3), True)
y1 = graph.Slice(x, (0, 1), (2, 2))
np.testing.assert_array_equal(
[[[2], [5]], [[-2], [-5]]],
graph.run(y1, {x: np.array([x_arr, -x_arr])}))
def test_slice_grad(self):
x_arr = [[1, 2, 3], [4, 5, 6]]
x = graph.Constant(x_arr)
y1 = graph.Slice(x, (0, 1), (2, 2))
g1 = graph.Grad(y1, x)
np.testing.assert_array_equal([[0, 1, 0], [0, 1, 0]], graph.run(g1))
def test_slice_grad_batched(self):
x_arr = np.array([[1, 2, 3], [4, 5, 6]])
x = graph.Placeholder((2, 3), True)
y1 = graph.Slice(x, (0, 1), (2, 2))
g1 = graph.Grad(y1, x)
np.testing.assert_array_equal(
[[[0, 1, 0], [0, 1, 0]], [[0, 1, 0], [0, 1, 0]]],
graph.run(g1, {x: np.array([x_arr, -x_arr])}))
def test_concatenate(self):
x1 = graph.Constant([[1, 2, 3], [4, 5, 6]])
x2 = graph.Constant([[7, 8], [9, 10]])
y = graph.Concatenate((x1, x2), axis=1)
np.testing.assert_array_equal([[1, 2, 3, 7, 8], [4, 5, 6, 9, 10]], graph.run(y))
def test_concatenate_batched(self):
x1_arr = np.array([[1, 2, 3], [4, 5, 6]])
x2_arr = np.array([[7, 8], [9, 10]])
x1 = graph.Placeholder(x1_arr.shape, batched=True)
x2 = graph.Placeholder(x2_arr.shape, batched=True)
y = graph.Concatenate((x1, x2), axis=-1)
np.testing.assert_array_equal(
[[[1, 2, 3, -7, -8], [4, 5, 6, -9, -10]], [[-1, -2, -3, 7, 8], [-4, -5, -6, 9, 10]]],
graph.run(y, {x1: np.array([x1_arr, -x1_arr]), x2: np.array([-x2_arr, x2_arr])}))
def test_concatenate_grad(self):
x1 = graph.Constant([[1, 2, 3], [4, 5, 6]])
x2 = graph.Constant([[7, 8], [9, 10]])
y = graph.Concatenate((x1, x2), axis=1)
g = graph.Grad(y, (x1, x2))
g1, g2 = graph.run(g)
np.testing.assert_array_equal(np.ones_like(x1.value), g1)
np.testing.assert_array_equal(np.ones_like(x2.value), g2)
def test_multiply(self):
x1 = graph.Constant([[1], [2], [3], [4]])
x2 = graph.Constant([[1, -1], [2, -2], [3, -3], [4, -4]])
y = graph.Multiply(x1, x2)
np.testing.assert_array_equal([[1, -1], [4, -4], [9, -9], [16, -16]], graph.run(y))
def test_multiply_grad(self):
x1 = graph.Constant([[1], [2], [3], [4]])
x2 = graph.Constant([[1, -1], [2, -2], [3, -3], [4, -4]])
y = graph.Multiply(x1, x2)
g = graph.Grad(y, (x1, x2))
g1, g2 = graph.run(g)
np.testing.assert_array_equal([[0], [0], [0], [0]], g1)
np.testing.assert_array_equal([[1, 1], [2, 2], [3, 3], [4, 4]], g2)
def test_divide(self):
x1 = graph.Constant([1, 2, 3, 4])
x2 = graph.Constant([4, 3, 2, 1])
y = graph.Divide(x1, x2)
np.testing.assert_array_equal([1/4, 2/3, 3/2, 4], graph.run(y))
def test_divide_grad(self):
x1 = graph.Constant([1, 2, 3, 4])
x2 = graph.Constant([4, 3, 2, 1])
y = graph.Divide(x1, x2)
g = graph.Grad(y, (x1, x2))
g1, g2 = graph.run(g)
np.testing.assert_array_equal([1/4, 1/3, 1/2, 1], g1)
np.testing.assert_array_equal([-1/16, -2/9, -3/4, -4], g2)
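# Hedged worked check (not from the original file) of test_divide_grad: for
# y = x1 / x2, dy/dx1 = 1 / x2 and dy/dx2 = -x1 / x2**2. With x2 = [4, 3, 2, 1]
# the first gradient is [1/4, 1/3, 1/2, 1]; with x1 = [1, 2, 3, 4] the second
# is [-1/16, -2/9, -3/4, -4], matching the assertions above.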
| 36.065637 | 107 | 0.52853 | 1,533 | 9,341 | 3.076321 | 0.048271 | 0.03732 | 0.120865 | 0.161154 | 0.859415 | 0.833969 | 0.78838 | 0.774597 | 0.738126 | 0.708227 | 0 | 0.078281 | 0.260143 | 9,341 | 258 | 108 | 36.205426 | 0.604109 | 0 | 0 | 0.594737 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.221053 | 1 | 0.121053 | false | 0 | 0.015789 | 0 | 0.142105 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
970f7b10a07b98a1a06557eff5b9b16e7aa0fdb8 | 59,592 | py | Python | sdk/python/pulumi_aws_native/elasticloadbalancingv2/outputs.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | ["Apache-2.0"] | 29 | 2021-09-30T19:32:07.000Z | 2022-03-22T21:06:08.000Z | sdk/python/pulumi_aws_native/elasticloadbalancingv2/outputs.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | ["Apache-2.0"] | 232 | 2021-09-30T19:26:26.000Z | 2022-03-31T23:22:06.000Z | sdk/python/pulumi_aws_native/elasticloadbalancingv2/outputs.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | ["Apache-2.0"] | 4 | 2021-11-10T19:42:01.000Z | 2022-02-05T10:15:49.000Z |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'ListenerAction',
'ListenerAuthenticateCognitoConfig',
'ListenerAuthenticateOidcConfig',
'ListenerCertificate',
'ListenerCertificateCertificate',
'ListenerFixedResponseConfig',
'ListenerForwardConfig',
'ListenerRedirectConfig',
'ListenerRuleAction',
'ListenerRuleAuthenticateCognitoConfig',
'ListenerRuleAuthenticateOidcConfig',
'ListenerRuleFixedResponseConfig',
'ListenerRuleForwardConfig',
'ListenerRuleHostHeaderConfig',
'ListenerRuleHttpHeaderConfig',
'ListenerRuleHttpRequestMethodConfig',
'ListenerRulePathPatternConfig',
'ListenerRuleQueryStringConfig',
'ListenerRuleQueryStringKeyValue',
'ListenerRuleRedirectConfig',
'ListenerRuleRuleCondition',
'ListenerRuleSourceIpConfig',
'ListenerRuleTargetGroupStickinessConfig',
'ListenerRuleTargetGroupTuple',
'ListenerTargetGroupStickinessConfig',
'ListenerTargetGroupTuple',
'LoadBalancerAttribute',
'LoadBalancerSubnetMapping',
'LoadBalancerTag',
'TargetGroupAttribute',
'TargetGroupMatcher',
'TargetGroupTag',
'TargetGroupTargetDescription',
]
@pulumi.output_type
class ListenerAction(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "authenticateCognitoConfig":
suggest = "authenticate_cognito_config"
elif key == "authenticateOidcConfig":
suggest = "authenticate_oidc_config"
elif key == "fixedResponseConfig":
suggest = "fixed_response_config"
elif key == "forwardConfig":
suggest = "forward_config"
elif key == "redirectConfig":
suggest = "redirect_config"
elif key == "targetGroupArn":
suggest = "target_group_arn"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ListenerAction. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ListenerAction.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ListenerAction.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
type: str,
authenticate_cognito_config: Optional['outputs.ListenerAuthenticateCognitoConfig'] = None,
authenticate_oidc_config: Optional['outputs.ListenerAuthenticateOidcConfig'] = None,
fixed_response_config: Optional['outputs.ListenerFixedResponseConfig'] = None,
forward_config: Optional['outputs.ListenerForwardConfig'] = None,
order: Optional[int] = None,
redirect_config: Optional['outputs.ListenerRedirectConfig'] = None,
target_group_arn: Optional[str] = None):
pulumi.set(__self__, "type", type)
if authenticate_cognito_config is not None:
pulumi.set(__self__, "authenticate_cognito_config", authenticate_cognito_config)
if authenticate_oidc_config is not None:
pulumi.set(__self__, "authenticate_oidc_config", authenticate_oidc_config)
if fixed_response_config is not None:
pulumi.set(__self__, "fixed_response_config", fixed_response_config)
if forward_config is not None:
pulumi.set(__self__, "forward_config", forward_config)
if order is not None:
pulumi.set(__self__, "order", order)
if redirect_config is not None:
pulumi.set(__self__, "redirect_config", redirect_config)
if target_group_arn is not None:
pulumi.set(__self__, "target_group_arn", target_group_arn)
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
@property
@pulumi.getter(name="authenticateCognitoConfig")
def authenticate_cognito_config(self) -> Optional['outputs.ListenerAuthenticateCognitoConfig']:
return pulumi.get(self, "authenticate_cognito_config")
@property
@pulumi.getter(name="authenticateOidcConfig")
def authenticate_oidc_config(self) -> Optional['outputs.ListenerAuthenticateOidcConfig']:
return pulumi.get(self, "authenticate_oidc_config")
@property
@pulumi.getter(name="fixedResponseConfig")
def fixed_response_config(self) -> Optional['outputs.ListenerFixedResponseConfig']:
return pulumi.get(self, "fixed_response_config")
@property
@pulumi.getter(name="forwardConfig")
def forward_config(self) -> Optional['outputs.ListenerForwardConfig']:
return pulumi.get(self, "forward_config")
@property
@pulumi.getter
def order(self) -> Optional[int]:
return pulumi.get(self, "order")
@property
@pulumi.getter(name="redirectConfig")
def redirect_config(self) -> Optional['outputs.ListenerRedirectConfig']:
return pulumi.get(self, "redirect_config")
@property
@pulumi.getter(name="targetGroupArn")
def target_group_arn(self) -> Optional[str]:
return pulumi.get(self, "target_group_arn")
@pulumi.output_type
class ListenerAuthenticateCognitoConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "userPoolArn":
suggest = "user_pool_arn"
elif key == "userPoolClientId":
suggest = "user_pool_client_id"
elif key == "userPoolDomain":
suggest = "user_pool_domain"
elif key == "authenticationRequestExtraParams":
suggest = "authentication_request_extra_params"
elif key == "onUnauthenticatedRequest":
suggest = "on_unauthenticated_request"
elif key == "sessionCookieName":
suggest = "session_cookie_name"
elif key == "sessionTimeout":
suggest = "session_timeout"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ListenerAuthenticateCognitoConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ListenerAuthenticateCognitoConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ListenerAuthenticateCognitoConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
user_pool_arn: str,
user_pool_client_id: str,
user_pool_domain: str,
authentication_request_extra_params: Optional[Any] = None,
on_unauthenticated_request: Optional[str] = None,
scope: Optional[str] = None,
session_cookie_name: Optional[str] = None,
session_timeout: Optional[str] = None):
pulumi.set(__self__, "user_pool_arn", user_pool_arn)
pulumi.set(__self__, "user_pool_client_id", user_pool_client_id)
pulumi.set(__self__, "user_pool_domain", user_pool_domain)
if authentication_request_extra_params is not None:
pulumi.set(__self__, "authentication_request_extra_params", authentication_request_extra_params)
if on_unauthenticated_request is not None:
pulumi.set(__self__, "on_unauthenticated_request", on_unauthenticated_request)
if scope is not None:
pulumi.set(__self__, "scope", scope)
if session_cookie_name is not None:
pulumi.set(__self__, "session_cookie_name", session_cookie_name)
if session_timeout is not None:
pulumi.set(__self__, "session_timeout", session_timeout)
@property
@pulumi.getter(name="userPoolArn")
def user_pool_arn(self) -> str:
return pulumi.get(self, "user_pool_arn")
@property
@pulumi.getter(name="userPoolClientId")
def user_pool_client_id(self) -> str:
return pulumi.get(self, "user_pool_client_id")
@property
@pulumi.getter(name="userPoolDomain")
def user_pool_domain(self) -> str:
return pulumi.get(self, "user_pool_domain")
@property
@pulumi.getter(name="authenticationRequestExtraParams")
def authentication_request_extra_params(self) -> Optional[Any]:
return pulumi.get(self, "authentication_request_extra_params")
@property
@pulumi.getter(name="onUnauthenticatedRequest")
def on_unauthenticated_request(self) -> Optional[str]:
return pulumi.get(self, "on_unauthenticated_request")
@property
@pulumi.getter
def scope(self) -> Optional[str]:
return pulumi.get(self, "scope")
@property
@pulumi.getter(name="sessionCookieName")
def session_cookie_name(self) -> Optional[str]:
return pulumi.get(self, "session_cookie_name")
@property
@pulumi.getter(name="sessionTimeout")
def session_timeout(self) -> Optional[str]:
return pulumi.get(self, "session_timeout")
@pulumi.output_type
class ListenerAuthenticateOidcConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "authorizationEndpoint":
suggest = "authorization_endpoint"
elif key == "clientId":
suggest = "client_id"
elif key == "clientSecret":
suggest = "client_secret"
elif key == "tokenEndpoint":
suggest = "token_endpoint"
elif key == "userInfoEndpoint":
suggest = "user_info_endpoint"
elif key == "authenticationRequestExtraParams":
suggest = "authentication_request_extra_params"
elif key == "onUnauthenticatedRequest":
suggest = "on_unauthenticated_request"
elif key == "sessionCookieName":
suggest = "session_cookie_name"
elif key == "sessionTimeout":
suggest = "session_timeout"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ListenerAuthenticateOidcConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ListenerAuthenticateOidcConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ListenerAuthenticateOidcConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
authorization_endpoint: str,
client_id: str,
client_secret: str,
issuer: str,
token_endpoint: str,
user_info_endpoint: str,
authentication_request_extra_params: Optional[Any] = None,
on_unauthenticated_request: Optional[str] = None,
scope: Optional[str] = None,
session_cookie_name: Optional[str] = None,
session_timeout: Optional[str] = None):
pulumi.set(__self__, "authorization_endpoint", authorization_endpoint)
pulumi.set(__self__, "client_id", client_id)
pulumi.set(__self__, "client_secret", client_secret)
pulumi.set(__self__, "issuer", issuer)
pulumi.set(__self__, "token_endpoint", token_endpoint)
pulumi.set(__self__, "user_info_endpoint", user_info_endpoint)
if authentication_request_extra_params is not None:
pulumi.set(__self__, "authentication_request_extra_params", authentication_request_extra_params)
if on_unauthenticated_request is not None:
pulumi.set(__self__, "on_unauthenticated_request", on_unauthenticated_request)
if scope is not None:
pulumi.set(__self__, "scope", scope)
if session_cookie_name is not None:
pulumi.set(__self__, "session_cookie_name", session_cookie_name)
if session_timeout is not None:
pulumi.set(__self__, "session_timeout", session_timeout)
@property
@pulumi.getter(name="authorizationEndpoint")
def authorization_endpoint(self) -> str:
return pulumi.get(self, "authorization_endpoint")
@property
@pulumi.getter(name="clientId")
def client_id(self) -> str:
return pulumi.get(self, "client_id")
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> str:
return pulumi.get(self, "client_secret")
@property
@pulumi.getter
def issuer(self) -> str:
return pulumi.get(self, "issuer")
@property
@pulumi.getter(name="tokenEndpoint")
def token_endpoint(self) -> str:
return pulumi.get(self, "token_endpoint")
@property
@pulumi.getter(name="userInfoEndpoint")
def user_info_endpoint(self) -> str:
return pulumi.get(self, "user_info_endpoint")
@property
@pulumi.getter(name="authenticationRequestExtraParams")
def authentication_request_extra_params(self) -> Optional[Any]:
return pulumi.get(self, "authentication_request_extra_params")
@property
@pulumi.getter(name="onUnauthenticatedRequest")
def on_unauthenticated_request(self) -> Optional[str]:
return pulumi.get(self, "on_unauthenticated_request")
@property
@pulumi.getter
def scope(self) -> Optional[str]:
return pulumi.get(self, "scope")
@property
@pulumi.getter(name="sessionCookieName")
def session_cookie_name(self) -> Optional[str]:
return pulumi.get(self, "session_cookie_name")
@property
@pulumi.getter(name="sessionTimeout")
def session_timeout(self) -> Optional[str]:
return pulumi.get(self, "session_timeout")
@pulumi.output_type
class ListenerCertificate(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "certificateArn":
suggest = "certificate_arn"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ListenerCertificate. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ListenerCertificate.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ListenerCertificate.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
certificate_arn: Optional[str] = None):
if certificate_arn is not None:
pulumi.set(__self__, "certificate_arn", certificate_arn)
@property
@pulumi.getter(name="certificateArn")
def certificate_arn(self) -> Optional[str]:
return pulumi.get(self, "certificate_arn")
@pulumi.output_type
class ListenerCertificateCertificate(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "certificateArn":
suggest = "certificate_arn"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ListenerCertificateCertificate. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ListenerCertificateCertificate.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ListenerCertificateCertificate.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
certificate_arn: Optional[str] = None):
if certificate_arn is not None:
pulumi.set(__self__, "certificate_arn", certificate_arn)
@property
@pulumi.getter(name="certificateArn")
def certificate_arn(self) -> Optional[str]:
return pulumi.get(self, "certificate_arn")
@pulumi.output_type
class ListenerFixedResponseConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "statusCode":
suggest = "status_code"
elif key == "contentType":
suggest = "content_type"
elif key == "messageBody":
suggest = "message_body"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ListenerFixedResponseConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ListenerFixedResponseConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ListenerFixedResponseConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
status_code: str,
content_type: Optional[str] = None,
message_body: Optional[str] = None):
pulumi.set(__self__, "status_code", status_code)
if content_type is not None:
pulumi.set(__self__, "content_type", content_type)
if message_body is not None:
pulumi.set(__self__, "message_body", message_body)
@property
@pulumi.getter(name="statusCode")
def status_code(self) -> str:
return pulumi.get(self, "status_code")
@property
@pulumi.getter(name="contentType")
def content_type(self) -> Optional[str]:
return pulumi.get(self, "content_type")
@property
@pulumi.getter(name="messageBody")
def message_body(self) -> Optional[str]:
return pulumi.get(self, "message_body")
@pulumi.output_type
class ListenerForwardConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "targetGroupStickinessConfig":
suggest = "target_group_stickiness_config"
elif key == "targetGroups":
suggest = "target_groups"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ListenerForwardConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ListenerForwardConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ListenerForwardConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
target_group_stickiness_config: Optional['outputs.ListenerTargetGroupStickinessConfig'] = None,
target_groups: Optional[Sequence['outputs.ListenerTargetGroupTuple']] = None):
if target_group_stickiness_config is not None:
pulumi.set(__self__, "target_group_stickiness_config", target_group_stickiness_config)
if target_groups is not None:
pulumi.set(__self__, "target_groups", target_groups)
@property
@pulumi.getter(name="targetGroupStickinessConfig")
def target_group_stickiness_config(self) -> Optional['outputs.ListenerTargetGroupStickinessConfig']:
return pulumi.get(self, "target_group_stickiness_config")
@property
@pulumi.getter(name="targetGroups")
def target_groups(self) -> Optional[Sequence['outputs.ListenerTargetGroupTuple']]:
return pulumi.get(self, "target_groups")
@pulumi.output_type
class ListenerRedirectConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "statusCode":
suggest = "status_code"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ListenerRedirectConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ListenerRedirectConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ListenerRedirectConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
status_code: str,
host: Optional[str] = None,
path: Optional[str] = None,
port: Optional[str] = None,
protocol: Optional[str] = None,
query: Optional[str] = None):
pulumi.set(__self__, "status_code", status_code)
if host is not None:
pulumi.set(__self__, "host", host)
if path is not None:
pulumi.set(__self__, "path", path)
if port is not None:
pulumi.set(__self__, "port", port)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
if query is not None:
pulumi.set(__self__, "query", query)
@property
@pulumi.getter(name="statusCode")
def status_code(self) -> str:
return pulumi.get(self, "status_code")
@property
@pulumi.getter
def host(self) -> Optional[str]:
return pulumi.get(self, "host")
@property
@pulumi.getter
def path(self) -> Optional[str]:
return pulumi.get(self, "path")
@property
@pulumi.getter
def port(self) -> Optional[str]:
return pulumi.get(self, "port")
@property
@pulumi.getter
def protocol(self) -> Optional[str]:
return pulumi.get(self, "protocol")
@property
@pulumi.getter
def query(self) -> Optional[str]:
return pulumi.get(self, "query")
@pulumi.output_type
class ListenerRuleAction(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "authenticateCognitoConfig":
suggest = "authenticate_cognito_config"
elif key == "authenticateOidcConfig":
suggest = "authenticate_oidc_config"
elif key == "fixedResponseConfig":
suggest = "fixed_response_config"
elif key == "forwardConfig":
suggest = "forward_config"
elif key == "redirectConfig":
suggest = "redirect_config"
elif key == "targetGroupArn":
suggest = "target_group_arn"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ListenerRuleAction. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ListenerRuleAction.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ListenerRuleAction.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
type: str,
authenticate_cognito_config: Optional['outputs.ListenerRuleAuthenticateCognitoConfig'] = None,
authenticate_oidc_config: Optional['outputs.ListenerRuleAuthenticateOidcConfig'] = None,
fixed_response_config: Optional['outputs.ListenerRuleFixedResponseConfig'] = None,
forward_config: Optional['outputs.ListenerRuleForwardConfig'] = None,
order: Optional[int] = None,
redirect_config: Optional['outputs.ListenerRuleRedirectConfig'] = None,
target_group_arn: Optional[str] = None):
pulumi.set(__self__, "type", type)
if authenticate_cognito_config is not None:
pulumi.set(__self__, "authenticate_cognito_config", authenticate_cognito_config)
if authenticate_oidc_config is not None:
pulumi.set(__self__, "authenticate_oidc_config", authenticate_oidc_config)
if fixed_response_config is not None:
pulumi.set(__self__, "fixed_response_config", fixed_response_config)
if forward_config is not None:
pulumi.set(__self__, "forward_config", forward_config)
if order is not None:
pulumi.set(__self__, "order", order)
if redirect_config is not None:
pulumi.set(__self__, "redirect_config", redirect_config)
if target_group_arn is not None:
pulumi.set(__self__, "target_group_arn", target_group_arn)
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
@property
@pulumi.getter(name="authenticateCognitoConfig")
def authenticate_cognito_config(self) -> Optional['outputs.ListenerRuleAuthenticateCognitoConfig']:
return pulumi.get(self, "authenticate_cognito_config")
@property
@pulumi.getter(name="authenticateOidcConfig")
def authenticate_oidc_config(self) -> Optional['outputs.ListenerRuleAuthenticateOidcConfig']:
return pulumi.get(self, "authenticate_oidc_config")
@property
@pulumi.getter(name="fixedResponseConfig")
def fixed_response_config(self) -> Optional['outputs.ListenerRuleFixedResponseConfig']:
return pulumi.get(self, "fixed_response_config")
@property
@pulumi.getter(name="forwardConfig")
def forward_config(self) -> Optional['outputs.ListenerRuleForwardConfig']:
return pulumi.get(self, "forward_config")
@property
@pulumi.getter
def order(self) -> Optional[int]:
return pulumi.get(self, "order")
@property
@pulumi.getter(name="redirectConfig")
def redirect_config(self) -> Optional['outputs.ListenerRuleRedirectConfig']:
return pulumi.get(self, "redirect_config")
@property
@pulumi.getter(name="targetGroupArn")
def target_group_arn(self) -> Optional[str]:
return pulumi.get(self, "target_group_arn")
@pulumi.output_type
class ListenerRuleAuthenticateCognitoConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "userPoolArn":
suggest = "user_pool_arn"
elif key == "userPoolClientId":
suggest = "user_pool_client_id"
elif key == "userPoolDomain":
suggest = "user_pool_domain"
elif key == "authenticationRequestExtraParams":
suggest = "authentication_request_extra_params"
elif key == "onUnauthenticatedRequest":
suggest = "on_unauthenticated_request"
elif key == "sessionCookieName":
suggest = "session_cookie_name"
elif key == "sessionTimeout":
suggest = "session_timeout"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ListenerRuleAuthenticateCognitoConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ListenerRuleAuthenticateCognitoConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ListenerRuleAuthenticateCognitoConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
user_pool_arn: str,
user_pool_client_id: str,
user_pool_domain: str,
authentication_request_extra_params: Optional[Any] = None,
on_unauthenticated_request: Optional[str] = None,
scope: Optional[str] = None,
session_cookie_name: Optional[str] = None,
session_timeout: Optional[int] = None):
pulumi.set(__self__, "user_pool_arn", user_pool_arn)
pulumi.set(__self__, "user_pool_client_id", user_pool_client_id)
pulumi.set(__self__, "user_pool_domain", user_pool_domain)
if authentication_request_extra_params is not None:
pulumi.set(__self__, "authentication_request_extra_params", authentication_request_extra_params)
if on_unauthenticated_request is not None:
pulumi.set(__self__, "on_unauthenticated_request", on_unauthenticated_request)
if scope is not None:
pulumi.set(__self__, "scope", scope)
if session_cookie_name is not None:
pulumi.set(__self__, "session_cookie_name", session_cookie_name)
if session_timeout is not None:
pulumi.set(__self__, "session_timeout", session_timeout)
@property
@pulumi.getter(name="userPoolArn")
def user_pool_arn(self) -> str:
return pulumi.get(self, "user_pool_arn")
@property
@pulumi.getter(name="userPoolClientId")
def user_pool_client_id(self) -> str:
return pulumi.get(self, "user_pool_client_id")
@property
@pulumi.getter(name="userPoolDomain")
def user_pool_domain(self) -> str:
return pulumi.get(self, "user_pool_domain")
@property
@pulumi.getter(name="authenticationRequestExtraParams")
def authentication_request_extra_params(self) -> Optional[Any]:
return pulumi.get(self, "authentication_request_extra_params")
@property
@pulumi.getter(name="onUnauthenticatedRequest")
def on_unauthenticated_request(self) -> Optional[str]:
return pulumi.get(self, "on_unauthenticated_request")
@property
@pulumi.getter
def scope(self) -> Optional[str]:
return pulumi.get(self, "scope")
@property
@pulumi.getter(name="sessionCookieName")
def session_cookie_name(self) -> Optional[str]:
return pulumi.get(self, "session_cookie_name")
@property
@pulumi.getter(name="sessionTimeout")
def session_timeout(self) -> Optional[int]:
return pulumi.get(self, "session_timeout")
@pulumi.output_type
class ListenerRuleAuthenticateOidcConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "authorizationEndpoint":
suggest = "authorization_endpoint"
elif key == "clientId":
suggest = "client_id"
elif key == "clientSecret":
suggest = "client_secret"
elif key == "tokenEndpoint":
suggest = "token_endpoint"
elif key == "userInfoEndpoint":
suggest = "user_info_endpoint"
elif key == "authenticationRequestExtraParams":
suggest = "authentication_request_extra_params"
elif key == "onUnauthenticatedRequest":
suggest = "on_unauthenticated_request"
elif key == "sessionCookieName":
suggest = "session_cookie_name"
elif key == "sessionTimeout":
suggest = "session_timeout"
elif key == "useExistingClientSecret":
suggest = "use_existing_client_secret"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ListenerRuleAuthenticateOidcConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ListenerRuleAuthenticateOidcConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ListenerRuleAuthenticateOidcConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
authorization_endpoint: str,
client_id: str,
client_secret: str,
issuer: str,
token_endpoint: str,
user_info_endpoint: str,
authentication_request_extra_params: Optional[Any] = None,
on_unauthenticated_request: Optional[str] = None,
scope: Optional[str] = None,
session_cookie_name: Optional[str] = None,
session_timeout: Optional[int] = None,
use_existing_client_secret: Optional[bool] = None):
pulumi.set(__self__, "authorization_endpoint", authorization_endpoint)
pulumi.set(__self__, "client_id", client_id)
pulumi.set(__self__, "client_secret", client_secret)
pulumi.set(__self__, "issuer", issuer)
pulumi.set(__self__, "token_endpoint", token_endpoint)
pulumi.set(__self__, "user_info_endpoint", user_info_endpoint)
if authentication_request_extra_params is not None:
pulumi.set(__self__, "authentication_request_extra_params", authentication_request_extra_params)
if on_unauthenticated_request is not None:
pulumi.set(__self__, "on_unauthenticated_request", on_unauthenticated_request)
if scope is not None:
pulumi.set(__self__, "scope", scope)
if session_cookie_name is not None:
pulumi.set(__self__, "session_cookie_name", session_cookie_name)
if session_timeout is not None:
pulumi.set(__self__, "session_timeout", session_timeout)
if use_existing_client_secret is not None:
pulumi.set(__self__, "use_existing_client_secret", use_existing_client_secret)
@property
@pulumi.getter(name="authorizationEndpoint")
def authorization_endpoint(self) -> str:
return pulumi.get(self, "authorization_endpoint")
@property
@pulumi.getter(name="clientId")
def client_id(self) -> str:
return pulumi.get(self, "client_id")
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> str:
return pulumi.get(self, "client_secret")
@property
@pulumi.getter
def issuer(self) -> str:
return pulumi.get(self, "issuer")
@property
@pulumi.getter(name="tokenEndpoint")
def token_endpoint(self) -> str:
return pulumi.get(self, "token_endpoint")
@property
@pulumi.getter(name="userInfoEndpoint")
def user_info_endpoint(self) -> str:
return pulumi.get(self, "user_info_endpoint")
@property
@pulumi.getter(name="authenticationRequestExtraParams")
def authentication_request_extra_params(self) -> Optional[Any]:
return pulumi.get(self, "authentication_request_extra_params")
@property
@pulumi.getter(name="onUnauthenticatedRequest")
def on_unauthenticated_request(self) -> Optional[str]:
return pulumi.get(self, "on_unauthenticated_request")
@property
@pulumi.getter
def scope(self) -> Optional[str]:
return pulumi.get(self, "scope")
@property
@pulumi.getter(name="sessionCookieName")
def session_cookie_name(self) -> Optional[str]:
return pulumi.get(self, "session_cookie_name")
@property
@pulumi.getter(name="sessionTimeout")
def session_timeout(self) -> Optional[int]:
return pulumi.get(self, "session_timeout")
@property
@pulumi.getter(name="useExistingClientSecret")
def use_existing_client_secret(self) -> Optional[bool]:
return pulumi.get(self, "use_existing_client_secret")
@pulumi.output_type
class ListenerRuleFixedResponseConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "statusCode":
suggest = "status_code"
elif key == "contentType":
suggest = "content_type"
elif key == "messageBody":
suggest = "message_body"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ListenerRuleFixedResponseConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ListenerRuleFixedResponseConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ListenerRuleFixedResponseConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
status_code: str,
content_type: Optional[str] = None,
message_body: Optional[str] = None):
pulumi.set(__self__, "status_code", status_code)
if content_type is not None:
pulumi.set(__self__, "content_type", content_type)
if message_body is not None:
pulumi.set(__self__, "message_body", message_body)
@property
@pulumi.getter(name="statusCode")
def status_code(self) -> str:
return pulumi.get(self, "status_code")
@property
@pulumi.getter(name="contentType")
def content_type(self) -> Optional[str]:
return pulumi.get(self, "content_type")
@property
@pulumi.getter(name="messageBody")
def message_body(self) -> Optional[str]:
return pulumi.get(self, "message_body")
@pulumi.output_type
class ListenerRuleForwardConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "targetGroupStickinessConfig":
suggest = "target_group_stickiness_config"
elif key == "targetGroups":
suggest = "target_groups"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ListenerRuleForwardConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ListenerRuleForwardConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ListenerRuleForwardConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
target_group_stickiness_config: Optional['outputs.ListenerRuleTargetGroupStickinessConfig'] = None,
target_groups: Optional[Sequence['outputs.ListenerRuleTargetGroupTuple']] = None):
if target_group_stickiness_config is not None:
pulumi.set(__self__, "target_group_stickiness_config", target_group_stickiness_config)
if target_groups is not None:
pulumi.set(__self__, "target_groups", target_groups)
@property
@pulumi.getter(name="targetGroupStickinessConfig")
def target_group_stickiness_config(self) -> Optional['outputs.ListenerRuleTargetGroupStickinessConfig']:
return pulumi.get(self, "target_group_stickiness_config")
@property
@pulumi.getter(name="targetGroups")
def target_groups(self) -> Optional[Sequence['outputs.ListenerRuleTargetGroupTuple']]:
return pulumi.get(self, "target_groups")
@pulumi.output_type
class ListenerRuleHostHeaderConfig(dict):
def __init__(__self__, *,
values: Optional[Sequence[str]] = None):
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def values(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "values")
@pulumi.output_type
class ListenerRuleHttpHeaderConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "httpHeaderName":
suggest = "http_header_name"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ListenerRuleHttpHeaderConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ListenerRuleHttpHeaderConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ListenerRuleHttpHeaderConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
http_header_name: Optional[str] = None,
values: Optional[Sequence[str]] = None):
if http_header_name is not None:
pulumi.set(__self__, "http_header_name", http_header_name)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter(name="httpHeaderName")
def http_header_name(self) -> Optional[str]:
return pulumi.get(self, "http_header_name")
@property
@pulumi.getter
def values(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "values")
@pulumi.output_type
class ListenerRuleHttpRequestMethodConfig(dict):
def __init__(__self__, *,
values: Optional[Sequence[str]] = None):
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def values(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "values")
@pulumi.output_type
class ListenerRulePathPatternConfig(dict):
def __init__(__self__, *,
values: Optional[Sequence[str]] = None):
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def values(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "values")
@pulumi.output_type
class ListenerRuleQueryStringConfig(dict):
def __init__(__self__, *,
values: Optional[Sequence['outputs.ListenerRuleQueryStringKeyValue']] = None):
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def values(self) -> Optional[Sequence['outputs.ListenerRuleQueryStringKeyValue']]:
return pulumi.get(self, "values")
@pulumi.output_type
class ListenerRuleQueryStringKeyValue(dict):
def __init__(__self__, *,
key: Optional[str] = None,
value: Optional[str] = None):
if key is not None:
pulumi.set(__self__, "key", key)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> Optional[str]:
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> Optional[str]:
return pulumi.get(self, "value")
@pulumi.output_type
class ListenerRuleRedirectConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "statusCode":
suggest = "status_code"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ListenerRuleRedirectConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ListenerRuleRedirectConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ListenerRuleRedirectConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
status_code: str,
host: Optional[str] = None,
path: Optional[str] = None,
port: Optional[str] = None,
protocol: Optional[str] = None,
query: Optional[str] = None):
pulumi.set(__self__, "status_code", status_code)
if host is not None:
pulumi.set(__self__, "host", host)
if path is not None:
pulumi.set(__self__, "path", path)
if port is not None:
pulumi.set(__self__, "port", port)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
if query is not None:
pulumi.set(__self__, "query", query)
@property
@pulumi.getter(name="statusCode")
def status_code(self) -> str:
return pulumi.get(self, "status_code")
@property
@pulumi.getter
def host(self) -> Optional[str]:
return pulumi.get(self, "host")
@property
@pulumi.getter
def path(self) -> Optional[str]:
return pulumi.get(self, "path")
@property
@pulumi.getter
def port(self) -> Optional[str]:
return pulumi.get(self, "port")
@property
@pulumi.getter
def protocol(self) -> Optional[str]:
return pulumi.get(self, "protocol")
@property
@pulumi.getter
def query(self) -> Optional[str]:
return pulumi.get(self, "query")
@pulumi.output_type
class ListenerRuleRuleCondition(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "hostHeaderConfig":
suggest = "host_header_config"
elif key == "httpHeaderConfig":
suggest = "http_header_config"
elif key == "httpRequestMethodConfig":
suggest = "http_request_method_config"
elif key == "pathPatternConfig":
suggest = "path_pattern_config"
elif key == "queryStringConfig":
suggest = "query_string_config"
elif key == "sourceIpConfig":
suggest = "source_ip_config"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ListenerRuleRuleCondition. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ListenerRuleRuleCondition.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ListenerRuleRuleCondition.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
field: Optional[str] = None,
host_header_config: Optional['outputs.ListenerRuleHostHeaderConfig'] = None,
http_header_config: Optional['outputs.ListenerRuleHttpHeaderConfig'] = None,
http_request_method_config: Optional['outputs.ListenerRuleHttpRequestMethodConfig'] = None,
path_pattern_config: Optional['outputs.ListenerRulePathPatternConfig'] = None,
query_string_config: Optional['outputs.ListenerRuleQueryStringConfig'] = None,
source_ip_config: Optional['outputs.ListenerRuleSourceIpConfig'] = None,
values: Optional[Sequence[str]] = None):
if field is not None:
pulumi.set(__self__, "field", field)
if host_header_config is not None:
pulumi.set(__self__, "host_header_config", host_header_config)
if http_header_config is not None:
pulumi.set(__self__, "http_header_config", http_header_config)
if http_request_method_config is not None:
pulumi.set(__self__, "http_request_method_config", http_request_method_config)
if path_pattern_config is not None:
pulumi.set(__self__, "path_pattern_config", path_pattern_config)
if query_string_config is not None:
pulumi.set(__self__, "query_string_config", query_string_config)
if source_ip_config is not None:
pulumi.set(__self__, "source_ip_config", source_ip_config)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def field(self) -> Optional[str]:
return pulumi.get(self, "field")
@property
@pulumi.getter(name="hostHeaderConfig")
def host_header_config(self) -> Optional['outputs.ListenerRuleHostHeaderConfig']:
return pulumi.get(self, "host_header_config")
@property
@pulumi.getter(name="httpHeaderConfig")
def http_header_config(self) -> Optional['outputs.ListenerRuleHttpHeaderConfig']:
return pulumi.get(self, "http_header_config")
@property
@pulumi.getter(name="httpRequestMethodConfig")
def http_request_method_config(self) -> Optional['outputs.ListenerRuleHttpRequestMethodConfig']:
return pulumi.get(self, "http_request_method_config")
@property
@pulumi.getter(name="pathPatternConfig")
def path_pattern_config(self) -> Optional['outputs.ListenerRulePathPatternConfig']:
return pulumi.get(self, "path_pattern_config")
@property
@pulumi.getter(name="queryStringConfig")
def query_string_config(self) -> Optional['outputs.ListenerRuleQueryStringConfig']:
return pulumi.get(self, "query_string_config")
@property
@pulumi.getter(name="sourceIpConfig")
def source_ip_config(self) -> Optional['outputs.ListenerRuleSourceIpConfig']:
return pulumi.get(self, "source_ip_config")
@property
@pulumi.getter
def values(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "values")
@pulumi.output_type
class ListenerRuleSourceIpConfig(dict):
def __init__(__self__, *,
values: Optional[Sequence[str]] = None):
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def values(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "values")
@pulumi.output_type
class ListenerRuleTargetGroupStickinessConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "durationSeconds":
suggest = "duration_seconds"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ListenerRuleTargetGroupStickinessConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ListenerRuleTargetGroupStickinessConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ListenerRuleTargetGroupStickinessConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
duration_seconds: Optional[int] = None,
enabled: Optional[bool] = None):
if duration_seconds is not None:
pulumi.set(__self__, "duration_seconds", duration_seconds)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
@property
@pulumi.getter(name="durationSeconds")
def duration_seconds(self) -> Optional[int]:
return pulumi.get(self, "duration_seconds")
@property
@pulumi.getter
def enabled(self) -> Optional[bool]:
return pulumi.get(self, "enabled")
@pulumi.output_type
class ListenerRuleTargetGroupTuple(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "targetGroupArn":
suggest = "target_group_arn"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ListenerRuleTargetGroupTuple. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ListenerRuleTargetGroupTuple.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ListenerRuleTargetGroupTuple.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
target_group_arn: Optional[str] = None,
weight: Optional[int] = None):
if target_group_arn is not None:
pulumi.set(__self__, "target_group_arn", target_group_arn)
if weight is not None:
pulumi.set(__self__, "weight", weight)
@property
@pulumi.getter(name="targetGroupArn")
def target_group_arn(self) -> Optional[str]:
return pulumi.get(self, "target_group_arn")
@property
@pulumi.getter
def weight(self) -> Optional[int]:
return pulumi.get(self, "weight")
@pulumi.output_type
class ListenerTargetGroupStickinessConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "durationSeconds":
suggest = "duration_seconds"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ListenerTargetGroupStickinessConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ListenerTargetGroupStickinessConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ListenerTargetGroupStickinessConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
duration_seconds: Optional[int] = None,
enabled: Optional[bool] = None):
if duration_seconds is not None:
pulumi.set(__self__, "duration_seconds", duration_seconds)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
@property
@pulumi.getter(name="durationSeconds")
def duration_seconds(self) -> Optional[int]:
return pulumi.get(self, "duration_seconds")
@property
@pulumi.getter
def enabled(self) -> Optional[bool]:
return pulumi.get(self, "enabled")
@pulumi.output_type
class ListenerTargetGroupTuple(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "targetGroupArn":
suggest = "target_group_arn"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ListenerTargetGroupTuple. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ListenerTargetGroupTuple.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ListenerTargetGroupTuple.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
target_group_arn: Optional[str] = None,
weight: Optional[int] = None):
if target_group_arn is not None:
pulumi.set(__self__, "target_group_arn", target_group_arn)
if weight is not None:
pulumi.set(__self__, "weight", weight)
@property
@pulumi.getter(name="targetGroupArn")
def target_group_arn(self) -> Optional[str]:
return pulumi.get(self, "target_group_arn")
@property
@pulumi.getter
def weight(self) -> Optional[int]:
return pulumi.get(self, "weight")
@pulumi.output_type
class LoadBalancerAttribute(dict):
def __init__(__self__, *,
key: Optional[str] = None,
value: Optional[str] = None):
if key is not None:
pulumi.set(__self__, "key", key)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> Optional[str]:
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> Optional[str]:
return pulumi.get(self, "value")
@pulumi.output_type
class LoadBalancerSubnetMapping(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "subnetId":
suggest = "subnet_id"
elif key == "allocationId":
suggest = "allocation_id"
elif key == "iPv6Address":
suggest = "i_pv6_address"
elif key == "privateIPv4Address":
suggest = "private_i_pv4_address"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in LoadBalancerSubnetMapping. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
LoadBalancerSubnetMapping.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
LoadBalancerSubnetMapping.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
subnet_id: str,
allocation_id: Optional[str] = None,
i_pv6_address: Optional[str] = None,
private_i_pv4_address: Optional[str] = None):
pulumi.set(__self__, "subnet_id", subnet_id)
if allocation_id is not None:
pulumi.set(__self__, "allocation_id", allocation_id)
if i_pv6_address is not None:
pulumi.set(__self__, "i_pv6_address", i_pv6_address)
if private_i_pv4_address is not None:
pulumi.set(__self__, "private_i_pv4_address", private_i_pv4_address)
@property
@pulumi.getter(name="subnetId")
def subnet_id(self) -> str:
return pulumi.get(self, "subnet_id")
@property
@pulumi.getter(name="allocationId")
def allocation_id(self) -> Optional[str]:
return pulumi.get(self, "allocation_id")
@property
@pulumi.getter(name="iPv6Address")
def i_pv6_address(self) -> Optional[str]:
return pulumi.get(self, "i_pv6_address")
@property
@pulumi.getter(name="privateIPv4Address")
def private_i_pv4_address(self) -> Optional[str]:
return pulumi.get(self, "private_i_pv4_address")
@pulumi.output_type
class LoadBalancerTag(dict):
def __init__(__self__, *,
key: str,
value: str):
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> str:
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> str:
return pulumi.get(self, "value")
@pulumi.output_type
class TargetGroupAttribute(dict):
def __init__(__self__, *,
key: Optional[str] = None,
value: Optional[str] = None):
if key is not None:
pulumi.set(__self__, "key", key)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> Optional[str]:
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> Optional[str]:
return pulumi.get(self, "value")
@pulumi.output_type
class TargetGroupMatcher(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "grpcCode":
suggest = "grpc_code"
elif key == "httpCode":
suggest = "http_code"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in TargetGroupMatcher. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
TargetGroupMatcher.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
TargetGroupMatcher.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
grpc_code: Optional[str] = None,
http_code: Optional[str] = None):
if grpc_code is not None:
pulumi.set(__self__, "grpc_code", grpc_code)
if http_code is not None:
pulumi.set(__self__, "http_code", http_code)
@property
@pulumi.getter(name="grpcCode")
def grpc_code(self) -> Optional[str]:
return pulumi.get(self, "grpc_code")
@property
@pulumi.getter(name="httpCode")
def http_code(self) -> Optional[str]:
return pulumi.get(self, "http_code")
@pulumi.output_type
class TargetGroupTag(dict):
def __init__(__self__, *,
key: str,
value: str):
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> str:
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> str:
return pulumi.get(self, "value")
@pulumi.output_type
class TargetGroupTargetDescription(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "availabilityZone":
suggest = "availability_zone"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in TargetGroupTargetDescription. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
TargetGroupTargetDescription.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
TargetGroupTargetDescription.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
id: str,
availability_zone: Optional[str] = None,
port: Optional[int] = None):
pulumi.set(__self__, "id", id)
if availability_zone is not None:
pulumi.set(__self__, "availability_zone", availability_zone)
if port is not None:
pulumi.set(__self__, "port", port)
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter(name="availabilityZone")
def availability_zone(self) -> Optional[str]:
return pulumi.get(self, "availability_zone")
@property
@pulumi.getter
def port(self) -> Optional[int]:
return pulumi.get(self, "port")
| 36.116364 | 159 | 0.65044 | 6,266 | 59,592 | 5.857804 | 0.034791 | 0.027462 | 0.042855 | 0.062635 | 0.83302 | 0.814412 | 0.800654 | 0.751342 | 0.741915 | 0.737774 | 0 | 0.000422 | 0.244815 | 59,592 | 1,649 | 160 | 36.138266 | 0.81519 | 0.002702 | 0 | 0.810117 | 1 | 0.016862 | 0.204813 | 0.09257 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16349 | false | 0 | 0.004399 | 0.08871 | 0.314516 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
971ea22b4f898d7b945a3d66e55c054fd8c1a0fd | 10,581 | py | Python | awx/main/tests/functional/api/test_webhooks.py | Avinesh/awx | 6310a2edd890d6062a9f6bcdeb2b46c4b876c2bf | ["Apache-2.0"] | 17 | 2021-04-03T01:40:17.000Z | 2022-03-03T11:45:20.000Z | awx/main/tests/functional/api/test_webhooks.py | Avinesh/awx | 6310a2edd890d6062a9f6bcdeb2b46c4b876c2bf | ["Apache-2.0"] | 24 | 2021-05-18T21:13:35.000Z | 2022-03-29T10:23:52.000Z | awx/main/tests/functional/api/test_webhooks.py | hostinger/awx | dac01b14e2c04c201a162ea03ef8386d822e3923 | ["Apache-2.0"] | 24 | 2020-11-27T08:37:35.000Z | 2021-03-08T13:27:15.000Z |
import pytest
from awx.api.versioning import reverse
from awx.main.models.mixins import WebhookTemplateMixin
from awx.main.models.credential import Credential, CredentialType
@pytest.mark.django_db
@pytest.mark.parametrize(
"user_role, expect", [
('superuser', 200),
('org admin', 200),
('jt admin', 200),
('jt execute', 403),
('org member', 403),
]
)
def test_get_webhook_key_jt(organization_factory, job_template_factory, get, user_role, expect):
objs = organization_factory("org", superusers=['admin'], users=['user'])
jt = job_template_factory("jt", organization=objs.organization,
inventory='test_inv', project='test_proj').job_template
if user_role == 'superuser':
user = objs.superusers.admin
else:
user = objs.users.user
grant_obj = objs.organization if user_role.startswith('org') else jt
getattr(grant_obj, '{}_role'.format(user_role.split()[1])).members.add(user)
url = reverse('api:webhook_key', kwargs={'model_kwarg': 'job_templates', 'pk': jt.pk})
response = get(url, user=user, expect=expect)
if expect < 400:
assert response.data == {'webhook_key': ''}
@pytest.mark.django_db
@pytest.mark.parametrize(
"user_role, expect", [
('superuser', 200),
('org admin', 200),
('jt admin', 200),
('jt execute', 403),
('org member', 403),
]
)
def test_get_webhook_key_wfjt(organization_factory, workflow_job_template_factory, get, user_role, expect):
objs = organization_factory("org", superusers=['admin'], users=['user'])
wfjt = workflow_job_template_factory("wfjt", organization=objs.organization).workflow_job_template
if user_role == 'superuser':
user = objs.superusers.admin
else:
user = objs.users.user
grant_obj = objs.organization if user_role.startswith('org') else wfjt
getattr(grant_obj, '{}_role'.format(user_role.split()[1])).members.add(user)
url = reverse('api:webhook_key', kwargs={'model_kwarg': 'workflow_job_templates', 'pk': wfjt.pk})
response = get(url, user=user, expect=expect)
if expect < 400:
assert response.data == {'webhook_key': ''}
@pytest.mark.django_db
@pytest.mark.parametrize(
"user_role, expect", [
('superuser', 201),
('org admin', 201),
('jt admin', 201),
('jt execute', 403),
('org member', 403),
]
)
def test_post_webhook_key_jt(organization_factory, job_template_factory, post, user_role, expect):
objs = organization_factory("org", superusers=['admin'], users=['user'])
jt = job_template_factory("jt", organization=objs.organization,
inventory='test_inv', project='test_proj').job_template
if user_role == 'superuser':
user = objs.superusers.admin
else:
user = objs.users.user
grant_obj = objs.organization if user_role.startswith('org') else jt
getattr(grant_obj, '{}_role'.format(user_role.split()[1])).members.add(user)
url = reverse('api:webhook_key', kwargs={'model_kwarg': 'job_templates', 'pk': jt.pk})
response = post(url, {}, user=user, expect=expect)
if expect < 400:
assert bool(response.data.get('webhook_key'))
@pytest.mark.django_db
@pytest.mark.parametrize(
"user_role, expect", [
('superuser', 201),
('org admin', 201),
('jt admin', 201),
('jt execute', 403),
('org member', 403),
]
)
def test_post_webhook_key_wfjt(organization_factory, workflow_job_template_factory, post, user_role, expect):
objs = organization_factory("org", superusers=['admin'], users=['user'])
wfjt = workflow_job_template_factory("wfjt", organization=objs.organization).workflow_job_template
if user_role == 'superuser':
user = objs.superusers.admin
else:
user = objs.users.user
grant_obj = objs.organization if user_role.startswith('org') else wfjt
getattr(grant_obj, '{}_role'.format(user_role.split()[1])).members.add(user)
url = reverse('api:webhook_key', kwargs={'model_kwarg': 'workflow_job_templates', 'pk': wfjt.pk})
response = post(url, {}, user=user, expect=expect)
if expect < 400:
assert bool(response.data.get('webhook_key'))
@pytest.mark.django_db
@pytest.mark.parametrize(
"service", [s for s, _ in WebhookTemplateMixin.SERVICES]
)
def test_set_webhook_service(organization_factory, job_template_factory, patch, service):
objs = organization_factory("org", superusers=['admin'])
jt = job_template_factory("jt", organization=objs.organization,
inventory='test_inv', project='test_proj').job_template
admin = objs.superusers.admin
assert (jt.webhook_service, jt.webhook_key) == ('', '')
url = reverse('api:job_template_detail', kwargs={'pk': jt.pk})
patch(url, {'webhook_service': service}, user=admin, expect=200)
jt.refresh_from_db()
assert jt.webhook_service == service
assert jt.webhook_key != ''
@pytest.mark.django_db
@pytest.mark.parametrize(
"service", [s for s, _ in WebhookTemplateMixin.SERVICES]
)
def test_unset_webhook_service(organization_factory, job_template_factory, patch, service):
objs = organization_factory("org", superusers=['admin'])
jt = job_template_factory("jt", organization=objs.organization, webhook_service=service,
inventory='test_inv', project='test_proj').job_template
admin = objs.superusers.admin
assert jt.webhook_service == service
assert jt.webhook_key != ''
url = reverse('api:job_template_detail', kwargs={'pk': jt.pk})
patch(url, {'webhook_service': ''}, user=admin, expect=200)
jt.refresh_from_db()
assert (jt.webhook_service, jt.webhook_key) == ('', '')
@pytest.mark.django_db
@pytest.mark.parametrize(
"service", [s for s, _ in WebhookTemplateMixin.SERVICES]
)
def test_set_webhook_credential(organization_factory, job_template_factory, patch, service):
objs = organization_factory("org", superusers=['admin'])
jt = job_template_factory("jt", organization=objs.organization, webhook_service=service,
inventory='test_inv', project='test_proj').job_template
admin = objs.superusers.admin
assert jt.webhook_service == service
assert jt.webhook_key != ''
cred_type = CredentialType.defaults['{}_token'.format(service)]()
cred_type.save()
cred = Credential.objects.create(credential_type=cred_type, name='test-cred',
inputs={'token': 'secret'})
url = reverse('api:job_template_detail', kwargs={'pk': jt.pk})
patch(url, {'webhook_credential': cred.pk}, user=admin, expect=200)
jt.refresh_from_db()
assert jt.webhook_service == service
assert jt.webhook_key != ''
assert jt.webhook_credential == cred
@pytest.mark.django_db
@pytest.mark.parametrize(
"service,token", [
(s, WebhookTemplateMixin.SERVICES[i - 1][0]) for i, (s, _) in enumerate(WebhookTemplateMixin.SERVICES)
]
)
def test_set_wrong_service_webhook_credential(organization_factory, job_template_factory, patch, service, token):
objs = organization_factory("org", superusers=['admin'])
jt = job_template_factory("jt", organization=objs.organization, webhook_service=service,
inventory='test_inv', project='test_proj').job_template
admin = objs.superusers.admin
assert jt.webhook_service == service
assert jt.webhook_key != ''
cred_type = CredentialType.defaults['{}_token'.format(token)]()
cred_type.save()
cred = Credential.objects.create(credential_type=cred_type, name='test-cred',
inputs={'token': 'secret'})
url = reverse('api:job_template_detail', kwargs={'pk': jt.pk})
response = patch(url, {'webhook_credential': cred.pk}, user=admin, expect=400)
jt.refresh_from_db()
assert jt.webhook_service == service
assert jt.webhook_key != ''
assert jt.webhook_credential is None
assert response.data == {'webhook_credential': ["Must match the selected webhook service."]}
@pytest.mark.django_db
@pytest.mark.parametrize(
"service", [s for s, _ in WebhookTemplateMixin.SERVICES]
)
def test_set_webhook_credential_without_service(organization_factory, job_template_factory, patch, service):
objs = organization_factory("org", superusers=['admin'])
jt = job_template_factory("jt", organization=objs.organization,
inventory='test_inv', project='test_proj').job_template
admin = objs.superusers.admin
assert jt.webhook_service == ''
assert jt.webhook_key == ''
cred_type = CredentialType.defaults['{}_token'.format(service)]()
cred_type.save()
cred = Credential.objects.create(credential_type=cred_type, name='test-cred',
inputs={'token': 'secret'})
url = reverse('api:job_template_detail', kwargs={'pk': jt.pk})
response = patch(url, {'webhook_credential': cred.pk}, user=admin, expect=400)
jt.refresh_from_db()
assert jt.webhook_service == ''
assert jt.webhook_key == ''
assert jt.webhook_credential is None
assert response.data == {'webhook_credential': ["Must match the selected webhook service."]}
@pytest.mark.django_db
@pytest.mark.parametrize(
"service", [s for s, _ in WebhookTemplateMixin.SERVICES]
)
def test_unset_webhook_service_with_credential(organization_factory, job_template_factory, patch, service):
objs = organization_factory("org", superusers=['admin'])
jt = job_template_factory("jt", organization=objs.organization, webhook_service=service,
inventory='test_inv', project='test_proj').job_template
admin = objs.superusers.admin
assert jt.webhook_service == service
assert jt.webhook_key != ''
cred_type = CredentialType.defaults['{}_token'.format(service)]()
cred_type.save()
cred = Credential.objects.create(credential_type=cred_type, name='test-cred',
inputs={'token': 'secret'})
jt.webhook_credential = cred
jt.save()
url = reverse('api:job_template_detail', kwargs={'pk': jt.pk})
response = patch(url, {'webhook_service': ''}, user=admin, expect=400)
jt.refresh_from_db()
assert jt.webhook_service == service
assert jt.webhook_key != ''
assert jt.webhook_credential == cred
assert response.data == {'webhook_credential': ["Must match the selected webhook service."]}
| 40.54023
| 113
| 0.671865
| 1,271
| 10,581
| 5.369788
| 0.080252
| 0.058022
| 0.057143
| 0.038681
| 0.961026
| 0.955458
| 0.955458
| 0.954432
| 0.941538
| 0.92044
| 0
| 0.011189
| 0.189113
| 10,581
| 260
| 114
| 40.696154
| 0.784266
| 0
| 0
| 0.826484
| 0
| 0
| 0.131651
| 0.017201
| 0
| 0
| 0
| 0
| 0.150685
| 1
| 0.045662
| false
| 0
| 0.018265
| 0
| 0.063927
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
975afa8d06b3b9c7712ca708d7fed99f0a1cdb6b
| 8,367
|
py
|
Python
|
thornode_client/thornode_client/api/tx_api.py
|
hoodieonwho/thorchain-python-client
|
fccfd66552e16bdab1dbb90b68022475c7a9693d
|
[
"MIT"
] | null | null | null |
thornode_client/thornode_client/api/tx_api.py
|
hoodieonwho/thorchain-python-client
|
fccfd66552e16bdab1dbb90b68022475c7a9693d
|
[
"MIT"
] | null | null | null |
thornode_client/thornode_client/api/tx_api.py
|
hoodieonwho/thorchain-python-client
|
fccfd66552e16bdab1dbb90b68022475c7a9693d
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
THORChain API
This documentation outlines the API for THORChain. NOTE: This document is a **work in progress**. # noqa: E501
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from thornode_client.api_client import ApiClient
class TxApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_a_tx_with_given_hash(self, hash, **kwargs): # noqa: E501
"""Get a tx with given hash # noqa: E501
Retrieve a tx with the given hash from THORChain # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_a_tx_with_given_hash(hash, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str hash: Tx hash of an inbound transaction or outbound transaction (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_a_tx_with_given_hash_with_http_info(hash, **kwargs) # noqa: E501
else:
(data) = self.get_a_tx_with_given_hash_with_http_info(hash, **kwargs) # noqa: E501
return data
def get_a_tx_with_given_hash_with_http_info(self, hash, **kwargs): # noqa: E501
"""Get a tx with given hash # noqa: E501
Retrieve a tx with the given hash from THORChain # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_a_tx_with_given_hash_with_http_info(hash, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str hash: Tx hash of an inbound transaction or outbound transaction (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['hash'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_a_tx_with_given_hash" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'hash' is set
if self.api_client.client_side_validation and ('hash' not in params or
params['hash'] is None): # noqa: E501
raise ValueError("Missing the required parameter `hash` when calling `get_a_tx_with_given_hash`") # noqa: E501
collection_formats = {}
path_params = {}
if 'hash' in params:
path_params['hash'] = params['hash'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/thorchain/tx/{hash}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_tx_signers(self, hash, **kwargs): # noqa: E501
"""Get tx signers # noqa: E501
Get tx signers that match the request hash # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tx_signers(hash, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str hash: Tx hash of an inbound transaction or outbound transaction (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_tx_signers_with_http_info(hash, **kwargs) # noqa: E501
else:
(data) = self.get_tx_signers_with_http_info(hash, **kwargs) # noqa: E501
return data
def get_tx_signers_with_http_info(self, hash, **kwargs): # noqa: E501
"""Get tx signers # noqa: E501
Get tx signers that match the request hash # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tx_signers_with_http_info(hash, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str hash: Tx hash of an inbound transaction or outbound transaction (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['hash'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_tx_signers" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'hash' is set
if self.api_client.client_side_validation and ('hash' not in params or
params['hash'] is None): # noqa: E501
raise ValueError("Missing the required parameter `hash` when calling `get_tx_signers`") # noqa: E501
collection_formats = {}
path_params = {}
if 'hash' in params:
path_params['hash'] = params['hash'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/thorchain/tx/{hash}/signers', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
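# Hedged usage sketch (not part of the generated client). Both endpoints
# follow the sync/async pattern described in the docstrings above; the hash
# below is a placeholder, and a reachable THORChain node is assumed.
if __name__ == '__main__':
    api = TxApi()  # builds a default ApiClient()
    tx_hash = 'ABC123'  # hypothetical transaction hash
    api.get_a_tx_with_given_hash(tx_hash)  # synchronous call
    thread = api.get_tx_signers(tx_hash, async_req=True)  # asynchronous call
    thread.get()  # block until the request thread finishes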
| 37.186667
| 123
| 0.60643
| 1,011
| 8,367
| 4.773492
| 0.154303
| 0.051388
| 0.017406
| 0.020721
| 0.898881
| 0.897016
| 0.891836
| 0.871529
| 0.868214
| 0.859925
| 0
| 0.017093
| 0.307757
| 8,367
| 224
| 124
| 37.352679
| 0.816126
| 0.351141
| 0
| 0.778761
| 0
| 0
| 0.153908
| 0.042194
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044248
| false
| 0
| 0.035398
| 0
| 0.141593
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8ae9058fefadf30c0326f4f63c43379b31ae9689
| 9,267
|
py
|
Python
|
src/tests/functional/test_entry_data_endpoint.py
|
jroberts07/fpl-stats-api
|
3a7b5faacec5f83643a16143000b46bea70b5364
|
[
"MIT"
] | null | null | null |
src/tests/functional/test_entry_data_endpoint.py
|
jroberts07/fpl-stats-api
|
3a7b5faacec5f83643a16143000b46bea70b5364
|
[
"MIT"
] | 7
|
2019-09-08T21:46:14.000Z
|
2019-12-23T15:06:53.000Z
|
src/tests/functional/test_entry_data_endpoint.py
|
jroberts07/fpl_stats_api
|
3a7b5faacec5f83643a16143000b46bea70b5364
|
[
"MIT"
] | null | null | null |
from aioresponses import aioresponses
from server import app as sanic_app
async def test_success_multiple_classics(test_cli):
"""Test entry data with an array of classic leagues.
Args:
test_cli (obj): The test event loop.
"""
with aioresponses(passthrough=['http://127.0.0.1:']) as m:
with open(
'tests/functional/data/'
'entry_response_multiple_classic_leagues.json'
) as f:
entry_data = f.read()
with open(
'tests/functional/data/'
'league_response_less_than_fifty.json'
) as f:
league_data = f.read()
m.get(
sanic_app.config.FPL_URL + sanic_app.config.ENTRY_DATA.format(
entry_id=123
),
status=200,
body=entry_data
)
m.get(
sanic_app.config.FPL_URL + sanic_app.config.LEAGUE_DATA.format(
league_id=1
),
status=200,
body=league_data
)
m.get(
sanic_app.config.FPL_URL + sanic_app.config.LEAGUE_DATA.format(
league_id=2
),
status=200,
body=league_data
)
resp = await test_cli.get(
'/entry_data/123?player_cookie=456'
)
assert resp.status == 200
resp_json = await resp.json()
assert resp_json == {
"name": "TEAM A",
"leagues": [
{
"id": 1,
"name": "LEAGUE A",
},
{
"id": 2,
"name": "LEAGUE B",
}
]
}
async def test_success_multiple_classics_some_more_than_fifty(test_cli):
"""Test entry data with an array of classic leagues some with more than
fifty entries.
Args:
test_cli (obj): The test event loop.
"""
with aioresponses(passthrough=['http://127.0.0.1:']) as m:
with open(
'tests/functional/data/'
'entry_response_multiple_classic_leagues.json'
) as f:
entry_data = f.read()
with open(
'tests/functional/data/'
'league_response_less_than_fifty.json'
) as f:
league_data_less_than_fifty = f.read()
with open(
'tests/functional/data/'
'league_response_more_than_fifty.json'
) as f:
league_data_more_than_fifty = f.read()
m.get(
sanic_app.config.FPL_URL + sanic_app.config.ENTRY_DATA.format(
entry_id=123
),
status=200,
body=entry_data
)
m.get(
sanic_app.config.FPL_URL + sanic_app.config.LEAGUE_DATA.format(
league_id=1
),
status=200,
body=league_data_less_than_fifty
)
m.get(
sanic_app.config.FPL_URL + sanic_app.config.LEAGUE_DATA.format(
league_id=2
),
status=200,
body=league_data_more_than_fifty
)
resp = await test_cli.get(
'/entry_data/123?player_cookie=456'
)
assert resp.status == 200
resp_json = await resp.json()
assert resp_json == {
"name": "TEAM A",
"leagues": [
{
"id": 1,
"name": "LEAGUE A",
}
]
}
async def test_league_api_bad_response(test_cli):
"""Test entry data with a bad response from league API.
Args:
test_cli (obj): The test event loop.
"""
with aioresponses(passthrough=['http://127.0.0.1:']) as m:
with open(
'tests/functional/data/'
'entry_response_single_classic_league.json'
) as f:
entry_data = f.read()
with open(
'tests/functional/data/'
'bad_response.json'
) as f:
league_data = f.read()
m.get(
sanic_app.config.FPL_URL + sanic_app.config.ENTRY_DATA.format(
entry_id=123
),
status=200,
body=entry_data
)
m.get(
sanic_app.config.FPL_URL + sanic_app.config.LEAGUE_DATA.format(
league_id=1
),
status=200,
body=league_data
)
resp = await test_cli.get(
'/entry_data/123?player_cookie=456'
)
assert resp.status == 500
resp_json = await resp.json()
assert resp_json == {
"error": "THERE WAS A PROBLEM WITH THE DATA RETURNED FROM FPL"
}
async def test_success_single_classics(test_cli):
"""Test entry data with a single classic league.
Args:
test_cli (obj): The test event loop.
"""
with aioresponses(passthrough=['http://127.0.0.1:']) as m:
with open(
'tests/functional/data/'
'entry_response_single_classic_league.json'
) as f:
entry_data = f.read()
with open(
'tests/functional/data/'
'league_response_less_than_fifty.json'
) as f:
league_data = f.read()
m.get(
sanic_app.config.FPL_URL + sanic_app.config.ENTRY_DATA.format(
entry_id=123
),
status=200,
body=entry_data
)
m.get(
sanic_app.config.FPL_URL + sanic_app.config.LEAGUE_DATA.format(
league_id=1
),
status=200,
body=league_data
)
resp = await test_cli.get(
'/entry_data/123?player_cookie=456'
)
assert resp.status == 200
resp_json = await resp.json()
assert resp_json == {
"name": "TEAM A",
"leagues": [
{
"id": 1,
"name": "LEAGUE A",
}
]
}
async def test_no_leagues(test_cli):
"""Test entry data with no leagues.
Args:
test_cli (obj): The test event loop.
"""
with aioresponses(passthrough=['http://127.0.0.1:']) as m:
with open(
'tests/functional/data/entry_response_no_leagues.json'
) as f:
fpl_data = f.read()
m.get(
sanic_app.config.FPL_URL + sanic_app.config.ENTRY_DATA.format(
entry_id=123
),
status=200,
body=fpl_data
)
resp = await test_cli.get(
'/entry_data/123?player_cookie=456'
)
assert resp.status == 200
resp_json = await resp.json()
assert resp_json == {
"name": "TEAM A",
"leagues": []
}
async def test_no_name(test_cli):
"""Test entry data with no name.
Args:
test_cli (obj): The test event loop.
"""
with aioresponses(passthrough=['http://127.0.0.1:']) as m:
with open(
'tests/functional/data/entry_response_no_name.json'
) as f:
entry_data = f.read()
with open(
'tests/functional/data/'
'league_response_less_than_fifty.json'
) as f:
league_data = f.read()
m.get(
sanic_app.config.FPL_URL + sanic_app.config.ENTRY_DATA.format(
entry_id=123
),
status=200,
body=entry_data
)
m.get(
sanic_app.config.FPL_URL + sanic_app.config.LEAGUE_DATA.format(
league_id=1
),
status=200,
body=league_data
)
resp = await test_cli.get(
'/entry_data/123?player_cookie=456'
)
assert resp.status == 200
resp_json = await resp.json()
assert resp_json == {
"name": None,
"leagues": [
{
"id": 1,
"name": "LEAGUE A",
}
]
}
async def test_no_player_cookie(test_cli):
"""Test entry data with no player_cookie.
Args:
test_cli (obj): The test event loop.
"""
resp = await test_cli.get(
'/entry_data/123?player_cookie='
)
assert resp.status == 400
resp_json = await resp.json()
assert resp_json == {
"error": "PARAMETERS REQUIRED: player_cookie"
}
async def test_fpl_error_response(test_cli):
"""Test entry data with an error response from FPL.
Args:
test_cli (obj): The test event loop.
"""
with aioresponses(passthrough=['http://127.0.0.1:']) as m:
m.get(
sanic_app.config.FPL_URL + sanic_app.config.ENTRY_DATA.format(
entry_id=123
),
status=500,
body=None
)
resp = await test_cli.get(
'/entry_data/123?player_cookie=456'
)
assert resp.status == 500
resp_json = await resp.json()
assert resp_json == {
"error": "ERROR CONNECTING TO THE FANTASY API"
}
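# Hedged refactoring sketch (not in the original module): the repeated
# aioresponses setup above could be collapsed into one helper. The helper
# name and the fixed entry_id of 123 are illustrative assumptions.
def _mock_fpl(m, entry_body, league_bodies):
    m.get(
        sanic_app.config.FPL_URL + sanic_app.config.ENTRY_DATA.format(entry_id=123),
        status=200,
        body=entry_body
    )
    for league_id, body in league_bodies.items():
        m.get(
            sanic_app.config.FPL_URL + sanic_app.config.LEAGUE_DATA.format(league_id=league_id),
            status=200,
            body=body
        )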
| 28.601852
| 75
| 0.499838
| 1,036
| 9,267
| 4.243243
| 0.082046
| 0.067561
| 0.089172
| 0.038217
| 0.893085
| 0.881256
| 0.867152
| 0.818926
| 0.812102
| 0.792539
| 0
| 0.033483
| 0.400561
| 9,267
| 323
| 76
| 28.690402
| 0.757876
| 0
| 0
| 0.718045
| 0
| 0
| 0.161897
| 0.111603
| 0
| 0
| 0
| 0
| 0.06015
| 1
| 0
| false
| 0.026316
| 0.007519
| 0
| 0.007519
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8af62ad39d558e0d5a5806871f1e4a6b7a8df43e
| 194,948
|
py
|
Python
|
src/lib/tracker/multitracker.py
|
DerryHub/FairMOT-attack
|
2678cebe939eb8e14106c1e9e07f0c44b5ba975f
|
[
"MIT"
] | 18
|
2021-11-18T15:38:46.000Z
|
2022-03-22T07:24:27.000Z
|
src/lib/tracker/multitracker.py
|
DerryHub/FairMOT-attack
|
2678cebe939eb8e14106c1e9e07f0c44b5ba975f
|
[
"MIT"
] | null | null | null |
src/lib/tracker/multitracker.py
|
DerryHub/FairMOT-attack
|
2678cebe939eb8e14106c1e9e07f0c44b5ba975f
|
[
"MIT"
] | 1
|
2021-11-25T03:14:37.000Z
|
2021-11-25T03:14:37.000Z
|
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# ------------------------------------------------------------------------------
from collections import deque
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from models import *
from models.decode import mot_decode
from models.model import create_model, load_model
from models.utils import _tranpose_and_gather_feat, _tranpose_and_gather_feat_expand
from tracker import matching
from tracking_utils.kalman_filter import KalmanFilter
from tracking_utils.log import logger
from tracking_utils.utils import *
from utils.post_process import ctdet_post_process
from cython_bbox import bbox_overlaps as bbox_ious
from .basetrack import BaseTrack, TrackState
from scipy.optimize import linear_sum_assignment
import random
import pickle
import copy
class GaussianBlurConv(nn.Module):
def __init__(self, channels=3):
super(GaussianBlurConv, self).__init__()
self.channels = channels
kernel = [[0.00078633, 0.00655965, 0.01330373, 0.00655965, 0.00078633],
[0.00655965, 0.05472157, 0.11098164, 0.05472157, 0.00655965],
[0.01330373, 0.11098164, 0.22508352, 0.11098164, 0.01330373],
[0.00655965, 0.05472157, 0.11098164, 0.05472157, 0.00655965],
[0.00078633, 0.00655965, 0.01330373, 0.00655965, 0.00078633]]
kernel = torch.FloatTensor(kernel).unsqueeze(0).unsqueeze(0)
        kernel = kernel.repeat(self.channels, 1, 1, 1)  # (channels, 1, 5, 5): one depthwise kernel per channel
self.weight = nn.Parameter(data=kernel, requires_grad=False)
    def forward(self, x):
        # overriding forward (rather than __call__) keeps nn.Module hooks working
        x = F.conv2d(x, self.weight, padding=2, groups=self.channels)
        return x
gaussianBlurConv = GaussianBlurConv().cuda()
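# Usage sketch (assumptions: a CUDA device and a 3-channel float input in
# [0, 1]); the depthwise conv (groups=channels) blurs each channel
# independently with the fixed 5x5 Gaussian kernel, and padding=2 keeps the
# spatial size unchanged:
#     blurred = gaussianBlurConv(torch.rand(1, 3, 608, 1088).cuda())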
seed = 0
random.seed(seed)
np.random.seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# Remove randomness (may be slower on Tesla GPUs)
# https://pytorch.org/docs/stable/notes/randomness.html
if seed == 0:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
smoothL1 = torch.nn.SmoothL1Loss()
mse = torch.nn.MSELoss()
td_ = {}
def bbox_dis(bbox1, bbox2):
center1 = (bbox1[:, :2] + bbox1[:, 2:]) / 2
center2 = (bbox2[:, :2] + bbox2[:, 2:]) / 2
center1 = np.repeat(center1.reshape(-1, 1, 2), len(bbox2), axis=1)
center2 = np.repeat(center2.reshape(1, -1, 2), len(bbox1), axis=0)
dis = np.sqrt(np.sum((center1 - center2) ** 2, axis=-1))
return dis
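# Shape sketch: bbox_dis returns a (len(bbox1), len(bbox2)) matrix of
# Euclidean distances between box centers, with boxes as [x1, y1, x2, y2]:
#     bbox_dis(np.zeros((2, 4)), np.ones((3, 4))).shape == (2, 3)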
class STrack(BaseTrack):
shared_kalman = KalmanFilter()
shared_kalman_ = KalmanFilter()
def __init__(self, tlwh, score, temp_feat, buffer_size=30):
        # waiting to be activated
        self._tlwh = np.asarray(tlwh, dtype=np.float64)  # np.float is removed in NumPy >= 1.24
self.kalman_filter = None
self.mean, self.covariance = None, None
self.is_activated = False
self.score = score
self.tracklet_len = 0
self.exist_len = 1
self.smooth_feat = None
self.smooth_feat_ad = None
self.update_features(temp_feat)
self.features = deque([], maxlen=buffer_size)
self.alpha = 0.9
self.curr_tlbr = self.tlwh_to_tlbr(self._tlwh)
self.det_dict = {}
def get_v(self):
return self.mean[4:6] if self.mean is not None else None
def update_features_ad(self, feat):
feat /= np.linalg.norm(feat)
if self.smooth_feat_ad is None:
self.smooth_feat_ad = feat
else:
self.smooth_feat_ad = self.alpha * self.smooth_feat_ad + (1 - self.alpha) * feat
self.smooth_feat_ad /= np.linalg.norm(self.smooth_feat_ad)
def update_features(self, feat):
feat /= np.linalg.norm(feat)
self.curr_feat = feat
if self.smooth_feat is None:
self.smooth_feat = feat
else:
self.smooth_feat = self.alpha * self.smooth_feat + (1 - self.alpha) * feat
self.features.append(feat)
self.smooth_feat /= np.linalg.norm(self.smooth_feat)
def predict(self):
mean_state = self.mean.copy()
if self.state != TrackState.Tracked:
mean_state[7] = 0
self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)
@staticmethod
def multi_predict(stracks):
if len(stracks) > 0:
multi_mean = np.asarray([st.mean.copy() for st in stracks])
multi_covariance = np.asarray([st.covariance for st in stracks])
for i, st in enumerate(stracks):
if st.state != TrackState.Tracked:
multi_mean[i][7] = 0
multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(multi_mean, multi_covariance)
for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
stracks[i].mean = mean
stracks[i].covariance = cov
@staticmethod
def multi_predict_(stracks):
if len(stracks) > 0:
multi_mean = np.asarray([st.mean.copy() for st in stracks])
multi_covariance = np.asarray([st.covariance for st in stracks])
for i, st in enumerate(stracks):
if st.state != TrackState.Tracked:
multi_mean[i][7] = 0
multi_mean, multi_covariance = STrack.shared_kalman_.multi_predict(multi_mean, multi_covariance)
for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
stracks[i].mean = mean
stracks[i].covariance = cov
def activate(self, kalman_filter, frame_id, track_id=None):
"""Start a new tracklet"""
self.kalman_filter = kalman_filter
if track_id:
self.track_id = track_id['track_id']
track_id['track_id'] += 1
else:
self.track_id = self.next_id()
self.mean, self.covariance = self.kalman_filter.initiate(self.tlwh_to_xyah(self._tlwh))
self.tracklet_len = 0
self.state = TrackState.Tracked
self.is_activated = True
self.frame_id = frame_id
self.start_frame = frame_id
def activate_(self, kalman_filter, frame_id, track_id=None):
"""Start a new tracklet"""
self.kalman_filter = kalman_filter
if track_id:
self.track_id = track_id['track_id']
track_id['track_id'] += 1
else:
self.track_id = self.next_id_()
self.mean, self.covariance = self.kalman_filter.initiate(self.tlwh_to_xyah(self._tlwh))
self.tracklet_len = 0
self.state = TrackState.Tracked
self.is_activated = True
self.frame_id = frame_id
self.start_frame = frame_id
def re_activate(self, new_track, frame_id, new_id=False):
self.curr_tlbr = self.tlwh_to_tlbr(new_track.tlwh)
self.mean, self.covariance = self.kalman_filter.update(
self.mean, self.covariance, self.tlwh_to_xyah(new_track.tlwh)
)
self.update_features(new_track.curr_feat)
self.tracklet_len = 0
self.exist_len += 1
self.state = TrackState.Tracked
self.is_activated = True
self.frame_id = frame_id
if new_id:
self.track_id = self.next_id()
def re_activate_(self, new_track, frame_id, new_id=False):
self.curr_tlbr = self.tlwh_to_tlbr(new_track.tlwh)
self.mean, self.covariance = self.kalman_filter.update(
self.mean, self.covariance, self.tlwh_to_xyah(new_track.tlwh)
)
self.update_features(new_track.curr_feat)
self.tracklet_len = 0
self.exist_len += 1
self.state = TrackState.Tracked
self.is_activated = True
self.frame_id = frame_id
if new_id:
self.track_id = self.next_id_()
def update(self, new_track, frame_id, update_feature=True):
"""
Update a matched track
:type new_track: STrack
:type frame_id: int
:type update_feature: bool
:return:
"""
self.frame_id = frame_id
self.tracklet_len += 1
self.exist_len += 1
self.curr_tlbr = self.tlwh_to_tlbr(new_track.tlwh)
new_tlwh = new_track.tlwh
self.mean, self.covariance = self.kalman_filter.update(
self.mean, self.covariance, self.tlwh_to_xyah(new_tlwh))
self.state = TrackState.Tracked
self.is_activated = True
self.score = new_track.score
if update_feature:
self.update_features(new_track.curr_feat)
@property
# @jit(nopython=True)
def tlwh(self):
"""Get current position in bounding box format `(top left x, top left y,
width, height)`.
"""
if self.mean is None:
return self._tlwh.copy()
ret = self.mean[:4].copy()
ret[2] *= ret[3]
ret[:2] -= ret[2:] / 2
return ret
@property
# @jit(nopython=True)
def tlbr(self):
"""Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
`(top left, bottom right)`.
"""
ret = self.tlwh.copy()
ret[2:] += ret[:2]
return ret
@staticmethod
# @jit(nopython=True)
def tlwh_to_xyah(tlwh):
"""Convert bounding box to format `(center x, center y, aspect ratio,
height)`, where the aspect ratio is `width / height`.
"""
ret = np.asarray(tlwh).copy()
ret[:2] += ret[2:] / 2
ret[2] /= ret[3]
return ret
def to_xyah(self):
return self.tlwh_to_xyah(self.tlwh)
@staticmethod
# @jit(nopython=True)
def tlbr_to_tlwh(tlbr):
ret = np.asarray(tlbr).copy()
ret[2:] -= ret[:2]
return ret
@staticmethod
# @jit(nopython=True)
def tlwh_to_tlbr(tlwh):
ret = np.asarray(tlwh).copy()
ret[2:] += ret[:2]
return ret
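    # Conversion sketch for the box formats above, for a 10x20 box whose
    # top-left corner is at (5, 5):
    #     tlwh = [5, 5, 10, 20]
    #     tlwh_to_tlbr(tlwh) -> [5, 5, 15, 25]    (opposite corners)
    #     tlwh_to_xyah(tlwh) -> [10, 15, 0.5, 20] (center x/y, aspect, height)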
def __repr__(self):
return 'OT_{}_({}-{})'.format(self.track_id, self.start_frame, self.end_frame)
class JDETracker(object):
def __init__(
self,
opt,
frame_rate=30,
tracked_stracks=[],
lost_stracks=[],
removed_stracks=[],
frame_id=0,
ad_last_info={},
model=None
):
self.opt = opt
print('Creating model...')
if model:
self.model = model
else:
self.model = create_model(opt.arch, opt.heads, opt.head_conv)
self.model = load_model(self.model, opt.load_model).cuda()
self.model.eval()
self.log_index = []
self.unconfirmed_ad_iou = None
self.tracked_stracks_ad_iou = None
self.strack_pool_ad_iou = None
self.tracked_stracks = copy.deepcopy(tracked_stracks) # type: list[STrack]
self.lost_stracks = copy.deepcopy(lost_stracks) # type: list[STrack]
self.removed_stracks = copy.deepcopy(removed_stracks) # type: list[STrack]
self.tracked_stracks_ad = copy.deepcopy(tracked_stracks) # type: list[STrack]
self.lost_stracks_ad = copy.deepcopy(lost_stracks) # type: list[STrack]
self.removed_stracks_ad = copy.deepcopy(removed_stracks) # type: list[STrack]
self.tracked_stracks_ = copy.deepcopy(tracked_stracks) # type: list[STrack]
self.lost_stracks_ = copy.deepcopy(lost_stracks) # type: list[STrack]
self.removed_stracks_ = copy.deepcopy(removed_stracks) # type: list[STrack]
self.frame_id = frame_id
self.frame_id_ = frame_id
self.frame_id_ad = frame_id
self.det_thresh = opt.conf_thres
self.buffer_size = int(frame_rate / 30.0 * opt.track_buffer)
self.max_time_lost = self.buffer_size
self.max_per_image = 128
self.kalman_filter = KalmanFilter()
self.kalman_filter_ad = KalmanFilter()
self.kalman_filter_ = KalmanFilter()
self.attacked_ids = set([])
self.low_iou_ids = set([])
self.ATTACK_IOU_THR = opt.iou_thr
self.attack_iou_thr = self.ATTACK_IOU_THR
self.ad_last_info = copy.deepcopy(ad_last_info)
self.FRAME_THR = 10
self.temp_i = 0
self.multiple_ori_ids = {}
self.multiple_att_ids = {}
self.multiple_ori2att = {}
self.multiple_att_freq = {}
# hijacking attack
self.ad_bbox = True
self.ad_ids = set([])
def post_process(self, dets, meta):
dets = dets.detach().cpu().numpy()
dets = dets.reshape(1, -1, dets.shape[2])
dets = ctdet_post_process(
dets.copy(), [meta['c']], [meta['s']],
meta['out_height'], meta['out_width'], self.opt.num_classes)
for j in range(1, self.opt.num_classes + 1):
dets[0][j] = np.array(dets[0][j], dtype=np.float32).reshape(-1, 5)
return dets[0]
def merge_outputs(self, detections):
results = {}
for j in range(1, self.opt.num_classes + 1):
results[j] = np.concatenate(
[detection[j] for detection in detections], axis=0).astype(np.float32)
scores = np.hstack(
[results[j][:, 4] for j in range(1, self.opt.num_classes + 1)])
if len(scores) > self.max_per_image:
kth = len(scores) - self.max_per_image
thresh = np.partition(scores, kth)[kth]
for j in range(1, self.opt.num_classes + 1):
keep_inds = (results[j][:, 4] >= thresh)
results[j] = results[j][keep_inds]
return results
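    # Budget sketch: merge_outputs caps detections at max_per_image by
    # thresholding at the k-th highest score across classes; np.partition
    # finds that cutoff in O(n). E.g. with scores [0.9, 0.1, 0.5, 0.7] and a
    # budget of 2, kth = 2 and np.partition(scores, 2)[2] == 0.7, keeping
    # 0.9 and 0.7.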
@staticmethod
def recoverImg(im_blob, img0):
height = 608
width = 1088
im_blob = im_blob.cpu() * 255.0
shape = img0.shape[:2] # shape = [height, width]
ratio = min(float(height) / shape[0], float(width) / shape[1])
new_shape = (round(shape[1] * ratio), round(shape[0] * ratio)) # new_shape = [width, height]
dw = (width - new_shape[0]) / 2 # width padding
dh = (height - new_shape[1]) / 2 # height padding
top, bottom = round(dh - 0.1), round(dh + 0.1)
left, right = round(dw - 0.1), round(dw + 0.1)
im_blob = im_blob.squeeze().permute(1, 2, 0)[top:height - bottom, left:width - right, :].numpy().astype(
np.uint8)
im_blob = cv2.cvtColor(im_blob, cv2.COLOR_RGB2BGR)
h, w, _ = img0.shape
im_blob = cv2.resize(im_blob, (w, h))
return im_blob
def recoverNoise(self, noise, img0):
height = 608
width = 1088
shape = img0.shape[:2] # shape = [height, width]
ratio = min(float(height) / shape[0], float(width) / shape[1])
new_shape = (round(shape[1] * ratio), round(shape[0] * ratio)) # new_shape = [width, height]
dw = (width - new_shape[0]) / 2 # width padding
dh = (height - new_shape[1]) / 2 # height padding
top, bottom = round(dh - 0.1), round(dh + 0.1)
left, right = round(dw - 0.1), round(dw + 0.1)
noise = noise[:, :, top:height - bottom, left:width - right]
h, w, _ = img0.shape
# noise = self.resizeTensor(noise, h, w).cpu().squeeze().permute(1, 2, 0).numpy()
noise = noise.cpu().squeeze().permute(1, 2, 0).numpy()
        noise = (noise[:, :, ::-1] * 255).astype(int)  # np.int is removed in NumPy >= 1.24
return noise
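    # Geometry sketch: recoverImg/recoverNoise undo the 1088x608 letterboxing
    # applied at load time: crop the symmetric width/height padding (dw, dh),
    # then resize (recoverImg) back to the original img0 resolution.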
@staticmethod
def resizeTensor(tensor, height, width):
h = torch.linspace(-1, 1, height).view(-1, 1).repeat(1, width).to(tensor.device)
w = torch.linspace(-1, 1, width).repeat(height, 1).to(tensor.device)
grid = torch.cat((h.unsqueeze(2), w.unsqueeze(2)), dim=2)
grid = grid.unsqueeze(0)
output = F.grid_sample(tensor, grid=grid, mode='bilinear', align_corners=True)
return output
@staticmethod
def processIoUs(ious):
h, w = ious.shape
assert h == w
ious = np.tril(ious, -1)
index = np.argsort(-ious.reshape(-1))
indSet = set([])
for ind in index:
i = ind // h
j = ind % w
if ious[i, j] == 0:
break
if i in indSet or j in indSet:
ious[i, j] = 0
else:
indSet.add(i)
indSet.add(j)
return ious
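    # Selection sketch: processIoUs scans the strictly-lower-triangle IoUs in
    # descending order, greedily keeping the best pair and zeroing any later
    # pair that re-uses either detection, so each detection index appears in
    # at most one surviving pair.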
def attack_sg_hj(
self,
im_blob,
img0,
dets,
inds,
remain_inds,
last_info,
outputs_ori,
attack_id,
attack_ind,
ad_bbox,
track_v
):
noise = torch.zeros_like(im_blob)
im_blob_ori = im_blob.clone().data
outputs = outputs_ori
H, W = outputs_ori['hm'].size()[2:]
hm_index = inds[0][remain_inds]
hm_index_att = hm_index[attack_ind].item()
index = list(range(hm_index.size(0)))
index.pop(attack_ind)
wh_ori = outputs['wh'].clone().data
reg_ori = outputs['reg'].clone().data
i = 0
while True:
i += 1
loss = 0
hm_index_att_lst = [hm_index_att]
loss -= ((outputs['hm'].view(-1)[hm_index_att_lst].sigmoid()) ** 2).mean()
if ad_bbox:
assert track_v is not None
                hm_index_gen = hm_index_att_lst[0]
                # shift the fabricated peak one cell opposite the track's motion;
                # cast to int so it remains a valid flat heat-map index
                hm_index_gen = int(hm_index_gen - (np.sign(track_v[0]) + W * np.sign(track_v[1])))
loss -= ((1 - outputs['hm'].view(-1)[[hm_index_gen]].sigmoid()) ** 2).mean()
loss -= smoothL1(outputs['wh'].view(2, -1)[:, [hm_index_gen]].T,
wh_ori.view(2, -1)[:, hm_index_att_lst].T)
loss -= smoothL1(outputs['reg'].view(2, -1)[:, [hm_index_gen]].T,
reg_ori.view(2, -1)[:, hm_index_att_lst].T)
loss.backward()
grad = im_blob.grad
grad /= (grad ** 2).sum().sqrt() + 1e-8
noise += grad * 2
im_blob = torch.clip(im_blob_ori + noise, min=0, max=1).data
outputs, suc, _ = self.forwardFeatureDet(
im_blob,
img0,
dets,
[attack_ind],
thr=1 if ad_bbox else 0,
vs=[track_v] if ad_bbox else []
)
if suc:
break
if i > 60:
break
return noise, i, suc
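    # Update sketch: each attack iteration in these methods takes an
    # L2-normalized gradient step,
    #     noise <- noise + step * grad / (||grad||_2 + 1e-8)
    # with step = 2 in the single-target detection attacks and step = 1 in
    # the feature and multi-target attacks, then re-clips im_blob to [0, 1].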
def attack_sg_det(
self,
im_blob,
img0,
dets,
inds,
remain_inds,
last_info,
outputs_ori,
attack_id,
attack_ind
):
noise = torch.zeros_like(im_blob)
im_blob_ori = im_blob.clone().data
outputs = outputs_ori
H, W = outputs_ori['hm'].size()[2:]
hm_index = inds[0][remain_inds]
hm_index_att = hm_index[attack_ind].item()
index = list(range(hm_index.size(0)))
index.pop(attack_ind)
i = 0
while True:
i += 1
loss = 0
hm_index_att_lst = [hm_index_att]
# for n_i in range(3):
# for n_j in range(3):
# hm_index_att_ = hm_index_att + (n_i - 1) * W + (n_j - 1)
# hm_index_att_ = max(0, min(H * W - 1, hm_index_att_))
# hm_index_att_lst.append(hm_index_att_)
loss -= ((outputs['hm'].view(-1)[hm_index_att_lst].sigmoid()) ** 2).mean()
# loss += ((outputs['hm'].view(-1)[hm_index_att_lst].sigmoid()) ** 2 *
# torch.log(1 - outputs['hm'].view(-1)[hm_index_att_lst].sigmoid())).mean()
loss.backward()
grad = im_blob.grad
grad /= (grad ** 2).sum().sqrt() + 1e-8
noise += grad * 2
im_blob = torch.clip(im_blob_ori + noise, min=0, max=1).data
outputs, suc, _ = self.forwardFeatureDet(
im_blob,
img0,
dets,
[attack_ind]
)
if suc:
break
if i > 60:
break
return noise, i, suc
def attack_mt_hj(
self,
im_blob,
img0,
dets,
inds,
remain_inds,
last_info,
outputs_ori,
attack_ids,
attack_inds,
ad_ids,
track_vs
):
img0_h, img0_w = img0.shape[:2]
H, W = outputs_ori['hm'].size()[2:]
r_w, r_h = img0_w / W, img0_h / H
r_max = max(r_w, r_h)
noise = torch.zeros_like(im_blob)
im_blob_ori = im_blob.clone().data
outputs = outputs_ori
wh_ori = outputs['wh'].clone().data
reg_ori = outputs['reg'].clone().data
i = 0
hm_index = inds[0][remain_inds]
hm_index_att_lst = hm_index[attack_inds].cpu().numpy().tolist()
best_i = None
best_noise = None
best_fail = np.inf
while True:
i += 1
loss = 0
loss -= ((outputs['hm'].view(-1)[hm_index_att_lst].sigmoid()) ** 2).mean()
hm_index_att_lst_ = [hm_index_att_lst[j] for j in range(len(hm_index_att_lst))
if attack_ids[j] not in ad_ids]
if len(hm_index_att_lst_):
assert len(track_vs) == len(hm_index_att_lst_)
hm_index_gen_lst = []
for index in range(len(hm_index_att_lst_)):
track_v = track_vs[index]
hm_index_gen = hm_index_att_lst_[index]
                    hm_index_gen = int(hm_index_gen - (np.sign(track_v[0]) + W * np.sign(track_v[1])))  # keep an int index
hm_index_gen_lst.append(hm_index_gen)
loss -= ((1 - outputs['hm'].view(-1)[hm_index_gen_lst].sigmoid()) ** 2).mean()
loss -= smoothL1(outputs['wh'].view(2, -1)[:, hm_index_gen_lst].T,
wh_ori.view(2, -1)[:, hm_index_att_lst_].T)
loss -= smoothL1(outputs['reg'].view(2, -1)[:, hm_index_gen_lst].T,
reg_ori.view(2, -1)[:, hm_index_att_lst_].T)
loss.backward()
grad = im_blob.grad
grad /= (grad ** 2).sum().sqrt() + 1e-8
noise += grad
thrs = [0 for j in range(len(attack_inds))]
for j in range(len(thrs)):
if attack_ids[j] not in ad_ids:
thrs[j] = 0.9
im_blob = torch.clip(im_blob_ori + noise, min=0, max=1).data
outputs, suc, fail_ids = self.forwardFeatureDet(
im_blob,
img0,
dets,
attack_inds.tolist(),
thr=thrs
)
if fail_ids is not None:
if fail_ids == 0:
break
elif fail_ids <= best_fail:
best_fail = fail_ids
best_i = i
best_noise = noise.clone()
if i > 60:
if self.opt.no_f_noise:
return None, i, False
else:
if best_i is not None:
noise = best_noise
i = best_i
return noise, i, False
return noise, i, True
def attack_mt_det(
self,
im_blob,
img0,
dets,
inds,
remain_inds,
last_info,
outputs_ori,
attack_ids,
attack_inds
):
img0_h, img0_w = img0.shape[:2]
H, W = outputs_ori['hm'].size()[2:]
r_w, r_h = img0_w / W, img0_h / H
r_max = max(r_w, r_h)
noise = torch.zeros_like(im_blob)
im_blob_ori = im_blob.clone().data
outputs = outputs_ori
wh_ori = outputs['wh'].clone().data
reg_ori = outputs['reg'].clone().data
i = 0
hm_index = inds[0][remain_inds]
hm_index_att_lst = hm_index[attack_inds].cpu().numpy().tolist()
best_i = None
best_noise = None
best_fail = np.inf
while True:
i += 1
loss = 0
loss -= ((outputs['hm'].view(-1)[hm_index_att_lst].sigmoid()) ** 2).mean()
loss.backward()
grad = im_blob.grad
grad /= (grad ** 2).sum().sqrt() + 1e-8
noise += grad
im_blob = torch.clip(im_blob_ori + noise, min=0, max=1).data
outputs, suc, fail_ids = self.forwardFeatureDet(
im_blob,
img0,
dets,
attack_inds.tolist()
)
if fail_ids is not None:
if fail_ids == 0:
break
elif fail_ids <= best_fail:
best_fail = fail_ids
best_i = i
best_noise = noise.clone()
if i > 60:
if self.opt.no_f_noise:
return None, i, False
else:
if best_i is not None:
noise = best_noise
i = best_i
return noise, i, False
return noise, i, True
def attack_sg_feat(
self,
im_blob,
img0,
id_features,
dets,
inds,
remain_inds,
last_info,
outputs_ori,
attack_id,
attack_ind,
target_id,
target_ind
):
noise = torch.zeros_like(im_blob)
im_blob_ori = im_blob.clone().data
last_ad_id_features = [None for _ in range(len(id_features[0]))]
for i in range(len(id_features)):
id_features[i] = id_features[i][[attack_ind, target_ind]]
i = 0
suc = True
while True:
i += 1
loss = 0
loss_feat = 0
for id_i, id_feature in enumerate(id_features):
if last_ad_id_features[attack_ind] is not None:
last_ad_id_feature = torch.from_numpy(last_ad_id_features[attack_ind]).unsqueeze(0).cuda()
sim_1 = torch.mm(id_feature[0:0 + 1], last_ad_id_feature.T).squeeze()
sim_2 = torch.mm(id_feature[1:1 + 1], last_ad_id_feature.T).squeeze()
loss_feat += sim_2 - sim_1
if last_ad_id_features[target_ind] is not None:
last_ad_id_feature = torch.from_numpy(last_ad_id_features[target_ind]).unsqueeze(0).cuda()
sim_1 = torch.mm(id_feature[1:1 + 1], last_ad_id_feature.T).squeeze()
sim_2 = torch.mm(id_feature[0:0 + 1], last_ad_id_feature.T).squeeze()
loss_feat += sim_2 - sim_1
if last_ad_id_features[attack_ind] is None and last_ad_id_features[target_ind] is None:
loss_feat += torch.mm(id_feature[0:0 + 1], id_feature[1:1 + 1].T).squeeze()
loss += loss_feat / len(id_features)
loss.backward()
grad = im_blob.grad
grad /= (grad ** 2).sum().sqrt() + 1e-8
noise += grad
im_blob = torch.clip(im_blob_ori + noise, min=0, max=1).data
id_features_, outputs_, ae_attack_id, ae_target_id, hm_index_ = self.forwardFeatureSg(
im_blob,
img0,
dets,
inds,
remain_inds,
attack_id,
attack_ind,
target_id,
target_ind,
last_info
)
if id_features_ is not None:
id_features = id_features_
if ae_attack_id != attack_id and ae_attack_id is not None:
break
if i > 60:
suc = False
break
return noise, i, suc
def attack_sg_cl(
self,
im_blob,
img0,
id_features,
dets,
inds,
remain_inds,
last_info,
outputs_ori,
attack_id,
attack_ind,
target_id,
target_ind
):
img0_h, img0_w = img0.shape[:2]
H, W = outputs_ori['hm'].size()[2:]
r_w, r_h = img0_w / W, img0_h / H
r_max = max(r_w, r_h)
noise = torch.zeros_like(im_blob)
im_blob_ori = im_blob.clone().data
outputs = outputs_ori
wh_ori = outputs['wh'].clone().data
reg_ori = outputs['reg'].clone().data
last_ad_id_features = [None for _ in range(len(id_features[0]))]
strack_pool = copy.deepcopy(last_info['last_strack_pool'])
last_attack_det = None
last_target_det = None
STrack.multi_predict(strack_pool)
for strack in strack_pool:
if strack.track_id == attack_id:
last_ad_id_features[attack_ind] = strack.smooth_feat
last_attack_det = torch.from_numpy(strack.tlbr).cuda().float()
last_attack_det[[0, 2]] = (last_attack_det[[0, 2]] - 0.5 * W * (r_w - r_max)) / r_max
last_attack_det[[1, 3]] = (last_attack_det[[1, 3]] - 0.5 * H * (r_h - r_max)) / r_max
elif strack.track_id == target_id:
last_ad_id_features[target_ind] = strack.smooth_feat
last_target_det = torch.from_numpy(strack.tlbr).cuda().float()
last_target_det[[0, 2]] = (last_target_det[[0, 2]] - 0.5 * W * (r_w - r_max)) / r_max
last_target_det[[1, 3]] = (last_target_det[[1, 3]] - 0.5 * H * (r_h - r_max)) / r_max
last_attack_det_center = torch.round(
(last_attack_det[:2] + last_attack_det[2:]) / 2) if last_attack_det is not None else None
last_target_det_center = torch.round(
(last_target_det[:2] + last_target_det[2:]) / 2) if last_target_det is not None else None
hm_index = inds[0][remain_inds]
for i in range(len(id_features)):
id_features[i] = id_features[i][[attack_ind, target_ind]]
i = 0
j = -1
suc = True
ori_hm_index = hm_index[[attack_ind, target_ind]].clone()
ori_hm_index_re = hm_index[[target_ind, attack_ind]].clone()
att_hm_index = None
noise_0 = None
i_0 = None
noise_1 = None
i_1 = None
while True:
i += 1
loss = 0
loss_feat = 0
# for id_i, id_feature in enumerate(id_features):
# if last_ad_id_features[attack_ind] is not None:
# last_ad_id_feature = torch.from_numpy(last_ad_id_features[attack_ind]).unsqueeze(0).cuda()
# sim_1 = torch.mm(id_feature[0:0 + 1], last_ad_id_feature.T).squeeze()
# sim_2 = torch.mm(id_feature[1:1 + 1], last_ad_id_feature.T).squeeze()
# loss_feat += sim_2 - sim_1
# if last_ad_id_features[target_ind] is not None:
# last_ad_id_feature = torch.from_numpy(last_ad_id_features[target_ind]).unsqueeze(0).cuda()
# sim_1 = torch.mm(id_feature[1:1 + 1], last_ad_id_feature.T).squeeze()
# sim_2 = torch.mm(id_feature[0:0 + 1], last_ad_id_feature.T).squeeze()
# loss_feat += sim_2 - sim_1
# if last_ad_id_features[attack_ind] is None and last_ad_id_features[target_ind] is None:
# loss_feat += torch.mm(id_feature[0:0 + 1], id_feature[1:1 + 1].T).squeeze()
# loss += loss_feat / len(id_features)
if i in [1, 10, 20, 30, 35, 40, 45, 50, 55]:
attack_det_center = torch.stack([hm_index[attack_ind] % W, hm_index[attack_ind] // W]).float()
target_det_center = torch.stack([hm_index[target_ind] % W, hm_index[target_ind] // W]).float()
if last_target_det_center is not None:
attack_center_delta = attack_det_center - last_target_det_center
if torch.max(torch.abs(attack_center_delta)) > 1:
attack_center_delta /= torch.max(torch.abs(attack_center_delta))
attack_det_center = torch.round(attack_det_center - attack_center_delta).int()
hm_index[attack_ind] = attack_det_center[0] + attack_det_center[1] * W
if last_attack_det_center is not None:
target_center_delta = target_det_center - last_attack_det_center
if torch.max(torch.abs(target_center_delta)) > 1:
target_center_delta /= torch.max(torch.abs(target_center_delta))
target_det_center = torch.round(target_det_center - target_center_delta).int()
hm_index[target_ind] = target_det_center[0] + target_det_center[1] * W
att_hm_index = hm_index[[attack_ind, target_ind]].clone()
if att_hm_index is not None:
n_att_hm_index = []
n_ori_hm_index_re = []
for hm_ind in range(len(att_hm_index)):
for n_i in range(3):
for n_j in range(3):
att_hm_ind = att_hm_index[hm_ind].item()
att_hm_ind = att_hm_ind + (n_i - 1) * W + (n_j - 1)
att_hm_ind = max(0, min(H*W-1, att_hm_ind))
n_att_hm_index.append(att_hm_ind)
ori_hm_ind = ori_hm_index_re[hm_ind].item()
ori_hm_ind = ori_hm_ind + (n_i - 1) * W + (n_j - 1)
ori_hm_ind = max(0, min(H * W - 1, ori_hm_ind))
n_ori_hm_index_re.append(ori_hm_ind)
# print(n_att_hm_index, n_ori_hm_index_re)
loss += ((1 - outputs['hm'].view(-1).sigmoid()[n_att_hm_index]) ** 2 *
torch.log(outputs['hm'].view(-1).sigmoid()[n_att_hm_index])).mean()
loss += ((outputs['hm'].view(-1).sigmoid()[n_ori_hm_index_re]) ** 2 *
torch.log(1 - outputs['hm'].view(-1).sigmoid()[n_ori_hm_index_re])).mean()
loss -= smoothL1(outputs['wh'].view(2, -1)[:, n_att_hm_index].T, wh_ori.view(2, -1)[:, n_ori_hm_index_re].T)
loss -= smoothL1(outputs['reg'].view(2, -1)[:, n_att_hm_index].T, reg_ori.view(2, -1)[:, n_ori_hm_index_re].T)
loss.backward()
grad = im_blob.grad
grad /= (grad ** 2).sum().sqrt() + 1e-8
noise += grad
im_blob = torch.clip(im_blob_ori + noise, min=0, max=1).data
id_features_, outputs_, ae_attack_id, ae_target_id, hm_index_ = self.forwardFeatureSg(
im_blob,
img0,
dets,
inds,
remain_inds,
attack_id,
attack_ind,
target_id,
target_ind,
last_info
)
if id_features_ is not None:
id_features = id_features_
if outputs_ is not None:
outputs = outputs_
# if hm_index_ is not None:
# hm_index = hm_index_
if ae_attack_id != attack_id and ae_attack_id is not None:
break
if i > 60:
if noise_0 is not None:
return noise_0, i_0, suc
elif noise_1 is not None:
return noise_1, i_1, suc
if self.opt.no_f_noise:
return None, i, False
else:
suc = False
break
return noise, i, suc
def attack_sg_random(
self,
im_blob,
img0,
id_features,
dets,
inds,
remain_inds,
last_info,
outputs_ori,
attack_id,
attack_ind,
target_id,
target_ind
):
im_blob_ori = im_blob.clone().data
suc = False
noise = torch.rand(im_blob_ori.size()).to(im_blob_ori.device)
noise /= (noise**2).sum().sqrt()
noise *= random.uniform(2, 8)
im_blob = torch.clip(im_blob_ori + noise, min=0, max=1).data
id_features_, outputs_, ae_attack_id, ae_target_id, hm_index_ = self.forwardFeatureSg(
im_blob,
img0,
dets,
inds,
remain_inds,
attack_id,
attack_ind,
target_id,
target_ind,
last_info,
grad=False
)
if ae_attack_id != attack_id and ae_attack_id is not None:
suc = True
return noise, 1, suc
def attack_mt_random(
self,
im_blob,
img0,
id_features,
dets,
inds,
remain_inds,
last_info,
outputs_ori,
attack_ids,
attack_inds,
target_ids,
target_inds
):
im_blob_ori = im_blob.clone().data
suc = False
noise = torch.rand(im_blob_ori.size()).to(im_blob_ori.device)
noise /= (noise ** 2).sum().sqrt()
noise *= random.uniform(2, 8)
im_blob = torch.clip(im_blob_ori + noise, min=0, max=1).data
id_features, outputs, fail_ids = self.forwardFeatureMt(
im_blob,
img0,
dets,
inds,
remain_inds,
attack_ids,
attack_inds,
target_ids,
target_inds,
last_info,
grad=False
)
if fail_ids == 0:
suc = True
return noise, 1, suc
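    # Baseline sketch: the *_random attacks above draw uniform noise, scale it
    # to unit L2 norm, multiply by a random magnitude in [2, 8], and test the
    # result once; they act as a gradient-free control for the iterative
    # attacks.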
def attack_sg(
self,
im_blob,
img0,
id_features,
dets,
inds,
remain_inds,
last_info,
outputs_ori,
attack_id,
attack_ind,
target_id,
target_ind
):
img0_h, img0_w = img0.shape[:2]
H, W = outputs_ori['hm'].size()[2:]
r_w, r_h = img0_w / W, img0_h / H
r_max = max(r_w, r_h)
noise = torch.zeros_like(im_blob)
im_blob_ori = im_blob.clone().data
outputs = outputs_ori
wh_ori = outputs['wh'].clone().data
reg_ori = outputs['reg'].clone().data
last_ad_id_features = [None for _ in range(len(id_features[0]))]
strack_pool = copy.deepcopy(last_info['last_strack_pool'])
last_attack_det = None
last_target_det = None
STrack.multi_predict(strack_pool)
for strack in strack_pool:
if strack.track_id == attack_id:
last_ad_id_features[attack_ind] = strack.smooth_feat
last_attack_det = torch.from_numpy(strack.tlbr).cuda().float()
last_attack_det[[0, 2]] = (last_attack_det[[0, 2]] - 0.5 * W * (r_w - r_max)) / r_max
last_attack_det[[1, 3]] = (last_attack_det[[1, 3]] - 0.5 * H * (r_h - r_max)) / r_max
elif strack.track_id == target_id:
last_ad_id_features[target_ind] = strack.smooth_feat
last_target_det = torch.from_numpy(strack.tlbr).cuda().float()
last_target_det[[0, 2]] = (last_target_det[[0, 2]] - 0.5 * W * (r_w - r_max)) / r_max
last_target_det[[1, 3]] = (last_target_det[[1, 3]] - 0.5 * H * (r_h - r_max)) / r_max
last_attack_det_center = torch.round(
(last_attack_det[:2] + last_attack_det[2:]) / 2) if last_attack_det is not None else None
last_target_det_center = torch.round(
(last_target_det[:2] + last_target_det[2:]) / 2) if last_target_det is not None else None
hm_index = inds[0][remain_inds]
for i in range(len(id_features)):
id_features[i] = id_features[i][[attack_ind, target_ind]]
i = 0
j = -1
suc = True
ori_hm_index = hm_index[[attack_ind, target_ind]].clone()
ori_hm_index_re = hm_index[[target_ind, attack_ind]].clone()
att_hm_index = None
noise_0 = None
i_0 = None
noise_1 = None
i_1 = None
while True:
i += 1
loss = 0
loss_feat = 0
for id_i, id_feature in enumerate(id_features):
if last_ad_id_features[attack_ind] is not None:
last_ad_id_feature = torch.from_numpy(last_ad_id_features[attack_ind]).unsqueeze(0).cuda()
sim_1 = torch.mm(id_feature[0:0 + 1], last_ad_id_feature.T).squeeze()
sim_2 = torch.mm(id_feature[1:1 + 1], last_ad_id_feature.T).squeeze()
loss_feat += sim_2 - sim_1
if last_ad_id_features[target_ind] is not None:
last_ad_id_feature = torch.from_numpy(last_ad_id_features[target_ind]).unsqueeze(0).cuda()
sim_1 = torch.mm(id_feature[1:1 + 1], last_ad_id_feature.T).squeeze()
sim_2 = torch.mm(id_feature[0:0 + 1], last_ad_id_feature.T).squeeze()
loss_feat += sim_2 - sim_1
if last_ad_id_features[attack_ind] is None and last_ad_id_features[target_ind] is None:
loss_feat += torch.mm(id_feature[0:0 + 1], id_feature[1:1 + 1].T).squeeze()
loss += loss_feat / len(id_features)
if i in [10, 20, 30, 35, 40, 45, 50, 55]:
attack_det_center = torch.stack([hm_index[attack_ind] % W, hm_index[attack_ind] // W]).float()
target_det_center = torch.stack([hm_index[target_ind] % W, hm_index[target_ind] // W]).float()
if last_target_det_center is not None:
attack_center_delta = attack_det_center - last_target_det_center
if torch.max(torch.abs(attack_center_delta)) > 1:
attack_center_delta /= torch.max(torch.abs(attack_center_delta))
attack_det_center = torch.round(attack_det_center - attack_center_delta).int()
hm_index[attack_ind] = attack_det_center[0] + attack_det_center[1] * W
if last_attack_det_center is not None:
target_center_delta = target_det_center - last_attack_det_center
if torch.max(torch.abs(target_center_delta)) > 1:
target_center_delta /= torch.max(torch.abs(target_center_delta))
target_det_center = torch.round(target_det_center - target_center_delta).int()
hm_index[target_ind] = target_det_center[0] + target_det_center[1] * W
att_hm_index = hm_index[[attack_ind, target_ind]].clone()
if att_hm_index is not None:
n_att_hm_index = []
n_ori_hm_index_re = []
for hm_ind in range(len(att_hm_index)):
for n_i in range(3):
for n_j in range(3):
att_hm_ind = att_hm_index[hm_ind].item()
att_hm_ind = att_hm_ind + (n_i - 1) * W + (n_j - 1)
att_hm_ind = max(0, min(H*W-1, att_hm_ind))
n_att_hm_index.append(att_hm_ind)
ori_hm_ind = ori_hm_index_re[hm_ind].item()
ori_hm_ind = ori_hm_ind + (n_i - 1) * W + (n_j - 1)
ori_hm_ind = max(0, min(H * W - 1, ori_hm_ind))
n_ori_hm_index_re.append(ori_hm_ind)
# print(n_att_hm_index, n_ori_hm_index_re)
loss += ((1 - outputs['hm'].view(-1).sigmoid()[n_att_hm_index]) ** 2 *
torch.log(outputs['hm'].view(-1).sigmoid()[n_att_hm_index])).mean()
loss += ((outputs['hm'].view(-1).sigmoid()[n_ori_hm_index_re]) ** 2 *
torch.log(1 - outputs['hm'].view(-1).sigmoid()[n_ori_hm_index_re])).mean()
loss -= smoothL1(outputs['wh'].view(2, -1)[:, n_att_hm_index].T, wh_ori.view(2, -1)[:, n_ori_hm_index_re].T)
loss -= smoothL1(outputs['reg'].view(2, -1)[:, n_att_hm_index].T, reg_ori.view(2, -1)[:, n_ori_hm_index_re].T)
loss.backward()
grad = im_blob.grad
grad /= (grad ** 2).sum().sqrt() + 1e-8
noise += grad
im_blob = torch.clip(im_blob_ori + noise, min=0, max=1).data
id_features_, outputs_, ae_attack_id, ae_target_id, hm_index_ = self.forwardFeatureSg(
im_blob,
img0,
dets,
inds,
remain_inds,
attack_id,
attack_ind,
target_id,
target_ind,
last_info
)
if id_features_ is not None:
id_features = id_features_
if outputs_ is not None:
outputs = outputs_
# if hm_index_ is not None:
# hm_index = hm_index_
if ae_attack_id != attack_id and ae_attack_id is not None:
break
if i > 60:
if noise_0 is not None:
return noise_0, i_0, suc
elif noise_1 is not None:
return noise_1, i_1, suc
if self.opt.no_f_noise:
return None, i, False
else:
suc = False
break
return noise, i, suc
def attack_mt(
self,
im_blob,
img0,
id_features,
dets,
inds,
remain_inds,
last_info,
outputs_ori,
attack_ids,
attack_inds,
target_ids,
target_inds
):
img0_h, img0_w = img0.shape[:2]
H, W = outputs_ori['hm'].size()[2:]
r_w, r_h = img0_w / W, img0_h / H
r_max = max(r_w, r_h)
noise = torch.zeros_like(im_blob)
im_blob_ori = im_blob.clone().data
outputs = outputs_ori
wh_ori = outputs['wh'].clone().data
reg_ori = outputs['reg'].clone().data
i = 0
j = -1
last_ad_id_features = [None for _ in range(len(id_features[0]))]
strack_pool = copy.deepcopy(last_info['last_strack_pool'])
ad_attack_ids = [self.multiple_ori2att[attack_id] for attack_id in attack_ids]
ad_target_ids = [self.multiple_ori2att[target_id] for target_id in target_ids]
last_attack_dets = [None] * len(ad_attack_ids)
last_target_dets = [None] * len(ad_target_ids)
STrack.multi_predict(strack_pool)
for strack in strack_pool:
if strack.track_id in ad_attack_ids:
index = ad_attack_ids.index(strack.track_id)
last_ad_id_features[attack_inds[index]] = strack.smooth_feat
last_attack_dets[index] = torch.from_numpy(strack.tlbr).cuda().float()
last_attack_dets[index][[0, 2]] = (last_attack_dets[index][[0, 2]] - 0.5 * W * (r_w - r_max)) / r_max
last_attack_dets[index][[1, 3]] = (last_attack_dets[index][[1, 3]] - 0.5 * H * (r_h - r_max)) / r_max
if strack.track_id in ad_target_ids:
index = ad_target_ids.index(strack.track_id)
last_ad_id_features[target_inds[index]] = strack.smooth_feat
last_target_dets[index] = torch.from_numpy(strack.tlbr).cuda().float()
last_target_dets[index][[0, 2]] = (last_target_dets[index][[0, 2]] - 0.5 * W * (r_w - r_max)) / r_max
last_target_dets[index][[1, 3]] = (last_target_dets[index][[1, 3]] - 0.5 * H * (r_h - r_max)) / r_max
last_attack_dets_center = []
for det in last_attack_dets:
if det is None:
last_attack_dets_center.append(None)
else:
last_attack_dets_center.append((det[:2] + det[2:]) / 2)
last_target_dets_center = []
for det in last_target_dets:
if det is None:
last_target_dets_center.append(None)
else:
last_target_dets_center.append((det[:2] + det[2:]) / 2)
hm_index = inds[0][remain_inds]
ori_hm_index_re_lst = []
for ind in range(len(attack_ids)):
attack_ind = attack_inds[ind]
target_ind = target_inds[ind]
ori_hm_index_re_lst.append(hm_index[[target_ind, attack_ind]].clone())
att_hm_index_lst = []
best_i = None
best_noise = None
best_fail = np.inf
while True:
i += 1
loss = 0
loss_feat = 0
for index, attack_id in enumerate(attack_ids):
target_id = target_ids[index]
attack_ind = attack_inds[index]
target_ind = target_inds[index]
for id_i, id_feature in enumerate(id_features):
if last_ad_id_features[attack_ind] is not None:
last_ad_id_feature = torch.from_numpy(last_ad_id_features[attack_ind]).unsqueeze(0).cuda()
sim_1 = torch.mm(id_feature[attack_ind:attack_ind + 1], last_ad_id_feature.T).squeeze()
sim_2 = torch.mm(id_feature[target_ind:target_ind + 1], last_ad_id_feature.T).squeeze()
if self.opt.hard_sample > 0:
loss_feat += torch.clamp(sim_2 - sim_1, max=self.opt.hard_sample)
else:
loss_feat += sim_2 - sim_1
if last_ad_id_features[target_ind] is not None:
last_ad_id_feature = torch.from_numpy(last_ad_id_features[target_ind]).unsqueeze(0).cuda()
sim_1 = torch.mm(id_feature[target_ind:target_ind + 1], last_ad_id_feature.T).squeeze()
sim_2 = torch.mm(id_feature[attack_ind:attack_ind + 1], last_ad_id_feature.T).squeeze()
if self.opt.hard_sample > 0:
loss_feat += torch.clamp(sim_2 - sim_1, max=self.opt.hard_sample)
else:
loss_feat += sim_2 - sim_1
if last_ad_id_features[attack_ind] is None and last_ad_id_features[target_ind] is None:
loss_feat += torch.mm(id_feature[attack_ind:attack_ind + 1],
id_feature[target_ind:target_ind + 1].T).squeeze()
if i in [10, 20, 30, 35, 40, 45, 50, 55]:
attack_det_center = torch.stack([hm_index[attack_ind] % W, hm_index[attack_ind] // W]).float()
target_det_center = torch.stack([hm_index[target_ind] % W, hm_index[target_ind] // W]).float()
if last_target_dets_center[index] is not None:
attack_center_delta = attack_det_center - last_target_dets_center[index]
if torch.max(torch.abs(attack_center_delta)) > 1:
attack_center_delta /= torch.max(torch.abs(attack_center_delta))
attack_det_center = torch.round(attack_det_center - attack_center_delta).int()
hm_index[attack_ind] = attack_det_center[0] + attack_det_center[1] * W
if last_attack_dets_center[index] is not None:
target_center_delta = target_det_center - last_attack_dets_center[index]
if torch.max(torch.abs(target_center_delta)) > 1:
target_center_delta /= torch.max(torch.abs(target_center_delta))
target_det_center = torch.round(target_det_center - target_center_delta).int()
hm_index[target_ind] = target_det_center[0] + target_det_center[1] * W
if index == 0:
att_hm_index_lst = []
att_hm_index_lst.append(hm_index[[attack_ind, target_ind]].clone())
loss += loss_feat / len(id_features)
if len(att_hm_index_lst):
assert len(att_hm_index_lst) == len(ori_hm_index_re_lst)
n_att_hm_index_lst = []
n_ori_hm_index_re_lst = []
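# Expand every attacked/original heatmap index to its 3x3 neighborhood
# (clamped inside the H*W map) so the losses below also cover adjacent cells.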
for lst_ind in range(len(att_hm_index_lst)):
for hm_ind in range(len(att_hm_index_lst[lst_ind])):
for n_i in range(3):
for n_j in range(3):
att_hm_ind = att_hm_index_lst[lst_ind][hm_ind].item()
att_hm_ind = att_hm_ind + (n_i - 1) * W + (n_j - 1)
att_hm_ind = max(0, min(H * W - 1, att_hm_ind))
n_att_hm_index_lst.append(att_hm_ind)
ori_hm_ind = ori_hm_index_re_lst[lst_ind][hm_ind].item()
ori_hm_ind = ori_hm_ind + (n_i - 1) * W + (n_j - 1)
ori_hm_ind = max(0, min(H * W - 1, ori_hm_ind))
n_ori_hm_index_re_lst.append(ori_hm_ind)
# print(n_att_hm_index, n_ori_hm_index_re)
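# Gradient ascent on this objective raises the heatmap response at the
# swapped (attack) indices, suppresses it at the original indices, and, via
# the subtracted SmoothL1 terms, pulls wh/reg at the swapped indices toward
# the original box parameters.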
loss += ((1 - outputs['hm'].view(-1).sigmoid()[n_att_hm_index_lst]) ** 2 *
torch.log(outputs['hm'].view(-1).sigmoid()[n_att_hm_index_lst])).mean()
loss += ((outputs['hm'].view(-1).sigmoid()[n_ori_hm_index_re_lst]) ** 2 *
torch.log(1 - outputs['hm'].view(-1).sigmoid()[n_ori_hm_index_re_lst])).mean()
loss -= smoothL1(outputs['wh'].view(2, -1)[:, n_att_hm_index_lst].T, wh_ori.view(2, -1)[:, n_ori_hm_index_re_lst].T)
loss -= smoothL1(outputs['reg'].view(2, -1)[:, n_att_hm_index_lst].T, reg_ori.view(2, -1)[:, n_ori_hm_index_re_lst].T)
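# Ascent step: normalize the input gradient to unit L2 norm, accumulate it
# into the perturbation, and clip the adversarial image back to [0, 1].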
loss.backward()
grad = im_blob.grad
grad /= (grad ** 2).sum().sqrt() + 1e-8
noise += grad
im_blob = torch.clip(im_blob_ori + noise, min=0, max=1).data
id_features, outputs, fail_ids = self.forwardFeatureMt(
im_blob,
img0,
dets,
inds,
remain_inds,
attack_ids,
attack_inds,
target_ids,
target_inds,
last_info
)
if fail_ids is not None:
if fail_ids == 0:
break
elif fail_ids <= best_fail:
best_fail = fail_ids
best_i = i
best_noise = noise.clone()
if i > 60:
if self.opt.no_f_noise:
return None, i, False
else:
if best_i is not None:
noise = best_noise
i = best_i
return noise, i, False
return noise, i, True
def forwardFeatureDet(self, im_blob, img0, dets_, attack_inds, thr=0, vs=()):  # tuple default avoids a shared mutable argument
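'''Re-run the detector on (a possibly perturbed) im_blob and check whether the
attacked detections are still found. A detection counts as a failure when its
IoU with the Hungarian-matched new box exceeds thr (or, when vs is given, when
it has not moved in the expected direction). Returns (output, attack_succeeded, fail_n).'''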
width = img0.shape[1]
height = img0.shape[0]
inp_height = im_blob.shape[2]
inp_width = im_blob.shape[3]
c = np.array([width / 2., height / 2.], dtype=np.float32)
s = max(float(inp_width) / float(inp_height) * height, width) * 1.0
meta = {'c': c, 's': s,
'out_height': inp_height // self.opt.down_ratio,
'out_width': inp_width // self.opt.down_ratio}
im_blob.requires_grad = True
self.model.zero_grad()
output = self.model(im_blob)[-1]
hm = output['hm'].sigmoid()
wh = output['wh']
reg = output['reg'] if self.opt.reg_offset else None
dets_raw, inds = mot_decode(hm, wh, reg=reg, cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K)
dets = self.post_process(dets_raw.clone(), meta)
dets = self.merge_outputs([dets])[1]
remain_inds = dets[:, 4] > self.opt.conf_thres
dets = dets[remain_inds]
ious = bbox_ious(np.ascontiguousarray(dets_[:, :4], dtype=np.float64),
np.ascontiguousarray(dets[:, :4], dtype=np.float64))
row_inds, col_inds = linear_sum_assignment(-ious)
if not isinstance(thr, list):
thr = [thr for _ in range(len(attack_inds))]
fail_n = 0
for i in range(len(row_inds)):
if row_inds[i] in attack_inds:
if ious[row_inds[i], col_inds[i]] > thr[attack_inds.index(row_inds[i])]:
fail_n += 1
elif len(vs):
d_o = dets_[row_inds[i], :4]
d_a = dets[col_inds[i], :4]
c_o = (d_o[[0, 1]] + d_o[[2, 3]]) / 2
c_a = (d_a[[0, 1]] + d_a[[2, 3]]) / 2
c_d = ((c_a - c_o) / 4).astype(np.int64) * vs[0]
if c_d[0] >= 0 or c_d[1] >= 0:
fail_n += 1
return output, fail_n == 0, fail_n
def forwardFeatureSg(self, im_blob, img0, dets_, inds_, remain_inds_, attack_id, attack_ind, target_id, target_ind,
last_info, grad=True):
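'''Single-target feature forward: re-detect on im_blob, match the attack (and
optional target) boxes to the new detections via a Hungarian assignment over
IoU, then replay the three-stage association on a copy of the saved tracker
state to recover which track ids the adversarial detections receive.
Returns (id_features_, output, ae_attack_id, ae_target_id, hm_index).'''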
width = img0.shape[1]
height = img0.shape[0]
inp_height = im_blob.shape[2]
inp_width = im_blob.shape[3]
c = np.array([width / 2., height / 2.], dtype=np.float32)
s = max(float(inp_width) / float(inp_height) * height, width) * 1.0
meta = {'c': c, 's': s,
'out_height': inp_height // self.opt.down_ratio,
'out_width': inp_width // self.opt.down_ratio}
im_blob.requires_grad = True
self.model.zero_grad()
if grad:
output = self.model(im_blob)[-1]
else:
with torch.no_grad():
output = self.model(im_blob)[-1]
hm = output['hm'].sigmoid()
wh = output['wh']
id_feature = output['id']
id_feature = F.normalize(id_feature, dim=1)
reg = output['reg'] if self.opt.reg_offset else None
dets_raw, inds = mot_decode(hm, wh, reg=reg, cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K)
dets = self.post_process(dets_raw.clone(), meta)
dets = self.merge_outputs([dets])[1]
remain_inds = dets[:, 4] > self.opt.conf_thres
dets = dets[remain_inds]
if target_ind is None:
ious = bbox_ious(np.ascontiguousarray(dets_[[attack_ind], :4], dtype=np.float64),
np.ascontiguousarray(dets[:, :4], dtype=np.float64))
else:
ious = bbox_ious(np.ascontiguousarray(dets_[[attack_ind, target_ind], :4], dtype=np.float64),
np.ascontiguousarray(dets[:, :4], dtype=np.float64))
# det_ind = np.argmax(ious, axis=1)
row_inds, col_inds = linear_sum_assignment(-ious)
match = True
if target_ind is None:
if ious[row_inds[0], col_inds[0]] < 0.8:
dets = dets_
inds = inds_
remain_inds = remain_inds_
match = False
else:
if len(col_inds) < 2 or ious[row_inds[0], col_inds[0]] < 0.6 or ious[row_inds[1], col_inds[1]] < 0.6:
dets = dets_
inds = inds_
remain_inds = remain_inds_
match = False
# assert match
id_features = []
for i in range(3):
for j in range(3):
id_feature_exp = _tranpose_and_gather_feat_expand(id_feature, inds, bias=(i - 1, j - 1)).squeeze(0)
id_features.append(id_feature_exp)
for i in range(len(id_features)):
id_features[i] = id_features[i][remain_inds]
ae_attack_id = None
ae_target_id = None
if not match:
for i in range(len(id_features)):
if target_ind is not None:
id_features[i] = id_features[i][[attack_ind, target_ind]]
else:
id_features[i] = id_features[i][[attack_ind]]
return id_features, output, ae_attack_id, ae_target_id, None
if row_inds[0] == 0:
ae_attack_ind = col_inds[0]
ae_target_ind = col_inds[1] if target_ind is not None else None
else:
ae_attack_ind = col_inds[1]
ae_target_ind = col_inds[0] if target_ind is not None else None
# ae_attack_ind = det_ind[0]
# ae_target_ind = det_ind[1] if target_ind is not None else None
hm_index = None
# if target_ind is not None:
# hm_index[[attack_ind, target_ind]] = hm_index[[ae_attack_ind, ae_target_ind]]
id_features_ = [None for _ in range(len(id_features))]
for i in range(len(id_features)):
if target_ind is None:
id_features_[i] = id_features[i][[ae_attack_ind]]
else:
try:
id_features_[i] = id_features[i][[ae_attack_ind, ae_target_ind]]
except (IndexError, TypeError):
# Debugging hook kept from the original code: drop into pdb when the
# matched adversarial indices are missing or out of range.
import pdb; pdb.set_trace()
id_feature = _tranpose_and_gather_feat_expand(id_feature, inds)
id_feature = id_feature.squeeze(0)
id_feature = id_feature[remain_inds]
id_feature = id_feature.detach().cpu().numpy()
if len(dets) > 0:
'''Detections'''
detections = [STrack(STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f, 30) for
(tlbrs, f) in zip(dets[:, :5], id_feature)]
else:
detections = []
unconfirmed = copy.deepcopy(last_info['last_unconfirmed'])
strack_pool = copy.deepcopy(last_info['last_strack_pool'])
kalman_filter = copy.deepcopy(last_info['kalman_filter'])
STrack.multi_predict(strack_pool)
dists = matching.embedding_distance(strack_pool, detections)
dists = matching.fuse_motion(kalman_filter, dists, strack_pool, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
track = strack_pool[itracked]
det = detections[idet]
if idet == ae_attack_ind:
ae_attack_id = track.track_id
elif idet == ae_target_ind:
ae_target_id = track.track_id
# if ae_attack_id is not None and ae_target_id is not None:
# return id_features_, output, ae_attack_id, ae_target_id
''' Step 3: Second association, with IOU'''
for i, idet in enumerate(u_detection):
if idet == ae_attack_ind:
ae_attack_ind = i
elif idet == ae_target_ind:
ae_target_ind = i
detections = [detections[i] for i in u_detection]
r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
dists = matching.iou_distance(r_tracked_stracks, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.5)
for itracked, idet in matches:
track = r_tracked_stracks[itracked]
det = detections[idet]
if idet == ae_attack_ind:
ae_attack_id = track.track_id
elif idet == ae_target_ind:
ae_target_id = track.track_id
# if ae_attack_id is not None and ae_target_id is not None:
# return id_features_, output, ae_attack_id, ae_target_id
'''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
for i, idet in enumerate(u_detection):
if idet == ae_attack_ind:
ae_attack_ind = i
elif idet == ae_target_ind:
ae_target_ind = i
detections = [detections[i] for i in u_detection]
dists = matching.iou_distance(unconfirmed, detections)
matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
track = unconfirmed[itracked]
if idet == ae_attack_ind:
ae_attack_id = track.track_id
elif idet == ae_target_ind:
ae_target_id = track.track_id
return id_features_, output, ae_attack_id, ae_target_id, hm_index
def forwardFeatureMt(self, im_blob, img0, dets_, inds_, remain_inds_, attack_ids, attack_inds, target_ids,
target_inds, last_info, grad=True):
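'''Multi-target counterpart of forwardFeatureSg: match all attacked boxes to
the re-detected boxes and replay the association. fail_ids counts attacked
detections that are still assigned their mapped original ids (0 means every
targeted id switch succeeded); None is returned when the boxes cannot be
matched at all.'''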
width = img0.shape[1]
height = img0.shape[0]
inp_height = im_blob.shape[2]
inp_width = im_blob.shape[3]
c = np.array([width / 2., height / 2.], dtype=np.float32)
s = max(float(inp_width) / float(inp_height) * height, width) * 1.0
meta = {'c': c, 's': s,
'out_height': inp_height // self.opt.down_ratio,
'out_width': inp_width // self.opt.down_ratio}
im_blob.requires_grad = True
self.model.zero_grad()
if grad:
output = self.model(im_blob)[-1]
else:
with torch.no_grad():
output = self.model(im_blob)[-1]
hm = output['hm'].sigmoid()
wh = output['wh']
id_feature = output['id']
id_feature = F.normalize(id_feature, dim=1)
reg = output['reg'] if self.opt.reg_offset else None
dets_raw, inds = mot_decode(hm, wh, reg=reg, cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K)
dets = self.post_process(dets_raw.clone(), meta)
dets = self.merge_outputs([dets])[1]
dets_index = [i for i in range(len(dets))]
remain_inds = dets[:, 4] > self.opt.conf_thres
dets = dets[remain_inds]
ious = bbox_ious(np.ascontiguousarray(dets_[:, :4], dtype=np.float64),
np.ascontiguousarray(dets[:, :4], dtype=np.float64))
row_inds, col_inds = linear_sum_assignment(-ious)
match = True
if target_inds is not None:
for index, attack_ind in enumerate(attack_inds):
target_ind = target_inds[index]
if attack_ind not in row_inds or target_ind not in row_inds:
match = False
break
att_index = row_inds.tolist().index(attack_ind)
tar_index = row_inds.tolist().index(target_ind)
if ious[attack_ind, col_inds[att_index]] < 0.6 or ious[target_ind, col_inds[tar_index]] < 0.6:
match = False
break
else:
for index, attack_ind in enumerate(attack_inds):
if attack_ind not in row_inds:
match = False
break
att_index = row_inds.tolist().index(attack_ind)
if ious[attack_ind, col_inds[att_index]] < 0.8:
match = False
break
if not match:
dets = dets_
inds = inds_
remain_inds = remain_inds_
# assert match
id_features = []
for i in range(3):
for j in range(3):
id_feature_exp = _tranpose_and_gather_feat_expand(id_feature, inds, bias=(i - 1, j - 1)).squeeze(0)
id_features.append(id_feature_exp)
for i in range(len(id_features)):
id_features[i] = id_features[i][remain_inds]
fail_ids = 0
if not match:
return id_features, output, None
ae_attack_inds = []
ae_attack_ids = []
for i in range(len(row_inds)):
if ious[row_inds[i], col_inds[i]] > 0.6:
if row_inds[i] in attack_inds:
ae_attack_inds.append(col_inds[i])
index = attack_inds.tolist().index(row_inds[i])
ae_attack_ids.append(self.multiple_ori2att[attack_ids[index]])
# ae_attack_inds = [col_inds[row_inds == attack_ind] for attack_ind in attack_inds]
# ae_attack_inds = np.concatenate(ae_attack_inds)
id_features_ = [torch.zeros([len(dets_), id_features[0].size(1)]).to(id_features[0].device) for _ in range(len(id_features))]
for i in range(len(id_features)):  # the 3x3 bias grid yields 9 feature maps
id_features_[i][row_inds] = id_features[i][col_inds]
id_feature = _tranpose_and_gather_feat_expand(id_feature, inds)
id_feature = id_feature.squeeze(0)
id_feature = id_feature[remain_inds]
id_feature = id_feature.detach().cpu().numpy()
if len(dets) > 0:
'''Detections'''
detections = [STrack(STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f, 30) for
(tlbrs, f) in zip(dets[:, :5], id_feature)]
else:
detections = []
unconfirmed = copy.deepcopy(last_info['last_unconfirmed'])
strack_pool = copy.deepcopy(last_info['last_strack_pool'])
kalman_filter = copy.deepcopy(last_info['kalman_filter'])
STrack.multi_predict(strack_pool)
dists = matching.embedding_distance(strack_pool, detections)
dists = matching.fuse_motion(kalman_filter, dists, strack_pool, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
track = strack_pool[itracked]
det = detections[idet]
if dets_index[idet] in ae_attack_inds:
index = ae_attack_inds.index(dets_index[idet])
if track.track_id == ae_attack_ids[index]:
fail_ids += 1
''' Step 3: Second association, with IOU'''
dets_index = [dets_index[i] for i in u_detection]
detections = [detections[i] for i in u_detection]
r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
dists = matching.iou_distance(r_tracked_stracks, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.5)
for itracked, idet in matches:
track = r_tracked_stracks[itracked]
det = detections[idet]
if dets_index[idet] in ae_attack_inds:
index = ae_attack_inds.index(dets_index[idet])
if track.track_id == ae_attack_ids[index]:
fail_ids += 1
'''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
dets_index = [dets_index[i] for i in u_detection]
detections = [detections[i] for i in u_detection]
dists = matching.iou_distance(unconfirmed, detections)
matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
track = unconfirmed[itracked]
if dets_index[idet] in ae_attack_inds:
index = ae_attack_inds.index(dets_index[idet])
if track.track_id == ae_attack_ids[index]:
fail_ids += 1
return id_features_, output, fail_ids
def CheckFit(self, dets, id_feature, attack_ids, attack_inds):
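'''Replay the association on the saved adversarial tracker state and return the
indices of the attack candidates whose boxes are re-identified under the same
(possibly ori->att remapped) track id with IoU > 0.9, i.e. the candidates the
attack can meaningfully target this frame.'''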
ad_attack_ids_ = [self.multiple_ori2att[attack_id] for attack_id in attack_ids] \
if self.opt.attack == 'multiple' else attack_ids
attack_dets = dets[attack_inds, :4]
ad_attack_dets = []
ad_attack_ids = []
if len(dets) > 0:
'''Detections'''
detections = [STrack(STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f, 30) for
(tlbrs, f) in zip(dets[:, :5], id_feature)]
else:
detections = []
unconfirmed = copy.deepcopy(self.ad_last_info['last_unconfirmed'])
strack_pool = copy.deepcopy(self.ad_last_info['last_strack_pool'])
kalman_filter = copy.deepcopy(self.ad_last_info['kalman_filter'])
STrack.multi_predict(strack_pool)
dists = matching.embedding_distance(strack_pool, detections)
dists = matching.fuse_motion(kalman_filter, dists, strack_pool, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
track = strack_pool[itracked]
det = detections[idet]
if track.track_id in ad_attack_ids_:
ad_attack_dets.append(det.tlbr)
ad_attack_ids.append(track.track_id)
''' Step 3: Second association, with IOU'''
detections = [detections[i] for i in u_detection]
r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
dists = matching.iou_distance(r_tracked_stracks, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.5)
for itracked, idet in matches:
track = r_tracked_stracks[itracked]
det = detections[idet]
if track.track_id in ad_attack_ids_:
ad_attack_dets.append(det.tlbr)
ad_attack_ids.append(track.track_id)
'''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
detections = [detections[i] for i in u_detection]
dists = matching.iou_distance(unconfirmed, detections)
matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
track = unconfirmed[itracked]
det = detections[idet]
if track.track_id in ad_attack_ids_:
ad_attack_dets.append(det.tlbr)
ad_attack_ids.append(track.track_id)
if len(ad_attack_dets) == 0:
return []
ori_dets = np.array(attack_dets)
ad_dets = np.array(ad_attack_dets)
ious = bbox_ious(ori_dets.astype(np.float64), ad_dets.astype(np.float64))
row_ind, col_ind = linear_sum_assignment(-ious)
attack_index = []
for i in range(len(row_ind)):
if self.opt.attack == 'multiple':
if ious[row_ind[i], col_ind[i]] > 0.9 and self.multiple_ori2att[attack_ids[row_ind[i]]] == ad_attack_ids[col_ind[i]]:
attack_index.append(row_ind[i])
else:
if ious[row_ind[i], col_ind[i]] > 0.9:
attack_index.append(row_ind[i])
return attack_index
def update_attack_sg(self, im_blob, img0, **kwargs):
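'''Single-target attack step: run the normal association to obtain the original
tracks, pick the highest-IoU (or, at zero IoU, the nearest) neighbor of the
attacked id as the target, generate adversarial noise with attack_sg (or
attack_sg_random when opt.rand is set), and track again on the perturbed image.
Returns (output_stracks_ori, output_stracks_att, adImg, noise, l2_dis, suc).'''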
self.frame_id_ += 1
attack_id = kwargs['attack_id']
self_track_id_ori = kwargs.get('track_id', {}).get('origin', None)
self_track_id_att = kwargs.get('track_id', {}).get('attack', None)
activated_starcks = []
refind_stracks = []
lost_stracks = []
removed_stracks = []
width = img0.shape[1]
height = img0.shape[0]
inp_height = im_blob.shape[2]
inp_width = im_blob.shape[3]
c = np.array([width / 2., height / 2.], dtype=np.float32)
s = max(float(inp_width) / float(inp_height) * height, width) * 1.0
meta = {'c': c, 's': s,
'out_height': inp_height // self.opt.down_ratio,
'out_width': inp_width // self.opt.down_ratio}
''' Step 1: Network forward, get detections & embeddings'''
# with torch.no_grad():
im_blob.requires_grad = True
self.model.zero_grad()
output = self.model(im_blob)[-1]
hm = output['hm'].sigmoid()
wh = output['wh']
id_feature = output['id']
id_feature = F.normalize(id_feature, dim=1)
reg = output['reg'] if self.opt.reg_offset else None
dets_raw, inds = mot_decode(hm, wh, reg=reg, cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K)
id_features = []
for i in range(3):
for j in range(3):
id_feature_exp = _tranpose_and_gather_feat_expand(id_feature, inds, bias=(i - 1, j - 1)).squeeze(0)
id_features.append(id_feature_exp)
id_feature = _tranpose_and_gather_feat_expand(id_feature, inds)
id_feature = id_feature.squeeze(0)
dets = self.post_process(dets_raw.clone(), meta)
dets = self.merge_outputs([dets])[1]
remain_inds = dets[:, 4] > self.opt.conf_thres
dets = dets[remain_inds]
id_feature = id_feature[remain_inds]
for i in range(len(id_features)):
id_features[i] = id_features[i][remain_inds]
id_feature = id_feature.detach().cpu().numpy()
last_id_features = [None for _ in range(len(dets))]
last_ad_id_features = [None for _ in range(len(dets))]
dets_index = [i for i in range(len(dets))]
dets_ids = [None for _ in range(len(dets))]
tracks_ad = []
# import pdb; pdb.set_trace()
# vis
'''
for i in range(0, dets.shape[0]):
bbox = dets[i][0:4]
cv2.rectangle(img0, (bbox[0], bbox[1]),
(bbox[2], bbox[3]),
(0, 255, 0), 2)
cv2.imshow('dets', img0)
cv2.waitKey(0)
id0 = id0-1
'''
if len(dets) > 0:
'''Detections'''
detections = [STrack(STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f, 30) for
(tlbrs, f) in zip(dets[:, :5], id_feature)]
else:
detections = []
''' Add newly detected tracklets to tracked_stracks'''
unconfirmed = []
tracked_stracks = [] # type: list[STrack]
for track in self.tracked_stracks_:
if not track.is_activated:
unconfirmed.append(track)
else:
tracked_stracks.append(track)
''' Step 2: First association, with embedding'''
strack_pool = joint_stracks(tracked_stracks, self.lost_stracks_)
STrack.multi_predict(strack_pool)
dists = matching.embedding_distance(strack_pool, detections)
# dists = matching.gate_cost_matrix(self.kalman_filter, dists, strack_pool, detections)
dists = matching.fuse_motion(self.kalman_filter_, dists, strack_pool, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.7)
# import pdb; pdb.set_trace()
for itracked, idet in matches:
track = strack_pool[itracked]
det = detections[idet]
assert last_id_features[dets_index[idet]] is None
assert last_ad_id_features[dets_index[idet]] is None
last_id_features[dets_index[idet]] = track.smooth_feat
last_ad_id_features[dets_index[idet]] = track.smooth_feat_ad
tracks_ad.append((track, dets_index[idet]))
if track.state == TrackState.Tracked:
track.update(detections[idet], self.frame_id_)
activated_starcks.append(track)
else:
track.re_activate_(det, self.frame_id_, new_id=False)
refind_stracks.append(track)
dets_ids[dets_index[idet]] = track.track_id
''' Step 3: Second association, with IOU'''
dets_index = [dets_index[i] for i in u_detection]
detections = [detections[i] for i in u_detection]
r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
dists = matching.iou_distance(r_tracked_stracks, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.5)
for itracked, idet in matches:
track = r_tracked_stracks[itracked]
det = detections[idet]
assert last_id_features[dets_index[idet]] is None
assert last_ad_id_features[dets_index[idet]] is None
last_id_features[dets_index[idet]] = track.smooth_feat
last_ad_id_features[dets_index[idet]] = track.smooth_feat_ad
tracks_ad.append((track, dets_index[idet]))
if track.state == TrackState.Tracked:
track.update(det, self.frame_id_)
activated_starcks.append(track)
else:
track.re_activate_(det, self.frame_id_, new_id=False)
refind_stracks.append(track)
dets_ids[dets_index[idet]] = track.track_id
for it in u_track:
track = r_tracked_stracks[it]
if not track.state == TrackState.Lost:
track.mark_lost()
lost_stracks.append(track)
'''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
dets_index = [dets_index[i] for i in u_detection]
detections = [detections[i] for i in u_detection]
dists = matching.iou_distance(unconfirmed, detections)
matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
assert last_id_features[dets_index[idet]] is None
assert last_ad_id_features[dets_index[idet]] is None
last_id_features[dets_index[idet]] = unconfirmed[itracked].smooth_feat
last_ad_id_features[dets_index[idet]] = unconfirmed[itracked].smooth_feat_ad
tracks_ad.append((unconfirmed[itracked], dets_index[idet]))
unconfirmed[itracked].update(detections[idet], self.frame_id_)
activated_starcks.append(unconfirmed[itracked])
dets_ids[dets_index[idet]] = unconfirmed[itracked].track_id
for it in u_unconfirmed:
track = unconfirmed[it]
track.mark_removed()
removed_stracks.append(track)
""" Step 4: Init new stracks"""
for inew in u_detection:
track = detections[inew]
if track.score < self.det_thresh:
continue
track.activate_(self.kalman_filter_, self.frame_id_, track_id=self_track_id_ori)
activated_starcks.append(track)
dets_ids[dets_index[inew]] = track.track_id
""" Step 5: Update state"""
for track in self.lost_stracks_:
if self.frame_id_ - track.end_frame > self.max_time_lost:
track.mark_removed()
removed_stracks.append(track)
# print('Remained match {} s'.format(t4-t3))
self.tracked_stracks_ = [t for t in self.tracked_stracks_ if t.state == TrackState.Tracked]
self.tracked_stracks_ = joint_stracks(self.tracked_stracks_, activated_starcks)
self.tracked_stracks_ = joint_stracks(self.tracked_stracks_, refind_stracks)
self.lost_stracks_ = sub_stracks(self.lost_stracks_, self.tracked_stracks_)
self.lost_stracks_.extend(lost_stracks)
self.lost_stracks_ = sub_stracks(self.lost_stracks_, self.removed_stracks_)
self.removed_stracks_.extend(removed_stracks)
self.tracked_stracks_, self.lost_stracks_ = remove_duplicate_stracks(self.tracked_stracks_, self.lost_stracks_)
# get scores of lost tracks
output_stracks_ori = [track for track in self.tracked_stracks_ if track.is_activated]
logger.debug('===========Frame {}=========='.format(self.frame_id_))
logger.debug('Activated: {}'.format([track.track_id for track in activated_starcks]))
logger.debug('Refind: {}'.format([track.track_id for track in refind_stracks]))
logger.debug('Lost: {}'.format([track.track_id for track in lost_stracks]))
logger.debug('Removed: {}'.format([track.track_id for track in removed_stracks]))
noise = None
suc = 0
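# The attack only fires after the attacked id has been tracked for FRAME_THR
# frames and a sufficiently overlapping (or nearest) target detection exists.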
for attack_ind, track_id in enumerate(dets_ids):
if track_id == attack_id:
if self.opt.attack_id > 0:
if not hasattr(self, f'frames_{attack_id}'):
setattr(self, f'frames_{attack_id}', 0)
if getattr(self, f'frames_{attack_id}') < self.FRAME_THR:
setattr(self, f'frames_{attack_id}', getattr(self, f'frames_{attack_id}') + 1)
break
fit = self.CheckFit(dets, id_feature, [attack_id], [attack_ind])
ious = bbox_ious(np.ascontiguousarray(dets[:, :4], dtype=np.float64),
np.ascontiguousarray(dets[:, :4], dtype=np.float64))
ious[range(len(dets)), range(len(dets))] = 0
dis = bbox_dis(np.ascontiguousarray(dets[:, :4], dtype=np.float64),
np.ascontiguousarray(dets[:, :4], dtype=np.float64))
dis[range(len(dets)), range(len(dets))] = np.inf
target_ind = np.argmax(ious[attack_ind])
if ious[attack_ind][target_ind] >= self.attack_iou_thr:
if ious[attack_ind][target_ind] == 0:
target_ind = np.argmin(dis[attack_ind])
target_id = dets_ids[target_ind]
if fit:
if self.opt.rand:
noise, attack_iter, suc = self.attack_sg_random(
im_blob,
img0,
id_features,
dets,
inds,
remain_inds,
last_info=self.ad_last_info,
outputs_ori=output,
attack_id=attack_id,
attack_ind=attack_ind,
target_id=target_id,
target_ind=target_ind
)
else:
noise, attack_iter, suc = self.attack_sg(
im_blob,
img0,
id_features,
dets,
inds,
remain_inds,
last_info=self.ad_last_info,
outputs_ori=output,
attack_id=attack_id,
attack_ind=attack_ind,
target_id=target_id,
target_ind=target_ind
)
self.attack_iou_thr = 0
if suc:
suc = 1
print(
f'attack id: {attack_id}\tattack frame {self.frame_id_}: SUCCESS\tl2 distance: {(noise ** 2).sum().sqrt().item()}\titeration: {attack_iter}')
else:
suc = 2
print(
f'attack id: {attack_id}\tattack frame {self.frame_id_}: FAIL\tl2 distance: {(noise ** 2).sum().sqrt().item() if noise is not None else None}\titeration: {attack_iter}')
else:
suc = 3
if ious[attack_ind][target_ind] == 0:
self.temp_i += 1
if self.temp_i >= 10:
self.attack_iou_thr = self.ATTACK_IOU_THR
else:
self.temp_i = 0
else:
self.attack_iou_thr = self.ATTACK_IOU_THR
if fit:
suc = 2
if noise is not None:
l2_dis = (noise ** 2).sum().sqrt().item()
adImg = torch.clip(im_blob + noise, min=0, max=1)
noise = self.recoverNoise(noise, img0)
# adImg = np.clip(img0 + noise, a_min=0, a_max=255)
# noise = adImg - img0
noise = (noise - np.min(noise)) / (np.max(noise) - np.min(noise))
noise = (noise * 255).astype(np.uint8)
else:
l2_dis = None
adImg = im_blob
output_stracks_att = self.update(adImg, img0, track_id=self_track_id_att)
adImg = self.recoverNoise(adImg.detach(), img0)
return output_stracks_ori, output_stracks_att, adImg, noise, l2_dis, suc
def update_attack_mt(self, im_blob, img0, **kwargs):
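'''Multi-target attack step: like update_attack_sg, but attacks every tracked
id with a qualifying neighbor, using the ori->att id mapping maintained
between the clean and adversarial streams. Returns
(output_stracks_ori, output_stracks_att, adImg, noise, l2_dis).'''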
self.frame_id_ += 1
activated_starcks = []
refind_stracks = []
lost_stracks = []
removed_stracks = []
width = img0.shape[1]
height = img0.shape[0]
inp_height = im_blob.shape[2]
inp_width = im_blob.shape[3]
c = np.array([width / 2., height / 2.], dtype=np.float32)
s = max(float(inp_width) / float(inp_height) * height, width) * 1.0
meta = {'c': c, 's': s,
'out_height': inp_height // self.opt.down_ratio,
'out_width': inp_width // self.opt.down_ratio}
''' Step 1: Network forward, get detections & embeddings'''
# with torch.no_grad():
im_blob.requires_grad = True
self.model.zero_grad()
output = self.model(im_blob)[-1]
hm = output['hm'].sigmoid()
wh = output['wh']
id_feature = output['id']
id_feature = F.normalize(id_feature, dim=1)
reg = output['reg'] if self.opt.reg_offset else None
dets_raw, inds = mot_decode(hm, wh, reg=reg, cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K)
id_features = []
for i in range(3):
for j in range(3):
id_feature_exp = _tranpose_and_gather_feat_expand(id_feature, inds, bias=(i - 1, j - 1)).squeeze(0)
id_features.append(id_feature_exp)
id_feature = _tranpose_and_gather_feat_expand(id_feature, inds)
id_feature = id_feature.squeeze(0)
dets = self.post_process(dets_raw.clone(), meta)
dets = self.merge_outputs([dets])[1]
remain_inds = dets[:, 4] > self.opt.conf_thres
dets = dets[remain_inds]
id_feature = id_feature[remain_inds]
for i in range(len(id_features)):
id_features[i] = id_features[i][remain_inds]
id_feature = id_feature.detach().cpu().numpy()
last_id_features = [None for _ in range(len(dets))]
last_ad_id_features = [None for _ in range(len(dets))]
dets_index = [i for i in range(len(dets))]
dets_ids = [None for _ in range(len(dets))]
tracks_ad = []
# import pdb; pdb.set_trace()
# vis
'''
for i in range(0, dets.shape[0]):
bbox = dets[i][0:4]
cv2.rectangle(img0, (bbox[0], bbox[1]),
(bbox[2], bbox[3]),
(0, 255, 0), 2)
cv2.imshow('dets', img0)
cv2.waitKey(0)
id0 = id0-1
'''
if len(dets) > 0:
'''Detections'''
detections = [STrack(STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f, 30) for
(tlbrs, f) in zip(dets[:, :5], id_feature)]
else:
detections = []
''' Add newly detected tracklets to tracked_stracks'''
unconfirmed = []
tracked_stracks = [] # type: list[STrack]
for track in self.tracked_stracks_:
if not track.is_activated:
unconfirmed.append(track)
else:
tracked_stracks.append(track)
''' Step 2: First association, with embedding'''
strack_pool = joint_stracks(tracked_stracks, self.lost_stracks_)
STrack.multi_predict(strack_pool)
dists = matching.embedding_distance(strack_pool, detections)
dists = matching.fuse_motion(self.kalman_filter_, dists, strack_pool, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.7)
# import pdb; pdb.set_trace()
for itracked, idet in matches:
track = strack_pool[itracked]
det = detections[idet]
assert last_id_features[dets_index[idet]] is None
assert last_ad_id_features[dets_index[idet]] is None
last_id_features[dets_index[idet]] = track.smooth_feat
last_ad_id_features[dets_index[idet]] = track.smooth_feat_ad
tracks_ad.append((track, dets_index[idet]))
if track.state == TrackState.Tracked:
track.update(detections[idet], self.frame_id_)
activated_starcks.append(track)
else:
track.re_activate_(det, self.frame_id_, new_id=False)
refind_stracks.append(track)
dets_ids[dets_index[idet]] = track.track_id
''' Step 3: Second association, with IOU'''
dets_index = [dets_index[i] for i in u_detection]
detections = [detections[i] for i in u_detection]
r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
dists = matching.iou_distance(r_tracked_stracks, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.5)
for itracked, idet in matches:
track = r_tracked_stracks[itracked]
det = detections[idet]
assert last_id_features[dets_index[idet]] is None
assert last_ad_id_features[dets_index[idet]] is None
last_id_features[dets_index[idet]] = track.smooth_feat
last_ad_id_features[dets_index[idet]] = track.smooth_feat_ad
tracks_ad.append((track, dets_index[idet]))
if track.state == TrackState.Tracked:
track.update(det, self.frame_id_)
activated_starcks.append(track)
else:
track.re_activate_(det, self.frame_id_, new_id=False)
refind_stracks.append(track)
dets_ids[dets_index[idet]] = track.track_id
for it in u_track:
track = r_tracked_stracks[it]
if not track.state == TrackState.Lost:
track.mark_lost()
lost_stracks.append(track)
'''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
dets_index = [dets_index[i] for i in u_detection]
detections = [detections[i] for i in u_detection]
dists = matching.iou_distance(unconfirmed, detections)
matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
assert last_id_features[dets_index[idet]] is None
assert last_ad_id_features[dets_index[idet]] is None
last_id_features[dets_index[idet]] = unconfirmed[itracked].smooth_feat
last_ad_id_features[dets_index[idet]] = unconfirmed[itracked].smooth_feat_ad
tracks_ad.append((unconfirmed[itracked], dets_index[idet]))
unconfirmed[itracked].update(detections[idet], self.frame_id_)
activated_starcks.append(unconfirmed[itracked])
dets_ids[dets_index[idet]] = unconfirmed[itracked].track_id
for it in u_unconfirmed:
track = unconfirmed[it]
track.mark_removed()
removed_stracks.append(track)
""" Step 4: Init new stracks"""
for inew in u_detection:
track = detections[inew]
if track.score < self.det_thresh:
continue
track.activate_(self.kalman_filter_, self.frame_id_)
activated_starcks.append(track)
dets_ids[dets_index[inew]] = track.track_id
""" Step 5: Update state"""
for track in self.lost_stracks_:
if self.frame_id_ - track.end_frame > self.max_time_lost:
track.mark_removed()
removed_stracks.append(track)
# print('Remained match {} s'.format(t4-t3))
self.tracked_stracks_ = [t for t in self.tracked_stracks_ if t.state == TrackState.Tracked]
self.tracked_stracks_ = joint_stracks(self.tracked_stracks_, activated_starcks)
self.tracked_stracks_ = joint_stracks(self.tracked_stracks_, refind_stracks)
self.lost_stracks_ = sub_stracks(self.lost_stracks_, self.tracked_stracks_)
self.lost_stracks_.extend(lost_stracks)
self.lost_stracks_ = sub_stracks(self.lost_stracks_, self.removed_stracks_)
self.removed_stracks_.extend(removed_stracks)
self.tracked_stracks_, self.lost_stracks_ = remove_duplicate_stracks(self.tracked_stracks_, self.lost_stracks_)
# get scores of lost tracks
output_stracks_ori = [track for track in self.tracked_stracks_ if track.is_activated]
id_set = set([track.track_id for track in output_stracks_ori])
for i in range(len(dets_ids)):
if dets_ids[i] is not None and dets_ids[i] not in id_set:
dets_ids[i] = None
output_stracks_ori_ind = []
for ind, track in enumerate(output_stracks_ori):
if track.track_id not in self.multiple_ori_ids:
self.multiple_ori_ids[track.track_id] = 0
self.multiple_ori_ids[track.track_id] += 1
if self.multiple_ori_ids[track.track_id] <= self.FRAME_THR:
output_stracks_ori_ind.append(ind)
logger.debug('===========Frame {}=========='.format(self.frame_id_))
logger.debug('Activated: {}'.format([track.track_id for track in activated_starcks]))
logger.debug('Refind: {}'.format([track.track_id for track in refind_stracks]))
logger.debug('Lost: {}'.format([track.track_id for track in lost_stracks]))
logger.debug('Removed: {}'.format([track.track_id for track in removed_stracks]))
attack_ids = []
target_ids = []
attack_inds = []
target_inds = []
noise = None
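# Collect attack/target pairs: skip ids still in the FRAME_THR warm-up or
# without a mapped adversarial id; prefer the highest-IoU neighbor and fall
# back to the nearest box for ids already flagged in low_iou_ids.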
if len(dets) > 0:
ious = bbox_ious(np.ascontiguousarray(dets[:, :4], dtype=np.float64),
np.ascontiguousarray(dets[:, :4], dtype=np.float64))
ious[range(len(dets)), range(len(dets))] = 0
ious_inds = np.argmax(ious, axis=1)
dis = bbox_dis(np.ascontiguousarray(dets[:, :4], dtype=np.float64),
np.ascontiguousarray(dets[:, :4], dtype=np.float64))
dis[range(len(dets)), range(len(dets))] = np.inf
dis_inds = np.argmin(dis, axis=1)
for attack_ind, track_id in enumerate(dets_ids):
if track_id is None or self.multiple_ori_ids[track_id] <= self.FRAME_THR \
or dets_ids[ious_inds[attack_ind]] not in self.multiple_ori2att \
or track_id not in self.multiple_ori2att:
continue
if ious[attack_ind, ious_inds[attack_ind]] > self.ATTACK_IOU_THR or (
track_id in self.low_iou_ids and ious[attack_ind, ious_inds[attack_ind]] > 0
):
attack_ids.append(track_id)
target_ids.append(dets_ids[ious_inds[attack_ind]])
attack_inds.append(attack_ind)
target_inds.append(ious_inds[attack_ind])
if hasattr(self, f'temp_i_{track_id}'):
self.__setattr__(f'temp_i_{track_id}', 0)
elif ious[attack_ind, ious_inds[attack_ind]] == 0 and track_id in self.low_iou_ids:
if hasattr(self, f'temp_i_{track_id}'):
self.__setattr__(f'temp_i_{track_id}', self.__getattribute__(f'temp_i_{track_id}') + 1)
else:
self.__setattr__(f'temp_i_{track_id}', 1)
if self.__getattribute__(f'temp_i_{track_id}') > 10:
self.low_iou_ids.remove(track_id)
elif dets_ids[dis_inds[attack_ind]] in self.multiple_ori2att:
attack_ids.append(track_id)
target_ids.append(dets_ids[dis_inds[attack_ind]])
attack_inds.append(attack_ind)
target_inds.append(dis_inds[attack_ind])
fit_index = self.CheckFit(dets, id_feature, attack_ids, attack_inds) if len(attack_ids) else []
if fit_index:
attack_ids = np.array(attack_ids)[fit_index]
target_ids = np.array(target_ids)[fit_index]
attack_inds = np.array(attack_inds)[fit_index]
target_inds = np.array(target_inds)[fit_index]
if self.opt.rand:
noise, attack_iter, suc = self.attack_mt_random(
im_blob,
img0,
id_features,
dets,
inds,
remain_inds,
last_info=self.ad_last_info,
outputs_ori=output,
attack_ids=attack_ids,
attack_inds=attack_inds,
target_ids=target_ids,
target_inds=target_inds
)
else:
noise, attack_iter, suc = self.attack_mt(
im_blob,
img0,
id_features,
dets,
inds,
remain_inds,
last_info=self.ad_last_info,
outputs_ori=output,
attack_ids=attack_ids,
attack_inds=attack_inds,
target_ids=target_ids,
target_inds=target_inds
)
self.low_iou_ids.update(set(attack_ids))
if suc:
self.attacked_ids.update(set(attack_ids))
print(
f'attack ids: {attack_ids}\tattack frame {self.frame_id_}: SUCCESS\tl2 distance: {(noise ** 2).sum().sqrt().item()}\titeration: {attack_iter}')
else:
print(f'attack ids: {attack_ids}\tattack frame {self.frame_id_}: FAIL\tl2 distance: {(noise ** 2).sum().sqrt().item() if noise is not None else None}\titeration: {attack_iter}')
if noise is not None:
l2_dis = (noise ** 2).sum().sqrt().item()
adImg = torch.clip(im_blob + noise, min=0, max=1)
noise = self.recoverNoise(noise, img0)
noise = (noise - np.min(noise)) / (np.max(noise) - np.min(noise))
noise = (noise * 255).astype(np.uint8)
else:
l2_dis = None
adImg = im_blob
output_stracks_att = self.update(adImg, img0)
adImg = self.recoverNoise(adImg.detach(), img0)
output_stracks_att_ind = []
for ind, track in enumerate(output_stracks_att):
if track.track_id not in self.multiple_att_ids:
self.multiple_att_ids[track.track_id] = 0
self.multiple_att_ids[track.track_id] += 1
if self.multiple_att_ids[track.track_id] <= self.FRAME_THR:
output_stracks_att_ind.append(ind)
if len(output_stracks_ori_ind) and len(output_stracks_att_ind):
ori_dets = [track.curr_tlbr for i, track in enumerate(output_stracks_ori) if i in output_stracks_ori_ind]
att_dets = [track.curr_tlbr for i, track in enumerate(output_stracks_att) if i in output_stracks_att_ind]
ori_dets = np.stack(ori_dets).astype(np.float64)
att_dets = np.stack(att_dets).astype(np.float64)
ious = bbox_ious(ori_dets, att_dets)
row_ind, col_ind = linear_sum_assignment(-ious)
for i in range(len(row_ind)):
if ious[row_ind[i], col_ind[i]] > 0.9:
ori_id = output_stracks_ori[output_stracks_ori_ind[row_ind[i]]].track_id
att_id = output_stracks_att[output_stracks_att_ind[col_ind[i]]].track_id
self.multiple_ori2att[ori_id] = att_id
return output_stracks_ori, output_stracks_att, adImg, noise, l2_dis
def update_attack_sg_feat(self, im_blob, img0, **kwargs):
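'''Variant of update_attack_sg whose perturbation is produced by attack_sg_feat
(not shown here); the surrounding detection and association pipeline is identical.'''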
self.frame_id_ += 1
attack_id = kwargs['attack_id']
self_track_id_ori = kwargs.get('track_id', {}).get('origin', None)
self_track_id_att = kwargs.get('track_id', {}).get('attack', None)
activated_starcks = []
refind_stracks = []
lost_stracks = []
removed_stracks = []
width = img0.shape[1]
height = img0.shape[0]
inp_height = im_blob.shape[2]
inp_width = im_blob.shape[3]
c = np.array([width / 2., height / 2.], dtype=np.float32)
s = max(float(inp_width) / float(inp_height) * height, width) * 1.0
meta = {'c': c, 's': s,
'out_height': inp_height // self.opt.down_ratio,
'out_width': inp_width // self.opt.down_ratio}
''' Step 1: Network forward, get detections & embeddings'''
# with torch.no_grad():
im_blob.requires_grad = True
self.model.zero_grad()
output = self.model(im_blob)[-1]
hm = output['hm'].sigmoid()
wh = output['wh']
id_feature = output['id']
id_feature = F.normalize(id_feature, dim=1)
reg = output['reg'] if self.opt.reg_offset else None
dets_raw, inds = mot_decode(hm, wh, reg=reg, cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K)
id_features = []
for i in range(3):
for j in range(3):
id_feature_exp = _tranpose_and_gather_feat_expand(id_feature, inds, bias=(i - 1, j - 1)).squeeze(0)
id_features.append(id_feature_exp)
id_feature = _tranpose_and_gather_feat_expand(id_feature, inds)
id_feature = id_feature.squeeze(0)
dets = self.post_process(dets_raw.clone(), meta)
dets = self.merge_outputs([dets])[1]
remain_inds = dets[:, 4] > self.opt.conf_thres
dets = dets[remain_inds]
id_feature = id_feature[remain_inds]
for i in range(len(id_features)):
id_features[i] = id_features[i][remain_inds]
id_feature = id_feature.detach().cpu().numpy()
last_id_features = [None for _ in range(len(dets))]
last_ad_id_features = [None for _ in range(len(dets))]
dets_index = [i for i in range(len(dets))]
dets_ids = [None for _ in range(len(dets))]
tracks_ad = []
# import pdb; pdb.set_trace()
# vis
'''
for i in range(0, dets.shape[0]):
bbox = dets[i][0:4]
cv2.rectangle(img0, (bbox[0], bbox[1]),
(bbox[2], bbox[3]),
(0, 255, 0), 2)
cv2.imshow('dets', img0)
cv2.waitKey(0)
id0 = id0-1
'''
if len(dets) > 0:
'''Detections'''
detections = [STrack(STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f, 30) for
(tlbrs, f) in zip(dets[:, :5], id_feature)]
else:
detections = []
''' Add newly detected tracklets to tracked_stracks'''
unconfirmed = []
tracked_stracks = [] # type: list[STrack]
for track in self.tracked_stracks_:
if not track.is_activated:
unconfirmed.append(track)
else:
tracked_stracks.append(track)
''' Step 2: First association, with embedding'''
strack_pool = joint_stracks(tracked_stracks, self.lost_stracks_)
STrack.multi_predict(strack_pool)
dists = matching.embedding_distance(strack_pool, detections)
# dists = matching.gate_cost_matrix(self.kalman_filter, dists, strack_pool, detections)
dists = matching.fuse_motion(self.kalman_filter_, dists, strack_pool, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.7)
# import pdb; pdb.set_trace()
for itracked, idet in matches:
track = strack_pool[itracked]
det = detections[idet]
assert last_id_features[dets_index[idet]] is None
assert last_ad_id_features[dets_index[idet]] is None
last_id_features[dets_index[idet]] = track.smooth_feat
last_ad_id_features[dets_index[idet]] = track.smooth_feat_ad
tracks_ad.append((track, dets_index[idet]))
if track.state == TrackState.Tracked:
track.update(detections[idet], self.frame_id_)
activated_starcks.append(track)
else:
track.re_activate_(det, self.frame_id_, new_id=False)
refind_stracks.append(track)
dets_ids[dets_index[idet]] = track.track_id
''' Step 3: Second association, with IOU'''
dets_index = [dets_index[i] for i in u_detection]
detections = [detections[i] for i in u_detection]
r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
dists = matching.iou_distance(r_tracked_stracks, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.5)
for itracked, idet in matches:
track = r_tracked_stracks[itracked]
det = detections[idet]
assert last_id_features[dets_index[idet]] is None
assert last_ad_id_features[dets_index[idet]] is None
last_id_features[dets_index[idet]] = track.smooth_feat
last_ad_id_features[dets_index[idet]] = track.smooth_feat_ad
tracks_ad.append((track, dets_index[idet]))
if track.state == TrackState.Tracked:
track.update(det, self.frame_id_)
activated_starcks.append(track)
else:
track.re_activate_(det, self.frame_id_, new_id=False)
refind_stracks.append(track)
dets_ids[dets_index[idet]] = track.track_id
for it in u_track:
track = r_tracked_stracks[it]
if not track.state == TrackState.Lost:
track.mark_lost()
lost_stracks.append(track)
'''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
dets_index = [dets_index[i] for i in u_detection]
detections = [detections[i] for i in u_detection]
dists = matching.iou_distance(unconfirmed, detections)
matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
assert last_id_features[dets_index[idet]] is None
assert last_ad_id_features[dets_index[idet]] is None
last_id_features[dets_index[idet]] = unconfirmed[itracked].smooth_feat
last_ad_id_features[dets_index[idet]] = unconfirmed[itracked].smooth_feat_ad
tracks_ad.append((unconfirmed[itracked], dets_index[idet]))
unconfirmed[itracked].update(detections[idet], self.frame_id_)
activated_starcks.append(unconfirmed[itracked])
dets_ids[dets_index[idet]] = unconfirmed[itracked].track_id
for it in u_unconfirmed:
track = unconfirmed[it]
track.mark_removed()
removed_stracks.append(track)
""" Step 4: Init new stracks"""
for inew in u_detection:
track = detections[inew]
if track.score < self.det_thresh:
continue
track.activate_(self.kalman_filter_, self.frame_id_, track_id=self_track_id_ori)
activated_starcks.append(track)
dets_ids[dets_index[inew]] = track.track_id
""" Step 5: Update state"""
for track in self.lost_stracks_:
if self.frame_id_ - track.end_frame > self.max_time_lost:
track.mark_removed()
removed_stracks.append(track)
# print('Remained match {} s'.format(t4-t3))
self.tracked_stracks_ = [t for t in self.tracked_stracks_ if t.state == TrackState.Tracked]
self.tracked_stracks_ = joint_stracks(self.tracked_stracks_, activated_starcks)
self.tracked_stracks_ = joint_stracks(self.tracked_stracks_, refind_stracks)
self.lost_stracks_ = sub_stracks(self.lost_stracks_, self.tracked_stracks_)
self.lost_stracks_.extend(lost_stracks)
self.lost_stracks_ = sub_stracks(self.lost_stracks_, self.removed_stracks_)
self.removed_stracks_.extend(removed_stracks)
self.tracked_stracks_, self.lost_stracks_ = remove_duplicate_stracks(self.tracked_stracks_, self.lost_stracks_)
# get scores of lost tracks
output_stracks_ori = [track for track in self.tracked_stracks_ if track.is_activated]
logger.debug('===========Frame {}=========='.format(self.frame_id_))
logger.debug('Activated: {}'.format([track.track_id for track in activated_starcks]))
logger.debug('Refind: {}'.format([track.track_id for track in refind_stracks]))
logger.debug('Lost: {}'.format([track.track_id for track in lost_stracks]))
logger.debug('Removed: {}'.format([track.track_id for track in removed_stracks]))
noise = None
suc = 0
for attack_ind, track_id in enumerate(dets_ids):
if track_id == attack_id:
if self.opt.attack_id > 0:
if not hasattr(self, f'frames_{attack_id}'):
setattr(self, f'frames_{attack_id}', 0)
if getattr(self, f'frames_{attack_id}') < self.FRAME_THR:
setattr(self, f'frames_{attack_id}', getattr(self, f'frames_{attack_id}') + 1)
break
fit = self.CheckFit(dets, id_feature, [attack_id], [attack_ind])
ious = bbox_ious(np.ascontiguousarray(dets[:, :4], dtype=np.float64),
np.ascontiguousarray(dets[:, :4], dtype=np.float64))
ious[range(len(dets)), range(len(dets))] = 0
dis = bbox_dis(np.ascontiguousarray(dets[:, :4], dtype=np.float64),
np.ascontiguousarray(dets[:, :4], dtype=np.float64))
dis[range(len(dets)), range(len(dets))] = np.inf
target_ind = np.argmax(ious[attack_ind])
if ious[attack_ind][target_ind] >= self.attack_iou_thr:
if ious[attack_ind][target_ind] == 0:
target_ind = np.argmin(dis[attack_ind])
target_id = dets_ids[target_ind]
if fit:
noise, attack_iter, suc = self.attack_sg_feat(
im_blob,
img0,
id_features,
dets,
inds,
remain_inds,
last_info=self.ad_last_info,
outputs_ori=output,
attack_id=attack_id,
attack_ind=attack_ind,
target_id=target_id,
target_ind=target_ind
)
self.attack_iou_thr = 0
if suc:
suc = 1
print(
f'attack id: {attack_id}\tattack frame {self.frame_id_}: SUCCESS\tl2 distance: {(noise ** 2).sum().sqrt().item()}\titeration: {attack_iter}')
else:
suc = 2
print(
f'attack id: {attack_id}\tattack frame {self.frame_id_}: FAIL\tl2 distance: {(noise ** 2).sum().sqrt().item()}\titeration: {attack_iter}')
else:
suc = 3
if ious[attack_ind][target_ind] == 0:
self.temp_i += 1
if self.temp_i >= 10:
self.attack_iou_thr = self.ATTACK_IOU_THR
else:
self.temp_i = 0
else:
self.attack_iou_thr = self.ATTACK_IOU_THR
if fit:
suc = 2
if noise is not None:
l2_dis = (noise ** 2).sum().sqrt().item()
adImg = torch.clip(im_blob + noise, min=0, max=1)
noise = self.recoverNoise(noise, img0)
noise = (noise - np.min(noise)) / (np.max(noise) - np.min(noise))
noise = (noise * 255).astype(np.uint8)
else:
l2_dis = None
adImg = im_blob
output_stracks_att = self.update(adImg, img0, track_id=self_track_id_att)
adImg = self.recoverNoise(adImg.detach(), img0)
return output_stracks_ori, output_stracks_att, adImg, noise, l2_dis, suc
def update_attack_sg_cl(self, im_blob, img0, **kwargs):
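'''Variant of update_attack_sg whose perturbation is produced by attack_sg_cl
(not shown here); the surrounding pipeline is identical.'''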
self.frame_id_ += 1
attack_id = kwargs['attack_id']
self_track_id_ori = kwargs.get('track_id', {}).get('origin', None)
self_track_id_att = kwargs.get('track_id', {}).get('attack', None)
activated_starcks = []
refind_stracks = []
lost_stracks = []
removed_stracks = []
width = img0.shape[1]
height = img0.shape[0]
inp_height = im_blob.shape[2]
inp_width = im_blob.shape[3]
c = np.array([width / 2., height / 2.], dtype=np.float32)
s = max(float(inp_width) / float(inp_height) * height, width) * 1.0
meta = {'c': c, 's': s,
'out_height': inp_height // self.opt.down_ratio,
'out_width': inp_width // self.opt.down_ratio}
''' Step 1: Network forward, get detections & embeddings'''
# with torch.no_grad():
im_blob.requires_grad = True
self.model.zero_grad()
output = self.model(im_blob)[-1]
hm = output['hm'].sigmoid()
wh = output['wh']
id_feature = output['id']
id_feature = F.normalize(id_feature, dim=1)
reg = output['reg'] if self.opt.reg_offset else None
dets_raw, inds = mot_decode(hm, wh, reg=reg, cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K)
id_features = []
for i in range(3):
for j in range(3):
id_feature_exp = _tranpose_and_gather_feat_expand(id_feature, inds, bias=(i - 1, j - 1)).squeeze(0)
id_features.append(id_feature_exp)
id_feature = _tranpose_and_gather_feat_expand(id_feature, inds)
id_feature = id_feature.squeeze(0)
dets = self.post_process(dets_raw.clone(), meta)
dets = self.merge_outputs([dets])[1]
remain_inds = dets[:, 4] > self.opt.conf_thres
dets = dets[remain_inds]
id_feature = id_feature[remain_inds]
for i in range(len(id_features)):
id_features[i] = id_features[i][remain_inds]
id_feature = id_feature.detach().cpu().numpy()
last_id_features = [None for _ in range(len(dets))]
last_ad_id_features = [None for _ in range(len(dets))]
dets_index = [i for i in range(len(dets))]
dets_ids = [None for _ in range(len(dets))]
tracks_ad = []
# import pdb; pdb.set_trace()
# vis
'''
for i in range(0, dets.shape[0]):
bbox = dets[i][0:4]
cv2.rectangle(img0, (bbox[0], bbox[1]),
(bbox[2], bbox[3]),
(0, 255, 0), 2)
cv2.imshow('dets', img0)
cv2.waitKey(0)
id0 = id0-1
'''
if len(dets) > 0:
'''Detections'''
detections = [STrack(STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f, 30) for
(tlbrs, f) in zip(dets[:, :5], id_feature)]
else:
detections = []
''' Add newly detected tracklets to tracked_stracks'''
unconfirmed = []
tracked_stracks = [] # type: list[STrack]
for track in self.tracked_stracks_:
if not track.is_activated:
unconfirmed.append(track)
else:
tracked_stracks.append(track)
''' Step 2: First association, with embedding'''
strack_pool = joint_stracks(tracked_stracks, self.lost_stracks_)
STrack.multi_predict(strack_pool)
dists = matching.embedding_distance(strack_pool, detections)
# dists = matching.gate_cost_matrix(self.kalman_filter, dists, strack_pool, detections)
dists = matching.fuse_motion(self.kalman_filter_, dists, strack_pool, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.7)
# import pdb; pdb.set_trace()
for itracked, idet in matches:
track = strack_pool[itracked]
det = detections[idet]
assert last_id_features[dets_index[idet]] is None
assert last_ad_id_features[dets_index[idet]] is None
last_id_features[dets_index[idet]] = track.smooth_feat
last_ad_id_features[dets_index[idet]] = track.smooth_feat_ad
tracks_ad.append((track, dets_index[idet]))
if track.state == TrackState.Tracked:
track.update(detections[idet], self.frame_id_)
activated_starcks.append(track)
else:
track.re_activate_(det, self.frame_id_, new_id=False)
refind_stracks.append(track)
dets_ids[dets_index[idet]] = track.track_id
''' Step 3: Second association, with IOU'''
dets_index = [dets_index[i] for i in u_detection]
detections = [detections[i] for i in u_detection]
r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
dists = matching.iou_distance(r_tracked_stracks, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.5)
for itracked, idet in matches:
track = r_tracked_stracks[itracked]
det = detections[idet]
assert last_id_features[dets_index[idet]] is None
assert last_ad_id_features[dets_index[idet]] is None
last_id_features[dets_index[idet]] = track.smooth_feat
last_ad_id_features[dets_index[idet]] = track.smooth_feat_ad
tracks_ad.append((track, dets_index[idet]))
if track.state == TrackState.Tracked:
track.update(det, self.frame_id_)
activated_starcks.append(track)
else:
track.re_activate_(det, self.frame_id_, new_id=False)
refind_stracks.append(track)
dets_ids[dets_index[idet]] = track.track_id
for it in u_track:
track = r_tracked_stracks[it]
if not track.state == TrackState.Lost:
track.mark_lost()
lost_stracks.append(track)
'''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
dets_index = [dets_index[i] for i in u_detection]
detections = [detections[i] for i in u_detection]
dists = matching.iou_distance(unconfirmed, detections)
matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
assert last_id_features[dets_index[idet]] is None
assert last_ad_id_features[dets_index[idet]] is None
last_id_features[dets_index[idet]] = unconfirmed[itracked].smooth_feat
last_ad_id_features[dets_index[idet]] = unconfirmed[itracked].smooth_feat_ad
tracks_ad.append((unconfirmed[itracked], dets_index[idet]))
unconfirmed[itracked].update(detections[idet], self.frame_id_)
activated_starcks.append(unconfirmed[itracked])
dets_ids[dets_index[idet]] = unconfirmed[itracked].track_id
for it in u_unconfirmed:
track = unconfirmed[it]
track.mark_removed()
removed_stracks.append(track)
""" Step 4: Init new stracks"""
for inew in u_detection:
track = detections[inew]
if track.score < self.det_thresh:
continue
track.activate_(self.kalman_filter_, self.frame_id_, track_id=self_track_id_ori)
activated_starcks.append(track)
dets_ids[dets_index[inew]] = track.track_id
""" Step 5: Update state"""
for track in self.lost_stracks_:
if self.frame_id_ - track.end_frame > self.max_time_lost:
track.mark_removed()
removed_stracks.append(track)
# print('Remained match {} s'.format(t4-t3))
self.tracked_stracks_ = [t for t in self.tracked_stracks_ if t.state == TrackState.Tracked]
self.tracked_stracks_ = joint_stracks(self.tracked_stracks_, activated_starcks)
self.tracked_stracks_ = joint_stracks(self.tracked_stracks_, refind_stracks)
self.lost_stracks_ = sub_stracks(self.lost_stracks_, self.tracked_stracks_)
self.lost_stracks_.extend(lost_stracks)
self.lost_stracks_ = sub_stracks(self.lost_stracks_, self.removed_stracks_)
self.removed_stracks_.extend(removed_stracks)
self.tracked_stracks_, self.lost_stracks_ = remove_duplicate_stracks(self.tracked_stracks_, self.lost_stracks_)
# get scores of lost tracks
output_stracks_ori = [track for track in self.tracked_stracks_ if track.is_activated]
logger.debug('===========Frame {}=========='.format(self.frame_id_))
logger.debug('Activated: {}'.format([track.track_id for track in activated_starcks]))
logger.debug('Refind: {}'.format([track.track_id for track in refind_stracks]))
logger.debug('Lost: {}'.format([track.track_id for track in lost_stracks]))
logger.debug('Removed: {}'.format([track.track_id for track in removed_stracks]))
noise = None
suc = 0
for attack_ind, track_id in enumerate(dets_ids):
if track_id == attack_id:
if self.opt.attack_id > 0:
if not hasattr(self, f'frames_{attack_id}'):
setattr(self, f'frames_{attack_id}', 0)
if getattr(self, f'frames_{attack_id}') < self.FRAME_THR:
setattr(self, f'frames_{attack_id}', getattr(self, f'frames_{attack_id}') + 1)
break
fit = self.CheckFit(dets, id_feature, [attack_id], [attack_ind])
ious = bbox_ious(np.ascontiguousarray(dets[:, :4], dtype=np.float64),
np.ascontiguousarray(dets[:, :4], dtype=np.float64))
ious[range(len(dets)), range(len(dets))] = 0
dis = bbox_dis(np.ascontiguousarray(dets[:, :4], dtype=np.float64),
np.ascontiguousarray(dets[:, :4], dtype=np.float64))
dis[range(len(dets)), range(len(dets))] = np.inf
target_ind = np.argmax(ious[attack_ind])
if ious[attack_ind][target_ind] >= self.attack_iou_thr:
if ious[attack_ind][target_ind] == 0:
target_ind = np.argmin(dis[attack_ind])
target_id = dets_ids[target_ind]
if fit:
noise, attack_iter, suc = self.attack_sg_cl(
im_blob,
img0,
id_features,
dets,
inds,
remain_inds,
last_info=self.ad_last_info,
outputs_ori=output,
attack_id=attack_id,
attack_ind=attack_ind,
target_id=target_id,
target_ind=target_ind
)
self.attack_iou_thr = 0
if suc:
suc = 1
print(
f'attack id: {attack_id}\tattack frame {self.frame_id_}: SUCCESS\tl2 distance: {(noise ** 2).sum().sqrt().item()}\titeration: {attack_iter}')
else:
suc = 2
print(
f'attack id: {attack_id}\tattack frame {self.frame_id_}: FAIL\tl2 distance: {(noise ** 2).sum().sqrt().item() if noise is not None else None}\titeration: {attack_iter}')
else:
suc = 3
if ious[attack_ind][target_ind] == 0:
self.temp_i += 1
if self.temp_i >= 10:
self.attack_iou_thr = self.ATTACK_IOU_THR
else:
self.temp_i = 0
else:
self.attack_iou_thr = self.ATTACK_IOU_THR
if fit:
suc = 2
if noise is not None:
l2_dis = (noise ** 2).sum().sqrt().item()
adImg = torch.clip(im_blob + noise, min=0, max=1)
noise = self.recoverNoise(noise, img0)
# adImg = np.clip(img0 + noise, a_min=0, a_max=255)
# noise = adImg - img0
noise = (noise - np.min(noise)) / (np.max(noise) - np.min(noise))
noise = (noise * 255).astype(np.uint8)
else:
l2_dis = None
adImg = im_blob
output_stracks_att = self.update(adImg, img0, track_id=self_track_id_att)
adImg = self.recoverNoise(adImg.detach(), img0)
return output_stracks_ori, output_stracks_att, adImg, noise, l2_dis, suc
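# --- update_attack_sg_det ---------------------------------------------------
# Hedged note (added): same single-target pipeline as update_attack_sg_cl
# above, but the perturbation comes from attack_sg_det which, judging by its
# argument list, works on the detector outputs directly, so no
# target_id/target_ind is selected or passed.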
def update_attack_sg_det(self, im_blob, img0, **kwargs):
self.frame_id_ += 1
attack_id = kwargs['attack_id']
self_track_id_ori = kwargs.get('track_id', {}).get('origin', None)
self_track_id_att = kwargs.get('track_id', {}).get('attack', None)
activated_starcks = []
refind_stracks = []
lost_stracks = []
removed_stracks = []
width = img0.shape[1]
height = img0.shape[0]
inp_height = im_blob.shape[2]
inp_width = im_blob.shape[3]
c = np.array([width / 2., height / 2.], dtype=np.float32)
s = max(float(inp_width) / float(inp_height) * height, width) * 1.0
meta = {'c': c, 's': s,
'out_height': inp_height // self.opt.down_ratio,
'out_width': inp_width // self.opt.down_ratio}
''' Step 1: Network forward, get detections & embeddings'''
# with torch.no_grad():
im_blob.requires_grad = True
self.model.zero_grad()
output = self.model(im_blob)[-1]
hm = output['hm'].sigmoid()
wh = output['wh']
id_feature = output['id']
id_feature = F.normalize(id_feature, dim=1)
reg = output['reg'] if self.opt.reg_offset else None
dets_raw, inds = mot_decode(hm, wh, reg=reg, cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K)
id_features = []
for i in range(3):
for j in range(3):
id_feature_exp = _tranpose_and_gather_feat_expand(id_feature, inds, bias=(i - 1, j - 1)).squeeze(0)
id_features.append(id_feature_exp)
id_feature = _tranpose_and_gather_feat_expand(id_feature, inds)
id_feature = id_feature.squeeze(0)
dets = self.post_process(dets_raw.clone(), meta)
dets = self.merge_outputs([dets])[1]
remain_inds = dets[:, 4] > self.opt.conf_thres
dets = dets[remain_inds]
id_feature = id_feature[remain_inds]
for i in range(len(id_features)):
id_features[i] = id_features[i][remain_inds]
id_feature = id_feature.detach().cpu().numpy()
last_id_features = [None for _ in range(len(dets))]
last_ad_id_features = [None for _ in range(len(dets))]
dets_index = [i for i in range(len(dets))]
dets_ids = [None for _ in range(len(dets))]
tracks_ad = []
# import pdb; pdb.set_trace()
# vis
'''
for i in range(0, dets.shape[0]):
bbox = dets[i][0:4]
cv2.rectangle(img0, (bbox[0], bbox[1]),
(bbox[2], bbox[3]),
(0, 255, 0), 2)
cv2.imshow('dets', img0)
cv2.waitKey(0)
id0 = id0-1
'''
if len(dets) > 0:
'''Detections'''
detections = [STrack(STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f, 30) for
(tlbrs, f) in zip(dets[:, :5], id_feature)]
else:
detections = []
''' Add newly detected tracklets to tracked_stracks'''
unconfirmed = []
tracked_stracks = [] # type: list[STrack]
for track in self.tracked_stracks_:
if not track.is_activated:
unconfirmed.append(track)
else:
tracked_stracks.append(track)
''' Step 2: First association, with embedding'''
strack_pool = joint_stracks(tracked_stracks, self.lost_stracks_)
STrack.multi_predict(strack_pool)
dists = matching.embedding_distance(strack_pool, detections)
# dists = matching.gate_cost_matrix(self.kalman_filter, dists, strack_pool, detections)
dists = matching.fuse_motion(self.kalman_filter_, dists, strack_pool, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.7)
# import pdb; pdb.set_trace()
for itracked, idet in matches:
track = strack_pool[itracked]
det = detections[idet]
assert last_id_features[dets_index[idet]] is None
assert last_ad_id_features[dets_index[idet]] is None
last_id_features[dets_index[idet]] = track.smooth_feat
last_ad_id_features[dets_index[idet]] = track.smooth_feat_ad
tracks_ad.append((track, dets_index[idet]))
if track.state == TrackState.Tracked:
track.update(detections[idet], self.frame_id_)
activated_starcks.append(track)
else:
track.re_activate_(det, self.frame_id_, new_id=False)
refind_stracks.append(track)
dets_ids[dets_index[idet]] = track.track_id
''' Step 3: Second association, with IOU'''
dets_index = [dets_index[i] for i in u_detection]
detections = [detections[i] for i in u_detection]
r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
dists = matching.iou_distance(r_tracked_stracks, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.5)
for itracked, idet in matches:
track = r_tracked_stracks[itracked]
det = detections[idet]
assert last_id_features[dets_index[idet]] is None
assert last_ad_id_features[dets_index[idet]] is None
last_id_features[dets_index[idet]] = track.smooth_feat
last_ad_id_features[dets_index[idet]] = track.smooth_feat_ad
tracks_ad.append((track, dets_index[idet]))
if track.state == TrackState.Tracked:
track.update(det, self.frame_id_)
activated_starcks.append(track)
else:
track.re_activate_(det, self.frame_id_, new_id=False)
refind_stracks.append(track)
dets_ids[dets_index[idet]] = track.track_id
for it in u_track:
track = r_tracked_stracks[it]
if not track.state == TrackState.Lost:
track.mark_lost()
lost_stracks.append(track)
'''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
dets_index = [dets_index[i] for i in u_detection]
detections = [detections[i] for i in u_detection]
dists = matching.iou_distance(unconfirmed, detections)
matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
assert last_id_features[dets_index[idet]] is None
assert last_ad_id_features[dets_index[idet]] is None
last_id_features[dets_index[idet]] = unconfirmed[itracked].smooth_feat
last_ad_id_features[dets_index[idet]] = unconfirmed[itracked].smooth_feat_ad
tracks_ad.append((unconfirmed[itracked], dets_index[idet]))
unconfirmed[itracked].update(detections[idet], self.frame_id_)
activated_starcks.append(unconfirmed[itracked])
dets_ids[dets_index[idet]] = unconfirmed[itracked].track_id
for it in u_unconfirmed:
track = unconfirmed[it]
track.mark_removed()
removed_stracks.append(track)
""" Step 4: Init new stracks"""
for inew in u_detection:
track = detections[inew]
if track.score < self.det_thresh:
continue
track.activate_(self.kalman_filter_, self.frame_id_, track_id=self_track_id_ori)
activated_starcks.append(track)
dets_ids[dets_index[inew]] = track.track_id
""" Step 5: Update state"""
for track in self.lost_stracks_:
if self.frame_id_ - track.end_frame > self.max_time_lost:
track.mark_removed()
removed_stracks.append(track)
# print('Remained match {} s'.format(t4-t3))
self.tracked_stracks_ = [t for t in self.tracked_stracks_ if t.state == TrackState.Tracked]
self.tracked_stracks_ = joint_stracks(self.tracked_stracks_, activated_starcks)
self.tracked_stracks_ = joint_stracks(self.tracked_stracks_, refind_stracks)
self.lost_stracks_ = sub_stracks(self.lost_stracks_, self.tracked_stracks_)
self.lost_stracks_.extend(lost_stracks)
self.lost_stracks_ = sub_stracks(self.lost_stracks_, self.removed_stracks_)
self.removed_stracks_.extend(removed_stracks)
self.tracked_stracks_, self.lost_stracks_ = remove_duplicate_stracks(self.tracked_stracks_, self.lost_stracks_)
# get scores of lost tracks
output_stracks_ori = [track for track in self.tracked_stracks_ if track.is_activated]
logger.debug('===========Frame {}=========='.format(self.frame_id_))
logger.debug('Activated: {}'.format([track.track_id for track in activated_starcks]))
logger.debug('Refind: {}'.format([track.track_id for track in refind_stracks]))
logger.debug('Lost: {}'.format([track.track_id for track in lost_stracks]))
logger.debug('Removed: {}'.format([track.track_id for track in removed_stracks]))
noise = None
suc = 0
for attack_ind, track_id in enumerate(dets_ids):
if track_id == attack_id:
if self.opt.attack_id > 0:
if not hasattr(self, f'frames_{attack_id}'):
setattr(self, f'frames_{attack_id}', 0)
if getattr(self, f'frames_{attack_id}') < self.FRAME_THR:
setattr(self, f'frames_{attack_id}', getattr(self, f'frames_{attack_id}') + 1)
break
ious = bbox_ious(np.ascontiguousarray(dets[:, :4], dtype=np.float64),
np.ascontiguousarray(dets[:, :4], dtype=np.float64))
ious = self.processIoUs(ious)
ious = ious + ious.T
target_ind = np.argmax(ious[attack_ind])
if ious[attack_ind][target_ind] >= self.attack_iou_thr:
fit = self.CheckFit(dets, id_feature, [attack_id], [attack_ind])
if fit:
noise, attack_iter, suc = self.attack_sg_det(
im_blob,
img0,
dets,
inds,
remain_inds,
last_info=self.ad_last_info,
outputs_ori=output,
attack_id=attack_id,
attack_ind=attack_ind
)
self.attack_iou_thr = 0
if suc:
suc = 1
print(
f'attack id: {attack_id}\tattack frame {self.frame_id_}: SUCCESS\tl2 distance: {(noise ** 2).sum().sqrt().item()}\titeration: {attack_iter}')
else:
suc = 2
print(
f'attack id: {attack_id}\tattack frame {self.frame_id_}: FAIL\tl2 distance: {(noise ** 2).sum().sqrt().item()}\titeration: {attack_iter}')
else:
suc = 3
if ious[attack_ind][target_ind] == 0:
self.temp_i += 1
if self.temp_i >= 10:
self.attack_iou_thr = self.ATTACK_IOU_THR
else:
self.temp_i = 0
else:
self.attack_iou_thr = self.ATTACK_IOU_THR
break
if noise is not None:
l2_dis = (noise ** 2).sum().sqrt().item()
adImg = torch.clip(im_blob + noise, min=0, max=1)
noise = self.recoverNoise(noise, img0)
noise = (noise - np.min(noise)) / (np.max(noise) - np.min(noise))
noise = (noise * 255).astype(np.uint8)
else:
l2_dis = None
adImg = im_blob
output_stracks_att = self.update(adImg, img0, track_id=self_track_id_att)
adImg = self.recoverNoise(adImg.detach(), img0)
return output_stracks_ori, output_stracks_att, adImg, noise, l2_dis, suc
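# --- update_attack_sg_hj ----------------------------------------------------
# Hedged note (added): single-target hijacking variant. While self.ad_bbox is
# set it additionally looks up the attacked track among the clean outputs
# (att_tracker), forwards that track's velocity estimate to attack_sg_hj, and
# clears ad_bbox once a perturbation has been produced.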
def update_attack_sg_hj(self, im_blob, img0, **kwargs):
self.frame_id_ += 1
attack_id = kwargs['attack_id']
self_track_id_ori = kwargs.get('track_id', {}).get('origin', None)
self_track_id_att = kwargs.get('track_id', {}).get('attack', None)
activated_starcks = []
refind_stracks = []
lost_stracks = []
removed_stracks = []
width = img0.shape[1]
height = img0.shape[0]
inp_height = im_blob.shape[2]
inp_width = im_blob.shape[3]
c = np.array([width / 2., height / 2.], dtype=np.float32)
s = max(float(inp_width) / float(inp_height) * height, width) * 1.0
meta = {'c': c, 's': s,
'out_height': inp_height // self.opt.down_ratio,
'out_width': inp_width // self.opt.down_ratio}
''' Step 1: Network forward, get detections & embeddings'''
# with torch.no_grad():
im_blob.requires_grad = True
self.model.zero_grad()
output = self.model(im_blob)[-1]
hm = output['hm'].sigmoid()
wh = output['wh']
id_feature = output['id']
id_feature = F.normalize(id_feature, dim=1)
reg = output['reg'] if self.opt.reg_offset else None
dets_raw, inds = mot_decode(hm, wh, reg=reg, cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K)
id_features = []
for i in range(3):
for j in range(3):
id_feature_exp = _tranpose_and_gather_feat_expand(id_feature, inds, bias=(i - 1, j - 1)).squeeze(0)
id_features.append(id_feature_exp)
id_feature = _tranpose_and_gather_feat_expand(id_feature, inds)
id_feature = id_feature.squeeze(0)
dets = self.post_process(dets_raw.clone(), meta)
dets = self.merge_outputs([dets])[1]
remain_inds = dets[:, 4] > self.opt.conf_thres
dets = dets[remain_inds]
id_feature = id_feature[remain_inds]
for i in range(len(id_features)):
id_features[i] = id_features[i][remain_inds]
id_feature = id_feature.detach().cpu().numpy()
last_id_features = [None for _ in range(len(dets))]
last_ad_id_features = [None for _ in range(len(dets))]
dets_index = [i for i in range(len(dets))]
dets_ids = [None for _ in range(len(dets))]
tracks_ad = []
# import pdb; pdb.set_trace()
# vis
'''
for i in range(0, dets.shape[0]):
bbox = dets[i][0:4]
cv2.rectangle(img0, (bbox[0], bbox[1]),
(bbox[2], bbox[3]),
(0, 255, 0), 2)
cv2.imshow('dets', img0)
cv2.waitKey(0)
id0 = id0-1
'''
if len(dets) > 0:
'''Detections'''
detections = [STrack(STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f, 30) for
(tlbrs, f) in zip(dets[:, :5], id_feature)]
else:
detections = []
''' Add newly detected tracklets to tracked_stracks'''
unconfirmed = []
tracked_stracks = [] # type: list[STrack]
for track in self.tracked_stracks_:
if not track.is_activated:
unconfirmed.append(track)
else:
tracked_stracks.append(track)
''' Step 2: First association, with embedding'''
strack_pool = joint_stracks(tracked_stracks, self.lost_stracks_)
STrack.multi_predict(strack_pool)
dists = matching.embedding_distance(strack_pool, detections)
# dists = matching.gate_cost_matrix(self.kalman_filter, dists, strack_pool, detections)
dists = matching.fuse_motion(self.kalman_filter_, dists, strack_pool, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.7)
# import pdb; pdb.set_trace()
for itracked, idet in matches:
track = strack_pool[itracked]
det = detections[idet]
assert last_id_features[dets_index[idet]] is None
assert last_ad_id_features[dets_index[idet]] is None
last_id_features[dets_index[idet]] = track.smooth_feat
last_ad_id_features[dets_index[idet]] = track.smooth_feat_ad
tracks_ad.append((track, dets_index[idet]))
if track.state == TrackState.Tracked:
track.update(detections[idet], self.frame_id_)
activated_starcks.append(track)
else:
track.re_activate_(det, self.frame_id_, new_id=False)
refind_stracks.append(track)
dets_ids[dets_index[idet]] = track.track_id
''' Step 3: Second association, with IOU'''
dets_index = [dets_index[i] for i in u_detection]
detections = [detections[i] for i in u_detection]
r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
dists = matching.iou_distance(r_tracked_stracks, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.5)
for itracked, idet in matches:
track = r_tracked_stracks[itracked]
det = detections[idet]
assert last_id_features[dets_index[idet]] is None
assert last_ad_id_features[dets_index[idet]] is None
last_id_features[dets_index[idet]] = track.smooth_feat
last_ad_id_features[dets_index[idet]] = track.smooth_feat_ad
tracks_ad.append((track, dets_index[idet]))
if track.state == TrackState.Tracked:
track.update(det, self.frame_id_)
activated_starcks.append(track)
else:
track.re_activate_(det, self.frame_id_, new_id=False)
refind_stracks.append(track)
dets_ids[dets_index[idet]] = track.track_id
for it in u_track:
track = r_tracked_stracks[it]
if not track.state == TrackState.Lost:
track.mark_lost()
lost_stracks.append(track)
'''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
dets_index = [dets_index[i] for i in u_detection]
detections = [detections[i] for i in u_detection]
dists = matching.iou_distance(unconfirmed, detections)
matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
assert last_id_features[dets_index[idet]] is None
assert last_ad_id_features[dets_index[idet]] is None
last_id_features[dets_index[idet]] = unconfirmed[itracked].smooth_feat
last_ad_id_features[dets_index[idet]] = unconfirmed[itracked].smooth_feat_ad
tracks_ad.append((unconfirmed[itracked], dets_index[idet]))
unconfirmed[itracked].update(detections[idet], self.frame_id_)
activated_starcks.append(unconfirmed[itracked])
dets_ids[dets_index[idet]] = unconfirmed[itracked].track_id
for it in u_unconfirmed:
track = unconfirmed[it]
track.mark_removed()
removed_stracks.append(track)
""" Step 4: Init new stracks"""
for inew in u_detection:
track = detections[inew]
if track.score < self.det_thresh:
continue
track.activate_(self.kalman_filter_, self.frame_id_, track_id=self_track_id_ori)
activated_starcks.append(track)
dets_ids[dets_index[inew]] = track.track_id
""" Step 5: Update state"""
for track in self.lost_stracks_:
if self.frame_id_ - track.end_frame > self.max_time_lost:
track.mark_removed()
removed_stracks.append(track)
# print('Remained match {} s'.format(t4-t3))
self.tracked_stracks_ = [t for t in self.tracked_stracks_ if t.state == TrackState.Tracked]
self.tracked_stracks_ = joint_stracks(self.tracked_stracks_, activated_starcks)
self.tracked_stracks_ = joint_stracks(self.tracked_stracks_, refind_stracks)
self.lost_stracks_ = sub_stracks(self.lost_stracks_, self.tracked_stracks_)
self.lost_stracks_.extend(lost_stracks)
self.lost_stracks_ = sub_stracks(self.lost_stracks_, self.removed_stracks_)
self.removed_stracks_.extend(removed_stracks)
self.tracked_stracks_, self.lost_stracks_ = remove_duplicate_stracks(self.tracked_stracks_, self.lost_stracks_)
# get scores of lost tracks
output_stracks_ori = [track for track in self.tracked_stracks_ if track.is_activated]
logger.debug('===========Frame {}=========='.format(self.frame_id_))
logger.debug('Activated: {}'.format([track.track_id for track in activated_starcks]))
logger.debug('Refind: {}'.format([track.track_id for track in refind_stracks]))
logger.debug('Lost: {}'.format([track.track_id for track in lost_stracks]))
logger.debug('Removed: {}'.format([track.track_id for track in removed_stracks]))
noise = None
suc = 0
att_tracker = None
if self.ad_bbox:
for t in output_stracks_ori:
if t.track_id == attack_id:
att_tracker = t
for attack_ind, track_id in enumerate(dets_ids):
if track_id == attack_id:
if self.opt.attack_id > 0:
if not hasattr(self, f'frames_{attack_id}'):
setattr(self, f'frames_{attack_id}', 0)
if getattr(self, f'frames_{attack_id}') < self.FRAME_THR:
setattr(self, f'frames_{attack_id}', getattr(self, f'frames_{attack_id}') + 1)
break
ious = bbox_ious(np.ascontiguousarray(dets[:, :4], dtype=np.float64),
np.ascontiguousarray(dets[:, :4], dtype=np.float64))
ious = self.processIoUs(ious)
ious = ious + ious.T
target_ind = np.argmax(ious[attack_ind])
if ious[attack_ind][target_ind] >= self.attack_iou_thr:
fit = self.CheckFit(dets, id_feature, [attack_id], [attack_ind])
if fit:
noise, attack_iter, suc = self.attack_sg_hj(
im_blob,
img0,
dets,
inds,
remain_inds,
last_info=self.ad_last_info,
outputs_ori=output,
attack_id=attack_id,
attack_ind=attack_ind,
ad_bbox=self.ad_bbox,
track_v=att_tracker.get_v() if att_tracker is not None else None
)
self.attack_iou_thr = 0
if suc:
suc = 1
print(
f'attack id: {attack_id}\tattack frame {self.frame_id_}: SUCCESS\tl2 distance: {(noise ** 2).sum().sqrt().item()}\titeration: {attack_iter}')
else:
suc = 2
print(
f'attack id: {attack_id}\tattack frame {self.frame_id_}: FAIL\tl2 distance: {(noise ** 2).sum().sqrt().item()}\titeration: {attack_iter}')
else:
suc = 3
if ious[attack_ind][target_ind] == 0:
self.temp_i += 1
if self.temp_i >= 10:
self.attack_iou_thr = self.ATTACK_IOU_THR
else:
self.temp_i = 0
else:
self.attack_iou_thr = self.ATTACK_IOU_THR
break
if noise is not None:
self.ad_bbox = False
l2_dis = (noise ** 2).sum().sqrt().item()
adImg = torch.clip(im_blob + noise, min=0, max=1)
noise = self.recoverNoise(noise, img0)
noise = (noise - np.min(noise)) / (np.max(noise) - np.min(noise))
noise = (noise * 255).astype(np.uint8)
else:
l2_dis = None
adImg = im_blob
output_stracks_att = self.update(adImg, img0, track_id=self_track_id_att)
adImg = self.recoverNoise(adImg.detach(), img0)
return output_stracks_ori, output_stracks_att, adImg, noise, l2_dis, suc
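# --- update_attack_mt_det ---------------------------------------------------
# Hedged note (added): multi-target version of the detector attack. Candidate
# (attack, target) pairs are gathered for every id that has been tracked for
# more than FRAME_THR frames and already has an origin-to-attack id mapping,
# then filtered through CheckFit before attack_mt_det runs once on the batch.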
def update_attack_mt_det(self, im_blob, img0, **kwargs):
self.frame_id_ += 1
activated_starcks = []
refind_stracks = []
lost_stracks = []
removed_stracks = []
width = img0.shape[1]
height = img0.shape[0]
inp_height = im_blob.shape[2]
inp_width = im_blob.shape[3]
c = np.array([width / 2., height / 2.], dtype=np.float32)
s = max(float(inp_width) / float(inp_height) * height, width) * 1.0
meta = {'c': c, 's': s,
'out_height': inp_height // self.opt.down_ratio,
'out_width': inp_width // self.opt.down_ratio}
''' Step 1: Network forward, get detections & embeddings'''
# with torch.no_grad():
im_blob.requires_grad = True
self.model.zero_grad()
output = self.model(im_blob)[-1]
hm = output['hm'].sigmoid()
wh = output['wh']
id_feature = output['id']
id_feature = F.normalize(id_feature, dim=1)
reg = output['reg'] if self.opt.reg_offset else None
dets_raw, inds = mot_decode(hm, wh, reg=reg, cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K)
id_features = []
for i in range(3):
for j in range(3):
id_feature_exp = _tranpose_and_gather_feat_expand(id_feature, inds, bias=(i - 1, j - 1)).squeeze(0)
id_features.append(id_feature_exp)
id_feature = _tranpose_and_gather_feat_expand(id_feature, inds)
id_feature = id_feature.squeeze(0)
dets = self.post_process(dets_raw.clone(), meta)
dets = self.merge_outputs([dets])[1]
remain_inds = dets[:, 4] > self.opt.conf_thres
dets = dets[remain_inds]
id_feature = id_feature[remain_inds]
for i in range(len(id_features)):
id_features[i] = id_features[i][remain_inds]
id_feature = id_feature.detach().cpu().numpy()
last_id_features = [None for _ in range(len(dets))]
last_ad_id_features = [None for _ in range(len(dets))]
dets_index = [i for i in range(len(dets))]
dets_ids = [None for _ in range(len(dets))]
tracks_ad = []
# import pdb; pdb.set_trace()
# vis
'''
for i in range(0, dets.shape[0]):
bbox = dets[i][0:4]
cv2.rectangle(img0, (bbox[0], bbox[1]),
(bbox[2], bbox[3]),
(0, 255, 0), 2)
cv2.imshow('dets', img0)
cv2.waitKey(0)
id0 = id0-1
'''
if len(dets) > 0:
'''Detections'''
detections = [STrack(STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f, 30) for
(tlbrs, f) in zip(dets[:, :5], id_feature)]
else:
detections = []
''' Add newly detected tracklets to tracked_stracks'''
unconfirmed = []
tracked_stracks = [] # type: list[STrack]
for track in self.tracked_stracks_:
if not track.is_activated:
unconfirmed.append(track)
else:
tracked_stracks.append(track)
''' Step 2: First association, with embedding'''
strack_pool = joint_stracks(tracked_stracks, self.lost_stracks_)
STrack.multi_predict(strack_pool)
dists = matching.embedding_distance(strack_pool, detections)
dists = matching.fuse_motion(self.kalman_filter_, dists, strack_pool, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.7)
# import pdb; pdb.set_trace()
for itracked, idet in matches:
track = strack_pool[itracked]
det = detections[idet]
assert last_id_features[dets_index[idet]] is None
assert last_ad_id_features[dets_index[idet]] is None
last_id_features[dets_index[idet]] = track.smooth_feat
last_ad_id_features[dets_index[idet]] = track.smooth_feat_ad
tracks_ad.append((track, dets_index[idet]))
if track.state == TrackState.Tracked:
track.update(detections[idet], self.frame_id_)
activated_starcks.append(track)
else:
track.re_activate_(det, self.frame_id_, new_id=False)
refind_stracks.append(track)
dets_ids[dets_index[idet]] = track.track_id
''' Step 3: Second association, with IOU'''
dets_index = [dets_index[i] for i in u_detection]
detections = [detections[i] for i in u_detection]
r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
dists = matching.iou_distance(r_tracked_stracks, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.5)
for itracked, idet in matches:
track = r_tracked_stracks[itracked]
det = detections[idet]
assert last_id_features[dets_index[idet]] is None
assert last_ad_id_features[dets_index[idet]] is None
last_id_features[dets_index[idet]] = track.smooth_feat
last_ad_id_features[dets_index[idet]] = track.smooth_feat_ad
tracks_ad.append((track, dets_index[idet]))
if track.state == TrackState.Tracked:
track.update(det, self.frame_id_)
activated_starcks.append(track)
else:
track.re_activate_(det, self.frame_id_, new_id=False)
refind_stracks.append(track)
dets_ids[dets_index[idet]] = track.track_id
for it in u_track:
track = r_tracked_stracks[it]
if not track.state == TrackState.Lost:
track.mark_lost()
lost_stracks.append(track)
'''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
dets_index = [dets_index[i] for i in u_detection]
detections = [detections[i] for i in u_detection]
dists = matching.iou_distance(unconfirmed, detections)
matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
assert last_id_features[dets_index[idet]] is None
assert last_ad_id_features[dets_index[idet]] is None
last_id_features[dets_index[idet]] = unconfirmed[itracked].smooth_feat
last_ad_id_features[dets_index[idet]] = unconfirmed[itracked].smooth_feat_ad
tracks_ad.append((unconfirmed[itracked], dets_index[idet]))
unconfirmed[itracked].update(detections[idet], self.frame_id_)
activated_starcks.append(unconfirmed[itracked])
dets_ids[dets_index[idet]] = unconfirmed[itracked].track_id
for it in u_unconfirmed:
track = unconfirmed[it]
track.mark_removed()
removed_stracks.append(track)
""" Step 4: Init new stracks"""
for inew in u_detection:
track = detections[inew]
if track.score < self.det_thresh:
continue
track.activate_(self.kalman_filter_, self.frame_id_)
activated_starcks.append(track)
dets_ids[dets_index[inew]] = track.track_id
""" Step 5: Update state"""
for track in self.lost_stracks_:
if self.frame_id_ - track.end_frame > self.max_time_lost:
track.mark_removed()
removed_stracks.append(track)
# print('Remained match {} s'.format(t4-t3))
self.tracked_stracks_ = [t for t in self.tracked_stracks_ if t.state == TrackState.Tracked]
self.tracked_stracks_ = joint_stracks(self.tracked_stracks_, activated_starcks)
self.tracked_stracks_ = joint_stracks(self.tracked_stracks_, refind_stracks)
self.lost_stracks_ = sub_stracks(self.lost_stracks_, self.tracked_stracks_)
self.lost_stracks_.extend(lost_stracks)
self.lost_stracks_ = sub_stracks(self.lost_stracks_, self.removed_stracks_)
self.removed_stracks_.extend(removed_stracks)
self.tracked_stracks_, self.lost_stracks_ = remove_duplicate_stracks(self.tracked_stracks_, self.lost_stracks_)
# get scores of lost tracks
output_stracks_ori = [track for track in self.tracked_stracks_ if track.is_activated]
id_set = set([track.track_id for track in output_stracks_ori])
for i in range(len(dets_ids)):
if dets_ids[i] is not None and dets_ids[i] not in id_set:
dets_ids[i] = None
output_stracks_ori_ind = []
for ind, track in enumerate(output_stracks_ori):
if track.track_id not in self.multiple_ori_ids:
self.multiple_ori_ids[track.track_id] = 0
self.multiple_ori_ids[track.track_id] += 1
if self.multiple_ori_ids[track.track_id] <= self.FRAME_THR:
output_stracks_ori_ind.append(ind)
logger.debug('===========Frame {}=========='.format(self.frame_id_))
logger.debug('Activated: {}'.format([track.track_id for track in activated_starcks]))
logger.debug('Refind: {}'.format([track.track_id for track in refind_stracks]))
logger.debug('Lost: {}'.format([track.track_id for track in lost_stracks]))
logger.debug('Removed: {}'.format([track.track_id for track in removed_stracks]))
attack_ids = []
target_ids = []
attack_inds = []
target_inds = []
noise = None
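# Hedged reading of the candidate-pair selection below (added note): for each
# detection, the argmax-IoU neighbour is the preferred target; a track joins
# the attack set when that IoU exceeds ATTACK_IOU_THR, or is merely positive
# for ids already in low_iou_ids. Ten consecutive zero-IoU frames evict an id
# from low_iou_ids, and with no overlap at all the nearest box under bbox_dis
# is used instead.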
if len(dets) > 0:
ious = bbox_ious(np.ascontiguousarray(dets[:, :4], dtype=np.float64),
np.ascontiguousarray(dets[:, :4], dtype=np.float64))
ious[range(len(dets)), range(len(dets))] = 0
ious_inds = np.argmax(ious, axis=1)
dis = bbox_dis(np.ascontiguousarray(dets[:, :4], dtype=np.float64),
np.ascontiguousarray(dets[:, :4], dtype=np.float64))
dis[range(len(dets)), range(len(dets))] = np.inf
dis_inds = np.argmin(dis, axis=1)
for attack_ind, track_id in enumerate(dets_ids):
if track_id is None or self.multiple_ori_ids[track_id] <= self.FRAME_THR \
or dets_ids[ious_inds[attack_ind]] not in self.multiple_ori2att \
or track_id not in self.multiple_ori2att:
continue
if ious[attack_ind, ious_inds[attack_ind]] > self.ATTACK_IOU_THR or (
track_id in self.low_iou_ids and ious[attack_ind, ious_inds[attack_ind]] > 0
):
attack_ids.append(track_id)
target_ids.append(dets_ids[ious_inds[attack_ind]])
attack_inds.append(attack_ind)
target_inds.append(ious_inds[attack_ind])
if hasattr(self, f'temp_i_{track_id}'):
self.__setattr__(f'temp_i_{track_id}', 0)
elif ious[attack_ind, ious_inds[attack_ind]] == 0 and track_id in self.low_iou_ids:
if hasattr(self, f'temp_i_{track_id}'):
self.__setattr__(f'temp_i_{track_id}', self.__getattribute__(f'temp_i_{track_id}') + 1)
else:
self.__setattr__(f'temp_i_{track_id}', 1)
if self.__getattribute__(f'temp_i_{track_id}') > 10:
self.low_iou_ids.remove(track_id)
elif dets_ids[dis_inds[attack_ind]] in self.multiple_ori2att:
attack_ids.append(track_id)
target_ids.append(dets_ids[dis_inds[attack_ind]])
attack_inds.append(attack_ind)
target_inds.append(dis_inds[attack_ind])
fit_index = self.CheckFit(dets, id_feature, attack_ids, attack_inds) if len(attack_ids) else []
if fit_index:
attack_ids = np.array(attack_ids)[fit_index]
target_ids = np.array(target_ids)[fit_index]
attack_inds = np.array(attack_inds)[fit_index]
target_inds = np.array(target_inds)[fit_index]
noise, attack_iter, suc = self.attack_mt_det(
im_blob,
img0,
dets,
inds,
remain_inds,
last_info=self.ad_last_info,
outputs_ori=output,
attack_ids=attack_ids,
attack_inds=attack_inds
)
self.low_iou_ids.update(set(attack_ids))
if suc:
self.attacked_ids.update(set(attack_ids))
print(
f'attack ids: {attack_ids}\tattack frame {self.frame_id_}: SUCCESS\tl2 distance: {(noise ** 2).sum().sqrt().item()}\titeration: {attack_iter}')
else:
print(f'attack ids: {attack_ids}\tattack frame {self.frame_id_}: FAIL\tl2 distance: {(noise ** 2).sum().sqrt().item() if noise is not None else None}\titeration: {attack_iter}')
if noise is not None:
l2_dis = (noise ** 2).sum().sqrt().item()
adImg = torch.clip(im_blob + noise, min=0, max=1)
noise = self.recoverNoise(noise, img0)
noise = (noise - np.min(noise)) / (np.max(noise) - np.min(noise))
noise = (noise * 255).astype(np.uint8)
else:
l2_dis = None
adImg = im_blob
output_stracks_att = self.update(adImg, img0)
adImg = self.recoverNoise(adImg.detach(), img0)
output_stracks_att_ind = []
for ind, track in enumerate(output_stracks_att):
if track.track_id not in self.multiple_att_ids:
self.multiple_att_ids[track.track_id] = 0
self.multiple_att_ids[track.track_id] += 1
if self.multiple_att_ids[track.track_id] <= self.FRAME_THR:
output_stracks_att_ind.append(ind)
if len(output_stracks_ori_ind) and len(output_stracks_att_ind):
ori_dets = [track.curr_tlbr for i, track in enumerate(output_stracks_ori) if i in output_stracks_ori_ind]
att_dets = [track.curr_tlbr for i, track in enumerate(output_stracks_att) if i in output_stracks_att_ind]
ori_dets = np.stack(ori_dets).astype(np.float64)
att_dets = np.stack(att_dets).astype(np.float64)
ious = bbox_ious(ori_dets, att_dets)
row_ind, col_ind = linear_sum_assignment(-ious)
for i in range(len(row_ind)):
if ious[row_ind[i], col_ind[i]] > 0.9:
ori_id = output_stracks_ori[output_stracks_ori_ind[row_ind[i]]].track_id
att_id = output_stracks_att[output_stracks_att_ind[col_ind[i]]].track_id
self.multiple_ori2att[ori_id] = att_id
return output_stracks_ori, output_stracks_att, adImg, noise, l2_dis
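# --- update_attack_mt_hj ----------------------------------------------------
# Hedged note (added): multi-target hijacking variant. On top of the pair
# selection used in update_attack_mt_det, it collects the clean trackers for
# ids not yet in self.ad_ids, passes their velocity estimates (track_vs) to
# attack_mt_hj, and then records those ids in self.ad_ids.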
def update_attack_mt_hj(self, im_blob, img0, **kwargs):
self.frame_id_ += 1
activated_starcks = []
refind_stracks = []
lost_stracks = []
removed_stracks = []
width = img0.shape[1]
height = img0.shape[0]
inp_height = im_blob.shape[2]
inp_width = im_blob.shape[3]
c = np.array([width / 2., height / 2.], dtype=np.float32)
s = max(float(inp_width) / float(inp_height) * height, width) * 1.0
meta = {'c': c, 's': s,
'out_height': inp_height // self.opt.down_ratio,
'out_width': inp_width // self.opt.down_ratio}
''' Step 1: Network forward, get detections & embeddings'''
# with torch.no_grad():
im_blob.requires_grad = True
self.model.zero_grad()
output = self.model(im_blob)[-1]
hm = output['hm'].sigmoid()
wh = output['wh']
id_feature = output['id']
id_feature = F.normalize(id_feature, dim=1)
reg = output['reg'] if self.opt.reg_offset else None
dets_raw, inds = mot_decode(hm, wh, reg=reg, cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K)
id_features = []
for i in range(3):
for j in range(3):
id_feature_exp = _tranpose_and_gather_feat_expand(id_feature, inds, bias=(i - 1, j - 1)).squeeze(0)
id_features.append(id_feature_exp)
id_feature = _tranpose_and_gather_feat_expand(id_feature, inds)
id_feature = id_feature.squeeze(0)
dets = self.post_process(dets_raw.clone(), meta)
dets = self.merge_outputs([dets])[1]
remain_inds = dets[:, 4] > self.opt.conf_thres
dets = dets[remain_inds]
id_feature = id_feature[remain_inds]
for i in range(len(id_features)):
id_features[i] = id_features[i][remain_inds]
id_feature = id_feature.detach().cpu().numpy()
last_id_features = [None for _ in range(len(dets))]
last_ad_id_features = [None for _ in range(len(dets))]
dets_index = [i for i in range(len(dets))]
dets_ids = [None for _ in range(len(dets))]
tracks_ad = []
# import pdb; pdb.set_trace()
# vis
'''
for i in range(0, dets.shape[0]):
bbox = dets[i][0:4]
cv2.rectangle(img0, (bbox[0], bbox[1]),
(bbox[2], bbox[3]),
(0, 255, 0), 2)
cv2.imshow('dets', img0)
cv2.waitKey(0)
id0 = id0-1
'''
if len(dets) > 0:
'''Detections'''
detections = [STrack(STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f, 30) for
(tlbrs, f) in zip(dets[:, :5], id_feature)]
else:
detections = []
''' Add newly detected tracklets to tracked_stracks'''
unconfirmed = []
tracked_stracks = [] # type: list[STrack]
for track in self.tracked_stracks_:
if not track.is_activated:
unconfirmed.append(track)
else:
tracked_stracks.append(track)
''' Step 2: First association, with embedding'''
strack_pool = joint_stracks(tracked_stracks, self.lost_stracks_)
STrack.multi_predict(strack_pool)
dists = matching.embedding_distance(strack_pool, detections)
dists = matching.fuse_motion(self.kalman_filter_, dists, strack_pool, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.7)
# import pdb; pdb.set_trace()
for itracked, idet in matches:
track = strack_pool[itracked]
det = detections[idet]
assert last_id_features[dets_index[idet]] is None
assert last_ad_id_features[dets_index[idet]] is None
last_id_features[dets_index[idet]] = track.smooth_feat
last_ad_id_features[dets_index[idet]] = track.smooth_feat_ad
tracks_ad.append((track, dets_index[idet]))
if track.state == TrackState.Tracked:
track.update(detections[idet], self.frame_id_)
activated_starcks.append(track)
else:
track.re_activate_(det, self.frame_id_, new_id=False)
refind_stracks.append(track)
dets_ids[dets_index[idet]] = track.track_id
''' Step 3: Second association, with IOU'''
dets_index = [dets_index[i] for i in u_detection]
detections = [detections[i] for i in u_detection]
r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
dists = matching.iou_distance(r_tracked_stracks, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.5)
for itracked, idet in matches:
track = r_tracked_stracks[itracked]
det = detections[idet]
assert last_id_features[dets_index[idet]] is None
assert last_ad_id_features[dets_index[idet]] is None
last_id_features[dets_index[idet]] = track.smooth_feat
last_ad_id_features[dets_index[idet]] = track.smooth_feat_ad
tracks_ad.append((track, dets_index[idet]))
if track.state == TrackState.Tracked:
track.update(det, self.frame_id_)
activated_starcks.append(track)
else:
track.re_activate_(det, self.frame_id_, new_id=False)
refind_stracks.append(track)
dets_ids[dets_index[idet]] = track.track_id
for it in u_track:
track = r_tracked_stracks[it]
if not track.state == TrackState.Lost:
track.mark_lost()
lost_stracks.append(track)
'''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
dets_index = [dets_index[i] for i in u_detection]
detections = [detections[i] for i in u_detection]
dists = matching.iou_distance(unconfirmed, detections)
matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
assert last_id_features[dets_index[idet]] is None
assert last_ad_id_features[dets_index[idet]] is None
last_id_features[dets_index[idet]] = unconfirmed[itracked].smooth_feat
last_ad_id_features[dets_index[idet]] = unconfirmed[itracked].smooth_feat_ad
tracks_ad.append((unconfirmed[itracked], dets_index[idet]))
unconfirmed[itracked].update(detections[idet], self.frame_id_)
activated_starcks.append(unconfirmed[itracked])
dets_ids[dets_index[idet]] = unconfirmed[itracked].track_id
for it in u_unconfirmed:
track = unconfirmed[it]
track.mark_removed()
removed_stracks.append(track)
""" Step 4: Init new stracks"""
for inew in u_detection:
track = detections[inew]
if track.score < self.det_thresh:
continue
track.activate_(self.kalman_filter_, self.frame_id_)
activated_starcks.append(track)
dets_ids[dets_index[inew]] = track.track_id
""" Step 5: Update state"""
for track in self.lost_stracks_:
if self.frame_id_ - track.end_frame > self.max_time_lost:
track.mark_removed()
removed_stracks.append(track)
# print('Remained match {} s'.format(t4-t3))
self.tracked_stracks_ = [t for t in self.tracked_stracks_ if t.state == TrackState.Tracked]
self.tracked_stracks_ = joint_stracks(self.tracked_stracks_, activated_starcks)
self.tracked_stracks_ = joint_stracks(self.tracked_stracks_, refind_stracks)
self.lost_stracks_ = sub_stracks(self.lost_stracks_, self.tracked_stracks_)
self.lost_stracks_.extend(lost_stracks)
self.lost_stracks_ = sub_stracks(self.lost_stracks_, self.removed_stracks_)
self.removed_stracks_.extend(removed_stracks)
self.tracked_stracks_, self.lost_stracks_ = remove_duplicate_stracks(self.tracked_stracks_, self.lost_stracks_)
# get scores of lost tracks
output_stracks_ori = [track for track in self.tracked_stracks_ if track.is_activated]
id_set = set([track.track_id for track in output_stracks_ori])
for i in range(len(dets_ids)):
if dets_ids[i] is not None and dets_ids[i] not in id_set:
dets_ids[i] = None
output_stracks_ori_ind = []
for ind, track in enumerate(output_stracks_ori):
if track.track_id not in self.multiple_ori_ids:
self.multiple_ori_ids[track.track_id] = 0
self.multiple_ori_ids[track.track_id] += 1
if self.multiple_ori_ids[track.track_id] <= self.FRAME_THR:
output_stracks_ori_ind.append(ind)
logger.debug('===========Frame {}=========='.format(self.frame_id_))
logger.debug('Activated: {}'.format([track.track_id for track in activated_starcks]))
logger.debug('Refind: {}'.format([track.track_id for track in refind_stracks]))
logger.debug('Lost: {}'.format([track.track_id for track in lost_stracks]))
logger.debug('Removed: {}'.format([track.track_id for track in removed_stracks]))
attack_ids = []
target_ids = []
attack_inds = []
target_inds = []
noise = None
if len(dets) > 0:
ious = bbox_ious(np.ascontiguousarray(dets[:, :4], dtype=np.float64),
np.ascontiguousarray(dets[:, :4], dtype=np.float64))
ious[range(len(dets)), range(len(dets))] = 0
ious_inds = np.argmax(ious, axis=1)
dis = bbox_dis(np.ascontiguousarray(dets[:, :4], dtype=np.float64),
np.ascontiguousarray(dets[:, :4], dtype=np.float64))
dis[range(len(dets)), range(len(dets))] = np.inf
dis_inds = np.argmin(dis, axis=1)
for attack_ind, track_id in enumerate(dets_ids):
if track_id is None or self.multiple_ori_ids[track_id] <= self.FRAME_THR \
or dets_ids[ious_inds[attack_ind]] not in self.multiple_ori2att \
or track_id not in self.multiple_ori2att:
continue
if ious[attack_ind, ious_inds[attack_ind]] > self.ATTACK_IOU_THR or (
track_id in self.low_iou_ids and ious[attack_ind, ious_inds[attack_ind]] > 0
):
attack_ids.append(track_id)
target_ids.append(dets_ids[ious_inds[attack_ind]])
attack_inds.append(attack_ind)
target_inds.append(ious_inds[attack_ind])
if hasattr(self, f'temp_i_{track_id}'):
self.__setattr__(f'temp_i_{track_id}', 0)
elif ious[attack_ind, ious_inds[attack_ind]] == 0 and track_id in self.low_iou_ids:
if hasattr(self, f'temp_i_{track_id}'):
self.__setattr__(f'temp_i_{track_id}', self.__getattribute__(f'temp_i_{track_id}') + 1)
else:
self.__setattr__(f'temp_i_{track_id}', 1)
if self.__getattribute__(f'temp_i_{track_id}') > 10:
self.low_iou_ids.remove(track_id)
elif dets_ids[dis_inds[attack_ind]] in self.multiple_ori2att:
attack_ids.append(track_id)
target_ids.append(dets_ids[dis_inds[attack_ind]])
attack_inds.append(attack_ind)
target_inds.append(dis_inds[attack_ind])
fit_index = self.CheckFit(dets, id_feature, attack_ids, attack_inds) if len(attack_ids) else []
if fit_index:
attack_ids = np.array(attack_ids)[fit_index]
target_ids = np.array(target_ids)[fit_index]
attack_inds = np.array(attack_inds)[fit_index]
target_inds = np.array(target_inds)[fit_index]
att_trackers = []
for attack_id in attack_ids:
if attack_id not in self.ad_ids:
for t in output_stracks_ori:
if t.track_id == attack_id:
att_trackers.append(t)
noise, attack_iter, suc = self.attack_mt_hj(
im_blob,
img0,
dets,
inds,
remain_inds,
last_info=self.ad_last_info,
outputs_ori=output,
attack_ids=attack_ids,
attack_inds=attack_inds,
ad_ids=self.ad_ids,
track_vs=[t.get_v() for t in att_trackers]
)
self.ad_ids.update(attack_ids)
self.low_iou_ids.update(set(attack_ids))
if suc:
self.attacked_ids.update(set(attack_ids))
print(
f'attack ids: {attack_ids}\tattack frame {self.frame_id_}: SUCCESS\tl2 distance: {(noise ** 2).sum().sqrt().item()}\titeration: {attack_iter}')
else:
print(f'attack ids: {attack_ids}\tattack frame {self.frame_id_}: FAIL\tl2 distance: {(noise ** 2).sum().sqrt().item() if noise is not None else None}\titeration: {attack_iter}')
if noise is not None:
l2_dis = (noise ** 2).sum().sqrt().item()
adImg = torch.clip(im_blob + noise, min=0, max=1)
noise = self.recoverNoise(noise, img0)
noise = (noise - np.min(noise)) / (np.max(noise) - np.min(noise))
noise = (noise * 255).astype(np.uint8)
else:
l2_dis = None
adImg = im_blob
output_stracks_att = self.update(adImg, img0)
adImg = self.recoverNoise(adImg.detach(), img0)
output_stracks_att_ind = []
for ind, track in enumerate(output_stracks_att):
if track.track_id not in self.multiple_att_ids:
self.multiple_att_ids[track.track_id] = 0
self.multiple_att_ids[track.track_id] += 1
if self.multiple_att_ids[track.track_id] <= self.FRAME_THR:
output_stracks_att_ind.append(ind)
if len(output_stracks_ori_ind) and len(output_stracks_att_ind):
ori_dets = [track.curr_tlbr for i, track in enumerate(output_stracks_ori) if i in output_stracks_ori_ind]
att_dets = [track.curr_tlbr for i, track in enumerate(output_stracks_att) if i in output_stracks_att_ind]
ori_dets = np.stack(ori_dets).astype(np.float64)
att_dets = np.stack(att_dets).astype(np.float64)
ious = bbox_ious(ori_dets, att_dets)
row_ind, col_ind = linear_sum_assignment(-ious)
for i in range(len(row_ind)):
if ious[row_ind[i], col_ind[i]] > 0.9:
ori_id = output_stracks_ori[output_stracks_ori_ind[row_ind[i]]].track_id
att_id = output_stracks_att[output_stracks_att_ind[col_ind[i]]].track_id
self.multiple_ori2att[ori_id] = att_id
return output_stracks_ori, output_stracks_att, adImg, noise, l2_dis
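# --- update -----------------------------------------------------------------
# Hedged note (added): the clean, no-gradient tracking pass in the usual
# FairMOT/JDE association style. The update_attack_* methods above also call
# it on the adversarial image, and it snapshots strack_pool, unconfirmed and
# the Kalman filter into self.ad_last_info for the next frame's attack.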
def update(self, im_blob, img0, **kwargs):
self.frame_id += 1
self_track_id = kwargs.get('track_id', None)
activated_starcks = []
refind_stracks = []
lost_stracks = []
removed_stracks = []
width = img0.shape[1]
height = img0.shape[0]
inp_height = im_blob.shape[2]
inp_width = im_blob.shape[3]
c = np.array([width / 2., height / 2.], dtype=np.float32)
s = max(float(inp_width) / float(inp_height) * height, width) * 1.0
meta = {'c': c, 's': s,
'out_height': inp_height // self.opt.down_ratio,
'out_width': inp_width // self.opt.down_ratio}
''' Step 1: Network forward, get detections & embeddings'''
with torch.no_grad():
output = self.model(im_blob)[-1]
hm = output['hm'].sigmoid_()
wh = output['wh']
id_feature = output['id']
id_feature = F.normalize(id_feature, dim=1)
reg = output['reg'] if self.opt.reg_offset else None
dets, inds = mot_decode(hm, wh, reg=reg, cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K)
id_feature_ = id_feature.permute(0, 2, 3, 1).view(-1, 512)
id_feature = _tranpose_and_gather_feat(id_feature, inds)
id_feature = id_feature.squeeze(0)
id_feature = id_feature.detach().cpu().numpy()
dets = self.post_process(dets, meta)
dets = self.merge_outputs([dets])[1]
remain_inds = dets[:, 4] > self.opt.conf_thres
dets = dets[remain_inds]
id_feature = id_feature[remain_inds]
# import pdb; pdb.set_trace()
dets_index = inds[0][remain_inds].tolist()
# vis
'''
for i in range(0, dets.shape[0]):
bbox = dets[i][0:4]
cv2.rectangle(img0, (bbox[0], bbox[1]),
(bbox[2], bbox[3]),
(0, 255, 0), 2)
cv2.imshow('dets', img0)
cv2.waitKey(0)
id0 = id0-1
'''
if len(dets) > 0:
'''Detections'''
detections = [STrack(STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f, 30) for
(tlbrs, f) in zip(dets[:, :5], id_feature)]
else:
detections = []
''' Add newly detected tracklets to tracked_stracks'''
unconfirmed = []
tracked_stracks = [] # type: list[STrack]
for track in self.tracked_stracks:
if not track.is_activated:
unconfirmed.append(track)
else:
tracked_stracks.append(track)
''' Step 2: First association, with embedding'''
strack_pool = joint_stracks(tracked_stracks, self.lost_stracks)
# Predict the current location with KF
# for strack in strack_pool:
# strack.predict()
STrack.multi_predict(strack_pool)
dists = matching.embedding_distance(strack_pool, detections)
dists = matching.fuse_motion(self.kalman_filter, dists, strack_pool, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
track = strack_pool[itracked]
det = detections[idet]
if track.state == TrackState.Tracked:
track.update(detections[idet], self.frame_id)
activated_starcks.append(track)
else:
track.re_activate(det, self.frame_id, new_id=False)
refind_stracks.append(track)
''' Step 3: Second association, with IOU'''
dets_index = [dets_index[i] for i in u_detection]
detections = [detections[i] for i in u_detection]
r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
dists = matching.iou_distance(r_tracked_stracks, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.5)
for itracked, idet in matches:
track = r_tracked_stracks[itracked]
det = detections[idet]
if track.state == TrackState.Tracked:
track.update(det, self.frame_id)
activated_starcks.append(track)
else:
track.re_activate(det, self.frame_id, new_id=False)
refind_stracks.append(track)
for it in u_track:
track = r_tracked_stracks[it]
if not track.state == TrackState.Lost:
track.mark_lost()
lost_stracks.append(track)
'''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
dets_index = [dets_index[i] for i in u_detection]
detections = [detections[i] for i in u_detection]
dists = matching.iou_distance(unconfirmed, detections)
matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
unconfirmed[itracked].update(detections[idet], self.frame_id)
activated_starcks.append(unconfirmed[itracked])
for it in u_unconfirmed:
track = unconfirmed[it]
track.mark_removed()
removed_stracks.append(track)
""" Step 4: Init new stracks"""
for inew in u_detection:
track = detections[inew]
if track.score < self.det_thresh:
continue
track.activate(self.kalman_filter, self.frame_id, track_id=self_track_id)
activated_starcks.append(track)
""" Step 5: Update state"""
for track in self.lost_stracks:
if self.frame_id - track.end_frame > self.max_time_lost:
track.mark_removed()
removed_stracks.append(track)
# print('Remained match {} s'.format(t4-t3))
self.tracked_stracks = [t for t in self.tracked_stracks if t.state == TrackState.Tracked]
self.tracked_stracks = joint_stracks(self.tracked_stracks, activated_starcks)
self.tracked_stracks = joint_stracks(self.tracked_stracks, refind_stracks)
self.lost_stracks = sub_stracks(self.lost_stracks, self.tracked_stracks)
self.lost_stracks.extend(lost_stracks)
self.lost_stracks = sub_stracks(self.lost_stracks, self.removed_stracks)
self.removed_stracks.extend(removed_stracks)
self.tracked_stracks, self.lost_stracks = remove_duplicate_stracks(self.tracked_stracks, self.lost_stracks)
# get scores of lost tracks
output_stracks = [track for track in self.tracked_stracks if track.is_activated]
logger.debug('===========Frame {}=========='.format(self.frame_id))
logger.debug('Activated: {}'.format([track.track_id for track in activated_starcks]))
logger.debug('Refind: {}'.format([track.track_id for track in refind_stracks]))
logger.debug('Lost: {}'.format([track.track_id for track in lost_stracks]))
logger.debug('Removed: {}'.format([track.track_id for track in removed_stracks]))
unconfirmed = []
tracked_stracks = [] # type: list[STrack]
for track in self.tracked_stracks:
if not track.is_activated:
unconfirmed.append(track)
else:
tracked_stracks.append(track)
''' Step 2: First association, with embedding'''
strack_pool = joint_stracks(tracked_stracks, self.lost_stracks)
self.ad_last_info = {
'last_strack_pool': copy.deepcopy(strack_pool),
'last_unconfirmed': copy.deepcopy(unconfirmed),
'kalman_filter': copy.deepcopy(self.kalman_filter_)
}
return output_stracks
def _nms(self, heat, kernel=3):
pad = (kernel - 1) // 2
hmax = nn.functional.max_pool2d(
heat, (kernel, kernel), stride=1, padding=pad)
keep = (hmax == heat).float()
return keep
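# Hedged note (added): computer_targets (sic) encodes ground-truth boxes
# relative to anchors using the standard R-CNN box-delta parameterisation:
# dx = (gx - ax) / aw, dy = (gy - ay) / ah, dw = log(gw / aw),
# dh = log(gh / ah).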
def computer_targets(self, boxes, gt_box):
an_ws = boxes[:, 2]
an_hs = boxes[:, 3]
ctr_x = boxes[:, 0]
ctr_y = boxes[:, 1]
gt_ws = gt_box[:, 2]
gt_hs = gt_box[:, 3]
gt_ctr_x = gt_box[:, 0]
gt_ctr_y = gt_box[:, 1]
targets_dx = (gt_ctr_x - ctr_x) / an_ws
targets_dy = (gt_ctr_y - ctr_y) / an_hs
targets_dw = np.log(gt_ws / an_ws)
targets_dh = np.log(gt_hs / an_hs)
targets = np.vstack((targets_dx, targets_dy, targets_dw, targets_dh)).T
return targets
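# Hedged note on the module-level helpers below (added): joint_stracks merges
# two track lists without duplicating ids, sub_stracks removes list-b tracks
# from list-a by id, and remove_duplicate_stracks resolves near-duplicate
# pairs (IoU distance < 0.15) by keeping the track with the longer history.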
def joint_stracks(tlista, tlistb):
exists = {}
res = []
for t in tlista:
exists[t.track_id] = 1
res.append(t)
for t in tlistb:
tid = t.track_id
if not exists.get(tid, 0):
exists[tid] = 1
res.append(t)
return res
def sub_stracks(tlista, tlistb):
stracks = {}
for t in tlista:
stracks[t.track_id] = t
for t in tlistb:
tid = t.track_id
if stracks.get(tid, 0):
del stracks[tid]
return list(stracks.values())
def remove_duplicate_stracks(stracksa, stracksb):
pdist = matching.iou_distance(stracksa, stracksb)
pairs = np.where(pdist < 0.15)
dupa, dupb = list(), list()
for p, q in zip(*pairs):
timep = stracksa[p].frame_id - stracksa[p].start_frame
timeq = stracksb[q].frame_id - stracksb[q].start_frame
if timep > timeq:
dupb.append(q)
else:
dupa.append(p)
resa = [t for i, t in enumerate(stracksa) if i not in dupa]
resb = [t for i, t in enumerate(stracksb) if i not in dupb]
return resa, resb
def save(obj, name):
with open(f'/home/derry/Desktop/{name}.pth', 'wb') as f:
pickle.dump(obj, f)
def load(name):
with open(f'/home/derry/Desktop/{name}.pth', 'rb') as f:
obj = pickle.load(f)
return obj
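# --- Hedged usage sketch (added; not part of the original file) --------------
# A minimal, self-contained illustration of the noise post-processing shared
# by the update_attack_* methods above: the reported l2 distance is taken on
# the raw perturbation tensor, while the returned noise image is min-max
# normalised to uint8 for visualisation. The tensor shape is an assumption.
import numpy as np
import torch

def _demo_noise_postprocess():
    noise = torch.randn(1, 3, 608, 1088) * 0.01   # stand-in perturbation
    l2_dis = (noise ** 2).sum().sqrt().item()     # value printed in the attack logs
    vis = noise.squeeze(0).permute(1, 2, 0).numpy()
    vis = (vis - np.min(vis)) / (np.max(vis) - np.min(vis))
    vis = (vis * 255).astype(np.uint8)            # displayable uint8 image
    return l2_dis, vis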
# --- eula-scan/app/counts.py (davidbstein/ml-law @ 2db439a9b618384c57acb51ddc0d55cf864ed8be, MIT, blob c1219d3a603a918463a09f2d7d78debd9182f482) ---
import model
print(dict(model._ex("select count(*) policies from tos_text").fetchone()))
print(dict(model._ex("select count(*) companies from company where last_error is null").fetchone()))
# --- entrepreneurial_property/migrations/0015_auto_20180905_0646.py (CzechInvest/ciis @ c6102598f564a717472e5e31e7eb894bba2c8104, MIT, blob c1724e3fcc8f514b866413548912dc98400bc49f) ---
# Generated by Django 2.0.5 on 2018-09-05 06:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('entrepreneurial_property', '0014_auto_20180905_0629'),
]
operations = [
migrations.AlterModelOptions(
name='brownfieldwastewater',
options={'verbose_name': 'Odpadní voda', 'verbose_name_plural': 'Odpadní vody'},
),
migrations.AlterModelOptions(
name='developmentparkwastewater',
options={'verbose_name': 'Odpadní voda', 'verbose_name_plural': 'Odpadní vody'},
),
migrations.AlterModelOptions(
name='greenfieldwastewater',
options={'verbose_name': 'Odpadní voda', 'verbose_name_plural': 'Odpadní vody'},
),
migrations.AlterModelOptions(
name='industrialarealwastewater',
options={'verbose_name': 'Odpadní voda', 'verbose_name_plural': 'Odpadní vody'},
),
migrations.AlterModelOptions(
name='officewastewater',
options={'verbose_name': 'Odpadní voda', 'verbose_name_plural': 'Odpadní vody'},
),
migrations.AlterModelOptions(
name='scientificparkwastewater',
options={'verbose_name': 'Odpadní voda', 'verbose_name_plural': 'Odpadní vody'},
),
migrations.AddField(
model_name='brownfield',
name='uuid',
field=models.CharField(default='6cfe1b7c-0699-466a-99c0-04523b11b3ea', max_length=36),
preserve_default=False,
),
migrations.AddField(
model_name='developmentpark',
name='uuid',
field=models.CharField(default='6cfe1b7c-0699-466a-99c0-04523b11b3ea', max_length=36),
preserve_default=False,
),
migrations.AddField(
model_name='greenfield',
name='uuid',
field=models.CharField(default='6cfe1b7c-0699-466a-99c0-04523b11b3ea', max_length=36),
preserve_default=False,
),
migrations.AddField(
model_name='industrialareal',
name='uuid',
field=models.CharField(default='6cfe1b7c-0699-466a-99c0-04523b11b3ea', max_length=36),
preserve_default=False,
),
migrations.AddField(
model_name='office',
name='uuid',
field=models.CharField(default='6cfe1b7c-0699-466a-99c0-04523b11b3ea', max_length=36),
preserve_default=False,
),
migrations.AddField(
model_name='scientificpark',
name='uuid',
field=models.CharField(default='6cfe1b7c-0699-466a-99c0-04523b11b3ea', max_length=36),
preserve_default=False,
),
]
# --- OpenThermML/backup.py (wwlorey/open-thermostat-software @ a0521b0d3b65fe9f2bd23f5059971d3a8d773e54, MIT, blob c17b28f08989d2fa2b75ad5040f70d15e7571b14) ---
predictions = [
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ],
]
prediction_counts = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ],
]
| 149.578947
| 247
| 0.364532
| 1,011
| 2,842
| 1.023739
| 0.004946
| 1.940097
| 2.901449
| 3.857005
| 0.972947
| 0.972947
| 0.972947
| 0.972947
| 0.972947
| 0.972947
| 0
| 0.481836
| 0.263899
| 2,842
| 18
| 248
| 157.888889
| 0.012906
| 0
| 0
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 14
|
c183f4d6f31778737b79a4e9462050d2a9596ead
| 872
|
py
|
Python
|
carla_env/__init__.py
|
janwithb/Carla-Gym-Wrapper
|
f1ea9fe89427c5a654f5561f214a5fba139b2568
|
[
"Apache-2.0"
] | 6
|
2021-04-15T09:22:44.000Z
|
2022-02-15T01:07:23.000Z
|
carla_env/__init__.py
|
janwithb/Carla-Gym-Wrapper
|
f1ea9fe89427c5a654f5561f214a5fba139b2568
|
[
"Apache-2.0"
] | 2
|
2021-08-23T02:47:40.000Z
|
2022-01-17T02:20:47.000Z
|
carla_env/__init__.py
|
janwithb/Carla-Gym-Wrapper
|
f1ea9fe89427c5a654f5561f214a5fba139b2568
|
[
"Apache-2.0"
] | 2
|
2021-07-12T06:32:37.000Z
|
2021-11-24T14:43:13.000Z
|
from gym.envs.registration import register
register(
id='CarlaEnv-state-v1',
entry_point='carla_env.carla_env:CarlaEnv',
max_episode_steps=500,
kwargs={
'render': True,
'carla_port': 2000,
'changing_weather_speed': 0.1,
'frame_skip': 1,
'observations_type': 'state',
'traffic': True,
'vehicle_name': 'tesla.cybertruck',
'map_name': 'Town05',
'autopilot': True
}
)
register(
id='CarlaEnv-pixel-v1',
entry_point='carla_env.carla_env:CarlaEnv',
max_episode_steps=500,
kwargs={
'render': True,
'carla_port': 2000,
'changing_weather_speed': 0.1,
'frame_skip': 1,
'observations_type': 'pixel',
'traffic': True,
'vehicle_name': 'tesla.cybertruck',
'map_name': 'Town05',
'autopilot': True
}
)
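# Usage sketch (an assumption, not part of this file): the register() calls
# only take effect once the package is imported, and stepping the env requires
# a CARLA server already listening on carla_port 2000 as configured above.
#
#   import gym
#   import carla_env  # noqa: F401 -- triggers the register() calls
#
#   env = gym.make('CarlaEnv-state-v1')
#   obs = env.reset()
#   obs, reward, done, info = env.step(env.action_space.sample())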
| 23.567568
| 47
| 0.579128
| 94
| 872
| 5.117021
| 0.43617
| 0.066528
| 0.074844
| 0.070686
| 0.806653
| 0.806653
| 0.806653
| 0.806653
| 0.806653
| 0.806653
| 0
| 0.041204
| 0.276376
| 872
| 36
| 48
| 24.222222
| 0.721078
| 0
| 0
| 0.727273
| 0
| 0
| 0.396789
| 0.114679
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.030303
| 0
| 0.030303
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
a9b2db3f369fcfd5b77d040b3bc04025ef934688
| 7,215
|
py
|
Python
|
experiment-2/02_gm_correlations_across_masks.py
|
NBCLab/power-replication
|
7a938cac6fd132f8cbd76535255680aeb2e550cb
|
[
"Apache-2.0"
] | 1
|
2021-12-20T13:30:23.000Z
|
2021-12-20T13:30:23.000Z
|
experiment-2/02_gm_correlations_across_masks.py
|
NBCLab/power-replication
|
7a938cac6fd132f8cbd76535255680aeb2e550cb
|
[
"Apache-2.0"
] | 14
|
2020-12-21T15:58:45.000Z
|
2022-03-16T22:20:25.000Z
|
experiment-2/02_gm_correlations_across_masks.py
|
NBCLab/power-replication
|
7a938cac6fd132f8cbd76535255680aeb2e550cb
|
[
"Apache-2.0"
] | null | null | null |
"""Experiment 2, Analysis Group 2.
Comparing measures of global signal.
Mean cortical signal of MEDN correlated with signal of all gray matter
- Distribution of Pearson correlation coefficients
- Page 2, right column, first paragraph
Mean cortical signal of MEDN correlated with signal of whole brain
- Distribution of Pearson correlation coefficients
- Page 2, right column, first paragraph
"""
import os.path as op
import sys
import numpy as np
from nilearn import image, masking
from scipy.stats import ttest_1samp
sys.path.append("..")
from utils import get_prefixes # noqa: E402
def correlate_cort_with_gm(project_dir, participants_df):
"""Correlate mean cortical signal from MEDN files with signal from all gray matter.
- Distribution of Pearson correlation coefficients
- Page 2, right column, first paragraph
"""
ALPHA = 0.05
corrs = []
for i_run, participant_row in participants_df.iterrows():
if participant_row["exclude"] == 1:
print(f"Skipping {participant_row['participant_id']}.")
continue
subj_id = participant_row["participant_id"]
dset = participant_row["dataset"]
dset_prefix = get_prefixes()[dset]
subj_prefix = dset_prefix.format(participant_id=subj_id)
cort_mask = op.join(
project_dir,
dset,
"derivatives",
"power",
subj_id,
"anat",
f"{subj_id}_space-scanner_res-bold_label-CGM_mask.nii.gz",
)
dseg_file = op.join(
project_dir,
dset,
"derivatives",
"power",
subj_id,
"anat",
f"{subj_id}_space-scanner_res-bold_desc-totalMaskWithCSF_dseg.nii.gz",
)
# Values 1-3 are cortical ribbon, subcortical structures, and cerebellum, respectively.
gm_mask = image.math_img(
"np.logical_and(img > 0, img <= 3).astype(int)", img=dseg_file
)
medn_file = op.join(
project_dir,
dset,
"derivatives",
"tedana",
subj_id,
"func",
f"{subj_prefix}_desc-optcomDenoised_bold.nii.gz",
)
cort_data = masking.apply_mask(medn_file, cort_mask)
gm_data = masking.apply_mask(medn_file, gm_mask)
# Average across voxels
cort_data = np.mean(cort_data, axis=1) # TODO: CHECK AXIS ORDER
gm_data = np.mean(gm_data, axis=1)
corr = np.corrcoef((cort_data, gm_data))
assert corr.shape == (2, 2), corr.shape
corr = corr[1, 0]
corrs.append(corr)
corrs = np.array(corrs)
# Convert r values to normally distributed z values with Fisher's
# transformation (not test statistics though)
z_values = np.arctanh(corrs)
mean_z = np.mean(z_values)
sd_z = np.std(z_values)
# And now a significance test!!
# TODO: Should we compute confidence intervals from z-values then
# convert back to r-values? I think so, but there's so little in the
# literature about dealing with *distributions* of correlation
# coefficients.
t, p = ttest_1samp(z_values, popmean=0, alternative="greater")
if p <= ALPHA:
print(
"ANALYSIS 1: Correlations between the mean multi-echo denoised signal extracted from "
"the cortical ribbon and that extracted from all gray matter "
f"(M[Z] = {mean_z}, SD[Z] = {sd_z}) were significantly higher than zero, "
f"t({participants_df.shape[0] - 1}) = {t:.03f}, p = {p:.03f}."
)
else:
print(
"ANALYSIS 1: Correlations between the mean multi-echo denoised signal extracted from "
"the cortical ribbon and that extracted from all gray matter "
f"(M[Z] = {mean_z}, SD[Z] = {sd_z}) were not significantly higher than zero, "
f"t({participants_df.shape[0] - 1}) = {t:.03f}, p = {p:.03f}."
)
def correlate_cort_with_wb(project_dir, participants_df):
"""Correlate mean cortical signal from MEDN files with signal from whole brain.
- Distribution of Pearson correlation coefficients
- Page 2, right column, first paragraph
"""
ALPHA = 0.05
corrs = []
for i_run, participant_row in participants_df.iterrows():
if participant_row["exclude"] == 1:
print(f"Skipping {participant_row['participant_id']}.")
continue
subj_id = participant_row["participant_id"]
dset = participant_row["dataset"]
dset_prefix = get_prefixes()[dset]
subj_prefix = dset_prefix.format(participant_id=subj_id)
cort_mask = op.join(
project_dir,
dset,
"derivatives",
"power",
subj_id,
"anat",
f"{subj_id}_space-scanner_res-bold_label-CGM_mask.nii.gz",
)
dseg_file = op.join(
project_dir,
dset,
"derivatives",
"power",
subj_id,
"anat",
f"{subj_id}_space-scanner_res-bold_desc-totalMaskWithCSF_dseg.nii.gz",
)
# Values 1+ are brain.
wb_mask = image.math_img("img > 0", img=dseg_file)
medn_file = op.join(
project_dir,
dset,
"derivatives",
"tedana",
subj_id,
"func",
f"{subj_prefix}_desc-optcomDenoised_bold.nii.gz",
)
cort_data = masking.apply_mask(medn_file, cort_mask)
wb_data = masking.apply_mask(medn_file, wb_mask)
# Average across voxels
cort_data = np.mean(cort_data, axis=1) # TODO: CHECK AXIS ORDER
wb_data = np.mean(wb_data, axis=1)
corr = np.corrcoef((cort_data, wb_data))
assert corr.shape == (2, 2), corr.shape
corr = corr[1, 0]
corrs.append(corr)
# Convert r values to normally distributed z values with Fisher's
# transformation (not test statistics though)
z_values = np.arctanh(corrs)
mean_z = np.mean(z_values)
sd_z = np.std(z_values)
# And now a significance test!!
# TODO: Should we compute confidence intervals from z-values then
# convert back to r-values? I think so, but there's so little in the
# literature about dealing with *distributions* of correlation
# coefficients.
t, p = ttest_1samp(z_values, popmean=0, alternative="greater")
if p <= ALPHA:
print(
"ANALYSIS 2: Correlations between the mean multi-echo denoised signal extracted from "
"the cortical ribbon and that extracted from the whole brain "
f"(M[Z] = {mean_z}, SD[Z] = {sd_z}) were significantly higher than zero, "
f"t({participants_df.shape[0] - 1}) = {t:.03f}, p = {p:.03f}."
)
else:
print(
"ANALYSIS 2: Correlations between the mean multi-echo denoised signal extracted from "
"the cortical ribbon and that extracted from the whole brain "
f"(M[Z] = {mean_z}, SD[Z] = {sd_z}) were not significantly higher than zero, "
f"t({participants_df.shape[0] - 1}) = {t:.03f}, p = {p:.03f}."
)
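# Editorial sketch: the Fisher r-to-z workflow used above, demonstrated on
# synthetic correlations so it runs without any imaging data. Assumes only
# numpy and scipy >= 1.6 (for the `alternative` keyword of ttest_1samp).
if __name__ == "__main__":
    rng = np.random.default_rng(seed=0)
    # Fake per-participant correlation coefficients, clipped inside (-1, 1)
    fake_corrs = np.clip(rng.normal(loc=0.5, scale=0.1, size=20), -0.99, 0.99)
    fake_z = np.arctanh(fake_corrs)  # Fisher r-to-z: z values are ~normal
    t_demo, p_demo = ttest_1samp(fake_z, popmean=0, alternative="greater")
    # tanh back-transforms the mean z to an r-scale summary
    print(f"mean r = {np.tanh(np.mean(fake_z)):.3f}, t = {t_demo:.3f}, p = {p_demo:.3g}")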
| 35.195122
| 98
| 0.60693
| 926
| 7,215
| 4.568035
| 0.203024
| 0.019858
| 0.007565
| 0.022695
| 0.895035
| 0.895035
| 0.881797
| 0.881797
| 0.867139
| 0.84539
| 0
| 0.013336
| 0.293278
| 7,215
| 204
| 99
| 35.367647
| 0.816238
| 0.228136
| 0
| 0.785714
| 0
| 0.057143
| 0.321467
| 0.092626
| 0
| 0
| 0
| 0.009804
| 0.014286
| 1
| 0.014286
| false
| 0
| 0.042857
| 0
| 0.057143
| 0.042857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e7ab93c674e1c7b0c7b235ef1ec15c5f79897159
| 184
|
py
|
Python
|
ptb/ledger/views.py
|
vkpdeveloper/ShaktiDeep-Traders-Bill-Management-Project
|
566a64268fabf256e80bee680d1fbde2c6c0787d
|
[
"MIT"
] | 2
|
2019-11-26T11:57:56.000Z
|
2020-06-17T05:16:47.000Z
|
ptb/ledger/views.py
|
vkpdeveloper/ShaktiDeep-Traders-Bill-Management-Project
|
566a64268fabf256e80bee680d1fbde2c6c0787d
|
[
"MIT"
] | null | null | null |
ptb/ledger/views.py
|
vkpdeveloper/ShaktiDeep-Traders-Bill-Management-Project
|
566a64268fabf256e80bee680d1fbde2c6c0787d
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
def index(request):
return render(request, 'ledger/index.html')
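# Editorial sketch (hypothetical -- the project's ledger/urls.py is not shown):
# wiring the index view above into a URLconf would look like:
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('', views.index, name='index'),
#   ]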
| 26.285714
| 47
| 0.777174
| 24
| 184
| 5.958333
| 0.541667
| 0.20979
| 0.265734
| 0.34965
| 0.503497
| 0.503497
| 0.503497
| 0
| 0
| 0
| 0
| 0
| 0.152174
| 184
| 7
| 47
| 26.285714
| 0.916667
| 0
| 0
| 0.4
| 0
| 0
| 0.094972
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.6
| 0.2
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
e7ce0818904d46d11eb74c9bc1c1d5e0067a9008
| 68,039
|
py
|
Python
|
GraphTrace.py
|
a-dera/Graphe
|
70886565cc1dbda9f343dc11edcc480e2372934f
|
[
"MIT"
] | null | null | null |
GraphTrace.py
|
a-dera/Graphe
|
70886565cc1dbda9f343dc11edcc480e2372934f
|
[
"MIT"
] | null | null | null |
GraphTrace.py
|
a-dera/Graphe
|
70886565cc1dbda9f343dc11edcc480e2372934f
|
[
"MIT"
] | null | null | null |
##################################################
# Library and function imports:
from tkinter import *
from PIL import ImageGrab
from tkinter import PhotoImage
import tkinter as tk
from tkinter import ttk
from collections import defaultdict
class Euler:
def __init__(self,vertices):
self.V = vertices # number of vertices
self.graph = defaultdict(list) # default dictionary to store graph
# function to add an edge to graph
def addEdge(self,u,v):
# undirected edge: record it in both adjacency lists
self.graph[u].append(v)
self.graph[v].append(u)
#A function used by isConnected
def DFSUtil(self,v,visited):
# Mark the current node as visited
visited[v]= True
#Recur for all the vertices adjacent to this vertex
for i in self.graph[v]:
if visited[i]==False:
self.DFSUtil(i,visited)
'''Method to check if all non-zero degree vertices are
connected. It mainly does DFS traversal starting from
node with non-zero degree'''
def isConnected(self):
# Mark all the vertices as not visited
visited =[False]*(self.V)
# Find a vertex with non-zero degree
for i in range(self.V):
if len(self.graph[i]) > 1:
break
# If there are no edges in the graph, return true
if i == self.V-1:
return True
# Start DFS traversal from a vertex with non-zero degree
self.DFSUtil(i,visited)
# Check if all non-zero degree vertices are visited
for i in range(self.V):
if visited[i]==False and len(self.graph[i]) > 0:
return False
return True
'''The function returns one of the following values
0 --> If graph is not Eulerian
1 --> If graph has an Euler path (Semi-Eulerian)
2 --> If graph has an Euler Circuit (Eulerian) '''
def isEulerian(self):
# Check if all non-zero degree vertices are connected
if self.isConnected() == False:
return 0
else:
#Count vertices with odd degree
odd = 0
for i in range(self.V):
if len(self.graph[i]) % 2 !=0:
odd +=1
'''If odd count is 2, then semi-eulerian.
If odd count is 0, then eulerian
If count is more than 2, then graph is not Eulerian
Note that odd count can never be 1 for undirected graph'''
if odd == 0:
return 2
elif odd == 2:
return 1
elif odd > 2:
return 0
# Function to run test cases
def test(self):
res = self.isEulerian()
if res == 0:
#print ("Le graphe n'est pas eulerien")
resultat="Le graphe n'est pas eulerien"
return resultat
elif res ==1 :
#print ("Le graphe comporte un chemain eulerien")
resultat="Le graphe comporte un chemain eulerien"
return resultat
else:
#print ("Le graphe comporte un cycle eulerien")
resultat="Le graphe comporte un cycle eulerien"
return resultat
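# Editorial sketch: exercising the Euler checker on a triangle (0-1, 1-2, 2-0).
# addEdge() records both directions, so every vertex has even degree and
# test() reports an Eulerian cycle ("Le graphe comporte un cycle eulerien").
def _demo_euler():
    g = Euler(3)
    g.addEdge(0, 1)
    g.addEdge(1, 2)
    g.addEdge(2, 0)
    return g.test()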
class Hamilton():
def __init__(self, vertices):
self.graph = [[0 for column in range(vertices)]
for row in range(vertices)]
self.V = vertices
''' Check if this vertex is an adjacent vertex
of the previously added vertex and is not
included in the path earlier '''
def isSafe(self, v, pos, path):
# Check if current vertex and last vertex
# in path are adjacent
if self.graph[ path[pos-1] ][v] == 0:
return False
# Check if current vertex not already in path
for vertex in path:
if vertex == v:
return False
return True
#############################################################
# A recursive utility function to solve
# hamiltonian cycle problem
def hamCycleUtil(self, path, pos):
# base case: if all vertices are
# included in the path
if pos == self.V:
# Last vertex must be adjacent to the
# first vertex in path to make a cycle
if self.graph[ path[pos-1] ][ path[0] ] == 1:
return True
else:
return False
# Try different vertices as a next candidate
# in Hamiltonian Cycle. We don't try for 0 as
# we included 0 as the starting point in hamCycle()
for v in range(1,self.V):
if self.isSafe(v, pos, path) == True:
path[pos] = v
if self.hamCycleUtil(path, pos+1) == True:
return True
# Remove current vertex if it doesn't
# lead to a solution
path[pos] = -1
return False
def hamCycle(self):
path = [-1] * self.V
''' Let us put vertex 0 as the first vertex
in the path. If there is a Hamiltonian Cycle,
then the path can be started from any point
of the cycle as the graph is undirected '''
path[0] = 0
if self.hamCycleUtil(path,1) == False:
#print ("Solution does not exist\n")
resultat="Le graphe n'est pas hamiltonien"
return resultat
return self.printSolution(path)
def printSolution(self, path):
#print ("Solution Exists: Following is one Hamiltonian Cycle")
resultat="Le graphe est hamiltonien: "
for vertex in path:
#print (vertex)
resultat+=str(vertex)+" "
#print (path[0], "\n")
resultat+=str(path[0])
return resultat
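# Editorial sketch: Hamilton expects an adjacency matrix in .graph; on a
# 4-vertex cycle, hamCycle() finds the tour and returns
# "Le graphe est hamiltonien: 0 1 2 3 0".
def _demo_hamilton():
    h = Hamilton(4)
    h.graph = [[0, 1, 0, 1],
               [1, 0, 1, 0],
               [0, 1, 0, 1],
               [1, 0, 1, 0]]
    return h.hamCycle()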
##########################################
class Max_flow:
def __init__(self,graph):
self.graph = graph # residual graph
self.ROW = len(graph)
#self.COL = len(gr[0])
'''Returns true if there is a path from source 's' to sink 't' in
residual graph. Also fills parent[] to store the path '''
def BFS(self,s, t, parent):
# Mark all the vertices as not visited
visited =[False]*(self.ROW)
# Create a queue for BFS
queue=[]
# Mark the source node as visited and enqueue it
queue.append(s)
visited[s] = True
# Standard BFS Loop
while queue:
#Dequeue a vertex from the queue
u = queue.pop(0)
# Get all adjacent vertices of the dequeued vertex u
# If a adjacent has not been visited, then mark it
# visited and enqueue it
for ind, val in enumerate(self.graph[u]):
if visited[ind] == False and val > 0 :
queue.append(ind)
visited[ind] = True
parent[ind] = u
# If we reached sink in BFS starting from source, then return
# true, else false
return True if visited[t] else False
# Returns the maximum flow from s to t in the given graph
def FordFulkerson(self, source, sink):
# This array is filled by BFS and to store path
parent = [-1]*(self.ROW)
max_flow = 0 # There is no flow initially
# Augment the flow while there is path from source to sink
while self.BFS(source, sink, parent) :
# Find minimum residual capacity of the edges along the
# path filled by BFS. Or we can say find the maximum flow
# through the path found.
path_flow = float("Inf")
s = sink
while(s != source):
path_flow = min (path_flow, self.graph[parent[s]][s])
s = parent[s]
# Add path flow to overall flow
max_flow += path_flow
# update residual capacities of the edges and reverse edges
# along the path
v = sink
while(v != source):
u = parent[v]
self.graph[u][v] -= path_flow
self.graph[v][u] += path_flow
v = parent[v]
return max_flow
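# Editorial sketch: Max_flow consumes a capacity matrix (which it mutates into
# the residual graph). With capacities 0->1 = 3, 0->2 = 4 and 1->2 = 2, the
# maximum flow from vertex 0 to vertex 2 is 4 + 2 = 6.
def _demo_max_flow():
    g = Max_flow([[0, 3, 4],
                  [0, 0, 2],
                  [0, 0, 0]])
    return g.FordFulkerson(0, 2)  # -> 6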
#################################################################
#            - Directed Graph Window -            #
# /////////////////////////////////////////////// #
# Description: program for working with           #
#                 directed graphs                 #
# /////////////////////////////////////////////// #
class GrapheOriente(Tk):
def __init__(self):
Tk.__init__(self) # parent class constructor
#get the size of the computer screen
width=self.winfo_screenwidth()
height=self.winfo_screenheight()
self.largeure=900
self.hauteure=500
self.x=(width/2)-(self.largeure/2)
self.y=(height/2)-(self.hauteure/2)
#canvas initialization
self.graphe =Canvas(self, width =self.largeure, height =self.hauteure, bg ="white")
self.geometry('{}x{}+{}+{}'.format(self.largeure,self.hauteure,int(self.x),int(self.y)))
self.resizable(False,False)
self.wm_title('Graphe Oriente')
self.graphe.pack(side =TOP, padx =5, pady =5)
#events triggered by mouse clicks
self.bind("<Double-Button-1>", self.sommet)
self.bind("<Button-3>", self.arc)
#window menu
menubar = Menu(self)
filemenu = Menu(menubar, tearoff = 0)
filemenu.add_separator()
filemenu.add_command(label = "Quitter ?", command = self.destroy)
filemenu.add_command(label = "Sauvegarder", command = self.save)
menubar.add_cascade(label = "Fichier", menu = filemenu)
filemenu = Menu(menubar, tearoff = 0)
filemenu.add_separator()
filemenu.add_command(label = "Ordre du graphe", command=self.ordre_graphe)
filemenu.add_command(label = "Degre du sommet", command=self.degres_sommet)
filemenu.add_command(label = "Matrice d'adjacence", command=self.matriceAdj)
filemenu.add_command(label = "Successeur du sommet", command=self.successeur)
filemenu.add_command(label = "Predecesseur du sommet", command=self.predeccesseur)
filemenu.add_command(label = "Demi degre supperieur du sommet", command=self.demi_deg_sup)
filemenu.add_command(label = "Demi degre inferieur du sommet", command=self.demi_deg_inf)
filemenu.add_command(label = "Graphe Hamiltonien ?", command=self.hamilton)
filemenu.add_command(label = "Graphe Eulerien ?", command=self.euler)
filemenu.add_command(label = "Flow maximal", command=self.maxflow)
menubar.add_cascade(label = "Traitement", menu = filemenu)
filemenu = Menu(menubar, tearoff = 0)
filemenu.add_separator()
filemenu.add_command(label = "Tout effacer ?", command =self.delete)
menubar.add_cascade(label = "Effacer", menu = filemenu)
filemenu = Menu(menubar, tearoff = 0)
filemenu.add_command(label = "Aide", command =self.aide)
menubar.add_cascade(label = "Aide", menu = filemenu)
filemenu = Menu(menubar, tearoff = 0)
self.config(menu = menubar)
#global variables
self.i=int(0)
self.compt=int()
self.temp=list()
self.connect=list()
self.point=list()
self.sommets=list()
self.couple=list()
self.matrice=list()
self.var=StringVar()
self.entier=int()
def delete(self):
for element in self.graphe.find_all():
self.graphe.delete(element)
self.i=int(0)
self.compt=int()
self.temp=list()
self.connect=list()
self.point=list()
self.sommets=list()
self.couple=list()
self.matrice=list()
self.var=StringVar()
self.entier=int()
pass
# function to close the child window
def Close_Toplevel (self):
self.compt=int()
self.temp=list()
self.wm_attributes("-disable",False)
self.toplevel_dialog.destroy()
self.deiconify()
#function to close the save child window
def Close_Save (self,event=None):
if len(self.var.get())>0:
x=self.graphe.winfo_rootx()
y=self.graphe.winfo_rooty()
w=self.graphe.winfo_width()
h=self.graphe.winfo_height()
image=ImageGrab.grab((x+2,y+2,x+w-2,y+h-2))
image.save("save/{}.png".format(self.var.get()))
else:
x=self.graphe.winfo_rootx()
y=self.graphe.winfo_rooty()
w=self.graphe.winfo_width()
h=self.graphe.winfo_height()
image=ImageGrab.grab((x+2,y+2,x+w-2,y+h-2))
image.save("save/Graphe.png")
self.wm_attributes("-disable",False)
self.toplevel_dialog.destroy()
self.deiconify()
def aide(self):
self.wm_attributes("-disable",True)
self.toplevel_dialog=tk.Toplevel(self)
self.toplevel_dialog.minsize(600,150)
self.toplevel_dialog.wm_title("Aide")
width=self.toplevel_dialog.winfo_screenwidth()
height=self.toplevel_dialog.winfo_screenheight()
largeure=600
hauteure=150
x=(width/2)-(largeure/2)
y=(height/2)-(hauteure/2)
self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
self.toplevel_dialog.transient(self)
self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
self.toplevel_dialog.bind("<Return>", self.Close_Toplevel)
self.toplevel_dialog.focus()
aide="""
Tracer un sommet: Double clic
Tracer un arc: clic gauche sur chaque sommet
"""
self.label=tk.Label(self.toplevel_dialog, text=aide,justify='left',font='Century 13 bold')
self.label.pack(side='top')
self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
self.yes_button.pack(side='right',fill='x',expand=True)
#function to save the drawn graph
def save(self):
self.wm_attributes("-disable",True)
self.toplevel_dialog=tk.Toplevel(self)
self.toplevel_dialog.minsize(600,100)
self.toplevel_dialog.wm_title("Sauvegarder")
width=self.toplevel_dialog.winfo_screenwidth()
height=self.toplevel_dialog.winfo_screenheight()
largeure=600
hauteure=100
x=(width/2)-(largeure/2)
y=(height/2)-(hauteure/2)
self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
self.toplevel_dialog.transient(self)
self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
self.toplevel_dialog.bind("<Return>", self.Close_Save)
self.toplevel_dialog.focus()
self.label=tk.Label(self.toplevel_dialog, text='Entrer le nom de limage: ')
self.label.pack(side='left')
self.var=tk.Entry(self.toplevel_dialog)
self.var.pack(side='left')
self.var.bind("<Return>", self.Close_Save)
self.var.bind("<Escape>", self.Close_Toplevel)
self.var.focus_set()
self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
self.yes_button.pack(side='right',fill='x',expand=True)
self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_Save)
self.yes_button.pack(side='right',fill='x',expand=True)
# function to detect whether the graph is Eulerian
def euler(self):
self.wm_attributes("-disable",True)
self.toplevel_dialog=tk.Toplevel(self)
self.toplevel_dialog.minsize(600,100)
self.toplevel_dialog.wm_title("Graphe eulerien")
width=self.toplevel_dialog.winfo_screenwidth()
height=self.toplevel_dialog.winfo_screenheight()
largeure=600
hauteure=100
x=(width/2)-(largeure/2)
y=(height/2)-(hauteure/2)
self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
self.toplevel_dialog.transient(self)
self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
self.toplevel_dialog.bind("<Return>", self.Close_Toplevel)
l=len(self.couple)
lg=len(self.sommets)
if lg>=2:
g1 = Euler(lg)
for i in range(l):
g1.addEdge(self.couple[i][0],self.couple[i][1])
self.var=g1.test()
self.label=tk.Label(self.toplevel_dialog, text=self.var)
self.label.pack(side='top')
else:
self.label=tk.Label(self.toplevel_dialog, text="Votre requette ne peut etre traiter")
self.label.pack(side='top')
self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
self.yes_button.pack(side='right',fill='x',expand=True)
#function to detect whether the graph is Hamiltonian
def hamilton(self):
self.wm_attributes("-disable",True)
self.toplevel_dialog=tk.Toplevel(self)
self.toplevel_dialog.minsize(600,100)
self.toplevel_dialog.wm_title("Graphe hamiltonien")
width=self.toplevel_dialog.winfo_screenwidth()
height=self.toplevel_dialog.winfo_screenheight()
largeure=600
hauteure=100
x=(width/2)-(largeure/2)
y=(height/2)-(hauteure/2)
self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
self.toplevel_dialog.transient(self)
self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
self.toplevel_dialog.bind("<Return>", self.Close_Toplevel)
lg=len(self.couple)
if lg>1:
l=len(self.sommets)
self.matrice=list()
for i in range(l):
self.matrice.append([])
for j in range(l):
k=int(0)
temp=list()
temp.append(self.sommets[i])
temp.append(self.sommets[j])
for element in self.couple:
if temp[0]==element[0] and temp[1]==element[1]:
self.matrice[i].append(1)
k+=1
if k==0:
self.matrice[i].append(0)
g1 = Hamilton(l)
g1.graph = self.matrice
self.var=g1.hamCycle()
self.label=tk.Label(self.toplevel_dialog, text=self.var)
self.label.pack(side='top')
pass
else:
self.label=tk.Label(self.toplevel_dialog, text="Votre requette ne peut etre traiter")
self.label.pack(side='top')
self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
self.yes_button.pack(side='right',fill='x',expand=True)
#function to compute the maximum flow
def maxflow(self):
self.wm_attributes("-disable",True)
self.toplevel_dialog=tk.Toplevel(self)
self.toplevel_dialog.minsize(600,200)
self.toplevel_dialog.wm_title("Flow maximal")
width=self.toplevel_dialog.winfo_screenwidth()
height=self.toplevel_dialog.winfo_screenheight()
largeure=600
hauteure=200
x=(width/2)-(largeure/2)
y=(height/2)-(hauteure/2)
self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
self.toplevel_dialog.transient(self)
self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
self.toplevel_dialog.focus()
self.label=tk.Label(self.toplevel_dialog, text='Entrer sommet source: ')
self.label.grid(row=1)
self.valeur1=tk.Entry(self.toplevel_dialog)
self.valeur1.grid(row=1,column=1)
self.label=tk.Label(self.toplevel_dialog, text='Entrer sommet destination: ')
self.label.grid(row=2)
self.valeur2=tk.Entry(self.toplevel_dialog)
self.valeur2.grid(row=2,column=1)
self.label=tk.Label(self.toplevel_dialog, text='\n\n')
self.label.grid(row=3)
self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_maxflow)
self.yes_button.grid(row=4,column=1)
self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
self.yes_button.grid(row=4,column=3)
pass
def Close_maxflow (self):
lg=len(self.couple)
if self.valeur1.get() in str(self.sommets) and self.valeur2.get() in str(self.sommets) and lg>0 and self.valeur1.get()!=self.valeur2.get() :
l=len(self.sommets)
self.matrice=list()
for i in range(l):
self.matrice.append([])
for j in range(l):
k=int(0)
temp=list()
temp.append(self.sommets[i])
temp.append(self.sommets[j])
for element in self.couple:
if temp[0]==element[0] and temp[1]==element[1]:
self.matrice[i].append(element[2])
k+=1
if k==0:
self.matrice[i].append(0)
g = Max_flow(self.matrice)
src=int(self.valeur1.get())
des=int(self.valeur2.get())
self.label=tk.Label(self.toplevel_dialog, text="Le flow maximal est %d " % g.FordFulkerson(src, des))
self.label.grid(row=6)
else:
self.label=tk.Label(self.toplevel_dialog, text="Votre requette ne peut etre traiter")
self.label.grid(row=6)
pass
def matriceAdj(self):
self.wm_attributes("-disable",True)
self.toplevel_dialog=tk.Toplevel(self)
self.toplevel_dialog.minsize(300,300)
self.toplevel_dialog.wm_title("Matrice D'adjacence")
width=self.toplevel_dialog.winfo_screenwidth()
height=self.toplevel_dialog.winfo_screenheight()
largeure=300
hauteure=300
x=(width/2)-(largeure/2)
y=(height/2)-(hauteure/2)
self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
self.toplevel_dialog.transient(self)
self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
self.toplevel_dialog.bind("<Return>", self.Close_Toplevel)
lg=len(self.couple)
if lg>0:
l=len(self.sommets)
self.matrice=list()
for i in range(l):
resultat=""
self.matrice.append([])
for j in range(l):
k=int(0)
temp=list()
temp.append(self.sommets[i])
temp.append(self.sommets[j])
for element in self.couple:
if temp[0]==element[0] and temp[1]==element[1]:
self.matrice[i].append(1)
resultat+="1 "
k+=1
if k==0:
self.matrice[i].append(0)
resultat+="0 "
self.label=tk.Label(self.toplevel_dialog, text=resultat)
self.label.pack(side='top')
pass
else:
self.label=tk.Label(self.toplevel_dialog, text="Votre requette ne peut etre traiter")
self.label.pack(side='top')
self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
self.yes_button.pack(side='right',fill='x',expand=True)
#function giving the successors of a vertex
def successeur(self):
self.wm_attributes("-disable",True)
self.toplevel_dialog=tk.Toplevel(self)
self.toplevel_dialog.minsize(650,100)
self.toplevel_dialog.wm_title("Successeur d'un sommet")
width=self.toplevel_dialog.winfo_screenwidth()
height=self.toplevel_dialog.winfo_screenheight()
largeure=650
hauteure=100
x=(width/2)-(largeure/2)
y=(height/2)-(hauteure/2)
self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
self.toplevel_dialog.transient(self)
self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
self.toplevel_dialog.focus()
self.toplevel_dialog.bind("<Return>", self.Close_suc)
self.label=tk.Label(self.toplevel_dialog, text='Entrer sommet: ')
self.label.grid(row=1)
self.valeur=tk.Entry(self.toplevel_dialog)
self.valeur.grid(row=1,column=1)
self.valeur.bind("<Return>", self.Close_suc)
self.valeur.bind("<Escape>", self.Close_Toplevel)
self.valeur.focus_set()
self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
self.yes_button.grid(row=1,column=6)
self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_suc)
self.yes_button.grid(row=1,column=4)
pass
def Close_suc(self):
if self.valeur.get() in str(self.sommets):
resultat=""
for element in self.couple:
if self.valeur.get() == str(element[0]):
resultat+=str(element[1])+" "
self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Le(s) successeur du sommet {} est: {}'.format(self.valeur.get(),resultat))
self.toplevel_dialog_label.grid(row=2)
else:
self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Valeur entrer incorrecte')
self.toplevel_dialog_label.grid(row=2)
def predeccesseur(self):
self.wm_attributes("-disable",True)
self.toplevel_dialog=tk.Toplevel(self)
self.toplevel_dialog.minsize(650,100)
self.toplevel_dialog.wm_title("Predecesseur d'un sommet")
width=self.toplevel_dialog.winfo_screenwidth()
height=self.toplevel_dialog.winfo_screenheight()
largeure=650
hauteure=100
x=(width/2)-(largeure/2)
y=(height/2)-(hauteure/2)
self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
self.toplevel_dialog.transient(self)
self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
self.toplevel_dialog.bind("<Return>", self.Close_pred)
self.toplevel_dialog.focus()
self.label=tk.Label(self.toplevel_dialog, text='Entrer sommet: ')
self.label.grid(row=1)
self.valeur=tk.Entry(self.toplevel_dialog)
self.valeur.grid(row=1,column=1)
self.valeur.bind("<Return>", self.Close_pred)
self.valeur.bind("<Escape>", self.Close_Toplevel)
self.valeur.focus_set()
self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
self.yes_button.grid(row=1,column=6)
self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_pred)
self.yes_button.grid(row=1,column=4)
def Close_pred(self):
if self.valeur.get() in str(self.sommets):
resultat=""
for element in self.couple:
if self.valeur.get() == str(element[1]):
resultat+=str(element[0])+" "
self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Le(s) predecesseur du sommet {} est: {}'.format(self.valeur.get(),resultat))
self.toplevel_dialog_label.grid(row=2)
else:
self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Valeur entrer incorrecte')
self.toplevel_dialog_label.grid(row=2)
def demi_deg_sup(self):
self.wm_attributes("-disable",True)
self.toplevel_dialog=tk.Toplevel(self)
self.toplevel_dialog.minsize(700,100)
self.toplevel_dialog.wm_title("Demi degre supperieur d'un sommet")
width=self.toplevel_dialog.winfo_screenwidth()
height=self.toplevel_dialog.winfo_screenheight()
largeure=700
hauteure=100
x=(width/2)-(largeure/2)
y=(height/2)-(hauteure/2)
self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
self.toplevel_dialog.transient(self)
self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
self.toplevel_dialog.bind("<Return>", self.Close_degre_sup)
self.toplevel_dialog.focus()
self.label=tk.Label(self.toplevel_dialog, text='Entrer sommet: ')
self.label.grid(row=1)
self.valeur=tk.Entry(self.toplevel_dialog)
self.valeur.grid(row=1,column=1)
self.valeur.bind("<Return>", self.Close_degre_sup)
self.valeur.bind("<Escape>", self.Close_Toplevel)
self.valeur.focus_set()
self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
self.yes_button.grid(row=1,column=6)
self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_degre_sup)
self.yes_button.grid(row=1,column=4)
def Close_degre_sup(self):
if self.valeur.get() in str(self.sommets):
k=int(0)
for element in self.couple:
if self.valeur.get() == str(element[0]):
k+=1
self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Le demi degre supperieur du sommet {} est: {}'.format(self.valeur.get(),k))
self.toplevel_dialog_label.grid(row=2)
else:
self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Valeur entrer incorrecte')
self.toplevel_dialog_label.grid(row=2)
def demi_deg_inf(self):
self.wm_attributes("-disable",True)
self.toplevel_dialog=tk.Toplevel(self)
self.toplevel_dialog.minsize(700,100)
self.toplevel_dialog.wm_title("Demi degre inferieur d'un sommet")
width=self.toplevel_dialog.winfo_screenwidth()
height=self.toplevel_dialog.winfo_screenheight()
largeure=700
hauteure=100
x=(width/2)-(largeure/2)
y=(height/2)-(hauteure/2)
self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
self.toplevel_dialog.transient(self)
self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
self.toplevel_dialog.bind("<Return>", self.Close_degre_inf)
self.toplevel_dialog.focus()
self.label=tk.Label(self.toplevel_dialog, text='Entrer sommet: ')
self.label.grid(row=1)
self.valeur=tk.Entry(self.toplevel_dialog)
self.valeur.grid(row=1,column=1)
self.valeur.bind("<Return>", self.Close_degre_inf)
self.valeur.bind("<Escape>", self.Close_Toplevel)
self.valeur.focus_set()
self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
self.yes_button.grid(row=1,column=6)
self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_degre_inf)
self.yes_button.grid(row=1,column=4)
def Close_degre_inf(self):
if self.valeur.get() in str(self.sommets):
k=int(0)
for element in self.couple:
if self.valeur.get() == str(element[1]):
k+=1
self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Le demi degre inferieur du sommet {} est: {}'.format(self.valeur.get(),k))
self.toplevel_dialog_label.grid(row=2)
else:
self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Valeur entrer incorrecte')
self.toplevel_dialog_label.grid(row=2)
def degres_sommet(self):
self.wm_attributes("-disable",True)
self.toplevel_dialog=tk.Toplevel(self)
self.toplevel_dialog.minsize(600,100)
self.toplevel_dialog.wm_title("Degre du sommet")
width=self.toplevel_dialog.winfo_screenwidth()
height=self.toplevel_dialog.winfo_screenheight()
largeure=600
hauteure=100
x=(width/2)-(largeure/2)
y=(height/2)-(hauteure/2)
self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
self.toplevel_dialog.transient(self)
self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
self.toplevel_dialog.bind("<Return>", self.Close_degre)
self.toplevel_dialog.focus()
self.label=tk.Label(self.toplevel_dialog, text='Entrer sommet: ')
self.label.grid(row=1)
self.valeur=tk.Entry(self.toplevel_dialog)
self.valeur.grid(row=1,column=1)
self.valeur.bind("<Return>", self.Close_degre)
self.valeur.bind("<Escape>", self.Close_Toplevel)
self.valeur.focus_set()
self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
self.yes_button.grid(row=1,column=5)
self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_degre)
self.yes_button.grid(row=1,column=3)
def Close_degre(self):
if self.valeur.get() in str(self.sommets):
k=int(0)
for element in self.couple:
if self.valeur.get() in str(element):
k+=1
self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Le degre du sommet {} est: {}'.format(self.valeur.get(),k))
self.toplevel_dialog_label.grid(row=2)
else:
self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Valeur entrer incorrecte')
self.toplevel_dialog_label.grid(row=2)
def ordre_graphe(self):
self.wm_attributes("-disable",True)
self.toplevel_dialog=tk.Toplevel(self)
self.toplevel_dialog.minsize(502,50)
self.toplevel_dialog.wm_title("Ordre du graphe")
width=self.toplevel_dialog.winfo_screenwidth()
height=self.toplevel_dialog.winfo_screenheight()
largeure=502
hauteure=50
x=(width/2)-(largeure/2)
y=(height/2)-(hauteure/2)
self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
self.toplevel_dialog.transient(self)
self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
self.toplevel_dialog.bind("<Return>", self.Close_Toplevel)
n=len(self.sommets)
self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='L ordre du graphe est: {}'.format(n))
self.toplevel_dialog_label.pack(side='top')
self.toplevel_dialog_yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
self.toplevel_dialog_yes_button.pack(side='right',fill='x',expand=True)
for i in range(3):
self.toplevel_dialog_label3=tk.Label(self.toplevel_dialog, text='\n')
self.toplevel_dialog_label3.pack()
pass
def sommet(self, event):
x,y=event.x,event.y
if self.point==[]:
self.sommet=self.graphe.create_oval(x-10,y-10,x+10,y+10, fill="cyan")
self.numero=self.graphe.create_text(x,y,text="{}".format(self.i))
self.point.append([event.x,event.y,self.sommet,self.numero,self.i])
self.sommets.append(self.i)
self.i+=1
else:
controle=0
for element in self.point:
if element[0]-25 < event.x < element[0]+25 and element[1]-25 < event.y < element[1]+25:
controle=1
if controle==0:
self.sommet=self.graphe.create_oval(x-10,y-10,x+10,y+10, fill="cyan")
self.numero=self.graphe.create_text(x,y,text="{}".format(self.i))
self.point.append([event.x,event.y,self.sommet,self.numero,self.i])
self.sommets.append(self.i)
self.i+=1
#procedure to draw an arc between two vertices
def arc(self, event):
for element in self.point:
if element[0]-10 < event.x < element[0]+10 and element[1]-10 < event.y < element[1]+10:
self.temp.append(element)
self.compt+=1
if self.compt==2:
self.wm_attributes("-disable",True)
self.toplevel_dialog=tk.Toplevel(self)
self.toplevel_dialog.minsize(502,100)
self.toplevel_dialog.wm_title("Arc")
width=self.toplevel_dialog.winfo_screenwidth()
height=self.toplevel_dialog.winfo_screenheight()
largeure=502
hauteure=100
x=(width/2)-(largeure/2)
y=(height/2)-(hauteure/2)
self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
self.toplevel_dialog.transient(self)
self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
self.toplevel_dialog.bind("<Return>", self.Close_arc)
self.toplevel_dialog.focus()
self.label=tk.Label(self.toplevel_dialog, text='Entrer la distance entre le sommet {} et le sommet {}: '.format(self.temp[0][4],self.temp[1][4]))
self.label.pack(side='top')
self.valeur=tk.Entry(self.toplevel_dialog)
self.valeur.pack(side='top')
self.valeur.bind("<Return>", self.Close_arc)
self.valeur.bind("<Escape>", self.Close_Toplevel)
self.valeur.focus_set()
self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
self.yes_button.pack(side='right',fill='x',expand=True)
self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_arc)
self.yes_button.pack(side='right',fill='x',expand=True)
def Close_arc (self,event=None):
if self.temp[0][0] < self.temp[1][0]:
a=[self.temp[0][0]+10,self.temp[0][1]]
b=[self.temp[1][0]-10,self.temp[1][1]]
self.graphe.create_line(a,b,arrow="last")
try:
self.entier=int(self.valeur.get())
except ValueError:
pass
if self.entier>0 or self.entier<0 :
pass
else:
self.entier=int(1)
self.couple.append([self.temp[0][4],self.temp[1][4],self.entier])
elif self.temp[0][0]==self.temp[1][0]:
self.graphe.delete(self.temp[0][2])
self.graphe.delete(self.temp[0][3])
self.graphe.create_oval(self.temp[0][0]-10,self.temp[0][1]-25,self.temp[0][0]+1,self.temp[0][1])
self.graphe.create_oval(self.temp[0][0]-10,self.temp[0][1]-10,self.temp[0][0]+10,self.temp[0][1]+10,fill="cyan")
self.graphe.create_text(self.temp[0][0],self.temp[0][1],text="{}".format(self.temp[0][4]))
a=(self.temp[0][0],self.temp[0][1]-10.5)
b=(self.temp[0][0],self.temp[0][1]-10)
self.graphe.create_line(a,b,arrow="last")
try:
self.entier=int(self.valeur.get())
except ValueError:
pass
if self.entier>0 or self.entier<0 :
pass
else:
self.entier=int(1)
self.couple.append([self.temp[0][4],self.temp[1][4],self.entier])
else:
a=[self.temp[0][0]-10,self.temp[0][1]]
b=[self.temp[1][0]+10,self.temp[1][1]]
self.graphe.create_line(a,b,arrow="last")
try:
self.entier=int(self.valeur.get())
except ValueError:
pass
if self.entier>0 or self.entier<0 :
pass
else:
self.entier=int(1)
self.couple.append([self.temp[0][4],self.temp[1][4],self.entier])
self.compt=int()
self.temp=list()
self.wm_attributes("-disable",False)
self.toplevel_dialog.destroy()
self.deiconify()
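# Usage sketch (an assumption -- no entry point appears in this section):
# the window is launched like any Tk application, provided a display and
# Pillow's ImageGrab (used by the save feature) are available:
#
#   if __name__ == '__main__':
#       GrapheOriente().mainloop()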
###########################################
#           - Undirected Graph Window -           #
# /////////////////////////////////////////////// #
# Description: program for working with           #
#                undirected graphs                #
# /////////////////////////////////////////////// #
class Graphe_Non_Oriente(Tk):
def __init__(self):
Tk.__init__(self) # parent class constructor
#get the size of the computer screen
width=self.winfo_screenwidth()
height=self.winfo_screenheight()
self.largeure=900
self.hauteure=500
self.x=(width/2)-(self.largeure/2)
self.y=(height/2)-(self.hauteure/2)
#canvas initialization
self.graphe =Canvas(self, width =self.largeure, height =self.hauteure, bg ="white")
self.geometry('{}x{}+{}+{}'.format(self.largeure,self.hauteure,int(self.x),int(self.y)))
self.resizable(False,False)
self.wm_title('Graphe Non Oriente')
self.graphe.pack(side =TOP, padx =5, pady =5)
#events triggered by mouse clicks
self.bind("<Double-Button-1>", self.sommet)
self.bind("<Button-3>", self.arc)
#window menu
menubar = Menu(self)
filemenu = Menu(menubar, tearoff = 0)
filemenu.add_separator()
filemenu.add_command(label = "Quitter ?", command = self.destroy)
filemenu.add_command(label = "Sauvegarder", command = self.save)
menubar.add_cascade(label = "Fichier", menu = filemenu)
filemenu = Menu(menubar, tearoff = 0)
filemenu.add_separator()
filemenu.add_command(label = "Ordre du graphe", command=self.ordre_graphe)
filemenu.add_command(label = "Degre du sommet", command=self.degres_sommet)
filemenu.add_command(label = "Matrice d'adjacence", command=self.matriceAdj)
filemenu.add_command(label = "Successeur du sommet", command=self.successeur)
filemenu.add_command(label = "Predecesseur du sommet", command=self.predeccesseur)
filemenu.add_command(label = "Graphe Hamiltonien ?", command=self.hamilton)
filemenu.add_command(label = "Graphe Eulerien ?", command=self.euler)
filemenu.add_command(label = "Flow maximal", command=self.maxflow)
menubar.add_cascade(label = "Traitement", menu = filemenu)
filemenu = Menu(menubar, tearoff = 0)
filemenu.add_command(label = "Tout effacer ?", command =self.delete)
menubar.add_cascade(label = "Effacer", menu = filemenu)
filemenu = Menu(menubar, tearoff = 0)
filemenu.add_command(label = "Aide", command =self.aide)
menubar.add_cascade(label = "Aide", menu = filemenu)
filemenu = Menu(menubar, tearoff = 0)
self.config(menu = menubar)
#global variables
self.i=int(0)
self.compt=int()
self.temp=list()
self.connect=list()
self.point=list()
self.sommets=list()
self.couple=list()
self.matrice=list()
self.var=StringVar()
self.entier=int()
def delete(self):
for element in self.graphe.find_all():
self.graphe.delete(element)
self.i=int(0)
self.compt=int()
self.temp=list()
self.connect=list()
self.point=list()
self.sommets=list()
self.couple=list()
self.matrice=list()
self.var=StringVar()
self.entier=int()
pass
# function to close the child window
def Close_Toplevel (self):
self.compt=int()
self.temp=list()
self.wm_attributes("-disable",False)
self.toplevel_dialog.destroy()
self.deiconify()
def aide(self):
self.wm_attributes("-disable",True)
self.toplevel_dialog=tk.Toplevel(self)
self.toplevel_dialog.minsize(600,150)
self.toplevel_dialog.wm_title("Aide")
width=self.toplevel_dialog.winfo_screenwidth()
height=self.toplevel_dialog.winfo_screenheight()
largeure=600
hauteure=150
x=(width/2)-(largeure/2)
y=(height/2)-(hauteure/2)
self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
self.toplevel_dialog.transient(self)
self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
self.toplevel_dialog.bind("<Return>", self.Close_Toplevel)
self.toplevel_dialog.focus()
aide="""
Tracer un sommet: Double clic
Tracer un arc: clic gauche sur chaque sommet
"""
self.label=tk.Label(self.toplevel_dialog, text=aide,justify='left',font='Century 13 bold')
self.label.pack(side='top')
self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
self.yes_button.pack(side='right',fill='x',expand=True)
#function to close the save child window
def Close_Save (self,event=None):
if len(self.var.get())>0:
x=self.graphe.winfo_rootx()
y=self.graphe.winfo_rooty()
w=self.graphe.winfo_width()
h=self.graphe.winfo_height()
image=ImageGrab.grab((x+2,y+2,x+w-2,y+h-2))
image.save("save/{}.png".format(self.var.get()))
else:
x=self.graphe.winfo_rootx()
y=self.graphe.winfo_rooty()
w=self.graphe.winfo_width()
h=self.graphe.winfo_height()
image=ImageGrab.grab((x+2,y+2,x+w-2,y+h-2))
image.save("save/Graphe.png")
self.wm_attributes("-disable",False)
self.toplevel_dialog.destroy()
self.deiconify()
#function to save the drawn graph
def save(self):
self.wm_attributes("-disable",True)
self.toplevel_dialog=tk.Toplevel(self)
self.toplevel_dialog.minsize(600,100)
self.toplevel_dialog.wm_title("Sauvegarder")
width=self.toplevel_dialog.winfo_screenwidth()
height=self.toplevel_dialog.winfo_screenheight()
largeure=600
hauteure=100
x=(width/2)-(largeure/2)
y=(height/2)-(hauteure/2)
self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
self.toplevel_dialog.transient(self)
self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
self.toplevel_dialog.bind("<Return>", self.Close_Save)
self.label=tk.Label(self.toplevel_dialog, text='Entrer le nom de limage: ')
self.label.pack(side='left')
self.var=tk.Entry(self.toplevel_dialog)
self.var.pack(side='left')
self.var.bind("<Return>", self.Close_Save)
self.var.bind("<Escape>", self.Close_Toplevel)
self.var.focus_set()
self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
self.yes_button.pack(side='right',fill='x',expand=True)
self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_Save)
self.yes_button.pack(side='right',fill='x',expand=True)
# function to detect whether the graph is Eulerian
def euler(self):
self.wm_attributes("-disable",True)
self.toplevel_dialog=tk.Toplevel(self)
self.toplevel_dialog.minsize(600,100)
self.toplevel_dialog.wm_title("Graphe eulerien")
width=self.toplevel_dialog.winfo_screenwidth()
height=self.toplevel_dialog.winfo_screenheight()
largeure=600
hauteure=100
x=(width/2)-(largeure/2)
y=(height/2)-(hauteure/2)
self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
self.toplevel_dialog.transient(self)
self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
self.toplevel_dialog.bind("<Return>", self.Close_Toplevel)
self.toplevel_dialog.focus()
l=len(self.couple)
lg=len(self.sommets)
if lg>=2:
g1 = Euler(lg)
for i in range(l):
g1.addEdge(self.couple[i][0],self.couple[i][1])
self.var=g1.test()
self.label=tk.Label(self.toplevel_dialog, text=self.var)
self.label.pack(side='top')
else:
self.label=tk.Label(self.toplevel_dialog, text="Votre requette ne peut etre traiter")
self.label.pack(side='top')
self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
self.yes_button.pack(side='right',fill='x',expand=True)
#function to detect whether the graph is Hamiltonian
def hamilton(self):
self.wm_attributes("-disable",True)
self.toplevel_dialog=tk.Toplevel(self)
self.toplevel_dialog.minsize(600,100)
self.toplevel_dialog.wm_title("Graphe hamiltonien")
width=self.toplevel_dialog.winfo_screenwidth()
height=self.toplevel_dialog.winfo_screenheight()
largeure=600
hauteure=100
x=(width/2)-(largeure/2)
y=(height/2)-(hauteure/2)
self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
self.toplevel_dialog.transient(self)
self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
self.toplevel_dialog.bind("<Return>", self.Close_Toplevel)
self.toplevel_dialog.focus()
lg=len(self.couple)
if lg>1:
l=len(self.sommets)
self.matrice=list()
for i in range(l):
self.matrice.append([])
for j in range(l):
k=int(0)
temp=list()
temp.append(self.sommets[i])
temp.append(self.sommets[j])
for element in self.couple:
if temp[0]==element[0] and temp[1]==element[1]:
self.matrice[i].append(1)
k+=1
if k==0:
self.matrice[i].append(0)
g1 = Hamilton(l)
g1.graph = self.matrice
self.var=g1.hamCycle()
self.label=tk.Label(self.toplevel_dialog, text=self.var)
self.label.pack(side='top')
pass
else:
self.label=tk.Label(self.toplevel_dialog, text="Votre requette ne peut etre traiter")
self.label.pack(side='top')
self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
self.yes_button.pack(side='right',fill='x',expand=True)
#function to compute the maximum flow
def maxflow(self):
self.wm_attributes("-disable",True)
self.toplevel_dialog=tk.Toplevel(self)
self.toplevel_dialog.minsize(600,200)
self.toplevel_dialog.wm_title("Flow maximal")
width=self.toplevel_dialog.winfo_screenwidth()
height=self.toplevel_dialog.winfo_screenheight()
largeure=600
hauteure=200
x=(width/2)-(largeure/2)
y=(height/2)-(hauteure/2)
self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
self.toplevel_dialog.transient(self)
self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
self.toplevel_dialog.focus()
self.label=tk.Label(self.toplevel_dialog, text='Entrer sommet source: ')
self.label.grid(row=1)
self.valeur1=tk.Entry(self.toplevel_dialog)
self.valeur1.grid(row=1,column=1)
self.label=tk.Label(self.toplevel_dialog, text='Entrer sommet destination: ')
self.label.grid(row=2)
self.valeur2=tk.Entry(self.toplevel_dialog)
self.valeur2.grid(row=2,column=1)
self.label=tk.Label(self.toplevel_dialog, text='\n\n')
self.label.grid(row=3)
self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_maxflow)
self.yes_button.grid(row=4,column=1)
self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
self.yes_button.grid(row=4,column=3)
pass
def Close_maxflow (self):
lg=len(self.couple)
if self.valeur1.get() in str(self.sommets) and self.valeur2.get() in str(self.sommets) and lg>0 and self.valeur1.get()!=self.valeur2.get() :
l=len(self.sommets)
self.matrice=list()
for i in range(l):
self.matrice.append([])
for j in range(l):
k=int(0)
temp=list()
temp.append(self.sommets[i])
temp.append(self.sommets[j])
for element in self.couple:
if temp[0]==element[0] and temp[1]==element[1]:
self.matrice[i].append(element[2])
k+=1
if k==0:
self.matrice[i].append(0)
g = Max_flow(self.matrice)
src=int(self.valeur1.get())
des=int(self.valeur2.get())
self.label=tk.Label(self.toplevel_dialog, text="Le flow maximal est %d " % g.FordFulkerson(src, des))
self.label.grid(row=6)
else:
self.label=tk.Label(self.toplevel_dialog, text="Votre requette ne peut etre traiter")
self.label.grid(row=6)
pass
def matriceAdj(self):
self.wm_attributes("-disable",True)
self.toplevel_dialog=tk.Toplevel(self)
self.toplevel_dialog.minsize(300,300)
self.toplevel_dialog.wm_title("Matrice D'adjacence")
width=self.toplevel_dialog.winfo_screenwidth()
height=self.toplevel_dialog.winfo_screenheight()
largeure=300
hauteure=300
x=(width/2)-(largeure/2)
y=(height/2)-(hauteure/2)
self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
self.toplevel_dialog.transient(self)
self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
self.toplevel_dialog.bind("<Return>", self.Close_Toplevel)
self.toplevel_dialog.focus()
lg=len(self.couple)
if lg>0:
l=len(self.sommets)
self.matrice=list()
for i in range(l):
resultat=""
resultat+=str(self.sommets[i])+"| "
self.matrice.append([])
for j in range(l):
k=int(0)
temp=list()
temp.append(self.sommets[i])
temp.append(self.sommets[j])
for element in self.couple:
if temp[0]==element[0] and temp[1]==element[1]:
self.matrice[i].append(1)
resultat+="1 "
k+=1
if k==0:
self.matrice[i].append(0)
resultat+="0 "
self.label=tk.Label(self.toplevel_dialog, text=resultat)
self.label.pack(side='top')
pass
else:
self.label=tk.Label(self.toplevel_dialog, text="Votre requette ne peut etre traiter")
self.label.pack(side='top')
self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
self.yes_button.pack(side='right',fill='x',expand=True)
# function giving the successor(s) of a vertex
def successeur(self):
self.wm_attributes("-disable",True)
self.toplevel_dialog=tk.Toplevel(self)
self.toplevel_dialog.minsize(650,100)
self.toplevel_dialog.wm_title("Successeur d'un sommet")
width=self.toplevel_dialog.winfo_screenwidth()
height=self.toplevel_dialog.winfo_screenheight()
largeure=650
hauteure=100
x=(width/2)-(largeure/2)
y=(height/2)-(hauteure/2)
self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
self.toplevel_dialog.transient(self)
self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
self.toplevel_dialog.bind("<Return>", self.Close_suc)
self.toplevel_dialog.focus()
self.label=tk.Label(self.toplevel_dialog, text='Entrer sommet: ')
self.label.grid(row=1)
self.valeur=tk.Entry(self.toplevel_dialog)
self.valeur.grid(row=1,column=1)
self.valeur.bind("<Return>", self.Close_suc)
self.valeur.bind("<Escape>", self.Close_Toplevel)
self.valeur.focus_set()
self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
self.yes_button.grid(row=1,column=6)
self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_suc)
self.yes_button.grid(row=1,column=4)
pass
def Close_suc(self, event=None):
if self.valeur.get() in [str(s) for s in self.sommets]:
resultat=""
for element in self.couple:
if self.valeur.get() == str(element[0]):
resultat+=str(element[1])+" "
self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Le(s) successeur(s) du sommet {} : {}'.format(self.valeur.get(),resultat))
self.toplevel_dialog_label.grid(row=2)
else:
self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Valeur entree incorrecte')
self.toplevel_dialog_label.grid(row=2)
def predeccesseur(self):
self.wm_attributes("-disable",True)
self.toplevel_dialog=tk.Toplevel(self)
self.toplevel_dialog.minsize(650,100)
self.toplevel_dialog.wm_title("Predecesseur d'un sommet")
width=self.toplevel_dialog.winfo_screenwidth()
height=self.toplevel_dialog.winfo_screenheight()
largeure=650
hauteure=100
x=(width/2)-(largeure/2)
y=(height/2)-(hauteure/2)
self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
self.toplevel_dialog.transient(self)
self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
self.toplevel_dialog.bind("<Return>", self.Close_pred)
self.toplevel_dialog.focus()
self.label=tk.Label(self.toplevel_dialog, text='Entrer sommet: ')
self.label.grid(row=1)
self.valeur=tk.Entry(self.toplevel_dialog)
self.valeur.grid(row=1,column=1)
self.valeur.bind("<Return>", self.Close_pred)
self.valeur.bind("<Escape>", self.Close_Toplevel)
self.valeur.focus_set()
self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
self.yes_button.grid(row=1,column=6)
self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_pred)
self.yes_button.grid(row=1,column=4)
def Close_pred(self, event=None):
if self.valeur.get() in [str(s) for s in self.sommets]:
resultat=""
for element in self.couple:
if self.valeur.get() == str(element[1]):
resultat+=str(element[0])+" "
self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Le(s) predecesseur(s) du sommet {} : {}'.format(self.valeur.get(),resultat))
self.toplevel_dialog_label.grid(row=2)
else:
self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Valeur entree incorrecte')
self.toplevel_dialog_label.grid(row=2)
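# The successor and predecessor lookups above scan `couple` and build a
# result string by concatenation. A minimal sketch of the same queries as
# list comprehensions, assuming the [src, dst, weight] edge format used here
# (helper names are illustrative):
def successors(couple, s):
    return [dst for src, dst, _ in couple if src == s]

def predecessors(couple, s):
    return [src for src, dst, _ in couple if dst == s]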
def degres_sommet(self):
self.wm_attributes("-disable",True)
self.toplevel_dialog=tk.Toplevel(self)
self.toplevel_dialog.minsize(600,100)
self.toplevel_dialog.wm_title("Degre du sommet")
width=self.toplevel_dialog.winfo_screenwidth()
height=self.toplevel_dialog.winfo_screenheight()
largeure=600
hauteure=100
x=(width/2)-(largeure/2)
y=(height/2)-(hauteure/2)
self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
self.toplevel_dialog.transient(self)
self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
self.toplevel_dialog.bind("<Return>", self.Close_degre)
self.toplevel_dialog.focus()
self.label=tk.Label(self.toplevel_dialog, text='Entrer sommet: ')
self.label.grid(row=1)
self.valeur=tk.Entry(self.toplevel_dialog)
self.valeur.grid(row=1,column=1)
self.valeur.bind("<Return>", self.Close_degre)
self.valeur.bind("<Escape>", self.Close_Toplevel)
self.valeur.focus_set()
self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
self.yes_button.grid(row=1,column=5)
self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_degre)
self.yes_button.grid(row=1,column=3)
def Close_degre(self, event=None):
if self.valeur.get() in [str(s) for s in self.sommets]:
k=int(0)
for element in self.couple:
if self.valeur.get() == str(element[1]):
k+=1
self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Le degre du sommet {} est: {}'.format(self.valeur.get(),k))
self.toplevel_dialog_label.grid(row=2)
else:
self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Valeur entree incorrecte')
self.toplevel_dialog_label.grid(row=2)
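# Close_degre above counts only edges arriving at the vertex, i.e. its
# in-degree. A sketch computing in- and out-degree together over the same
# [src, dst, weight] edge list (hypothetical helper):
def degrees(couple, s):
    indeg = sum(1 for src, dst, _ in couple if dst == s)
    outdeg = sum(1 for src, dst, _ in couple if src == s)
    return indeg, outdeg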
def ordre_graphe(self):
self.wm_attributes("-disable",True)
self.toplevel_dialog=tk.Toplevel(self)
self.toplevel_dialog.minsize(502,50)
self.toplevel_dialog.wm_title("Ordre du graphe")
width=self.toplevel_dialog.winfo_screenwidth()
height=self.toplevel_dialog.winfo_screenheight()
largeure=502
hauteure=50
x=(width/2)-(largeure/2)
y=(height/2)-(hauteure/2)
self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
self.toplevel_dialog.transient(self)
self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
self.toplevel_dialog.bind("<Return>", self.Close_Toplevel)
self.toplevel_dialog.focus()
n=len(self.sommets)
self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text="L'ordre du graphe est: {}".format(n))
self.toplevel_dialog_label.pack(side='top')
self.toplevel_dialog_yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=82,command=self.Close_Toplevel)
self.toplevel_dialog_yes_button.pack(side='right',fill='x',expand=True)
for i in range(3):
self.toplevel_dialog_label3=tk.Label(self.toplevel_dialog, text='\n')
self.toplevel_dialog_label3.pack()
pass
def sommet(self, event):
x,y=event.x,event.y
if self.point==[]:
self.sommet=self.graphe.create_oval(x-10,y-10,x+10,y+10, fill="cyan")
self.numero=self.graphe.create_text(x,y,text="{}".format(self.i))
self.point.append([event.x,event.y,self.sommet,self.numero,self.i])
self.sommets.append(self.i)
self.i+=1
else:
controle=0
for element in self.point:
if element[0]-25 < event.x < element[0]+25 and element[1]-25 < event.y < element[1]+25:
controle=1
if controle==0:
self.sommet=self.graphe.create_oval(x-10,y-10,x+10,y+10, fill="cyan")
self.numero=self.graphe.create_text(x,y,text="{}".format(self.i))
self.point.append([event.x,event.y,self.sommet,self.numero,self.i])
self.sommets.append(self.i)
self.i+=1
# procedure drawing an arc between two vertices
def arc(self, event):
for element in self.point:
if element[0]-10 < event.x < element[0]+10 and element[1]-10 < event.y < element[1]+10:
self.temp.append(element)
self.compt+=1
if self.compt==2:
self.wm_attributes("-disable",True)
self.toplevel_dialog=tk.Toplevel(self)
self.toplevel_dialog.minsize(502,100)
self.toplevel_dialog.wm_title("Arc")
width=self.toplevel_dialog.winfo_screenwidth()
height=self.toplevel_dialog.winfo_screenheight()
largeure=502
hauteure=100
x=(width/2)-(largeure/2)
y=(height/2)-(hauteure/2)
self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
self.toplevel_dialog.transient(self)
self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
self.toplevel_dialog.bind("<Return>", self.Close_arc)
self.toplevel_dialog.focus()
self.label=tk.Label(self.toplevel_dialog, text='Entrer la distance entre le sommet {} et le sommet {}: '.format(self.temp[0][4],self.temp[1][4]))
self.label.pack(side='top')
self.valeur=tk.Entry(self.toplevel_dialog)
self.valeur.pack(side='top')
self.valeur.bind("<Return>", self.Close_arc)
self.valeur.bind("<Escape>", self.Close_Toplevel)
self.valeur.focus_set()
self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
self.yes_button.pack(side='right',fill='x',expand=True)
self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_arc)
self.yes_button.pack(side='right',fill='x',expand=True)
def Close_arc(self, event=None):
if self.temp[0][0] < self.temp[1][0]:
a=[self.temp[0][0]+10,self.temp[0][1]]
b=[self.temp[1][0]-10,self.temp[1][1]]
self.graphe.create_line(a,b)
try:
self.entier=int(self.valeur.get())
except ValueError:
self.entier=0
if self.entier==0:
self.entier=1
self.couple.append([self.temp[0][4],self.temp[1][4],self.entier])
self.couple.append([self.temp[1][4],self.temp[0][4],self.entier])
elif self.temp[0][0]==self.temp[1][0]:
self.graphe.delete(self.temp[0][2])
self.graphe.delete(self.temp[0][3])
self.graphe.create_oval(self.temp[0][0]-10,self.temp[0][1]-25,self.temp[0][0]+1,self.temp[0][1])
self.graphe.create_oval(self.temp[0][0]-10,self.temp[0][1]-10,self.temp[0][0]+10,self.temp[0][1]+10,fill="cyan")
self.graphe.create_text(self.temp[0][0],self.temp[0][1],text="{}".format(self.temp[0][4]))
a=(self.temp[0][0],self.temp[0][1]-10.5)
b=(self.temp[0][0],self.temp[0][1]-10)
self.graphe.create_line(a,b)
try:
self.entier=int(self.valeur.get())
except ValueError:
self.entier=0
if self.entier==0:
self.entier=1
self.couple.append([self.temp[0][4],self.temp[1][4],self.entier])
self.couple.append([self.temp[1][4],self.temp[0][4],self.entier])
else:
a=[self.temp[0][0]-10,self.temp[0][1]]
b=[self.temp[1][0]+10,self.temp[1][1]]
self.graphe.create_line(a,b)
try:
self.entier=int(self.valeur.get())
except ValueError:
self.entier=0
if self.entier==0:
self.entier=1
self.couple.append([self.temp[0][4],self.temp[1][4],self.entier])
self.couple.append([self.temp[1][4],self.temp[0][4],self.entier])
self.compt=int()
self.temp=list()
self.wm_attributes("-disable",False)
self.toplevel_dialog.destroy()
self.deiconify()
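# Close_arc stores every edge in both directions, which is what makes this
# class behave as an undirected graph. A sketch of a small helper that would
# centralise that invariant (illustrative, not part of the original class):
def add_undirected_edge(couple, u, v, weight=1):
    couple.append([u, v, weight])
    couple.append([v, u, weight])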
######################################################
# - Main Program - #
# /////////////////////////////////////////////// #
# Description: main window of the program #
# /////////////////////////////////////////////// #
if __name__ == '__main__':
# canvas initialisation
fen =Tk()
width=fen.winfo_screenwidth()
height=fen.winfo_screenheight()
largeure=900
hauteure=500
x=(width/2)-(largeure/2)
y=(height/2)-(hauteure/2)
graphe =Canvas(fen, width =largeure, height =hauteure ,bg="light yellow")
fen.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
fen.wm_title("Graphe Trace")
graphe.pack(side =TOP, padx =5, pady =5)
fen.resizable(False,False)
icon=PhotoImage(file='img/img.png')
fen.tk.call('wm','iconphoto',fen._w,icon)
photo = PhotoImage(file="img/img.png",width=largeure,height=hauteure)
graphe.create_image(300, 90, anchor=NW, image=photo)
def menu():
menubar = Menu(fen)
filemenu = Menu(menubar, tearoff = 0)
filemenu.add_command(label="Graphe Oriente", command = graphe_oriente)
filemenu.add_command(label="Graphe Non Oriente", command = graphe_non_oriente)
filemenu.add_separator()
filemenu.add_command(label = "Quitter", command = fen.destroy)
menubar.add_cascade(label = "Graphe", menu = filemenu)
filemenu = Menu(menubar, tearoff = 0)
filemenu.add_command(label = "Auteur", command = Auteur)
filemenu.add_command(label="Description", command = Description)
filemenu.add_command(label="Version", command = Version)
menubar.add_cascade(label = "A Propos", menu = filemenu)
fen.config(menu = menubar)
fen.mainloop()
pass
def donothing():
#filewin = Toplevel(root)
#button = Button(filewin, text="Do nothing button")
#button.pack()
pass
def graphe_oriente():
# set up the canvas
app = GrapheOriente()
app.mainloop()
fen.mainloop()
def graphe_non_oriente():
# set up the canvas
app = Graphe_Non_Oriente()
app.mainloop()
fen.mainloop()
def Auteur():
a_propos="""
Ce logiciel a ete cree par des etudiants
en deuxieme annee Miage.
Notamment par:
Sawadogo R.R Sylvain
Sawadogo Sidbewende Omar
Yameogo Pingdwinde Boris
"""
fen.wm_attributes("-disable",True)
fen.toplevel_dialog=tk.Toplevel(fen)
fen.toplevel_dialog.minsize(502,210)
fen.toplevel_dialog.wm_title("Auteur")
width=fen.toplevel_dialog.winfo_screenwidth()
height=fen.toplevel_dialog.winfo_screenheight()
largeure=502
hauteure=210
x=(width/2)-(largeure/2)
y=(height/2)-(hauteure/2)
fen.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
fen.toplevel_dialog.transient(fen)
fen.toplevel_dialog.protocol("WM_DELETE_WINDOW", Close_Toplevel)
fen.label=tk.Label(fen.toplevel_dialog, text=a_propos,justify='left',font='Century 13 bold')
fen.label.grid(row=1,padx =5, pady =5)
fen.yes_button=ttk.Button(fen.toplevel_dialog,text='Ok',width=82,command=Close_Toplevel)
fen.yes_button.grid(row=2)
def Description():
a_propos="""
Ce logiciel a ete cree dans le cadre
du traitement de graphes.
"""
fen.wm_attributes("-disable",True)
fen.toplevel_dialog=tk.Toplevel(fen)
fen.toplevel_dialog.minsize(502,126)
fen.toplevel_dialog.wm_title("Description")
width=fen.toplevel_dialog.winfo_screenwidth()
height=fen.toplevel_dialog.winfo_screenheight()
largeure=502
hauteure=126
x=(width/2)-(largeure/2)
y=(height/2)-(hauteure/2)
fen.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
fen.toplevel_dialog.transient(fen)
fen.toplevel_dialog.protocol("WM_DELETE_WINDOW", Close_Toplevel)
fen.label=tk.Label(fen.toplevel_dialog, text=a_propos,justify='left',font='Century 13 bold')
fen.label.grid(row=1,padx =5, pady =5)
fen.yes_button=ttk.Button(fen.toplevel_dialog,text='Ok',width=82,command=Close_Toplevel)
fen.yes_button.grid(row=2)
def Version():
a_propos="""Version 1.0.0"""
fen.wm_attributes("-disable",True)
fen.toplevel_dialog=tk.Toplevel(fen)
fen.toplevel_dialog.minsize(300,64)
fen.toplevel_dialog.wm_title("Version")
width=fen.toplevel_dialog.winfo_screenwidth()
height=fen.toplevel_dialog.winfo_screenheight()
largeure=300
hauteure=64
x=(width/2)-(largeure/2)
y=(height/2)-(hauteure/2)
fen.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
fen.toplevel_dialog.transient(fen)
fen.toplevel_dialog.protocol("WM_DELETE_WINDOW", Close_Toplevel)
fen.label=tk.Label(fen.toplevel_dialog, text=a_propos,justify='left',font='Century 13 bold')
fen.label.grid(row=1,padx =5, pady =5)
fen.yes_button=ttk.Button(fen.toplevel_dialog,text='Ok',width=48,command=Close_Toplevel)
fen.yes_button.grid(row=4)
def Close_Toplevel ():
fen.wm_attributes("-disable",False)
fen.toplevel_dialog.destroy()
fen.deiconify()
menu()
fen.mainloop()
| 37.322545
| 152
| 0.657667
| 9,570
| 68,039
| 4.566353
| 0.050888
| 0.135835
| 0.161876
| 0.047323
| 0.886728
| 0.878261
| 0.868993
| 0.865492
| 0.863204
| 0.857895
| 0
| 0.021911
| 0.190376
| 68,039
| 1,822
| 153
| 37.34303
| 0.771394
| 0.062861
| 0
| 0.872385
| 0
| 0
| 0.082338
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04742
| false
| 0.019526
| 0.004184
| 0
| 0.070432
| 0.001395
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
99dffa459bc7dfffb6f4a430b3ffac5f8cc05734
| 194
|
py
|
Python
|
smores/__init__.py
|
codylandry/Smores
|
cc0717b5edd0c09982820cc8705f73119641d0a2
|
[
"MIT"
] | 7
|
2017-09-18T13:04:30.000Z
|
2021-06-03T06:48:26.000Z
|
smores/__init__.py
|
codylandry/Smores
|
cc0717b5edd0c09982820cc8705f73119641d0a2
|
[
"MIT"
] | 1
|
2017-11-22T20:45:27.000Z
|
2017-11-22T20:45:27.000Z
|
smores/__init__.py
|
codylandry/Smores
|
cc0717b5edd0c09982820cc8705f73119641d0a2
|
[
"MIT"
] | null | null | null |
from .smores import Smores, AutocompleteResponse, TemplateString, TemplateFile, Schema, Nested
__all__ = ['Smores', 'AutocompleteResponse', 'TemplateString', 'TemplateFile', 'Schema', 'Nested']
| 64.666667
| 98
| 0.778351
| 16
| 194
| 9.1875
| 0.5625
| 0.353742
| 0.544218
| 0.707483
| 0.870748
| 0.870748
| 0
| 0
| 0
| 0
| 0
| 0
| 0.087629
| 194
| 2
| 99
| 97
| 0.830508
| 0
| 0
| 0
| 0
| 0
| 0.329897
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
412e848b7e0488d32c3276f0473edaef8c8bebb6
| 4,834
|
py
|
Python
|
scripts/figures/gene_abundance.py
|
vic-cheung/vectorseq
|
6f1aaeb3035c3c939b442e30076504ff84e43aa5
|
[
"MIT"
] | 1
|
2022-03-30T19:56:43.000Z
|
2022-03-30T19:56:43.000Z
|
scripts/figures/gene_abundance.py
|
vic-cheung/vectorseq
|
6f1aaeb3035c3c939b442e30076504ff84e43aa5
|
[
"MIT"
] | null | null | null |
scripts/figures/gene_abundance.py
|
vic-cheung/vectorseq
|
6f1aaeb3035c3c939b442e30076504ff84e43aa5
|
[
"MIT"
] | null | null | null |
#%%
import scanpy as sc
import pandas as pd
from pathlib import Path
from vectorseq.utils import check_gene_abundance, create_dir
from vectorseq.marker_constants import BrainGenes
data_dir = Path("/spare_volume/vectorseq-data")
figure_save_dir = create_dir(data_dir / "gene_abundance")
#%% [markdown]
# ## Gene Abundance Table for Experiment: 3250, Brain Region: v1
#%%
experiment_id = "3250"
brain_region = "v1"
run_dir = data_dir / experiment_id / brain_region
all_cells_output_dir = create_dir(run_dir / "all_cells")
adata = sc.read_h5ad(all_cells_output_dir / "filter" / "adata.h5ad")
filtered_tg_list = [
gene for gene in BrainGenes.TG_MARKERS if gene.upper() in adata.obs.columns
]
endogenous_genes_list = [
"Snap25",
"Rbfox3",
"Slc17a6",
"Camk2a",
"Gad1",
"Gad2",
"Mog",
"Flt1",
]
gene_list = filtered_tg_list + endogenous_genes_list
count_fractions_df = pd.DataFrame()
for gene in gene_list:
temp = check_gene_abundance(adata, gene_of_interest=gene)
if not temp.empty:
# DataFrame.append was removed in pandas 2.0; pd.concat is the supported path
count_fractions_df = pd.concat(
[
count_fractions_df,
pd.DataFrame.from_dict(
{
"gene": gene,
"number_of_expressing_cells": temp.shape[0],
"number_of_reads": temp.goi_counts.sum(),
"abundance_in_expressing_cells": f"{round(temp.percent_count_goi.mean(),2)} +/- {round(temp.percent_count_goi.std(),2)}",
},
orient="index",
).T,
]
)
print(f"{gene} detected.")
else:
print(f"{gene} not detected.")
count_fractions_df.set_index(keys="gene", drop=True, inplace=True)
count_fractions_df.to_csv(
figure_save_dir / f"{experiment_id}_{brain_region}_all_cells_gene_abundance.csv"
)
# %%
#%% [markdown]
# ## Gene Abundance Table for Experiment: 3382, Brain Region: snr
#%%
experiment_id = "3382"
brain_region = "snr"
run_dir = data_dir / experiment_id / brain_region
all_cells_output_dir = create_dir(run_dir / "all_cells")
adata = sc.read_h5ad(all_cells_output_dir / "filter" / "adata.h5ad")
filtered_tg_list = [
gene for gene in BrainGenes.TG_MARKERS if gene.upper() in adata.obs.columns
]
endogenous_genes_list = [
"Snap25",
"Rbfox3",
"Slc17a6",
"Camk2a",
"Gad1",
"Gad2",
"Mog",
"Flt1",
]
gene_list = filtered_tg_list + endogenous_genes_list
count_fractions_df = pd.DataFrame()
for gene in gene_list:
temp = check_gene_abundance(adata, gene_of_interest=gene)
if not temp.empty:
count_fractions_df = pd.concat(
[
count_fractions_df,
pd.DataFrame.from_dict(
{
"gene": gene,
"number_of_expressing_cells": temp.shape[0],
"number_of_reads": temp.goi_counts.sum(),
"abundance_in_expressing_cells": f"{round(temp.percent_count_goi.mean(),2)} +/- {round(temp.percent_count_goi.std(),2)}",
},
orient="index",
).T,
]
)
print(f"{gene} detected.")
else:
print(f"{gene} not detected.")
count_fractions_df.set_index(keys="gene", drop=True, inplace=True)
count_fractions_df.to_csv(
figure_save_dir / f"{experiment_id}_{brain_region}_all_cells_gene_abundance.csv"
)
# %%
#%% [markdown]
# ## Gene Abundance Table for Experiment: 3454, Brain Region: sc
#%%
data_dir = Path("/spare_volume/vectorseq-data")
experiment_id = "3454"
brain_region = "sc"
run_dir = data_dir / experiment_id / brain_region
all_cells_output_dir = create_dir(run_dir / "all_cells")
adata = sc.read_h5ad(all_cells_output_dir / "filter" / "adata.h5ad")
filtered_tg_list = [
gene for gene in BrainGenes.TG_MARKERS if gene.upper() in adata.obs.columns
]
endogenous_genes_list = [
"Snap25",
"Rbfox3",
"Slc17a6",
"Camk2a",
"Gad1",
"Gad2",
"Mog",
"Flt1",
]
gene_list = filtered_tg_list + endogenous_genes_list
count_fractions_df = pd.DataFrame()
for gene in gene_list:
temp = check_gene_abundance(adata, gene_of_interest=gene)
if not temp.empty:
count_fractions_df = pd.concat(
[
count_fractions_df,
pd.DataFrame.from_dict(
{
"gene": gene,
"number_of_expressing_cells": temp.shape[0],
"number_of_reads": temp.goi_counts.sum(),
"abundance_in_expressing_cells": f"{round(temp.percent_count_goi.mean(),2)} +/- {round(temp.percent_count_goi.std(),2)}",
},
orient="index",
).T,
]
)
print(f"{gene} detected.")
else:
print(f"{gene} not detected.")
count_fractions_df.set_index(keys="gene", drop=True, inplace=True)
count_fractions_df.to_csv(
figure_save_dir / f"{experiment_id}_{brain_region}_all_cells_gene_abundance.csv"
)
#%%
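#%% [markdown]
# The three cells above run an identical pipeline per experiment. A sketch of
# the shared body as a function (same helpers imported at the top; the
# function name is illustrative):
#%%
def gene_abundance_table(adata, gene_list):
    rows = []
    for gene in gene_list:
        temp = check_gene_abundance(adata, gene_of_interest=gene)
        if temp.empty:
            print(f"{gene} not detected.")
            continue
        print(f"{gene} detected.")
        rows.append(
            {
                "gene": gene,
                "number_of_expressing_cells": temp.shape[0],
                "number_of_reads": temp.goi_counts.sum(),
                "abundance_in_expressing_cells": f"{round(temp.percent_count_goi.mean(),2)} +/- {round(temp.percent_count_goi.std(),2)}",
            }
        )
    df = pd.DataFrame(rows)
    return df.set_index("gene") if not df.empty else df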
| 30.402516
| 141
| 0.646463
| 626
| 4,834
| 4.654952
| 0.161342
| 0.072066
| 0.082361
| 0.047358
| 0.886754
| 0.886754
| 0.87337
| 0.849348
| 0.849348
| 0.849348
| 0
| 0.019055
| 0.22921
| 4,834
| 158
| 142
| 30.594937
| 0.763017
| 0.049441
| 0
| 0.770992
| 0
| 0.022901
| 0.233829
| 0.138767
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.038168
| 0
| 0.038168
| 0.045802
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
418053ee5e7fc1cc778869d9ebfee56bc7e30f8e
| 190
|
py
|
Python
|
built-in/TensorFlow/Official/nlp/Transformer_for_TensorFlow/noahnmt/attentions/__init__.py
|
Huawei-Ascend/modelzoo
|
df51ed9c1d6dbde1deef63f2a037a369f8554406
|
[
"Apache-2.0"
] | null | null | null |
built-in/TensorFlow/Official/nlp/Transformer_for_TensorFlow/noahnmt/attentions/__init__.py
|
Huawei-Ascend/modelzoo
|
df51ed9c1d6dbde1deef63f2a037a369f8554406
|
[
"Apache-2.0"
] | 3
|
2021-03-31T20:15:40.000Z
|
2022-02-09T23:50:46.000Z
|
built-in/TensorFlow/Official/nlp/Transformer_for_TensorFlow/noahnmt/attentions/__init__.py
|
Huawei-Ascend/modelzoo
|
df51ed9c1d6dbde1deef63f2a037a369f8554406
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright Huawei Noah's Ark Lab.
from noahnmt.attentions import dot_attention
from noahnmt.attentions import dot_prod_attention
from noahnmt.attentions import sum_attention
| 31.666667
| 49
| 0.847368
| 28
| 190
| 5.607143
| 0.607143
| 0.210191
| 0.401274
| 0.515924
| 0.66879
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005882
| 0.105263
| 190
| 6
| 50
| 31.666667
| 0.917647
| 0.236842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
68efa2f47f268526a9d0f5230984fd8a4c74294f
| 5,184
|
py
|
Python
|
tests/test_plasmid_extractor.py
|
lowandrew/Plasmid_Assembler
|
7366e5e5a88e3a87d164934de0c6a3ee51f241b3
|
[
"MIT"
] | 5
|
2018-01-16T04:55:10.000Z
|
2020-10-23T08:59:52.000Z
|
tests/test_plasmid_extractor.py
|
lowandrew/Plasmid_Assembler
|
7366e5e5a88e3a87d164934de0c6a3ee51f241b3
|
[
"MIT"
] | null | null | null |
tests/test_plasmid_extractor.py
|
lowandrew/Plasmid_Assembler
|
7366e5e5a88e3a87d164934de0c6a3ee51f241b3
|
[
"MIT"
] | 3
|
2018-02-16T18:49:07.000Z
|
2021-06-20T06:45:02.000Z
|
import os
import shutil
"""
Remaining things to test:
find_plasmid_kmer_scores
find_score
filter_similar_plasmids
generate_consensus
"""
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0, parentdir)
from plasmidextractor.PlasmidExtractor import *
def test_mash_paired_gzipped():
mash_for_potential_plasmids(forward_reads='tests/test_fastqs/paired_R1.fastq.gz',
reverse_reads='tests/test_fastqs/paired_R2.fastq.gz',
plasmid_db='tests/test_fasta/dummy_db.fasta',
output_dir='tests/mash',
identity_cutoff=-1)
assert os.path.isfile('tests/mash/screen_results.tsv')
shutil.rmtree('tests/mash')
def test_mash_unpaired_gzipped():
mash_for_potential_plasmids(forward_reads='tests/test_fastqs/paired_R1.fastq.gz',
plasmid_db='tests/test_fasta/dummy_db.fasta',
output_dir='tests/mash',
identity_cutoff=-1)
assert os.path.isfile('tests/mash/screen_results.tsv')
shutil.rmtree('tests/mash')
def test_mash_paired_uncompressed():
mash_for_potential_plasmids(forward_reads='tests/test_fastqs/paired_R1.fastq',
reverse_reads='tests/test_fastqs/paired_R2.fastq',
plasmid_db='tests/test_fasta/dummy_db.fasta',
output_dir='tests/mash',
identity_cutoff=-1)
assert os.path.isfile('tests/mash/screen_results.tsv')
shutil.rmtree('tests/mash')
def test_mash_unpaired_uncompressed():
mash_for_potential_plasmids(forward_reads='tests/test_fastqs/paired_R1.fastq',
plasmid_db='tests/test_fasta/dummy_db.fasta',
output_dir='tests/mash',
identity_cutoff=-1)
assert os.path.isfile('tests/mash/screen_results.tsv')
shutil.rmtree('tests/mash')
def test_bait_and_trim_paired_gzipped():
bait_and_trim(forward_reads='tests/test_fastqs/paired_R1.fastq.gz',
reverse_reads='tests/test_fastqs/paired_R2.fastq.gz',
plasmid_db='tests/test_fasta/dummy_db.fasta',
output_dir='tests/out')
assert os.path.isfile('tests/out/plasmid_reads_R1.fastq.gz') and os.path.isfile('tests/out/plasmid_reads_R2.fastq.gz')
shutil.rmtree('tests/out')
def test_bait_and_trim_paired_uncompressed():
bait_and_trim(forward_reads='tests/test_fastqs/paired_R1.fastq',
reverse_reads='tests/test_fastqs/paired_R2.fastq',
plasmid_db='tests/test_fasta/dummy_db.fasta',
output_dir='tests/out')
assert os.path.isfile('tests/out/plasmid_reads_R1.fastq.gz') and os.path.isfile('tests/out/plasmid_reads_R2.fastq.gz')
shutil.rmtree('tests/out')
def test_bait_and_trim_unpaired_gzipped():
bait_and_trim(forward_reads='tests/test_fastqs/paired_R1.fastq.gz',
plasmid_db='tests/test_fasta/dummy_db.fasta',
output_dir='tests/out')
assert os.path.isfile('tests/out/plasmid_reads_R1.fastq.gz')
shutil.rmtree('tests/out')
def test_bait_and_trim_unpaired_uncompressed():
bait_and_trim(forward_reads='tests/test_fastqs/paired_R1.fastq',
plasmid_db='tests/test_fasta/dummy_db.fasta',
output_dir='tests/out')
assert os.path.isfile('tests/out/plasmid_reads_R1.fastq.gz')
shutil.rmtree('tests/out')
def test_bait_and_trim_paired_gzipped_lowmem():
bait_and_trim(forward_reads='tests/test_fastqs/paired_R1.fastq.gz',
reverse_reads='tests/test_fastqs/paired_R2.fastq.gz',
plasmid_db='tests/test_fasta/dummy_db.fasta',
output_dir='tests/out',
low_memory=True)
assert os.path.isfile('tests/out/plasmid_reads_R1.fastq.gz') and os.path.isfile('tests/out/plasmid_reads_R2.fastq.gz')
shutil.rmtree('tests/out')
def test_bait_and_trim_unpaired_gzipped_lowmem():
bait_and_trim(forward_reads='tests/test_fastqs/paired_R1.fastq.gz',
plasmid_db='tests/test_fasta/dummy_db.fasta',
output_dir='tests/out',
low_memory=True)
assert os.path.isfile('tests/out/plasmid_reads_R1.fastq.gz')
shutil.rmtree('tests/out')
def test_fasta_write():
create_individual_fastas(plasmid_db='tests/test_fasta/dummy_db.fasta',
potential_plasmid_list=['seq1'],
output_dir='tests/fasta/')
assert os.path.isfile('tests/fasta/seq1') and not os.path.isfile('tests/fasta/seq2')
shutil.rmtree('tests/fasta')
def test_fasta_kmerization():
kmerize_individual_fastas(potential_plasmid_list=['dummy_db.fasta'],
fasta_dir='tests/test_fasta',
output_dir='tests/kmerization')
assert os.path.isfile('tests/kmerization/dummy_db.fasta.kmc_pre') and os.path.isfile('tests/kmerization/dummy_db.fasta.kmc_suf')
shutil.rmtree('tests/kmerization')
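# The mash and bait-and-trim tests above all end with the same
# assert-then-cleanup pair. A sketch of a helper that would centralise it
# (illustrative only; the functions listed in the module docstring still
# need their own tests):
def assert_results_and_clean(result_files, out_dir):
    for result_file in result_files:
        assert os.path.isfile(result_file)
    shutil.rmtree(out_dir)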
| 42.491803
| 132
| 0.656829
| 670
| 5,184
| 4.759701
| 0.11791
| 0.076199
| 0.06397
| 0.090624
| 0.84666
| 0.818125
| 0.818125
| 0.812794
| 0.801819
| 0.774851
| 0
| 0.007998
| 0.228202
| 5,184
| 121
| 133
| 42.842975
| 0.789053
| 0
| 0
| 0.693182
| 1
| 0
| 0.332216
| 0.270899
| 0
| 0
| 0
| 0
| 0.136364
| 1
| 0.136364
| false
| 0
| 0.034091
| 0
| 0.170455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ec0bbf6fa340a30833399fb89ab800ad2004fd7e
| 8,517
|
py
|
Python
|
src/rest-api/tests/routes/v1/test_dex.py
|
geometry-labs/craft-multi-token-api
|
e533fd02c928c4857076ee11e14d8c0608bf367d
|
[
"Apache-2.0"
] | null | null | null |
src/rest-api/tests/routes/v1/test_dex.py
|
geometry-labs/craft-multi-token-api
|
e533fd02c928c4857076ee11e14d8c0608bf367d
|
[
"Apache-2.0"
] | null | null | null |
src/rest-api/tests/routes/v1/test_dex.py
|
geometry-labs/craft-multi-token-api
|
e533fd02c928c4857076ee11e14d8c0608bf367d
|
[
"Apache-2.0"
] | null | null | null |
from fastapi.testclient import TestClient
from app.core.config import settings
def test_get_transactions(prep_fixtures, client: TestClient) -> None:
r = client.get(f"{settings.PREFIX}/dex/transactions")
response = r.json()
assert r.status_code == 200
assert response
assert len(response) == 1
r = client.get(f"{settings.PREFIX}/dex/transactions?limit=6")
response = r.json()
assert r.status_code == 200
assert response
assert len(response) == 6
def test_get_transactions_by_method(prep_fixtures, client: TestClient) -> None:
r = client.get(f"{settings.PREFIX}/dex/transactions/add")
response = r.json()
assert r.status_code == 200
assert response
assert len(response) == 1
r = client.get(f"{settings.PREFIX}/dex/transactions/add?limit=4")
response = r.json()
assert r.status_code == 200
assert response
assert len(response) == 4
r = client.get(f"{settings.PREFIX}/dex/transactions/remove")
response = r.json()
assert r.status_code == 200
assert response
assert len(response) == 1
r = client.get(f"{settings.PREFIX}/dex/transactions/remove?limit=2")
response = r.json()
assert r.status_code == 200
assert response
assert len(response) == 2
def test_get_logs(prep_fixtures, client: TestClient) -> None:
r = client.get(f"{settings.PREFIX}/dex/logs")
response = r.json()
assert r.status_code == 200
assert response
assert len(response) == 1
r = client.get(f"{settings.PREFIX}/dex/logs?limit=3")
response = r.json()
assert r.status_code == 200
assert response
assert len(response) == 3
def test_get_logs_by_method(prep_fixtures, client: TestClient) -> None:
r = client.get(f"{settings.PREFIX}/dex/logs/TransferSingle")
response = r.json()
assert r.status_code == 200
assert response
assert len(response) == 1
r = client.get(f"{settings.PREFIX}/dex/logs/TransferSingle?limit=1")
response = r.json()
assert r.status_code == 200
assert response
assert len(response) == 1
r = client.get(f"{settings.PREFIX}/dex/logs/Swap")
response = r.json()
assert r.status_code == 200
assert response
assert len(response) == 1
r = client.get(f"{settings.PREFIX}/dex/logs/Swap?limit=1")
response = r.json()
assert r.status_code == 200
assert response
assert len(response) == 1
def test_get_stats(prep_fixtures, client: TestClient) -> None:
r = client.get(f"{settings.PREFIX}/dex/stats/1")
response = r.json()
assert r.status_code == 200
assert response
def test_get_stats_invalid_market_id(prep_fixtures, client: TestClient) -> None:
r = client.get(f"{settings.PREFIX}/dex/stats/bad-market-id")
response = r.json()
assert r.status_code == 400 # Bad request
assert response
def test_get_balance_of(prep_fixtures, client: TestClient) -> None:
# Fine to leave request as constant since the blockchain is immutable
r = client.get(f"{settings.PREFIX}/dex/balance-of/hxe7af5fcfd8dfc67530a01a0e403882687528dfcb/2")
response = r.json()
assert r.status_code == 200
assert response
def test_get_balance_of_invalid_address(prep_fixtures, client: TestClient) -> None:
r = client.get(f"{settings.PREFIX}/dex/balance-of/0xbadaddress/2")
response = r.json()
assert r.status_code == 400 # Bad request
assert response
def test_get_swap_chart_5m(prep_fixtures, client: TestClient) -> None:
r = client.get(f"{settings.PREFIX}/dex/swap-chart/0/5m/0/1000000000")
response = r.json()
assert r.status_code == 200
assert len(response) == 4
assert response[0][0] == 0 # Timestamp
assert response[0][1] == 1 # Open
assert response[0][2] == 3 # Close
assert response[0][3] == 3 # High
assert response[0][4] == 1 # Low
assert response[0][5] == 3 # Volume
assert response[1][0] == 300000000
assert response[1][1] == 3
assert response[1][2] == 6
assert response[1][3] == 6
assert response[1][4] == 3
assert response[1][5] == 3
assert response[2][0] == 600000000
assert response[2][1] == 6
assert response[2][2] == 6
assert response[2][3] == 6
assert response[2][4] == 6
assert response[2][5] == 0
assert response[3][0] == 900000000
assert response[3][1] == 6
assert response[3][2] == 9
assert response[3][3] == 9
assert response[3][4] == 6
assert response[3][5] == 3
def test_get_swap_chart_15m(prep_fixtures, client: TestClient) -> None:
r = client.get(f"{settings.PREFIX}/dex/swap-chart/1/15m/0/3000000000")
response = r.json()
assert r.status_code == 200
assert len(response) == 4
assert response[0][0] == 0
assert response[0][1] == 1
assert response[0][2] == 3
assert response[0][3] == 3
assert response[0][4] == 1
assert response[0][5] == 3
assert response[1][0] == 900000000
assert response[1][1] == 3
assert response[1][2] == 6
assert response[1][3] == 6
assert response[1][4] == 3
assert response[1][5] == 3
assert response[2][0] == 1800000000
assert response[2][1] == 6
assert response[2][2] == 6
assert response[2][3] == 6
assert response[2][4] == 6
assert response[2][5] == 0
assert response[3][0] == 2700000000
assert response[3][1] == 6
assert response[3][2] == 9
assert response[3][3] == 9
assert response[3][4] == 6
assert response[3][5] == 3
def test_get_swap_chart_1h(prep_fixtures, client: TestClient) -> None:
r = client.get(f"{settings.PREFIX}/dex/swap-chart/2/1h/0/20000000000")
response = r.json()
assert r.status_code == 200
assert len(response) == 6
assert response[0][0] == 0
assert response[0][1] == 1
assert response[0][2] == 3
assert response[0][3] == 3
assert response[0][4] == 1
assert response[0][5] == 3
assert response[1][0] == 3600000000
assert response[1][1] == 3
assert response[1][2] == 6
assert response[1][3] == 6
assert response[1][4] == 3
assert response[1][5] == 3
assert response[2][0] == 7200000000
assert response[2][1] == 6
assert response[2][2] == 6
assert response[2][3] == 6
assert response[2][4] == 6
assert response[2][5] == 0
assert response[3][0] == 10800000000
assert response[3][1] == 6
assert response[3][2] == 9
assert response[3][3] == 9
assert response[3][4] == 6
assert response[3][5] == 3
def test_get_swap_chart_4h(prep_fixtures, client: TestClient) -> None:
r = client.get(f"{settings.PREFIX}/dex/swap-chart/3/4h/0/50000000000")
response = r.json()
assert r.status_code == 200
assert len(response) == 4
assert response[0][0] == 0
assert response[0][1] == 1
assert response[0][2] == 3
assert response[0][3] == 3
assert response[0][4] == 1
assert response[0][5] == 3
assert response[1][0] == 14400000000
assert response[1][1] == 3
assert response[1][2] == 6
assert response[1][3] == 6
assert response[1][4] == 3
assert response[1][5] == 3
assert response[2][0] == 28800000000
assert response[2][1] == 6
assert response[2][2] == 6
assert response[2][3] == 6
assert response[2][4] == 6
assert response[2][5] == 0
assert response[3][0] == 43200000000
assert response[3][1] == 6
assert response[3][2] == 9
assert response[3][3] == 9
assert response[3][4] == 6
assert response[3][5] == 3
def test_get_swap_chart_1d(prep_fixtures, client: TestClient) -> None:
r = client.get(f"{settings.PREFIX}/dex/swap-chart/4/1d/0/300000000000")
response = r.json()
assert r.status_code == 200
assert len(response) == 4
assert response[0][0] == 0
assert response[0][1] == 1
assert response[0][2] == 3
assert response[0][3] == 3
assert response[0][4] == 1
assert response[0][5] == 3
assert response[1][0] == 86400000000
assert response[1][1] == 3
assert response[1][2] == 6
assert response[1][3] == 6
assert response[1][4] == 3
assert response[1][5] == 3
assert response[2][0] == 172800000000
assert response[2][1] == 6
assert response[2][2] == 6
assert response[2][3] == 6
assert response[2][4] == 6
assert response[2][5] == 0
assert response[3][0] == 259200000000
assert response[3][1] == 6
assert response[3][2] == 9
assert response[3][3] == 9
assert response[3][4] == 6
assert response[3][5] == 3
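# The five swap-chart tests above assert the same OHLCV rows, scaled by the
# candle width. A sketch of a shared helper, assuming the fixture data is
# identical across intervals (illustrative only):
def assert_candles(response, step):
    # each row: [timestamp, open, close, high, low, volume]
    expected = [
        (0, 1, 3, 3, 1, 3),
        (step, 3, 6, 6, 3, 3),
        (2 * step, 6, 6, 6, 6, 0),
        (3 * step, 6, 9, 9, 6, 3),
    ]
    for row, exp in zip(response, expected):
        assert tuple(row[:6]) == exp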
| 29.470588
| 100
| 0.628977
| 1,261
| 8,517
| 4.183981
| 0.07613
| 0.360879
| 0.116566
| 0.043783
| 0.88116
| 0.857278
| 0.857278
| 0.853867
| 0.853677
| 0.828658
| 0
| 0.103128
| 0.215569
| 8,517
| 288
| 101
| 29.572917
| 0.686574
| 0.015029
| 0
| 0.770563
| 0
| 0
| 0.109692
| 0.109692
| 0
| 0
| 0
| 0
| 0.753247
| 1
| 0.056277
| false
| 0
| 0.008658
| 0
| 0.064935
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
d411989115cbd46c3d67f874ee5a2a13e7088d38
| 1,991
|
py
|
Python
|
ARC_B/ARC005_B.py
|
ryosuke0825/atcoder_python
|
185cdbe7db44ecca1aaf357858d16d31ce515ddb
|
[
"MIT"
] | null | null | null |
ARC_B/ARC005_B.py
|
ryosuke0825/atcoder_python
|
185cdbe7db44ecca1aaf357858d16d31ce515ddb
|
[
"MIT"
] | null | null | null |
ARC_B/ARC005_B.py
|
ryosuke0825/atcoder_python
|
185cdbe7db44ecca1aaf357858d16d31ce515ddb
|
[
"MIT"
] | null | null | null |
xyw = input().split()
X = int(xyw[0])-1
Y = int(xyw[1])-1
C = []
for _ in range(9):
li = list(input())
C.append(li)
ans = ''
flg = True
if xyw[2] == 'R':
x_add = 1
for i in range(4):
ans += C[Y][X]
if X == 8:
x_add = -1
X += x_add
elif xyw[2] == 'L':
x_add = -1
for i in range(4):
ans += C[Y][X]
if X == 0:
x_add = 1
X += x_add
elif xyw[2] == 'U':
y_add = -1
for i in range(4):
ans += C[Y][X]
if Y == 0:
y_add = 1
Y += y_add
elif xyw[2] == 'D':
y_add = 1
for i in range(4):
ans += C[Y][X]
if Y == 8:
y_add = -1
Y += y_add
elif xyw[2] == 'RU':
x_add = 1
y_add = -1
for i in range(4):
ans += C[Y][X]
if X == 8 and Y == 0:
x_add = -1
y_add = 1
elif X == 8 and Y != 0:
x_add = -1
elif X != 8 and Y == 0:
y_add = 1
X += x_add
Y += y_add
elif xyw[2] == 'RD':
x_add = 1
y_add = 1
for i in range(4):
ans += C[Y][X]
if X == 8 and Y == 8:
x_add = -1
y_add = -1
elif X == 8 and Y != 8:
x_add = -1
elif X != 8 and Y == 8:
y_add = -1
X += x_add
Y += y_add
elif xyw[2] == 'LU':
x_add = -1
y_add = -1
for i in range(4):
ans += C[Y][X]
if X == 0 and Y == 0:
x_add = 1
y_add = 1
elif X == 0 and Y != 0:
x_add = 1
elif X != 0 and Y == 0:
y_add = 1
X += x_add
Y += y_add
elif xyw[2] == 'LD':
x_add = -1
y_add = 1
for i in range(4):
ans += C[Y][X]
if X == 0 and Y == 8:
x_add = 1
y_add = -1
elif X == 0 and Y != 8:
x_add = 1
elif X != 0 and Y == 8:
y_add = -1
X += x_add
Y += y_add
print(ans)
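# The eight branches above apply the same bounce rule per axis. A sketch of
# the walk as one function over direction vectors, equivalent for the four
# steps taken here (the helper and the DIRS table are illustrative):
def walk(C, X, Y, dx, dy, steps=4):
    out = ''
    for _ in range(steps):
        out += C[Y][X]
        if dx and ((X == 8) if dx > 0 else (X == 0)):
            dx = -dx  # bounce off a vertical wall
        if dy and ((Y == 8) if dy > 0 else (Y == 0)):
            dy = -dy  # bounce off a horizontal wall
        X += dx
        Y += dy
    return out

DIRS = {'R': (1, 0), 'L': (-1, 0), 'U': (0, -1), 'D': (0, 1),
        'RU': (1, -1), 'RD': (1, 1), 'LU': (-1, -1), 'LD': (-1, 1)}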
| 20.525773
| 31
| 0.353591
| 349
| 1,991
| 1.888252
| 0.103152
| 0.194234
| 0.121396
| 0.097117
| 0.849772
| 0.846737
| 0.846737
| 0.846737
| 0.7739
| 0.664643
| 0
| 0.079882
| 0.490708
| 1,991
| 96
| 32
| 20.739583
| 0.57002
| 0
| 0
| 0.638298
| 0
| 0
| 0.006027
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.010638
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d413f6505308e4d39c96e7ca124b0fa0f103d091
| 8,548
|
py
|
Python
|
t2dm/manager/intelligence/interactions/inpatient1.py
|
nhsx-mirror/SynPath_Diabetes
|
1d9bd1c83f20820a35125c94e8b058bdd1a6ac3c
|
[
"MIT"
] | null | null | null |
t2dm/manager/intelligence/interactions/inpatient1.py
|
nhsx-mirror/SynPath_Diabetes
|
1d9bd1c83f20820a35125c94e8b058bdd1a6ac3c
|
[
"MIT"
] | null | null | null |
t2dm/manager/intelligence/interactions/inpatient1.py
|
nhsx-mirror/SynPath_Diabetes
|
1d9bd1c83f20820a35125c94e8b058bdd1a6ac3c
|
[
"MIT"
] | 1
|
2021-09-29T10:00:23.000Z
|
2021-09-29T10:00:23.000Z
|
import datetime
# Interactions for inpatient care
# "review_and_consultation",
# "bd_hypoglycaemic_ep",
# "bd_hyperglycaemic_ep",
# "bd_lower_limb_compl",
# "enhanced_independence",
# "retinal_procedure",
# "amputation"
# Inpatient interaction 1: Inpatient review and consultation (might take out if not in spell)
def review_and_consultation(patient, environment, patient_time):
encounter = {
"resource_type": "Encounter",
"name" : "review and consultation",
"start": patient_time,
}
entry = {
"resource_type" : "Observation",
"name": "review and consultation",
"start": encounter["start"] + datetime.timedelta(minutes=15),
"cost": 3053, # NEL long stay from PSSRU 2018-19 - to be updated
"glucose": -1, # dummy glucose impact, to be updated
"carbon": 5032, # carbon impact, PSSRU 2018-19
}
new_patient_record_entries = [encounter, entry]
next_environment_id_to_prob = {2: 0.5, 30: 0.3, 40: 0.2}
next_environment_id_to_time = {
2: datetime.timedelta(days=10), # TODO: from initial patient_time (not last)
30: datetime.timedelta(days=20),
40: datetime.timedelta(days=20),
}
update_data = {"new_patient_record_entries": new_patient_record_entries}
return (
patient,
environment,
update_data,
next_environment_id_to_prob,
next_environment_id_to_time,
)
# Inpatient interaction 2: Hypoglycaemic episode bed day
def bd_hypoglycaemic_ep(patient, environment, patient_time):
encounter = {
"resource_type": "Encounter",
"name" : "hypoglycaemic ep bd",
"start": patient_time,
}
entry = {
"resource_type" : "Observation",
"name": "hypoglycaemic ep bd",
"start": encounter["start"] + datetime.timedelta(minutes=15),
"cost": 3053, # NEL long stay from PSSRU 2018-19 - to be updated
"glucose": -1, # dummy glucose impact, to be updated
"carbon": 5032, # carbon impact, PSSRU 2018-19
}
new_patient_record_entries = [encounter, entry]
next_environment_id_to_prob = {2: 0.5, 30: 0.3, 40: 0.2}
next_environment_id_to_time = {
2: datetime.timedelta(days=10), # TODO: from initial patient_time (not last)
30: datetime.timedelta(days=20),
40: datetime.timedelta(days=20),
}
update_data = {"new_patient_record_entries": new_patient_record_entries}
return (
patient,
environment,
update_data,
next_environment_id_to_prob,
next_environment_id_to_time,
)
# Inpatient interaction 3: Hyperglycaemic episode bed day
def bd_hyperglycaemic_ep(patient, environment, patient_time):
encounter = {
"resource_type": "Encounter",
"name" : "hyperglycaemic ep bd",
"start": patient_time,
}
entry = {
"resource_type" : "Observation",
"name": "hyperglycaemic ep bd",
"start": encounter["start"] + datetime.timedelta(minutes=15),
"cost": 3053, # NEL long stay from PSSRU 2018-19 - to be updated
"glucose": -1, # dummy glucose impact, to be updated
"carbon": 5032, # carbon impact, PSSRU 2018-19
}
new_patient_record_entries = [encounter, entry]
next_environment_id_to_prob = {2: 0.5, 30: 0.3, 40: 0.2}
next_environment_id_to_time = {
2: datetime.timedelta(days=10), # TODO: from initial patient_time (not last)
30: datetime.timedelta(days=20),
40: datetime.timedelta(days=20),
}
update_data = {"new_patient_record_entries": new_patient_record_entries}
return (
patient,
environment,
update_data,
next_environment_id_to_prob,
next_environment_id_to_time,
)
# Inpatient interaction 4: Lower limb complications bed day
def bd_lower_limb_ep(patient, environment, patient_time):
encounter = {
"resource_type": "Encounter",
"name" : "lower limb ep bd",
"start": patient_time,
}
entry = {
"resource_type" : "Observation",
"name": "lower limb ep bd",
"start": encounter["start"] + datetime.timedelta(minutes=15),
"cost": 3053, # NEL long stay from PSSRU 2018-19 - to be updated
"glucose": -1, # dummy glucose impact, to be updated
"carbon": 5032, # carbon impact, to be updated
}
new_patient_record_entries = [encounter, entry]
next_environment_id_to_prob = {2: 0.5, 30: 0.3, 40: 0.2}
next_environment_id_to_time = {
2: datetime.timedelta(days=10), # TODO: from initial patient_time (not last)
30: datetime.timedelta(days=20),
40: datetime.timedelta(days=20),
}
update_data = {"new_patient_record_entries": new_patient_record_entries}
return (
patient,
environment,
update_data,
next_environment_id_to_prob,
next_environment_id_to_time,
)
# Inpatient interaction 5: Enhanced independence
def enhanced_indep(patient, environment, patient_time):
encounter = {
"resource_type": "Encounter",
"name" : "enhanced independence",
"start": patient_time,
}
entry = {
"resource_type" : "Observation",
"name": "enhanced independence",
"start": encounter["start"] + datetime.timedelta(minutes=15),
"cost": 3053, # NEL long stay from PSSRU 2018-19 - to be updated
"glucose": -1, # dummy glucose impact, to be updated
"carbon": 5032, # carbon impact, to be updated
}
new_patient_record_entries = [encounter, entry]
next_environment_id_to_prob = {2: 0.5, 30: 0.3, 40: 0.2}
next_environment_id_to_time = {
2: datetime.timedelta(days=10), # TODO: from initial patient_time (not last)
30: datetime.timedelta(days=20),
40: datetime.timedelta(days=20),
}
update_data = {"new_patient_record_entries": new_patient_record_entries}
return (
patient,
environment,
update_data,
next_environment_id_to_prob,
next_environment_id_to_time,
)
# Inpatient interaction 6: Retinal procedure
def retinal_procedure(patient, environment, patient_time):
encounter = {
"resource_type": "Encounter",
"name" : "retinal procedure",
"start": patient_time,
}
entry = {
"resource_type" : "Observation",
"name": "retinal procedure",
"start": encounter["start"] + datetime.timedelta(minutes=15),
"cost": 3053, # NEL long stay from PSSRU 2018-19 - to be updated
"glucose": -1, # dummy glucose impact, to be updated
"carbon": 5032, # carbon impact, to be updated
}
new_patient_record_entries = [encounter, entry]
next_environment_id_to_prob = {2: 0.5, 30: 0.3, 40: 0.2}
next_environment_id_to_time = {
2: datetime.timedelta(days=10), # TODO: from initial patient_time (not last)
30: datetime.timedelta(days=20),
40: datetime.timedelta(days=20),
}
update_data = {"new_patient_record_entries": new_patient_record_entries}
return (
patient,
environment,
update_data,
next_environment_id_to_prob,
next_environment_id_to_time,
)
# Inpatient interaction 7: Amputation
def amputation(patient, environment, patient_time):
encounter = {
"resource_type": "Encounter",
"name" : "amputation",
"start": patient_time,
}
entry = {
"resource_type" : "Observation",
"name": "amputation",
"start": encounter["start"] + datetime.timedelta(minutes=15),
"cost": 3053, # NEL long stay from PSSRU 2018-19 - to be updated
"glucose": -1, # dummy glucose impact, to be updated
"carbon": 5032, # carbon impact, to be updated
}
new_patient_record_entries = [encounter, entry]
next_environment_id_to_prob = {2: 0.5, 30: 0.3, 40: 0.2}
next_environment_id_to_time = {
2: datetime.timedelta(days=10), # TODO: from initial patient_time (not last)
30: datetime.timedelta(days=20),
40: datetime.timedelta(days=20),
}
update_data = {"new_patient_record_entries": new_patient_record_entries}
return (
patient,
environment,
update_data,
next_environment_id_to_prob,
next_environment_id_to_time,
)
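# All seven interaction functions above differ only in the display name they
# record. A sketch of a factory that would generate them, with every field
# value copied from the functions above (the factory itself is illustrative):
def make_inpatient_interaction(name):
    def interaction(patient, environment, patient_time):
        encounter = {"resource_type": "Encounter", "name": name, "start": patient_time}
        entry = {
            "resource_type": "Observation",
            "name": name,
            "start": encounter["start"] + datetime.timedelta(minutes=15),
            "cost": 3053,  # NEL long stay from PSSRU 2018-19 - to be updated
            "glucose": -1,  # dummy glucose impact, to be updated
            "carbon": 5032,  # carbon impact, to be updated
        }
        update_data = {"new_patient_record_entries": [encounter, entry]}
        next_environment_id_to_prob = {2: 0.5, 30: 0.3, 40: 0.2}
        next_environment_id_to_time = {
            2: datetime.timedelta(days=10),
            30: datetime.timedelta(days=20),
            40: datetime.timedelta(days=20),
        }
        return (patient, environment, update_data,
                next_environment_id_to_prob, next_environment_id_to_time)
    return interaction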
| 31.776952
| 94
| 0.62576
| 996
| 8,548
| 5.123494
| 0.090361
| 0.093278
| 0.093278
| 0.104252
| 0.892808
| 0.866941
| 0.863414
| 0.863414
| 0.825789
| 0.776406
| 0
| 0.04752
| 0.266378
| 8,548
| 269
| 95
| 31.776952
| 0.766225
| 0.196186
| 0
| 0.823529
| 0
| 0
| 0.151728
| 0.026655
| 0
| 0
| 0
| 0.003717
| 0
| 1
| 0.034314
| false
| 0
| 0.004902
| 0
| 0.073529
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d44a39453789db4c72fb031457a84e78d700988f
| 355
|
py
|
Python
|
auth/auth.py
|
helthazar/contestparser
|
43843de5fd3cb7af7f24c8cbbd5ea068abb7f469
|
[
"MIT"
] | null | null | null |
auth/auth.py
|
helthazar/contestparser
|
43843de5fd3cb7af7f24c8cbbd5ea068abb7f469
|
[
"MIT"
] | null | null | null |
auth/auth.py
|
helthazar/contestparser
|
43843de5fd3cb7af7f24c8cbbd5ea068abb7f469
|
[
"MIT"
] | null | null | null |
class Auth:
@staticmethod
def opencup():
return {'login': '', 'password' : ''}
@staticmethod
def yandexcontest():
return {'login': '', 'password' : ''}
@staticmethod
def atcoder():
return {'login': '', 'password' : ''}
@staticmethod
def hackerrank():
return {'login': '', 'password' : ''}
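# The stubs above return empty credentials. A sketch of sourcing them from
# environment variables instead (the class and variable names are
# hypothetical, not part of the original module):
import os

class EnvAuth:
    @staticmethod
    def opencup():
        return {'login': os.environ.get('OPENCUP_LOGIN', ''),
                'password': os.environ.get('OPENCUP_PASSWORD', '')}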
| 22.1875
| 45
| 0.512676
| 26
| 355
| 7
| 0.423077
| 0.32967
| 0.417582
| 0.510989
| 0.56044
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.292958
| 355
| 16
| 46
| 22.1875
| 0.7251
| 0
| 0
| 0.615385
| 0
| 0
| 0.146067
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.307692
| true
| 0.307692
| 0
| 0.307692
| 0.692308
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 1
| 0
|
0
| 9
|
d458328c51a408b45453324ab4274a2b24ca3ca7
| 4,133
|
py
|
Python
|
test_requests.py
|
charlax/vcrpy
|
1d3fe5c33ecf06b494fa6cbea4acd62585820687
|
[
"MIT"
] | null | null | null |
test_requests.py
|
charlax/vcrpy
|
1d3fe5c33ecf06b494fa6cbea4acd62585820687
|
[
"MIT"
] | null | null | null |
test_requests.py
|
charlax/vcrpy
|
1d3fe5c33ecf06b494fa6cbea4acd62585820687
|
[
"MIT"
] | null | null | null |
# coding=utf-8
import os
import unittest
import vcr
import requests
TEST_CASSETTE_FILE = 'cassettes/test_req.yaml'
class TestRequestsGet(unittest.TestCase):
def setUp(self):
self.unmolested_response = requests.get('http://httpbin.org/')
with vcr.use_cassette(TEST_CASSETTE_FILE):
self.initial_response = requests.get('http://httpbin.org/')
self.cached_response = requests.get('http://httpbin.org/')
def tearDown(self):
try:
os.remove(TEST_CASSETTE_FILE)
except OSError:
pass
def test_initial_response_code(self):
self.assertEqual(self.unmolested_response.status_code, self.initial_response.status_code)
def test_cached_response_code(self):
self.assertEqual(self.unmolested_response.status_code, self.cached_response.status_code)
def test_initial_response_headers(self):
self.assertEqual(self.unmolested_response.headers['content-type'], self.initial_response.headers['content-type'])
def test_cached_response_headers(self):
self.assertEqual(self.unmolested_response.headers['content-type'], self.cached_response.headers['content-type'])
def test_initial_response_text(self):
self.assertEqual(self.unmolested_response.text, self.initial_response.text)
def test_cached_response_text(self):
self.assertEqual(self.unmolested_response.text, self.cached_response.text)
class TestRequestsAuth(unittest.TestCase):
def setUp(self):
self.unmolested_response = requests.get('https://httpbin.org/basic-auth/user/passwd', auth=('user', 'passwd'))
with vcr.use_cassette(TEST_CASSETTE_FILE):
self.initial_response = requests.get('https://httpbin.org/basic-auth/user/passwd', auth=('user', 'passwd'))
self.cached_response = requests.get('https://httpbin.org/basic-auth/user/passwd', auth=('user', 'passwd'))
def tearDown(self):
try:
os.remove(TEST_CASSETTE_FILE)
except OSError:
pass
def test_initial_response_code(self):
self.assertEqual(self.unmolested_response.status_code, self.initial_response.status_code)
def test_cached_response_code(self):
self.assertEqual(self.unmolested_response.status_code, self.cached_response.status_code)
def test_cached_response_auth_can_fail(self):
auth_fail_cached = requests.get('https://httpbin.org/basic-auth/user/passwd', auth=('user', 'passwdzzz'))
self.assertNotEqual(self.unmolested_response.status_code, auth_fail_cached.status_code)
class TestRequestsPost(unittest.TestCase):
def setUp(self):
payload = {'key1': 'value1', 'key2': 'value2'}
self.unmolested_response = requests.post('http://httpbin.org/post', payload)
with vcr.use_cassette(TEST_CASSETTE_FILE):
self.initial_response = requests.post('http://httpbin.org/post', payload)
self.cached_response = requests.post('http://httpbin.org/post', payload)
def tearDown(self):
try:
os.remove(TEST_CASSETTE_FILE)
except OSError:
pass
def test_initial_post_response_text(self):
self.assertEqual(self.unmolested_response.text, self.initial_response.text)
def test_cached_post_response_text(self):
self.assertEqual(self.unmolested_response.text, self.cached_response.text)
class TestRequestsHTTPS(unittest.TestCase):
maxDiff = None
def setUp(self):
self.unmolested_response = requests.get('https://httpbin.org/get')
with vcr.use_cassette(TEST_CASSETTE_FILE):
self.initial_response = requests.get('https://httpbin.org/get')
self.cached_response = requests.get('https://httpbin.org/get')
def tearDown(self):
try:
os.remove(TEST_CASSETTE_FILE)
except OSError:
pass
def test_initial_https_response_text(self):
self.assertEqual(self.unmolested_response.text, self.initial_response.text)
def test_cached_https_response_text(self):
self.assertEqual(self.unmolested_response.text, self.cached_response.text)
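# A sketch of the record/replay flow the setUp methods above rely on: the
# first request inside the cassette context is recorded, later identical
# requests are replayed from the cassette (function name is illustrative):
def fetch_homepage_twice():
    with vcr.use_cassette(TEST_CASSETTE_FILE):
        first = requests.get('http://httpbin.org/')
        second = requests.get('http://httpbin.org/')  # served from the cassette
    return first, second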
| 37.572727
| 121
| 0.709412
| 502
| 4,133
| 5.603586
| 0.125498
| 0.076786
| 0.132954
| 0.098116
| 0.873445
| 0.84856
| 0.803413
| 0.795236
| 0.733736
| 0.733736
| 0
| 0.001471
| 0.177595
| 4,133
| 109
| 122
| 37.917431
| 0.826125
| 0.002903
| 0
| 0.538462
| 0
| 0
| 0.12066
| 0.005584
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.269231
| false
| 0.102564
| 0.051282
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
2e4ce02ba83abe7b1d661489dcf0a33228a2cf30
| 8,373
|
py
|
Python
|
game_jam/Source/main.py
|
AustinLittle2020/CS-Hangout-Game_Jam
|
44216ad0166023e1b45b4b00c83098fce1cb8264
|
[
"MIT"
] | null | null | null |
game_jam/Source/main.py
|
AustinLittle2020/CS-Hangout-Game_Jam
|
44216ad0166023e1b45b4b00c83098fce1cb8264
|
[
"MIT"
] | 1
|
2021-12-20T07:19:45.000Z
|
2021-12-20T07:19:45.000Z
|
game_jam/Source/main.py
|
AustinLittle2020/CS-Hangout-Game_Jam
|
44216ad0166023e1b45b4b00c83098fce1cb8264
|
[
"MIT"
] | 1
|
2021-12-20T07:15:15.000Z
|
2021-12-20T07:15:15.000Z
|
import pygame
import os
import time
from spaceship import *
from pygame.locals import *
# constants
WIDTH, HEIGHT = 700, 800
WINDOW = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Failure is Inevitable")
BLACK = (0, 0, 0)
FPS = 60
VEL = 4
p_x = 330
p_y = 650
width = 50
height = 50
movement = False
# player parameters
class Player(pygame.sprite.Sprite):
def __init__(self):
super().__init__()
self.sprite = [pygame.image.load(
os.path.join("game_jam", "Assets",
'Spaceship', 'ship', 'ship3.png')),
pygame.image.load(
os.path.join("game_jam", "Assets",
'Spaceship', 'ship', 'ship4.png')),
pygame.image.load(
os.path.join("game_jam", "Assets",
'Spaceship', 'ship', 'ship5.png')),
pygame.image.load(
os.path.join("game_jam", "Assets",
'Spaceship', 'ship', 'ship6.png')),
pygame.image.load(
os.path.join("game_jam", "Assets",
'Spaceship', 'ship', 'ship7.png')),
pygame.image.load(
os.path.join("game_jam", "Assets",
'Spaceship', 'ship', 'ship8.png')),
pygame.image.load(
os.path.join("game_jam", "Assets",
'Spaceship', 'ship', 'ship9.png')),
pygame.image.load(
os.path.join("game_jam", "Assets",
'Spaceship', 'ship', 'ship10.png')),
pygame.image.load(
os.path.join("game_jam", "Assets",
'Spaceship', 'ship', 'ship1.png')),
pygame.image.load(
os.path.join("game_jam", "Assets",
'Spaceship', 'ship', 'ship2.png'))]
self.current_sprite = 0
self.image = self.sprite[self.current_sprite]
self.rect = self.image.get_rect(center=(WIDTH/2, HEIGHT-100))
def update(self):
self.current_sprite += 0.3
if self.current_sprite >= len(self.sprite):
self.current_sprite = 0
self.image = self.sprite[int(self.current_sprite)]
self.rect.center = pygame.mouse.get_pos()
def create_bullet(self):
return Bullet(pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1])
# bullet parameters
class Bullet(pygame.sprite.Sprite):
def __init__(self, pos_x, pos_y):
super().__init__()
self.image = pygame.image.load(
os.path.join('game_jam', 'Assets',
'Laser Animations', 'laser1.png'))
self.rect = self.image.get_rect(center=(pos_x, pos_y))
def update(self):
self.rect.y -= 5
if self.rect.y <= 0:
self.kill()
# player and bullet groups
player = Player()
player_group = pygame.sprite.Group()
player_group.add(player)
pygame.mouse.set_visible(False)
bullet_group = pygame.sprite.Group()
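# The ten ship frames above are loaded one explicit call at a time. A sketch
# of the same list built with a comprehension, preserving the original frame
# order (ship3..ship10, then ship1 and ship2); `ship_frames` is illustrative:
ship_frames = [
    pygame.image.load(os.path.join("game_jam", "Assets", "Spaceship", "ship",
                                   "ship{}.png".format(i)))
    for i in list(range(3, 11)) + [1, 2]
]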
# main function
def main():
# background image
BACKGROUND = pygame.transform.scale(pygame.image.load(os.path.join(
'game_jam', 'Assets', 'Background', 'Galaxy_bg',
'Purple_Nebula', 'PN1.png')), (WIDTH, HEIGHT)).convert()
# variables
clock = pygame.time.Clock()
run = True
y = 0
    # music
    # Initializes mixer
    pygame.mixer.init()
    # Grabs sound file
    pygame.mixer.music.load(os.path.join(
        'game_jam', 'Assets', 'Sounds', 'spaceship_music', 'Far-Out_OST', 'OST', 'Far-Out-Hurry_Up.wav'))
    # Plays music indefinitely
    pygame.mixer.music.play(-1)
    # Sets music volume
    pygame.mixer.music.set_volume(0.3)
# run
while run:
clock.tick(FPS)
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
run = False
if event.type == pygame.MOUSEBUTTONDOWN:
bullet_group.add(player.create_bullet())
        # update the background for the scrolling effect
WINDOW.fill(BLACK)
rel_y = y % BACKGROUND.get_rect().height
WINDOW.blit(BACKGROUND, (0, rel_y - BACKGROUND.get_rect().height))
if rel_y < HEIGHT:
WINDOW.blit(BACKGROUND, (0, rel_y))
y += 1
# update screen
bullet_group.draw(WINDOW)
bullet_group.update()
player_group.draw(WINDOW)
player_group.update()
pygame.display.update()
# starts main function
if __name__ == "__main__":
main()
main.py
| 30.227437
| 105
| 0.54413
| 969
| 8,373
| 4.560372
| 0.150671
| 0.035302
| 0.058837
| 0.082372
| 0.879384
| 0.847929
| 0.842272
| 0.816022
| 0.802897
| 0.802897
| 0
| 0.018274
| 0.313747
| 8,373
| 276
| 106
| 30.336957
| 0.750783
| 0.045981
| 0
| 0.853535
| 0
| 0
| 0.133978
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060606
| false
| 0
| 0.025253
| 0.010101
| 0.116162
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2e84df5100eb34e590dde3692727724451d660e6
| 186
|
py
|
Python
|
congregation/codegen/python/libs/external/__init__.py
|
CCD-HRI/congregation
|
a552856b03a64a4295792184107c4e529ca3f4ae
|
[
"MIT"
] | 3
|
2020-10-05T16:30:15.000Z
|
2021-01-22T13:38:02.000Z
|
congregation/codegen/python/libs/external/__init__.py
|
multiparty/congregation
|
a552856b03a64a4295792184107c4e529ca3f4ae
|
[
"MIT"
] | null | null | null |
congregation/codegen/python/libs/external/__init__.py
|
multiparty/congregation
|
a552856b03a64a4295792184107c4e529ca3f4ae
|
[
"MIT"
] | 1
|
2021-08-13T07:28:30.000Z
|
2021-08-13T07:28:30.000Z
|
from congregation.codegen.python.libs.external.unary import *
from congregation.codegen.python.libs.external.binary import *
from congregation.codegen.python.libs.external.nary import *
| 46.5
| 62
| 0.83871
| 24
| 186
| 6.5
| 0.416667
| 0.307692
| 0.442308
| 0.557692
| 0.865385
| 0.865385
| 0.602564
| 0
| 0
| 0
| 0
| 0
| 0.064516
| 186
| 3
| 63
| 62
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 9
|
5cfb44436bfafdbb15a5fdebd26adcefad3628c4
| 1,809
|
py
|
Python
|
Ago-Dic-2021/valera-rangel-pablo/Practica 3/test_calculator_pytest.py
|
AnhellO/DAS_Sistemas
|
07b4eca78357d02d225d570033d05748d91383e3
|
[
"MIT"
] | 41
|
2017-09-26T09:36:32.000Z
|
2022-03-19T18:05:25.000Z
|
Ago-Dic-2021/valera-rangel-pablo/Practica 3/test_calculator_pytest.py
|
AnhellO/DAS_Sistemas
|
07b4eca78357d02d225d570033d05748d91383e3
|
[
"MIT"
] | 67
|
2017-09-11T05:06:12.000Z
|
2022-02-14T04:44:04.000Z
|
Ago-Dic-2021/valera-rangel-pablo/Practica 3/test_calculator_pytest.py
|
AnhellO/DAS_Sistemas
|
07b4eca78357d02d225d570033d05748d91383e3
|
[
"MIT"
] | 210
|
2017-09-01T00:10:08.000Z
|
2022-03-19T18:05:12.000Z
|
import pytest
from calculator import *
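# Each test below drives one Calculator operation through pytest.mark.parametrize
# with a single (input_a, input_b, expected_result) case; Calculator is assumed
# to come from the local calculator module imported above.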
@pytest.mark.parametrize("input_a, input_b, expected_result", [
(-5, 2, 'Imposible Raiz de un Negativo')
])
def testRaizDeNegativo(input_a, input_b, expected_result):
assert Calculator(input_a, input_b).raiz() == expected_result
@pytest.mark.parametrize("input_a, input_b, expected_result", [
(5, 0, 'ZeroDivisionError: division by zero')
])
def testSobreCero(input_a, input_b, expected_result):
assert Calculator(input_a, input_b).division() == expected_result
@pytest.mark.parametrize("input_a, input_b, expected_result", [
(100, 54, 154)
])
def testSumaDosNumeros(input_a, input_b, expected_result):
assert Calculator(input_a, input_b).suma() == expected_result
@pytest.mark.parametrize("input_a, input_b, expected_result", [
(150, 75, 75)
])
def testRestaDosNumeros(input_a, input_b, expected_result):
assert Calculator(input_a, input_b).resta() == expected_result
@pytest.mark.parametrize("input_a, input_b, expected_result", [
(5, 2, 25)
])
def testPotencia(input_a, input_b, expected_result):
assert Calculator(input_a, input_b).potencia() == expected_result
@pytest.mark.parametrize("input_a, input_b, expected_result", [
(100, 0, 'Sin Definir')
])
def testRaizCero(input_a, input_b, expected_result):
assert Calculator(input_a, input_b).raiz() == expected_result
@pytest.mark.parametrize("input_a, input_b, expected_result", [
(10, 0, 1)
])
def testPotenciaALaCero(input_a, input_b, expected_result):
assert Calculator(input_a, input_b).potencia() == expected_result
@pytest.mark.parametrize("input_a, input_b, expected_result", [
(100, 23, 2300)
])
def testMultiplicacion(input_a, input_b, expected_result):
assert Calculator(input_a, input_b).multiplicacion() == expected_result
| 30.661017
| 75
| 0.739635
| 245
| 1,809
| 5.167347
| 0.2
| 0.113744
| 0.208531
| 0.227488
| 0.745656
| 0.745656
| 0.745656
| 0.745656
| 0.745656
| 0.745656
| 0
| 0.025332
| 0.127142
| 1,809
| 58
| 76
| 31.189655
| 0.776441
| 0
| 0
| 0.47619
| 0
| 0
| 0.187396
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 1
| 0.190476
| false
| 0
| 0.047619
| 0
| 0.238095
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cf372286c3b00f6d57b36a97cb015d54cb8dfc38
| 28,542
|
py
|
Python
|
IndoorPositionEstimator/cflib/drone_quaternion.py
|
capriele/Crazyflie-Indoor-Position-Logger-Controller
|
6f7a44984553d85a66a29c169a2f7c758a2aaac7
|
[
"Apache-2.0"
] | 6
|
2017-04-23T15:47:57.000Z
|
2020-03-15T17:52:15.000Z
|
IndoorPositionEstimator/cflib/drone_quaternion.py
|
capriele/Crazyflie-Indoor-Position-Logger-Controller
|
6f7a44984553d85a66a29c169a2f7c758a2aaac7
|
[
"Apache-2.0"
] | null | null | null |
IndoorPositionEstimator/cflib/drone_quaternion.py
|
capriele/Crazyflie-Indoor-Position-Logger-Controller
|
6f7a44984553d85a66a29c169a2f7c758a2aaac7
|
[
"Apache-2.0"
] | null | null | null |
"""
Quadcopter Model + LQR Control + BackStepping Control
"""
#
# Author: Alberto Petrucci (petrucci.alberto@gmail.com) 2017
#
#__author__ = "Alberto Petrucci"
#__copyright__ = "Copyright 2017, Alberto Petrucci"
#__credits__ = ["Alberto Petrucci"]
#__license__ = "Apache"
#__version__ = "1.0.0"
#__maintainer__ = "Alberto Petrucci"
#__email__ = "petrucci.alberto@gmail.com"
#__status__ = "Production"
from __future__ import division
from numpy import *
from math import *
from control import *
class Quadcopter:
def __init__(self, dt):
        ## Environment parameters
self.g = 9.81
self.airFriction = 0
self.dt = dt
self.t = 0
        ## Drone parameters
        self.m = 27/1000  # drone mass in kg (27 g)
        self.d = (65.0538/1000)*sin(pi/4)  # distance from the center to the motors
        self.c = 0.1  # propeller inertia
self.alpha = 1
self.Ix = self.m * self.d * self.d
self.Iy = self.m * self.d * self.d
self.Iz = 2 * self.m * self.d * self.d
        # Changing these parameters assigns higher or lower priorities
self.beta1 = 0.3
self.beta2 = 0.3
self.beta3x = 0.2#1.0
self.beta3y = 0.2#1.0
self.beta3z = 0.2#0.5
self.beta3x = 5.0#5.0
self.beta3y = 5.0#5.0
self.beta3z = 1.0#1.0
self.beta4 = 0.2
self.beta = 500
#self.beta = 3000
self.thrustGain = 1
#self.thrustGain = 1.34
#self.thrustGain = 1.37
self.Tf = dt
self.Mat_J = matrix([
[self.m*self.d*self.d, 0, 0],
[0, self.m*self.d*self.d, 0],
[0, 0, 2*self.m*self.d*self.d]
])
self.Mat_Jinv = self.Mat_J.I
self.Mat_T = matrix([
[1, 1, 1, 1],
[-self.d, -self.d, self.d, self.d],
[self.d, -self.d, -self.d, self.d],
[self.c, -self.c, self.c, -self.c]
])
self.Mat_Tinv = self.Mat_T.I
        ## Linearized model
self.A = matrix([
[0, 0, 0, 0, 0, 0, -0.5*sqrt(1-self.alpha*self.alpha), 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0.5*self.alpha, -0.5*sqrt(1-self.alpha*self.alpha), 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0.5*sqrt(1-self.alpha*self.alpha), 0.5*self.alpha, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0.5*self.alpha, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 2*self.g*sqrt(1-self.alpha*self.alpha), 2*self.g*self.alpha, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, -2*self.g*self.alpha, 2*self.g*sqrt(1-self.alpha*self.alpha), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
])
self.B = matrix([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[1/self.m, 0, 0, 0],
])
self.C = eye(13)
self.D = zeros((13, 4))
        ## MOTOR SATURATION
        self.fmotmax = 0.5886/4  # max force generated by the motors
self.q_bar = matrix([
[self.alpha],
[0],
[0],
[sqrt(1 - self.alpha*self.alpha)]
])
self.omega_bar = zeros((3, 1))
self.p_bar = matrix([
[0],
[0],
[1]
])
self.v_bar = zeros((3, 1))
self.ftot_bar = self.m * self.g
self.tau_bar = matrix([
[0],
[0],
[0]
])
self.x_bar = vstack((self.q_bar, self.omega_bar, self.p_bar, self.v_bar))
self.u_bar = vstack((self.ftot_bar, self.tau_bar))
self.u = matrix([
[0],
[0],
[0],
[0]
])
self.Qm = matrix([
[self.beta1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, self.beta1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, self.beta1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, self.beta1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, self.beta2, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, self.beta2, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, self.beta2, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, self.beta3x, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, self.beta3y, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, self.beta3z, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, self.beta4, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, self.beta4, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, self.beta4],
])
self.R = self.beta * eye(4)
## LQR
self.Amm = matrix([
[0, 0, 0, 0.5, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0.5, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0.5, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0],
[0, 19.62, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[-19.62, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
])
self.Bmm = matrix([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 8.7393e+03, 0, 0],
[0, 0, 8.7393e+03, 0],
[0, 0, 0, 4.3696e+03],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[37.0370, 0, 0, 0]
])
self.Cmm = matrix([
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
])
self.Qmm = matrix([
[self.beta1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, self.beta1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, self.beta1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, self.beta2, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, self.beta2, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, self.beta2, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, self.beta3x, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, self.beta3y, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, self.beta3z, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, self.beta4, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, self.beta4, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, self.beta4]
])
self.Ut = matrix([
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
])
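        # Solve the continuous-time LQR problem for the reduced 12-state model;
        # Ut drops the first quaternion component so that K_LQR acts on the full 13-element state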
[self.Km, self.Pm, self.em] = lqr(self.Amm, self.Bmm, self.Qmm, self.R)
self.K_LQR = self.Km*self.Ut
        '''
        # print the LQR gains for C
        for k in range(0, 4):
            string = ""
            for i in range(0, 13):
                string += str(self.K_LQR.item((k, i)))+", "
            # remove the last two characters
            string = string[:-2]
            print("{"+string+"},")
        '''
        # State
self.q = matrix([
[self.alpha], [0], [0], [sqrt(1-self.alpha*self.alpha)]
])
self.omega = matrix([
[0], [0], [0]
])
self.p = matrix([
[0], [0], [0]
])
self.v = matrix([
[0], [0], [0]
])
self.x = vstack((
self.q,
self.omega,
self.p,
self.v
))
self.setPoint = self.x
        # Observer variables (state reconstruction)
self.x_hat = self.x
        # measured variables (quaternions + positions)
self.y = matrix([
[0], [0], [0], [0], [0], [0], [0]
])
        # Variables for the backstepping controller
self.backsteppingSetPoint = matrix([
# Roll
[0, 0, 0],
# Pitch
[0, 0, 0],
# Yaw
[0, 0, 0],
# X
[0, 0, 0],
# Y
[0, 0, 0],
# Z
[0, 0, 0],
])
def setSetPoint(self, q0, q1, q2, q3, omegax, omegay, omegaz, px, py, pz, vx, vy, vz):
self.setPoint = matrix([
[q0],
[q1],
[q2],
[q3],
[omegax],
[omegay],
[omegaz],
[px],
[py],
[pz],
[vx],
[vy],
[vz],
])
def setBacksteppingSetPoint(self, xd):
self.backsteppingSetPoint = xd
def setState(self, q0, q1, q2, q3, omegax, omegay, omegaz, px, py, pz, vx, vy, vz):
self.q = matrix([
[q0], [q1], [q2], [q3]
])
self.q = self.q/linalg.norm(self.q)
deg2rad = pi/180.0
self.omega = matrix([
[omegax*deg2rad], [omegay*deg2rad], [omegaz*deg2rad]
])
self.p = matrix([
[px], [py], [pz]
])
self.v = matrix([
[vx], [vy], [vz]
])
'''
        # Update the measured variables
self.y = matrix([
[q0], [q1], [q2], [q3], [px], [py], [pz]
])
        # Update the observer
self.update_observer()
        # Update the state (measured + estimated)
self.x = vstack((
self.q,
[self.x_hat[4, 0]*deg2rad],
[self.x_hat[5, 0]*deg2rad],
[self.x_hat[6, 0]*deg2rad],
self.p,
[self.x_hat[10, 0]],
[self.x_hat[11, 0]],
[self.x_hat[12, 0]]
))
'''
        # Case where everything is measured directly (it is slow => better to estimate)
self.x = vstack((
self.q,
self.omega,
self.p,
self.v
))
def update(self):
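        # LQR state feedback: map the deviation of the state from the set point
        # through the precomputed gain, around the hover input u_bar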
self.u = self.u_bar - self.K_LQR * (self.x - self.setPoint)
        # Compute the forces f1 f2 f3 f4
f = self.Mat_Tinv*self.u
        # Apply the saturation
for i in range(0, 4):
if f[i, 0] > self.fmotmax:
f[i, 0] = self.fmotmax
if f[i, 0] < 0:
f[i, 0] = 0
        # Compute the saturated input
self.u = self.Mat_T*f
#self.predict(self.u)
def backstepping2(self):
# Current State
x1 = self.q[0, 0] # wq3
x2 = self.q[1, 0] # wq3
x3 = self.q[2, 0] # wq3
x4 = self.q[3, 0] # wq3
# Angular Speeds
x5 = self.omega[0, 0] # wx
x6 = self.omega[1, 0] # wy
x7 = self.omega[2, 0] # wz
# Positions
x8 = self.p[0, 0] # x
x9 = self.p[1, 0] # y
x10 = self.p[2, 0] # z
# Speeds
x11 = self.v[0, 0] # vx
x12 = self.v[1, 0] # vy
x13 = self.v[2, 0] # vz
        # contains the reference + its 1st and 2nd derivatives
xd = self.backsteppingSetPoint
        print(matrix([
            [xd[3, 0], xd[4, 0], xd[5, 0]],
            [x8, x9, x10],
        ]))
# Z
c10 = 8
c13 = 3
e10 = xd[5, 0] - x10
e13 = x13 - xd[5, 1] - c10 * e10
u1 = self.m * (self.g + e10 + xd[5, 2] - c13 * e13 + c10 * (xd[5, 1] - x13)) / (x1*x1 - x2*x2 - x3*x3 + x4*x4)
if u1 != 0:
# X
c8 = 8#8
c11 = 4#4
e8 = xd[3, 0] - x8
e11 = x11 - xd[3, 1] - c8 * e8
Ux = self.m * (e8 + xd[3, 2] - c11 * e11 + c8 * (xd[3, 1] - x11)) / (2*u1)
# Y
c9 = 8#8
c12 = 4#4
e9 = xd[4, 0] - x9
e12 = x12 - xd[4, 1] - c9 * e9
Uy = self.m * (e9 + xd[4, 2] - c12 * e12 + c9 * (xd[4, 1] - x12)) / (2*u1)
else:
Ux = 0
Uy = 0
# Desired Quaternion
qd = matrix([
[1],
[-(Uy-x3*x4)/x1],
[(Ux-x2*x4)/x1],
[xd[2, 0]],
])
qd = qd / linalg.norm(qd)
# Compute quaternion error
q = matrix([
[x1],
[-x2],
[-x3],
[-x4]
])
qe = self.quaternionProduct(q, qd)
w = matrix([
[0],
[-x5],
[-x6],
[-x7]
])
norm_w = linalg.norm(w)
if norm_w != 0:
w = w / norm_w
we = self.quaternionProduct(w, qe)
c4 = 20
c44 = 10
e4 = qe[3, 0]
e44 = 0.5 * (-x3 * x5 + x2 * x6 + x1 * x7) - c4 * e4
xd4d = we[3, 0]
c3 = 60
c33 = 60
e3 = qe[2, 0]
e33 = 0.5 * (x4 * x5 + x1 * x6 - x2 * x7) - c3 * e3
xd3d = we[2, 0]
c2 = 60
c22 = 60
e2 = qe[1, 0]
e22 = 0.5 * (x1 * x5 - x4 * x6 + x3 * x7) - c2 * e2
xd2d = we[1, 0]
x1_2 = x1 * x1
x2_2 = x2 * x2
x3_2 = x3 * x3
x4_2 = x4 * x4
x5_2 = x5 * x5
x6_2 = x6 * x6
x7_2 = x7 * x7
x1_3 = x1_2 * x1
x2_3 = x2_2 * x2
x3_3 = x3_2 * x3
x4_3 = x4_2 * x4
div = x1 * (x1_2 + x2_2 + x3_2 + x4_2)
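        # NOTE: self.s is never assigned in this class; it is presumably a gain
        # that must be set on the instance before backstepping2() is called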
mult = self.s * self.d * self.m
u2 = 0
u3 = 0
u4 = 0
if div != 0:
u4 = (mult * (x4_3 * x6_2 - x4_3 * x5_2 + x4_3 * x7_2 + 4 * e4 * x1_2 + 4 * e4 * x4_2 - 2 * c4 * x1_3 * x7 + 4 * c4 * x1_2 * xd4d + 4 * c4 * x4_2 * xd4d - 2 * x1_3 * x5 * x6 - x1_2 * x4 * x5_2 + x1_2 * x4 * x6_2 + x2_2 * x4 * x5_2 + x1_2 * x4 * x7_2 + x2_2 * x4 * x6_2 + x3_2 * x4 * x5_2 + x2_2 * x4 * x7_2 + x3_2 * x4 * x6_2 + x3_2 * x4 * x7_2 + 4 * e2 * x1 * x3 - 4 * e3 * x1 * x2 + 4 * e2 * x2 * x4 + 4 * e3 * x3 * x4 - 4 * c44 * e44 * x1_2 - 4 * c44 * e44 * x4_2 - 4 * c22 * e22 * x1 * x3 - 4 * c22 * e22 * x2 * x4 + 4 * c33 * e33 * x1 * x2 - 4 * c33 * e33 * x3 * x4 + 4 * c2 * x1 * x3 * xd2d - 4 * c3 * x1 * x2 * xd3d + 4 * c2 * x2 * x4 * xd2d + 4 * c3 * x3 * x4 * xd3d - 2 * c2 * x1_2 * x3 * x5 + 2 * c3 * x1_2 * x2 * x6 - 2 * c2 * x1 * x3_2 * x7 - 2 * c3 * x1 * x2_2 * x7 - 2 * c4 * x1_2 * x2 * x6 + 2 * c4 * x1_2 * x3 * x5 + 2 * c2 * x2 * x4_2 * x6 - 2 * c3 * x3 * x4_2 * x5 - 2 * c4 * x1 * x4_2 * x7 - 2 * c4 * x2 * x4_2 * x6 + 2 * c4 * x3 * x4_2 * x5 + 2 * x1_2 * x2 * x5 * x7 - 2 * x1 * x4_2 * x5 * x6 + 2 * x2 * x4_2 * x5 * x7 - 2 * c2 * x1 * x2 * x4 * x5 + 2 * c3 * x1 * x2 * x4 * x5 + 2 * c2 * x1 * x3 * x4 * x6 - 2 * c3 * x1 * x3 * x4 * x6 - 2 * c2 * x2 * x3 * x4 * x7 + 2 * c3 * x2 * x3 * x4 * x7)) / div
u3 = (mult * (x3_3 * x5_2 + x3_3 * x6_2 + x3_3 * x7_2 + 4 * e3 * x1_2 + 4 * e3 * x3_2 - 2 * c3 * x1_3 * x6 + 4 * c3 * x1_2 * xd3d + 4 * c3 * x3_2 * xd3d - 2 * x1_3 * x5 * x7 + x1_2 * x3 * x5_2 + x1_2 * x3 * x6_2 + x2_2 * x3 * x5_2 + x1_2 * x3 * x7_2 + x2_2 * x3 * x6_2 - x3 * x4_2 * x5_2 + x2_2 * x3 * x7_2 + x3 * x4_2 * x6_2 + x3 * x4_2 * x7_2 - 4 * e2 * x1 * x4 + 4 * e2 * x2 * x3 + 4 * e4 * x1 * x2 + 4 * e4 * x3 * x4 - 4 * c33 * e33 * x1_2 - 4 * c33 * e33 * x3_2 + 4 * c22 * e22 * x1 * x4 - 4 * c22 * e22 * x2 * x3 - 4 * c44 * e44 * x1 * x2 - 4 * c44 * e44 * x3 * x4 - 4 * c2 * x1 * x4 * xd2d + 4 * c2 * x2 * x3 * xd2d + 4 * c4 * x1 * x2 * xd4d + 4 * c4 * x3 * x4 * xd4d + 2 * c2 * x1_2 * x4 * x5 - 2 * c2 * x1 * x4_2 * x6 - 2 * c3 * x1 * x3_2 * x6 + 2 * c3 * x1_2 * x2 * x7 - 2 * c3 * x1_2 * x4 * x5 - 2 * c4 * x1 * x2_2 * x6 - 2 * c2 * x2 * x3_2 * x7 - 2 * c4 * x1_2 * x2 * x7 + 2 * c3 * x2 * x3_2 * x7 - 2 * c3 * x3_2 * x4 * x5 + 2 * c4 * x3_2 * x4 * x5 - 2 * x1 * x2 * x4 * x5_2 - 2 * x1_2 * x2 * x5 * x6 - 2 * x1 * x3_2 * x5 * x7 - 2 * x1 * x4_2 * x5 * x7 - 2 * c2 * x1 * x2 * x3 * x5 + 2 * c4 * x1 * x2 * x3 * x5 + 2 * c2 * x1 * x3 * x4 * x7 + 2 * c2 * x2 * x3 * x4 * x6 - 2 * c4 * x1 * x3 * x4 * x7 - 2 * c4 * x2 * x3 * x4 * x6 - 2 * x1 * x3 * x4 * x5 * x6 + 2 * x2 * x3 * x4 * x5 * x7)) / (2 * div)
u2 = (mult * (x2_3 * x5_2 + x2_3 * x6_2 + x2_3 * x7_2 + 4 * e2 * x1_2 + 4 * e2 * x2_2 - 2 * c2 * x1_3 * x5 + 4 * c2 * x1_2 * xd2d + 4 * c2 * x2_2 * xd2d + 2 * x1_3 * x6 * x7 + x1_2 * x2 * x5_2 + x1_2 * x2 * x6_2 + x2 * x3_2 * x5_2 + x1_2 * x2 * x7_2 + x2 * x3_2 * x6_2 - x2 * x4_2 * x5_2 + x2 * x3_2 * x7_2 + x2 * x4_2 * x6_2 + x2 * x4_2 * x7_2 + 4 * e3 * x1 * x4 + 4 * e3 * x2 * x3 - 4 * e4 * x1 * x3 + 4 * e4 * x2 * x4 - 4 * c22 * e22 * x1_2 - 4 * c22 * e22 * x2_2 - 4 * c33 * e33 * x1 * x4 - 4 * c33 * e33 * x2 * x3 + 4 * c44 * e44 * x1 * x3 - 4 * c44 * e44 * x2 * x4 + 4 * c3 * x1 * x4 * xd3d + 4 * c3 * x2 * x3 * xd3d - 4 * c4 * x1 * x3 * xd4d + 4 * c4 * x2 * x4 * xd4d - 2 * c2 * x1 * x2_2 * x5 - 2 * c2 * x1_2 * x3 * x7 + 2 * c2 * x1_2 * x4 * x6 - 2 * c3 * x1 * x4_2 * x5 - 2 * c4 * x1 * x3_2 * x5 - 2 * c2 * x2_2 * x3 * x7 + 2 * c2 * x2_2 * x4 * x6 - 2 * c3 * x1_2 * x4 * x6 + 2 * c3 * x2_2 * x3 * x7 + 2 * c4 * x1_2 * x3 * x7 - 2 * c4 * x2_2 * x4 * x6 + 2 * x1 * x3 * x4 * x5_2 + 2 * x1_2 * x3 * x5 * x6 + 2 * x1 * x2_2 * x6 * x7 + 2 * x1 * x3_2 * x6 * x7 + 2 * x1 * x4_2 * x6 * x7 + 2 * x2_2 * x4 * x5 * x7 - 2 * c3 * x1 * x2 * x3 * x6 + 2 * c4 * x1 * x2 * x3 * x6 + 2 * c3 * x1 * x2 * x4 * x7 - 2 * c3 * x2 * x3 * x4 * x5 - 2 * c4 * x1 * x2 * x4 * x7 + 2 * c4 * x2 * x3 * x4 * x5 - 2 * x1 * x2 * x3 * x5 * x7 - 2 * x1 * x2 * x4 * x5 * x6)) / (2 * div)
self.u = matrix([
[abs(u1)],
[u2],
[u3],
[u4]
])
def update_observer(self):
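        # Integrate the observer dynamics forward by one time step dt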
x_hat_dot = self.observer_function(self.x_hat)
        # Euler
# self.x_hat = self.x_hat + x_hat_dot*self.dt
# Runge Kutta 4
m1 = x_hat_dot
k1 = self.x_hat + m1 * self.dt
m2 = self.observer_function(k1)
k2 = self.x_hat + (m1 + m2) * self.dt / 4
m3 = self.observer_function(k2)
self.x_hat = self.x_hat + (m1 + m2 + 4 * m3) * (self.dt / 6)
def observer_function(self, x_hat):
x1 = x_hat[0, 0]
x2 = x_hat[1, 0]
x3 = x_hat[2, 0]
x4 = x_hat[3, 0]
x5 = x_hat[4, 0]
x6 = x_hat[5, 0]
x7 = x_hat[6, 0]
x8 = x_hat[7, 0]
x9 = x_hat[8, 0]
x10 = x_hat[9, 0]
x11 = x_hat[10, 0]
x12 = x_hat[11, 0]
x13 = x_hat[12, 0]
        # State function
F = matrix([
[-(x2 * x5 + x3 * x6 + x4 * x7) / 2],
[(x1 * x5 - x4 * x6 + x3 * x7) / 2],
[(x4 * x5 + x1 * x6 - x2 * x7) / 2],
[(-x3 * x5 + x2 * x6 + x1 * x7) / 2],
[-x6 * x7],
[x5 * x7],
[0],
[x11],
[x12],
[x13],
[0],
[0],
[-self.g]
])
        # Input function
G = matrix([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1/(self.m*self.d*self.d), 0, 0],
[0, 0, 1/(self.m*self.d*self.d), 0],
[0, 0, 0, 1/(2*self.m*self.d*self.d)],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[(2*x2*x4+2*x1*x3)/self.m, 0, 0, 0],
[(2*x3*x4-2*x1*x2)/self.m, 0, 0, 0],
[(x1*x1-x2*x2-x3*x3+x4*x4)/self.m, 0, 0, 0],
])
        # Measurement function
H = matrix([
[x1],
[x2],
[x3],
[x4],
[x8],
[x9],
[x10]
])
        # Inverse of Q
Qinv = matrix([
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[-(x1*x1*x5 + x2*x2*x5 - x1*x3*x7 + x1*x4*x6 + x2*x3*x6 + x2*x4*x7)/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), (x1*x3*x6 + x1*x4*x7 + x2*x3*x7 - x2*x4*x6)/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), (2*(x1*x1 + x2*x2))/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), -(x7*x1*x1 + x3*x5*x1 + x7*x2*x2 - x4*x5*x2)/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), (2*(x1*x4 + x2*x3))/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), (18*x1*x3 - 18*x2*x4 + x1*x1*x6 + x2*x2*x6 - x1*x4*x5 - x2*x3*x5)/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), -(2*(x1*x3 - x2*x4))/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), 0, 0, 0, 0, 0, 0],
[-(x1*x1*x6 + x3*x3*x6 + x1*x2*x7 - x1*x4*x5 + x2*x3*x5 + x3*x4*x7)/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), (x7*x1*x1 - x2*x6*x1 + x7*x3*x3 - x4*x6*x3)/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), -(2*(x1*x4 - x2*x3))/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), (x1*x2*x5 + x1*x4*x7 - x2*x3*x7 + x3*x4*x5)/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), (2*(x1*x1 + x3*x3))/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), -(18*x1*x2 + 18*x3*x4 + x1*x1*x5 + x3*x3*x5 + x1*x4*x6 - x2*x3*x6)/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), (2*(x1*x2 + x3*x4))/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), 0, 0, 0, 0, 0, 0],
[-(x1*x1*x7 + x4*x4*x7 - x1*x2*x6 + x1*x3*x5 + x2*x4*x5 + x3*x4*x6)/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), -(x6*x1*x1 + x2*x7*x1 + x6*x4*x4 - x3*x7*x4)/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), (2*(x1*x3 + x2*x4))/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), (x5*x1*x1 - x3*x7*x1 + x5*x4*x4 - x2*x7*x4)/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), -(2*(x1*x2 - x3*x4))/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), -(18*x1*x1 + 18*x4*x4 - x1*x2*x5 - x1*x3*x6 - x2*x4*x6 + x3*x4*x5)/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), (2*(x1*x1 + x4*x4))/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
])
        # Gains for convergence
K = matrix([
[100, 0, 0, 0, 0, 0, 0],
[0, 100, 0, 0, 0, 0, 0],
[0, 500, 0, 0, 0, 0, 0],
[0, 0, 100, 0, 0, 0, 0],
[0, 0, 500, 0, 0, 0, 0],
[0, 0, 0, 100, 0, 0, 0],
[0, 0, 0, 500, 0, 0, 0],
[0, 0, 0, 0, 100, 0, 0],
[0, 0, 0, 0, 10000, 0, 0],
[0, 0, 0, 0, 0, 100, 0],
[0, 0, 0, 0, 0, 10000, 0],
[0, 0, 0, 0, 0, 0, 100],
[0, 0, 0, 0, 0, 0, 10000]
])
        # Update the predicted state
x_hat_dot = F + G*self.u + Qinv*K*(self.y - H)
return x_hat_dot
def predict(self, u):
        # Evolve the system
F_b = matrix([
[0],
[0],
[u[0, 0]]
])
Mw = 0*matrix([
[0.1],
[-0.1],
[0.2]
])
Fv = 0*matrix([
[1],
[1],
[1]
])
Q = matrix([
[-self.q[1, 0], -self.q[2, 0], -self.q[3, 0]],
[self.q[0, 0], -self.q[3, 0], self.q[2, 0]],
[self.q[3, 0], self.q[0, 0], -self.q[1, 0]],
[-self.q[2, 0], self.q[1, 0], self.q[0, 0]]
])
        # Update the state
q_dot = 0.5 * Q * self.omega
self.q = self.q + q_dot * self.dt
self.q = self.q/linalg.norm(self.q)
U = matrix([
[u[1, 0]],
[u[2, 0]],
[u[3, 0]]
])
omega_dot = self.Mat_Jinv * (U - self.VectorialProduct(self.omega) * self.Mat_J * self.omega) + self.Mat_Jinv * Mw
self.omega = self.omega + omega_dot * self.dt
p_dot = self.v
self.p = self.p + p_dot * self.dt
R = self.quaternion2RotationMatrix()
G = matrix([
[0],
[0],
[self.g]
])
v_dot = (1 / self.m) * (R * F_b + Fv) - G - self.airFriction * linalg.norm(self.v) * self.v
self.v = self.v + v_dot * self.dt
self.x = vstack((
self.q,
self.omega,
self.p,
self.v
))
def getMotorInput(self):
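        # Scale the total thrust to the 16-bit motor range, clip the torque
        # commands, and mix everything into the four individual motor inputs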
scaleFactor = self.thrustGain * 65535.0 / (self.fmotmax * 4)
u = self.u
u[0, 0] = u[0, 0]*scaleFactor
u[1, 0] = (u[1, 0]/2.0)/self.d
u[2, 0] = (u[2, 0]/2.0)/self.d
u[3, 0] = 0/self.c
percentual = 1
if u[1, 0] < -65536 * percentual:
u[1, 0] = -65536 * percentual
elif u[1, 0] > 65536 * percentual:
u[1, 0] = 65536 * percentual
if u[2, 0] < -65536 * percentual:
u[2, 0] = -65536 * percentual
elif u[2, 0] > 65536 * percentual:
u[2, 0] = 65536 * percentual
if u[3, 0] < -65536 * percentual:
u[3, 0] = -65536 * percentual
elif u[3, 0] > 65536 * percentual:
u[3, 0] = 65536 * percentual
m1 = u[0, 0] - u[1, 0] + u[2, 0] + u[3, 0]
m2 = u[0, 0] - u[1, 0] - u[2, 0] - u[3, 0]
m3 = u[0, 0] + u[1, 0] - u[2, 0] + u[3, 0]
m4 = u[0, 0] + u[1, 0] + u[2, 0] - u[3, 0]
return m1, m2, m3, m4
def quaternionProduct(self, q, p):
"""
Compute the quaternion product q*p
:param self:
:param q:
:param p:
:return:
"""
Qq = matrix([
[q[0, 0], -q[1, 0], -q[2, 0], -q[3, 0]],
[q[1, 0], q[0, 0], -q[3, 0], q[2, 0]],
[q[2, 0], q[3, 0], q[0, 0], -q[1, 0]],
[q[3, 0], -q[2, 0], q[1, 0], q[0, 0]]
])
return Qq*p
def quaternion2RotationMatrix(self):
"""
        Generates the rotation matrix from the quaternions of the state
:return:
"""
q0 = self.q[0, 0]
q1 = self.q[1, 0]
q2 = self.q[2, 0]
q3 = self.q[3, 0]
R = matrix([
[1-2*(q2*q2+q3*q3), 2*(q1*q2-q0*q3), 2*(q0*q2+q1*q3)],
[2*(q1*q2+q0*q3), 1-2*(q1*q1+q3*q3), 2*(q2*q3-q0*q1)],
[2*(q1*q3-q0*q2), 2*(q0*q1+q2*q3), 1-2*(q1*q1+q2*q2)]
])
return R
def VectorialProduct(self, v):
"""
        Takes a three-element vector as input and generates
        the matrix that performs the cross product
:param v:
:return: M
"""
M = matrix([
[0, -v[2,0], v[1,0]],
[v[2,0], 0, -v[0,0]],
[-v[1,0], v[0,0], 0]
])
return M
def quaternion2RPY(self):
q = self.q
g = 2 * (q[0, 0]*q[2, 0] - q[1, 0]*q[3, 0])
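        # clamp the asin() argument to [-1, 1] to guard against numerical round-off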
if g > 1:
g = 1
elif g < -1:
g = -1
yaw = atan2(2*(q[1, 0]*q[2, 0] + q[0, 0]*q[3, 0]), q[0, 0] * q[0, 0] + q[1, 0] * q[1, 0] - q[2, 0] * q[2, 0] - q[3, 0] * q[3, 0])
pitch = asin(g)
roll = atan2(2*(q[2, 0]*q[3, 0] + q[0, 0]*q[1, 0]), q[0, 0] * q[0, 0] - q[1, 0] * q[1, 0] - q[2, 0] * q[2, 0] + q[3, 0] * q[3, 0])
rad2deg = 180/pi
#euler = matrix([
# [roll * rad2deg], [pitch * rad2deg], [yaw * rad2deg]
#])
#return euler[0, 0], euler[1, 0], euler[2, 0]
return roll, pitch, yaw
| 37.654354
| 1,374
| 0.387044
| 5,041
| 28,542
| 2.134497
| 0.072208
| 0.224164
| 0.292472
| 0.34684
| 0.491171
| 0.400093
| 0.35539
| 0.327509
| 0.311338
| 0.303903
| 0
| 0.231357
| 0.412725
| 28,542
| 757
| 1,375
| 37.704095
| 0.410572
| 0.051748
| 0
| 0.287695
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.006932
| null | null | 0.001733
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cf47aca5fbdc5c963454eb2445883327bc3c473e
| 267
|
py
|
Python
|
libp2p/protocol_muxer/exceptions.py
|
lithp/py-libp2p
|
f38899e26edabe59b291e466143d1c696c44de8d
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
libp2p/protocol_muxer/exceptions.py
|
lithp/py-libp2p
|
f38899e26edabe59b291e466143d1c696c44de8d
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
libp2p/protocol_muxer/exceptions.py
|
lithp/py-libp2p
|
f38899e26edabe59b291e466143d1c696c44de8d
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
from libp2p.exceptions import BaseLibp2pError
class MultiselectError(BaseLibp2pError):
"""Raised when an error occurs in multiselect process"""
class MultiselectClientError(BaseLibp2pError):
"""Raised when an error occurs in protocol selection process"""
| 26.7
| 67
| 0.790262
| 28
| 267
| 7.535714
| 0.642857
| 0.199052
| 0.236967
| 0.255924
| 0.379147
| 0.379147
| 0.379147
| 0
| 0
| 0
| 0
| 0.017391
| 0.138577
| 267
| 9
| 68
| 29.666667
| 0.9
| 0.404494
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
cf5b37ee1fc82e3da020ac4e175a1718c4b48d19
| 115
|
py
|
Python
|
env.py
|
olukotun-sandbox/name-button
|
8205dc783dd72765d44378b0b6ca354352d21ad5
|
[
"MIT"
] | null | null | null |
env.py
|
olukotun-sandbox/name-button
|
8205dc783dd72765d44378b0b6ca354352d21ad5
|
[
"MIT"
] | null | null | null |
env.py
|
olukotun-sandbox/name-button
|
8205dc783dd72765d44378b0b6ca354352d21ad5
|
[
"MIT"
] | null | null | null |
import os
print('this is home:', os.environ['HOME'])
print('this is circle branch:', os.environ['CIRCLE_BRANCH'])
| 23
| 60
| 0.704348
| 18
| 115
| 4.444444
| 0.5
| 0.225
| 0.275
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104348
| 115
| 5
| 60
| 23
| 0.776699
| 0
| 0
| 0
| 0
| 0
| 0.448276
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.666667
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 7
|
cf74741b8ea29334e97b4fd26bf8a8d8ea156e23
| 18,806
|
py
|
Python
|
tests/data/ec2_offer.py
|
andrewmcgilvray/awspricing
|
fd37598dbdb08545db03c99492ce01f7290ab6f5
|
[
"Apache-2.0"
] | null | null | null |
tests/data/ec2_offer.py
|
andrewmcgilvray/awspricing
|
fd37598dbdb08545db03c99492ce01f7290ab6f5
|
[
"Apache-2.0"
] | null | null | null |
tests/data/ec2_offer.py
|
andrewmcgilvray/awspricing
|
fd37598dbdb08545db03c99492ce01f7290ab6f5
|
[
"Apache-2.0"
] | null | null | null |
BASIC_EC2_OFFER_SKU = '4C7N4APU9GEUZ6H6'
BASIC_EC2_OFFER_MODIFIED_FORMAT = {
'offerCode': 'AmazonEC2',
'version': '20161213014831',
'products': {
'4C7N4APU9GEUZ6H6' : {
'sku' : '4C7N4APU9GEUZ6H6',
'productFamily' : 'Compute Instance',
'attributes' : {
'servicecode' : 'AmazonEC2',
'location' : 'US East (N. Virginia)',
'locationType' : 'AWS Region',
'instanceType' : 'c4.large',
'currentGeneration' : 'Yes',
'instanceFamily' : 'Compute optimized',
'vcpu' : '2',
'physicalProcessor' : 'Intel Xeon E5-2666 v3 (Haswell)',
'clockSpeed' : '2.9 GHz',
'memory' : '3.75 GiB',
'storage' : 'EBS only',
'networkPerformance' : 'Moderate',
'processorArchitecture' : '64-bit',
'tenancy' : 'Shared',
'operatingSystem' : 'Linux',
'licenseModel' : 'No License required',
'usagetype' : 'BoxUsage:c4.large',
'operation' : 'RunInstances',
'dedicatedEbsThroughput' : '500 Mbps',
'enhancedNetworkingSupported' : 'Yes',
'preInstalledSw' : 'NA',
'processorFeatures' : 'Intel AVX; Intel AVX2; Intel Turbo'
}
},
'BNSJSY9CBT29VNPD':{
'sku': 'BNSJSY9CBT29VNPD',
'attributes': {
'servicecode': 'AWSDataTransfer',
'transferType': 'Inter Region Peering Data Transfer Inbound',
'fromLocation': 'External',
'fromLocationType': 'AWS Region',
'toLocation': 'US East (Ohio)',
'toLocationType': 'AWS Region',
'usagetype': 'USE2-AWS-In-Bytes',
'operation': '',
'servicename': 'AWS Data Transfer'
}
},
},
'terms': {
'OnDemand': {
'4C7N4APU9GEUZ6H6' : {
'4C7N4APU9GEUZ6H6.JRTCKXETXF' : {
'offerTermCode' : 'JRTCKXETXF',
'sku' : '4C7N4APU9GEUZ6H6',
'effectiveDate' : '2016-12-01T00:00:00Z',
'priceDimensions' : {
'4C7N4APU9GEUZ6H6.JRTCKXETXF.6YS6EN2CT7' : {
'rateCode' : '4C7N4APU9GEUZ6H6.JRTCKXETXF.6YS6EN2CT7',
'description' : '$0.1 per On Demand Linux c4.large Instance Hour',
'beginRange' : '0',
'endRange' : 'Inf',
'unit' : 'Hrs',
'pricePerUnit' : {
'USD' : '0.1000000000'
},
'appliesTo' : [ ]
}
},
'termAttributes' : { }
}
},
},
'Reserved': {
"4C7N4APU9GEUZ6H6" : {
"4C7N4APU9GEUZ6H6.HU7G6KETJZ" : {
"offerTermCode" : "HU7G6KETJZ",
"sku" : "4C7N4APU9GEUZ6H6",
"effectiveDate" : "2016-11-30T23:59:59Z",
"priceDimensions" : {
"4C7N4APU9GEUZ6H6.HU7G6KETJZ.6YS6EN2CT7" : {
"rateCode" : "4C7N4APU9GEUZ6H6.HU7G6KETJZ.6YS6EN2CT7",
"description" : "Linux/UNIX (Amazon VPC), c4.large instance-hours used this month",
"beginRange" : "0",
"endRange" : "Inf",
"unit" : "Hrs",
"pricePerUnit" : {
"USD" : "0.0300000000"
},
"appliesTo" : [ ]
},
"4C7N4APU9GEUZ6H6.HU7G6KETJZ.2TG2D8R56U" : {
"rateCode" : "4C7N4APU9GEUZ6H6.HU7G6KETJZ.2TG2D8R56U",
"description" : "Upfront Fee",
"unit" : "Quantity",
"pricePerUnit" : {
"USD" : "263"
},
"appliesTo" : [ ]
}
},
"termAttributes" : {
"LeaseContractLength" : "1yr",
"OfferingClass" : "standard",
"PurchaseOption" : "Partial Upfront"
}
},
"4C7N4APU9GEUZ6H6.38NPMPTW36" : {
"offerTermCode" : "38NPMPTW36",
"sku" : "4C7N4APU9GEUZ6H6",
"effectiveDate" : "2016-11-30T23:59:59Z",
"priceDimensions" : {
"4C7N4APU9GEUZ6H6.38NPMPTW36.2TG2D8R56U" : {
"rateCode" : "4C7N4APU9GEUZ6H6.38NPMPTW36.2TG2D8R56U",
"description" : "Upfront Fee",
"unit" : "Quantity",
"pricePerUnit" : {
"USD" : "539"
},
"appliesTo" : [ ]
},
"4C7N4APU9GEUZ6H6.38NPMPTW36.6YS6EN2CT7" : {
"rateCode" : "4C7N4APU9GEUZ6H6.38NPMPTW36.6YS6EN2CT7",
"description" : "Linux/UNIX (Amazon VPC), c4.large instance-hours used this month",
"beginRange" : "0",
"endRange" : "Inf",
"unit" : "Hrs",
"pricePerUnit" : {
"USD" : "0.0210000000"
},
"appliesTo" : [ ]
}
},
"termAttributes" : {
"LeaseContractLength" : "3yr",
"OfferingClass" : "standard",
"PurchaseOption" : "Partial Upfront"
}
},
"4C7N4APU9GEUZ6H6.R5XV2EPZQZ" : {
"offerTermCode" : "R5XV2EPZQZ",
"sku" : "4C7N4APU9GEUZ6H6",
"effectiveDate" : "2016-11-30T23:59:59Z",
"priceDimensions" : {
"4C7N4APU9GEUZ6H6.R5XV2EPZQZ.2TG2D8R56U" : {
"rateCode" : "4C7N4APU9GEUZ6H6.R5XV2EPZQZ.2TG2D8R56U",
"description" : "Upfront Fee",
"unit" : "Quantity",
"pricePerUnit" : {
"USD" : "710"
},
"appliesTo" : [ ]
},
"4C7N4APU9GEUZ6H6.R5XV2EPZQZ.6YS6EN2CT7" : {
"rateCode" : "4C7N4APU9GEUZ6H6.R5XV2EPZQZ.6YS6EN2CT7",
"description" : "Linux/UNIX (Amazon VPC), c4.large instance-hours used this month",
"beginRange" : "0",
"endRange" : "Inf",
"unit" : "Hrs",
"pricePerUnit" : {
"USD" : "0.0270000000"
},
"appliesTo" : [ ]
}
},
"termAttributes" : {
"LeaseContractLength" : "3yr",
"OfferingClass" : "convertible",
"PurchaseOption" : "Partial Upfront"
}
},
"4C7N4APU9GEUZ6H6.4NA7Y494T4" : {
"offerTermCode" : "4NA7Y494T4",
"sku" : "4C7N4APU9GEUZ6H6",
"effectiveDate" : "2017-04-30T23:59:59Z",
"priceDimensions" : {
"4C7N4APU9GEUZ6H6.4NA7Y494T4.6YS6EN2CT7" : {
"rateCode" : "4C7N4APU9GEUZ6H6.4NA7Y494T4.6YS6EN2CT7",
"description" : "Linux/UNIX (Amazon VPC), c4.large reserved instance applied",
"beginRange" : "0",
"endRange" : "Inf",
"unit" : "Hrs",
"pricePerUnit" : {
"USD" : "0.0630000000"
},
"appliesTo" : [ ]
}
},
"termAttributes" : {
"LeaseContractLength" : "1yr",
"OfferingClass" : "standard",
"PurchaseOption" : "No Upfront"
}
},
},
}
}
}
# Includes one variation of the c4.large product and just Partial Upfront RIs.
BASIC_EC2_OFFER_DATA = {
'offerCode': 'AmazonEC2',
'version': '20161213014831',
'products': {
'4C7N4APU9GEUZ6H6' : {
'sku' : '4C7N4APU9GEUZ6H6',
'productFamily' : 'Compute Instance',
'attributes' : {
'servicecode' : 'AmazonEC2',
'location' : 'US East (N. Virginia)',
'locationType' : 'AWS Region',
'instanceType' : 'c4.large',
'currentGeneration' : 'Yes',
'instanceFamily' : 'Compute optimized',
'vcpu' : '2',
'physicalProcessor' : 'Intel Xeon E5-2666 v3 (Haswell)',
'clockSpeed' : '2.9 GHz',
'memory' : '3.75 GiB',
'storage' : 'EBS only',
'networkPerformance' : 'Moderate',
'processorArchitecture' : '64-bit',
'tenancy' : 'Shared',
'operatingSystem' : 'Linux',
'licenseModel' : 'No License required',
'usagetype' : 'BoxUsage:c4.large',
'operation' : 'RunInstances',
'dedicatedEbsThroughput' : '500 Mbps',
'enhancedNetworkingSupported' : 'Yes',
'preInstalledSw' : 'NA',
'processorFeatures' : 'Intel AVX; Intel AVX2; Intel Turbo'
}
},
},
'terms': {
'OnDemand': {
'4C7N4APU9GEUZ6H6' : {
'4C7N4APU9GEUZ6H6.JRTCKXETXF' : {
'offerTermCode' : 'JRTCKXETXF',
'sku' : '4C7N4APU9GEUZ6H6',
'effectiveDate' : '2016-12-01T00:00:00Z',
'priceDimensions' : {
'4C7N4APU9GEUZ6H6.JRTCKXETXF.6YS6EN2CT7' : {
'rateCode' : '4C7N4APU9GEUZ6H6.JRTCKXETXF.6YS6EN2CT7',
'description' : '$0.1 per On Demand Linux c4.large Instance Hour',
'beginRange' : '0',
'endRange' : 'Inf',
'unit' : 'Hrs',
'pricePerUnit' : {
'USD' : '0.1000000000'
},
'appliesTo' : [ ]
}
},
'termAttributes' : { }
}
},
},
'Reserved': {
"4C7N4APU9GEUZ6H6" : {
"4C7N4APU9GEUZ6H6.HU7G6KETJZ" : {
"offerTermCode" : "HU7G6KETJZ",
"sku" : "4C7N4APU9GEUZ6H6",
"effectiveDate" : "2016-11-30T23:59:59Z",
"priceDimensions" : {
"4C7N4APU9GEUZ6H6.HU7G6KETJZ.6YS6EN2CT7" : {
"rateCode" : "4C7N4APU9GEUZ6H6.HU7G6KETJZ.6YS6EN2CT7",
"description" : "Linux/UNIX (Amazon VPC), c4.large instance-hours used this month",
"beginRange" : "0",
"endRange" : "Inf",
"unit" : "Hrs",
"pricePerUnit" : {
"USD" : "0.0300000000"
},
"appliesTo" : [ ]
},
"4C7N4APU9GEUZ6H6.HU7G6KETJZ.2TG2D8R56U" : {
"rateCode" : "4C7N4APU9GEUZ6H6.HU7G6KETJZ.2TG2D8R56U",
"description" : "Upfront Fee",
"unit" : "Quantity",
"pricePerUnit" : {
"USD" : "263"
},
"appliesTo" : [ ]
}
},
"termAttributes" : {
"LeaseContractLength" : "1yr",
"OfferingClass" : "standard",
"PurchaseOption" : "Partial Upfront"
}
},
"4C7N4APU9GEUZ6H6.38NPMPTW36" : {
"offerTermCode" : "38NPMPTW36",
"sku" : "4C7N4APU9GEUZ6H6",
"effectiveDate" : "2016-11-30T23:59:59Z",
"priceDimensions" : {
"4C7N4APU9GEUZ6H6.38NPMPTW36.2TG2D8R56U" : {
"rateCode" : "4C7N4APU9GEUZ6H6.38NPMPTW36.2TG2D8R56U",
"description" : "Upfront Fee",
"unit" : "Quantity",
"pricePerUnit" : {
"USD" : "539"
},
"appliesTo" : [ ]
},
"4C7N4APU9GEUZ6H6.38NPMPTW36.6YS6EN2CT7" : {
"rateCode" : "4C7N4APU9GEUZ6H6.38NPMPTW36.6YS6EN2CT7",
"description" : "Linux/UNIX (Amazon VPC), c4.large instance-hours used this month",
"beginRange" : "0",
"endRange" : "Inf",
"unit" : "Hrs",
"pricePerUnit" : {
"USD" : "0.0210000000"
},
"appliesTo" : [ ]
}
},
"termAttributes" : {
"LeaseContractLength" : "3yr",
"OfferingClass" : "standard",
"PurchaseOption" : "Partial Upfront"
}
},
"4C7N4APU9GEUZ6H6.R5XV2EPZQZ" : {
"offerTermCode" : "R5XV2EPZQZ",
"sku" : "4C7N4APU9GEUZ6H6",
"effectiveDate" : "2016-11-30T23:59:59Z",
"priceDimensions" : {
"4C7N4APU9GEUZ6H6.R5XV2EPZQZ.2TG2D8R56U" : {
"rateCode" : "4C7N4APU9GEUZ6H6.R5XV2EPZQZ.2TG2D8R56U",
"description" : "Upfront Fee",
"unit" : "Quantity",
"pricePerUnit" : {
"USD" : "710"
},
"appliesTo" : [ ]
},
"4C7N4APU9GEUZ6H6.R5XV2EPZQZ.6YS6EN2CT7" : {
"rateCode" : "4C7N4APU9GEUZ6H6.R5XV2EPZQZ.6YS6EN2CT7",
"description" : "Linux/UNIX (Amazon VPC), c4.large instance-hours used this month",
"beginRange" : "0",
"endRange" : "Inf",
"unit" : "Hrs",
"pricePerUnit" : {
"USD" : "0.0270000000"
},
"appliesTo" : [ ]
}
},
"termAttributes" : {
"LeaseContractLength" : "3yr",
"OfferingClass" : "convertible",
"PurchaseOption" : "Partial Upfront"
}
},
"4C7N4APU9GEUZ6H6.4NA7Y494T4" : {
"offerTermCode" : "4NA7Y494T4",
"sku" : "4C7N4APU9GEUZ6H6",
"effectiveDate" : "2017-04-30T23:59:59Z",
"priceDimensions" : {
"4C7N4APU9GEUZ6H6.4NA7Y494T4.6YS6EN2CT7" : {
"rateCode" : "4C7N4APU9GEUZ6H6.4NA7Y494T4.6YS6EN2CT7",
"description" : "Linux/UNIX (Amazon VPC), c4.large reserved instance applied",
"beginRange" : "0",
"endRange" : "Inf",
"unit" : "Hrs",
"pricePerUnit" : {
"USD" : "0.0630000000"
},
"appliesTo" : [ ]
}
},
"termAttributes" : {
"LeaseContractLength" : "1yr",
"OfferingClass" : "standard",
"PurchaseOption" : "No Upfront"
}
},
},
}
}
}
BARE_METAL_EC2_SKU = 'SBVNSX4BKU246KVM'
BARE_METAL_EC2_OFFER = {
'offerCode': 'AmazonEC2',
'version': '20161213014831',
'products': {
"SBVNSX4BKU246KVM": {
"productFamily": "Compute Instance (bare metal)",
"sku": "SBVNSX4BKU246KVM",
"attributes": {
"servicename": "Amazon Elastic Compute Cloud",
"preInstalledSw": "SQL Ent",
"normalizationSizeFactor": "128",
"ecu": "208",
"capacitystatus": "Used",
"operation": "RunInstances:0102",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"vcpu": "72",
"instanceFamily": "Storage optimized",
"currentGeneration": "Yes",
"instanceType": "i3.metal",
"locationType": "AWS Region",
"location": "EU (Ireland)",
"servicecode": "AmazonEC2",
"memory": "512 GiB",
"storage": "8 x 1900 NVMe SSD",
"networkPerformance": "25 Gigabit",
"processorArchitecture": "64-bit",
"tenancy": "Shared",
"operatingSystem": "Windows",
"licenseModel": "No License required",
"usagetype": "EU-BoxUsage:i3.metal"
},
}
}
}
| 44.458629
| 111
| 0.37823
| 917
| 18,806
| 7.739368
| 0.221374
| 0.054107
| 0.045089
| 0.030999
| 0.869522
| 0.857545
| 0.849937
| 0.849937
| 0.849937
| 0.849937
| 0
| 0.118661
| 0.509306
| 18,806
| 422
| 112
| 44.563981
| 0.650412
| 0.004094
| 0
| 0.695652
| 0
| 0
| 0.3909
| 0.089181
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cf89cd77b7a7a86eb1c509ae0d28c2801e9db09a
| 9,359
|
py
|
Python
|
util/dynamic_signal_lights.py
|
ashwxn/Intelligent-Traffic-Management-System-Using-ML-YOLO
|
cc111d9895efc19f052656f7d140c6895458a819
|
[
"CC0-1.0"
] | 1
|
2021-03-11T06:58:31.000Z
|
2021-03-11T06:58:31.000Z
|
util/dynamic_signal_lights.py
|
ashwxn/Intelligent-Traffic-Management-System-Using-ML-YOLO
|
cc111d9895efc19f052656f7d140c6895458a819
|
[
"CC0-1.0"
] | null | null | null |
util/dynamic_signal_lights.py
|
ashwxn/Intelligent-Traffic-Management-System-Using-ML-YOLO
|
cc111d9895efc19f052656f7d140c6895458a819
|
[
"CC0-1.0"
] | null | null | null |
import time
import emoji
def switch_signal(denser_lane, seconds):
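    # Shows the four-lane traffic lights, holds the requested lane green for the
    # given number of seconds (printing a dotted countdown), then turns it red again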
print('\033[1m' + '\n\033[99m' +
"OPENING LANE-{}: ".format(str(denser_lane))+ '\033[0m' )
print("----------------------------------------------------------------------------------")
if denser_lane==1:
print(
"Lane 1 Lane 2 Lane 3 Lane 4"
)
time.sleep(1)
print(
" "+ emoji.emojize(":white_circle:") + " "+emoji.emojize(":red_circle:")+ " "+emoji.emojize(":red_circle:")+ " "+emoji.emojize(":red_circle:")+
"\n " + emoji.emojize(":white_circle:") + " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":white_circle:")+
"\n " + emoji.emojize(":green_circle:") + " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":white_circle:") +
"\n")
print('\033[0m' + '\n\033[99m' +
"LANE-{} is now OPEN and will CLOSE after {} seconds ".format(str(denser_lane),str(seconds))+ '\033[0m' ,end="")
while seconds:
mins, secs = divmod(seconds, 60)
print('\033[99m'+".", end="")
time.sleep(1)
seconds -= 1
print()
print('\033[1m' + '\n\033[99m' +
"CLOSING LANE-{}: ".format(str(denser_lane))+ '\033[0m' )
print("----------------------------------------------------------------------------------")
time.sleep(1)
print()
print(
"Lane 1 Lane 2 Lane 3 Lane 4"
)
print(
" "+ emoji.emojize(":red_circle:") + " "+emoji.emojize(":red_circle:")+ " "+emoji.emojize(":red_circle:")+ " "+emoji.emojize(":red_circle:")+
"\n " + emoji.emojize(":white_circle:") + " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":white_circle:")+
"\n " + emoji.emojize(":white_circle:") + " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":white_circle:") +
"\n")
elif denser_lane==2:
print(
"Lane 1 Lane 2 Lane 3 Lane 4"
)
time.sleep(1)
print(
" "+ emoji.emojize(":red_circle:") + " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":red_circle:")+ " "+emoji.emojize(":red_circle:")+
"\n " + emoji.emojize(":white_circle:") + " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":white_circle:")+
"\n " + emoji.emojize(":white_circle:") + " "+emoji.emojize(":green_circle:")+ " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":white_circle:") +
"\n")
print('\033[0m' + '\n\033[99m' +
"LANE-{} is now OPEN and will CLOSE after {} seconds ".format(str(denser_lane),str(seconds))+ '\033[0m' ,end="")
while seconds:
mins, secs = divmod(seconds, 60)
print('\033[99m'+".", end="")
time.sleep(1)
seconds -= 1
print()
print('\033[1m' + '\n\033[99m' +
"CLOSING LANE-{}: ".format(str(denser_lane))+ '\033[0m' )
print("----------------------------------------------------------------------------------")
time.sleep(1)
print()
print(
"Lane 1 Lane 2 Lane 3 Lane 4"
)
print(
" "+ emoji.emojize(":red_circle:") + " "+emoji.emojize(":red_circle:")+ " "+emoji.emojize(":red_circle:")+ " "+emoji.emojize(":red_circle:")+
"\n " + emoji.emojize(":white_circle:") + " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":white_circle:")+
"\n " + emoji.emojize(":white_circle:") + " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":white_circle:") +
"\n")
elif denser_lane==3:
print(
"Lane 1 Lane 2 Lane 3 Lane 4"
)
time.sleep(1)
print(
" "+ emoji.emojize(":red_circle:") + " "+emoji.emojize(":red_circle:")+ " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":red_circle:")+
"\n " + emoji.emojize(":white_circle:") + " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":white_circle:")+
"\n " + emoji.emojize(":white_circle:") + " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":green_circle:")+ " "+emoji.emojize(":white_circle:") +
"\n")
print('\033[0m' + '\n\033[99m' +
"LANE-{} is now OPEN and will CLOSE after {} seconds ".format(str(denser_lane),str(seconds))+ '\033[0m' ,end="")
while seconds:
mins, secs = divmod(seconds, 60)
print('\033[99m'+".", end="")
time.sleep(1)
seconds -= 1
print()
print('\033[1m' + '\n\033[99m' +
"CLOSING LANE-{}: ".format(str(denser_lane))+ '\033[0m' )
print("----------------------------------------------------------------------------------")
time.sleep(1)
print()
print(
"Lane 1 Lane 2 Lane 3 Lane 4"
)
print(
" "+ emoji.emojize(":red_circle:") + " "+emoji.emojize(":red_circle:")+ " "+emoji.emojize(":red_circle:")+ " "+emoji.emojize(":red_circle:")+
"\n " + emoji.emojize(":white_circle:") + " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":white_circle:")+
"\n " + emoji.emojize(":white_circle:") + " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":white_circle:") +
"\n")
elif denser_lane==4:
print(
"Lane 1 Lane 2 Lane 3 Lane 4"
)
time.sleep(1)
print(
" "+ emoji.emojize(":red_circle:") + " "+emoji.emojize(":red_circle:")+ " "+emoji.emojize(":red_circle:")+ " "+emoji.emojize(":white_circle:")+
"\n " + emoji.emojize(":white_circle:") + " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":white_circle:")+
"\n " + emoji.emojize(":white_circle:") + " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":green_circle:") +
"\n")
print('\033[0m' + '\n\033[99m' +
"LANE-{} is now OPEN and will CLOSE after {} seconds ".format(str(denser_lane),str(seconds))+ '\033[0m' ,end="")
while seconds:
mins, secs = divmod(seconds, 60)
print('\033[99m'+".", end="")
time.sleep(1)
seconds -= 1
print()
print('\033[1m' + '\n\033[99m' +
"CLOSING LANE-{}: ".format(str(denser_lane))+ '\033[0m' )
print("----------------------------------------------------------------------------------")
time.sleep(1)
print()
print(
"Lane 1 Lane 2 Lane 3 Lane 4"
)
print(
" "+ emoji.emojize(":red_circle:") + " "+emoji.emojize(":red_circle:")+ " "+emoji.emojize(":red_circle:")+ " "+emoji.emojize(":red_circle:")+
"\n " + emoji.emojize(":white_circle:") + " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":white_circle:")+
"\n " + emoji.emojize(":white_circle:") + " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":white_circle:")+ " "+emoji.emojize(":white_circle:") +
"\n")
print('\033[0m' + '\n\033[99m' +
"LANE-{} is now CLOSED ".format(str(denser_lane)+ '\033[0m' ))
| 69.843284
| 221
| 0.398761
| 797
| 9,359
| 4.542033
| 0.056462
| 0.318232
| 0.358011
| 0.40663
| 0.977348
| 0.977348
| 0.966022
| 0.966022
| 0.956906
| 0.956906
| 0
| 0.036313
| 0.382092
| 9,359
| 134
| 222
| 69.843284
| 0.589659
| 0
| 0
| 0.810606
| 0
| 0
| 0.465064
| 0.043803
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007576
| false
| 0
| 0.015152
| 0
| 0.022727
| 0.325758
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
d8617eb30998d8220d39ad8ca6c7311751fdbf18
| 16,601
|
py
|
Python
|
tests/tests.py
|
ipashchenko/uvmod
|
5f81f9f621ccd2f83e99f22eb0c302ae8d8a218d
|
[
"MIT"
] | null | null | null |
tests/tests.py
|
ipashchenko/uvmod
|
5f81f9f621ccd2f83e99f22eb0c302ae8d8a218d
|
[
"MIT"
] | 5
|
2015-01-28T07:53:30.000Z
|
2015-04-16T11:21:58.000Z
|
tests/tests.py
|
ipashchenko/uvmod
|
5f81f9f621ccd2f83e99f22eb0c302ae8d8a218d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from unittest import (TestCase, skip, skipIf)
from uvmod.stats import LnLike, LS_estimates, LnPrior, LnPost, hdi_of_mcmc
from uvmod.models import Model_1d, Model_2d_isotropic, Model_2d_anisotropic
# TODO: Use ``np.random.uniform`` instead
try:
from scipy.stats import uniform
is_scipy = True
except ImportError:
is_scipy = False
try:
import emcee
is_emcee = True
except ImportError:
is_emcee = False
import numpy as np
import math
# TODO: Add tests for data wo uncertainties
# TODO: Add tests for not installed packages
# TODO: Fix random state to guarantee passing
class Test_1D(TestCase):
def setUp(self):
self.p = [2, 0.3]
self.x = np.array([0., 0.1, 0.2, 0.4, 0.6])
self.model_1d = Model_1d
self.model_1d_detections = Model_1d(self.x)
self.y = self.model_1d_detections(self.p) + np.random.normal(0, 0.1,
size=5)
self.sy = np.random.normal(0.15, 0.025, size=5)
self.xl = np.array([0.5, 0.7])
self.yl = np.array([0.6, 0.2])
self.syl = np.random.normal(0.1, 0.03, size=2)
self.p1 = np.asarray(self.p) + np.array([1., 0.])
self.p2 = np.asarray(self.p) + np.array([-1., 0.])
self.p3 = np.asarray(self.p) + np.array([0., 0.2])
self.p4 = np.asarray(self.p) + np.array([0., -0.2])
self.p0_range = [0., 10.]
self.p1_range = [0., 2.]
@skipIf(not is_scipy, "``scipy`` is not installed")
def test_LnLike(self):
lnlike = LnLike(self.x, self.y, self.model_1d, sy=self.sy,
x_limits=self.xl, y_limits=self.yl, sy_limits=self.syl,
jitter=False, outliers=False)
lnlik0 = lnlike._lnprob[0].__call__(self.p)
lnlik1 = lnlike._lnprob[1].__call__(self.p)
self.assertEqual(lnlike(self.p), lnlik0 + lnlik1)
self.assertGreater(lnlike(self.p), lnlike(self.p1))
self.assertGreater(lnlike(self.p), lnlike(self.p2))
self.assertGreater(lnlike(self.p), lnlike(self.p3))
self.assertGreater(lnlike(self.p), lnlike(self.p4))
@skipIf(not is_scipy, "``scipy`` is not installed")
def test_LS_estimates(self):
lsq = LS_estimates(self.x, self.y, self.model_1d, sy=self.sy)
p, pcov = lsq.fit([1., 1.])
delta0 = 3. * np.sqrt(pcov[0, 0])
delta1 = 5. * np.sqrt(pcov[1, 1])
self.assertAlmostEqual(self.p[0], p[0], delta=delta0)
self.assertAlmostEqual(self.p[1], abs(p[1]), delta=delta1)
@skipIf(not is_scipy, "``scipy`` is not installed")
def test_LnPrior(self):
lnprs = ((uniform.logpdf, self.p0_range, dict(),),
(uniform.logpdf, self.p1_range, dict(),),)
lnpr = LnPrior(lnprs)
self.assertTrue(np.isinf(lnpr([-1., 1.])))
self.assertTrue(np.isinf(lnpr([1., -1.])))
self.assertTrue(np.isinf(lnpr([15., 1.])))
self.assertTrue(np.isinf(lnpr([1., 5.])))
@skipIf(not is_scipy, "``scipy`` is not installed")
def test_LnPost(self):
lnprs = ((uniform.logpdf, self.p0_range, dict(),),
(uniform.logpdf, self.p1_range, dict(),),)
lnpr = LnPrior(lnprs)
lnlike = LnLike(self.x, self.y, self.model_1d, sy=self.sy,
x_limits=self.xl, y_limits=self.yl, sy_limits=self.syl,
jitter=False, outliers=False)
lnpost = LnPost(self.x, self.y, self.model_1d, sy=self.sy,
x_limits=self.xl, y_limits=self.yl, sy_limits=self.syl,
lnpr=lnpr, jitter=False, outliers=False)
self.assertEqual(lnpost._lnpr(self.p), lnpr(self.p))
self.assertEqual(lnpost._lnlike(self.p), lnlike(self.p))
self.assertGreater(lnpost(self.p), lnpost(self.p1))
self.assertGreater(lnpost(self.p), lnpost(self.p2))
self.assertGreater(lnpost(self.p), lnpost(self.p3))
self.assertGreater(lnpost(self.p), lnpost(self.p4))
@skipIf((not is_emcee) or (not is_scipy), "``emcee`` and/or ``scipy`` not"
" installed")
def test_MCMC(self):
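        # Run a short emcee sampling and check that the true parameters fall
        # inside the HDI of the thinned posterior chains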
nwalkers = 250
ndim = 2
p0 = np.random.uniform(low=self.p1_range[0], high=self.p1_range[1],
size=(nwalkers, ndim))
lnprs = ((uniform.logpdf, self.p0_range, dict(),),
(uniform.logpdf, self.p1_range, dict(),),)
lnpr = LnPrior(lnprs)
lnpost = LnPost(self.x, self.y, self.model_1d, sy=self.sy,
x_limits=self.xl, y_limits=self.yl, sy_limits=self.syl,
lnpr=lnpr, jitter=False, outliers=False)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnpost)
pos, prob, state = sampler.run_mcmc(p0, 250)
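        # Treat the first 250 steps as burn-in: keep only the final walker
        # positions and discard the chain accumulated so far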
sampler.reset()
sampler.run_mcmc(pos, 500)
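        # Thin the flattened chain by a factor of 10 to reduce autocorrelation
        # between the retained samples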
sample_vec0 = sampler.flatchain[::10, 0]
sample_vec1 = sampler.flatchain[::10, 1]
p0_hdi_min, p0_hdi_max = hdi_of_mcmc(sample_vec0)
p1_hdi_min, p1_hdi_max = hdi_of_mcmc(sample_vec1)
self.assertTrue((p0_hdi_min < self.p[0] < p0_hdi_max))
self.assertTrue((p1_hdi_min < self.p[1] < p1_hdi_max))
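# For reference, a minimal sketch of a highest-density-interval computation
# (assuming the usual definition for MCMC output: the narrowest interval
# containing ``cred_mass`` of the samples; the actual ``hdi_of_mcmc`` may
# differ in details such as rounding):
def hdi_sketch(samples, cred_mass=0.95):
    samples = np.sort(samples)
    n_kept = int(np.floor(cred_mass * len(samples)))
    # Width of every candidate interval containing ``n_kept`` samples
    widths = samples[n_kept:] - samples[:len(samples) - n_kept]
    i_min = np.argmin(widths)
    return samples[i_min], samples[i_min + n_kept]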
class Test_2D_isotropic(TestCase):
def setUp(self):
np.random.seed(1)
self.p = [2, 0.3]
self.x1 = np.random.uniform(low=-1, high=1, size=10)
self.x2 = np.random.uniform(low=-1, high=1, size=10)
self.xx = np.column_stack((self.x1, self.x2))
self.model_2d = Model_2d_isotropic
self.model_2d_detections = Model_2d_isotropic(self.xx)
self.y = self.model_2d_detections(self.p) + np.random.normal(0, 0.1,
size=10)
self.sy = np.random.normal(0.15, 0.025, size=10)
self.x1l = np.hstack((np.random.uniform(low=-1, high=-0.5, size=2),
np.random.uniform(low=0.5, high=1, size=2),))
self.x2l = np.hstack((np.random.uniform(low=-1, high=-0.5, size=2),
np.random.uniform(low=0.5, high=1, size=2),))
self.xxl = np.column_stack((self.x1l, self.x2l))
self.model_2d_limits = Model_2d_isotropic(self.xxl)
self.yl = self.model_2d_limits(self.p) + abs(np.random.normal(0, 0.1,
size=4))
self.syl = np.random.normal(0.1, 0.03, size=4)
self.p1 = np.asarray(self.p) + np.array([1., 0.])
self.p2 = np.asarray(self.p) + np.array([-1., 0.])
self.p3 = np.asarray(self.p) + np.array([0., 0.2])
self.p4 = np.asarray(self.p) + np.array([0., -0.2])
self.p0_range = [0., 10.]
self.p1_range = [0., 2.]
@skipIf(not is_scipy, "``scipy`` is not installed")
def test_LnLike(self):
lnlike = LnLike(self.xx, self.y, self.model_2d, sy=self.sy,
x_limits=self.xxl, y_limits=self.yl, sy_limits=self.syl,
jitter=False, outliers=False)
        lnlik0 = lnlike._lnprob[0](self.p)
        lnlik1 = lnlike._lnprob[1](self.p)
self.assertEqual(lnlike(self.p), lnlik0 + lnlik1)
self.assertGreater(lnlike(self.p), lnlike(self.p1))
self.assertGreater(lnlike(self.p), lnlike(self.p2))
self.assertGreater(lnlike(self.p), lnlike(self.p3))
self.assertGreater(lnlike(self.p), lnlike(self.p4))
@skipIf(not is_scipy, "``scipy`` is not installed")
def test_LS_estimates(self):
lsq = LS_estimates(self.xx, self.y, self.model_2d, sy=self.sy)
p, pcov = lsq.fit([1., 1.])
delta0 = 3. * np.sqrt(pcov[0, 0])
delta1 = 5. * np.sqrt(pcov[1, 1])
self.assertAlmostEqual(self.p[0], p[0], delta=delta0)
# FIXME: use variance as parameter so p[1] > 0
self.assertAlmostEqual(self.p[1], abs(p[1]), delta=delta1)
@skipIf(not is_scipy, "``scipy`` is not installed")
def test_LnPost(self):
lnprs = ((uniform.logpdf, self.p0_range, dict(),),
(uniform.logpdf, self.p1_range, dict(),),)
lnpr = LnPrior(lnprs)
lnlike = LnLike(self.xx, self.y, self.model_2d, sy=self.sy,
x_limits=self.xxl, y_limits=self.yl, sy_limits=self.syl,
jitter=False, outliers=False)
lnpost = LnPost(self.xx, self.y, self.model_2d, sy=self.sy,
x_limits=self.xxl, y_limits=self.yl, sy_limits=self.syl,
lnpr=lnpr, jitter=False, outliers=False)
self.assertEqual(lnpost._lnpr(self.p), lnpr(self.p))
self.assertEqual(lnpost._lnlike(self.p), lnlike(self.p))
self.assertGreater(lnpost(self.p), lnpost(self.p1))
self.assertGreater(lnpost(self.p), lnpost(self.p2))
self.assertGreater(lnpost(self.p), lnpost(self.p3))
self.assertGreater(lnpost(self.p), lnpost(self.p4))
@skipIf((not is_emcee) or (not is_scipy), "``emcee`` and/or ``scipy`` not"
" installed")
def test_MCMC(self):
nwalkers = 250
ndim = 2
p0 = np.random.uniform(low=self.p1_range[0], high=self.p1_range[1],
size=(nwalkers, ndim))
lnprs = ((uniform.logpdf, self.p0_range, dict(),),
(uniform.logpdf, self.p1_range, dict(),),)
lnpr = LnPrior(lnprs)
lnpost = LnPost(self.xx, self.y, self.model_2d, sy=self.sy,
x_limits=self.xxl, y_limits=self.yl, sy_limits=self.syl,
lnpr=lnpr, jitter=False, outliers=False)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnpost)
pos, prob, state = sampler.run_mcmc(p0, 250)
sampler.reset()
sampler.run_mcmc(pos, 500)
sample_vec0 = sampler.flatchain[::10, 0]
sample_vec1 = sampler.flatchain[::10, 1]
p0_hdi_min, p0_hdi_max = hdi_of_mcmc(sample_vec0)
p1_hdi_min, p1_hdi_max = hdi_of_mcmc(sample_vec1)
self.assertTrue((p0_hdi_min < self.p[0] < p0_hdi_max))
self.assertTrue((p1_hdi_min < self.p[1] < p1_hdi_max))
class Test_2D_anisotropic(TestCase):
    def setUp(self):
        # Fix the random state so the stochastic assertions below pass reliably
        np.random.seed(1)
        self.p = [2, 0.7, 0.3, 1.]
self.x1 = np.random.uniform(low=-1, high=1, size=10)
self.x2 = np.random.uniform(low=-1, high=1, size=10)
self.xx = np.column_stack((self.x1, self.x2))
self.model_2d_anisotropic = Model_2d_anisotropic
self.model_2d_detections = Model_2d_anisotropic(self.xx)
self.y = self.model_2d_detections(self.p) + np.random.normal(0, 0.05,
size=10)
self.sy = np.random.normal(0.15, 0.025, size=10)
self.x1l = np.hstack((np.random.uniform(low=-1, high=-0.5, size=2),
np.random.uniform(low=0.5, high=1, size=2),))
self.x2l = np.hstack((np.random.uniform(low=-1, high=-0.5, size=2),
np.random.uniform(low=0.5, high=1, size=2),))
self.xxl = np.column_stack((self.x1l, self.x2l))
self.model_2d_limits = Model_2d_anisotropic(self.xxl)
self.yl = self.model_2d_limits(self.p) + abs(np.random.normal(0, 0.05,
size=4))
self.syl = np.random.normal(0.1, 0.03, size=4)
self.p1 = np.asarray(self.p) + np.array([1., 0., 0., 0.])
self.p2 = np.asarray(self.p) + np.array([-1., 0., 0., 0.])
self.p3 = np.asarray(self.p) + np.array([0., 0.2, 0., 0.])
self.p4 = np.asarray(self.p) + np.array([0., -0.2, 0., 0.])
self.p5 = np.asarray(self.p) + np.array([0., 0., 0.4, 0.])
self.p6 = np.asarray(self.p) + np.array([0., 0., -0.4, 0.])
self.p7 = np.asarray(self.p) + np.array([0., 0., 0., math.pi / 2.])
self.p8 = np.asarray(self.p) + np.array([0., 0., 0., -math.pi / 2.])
self.p0_range = [0., 10.]
self.p1_range = [0., 2.]
self.p2_range = [0., 1.]
self.p3_range = [0., math.pi]
@skipIf(not is_scipy, "``scipy`` is not installed")
def test_LS_estimates(self):
lsq = LS_estimates(self.xx, self.y, self.model_2d_anisotropic,
sy=self.sy)
p, pcov = lsq.fit([1., 0.5, 0.5, 1.])
delta0 = 3. * np.sqrt(pcov[0, 0])
delta1 = 5. * np.sqrt(pcov[1, 1])
delta2 = 5. * np.sqrt(pcov[2, 2])
delta3 = 5. * np.sqrt(pcov[3, 3])
self.assertAlmostEqual(self.p[0], p[0], delta=delta0)
# FIXME: use variance as parameter so p[1] > 0
self.assertAlmostEqual(self.p[1], abs(p[1]), delta=delta1)
self.assertAlmostEqual(self.p[2], p[2], delta=delta2)
self.assertAlmostEqual(self.p[3], p[3], delta=delta3)
    @skipIf(not is_scipy, "``scipy`` is not installed")
    def test_LnLike(self):
lnlike = LnLike(self.xx, self.y, self.model_2d_anisotropic, sy=self.sy,
x_limits=self.xxl, y_limits=self.yl, sy_limits=self.syl,
jitter=False, outliers=False)
        lnlik0 = lnlike._lnprob[0](self.p)
        lnlik1 = lnlike._lnprob[1](self.p)
self.assertEqual(lnlike(self.p), lnlik0 + lnlik1)
self.assertGreater(lnlike(self.p), lnlike(self.p1))
self.assertGreater(lnlike(self.p), lnlike(self.p2))
self.assertGreater(lnlike(self.p), lnlike(self.p3))
self.assertGreater(lnlike(self.p), lnlike(self.p4))
self.assertGreater(lnlike(self.p), lnlike(self.p5))
self.assertGreater(lnlike(self.p), lnlike(self.p6))
self.assertGreater(lnlike(self.p), lnlike(self.p7))
self.assertGreater(lnlike(self.p), lnlike(self.p8))
@skipIf(not is_scipy, "``scipy`` is not installed")
def test_LnPost(self):
lnprs = ((uniform.logpdf, self.p0_range, dict(),),
(uniform.logpdf, self.p1_range, dict(),),
(uniform.logpdf, self.p2_range, dict(),),
(uniform.logpdf, self.p3_range, dict(),),)
lnpr = LnPrior(lnprs)
lnlike = LnLike(self.xx, self.y, self.model_2d_anisotropic, sy=self.sy,
x_limits=self.xxl, y_limits=self.yl, sy_limits=self.syl,
jitter=False, outliers=False)
lnpost = LnPost(self.xx, self.y, self.model_2d_anisotropic, sy=self.sy,
x_limits=self.xxl, y_limits=self.yl, sy_limits=self.syl,
lnpr=lnpr, jitter=False, outliers=False)
self.assertEqual(lnpost._lnpr(self.p), lnpr(self.p))
self.assertEqual(lnpost._lnlike(self.p), lnlike(self.p))
self.assertGreater(lnpost(self.p), lnpost(self.p1))
self.assertGreater(lnpost(self.p), lnpost(self.p2))
self.assertGreater(lnpost(self.p), lnpost(self.p3))
self.assertGreater(lnpost(self.p), lnpost(self.p4))
@skipIf((not is_emcee) or (not is_scipy), "``emcee`` and/or ``scipy`` not"
" installed")
def test_MCMC(self):
nwalkers = 250
ndim = 4
p0 = np.random.uniform(low=self.p1_range[0], high=self.p1_range[1],
size=(nwalkers, ndim))
lnprs = ((uniform.logpdf, self.p0_range, dict(),),
(uniform.logpdf, self.p1_range, dict(),),
(uniform.logpdf, self.p2_range, dict(),),
(uniform.logpdf, self.p3_range, dict(),),)
lnpr = LnPrior(lnprs)
lnpost = LnPost(self.xx, self.y, self.model_2d_anisotropic, sy=self.sy,
x_limits=self.xxl, y_limits=self.yl, sy_limits=self.syl,
lnpr=lnpr, jitter=False, outliers=False)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnpost)
pos, prob, state = sampler.run_mcmc(p0, 250)
sampler.reset()
sampler.run_mcmc(pos, 500)
sample_vec0 = sampler.flatchain[::10, 0]
sample_vec1 = sampler.flatchain[::10, 1]
sample_vec2 = sampler.flatchain[::10, 2]
sample_vec3 = sampler.flatchain[::10, 3]
p0_hdi_min, p0_hdi_max = hdi_of_mcmc(sample_vec0)
p1_hdi_min, p1_hdi_max = hdi_of_mcmc(sample_vec1)
p2_hdi_min, p2_hdi_max = hdi_of_mcmc(sample_vec2)
p3_hdi_min, p3_hdi_max = hdi_of_mcmc(sample_vec3)
self.assertTrue((p0_hdi_min < self.p[0] < p0_hdi_max))
self.assertTrue((p1_hdi_min < self.p[1] < p1_hdi_max))
self.assertTrue((p2_hdi_min < self.p[2] < p2_hdi_max))
self.assertTrue((p3_hdi_min < self.p[3] < p3_hdi_max))
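if __name__ == '__main__':
    # Conventional entry point (added here for convenience, an assumption
    # rather than part of the original module) so the tests can be run
    # directly with ``python``
    import unittest
    unittest.main()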
| 49.555224
| 80
| 0.575267
| 2,375
| 16,601
| 3.884632
| 0.070316
| 0.048233
| 0.029807
| 0.03501
| 0.882723
| 0.882723
| 0.870692
| 0.848472
| 0.841318
| 0.841318
| 0
| 0.047336
| 0.269562
| 16,601
| 334
| 81
| 49.703593
| 0.713508
| 0.01789
| 0
| 0.763333
| 0
| 0
| 0.021907
| 0
| 0
| 0
| 0
| 0.002994
| 0.19
| 1
| 0.053333
| false
| 0
| 0.033333
| 0
| 0.096667
| 0.003333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d873c9d5b73ad6048ae3ed992ae54074f4373aad
| 44
|
py
|
Python
|
snowav/database/__init__.py
|
robertson-mark/SNOWAV
|
ef7a470dd45a342ee454d74b6476da5807f14301
|
[
"CC0-1.0"
] | 1
|
2018-09-11T17:14:01.000Z
|
2018-09-11T17:14:01.000Z
|
snowav/database/__init__.py
|
robertson-mark/SNOWAV
|
ef7a470dd45a342ee454d74b6476da5807f14301
|
[
"CC0-1.0"
] | 15
|
2018-10-24T21:59:57.000Z
|
2021-07-01T20:37:05.000Z
|
snowav/database/__init__.py
|
USDA-ARS-NWRC/snowav
|
ef7a470dd45a342ee454d74b6476da5807f14301
|
[
"CC0-1.0"
] | null | null | null |
from . import tables
from . import database
| 14.666667
| 22
| 0.772727
| 6
| 44
| 5.666667
| 0.666667
| 0.588235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 44
| 2
| 23
| 22
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d87785dc3dfe82dc39a25b2fc439096204f29f6f
| 158
|
py
|
Python
|
distributions/admin.py
|
lueho/BRIT
|
1eae630c4da6f072aa4e2139bc406db4f4756391
|
[
"MIT"
] | null | null | null |
distributions/admin.py
|
lueho/BRIT
|
1eae630c4da6f072aa4e2139bc406db4f4756391
|
[
"MIT"
] | 4
|
2022-03-29T20:52:31.000Z
|
2022-03-29T20:52:31.000Z
|
distributions/admin.py
|
lueho/BRIT
|
1eae630c4da6f072aa4e2139bc406db4f4756391
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Timestep, TemporalDistribution
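# Expose the temporal distribution models in the Django admin interface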
admin.site.register(TemporalDistribution)
admin.site.register(Timestep)
| 22.571429
| 50
| 0.848101
| 18
| 158
| 7.444444
| 0.555556
| 0.373134
| 0.432836
| 0.552239
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082278
| 158
| 6
| 51
| 26.333333
| 0.924138
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
2b028209a00a7c4331c52f1ca61c13b4b8eaf902
| 107
|
py
|
Python
|
parquet_metadata/test_parquet_metadata.py
|
dzamo/parquet-metadata
|
221bff0253bcaefc7c95e6e16ae376e3bba6ee9f
|
[
"Apache-2.0"
] | 11
|
2018-09-11T02:56:32.000Z
|
2022-02-16T18:49:39.000Z
|
parquet_metadata/test_parquet_metadata.py
|
dzamo/parquet-metadata
|
221bff0253bcaefc7c95e6e16ae376e3bba6ee9f
|
[
"Apache-2.0"
] | null | null | null |
parquet_metadata/test_parquet_metadata.py
|
dzamo/parquet-metadata
|
221bff0253bcaefc7c95e6e16ae376e3bba6ee9f
|
[
"Apache-2.0"
] | 4
|
2019-05-30T22:44:33.000Z
|
2022-02-16T18:49:40.000Z
|
from . import parquet_metadata
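# Smoke test: dumping metadata for a bundled sample file should simply not raise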
def test_smoke_test():
parquet_metadata.dump('parquets/types.parquet')
| 21.4
| 51
| 0.785047
| 14
| 107
| 5.714286
| 0.714286
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11215
| 107
| 4
| 52
| 26.75
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0.205607
| 0.205607
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
2b31dd8ff5a66ac8bf0442f51e45b1fcb61fee3b
| 23,469
|
py
|
Python
|
src/test/test_data.py
|
opploans/cbc-syslog
|
72a203b1dbe6ddd97f02dc87f36631d758564022
|
[
"MIT"
] | 14
|
2020-04-28T12:52:50.000Z
|
2021-08-25T00:36:51.000Z
|
src/test/test_data.py
|
opploans/cbc-syslog
|
72a203b1dbe6ddd97f02dc87f36631d758564022
|
[
"MIT"
] | 21
|
2016-10-24T20:16:39.000Z
|
2020-02-11T21:30:50.000Z
|
src/test/test_data.py
|
opploans/cbc-syslog
|
72a203b1dbe6ddd97f02dc87f36631d758564022
|
[
"MIT"
] | 15
|
2016-12-19T20:39:24.000Z
|
2020-01-02T16:26:34.000Z
|
# -*- coding: utf-8 -*-
null = ""
true = "true"
false = "false"
raw_notifications = {
"notifications": [{
"threatInfo": {
"incidentId": "Z7NG6",
"score": 7,
"summary": "A known virus (Sality: Keylogger, Password or Data stealer, Backdoor) was detected running.",
"indicators": [{
"indicatorName": "PACKED_CALL",
"applicationName": "ShippingInvoice.pdf.exe",
"sha256Hash": "cfe0ae57f314a9f747a7cec605907cdaf1984b3cdea74ee8d5893d00ae0886cc"
},
{
"indicatorName": "TARGET_MALWARE_APP",
"applicationName": "explorer.exe",
"sha256Hash": "1e675cb7df214172f7eb0497f7275556038a0d09c6e5a3e6862c5e26885ef455"
},
{
"indicatorName": "HAS_PACKED_CODE",
"applicationName": "ShippingInvoice.pdf.exe",
"sha256Hash": "cfe0ae57f314a9f747a7cec605907cdaf1984b3cdea74ee8d5893d00ae0886cc"
},
{
"indicatorName": "KNOWN_DOWNLOADER",
"applicationName": "ShippingInvoice.pdf.exe",
"sha256Hash": "cfe0ae57f314a9f747a7cec605907cdaf1984b3cdea74ee8d5893d00ae0886cc"
},
{
"indicatorName": "ENUMERATE_PROCESSES",
"applicationName": "ShippingInvoice.pdf.exe",
"sha256Hash": "cfe0ae57f314a9f747a7cec605907cdaf1984b3cdea74ee8d5893d00ae0886cc"
},
{
"indicatorName": "SET_SYSTEM_SECURITY",
"applicationName": "ShippingInvoice.pdf.exe",
"sha256Hash": "cfe0ae57f314a9f747a7cec605907cdaf1984b3cdea74ee8d5893d00ae0886cc"
},
{
"indicatorName": "MODIFY_MEMORY_PROTECTION",
"applicationName": "ShippingInvoice.pdf.exe",
"sha256Hash": "cfe0ae57f314a9f747a7cec605907cdaf1984b3cdea74ee8d5893d00ae0886cc"
},
{
"indicatorName": "KNOWN_PASSWORD_STEALER",
"applicationName": "ShippingInvoice.pdf.exe",
"sha256Hash": "cfe0ae57f314a9f747a7cec605907cdaf1984b3cdea74ee8d5893d00ae0886cc"
},
{
"indicatorName": "RUN_MALWARE_APP",
"applicationName": "explorer.exe",
"sha256Hash": "1e675cb7df214172f7eb0497f7275556038a0d09c6e5a3e6862c5e26885ef455"
},
{
"indicatorName": "MODIFY_PROCESS",
"applicationName": "ShippingInvoice.pdf.exe",
"sha256Hash": "cfe0ae57f314a9f747a7cec605907cdaf1984b3cdea74ee8d5893d00ae0886cc"
},
{
"indicatorName": "MALWARE_APP",
"applicationName": "ShippingInvoice.pdf.exe",
"sha256Hash": "cfe0ae57f314a9f747a7cec605907cdaf1984b3cdea74ee8d5893d00ae0886cc"
}
],
"time": 1460703240678
},
"url": "https://testserver.company.net/ui#investigate/events/device/2004118/incident/Z7NG6",
"eventTime": 1460703240678,
"eventId": "f279d0e6035211e6be8701df2c083974",
"eventDescription": "[syslog alert] [Cb Defense has detected a threat against your company.] [https://testserver.company.net/ui#device/2004118/incident/Z7NG6] [A known virus (Sality: Keylogger, Password or Data stealer, Backdoor) was detected running.] [Incident id: Z7NG6] [Threat score: 7] [Group: default] [Email: FirstName.LastName@company.net.demo] [Name: Demo_CaretoPC] [Type and OS: WINDOWS XP x86 SP: 0]\n",
"deviceInfo": {
"email": "COMPANY\\FirstName.LastName",
"groupName": "default",
"internalIpAddress": null,
"externalIpAddress": null,
"deviceType": "WINDOWS",
"deviceVersion": "XP x86 SP: 0",
"targetPriorityType": "MEDIUM",
"deviceId": 2004118,
"deviceName": "COMPANY\\Demo_CaretoPC",
"deviceHostName": null,
"targetPriorityCode": 0
},
"ruleName": "syslog alert",
"type": "THREAT"
},
{
"policyAction": {
"sha256Hash": "2552332222112552332222112552332222112552332222112552332222112552",
"action": "TERMINATE",
"reputation": "KNOWN_MALWARE",
"applicationName": "firefox.exe"
},
"type": "POLICY_ACTION",
"eventTime": 1423163263482,
"eventId": "EV1",
"url": "http://carbonblack.com/ui#device/100/hash/2552332222112552332222112552332222112552332222112552332222112552/app/firefox.exe/keyword/terminate policy action",
"deviceInfo": {
"deviceType": "WINDOWS",
"email": "tester@carbonblack.com",
"deviceId": 100,
"deviceName": "testers-pc",
"deviceHostName": null,
"deviceVersion": "7 SP1",
"targetPriorityType": "HIGH",
"targetPriorityCode": 0,
"internalIpAddress": "55.33.22.11",
"groupName": "Executives",
"externalIpAddress": "255.233.222.211"
},
"eventDescription": "Policy action 1",
"ruleName": "Alert Rule 1"
},
{
"threatHunterInfo": {
"incidentId": "WNEXFKQ7-000049d4-00001ef0-00000000-1d4ed58a5f07dbf-j0MkcneCQXy1fIbhber6rw-565660",
"score": 1,
"summary": "PowerShell - File and Directory Discovery Enumeration",
"time": 1554652050250,
"indicators": [
{
"applicationName": "powershell.exe",
"sha256Hash": "ba4038fd20e474c047be8aad5bfacdb1bfc1ddbe12f803f473b7918d8d819436",
"indicatorName": "565660-0"
}
],
"watchLists": [
{
"id": "a3xW2ZiaRyAqRtuVES8Q",
"name": "ATT&CK Framework",
"alert": true
}
],
"iocId": "565660-0",
"count": 0,
"dismissed": false,
"documentGuid": "7a9fQEsTRfuFmXcogI8CMQ",
"firstActivityTime": 1554651811577,
"md5": "097ce5761c89434367598b34fe32893b",
"policyId": 9815,
"processGuid": "WNEXFKQ7-000049d4-00001ef0-00000000-1d4ed58a5f07dbf",
"processPath": "c:\\windows\\system32\\windowspowershell\\v1.0\\powershell.exe",
"reportName": "PowerShell - File and Directory Discovery Enumeration",
"reportId": "j0MkcneCQXy1fIbhber6rw-565660",
"reputation": "TRUSTED_WHITE_LIST",
"responseAlarmId": "WNEXFKQ7-000049d4-00001ef0-00000000-1d4ed58a5f07dbf-j0MkcneCQXy1fIbhber6rw-565660",
"responseSeverity": 1,
"runState": "RAN",
"sha256": "ba4038fd20e474c047be8aad5bfacdb1bfc1ddbe12f803f473b7918d8d819436",
"status": "UNRESOLVED",
"tags": null,
"targetPriority": "MEDIUM",
"threatCause": {
"reputation": "TRUSTED_WHITE_LIST",
"actor": "ba4038fd20e474c047be8aad5bfacdb1bfc1ddbe12f803f473b7918d8d819436",
"actorName": "powershell.exe",
"reason": "Process powershell.exe was detected by the report \"PowerShell - File and Directory Discovery Enumeration\" in watchlist \"ATT&CK Framework\"",
"actorType": null,
"threatCategory": "RESPONSE_WATCHLIST",
"actorProcessPPid": null,
"causeEventId": null,
"originSourceType": "UNKNOWN"
},
"threatId": "a2b724aa094af97c06c758d325240460",
"lastUpdatedTime": 0,
"orgId": 428
},
"eventDescription": "[sm-sentinel-notification] [Carbon Black has detected a threat against your company.] [https://defense-eap01.conferdeploy.net#device/18900/incident/WNEXFKQ7-000049d4-00001ef0-00000000-1d4ed58a5f07dbf-j0MkcneCQXy1fIbhber6rw-565660] [PowerShell - File and Directory Discovery Enumeration] [Incident id: WNEXFKQ7-000049d4-00001ef0-00000000-1d4ed58a5f07dbf-j0MkcneCQXy1fIbhber6rw-565660] [Threat score: 1] [Group: sm-detection] [Email: smultani@carbonblack.com] [Name: win-559j1nqvfgj] [Type and OS: WINDOWS pscr-sensor] [Severity: 1]\n",
"eventTime": 1554651811577,
"deviceInfo": {
"deviceId": 18900,
"targetPriorityCode": 0,
"groupName": "sm-detection",
"deviceName": "win-559j1nqvfgj",
"deviceType": "WINDOWS",
"email": "smultani@carbonblack.com",
"deviceHostName": null,
"deviceVersion": "pscr-sensor",
"targetPriorityType": "MEDIUM",
"uemId": null,
"internalIpAddress": "192.168.81.148",
"externalIpAddress": "73.69.152.214"
},
"url": "https://defense-eap01.conferdeploy.net/investigate?s[searchWindow]=ALL&s[c][DEVICE_ID][0]=18900&s[c][INCIDENT_ID][0]=WNEXFKQ7-000049d4-00001ef0-00000000-1d4ed58a5f07dbf-j0MkcneCQXy1fIbhber6rw-565660",
"ruleName": "sm-sentinel-notification",
"type": "THREAT_HUNTER"
}],
"success": true,
"message": "Success"
}
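# The three notifications above (THREAT, POLICY_ACTION and THREAT_HUNTER) are
# mirrored below as the expected CEF, LEEF and JSON connector outputs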
cef_notifications = ['test CEF:0|CarbonBlack|CbDefense_Syslog_Connector|2.0|Active_Threat|A known virus (Sality: Keylogger, Password or Data stealer, Backdoor) was detected running.|7|rt="Apr 15 2016 06:54:00" sntdom=COMPANY dvchost=Demo_CaretoPC duser=FirstName.LastName dvc= cs3Label="Link" cs3="https://testserver.company.net/ui#investigate/events/device/2004118/incident/Z7NG6" cs4Label="Threat_ID" cs4="Z7NG6" act=Alert', 'test CEF:0|CarbonBlack|CbDefense_Syslog_Connector|2.0|Policy_Action|Confer Sensor Policy Action|1|rt="Feb 05 2015 19:07:43" dvchost=testers-pc duser=tester@carbonblack.com dvc=55.33.22.11 cs3Label="Link" cs3="http://carbonblack.com/ui#device/100/hash/2552332222112552332222112552332222112552332222112552332222112552/app/firefox.exe/keyword/terminate policy action" act=TERMINATE hash=2552332222112552332222112552332222112552332222112552332222112552 deviceprocessname=firefox.exe', 'test CEF:0|CarbonBlack|CbDefense_Syslog_Connector|2.0|Threat_Hunter|PowerShell - File and Directory Discovery Enumeration|1|rt="Apr 07 2019 15:43:31" dvchost=win-559j1nqvfgj duser=smultani@carbonblack.com dvc=192.168.81.148 cs3Label="Link" cs3="https://defense-eap01.conferdeploy.net/investigate?s[searchWindow]=ALL&s[c][DEVICE_ID][0]=18900&s[c][INCIDENT_ID][0]=WNEXFKQ7-000049d4-00001ef0-00000000-1d4ed58a5f07dbf-j0MkcneCQXy1fIbhber6rw-565660" cs4Label="Threat_ID" cs4="WNEXFKQ7-000049d4-00001ef0-00000000-1d4ed58a5f07dbf-j0MkcneCQXy1fIbhber6rw-565660" hash=ba4038fd20e474c047be8aad5bfacdb1bfc1ddbe12f803f473b7918d8d819436']
leef_notifications = ['LEEF:2.0|CarbonBlack|Cloud|1.0|INDICATOR|x09|applicationName=ShippingInvoice.pdf.exe\tcat=INDICATOR\tincidentId=Z7NG6\tindicatorName=PACKED_CALL\tsev=Z7NG6\tsha256Hash=cfe0ae57f314a9f747a7cec605907cdaf1984b3cdea74ee8d5893d00ae0886cc', 'LEEF:2.0|CarbonBlack|Cloud|1.0|INDICATOR|x09|applicationName=explorer.exe\tcat=INDICATOR\tincidentId=Z7NG6\tindicatorName=TARGET_MALWARE_APP\tsev=Z7NG6\tsha256Hash=1e675cb7df214172f7eb0497f7275556038a0d09c6e5a3e6862c5e26885ef455', 'LEEF:2.0|CarbonBlack|Cloud|1.0|INDICATOR|x09|applicationName=ShippingInvoice.pdf.exe\tcat=INDICATOR\tincidentId=Z7NG6\tindicatorName=HAS_PACKED_CODE\tsev=Z7NG6\tsha256Hash=cfe0ae57f314a9f747a7cec605907cdaf1984b3cdea74ee8d5893d00ae0886cc', 'LEEF:2.0|CarbonBlack|Cloud|1.0|INDICATOR|x09|applicationName=ShippingInvoice.pdf.exe\tcat=INDICATOR\tincidentId=Z7NG6\tindicatorName=KNOWN_DOWNLOADER\tsev=Z7NG6\tsha256Hash=cfe0ae57f314a9f747a7cec605907cdaf1984b3cdea74ee8d5893d00ae0886cc', 'LEEF:2.0|CarbonBlack|Cloud|1.0|INDICATOR|x09|applicationName=ShippingInvoice.pdf.exe\tcat=INDICATOR\tincidentId=Z7NG6\tindicatorName=ENUMERATE_PROCESSES\tsev=Z7NG6\tsha256Hash=cfe0ae57f314a9f747a7cec605907cdaf1984b3cdea74ee8d5893d00ae0886cc', 'LEEF:2.0|CarbonBlack|Cloud|1.0|INDICATOR|x09|applicationName=ShippingInvoice.pdf.exe\tcat=INDICATOR\tincidentId=Z7NG6\tindicatorName=SET_SYSTEM_SECURITY\tsev=Z7NG6\tsha256Hash=cfe0ae57f314a9f747a7cec605907cdaf1984b3cdea74ee8d5893d00ae0886cc', 'LEEF:2.0|CarbonBlack|Cloud|1.0|INDICATOR|x09|applicationName=ShippingInvoice.pdf.exe\tcat=INDICATOR\tincidentId=Z7NG6\tindicatorName=MODIFY_MEMORY_PROTECTION\tsev=Z7NG6\tsha256Hash=cfe0ae57f314a9f747a7cec605907cdaf1984b3cdea74ee8d5893d00ae0886cc', 'LEEF:2.0|CarbonBlack|Cloud|1.0|INDICATOR|x09|applicationName=ShippingInvoice.pdf.exe\tcat=INDICATOR\tincidentId=Z7NG6\tindicatorName=KNOWN_PASSWORD_STEALER\tsev=Z7NG6\tsha256Hash=cfe0ae57f314a9f747a7cec605907cdaf1984b3cdea74ee8d5893d00ae0886cc', 'LEEF:2.0|CarbonBlack|Cloud|1.0|INDICATOR|x09|applicationName=explorer.exe\tcat=INDICATOR\tincidentId=Z7NG6\tindicatorName=RUN_MALWARE_APP\tsev=Z7NG6\tsha256Hash=1e675cb7df214172f7eb0497f7275556038a0d09c6e5a3e6862c5e26885ef455', 'LEEF:2.0|CarbonBlack|Cloud|1.0|INDICATOR|x09|applicationName=ShippingInvoice.pdf.exe\tcat=INDICATOR\tincidentId=Z7NG6\tindicatorName=MODIFY_PROCESS\tsev=Z7NG6\tsha256Hash=cfe0ae57f314a9f747a7cec605907cdaf1984b3cdea74ee8d5893d00ae0886cc', 'LEEF:2.0|CarbonBlack|Cloud|1.0|INDICATOR|x09|applicationName=ShippingInvoice.pdf.exe\tcat=INDICATOR\tincidentId=Z7NG6\tindicatorName=MALWARE_APP\tsev=Z7NG6\tsha256Hash=cfe0ae57f314a9f747a7cec605907cdaf1984b3cdea74ee8d5893d00ae0886cc', 'LEEF:2.0|CarbonBlack|Cloud|1.0|THREAT|x09|cat=THREAT\tdevTime=Apr-15-2016 06:54:00 GMT\tdevTimeFormat=MMM dd yyyy HH:mm:ss z\tdeviceId=2004118\tdeviceType=WINDOWS\teventId=f279d0e6035211e6be8701df2c083974\tidentHostName=\tidentSrc=\tincidentId=Z7NG6\trealm=default\tresource=COMPANY\\Demo_CaretoPC\truleName=syslog alert\tsev=7\tsummary=A known virus (Sality: Keylogger, Password or Data stealer, Backdoor) was detected running.\ttargetPriorityType=MEDIUM\turl=https://testserver.company.net/ui#investigate/events/device/2004118/incident/Z7NG6', 'LEEF:2.0|CarbonBlack|Cloud|1.0|POLICY_ACTION|x09|action=TERMINATE\tapplicationName=firefox.exe\tcat=POLICY_ACTION\tdevTime=Feb-05-2015 19:07:43 GMT\tdevTimeFormat=MMM dd yyyy HH:mm:ss z\tdeviceId=100\tdeviceType=WINDOWS\teventId=EV1\tidentHostName=\tidentSrc=55.33.22.11\trealm=Executives\treputation=KNOWN_MALWARE\tresource=testers-pc\truleName=Alert Rule 1\tsev=1\tsha256=2552332222112552332222112552332222112552332222112552332222112552\tsummary=\ttargetPriorityType=HIGH\turl=http://carbonblack.com/ui#device/100/hash/2552332222112552332222112552332222112552332222112552332222112552/app/firefox.exe/keyword/terminate policy action', 'LEEF:2.0|CarbonBlack|Cloud|1.0|INDICATOR|x09|applicationName=powershell.exe\tcat=INDICATOR\tincidentId=WNEXFKQ7-000049d4-00001ef0-00000000-1d4ed58a5f07dbf-j0MkcneCQXy1fIbhber6rw-565660\tindicatorName=565660-0\tsev=WNEXFKQ7-000049d4-00001ef0-00000000-1d4ed58a5f07dbf-j0MkcneCQXy1fIbhber6rw-565660\tsha256Hash=ba4038fd20e474c047be8aad5bfacdb1bfc1ddbe12f803f473b7918d8d819436', 'LEEF:2.0|CarbonBlack|Cloud|1.0|THREAT_HUNTER|x09|cat=THREAT_HUNTER\tdevTime=Apr-07-2019 15:43:31 GMT\tdevTimeFormat=MMM dd yyyy HH:mm:ss z\tdeviceId=18900\tdeviceType=WINDOWS\teventId=None\tidentHostName=\tidentSrc=192.168.81.148\tincidentId=WNEXFKQ7-000049d4-00001ef0-00000000-1d4ed58a5f07dbf-j0MkcneCQXy1fIbhber6rw-565660\tprocessGuid=WNEXFKQ7-000049d4-00001ef0-00000000-1d4ed58a5f07dbf\tprocessPath=c:\\windows\\system32\\windowspowershell\\v1.0\\powershell.exe\trealm=sm-detection\treportName=PowerShell - File and Directory Discovery Enumeration\treputation=TRUSTED_WHITE_LIST\tresource=win-559j1nqvfgj\truleName=sm-sentinel-notification\trunState=RAN\tsev=1\tsha256=ba4038fd20e474c047be8aad5bfacdb1bfc1ddbe12f803f473b7918d8d819436\tsummary=PowerShell - File and Directory Discovery Enumeration\ttargetPriorityType=MEDIUM\turl=https://defense-eap01.conferdeploy.net/investigate?s[searchWindow]=ALL&s[c][DEVICE_ID][0]=18900&s[c][INCIDENT_ID][0]=WNEXFKQ7-000049d4-00001ef0-00000000-1d4ed58a5f07dbf-j0MkcneCQXy1fIbhber6rw-565660\twatchlists=ATT&CK Framework']
json_notifications = [{'threatInfo': {'incidentId': 'Z7NG6', 'score': 7, 'summary': 'A known virus (Sality: Keylogger, Password or Data stealer, Backdoor) was detected running.', 'indicators': [{'indicatorName': 'PACKED_CALL', 'applicationName': 'ShippingInvoice.pdf.exe', 'sha256Hash': 'cfe0ae57f314a9f747a7cec605907cdaf1984b3cdea74ee8d5893d00ae0886cc'}, {'indicatorName': 'TARGET_MALWARE_APP', 'applicationName': 'explorer.exe', 'sha256Hash': '1e675cb7df214172f7eb0497f7275556038a0d09c6e5a3e6862c5e26885ef455'}, {'indicatorName': 'HAS_PACKED_CODE', 'applicationName': 'ShippingInvoice.pdf.exe', 'sha256Hash': 'cfe0ae57f314a9f747a7cec605907cdaf1984b3cdea74ee8d5893d00ae0886cc'}, {'indicatorName': 'KNOWN_DOWNLOADER', 'applicationName': 'ShippingInvoice.pdf.exe', 'sha256Hash': 'cfe0ae57f314a9f747a7cec605907cdaf1984b3cdea74ee8d5893d00ae0886cc'}, {'indicatorName': 'ENUMERATE_PROCESSES', 'applicationName': 'ShippingInvoice.pdf.exe', 'sha256Hash': 'cfe0ae57f314a9f747a7cec605907cdaf1984b3cdea74ee8d5893d00ae0886cc'}, {'indicatorName': 'SET_SYSTEM_SECURITY', 'applicationName': 'ShippingInvoice.pdf.exe', 'sha256Hash': 'cfe0ae57f314a9f747a7cec605907cdaf1984b3cdea74ee8d5893d00ae0886cc'}, {'indicatorName': 'MODIFY_MEMORY_PROTECTION', 'applicationName': 'ShippingInvoice.pdf.exe', 'sha256Hash': 'cfe0ae57f314a9f747a7cec605907cdaf1984b3cdea74ee8d5893d00ae0886cc'}, {'indicatorName': 'KNOWN_PASSWORD_STEALER', 'applicationName': 'ShippingInvoice.pdf.exe', 'sha256Hash': 'cfe0ae57f314a9f747a7cec605907cdaf1984b3cdea74ee8d5893d00ae0886cc'}, {'indicatorName': 'RUN_MALWARE_APP', 'applicationName': 'explorer.exe', 'sha256Hash': '1e675cb7df214172f7eb0497f7275556038a0d09c6e5a3e6862c5e26885ef455'}, {'indicatorName': 'MODIFY_PROCESS', 'applicationName': 'ShippingInvoice.pdf.exe', 'sha256Hash': 'cfe0ae57f314a9f747a7cec605907cdaf1984b3cdea74ee8d5893d00ae0886cc'}, {'indicatorName': 'MALWARE_APP', 'applicationName': 'ShippingInvoice.pdf.exe', 'sha256Hash': 'cfe0ae57f314a9f747a7cec605907cdaf1984b3cdea74ee8d5893d00ae0886cc'}], 'time': 1460703240678}, 'url': 'https://testserver.company.net/ui#investigate/events/device/2004118/incident/Z7NG6', 'eventTime': 1460703240678, 'eventId': 'f279d0e6035211e6be8701df2c083974', 'eventDescription': '[syslog alert] [Cb Defense has detected a threat against your company.] [https://testserver.company.net/ui#device/2004118/incident/Z7NG6] [A known virus (Sality: Keylogger, Password or Data stealer, Backdoor) was detected running.] [Incident id: Z7NG6] [Threat score: 7] [Group: default] [Email: FirstName.LastName@company.net.demo] [Name: Demo_CaretoPC] [Type and OS: WINDOWS XP x86 SP: 0]\n', 'deviceInfo': {'email': 'COMPANY\\FirstName.LastName', 'groupName': 'default', 'internalIpAddress': '', 'externalIpAddress': '', 'deviceType': 'WINDOWS', 'deviceVersion': 'XP x86 SP: 0', 'targetPriorityType': 'MEDIUM', 'deviceId': 2004118, 'deviceName': 'COMPANY\\Demo_CaretoPC', 'deviceHostName': '', 'targetPriorityCode': 0}, 'ruleName': 'syslog alert', 'type': 'THREAT', 'source': 'test'}, {'policyAction': {'sha256Hash': '2552332222112552332222112552332222112552332222112552332222112552', 'action': 'TERMINATE', 'reputation': 'KNOWN_MALWARE', 'applicationName': 'firefox.exe'}, 'type': 'POLICY_ACTION', 'eventTime': 1423163263482, 'eventId': 'EV1', 'url': 'http://carbonblack.com/ui#device/100/hash/2552332222112552332222112552332222112552332222112552332222112552/app/firefox.exe/keyword/terminate policy action', 'deviceInfo': {'deviceType': 'WINDOWS', 'email': 'tester@carbonblack.com', 'deviceId': 100, 'deviceName': 'testers-pc', 'deviceHostName': '', 'deviceVersion': '7 SP1', 'targetPriorityType': 'HIGH', 'targetPriorityCode': 0, 'internalIpAddress': '55.33.22.11', 'groupName': 'Executives', 'externalIpAddress': '255.233.222.211'}, 'eventDescription': 'Policy action 1', 'ruleName': 'Alert Rule 1', 'source': 'test'}, {'threatHunterInfo': {'incidentId': 'WNEXFKQ7-000049d4-00001ef0-00000000-1d4ed58a5f07dbf-j0MkcneCQXy1fIbhber6rw-565660', 'score': 1, 'summary': 'PowerShell - File and Directory Discovery Enumeration', 'time': 1554652050250, 'indicators': [{'applicationName': 'powershell.exe', 'sha256Hash': 'ba4038fd20e474c047be8aad5bfacdb1bfc1ddbe12f803f473b7918d8d819436', 'indicatorName': '565660-0'}], 'watchLists': [{'id': 'a3xW2ZiaRyAqRtuVES8Q', 'name': 'ATT&CK Framework', 'alert': 'true'}], 'iocId': '565660-0', 'count': 0, 'dismissed': 'false', 'documentGuid': '7a9fQEsTRfuFmXcogI8CMQ', 'firstActivityTime': 1554651811577, 'md5': '097ce5761c89434367598b34fe32893b', 'policyId': 9815, 'processGuid': 'WNEXFKQ7-000049d4-00001ef0-00000000-1d4ed58a5f07dbf', 'processPath': 'c:\\windows\\system32\\windowspowershell\\v1.0\\powershell.exe', 'reportName': 'PowerShell - File and Directory Discovery Enumeration', 'reportId': 'j0MkcneCQXy1fIbhber6rw-565660', 'reputation': 'TRUSTED_WHITE_LIST', 'responseAlarmId': 'WNEXFKQ7-000049d4-00001ef0-00000000-1d4ed58a5f07dbf-j0MkcneCQXy1fIbhber6rw-565660', 'responseSeverity': 1, 'runState': 'RAN', 'sha256': 'ba4038fd20e474c047be8aad5bfacdb1bfc1ddbe12f803f473b7918d8d819436', 'status': 'UNRESOLVED', 'tags': '', 'targetPriority': 'MEDIUM', 'threatCause': {'reputation': 'TRUSTED_WHITE_LIST', 'actor': 'ba4038fd20e474c047be8aad5bfacdb1bfc1ddbe12f803f473b7918d8d819436', 'actorName': 'powershell.exe', 'reason': 'Process powershell.exe was detected by the report "PowerShell - File and Directory Discovery Enumeration" in watchlist "ATT&CK Framework"', 'actorType': '', 'threatCategory': 'RESPONSE_WATCHLIST', 'actorProcessPPid': '', 'causeEventId': '', 'originSourceType': 'UNKNOWN'}, 'threatId': 'a2b724aa094af97c06c758d325240460', 'lastUpdatedTime': 0, 'orgId': 428}, 'eventDescription': '[sm-sentinel-notification] [Carbon Black has detected a threat against your company.] [https://defense-eap01.conferdeploy.net#device/18900/incident/WNEXFKQ7-000049d4-00001ef0-00000000-1d4ed58a5f07dbf-j0MkcneCQXy1fIbhber6rw-565660] [PowerShell - File and Directory Discovery Enumeration] [Incident id: WNEXFKQ7-000049d4-00001ef0-00000000-1d4ed58a5f07dbf-j0MkcneCQXy1fIbhber6rw-565660] [Threat score: 1] [Group: sm-detection] [Email: smultani@carbonblack.com] [Name: win-559j1nqvfgj] [Type and OS: WINDOWS pscr-sensor] [Severity: 1]\n', 'eventTime': 1554651811577, 'deviceInfo': {'deviceId': 18900, 'targetPriorityCode': 0, 'groupName': 'sm-detection', 'deviceName': 'win-559j1nqvfgj', 'deviceType': 'WINDOWS', 'email': 'smultani@carbonblack.com', 'deviceHostName': '', 'deviceVersion': 'pscr-sensor', 'targetPriorityType': 'MEDIUM', 'uemId': '', 'internalIpAddress': '192.168.81.148', 'externalIpAddress': '73.69.152.214'}, 'url': 'https://defense-eap01.conferdeploy.net/investigate?s[searchWindow]=ALL&s[c][DEVICE_ID][0]=18900&s[c][INCIDENT_ID][0]=WNEXFKQ7-000049d4-00001ef0-00000000-1d4ed58a5f07dbf-j0MkcneCQXy1fIbhber6rw-565660', 'ruleName': 'sm-sentinel-notification', 'type': 'THREAT_HUNTER', 'source': 'test'}]
| 116.761194
| 6,890
| 0.711023
| 2,007
| 23,469
| 8.263079
| 0.158445
| 0.048842
| 0.053726
| 0.058611
| 0.877894
| 0.874457
| 0.854438
| 0.838157
| 0.82266
| 0.81452
| 0
| 0.196449
| 0.160169
| 23,469
| 200
| 6,891
| 117.345
| 0.644952
| 0.000895
| 0
| 0.206186
| 0
| 0.149485
| 0.762348
| 0.373027
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.030928
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
2b3e16bac852a65f995976462d16cdfaa6ccae29
| 36,434
|
py
|
Python
|
networks_utils.py
|
jviquerat/U-net_laminar_flow
|
6a6029f8bd4d036f8675df8dd74e2d13476aa069
|
[
"MIT"
] | null | null | null |
networks_utils.py
|
jviquerat/U-net_laminar_flow
|
6a6029f8bd4d036f8675df8dd74e2d13476aa069
|
[
"MIT"
] | 1
|
2021-11-18T09:29:19.000Z
|
2021-12-14T08:33:44.000Z
|
networks_utils.py
|
jviquerat/u-net_laminar_flow
|
6a6029f8bd4d036f8675df8dd74e2d13476aa069
|
[
"MIT"
] | 1
|
2021-07-20T08:23:27.000Z
|
2021-07-20T08:23:27.000Z
|
# Core imports (numpy made explicit: it is used below via ``np``)
import sys
import math
import numpy as np
import keras
# Additional imports from keras
from keras import regularizers
from keras import optimizers
from keras.models import Model
from keras.layers import Input
from keras.layers import concatenate
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import AveragePooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import Activation
from keras.layers import Dropout
from keras.layers import Conv2DTranspose
from keras.layers import Lambda
from keras.layers import BatchNormalization
from keras.layers.convolutional import ZeroPadding2D
# Custom imports
from datasets_utils import *
### ************************************************
### I/O convolutional layer
def io_conv_2D(x,
filters = 8,
kernel_size = 3,
strides = 1,
padding = 'same',
activation = 'relu'):
x = Conv2D(filters = filters,
kernel_size = kernel_size,
strides = strides,
padding = padding,
activation = activation)(x)
return x
### ************************************************
### I/O max-pooling layer
def io_maxp_2D(x,
pool_size = 2,
strides = 2):
x = MaxPooling2D(pool_size = pool_size,
strides = strides)(x)
return x
### ************************************************
### I/O avg-pooling layer
def io_avgp_2D(x,
pool_size = 2,
strides = 2):
x = AveragePooling2D(pool_size = pool_size,
strides = strides)(x)
return x
### ************************************************
### I/O convolutional transposed layer
def io_conv_2D_transp(in_layer,
n_filters,
kernel_size,
stride_size):
out_layer = Conv2DTranspose(filters=n_filters,
kernel_size=kernel_size,
strides=stride_size,
padding='same')(in_layer)
return out_layer
### ************************************************
### I/O concatenate + zero-pad
def io_concat_pad(in_layer_1,
in_layer_2,
axis):
# Compute padding sizes
shape1_x = np.asarray(keras.backend.int_shape(in_layer_1)[1])
shape1_y = np.asarray(keras.backend.int_shape(in_layer_1)[2])
shape2_x = np.asarray(keras.backend.int_shape(in_layer_2)[1])
shape2_y = np.asarray(keras.backend.int_shape(in_layer_2)[2])
dx = shape2_x - shape1_x
dy = shape2_y - shape1_y
# Pad and concat
pad_layer = ZeroPadding2D(((dx,0),(dy,0)))(in_layer_1)
out_layer = concatenate([pad_layer, in_layer_2], axis=axis)
return out_layer
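### Example (illustrative only): after max-pooling an odd-sized feature map,
### the up-sampled tensor can be one row/column smaller than its skip
### connection, e.g. 15x15 vs 16x16; the zero padding above restores matching
### shapes before concatenation.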
### ************************************************
### Classic U-net for field prediction
def U_net(train_im,
train_sol,
valid_im,
valid_sol,
test_im,
n_filters_initial,
kernel_size,
kernel_transpose_size,
pool_size,
stride_size,
learning_rate,
batch_size,
n_epochs,
height,
width,
n_channels):
# Generate inputs
conv0 = Input((height,width,n_channels))
# 2 convolutions + maxPool
conv1 = io_conv_2D(conv0, n_filters_initial*(2**0), kernel_size)
conv1 = io_conv_2D(conv1, n_filters_initial*(2**0), kernel_size)
pool1 = io_maxp_2D(conv1, pool_size)
# 2 convolutions + maxPool
conv2 = io_conv_2D(pool1, n_filters_initial*(2**1), kernel_size)
conv2 = io_conv_2D(conv2, n_filters_initial*(2**1), kernel_size)
pool2 = io_maxp_2D(conv2, pool_size)
# 2 convolutions + maxPool
conv3 = io_conv_2D(pool2, n_filters_initial*(2**2), kernel_size)
conv3 = io_conv_2D(conv3, n_filters_initial*(2**2), kernel_size)
pool3 = io_maxp_2D(conv3, pool_size)
# 2 convolutions + maxPool
conv4 = io_conv_2D(pool3, n_filters_initial*(2**3), kernel_size)
conv4 = io_conv_2D(conv4, n_filters_initial*(2**3), kernel_size)
pool4 = io_maxp_2D(conv4, pool_size)
# 2 convolutions
conv5 = io_conv_2D(pool4, n_filters_initial*(2**4), kernel_size)
conv5 = io_conv_2D(conv5, n_filters_initial*(2**4), kernel_size)
pre6 = io_conv_2D_transp(conv5, n_filters_initial*(2**3), (2,2), (2,2))
# 1 transpose convolution and concat + 2 convolutions
up6 = io_concat_pad(pre6, conv4, 3)
conv6 = io_conv_2D(up6, n_filters_initial*(2**3), kernel_size)
conv6 = io_conv_2D(conv6, n_filters_initial*(2**3), kernel_size)
# 1 transpose convolution and concat + 2 convolutions
pre7 = io_conv_2D_transp(conv6, n_filters_initial*(2**2), (2,2), (2,2))
up7 = io_concat_pad(pre7, conv3, 3)
conv7 = io_conv_2D(up7, n_filters_initial*(2**2), kernel_size)
conv7 = io_conv_2D(conv7, n_filters_initial*(2**2), kernel_size)
pre8 = io_conv_2D_transp(conv7, n_filters_initial*(2**1), (2,2), (2,2))
# 1 transpose convolution and concat + 2 convolutions
up8 = io_concat_pad(pre8, conv2, 3)
conv8 = io_conv_2D(up8, n_filters_initial*(2**1), kernel_size)
conv8 = io_conv_2D(conv8, n_filters_initial*(2**1), kernel_size)
# 1 transpose convolution and concat + 2 convolutions
pre9 = io_conv_2D_transp(conv8, n_filters_initial*(2**0), (2,2), (2,2))
up9 = io_concat_pad(pre9, conv1, 3)
conv9 = io_conv_2D(up9, n_filters_initial*(2**0), kernel_size)
conv9 = io_conv_2D(conv9, n_filters_initial*(2**0), kernel_size)
# final 1x1 convolution
conv10 = io_conv_2D(conv9, 3, 1)
# construct model
model = Model(inputs=[conv0], outputs=[conv10])
# Print info about model
model.summary()
# Set training parameters
model.compile(loss='mean_squared_error',
optimizer=optimizers.Adam(lr=learning_rate),
metrics=['mean_absolute_error'])
# Train network
train_model = model.fit(train_im, train_sol,
batch_size=batch_size, epochs=n_epochs,
validation_data=(valid_im, valid_sol))
    return model, train_model
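# Example call (hypothetical shapes and hyper-parameters, for illustration
# only; the actual training arrays come from ``datasets_utils``):
# model, history = U_net(train_im, train_sol, valid_im, valid_sol, test_im,
#                        n_filters_initial=8, kernel_size=3,
#                        kernel_transpose_size=2, pool_size=2, stride_size=2,
#                        learning_rate=1.0e-3, batch_size=32, n_epochs=100,
#                        height=128, width=128, n_channels=1)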
### ************************************************
### Stacked U-nets
def StackedU_net(train_im,
train_sol,
valid_im,
valid_sol,
test_im,
n_filters_initial,
kernel_size,
kernel_size_2,
kernel_transpose_size,
pool_size,
stride_size,
learning_rate,
batch_size,
n_epochs,
height,
width,
n_channels):
# Generate inputs
conv0 = Input((height,width,n_channels))
# 2 convolutions + maxPool
conv1 = io_conv_2D(conv0, n_filters_initial*(2**0), kernel_size)
conv1 = io_conv_2D(conv1, n_filters_initial*(2**0), kernel_size)
pool1 = io_maxp_2D(conv1, pool_size)
# 2 convolutions + maxPool
conv2 = io_conv_2D(pool1, n_filters_initial*(2**1), kernel_size)
conv2 = io_conv_2D(conv2, n_filters_initial*(2**1), kernel_size)
pool2 = io_maxp_2D(conv2, pool_size)
# 2 convolutions + maxPool
conv3 = io_conv_2D(pool2, n_filters_initial*(2**2), kernel_size)
conv3 = io_conv_2D(conv3, n_filters_initial*(2**2), kernel_size)
pool3 = io_maxp_2D(conv3, pool_size)
# 2 convolutions + maxPool
conv4 = io_conv_2D(pool3, n_filters_initial*(2**3), kernel_size)
conv4 = io_conv_2D(conv4, n_filters_initial*(2**3), kernel_size)
pool4 = io_maxp_2D(conv4, pool_size)
# 2 convolutions
conv5 = io_conv_2D(pool4, n_filters_initial*(2**4), kernel_size)
conv5 = io_conv_2D(conv5, n_filters_initial*(2**4), kernel_size)
# 1 transpose convolution and concat + 2 convolutions
pre6 = io_conv_2D_transp(conv5, n_filters_initial*(2**3), (2,2), (2,2))
up6 = io_concat_pad(pre6, conv4, 3)
conv6 = io_conv_2D(up6, n_filters_initial*(2**3), kernel_size)
conv6 = io_conv_2D(conv6, n_filters_initial*(2**3), kernel_size)
# 1 transpose convolution and concat + 2 convolutions
pre7 = io_conv_2D_transp(conv6, n_filters_initial*(2**2), (2,2), (2,2))
up7 = io_concat_pad(pre7, conv3, 3)
conv7 = io_conv_2D(up7, n_filters_initial*(2**2), kernel_size)
conv7 = io_conv_2D(conv7, n_filters_initial*(2**2), kernel_size)
# 1 transpose convolution and concat + 2 convolutions
pre8 = io_conv_2D_transp(conv7, n_filters_initial*(2**1), (2,2), (2,2))
up8 = io_concat_pad(pre8, conv2, 3)
conv8 = io_conv_2D(up8, n_filters_initial*(2**1), kernel_size)
conv8 = io_conv_2D(conv8, n_filters_initial*(2**1), kernel_size)
# 1 transpose convolution and concat + 2 convolutions
pre9 = io_conv_2D_transp(conv8, n_filters_initial*(2**0), (2,2), (2,2))
up9 = io_concat_pad(pre9, conv1, 3)
conv9 = io_conv_2D(up9, n_filters_initial*(2**0), kernel_size)
conv9 = io_conv_2D(conv9, n_filters_initial*(2**0), kernel_size)
# final 1x1 convolution
conv10 = io_conv_2D(conv9, 3, 1)
# 2 convolutions + maxPool
conv21 = io_conv_2D(conv10, n_filters_initial*(2**0), kernel_size_2)
conv21 = io_conv_2D(conv21, n_filters_initial*(2**0), kernel_size_2)
pool21 = io_maxp_2D(conv21, pool_size)
# 2 convolutions + maxPool
conv22 = io_conv_2D(pool21, n_filters_initial*(2**1), kernel_size_2)
conv22 = io_conv_2D(conv22, n_filters_initial*(2**1), kernel_size_2)
pool22 = io_maxp_2D(conv22, pool_size)
# 2 convolutions + maxPool
conv23 = io_conv_2D(pool22, n_filters_initial*(2**2), kernel_size_2)
conv23 = io_conv_2D(conv23, n_filters_initial*(2**2), kernel_size_2)
pool23 = io_maxp_2D(conv23, pool_size)
# 2 convolutions + maxPool
conv24 = io_conv_2D(pool23, n_filters_initial*(2**3), kernel_size_2)
conv24 = io_conv_2D(conv24, n_filters_initial*(2**3), kernel_size_2)
pool24 = io_maxp_2D(conv24, pool_size)
# 2 convolutions
conv25 = io_conv_2D(pool24, n_filters_initial*(2**4), kernel_size_2)
conv25 = io_conv_2D(conv25, n_filters_initial*(2**4), kernel_size_2)
# 1 transpose convolution and concat + 2 convolutions
pre26 = io_conv_2D_transp(conv25, n_filters_initial*(2**3), (2,2), (2,2))
up26 = io_concat_pad(pre26, conv24, 3)
conv26 = io_conv_2D(up26, n_filters_initial*(2**3), kernel_size_2)
conv26 = io_conv_2D(conv26, n_filters_initial*(2**3), kernel_size_2)
pre27 = io_conv_2D_transp(conv26, n_filters_initial*(2**2), (2,2), (2,2))
up27 = io_concat_pad(pre27, conv23, 3)
conv27 = io_conv_2D(up27, n_filters_initial*(2**2), kernel_size_2)
conv27 = io_conv_2D(conv27, n_filters_initial*(2**2), kernel_size_2)
pre28 = io_conv_2D_transp(conv27, n_filters_initial*(2**1), (2,2), (2,2))
up28 = io_concat_pad(pre28, conv22, 3)
conv28 = io_conv_2D(up28, n_filters_initial*(2**1), kernel_size_2)
conv28 = io_conv_2D(conv28, n_filters_initial*(2**1), kernel_size_2)
# 1 transpose convolution and concat + 2 convolutions
pre29 = io_conv_2D_transp(conv28, n_filters_initial*(2**0), (2,2), (2,2))
up29 = io_concat_pad(pre29, conv21, 3)
conv29 = io_conv_2D(up29, n_filters_initial*(2**0), kernel_size_2)
conv29 = io_conv_2D(conv29, n_filters_initial*(2**0), kernel_size_2)
# final 1x1 convolution
conv20 = io_conv_2D(conv29, 3, 1)
# construct model
model = Model(inputs=[conv0], outputs=[conv20])
# Print info about model
model.summary()
# Set training parameters
model.compile(loss='mean_squared_error',
optimizer=optimizers.Adam(lr=learning_rate),
metrics=['mean_absolute_error'])
# Train network
train_model = model.fit(train_im, train_sol,
batch_size=batch_size, epochs=n_epochs,
validation_data=(valid_im, valid_sol))
    return model, train_model
### ************************************************
### Coupled U-nets
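### Unlike the stacked variant above, the second U-net here is coupled to the
### first at every level: each stage concatenates the corresponding
### activations of the first U-net with its own before convolving.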
def CpU_net(train_im,
train_sol,
valid_im,
valid_sol,
test_im,
n_filters_initial,
kernel_size,
kernel_transpose_size,
pool_size,
stride_size,
learning_rate,
batch_size,
n_epochs,
height,
width,
n_channels):
# Generate inputs
conv0 = Input((height,width,n_channels))
# 2 convolutions + maxPool
conv1 = io_conv_2D(conv0, n_filters_initial*(2**0), kernel_size)
conv1 = io_conv_2D(conv1, n_filters_initial*(2**0), kernel_size)
pool1 = io_maxp_2D(conv1, pool_size)
# 2 convolutions + maxPool
conv2 = io_conv_2D(pool1, n_filters_initial*(2**1), kernel_size)
conv2 = io_conv_2D(conv2, n_filters_initial*(2**1), kernel_size)
pool2 = io_maxp_2D(conv2, pool_size)
# 2 convolutions + maxPool
conv3 = io_conv_2D(pool2, n_filters_initial*(2**2), kernel_size)
conv3 = io_conv_2D(conv3, n_filters_initial*(2**2), kernel_size)
pool3 = io_maxp_2D(conv3, pool_size)
# 2 convolutions + maxPool
conv4 = io_conv_2D(pool3, n_filters_initial*(2**3), kernel_size)
conv4 = io_conv_2D(conv4, n_filters_initial*(2**3), kernel_size)
pool4 = io_maxp_2D(conv4, pool_size)
# 2 convolutions
conv5 = io_conv_2D(pool4, n_filters_initial*(2**4), kernel_size)
conv5 = io_conv_2D(conv5, n_filters_initial*(2**4), kernel_size)
# 1 transpose convolution and concat + 2 convolutions
pre6 = io_conv_2D_transp(conv5, n_filters_initial*(2**3), (2,2), (2,2))
up6 = io_concat_pad(pre6, conv4, 3)
conv6 = io_conv_2D(up6, n_filters_initial*(2**3), kernel_size)
conv6 = io_conv_2D(conv6, n_filters_initial*(2**3), kernel_size)
# 1 transpose convolution and concat + 2 convolutions
pre7 = io_conv_2D_transp(conv6, n_filters_initial*(2**2), (2,2), (2,2))
up7 = io_concat_pad(pre7, conv3, 3)
conv7 = io_conv_2D(up7, n_filters_initial*(2**2), kernel_size)
conv7 = io_conv_2D(conv7, n_filters_initial*(2**2), kernel_size)
# 1 transpose convolution and concat + 2 convolutions
pre8 = io_conv_2D_transp(conv7, n_filters_initial*(2**1), (2,2), (2,2))
up8 = io_concat_pad(pre8, conv2, 3)
conv8 = io_conv_2D(up8, n_filters_initial*(2**1), kernel_size)
conv8 = io_conv_2D(conv8, n_filters_initial*(2**1), kernel_size)
# 1 transpose convolution and concat + 2 convolutions
pre9 = io_conv_2D_transp(conv8, n_filters_initial*(2**0), (2,2), (2,2))
up9 = io_concat_pad(pre9, conv1, 3)
conv9 = io_conv_2D(up9, n_filters_initial*(2**0), kernel_size)
conv9 = io_conv_2D(conv9, n_filters_initial*(2**0), kernel_size)
# final 1x1 convolution
conv10 = io_conv_2D(conv9, 3, 1)
    ##### conv10 is the output of the first U-net; the second U-net starts here
# 2 convolutions + maxPool
conv21 = io_conv_2D(concatenate([conv0, conv10], axis=3), n_filters_initial*(2**0), kernel_size)
conv21 = io_conv_2D(conv21, n_filters_initial*(2**0), kernel_size)
pool21 = io_maxp_2D(conv21, pool_size)
# 2 convolutions + maxPool
conv22 = io_conv_2D(concatenate([pool1, pool21], axis=3), n_filters_initial*(2**1), kernel_size)
conv22 = io_conv_2D(conv22, n_filters_initial*(2**1), kernel_size)
pool22 = io_maxp_2D(conv22, pool_size)
# 2 convolutions + maxPool
conv23 = io_conv_2D(concatenate([pool2, pool22], axis=3), n_filters_initial*(2**2), kernel_size)
conv23 = io_conv_2D(conv23, n_filters_initial*(2**2), kernel_size)
pool23 = io_maxp_2D(conv23, pool_size)
# 2 convolutions + maxPool
conv24 = io_conv_2D(concatenate([pool3, pool23], axis=3), n_filters_initial*(2**3), kernel_size)
conv24 = io_conv_2D(conv24, n_filters_initial*(2**3), kernel_size)
pool24 = io_maxp_2D(conv24, pool_size)
# 2 convolutions
conv25 = io_conv_2D(concatenate([pool4, pool24], axis=3), n_filters_initial*(2**4), kernel_size)
conv25 = io_conv_2D(conv25, n_filters_initial*(2**4), kernel_size)
# 1 transpose convolution and concat + 2 convolutions
pre26 = io_conv_2D_transp(concatenate([conv5, conv25], axis=3), n_filters_initial*(2**3), (2,2), (2,2))
up26 = io_concat_pad(pre26, conv24, 3)
conv26 = io_conv_2D(up26, n_filters_initial*(2**3), kernel_size)
conv26 = io_conv_2D(conv26, n_filters_initial*(2**3), kernel_size)
pre27 = io_conv_2D_transp(concatenate([conv6, conv26], axis=3), n_filters_initial*(2**2), (2,2), (2,2))
up27 = io_concat_pad(pre27, conv23, 3)
conv27 = io_conv_2D(up27, n_filters_initial*(2**2), kernel_size)
conv27 = io_conv_2D(conv27, n_filters_initial*(2**2), kernel_size)
pre28 = io_conv_2D_transp(concatenate([conv7, conv27], axis=3), n_filters_initial*(2**1), (2,2), (2,2))
up28 = io_concat_pad(pre28, conv22, 3)
conv28 = io_conv_2D(up28, n_filters_initial*(2**1), kernel_size)
conv28 = io_conv_2D(conv28, n_filters_initial*(2**1), kernel_size)
# 1 transpose convolution and concat + 2 convolutions
pre29 = io_conv_2D_transp(concatenate([conv8, conv28], axis=3), n_filters_initial*(2**0), (2,2), (2,2))
up29 = io_concat_pad(pre29, conv21, 3)
conv29 = io_conv_2D(up29, n_filters_initial*(2**0), kernel_size)
conv29 = io_conv_2D(conv29, n_filters_initial*(2**0), kernel_size)
# final 1x1 convolution
conv20 = io_conv_2D(concatenate([conv9, conv29], axis=3), 3, 1)
# construct model
model = Model(inputs=[conv0], outputs=[conv20])
# Print info about model
model.summary()
# Set training parameters
model.compile(loss='mean_squared_error',
optimizer=optimizers.Adam(lr=learning_rate),
metrics=['mean_absolute_error'])
# Train network
train_model = model.fit(train_im, train_sol,
batch_size=batch_size, epochs=n_epochs,
validation_data=(valid_im, valid_sol))
    return model, train_model
### ************************************************
### Multilevel U-nets
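### Three nested U-nets (mini, small and full) share a single encoder path;
### their three outputs are averaged into the final prediction.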
def Multi_level_U_net(train_im,
train_sol,
valid_im,
valid_sol,
test_im,
n_filters_initial,
kernel_size,
kernel_transpose_size,
pool_size,
stride_size,
learning_rate,
batch_size,
n_epochs,
height,
width,
n_channels):
# Generate inputs
conv0 = Input((height,width,n_channels))
# 2 convolutions + maxPool
conv1 = io_conv_2D(conv0, n_filters_initial*(2**0), kernel_size)
conv1 = io_conv_2D(conv1, n_filters_initial*(2**0), kernel_size)
pool1 = io_maxp_2D(conv1, pool_size)
# 2 convolutions + maxPool
conv2 = io_conv_2D(pool1, n_filters_initial*(2**1), kernel_size)
conv2 = io_conv_2D(conv2, n_filters_initial*(2**1), kernel_size)
pool2 = io_maxp_2D(conv2, pool_size)
# 2 convolutions + maxPool
conv3 = io_conv_2D(pool2, n_filters_initial*(2**2), kernel_size)
conv3 = io_conv_2D(conv3, n_filters_initial*(2**2), kernel_size)
########################################################################################################################
    ## Here is the bottleneck of the mini U-net
pre4 = io_conv_2D_transp(conv3, n_filters_initial*(2**1), (2,2), (2,2))
# 1 transpose convolution and concat + 2 convolutions
up4 = io_concat_pad(pre4, conv2, 3)
conv4 = io_conv_2D(up4, n_filters_initial*(2**1), kernel_size)
conv4 = io_conv_2D(conv4, n_filters_initial*(2**1), kernel_size)
# 1 transpose convolution and concat + 2 convolutions
pre5 = io_conv_2D_transp(conv4, n_filters_initial*(2**0), (2,2), (2,2))
up5 = io_concat_pad(pre5, conv1, 3)
conv5 = io_conv_2D(up5, n_filters_initial*(2**0), kernel_size)
conv5 = io_conv_2D(conv5, n_filters_initial*(2**0), kernel_size)
# output of mini U-net
conv6 = io_conv_2D(conv5, 3, 1)
########################################################################################################################
pool3 = io_maxp_2D(conv3, pool_size)
conv24 = io_conv_2D(pool3, n_filters_initial*(2**3), kernel_size)
conv24 = io_conv_2D(conv24, n_filters_initial*(2**3), kernel_size)
    # Here is the bottleneck of the small U-net
pre25 = io_conv_2D_transp(conv24, n_filters_initial*(2**2), (2,2), (2,2))
up25 = io_concat_pad(pre25, conv3, 3)
conv25 = io_conv_2D(up25, n_filters_initial*(2**2), kernel_size)
conv25 = io_conv_2D(conv25, n_filters_initial*(2**2), kernel_size)
pre26 = io_conv_2D_transp(conv25, n_filters_initial*(2**1), (2,2), (2,2))
    up26 = io_concat_pad(pre26, conv2, 3)  # an alternative is to concatenate pre26 with conv2
conv26 = io_conv_2D(up26, n_filters_initial*(2**1), kernel_size)
conv26 = io_conv_2D(conv26, n_filters_initial*(2**1), kernel_size)
pre27 = io_conv_2D_transp(conv26, n_filters_initial*(2**0), (2,2), (2,2))
    up27 = io_concat_pad(pre27, conv1, 3)  # an alternative is to concatenate pre26 with conv1
conv27 = io_conv_2D(up27, n_filters_initial*(2**0), kernel_size)
conv27 = io_conv_2D(conv27, n_filters_initial*(2**0), kernel_size)
# output of small U-net
conv28 = io_conv_2D(conv27, 3, 1)
########################################################################################################################
pool24 = io_maxp_2D(conv24, pool_size)
conv35 = io_conv_2D(pool24, n_filters_initial*(2**4), kernel_size)
conv35 = io_conv_2D(conv35, n_filters_initial*(2**4), kernel_size)
    # Here is the bottleneck of the full U-net
pre36 = io_conv_2D_transp(conv35, n_filters_initial*(2**3), (2,2), (2,2))
up36 = io_concat_pad(pre36, conv24, 3)
conv36 = io_conv_2D(up36, n_filters_initial*(2**3), kernel_size)
conv36 = io_conv_2D(conv36, n_filters_initial*(2**3), kernel_size)
# 1 transpose convolution and concat + 2 convolutions
pre37 = io_conv_2D_transp(conv36, n_filters_initial*(2**2), (2,2), (2,2))## conv36, conv26?
    up37 = io_concat_pad(pre37, conv3, 3)  # an alternative is to concatenate pre37 with conv3
conv37 = io_conv_2D(up37, n_filters_initial*(2**2), kernel_size)
conv37 = io_conv_2D(conv37, n_filters_initial*(2**2), kernel_size)
# 1 transpose convolution and concat + 2 convolutions
pre38 = io_conv_2D_transp(conv37, n_filters_initial*(2**1), (2,2), (2,2))
    up38 = io_concat_pad(pre38, conv2, 3)  # two alternatives are to concatenate pre38 with conv2 or conv4
conv38 = io_conv_2D(up38, n_filters_initial*(2**1), kernel_size)
conv38 = io_conv_2D(conv38, n_filters_initial*(2**1), kernel_size)
# 1 transpose convolution and concat + 2 convolutions
pre39 = io_conv_2D_transp(conv38, n_filters_initial*(2**0), (2,2), (2,2))
    up39 = io_concat_pad(pre39, conv1, 3)  # alternatives are to concatenate pre39 with conv1, conv5 or conv27
conv39 = io_conv_2D(up39, n_filters_initial*(2**0), kernel_size)
conv39 = io_conv_2D(conv39, n_filters_initial*(2**0), kernel_size)
# final 1x1 convolution
conv30 = io_conv_2D(conv39, 3, 1)
########################################################################################################################
# average the output of three U-nets
conv10 = keras.layers.Average()([conv6, conv28, conv30])
#concatenate the output of three U-nets
#conv10 = io_conv_2D(concatenate([conv6, conv28, conv30], axis=3), 3, 1)
# construct model
model = Model(inputs=[conv0], outputs=[conv10])
# Print info about model
model.summary()
# Set training parameters
model.compile(loss='mean_squared_error',
optimizer=optimizers.Adam(lr=learning_rate),
metrics=['mean_squared_error'])
# Train network
train_model = model.fit(train_im, train_sol,
batch_size=batch_size, epochs=n_epochs,
validation_data=(valid_im, valid_sol))
    return model, train_model
### ************************************************
### Inverse multilevel U-net
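### A full U-net encoder/decoder runs first; a second decoder pass then
### re-uses the first decoder's activations (conv37, conv38, conv39) as its
### skip connections.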
def InvMU_net(train_im,
train_sol,
valid_im,
valid_sol,
test_im,
n_filters_initial,
kernel_size,
kernel_transpose_size,
pool_size,
stride_size,
learning_rate,
batch_size,
n_epochs,
height,
width,
n_channels):
# Generate inputs
conv0 = Input((height,width,n_channels))
# 2 convolutions + maxPool
conv1 = io_conv_2D(conv0, n_filters_initial*(2**0), kernel_size)
conv1 = io_conv_2D(conv1, n_filters_initial*(2**0), kernel_size)
pool1 = io_maxp_2D(conv1, pool_size)
# 2 convolutions + maxPool
conv2 = io_conv_2D(pool1, n_filters_initial*(2**1), kernel_size)
conv2 = io_conv_2D(conv2, n_filters_initial*(2**1), kernel_size)
pool2 = io_maxp_2D(conv2, pool_size)
# 2 convolutions + maxPool
conv3 = io_conv_2D(pool2, n_filters_initial*(2**2), kernel_size)
conv3 = io_conv_2D(conv3, n_filters_initial*(2**2), kernel_size)
pool3 = io_maxp_2D(conv3, pool_size)
conv24 = io_conv_2D(pool3, n_filters_initial*(2**3), kernel_size)
conv24 = io_conv_2D(conv24, n_filters_initial*(2**3), kernel_size)
pool24 = io_maxp_2D(conv24, pool_size)
conv35 = io_conv_2D(pool24, n_filters_initial * (2 ** 4), kernel_size)
conv35 = io_conv_2D(conv35, n_filters_initial * (2 ** 4), kernel_size)
# bottleneck of the U-net
pre36 = io_conv_2D_transp(conv35, n_filters_initial * (2 ** 3), (2, 2), (2, 2))
up36 = io_concat_pad(pre36, conv24, 3)
conv36 = io_conv_2D(up36, n_filters_initial * (2 ** 3), kernel_size)
conv36 = io_conv_2D(conv36, n_filters_initial * (2 ** 3), kernel_size)
# 1 transpose convolution and concat + 2 convolutions
pre37 = io_conv_2D_transp(conv36, n_filters_initial * (2 ** 2), (2, 2), (2, 2))  # alternative input: conv26 instead of conv36?
up37 = io_concat_pad(pre37, conv3, 3)  # an alternative is to concatenate pre37 with conv3
conv37 = io_conv_2D(up37, n_filters_initial * (2 ** 2), kernel_size)
conv37 = io_conv_2D(conv37, n_filters_initial * (2 ** 2), kernel_size)
# 1 transpose convolution and concat + 2 convolutions
pre38 = io_conv_2D_transp(conv37, n_filters_initial * (2 ** 1), (2, 2), (2, 2))
up38 = io_concat_pad(pre38, conv2, 3)  # two alternatives are to concatenate pre38 with conv2 or conv4
conv38 = io_conv_2D(up38, n_filters_initial * (2 ** 1), kernel_size)
conv38 = io_conv_2D(conv38, n_filters_initial * (2 ** 1), kernel_size)
# 1 transpose convolution and concat + 2 convolutions
pre39 = io_conv_2D_transp(conv38, n_filters_initial * (2 ** 0), (2, 2), (2, 2))
up39 = io_concat_pad(pre39, conv1, 3)  # alternatives are to concatenate pre39 with conv1, conv5, or conv27
conv39 = io_conv_2D(up39, n_filters_initial * (2 ** 0), kernel_size)
conv39 = io_conv_2D(conv39, n_filters_initial * (2 ** 0), kernel_size)
# final 1x1 convolution
conv30 = io_conv_2D(conv39, 3, 1)
conv30 = Lambda(lambda x: x * 1.5)(conv30)
########################################################################################################################
pre25 = io_conv_2D_transp(conv24, n_filters_initial * (2 ** 2), (2, 2), (2, 2))
up25 = io_concat_pad(pre25, conv37, 3)
conv25 = io_conv_2D(up25, n_filters_initial * (2 ** 2), kernel_size)
conv25 = io_conv_2D(conv25, n_filters_initial * (2 ** 2), kernel_size)
pre26 = io_conv_2D_transp(conv25, n_filters_initial * (2 ** 1), (2, 2), (2, 2))
up26 = io_concat_pad(pre26, conv38, 3)
conv26 = io_conv_2D(up26, n_filters_initial * (2 ** 1), kernel_size)
conv26 = io_conv_2D(conv26, n_filters_initial * (2 ** 1), kernel_size)
pre27 = io_conv_2D_transp(conv26, n_filters_initial * (2 ** 0), (2, 2), (2, 2))
up27 = io_concat_pad(pre27, conv39, 3)  # an alternative is to concatenate pre27 with conv1
conv27 = io_conv_2D(up27, n_filters_initial * (2 ** 0), kernel_size)
conv27 = io_conv_2D(conv27, n_filters_initial * (2 ** 0), kernel_size)
# output of small U-net
conv28 = io_conv_2D(conv27, 3, 1)
########################################################################################################################
pre4 = io_conv_2D_transp(conv3, n_filters_initial*(2**1), (2,2), (2,2))
# 1 transpose convolution and concat + 2 convolutions
up4 = io_concat_pad(pre4, conv38, 3)
conv4 = io_conv_2D(up4, n_filters_initial*(2**1), kernel_size)
conv4 = io_conv_2D(conv4, n_filters_initial*(2**1), kernel_size)
# 1 transpose convolution and concat + 2 convolutions
pre5 = io_conv_2D_transp(conv4, n_filters_initial*(2**0), (2,2), (2,2))
up5 = io_concat_pad(pre5, conv39, 3)
conv5 = io_conv_2D(up5, n_filters_initial*(2**0), kernel_size)
conv5 = io_conv_2D(conv5, n_filters_initial*(2**0), kernel_size)
# output of mini U-net
conv6 = io_conv_2D(conv5, 3, 1)
conv6 = Lambda(lambda x: x * 0.5)(conv6)
########################################################################################################################
# average the outputs of the three U-nets
conv10 = keras.layers.Average()([conv6, conv28, conv30])
# construct model
model = Model(inputs=[conv0], outputs=[conv10])
# Print info about model
model.summary()
# Set training parameters
model.compile(loss='mean_squared_error',
optimizer=optimizers.Adam(lr=learning_rate),
metrics=['mean_squared_error'])
# Train network
train_model = model.fit(train_im, train_sol,
batch_size=batch_size, epochs=n_epochs,
validation_data=(valid_im, valid_sol))
return(model, train_model)
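### Usage sketch (added for illustration; not part of the original file). The
### builders in this file share one calling convention, so a call might look
### like the one below. The shapes and hyper-parameters are assumptions, and
### test_im appears unused by the builder, so None is passed for it.
if __name__ == "__main__":
    import numpy as np
    h = w = 64                                 # assumed size, divisible by 2**4
    train_im  = np.random.rand(8, h, w, 3).astype("float32")
    train_sol = np.random.rand(8, h, w, 3).astype("float32")
    valid_im  = np.random.rand(2, h, w, 3).astype("float32")
    valid_sol = np.random.rand(2, h, w, 3).astype("float32")
    model, history = InvMU_net(train_im, train_sol, valid_im, valid_sol,
                               test_im=None,
                               n_filters_initial=8, kernel_size=3,
                               kernel_transpose_size=2, pool_size=2,
                               stride_size=2, learning_rate=1.0e-3,
                               batch_size=2, n_epochs=1,
                               height=h, width=w, n_channels=3)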
### ************************************************
### Parallel U-nets
def Parallel_U_net(train_im,
train_sol,
valid_im,
valid_sol,
test_im,
n_filters_initial,
kernel_size,
kernel_size_2,
kernel_transpose_size,
pool_size,
stride_size,
learning_rate,
batch_size,
n_epochs,
height,
width,
n_channels):
# Generate inputs
conv0 = Input((height,width,n_channels))
# 2 convolutions + maxPool
conv1 = io_conv_2D(conv0, n_filters_initial*(2**0), kernel_size)
conv1 = io_conv_2D(conv1, n_filters_initial*(2**0), kernel_size)
pool1 = io_maxp_2D(conv1, pool_size)
# 2 convolutions + maxPool
conv2 = io_conv_2D(pool1, n_filters_initial*(2**1), kernel_size)
conv2 = io_conv_2D(conv2, n_filters_initial*(2**1), kernel_size)
pool2 = io_maxp_2D(conv2, pool_size)
# 2 convolutions + maxPool
conv3 = io_conv_2D(pool2, n_filters_initial*(2**2), kernel_size)
conv3 = io_conv_2D(conv3, n_filters_initial*(2**2), kernel_size)
pool3 = io_maxp_2D(conv3, pool_size)
# 2 convolutions + maxPool
conv4 = io_conv_2D(pool3, n_filters_initial*(2**3), kernel_size)
conv4 = io_conv_2D(conv4, n_filters_initial*(2**3), kernel_size)
pool4 = io_maxp_2D(conv4, pool_size)
# 2 convolutions
conv5 = io_conv_2D(pool4, n_filters_initial*(2**4), kernel_size)
conv5 = io_conv_2D(conv5, n_filters_initial*(2**4), kernel_size)
# 1 transpose convolution and concat + 2 convolutions
pre6 = io_conv_2D_transp(conv5, n_filters_initial*(2**3), (2,2), (2,2))
up6 = io_concat_pad(pre6, conv4, 3)
conv6 = io_conv_2D(up6, n_filters_initial*(2**3), kernel_size)
conv6 = io_conv_2D(conv6, n_filters_initial*(2**3), kernel_size)
# 1 transpose convolution and concat + 2 convolutions
pre7 = io_conv_2D_transp(conv6, n_filters_initial*(2**2), (2,2), (2,2))
up7 = io_concat_pad(pre7, conv3, 3)
conv7 = io_conv_2D(up7, n_filters_initial*(2**2), kernel_size)
conv7 = io_conv_2D(conv7, n_filters_initial*(2**2), kernel_size)
# 1 transpose convolution and concat + 2 convolutions
pre8 = io_conv_2D_transp(conv7, n_filters_initial*(2**1), (2,2), (2,2))
up8 = io_concat_pad(pre8, conv2, 3)
conv8 = io_conv_2D(up8, n_filters_initial*(2**1), kernel_size)
conv8 = io_conv_2D(conv8, n_filters_initial*(2**1), kernel_size)
# 1 transpose convolution and concat + 2 convolutions
pre9 = io_conv_2D_transp(conv8, n_filters_initial*(2**0), (2,2), (2,2))
up9 = io_concat_pad(pre9, conv1, 3)
conv9 = io_conv_2D(up9, n_filters_initial*(2**0), kernel_size)
conv9 = io_conv_2D(conv9, n_filters_initial*(2**0), kernel_size)
# final 1x1 convolution
conv10 = io_conv_2D(conv9, 3, 1)
#conv10 = keras.layers.Add()([conv0, conv10])
##### output of the 1st U-net
# 2 convolutions + maxPool
conv21 = io_conv_2D(conv0, n_filters_initial*(2**0), kernel_size_2)
conv21 = io_conv_2D(conv21, n_filters_initial*(2**0), kernel_size_2)
pool21 = io_maxp_2D(conv21, pool_size)
# 2 convolutions + maxPool
conv22 = io_conv_2D(pool21, n_filters_initial*(2**1), kernel_size_2)
conv22 = io_conv_2D(conv22, n_filters_initial*(2**1), kernel_size_2)
pool22 = io_maxp_2D(conv22, pool_size)
# 2 convolutions + maxPool
conv23 = io_conv_2D(pool22, n_filters_initial*(2**2), kernel_size_2)
conv23 = io_conv_2D(conv23, n_filters_initial*(2**2), kernel_size_2)
pool23 = io_maxp_2D(conv23, pool_size)
# 2 convolutions + maxPool
conv24 = io_conv_2D(pool23, n_filters_initial*(2**3), kernel_size_2)
conv24 = io_conv_2D(conv24, n_filters_initial*(2**3), kernel_size_2)
pool24 = io_maxp_2D(conv24, pool_size)
# 2 convolutions
conv25 = io_conv_2D(pool24, n_filters_initial*(2**4), kernel_size_2)
conv25 = io_conv_2D(conv25, n_filters_initial*(2**4), kernel_size_2)
# 1 transpose convolution and concat + 2 convolutions
pre26 = io_conv_2D_transp(conv25, n_filters_initial*(2**3), (2,2), (2,2))
up26 = io_concat_pad(pre26, conv24, 3)
conv26 = io_conv_2D(up26, n_filters_initial*(2**3), kernel_size_2)
conv26 = io_conv_2D(conv26, n_filters_initial*(2**3), kernel_size_2)
pre27 = io_conv_2D_transp(conv26, n_filters_initial*(2**2), (2,2), (2,2))
up27 = io_concat_pad(pre27, conv23, 3)
conv27 = io_conv_2D(up27, n_filters_initial*(2**2), kernel_size_2)
conv27 = io_conv_2D(conv27, n_filters_initial*(2**2), kernel_size_2)
pre28 = io_conv_2D_transp(conv27, n_filters_initial*(2**1), (2,2), (2,2))
up28 = io_concat_pad(pre28, conv22, 3)
conv28 = io_conv_2D(up28, n_filters_initial*(2**1), kernel_size_2)
conv28 = io_conv_2D(conv28, n_filters_initial*(2**1), kernel_size_2)
# 1 transpose convolution and concat + 2 convolutions
pre29 = io_conv_2D_transp(conv28, n_filters_initial*(2**0), (2,2), (2,2))
up29 = io_concat_pad(pre29, conv21, 3)
conv29 = io_conv_2D(up29, n_filters_initial*(2**0), kernel_size_2)
conv29 = io_conv_2D(conv29, n_filters_initial*(2**0), kernel_size_2)
# final 1x1 convolution
conv20 = io_conv_2D(conv29, 3, 1)
conv30 = keras.layers.Average()([conv10, conv20])
# construct model
model = Model(inputs=[conv0], outputs=[conv30])
# Print info about model
model.summary()
# Set training parameters
model.compile(loss='mean_squared_error',
optimizer=optimizers.Adam(lr=learning_rate),
metrics=['mean_squared_error'])
# Train network
train_model = model.fit(train_im, train_sol,
batch_size=batch_size, epochs=n_epochs,
validation_data=(valid_im, valid_sol))
return(model, train_model)
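### Sketch (illustration only; not from the original file): persisting a
### trained model with the stock Keras serialization API. The file name and
### the prior training call are assumptions.
# model, history = Parallel_U_net(...)        # trained as defined above
# model.save("parallel_u_net.h5")             # architecture + weights + optimizer state
# from keras.models import load_model
# restored = load_model("parallel_u_net.h5")  # may need custom_objects for custom layers
# restored.summary()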
| 41.261608
| 124
| 0.625542
| 5,183
| 36,434
| 4.074474
| 0.044183
| 0.069325
| 0.092433
| 0.172744
| 0.916375
| 0.910976
| 0.90752
| 0.893551
| 0.889857
| 0.882659
| 0
| 0.083696
| 0.224104
| 36,434
| 882
| 125
| 41.30839
| 0.663341
| 0.14481
| 0
| 0.827586
| 0
| 0
| 0.007675
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019964
| false
| 0
| 0.036298
| 0
| 0.065336
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
2b42e860d0ae1b98081e8bfa9a5444fe0146b12b
| 943
|
py
|
Python
|
hangman/hangman_art.py
|
EdwinZurawik/hangman
|
6b865e40bab2a35b71731b3f9bee254ac1d992be
|
[
"MIT"
] | null | null | null |
hangman/hangman_art.py
|
EdwinZurawik/hangman
|
6b865e40bab2a35b71731b3f9bee254ac1d992be
|
[
"MIT"
] | null | null | null |
hangman/hangman_art.py
|
EdwinZurawik/hangman
|
6b865e40bab2a35b71731b3f9bee254ac1d992be
|
[
"MIT"
] | null | null | null |
hangman_title = r"""
____ ____ ____ ____ ____ ____ ____
||H ||||a ||||n ||||g ||||m ||||a ||||n ||
||__||||__||||__||||__||||__||||__||||__||
|/__\||/__\||/__\||/__\||/__\||/__\||/__\| """
hangman_stages = [r"""
____
|/ |
|
|
|
|
===== """, r"""
____
|/ |
| O
|
|
|
===== """, r"""
____
|/ |
| O
| |
|
|
===== """, r"""
____
|/ |
| O
| /|
|
|
===== """, r"""
____
|/ |
| O
| /|\
|
|
===== """, r"""
____
|/ |
| O
| /|\
| /
|
===== """, r"""
____
|/ |
| O
| /|\
| / \
|
===== """]
| 16.839286
| 52
| 0.139979
| 25
| 943
| 1.84
| 0.4
| 0.26087
| 0.326087
| 0.434783
| 0.26087
| 0.26087
| 0.26087
| 0.26087
| 0.26087
| 0.26087
| 0
| 0
| 0.604454
| 943
| 55
| 53
| 17.145455
| 0.123324
| 0
| 0
| 0.545455
| 0
| 0
| 0.888653
| 0.089077
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
2b5f838d508a37329b5f07cb4bf0d798c35b1875
| 125
|
py
|
Python
|
colornamer/__init__.py
|
stitchfix/colornamer
|
1bbaf061feee42322f2428fbf7d9e9a20be255aa
|
[
"Apache-2.0"
] | 55
|
2020-09-02T20:10:30.000Z
|
2022-03-16T01:25:15.000Z
|
colornamer/__init__.py
|
stitchfix/colornamer
|
1bbaf061feee42322f2428fbf7d9e9a20be255aa
|
[
"Apache-2.0"
] | 1
|
2021-01-24T12:21:27.000Z
|
2021-12-07T02:10:14.000Z
|
colornamer/__init__.py
|
stitchfix/colornamer
|
1bbaf061feee42322f2428fbf7d9e9a20be255aa
|
[
"Apache-2.0"
] | 5
|
2020-09-13T13:48:28.000Z
|
2021-11-18T09:45:02.000Z
|
from .colornamer import get_color_from_rgb
from .colornamer import get_color_from_lab
from .colornamer import get_color_json
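# Usage sketch (illustrative; the argument and return formats below are
# assumptions based on the function names, not confirmed by this file):
# color = get_color_from_rgb([120.0, 30.0, 200.0])  # assumed: an [R, G, B] triple
# print(color)                                      # assumed: a dict of color names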
| 31.25
| 42
| 0.88
| 20
| 125
| 5.1
| 0.4
| 0.411765
| 0.588235
| 0.676471
| 0.901961
| 0.627451
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096
| 125
| 3
| 43
| 41.666667
| 0.902655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
99481855b36edd94fabfd379b091a1551b3a37b4
| 2,838
|
py
|
Python
|
test_q2.py
|
karolineos/Desafio-de-Programa-o-Capgemini-
|
cb0aeceeb2d6ba393fc208c3c557c2980469e179
|
[
"MIT"
] | null | null | null |
test_q2.py
|
karolineos/Desafio-de-Programa-o-Capgemini-
|
cb0aeceeb2d6ba393fc208c3c557c2980469e179
|
[
"MIT"
] | null | null | null |
test_q2.py
|
karolineos/Desafio-de-Programa-o-Capgemini-
|
cb0aeceeb2d6ba393fc208c3c557c2980469e179
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from q2 import *
class TestQuestao2Busca(TestCase):
"""
Nomeclatura: test_quando_..._deve_retornar_...
def test_quando_..._deve_retornar_...(self):
Metodos herdados de TestCase:
|nome: | Ação:
| setUp | Antes de cada teste
| tearDown | Depois de cada teste
| setUpClass | Antes de todos os testes
| tearDownClass | Depois de todos os testes
"""
# def teste_quando_array_crescente_deve_retornar_mediana(self):
# self.assertEqual(hello_world(), 'hello world')
# def teste_quando_array_decrescente_deve_retornar_mediana(self):
#
def setUp(self):
print("\nTestando Questao2 Busca")
def Teste(self, lista, deslocamento,saida_esperada):
print(f"Busca d. - teste: lista: {lista}, deslocamento: {deslocamento}; saida esperada: {saida_esperada}; ", end="")
saida = buscar_deslocamentos(lista, deslocamento)
#saida, pares = buscar_deslocamentos(lista, deslocamento)
print(f"saida:{saida}")
#print(f"saida:{saida}, pares: {pares}")
self.assertEqual(saida, saida_esperada)
def teste_Busca_quando_parametros_sao_do_exemplo1_deve_retornar_3(self):
n = [1, 5, 3, 4, 2]
x = 2
saida_esperada = 3
self.Teste(n,x,saida_esperada)
class TestQuestao2BuscaMelhorada(TestCase):
"""
Nomeclatura: test_quando_..._deve_retornar_...
def test_quando_..._deve_retornar_...(self):
Metodos herdados de TestCase:
|nome: | Ação:
| setUp | Antes de cada teste
| tearDown | Depois de cada teste
| setUpClass | Antes de todos os testes
| tearDownClass | Depois de todos os testes
"""
# def teste_quando_array_crescente_deve_retornar_mediana(self):
# self.assertEqual(hello_world(), 'hello world')
# def teste_quando_array_decrescente_deve_retornar_mediana(self):
#
def setUp(self):
print("\nTestando Questao2 Busca Melhorada")
def Teste(self, lista, deslocamento,saida_esperada):
print(f"Busca d. Melhor - teste: lista: {lista}, deslocamento: {deslocamento}; saida esperada: {saida_esperada}; ", end="")
saida, pares = buscar_deslocamentos_melhorado(lista, deslocamento)
print(f"saida:{saida}, pares: {pares}")
self.assertEqual(saida, saida_esperada)
def teste_Busca_quando_parametros_sao_do_exemplo1_deve_retornar_3(self):
n = [1, 5, 3, 4, 2]
x = 2
saida_esperada = 3
self.Teste(n,x,saida_esperada)
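# Running these tests (illustrative, standard unittest invocation):
#   python -m unittest test_q2 -v
# unittest picks the methods up because their names start with "test"
# ("teste_..." matches the default "test" prefix).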
| 37.342105
| 132
| 0.597604
| 299
| 2,838
| 5.411371
| 0.220736
| 0.096415
| 0.034611
| 0.054388
| 0.873918
| 0.873918
| 0.843016
| 0.843016
| 0.843016
| 0.843016
| 0
| 0.011675
| 0.305849
| 2,838
| 75
| 133
| 37.84
| 0.809645
| 0.394644
| 0
| 0.571429
| 0
| 0.071429
| 0.210526
| 0
| 0
| 0
| 0
| 0.08
| 0.071429
| 1
| 0.214286
| false
| 0
| 0.071429
| 0
| 0.357143
| 0.214286
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
9964cf19b644bec6e8da1584aa1fc11f4f7d82fe
| 245
|
py
|
Python
|
6_json_to_csv.py
|
pLINaROF/income_of_russian_deputies
|
6c27fe968825aa2131b613da7265364a03e47397
|
[
"MIT"
] | null | null | null |
6_json_to_csv.py
|
pLINaROF/income_of_russian_deputies
|
6c27fe968825aa2131b613da7265364a03e47397
|
[
"MIT"
] | null | null | null |
6_json_to_csv.py
|
pLINaROF/income_of_russian_deputies
|
6c27fe968825aa2131b613da7265364a03e47397
|
[
"MIT"
] | null | null | null |
import pandas
df = pandas.read_json('data_with_income_rub.json')
df.to_csv('data_with_income_rub.csv', index=False)
df = pandas.read_json('data_with_income_rub_from_csv.json')
df.to_csv('data_with_income_rub_from_csv.csv', index=False)
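# Sketch (not in the original file): a quick round-trip check with the same
# pandas API, assuming the CSV written above exists.
check = pandas.read_csv('data_with_income_rub.csv')
print(check.shape)   # row/column counts should match the JSON load above
print(check.head())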
| 30.625
| 60
| 0.791837
| 44
| 245
| 3.954545
| 0.318182
| 0.183908
| 0.321839
| 0.390805
| 0.781609
| 0.781609
| 0.701149
| 0.701149
| 0
| 0
| 0
| 0
| 0.085714
| 245
| 7
| 61
| 35
| 0.776786
| 0
| 0
| 0
| 0
| 0
| 0.487395
| 0.487395
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
999ac636b504000a178a19bf4c2635260e522f9b
| 1,920
|
py
|
Python
|
tempset/package_data.py
|
IMMM-SFA/tempset
|
86da8415cda47a2158cc1a481b0fa8ceaf5f2e1e
|
[
"BSD-2-Clause"
] | null | null | null |
tempset/package_data.py
|
IMMM-SFA/tempset
|
86da8415cda47a2158cc1a481b0fa8ceaf5f2e1e
|
[
"BSD-2-Clause"
] | null | null | null |
tempset/package_data.py
|
IMMM-SFA/tempset
|
86da8415cda47a2158cc1a481b0fa8ceaf5f2e1e
|
[
"BSD-2-Clause"
] | null | null | null |
import pkg_resources
def get_example_eplus_file():
"""Convenience wrapper to retrieve file path from package data."""
return pkg_resources.resource_filename('tempset', 'data/json/eplus_params.json')
def get_example_batch_file():
"""Convenience wrapper to retrieve file path from package data."""
return pkg_resources.resource_filename('tempset', 'data/json/batch_params.json')
def get_example_htgsetp_file():
"""Convenience wrapper to retrieve file path from package data."""
return pkg_resources.resource_filename('tempset', 'data/json/htgsetp_params.json')
def get_example_htgsetp_params_file():
"""Convenience wrapper to retrieve file path from package data."""
return pkg_resources.resource_filename('tempset', 'data/electric/htgsetp_params_electric.csv')
def get_example_clgsetp_file():
"""Convenience wrapper to retrieve file path from package data."""
return pkg_resources.resource_filename('tempset', 'data/json/clgsetp_params.json')
def get_example_summary_file():
"""Convenience wrapper to retrieve file path from package data."""
return pkg_resources.resource_filename('tempset', 'data/electric/summary.zip')
def get_example_idd_file():
"""Convenience wrapper to retrieve file path from package data."""
return pkg_resources.resource_filename('tempset', 'data/eplus/Energy+.idd')
def get_example_electric_idf_file():
"""Convenience wrapper to retrieve file path from package data."""
return pkg_resources.resource_filename('tempset', 'data/idf/electric.idf')
def get_example_gas_idf_file():
"""Convenience wrapper to retrieve file path from package data."""
return pkg_resources.resource_filename('tempset', 'data/idf/gas.idf')
def get_example_main_idf_file():
"""Convenience wrapper to retrieve file path from package data."""
return pkg_resources.resource_filename('tempset', 'data/idf/main.idf')
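# Usage sketch (illustrative; not part of the original file): each accessor
# resolves a path inside the installed package, so the result feeds straight
# into ordinary file APIs.
if __name__ == "__main__":
    path = get_example_batch_file()
    print(path)                # absolute path into the installed package
    with open(path) as f:      # assumes the package data ships with the install
        print(f.read()[:200])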
| 30.967742
| 98
| 0.758854
| 253
| 1,920
| 5.517787
| 0.134387
| 0.094556
| 0.093123
| 0.17192
| 0.82808
| 0.795129
| 0.752149
| 0.752149
| 0.752149
| 0.752149
| 0
| 0
| 0.13125
| 1,920
| 61
| 99
| 31.47541
| 0.83693
| 0.317188
| 0
| 0
| 0
| 0
| 0.257143
| 0.175397
| 0
| 0
| 0
| 0
| 0
| 1
| 0.47619
| true
| 0
| 0.047619
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
999d5936d86373d84e9ba7d3368a0e7424c747d6
| 14,919
|
py
|
Python
|
gdmtl/datasets/mtl_dataset.py
|
binshengliu/gdmtl
|
fb8bfe0e87bbd6d8535cc8449012fb4119430d4c
|
[
"MIT"
] | null | null | null |
gdmtl/datasets/mtl_dataset.py
|
binshengliu/gdmtl
|
fb8bfe0e87bbd6d8535cc8449012fb4119430d4c
|
[
"MIT"
] | null | null | null |
gdmtl/datasets/mtl_dataset.py
|
binshengliu/gdmtl
|
fb8bfe0e87bbd6d8535cc8449012fb4119430d4c
|
[
"MIT"
] | 1
|
2022-02-26T00:49:03.000Z
|
2022-02-26T00:49:03.000Z
|
from __future__ import annotations
import logging
from typing import Any, Dict, Mapping, Optional, Union
import numpy as np
import torch
from transformers import PreTrainedTokenizer
from .assembler import Assembler
from .qa_dataset import QADataset
from .rank_dataset import RankGroupDataset
from .tsv_dataset import TsvCollection
from .utils import (
make_targets_mlm_inputs,
make_targets_ntp_inputs,
mask_difference,
mask_whole_word,
)
log = logging.getLogger(__name__)
class MtlSepDataset(RankGroupDataset):
def __init__(
self,
array: Mapping[str, np.ndarray],
tokenizer: PreTrainedTokenizer,
query_col: TsvCollection,
doc_col: TsvCollection,
num_dup: int,
num_neg: int,
decoder_start_token_id: int,
src_max_length: int,
tgt_max_length: int,
sample: Optional[Union[float, int]] = None,
sort: Optional[str] = None,
max_length: Optional[int] = None,
summarizer_prefix_token_ids: Optional[str] = None,
rank_prefix_token_ids: Optional[str] = None,
pad_to_max_length: bool = True,
**kwargs: Any,
):
if kwargs:
log.warning(f"Unused parameters: {kwargs}")
super().__init__(
array,
tokenizer,
query_col,
doc_col,
num_dup,
num_neg,
sample,
sort,
max_length,
summarizer_prefix_token_ids,
pad_to_max_length,
)
self._pas_pad = tokenizer.pad_token_id
self._sum_assembler = Assembler(
tokenizer=tokenizer,
max_length=src_max_length,
prefix_token_ids=summarizer_prefix_token_ids,
pad_to_max_length=pad_to_max_length,
)
self._rank_assembler = Assembler(
tokenizer=tokenizer,
max_length=src_max_length,
prefix_token_ids=rank_prefix_token_ids,
pad_to_max_length=pad_to_max_length,
)
decoder_start_token = tokenizer.decode(decoder_start_token_id)
self._decoder_assembler = Assembler(
tokenizer=tokenizer,
max_length=tgt_max_length,
prefix_token_ids=decoder_start_token,
pad_to_max_length=False,
add_special_tokens=False,
return_token_type_ids=None,
)
self._label_assembler = Assembler(
tokenizer=tokenizer,
max_length=tgt_max_length,
suffix_token_ids=tokenizer.eos_token,
pad_to_max_length=False,
add_special_tokens=False,
return_token_type_ids=None,
)
def __getitem__(self, index: int) -> Dict[str, torch.Tensor]:
qid = self._array["qid"][index]
did = self._array["did"][index]
label = self._array["label"][index]
assert qid.shape == (self._num_neg + 1,)
assert did.shape == (self._num_neg + 1,)
assert label.shape == (self._num_neg + 1,)
queries = [self._query_col[x] for x in qid]
passages = [self._doc_col[x] for x in did]
sum_inputs = self._sum_assembler.batch_assemble(passages)
sum_decoder_inputs = self._decoder_assembler.batch_assemble(queries)
lm_labels = self._label_assembler.batch_assemble(queries)
lm_labels["input_ids"].masked_fill_(~lm_labels["attention_mask"].bool(), -100)
rank_inputs = self._rank_assembler.batch_assemble(passages)
rank_decoder_inputs = self._decoder_assembler.batch_assemble(queries)
item: Dict[str, Any] = {
"qids": torch.tensor([int(x) for x in qid]),
"dnos": torch.tensor([int(x) for x in did]),
"sum_input_ids": sum_inputs["input_ids"],
"sum_attention_mask": sum_inputs["attention_mask"],
"sum_decoder_input_ids": sum_decoder_inputs["input_ids"],
"sum_decoder_attention_mask": sum_decoder_inputs["attention_mask"],
"rank_input_ids": rank_inputs["input_ids"],
"rank_attention_mask": rank_inputs["attention_mask"],
"rank_decoder_input_ids": rank_decoder_inputs["input_ids"],
"rank_decoder_attention_mask": rank_decoder_inputs["attention_mask"],
"lm_labels": lm_labels["input_ids"],
}
assert item["sum_input_ids"].dim() == 2
assert item["sum_attention_mask"].dim() == 2
assert item["sum_decoder_input_ids"].dim() == 2
assert item["sum_decoder_attention_mask"].dim() == 2
assert item["rank_input_ids"].dim() == 2
assert item["rank_attention_mask"].dim() == 2
assert item["rank_decoder_input_ids"].dim() == 2
assert item["rank_decoder_attention_mask"].dim() == 2
assert item["lm_labels"].dim() == 2
return item
class MtlMixedDataset(RankGroupDataset):
def __init__(
self,
array: Mapping[str, np.ndarray],
tokenizer: PreTrainedTokenizer,
query_col: TsvCollection,
doc_col: TsvCollection,
num_dup: int,
num_neg: int,
src_max_length: int,
sample: Optional[Union[float, int]] = None,
sort: Optional[str] = None,
max_length: Optional[int] = None,
summarizer_prefix_token_ids: Optional[str] = None,
rank_prefix_token_ids: Optional[str] = None,
pad_to_max_length: bool = True,
qa_data: Optional[Union[QADataset, str]] = None,
qa_prefix: str = "",
mask_whole_word_prob: float = 0.0,
mask_qgen_query: bool = False,
mask_query_from_passage: float = 0.0,
min_rel_for_qgen: int = 1,
**kwargs: Any,
):
if kwargs:
log.warning(f"Unused params {kwargs}")
super(MtlMixedDataset, self).__init__(
array,
tokenizer,
query_col,
doc_col,
num_dup,
num_neg,
sample,
sort,
max_length,
summarizer_prefix_token_ids,
pad_to_max_length,
)
self._pas_pad = tokenizer.pad_token_id
self._tokenizer = tokenizer
self._mask_whole_word_prob = mask_whole_word_prob
self._mask_query_from_passage = mask_query_from_passage
self._mask_qgen_query = mask_qgen_query
self._min_rel_for_qgen = min_rel_for_qgen
self._sum_assembler = Assembler(
tokenizer=tokenizer,
max_length=src_max_length,
prefix_token_ids=summarizer_prefix_token_ids,
pad_to_max_length=pad_to_max_length,
)
self._rank_assembler = Assembler(
tokenizer=tokenizer,
max_length=src_max_length,
prefix_token_ids=rank_prefix_token_ids,
pad_to_max_length=pad_to_max_length,
)
if qa_data is not None:
if isinstance(qa_data, str):
self._qa = QADataset(
path=qa_data,
tokenizer=tokenizer,
max_length=max_length,
prefix=qa_prefix,
)
else:
self._qa = qa_data
def __getitem__(self, index: int) -> Dict[str, Any]:
qid = self._array["qid"][index]
did = self._array["did"][index]
label = self._array["label"][index]
assert qid.shape == (self._num_neg + 1,)
assert did.shape == (self._num_neg + 1,)
assert label.shape == (self._num_neg + 1,)
if label[0] < self._min_rel_for_qgen:
idx = (self._array["label"][:, 0] >= self._min_rel_for_qgen).nonzero()[0]
sample = np.random.choice(idx)
qgen_queries = [self._query_col[x] for x in self._array["qid"][sample]]
passages = [self._doc_col[x] for x in self._array["did"][sample]]
qgen_passages = [self._doc_col[x] for x in self._array["did"][sample]]
sum_input_weights = torch.tensor(
self._array["label"][sample][:1], dtype=torch.float
)
else:
qgen_queries = [self._query_col[x] for x in qid]
passages = [self._doc_col[x] for x in did]
qgen_passages = [self._doc_col[x] for x in did]
sum_input_weights = torch.tensor(label[:1], dtype=torch.float)
if self._mask_query_from_passage > 0.0:
qgen_passages = [
mask_difference(self._tokenizer, x, y, self._mask_query_from_passage)
for x, y in zip(qgen_passages, qgen_queries)
]
if self._mask_whole_word_prob > 0:
qgen_passages = [
mask_whole_word(self._tokenizer, x, self._mask_whole_word_prob)
for x in qgen_passages
]
if self._mask_qgen_query:
sum_inputs = make_targets_mlm_inputs(
self._assembler,
self._tokenizer,
passages[:1],
qgen_queries[:1],
qgen_passages[:1],
)
else:
sum_inputs = make_targets_ntp_inputs(
self._assembler,
self._tokenizer,
passages[:1],
qgen_queries[:1],
qgen_passages[:1],
)
rank_queries = [self._query_col[x] for x in qid]
rank_passages = [self._doc_col[x] for x in did]
rank_inputs = self._rank_assembler.batch_assemble(rank_passages, rank_queries)
item: Dict[str, Any] = {
"qids": torch.tensor([int(x) for x in qid]),
"dnos": torch.tensor([int(x) for x in did]),
"sum_input_ids": sum_inputs["input_ids"],
"sum_token_type_ids": sum_inputs["token_type_ids"],
"sum_attention_mask": sum_inputs["attention_mask"],
"sum_input_weights": sum_input_weights,
"rank_input_ids": rank_inputs["input_ids"],
"rank_token_type_ids": rank_inputs["token_type_ids"],
"rank_attention_mask": rank_inputs["attention_mask"],
"lm_labels": sum_inputs["lm_labels"],
}
assert item["sum_input_ids"].dim() == 2
if self._mask_qgen_query:
assert item["sum_attention_mask"].dim() == 2
else:
assert item["sum_attention_mask"].dim() == 3
assert item["sum_token_type_ids"].dim() == 2
assert item["sum_input_weights"].dim() == 1
assert item["rank_input_ids"].dim() == 2
assert item["rank_token_type_ids"].dim() == 2
assert item["rank_attention_mask"].dim() == 2
assert item["lm_labels"].dim() == 2
if hasattr(self, "_qa"):
pos_qid = qid[0]
qa_inputs = {f"qa_{k}": v for k, v in self._qa.by_qid(pos_qid).items()}
item.update(qa_inputs)
return item
class MtlCatDataset(RankGroupDataset):
def __init__(
self,
array: Mapping[str, np.ndarray],
tokenizer: PreTrainedTokenizer,
query_col: TsvCollection,
doc_col: TsvCollection,
num_dup: int,
num_neg: int,
src_max_length: int,
tgt_max_length: int,
decoder_start_token_id: int,
sample: Optional[Union[float, int]] = None,
sort: Optional[str] = None,
max_length: Optional[int] = None,
summarizer_prefix_token_ids: Optional[str] = None,
rank_prefix_token_ids: Optional[str] = None,
pad_to_max_length: bool = True,
**kwargs: Any,
):
if kwargs:
log.warning(f"Unused params {kwargs}")
super().__init__(
array,
tokenizer,
query_col,
doc_col,
num_dup,
num_neg,
sample,
sort,
max_length,
summarizer_prefix_token_ids,
pad_to_max_length,
)
self._pas_pad = tokenizer.pad_token_id
self._tokenizer = tokenizer
self._sum_assembler = Assembler(
tokenizer=tokenizer,
max_length=src_max_length,
prefix_token_ids=summarizer_prefix_token_ids,
pad_to_max_length=pad_to_max_length,
)
self._rank_assembler = Assembler(
tokenizer=tokenizer,
max_length=src_max_length,
prefix_token_ids=rank_prefix_token_ids,
pad_to_max_length=pad_to_max_length,
)
decoder_start_token = tokenizer.decode(decoder_start_token_id)
self._decoder_assembler = Assembler(
tokenizer=tokenizer,
max_length=tgt_max_length,
prefix_token_ids=decoder_start_token,
pad_to_max_length=False,
add_special_tokens=False,
return_token_type_ids=None,
)
self._label_assembler = Assembler(
tokenizer=tokenizer,
max_length=tgt_max_length,
suffix_token_ids=tokenizer.eos_token,
pad_to_max_length=False,
add_special_tokens=False,
return_token_type_ids=None,
)
def __getitem__(self, index: int) -> Dict[str, Any]:
qid = self._array["qid"][index]
did = self._array["did"][index]
label = self._array["label"][index]
assert qid.shape == (self._num_neg + 1,)
assert did.shape == (self._num_neg + 1,)
assert label.shape == (self._num_neg + 1,)
queries = [self._query_col[x] for x in qid]
passages = [self._doc_col[x] for x in did]
sum_inputs = self._sum_assembler.batch_assemble(passages)
sum_decoder_inputs = self._decoder_assembler.batch_assemble(queries)
lm_labels = self._label_assembler.batch_assemble(queries)
lm_labels["input_ids"].masked_fill_(~lm_labels["attention_mask"].bool(), -100)
rank_passages = [self._doc_col[x] for x in did]
rank_inputs = self._rank_assembler.batch_assemble(rank_passages, queries)
item: Dict[str, Any] = {
"qids": torch.tensor([int(x) for x in qid]),
"dnos": torch.tensor([int(x) for x in did]),
"sum_input_ids": sum_inputs["input_ids"],
"sum_attention_mask": sum_inputs["attention_mask"],
"sum_decoder_input_ids": sum_decoder_inputs["input_ids"],
"sum_decoder_attention_mask": sum_decoder_inputs["attention_mask"],
"rank_input_ids": rank_inputs["input_ids"],
"rank_attention_mask": rank_inputs["attention_mask"],
"lm_labels": lm_labels["input_ids"],
}
assert item["sum_input_ids"].dim() == 2
assert item["sum_attention_mask"].dim() == 2
assert item["sum_decoder_input_ids"].dim() == 2
assert item["sum_decoder_attention_mask"].dim() == 2
assert item["rank_input_ids"].dim() == 2
assert item["rank_attention_mask"].dim() == 2
assert item["lm_labels"].dim() == 2
return item
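# Sketch (illustration only; not from the original file): each dataset above
# returns a dict of group-level tensors, so batching with a stock DataLoader
# is direct. The constructor arguments shown are assumptions.
# from torch.utils.data import DataLoader
# dataset = MtlCatDataset(array=..., tokenizer=..., query_col=..., doc_col=...,
#                         num_dup=1, num_neg=7, src_max_length=512,
#                         tgt_max_length=64, decoder_start_token_id=0)
# loader = DataLoader(dataset, batch_size=4, shuffle=True)
# batch = next(iter(loader))    # e.g. batch["sum_input_ids"]: (4, num_neg + 1, src_max_length)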
| 36.65602
| 86
| 0.597359
| 1,793
| 14,919
| 4.559955
| 0.083659
| 0.060543
| 0.039384
| 0.037671
| 0.827299
| 0.793542
| 0.78498
| 0.756849
| 0.747065
| 0.728596
| 0
| 0.005754
| 0.301093
| 14,919
| 406
| 87
| 36.746305
| 0.778364
| 0
| 0
| 0.720548
| 0
| 0
| 0.088076
| 0.01917
| 0
| 0
| 0
| 0
| 0.093151
| 1
| 0.016438
| false
| 0.068493
| 0.030137
| 0
| 0.063014
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
99a1c1776fe99e2a906834405a696a3f2033b0ed
| 572
|
py
|
Python
|
e_learning/api/models.py
|
Aaditya1978/Accessible-E-Learning-Platform
|
bf846b6f7e3aaca3d7f7ecd0a83a5c4dfc595f6d
|
[
"MIT"
] | 3
|
2021-07-15T06:09:08.000Z
|
2022-02-01T13:47:03.000Z
|
e_learning/api/models.py
|
Aaditya1978/Accessible-E-Learning-Platform
|
bf846b6f7e3aaca3d7f7ecd0a83a5c4dfc595f6d
|
[
"MIT"
] | null | null | null |
e_learning/api/models.py
|
Aaditya1978/Accessible-E-Learning-Platform
|
bf846b6f7e3aaca3d7f7ecd0a83a5c4dfc595f6d
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class Teacher(models.Model):
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
email = models.EmailField()
org_name = models.CharField(max_length=255)
password = models.CharField(max_length=255)
class Student(models.Model):
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
email = models.EmailField()
class_code = models.CharField(max_length=255)
password = models.CharField(max_length=255)
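# Usage sketch (illustrative, standard Django ORM calls; assumes migrations
# have been applied):
# teacher = Teacher.objects.create(
#     first_name="Ada", last_name="Lovelace", email="ada@example.com",
#     org_name="Example Org", password="hashed-password-here")
# Student.objects.filter(class_code="MATH101").count()
# Note: password is a plain CharField here; a real app should store hashes,
# e.g. via django.contrib.auth.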
| 33.647059
| 49
| 0.748252
| 77
| 572
| 5.376623
| 0.311688
| 0.289855
| 0.347826
| 0.463768
| 0.806763
| 0.806763
| 0.797101
| 0.797101
| 0.797101
| 0.797101
| 0
| 0.049281
| 0.148601
| 572
| 16
| 50
| 35.75
| 0.800821
| 0.041958
| 0
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.153846
| 0.076923
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 10
|
41efa0b1f23a58e01d79993abae419de0e29ae35
| 5,994
|
py
|
Python
|
fonts/diamonstealth64_8x14.py
|
ccccmagicboy/st7735_mpy
|
b15f1bde69fbe6e0eb4931c57e71c136d8e7f024
|
[
"MIT"
] | 6
|
2020-07-11T16:59:19.000Z
|
2021-07-16T19:32:49.000Z
|
ports/esp32/user_modules/st7735_mpy/fonts/diamonstealth64_8x14.py
|
d4niele/micropython
|
a1f7b37d392bf46b28045ce215ae899fda8d8c38
|
[
"MIT"
] | 1
|
2020-04-14T03:14:45.000Z
|
2020-04-14T03:14:45.000Z
|
fonts/diamonstealth64_8x14.py
|
ccccmagicboy/st7735_mpy
|
b15f1bde69fbe6e0eb4931c57e71c136d8e7f024
|
[
"MIT"
] | null | null | null |
"""converted from ..\fonts\DiamonStealth64_8x14.bin """
WIDTH = 8
HEIGHT = 14
FIRST = 0x20
LAST = 0x7f
_FONT =\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x18\x3c\x3c\x3c\x18\x18\x00\x18\x18\x00\x00\x00\x00'\
b'\x66\x66\x66\x24\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x6c\x6c\xfe\x6c\x6c\x6c\xfe\x6c\x6c\x00\x00\x00\x18'\
b'\x18\x7c\xc6\xc2\xc0\x7c\x06\x86\xc6\x7c\x18\x18\x00\x00'\
b'\x00\x00\x00\xc2\xc6\x0c\x18\x30\x66\xc6\x00\x00\x00\x00'\
b'\x00\x38\x6c\x6c\x38\x76\xdc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x30\x30\x30\x60\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x0c\x18\x30\x30\x30\x30\x30\x18\x0c\x00\x00\x00\x00'\
b'\x00\x30\x18\x0c\x0c\x0c\x0c\x0c\x18\x30\x00\x00\x00\x00'\
b'\x00\x00\x00\x66\x3c\xff\x3c\x66\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x18\x18\x7e\x18\x18\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x18\x18\x18\x30\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xfe\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x02\x06\x0c\x18\x30\x60\xc0\x80\x00\x00\x00\x00\x00'\
b'\x00\x7c\xc6\xce\xde\xf6\xe6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x18\x38\x78\x18\x18\x18\x18\x18\x7e\x00\x00\x00\x00'\
b'\x00\x7c\xc6\x06\x0c\x18\x30\x60\xc6\xfe\x00\x00\x00\x00'\
b'\x00\x7c\xc6\x06\x06\x3c\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x0c\x1c\x3c\x6c\xcc\xfe\x0c\x0c\x1e\x00\x00\x00\x00'\
b'\x00\xfe\xc0\xc0\xfc\x0e\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x38\x60\xc0\xc0\xfc\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\xfe\xc6\x06\x0c\x18\x30\x30\x30\x30\x00\x00\x00\x00'\
b'\x00\x7c\xc6\xc6\xc6\x7c\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x7c\xc6\xc6\xc6\x7e\x06\x06\x0c\x78\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x00\x00\x00\x18\x18\x00\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x00\x00\x00\x18\x18\x30\x00\x00\x00\x00'\
b'\x00\x06\x0c\x18\x30\x60\x30\x18\x0c\x06\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\xfe\x00\x00\xfe\x00\x00\x00\x00\x00\x00'\
b'\x00\x60\x30\x18\x0c\x06\x0c\x18\x30\x60\x00\x00\x00\x00'\
b'\x00\x7c\xc6\xc6\x0c\x18\x18\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x7c\xc6\xc6\xde\xde\xde\xdc\xc0\x7c\x00\x00\x00\x00'\
b'\x00\x10\x38\x6c\xc6\xc6\xfe\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\xfc\x66\x66\x66\x7c\x66\x66\x66\xfc\x00\x00\x00\x00'\
b'\x00\x3c\x66\xc2\xc0\xc0\xc0\xc2\x66\x3c\x00\x00\x00\x00'\
b'\x00\xf8\x6c\x66\x66\x66\x66\x66\x6c\xf8\x00\x00\x00\x00'\
b'\x00\xfe\x66\x62\x68\x78\x68\x62\x66\xfe\x00\x00\x00\x00'\
b'\x00\xfe\x66\x62\x68\x78\x68\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x3c\x66\xc2\xc0\xc0\xde\xc6\x66\x3a\x00\x00\x00\x00'\
b'\x00\xc6\xc6\xc6\xc6\xfe\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x3c\x18\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x1e\x0c\x0c\x0c\x0c\x0c\xcc\xcc\x78\x00\x00\x00\x00'\
b'\x00\xe6\x66\x6c\x6c\x78\x6c\x6c\x66\xe6\x00\x00\x00\x00'\
b'\x00\xf0\x60\x60\x60\x60\x60\x62\x66\xfe\x00\x00\x00\x00'\
b'\x00\xc6\xee\xfe\xfe\xd6\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\xc6\xe6\xf6\xfe\xde\xce\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x38\x6c\xc6\xc6\xc6\xc6\xc6\x6c\x38\x00\x00\x00\x00'\
b'\x00\xfc\x66\x66\x66\x7c\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x7c\xc6\xc6\xc6\xc6\xd6\xde\x7c\x0c\x0e\x00\x00\x00'\
b'\x00\xfc\x66\x66\x66\x7c\x6c\x66\x66\xe6\x00\x00\x00\x00'\
b'\x00\x7c\xc6\xc6\x60\x38\x0c\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x7e\x7e\x5a\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\xc6\xc6\xc6\xc6\xc6\xc6\x6c\x38\x10\x00\x00\x00\x00'\
b'\x00\xc6\xc6\xc6\xc6\xd6\xd6\xfe\x7c\x6c\x00\x00\x00\x00'\
b'\x00\xc6\xc6\x6c\x38\x38\x38\x6c\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x66\x66\x66\x66\x3c\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\xfe\xc6\x8c\x18\x30\x60\xc2\xc6\xfe\x00\x00\x00\x00'\
b'\x00\x3c\x30\x30\x30\x30\x30\x30\x30\x3c\x00\x00\x00\x00'\
b'\x00\x80\xc0\xe0\x70\x38\x1c\x0e\x06\x02\x00\x00\x00\x00'\
b'\x00\x3c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x3c\x00\x00\x00\x10'\
b'\x38\x6c\xc6\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\x00\x30'\
b'\x30\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x78\x0c\x7c\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\xe0\x60\x60\x78\x6c\x66\x66\x66\xdc\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x7c\xc6\xc0\xc0\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x1c\x0c\x0c\x3c\x6c\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x7c\xc6\xfe\xc0\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x38\x6c\x64\x60\xf0\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x76\xcc\xcc\xcc\x7c\x0c\xcc\x78\x00\x00'\
b'\x00\xe0\x60\x60\x6c\x76\x66\x66\x66\xe6\x00\x00\x00\x00'\
b'\x00\x18\x18\x00\x38\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x06\x06\x00\x0e\x06\x06\x06\x06\x66\x66\x3c\x00\x00'\
b'\x00\xe0\x60\x60\x66\x6c\x78\x6c\x66\xe6\x00\x00\x00\x00'\
b'\x00\x38\x18\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\xec\xfe\xd6\xd6\xd6\xd6\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\xdc\x66\x66\x66\x66\x66\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x7c\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\xdc\x66\x66\x66\x7c\x60\x60\xf0\x00\x00'\
b'\x00\x00\x00\x00\x76\xcc\xcc\xcc\x7c\x0c\x0c\x1e\x00\x00'\
b'\x00\x00\x00\x00\xdc\x76\x62\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x7c\xc6\x70\x1c\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x10\x30\x30\xfc\x30\x30\x30\x36\x1c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\xcc\xcc\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x66\x66\x66\x66\x3c\x18\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\xc6\xc6\xd6\xd6\xfe\x6c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\xc6\x6c\x38\x38\x6c\xc6\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\xc6\xc6\xc6\xc6\x7e\x06\x0c\xf8\x00\x00'\
b'\x00\x00\x00\x00\xfe\xcc\x18\x30\x66\xfe\x00\x00\x00\x00'\
b'\x00\x0e\x18\x18\x18\x70\x18\x18\x18\x0e\x00\x00\x00\x00'\
b'\x00\x18\x18\x18\x18\x00\x18\x18\x18\x18\x00\x00\x00\x00'\
b'\x00\x70\x18\x18\x18\x0e\x18\x18\x18\x70\x00\x00\x00\x00'\
b'\x00\x76\xdc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x10\x38\x6c\xc6\xc6\xfe\x00\x00\x00\x00\x00'\
FONT = memoryview(_FONT)
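# Usage sketch (not part of the original file): each glyph is HEIGHT
# consecutive bytes, one byte per 8-pixel row, so a character can be
# rendered to the console like this.
def render(ch):
    """Print one glyph, '#' for set bits and '.' for clear bits."""
    code = ord(ch)
    if not FIRST <= code <= LAST:
        code = ord('?')                   # fall back for unmapped characters
    offset = (code - FIRST) * HEIGHT      # WIDTH == 8, so one byte per row
    for row in FONT[offset:offset + HEIGHT]:
        print(''.join('#' if row & (0x80 >> bit) else '.' for bit in range(WIDTH)))

if __name__ == "__main__":
    render('A')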
| 57.085714
| 60
| 0.704705
| 1,458
| 5,994
| 2.895062
| 0.044582
| 0.599858
| 0.63113
| 0.508884
| 0.820185
| 0.749112
| 0.704335
| 0.630893
| 0.548448
| 0.41104
| 0
| 0.377808
| 0.019686
| 5,994
| 104
| 61
| 57.634615
| 0.340538
| 0.007841
| 0
| 0
| 0
| 0.941176
| 0.905203
| 0.905203
| 0
| 1
| 0.001347
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
510ef8a7510ae1de9af9073513790cd07023ac0b
| 26,324
|
py
|
Python
|
wikidata_research/dictionary/dictionary_evaluation.py
|
sjuenger/WikiMETA
|
13ed293b4bda8ff0fc10b532907ca35c24a12616
|
[
"MIT"
] | null | null | null |
wikidata_research/dictionary/dictionary_evaluation.py
|
sjuenger/WikiMETA
|
13ed293b4bda8ff0fc10b532907ca35c24a12616
|
[
"MIT"
] | null | null | null |
wikidata_research/dictionary/dictionary_evaluation.py
|
sjuenger/WikiMETA
|
13ed293b4bda8ff0fc10b532907ca35c24a12616
|
[
"MIT"
] | null | null | null |
# module to evaluate the property dictionary
import json
# global variable for the path to the dictionary
path_to_json_dictionary = "data/property_dictionary.json"
# behaviour of the 'recommended' parameter:
#
# recommended == True
# query only those references or qualifiers that are intended by Wikidata
# .. for references: properties that are a facet of "Wikipedia:Citing sources"
# .. for qualifiers: properties that are a facet of "restrictive qualifier"
# ,"non-restrictive qualifier"
# ,"Wikidata property used as \"depicts\" (P180) qualifier on Commons"
# ,"Wikidata qualifier"
#
# recommended == False
# query only those references or qualifiers that are NOT intended by Wikidata,
# i.e. those that do not fulfil the above-mentioned requirements
# BUT are used at least once as a reference / qualifier in Wikidata
#
# recommended == None
# query every property available to the mode
def get_top_x_metadata(x, mode, recommended = None):
with open(path_to_json_dictionary, "r") as dict_data:
property_dictionary = json.load(dict_data)
result_dictionary = {}
result_dictionary["properties"] = {}
result_dictionary["total_usages_of_" + mode] = 0
result_dictionary["total_unique_properties"] = 0
for PID in property_dictionary:
# check whether the property is / is not a reference/qualifier recommended by Wikidata
recommended_bool = False
if recommended == True:
if mode == "reference":
recommended_bool = bool(property_dictionary[PID]["is_reference"])
elif mode == "qualifier":
recommended_bool = property_dictionary[PID]["qualifier_class"] != []
else:
recommended_bool = False
elif recommended == False:
# --> used at least once as a reference/qualifier, but not recommended
if mode == "reference" and int(property_dictionary[PID][mode + "_no"]) > 0\
and not bool(property_dictionary[PID]["is_reference"]):
recommended_bool = True
# --> used at least once as a reference/qualifier, but not recommended
elif mode == "qualifier" and int(property_dictionary[PID][mode + "_no"]) > 0\
and property_dictionary[PID]["qualifier_class"] == []:
recommended_bool = True
else:
recommended_bool = False
elif recommended is None:
# just exclude those that either aren't a recommended qualifier/reference property
# .. and are never used as a reference / qualifier
if (mode == "reference" and (int(property_dictionary[PID][mode + "_no"]) > 0\
or bool(property_dictionary[PID]["is_reference"]))):
recommended_bool = True
elif (mode == "qualifier" and (int(property_dictionary[PID][mode + "_no"]) > 0\
or property_dictionary[PID]["qualifier_class"] != [])):
recommended_bool = True
else:
recommended_bool = False
if recommended_bool:
result_dictionary["total_usages_of_" + mode] += \
int(property_dictionary[PID][mode + "_no"])
result_dictionary["total_unique_properties"] += 1
# check whether the current property is used more often than an entry in the result dictionary and, if so, swap
# or, if the result dictionary does not yet hold 'x' entries, just add the property
if len(result_dictionary["properties"]) < x:
result_dictionary["properties"][PID] = property_dictionary[PID]
else:
# no need to check for (non-) recommended properties here (only (non-) recommended properties
# can be added to this dictionary)
for result_PID in result_dictionary["properties"]:
if PID != result_PID \
and (int(property_dictionary[PID][mode + "_no"]) >
int(result_dictionary["properties"][result_PID][mode + "_no"])):
# swap with the smallest entry in the result dictionary
smallest_PID = ""
for test_PID in result_dictionary["properties"]:
if smallest_PID == "" or \
int(result_dictionary["properties"][test_PID][mode + "_no"]) \
< int(result_dictionary["properties"][smallest_PID][mode + "_no"]):
smallest_PID = test_PID
result_dictionary["properties"].pop(smallest_PID)
result_dictionary["properties"][PID] = property_dictionary[PID]
break
# once all the top x entries are created, store them in a .json file
if recommended:
tmp_string = "recommended"
elif recommended is not None:
tmp_string = "non_recommended"
else:
tmp_string = "all"
with open("data/statistical_information/wikidata_research/" + mode + "/" + tmp_string +
"/properties/top_" + str(x) + ".json", "w") \
as result_json:
json.dump(result_dictionary, result_json)
# query the top facets that properties have as qualifier / reference
# .. for recommended / non-recommended / all properties
#
# if a property has no facet -> count it "as" itself
#
def get_top_x_facets_by_metadata(x, mode, recommended = None):
with open(path_to_json_dictionary, "r") as dict_data:
property_dictionary = json.load(dict_data)
facets_dictionary = {}
facets_dictionary["facets"] = {}
# add a counter for the total amount of facets and properties
facets_dictionary["total_facets"] = 0
facets_dictionary["total_properties_without_facets"] = 0
facets_dictionary["total_properties"] = 0
for PID in property_dictionary:
# check whether the property is / is not a reference/qualifier recommended by Wikidata
recommended_bool = True
if recommended == True:
if mode == "reference":
recommended_bool = bool(property_dictionary[PID]["is_reference"])
elif mode == "qualifier":
recommended_bool = property_dictionary[PID]["qualifier_class"] != []
else:
recommended_bool = False
elif recommended == False:
# --> used at least once as a reference/qualifier, but not recommended
if mode == "reference" and int(property_dictionary[PID][mode + "_no"]) > 0\
and not bool(property_dictionary[PID]["is_reference"]):
recommended_bool = True
# --> used at least once as a reference/qualifier, but not recommended
elif mode == "qualifier" and int(property_dictionary[PID][mode + "_no"]) > 0\
and property_dictionary[PID]["qualifier_class"] == []:
recommended_bool = True
else:
recommended_bool = False
elif recommended is None:
# just exclude those that either aren't a recommended qualifier/reference property
# .. and are never used as a reference / qualifier
if mode == "reference" and (int(property_dictionary[PID][mode + "_no"]) > 0\
or bool(property_dictionary[PID]["is_reference"])):
recommended_bool = True
elif mode == "qualifier" and (int(property_dictionary[PID][mode + "_no"]) > 0\
or property_dictionary[PID]["qualifier_class"] != []):
recommended_bool = True
else:
recommended_bool = False
if recommended_bool:
facets_dictionary["total_properties"] += 1
current_facet_list = property_dictionary[PID]["facet_of"]
# if no facet can be found for the property
# -> count the property with its ID
if len(current_facet_list) == 0:
facets_dictionary["total_properties_without_facets"] += 1
facets_dictionary["facets"][PID] = 1
for facet in current_facet_list:
facets_dictionary["total_facets"] += 1
# add the facet as keys to a dictionary, if it wasn't added before
if facet not in facets_dictionary["facets"]:
facets_dictionary["facets"][facet] = 1
else:
facets_dictionary["facets"][facet] += 1
# store the facet dictionary
if recommended:
tmp_string = "recommended"
elif recommended is not None:
tmp_string = "non_recommended"
else:
tmp_string = "all"
with open("data/statistical_information/wikidata_research/" + mode + "/" + tmp_string +
"/facets/facets.json", "w") \
as result_json:
json.dump(facets_dictionary, result_json)
# extract the top x facets by usages
result_facets_dictionary = {"facets" : {}}
result_facets_dictionary["total_facets"] = facets_dictionary["total_facets"]
result_facets_dictionary["total_properties"] = facets_dictionary["total_properties"]
result_facets_dictionary["total_properties_without_facet"] =\
facets_dictionary["total_properties_without_facets"]
result_facets_dictionary["total_unique_facets"] = len(facets_dictionary["facets"])
for facet in facets_dictionary["facets"]:
if len(result_facets_dictionary["facets"]) < x:
result_facets_dictionary["facets"][facet] = facets_dictionary["facets"][facet]
else:
# find the smallest entry in the result list and swap it out if the current one is greater
smallest_ID = ""
for facet_ID in result_facets_dictionary["facets"]:
if smallest_ID == "" or \
int(result_facets_dictionary["facets"][facet_ID]) \
< int(result_facets_dictionary["facets"][smallest_ID]):
smallest_ID = facet_ID
if facets_dictionary["facets"][facet] > facets_dictionary["facets"][smallest_ID]:
result_facets_dictionary["facets"].pop(smallest_ID)
result_facets_dictionary["facets"][facet] = facets_dictionary["facets"][facet]
# once all the top x entries are created, store them in a .json file
if recommended:
tmp_string = "recommended"
elif recommended is not None:
tmp_string = "non_recommended"
else:
tmp_string = "all"
with open("data/statistical_information/wikidata_research/" + mode + "/" + tmp_string +
"/facets/top_" + str(x) + ".json", "w") \
as result_json:
json.dump(result_facets_dictionary, result_json)
# get the datatypes used for every metadata property
# -> a datatype can e.g. be String, WikibaseItem, etc.
#
def get_datatypes_by_metadata(mode, recommended = None):
with open(path_to_json_dictionary, "r") as dict_data:
property_dictionary = json.load(dict_data)
datatypes_dictionary = {}
datatypes_dictionary["datatypes"] = {}
# add a counter for the total amount of datatypes and properties
datatypes_dictionary["total_properties"] = 0
for PID in property_dictionary:
# check whether the property is / is not a reference/qualifier recommended by Wikidata
recommended_bool = True
if recommended == True:
if mode == "reference":
recommended_bool = bool(property_dictionary[PID]["is_reference"])
elif mode == "qualifier":
recommended_bool = property_dictionary[PID]["qualifier_class"] != []
else:
recommended_bool = False
elif recommended == False:
# --> used at least once as a reference/qualifier, but not recommended
if mode == "reference" and int(property_dictionary[PID][mode + "_no"]) > 0\
and not bool(property_dictionary[PID]["is_reference"]):
recommended_bool = True
# --> used at least once as a reference/qualifier, but not recommended
elif mode == "qualifier" and int(property_dictionary[PID][mode + "_no"]) > 0\
and property_dictionary[PID]["qualifier_class"] == []:
recommended_bool = True
else:
recommended_bool = False
elif recommended is None:
# just exclude those that either aren't a recommended qualifier/reference property
# .. and are never used as a reference / qualifier
if mode == "reference" and (int(property_dictionary[PID][mode + "_no"]) > 0\
or bool(property_dictionary[PID]["is_reference"])):
recommended_bool = True
elif mode == "qualifier" and (int(property_dictionary[PID][mode + "_no"]) > 0\
or property_dictionary[PID]["qualifier_class"] != []):
recommended_bool = True
else:
recommended_bool = False
if recommended_bool:
datatypes_dictionary["total_properties"] += 1
current_datatype = property_dictionary[PID]["datatype"]
# add the datatype as a key to the dictionary, if it wasn't added before
if current_datatype not in datatypes_dictionary["datatypes"]:
datatypes_dictionary["datatypes"][current_datatype] = 1
else:
datatypes_dictionary["datatypes"][current_datatype] += 1
datatypes_dictionary["total_unique_datatypes"] = len(datatypes_dictionary["datatypes"])
# once all the top x entries are created, store them in a .json file
if recommended:
tmp_string = "recommended"
elif recommended is not None:
tmp_string = "non_recommended"
else:
tmp_string = "all"
with open("data/statistical_information/wikidata_research/" + mode + "/" + tmp_string +
"/datatypes/datatypes.json", "w") \
as result_json:
json.dump(datatypes_dictionary, result_json)
# get the accumulated facets by occurrences of a (recommended) property in Wikidata
# e.g. if "Series Ordinal" occurs as a reference 5 million times in Wikidata, count each of its facets 5 million times
#
# if a property has no facet -> count it "as" itself
#
def get_top_x_facets_by_accumulated_properties(x, mode, recommended = None):
with open(path_to_json_dictionary, "r") as dict_data:
property_dictionary = json.load(dict_data)
facets_dictionary = {}
facets_dictionary["facets"] = {}
# add a counter for the total amount of facets and properties
facets_dictionary["total_accumulated_facets"] = 0
facets_dictionary["total_accumulated_properties_without_facets"] = 0
facets_dictionary["total_accumulated_properties"] = 0
for PID in property_dictionary:
# check whether the property is / is not a reference/qualifier recommended by Wikidata
recommended_bool = True
if recommended == True:
if mode == "reference":
recommended_bool = bool(property_dictionary[PID]["is_reference"])
elif mode == "qualifier":
recommended_bool = property_dictionary[PID]["qualifier_class"] != []
else:
recommended_bool = False
elif recommended == False:
# --> used at least once as a reference/qualifier, but not recommended
if mode == "reference" and int(property_dictionary[PID][mode + "_no"]) > 0\
and not bool(property_dictionary[PID]["is_reference"]):
recommended_bool = True
# --> used at least once as a reference/qualifier, but not recommended
elif mode == "qualifier" and int(property_dictionary[PID][mode + "_no"]) > 0\
and property_dictionary[PID]["qualifier_class"] == []:
recommended_bool = True
else:
recommended_bool = False
elif recommended is None:
# just exclude those that either aren't a recommended qualifier/reference property
# .. and are never used as a reference / qualifier
if mode == "reference" and (int(property_dictionary[PID][mode + "_no"]) > 0\
or bool(property_dictionary[PID]["is_reference"])):
recommended_bool = True
elif mode == "qualifier" and (int(property_dictionary[PID][mode + "_no"]) > 0\
or property_dictionary[PID]["qualifier_class"] != []):
recommended_bool = True
else:
recommended_bool = False
if recommended_bool:
facets_dictionary["total_accumulated_properties"] += int(property_dictionary[PID][mode + "_no"])
current_facet_list = property_dictionary[PID]["facet_of"]
facets_dictionary["total_accumulated_facets"] += \
len(current_facet_list) * int(property_dictionary[PID][mode + "_no"])
# if no facet can be found for the property
# -> count the property with its ID
if len(current_facet_list) == 0:
facets_dictionary["total_accumulated_properties_without_facets"] += \
int(property_dictionary[PID][mode + "_no"])
facets_dictionary["facets"][PID] = int(property_dictionary[PID][mode + "_no"])
for facet in current_facet_list:
# add the facet as keys to a dictionary, if it wasn't added before
if facet not in facets_dictionary["facets"]:
facets_dictionary["facets"][facet] = int(property_dictionary[PID][mode + "_no"])
else:
facets_dictionary["facets"][facet] += int(property_dictionary[PID][mode + "_no"])
# extract the top x facets by usages
result_facets_dictionary = {"facets": {}}
result_facets_dictionary["total_accumulated_facets"] = facets_dictionary["total_accumulated_facets"]
result_facets_dictionary["total_accumulated_properties"] = facets_dictionary["total_accumulated_properties"]
result_facets_dictionary["total_accumulated_properties_without_facets"] =\
facets_dictionary["total_accumulated_properties_without_facets"]
# store the dictionary of accumulated facets
if recommended is True:
tmp_string = "recommended"
elif recommended is False:
tmp_string = "non_recommended"
else:
tmp_string = "all"
with open("data/statistical_information/wikidata_research/" + mode + "/" + tmp_string +
"/accumulated_facets/accumulated_facets.json", "w") \
as result_json:
json.dump(facets_dictionary, result_json)
for facet in facets_dictionary["facets"]:
if len(result_facets_dictionary["facets"]) < x:
result_facets_dictionary["facets"][facet] = facets_dictionary["facets"][facet]
else:
# find the smallest entry in the result list and replace it if the current facet count is greater
smallest_ID = ""
for facet_ID in result_facets_dictionary["facets"]:
if smallest_ID == "" or \
int(result_facets_dictionary["facets"][facet_ID]) \
< int(result_facets_dictionary["facets"][smallest_ID]):
smallest_ID = facet_ID
if facets_dictionary["facets"][facet] > facets_dictionary["facets"][smallest_ID]:
result_facets_dictionary["facets"].pop(smallest_ID)
result_facets_dictionary["facets"][facet] = facets_dictionary["facets"][facet]
# once all the top x entries are created, store them in a .json file
if recommended is True:
tmp_string = "recommended"
elif recommended is False:
tmp_string = "non_recommended"
else:
tmp_string = "all"
with open("data/statistical_information/wikidata_research/" + mode + "/" + tmp_string +
"/accumulated_facets/top_" + str(x) + ".json", "w") \
as result_json:
json.dump(result_facets_dictionary, result_json)
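# Note (editor's sketch): the "replace the smallest" loop above is an O(n*x)
# top-x selection. An equivalent standard-library alternative, assuming the
# same facets_dictionary shape, would be:
#
# import heapq
# top_x = dict(heapq.nlargest(x, facets_dictionary["facets"].items(),
#                             key=lambda item: item[1]))
#
# (tie-breaking may order equal counts differently; this is illustrative,
# not the code actually used here)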
# get the accumulated datatypes by occurrences of a (recommended) property in Wikidata
# so, e.g. if "Series Ordinal" occurs as a reference 5 million times in Wikidata, count its datatype 5 million times
def get_datatypes_by_accumulated_properties(mode, recommended = None):
with open(path_to_json_dictionary, "r") as dict_data:
property_dictionary = json.load(dict_data)
datatypes_dictionary = {}
datatypes_dictionary["datatypes"] = {}
# add counters for the total number of datatypes and properties
datatypes_dictionary["total_properties"] = 0
datatypes_dictionary["total_accumulated_datatypes"] = 0
for PID in property_dictionary:
# check whether the property is / is not a reference/qualifier recommended by Wikidata
recommended_bool = True
if recommended == True:
if mode == "reference":
recommended_bool = bool(property_dictionary[PID]["is_reference"])
elif mode == "qualifier":
recommended_bool = property_dictionary[PID]["qualifier_class"] != []
else:
recommended_bool = False
elif recommended == False:
# --> properties that are used at least once as a reference, but are not recommended
if mode == "reference" and int(property_dictionary[PID][mode + "_no"]) > 0\
and not bool(property_dictionary[PID]["is_reference"]):
recommended_bool = True
# --> properties that are used at least once as a qualifier, but are not recommended
elif mode == "qualifier" and int(property_dictionary[PID][mode + "_no"]) > 0\
and property_dictionary[PID]["qualifier_class"] == []:
recommended_bool = True
else:
recommended_bool = False
elif recommended is None:
# exclude only those properties that are neither a recommended qualifier/reference property
# nor ever used as a reference/qualifier
if mode == "reference" and (int(property_dictionary[PID][mode + "_no"]) > 0\
or bool(property_dictionary[PID]["is_reference"])):
recommended_bool = True
elif mode == "qualifier" and (int(property_dictionary[PID][mode + "_no"]) > 0\
or property_dictionary[PID]["qualifier_class"] != []):
recommended_bool = True
else:
recommended_bool = False
if recommended_bool:
datatypes_dictionary["total_properties"] += 1
datatypes_dictionary["total_accumulated_datatypes"] += int(property_dictionary[PID][mode + "_no"])
current_datatype = property_dictionary[PID]["datatype"]
# add the datatype as a key to the dictionary, if it wasn't added before
if current_datatype not in datatypes_dictionary["datatypes"]:
datatypes_dictionary["datatypes"][current_datatype] = int(property_dictionary[PID][mode + "_no"])
else:
datatypes_dictionary["datatypes"][current_datatype] += int(property_dictionary[PID][mode + "_no"])
datatypes_dictionary["total_unique_datatypes"] = len(datatypes_dictionary["datatypes"])
# once the datatypes are accumulated, store them in a .json file
if recommended is True:
tmp_string = "recommended"
elif recommended is False:
tmp_string = "non_recommended"
else:
tmp_string = "all"
with open("data/statistical_information/wikidata_research/" + mode + "/" + tmp_string +
"/accumulated_datatypes/accumulated_datatypes.json", "w") \
as result_json:
json.dump(datatypes_dictionary, result_json)
# get the accumulated facets by occurrences of a (recommended) property in Wikidata
# TODO: stub, not yet implemented
def get_top_x_metadata_recommended_by_facet(x, mode):
return
# get all datatypes that are available inside the property dictionary
def get_all_datatypes_from_property_dictionary():
with open(path_to_json_dictionary, "r") as dict_data:
property_dictionary = json.load(dict_data)
result_dict = {}
for PID in property_dictionary:
tmp_datatype = property_dictionary[PID]["datatype"]
if tmp_datatype not in result_dict:
result_dict[tmp_datatype] = 0
return result_dict
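# Usage sketch (editor's addition, hypothetical call sequence): assuming
# path_to_json_dictionary points at the property dictionary built earlier and
# the output directories under data/statistical_information/ already exist,
# the statistics above could be generated for recommended reference properties:
#
# if __name__ == "__main__":
#     get_top_x_facets_by_accumulated_properties(10, "reference", recommended=True)
#     get_datatypes_by_accumulated_properties("reference", recommended=True)
#     print(get_all_datatypes_from_property_dictionary())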
| 48.83859
| 128
| 0.584334
| 2,761
| 26,324
| 5.352409
| 0.063021
| 0.102314
| 0.09663
| 0.050345
| 0.875694
| 0.839153
| 0.803762
| 0.766816
| 0.74435
| 0.737989
| 0
| 0.003615
| 0.327534
| 26,324
| 538
| 129
| 48.929368
| 0.831206
| 0.201793
| 0
| 0.769663
| 0
| 0
| 0.137989
| 0.054775
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019663
| false
| 0
| 0.002809
| 0.002809
| 0.02809
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
510fcdad15a6c8447153e32692aae7694f685210
| 2,456
|
py
|
Python
|
oxe-api/test/resource/user/test_update_user.py
|
CybersecurityLuxembourg/openxeco
|
8d4e5578bde6a07f5d6d569b16b4de224abf7bf0
|
[
"BSD-2-Clause"
] | null | null | null |
oxe-api/test/resource/user/test_update_user.py
|
CybersecurityLuxembourg/openxeco
|
8d4e5578bde6a07f5d6d569b16b4de224abf7bf0
|
[
"BSD-2-Clause"
] | null | null | null |
oxe-api/test/resource/user/test_update_user.py
|
CybersecurityLuxembourg/openxeco
|
8d4e5578bde6a07f5d6d569b16b4de224abf7bf0
|
[
"BSD-2-Clause"
] | null | null | null |
from test.BaseCase import BaseCase
class TestUpdateUser(BaseCase):
@BaseCase.login
@BaseCase.grant_access("/user/update_user")
def test_ok(self, token):
self.db.insert({
"id": 14,
"email": "myemail@test.lu",
"password": "MySecret2!",
"is_admin": 0,
}, self.db.tables["User"])
payload = {
"id": 14,
"is_admin": True
}
response = self.application.post('/user/update_user',
headers=self.get_standard_post_header(token),
json=payload)
users = self.db.get(self.db.tables["User"], {"id": 14})
self.assertEqual(200, response.status_code)
self.assertEqual(len(users), 1)
self.assertEqual(users[0].is_admin, 1)
@BaseCase.login
@BaseCase.grant_access("/user/update_user")
def test_ko_password_param(self, token):
self.db.insert({
"id": 2,
"email": "myemail@test.lu",
"password": "MySecret2!",
"is_admin": 0,
}, self.db.tables["User"])
payload = {
"id": 2,
"is_admin": True,
"password": "new pass"
}
response = self.application.post('/user/update_user',
headers=self.get_standard_post_header(token),
json=payload)
users = self.db.get(self.db.tables["User"], {"id": 2})
self.assertEqual("422 UNPROCESSABLE ENTITY", response.status)
self.assertEqual(users[0].is_admin, 0)
@BaseCase.login
@BaseCase.grant_access("/user/update_user")
def test_ko_email_param(self, token):
self.db.insert({
"id": 2,
"email": "myemail@test.lu",
"password": "MySecret2!",
"is_admin": 0,
}, self.db.tables["User"])
payload = {
"id": 2,
"is_admin": True,
"email": "myemail@test.lu"
}
response = self.application.post('/user/update_user',
headers=self.get_standard_post_header(token),
json=payload)
users = self.db.get(self.db.tables["User"], {"id": 2})
self.assertEqual("422 UNPROCESSABLE ENTITY", response.status)
self.assertEqual(users[0].is_admin, 0)
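# Editor's sketch (not part of the original suite): a symmetric check that
# admin rights can also be revoked. It reuses only helpers already used above;
# the expected 200/flag behaviour mirrors test_ok and is an assumption, not
# something confirmed by the API documentation.
@BaseCase.login
@BaseCase.grant_access("/user/update_user")
def test_ok_revoke_admin(self, token):
self.db.insert({
"id": 15,
"email": "myemail@test.lu",
"password": "MySecret2!",
"is_admin": 1,
}, self.db.tables["User"])
payload = {
"id": 15,
"is_admin": False
}
response = self.application.post('/user/update_user',
headers=self.get_standard_post_header(token),
json=payload)
users = self.db.get(self.db.tables["User"], {"id": 15})
self.assertEqual(200, response.status_code)
self.assertEqual(users[0].is_admin, 0)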
| 30.7
| 86
| 0.506107
| 254
| 2,456
| 4.755906
| 0.208661
| 0.059603
| 0.069536
| 0.07947
| 0.84851
| 0.84851
| 0.806291
| 0.806291
| 0.806291
| 0.806291
| 0
| 0.021438
| 0.354235
| 2,456
| 79
| 87
| 31.088608
| 0.740227
| 0
| 0
| 0.758065
| 0
| 0
| 0.158795
| 0
| 0
| 0
| 0
| 0
| 0.112903
| 1
| 0.048387
| false
| 0.080645
| 0.016129
| 0
| 0.080645
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
515fe6ec967d9adf1eabbb5dad94f9dd79c8c9e6
| 90
|
py
|
Python
|
tests/test_wb2k.py
|
reillysiemens/wb2k
|
54edaa1afe4904a78746356555468d7c04685b28
|
[
"ISC"
] | 6
|
2016-06-09T04:06:29.000Z
|
2019-12-22T15:29:54.000Z
|
tests/test_wb2k.py
|
reillysiemens/wb2k
|
54edaa1afe4904a78746356555468d7c04685b28
|
[
"ISC"
] | 19
|
2016-06-03T22:00:13.000Z
|
2019-09-25T09:03:16.000Z
|
tests/test_wb2k.py
|
reillysiemens/wb2k
|
54edaa1afe4904a78746356555468d7c04685b28
|
[
"ISC"
] | 4
|
2016-10-06T20:45:44.000Z
|
2017-10-28T22:01:20.000Z
|
# TODO: Write actual tests. This just makes pytest-cov pick up on the module.
import wb2k
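# Editor's sketch: until real tests exist, a minimal smoke test keeps the
# import meaningful without assuming anything about wb2k's API surface.
def test_module_imports():
assert wb2k is not None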
| 30
| 77
| 0.766667
| 16
| 90
| 4.3125
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013514
| 0.177778
| 90
| 2
| 78
| 45
| 0.918919
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
51666c4b9d618cca914f038ac201e94c03f87f6e
| 34,907
|
py
|
Python
|
post_optimization_studies/mad_analyses/four_cuts_eff_flow_chart/Output/Histos/MadAnalysis5job_0/selection_4.py
|
sheride/axion_pheno
|
7d3fc08f5ae5b17a3500eba19a2e43f87f076ce5
|
[
"MIT"
] | null | null | null |
post_optimization_studies/mad_analyses/four_cuts_eff_flow_chart/Output/Histos/MadAnalysis5job_0/selection_4.py
|
sheride/axion_pheno
|
7d3fc08f5ae5b17a3500eba19a2e43f87f076ce5
|
[
"MIT"
] | null | null | null |
post_optimization_studies/mad_analyses/four_cuts_eff_flow_chart/Output/Histos/MadAnalysis5job_0/selection_4.py
|
sheride/axion_pheno
|
7d3fc08f5ae5b17a3500eba19a2e43f87f076ce5
|
[
"MIT"
] | null | null | null |
def selection_4():
# Library import
import numpy
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# Library version
matplotlib_version = matplotlib.__version__
numpy_version = numpy.__version__
# Histo binning
xBinning = numpy.linspace(-8.0,8.0,161,endpoint=True)
# Creating data sequence: middle of each bin
xData = numpy.array([-7.95,-7.85,-7.75,-7.65,-7.55,-7.45,-7.35,-7.25,-7.15,-7.05,-6.95,-6.85,-6.75,-6.65,-6.55,-6.45,-6.35,-6.25,-6.15,-6.05,-5.95,-5.85,-5.75,-5.65,-5.55,-5.45,-5.35,-5.25,-5.15,-5.05,-4.95,-4.85,-4.75,-4.65,-4.55,-4.45,-4.35,-4.25,-4.15,-4.05,-3.95,-3.85,-3.75,-3.65,-3.55,-3.45,-3.35,-3.25,-3.15,-3.05,-2.95,-2.85,-2.75,-2.65,-2.55,-2.45,-2.35,-2.25,-2.15,-2.05,-1.95,-1.85,-1.75,-1.65,-1.55,-1.45,-1.35,-1.25,-1.15,-1.05,-0.95,-0.85,-0.75,-0.65,-0.55,-0.45,-0.35,-0.25,-0.15,-0.05,0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75,0.85,0.95,1.05,1.15,1.25,1.35,1.45,1.55,1.65,1.75,1.85,1.95,2.05,2.15,2.25,2.35,2.45,2.55,2.65,2.75,2.85,2.95,3.05,3.15,3.25,3.35,3.45,3.55,3.65,3.75,3.85,3.95,4.05,4.15,4.25,4.35,4.45,4.55,4.65,4.75,4.85,4.95,5.05,5.15,5.25,5.35,5.45,5.55,5.65,5.75,5.85,5.95,6.05,6.15,6.25,6.35,6.45,6.55,6.65,6.75,6.85,6.95,7.05,7.15,7.25,7.35,7.45,7.55,7.65,7.75,7.85,7.95])
# Creating weights for histo: y5_ETA_0
y5_ETA_0_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.93650155414,2.16577055928,2.86585876426,3.2752672163,3.82387435003,4.39295226636,5.03572372006,5.70305915288,6.52597045348,7.36934973668,8.43790882849,9.65385179504,10.7346868764,11.6640460865,13.5022885242,14.5462796369,14.8819953516,16.2903621546,16.7284257823,17.4162331977,17.174681403,17.7683248984,17.7069129506,17.3384452638,17.0764254865,15.6557786939,15.2299910558,13.7847802841,12.7407891714,11.2996703962,9.60881583332,8.6794566232,7.11142195589,5.67030718072,4.68772801583,3.77883958831,2.8331059921,2.13711178364,1.64991579771,1.08902627442,0.888416044921,0.65505344326,0.405314455517,0.302962222508,0.159669264295,0.131010688652,0.0614112678056,0.0163763340815,0.0286585876426,0.00409408452037,0.00409408452037,0.0122822535611,0.065505344326,0.0736934973668,0.10644618953,0.180139686896,0.282491799906,0.454443213762,0.749217363229,0.822910900595,1.07674428486,1.54346948818,2.12892379059,2.70618969997,3.63964050661,4.68772801583,5.76856309721,7.32431777495,8.39696486329,9.67022778112,11.5371301944,12.6097772828,13.8789442041,14.9556872889,16.2330422033,16.6301698658,17.5267731037,17.891148794,17.9320887592,17.8543008254,17.6250330202,16.585133904,15.979210419,15.3159669827,14.2105639222,12.8513290775,12.1389576829,10.7060309008,9.67431977765,8.58938869975,7.6559374931,6.73476627602,5.75628310765,4.98659576181,4.66725603323,3.78702758135,3.32849037106,2.87814115382,2.10845340799,2.04294786367,0.00409408452037,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_1
y5_ETA_1_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0121240822392,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0121753353338,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0121313846429,0.012170493784,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_2
y5_ETA_2_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0200482816269,0.010032919325,0.0200940397991,0.0100262832744,0.010040728874,0.0100696696577,0.0100271592661,0.0301165257319,0.0401877986198,0.0,0.0502017229729,0.010040728874,0.0,0.0301145712787,0.0402058597513,0.0,0.0100568562125,0.0401512714171,0.0,0.0301337191358,0.0100702894631,0.0,0.050140155629,0.0300994521571,0.0100355638284,0.0100184158769,0.0401671012489,0.0100262832744,0.0301196784758,0.0,0.0,0.0100367001384,0.0100153623019,0.0,0.0,0.0,0.0100369728528,0.0100609841169,0.0,0.0,0.0100568562125,0.0100602899348,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0100340928234,0.0100568562125,0.0,0.0,0.0100532489446,0.0200646444914,0.0301498340782,0.0200868128673,0.0301214387233,0.0301310828965,0.010045943504,0.0200609297906,0.0200572522781,0.0,0.0301088194839,0.010045943504,0.0,0.0301025759767,0.0200638924608,0.0201178403294,0.0300965762597,0.0100299566548,0.0200777058588,0.0100187051194,0.0301077410223,0.0301692711779,0.0100355638284,0.0100324441408,0.0200798255935,0.0,0.0100369728528,0.0200832055994,0.0,0.0,0.0100696696577,0.0,0.0100187051194,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_3
y5_ETA_3_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0275282721771,0.0274997813219,0.00549710596009,0.0164555265735,0.0110053636951,0.0770293110214,0.0440010037659,0.0550185105048,0.0330098306536,0.0549834910877,0.0880414145324,0.121011383177,0.0934765336867,0.0880213454001,0.0990525836506,0.148391326591,0.170522298147,0.115509109618,0.0880932530198,0.142979039121,0.143099738296,0.137527304026,0.142956573069,0.132021455399,0.131943372662,0.0934995278747,0.0934989997397,0.0935291846896,0.104510168858,0.0825596976154,0.071452814174,0.0495039679626,0.0549952725621,0.0274769699499,0.0275034010784,0.0110008136084,0.0220214688448,0.0275283574913,0.0,0.0110360564673,0.021979583672,0.0219691225352,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0110095197118,0.0,0.00550243199904,0.00551802823363,0.00547487959922,0.0220146884032,0.0275094786942,0.0385276029448,0.0384989861497,0.0220041419523,0.0495313903599,0.0165169689939,0.0550053883798,0.0825226875355,0.104575210722,0.126506425347,0.066013876197,0.104442608196,0.126527185118,0.0990133797788,0.170543342297,0.0825431629255,0.181466759744,0.1538988815,0.20895098952,0.132032830616,0.115526172443,0.0824931525979,0.126454668112,0.104502043703,0.08245756442,0.0825350783965,0.0990397052803,0.0770264265915,0.0385428294849,0.0935220345535,0.0329914596786,0.043969193785,0.0494868645118,0.0274664559996,0.0164761360286,0.0165097701068,0.0110073624832,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_4
y5_ETA_4_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00295952755959,0.00197733276196,0.00197153597409,0.00295929588047,0.00592188256639,0.00888223262692,0.00986622754723,0.0207193880908,0.0266393306452,0.0276408337728,0.028623718397,0.0355146850478,0.0365133262569,0.0562514891552,0.0592211905545,0.0720236262363,0.0769833630669,0.0878251039945,0.100658684083,0.116441122468,0.105610244004,0.12136374254,0.104612220072,0.122368861145,0.107579035503,0.124337372063,0.114461228008,0.0977014885466,0.0799371916409,0.0838926916906,0.0818885871641,0.0473736095304,0.0503348935223,0.0305824370646,0.0296045868517,0.0236809646878,0.025663292179,0.01382357141,0.0108591409111,0.00690936069003,0.0049344204759,0.00296236222172,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00197140370055,0.00197179531041,0.000988009622793,0.00394563925743,0.00493570713674,0.00987045228407,0.00888712273977,0.020722578689,0.0157902465318,0.0246790126701,0.033550310684,0.0444057519442,0.0463809967883,0.0631546850964,0.0799404383551,0.08090190669,0.08585110172,0.103606500224,0.0976781202204,0.0977050960069,0.115473882196,0.132224963753,0.1322358663,0.117436981924,0.104620837893,0.107570056935,0.0986854033011,0.0976784008007,0.0799142241437,0.075986220958,0.0641623289231,0.0661257893969,0.0592049169003,0.0414675156152,0.0375125005685,0.0266518204744,0.0197401189434,0.0167707462239,0.0128311630906,0.0118483786737,0.00395132701983,0.00987041220117,0.00296029153951,0.00395140718562,0.000988172359335,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_5
y5_ETA_5_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000504690405707,0.000503681322579,0.00176459470884,0.000504669999982,0.000755187890129,0.00201616849525,0.00327598677425,0.00529427346382,0.0068047412699,0.00630216825829,0.00806693941664,0.010840665667,0.01310830592,0.0178963013326,0.0183999386429,0.0226806437203,0.025963360783,0.0310093045706,0.035792138535,0.0400765646566,0.0519272497141,0.0415845077543,0.0476444880596,0.0441119769099,0.050664615445,0.0526808211507,0.0473877760315,0.0398268505919,0.0317628720061,0.0352897295695,0.0277249790567,0.0264671781437,0.01915871554,0.0146224307521,0.00958156839025,0.0110892754217,0.00579553410721,0.00529300110682,0.0035290593812,0.00201740124113,0.00075590049007,0.000252358446569,0.0,0.000252130822703,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000251770961733,0.000252017871011,0.0,0.0012607777482,0.00100783957898,0.00226974524365,0.0032772871391,0.00453781321669,0.011097473722,0.0110881991197,0.0126025360114,0.0186502288691,0.0219320576825,0.0284887093436,0.0340287397618,0.0400856872162,0.0451133778823,0.0463772524995,0.0509192869005,0.0516829011549,0.0499042020887,0.0499201265568,0.0456294026682,0.0410958106359,0.0451322231699,0.039827538785,0.0239494277149,0.0272195212356,0.0229396403894,0.0196642173734,0.0146193658922,0.012354134315,0.00907918743253,0.00730944287875,0.00453884950745,0.00478865559789,0.00403269148997,0.00352844480876,0.00252244454586,0.00252372690566,0.00100823288933,0.00126131029763,0.000251614677883,0.000756485454199,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_6
y5_ETA_6_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000283957625274,0.000287128707592,0.000572547709956,0.000861351732727,0.000858235034671,0.00142977823532,0.000286450303915,0.00143123581351,0.000859256738999,0.00314945657622,0.00143529663835,0.00429543394091,0.00544119536808,0.00543670366794,0.00687030779608,0.00572291084221,0.00886473662672,0.0131701286858,0.00973534068191,0.0134554561147,0.0177613000431,0.0174690846075,0.0151850655816,0.0186052458129,0.0208887349921,0.0174626364735,0.0197556129072,0.0174692045728,0.0154570468727,0.0123066885575,0.010887799171,0.00944630632579,0.00601141435195,0.00457853701348,0.00315233774244,0.00228783792074,0.0017190425249,0.000574099460894,0.00085594249811,0.000286764712928,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00085828172116,0.00257774402458,0.00229116695735,0.00515367657338,0.00314700028705,0.00745015599335,0.0103183139748,0.0122993706752,0.0143236348717,0.0151823563656,0.0186132135071,0.0191729715183,0.0237481674986,0.0226402381232,0.0217478463732,0.0211806505145,0.0189065386215,0.0180207549592,0.0117413421635,0.0131746173868,0.0146080025783,0.00887664717991,0.00715153841053,0.00629180320999,0.00514740738762,0.00373118123045,0.0042993318129,0.00286459701196,0.00228898858775,0.00114435383452,0.00257892468291,0.00114002408751,0.00200925553992,0.0,0.000568809991618,0.00057289490948,0.000571732745799,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_7
y5_ETA_7_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.1617996646e-05,8.63041639912e-05,0.000129543836942,0.000129532437545,0.0,0.00021618093038,8.64047469058e-05,0.000237509621227,0.000129647940258,0.000322270298485,0.000410490734636,0.000430353219956,0.000626435839991,0.000475241698333,0.000691605605799,0.000819277594701,0.000970180465002,0.00112314528505,0.00105669727804,0.00114471529107,0.00118825302473,0.0014903026789,0.0017929520587,0.00114463650112,0.00161964099142,0.00144665053218,0.00170707143259,0.00164166529694,0.00123133310612,0.00123126898451,0.000928988408734,0.000496921088276,0.000366997383194,0.000345373062397,0.000172778396927,2.15827549073e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.15983662139e-05,0.0,0.0,6.48950068125e-05,0.000107900405391,0.000280954818599,0.0004105766073,0.000496826372698,0.00094881791124,0.00116613190815,0.00172641059339,0.00185748940943,0.00190084693203,0.00185734859335,0.00172633389892,0.002138216743,0.00196600957321,0.00140282697541,0.00108005933632,0.000926656561499,0.00125301165787,0.000777599387679,0.00108001868406,0.00099358569012,0.000907322010752,0.000712771184686,0.000691494964593,0.000496688909381,0.00010813861926,0.000280705372971,0.000302205809145,0.000365607872139,0.000194619348176,0.000172678819842,0.000151298580251,0.000108110372225,0.000108019428507,8.64090216796e-05,4.32315844076e-05,4.32170417946e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_8
y5_ETA_8_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.84292642893e-05,0.0,2.83973993922e-05,2.83973993922e-05,5.66030748554e-05,2.84292642893e-05,0.000141723376559,0.000113644061894,0.000112773137535,0.000198503757442,0.000141120452911,8.50868331672e-05,0.000142062857986,0.000225435534901,0.000111915784712,0.000255442735234,0.000113583286953,5.67183438192e-05,0.000198632048547,0.000170429981271,0.000198798351831,0.000198741185077,0.000142022856106,0.000113357649033,0.000113596531822,0.0,2.83973993922e-05,2.83973993922e-05,2.83498693196e-05,5.64019963668e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,5.67658887402e-05,2.83684893481e-05,0.000142025796111,5.68978177294e-05,8.45111565312e-05,0.000141783943621,0.000113497477428,0.000198740145682,0.000141909131387,0.000198819139742,0.000142041461286,0.000113259010398,0.000113446992503,2.84292642893e-05,5.51480250594e-05,5.65914781711e-05,0.000113545022349,0.000113683677712,8.51264489853e-05,0.000170163302076,2.84080903176e-05,2.83973993922e-05,0.000113642859165,8.52862783201e-05,5.6878173154e-05,2.84489088647e-05,0.0,8.51961775765e-05,0.0,0.0,2.83684893481e-05,2.84489088647e-05,0.0,2.83684893481e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_9
y5_ETA_9_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_10
y5_ETA_10_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0521138287,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0529581672,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.05462838872,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_11
y5_ETA_11_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.230673161153,0.0,0.0,0.0,0.0,0.691026068854,0.230619670779,0.690578010125,0.460783443723,0.229982512821,0.230364746113,0.0,0.229982512821,0.921962685466,0.461195765349,0.691490266921,0.0,0.460080998305,0.461188848491,0.690561870788,0.230752243903,0.461033219172,0.230597152562,0.230428265931,0.229982512821,0.459723627277,0.230360173301,0.230020171273,0.0,0.0,0.0,0.0,0.0,0.0,0.229952462913,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.230020171273,0.0,0.0,0.0,0.690728643934,0.0,0.229982512821,0.230465309552,0.0,0.0,0.0,0.0,0.921987663011,0.691353466828,0.690780136104,0.0,0.691670105244,0.0,0.0,1.15198896663,0.461138893401,0.230020171273,0.0,0.460124420806,0.0,0.230551270733,0.230752243903,0.229932019753,0.230752243903,0.230587737949,0.460570173916,0.0,0.459889631883,0.0,0.690224097526,0.0,0.230619670779,0.230360173301,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_12
y5_ETA_12_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0554467039881,0.0831726910094,0.0276908199867,0.0277244637063,0.0277261678187,0.138459100715,0.110763733864,0.110696415651,0.138481027219,0.0553453804604,0.110839668808,0.0276896813472,0.249341468504,0.304489895107,0.193882570303,0.165968553895,0.415543020352,0.221455956552,0.110817588434,0.41551493904,0.24918529025,0.138688482718,0.415323370639,0.332468383129,0.304440925915,0.30428051623,0.193931154819,0.193743817848,0.138552153719,0.33202835282,0.110822973891,0.110800931985,0.221591477732,0.0553593057136,0.0830496717832,0.027763192836,0.0276953706979,0.0,0.0830522875767,0.0276873271331,0.0,0.0,0.0,0.0,0.0276896813472,0.0276896813472,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.027763192836,0.0,0.0554193535598,0.027603817928,0.0554837482391,0.0277263947773,0.0276586841951,0.0831603813932,0.0553866946095,0.110896946991,0.138389897592,0.0830404011036,0.276814223641,0.221656410957,0.276896121056,0.193704811752,0.193765744352,0.193745241148,0.304459505742,0.304710891184,0.221625021436,0.193771283679,0.166210553255,0.193715351861,0.221506118237,0.332359866169,0.193744356394,0.249216833641,0.19377143755,0.221591593134,0.1384076696,0.110700454743,0.166063030199,0.166173124328,0.0831490719334,0.166071069917,0.0554810939782,0.0830438247156,0.0276586841951,0.0276929395487,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_13
y5_ETA_13_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0100450209578,0.0,0.0200719202967,0.0302570227044,0.0201730639343,0.0807196141628,0.040348427967,0.0302623268889,0.0604818076191,0.0504090944485,0.0403768120299,0.0504226704906,0.0504235808197,0.0403129979564,0.0706525268263,0.0706297079089,0.120951914474,0.0705543933437,0.0604464807792,0.0605190340126,0.0907542330928,0.0302540671691,0.13107884078,0.0302726621592,0.0604138485135,0.0604985091246,0.050332353701,0.0604686624662,0.0301965950553,0.0,0.0302875915573,0.0302954446635,0.0605230030477,0.0100953560911,0.040347511569,0.0,0.0100921881456,0.0100996953267,0.0201665945285,0.0100953560911,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0201987048721,0.0201875442367,0.0201245615971,0.0201424829437,0.0101000473206,0.0504036264047,0.0302668906724,0.050308903622,0.0503705389745,0.0706056145306,0.0504038145394,0.110960869966,0.100781812144,0.0806490333092,0.0806486084889,0.0806237261587,0.0706145357563,0.0402893536739,0.0605118484812,0.0806438140887,0.0806781031534,0.100820045968,0.050385189205,0.0403465891021,0.0605020047886,0.0705439549028,0.0201625041162,0.0705455328066,0.0201750545207,0.0403691045764,0.0201483758077,0.050360561767,0.0100592767124,0.0604937875508,0.0403312045394,0.0100853121261,0.0100921881456,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_14
y5_ETA_14_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00566304279343,0.0113243080673,0.00565465151583,0.0113270474481,0.0226121500506,0.0141291878309,0.00565787567476,0.0141412072492,0.00849151124252,0.0198041884835,0.0282808755203,0.0339622436944,0.0254604059094,0.0226399170613,0.0113074601055,0.0396079460531,0.0396194499137,0.0396201424538,0.0339383202812,0.0481071444908,0.00849126115859,0.0254815707045,0.0339557068853,0.0113223958871,0.0169703182216,0.0283032714978,0.0141288069338,0.0226442338946,0.0197992483641,0.0282745349309,0.0141461896905,0.0113219688207,0.0141390603748,0.0056663977655,0.0113208722989,0.00566218481319,0.00282800521671,0.00283012131147,0.00282190547736,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00282347946712,0.00565759865872,0.0,0.00283355053922,0.00849388896355,0.00566924872226,0.0113134197978,0.0141524456361,0.0113166093298,0.0113165477706,0.0254648920303,0.0169795905641,0.0198015222041,0.0282696871502,0.0226120269323,0.0339307869839,0.0395990199806,0.0282947532545,0.0226330955413,0.0254486750495,0.039614178914,0.0226347076208,0.0424486301401,0.0395931718642,0.0339560146809,0.0395998279441,0.0198092017043,0.016970391323,0.028310854812,0.0198078820307,0.0339708581238,0.0198045462959,0.0169803023414,0.0113155205028,0.00282142762469,0.0113133543913,0.00849043780536,0.0,0.00282930950057,0.00283041371729,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_15
y5_ETA_15_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00152260673975,0.00153821483072,0.00151881876977,0.0,0.00304355646424,0.0,0.00456097823541,0.0015356572123,0.00456914512233,0.00152449658811,0.00305783256077,0.00303609280421,0.00304190063088,0.0,0.00304325980887,0.00151265401114,0.00152644434928,0.0,0.00150849610837,0.00154541020084,0.00152162931349,0.00152305585944,0.0030361719911,0.00152449658811,0.00153629543501,0.0,0.0,0.00152495989053,0.00153629543501,0.00153333597266,0.00152162931349,0.00152094972449,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00154541020084,0.00153219780883,0.00151265401114,0.00305061000709,0.00150849610837,0.00152162931349,0.00151727403443,0.0,0.0,0.0,0.00306015380041,0.00306014316337,0.00305496528615,0.00607878412211,0.00301699221674,0.00306438379871,0.00152192833265,0.00151115655156,0.00305428569715,0.00150849610837,0.00304681258196,0.0,0.00303898371671,0.0,0.0,0.0,0.00151265401114,0.0,0.00152094972449,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_16
y5_ETA_16_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000180755028423,0.0,0.0,0.0,0.0,0.000180686039232,0.0,0.000542211148965,0.000541323760103,0.0,0.0,0.0,0.0,0.000361533925874,0.000180154568376,0.000722930104357,0.000360550714414,0.000360973850682,0.000180970234659,0.000180626135672,0.000541377657908,0.00018065712691,0.0,0.000180533816432,0.0,0.0,0.0,0.000180553027149,0.000180626135672,0.000180003616023,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000180553027149,0.0,0.0,0.000180626135672,0.00018065712691,0.000361237718936,0.000180626135672,0.0,0.0,0.0,0.000179998688224,0.0,0.000180766962937,0.0,0.000360616354241,0.000902862921764,0.0,0.0,0.0,0.00018065712691,0.000542553400027,0.0,0.000361314061327,0.0,0.000180402036298,0.0,0.0,0.000360206615427,0.0,0.000180755028423,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating a new Canvas
fig = plt.figure(figsize=(12,6),dpi=80)
frame = gridspec.GridSpec(1,1,right=0.7)
pad = fig.add_subplot(frame[0])
# Creating a new Stack
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights+y5_ETA_9_weights+y5_ETA_10_weights+y5_ETA_11_weights+y5_ETA_12_weights+y5_ETA_13_weights+y5_ETA_14_weights+y5_ETA_15_weights+y5_ETA_16_weights,\
label="$bg\_dip\_1600\_inf$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#e5e5e5", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights+y5_ETA_9_weights+y5_ETA_10_weights+y5_ETA_11_weights+y5_ETA_12_weights+y5_ETA_13_weights+y5_ETA_14_weights+y5_ETA_15_weights,\
label="$bg\_dip\_1200\_1600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#f2f2f2", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights+y5_ETA_9_weights+y5_ETA_10_weights+y5_ETA_11_weights+y5_ETA_12_weights+y5_ETA_13_weights+y5_ETA_14_weights,\
label="$bg\_dip\_800\_1200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#ccc6aa", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights+y5_ETA_9_weights+y5_ETA_10_weights+y5_ETA_11_weights+y5_ETA_12_weights+y5_ETA_13_weights,\
label="$bg\_dip\_600\_800$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#ccc6aa", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights+y5_ETA_9_weights+y5_ETA_10_weights+y5_ETA_11_weights+y5_ETA_12_weights,\
label="$bg\_dip\_400\_600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#c1bfa8", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights+y5_ETA_9_weights+y5_ETA_10_weights+y5_ETA_11_weights,\
label="$bg\_dip\_200\_400$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#bab5a3", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights+y5_ETA_9_weights+y5_ETA_10_weights,\
label="$bg\_dip\_100\_200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#b2a596", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights+y5_ETA_9_weights,\
label="$bg\_dip\_0\_100$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#b7a39b", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights,\
label="$bg\_vbf\_1600\_inf$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#ad998c", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights,\
label="$bg\_vbf\_1200\_1600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#9b8e82", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights,\
label="$bg\_vbf\_800\_1200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#876656", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights,\
label="$bg\_vbf\_600\_800$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#afcec6", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights,\
label="$bg\_vbf\_400\_600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#84c1a3", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights,\
label="$bg\_vbf\_200\_400$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#89a8a0", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights,\
label="$bg\_vbf\_100\_200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#829e8c", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights,\
label="$bg\_vbf\_0\_100$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#adbcc6", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights,\
label="$signal$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#7a8e99", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
# Axis
plt.rc('text',usetex=False)
plt.xlabel(r"$\eta$ $[ j_{2} ]$ ",\
fontsize=16,color="black")
plt.ylabel(r"$\mathrm{Events}$ $(\mathcal{L}_{\mathrm{int}} = 40.0\ \mathrm{fb}^{-1})$ ",\
fontsize=16,color="black")
# Boundary of y-axis
ymax=(y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights+y5_ETA_9_weights+y5_ETA_10_weights+y5_ETA_11_weights+y5_ETA_12_weights+y5_ETA_13_weights+y5_ETA_14_weights+y5_ETA_15_weights+y5_ETA_16_weights).max()*1.1
ymin=0 # linear scale
#ymin=min([x for x in (y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights+y5_ETA_9_weights+y5_ETA_10_weights+y5_ETA_11_weights+y5_ETA_12_weights+y5_ETA_13_weights+y5_ETA_14_weights+y5_ETA_15_weights+y5_ETA_16_weights) if x])/100. # log scale
plt.gca().set_ylim(ymin,ymax)
# Log/Linear scale for X-axis
plt.gca().set_xscale("linear")
#plt.gca().set_xscale("log",nonposx="clip")
# Log/Linear scale for Y-axis
plt.gca().set_yscale("linear")
#plt.gca().set_yscale("log",nonposy="clip")
# Legend
plt.legend(bbox_to_anchor=(1.05,1), loc=2, borderaxespad=0.)
# Saving the image
plt.savefig('../../HTML/MadAnalysis5job_0/selection_4.png')
plt.savefig('../../PDF/MadAnalysis5job_0/selection_4.pdf')
plt.savefig('../../DVI/MadAnalysis5job_0/selection_4.eps')
# Running!
if __name__ == '__main__':
selection_4()
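# Editor's note: the "stack" above is emulated by drawing one step histogram
# per cumulative sum of the weight arrays (largest sum first) instead of using
# matplotlib's stacked=True. A minimal modern sketch of the same idea, with
# made-up two-series data (note that the normed= keyword used above was
# removed in matplotlib >= 3.1 in favour of density=):
#
# import numpy, matplotlib.pyplot as plt
# edges = numpy.linspace(-8.0, 8.0, 161)
# centers = 0.5 * (edges[:-1] + edges[1:])
# w_signal = numpy.random.rand(160)
# w_background = numpy.random.rand(160)
# plt.hist(centers, bins=edges, weights=w_signal + w_background,
#          histtype="step", label="signal+background")
# plt.hist(centers, bins=edges, weights=w_signal,
#          histtype="step", label="signal")
# plt.legend()
# plt.savefig("stack_sketch.png")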
| 179.93299
| 1,742
| 0.742373
| 7,584
| 34,907
| 3.320675
| 0.146097
| 0.273031
| 0.395132
| 0.508259
| 0.393146
| 0.385721
| 0.3831
| 0.378613
| 0.374563
| 0.370314
| 0
| 0.556093
| 0.047268
| 34,907
| 193
| 1,743
| 180.865285
| 0.201161
| 0.037729
| 0
| 0.185841
| 0
| 0.00885
| 0.030637
| 0.005961
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00885
| false
| 0
| 0.035398
| 0
| 0.044248
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5a98fddec1d3cbb33837e1f1d6a6a5b060873682
| 12,100
|
py
|
Python
|
python/tinkoff/cloud/longrunning/v1/longrunning_pb2_grpc.py
|
qwertBR/voicekit-examples
|
273a63e4cf11841339108cdcdf8b485b7c96298a
|
[
"Apache-2.0"
] | 3
|
2022-02-11T04:34:18.000Z
|
2022-03-29T19:35:57.000Z
|
python/tinkoff/cloud/longrunning/v1/longrunning_pb2_grpc.py
|
qwertBR/voicekit-examples
|
273a63e4cf11841339108cdcdf8b485b7c96298a
|
[
"Apache-2.0"
] | 3
|
2022-01-27T15:40:38.000Z
|
2022-03-31T10:03:35.000Z
|
python/tinkoff/cloud/longrunning/v1/longrunning_pb2_grpc.py
|
qwertBR/voicekit-examples
|
273a63e4cf11841339108cdcdf8b485b7c96298a
|
[
"Apache-2.0"
] | 5
|
2022-01-27T15:15:06.000Z
|
2022-03-24T22:06:18.000Z
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from tinkoff.cloud.longrunning.v1 import longrunning_pb2 as tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2
class OperationsStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetOperation = channel.unary_unary(
'/tinkoff.cloud.longrunning.v1.Operations/GetOperation',
request_serializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.GetOperationRequest.SerializeToString,
response_deserializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.Operation.FromString,
)
self.WaitOperation = channel.unary_unary(
'/tinkoff.cloud.longrunning.v1.Operations/WaitOperation',
request_serializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.WaitOperationRequest.SerializeToString,
response_deserializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.Operation.FromString,
)
self.ListOperations = channel.unary_unary(
'/tinkoff.cloud.longrunning.v1.Operations/ListOperations',
request_serializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.ListOperationsRequest.SerializeToString,
response_deserializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.ListOperationsResponse.FromString,
)
self.WatchOperations = channel.unary_stream(
'/tinkoff.cloud.longrunning.v1.Operations/WatchOperations',
request_serializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.WatchOperationsRequest.SerializeToString,
response_deserializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.WatchOperationsResponse.FromString,
)
self.DeleteOperation = channel.unary_unary(
'/tinkoff.cloud.longrunning.v1.Operations/DeleteOperation',
request_serializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.DeleteOperationRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.CancelOperation = channel.unary_unary(
'/tinkoff.cloud.longrunning.v1.Operations/CancelOperation',
request_serializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.CancelOperationRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
class OperationsServicer(object):
"""Missing associated documentation comment in .proto file."""
def GetOperation(self, request, context):
"""Starts polling for operation statuses
Returns operation status
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def WaitOperation(self, request, context):
"""Wait for operation update
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListOperations(self, request, context):
"""List operations
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def WatchOperations(self, request, context):
"""Watch operations
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteOperation(self, request, context):
"""Deletes specified operations
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CancelOperation(self, request, context):
"""Cancels specified operations
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_OperationsServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetOperation': grpc.unary_unary_rpc_method_handler(
servicer.GetOperation,
request_deserializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.GetOperationRequest.FromString,
response_serializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.Operation.SerializeToString,
),
'WaitOperation': grpc.unary_unary_rpc_method_handler(
servicer.WaitOperation,
request_deserializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.WaitOperationRequest.FromString,
response_serializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.Operation.SerializeToString,
),
'ListOperations': grpc.unary_unary_rpc_method_handler(
servicer.ListOperations,
request_deserializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.ListOperationsRequest.FromString,
response_serializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.ListOperationsResponse.SerializeToString,
),
'WatchOperations': grpc.unary_stream_rpc_method_handler(
servicer.WatchOperations,
request_deserializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.WatchOperationsRequest.FromString,
response_serializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.WatchOperationsResponse.SerializeToString,
),
'DeleteOperation': grpc.unary_unary_rpc_method_handler(
servicer.DeleteOperation,
request_deserializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.DeleteOperationRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'CancelOperation': grpc.unary_unary_rpc_method_handler(
servicer.CancelOperation,
request_deserializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.CancelOperationRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'tinkoff.cloud.longrunning.v1.Operations', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Operations(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def GetOperation(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/tinkoff.cloud.longrunning.v1.Operations/GetOperation',
tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.GetOperationRequest.SerializeToString,
tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def WaitOperation(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/tinkoff.cloud.longrunning.v1.Operations/WaitOperation',
tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.WaitOperationRequest.SerializeToString,
tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListOperations(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/tinkoff.cloud.longrunning.v1.Operations/ListOperations',
tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.ListOperationsRequest.SerializeToString,
tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.ListOperationsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def WatchOperations(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_stream(request, target, '/tinkoff.cloud.longrunning.v1.Operations/WatchOperations',
tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.WatchOperationsRequest.SerializeToString,
tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.WatchOperationsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteOperation(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/tinkoff.cloud.longrunning.v1.Operations/DeleteOperation',
tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.DeleteOperationRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CancelOperation(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/tinkoff.cloud.longrunning.v1.Operations/CancelOperation',
tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.CancelOperationRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
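# Editor's usage sketch (not generated code): a client would typically drive
# the stub as below; the channel address is hypothetical and the concrete
# fields of GetOperationRequest depend on longrunning.proto.
#
# import grpc
# from tinkoff.cloud.longrunning.v1 import longrunning_pb2, longrunning_pb2_grpc
#
# with grpc.insecure_channel("localhost:50051") as channel:
#     stub = longrunning_pb2_grpc.OperationsStub(channel)
#     operation = stub.GetOperation(longrunning_pb2.GetOperationRequest())
#
# A server would subclass OperationsServicer, override the methods above, and
# register it:
#
# from concurrent import futures
# server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
# add_OperationsServicer_to_server(MyOperationsServicer(), server)  # MyOperationsServicer is hypothetical
# server.add_insecure_port("[::]:50051")
# server.start()
# server.wait_for_termination()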
| 50.416667
| 144
| 0.705702
| 1,158
| 12,100
| 6.948187
| 0.105354
| 0.10788
| 0.057793
| 0.069351
| 0.842158
| 0.837808
| 0.809098
| 0.779518
| 0.740492
| 0.726324
| 0
| 0.00911
| 0.228926
| 12,100
| 239
| 145
| 50.627615
| 0.853269
| 0.051736
| 0
| 0.559585
| 1
| 0
| 0.093099
| 0.061451
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072539
| false
| 0
| 0.015544
| 0.031088
| 0.134715
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5aeccbb2cf767b475bd8dc50612ec4c125139aee
| 20,131
|
py
|
Python
|
tests/test_attack_log.py
|
Thorsten-Sick/PurpleDome
|
297d746ef2e17a4207f8274b7fccbe2ce43c4a5f
|
[
"MIT"
] | 7
|
2021-11-30T19:54:29.000Z
|
2022-03-05T23:15:23.000Z
|
tests/test_attack_log.py
|
Thorsten-Sick/PurpleDome
|
297d746ef2e17a4207f8274b7fccbe2ce43c4a5f
|
[
"MIT"
] | null | null | null |
tests/test_attack_log.py
|
Thorsten-Sick/PurpleDome
|
297d746ef2e17a4207f8274b7fccbe2ce43c4a5f
|
[
"MIT"
] | 2
|
2021-11-30T11:16:27.000Z
|
2022-02-02T13:36:01.000Z
|
#!/usr/bin/env python3
# Testing the attack log class
import unittest
from app.attack_log import AttackLog
import app.attack_log
# from unittest.mock import patch, call
# from app.exceptions import ConfigurationError
# https://docs.python.org/3/library/unittest.html
class TestMachineConfig(unittest.TestCase):
""" Test machine specific config """
def test_init(self):
""" The init is empty """
al = AttackLog()
self.assertIsNotNone(al)
default = {"boilerplate": {'log_format_major_version': 1, 'log_format_minor_version': 1},
"system_overview": [],
"attack_log": []}
self.assertEqual(al.get_dict(), default)
def test_caldera_attack_start(self):
""" Starting a caldera attack """
al = AttackLog()
source = "asource"
paw = "apaw"
group = "agroup"
ability_id = "aability_id"
ttp = "1234"
name = "aname"
description = "adescription"
al.start_caldera_attack(source=source,
paw=paw,
group=group,
ability_id=ability_id,
ttp=ttp,
name=name,
description=description
)
data = al.get_dict()
self.assertEqual(data["attack_log"][0]["event"], "start")
self.assertEqual(data["attack_log"][0]["type"], "attack")
self.assertEqual(data["attack_log"][0]["sub_type"], "caldera")
self.assertEqual(data["attack_log"][0]["source"], source)
self.assertEqual(data["attack_log"][0]["target_paw"], paw)
self.assertEqual(data["attack_log"][0]["target_group"], group)
self.assertEqual(data["attack_log"][0]["ability_id"], ability_id)
self.assertEqual(data["attack_log"][0]["hunting_tag"], "MITRE_" + ttp)
self.assertEqual(data["attack_log"][0]["name"], name)
self.assertEqual(data["attack_log"][0]["description"], description)
def test_caldera_attack_stop(self):
""" Stopping a caldera attack """
al = AttackLog()
source = "asource"
paw = "apaw"
group = "agroup"
ability_id = "aability_id"
ttp = "1234"
name = "aname"
description = "adescription"
al.stop_caldera_attack(source=source,
paw=paw,
group=group,
ability_id=ability_id,
ttp=ttp,
name=name,
description=description
)
data = al.get_dict()
self.assertEqual(data["attack_log"][0]["event"], "stop")
self.assertEqual(data["attack_log"][0]["type"], "attack")
self.assertEqual(data["attack_log"][0]["sub_type"], "caldera")
self.assertEqual(data["attack_log"][0]["source"], source)
self.assertEqual(data["attack_log"][0]["target_paw"], paw)
self.assertEqual(data["attack_log"][0]["target_group"], group)
self.assertEqual(data["attack_log"][0]["ability_id"], ability_id)
self.assertEqual(data["attack_log"][0]["hunting_tag"], "MITRE_" + ttp)
self.assertEqual(data["attack_log"][0]["name"], name)
self.assertEqual(data["attack_log"][0]["description"], description)
def test_kali_attack_start(self):
""" Starting a kali attack """
al = AttackLog()
source = "asource"
target = "a target"
ttp = "1234"
attack_name = "a name"
al.start_kali_attack(source=source,
target=target,
attack_name=attack_name,
ttp=ttp,
)
data = al.get_dict()
self.assertEqual(data["attack_log"][0]["event"], "start")
self.assertEqual(data["attack_log"][0]["type"], "attack")
self.assertEqual(data["attack_log"][0]["sub_type"], "kali")
self.assertEqual(data["attack_log"][0]["source"], source)
self.assertEqual(data["attack_log"][0]["target"], target)
self.assertEqual(data["attack_log"][0]["kali_name"], attack_name)
self.assertEqual(data["attack_log"][0]["hunting_tag"], "MITRE_" + ttp)
def test_kali_attack_stop(self):
""" Stopping a kali attack """
al = AttackLog()
source = "asource"
target = "a target"
ttp = "1234"
attack_name = "a name"
al.stop_kali_attack(source=source,
target=target,
attack_name=attack_name,
ttp=ttp,
)
data = al.get_dict()
self.assertEqual(data["attack_log"][0]["event"], "stop")
self.assertEqual(data["attack_log"][0]["type"], "attack")
self.assertEqual(data["attack_log"][0]["sub_type"], "kali")
self.assertEqual(data["attack_log"][0]["source"], source)
self.assertEqual(data["attack_log"][0]["target"], target)
self.assertEqual(data["attack_log"][0]["kali_name"], attack_name)
self.assertEqual(data["attack_log"][0]["hunting_tag"], "MITRE_" + ttp)
def test_narration_start(self):
""" Starting a narration """
al = AttackLog()
text = "texttextext"
al.start_narration(text)
data = al.get_dict()
self.assertEqual(data["attack_log"][0]["event"], "start")
self.assertEqual(data["attack_log"][0]["type"], "narration")
self.assertEqual(data["attack_log"][0]["sub_type"], "user defined narration")
self.assertEqual(data["attack_log"][0]["text"], text)
def test_build_start(self):
""" Starting a build """
al = AttackLog()
dl_uri = "asource"
dl_uris = "a target"
payload = "1234"
platform = "a name"
architecture = "arch"
lhost = "lhost"
lport = 8080
filename = "afilename"
encoding = "encoded"
encoded_filename = "ef"
sRDI_conversion = True
for_step = 4
comment = "this is a comment"
al.start_build(dl_uri=dl_uri,
dl_uris=dl_uris,
payload=payload,
platform=platform,
architecture=architecture,
lhost=lhost,
lport=lport,
filename=filename,
encoding=encoding,
encoded_filename=encoded_filename,
sRDI_conversion=sRDI_conversion,
for_step=for_step,
comment=comment
)
data = al.get_dict()
self.assertEqual(data["attack_log"][0]["event"], "start")
self.assertEqual(data["attack_log"][0]["type"], "build")
self.assertEqual(data["attack_log"][0]["dl_uri"], dl_uri)
self.assertEqual(data["attack_log"][0]["dl_uris"], dl_uris)
self.assertEqual(data["attack_log"][0]["payload"], payload)
self.assertEqual(data["attack_log"][0]["platform"], platform)
self.assertEqual(data["attack_log"][0]["architecture"], architecture)
self.assertEqual(data["attack_log"][0]["lhost"], lhost)
self.assertEqual(data["attack_log"][0]["lport"], lport)
self.assertEqual(data["attack_log"][0]["filename"], filename)
self.assertEqual(data["attack_log"][0]["encoding"], encoding)
self.assertEqual(data["attack_log"][0]["encoded_filename"], encoded_filename)
self.assertEqual(data["attack_log"][0]["sRDI_conversion"], sRDI_conversion)
self.assertEqual(data["attack_log"][0]["for_step"], for_step)
self.assertEqual(data["attack_log"][0]["comment"], comment)
def test_build_start_default(self):
""" Starting a build default values"""
al = AttackLog()
al.start_build()
data = al.get_dict()
self.assertEqual(data["attack_log"][0]["event"], "start")
self.assertEqual(data["attack_log"][0]["type"], "build")
self.assertEqual(data["attack_log"][0]["dl_uri"], None)
self.assertEqual(data["attack_log"][0]["dl_uris"], None)
self.assertEqual(data["attack_log"][0]["payload"], None)
self.assertEqual(data["attack_log"][0]["platform"], None)
self.assertEqual(data["attack_log"][0]["architecture"], None)
self.assertEqual(data["attack_log"][0]["lhost"], None)
self.assertEqual(data["attack_log"][0]["lport"], None)
self.assertEqual(data["attack_log"][0]["filename"], None)
self.assertEqual(data["attack_log"][0]["encoding"], None)
self.assertEqual(data["attack_log"][0]["encoded_filename"], None)
self.assertEqual(data["attack_log"][0]["sRDI_conversion"], False)
self.assertEqual(data["attack_log"][0]["for_step"], None)
self.assertEqual(data["attack_log"][0]["comment"], None)
def test_build_stop(self):
""" Stopping a build """
al = AttackLog()
logid = "lid"
al.stop_build(logid=logid)
data = al.get_dict()
self.assertEqual(data["attack_log"][0]["event"], "stop")
self.assertEqual(data["attack_log"][0]["type"], "build")
self.assertEqual(data["attack_log"][0]["logid"], logid)
def test_metasploit_attack_start(self):
""" Starting a metasploit attack """
al = AttackLog()
source = "asource"
target = "a target"
ttp = "1234"
attack_name = "a name"
al.start_metasploit_attack(source=source,
target=target,
metasploit_command=attack_name,
ttp=ttp,
)
data = al.get_dict()
self.assertEqual(data["attack_log"][0]["event"], "start")
self.assertEqual(data["attack_log"][0]["type"], "attack")
self.assertEqual(data["attack_log"][0]["sub_type"], "metasploit")
self.assertEqual(data["attack_log"][0]["source"], source)
self.assertEqual(data["attack_log"][0]["target"], target)
self.assertEqual(data["attack_log"][0]["metasploit_command"], attack_name)
self.assertEqual(data["attack_log"][0]["hunting_tag"], "MITRE_" + ttp)
def test_metasploit_attack_stop(self):
""" Stopping a metasploit attack """
al = AttackLog()
source = "asource"
target = "a target"
ttp = "1234"
attack_name = "a name"
al.stop_metasploit_attack(source=source,
target=target,
metasploit_command=attack_name,
ttp=ttp,
)
data = al.get_dict()
self.assertEqual(data["attack_log"][0]["event"], "stop")
self.assertEqual(data["attack_log"][0]["type"], "attack")
self.assertEqual(data["attack_log"][0]["sub_type"], "metasploit")
self.assertEqual(data["attack_log"][0]["source"], source)
self.assertEqual(data["attack_log"][0]["target"], target)
self.assertEqual(data["attack_log"][0]["metasploit_command"], attack_name)
self.assertEqual(data["attack_log"][0]["hunting_tag"], "MITRE_" + ttp)
def test_attack_plugin_start(self):
""" Starting a attack plugin """
al = AttackLog()
source = "asource"
target = "a target"
ttp = "1234"
attack_name = "a name"
al.start_attack_plugin(source=source,
target=target,
plugin_name=attack_name,
ttp=ttp,
)
data = al.get_dict()
self.assertEqual(data["attack_log"][0]["event"], "start")
self.assertEqual(data["attack_log"][0]["type"], "attack")
self.assertEqual(data["attack_log"][0]["sub_type"], "attack_plugin")
self.assertEqual(data["attack_log"][0]["source"], source)
self.assertEqual(data["attack_log"][0]["target"], target)
self.assertEqual(data["attack_log"][0]["plugin_name"], attack_name)
self.assertEqual(data["attack_log"][0]["hunting_tag"], "MITRE_" + ttp)
def test_attack_plugin_stop(self):
""" Stopping a attack plugin"""
al = AttackLog()
source = "asource"
target = "a target"
ttp = "1234"
attack_name = "a name"
al.stop_attack_plugin(source=source,
target=target,
plugin_name=attack_name,
ttp=ttp,
)
data = al.get_dict()
self.assertEqual(data["attack_log"][0]["event"], "stop")
self.assertEqual(data["attack_log"][0]["type"], "attack")
self.assertEqual(data["attack_log"][0]["sub_type"], "attack_plugin")
self.assertEqual(data["attack_log"][0]["source"], source)
self.assertEqual(data["attack_log"][0]["target"], target)
self.assertEqual(data["attack_log"][0]["plugin_name"], attack_name)
self.assertEqual(data["attack_log"][0]["hunting_tag"], "MITRE_" + ttp)
def test_file_write_start(self):
""" Starting a file write """
al = AttackLog()
source = "asource"
target = "a target"
file_name = "a generic filename"
al.start_file_write(source=source,
target=target,
file_name=file_name,
)
data = al.get_dict()
self.assertEqual(data["attack_log"][0]["event"], "start")
self.assertEqual(data["attack_log"][0]["type"], "dropping_file")
self.assertEqual(data["attack_log"][0]["sub_type"], "by PurpleDome")
self.assertEqual(data["attack_log"][0]["source"], source)
self.assertEqual(data["attack_log"][0]["target"], target)
self.assertEqual(data["attack_log"][0]["file_name"], file_name)
def test_file_write_stop(self):
""" Stopping a file write """
al = AttackLog()
source = "asource"
target = "a target"
file_name = "a generic filename"
al.stop_file_write(source=source,
target=target,
file_name=file_name,
)
data = al.get_dict()
self.assertEqual(data["attack_log"][0]["event"], "stop")
self.assertEqual(data["attack_log"][0]["type"], "dropping_file")
self.assertEqual(data["attack_log"][0]["sub_type"], "by PurpleDome")
self.assertEqual(data["attack_log"][0]["source"], source)
self.assertEqual(data["attack_log"][0]["target"], target)
self.assertEqual(data["attack_log"][0]["file_name"], file_name)
def test_execute_payload_start(self):
""" Starting a execute payload """
al = AttackLog()
source = "asource"
target = "a target"
command = "a generic command"
al.start_execute_payload(source=source,
target=target,
command=command,
)
data = al.get_dict()
self.assertEqual(data["attack_log"][0]["event"], "start")
self.assertEqual(data["attack_log"][0]["type"], "execute_payload")
self.assertEqual(data["attack_log"][0]["sub_type"], "by PurpleDome")
self.assertEqual(data["attack_log"][0]["source"], source)
self.assertEqual(data["attack_log"][0]["target"], target)
self.assertEqual(data["attack_log"][0]["command"], command)
def test_execute_payload_stop(self):
""" Stopping a execute payload """
al = AttackLog()
source = "asource"
target = "a target"
command = "a generic command"
al.stop_execute_payload(source=source,
target=target,
command=command,
)
data = al.get_dict()
self.assertEqual(data["attack_log"][0]["event"], "stop")
self.assertEqual(data["attack_log"][0]["type"], "execute_payload")
self.assertEqual(data["attack_log"][0]["sub_type"], "by PurpleDome")
self.assertEqual(data["attack_log"][0]["source"], source)
self.assertEqual(data["attack_log"][0]["target"], target)
self.assertEqual(data["attack_log"][0]["command"], command)
def test_mitre_fix_ttp_is_none(self):
""" Testing the mitre ttp fix for ttp being none """
self.assertEqual(app.attack_log.__mitre_fix_ttp__(None), "")
def test_mitre_fix_ttp_is_MITRE_SOMETHING(self):
""" Testing the mitre ttp fix for ttp being MITRE_ """
self.assertEqual(app.attack_log.__mitre_fix_ttp__("MITRE_FOO"), "MITRE_FOO")
# Tests for a set of default data covering caldera attacks, so we have a fallback if no data is submitted:
def test_get_caldera_default_name_missing(self):
""" Testing getting the caldera default name """
al = AttackLog()
self.assertEqual(al.get_caldera_default_name("missing"), None)
def test_get_caldera_default_name(self):
""" Testing getting the caldera default name """
al = AttackLog()
self.assertEqual(al.get_caldera_default_name("bd527b63-9f9e-46e0-9816-b8434d2b8989"), "whoami")
def test_get_caldera_default_description_missing(self):
""" Testing getting the caldera default description """
al = AttackLog()
self.assertEqual(al.get_caldera_default_description("missing"), None)
def test_get_caldera_default_description(self):
""" Testing getting the caldera default description """
al = AttackLog()
self.assertEqual(al.get_caldera_default_description("bd527b63-9f9e-46e0-9816-b8434d2b8989"), "Obtain user from current session")
def test_get_caldera_default_tactics_missing(self):
""" Testing getting the caldera default tactics """
al = AttackLog()
self.assertEqual(al.get_caldera_default_tactics("missing", None), None)
def test_get_caldera_default_tactics(self):
""" Testing getting the caldera default tactics """
al = AttackLog()
self.assertEqual(al.get_caldera_default_tactics("bd527b63-9f9e-46e0-9816-b8434d2b8989", None), "System Owner/User Discovery")
def test_get_caldera_default_tactics_id_missing(self):
""" Testing getting the caldera default tactics_id """
al = AttackLog()
self.assertEqual(al.get_caldera_default_tactics_id("missing", None), None)
def test_get_caldera_default_tactics_id(self):
""" Testing getting the caldera default tactics_id """
al = AttackLog()
self.assertEqual(al.get_caldera_default_tactics_id("bd527b63-9f9e-46e0-9816-b8434d2b8989", None), "T1033")
def test_get_caldera_default_situation_description_missing(self):
""" Testing getting the caldera default situation_description """
al = AttackLog()
self.assertEqual(al.get_caldera_default_situation_description("missing"), None)
def test_get_caldera_default_situation_description(self):
""" Testing getting the caldera default situation_description """
al = AttackLog()
self.assertEqual(al.get_caldera_default_situation_description("bd527b63-9f9e-46e0-9816-b8434d2b8989"), None)
def test_get_caldera_default_countermeasure_missing(self):
""" Testing getting the caldera default countermeasure """
al = AttackLog()
self.assertEqual(al.get_caldera_default_countermeasure("missing"), None)
def test_get_caldera_default_countermeasure(self):
""" Testing getting the caldera default countermeasure """
al = AttackLog()
self.assertEqual(al.get_caldera_default_countermeasure("bd527b63-9f9e-46e0-9816-b8434d2b8989"), None)
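# Editor's usage sketch (derived from the tests above; the argument values are
# hypothetical): AttackLog brackets every event with a start_*/stop_* pair and
# exposes the accumulated events via get_dict():
#
#     al = AttackLog()
#     al.start_kali_attack(source="attacker", target="victim",
#                          attack_name="hydra", ttp="1110")
#     # ... run the attack ...
#     al.stop_kali_attack(source="attacker", target="victim",
#                         attack_name="hydra", ttp="1110")
#     events = al.get_dict()["attack_log"]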
| 45.035794 | 136 | 0.581442 | 2,212 | 20,131 | 5.076854 | 0.073237 | 0.184328 | 0.208103 | 0.27382 | 0.848976 | 0.834105 | 0.808103 | 0.728673 | 0.675868 | 0.661264 | 0 | 0.021274 | 0.276141 | 20,131 | 446 | 137 | 45.136771 | 0.749382 | 0.071581 | 0 | 0.615804 | 0 | 0 | 0.184674 | 0.014276 | 0 | 0 | 0 | 0 | 0.378747 | 1 | 0.084469 | false | 0 | 0.008174 | 0 | 0.095368 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7
| 85226a980a1a03102f137cc51fddc714bf4ccf38 | 78 | py | Python | vk_advanced_api/__init__.py | Ar4ikov/vk_advanced_api | 73bd2770f987441fa6b0db8c87e108cbc2b226e8 | ["MIT"] | 11 | 2018-03-14T07:51:41.000Z | 2022-02-16T08:20:23.000Z | vk_advanced_api/__init__.py | Ar4ikov/vk_advanced_api | 73bd2770f987441fa6b0db8c87e108cbc2b226e8 | ["MIT"] | 1 | 2018-04-10T21:12:07.000Z | 2018-04-17T07:36:41.000Z | vk_advanced_api/__init__.py | Ar4ikov/vk_advanced_api | 73bd2770f987441fa6b0db8c87e108cbc2b226e8 | ["MIT"] | 1 | 2018-03-30T07:28:45.000Z | 2018-03-30T07:28:45.000Z |
from vk_advanced_api.vkapi import VKAPI
from vk_advanced_api.Auth import Auth
| 39 | 40 | 0.871795 | 14 | 78 | 4.571429 | 0.5 | 0.1875 | 0.4375 | 0.53125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.102564 | 78 | 2 | 41 | 39 | 0.914286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7
| cfbcbb60ae3fc053905862a67c48f5b31ef61170 | 106 | py | Python | autonmt/search/__init__.py | salvacarrion/autonlp | 5cc462901e451b9259219f44225034fc8eedf6d3 | ["MIT"] | 5 | 2022-01-10T07:59:16.000Z | 2022-01-14T01:02:52.000Z | autonmt/search/__init__.py | salvacarrion/autonlp | 5cc462901e451b9259219f44225034fc8eedf6d3 | ["MIT"] | 2 | 2022-01-01T06:10:27.000Z | 2022-01-14T01:10:48.000Z | autonmt/search/__init__.py | salvacarrion/autonlp | 5cc462901e451b9259219f44225034fc8eedf6d3 | ["MIT"] | 2 | 2022-01-10T08:20:02.000Z | 2022-02-22T08:10:16.000Z |
from autonmt.search.beam_search import beam_search
from autonmt.search.greedy_search import greedy_search
| 35.333333 | 54 | 0.886792 | 16 | 106 | 5.625 | 0.375 | 0.244444 | 0.377778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.075472 | 106 | 2 | 55 | 53 | 0.918367 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7
| cfd6ee08050d84e7c5fe8334aa7dac72cbb2dd09 | 125 | py | Python | data/module/util/got/manager/__init__.py | williamducfer/eras | d85b8b53431f4416152e3f536bc8c99481a63dd7 | ["MIT"] | 1 | 2020-09-09T19:51:44.000Z | 2020-09-09T19:51:44.000Z | data/module/util/got/manager/__init__.py | williamducfer/eras | d85b8b53431f4416152e3f536bc8c99481a63dd7 | ["MIT"] | null | null | null | data/module/util/got/manager/__init__.py | williamducfer/eras | d85b8b53431f4416152e3f536bc8c99481a63dd7 | ["MIT"] | 2 | 2021-09-11T07:31:19.000Z | 2022-03-17T16:27:10.000Z |
from module.util.got.manager.TweetCriteria import TweetCriteria
from module.util.got.manager.TweetManager import TweetManager
| 62.5 | 63 | 0.88 | 16 | 125 | 6.875 | 0.5 | 0.181818 | 0.254545 | 0.309091 | 0.436364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.056 | 125 | 2 | 64 | 62.5 | 0.932203 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7
| cfed38f69dc3fe98cfb032a42e95725930455fd7 | 87 | py | Python | lectures/code/numpy_scalar_ops.py | naskoch/python_course | 84adfd3f8d48ca3ad5837f7acc59d2fa051e95d3 | ["MIT"] | 4 | 2015-08-10T17:46:55.000Z | 2020-04-18T21:09:03.000Z | lectures/code/numpy_scalar_ops.py | naskoch/python_course | 84adfd3f8d48ca3ad5837f7acc59d2fa051e95d3 | ["MIT"] | null | null | null | lectures/code/numpy_scalar_ops.py | naskoch/python_course | 84adfd3f8d48ca3ad5837f7acc59d2fa051e95d3 | ["MIT"] | 2 | 2019-04-24T03:31:02.000Z | 2019-05-13T07:36:06.000Z |
import numpy as np

A = np.ones((3,3))
print(3 * A - 1)
# [[ 2.  2.  2.]
#  [ 2.  2.  2.]
#  [ 2.  2.  2.]]
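# The same broadcasting rule applies to any scalar expression (editor's
# extension of the example above): the scalar combines element-wise with
# every entry of A.
print((3 * A - 1) / 2)
# [[ 1.  1.  1.]
#  [ 1.  1.  1.]
#  [ 1.  1.  1.]]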
| 14.5 | 18 | 0.298851 | 18 | 87 | 1.444444 | 0.388889 | 0.615385 | 0.807692 | 0.923077 | 0.346154 | 0.346154 | 0.346154 | 0.346154 | 0 | 0 | 0 | 0.236364 | 0.367816 | 87 | 6 | 19 | 14.5 | 0.236364 | 0.517241 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.5 | 1 | 0 | 1 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 10
| 7a9dbb50b13d01bd1c12413cedbd4dd221c63516 | 106 | py | Python | tests/__init__.py | pythoncatcoder/go.py | 2fe83bbeac4190770678e3cf9df0a908b61be08d | ["MIT"] | 34 | 2015-05-25T05:24:17.000Z | 2022-01-18T08:49:46.000Z | tests/__init__.py | pythoncatcoder/go.py | 2fe83bbeac4190770678e3cf9df0a908b61be08d | ["MIT"] | 1 | 2019-12-14T20:31:20.000Z | 2019-12-17T02:30:53.000Z | tests/__init__.py | pythoncatcoder/go.py | 2fe83bbeac4190770678e3cf9df0a908b61be08d | ["MIT"] | 18 | 2015-01-15T19:14:32.000Z | 2021-05-17T23:09:54.000Z |
from .location_test import *
from .array_test import *
from .view_test import *
from .board_test import *
| 21.2 | 28 | 0.773585 | 16 | 106 | 4.875 | 0.4375 | 0.512821 | 0.538462 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.150943 | 106 | 4 | 29 | 26.5 | 0.866667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7
| 8f8a00c79c4bc75d5fe1451c7619bf46e7178afd | 54,850 | py | Python | tests/sql/build_test.py | DataJunction/datajunction | d2293255bb7df0e5144c7e448a0ca2b590b6c20f | ["MIT"] | null | null | null | tests/sql/build_test.py | DataJunction/datajunction | d2293255bb7df0e5144c7e448a0ca2b590b6c20f | ["MIT"] | null | null | null | tests/sql/build_test.py | DataJunction/datajunction | d2293255bb7df0e5144c7e448a0ca2b590b6c20f | ["MIT"] | null | null | null |
"""
Tests for ``datajunction.sql.build``.
"""
# pylint: disable=invalid-name, too-many-lines, line-too-long
import datetime
import pytest
from pytest_mock import MockerFixture
from sqlalchemy.engine import create_engine
from sqlmodel import Session
from datajunction.models.column import Column
from datajunction.models.database import Database
from datajunction.models.node import Node, NodeType
from datajunction.models.table import Table
from datajunction.sql.build import (
find_on_clause,
get_dimensions_from_filters,
get_filter,
get_join_columns,
get_query_for_node,
get_query_for_sql,
)
from datajunction.typing import ColumnType
def test_get_query_for_node(mocker: MockerFixture) -> None:
"""
Test ``get_query_for_node``.
"""
database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
parent = Node(name="A")
child = Node(
name="B",
tables=[
Table(
database=database,
table="B",
columns=[Column(name="cnt", type=ColumnType.INT)],
),
],
type=NodeType.METRIC,
expression="SELECT COUNT(*) AS cnt FROM A",
parents=[parent],
)
engine = create_engine(database.URI)
connection = engine.connect()
connection.execute("CREATE TABLE B (cnt INTEGER)")
mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
session = mocker.MagicMock()
create_query = get_query_for_node(session, child, [], [])
assert create_query.database_id == 1
assert create_query.submitted_query == 'SELECT "B".cnt \nFROM "B"'
def test_get_query_for_node_with_groupbys(mocker: MockerFixture) -> None:
"""
Test ``get_query_for_node`` with group bys.
"""
database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
parent = Node(
name="A",
tables=[
Table(
database=database,
table="A",
columns=[
Column(name="user_id", type=ColumnType.INT),
Column(name="comment", type=ColumnType.STR),
],
),
],
columns=[
Column(name="user_id", type=ColumnType.INT),
Column(name="comment", type=ColumnType.STR),
],
)
child = Node(
name="B",
type=NodeType.METRIC,
expression="SELECT COUNT(*) AS cnt FROM A",
parents=[parent],
)
engine = create_engine(database.URI)
connection = engine.connect()
connection.execute("CREATE TABLE A (user_id INTEGER, comment TEXT)")
mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
session = mocker.MagicMock()
create_query = get_query_for_node(session, child, ["A.user_id"], [])
space = " "
assert create_query.database_id == 1
assert (
create_query.submitted_query
== f"""SELECT count('*') AS cnt, "A".user_id{space}
FROM (SELECT "A".user_id AS user_id, "A".comment AS comment{space}
FROM "A") AS "A" GROUP BY "A".user_id"""
)
def test_get_query_for_node_specify_database(mocker: MockerFixture) -> None:
"""
Test ``get_query_for_node`` when a database is specified.
"""
database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
parent = Node(name="A")
child = Node(
name="B",
tables=[
Table(
database=database,
table="B",
columns=[Column(name="cnt", type=ColumnType.INT)],
),
],
type=NodeType.METRIC,
expression="SELECT COUNT(*) AS cnt FROM A",
parents=[parent],
columns=[Column(name="cnt", type=ColumnType.INT)],
)
engine = create_engine(database.URI)
connection = engine.connect()
connection.execute("CREATE TABLE B (cnt INTEGER)")
mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
session = mocker.MagicMock()
session.exec().one.return_value = database
create_query = get_query_for_node(session, child, [], [], 1)
assert create_query.database_id == 1
assert create_query.submitted_query == 'SELECT "B".cnt \nFROM "B"'
with pytest.raises(Exception) as excinfo:
get_query_for_node(session, child, [], [], 2)
assert str(excinfo.value) == "Database ID 2 is not valid"
def test_get_query_for_node_no_databases(mocker: MockerFixture) -> None:
"""
Test ``get_query_for_node`` when no database can compute the node.
"""
database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
parent = Node(name="A")
child = Node(
name="B",
tables=[
Table(
database=database,
table="B",
columns=[Column(name="one", type=ColumnType.STR)],
),
],
type=NodeType.METRIC,
expression="SELECT COUNT(*) AS cnt FROM A",
parents=[parent],
columns=[Column(name="one", type=ColumnType.STR)],
)
mocker.patch("datajunction.sql.dag.get_computable_databases", return_value=set())
session = mocker.MagicMock()
with pytest.raises(Exception) as excinfo:
get_query_for_node(session, child, [], [])
assert str(excinfo.value) == "No valid database was found"
def test_get_query_for_node_with_dimensions(mocker: MockerFixture) -> None:
"""
Test ``get_query_for_node`` when filtering/grouping by a dimension.
"""
database = Database(id=1, name="one", URI="sqlite://")
dimension = Node(
name="core.users",
type=NodeType.DIMENSION,
tables=[
Table(
database=database,
table="dim_users",
columns=[
Column(name="id", type=ColumnType.INT),
Column(name="age", type=ColumnType.INT),
Column(name="gender", type=ColumnType.STR),
],
),
],
columns=[
Column(name="id", type=ColumnType.INT),
Column(name="age", type=ColumnType.INT),
Column(name="gender", type=ColumnType.STR),
],
)
parent = Node(
name="core.comments",
tables=[
Table(
database=database,
table="comments",
columns=[
Column(name="ds", type=ColumnType.STR),
Column(name="user_id", type=ColumnType.INT),
Column(name="text", type=ColumnType.STR),
],
),
],
columns=[
Column(name="ds", type=ColumnType.STR),
Column(name="user_id", type=ColumnType.INT, dimension=dimension),
Column(name="text", type=ColumnType.STR),
],
)
child = Node(
name="core.num_comments",
type=NodeType.METRIC,
expression="SELECT COUNT(*) FROM core.comments",
parents=[parent],
)
engine = create_engine(database.URI)
connection = engine.connect()
connection.execute("CREATE TABLE dim_users (id INTEGER, age INTEGER, gender TEXT)")
connection.execute("CREATE TABLE comments (ds TEXT, user_id INTEGER, text TEXT)")
mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
session = mocker.MagicMock()
session.exec().one.return_value = dimension
create_query = get_query_for_node(
session,
child,
["core.users.gender"],
["core.users.age>25"],
)
space = " "
assert create_query.database_id == 1
assert (
create_query.submitted_query
== f"""SELECT count('*') AS count_1, "core.users".gender{space}
FROM (SELECT comments.ds AS ds, comments.user_id AS user_id, comments.text AS text{space}
FROM comments) AS "core.comments" JOIN (SELECT dim_users.id AS id, dim_users.age AS age, dim_users.gender AS gender{space}
FROM dim_users) AS "core.users" ON "core.comments".user_id = "core.users".id{space}
WHERE "core.users".age > 25 GROUP BY "core.users".gender"""
)
with pytest.raises(Exception) as excinfo:
get_query_for_node(session, child, ["aaaa"], [])
assert str(excinfo.value) == "Invalid dimension: aaaa"
with pytest.raises(Exception) as excinfo:
get_query_for_node(session, child, ["aaaa", "bbbb"], [])
assert str(excinfo.value) == "Invalid dimensions: aaaa, bbbb"
def test_get_query_for_node_with_multiple_dimensions(mocker: MockerFixture) -> None:
"""
Test ``get_query_for_node`` when filtering/grouping by multiple dimensions.
"""
database = Database(id=1, name="one", URI="sqlite://")
dimension_1 = Node(
name="core.users",
type=NodeType.DIMENSION,
tables=[
Table(
database=database,
table="dim_users",
columns=[
Column(name="id", type=ColumnType.INT),
Column(name="age", type=ColumnType.INT),
Column(name="gender", type=ColumnType.STR),
],
),
],
columns=[
Column(name="id", type=ColumnType.INT),
Column(name="age", type=ColumnType.INT),
Column(name="gender", type=ColumnType.STR),
],
)
dimension_2 = Node(
name="core.bands",
type=NodeType.DIMENSION,
tables=[
Table(
database=database,
table="dim_bands",
columns=[
Column(name="uuid", type=ColumnType.INT),
Column(name="name", type=ColumnType.STR),
Column(name="genre", type=ColumnType.STR),
],
),
],
columns=[
Column(name="uuid", type=ColumnType.INT),
Column(name="name", type=ColumnType.STR),
Column(name="genre", type=ColumnType.STR),
],
)
parent = Node(
name="core.comments",
tables=[
Table(
database=database,
table="comments",
columns=[
Column(name="ds", type=ColumnType.STR),
Column(name="user_id", type=ColumnType.INT),
Column(name="band_id", type=ColumnType.INT),
Column(name="text", type=ColumnType.STR),
],
),
],
columns=[
Column(name="ds", type=ColumnType.STR),
Column(name="user_id", type=ColumnType.INT, dimension=dimension_1),
Column(
name="band_id",
type=ColumnType.INT,
dimension=dimension_2,
dimension_column="uuid",
),
Column(name="text", type=ColumnType.STR),
],
)
child = Node(
name="core.num_comments",
type=NodeType.METRIC,
expression="SELECT COUNT(*) FROM core.comments",
parents=[parent],
)
engine = create_engine(database.URI)
connection = engine.connect()
connection.execute("CREATE TABLE dim_users (id INTEGER, age INTEGER, gender TEXT)")
connection.execute("CREATE TABLE dim_bands (uuid INTEGER, name TEXT, genre TEXT)")
connection.execute(
"CREATE TABLE comments (ds TEXT, user_id INTEGER, band_id INTEGER, text TEXT)",
)
mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
session = mocker.MagicMock()
session.exec().one.side_effect = [dimension_1, dimension_2]
create_query = get_query_for_node(
session,
child,
["core.users.gender"],
["core.bands.genre='rock'"],
)
space = " "
assert create_query.database_id == 1
assert (
create_query.submitted_query
== f"""SELECT count('*') AS count_1, "core.users".gender{space}
FROM (SELECT comments.ds AS ds, comments.user_id AS user_id, comments.band_id AS band_id, comments.text AS text{space}
FROM comments) AS "core.comments" JOIN (SELECT dim_users.id AS id, dim_users.age AS age, dim_users.gender AS gender{space}
FROM dim_users) AS "core.users" ON "core.comments".user_id = "core.users".id, (SELECT comments.ds AS ds, comments.user_id AS user_id, comments.band_id AS band_id, comments.text AS text{space}
FROM comments) AS "core.comments" JOIN (SELECT dim_bands.uuid AS uuid, dim_bands.name AS name, dim_bands.genre AS genre{space}
FROM dim_bands) AS "core.bands" ON "core.comments".band_id = "core.bands".uuid{space}
WHERE "core.bands".genre = 'rock' GROUP BY "core.users".gender"""
)
def test_get_filter(mocker: MockerFixture) -> None:
"""
Test ``get_filter``.
"""
greater_than = mocker.MagicMock()
less_than = mocker.MagicMock()
equals = mocker.MagicMock()
mocker.patch(
"datajunction.sql.build.COMPARISONS",
new={
">": greater_than,
"<": less_than,
"=": equals,
},
)
column_a = mocker.MagicMock()
column_date = mocker.MagicMock()
column_date.type.python_type = datetime.date
column_dt = mocker.MagicMock()
column_dt.type.python_type = datetime.datetime
columns = {"a": column_a, "day": column_date, "dt": column_dt}
# basic
get_filter(columns, "a>0")
greater_than.assert_called_with(column_a, 0)
# date
get_filter(columns, "day=2020-01-01")
equals.assert_called_with(column_date, "2020-01-01 00:00:00")
get_filter(columns, "day<20200202")
less_than.assert_called_with(column_date, "2020-02-02 00:00:00")
get_filter(columns, "day=3/3/2020")
equals.assert_called_with(column_date, "2020-03-03 00:00:00")
# datetime
get_filter(columns, "dt=2012-01-19 17:21:00")
equals.assert_called_with(column_dt, "2012-01-19 17:21:00")
with pytest.raises(Exception) as excinfo:
get_filter(columns, "dt>foo/bar-baz")
assert str(excinfo.value) == "Invalid date or datetime value: foo/bar-baz"
# exceptions
with pytest.raises(Exception) as excinfo:
get_filter(columns, "invalid")
assert (
str(excinfo.value)
== """The filter "invalid" is invalid
The following error happened:
- The filter "invalid" is not a valid filter. Filters should consist of a dimension name, follow by a valid operator (<=|<|>=|>|!=|=), followed by a value. If the value is a string or date/time it should be enclosed in single quotes. (error code: 100)"""
)
with pytest.raises(Exception) as excinfo:
get_filter(columns, "b>0")
assert str(excinfo.value) == "Invalid column name: b"
with pytest.raises(Exception) as excinfo:
get_filter(columns, "a>open('/etc/passwd').read()")
assert str(excinfo.value) == "Invalid value: open('/etc/passwd').read()"
def test_get_query_for_sql(mocker: MockerFixture, session: Session) -> None:
"""
Test ``get_query_for_sql``.
"""
get_session = mocker.patch("datajunction.sql.build.get_session")
get_session().__next__.return_value = session
database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
A = Node(
name="A",
tables=[
Table(
database=database,
table="A",
columns=[
Column(name="one", type=ColumnType.STR),
Column(name="two", type=ColumnType.STR),
],
),
],
)
engine = create_engine(database.URI)
connection = engine.connect()
connection.execute("CREATE TABLE A (one TEXT, two TEXT)")
mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
B = Node(
name="B",
type=NodeType.METRIC,
expression="SELECT COUNT(*) AS cnt FROM A",
parents=[A],
)
session.add(B)
session.commit()
sql = "SELECT B FROM metrics"
create_query = get_query_for_sql(sql)
assert create_query.database_id == 1
space = " "
assert (
create_query.submitted_query
== f'''SELECT count('*') AS "B"{space}
FROM (SELECT "A".one AS one, "A".two AS two{space}
FROM "A") AS "A"'''
)
def test_get_query_for_sql_no_metrics(mocker: MockerFixture, session: Session) -> None:
"""
Test ``get_query_for_sql`` when no metrics are requested.
"""
get_session = mocker.patch("datajunction.sql.build.get_session")
get_session().__next__.return_value = session
database = Database(id=1, name="db", URI="sqlite://")
dimension = Node(
name="core.users",
type=NodeType.DIMENSION,
tables=[
Table(
database=database,
table="dim_users",
columns=[
Column(name="id", type=ColumnType.INT),
Column(name="age", type=ColumnType.INT),
Column(name="gender", type=ColumnType.STR),
],
),
],
columns=[
Column(name="id", type=ColumnType.INT),
Column(name="age", type=ColumnType.INT),
Column(name="gender", type=ColumnType.STR),
],
)
engine = create_engine(database.URI)
connection = engine.connect()
connection.execute("CREATE TABLE dim_users (id INTEGER, age INTEGER, gender TEXT)")
mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
session.add(dimension)
session.commit()
sql = 'SELECT "core.users.gender", "core.users.age" FROM metrics'
create_query = get_query_for_sql(sql)
assert create_query.database_id == 1
space = " "
assert (
create_query.submitted_query
== f'''SELECT "core.users".gender, "core.users".age{space}
FROM (SELECT dim_users.id AS id, dim_users.age AS age, dim_users.gender AS gender{space}
FROM dim_users) AS "core.users"'''
)
other_dimension = Node(
name="core.other_dim",
type=NodeType.DIMENSION,
columns=[
Column(name="full_name", type=ColumnType.STR),
],
)
session.add(other_dimension)
session.commit()
sql = 'SELECT "core.users.gender", "core.other_dim.full_name" FROM metrics'
with pytest.raises(Exception) as excinfo:
get_query_for_sql(sql)
assert (
str(excinfo.value)
== "Cannot query from multiple dimensions when no metric is specified"
)
def test_get_query_for_sql_no_tables(mocker: MockerFixture, session: Session) -> None:
"""
Test ``get_query_for_sql`` when no tables are involved.
"""
get_session = mocker.patch("datajunction.sql.build.get_session")
get_session().__next__.return_value = session
database = Database(id=1, name="memory", URI="sqlite://")
session.add(database)
session.commit()
sql = "SELECT 1"
create_query = get_query_for_sql(sql)
assert create_query.database_id == 1
assert create_query.submitted_query == "SELECT 1"
def test_get_query_for_sql_having(mocker: MockerFixture, session: Session) -> None:
"""
Test ``get_query_for_sql`` with a ``HAVING`` clause.
"""
get_session = mocker.patch("datajunction.sql.build.get_session")
get_session().__next__.return_value = session
database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
A = Node(
name="A",
tables=[
Table(
database=database,
table="A",
columns=[
Column(name="one", type=ColumnType.STR),
Column(name="two", type=ColumnType.STR),
],
),
],
)
engine = create_engine(database.URI)
connection = engine.connect()
connection.execute("CREATE TABLE A (one TEXT, two TEXT)")
mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
B = Node(
name="B",
type=NodeType.METRIC,
expression="SELECT COUNT(*) AS cnt FROM A",
parents=[A],
)
session.add(B)
session.commit()
sql = "SELECT B FROM metrics HAVING B > 10"
create_query = get_query_for_sql(sql)
assert create_query.database_id == 1
space = " "
assert (
create_query.submitted_query
== f"""SELECT count('*') AS "B"{space}
FROM (SELECT "A".one AS one, "A".two AS two{space}
FROM "A") AS "A"{space}
HAVING count('*') > 10"""
)
sql = "SELECT B FROM metrics HAVING C > 10"
with pytest.raises(Exception) as excinfo:
get_query_for_sql(sql)
assert str(excinfo.value) == "Invalid dimension: C"
def test_get_query_for_sql_with_dimensions(
mocker: MockerFixture,
session: Session,
) -> None:
"""
Test ``get_query_for_sql`` with dimensions in the query.
"""
get_session = mocker.patch("datajunction.sql.build.get_session")
get_session().__next__.return_value = session
database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
dimension = Node(
name="core.users",
type=NodeType.DIMENSION,
tables=[
Table(
database=database,
table="dim_users",
columns=[
Column(name="id", type=ColumnType.INT),
Column(name="age", type=ColumnType.INT),
Column(name="gender", type=ColumnType.STR),
],
),
],
columns=[
Column(name="id", type=ColumnType.INT),
Column(name="age", type=ColumnType.INT),
Column(name="gender", type=ColumnType.STR),
],
)
parent = Node(
name="core.comments",
tables=[
Table(
database=database,
table="comments",
columns=[
Column(name="ds", type=ColumnType.STR),
Column(name="user_id", type=ColumnType.INT),
Column(name="text", type=ColumnType.STR),
],
),
],
columns=[
Column(name="ds", type=ColumnType.STR),
Column(name="user_id", type=ColumnType.INT, dimension=dimension),
Column(name="text", type=ColumnType.STR),
],
)
child = Node(
name="core.num_comments",
type=NodeType.METRIC,
expression="SELECT COUNT(*) FROM core.comments",
parents=[parent],
)
engine = create_engine(database.URI)
connection = engine.connect()
connection.execute("CREATE TABLE dim_users (id INTEGER, age INTEGER, gender TEXT)")
connection.execute("CREATE TABLE comments (ds TEXT, user_id INTEGER, text TEXT)")
mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
session.add(child)
session.add(dimension)
session.commit()
sql = """
SELECT "core.users.gender", "core.num_comments"
FROM metrics
WHERE "core.users.age" > 25
GROUP BY "core.users.gender"
"""
create_query = get_query_for_sql(sql)
assert create_query.database_id == 1
space = " "
assert (
create_query.submitted_query
== f"""SELECT "core.users".gender, count('*') AS "core.num_comments"{space}
FROM (SELECT comments.ds AS ds, comments.user_id AS user_id, comments.text AS text{space}
FROM comments) AS "core.comments" JOIN (SELECT dim_users.id AS id, dim_users.age AS age, dim_users.gender AS gender{space}
FROM dim_users) AS "core.users" ON "core.comments".user_id = "core.users".id{space}
WHERE "core.users".age > 25 GROUP BY "core.users".gender"""
)
sql = """
SELECT "core.users.invalid", "core.num_comments"
FROM metrics
WHERE "core.users.age" > 25
GROUP BY "core.users.invalid"
"""
with pytest.raises(Exception) as excinfo:
get_query_for_sql(sql)
assert str(excinfo.value) == "Invalid dimension: core.users.invalid"
def test_get_query_for_sql_with_dimensions_order_by(
mocker: MockerFixture,
session: Session,
) -> None:
"""
Test ``get_query_for_sql`` with dimensions in the query and ``ORDER BY``.
"""
get_session = mocker.patch("datajunction.sql.build.get_session")
get_session().__next__.return_value = session
database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
dimension = Node(
name="core.users",
type=NodeType.DIMENSION,
tables=[
Table(
database=database,
table="dim_users",
columns=[
Column(name="id", type=ColumnType.INT),
Column(name="age", type=ColumnType.INT),
Column(name="gender", type=ColumnType.STR),
],
),
],
columns=[
Column(name="id", type=ColumnType.INT),
Column(name="age", type=ColumnType.INT),
Column(name="gender", type=ColumnType.STR),
],
)
parent = Node(
name="core.comments",
tables=[
Table(
database=database,
table="comments",
columns=[
Column(name="ds", type=ColumnType.STR),
Column(name="user_id", type=ColumnType.INT),
Column(name="text", type=ColumnType.STR),
],
),
],
columns=[
Column(name="ds", type=ColumnType.STR),
Column(name="user_id", type=ColumnType.INT, dimension=dimension),
Column(name="text", type=ColumnType.STR),
],
)
child = Node(
name="core.num_comments",
type=NodeType.METRIC,
expression="SELECT COUNT(*) FROM core.comments",
parents=[parent],
)
engine = create_engine(database.URI)
connection = engine.connect()
connection.execute("CREATE TABLE dim_users (id INTEGER, age INTEGER, gender TEXT)")
connection.execute("CREATE TABLE comments (ds TEXT, user_id INTEGER, text TEXT)")
mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
session.add(child)
session.add(dimension)
session.commit()
sql = """
SELECT "core.users.gender" AS "core.users.gender",
"core.num_comments" AS "core.num_comments"
FROM main.metrics
GROUP BY "core.users.gender"
ORDER BY "core.num_comments" DESC
LIMIT 100;
"""
create_query = get_query_for_sql(sql)
space = " "
assert create_query.database_id == 1
assert (
create_query.submitted_query
== f"""SELECT "core.users".gender AS "core.users.gender", count('*') AS "core.num_comments"{space}
FROM (SELECT comments.ds AS ds, comments.user_id AS user_id, comments.text AS text{space}
FROM comments) AS "core.comments" JOIN (SELECT dim_users.id AS id, dim_users.age AS age, dim_users.gender AS gender{space}
FROM dim_users) AS "core.users" ON "core.comments".user_id = "core.users".id GROUP BY "core.users".gender ORDER BY count('*') DESC
LIMIT 100 OFFSET 0"""
)
sql = """
SELECT "core.users.gender" AS "core.users.gender",
"core.num_comments" AS "core.num_comments"
FROM main.metrics
GROUP BY "core.users.gender"
ORDER BY "core.num_comments" ASC
LIMIT 100;
"""
create_query = get_query_for_sql(sql)
assert (
create_query.submitted_query
== f"""SELECT "core.users".gender AS "core.users.gender", count('*') AS "core.num_comments"{space}
FROM (SELECT comments.ds AS ds, comments.user_id AS user_id, comments.text AS text{space}
FROM comments) AS "core.comments" JOIN (SELECT dim_users.id AS id, dim_users.age AS age, dim_users.gender AS gender{space}
FROM dim_users) AS "core.users" ON "core.comments".user_id = "core.users".id GROUP BY "core.users".gender ORDER BY count('*')
LIMIT 100 OFFSET 0"""
)
sql = """
SELECT "core.users.gender" AS "core.users.gender",
"core.num_comments" AS "core.num_comments"
FROM main.metrics
GROUP BY "core.users.gender"
ORDER BY "core.num_comments" ASC
LIMIT 100;
"""
create_query = get_query_for_sql(sql)
assert (
create_query.submitted_query
== f"""SELECT "core.users".gender AS "core.users.gender", count('*') AS "core.num_comments"{space}
FROM (SELECT comments.ds AS ds, comments.user_id AS user_id, comments.text AS text{space}
FROM comments) AS "core.comments" JOIN (SELECT dim_users.id AS id, dim_users.age AS age, dim_users.gender AS gender{space}
FROM dim_users) AS "core.users" ON "core.comments".user_id = "core.users".id GROUP BY "core.users".gender ORDER BY count('*')
LIMIT 100 OFFSET 0"""
)
sql = """
SELECT "core.users.gender" AS "core.users.gender",
"core.num_comments" AS "core.num_comments"
FROM main.metrics
GROUP BY "core.users.gender"
ORDER BY "core.users.gender" ASC
LIMIT 100;
"""
create_query = get_query_for_sql(sql)
assert (
create_query.submitted_query
== f"""SELECT "core.users".gender AS "core.users.gender", count('*') AS "core.num_comments"{space}
FROM (SELECT comments.ds AS ds, comments.user_id AS user_id, comments.text AS text{space}
FROM comments) AS "core.comments" JOIN (SELECT dim_users.id AS id, dim_users.age AS age, dim_users.gender AS gender{space}
FROM dim_users) AS "core.users" ON "core.comments".user_id = "core.users".id GROUP BY "core.users".gender ORDER BY "core.users".gender
LIMIT 100 OFFSET 0"""
)
sql = """
SELECT "core.users.gender" AS "core.users.gender",
"core.num_comments" AS "core.num_comments"
FROM main.metrics
GROUP BY "core.users.gender"
ORDER BY invalid ASC
LIMIT 100;
"""
with pytest.raises(Exception) as excinfo:
get_query_for_sql(sql)
assert str(excinfo.value) == "Invalid identifier: invalid"
def test_get_query_for_sql_compound_names(
mocker: MockerFixture,
session: Session,
) -> None:
"""
Test ``get_query_for_sql`` with nodes with compound names.
"""
get_session = mocker.patch("datajunction.sql.build.get_session")
get_session().__next__.return_value = session
database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
A = Node(
name="core.A",
tables=[
Table(
database=database,
table="A",
columns=[
Column(name="one", type=ColumnType.STR),
Column(name="two", type=ColumnType.STR),
],
),
],
)
engine = create_engine(database.URI)
connection = engine.connect()
connection.execute("CREATE TABLE A (one TEXT, two TEXT)")
mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
B = Node(
name="core.B",
type=NodeType.METRIC,
expression="SELECT COUNT(*) AS cnt FROM core.A",
parents=[A],
)
session.add(B)
session.commit()
sql = "SELECT core.B FROM metrics"
create_query = get_query_for_sql(sql)
assert create_query.database_id == 1
space = " "
assert (
create_query.submitted_query
== f'''SELECT count('*') AS "core.B"{space}
FROM (SELECT "A".one AS one, "A".two AS two{space}
FROM "A") AS "core.A"'''
)
def test_get_query_for_sql_multiple_databases(
mocker: MockerFixture,
session: Session,
) -> None:
"""
Test ``get_query_for_sql`` when the parents are in multiple databases.
"""
get_session = mocker.patch("datajunction.sql.build.get_session")
get_session().__next__.return_value = session
database_1 = Database(id=1, name="slow", URI="sqlite://", cost=10.0)
database_2 = Database(id=2, name="fast", URI="sqlite://", cost=1.0)
A = Node(
name="A",
tables=[
Table(
database=database_1,
table="A",
columns=[
Column(name="one", type=ColumnType.STR),
Column(name="two", type=ColumnType.STR),
],
),
Table(
database=database_2,
table="A",
columns=[
Column(name="one", type=ColumnType.STR),
],
),
],
columns=[
Column(name="one", type=ColumnType.STR),
Column(name="two", type=ColumnType.STR),
],
)
engine = create_engine(database_1.URI)
connection = engine.connect()
connection.execute("CREATE TABLE A (one TEXT, two TEXT)")
mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
B = Node(
name="B",
type=NodeType.METRIC,
expression="SELECT COUNT(*) AS cnt FROM A",
parents=[A],
)
session.add(B)
session.commit()
sql = "SELECT B FROM metrics"
create_query = get_query_for_sql(sql)
assert create_query.database_id == 2 # fast
B.expression = "SELECT COUNT(two) AS cnt FROM A"
session.add(B)
session.commit()
sql = "SELECT B FROM metrics"
create_query = get_query_for_sql(sql)
assert create_query.database_id == 1 # slow
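# Editor's note: the two assertions above demonstrate cost-based database
# selection. While the metric only needs COUNT(*), both databases can serve
# it and the cheaper one (cost=1.0) wins; once the expression references
# column "two", which only the slow database's table has, database 1 becomes
# the sole candidate.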
def test_get_query_for_sql_multiple_metrics(
mocker: MockerFixture,
session: Session,
) -> None:
"""
Test ``get_query_for_sql`` with multiple metrics.
"""
get_session = mocker.patch("datajunction.sql.build.get_session")
get_session().__next__.return_value = session
database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
A = Node(
name="A",
tables=[
Table(
database=database,
table="A",
columns=[
Column(name="one", type=ColumnType.STR),
Column(name="two", type=ColumnType.STR),
],
),
],
columns=[
Column(name="one", type=ColumnType.STR),
Column(name="two", type=ColumnType.STR),
],
)
engine = create_engine(database.URI)
connection = engine.connect()
connection.execute("CREATE TABLE A (one TEXT, two TEXT)")
mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
B = Node(
name="B",
type=NodeType.METRIC,
expression="SELECT COUNT(*) AS cnt FROM A",
parents=[A],
)
session.add(B)
C = Node(
name="C",
type=NodeType.METRIC,
expression="SELECT MAX(one) AS max_one FROM A",
parents=[A],
)
session.add(C)
session.commit()
sql = "SELECT B, C FROM metrics"
create_query = get_query_for_sql(sql)
assert create_query.database_id == 1
space = " "
assert (
create_query.submitted_query
== f'''SELECT count('*') AS "B", max("A".one) AS "C"{space}
FROM (SELECT "A".one AS one, "A".two AS two{space}
FROM "A") AS "A"'''
)
def test_get_query_for_sql_non_identifiers(
mocker: MockerFixture,
session: Session,
) -> None:
"""
Test ``get_query_for_sql`` with metrics and non-identifiers in the ``SELECT``.
"""
get_session = mocker.patch("datajunction.sql.build.get_session")
get_session().__next__.return_value = session
database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
A = Node(
name="A",
tables=[
Table(
database=database,
table="A",
columns=[
Column(name="one", type=ColumnType.STR),
Column(name="two", type=ColumnType.STR),
],
),
],
columns=[
Column(name="one", type=ColumnType.STR),
Column(name="two", type=ColumnType.STR),
],
)
engine = create_engine(database.URI)
connection = engine.connect()
connection.execute("CREATE TABLE A (one TEXT, two TEXT)")
mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
B = Node(
name="B",
type=NodeType.METRIC,
expression="SELECT COUNT(*) AS cnt FROM A",
parents=[A],
)
session.add(B)
C = Node(
name="C",
type=NodeType.METRIC,
expression="SELECT MAX(one) AS max_one FROM A",
parents=[A],
)
session.add(C)
session.commit()
sql = "SELECT B, C, 'test' FROM metrics"
create_query = get_query_for_sql(sql)
assert create_query.database_id == 1
space = " "
assert (
create_query.submitted_query
== f'''SELECT count('*') AS "B", max("A".one) AS "C", test{space}
FROM (SELECT "A".one AS one, "A".two AS two{space}
FROM "A") AS "A"'''
)
def test_get_query_for_sql_different_parents(
mocker: MockerFixture,
session: Session,
) -> None:
"""
Test ``get_query_for_sql`` with metrics with different parents.
"""
get_session = mocker.patch("datajunction.sql.build.get_session")
get_session().__next__.return_value = session
database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
A = Node(
name="A",
tables=[
Table(
database=database,
table="A",
columns=[
Column(name="one", type=ColumnType.STR),
Column(name="two", type=ColumnType.STR),
],
),
],
)
B = Node(
name="B",
tables=[
Table(
database=database,
table="B",
columns=[
Column(name="one", type=ColumnType.STR),
Column(name="two", type=ColumnType.STR),
],
),
],
)
C = Node(
name="C",
type=NodeType.METRIC,
expression="SELECT COUNT(*) AS cnt FROM A",
parents=[A],
)
session.add(C)
D = Node(
name="D",
type=NodeType.METRIC,
expression="SELECT MAX(one) AS max_one FROM A",
parents=[B],
)
session.add(D)
session.commit()
sql = "SELECT C, D FROM metrics"
with pytest.raises(Exception) as excinfo:
get_query_for_sql(sql)
assert str(excinfo.value) == "Metrics C and D have non-shared parents"
def test_get_query_for_sql_not_metric(mocker: MockerFixture, session: Session) -> None:
"""
Test ``get_query_for_sql`` when the projection is not a metric node.
"""
get_session = mocker.patch("datajunction.sql.build.get_session")
get_session().__next__.return_value = session
database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
A = Node(
name="A",
tables=[
Table(
database=database,
table="A",
columns=[
Column(name="one", type=ColumnType.STR),
Column(name="two", type=ColumnType.STR),
],
),
],
)
B = Node(
name="B",
expression="SELECT one FROM A",
parents=[A],
)
session.add(B)
session.commit()
sql = "SELECT B FROM metrics"
with pytest.raises(Exception) as excinfo:
get_query_for_sql(sql)
assert str(excinfo.value) == "Invalid dimension: B"
def test_get_query_for_sql_no_databases(
mocker: MockerFixture,
session: Session,
) -> None:
"""
Test ``get_query_for_sql`` when no common databases are found.
"""
get_session = mocker.patch("datajunction.sql.build.get_session")
get_session().__next__.return_value = session
A = Node(
name="A",
tables=[],
)
B = Node(
name="B",
type=NodeType.METRIC,
expression="SELECT COUNT(*) AS cnt FROM A",
parents=[A],
)
session.add(B)
session.commit()
sql = "SELECT B FROM metrics"
with pytest.raises(Exception) as excinfo:
get_query_for_sql(sql)
assert str(excinfo.value) == "No valid database was found"
def test_get_query_for_sql_alias(mocker: MockerFixture, session: Session) -> None:
"""
Test ``get_query_for_sql`` with aliases.
"""
get_session = mocker.patch("datajunction.sql.build.get_session")
get_session().__next__.return_value = session
database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
A = Node(
name="A",
tables=[
Table(
database=database,
table="A",
columns=[
Column(name="one", type=ColumnType.STR),
Column(name="two", type=ColumnType.STR),
],
),
],
)
engine = create_engine(database.URI)
connection = engine.connect()
connection.execute("CREATE TABLE A (one TEXT, two TEXT)")
mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
B = Node(
name="B",
type=NodeType.METRIC,
expression="SELECT COUNT(*) AS cnt FROM A",
parents=[A],
)
session.add(B)
session.commit()
sql = "SELECT B AS my_metric FROM metrics"
create_query = get_query_for_sql(sql)
assert create_query.database_id == 1
space = " "
assert (
create_query.submitted_query
== f'''SELECT count('*') AS my_metric{space}
FROM (SELECT "A".one AS one, "A".two AS two{space}
FROM "A") AS "A"'''
)
def test_get_query_for_sql_where_groupby(
mocker: MockerFixture,
session: Session,
) -> None:
"""
Test ``get_query_for_sql`` with a where and a group by.
"""
get_session = mocker.patch("datajunction.sql.build.get_session")
get_session().__next__.return_value = session
database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
comments = Node(
name="core.comments",
tables=[
Table(
database=database,
table="comments",
columns=[
Column(name="user_id", type=ColumnType.INT),
Column(name="comment", type=ColumnType.STR),
],
),
],
)
engine = create_engine(database.URI)
connection = engine.connect()
connection.execute("CREATE TABLE comments (user_id INT, comment TEXT)")
mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
num_comments = Node(
name="core.num_comments",
type=NodeType.METRIC,
expression="SELECT COUNT(*) FROM core.comments",
parents=[comments],
)
session.add(num_comments)
session.commit()
sql = """
SELECT "core.num_comments", "core.comments.user_id" FROM metrics
WHERE "core.comments.user_id" > 1
GROUP BY "core.comments.user_id"
"""
create_query = get_query_for_sql(sql)
assert create_query.database_id == 1
space = " "
assert (
create_query.submitted_query
== f"""SELECT count('*') AS "core.num_comments", "core.comments".user_id{space}
FROM (SELECT comments.user_id AS user_id, comments.comment AS comment{space}
FROM comments) AS "core.comments"{space}
WHERE "core.comments".user_id > 1 GROUP BY "core.comments".user_id"""
)
def test_get_query_for_sql_date_trunc(
mocker: MockerFixture,
session: Session,
) -> None:
"""
Test ``get_query_for_sql`` with a call to ``DATE_TRUNC``.
"""
get_session = mocker.patch("datajunction.sql.build.get_session")
get_session().__next__.return_value = session
database = Database(id=1, name="db", URI="sqlite://")
comments = Node(
name="core.comments",
tables=[
Table(
database=database,
table="comments",
columns=[
Column(name="user_id", type=ColumnType.INT),
Column(name="timestamp", type=ColumnType.DATETIME),
],
),
],
)
engine = create_engine(database.URI)
connection = engine.connect()
connection.execute("CREATE TABLE comments (user_id INT, timestamp DATETIME)")
mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
num_comments = Node(
name="core.num_comments",
type=NodeType.METRIC,
expression="SELECT COUNT(*) FROM core.comments",
parents=[comments],
)
session.add(num_comments)
session.commit()
sql = """
SELECT
DATE_TRUNC('day', "core.comments.timestamp") AS "__timestamp",
"core.num_comments"
FROM metrics
GROUP BY
DATE_TRUNC('day', "core.comments.timestamp")
"""
create_query = get_query_for_sql(sql)
assert create_query.database_id == 1
space = " "
assert (
create_query.submitted_query
== f"""SELECT datetime("core.comments".timestamp, 'start of day') AS __timestamp, count('*') AS "core.num_comments"{space}
FROM (SELECT comments.user_id AS user_id, comments.timestamp AS timestamp{space}
FROM comments) AS "core.comments" GROUP BY datetime("core.comments".timestamp, 'start of day')"""
)
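# Note: on SQLite the transpiler maps DATE_TRUNC('day', col) to
# datetime(col, 'start of day'), which is what the expected string above encodes.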
def test_get_query_for_sql_invalid_column(
mocker: MockerFixture,
session: Session,
) -> None:
"""
Test ``get_query_for_sql`` with an invalid column.
"""
get_session = mocker.patch("datajunction.sql.build.get_session")
get_session().__next__.return_value = session
database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
comments = Node(
name="core.comments",
tables=[
Table(
database=database,
table="comments",
columns=[
Column(name="user_id", type=ColumnType.INT),
Column(name="comment", type=ColumnType.STR),
],
),
],
)
engine = create_engine(database.URI)
connection = engine.connect()
connection.execute("CREATE TABLE comments (user_id INT, comment TEXT)")
mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
num_comments = Node(
name="core.num_comments",
type=NodeType.METRIC,
expression="SELECT COUNT(*) FROM core.comments",
parents=[comments],
)
session.add(num_comments)
session.commit()
sql = """
SELECT "core.num_comments" FROM metrics
WHERE "core.some_other_parent.user_id" > 1
"""
with pytest.raises(Exception) as excinfo:
get_query_for_sql(sql)
assert str(excinfo.value) == "Invalid dimension: core.some_other_parent.user_id"
def test_get_dimensions_from_filters() -> None:
"""
Test ``get_dimensions_from_filters``.
"""
assert get_dimensions_from_filters(["a>1", "b=10"]) == {"a", "b"}
with pytest.raises(Exception) as excinfo:
get_dimensions_from_filters(["aaaa"])
assert (
str(excinfo.value)
== """The filter "aaaa" is invalid
The following error happened:
- The filter "aaaa" is not a valid filter. Filters should consist of a dimension name, follow by a valid operator (<=|<|>=|>|!=|=), followed by a value. If the value is a string or date/time it should be enclosed in single quotes. (error code: 100)"""
)
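# A minimal sketch of the filter parsing, assuming the grammar spelled out in
# the error message above; illustrative only, not the actual datajunction
# implementation (the regex and helper name are hypothetical):
import re

FILTER_RE = re.compile(r"^\s*([\w.]+)\s*(<=|<|>=|>|!=|=)\s*(.+?)\s*$")

def get_dimensions_from_filters_sketch(filters):
    """Extract the dimension name from each ``<dimension><op><value>`` filter."""
    dimensions = set()
    for filter_ in filters:
        match = FILTER_RE.match(filter_)
        if not match:
            raise Exception('The filter "{0}" is invalid'.format(filter_))
        dimensions.add(match.group(1))
    return dimensions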
def test_find_on_clause(mocker: MockerFixture) -> None:
"""
Test ``find_on_clause``.
"""
database = Database(id=1, name="one", URI="sqlite://")
dimension = Node(
name="core.users",
type=NodeType.DIMENSION,
tables=[
Table(
database=database,
table="dim_users",
columns=[
Column(name="id", type=ColumnType.INT),
Column(name="age", type=ColumnType.INT),
Column(name="gender", type=ColumnType.STR),
],
),
],
columns=[
Column(name="id", type=ColumnType.INT),
Column(name="age", type=ColumnType.INT),
Column(name="gender", type=ColumnType.STR),
],
)
parent = Node(
name="core.comments",
tables=[
Table(
database=database,
table="comments",
columns=[
Column(name="ds", type=ColumnType.STR),
Column(name="user_id", type=ColumnType.INT, dimension=dimension),
Column(name="text", type=ColumnType.STR),
],
),
],
columns=[
Column(name="ds", type=ColumnType.STR),
Column(name="user_id", type=ColumnType.INT, dimension=dimension),
Column(name="text", type=ColumnType.STR),
],
)
child = Node(name="core.num_comments", parents=[parent])
node_select = mocker.MagicMock()
subquery = mocker.MagicMock()
find_on_clause(child, node_select, dimension, subquery)
node_select.columns.__getitem__.assert_called_with("user_id")
subquery.columns.__getitem__.assert_called_with("id")
def test_find_on_clause_parent_no_columns(mocker: MockerFixture) -> None:
"""
Test ``find_on_clause`` when a parent has no columns.
I think we expect all nodes to have at least one column, so this test is just for
completeness.
"""
database = Database(id=1, name="one", URI="sqlite://")
dimension = Node(
name="core.users",
type=NodeType.DIMENSION,
tables=[
Table(
database=database,
table="dim_users",
columns=[
Column(name="id", type=ColumnType.INT),
Column(name="age", type=ColumnType.INT),
Column(name="gender", type=ColumnType.STR),
],
),
],
columns=[
Column(name="id", type=ColumnType.INT),
Column(name="age", type=ColumnType.INT),
Column(name="gender", type=ColumnType.STR),
],
)
parent_1 = Node(
name="core.comments",
tables=[
Table(
database=database,
table="comments",
columns=[
Column(name="ds", type=ColumnType.STR),
Column(name="user_id", type=ColumnType.INT, dimension=dimension),
Column(name="text", type=ColumnType.STR),
],
),
],
columns=[
Column(name="ds", type=ColumnType.STR),
Column(name="user_id", type=ColumnType.INT, dimension=dimension),
Column(name="text", type=ColumnType.STR),
],
)
parent_2 = Node(
name="a_weird_node",
tables=[
Table(
database=database,
table="empty",
columns=[],
),
],
columns=[],
)
child = Node(name="core.num_comments", parents=[parent_2, parent_1])
node_select = mocker.MagicMock()
subquery = mocker.MagicMock()
find_on_clause(child, node_select, dimension, subquery)
node_select.columns.__getitem__.assert_called_with("user_id")
def test_find_on_clause_parent_invalid_reference(mocker: MockerFixture) -> None:
"""
Test ``find_on_clause`` when no parent column references the dimension.
The compiler should check that the dimension is valid, but the table could change.
"""
database = Database(id=1, name="one", URI="sqlite://")
dimension = Node(
name="core.users",
type=NodeType.DIMENSION,
tables=[
Table(
database=database,
table="dim_users",
columns=[
Column(name="id", type=ColumnType.INT),
Column(name="age", type=ColumnType.INT),
Column(name="gender", type=ColumnType.STR),
],
),
],
columns=[
Column(name="id", type=ColumnType.INT),
Column(name="age", type=ColumnType.INT),
Column(name="gender", type=ColumnType.STR),
],
)
parent = Node(
name="core.comments",
tables=[
Table(
database=database,
table="comments",
columns=[
Column(name="ds", type=ColumnType.STR),
Column(name="user_id", type=ColumnType.INT),
Column(name="text", type=ColumnType.STR),
],
),
],
columns=[
Column(name="ds", type=ColumnType.STR),
Column(name="user_id", type=ColumnType.INT),
Column(name="text", type=ColumnType.STR),
],
)
child = Node(name="core.num_comments", parents=[parent])
node_select = mocker.MagicMock()
subquery = mocker.MagicMock()
with pytest.raises(Exception) as excinfo:
find_on_clause(child, node_select, dimension, subquery)
assert (
str(excinfo.value)
== "Node core.num_comments has no columns with dimension core.users"
)
def test_get_join_columns() -> None:
"""
Test ``get_join_columns``.
"""
database = Database(id=1, name="one", URI="sqlite://")
dimension = Node(
name="core.users",
type=NodeType.DIMENSION,
tables=[
Table(
database=database,
table="dim_users",
columns=[
Column(name="id", type=ColumnType.INT),
Column(name="age", type=ColumnType.INT),
Column(name="gender", type=ColumnType.STR),
],
),
],
columns=[
Column(name="id", type=ColumnType.INT),
Column(name="age", type=ColumnType.INT),
Column(name="gender", type=ColumnType.STR),
],
)
orphan = Node(name="orphan")
with pytest.raises(Exception) as excinfo:
get_join_columns(orphan, dimension)
assert str(excinfo.value) == "Node orphan has no columns with dimension core.users"
parent_without_columns = Node(name="parent_without_columns")
broken = Node(name="broken", parents=[parent_without_columns])
with pytest.raises(Exception) as excinfo:
get_join_columns(broken, dimension)
assert str(excinfo.value) == "Node broken has no columns with dimension core.users"
parent = Node(
name="parent",
tables=[
Table(
database=database,
table="comments",
columns=[
Column(name="ds", type=ColumnType.STR),
Column(name="user_id", type=ColumnType.INT),
Column(name="text", type=ColumnType.STR),
],
),
],
columns=[
Column(name="ds", type=ColumnType.STR),
Column(name="user_id", type=ColumnType.INT, dimension=dimension),
Column(name="text", type=ColumnType.STR),
],
)
child = Node(name="child", parents=[parent_without_columns, parent])
parent_name, column_name, dimension_column = get_join_columns(child, dimension)
assert parent_name == "parent"
assert column_name == "user_id"
assert dimension_column == "id"
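# For reference, a minimal sketch of the traversal the assertions above imply;
# this is an assumption for illustration, not the real datajunction
# ``get_join_columns`` (the helper name is hypothetical):
def get_join_columns_sketch(node, dimension):
    """Return (parent_name, column_name, dimension_column) for the join."""
    for parent in node.parents:
        for column in parent.columns:
            if column.dimension is dimension:
                # The tests above always join against the dimension's "id" column.
                return parent.name, column.name, "id"
    raise Exception(
        "Node {0} has no columns with dimension {1}".format(node.name, dimension.name),
    )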
# === File: html/backend/variables.py | repo: programmersidharth/web @ e7df13308e301afc604ade82992ff4b4162d4810 | Python | MIT | 28,491 bytes ===
# icons used for filtering
ICONS = ['10k', '10mp', '11mp', '12mp', '13mp', '14mp', '15mp', '16mp', '17mp', '18mp', '19mp', '1k', '1k_plus', '1x_mobiledata', '20mp', '21mp', '22mp', '23mp', '24mp', '2k', '2k_plus', '2mp', '30fps', '30fps_select', '360', '3d_rotation', '3g_mobiledata', '3k', '3k_plus', '3mp', '3p', '4g_mobiledata', '4g_plus_mobiledata', '4k', '4k_plus', '4mp', '5g', '5k', '5k_plus', '5mp', '6_ft_apart', '60fps', '60fps_select', '6k', '6k_plus', '6mp', '7k', '7k_plus', '7mp', '8k', '8k_plus', '8mp', '9k', '9k_plus', '9mp', 'ac_unit', 'access_alarm', 'access_alarms', 'access_time', 'access_time_filled', 'accessibility', 'accessibility_new', 'accessible', 'accessible_forward', 'account_balance', 'account_balance_wallet', 'account_box', 'account_circle', 'account_tree', 'ad_units', 'adb', 'add', 'add_a_photo', 'add_alarm', 'add_alert', 'add_box', 'add_business', 'add_chart', 'add_circle', 'add_circle_outline', 'add_comment', 'add_ic_call', 'add_link', 'add_location', 'add_location_alt', 'add_moderator', 'add_photo_alternate', 'add_reaction', 'add_road', 'add_shopping_cart', 'add_task', 'add_to_drive', 'add_to_home_screen', 'add_to_photos', 'add_to_queue', 'addchart', 'adjust', 'admin_panel_settings', 'agriculture', 'air', 'airline_seat_flat', 'airline_seat_flat_angled', 'airline_seat_individual_suite', 'airline_seat_legroom_extra', 'airline_seat_legroom_normal', 'airline_seat_legroom_reduced', 'airline_seat_recline_extra', 'airline_seat_recline_normal', 'airplane_ticket', 'airplanemode_active', 'airplanemode_inactive', 'airplay', 'airport_shuttle', 'alarm', 'alarm_add', 'alarm_off', 'alarm_on', 'album', 'align_horizontal_center', 'align_horizontal_left', 'align_horizontal_right', 'align_vertical_bottom', 'align_vertical_center', 'align_vertical_top', 'all_inbox', 'all_inclusive', 'all_out', 'alt_route', 'alternate_email', 'amp_stories', 'analytics', 'anchor', 'android', 'animation', 'announcement', 'aod', 'apartment', 'api', 'app_blocking', 'app_registration', 'app_settings_alt', 'approval', 'apps', 'architecture', 'archive', 'arrow_back', 'arrow_back_ios', 'arrow_back_ios_new', 'arrow_circle_down', 'arrow_circle_up', 'arrow_downward', 'arrow_drop_down', 'arrow_drop_down_circle', 'arrow_drop_up', 'arrow_forward', 'arrow_forward_ios', 'arrow_left', 'arrow_right', 'arrow_right_alt', 'arrow_upward', 'art_track', 'article', 'aspect_ratio', 'assessment', 'assignment', 'assignment_ind', 'assignment_late', 'assignment_return', 'assignment_returned', 'assignment_turned_in', 'assistant', 'assistant_direction', 'assistant_photo', 'atm', 'attach_email', 'attach_file', 'attach_money', 'attachment', 'attractions', 'attribution', 'audiotrack', 'auto_awesome', 'auto_awesome_mosaic', 'auto_awesome_motion', 'auto_delete', 'auto_fix_high', 'auto_fix_normal', 'auto_fix_off', 'auto_graph', 'auto_stories', 'autofps_select', 'autorenew', 'av_timer', 'baby_changing_station', 'backpack', 'backspace', 'backup', 'backup_table', 'badge', 'bakery_dining', 'balcony', 'ballot', 'bar_chart', 'barcode', 'batch_prediction', 'bathroom', 'bathtub', 'battery_20', 'battery_30', 'battery_50', 'battery_60', 'battery_80', 'battery_90', 'battery_alert', 'battery_charging_20', 'battery_charging_30', 'battery_charging_50', 'battery_charging_60', 'battery_charging_80', 'battery_charging_90', 'battery_charging_full', 'battery_full', 'battery_saver', 'battery_std', 'battery_unknown', 'beach_access', 'bed', 'bedroom_baby', 'bedroom_child', 'bedroom_parent', 'bedtime', 'beenhere', 'bento', 'bike_scooter', 'biotech', 'blender', 'block', 'bloodtype', 
'bluetooth', 'bluetooth_audio', 'bluetooth_connected', 'bluetooth_disabled', 'bluetooth_drive', 'bluetooth_searching', 'blur_circular', 'blur_linear', 'blur_off', 'blur_on', 'bolt', 'book', 'book_online', 'bookmark', 'bookmark_add', 'bookmark_added', 'bookmark_border', 'bookmark_remove', 'bookmarks', 'border_all', 'border_bottom', 'border_clear', 'border_color', 'border_horizontal', 'border_inner', 'border_left', 'border_outer', 'border_right', 'border_style', 'border_top', 'border_vertical', 'branding_watermark', 'breakfast_dining', 'brightness_1', 'brightness_2', 'brightness_3', 'brightness_4', 'brightness_5', 'brightness_6', 'brightness_7', 'brightness_auto', 'brightness_high', 'brightness_low', 'brightness_medium', 'broken_image', 'browser_not_supported', 'brunch_dining', 'brush', 'bubble_chart', 'bug_report', 'build', 'build_circle', 'bungalow', 'burst_mode', 'bus_alert', 'business', 'business_center', 'cabin', 'cable', 'cached', 'cake', 'calculate', 'calendar_today', 'calendar_view_day', 'calendar_view_month', 'calendar_view_week', 'call', 'call_end', 'call_made', 'call_merge', 'call_missed', 'call_missed_outgoing', 'call_received', 'call_split', 'call_to_action', 'camera', 'camera_alt', 'camera_enhance', 'camera_front', 'camera_indoor', 'camera_outdoor', 'camera_rear', 'camera_roll', 'cameraswitch', 'campaign', 'cancel', 'cancel_presentation', 'cancel_schedule_send', 'car_rental', 'car_repair', 'card_giftcard', 'card_membership', 'card_travel', 'carpenter', 'cases', 'casino', 'cast', 'cast_connected', 'cast_for_education', 'catching_pokemon', 'category', 'celebration', 'cell_wifi', 'center_focus_strong', 'center_focus_weak', 'chair', 'chair_alt', 'chalet', 'change_circle', 'change_history', 'charging_station', 'chat', 'chat_bubble', 'chat_bubble_outline', 'check', 'check_box', 'check_box_outline_blank', 'check_circle', 'check_circle_outline', 'checkroom', 'chevron_left', 'chevron_right', 'child_care', 'child_friendly', 'chrome_reader_mode', 'circle', 'circle_notifications', 'class', 'clean_hands', 'cleaning_services', 'clear', 'clear_all', 'close', 'close_fullscreen', 'closed_caption', 'closed_caption_disabled', 'closed_caption_off', 'cloud', 'cloud_circle', 'cloud_done', 'cloud_download', 'cloud_off', 'cloud_queue', 'cloud_upload', 'code', 'code_off', 'coffee', 'coffee_maker', 'collections', 'collections_bookmark', 'color_lens', 'colorize', 'comment', 'comment_bank', 'commute', 'compare', 'compare_arrows', 'compass_calibration', 'compress', 'computer', 'confirmation_number', 'connect_without_contact', 'connected_tv', 'construction', 'contact_mail', 'contact_page', 'contact_phone', 'contact_support', 'contactless', 'contacts', 'content_copy', 'content_cut', 'content_paste', 'content_paste_off', 'control_camera', 'control_point', 'control_point_duplicate', 'copy_all', 'copyright', 'coronavirus', 'corporate_fare', 'cottage', 'countertops', 'create', 'create_new_folder', 'credit_card', 'credit_card_off', 'credit_score', 'crib', 'crop', 'crop_16_9', 'crop_3_2', 'crop_5_4', 'crop_7_5', 'crop_din', 'crop_free', 'crop_landscape', 'crop_original', 'crop_portrait', 'crop_rotate', 'crop_square', 'dangerous', 'dark_mode', 'dashboard', 'dashboard_customize', 'data_saver_off', 'data_saver_on', 'data_usage', 'date_range', 'deck', 'dehaze', 'delete', 'delete_forever', 'delete_outline', 'delete_sweep', 'delivery_dining', 'departure_board', 'description', 'design_services', 'desktop_access_disabled', 'desktop_mac', 'desktop_windows', 'details', 'developer_board', 'developer_board_off', 
'developer_mode', 'device_hub', 'device_thermostat', 'device_unknown', 'devices', 'devices_other', 'dialer_sip', 'dialpad', 'dining', 'dinner_dining', 'directions', 'directions_bike', 'directions_boat', 'directions_boat_filled', 'directions_bus', 'directions_bus_filled', 'directions_car', 'directions_car_filled', 'directions_off', 'directions_railway', 'directions_railway_filled', 'directions_run', 'directions_subway', 'directions_subway_filled', 'directions_transit', 'directions_transit_filled', 'directions_walk', 'dirty_lens', 'disabled_by_default', 'disc_full', 'divide', 'dns', 'do_disturb', 'do_disturb_alt', 'do_disturb_off', 'do_disturb_on', 'do_not_disturb', 'do_not_disturb_alt', 'do_not_disturb_off', 'do_not_disturb_on', 'do_not_disturb_on_total_silence', 'do_not_step', 'do_not_touch', 'dock', 'domain', 'domain_disabled', 'domain_verification', 'done', 'done_all', 'done_outline', 'donut_large', 'donut_small', 'door_back', 'door_front', 'door_sliding', 'doorbell', 'double_arrow', 'downhill_skiing', 'download', 'download_done', 'download_for_offline', 'downloading', 'drafts', 'drag_handle', 'drag_indicator', 'drive_eta', 'drive_file_move', 'drive_file_rename_outline', 'drive_folder_upload', 'dry', 'dry_cleaning', 'duo', 'dvr', 'dynamic_feed', 'dynamic_form', 'e_mobiledata', 'earbuds', 'earbuds_battery', 'east', 'eco', 'edgesensor_high', 'edgesensor_low', 'edit', 'edit_attributes', 'edit_location', 'edit_location_alt', 'edit_notifications', 'edit_off', 'edit_road', 'eject', 'elderly', 'electric_bike', 'electric_car', 'electric_moped', 'electric_rickshaw', 'electric_scooter', 'electrical_services', 'elevator', 'email', 'emoji_emotions', 'emoji_events', 'emoji_flags', 'emoji_food_beverage', 'emoji_nature', 'emoji_objects', 'emoji_people', 'emoji_symbols', 'emoji_transportation', 'engineering', 'enhanced_encryption', 'equalizer', 'equals', 'error', 'error_outline', 'escalator', 'escalator_warning', 'euro', 'euro_symbol', 'ev_station', 'event', 'event_available', 'event_busy', 'event_note', 'event_seat', 'exit_to_app', 'expand', 'expand_less', 'expand_more', 'explicit', 'explore', 'explore_off', 'exposure', 'exposure_neg_1', 'exposure_neg_2', 'exposure_plus_1', 'exposure_plus_2', 'exposure_zero', 'extension', 'extension_off', 'face', 'face_retouching_natural', 'face_retouching_off', 'facebook', 'fact_check', 'family_restroom', 'fast_forward', 'fast_rewind', 'fastfood', 'favorite', 'favorite_border', 'featured_play_list', 'featured_video', 'feed', 'feedback', 'female', 'fence', 'festival', 'fiber_dvr', 'fiber_manual_record', 'fiber_new', 'fiber_pin', 'fiber_smart_record', 'file_copy', 'file_download', 'file_download_done', 'file_download_off', 'file_present', 'file_upload', 'filter', 'filter_1', 'filter_2', 'filter_3', 'filter_4', 'filter_5', 'filter_6', 'filter_7', 'filter_8', 'filter_9', 'filter_9_plus', 'filter_alt', 'filter_b_and_w', 'filter_center_focus', 'filter_drama', 'filter_frames', 'filter_hdr', 'filter_list', 'filter_none', 'filter_tilt_shift', 'filter_vintage', 'find_in_page', 'find_replace', 'fingerprint', 'fire_extinguisher', 'fireplace', 'first_page', 'fit_screen', 'fitness_center', 'flag', 'flaky', 'flare', 'flash_auto', 'flash_off', 'flash_on', 'flashlight_off', 'flashlight_on', 'flatware', 'flight', 'flight_land', 'flight_takeoff', 'flip', 'flip_camera_android', 'flip_camera_ios', 'flip_to_back', 'flip_to_front', 'flourescent', 'flutter_dash', 'fmd_bad', 'fmd_good', 'folder', 'folder_open', 'folder_shared', 'folder_special', 'follow_the_signs', 'font_download', 
'font_download_off', 'food_bank', 'format_align_center', 'format_align_justify', 'format_align_left', 'format_align_right', 'format_bold', 'format_clear', 'format_color_fill', 'format_color_reset', 'format_color_text', 'format_indent_decrease', 'format_indent_increase', 'format_italic', 'format_line_spacing', 'format_list_bulleted', 'format_list_numbered', 'format_list_numbered_rtl', 'format_paint', 'format_quote', 'format_shapes', 'format_size', 'format_strikethrough', 'format_textdirection_l_to_r', 'format_textdirection_r_to_l', 'format_underlined', 'forum', 'forward', 'forward_10', 'forward_30', 'forward_5', 'forward_to_inbox', 'foundation', 'free_breakfast', 'fullscreen', 'fullscreen_exit', 'functions', 'g_mobiledata', 'g_translate', 'gamepad', 'games', 'garage', 'gavel', 'gesture', 'get_app', 'gif', 'gite', 'golf_course', 'gpp_bad', 'gpp_good', 'gpp_maybe', 'gps_fixed', 'gps_not_fixed', 'gps_off', 'grade', 'gradient', 'grading', 'grain', 'graphic_eq', 'grass', 'greater_than', 'greater_than_equal', 'grid_3x3', 'grid_4x4', 'grid_goldenratio', 'grid_off', 'grid_on', 'grid_view', 'group', 'group_add', 'group_work', 'groups', 'h_mobiledata', 'h_plus_mobiledata', 'hail', 'handyman', 'hardware', 'hd', 'hdr_auto', 'hdr_auto_select', 'hdr_enhanced_select', 'hdr_off', 'hdr_off_select', 'hdr_on', 'hdr_on_select', 'hdr_plus', 'hdr_strong', 'hdr_weak', 'headphones', 'headphones_battery', 'headset', 'headset_mic', 'headset_off', 'healing', 'health_and_safety', 'hearing', 'hearing_disabled', 'height', 'help', 'help_center', 'help_outline', 'hevc', 'hide_image', 'hide_source', 'high_quality', 'highlight', 'highlight_alt', 'highlight_off', 'hiking', 'history', 'history_edu', 'history_toggle_off', 'holiday_village', 'home', 'home_max', 'home_mini', 'home_repair_service', 'home_work', 'horizontal_distribute', 'horizontal_rule', 'horizontal_split', 'hot_tub', 'hotel', 'hourglass_bottom', 'hourglass_disabled', 'hourglass_empty', 'hourglass_full', 'hourglass_top', 'house', 'house_siding', 'houseboat', 'how_to_reg', 'how_to_vote', 'http', 'https', 'hvac', 'ice_skating', 'icecream', 'image', 'image_aspect_ratio', 'image_not_supported', 'image_search', 'imagesearch_roller', 'import_contacts', 'import_export', 'important_devices', 'inbox', 'indeterminate_check_box', 'info', 'input', 'insert_chart', 'insert_chart_outlined', 'insert_comment', 'insert_drive_file', 'insert_emoticon', 'insert_invitation', 'insert_link', 'insert_photo', 'insights', 'integration_instructions', 'inventory', 'inventory_2', 'invert_colors', 'invert_colors_off', 'ios_share', 'iron', 'iso', 'kayaking', 'keyboard', 'keyboard_alt', 'keyboard_arrow_down', 'keyboard_arrow_left', 'keyboard_arrow_right', 'keyboard_arrow_up', 'keyboard_backspace', 'keyboard_capslock', 'keyboard_hide', 'keyboard_return', 'keyboard_tab', 'keyboard_voice', 'king_bed', 'kitchen', 'kitesurfing', 'label', 'label_important', 'label_off', 'landscape', 'language', 'laptop', 'laptop_chromebook', 'laptop_mac', 'laptop_windows', 'last_page', 'launch', 'layers', 'layers_clear', 'leaderboard', 'leak_add', 'leak_remove', 'leave_bags_at_home', 'legend_toggle', 'lens', 'lens_blur', 'less_than', 'less_than_equal', 'library_add', 'library_add_check', 'library_books', 'library_music', 'light', 'light_mode', 'lightbulb', 'line_style', 'line_weight', 'linear_scale', 'link', 'link_off', 'linked_camera', 'liquor', 'list', 'list_alt', 'live_help', 'live_tv', 'living', 'local_activity', 'local_airport', 'local_atm', 'local_bar', 'local_cafe', 'local_car_wash', 'local_convenience_store', 
'local_dining', 'local_drink',
'local_fire_department', 'local_florist', 'local_gas_station', 'local_grocery_store', 'local_hospital', 'local_hotel', 'local_laundry_service', 'local_library', 'local_mall', 'local_movies', 'local_offer', 'local_parking', 'local_pharmacy', 'local_phone', 'local_pizza', 'local_play', 'local_police', 'local_post_office', 'local_printshop', 'local_see', 'local_shipping', 'local_taxi', 'location_city', 'location_disabled', 'location_off', 'location_on', 'location_searching', 'lock', 'lock_clock', 'lock_open', 'log_in', 'log_out', 'login', 'logout', 'looks', 'looks_3', 'looks_4', 'looks_5', 'looks_6', 'looks_one', 'looks_two', 'loop', 'loupe', 'low_priority', 'loyalty', 'lte_mobiledata', 'lte_plus_mobiledata', 'luggage', 'lunch_dining', 'mail', 'mail_outline', 'male', 'manage_accounts', 'manage_search', 'map', 'maps_home_work', 'maps_ugc', 'margin', 'mark_as_unread', 'mark_chat_read', 'mark_chat_unread', 'mark_email_read', 'mark_email_unread', 'markunread', 'markunread_mailbox', 'masks', 'maximize', 'media_bluetooth_off', 'media_bluetooth_on', 'mediation', 'medical_services', 'medication', 'meeting_room', 'memory', 'menu', 'menu_book', 'menu_open', 'merge_type', 'message', 'mic', 'mic_external_off', 'mic_external_on', 'mic_none', 'mic_off', 'microwave', 'military_tech', 'minimize', 'minus', 'miscellaneous_services', 'missed_video_call', 'mms', 'mobile_friendly', 'mobile_off', 'mobile_screen_share', 'mobiledata_off', 'mode', 'mode_comment', 'mode_edit', 'mode_edit_outline', 'mode_night', 'mode_standby', 'model_training', 'monetization_on', 'money', 'money_off', 'money_off_csred', 'monitor', 'monitor_weight', 'monochrome_photos', 'mood', 'mood_bad', 'moped', 'more', 'more_horiz', 'more_time', 'more_vert', 'motion_photos_auto', 'motion_photos_off', 'motion_photos_on', 'motion_photos_pause', 'motion_photos_paused', 'motorcycle', 'mouse', 'move_to_inbox', 'movie', 'movie_creation', 'movie_filter', 'moving', 'mp', 'multiline_chart', 'multiple_stop', 'museum', 'music_note', 'music_off', 'music_video', 'my_location', 'nat', 'nature', 'nature_people', 'navigate_before', 'navigate_next', 'navigation', 'near_me', 'near_me_disabled', 'nearby_error', 'nearby_off', 'network_cell', 'network_check', 'network_locked', 'network_wifi', 'new_releases', 'next_plan', 'next_week', 'nfc', 'night_shelter', 'nightlife', 'nightlight', 'nightlight_round', 'nights_stay', 'no_accounts', 'no_backpack', 'no_cell', 'no_drinks', 'no_encryption', 'no_encryption_gmailerrorred', 'no_flash', 'no_food', 'no_luggage', 'no_meals', 'no_meeting_room', 'no_photography', 'no_sim', 'no_stroller', 'no_transfer', 'nordic_walking', 'north', 'north_east', 'north_west', 'not_accessible', 'not_equal', 'not_interested', 'not_listed_location', 'not_started', 'note', 'note_add', 'note_alt', 'notes', 'notification_add', 'notification_important', 'notifications', 'notifications_active', 'notifications_none', 'notifications_off', 'notifications_paused', 'offline_bolt', 'offline_pin', 'offline_share', 'ondemand_video', 'online_prediction', 'opacity', 'open_in_browser', 'open_in_full', 'open_in_new', 'open_in_new_off', 'open_with', 'other_houses', 'outbond', 'outbound', 'outbox', 'outdoor_grill', 'outlet', 'outlined_flag', 'padding', 'pages', 'pageview', 'paid', 'palette', 'pan_tool', 'panorama', 'panorama_fish_eye', 'panorama_horizontal', 'panorama_horizontal_select', 'panorama_photosphere', 'panorama_photosphere_select', 'panorama_vertical', 'panorama_vertical_select', 'panorama_wide_angle', 'panorama_wide_angle_select', 'paragliding', 'park', 
'party_mode', 'password', 'pattern', 'pause', 'pause_circle', 'pause_circle_filled', 'pause_circle_outline', 'pause_presentation', 'payment', 'payments', 'pedal_bike', 'pending', 'pending_actions', 'people', 'people_alt', 'people_outline', 'percentage', 'perm_camera_mic', 'perm_contact_calendar', 'perm_data_setting', 'perm_device_information', 'perm_identity', 'perm_media', 'perm_phone_msg', 'perm_scan_wifi', 'person', 'person_add', 'person_add_alt', 'person_add_alt_1', 'person_add_disabled', 'person_off', 'person_outline', 'person_pin', 'person_pin_circle', 'person_remove', 'person_remove_alt_1', 'person_search', 'personal_video', 'pest_control', 'pest_control_rodent', 'pets', 'phone', 'phone_android', 'phone_bluetooth_speaker', 'phone_callback', 'phone_disabled', 'phone_enabled', 'phone_forwarded', 'phone_in_talk', 'phone_iphone', 'phone_locked', 'phone_missed', 'phone_paused', 'phonelink', 'phonelink_erase', 'phonelink_lock', 'phonelink_off', 'phonelink_ring', 'phonelink_setup', 'photo', 'photo_album', 'photo_camera', 'photo_camera_back', 'photo_camera_front', 'photo_filter', 'photo_library', 'photo_size_select_actual', 'photo_size_select_large', 'photo_size_select_small', 'piano', 'piano_off', 'picture_as_pdf', 'picture_in_picture', 'picture_in_picture_alt', 'pie_chart', 'pie_chart_outline', 'pin', 'pin_drop', 'pin_off', 'pivot_table_chart', 'place', 'plagiarism', 'play_arrow', 'play_circle', 'play_circle_filled', 'play_circle_filled_white', 'play_circle_outline', 'play_disabled', 'play_for_work', 'play_lesson', 'playlist_add', 'playlist_add_check', 'playlist_play', 'plumbing', 'plus', 'plus_minus', 'plus_minus_alt', 'plus_one', 'podcasts', 'point_of_sale', 'policy', 'poll', 'polymer', 'pool', 'portable_wifi_off', 'portrait', 'post_add', 'power', 'power_input', 'power_off', 'power_settings_new', 'precision_manufacturing', 'pregnant_woman', 'present_to_all', 'preview', 'price_change', 'price_check', 'print', 'print_disabled', 'priority_high', 'privacy_tip', 'production_quantity_limits', 'psychology', 'public', 'public_off', 'publish', 'published_with_changes', 'push_pin', 'qr_code', 'qr_code_2', 'qr_code_scanner', 'qrcode', 'query_builder', 'query_stats', 'question_answer', 'queue', 'queue_music', 'queue_play_next', 'quickreply', 'quiz', 'r_mobiledata', 'radar', 'radio', 'radio_button_checked', 'radio_button_unchecked', 'railway_alert', 'ramen_dining', 'rate_review', 'raw_off', 'raw_on', 'read_more', 'receipt', 'receipt_long', 'recent_actors', 'recommend', 'record_voice_over', 'redeem', 'redo', 'reduce_capacity', 'refresh', 'remember_me', 'remove', 'remove_circle', 'remove_circle_outline', 'remove_done', 'remove_from_queue', 'remove_moderator', 'remove_red_eye', 'remove_shopping_cart', 'reorder', 'repeat', 'repeat_on', 'repeat_one', 'repeat_one_on', 'replay', 'replay_10', 'replay_30', 'replay_5', 'replay_circle_filled', 'reply', 'reply_all', 'report', 'report_gmailerrorred', 'report_off', 'report_problem', 'request_page', 'request_quote', 'reset_tv', 'restart_alt', 'restaurant', 'restaurant_menu', 'restore', 'restore_from_trash', 'restore_page', 'reviews', 'rice_bowl', 'ring_volume', 'rocket', 'roofing', 'room', 'room_preferences', 'room_service', 'rotate_90_degrees_ccw', 'rotate_left', 'rotate_right', 'rounded_corner', 'router', 'rowing', 'rss_feed', 'rsvp', 'rtt', 'rule', 'rule_folder', 'run_circle', 'running_with_errors', 'rv_hookup', 'safety_divider', 'sailing', 'sanitizer', 'satellite', 'save', 'save_alt', 'saved_search', 'savings', 'scanner', 'scatter_plot', 'schedule', 
'schedule_send', 'schema', 'school', 'science', 'score', 'screen_lock_landscape', 'screen_lock_portrait', 'screen_lock_rotation', 'screen_rotation', 'screen_search_desktop', 'screen_share', 'screenshot', 'sd', 'sd_card', 'sd_card_alert', 'sd_storage', 'search', 'search_off', 'security', 'security_update', 'security_update_good', 'security_update_warning', 'segment', 'select_all', 'self_improvement', 'sell', 'send', 'send_and_archive', 'send_to_mobile', 'sensor_door', 'sensor_window', 'sensors', 'sensors_off', 'sentiment_dissatisfied', 'sentiment_neutral', 'sentiment_satisfied', 'sentiment_satisfied_alt', 'sentiment_slightly_dissatisfied', 'sentiment_very_dissatisfied', 'sentiment_very_satisfied', 'set_meal', 'settings', 'settings_accessibility', 'settings_applications', 'settings_backup_restore', 'settings_bluetooth', 'settings_brightness', 'settings_cell', 'settings_ethernet', 'settings_input_antenna', 'settings_input_component', 'settings_input_composite', 'settings_input_hdmi', 'settings_input_svideo', 'settings_overscan', 'settings_phone', 'settings_power', 'settings_remote', 'settings_suggest', 'settings_system_daydream', 'settings_voice', 'share', 'share_arrival_time', 'share_location', 'shield', 'shop', 'shop_2', 'shop_two', 'shopping_bag', 'shopping_basket', 'shopping_cart', 'short_text', 'shortcut', 'show_chart', 'shower', 'shuffle', 'shuffle_on', 'shutter_speed', 'sick', 'signal_cellular_0_bar', 'signal_cellular_1_bar', 'signal_cellular_2_bar', 'signal_cellular_3_bar', 'signal_cellular_4_bar', 'signal_cellular_alt', 'signal_cellular_connected_no_internet_0_bar', 'signal_cellular_connected_no_internet_1_bar', 'signal_cellular_connected_no_internet_2_bar', 'signal_cellular_connected_no_internet_3_bar', 'signal_cellular_connected_no_internet_4_bar', 'signal_cellular_no_sim', 'signal_cellular_nodata', 'signal_cellular_null', 'signal_cellular_off', 'signal_wifi_0_bar', 'signal_wifi_1_bar', 'signal_wifi_1_bar_lock', 'signal_wifi_2_bar', 'signal_wifi_2_bar_lock', 'signal_wifi_3_bar', 'signal_wifi_3_bar_lock', 'signal_wifi_4_bar', 'signal_wifi_4_bar_lock', 'signal_wifi_bad', 'signal_wifi_connected_no_internet_4', 'signal_wifi_off', 'signal_wifi_statusbar_4_bar', 'signal_wifi_statusbar_connected_no_internet_4', 'signal_wifi_statusbar_null', 'sim_card', 'sim_card_alert', 'sim_card_download', 'single_bed', 'sip', 'skateboarding', 'skip_next', 'skip_previous', 'sledding', 'slideshow', 'slow_motion_video', 'smart_button', 'smart_display', 'smart_screen', 'smart_toy', 'smartphone', 'smoke_free', 'smoking_rooms', 'sms', 'sms_failed', 'snippet_folder', 'snooze', 'snowboarding', 'snowmobile', 'snowshoeing', 'soap', 'social_distance', 'sort', 'sort_by_alpha', 'source', 'south', 'south_east', 'south_west', 'spa', 'space_bar', 'speaker', 'speaker_group', 'speaker_notes', 'speaker_notes_off', 'speaker_phone', 'speed', 'spellcheck', 'splitscreen', 'sports', 'sports_bar', 'sports_baseball', 'sports_basketball', 'sports_cricket', 'sports_esports', 'sports_football', 'sports_golf', 'sports_handball', 'sports_hockey', 'sports_kabaddi', 'sports_mma', 'sports_motorsports', 'sports_rugby', 'sports_score', 'sports_soccer', 'sports_tennis', 'sports_volleyball', 'square_foot', 'stacked_bar_chart', 'stacked_line_chart', 'stairs', 'star', 'star_border', 'star_border_purple500', 'star_half', 'star_outline', 'star_purple500', 'star_rate', 'stars', 'stay_current_landscape', 'stay_current_portrait', 'stay_primary_landscape', 'stay_primary_portrait', 'sticky_note_2', 'stop', 'stop_circle', 'stop_screen_share', 
'storage', 'store', 'store_mall_directory', 'storefront', 'storm', 'straighten', 'stream', 'streetview', 'strikethrough_s', 'stroller', 'style', 'subdirectory_arrow_left', 'subdirectory_arrow_right', 'subject', 'subscript', 'subscriptions', 'subtitles', 'subtitles_off', 'subway', 'summarize', 'superscript', 'supervised_user_circle', 'supervisor_account', 'support', 'support_agent', 'surfing', 'surround_sound', 'swap_calls', 'swap_horiz', 'swap_horizontal_circle', 'swap_vert', 'swap_vertical_circle', 'swipe', 'switch_account', 'switch_camera', 'switch_left', 'switch_right', 'switch_video', 'sync', 'sync_alt', 'sync_disabled', 'sync_problem', 'system_security_update', 'system_security_update_good', 'system_security_update_warning', 'system_update', 'system_update_alt', 'tab', 'tab_unselected', 'table_chart', 'table_rows', 'table_view', 'tablet', 'tablet_android', 'tablet_mac', 'tag', 'tag_faces', 'takeout_dining', 'tap_and_play', 'tapas', 'task', 'task_alt', 'taxi_alert', 'terrain', 'text_fields', 'text_format', 'text_rotate_up', 'text_rotate_vertical', 'text_rotation_angledown', 'text_rotation_angleup', 'text_rotation_down', 'text_rotation_none', 'text_snippet', 'textsms', 'texture', 'theater_comedy', 'theaters', 'thermostat', 'thermostat_auto', 'thumb_down', 'thumb_down_alt', 'thumb_down_off_alt', 'thumb_up', 'thumb_up_alt', 'thumb_up_off_alt', 'thumbs_up_down', 'time_to_leave', 'timelapse', 'timeline', 'timer', 'timer_10', 'timer_10_select', 'timer_3', 'timer_3_select', 'timer_off', 'title', 'toc', 'today', 'toggle_off', 'toggle_on', 'toll', 'tonality', 'topic', 'touch_app', 'tour', 'toys', 'track_changes', 'traffic', 'train', 'tram', 'transfer_within_a_station', 'transform', 'transgender', 'transit_enterexit', 'translate', 'travel_explore', 'trending_down', 'trending_flat', 'trending_up', 'trip_origin', 'try', 'tty', 'tune', 'tungsten', 'turned_in', 'turned_in_not', 'tv', 'tv_off', 'two_wheeler', 'umbrella', 'unarchive', 'undo', 'unfold_less', 'unfold_more', 'unpublished', 'unsubscribe', 'upcoming', 'update', 'update_disabled', 'upgrade', 'upload', 'upload_file', 'usb', 'usb_off', 'verified', 'verified_user', 'vertical_align_bottom', 'vertical_align_center', 'vertical_align_top', 'vertical_distribute', 'vertical_split', 'vibration', 'video_call', 'video_camera_back', 'video_camera_front', 'video_label', 'video_library', 'video_settings', 'video_stable', 'videocam', 'videocam_off', 'videogame_asset', 'videogame_asset_off', 'view_agenda', 'view_array', 'view_carousel', 'view_column', 'view_comfy', 'view_compact', 'view_day', 'view_headline', 'view_in_ar', 'view_list', 'view_module', 'view_quilt', 'view_sidebar', 'view_stream', 'view_week', 'vignette', 'villa', 'visibility', 'visibility_off', 'voice_chat', 'voice_over_off', 'voicemail', 'volume_down', 'volume_mute', 'volume_off', 'volume_up', 'volunteer_activism', 'vpn_key', 'vpn_lock', 'vrpano', 'wallpaper', 'warning', 'warning_amber', 'wash', 'watch', 'watch_later', 'water', 'water_damage', 'waterfall_chart', 'waves', 'wb_auto', 'wb_cloudy', 'wb_incandescent', 'wb_iridescent', 'wb_shade', 'wb_sunny', 'wb_twilight', 'wc', 'web', 'web_asset', 'web_asset_off', 'weekend', 'west', 'whatshot', 'wheelchair_pickup', 'where_to_vote', 'widgets', 'wifi', 'wifi_calling', 'wifi_calling_3', 'wifi_lock', 'wifi_off', 'wifi_protected_setup', 'wifi_tethering', 'wifi_tethering_error_rounded', 'wifi_tethering_off', 'window', 'wine_bar', 'work', 'work_off', 'work_outline', 'workspaces', 'wrap_text', 'wrong_location', 'wysiwyg', 'yard', 'youtube_searched_for', 
'zoom_in', 'zoom_out', 'zoom_out_map']
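# Illustrative helper (not part of the original file): narrow the list down to
# the icons matching a search term, in line with the "filtering" comment above.
def filter_icons(query):
    """Return every icon name containing ``query``, case-insensitively."""
    query = query.lower()
    return [icon for icon in ICONS if query in icon]

# Example: filter_icons("bluetooth") returns 'bluetooth', 'bluetooth_audio', etc.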
# === File: ambari-server/src/test/python/TestSensitiveDataEncryption.py | repo: samyzh/ambari @ ff73620da41697ed2ca9ece676f71ec9ba28a7d5 | Python | Apache-2.0 | 32,488 bytes ===
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
import sys
from ambari_commons.exceptions import FatalException
from mock.mock import patch, MagicMock, call
with patch.object(os, "geteuid", new=MagicMock(return_value=0)):
from resource_management.core import sudo
reload(sudo)
import operator
import platform
import StringIO
from unittest import TestCase
os.environ["ROOT"] = ""
from only_for_platform import get_platform, os_distro_value, PLATFORM_WINDOWS
from ambari_commons import os_utils
if get_platform() != PLATFORM_WINDOWS:
pass
import shutil
project_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)),os.path.normpath("../../../../"))
shutil.copyfile(project_dir+"/ambari-server/conf/unix/ambari.properties", "/tmp/ambari.properties")
# We have to use this import HACK because the filename contains a dash
_search_file = os_utils.search_file
def search_file_proxy(filename, searchpath, pathsep=os.pathsep):
global _search_file
if "ambari.properties" in filename:
return "/tmp/ambari.properties"
return _search_file(filename, searchpath, pathsep)
os_utils.search_file = search_file_proxy
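# The proxy above redirects every lookup of "ambari.properties" to the /tmp
# copy created earlier, so the module-level imports below can resolve the
# server configuration without a real Ambari installation.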
with patch.object(platform, "linux_distribution", return_value = MagicMock(return_value=('Redhat', '6.4', 'Final'))):
with patch("os.path.isdir", return_value = MagicMock(return_value=True)):
with patch("os.access", return_value = MagicMock(return_value=True)):
with patch.object(os_utils, "parse_log4j_file", return_value={'ambari.log.dir': '/var/log/ambari-server'}):
with patch("platform.linux_distribution", return_value = os_distro_value):
with patch("os.symlink"):
with patch("glob.glob", return_value = ['/etc/init.d/postgresql-9.3']):
_ambari_server_ = __import__('ambari-server')
with patch("__builtin__.open"):
from ambari_server.properties import Properties
from ambari_server.serverConfiguration import configDefaults, JDBC_RCA_PASSWORD_FILE_PROPERTY, JDBC_PASSWORD_PROPERTY, \
JDBC_RCA_PASSWORD_ALIAS, SSL_TRUSTSTORE_PASSWORD_PROPERTY, SECURITY_IS_ENCRYPTION_ENABLED, \
SECURITY_SENSITIVE_DATA_ENCRYPTON_ENABLED, SSL_TRUSTSTORE_PASSWORD_ALIAS, SECURITY_KEY_ENV_VAR_NAME
from ambari_server.setupSecurity import get_alias_string, setup_sensitive_data_encryption, sensitive_data_encryption
from ambari_server.serverClassPath import ServerClassPath
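# Note on mock wiring: stacked @patch decorators are applied bottom-up, so the
# decorator closest to a test method supplies the first mock argument after
# ``self``; the long parameter lists below follow that order.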
@patch.object(platform, "linux_distribution", new = MagicMock(return_value=('Redhat', '6.4', 'Final')))
@patch("ambari_server.dbConfiguration_linux.get_postgre_hba_dir", new = MagicMock(return_value = "/var/lib/pgsql/data"))
@patch("ambari_server.dbConfiguration_linux.get_postgre_running_status", new = MagicMock(return_value = "running"))
class TestSensitiveDataEncryption(TestCase):
def setUp(self):
out = StringIO.StringIO()
sys.stdout = out
def tearDown(self):
sys.stdout = sys.__stdout__
@patch("os.path.isdir", new = MagicMock(return_value=True))
@patch("os.access", new = MagicMock(return_value=True))
@patch.object(ServerClassPath, "get_full_ambari_classpath_escaped_for_shell", new = MagicMock(return_value = 'test' + os.pathsep + 'path12'))
@patch("ambari_server.setupSecurity.find_jdk")
@patch("ambari_server.setupSecurity.get_ambari_properties")
@patch("ambari_server.setupSecurity.run_os_command")
def test_sensitive_data_encryption(self, run_os_command_mock, get_ambari_properties_method, find_jdk_mock):
find_jdk_mock.return_value = "/"
environ = os.environ.copy()
run_os_command_mock.return_value = 0,"",""
properties = Properties()
get_ambari_properties_method.return_value = properties
options = self._create_empty_options_mock()
sensitive_data_encryption(options, "encription")
run_os_command_mock.assert_called_with('None -cp test:path12 org.apache.ambari.server.security.encryption.SensitiveDataEncryption encription > /var/log/ambari-server/ambari-server.out 2>&1', environ)
pass
@patch("ambari_server.setupSecurity.print_error_msg")
@patch("ambari_server.setupSecurity.find_jdk")
def test_sensitive_data_encryption_nojdk(self, find_jdk_mock, print_mock):
find_jdk_mock.return_value = None
options = self._create_empty_options_mock()
code = sensitive_data_encryption(options, "encription")
self.assertEquals(code, 1)
print_mock.assert_called_with("No JDK found, please run the \"setup\" "
"command to install a JDK automatically or install any "
"JDK manually to " + configDefaults.JDK_INSTALL_DIR)
pass
@patch("os.path.isdir", new = MagicMock(return_value=True))
@patch("os.access", new = MagicMock(return_value=True))
@patch.object(ServerClassPath, "get_full_ambari_classpath_escaped_for_shell", new = MagicMock(return_value = 'test' + os.pathsep + 'path12'))
@patch("ambari_server.setupSecurity.find_jdk")
@patch("ambari_server.setupSecurity.get_ambari_properties")
@patch("ambari_server.setupSecurity.run_os_command")
def test_sensitive_data_decryption_not_persisted(self, run_os_command_mock, get_ambari_properties_method, find_jdk_mock):
find_jdk_mock.return_value = "/"
environ = os.environ.copy()
master = "master"
environ[SECURITY_KEY_ENV_VAR_NAME] = master
run_os_command_mock.return_value = 0,"",""
properties = Properties()
get_ambari_properties_method.return_value = properties
options = self._create_empty_options_mock()
sensitive_data_encryption(options, "decryption", master)
run_os_command_mock.assert_called_with('None -cp test:path12 org.apache.ambari.server.security.encryption.SensitiveDataEncryption decryption > /var/log/ambari-server/ambari-server.out 2>&1', environ)
pass
@patch("ambari_server.setupSecurity.get_is_persisted")
@patch("ambari_server.setupSecurity.get_is_secure")
@patch("os.path.exists")
@patch("ambari_server.setupSecurity.read_ambari_user")
@patch("ambari_server.setupSecurity.save_passwd_for_alias")
@patch("ambari_server.setupSecurity.read_passwd_for_alias")
@patch("ambari_server.setupSecurity.update_properties_2")
@patch("ambari_server.setupSecurity.save_master_key")
@patch("ambari_server.setupSecurity.get_validated_string_input")
@patch("ambari_server.setupSecurity.get_YN_input")
@patch("ambari_server.setupSecurity.search_file")
@patch("ambari_server.setupSecurity.get_ambari_properties")
@patch("ambari_server.setupSecurity.is_root")
@patch("ambari_server.setupSecurity.sensitive_data_encryption")
@patch("ambari_server.setupSecurity.get_original_master_key")
def test_reset_master_key_not_persisted(self, get_original_master_key_mock, sensitive_data_encryption_metod, is_root_method,
get_ambari_properties_method,
search_file_message, get_YN_input_method,
get_validated_string_input_method, save_master_key_method,
update_properties_method, read_passwd_for_alias_method,
save_passwd_for_alias_method,
read_ambari_user_method,
exists_mock, get_is_secure_method,
get_is_persisted_method):
is_root_method.return_value = True
search_file_message.return_value = False
read_ambari_user_method.return_value = None
p = Properties()
FAKE_PWD_STRING = '${alias=fakealias}'
p.process_pair(JDBC_PASSWORD_PROPERTY, FAKE_PWD_STRING)
p.process_pair(SSL_TRUSTSTORE_PASSWORD_PROPERTY, FAKE_PWD_STRING)
p.process_pair(JDBC_RCA_PASSWORD_FILE_PROPERTY, FAKE_PWD_STRING)
get_ambari_properties_method.return_value = p
master_key = "aaa"
get_YN_input_method.side_effect = [False, True, False]
get_validated_string_input_method.return_value = master_key
get_original_master_key_mock.return_value = master_key
read_passwd_for_alias_method.return_value = "fakepassword"
save_passwd_for_alias_method.return_value = 0
exists_mock.return_value = False
get_is_secure_method.return_value = True
get_is_persisted_method.return_value = (False, "")
options = self._create_empty_options_mock()
setup_sensitive_data_encryption(options)
calls = [call(options, "decryption", master_key), call(options, "encryption", master_key)]
sensitive_data_encryption_metod.assert_has_calls(calls)
self.assertFalse(save_master_key_method.called)
self.assertTrue(get_original_master_key_mock.called)
self.assertTrue(get_YN_input_method.called)
self.assertTrue(get_validated_string_input_method.called)
self.assertTrue(update_properties_method.called)
self.assertTrue(read_passwd_for_alias_method.called)
self.assertEquals(2, read_passwd_for_alias_method.call_count)
self.assertEquals(2, save_passwd_for_alias_method.call_count)
self.assertFalse(save_master_key_method.called)
result_expected = {JDBC_PASSWORD_PROPERTY:
get_alias_string(JDBC_RCA_PASSWORD_ALIAS),
JDBC_RCA_PASSWORD_FILE_PROPERTY:
get_alias_string(JDBC_RCA_PASSWORD_ALIAS),
SSL_TRUSTSTORE_PASSWORD_PROPERTY:
get_alias_string(SSL_TRUSTSTORE_PASSWORD_ALIAS),
SECURITY_IS_ENCRYPTION_ENABLED: 'true',
SECURITY_SENSITIVE_DATA_ENCRYPTON_ENABLED: 'true'}
sorted_x = sorted(result_expected.iteritems(), key=operator.itemgetter(0))
sorted_y = sorted(update_properties_method.call_args[0][1].iteritems(),
key=operator.itemgetter(0))
self.assertEquals(sorted_x, sorted_y)
pass
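# Judging from FAKE_PWD_STRING above, the alias wrapper appears to take the
# form "${alias=<name>}". A minimal sketch, assuming that format (the real
# implementation is ambari_server.setupSecurity.get_alias_string):
#
#     def get_alias_string(alias):
#         return "${alias=" + alias + "}"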
@patch("ambari_server.setupSecurity.get_is_persisted")
@patch("ambari_server.setupSecurity.get_is_secure")
@patch("os.path.exists")
@patch("ambari_server.setupSecurity.read_ambari_user")
@patch("ambari_server.setupSecurity.save_passwd_for_alias")
@patch("ambari_server.setupSecurity.read_passwd_for_alias")
@patch("ambari_server.setupSecurity.update_properties_2")
@patch("ambari_server.setupSecurity.save_master_key")
@patch("ambari_server.setupSecurity.get_YN_input")
@patch("ambari_server.setupSecurity.search_file")
@patch("ambari_server.setupSecurity.get_ambari_properties")
@patch("ambari_server.setupSecurity.is_root")
@patch("ambari_server.setupSecurity.sensitive_data_encryption")
@patch("ambari_server.setupSecurity.get_original_master_key")
def test_encrypt_part_not_persisted(self, get_original_master_key_mock, sensitive_data_encryption_metod, is_root_method,
get_ambari_properties_method,
search_file_message, get_YN_input_method,
save_master_key_method,
update_properties_method, read_passwd_for_alias_method,
save_passwd_for_alias_method,
read_ambari_user_method,
exists_mock, get_is_secure_method,
get_is_persisted_method):
is_root_method.return_value = True
search_file_message.return_value = False
read_ambari_user_method.return_value = None
p = Properties()
FAKE_PWD_STRING = '${alias=fakealias}'
p.process_pair(JDBC_PASSWORD_PROPERTY, get_alias_string(JDBC_RCA_PASSWORD_ALIAS))
p.process_pair(SSL_TRUSTSTORE_PASSWORD_PROPERTY, FAKE_PWD_STRING)
p.process_pair(JDBC_RCA_PASSWORD_FILE_PROPERTY, FAKE_PWD_STRING)
get_ambari_properties_method.return_value = p
master_key = "aaa"
get_YN_input_method.side_effect = [False, False, False]
get_original_master_key_mock.return_value = master_key
read_passwd_for_alias_method.return_value = "fakepassword"
save_passwd_for_alias_method.return_value = 0
exists_mock.return_value = False
get_is_secure_method.return_value = True
get_is_persisted_method.return_value = (False, "filePath")
options = self._create_empty_options_mock()
setup_sensitive_data_encryption(options)
calls = [call(options, "encryption", master_key)]
sensitive_data_encryption_metod.assert_has_calls(calls)
self.assertFalse(save_master_key_method.called)
self.assertTrue(get_YN_input_method.called)
self.assertTrue(get_original_master_key_mock.called)
self.assertTrue(update_properties_method.called)
self.assertTrue(read_passwd_for_alias_method.called)
self.assertEquals(2, read_passwd_for_alias_method.call_count)
self.assertEquals(2, save_passwd_for_alias_method.call_count)
self.assertFalse(save_master_key_method.called)
result_expected = {JDBC_PASSWORD_PROPERTY:
get_alias_string(JDBC_RCA_PASSWORD_ALIAS),
JDBC_RCA_PASSWORD_FILE_PROPERTY:
get_alias_string(JDBC_RCA_PASSWORD_ALIAS),
SSL_TRUSTSTORE_PASSWORD_PROPERTY:
get_alias_string(SSL_TRUSTSTORE_PASSWORD_ALIAS),
SECURITY_IS_ENCRYPTION_ENABLED: 'true',
SECURITY_SENSITIVE_DATA_ENCRYPTON_ENABLED: 'true'}
sorted_x = sorted(result_expected.iteritems(), key=operator.itemgetter(0))
sorted_y = sorted(update_properties_method.call_args[0][1].iteritems(),
key=operator.itemgetter(0))
self.assertEquals(sorted_x, sorted_y)
pass
@patch("ambari_server.setupSecurity.get_is_persisted")
@patch("ambari_server.setupSecurity.get_is_secure")
@patch("os.path.exists")
@patch("ambari_server.setupSecurity.read_ambari_user")
@patch("ambari_server.setupSecurity.save_passwd_for_alias")
@patch("ambari_server.setupSecurity.read_passwd_for_alias")
@patch("ambari_server.setupSecurity.save_master_key")
@patch("ambari_server.setupSecurity.get_YN_input")
@patch("ambari_server.setupSecurity.search_file")
@patch("ambari_server.setupSecurity.get_ambari_properties")
@patch("ambari_server.setupSecurity.is_root")
@patch("ambari_server.setupSecurity.get_original_master_key")
def test_decrypt_missed_masterkey_not_persisted(self, get_original_master_key_mock, is_root_method,
get_ambari_properties_method,
search_file_message, get_YN_input_method,
save_master_key_method,
read_passwd_for_alias_method,
save_passwd_for_alias_method,
read_ambari_user_method,
exists_mock, get_is_secure_method,
get_is_persisted_method):
is_root_method.return_value = True
search_file_message.return_value = False
read_ambari_user_method.return_value = None
p = Properties()
FAKE_PWD_STRING = '${alias=fakealias}'
p.process_pair(JDBC_PASSWORD_PROPERTY, get_alias_string(JDBC_RCA_PASSWORD_ALIAS))
p.process_pair(SSL_TRUSTSTORE_PASSWORD_PROPERTY, FAKE_PWD_STRING)
p.process_pair(JDBC_RCA_PASSWORD_FILE_PROPERTY, FAKE_PWD_STRING)
get_ambari_properties_method.return_value = p
get_YN_input_method.side_effect = [True, False]
get_original_master_key_mock.return_value = None
read_passwd_for_alias_method.return_value = "fakepassword"
save_passwd_for_alias_method.return_value = 0
exists_mock.return_value = False
get_is_secure_method.return_value = True
get_is_persisted_method.return_value = (False, "filePath")
options = self._create_empty_options_mock()
self.assertTrue(setup_sensitive_data_encryption(options) == 1)
self.assertFalse(save_master_key_method.called)
self.assertTrue(get_YN_input_method.called)
pass
@patch("ambari_server.setupSecurity.get_ambari_properties")
@patch("ambari_server.setupSecurity.is_root")
def test_setup_sensitive_data_encryption_no_ambari_prop_not_root(self, is_root_method, get_ambari_properties_method):
is_root_method.return_value = False
get_ambari_properties_method.return_value = -1
options = self._create_empty_options_mock()
try:
setup_sensitive_data_encryption(options)
self.fail("Should throw exception")
except FatalException as fe:
self.assertTrue('Failed to read properties file.' == fe.reason)
pass
pass
@patch("os.path.exists")
@patch("ambari_server.setupSecurity.get_is_secure")
@patch("ambari_server.setupSecurity.get_is_persisted")
@patch("ambari_server.setupSecurity.remove_password_file")
@patch("ambari_server.setupSecurity.save_passwd_for_alias")
@patch("ambari_server.setupSecurity.read_master_key")
@patch("ambari_server.setupSecurity.read_ambari_user")
@patch("ambari_server.setupSecurity.update_properties_2")
@patch("ambari_server.setupSecurity.save_master_key")
@patch("ambari_server.setupSecurity.get_YN_input")
@patch("ambari_server.setupSecurity.get_ambari_properties")
@patch("ambari_server.setupSecurity.is_root")
@patch("ambari_server.setupSecurity.sensitive_data_encryption")
@patch("ambari_server.setupSecurity.adjust_directory_permissions")
def test_setup_sensitive_data_encryption_not_persist(self, adjust_directory_permissions_mock, sensitive_data_encryption_metod, is_root_method,
get_ambari_properties_method, get_YN_input_method, save_master_key_method,
update_properties_method,
read_ambari_user_method, read_master_key_method,
save_passwd_for_alias_method, remove_password_file_method,
get_is_persisted_method, get_is_secure_method, exists_mock):
is_root_method.return_value = True
p = Properties()
FAKE_PWD_STRING = "fakepasswd"
p.process_pair(JDBC_PASSWORD_PROPERTY, FAKE_PWD_STRING)
p.process_pair(SSL_TRUSTSTORE_PASSWORD_PROPERTY, FAKE_PWD_STRING)
p.process_pair(JDBC_RCA_PASSWORD_FILE_PROPERTY, FAKE_PWD_STRING)
get_ambari_properties_method.return_value = p
master_key = "aaa"
read_master_key_method.return_value = master_key
get_YN_input_method.return_value = False
read_ambari_user_method.return_value = "asd"
save_passwd_for_alias_method.return_value = 0
get_is_persisted_method.return_value = (True, "filepath")
get_is_secure_method.return_value = False
exists_mock.return_value = False
options = self._create_empty_options_mock()
setup_sensitive_data_encryption(options)
self.assertTrue(get_YN_input_method.called)
self.assertTrue(read_master_key_method.called)
self.assertTrue(read_ambari_user_method.called)
self.assertTrue(update_properties_method.called)
self.assertFalse(save_master_key_method.called)
self.assertTrue(save_passwd_for_alias_method.called)
self.assertEquals(2, save_passwd_for_alias_method.call_count)
self.assertTrue(remove_password_file_method.called)
self.assertTrue(adjust_directory_permissions_mock.called)
sensitive_data_encryption_metod.assert_called_with(options, "encryption", master_key)
result_expected = {JDBC_PASSWORD_PROPERTY:
get_alias_string(JDBC_RCA_PASSWORD_ALIAS),
JDBC_RCA_PASSWORD_FILE_PROPERTY:
get_alias_string(JDBC_RCA_PASSWORD_ALIAS),
SSL_TRUSTSTORE_PASSWORD_PROPERTY:
get_alias_string(SSL_TRUSTSTORE_PASSWORD_ALIAS),
SECURITY_IS_ENCRYPTION_ENABLED: 'true',
SECURITY_SENSITIVE_DATA_ENCRYPTON_ENABLED: 'true'}
sorted_x = sorted(result_expected.iteritems(), key=operator.itemgetter(0))
sorted_y = sorted(update_properties_method.call_args[0][1].iteritems(),
key=operator.itemgetter(0))
self.assertEquals(sorted_x, sorted_y)
pass
@patch("ambari_server.setupSecurity.save_passwd_for_alias")
@patch("os.path.exists")
@patch("ambari_server.setupSecurity.get_is_secure")
@patch("ambari_server.setupSecurity.get_is_persisted")
@patch("ambari_server.setupSecurity.read_master_key")
@patch("ambari_server.setupSecurity.read_ambari_user")
@patch("ambari_server.setupSecurity.update_properties_2")
@patch("ambari_server.setupSecurity.save_master_key")
@patch("ambari_server.setupSecurity.get_YN_input")
@patch("ambari_server.serverConfiguration.search_file")
@patch("ambari_server.setupSecurity.get_ambari_properties")
@patch("ambari_server.setupSecurity.is_root")
@patch("ambari_server.setupSecurity.sensitive_data_encryption")
def test_setup_sensitive_data_encryption_persist(self, sensitive_data_encryption_method, is_root_method,
get_ambari_properties_method, search_file_message,
get_YN_input_method, save_master_key_method,
update_properties_method,
read_ambari_user_method, read_master_key_method,
get_is_persisted_method, get_is_secure_method, exists_mock,
save_passwd_for_alias_method):
is_root_method.return_value = True
p = Properties()
FAKE_PWD_STRING = "fakepasswd"
p.process_pair(JDBC_PASSWORD_PROPERTY, FAKE_PWD_STRING)
get_ambari_properties_method.return_value = p
search_file_message.return_value = "propertiesfile"
master_key = "aaa"
read_master_key_method.return_value = master_key
get_YN_input_method.return_value = True
read_ambari_user_method.return_value = None
get_is_persisted_method.return_value = (True, "filepath")
get_is_secure_method.return_value = False
exists_mock.return_value = False
save_passwd_for_alias_method.return_value = 0
options = self._create_empty_options_mock()
setup_sensitive_data_encryption(options)
self.assertTrue(get_YN_input_method.called)
self.assertTrue(read_master_key_method.called)
self.assertTrue(read_ambari_user_method.called)
self.assertTrue(update_properties_method.called)
self.assertTrue(save_master_key_method.called)
sensitive_data_encryption_method.assert_called_with(options, "encryption")
result_expected = {JDBC_PASSWORD_PROPERTY:
get_alias_string(JDBC_RCA_PASSWORD_ALIAS),
SECURITY_IS_ENCRYPTION_ENABLED: 'true',
SECURITY_SENSITIVE_DATA_ENCRYPTON_ENABLED: 'true'}
sorted_x = sorted(result_expected.iteritems(), key=operator.itemgetter(0))
sorted_y = sorted(update_properties_method.call_args[0][1].iteritems(),
key=operator.itemgetter(0))
self.assertEquals(sorted_x, sorted_y)
pass
@patch("ambari_server.setupSecurity.read_master_key")
@patch("os.path.exists")
@patch("ambari_server.setupSecurity.read_ambari_user")
@patch("ambari_server.setupSecurity.save_passwd_for_alias")
@patch("ambari_server.setupSecurity.read_passwd_for_alias")
@patch("ambari_server.setupSecurity.update_properties_2")
@patch("ambari_server.setupSecurity.save_master_key")
@patch("ambari_server.setupSecurity.get_YN_input")
@patch("ambari_server.setupSecurity.search_file")
@patch("ambari_server.setupSecurity.get_ambari_properties")
@patch("ambari_server.setupSecurity.is_root")
@patch("ambari_server.setupSecurity.sensitive_data_encryption")
@patch("ambari_server.setupSecurity.get_is_secure")
@patch("ambari_server.setupSecurity.get_is_persisted")
def test_reset_master_key_persisted(self, get_is_persisted_method, get_is_secure_method, sensitive_data_encryption_method, is_root_method,
get_ambari_properties_method, search_file_message,
get_YN_input_method,
save_master_key_method, update_properties_method,
read_passwd_for_alias_method, save_passwd_for_alias_method,
read_ambari_user_method, exists_mock,
read_master_key_method):
# Testing call under root
is_root_method.return_value = True
search_file_message.return_value = "filepath"
read_ambari_user_method.return_value = None
p = Properties()
FAKE_PWD_STRING = '${alias=fakealias}'
p.process_pair(JDBC_PASSWORD_PROPERTY, FAKE_PWD_STRING)
p.process_pair(SSL_TRUSTSTORE_PASSWORD_PROPERTY, FAKE_PWD_STRING)
p.process_pair(JDBC_RCA_PASSWORD_FILE_PROPERTY, FAKE_PWD_STRING)
get_ambari_properties_method.return_value = p
master_key = "aaa"
get_is_persisted_method.return_value = (True, "filepath")
get_is_secure_method.return_value = True
get_YN_input_method.side_effect = [False, True, True]
read_master_key_method.return_value = master_key
read_passwd_for_alias_method.return_value = "fakepassword"
save_passwd_for_alias_method.return_value = 0
exists_mock.return_value = False
options = self._create_empty_options_mock()
setup_sensitive_data_encryption(options)
calls = [call(options, "decryption"), call(options, "encryption")]
sensitive_data_encryption_method.assert_has_calls(calls)
self.assertTrue(save_master_key_method.called)
self.assertTrue(get_YN_input_method.called)
self.assertTrue(read_master_key_method.called)
self.assertTrue(update_properties_method.called)
self.assertTrue(read_passwd_for_alias_method.called)
self.assertEquals(2, read_passwd_for_alias_method.call_count)
self.assertEquals(2, save_passwd_for_alias_method.call_count)
result_expected = {JDBC_PASSWORD_PROPERTY:
get_alias_string(JDBC_RCA_PASSWORD_ALIAS),
JDBC_RCA_PASSWORD_FILE_PROPERTY:
get_alias_string(JDBC_RCA_PASSWORD_ALIAS),
SSL_TRUSTSTORE_PASSWORD_PROPERTY:
get_alias_string(SSL_TRUSTSTORE_PASSWORD_ALIAS),
SECURITY_IS_ENCRYPTION_ENABLED: 'true',
SECURITY_SENSITIVE_DATA_ENCRYPTON_ENABLED: 'true'}
sorted_x = sorted(result_expected.iteritems(), key=operator.itemgetter(0))
sorted_y = sorted(update_properties_method.call_args[0][1].iteritems(),
key=operator.itemgetter(0))
self.assertEquals(sorted_x, sorted_y)
pass
@patch("os.path.exists")
@patch("ambari_server.setupSecurity.read_ambari_user")
@patch("ambari_server.setupSecurity.save_passwd_for_alias")
@patch("ambari_server.setupSecurity.read_passwd_for_alias")
@patch("ambari_server.setupSecurity.update_properties_2")
@patch("ambari_server.setupSecurity.get_YN_input")
@patch("ambari_server.setupSecurity.search_file")
@patch("ambari_server.setupSecurity.get_ambari_properties")
@patch("ambari_server.setupSecurity.is_root")
@patch("ambari_server.setupSecurity.sensitive_data_encryption")
@patch("ambari_server.setupSecurity.get_is_secure")
@patch("ambari_server.setupSecurity.get_is_persisted")
def test_decrypt_sensitive_data_persister(self, get_is_persisted_method, get_is_secure_method, sensitive_data_encryption_method, is_root_method,
get_ambari_properties_method, search_file_message,
get_YN_input_method,
update_properties_method,
read_passwd_for_alias_method, save_passwd_for_alias_method,
read_ambari_user_method, exists_mock):
# Testing call under root
is_root_method.return_value = True
search_file_message.return_value = "filepath"
read_ambari_user_method.return_value = None
p = Properties()
FAKE_PWD_STRING = '${alias=fakealias}'
p.process_pair(JDBC_PASSWORD_PROPERTY, FAKE_PWD_STRING)
p.process_pair(SSL_TRUSTSTORE_PASSWORD_PROPERTY, FAKE_PWD_STRING)
p.process_pair(JDBC_RCA_PASSWORD_FILE_PROPERTY, FAKE_PWD_STRING)
get_ambari_properties_method.return_value = p
get_is_persisted_method.return_value = (True, "filepath")
get_is_secure_method.return_value = True
get_YN_input_method.side_effect = [True, False]
read_passwd_for_alias_method.return_value = "fakepassword"
save_passwd_for_alias_method.return_value = 0
exists_mock.return_value = False
options = self._create_empty_options_mock()
setup_sensitive_data_encryption(options)
calls = [call(options, "decryption")]
sensitive_data_encryption_method.assert_has_calls(calls)
self.assertTrue(get_YN_input_method.called)
self.assertTrue(update_properties_method.called)
self.assertTrue(read_passwd_for_alias_method.called)
self.assertEquals(2, read_passwd_for_alias_method.call_count)
self.assertEquals(2, save_passwd_for_alias_method.call_count)
result_expected = {JDBC_PASSWORD_PROPERTY: "fakepassword",
JDBC_RCA_PASSWORD_FILE_PROPERTY: "fakepassword",
SSL_TRUSTSTORE_PASSWORD_PROPERTY: "fakepassword",
SECURITY_IS_ENCRYPTION_ENABLED: 'false',
SECURITY_SENSITIVE_DATA_ENCRYPTON_ENABLED: 'false'}
sorted_x = sorted(result_expected.iteritems(), key=operator.itemgetter(0))
sorted_y = sorted(update_properties_method.call_args[0][1].iteritems(),
key=operator.itemgetter(0))
self.assertEquals(sorted_x, sorted_y)
pass
def _create_empty_options_mock(self):
options = MagicMock()
options.ldap_enabled = None
options.ldap_enabled_ambari = None
options.ldap_manage_services = None
options.ldap_enabled_services = None
options.ldap_url = None
options.ldap_primary_host = None
options.ldap_primary_port = None
options.ldap_secondary_url = None
options.ldap_secondary_host = None
options.ldap_secondary_port = None
options.ldap_ssl = None
options.ldap_user_class = None
options.ldap_user_attr = None
options.ldap_user_group_member_attr = None
options.ldap_group_class = None
options.ldap_group_attr = None
options.ldap_member_attr = None
options.ldap_dn = None
options.ldap_base_dn = None
options.ldap_manager_dn = None
options.ldap_manager_password = None
options.ldap_save_settings = None
options.ldap_referral = None
options.ldap_bind_anonym = None
options.ldap_force_setup = None
options.ambari_admin_username = None
options.ambari_admin_password = None
options.ldap_sync_admin_name = None
options.ldap_sync_username_collisions_behavior = None
options.ldap_sync_disable_endpoint_identification = None
options.ldap_force_lowercase_usernames = None
options.ldap_pagination_enabled = None
options.ldap_sync_admin_password = None
options.custom_trust_store = None
options.trust_store_type = None
options.trust_store_path = None
options.trust_store_password = None
options.security_option = None
options.api_ssl = None
options.api_ssl_port = None
options.import_cert_path = None
options.import_cert_alias = None
options.pem_password = None
options.import_key_path = None
options.master_key = None
options.master_key_persist = None
options.jaas_principal = None
options.jaas_keytab = None
return options
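    # The four tests above repeat the same sorted-properties comparison.
    # A minimal helper sketch (not part of the original suite; it assumes the
    # same Python 2 iteritems()/assertEquals idioms used throughout this file):
    def _assert_properties_equal(self, expected, update_properties_mock):
        sorted_x = sorted(expected.iteritems(), key=operator.itemgetter(0))
        sorted_y = sorted(update_properties_mock.call_args[0][1].iteritems(),
                          key=operator.itemgetter(0))
        self.assertEquals(sorted_x, sorted_y)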
| 48.780781
| 203
| 0.731409
| 3,961
| 32,488
| 5.540773
| 0.083817
| 0.061785
| 0.076685
| 0.131225
| 0.806033
| 0.77113
| 0.751948
| 0.740238
| 0.722696
| 0.715815
| 0
| 0.002848
| 0.189516
| 32,488
| 665
| 204
| 48.854135
| 0.830656
| 0.026871
| 0
| 0.690519
| 0
| 0.003578
| 0.189539
| 0.154859
| 0
| 0
| 0
| 0
| 0.112701
| 1
| 0.026834
| false
| 0.216458
| 0.035778
| 0
| 0.069767
| 0.005367
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
64f2e2b530af5be77d35403bbf827b176b5a071a
| 2,297
|
py
|
Python
|
Chinese_zodiac.py
|
2019-fall-csc-226/a01-breaking-bad-iransi_a01
|
720a648e9068af9e1202893942e311163bd0e1c1
|
[
"MIT"
] | null | null | null |
Chinese_zodiac.py
|
2019-fall-csc-226/a01-breaking-bad-iransi_a01
|
720a648e9068af9e1202893942e311163bd0e1c1
|
[
"MIT"
] | null | null | null |
Chinese_zodiac.py
|
2019-fall-csc-226/a01-breaking-bad-iransi_a01
|
720a648e9068af9e1202893942e311163bd0e1c1
|
[
"MIT"
] | null | null | null |
birthyear = int(input("what year were you born? {2000-2011}"))
if birthyear == 2000:
    print("you're a fire breathing dragon")
elif birthyear == 2001:
    print("sssss you're a snake")
elif birthyear == 2002:
    print("Haaayyy. Get it? cause you're a horse :)")
elif birthyear == 2003:
    print("you are the GOAT")
elif birthyear == 2004:
    print("what did one monkey say to another? 'I was born in 2004'")
elif birthyear == 2005:
    print("why did the turkey cross the road? "
          "To prove he wasn't chicken and "
          "neither are you because you're a rooster!")
elif birthyear == 2006:
    print("woof woof you're a dog")
elif birthyear == 2007:
    print("oink oink you're a pig")
elif birthyear == 2008:
    print("pitty pat pat you are a rat")
elif birthyear == 2009:
    print("you are an ox")
elif birthyear == 2010:
    print("you're a tiger grrr")
elif birthyear == 2011:
    print("is your name Thumper? Because you're a rabbit!")
elif birthyear < 2000:
    print("you're too old, get out of here!")
elif birthyear > 2011:
    print("umm, I said a year between 2000 and 2011. Try again")
birthyear = int(input("now put in a friend's birth year {2000 and up}"))
if birthyear == 2000 :
print("you're a fire breathing dragon!!")
elif birthyear == 2001:
print("sssss you're a snake ")
if birthyear == 2002:
print("Haaayyy. Get it? cause you're a horse :)")
if birthyear == 2003 :
print("you are the GOAT")
elif birthyear == 2004 :
print("what did one monkey say to another?")
print( '"I was born in 2004"')
if birthyear == 2005 :
print("why did the turkey cross the road? to prove he wasn't chicken and neither are you you're a rooster")
elif birthyear == 2006 :
print("woof woof you're a dog")
if birthyear == 2007:
print("oink oink you're a pig")
elif birthyear == 2008 :
print("pitty pat pat you are a rat!")
if birthyear == 2009 :
print("you are an ox")
elif birthyear == 2010 :
print("a tiger grrr")
if birthyear == 2011 :
print(" is your name Thumper? Because you're a rabbit!")
elif birthyear < 2000 :
print("you're too old get out of here !")
elif birthyear > 2011 :
print(" too young ! try again with someone who was born between 2000 and 2011")
| 31.902778
| 111
| 0.637353
| 361
| 2,297
| 4.055402
| 0.265928
| 0.054645
| 0.057377
| 0.057377
| 0.849727
| 0.814208
| 0.814208
| 0.814208
| 0.814208
| 0.814208
| 0
| 0.0855
| 0.246408
| 2,297
| 72
| 112
| 31.902778
| 0.760254
| 0
| 0
| 0.655738
| 0
| 0.016393
| 0.475196
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.47541
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
64f5cef49fb1fe4a41de9751140db69b63b91e0f
| 212
|
py
|
Python
|
scripts/library/__init__.py
|
geozeke/ubuntu
|
49b7649b4306e6f3eb39c5dd9419cddc5c10d077
|
[
"MIT"
] | null | null | null |
scripts/library/__init__.py
|
geozeke/ubuntu
|
49b7649b4306e6f3eb39c5dd9419cddc5c10d077
|
[
"MIT"
] | 77
|
2020-07-08T18:52:48.000Z
|
2022-01-21T20:13:31.000Z
|
scripts/library/__init__.py
|
geozeke/ubuntu
|
49b7649b4306e6f3eb39c5dd9419cddc5c10d077
|
[
"MIT"
] | null | null | null |
from .classes import Environment
from .utilities import clear
from .utilities import runOneCommand
from .utilities import runManyArguments
from .utilities import minPythonVersion
from .utilities import copyFiles
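# Note: re-exporting these utilities at the package root is what lets callers
# shorten "from library.utilities import clear" to "from library import clear"
# (assuming the package is imported under the name "library").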
| 30.285714
| 39
| 0.858491
| 24
| 212
| 7.583333
| 0.416667
| 0.357143
| 0.521978
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113208
| 212
| 6
| 40
| 35.333333
| 0.968085
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
64f6cfdf3012940080f0da657680c3dfea60fa6b
| 17,679
|
py
|
Python
|
AutomatedTesting/Gem/PythonTests/largeworlds/dyn_veg/TestSuite_Periodic.py
|
LB-JakubSkorupka/o3de
|
e224fc2ee5ec2a12e75a10acae268b7b38ae3a32
|
[
"Apache-2.0",
"MIT"
] | 11
|
2021-07-08T09:58:26.000Z
|
2022-03-17T17:59:26.000Z
|
AutomatedTesting/Gem/PythonTests/largeworlds/dyn_veg/TestSuite_Periodic.py
|
LB-JakubSkorupka/o3de
|
e224fc2ee5ec2a12e75a10acae268b7b38ae3a32
|
[
"Apache-2.0",
"MIT"
] | 29
|
2021-07-06T19:33:52.000Z
|
2022-03-22T10:27:49.000Z
|
AutomatedTesting/Gem/PythonTests/largeworlds/dyn_veg/TestSuite_Periodic.py
|
LB-JakubSkorupka/o3de
|
e224fc2ee5ec2a12e75a10acae268b7b38ae3a32
|
[
"Apache-2.0",
"MIT"
] | 4
|
2021-07-06T19:24:43.000Z
|
2022-03-31T12:42:27.000Z
|
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import os
import pytest
import sys
import ly_test_tools.environment.waiter as waiter
import ly_test_tools.environment.file_system as file_system
import editor_python_test_tools.hydra_test_utils as hydra
from ly_remote_console.remote_console_commands import RemoteConsole as RemoteConsole
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../automatedtesting_shared')
from base import TestAutomationBase
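# The two fixtures below manage per-test state: remove_test_slice deletes any
# leftover TestSlice_*.slice files before the test and registers a finalizer
# to delete them again afterwards; remote_console_instance hands the test a
# RemoteConsole and stops it on teardown if it is still connected.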
@pytest.fixture
def remove_test_slice(request, workspace, project):
file_system.delete([os.path.join(workspace.paths.engine_root(), project, "slices", "TestSlice_1.slice")], True,
True)
file_system.delete([os.path.join(workspace.paths.engine_root(), project, "slices", "TestSlice_2.slice")], True,
True)
def teardown():
file_system.delete([os.path.join(workspace.paths.engine_root(), project, "slices", "TestSlice_1.slice")], True,
True)
file_system.delete([os.path.join(workspace.paths.engine_root(), project, "slices", "TestSlice_2.slice")], True,
True)
request.addfinalizer(teardown)
@pytest.fixture
def remote_console_instance(request):
console = RemoteConsole()
def teardown():
if console.connected:
console.stop()
request.addfinalizer(teardown)
return console
@pytest.mark.SUITE_periodic
@pytest.mark.parametrize("launcher_platform", ['windows_editor'])
@pytest.mark.parametrize("project", ["AutomatedTesting"])
class TestAutomation(TestAutomationBase):
def test_AltitudeFilter_ComponentAndOverrides_InstancesPlantAtSpecifiedAltitude(self, request, workspace, editor, launcher_platform):
from .EditorScripts import AltitudeFilter_ComponentAndOverrides_InstancesPlantAtSpecifiedAltitude as test_module
self._run_test(request, workspace, editor, test_module)
def test_AltitudeFilter_ShapeSample_InstancesPlantAtSpecifiedAltitude(self, request, workspace, editor, launcher_platform):
from .EditorScripts import AltitudeFilter_ShapeSample_InstancesPlantAtSpecifiedAltitude as test_module
self._run_test(request, workspace, editor, test_module)
def test_AltitudeFilter_FilterStageToggle(self, request, workspace, editor, launcher_platform):
from .EditorScripts import AltitudeFilter_FilterStageToggle as test_module
self._run_test(request, workspace, editor, test_module)
def test_SpawnerSlices_SliceCreationAndVisibilityToggleWorks(self, request, workspace, editor, remove_test_slice, launcher_platform):
from .EditorScripts import SpawnerSlices_SliceCreationAndVisibilityToggleWorks as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
def test_AssetListCombiner_CombinedDescriptorsExpressInConfiguredArea(self, request, workspace, editor, launcher_platform):
from .EditorScripts import AssetListCombiner_CombinedDescriptorsExpressInConfiguredArea as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
def test_AssetWeightSelector_InstancesExpressBasedOnWeight(self, request, workspace, editor, launcher_platform):
from .EditorScripts import AssetWeightSelector_InstancesExpressBasedOnWeight as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
@pytest.mark.xfail(reason="https://github.com/o3de/o3de/issues/4155")
def test_DistanceBetweenFilter_InstancesPlantAtSpecifiedRadius(self, request, workspace, editor, launcher_platform):
from .EditorScripts import DistanceBetweenFilter_InstancesPlantAtSpecifiedRadius as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
@pytest.mark.xfail(reason="https://github.com/o3de/o3de/issues/4155")
def test_DistanceBetweenFilterOverrides_InstancesPlantAtSpecifiedRadius(self, request, workspace, editor, launcher_platform):
from .EditorScripts import DistanceBetweenFilterOverrides_InstancesPlantAtSpecifiedRadius as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
def test_SurfaceDataRefreshes_RemainsStable(self, request, workspace, editor, launcher_platform):
from .EditorScripts import SurfaceDataRefreshes_RemainsStable as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
def test_VegetationInstances_DespawnWhenOutOfRange(self, request, workspace, editor, launcher_platform):
from .EditorScripts import VegetationInstances_DespawnWhenOutOfRange as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
def test_InstanceSpawnerPriority_LayerAndSubPriority_HigherValuesPlantOverLower(self, request, workspace, editor, launcher_platform):
from .EditorScripts import InstanceSpawnerPriority_LayerAndSubPriority as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
def test_LayerBlocker_InstancesBlockedInConfiguredArea(self, request, workspace, editor, launcher_platform):
from .EditorScripts import LayerBlocker_InstancesBlockedInConfiguredArea as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
def test_LayerSpawner_InheritBehaviorFlag(self, request, workspace, editor, launcher_platform):
from .EditorScripts import LayerSpawner_InheritBehaviorFlag as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
def test_LayerSpawner_InstancesPlantInAllSupportedShapes(self, request, workspace, editor, launcher_platform):
from .EditorScripts import LayerSpawner_InstancesPlantInAllSupportedShapes as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
def test_LayerSpawner_FilterStageToggle(self, request, workspace, editor, launcher_platform):
from .EditorScripts import LayerSpawner_FilterStageToggle as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
@pytest.mark.xfail(reason="https://github.com/o3de/o3de/issues/2038")
def test_LayerSpawner_InstancesRefreshUsingCorrectViewportCamera(self, request, workspace, editor, launcher_platform):
from .EditorScripts import LayerSpawner_InstancesRefreshUsingCorrectViewportCamera as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
def test_MeshBlocker_InstancesBlockedByMesh(self, request, workspace, editor, launcher_platform):
from .EditorScripts import MeshBlocker_InstancesBlockedByMesh as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
def test_MeshBlocker_InstancesBlockedByMeshHeightTuning(self, request, workspace, editor, launcher_platform):
from .EditorScripts import MeshBlocker_InstancesBlockedByMeshHeightTuning as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
def test_MeshSurfaceTagEmitter_DependentOnMeshComponent(self, request, workspace, editor, launcher_platform):
from .EditorScripts import MeshSurfaceTagEmitter_DependentOnMeshComponent as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
def test_MeshSurfaceTagEmitter_SurfaceTagsAddRemoveSuccessfully(self, request, workspace, editor, launcher_platform):
from .EditorScripts import MeshSurfaceTagEmitter_SurfaceTagsAddRemoveSuccessfully as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
def test_PhysXColliderSurfaceTagEmitter_E2E_Editor(self, request, workspace, editor, launcher_platform):
from .EditorScripts import PhysXColliderSurfaceTagEmitter_E2E_Editor as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
def test_PositionModifier_ComponentAndOverrides_InstancesPlantAtSpecifiedOffsets(self, request, workspace, editor, launcher_platform):
from .EditorScripts import PositionModifier_ComponentAndOverrides_InstancesPlantAtSpecifiedOffsets as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
def test_PositionModifier_AutoSnapToSurfaceWorks(self, request, workspace, editor, launcher_platform):
from .EditorScripts import PositionModifier_AutoSnapToSurfaceWorks as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
def test_RotationModifier_InstancesRotateWithinRange(self, request, workspace, editor, launcher_platform):
from .EditorScripts import RotationModifier_InstancesRotateWithinRange as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
def test_RotationModifierOverrides_InstancesRotateWithinRange(self, request, workspace, editor, launcher_platform):
from .EditorScripts import RotationModifierOverrides_InstancesRotateWithinRange as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
def test_ScaleModifier_InstancesProperlyScale(self, request, workspace, editor, launcher_platform):
from .EditorScripts import ScaleModifier_InstancesProperlyScale as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
def test_ScaleModifierOverrides_InstancesProperlyScale(self, request, workspace, editor, launcher_platform):
from .EditorScripts import ScaleModifierOverrides_InstancesProperlyScale as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
def test_ShapeIntersectionFilter_InstancesPlantInAssignedShape(self, request, workspace, editor, launcher_platform):
from .EditorScripts import ShapeIntersectionFilter_InstancesPlantInAssignedShape as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
def test_ShapeIntersectionFilter_FilterStageToggle(self, request, workspace, editor, launcher_platform):
from .EditorScripts import ShapeIntersectionFilter_FilterStageToggle as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
def test_SlopeAlignmentModifier_InstanceSurfaceAlignment(self, request, workspace, editor, launcher_platform):
from .EditorScripts import SlopeAlignmentModifier_InstanceSurfaceAlignment as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
def test_SlopeAlignmentModifierOverrides_InstanceSurfaceAlignment(self, request, workspace, editor, launcher_platform):
from .EditorScripts import SlopeAlignmentModifierOverrides_InstanceSurfaceAlignment as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
def test_SurfaceMaskFilter_BasicSurfaceTagCreation(self, request, workspace, editor, launcher_platform):
from .EditorScripts import SurfaceMaskFilter_BasicSurfaceTagCreation as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
def test_SurfaceMaskFilter_ExclusiveSurfaceTags_Function(self, request, workspace, editor, launcher_platform):
from .EditorScripts import SurfaceMaskFilter_ExclusionList as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
def test_SurfaceMaskFilter_InclusiveSurfaceTags_Function(self, request, workspace, editor, launcher_platform):
from .EditorScripts import SurfaceMaskFilter_InclusionList as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
def test_SurfaceMaskFilterOverrides_MultipleDescriptorOverridesPlantAsExpected(self, request, workspace, editor, launcher_platform):
from .EditorScripts import SurfaceMaskFilterOverrides_MultipleDescriptorOverridesPlantAsExpected as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
def test_SystemSettings_SectorPointDensity(self, request, workspace, editor, launcher_platform):
from .EditorScripts import SystemSettings_SectorPointDensity as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
def test_SystemSettings_SectorSize(self, request, workspace, editor, launcher_platform):
from .EditorScripts import SystemSettings_SectorSize as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
def test_SlopeFilter_ComponentAndOverrides_InstancesPlantOnValidSlopes(self, request, workspace, editor, launcher_platform):
from .EditorScripts import SlopeFilter_ComponentAndOverrides_InstancesPlantOnValidSlope as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
@pytest.mark.SUITE_periodic
@pytest.mark.parametrize("project", ["AutomatedTesting"])
@pytest.mark.parametrize("level", ["tmp_level"])
class TestAutomationE2E(TestAutomationBase):
# The following tests must run in order, please do not move tests out of order
@pytest.mark.parametrize("launcher_platform", ['windows_editor'])
def test_DynamicSliceInstanceSpawner_Embedded_E2E_Editor(self, request, workspace, project, level, editor, launcher_platform):
# Ensure our test level does not already exist
file_system.delete([os.path.join(workspace.paths.engine_root(), project, "Levels", level)], True, True)
from .EditorScripts import DynamicSliceInstanceSpawner_Embedded_E2E as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
@pytest.mark.parametrize("launcher_platform", ['windows'])
def test_DynamicSliceInstanceSpawner_Embedded_E2E_Launcher(self, workspace, launcher, level,
remote_console_instance, project, launcher_platform):
expected_lines = [
"Instances found in area = 400"
]
hydra.launch_and_validate_results_launcher(launcher, level, remote_console_instance, expected_lines, launch_ap=False)
# Cleanup our temp level
file_system.delete([os.path.join(workspace.paths.engine_root(), project, "Levels", level)], True, True)
@pytest.mark.parametrize("launcher_platform", ['windows_editor'])
def test_DynamicSliceInstanceSpawner_External_E2E_Editor(self, request, workspace, project, level, editor, launcher_platform):
# Ensure our test level does not already exist
file_system.delete([os.path.join(workspace.paths.engine_root(), project, "Levels", level)], True, True)
from .EditorScripts import DynamicSliceInstanceSpawner_External_E2E as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
@pytest.mark.parametrize("launcher_platform", ['windows'])
def test_DynamicSliceInstanceSpawner_External_E2E_Launcher(self, workspace, launcher, level,
remote_console_instance, project, launcher_platform):
expected_lines = [
"Instances found in area = 400"
]
hydra.launch_and_validate_results_launcher(launcher, level, remote_console_instance, expected_lines, launch_ap=False)
# Cleanup our temp level
file_system.delete([os.path.join(workspace.paths.engine_root(), project, "Levels", level)], True, True)
@pytest.mark.parametrize("launcher_platform", ['windows_editor'])
def test_LayerBlender_E2E_Editor(self, request, workspace, project, level, editor, launcher_platform):
# Ensure our test level does not already exist
file_system.delete([os.path.join(workspace.paths.engine_root(), project, "Levels", level)], True, True)
from .EditorScripts import LayerBlender_E2E_Editor as test_module
self._run_test(request, workspace, editor, test_module, enable_prefab_system=False)
@pytest.mark.parametrize("launcher_platform", ['windows'])
@pytest.mark.xfail(reason="https://github.com/o3de/o3de/issues/4170")
def test_LayerBlender_E2E_Launcher(self, workspace, launcher, level,
remote_console_instance, project, launcher_platform):
launcher.args.extend(["-rhi=Null"])
launcher.start(launch_ap=False)
assert launcher.is_alive(), "Launcher failed to start"
# Wait for the test script to quit the launcher. If wait_for raises (i.e. the launcher is still alive at the timeout), the test was not successful
waiter.wait_for(lambda: not launcher.is_alive(), timeout=300)
# Verify launcher quit successfully and did not crash
ret_code = launcher.get_returncode()
assert ret_code == 0, "Test failed. See Game.log for details"
# Cleanup our temp level
file_system.delete([os.path.join(workspace.paths.engine_root(), project, "Levels", level)], True, True)
| 62.25
| 138
| 0.783755
| 1,868
| 17,679
| 7.131156
| 0.123662
| 0.099692
| 0.130471
| 0.049246
| 0.736131
| 0.720892
| 0.720892
| 0.712859
| 0.712859
| 0.668944
| 0
| 0.003509
| 0.14571
| 17,679
| 283
| 139
| 62.469965
| 0.878501
| 0.035353
| 0
| 0.423469
| 0
| 0
| 0.041202
| 0.001761
| 0
| 0
| 0
| 0
| 0.010204
| 1
| 0.244898
| false
| 0
| 0.25
| 0
| 0.510204
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
8f1dda3eb63e47622769b82f7f990a3303f3a4ae
| 2,514
|
py
|
Python
|
ui/Pytest/test_LineEditMinimumMaximumController.py
|
MoisesHenr/OCEAN
|
e99c853893adc89652794ace62fcc8ffa78aa7ac
|
[
"MIT"
] | 15
|
2021-06-15T13:48:03.000Z
|
2022-01-26T13:51:46.000Z
|
ui/Pytest/test_LineEditMinimumMaximumController.py
|
MoisesHenr/OCEAN
|
e99c853893adc89652794ace62fcc8ffa78aa7ac
|
[
"MIT"
] | 1
|
2021-07-04T02:58:29.000Z
|
2021-07-04T02:58:29.000Z
|
ui/Pytest/test_LineEditMinimumMaximumController.py
|
MoisesHenr/OCEAN
|
e99c853893adc89652794ace62fcc8ffa78aa7ac
|
[
"MIT"
] | 2
|
2021-06-21T20:44:01.000Z
|
2021-06-23T11:10:56.000Z
|
# Author: Moises Henrique Pereira
# This module holds the function tests for the controller of the numerical features component
import pytest
import sys
from PyQt5 import QtWidgets
from ui.mainTest import StaticObjects
@pytest.mark.parametrize('featureName', [1, 2.9, False, ('t1', 't2'), None])
def test_CILEMMC_initializeView_wrong_type_featureName_parameter(featureName):
with pytest.raises(AssertionError):
app = QtWidgets.QApplication(sys.argv)
counterfactualInterfaceLineEditMinimumMaximumController = StaticObjects.staticCounterfactualInterfaceLineEditMinimumMaximumController()
counterfactualInterfaceLineEditMinimumMaximumController.initializeView(featureName, 0, 1)
def test_CILEMMC_initializeView_none_min_parameter():
with pytest.raises(AssertionError):
app = QtWidgets.QApplication(sys.argv)
counterfactualInterfaceLineEditMinimumMaximumController = StaticObjects.staticCounterfactualInterfaceLineEditMinimumMaximumController()
counterfactualInterfaceLineEditMinimumMaximumController.initializeView('featureName', None, 1)
def test_CILEMMC_initializeView_none_max_parameter():
with pytest.raises(AssertionError):
app = QtWidgets.QApplication(sys.argv)
counterfactualInterfaceLineEditMinimumMaximumController = StaticObjects.staticCounterfactualInterfaceLineEditMinimumMaximumController()
counterfactualInterfaceLineEditMinimumMaximumController.initializeView('featureName', 0, None)
def test_CILEMMC_initializeView_right_parameters():
app = QtWidgets.QApplication(sys.argv)
counterfactualInterfaceLineEditMinimumMaximumController = StaticObjects.staticCounterfactualInterfaceLineEditMinimumMaximumController()
counterfactualInterfaceLineEditMinimumMaximumController.initializeView('featureName', 0, 1)
def test_CILEMMC_setSelectedValue_none_parameter():
with pytest.raises(AssertionError):
app = QtWidgets.QApplication(sys.argv)
counterfactualInterfaceLineEditMinimumMaximumController = StaticObjects.staticCounterfactualInterfaceLineEditMinimumMaximumController()
counterfactualInterfaceLineEditMinimumMaximumController.setSelectedValue(None)
def test_CILEMMC_setSelectedValue_right_parameter():
app = QtWidgets.QApplication(sys.argv)
counterfactualInterfaceLineEditMinimumMaximumController = StaticObjects.staticCounterfactualInterfaceLineEditMinimumMaximumController()
counterfactualInterfaceLineEditMinimumMaximumController.setSelectedValue(0.5)
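# Note: QtWidgets.QApplication is a per-process singleton, so constructing a
# fresh one inside every test can fail once an instance already exists. A
# minimal fixture sketch (an assumption about how this suite could be wired,
# not part of the original file) that reuses one instance for the session:
@pytest.fixture(scope='session')
def qt_app():
    # Reuse the existing application if one was already created.
    return QtWidgets.QApplication.instance() or QtWidgets.QApplication(sys.argv)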
| 55.866667
| 143
| 0.838902
| 175
| 2,514
| 11.891429
| 0.32
| 0.020183
| 0.040365
| 0.077847
| 0.825084
| 0.79433
| 0.769822
| 0.769822
| 0.769822
| 0.769822
| 0
| 0.006206
| 0.102625
| 2,514
| 45
| 144
| 55.866667
| 0.916223
| 0.050119
| 0
| 0.484848
| 0
| 0
| 0.020134
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 1
| 0.181818
| false
| 0
| 0.121212
| 0
| 0.30303
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8f31782a6012b73d5ada6e6de802c8e5d232912b
| 33,930
|
py
|
Python
|
mapmint-services/datastores/postgis/pgConnection.py
|
fenilgmehta/mapmint
|
7c28c42dbe9b17b11f5f6f080fd2c397f4f6937f
|
[
"MIT"
] | null | null | null |
mapmint-services/datastores/postgis/pgConnection.py
|
fenilgmehta/mapmint
|
7c28c42dbe9b17b11f5f6f080fd2c397f4f6937f
|
[
"MIT"
] | 2
|
2019-03-31T01:11:06.000Z
|
2020-03-15T13:43:16.000Z
|
mapmint-services/datastores/postgis/pgConnection.py
|
fenilgmehta/mapmint
|
7c28c42dbe9b17b11f5f6f080fd2c397f4f6937f
|
[
"MIT"
] | null | null | null |
import psycopg2
import lxml
# import libxslt
from lxml import etree
import osgeo.ogr
import sys
import zoo
import json
try:
from manage_users.manage_users import mm_md5
except ImportError:
from manage_users import mm_md5
class pgConnection:
def __init__(self, conf, dbfile):
self.dbfile = dbfile
self.conf = conf
def parseConf(self):
#libxml2.initParser()
#doc = libxml2.parseFile(self.conf["main"]["dataPath"] + "/PostGIS/" + self.dbfile + ".xml")
doc = etree.parse(self.conf["main"]["dataPath"] + "/PostGIS/" + self.dbfile + ".xml")
#styledoc = libxml2.parseFile(self.conf["main"]["dataPath"] + "/PostGIS/conn.xsl")
#style = etree.XSLT(styledoc)
styledoc = etree.parse(self.conf["main"]["dataPath"] + "/PostGIS/conn.xsl")
style = etree.XSLT(styledoc)
res = style(doc)
self.db_string = str(res).replace("PG: ", "")
def connect(self):
try:
self.conn = psycopg2.connect(self.db_string)
self.cur = self.conn.cursor()
return True
except Exception as e:
self.conf["lenv"]["message"] = "Unable to connect: " + str(e)
return False
def execute(self, req):
try:
self.ex = self.cur.execute(req)
if req.count("SELECT") > 0 or req.count("select") > 0:
return self.cur.fetchall()
else:
return True
except Exception as e:
self.conf["lenv"]["message"] = "Unable to execute " + req.encode('utf-8') + " due to: " + str(e)
# print("Unable to execute "+req+str(e), file=sys.stderr)
return False
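# Note: execute() above splices values straight into the SQL text, which is
# what the services below rely on but is injection-prone. A minimal sketch of
# the standard psycopg2 alternative (an illustration, not how this module is
# written): pass values separately and let the driver bind %s placeholders.
def execute_with_params(cur, query, params=()):
    # psycopg2 quotes/escapes each %s placeholder itself, so user input never
    # has to be concatenated into the query string.
    cur.execute(query, params)
    return cur.fetchall()  # for SELECT-style queries
# Example: execute_with_params(cur,
#     "select tablename from pg_tables where schemaname = %s", ("public",))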
def listSchemas(conf, inputs, outputs):
print(inputs["dataStore"]["value"], file=sys.stderr)
db = pgConnection(conf, inputs["dataStore"]["value"])
db.parseConf()
if db.connect():
res = db.execute(
"select nspname as schema from pg_namespace WHERE nspname NOT LIKE 'information_schema' AND nspname NOT LIKE 'pg_%' ORDER BY nspname")
if res:
outputs["Result"]["value"] = json.dumps(res)
return zoo.SERVICE_SUCCEEDED
else:
print("Unable to connect", file=sys.stderr)
return zoo.SERVICE_FAILED
def listTables(conf, inputs, outputs):
import authenticate.service as auth
if not (auth.is_ftable(inputs["schema"]["value"])):
conf["lenv"]["message"] = zoo._("Unable to identify your parameter as table or field name")
return zoo.SERVICE_FAILED
db = pgConnection(conf, inputs["dataStore"]["value"])
db.parseConf()
if db.connect():
req = "select schemaname||'.'||tablename as tablename, tablename as display from pg_tables WHERE schemaname NOT LIKE 'information_schema' AND schemaname NOT LIKE 'pg_%' AND tablename NOT LIKE 'spatial_ref_sys' AND tablename NOT LIKE 'geometry_columns' "
if "schema" in inputs:
req += "AND schemaname='" + inputs["schema"]["value"] + "'"
req += " ORDER BY schemaname||'.'||tablename"
res = db.execute(req)
outputs["Result"]["value"] = json.dumps(res)
return zoo.SERVICE_SUCCEEDED
# return zoo.SERVICE_SUCCEEDED
else:
print("Unable to connect", file=sys.stderr)
return zoo.SERVICE_FAILED
def listTablesAndViews(conf, inputs, outputs):
db = pgConnection(conf, inputs["dataStore"]["value"])
db.parseConf()
if db.connect():
req = "select schemaname||'.'||tablename as tablename, tablename as display from pg_tables WHERE schemaname NOT LIKE 'information_schema' AND schemaname NOT LIKE 'pg_%' AND tablename NOT LIKE 'tmp%' AND tablename NOT LIKE 'spatial_ref_sys' AND tablename NOT LIKE 'geometry_columns' "
req1 = "select schemaname||'.'||viewname as tablename, viewname as display from pg_views WHERE schemaname NOT LIKE 'information_schema' AND schemaname NOT LIKE 'pg_%' "
if "schema" in inputs:
req += " AND schemaname='" + inputs["schema"]["value"] + "'"
req1 += " AND schemaname='" + inputs["schema"]["value"] + "'"
res = db.execute("SELECT * from (" + req + ") as foo UNION (" + req1 + ") ORDER BY display")
if res:
outputs["Result"]["value"] = json.dumps(res)
return zoo.SERVICE_SUCCEEDED
else:
print("Unable to connect", file=sys.stderr)
return zoo.SERVICE_FAILED
def getDesc(cur, table):
tmp = table.split('.')
if len(tmp) == 1:
tmp1 = tmp[0]
tmp = ["public", tmp1];
req = "SELECT b.relname as t FROM pg_inherits, pg_class a, pg_class b WHERE inhrelid=a.oid AND inhparent=b.oid AND a.relname = '" + \
tmp[1] + "' AND a.relnamespace=(select oid from pg_namespace where nspname='" + tmp[0] + "')"
res0 = cur.execute(req)
res = cur.fetchall()
if res != False and len(res) > 0:
return "SELECT * FROM (SELECT DISTINCT ON (\"Pos\",\"Field\") * FROM ((SELECT DISTINCT on (\"Pos\") \"Pos\"-1 as \"Pos\",\"Field\",\"Type\",\"Key\", \"Ref\", \"RefCol\", \"RefCols\",array_upper(\"RefCols\",1) from (SELECT attnum AS \"Pos\", attname AS \"Field\",CASE WHEN atttypmod >0 THEN b.typname || '(' || atttypmod-4 || ')' ELSE b.typname END AS \"Type\" FROM pg_catalog.pg_attribute a, pg_catalog.pg_type b WHERE a.atttypid=b.oid AND a.attrelid = (SELECT pg_class.oid FROM pg_class, pg_namespace WHERE relname='" + \
res[0][0] + "' AND pg_namespace.oid=relnamespace AND nspname='" + tmp[
0] + "') AND a.attnum > 0 AND NOT a.attisdropped ORDER BY attnum) a LEFT JOIN (SELECT conkey,c.conname AS constraint_name, CASE c.contype WHEN 'c' THEN 'CHECK' WHEN 'f' THEN 'FOR' WHEN 'p' THEN 'PRI' WHEN 'u' THEN 'UNIQUE' END AS \"Key\", t3.nspname||'.'||t2.relname AS \"Ref\", (SELECT attname from pg_catalog.pg_attribute WHERE attrelid=c.confrelid AND confkey[1] = attnum) AS \"RefCol\" FROM pg_constraint c LEFT JOIN pg_class t ON c.conrelid = t.oid LEFT JOIN pg_class t2 ON c.confrelid = t2.oid LEFT JOIN pg_namespace t3 ON t2.relnamespace=t3.oid WHERE t.relname = '" + \
res[0][
0] + "') b ON get_nb_of(conkey,\"Pos\")>0 LEFT JOIN (SELECT DISTINCT ON (at2.attnum) c.*, at2.attnum AS \"myid\", ARRAY(SELECT attname AS \"RefCol\" FROM pg_constraint AS c, pg_catalog.pg_attribute, pg_class t, pg_class t2 WHERE c.conrelid = t.oid AND c.confrelid = t2.oid AND t.relname = '" + \
res[0][
0] + "' AND attrelid=confrelid AND get_nb_of(confkey,attnum) > 0) AS \"RefCols\", at2.attnum, at2.attname AS atn, get_index_of(conkey,at2.attnum) AS \"RealOrigColNum\", at1.attnum, at1.attname, get_index_of(confkey,at1.attnum) AS \"RealRefColNum\", t.relname as orig, t2.relname as ref FROM pg_constraint AS c, pg_catalog.pg_attribute AS at1, pg_catalog.pg_attribute AS at2, pg_class t, pg_class t2 WHERE c.conrelid = t.oid AND c.confrelid = t2.oid AND t.relname = '" + \
res[0][
0] + "' AND at1.attrelid=confrelid AND get_nb_of(conkey,at2.attnum) > 0 AND get_nb_of(confkey,at1.attnum) > 0 AND t.relname='" + \
res[0][
0] + "' AND at2.attrelid=t.oid) AS foreigns ON foreigns.myid=a.\"Pos\") UNION (SELECT DISTINCT on (\"Pos\") \"Pos\"-1 as \"Pos\",\"Field\",\"Type\",\"Key\", \"Ref\", \"RefCol\", \"RefCols\",array_upper(\"RefCols\",1) from (SELECT attnum AS \"Pos\", attname AS \"Field\",CASE WHEN atttypmod >0 THEN b.typname || '(' || atttypmod-4 || ')' ELSE b.typname END AS \"Type\" FROM pg_catalog.pg_attribute a, pg_catalog.pg_type b WHERE a.atttypid=b.oid AND a.attrelid = (SELECT pg_class.oid FROM pg_class, pg_namespace WHERE relname='" + \
tmp[1] + "' AND pg_namespace.oid=relnamespace AND nspname='" + tmp[
0] + "') AND a.attnum > 0 AND NOT a.attisdropped ORDER BY attnum) a LEFT JOIN (SELECT conkey,c.conname AS constraint_name, CASE c.contype WHEN 'c' THEN 'CHECK' WHEN 'f' THEN 'FOR' WHEN 'p' THEN 'PRI' WHEN 'u' THEN 'UNIQUE' END AS \"Key\", t3.nspname||'.'||t2.relname AS \"Ref\", (SELECT attname from pg_catalog.pg_attribute WHERE attrelid=c.confrelid AND confkey[1] = attnum) AS \"RefCol\" FROM pg_constraint c LEFT JOIN pg_class t ON c.conrelid = t.oid LEFT JOIN pg_class t2 ON c.confrelid = t2.oid LEFT JOIN pg_namespace t3 ON t2.relnamespace=t3.oid WHERE t.relname = '" + \
tmp[1] + "' and t.relnamespace=(select oid from pg_namespace where nspname='" + tmp[
0] + "') ) b ON get_nb_of(conkey,\"Pos\")>0 LEFT JOIN (SELECT DISTINCT ON (at2.attnum) c.*, at2.attnum AS \"myid\", ARRAY(SELECT attname AS \"RefCol\" FROM pg_constraint AS c, pg_catalog.pg_attribute, pg_class t, pg_class t2 WHERE c.conrelid = t.oid AND c.confrelid = t2.oid AND t.relname = '" + \
tmp[
1] + "' AND attrelid=confrelid AND get_nb_of(confkey,attnum) > 0) AS \"RefCols\", at2.attnum, at2.attname AS atn, get_index_of(conkey,at2.attnum) AS \"RealOrigColNum\", at1.attnum, at1.attname, get_index_of(confkey,at1.attnum) AS \"RealRefColNum\", t.relname as orig, t2.relname as ref FROM pg_constraint AS c, pg_catalog.pg_attribute AS at1, pg_catalog.pg_attribute AS at2, pg_class t, pg_class t2 WHERE c.conrelid = t.oid AND c.confrelid = t2.oid AND t.relname = '" + \
tmp[
1] + "' AND at1.attrelid=confrelid AND get_nb_of(conkey,at2.attnum) > 0 AND get_nb_of(confkey,at1.attnum) > 0 AND t.relname='" + \
tmp[1] + "' and t.relnamespace=(select oid from pg_namespace where nspname='" + tmp[
0] + "') AND at2.attrelid=t.oid) AS foreigns ON foreigns.myid=a.\"Pos\")) As foo) as foo1 ORDER BY \"Pos\",\"Key\""
else:
# print("SELECT DISTINCT on (\"Pos\") \"Pos\"-1 as \"Pos\",\"Field\",\"Type\",\"Key\", \"Ref\", \"RefCol\", \"RefCols\",array_upper(\"RefCols\",1) from (SELECT attnum AS \"Pos\", attname AS \"Field\",CASE WHEN atttypmod >0 THEN b.typname || '(' || atttypmod-4 || ')' ELSE b.typname END AS \"Type\" FROM pg_catalog.pg_attribute a, pg_catalog.pg_type b WHERE a.atttypid=b.oid AND a.attrelid = (SELECT pg_class.oid FROM pg_class, pg_namespace WHERE relname='"+tmp[1]+"' AND pg_namespace.oid=relnamespace AND nspname='"+tmp[0]+"') AND a.attnum > 0 AND NOT a.attisdropped ORDER BY attnum) a LEFT JOIN (SELECT conkey,c.conname AS constraint_name, CASE c.contype WHEN 'c' THEN 'CHECK' WHEN 'f' THEN 'FOR' WHEN 'p' THEN 'PRI' WHEN 'u' THEN 'UNIQUE' END AS \"Key\", t3.nspname||'.'||t2.relname AS \"Ref\", (SELECT attname from pg_catalog.pg_attribute WHERE attrelid=c.confrelid AND confkey[1] = attnum) AS \"RefCol\" FROM pg_constraint c LEFT JOIN pg_class t ON c.conrelid = t.oid LEFT JOIN pg_class t2 ON c.confrelid = t2.oid LEFT JOIN pg_namespace t3 ON t2.relnamespace=t3.oid WHERE t.relname = '"+tmp[1]+"' and t.relnamespace=(select oid from pg_namespace where nspname='"+tmp[0]+"')) b ON get_nb_of(conkey,\"Pos\")>0 LEFT JOIN (SELECT DISTINCT ON (at2.attnum) c.*, at2.attnum AS \"myid\", ARRAY(SELECT attname AS \"RefCol\" FROM pg_constraint AS c, pg_catalog.pg_attribute, pg_class t, pg_class t2 WHERE c.conrelid = t.oid AND c.confrelid = t2.oid AND t.relname = '"+tmp[1]+"' AND attrelid=confrelid AND get_nb_of(confkey,attnum) > 0 and t.relnamespace=(select oid from pg_namespace where nspname='"+tmp[0]+"')) AS \"RefCols\", at2.attnum, at2.attname AS atn, get_index_of(conkey,at2.attnum) AS \"RealOrigColNum\", at1.attnum, at1.attname, get_index_of(confkey,at1.attnum) AS \"RealRefColNum\", t.relname as orig, t2.relname as ref FROM pg_constraint AS c, pg_catalog.pg_attribute AS at1, pg_catalog.pg_attribute AS at2, pg_class t, pg_class t2 WHERE c.conrelid = t.oid AND c.confrelid = t2.oid AND t.relname = '"+tmp[1]+"' AND at1.attrelid=confrelid AND get_nb_of(conkey,at2.attnum) > 0 AND get_nb_of(confkey,at1.attnum) > 0 AND t.relname='"+tmp[1]+"' AND at2.attrelid=t.oid and t.relnamespace=(select oid from pg_namespace where nspname='"+tmp[0]+"')) AS foreigns ON foreigns.myid=a.\"Pos\"", file=sys.stderr)
return "SELECT DISTINCT on (\"Pos\") \"Pos\"-1 as \"Pos\",\"Field\",\"Type\",\"Key\", \"Ref\", \"RefCol\", \"RefCols\",array_upper(\"RefCols\",1) from (SELECT * FROM (SELECT attnum AS \"Pos\", attname AS \"Field\",CASE WHEN atttypmod >0 THEN b.typname || '(' || atttypmod-4 || ')' ELSE b.typname END AS \"Type\" FROM pg_catalog.pg_attribute a, pg_catalog.pg_type b WHERE a.atttypid=b.oid AND a.attrelid = (SELECT pg_class.oid FROM pg_class, pg_namespace WHERE relname='" + \
tmp[1] + "' AND pg_namespace.oid=relnamespace AND nspname='" + tmp[
0] + "') AND a.attnum > 0 AND NOT a.attisdropped ORDER BY attnum) a LEFT JOIN (SELECT conkey,c.conname AS constraint_name, CASE c.contype WHEN 'c' THEN 'CHECK' WHEN 'f' THEN 'FOR' WHEN 'p' THEN 'PRI' WHEN 'u' THEN 'UNIQUE' END AS \"Key\", t3.nspname||'.'||t2.relname AS \"Ref\", (SELECT attname from pg_catalog.pg_attribute WHERE attrelid=c.confrelid AND confkey[1] = attnum) AS \"RefCol\" FROM pg_constraint c LEFT JOIN pg_class t ON c.conrelid = t.oid LEFT JOIN pg_class t2 ON c.confrelid = t2.oid LEFT JOIN pg_namespace t3 ON t2.relnamespace=t3.oid WHERE t.relname = '" + \
tmp[1] + "' and t.relnamespace=(select oid from pg_namespace where nspname='" + tmp[
0] + "')) b ON get_nb_of(conkey,\"Pos\")>0 LEFT JOIN (SELECT DISTINCT ON (at2.attnum) c.*, at2.attnum AS \"myid\", ARRAY(SELECT attname AS \"RefCol\" FROM pg_constraint AS c, pg_catalog.pg_attribute, pg_class t, pg_class t2 WHERE c.conrelid = t.oid AND c.confrelid = t2.oid AND t.relname = '" + \
tmp[
1] + "' AND attrelid=confrelid AND get_nb_of(confkey,attnum) > 0 and t.relnamespace=(select oid from pg_namespace where nspname='" + \
tmp[
0] + "')) AS \"RefCols\", at2.attnum, at2.attname AS atn, get_index_of(conkey,at2.attnum) AS \"RealOrigColNum\", at1.attnum, at1.attname, get_index_of(confkey,at1.attnum) AS \"RealRefColNum\", t.relname as orig, t2.relname as ref FROM pg_constraint AS c, pg_catalog.pg_attribute AS at1, pg_catalog.pg_attribute AS at2, pg_class t, pg_class t2 WHERE c.conrelid = t.oid AND c.confrelid = t2.oid AND t.relname = '" + \
tmp[
1] + "' AND at1.attrelid=confrelid AND get_nb_of(conkey,at2.attnum) > 0 AND get_nb_of(confkey,at1.attnum) > 0 AND t.relname='" + \
tmp[1] + "' AND at2.attrelid=t.oid and t.relnamespace=(select oid from pg_namespace where nspname='" + \
tmp[0] + "')) AS foreigns ON foreigns.myid=a.\"Pos\" order by \"Key\"='PRI' or \"Key\"='FOR' desc) As f"
def getTableDescription(conf, inputs, outputs):
import authenticate.service as auth
# if not(auth.is_ftable(inputs["table"]["value"])):
# conf["lenv"]["message"]=zoo._("Unable to identify your parameter as table or field name")
# return zoo.SERVICE_FAILED
db = pgConnection(conf, inputs["dataStore"]["value"])
db.parseConf()
if db.connect():
tmp = inputs["table"]["value"].split('.')
req = getDesc(db.cur, inputs["table"]["value"])
# print(req, file=sys.stderr)
res = db.execute(req)
if res != False and len(res) > 0:
outputs["Result"]["value"] = json.dumps(res)
return zoo.SERVICE_SUCCEEDED
else:
print("unable to run request " + req, file=sys.stderr)
return zoo.SERVICE_FAILED
else:
print("Unable to connect", file=sys.stderr)
return zoo.SERVICE_FAILED
def getTableContent(conf, inputs, outputs):
import authenticate.service as auth
# if not(auth.is_ftable(inputs["table"]["value"])):
# conf["lenv"]["message"]=zoo._("Unable to identify your parameter as table or field name")
# return zoo.SERVICE_FAILED
db = pgConnection(conf, inputs["dataStore"]["value"])
db.parseConf()
getTableDescription(conf, inputs, outputs)
tmp = eval(outputs["Result"]["value"].replace("null", "None"))
pkey = 0
geom = []
files = []
fields = ""
for i in range(0, len(tmp)):
if tmp[i][3] == "PRI":
pkey = tmp[i][0]
if tmp[i][2] == "geometry":
geom += [i]
if tmp[i][2] == "bytea":
files += [i]
if tmp[i][3] == "FOR" and not ("force" in inputs):
input1 = inputs
otbl = inputs["table"]["value"]
inputs["table"]["value"] = tmp[i][4]
getTableDescription(conf, inputs, outputs)
tmp2 = eval(outputs["Result"]["value"].replace("null", "None"))
pkey1 = 0
for j in range(0, len(tmp2)):
if tmp2[j][3] == "PRI":
pkey1 = j
break
hasV = False
for j in range(0, len(tmp2)):
if not (hasV) and (tmp2[j][2].count("char") > 0 or tmp2[j][2].count("text") > 0):
if fields != "":
fields += ","
hasV = True
fields += "(SELECT " + tmp2[j][1] + " FROM " + tmp[i][4] + " as a WHERE a." + tmp2[pkey][
1] + "=" + otbl + "." + tmp[i][1] + ")"
if not (hasV):
if fields != "":
fields += ","
fields += "(SELECT " + tmp2[0][1] + " FROM " + tmp[i][4] + " as a WHERE a." + tmp2[pkey][
1] + "=" + otbl + "." + tmp[i][1] + ")"
inputs["table"]["value"] = otbl
else:
if fields != "":
fields += ","
fields += tmp[i][1]
if db.connect():
tmp1 = inputs["table"]["value"].split(".")
tmp1[0] = '"' + tmp1[0] + '"'
tmp1[1] = '"' + tmp1[1] + '"'
inputs["table"]["value"] = (".").join(tmp1)
req = "select count(*) from " + inputs["table"]["value"]
if "clause" in inputs and inputs["clause"]["value"] != "NULL":
req += " WHERE " + inputs["clause"]["value"]
if "search" in inputs and inputs["search"]["value"] != "NULL" and inputs["search"]["value"] != "asc":
req += " WHERE "
print(req, file=sys.stderr)
cnt = 0
print(req, file=sys.stderr)
for i in range(0, len(tmp)):
if cnt > 0:
req += " OR "
req += tmp[i][1] + "::varchar like '%" + inputs["search"]["value"] + "%'"
cnt += 1
res = db.execute(req)
if res != False:
total = res[0][0]
req = "select "
if "cols" in inputs and inputs["cols"]["value"] != "NULL":
req += inputs["cols"]["value"]
else:
req += fields
req += " from " + inputs["table"]["value"]
if "clause" in inputs and inputs["clause"]["value"] != "NULL":
req += " WHERE " + inputs["clause"]["value"]
if "search" in inputs and inputs["search"]["value"] != "NULL" and inputs["search"]["value"] != "asc":
req += " WHERE "
print(req, file=sys.stderr)
cnt = 0
print(req, file=sys.stderr)
for i in range(0, len(tmp)):
if cnt > 0:
req += " OR "
req += tmp[i][1] + "::varchar like '%" + inputs["search"]["value"] + "%'"
cnt += 1
if "sortname" in inputs and inputs["sortname"]["value"] != "NULL":
req += " ORDER BY " + inputs["sortname"]["value"] + " " + inputs["sortorder"]["value"]
if "limit" in inputs and inputs["limit"]["value"] != "NULL":
if "page" in inputs and inputs["page"]["value"] != "":
req += " OFFSET " + str((int(inputs["page"]["value"]) - 1) * int(inputs["limit"]["value"]))
page = inputs["page"]["value"]
req += " LIMIT " + inputs["limit"]["value"]
else:
page = 1
req += " LIMIT 10"
print(req, file=sys.stderr)
res = db.execute(req)
if res != False:
rows = []
for i in range(0, len(res)):
res0 = []
for k in range(0, len(res[i])):
try:
tmp = str(res[i][k].decode('utf-8'))
print(dir(tmp), file=sys.stderr)
except Exception as e:
print(e, file=sys.stderr)
tmp = str(res[i][k])
res0 += [str(tmp)]
if len(geom) > 0:
for j in range(0, len(geom)):
res0[geom[j]] = "GEOMETRY"
if len(files) > 0:
for j in range(0, len(files)):
res0[files[j]] = "BINARY FILE"
rows += [{"id": res[i][pkey], "cell": res0}]
outputs["Result"]["value"] = json.dumps({"page": page, "total": total, "rows": rows}, ensure_ascii=False)
return zoo.SERVICE_SUCCEEDED
else:
print("unable to run request", file=sys.stderr)
return zoo.SERVICE_FAILED
else:
print("Unable to connect", file=sys.stderr)
return zoo.SERVICE_FAILED
def getTableContent1(conf, inputs, outputs):
import authenticate.service as auth
# if not(auth.is_ftable(inputs["table"]["value"])):
# conf["lenv"]["message"]=zoo._("Unable to identify your parameter as table or field name")
# return zoo.SERVICE_FAILED
db = pgConnection(conf, inputs["dataStore"]["value"])
db.parseConf()
getTableDescription(conf, inputs, outputs)
tmp = eval(outputs["Result"]["value"].replace("null", "None"))
pkey = 0
geom = []
files = []
fields = ""
for i in range(0, len(tmp)):
if tmp[i][3] == "PRI":
pkey = tmp[i][0]
if tmp[i][2] == "geometry":
geom += [i]
if tmp[i][2] == "bytea":
files += [i]
if tmp[i][3] == "FOR" and not ("force" in inputs):
input1 = inputs
otbl = inputs["table"]["value"]
inputs["table"]["value"] = tmp[i][4]
getTableDescription(conf, inputs, outputs)
tmp2 = eval(outputs["Result"]["value"].replace("null", "None"))
pkey1 = 0
for j in range(0, len(tmp2)):
if tmp2[j][3] == "PRI":
pkey1 = j
break
hasV = False
for j in range(0, len(tmp2)):
if not (hasV) and (tmp2[j][2].count("char") > 0 or tmp2[j][2].count("text") > 0):
if fields != "":
fields += ","
hasV = True
fields += "(SELECT " + tmp2[j][1] + " FROM " + tmp[i][4] + " as a WHERE a." + tmp2[pkey][
1] + "=" + otbl + "." + tmp[i][1] + ")"
if not (hasV):
if fields != "":
fields += ","
fields += "(SELECT " + tmp2[0][1] + " FROM " + tmp[i][4] + " as a WHERE a." + tmp2[pkey][
1] + "=" + otbl + "." + tmp[i][1] + ")"
inputs["table"]["value"] = otbl
else:
if fields != "":
fields += ","
fields += tmp[i][1]
if db.connect():
tmp1 = inputs["table"]["value"].split(".")
tmp1[0] = '"' + tmp1[0] + '"'
tmp1[1] = '"' + tmp1[1] + '"'
inputs["table"]["value"] = (".").join(tmp1)
req = "select count(*) from " + inputs["table"]["value"]
if "clause" in inputs and inputs["clause"]["value"] != "NULL":
req += " WHERE " + inputs["clause"]["value"]
if "search" in inputs and inputs["search"]["value"] != "NULL" and inputs["search"]["value"] != "asc":
req += " WHERE "
print(req, file=sys.stderr)
cnt = 0
print(req, file=sys.stderr)
for i in range(0, len(tmp)):
if cnt > 0:
req += " OR "
req += tmp[i][1] + "::varchar like '%" + inputs["search"]["value"] + "%'"
cnt += 1
print(req, file=sys.stderr)
res = db.execute(req)
if res != False:
total = res[0][0]
req = "select "
if "cols" in inputs and inputs["cols"]["value"] != "NULL":
req += inputs["cols"]["value"]
else:
req += fields
req += " from " + inputs["table"]["value"]
if "clause" in inputs and inputs["clause"]["value"] != "NULL":
req += " WHERE " + inputs["clause"]["value"]
if "search" in inputs and inputs["search"]["value"] != "NULL" and inputs["search"]["value"] != "asc":
req += " WHERE "
print(req, file=sys.stderr)
cnt = 0
print(req, file=sys.stderr)
for i in range(0, len(tmp)):
if cnt > 0:
req += " OR "
req += tmp[i][1] + "::varchar like '%" + inputs["search"]["value"] + "%'"
cnt += 1
print(req, file=sys.stderr)
if "sortname" in inputs and inputs["sortname"]["value"] != "NULL":
req += " ORDER BY " + inputs["sortname"]["value"] + " " + inputs["sortorder"]["value"]
if "limit" in inputs and inputs["limit"]["value"] != "NULL":
if "page" in inputs and inputs["page"]["value"] != "":
req += " OFFSET " + str((int(inputs["page"]["value"]) - 1) * int(inputs["limit"]["value"]))
page = inputs["page"]["value"]
req += " LIMIT " + inputs["limit"]["value"]
else:
page = 1
req += " LIMIT 10"
print(req, file=sys.stderr)
res = db.execute(req)
            if res is not False:
rows = []
for i in range(0, len(res)):
res0 = []
for k in range(0, len(res[i])):
                        try:
                            tmp = str(res[i][k].decode('utf-8'))
                        except Exception:
                            tmp = str(res[i][k])
                        res0 += [tmp]
                    for j in geom:
                        res0[j] = "GEOMETRY"
                    for j in files:
                        res0[j] = "BINARY FILE"
rows += [{"id": res[i][pkey], "cell": res0}]
outputs["Result"]["value"] = json.dumps({"page": page, "total": total, "rows": rows}, ensure_ascii=False)
return zoo.SERVICE_SUCCEEDED
            else:
                conf["lenv"]["message"] = "Unable to run request"
                print("Unable to run request", file=sys.stderr)
                return zoo.SERVICE_FAILED
    else:
        conf["lenv"]["message"] = "Unable to connect"
        print("Unable to connect", file=sys.stderr)
        return zoo.SERVICE_FAILED
def deleteTuple(conf, inputs, outputs):
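    """Delete the tuples matching inputs["clause"]["value"] from the table."""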
db = pgConnection(conf, inputs["dataStore"]["value"])
db.parseConf()
if db.connect():
res = db.execute("DELETE FROM " + inputs["table"]["value"] + " WHERE " + inputs["clause"]["value"])
        if res is False:
            # Rebinding the local name conf would not reach the caller; copy
            # the connection's error message into the caller-supplied dict.
            conf["lenv"]["message"] = db.conf["lenv"]["message"]
            return zoo.SERVICE_FAILED
        else:
            db.conn.commit()
            outputs["Result"]["value"] = "Tuple deleted"
            return zoo.SERVICE_SUCCEEDED
    else:
        conf["lenv"]["message"] = db.conf["lenv"]["message"]
        return zoo.SERVICE_FAILED
import psycopg2
import json
from psycopg2.extensions import adapt
def editTuple(conf, inputs, outputs):
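    """Update (when a clause is given) or insert a tuple built from the JSON
    object in inputs["obj"]["value"], converting each value with testDesc()."""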
# TODO: confirm assumption: inputs is a Python 3 dictionary object
getTableDescription(conf, inputs, outputs)
    desc = json.loads(outputs["Result"]["value"])
tmp = json.loads(inputs["obj"]["value"])
if "clause" in inputs and inputs["clause"]["value"] != "NULL":
req = "UPDATE " + inputs["table"]["value"] + " set "
fields = ""
tkeys = list(tmp.keys())
for i in tkeys:
fd = None
for k in desc:
if k[1] == i:
fd = k[2]
if fd is not None:
print(tmp, file=sys.stderr)
print(fd, file=sys.stderr)
td = testDesc(tmp[i], fd)
if td is not None:
if fields != "":
fields += ", "
fields += '"' + i + '"=' + td
if "content" in inputs:
if fields != "":
fields += ","
print(inputs["content"]["value"], file=sys.stderr)
tmp1 = inputs["content"]["value"]
fields += '"content"=%s' % adapt(
inputs["content"]["value"].replace('<?xml version="1.0" encoding="utf-8"?>\n', ''))
req += fields + " WHERE " + inputs["clause"]["value"]
outputs["Result"]["value"] = "Tuple updated"
else:
req = "INSERT INTO " + inputs["table"]["value"] + " "
fields = "("
values = "("
cnt = 0
for i in tmp:
fd = None
for k in desc:
if k[1] == i:
fd = k[2]
            # Guard against columns absent from the table description, as the
            # update branch above already does.
            td = testDesc(tmp[i], fd) if fd is not None else None
            if td is not None:
if fields != "(":
fields += ","
if values != "(":
values += ","
fields += i
values += td
cnt += 1
if list(inputs.keys()).count("content") > 0:
if fields != "(":
fields += ","
if values != "(":
values += ","
fields += "content"
values += '%s' % adapt(inputs["content"]["value"].replace('<?xml version="1.0" encoding="utf-8"?>\n', ''))
fields += ")"
values += ")"
req += fields + " VALUES " + values
outputs["Result"]["value"] = "Tuple inserted"
print(req.encode("utf-8"), file=sys.stderr)
db = pgConnection(conf, inputs["dataStore"]["value"])
db.parseConf()
if db.connect():
try:
res = db.execute(req)
            if res is False:
conf["lenv"]["message"] = db.conf["lenv"]["message"]
return zoo.SERVICE_FAILED
db.conn.commit()
# print(res, file=sys.stderr)
return zoo.SERVICE_SUCCEEDED
except Exception as e:
conf["lenv"]["message"] = "Unable to run the request " + str(e)
return zoo.SERVICE_FAILED
def testDesc(val, desc):
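    """Convert a raw value into a SQL literal according to the column type
    description desc; returns None when the value should be skipped."""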
if desc == "bool":
if val == "t" or val:
return "true"
else:
return "false"
if desc.count("char") > 0 or desc.count("text") > 0:
if desc.count("varchar(40)"):
if val != 'NULL':
return "'" + mm_md5(val) + "'"
else:
return None
else:
if val != 'NULL':
tmp = adapt(val)#.encode('utf-8').decode('utf-8'))
tmp.encoding = "utf-8"
return str(tmp)#.decode('utf-8')
else:
return "NULL"
else:
if desc.count("date") > 0:
tmp = val.split("/")
return "'" + tmp[2] + "-" + tmp[1] + "-" + tmp[0] + "'"
else:
if desc.count("geometry") > 0:
if val != 'NULL':
return "'" + val + "'"
else:
return val
else:
return val
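# Illustrative examples for testDesc:
#   testDesc("t", "bool")           -> "true"
#   testDesc("25/12/2020", "date")  -> "'2020-12-25'"
#   testDesc("NULL", "text")        -> "NULL"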
def fetchType(conf, ftype):
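    """Look up the SQL type code for a field type id in mm_tables.ftypes."""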
db = pgConnection(conf, conf["main"]["dbuserName"])
db.parseConf()
if db.connect():
res = db.execute("SELECT code from mm_tables.ftypes where id=" + ftype)
if res:
return str(res[0][0])
return None
def addColumn(conf, inputs, outputs):
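    """Add a column to a table; field_type "18" means a PostGIS geometry
    column, optionally kept in sync with x/y fields through a trigger."""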
print(inputs["dataStore"]["value"], file=sys.stderr)
db = pgConnection(conf, inputs["dataStore"]["value"])
db.parseConf()
req = []
if db.connect():
if inputs["field_type"]["value"] != "18":
req += ["ALTER TABLE quote_ident(" + inputs["table"]["value"] + ") ADD COLUMN " + inputs["field_name"][
"value"] + " " + fetchType(conf, inputs["field_type"]["value"])]
outputs["Result"]["value"] = zoo._("Column added")
else:
tblInfo = inputs["table"]["value"].split(".")
if len(tblInfo) == 1:
tmp = tblInfo[0]
tblInfo[0] = "public"
tblInfo[1] = tmpl
req += ["SELECT AddGeometryColumn('" + tblInfo[0] + "','" + tblInfo[
1] + "','wkb_geometry',(select srid from spatial_ref_sys where auth_name||':'||auth_srid = '" +
inputs["proj"]["value"] + "'),'" + inputs["geo_type"]["value"] + "',2)"]
outputs["Result"]["value"] = zoo._("Geometry column added.")
if list(inputs.keys()).count("geo_x") > 0 and list(inputs.keys()).count("geo_y") > 0:
req += ["CREATE TRIGGER mm_tables_" + inputs["table"]["value"].replace(".",
"_") + "_update_geom BEFORE UPDATE OR INSERT ON " +
inputs["table"][
"value"] + " FOR EACH ROW EXECUTE PROCEDURE automatically_update_geom_property('" +
inputs["geo_x"]["value"] + "','" + inputs["geo_y"]["value"] + "','" + inputs["proj"][
"value"] + "')"]
outputs["Result"]["value"] += " " + zoo._("Trigger in place")
print(req, file=sys.stderr)
for i in range(0, len(req)):
            if not db.execute(req[i]):
                conf["lenv"]["message"] = zoo._("Unable to run the request")
                return zoo.SERVICE_FAILED
db.conn.commit()
return zoo.SERVICE_SUCCEEDED
else:
conf["lenv"]["message"] = zoo._("Unable to connect")
return zoo.SERVICE_FAILED
56d09b11ed25ec6017ed1280c06b4a542229e329 | 9,712 | py | Python |
Zebrafish spinal locomotor circuit/Version 2/Beat_and_glide_with_sigmas.py |
Bui-lab/Code | 6ce5972a4bd0c059ab167522ab1d945f3b0f5707 | ["MIT"] | 2 |
2021-08-25T08:14:52.000Z | 2021-11-29T12:56:17.000Z |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 3 15:47:19 2018
@author: Yann Roussel and Tuan Bui
Edited by: Emine Topcu in Oct 2021
"""
from random import gauss
from Beat_and_glide import Beat_and_glide_base
from Izhikevich_class import Izhikevich_9P, Leaky_Integrator
class Beat_and_glide_with_sigmas(Beat_and_glide_base):
sigmaD = 0
sigmaL = 0
sigmaP = 0
sigmaW = 0
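    # sigmaD: gaussian jitter on the stimulus drive (see getStimulus)
    # sigmaL: gaussian jitter on connection ranges (see rangeNoiseMultiplier)
    # sigmaP: gaussian jitter on Izhikevich cell parameters (see initNeurons)
    # sigmaW: gaussian jitter on synaptic weights (see weightNoiseMultiplier)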
def __init__ (self, stim0 = 2.89, sigma = 0, sigma_LR = 0.1, sigmaD = 0, sigmaL = 0, sigmaP = 0, sigmaW = 0,
E_glu = 0, E_gly = -70, cv = 0.80,
nMN = 15, ndI6 = 15, nV0v = 15, nV2a = 15, nV1 = 15, nMuscle = 15,
R_str = 1.0):
super().__init__(stim0, sigma, sigma_LR, E_glu, E_gly, cv,
nMN, ndI6, nV0v, nV2a, nV1, nMuscle, R_str)
self.sigmaD = sigmaD
self.sigmaL = sigmaL
self.sigmaP = sigmaP
self.sigmaW = sigmaW
def initNeurons(self):
        ## Declare Neuron Types
        ## Every Izhikevich parameter is jittered per neuron with a gaussian
        ## multiplier of width sigmaP; the rostro-caudal position x is jittered
        ## with width sigma. y = -1 marks the left side, y = 1 the right side.
        def izh_pop(a, b, c, d, vmax, vr, vt, k, Cm, x0, n, y):
            return [Izhikevich_9P(a = a*gauss(1, self.sigmaP),
                                  b = b*gauss(1, self.sigmaP),
                                  c = c*gauss(1, self.sigmaP),
                                  d = d*gauss(1, self.sigmaP),
                                  vmax = vmax*gauss(1, self.sigmaP),
                                  vr = vr*gauss(1, self.sigmaP),
                                  vt = vt*gauss(1, self.sigmaP),
                                  k = k*gauss(1, self.sigmaP),
                                  Cm = Cm*gauss(1, self.sigmaP),
                                  dt = self.getdt(),
                                  x = x0+1.6*i*gauss(1, self.sigma),
                                  y = y) for i in range(n)]
        # Motoneurons
        self.L_MN = izh_pop(0.5, 0.01, -55, 100, 10, -65, -58, 0.5, 20, 5.0, self.nMN, -1)
        self.R_MN = izh_pop(0.5, 0.01, -55, 100, 10, -65, -58, 0.5, 20, 5.0, self.nMN, 1)
        # dI6 commissural inhibitory interneurons
        self.L_dI6 = izh_pop(0.1, 0.002, -55, 4, 10, -60, -54, 0.3, 10, 5.1, self.ndI6, -1)
        self.R_dI6 = izh_pop(0.1, 0.002, -55, 4, 10, -60, -54, 0.3, 10, 5.1, self.ndI6, 1)
        # V0v commissural excitatory interneurons
        self.L_V0v = izh_pop(0.01, 0.002, -55, 2, 8, -60, -54, 0.3, 10, 5.1, self.nV0v, -1)
        self.R_V0v = izh_pop(0.01, 0.002, -55, 2, 8, -60, -54, 0.3, 10, 5.1, self.nV0v, 1)
        # V2a ipsilateral excitatory interneurons
        self.L_V2a = izh_pop(0.1, 0.002, -55, 4, 10, -60, -54, 0.3, 10, 5.1, self.nV2a, -1)
        self.R_V2a = izh_pop(0.1, 0.002, -55, 4, 10, -60, -54, 0.3, 10, 5.1, self.nV2a, 1)
        # V1 ipsilateral inhibitory interneurons
        self.L_V1 = izh_pop(0.1, 0.002, -55, 4, 10, -60, -54, 0.3, 10, 7.1, self.nV1, -1)
        self.R_V1 = izh_pop(0.1, 0.002, -55, 4, 10, -60, -54, 0.3, 10, 7.1, self.nV1, 1)
self.L_Muscle = [ Leaky_Integrator(1.0, 3.0, self.getdt(), 5.0+1.6*i,-1) for i in range(self.nMuscle)]
self.R_Muscle = [ Leaky_Integrator(1.0, 3.0, self.getdt(), 5.0+1.6*i, 1) for i in range(self.nMuscle)]
def getStimulus(self, t):
        if t > 2000: # Let the initial conditions dissipate first (t > 2000, i.e. 200 ms if t counts 0.1 ms steps)
return self.stim0 * gauss(1, self.sigmaD)
return 0
def rangeNoiseMultiplier(self):
return gauss(1, self.sigmaL)
def weightNoiseMultiplier(self):
return gauss(1, self.sigmaW)
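# Minimal usage sketch (assumption: the simulation entry point is inherited
# from Beat_and_glide_base and is not shown in this file):
#   model = Beat_and_glide_with_sigmas(sigmaD = 0.05, sigmaP = 0.05, sigmaW = 0.05)
#   model.initNeurons()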