Dataset schema (113 columns, reconstructed from the flattened header):

| Column | Dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
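Each row below is one record of this table, shown as: file metadata, the raw `content`, and the numeric quality signals. As a minimal sketch of how such rows could be consumed programmatically (the file name `rows.jsonl` is hypothetical; the rows are assumed to be stored as newline-delimited JSON objects with the fields above):

```python
import json

# Hypothetical file of rows in the schema above, one JSON object per line.
with open("rows.jsonl", encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        # 'content' holds the raw source file; everything else is
        # repo metadata or a precomputed quality signal.
        print(row["hexsha"][:8], row["lang"], row["size"],
              row["qsc_code_num_lines_quality_signal"])
```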
**Row 1** · hexsha `928586b123142b29512b2ac9896000655598f128` · size 16,171 · ext `py` · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | `tests/units/fastsync/commons/test_fastsync_tap_mysql.py` | `epoch8/pipelinewise` | `4de979f9b581dadc92ab3b2ef8f1596ae82fdabe` | ["Apache-2.0"] | null | null | null |
| max_issues | `tests/units/fastsync/commons/test_fastsync_tap_mysql.py` | `epoch8/pipelinewise` | `4de979f9b581dadc92ab3b2ef8f1596ae82fdabe` | ["Apache-2.0"] | null | null | null |
| max_forks | `tests/units/fastsync/commons/test_fastsync_tap_mysql.py` | `epoch8/pipelinewise` | `4de979f9b581dadc92ab3b2ef8f1596ae82fdabe` | ["Apache-2.0"] | null | null | null |

`content`:
```python
import pymysql
from unittest import TestCase
from unittest.mock import patch, call, Mock

from pipelinewise.fastsync.commons import tap_mysql
from pipelinewise.fastsync.commons.tap_mysql import FastSyncTapMySql, MARIADB_ENGINE


class FastSyncTapMySqlMock(FastSyncTapMySql):
    """
    Mocked FastSyncTapMySql class
    """

    def __init__(self, connection_config, tap_type_to_target_type=None):
        super().__init__(connection_config, tap_type_to_target_type)

        self.executed_queries_unbuffered = []
        self.executed_queries = []

    # pylint: disable=too-many-arguments
    def query(self, query, conn=None, params=None, return_as_cursor=False, n_retry=1):
        if query.startswith('INVALID-SQL'):
            raise pymysql.err.InternalError

        # record queries per connection: unbuffered queries go to the
        # unbuffered list, everything else to the buffered list
        if conn == self.conn_unbuffered:
            self.executed_queries_unbuffered.append(query)
        else:
            self.executed_queries.append(query)

        return []


# pylint: disable=invalid-name,no-self-use
class TestFastSyncTapMySql(TestCase):
    """
    Unit tests for fastsync tap mysql
    """

    def setUp(self) -> None:
        """Initialise test FastSyncTapMySql object"""
        self.connection_config = {
            'host': 'foo.com',
            'port': 3306,
            'user': 'my_user',
            'password': 'secret',
            'dbname': 'my_db',
        }
        self.mysql = None

    def test_open_connections_with_default_session_sqls(self):
        """Default session parameters should be applied if no custom session SQLs"""
        self.mysql = FastSyncTapMySqlMock(connection_config=self.connection_config)

        with patch('pymysql.connect') as mysql_connect_mock:
            mysql_connect_mock.return_value = []
            self.mysql.open_connections()

        # Test if session variables applied on both connections
        self.assertListEqual(self.mysql.executed_queries, tap_mysql.DEFAULT_SESSION_SQLS)
        self.assertListEqual(self.mysql.executed_queries_unbuffered, self.mysql.executed_queries)

    def test_get_connection_to_primary(self):
        """
        Check that get connection uses the right credentials to connect to primary
        """
        creds = {
            'host': 'my_primary_host',
            'port': 3306,
            'user': 'my_primary_user',
            'password': 'my_primary_user',
        }

        conn_params, is_replica = FastSyncTapMySql(
            connection_config=creds,
            tap_type_to_target_type='testing'
        ).get_connection_parameters()

        self.assertFalse(is_replica)
        self.assertEqual(conn_params['host'], creds['host'])
        self.assertEqual(conn_params['port'], creds['port'])
        self.assertEqual(conn_params['user'], creds['user'])
        self.assertEqual(conn_params['password'], creds['password'])

    def test_get_connection_to_replica(self):
        """
        Check that get connection uses the right credentials to connect to secondary if present
        """
        creds = {
            'host': 'my_primary_host',
            'replica_host': 'my_replica_host',
            'port': 3306,
            'replica_port': 4406,
            'user': 'my_primary_user',
            'replica_user': 'my_replica_user',
            'password': 'my_primary_user',
            'replica_password': 'my_replica_user',
        }

        conn_params, is_replica = FastSyncTapMySql(
            connection_config=creds,
            tap_type_to_target_type='testing'
        ).get_connection_parameters()

        self.assertTrue(is_replica)
        self.assertEqual(conn_params['host'], creds['replica_host'])
        self.assertEqual(conn_params['port'], creds['replica_port'])
        self.assertEqual(conn_params['user'], creds['replica_user'])
        self.assertEqual(conn_params['password'], creds['replica_password'])

    def test_open_connections_with_session_sqls(self):
        """Custom session parameters should be applied if defined"""
        session_sqls = [
            'SET SESSION max_statement_time=0',
            'SET SESSION wait_timeout=28800',
        ]

        self.mysql = FastSyncTapMySqlMock(
            connection_config={
                **self.connection_config,
                **{'session_sqls': session_sqls},
            }
        )

        with patch('pymysql.connect') as mysql_connect_mock:
            mysql_connect_mock.return_value = []
            self.mysql.open_connections()

        # Test if session variables applied on both connections
        self.assertListEqual(self.mysql.executed_queries, session_sqls)
        self.assertListEqual(self.mysql.executed_queries_unbuffered, self.mysql.executed_queries)

    def test_open_connections_with_invalid_session_sqls(self):
        """Invalid SQLs in session_sqls should be ignored"""
        session_sqls = [
            'SET SESSION max_statement_time=0',
            'INVALID-SQL-SHOULD-BE-SILENTLY-IGNORED',
            'SET SESSION wait_timeout=28800',
        ]

        self.mysql = FastSyncTapMySqlMock(
            connection_config={
                **self.connection_config,
                **{'session_sqls': session_sqls},
            }
        )

        with patch('pymysql.connect') as mysql_connect_mock:
            mysql_connect_mock.return_value = []
            self.mysql.open_connections()

        # Test if session variables applied on both connections
        self.assertListEqual(self.mysql.executed_queries, [
            'SET SESSION max_statement_time=0',
            'SET SESSION wait_timeout=28800',
        ])
        self.assertListEqual(self.mysql.executed_queries_unbuffered, self.mysql.executed_queries)

    def test_fetch_current_log_pos_with_gtid_and_replica_mariadb_engine_succeeds(self):
        """
        If using gtid is enabled and engine is replica mariadb, then expect gtid result
        """
        self.connection_config['use_gtid'] = True
        self.connection_config['engine'] = MARIADB_ENGINE

        self.mysql = FastSyncTapMySql(self.connection_config, lambda x: x)
        self.mysql.is_replica = True

        with patch.object(self.mysql, 'query') as query_method_mock:
            expected_gtid = '0-192-444'
            query_method_mock.side_effect = [
                [{'current_gtids': f'1,,4-192, {expected_gtid},1-400-10'}],
                [{'server_id': 192}]
            ]

            with patch('pymysql.connect') as mysql_connect_mock:
                con = Mock()
                mysql_connect_mock.return_value = con

                result = self.mysql.fetch_current_log_pos()

            query_method_mock.assert_has_calls([
                call('select @@gtid_slave_pos as current_gtids;'),
                call('select @@server_id as server_id;', con),
            ])

            self.assertDictEqual(result, {'gtid': expected_gtid})

    def test_fetch_current_log_pos_with_gtid_and_replica_mariadb_engine_gtid_not_found(self):
        """
        If using gtid is enabled and engine is replica mariadb, the gtid is not found, then expect Exception
        """
        self.connection_config['use_gtid'] = True
        self.connection_config['engine'] = MARIADB_ENGINE

        self.mysql = FastSyncTapMySql(self.connection_config, lambda x: x)
        self.mysql.is_replica = True

        with patch.object(self.mysql, 'query') as query_method_mock:
            query_method_mock.return_value = []

            with self.assertRaises(Exception) as context:
                self.mysql.fetch_current_log_pos()

            self.assertEqual('GTID is not enabled.', str(context.exception))
            query_method_mock.assert_called_once_with('select @@gtid_slave_pos as current_gtids;')

    def test_fetch_current_log_pos_with_gtid_and_primary_mariadb_engine_succeeds(self):
        """
        If using gtid is enabled and engine is primary mariadb which has a list of
        gtids with one that has the same server id, then expect gtid result
        """
        self.connection_config['use_gtid'] = True
        self.connection_config['engine'] = MARIADB_ENGINE

        self.mysql = FastSyncTapMySql(self.connection_config, lambda x: x)

        with patch.object(self.mysql, 'query') as query_method_mock:
            expected_gtid = '0-192-444'
            query_method_mock.side_effect = [
                [{'current_gtids': f'0,{expected_gtid},43223,0-333-11,'}],
                [{'server_id': 192}],
            ]

            result = self.mysql.fetch_current_log_pos()

            query_method_mock.assert_has_calls(
                [
                    call('select @@gtid_current_pos as current_gtids;'),
                    call('select @@server_id as server_id;', None),
                ]
            )

            self.assertDictEqual(result, {'gtid': expected_gtid})

    def test_fetch_current_log_pos_with_gtid_and_primary_mariadb_engine_no_gtid_found_expect_exception(self):
        """
        If using gtid is enabled and engine is primary mariadb which doesn't return gtid, then expect an exception
        """
        self.connection_config['use_gtid'] = True
        self.connection_config['engine'] = MARIADB_ENGINE

        self.mysql = FastSyncTapMySql(self.connection_config, lambda x: x)

        with patch.object(self.mysql, 'query') as query_method_mock:
            query_method_mock.side_effect = [
                []
            ]

            with self.assertRaises(Exception) as context:
                self.mysql.fetch_current_log_pos()

            self.assertEqual('GTID is not enabled.', str(context.exception))

            query_method_mock.assert_has_calls(
                [
                    call('select @@gtid_current_pos as current_gtids;'),
                ]
            )

    def test_fetch_current_log_pos_with_gtid_and_primary_mariadb_engine_no_gtid_with_server_id_found_expect_exception(
            self):
        """
        If using gtid is enabled and engine is primary mariadb which has a list of
        gtids with none having the same server id, then expect an exception
        """
        self.connection_config['use_gtid'] = True
        self.connection_config['engine'] = MARIADB_ENGINE

        self.mysql = FastSyncTapMySql(self.connection_config, lambda x: x)

        with patch.object(self.mysql, 'query') as query_method_mock:
            query_method_mock.side_effect = [
                [{'current_gtids': '0,43223,0-333-11,'}],
                [{'server_id': 192}],
            ]

            with self.assertRaises(Exception) as context:
                self.mysql.fetch_current_log_pos()

            self.assertEqual('No suitable GTID was found.', str(context.exception))

            query_method_mock.assert_has_calls(
                [
                    call('select @@gtid_current_pos as current_gtids;'),
                    call('select @@server_id as server_id;', None),
                ]
            )

    def test_fetch_current_log_pos_with_binlog_coordinate_and_replica_server(self):
        """
        fetch_current_log_pos without enabled usage of gtid will return binlog coordinates from replica server
        """
        self.connection_config['use_gtid'] = False

        self.mysql = FastSyncTapMySql(self.connection_config, lambda x: x)
        self.mysql.is_replica = True

        with patch.object(self.mysql, 'query') as query_method_mock:
            query_method_mock.return_value = [
                {
                    'Master_Log_File': 'binlog_xyz',
                    'Read_Master_Log_Pos': 444,
                }
            ]

            result = self.mysql.fetch_current_log_pos()

            query_method_mock.assert_called_once_with('SHOW SLAVE STATUS')

            self.assertDictEqual(result, {
                'log_file': 'binlog_xyz',
                'log_pos': 444,
                'version': 1,
            })

    def test_fetch_current_log_pos_with_binlog_coordinate_and_primary_server(self):
        """
        fetch_current_log_pos without enabled usage of gtid will return binlog coordinates from primary server
        """
        self.connection_config['use_gtid'] = False

        self.mysql = FastSyncTapMySql(self.connection_config, lambda x: x)
        self.mysql.is_replica = False

        with patch.object(self.mysql, 'query') as query_method_mock:
            query_method_mock.return_value = [
                {
                    'File': 'binlog_xyz',
                    'Position': 444,
                }
            ]

            result = self.mysql.fetch_current_log_pos()

            self.assertDictEqual(result, {
                'log_file': 'binlog_xyz',
                'log_pos': 444,
                'version': 1,
            })

            query_method_mock.assert_called_once_with('SHOW MASTER STATUS')

    def test_fetch_current_log_pos_with_gtid_and_mysql_but_gtid_mode_is_off_fails(self):
        """
        If using gtid is enabled and engine is mysql but gtid mode is off, then expect an exception
        """
        self.connection_config['use_gtid'] = True

        self.mysql = FastSyncTapMySql(self.connection_config, lambda x: x)
        self.mysql.is_replica = False

        with patch.object(self.mysql, 'query') as query_method_mock:
            query_method_mock.side_effect = [
                [{'gtid_mode': 'OFF'}]
            ]

            with self.assertRaises(Exception) as context:
                self.mysql.fetch_current_log_pos()

            self.assertEqual('GTID mode is not enabled.', str(context.exception))
            query_method_mock.assert_called_once_with('select @@gtid_mode as gtid_mode;')

    def test_fetch_current_log_pos_with_gtid_and_primary_mysql_engine_finds_gtid(self):
        """
        If using gtid is enabled and engine is mysql and gtid mode is on, then it should find the expected gtid
        """
        self.connection_config['use_gtid'] = True

        self.mysql = FastSyncTapMySql(self.connection_config, lambda x: x)
        self.mysql.is_replica = False

        with patch.object(self.mysql, 'query') as query_method_mock:
            query_method_mock.side_effect = [
                [{'gtid_mode': 'ON'}],
                [{'current_gtids': 'xyz:2:4,abc:1,def:1-55'}],
                [{'server_uuid': 'abc'}],
            ]

            with patch('pymysql.connect') as mysql_connect_mock:
                result = self.mysql.fetch_current_log_pos()

            self.assertDictEqual(result, {
                'gtid': 'abc:1'
            })

            query_method_mock.assert_has_calls([
                call('select @@gtid_mode as gtid_mode;'),
                call('select @@GLOBAL.gtid_executed as current_gtids;'),
                call('select @@server_uuid as server_uuid;', None),
            ])

            mysql_connect_mock.assert_not_called()

    def test_fetch_current_log_pos_with_gtid_and_replica_mysql_engine_finds_gtid(self):
        """
        If using gtid is enabled and engine is mysql and gtid mode is on, then it should find the expected gtid
        """
        self.connection_config['use_gtid'] = True

        self.mysql = FastSyncTapMySql(self.connection_config, lambda x: x)
        self.mysql.is_replica = True

        with patch.object(self.mysql, 'query') as query_method_mock:
            query_method_mock.side_effect = [
                [{'gtid_mode': 'ON'}],
                [{'current_gtids': 'xyz:2:4,abc:1,def:1-55'}],
                [{'server_uuid': 'abc'}],
            ]

            with patch('pymysql.connect') as mysql_connect_mock:
                con = Mock()
                mysql_connect_mock.return_value = con

                result = self.mysql.fetch_current_log_pos()

            self.assertDictEqual(result, {
                'gtid': 'abc:1'
            })

            query_method_mock.assert_has_calls([
                call('select @@gtid_mode as gtid_mode;'),
                call('select @@GLOBAL.gtid_executed as current_gtids;'),
                call('select @@server_uuid as server_uuid;', con),
            ])

            mysql_connect_mock.assert_called_once()
```
**Quality signals** · avg_line_length 38.049412 · max_line_length 118 · alphanum_fraction 0.613567
`*_quality_signal` values (`qsc_code_num_words` through `qsc_codepython_frac_lines_print`, schema order): 1,833 | 16,171 | 5.10802 | 0.114566 | 0.050945 | 0.064082 | 0.042294 | 0.83061 | 0.809463 | 0.793549 | 0.753818 | 0.725729 | 0.721884 | 0 | 0.01056 | 0.291448 | 16,171 | 424 | 119 | 38.139151 | 0.806598 | 0.108899 | 0 | 0.59364 | 0 | 0 | 0.146839 | 0.013013 | 0 | 0 | 0 | 0 | 0.14841 | 1 | 0.063604 | false | 0.021201 | 0.017668 | 0 | 0.091873 | 0
Raw `qsc_*` values (same order): 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective 0 · hits 6
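Several of the surface-level signals above can be recomputed directly from `content`. A minimal sketch (the exact tokenization and rounding used by the original pipeline are not documented here, so the whitespace-based definitions below are assumptions; values may not match the stored ones bit-for-bit):

```python
def surface_signals(content: str) -> dict:
    """Recompute a few simple surface statistics from a row's content."""
    lines = content.splitlines()
    words = content.split()  # assumption: plain whitespace tokenization
    n_chars = len(content)
    return {
        "qsc_code_num_chars": n_chars,
        "qsc_code_num_words": len(words),
        "qsc_code_mean_word_length": sum(map(len, words)) / len(words) if words else 0.0,
        "qsc_code_frac_words_unique": len(set(words)) / len(words) if words else 0.0,
        "qsc_code_frac_chars_whitespace": sum(c.isspace() for c in content) / n_chars if n_chars else 0.0,
        "qsc_code_num_lines": len(lines),
        "qsc_code_num_chars_line_max": max(map(len, lines), default=0),
    }
```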
**Row 2** · hexsha `92b266720034b2fbaad6d5b06e9b0a04a3d9bde5` · size 58 · ext `py` · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | `postnl_api/__init__.py` | `eavanvalkenburg/python-postnl-api` | `4d3d9eb43f50b00bfcd25ae30cd10168b4324bf4` | ["MIT"] | 22 | 2018-06-01T14:37:09.000Z | 2022-02-19T10:22:52.000Z |
| max_issues | `postnl_api/__init__.py` | `eavanvalkenburg/python-postnl-api` | `4d3d9eb43f50b00bfcd25ae30cd10168b4324bf4` | ["MIT"] | 13 | 2018-02-04T19:45:29.000Z | 2021-12-31T13:18:19.000Z |
| max_forks | `postnl_api/__init__.py` | `eavanvalkenburg/python-postnl-api` | `4d3d9eb43f50b00bfcd25ae30cd10168b4324bf4` | ["MIT"] | 13 | 2018-01-23T14:10:12.000Z | 2021-06-16T18:39:31.000Z |

`content`:
```python
from .postnl_api import PostNL_API, UnauthorizedException
```
**Quality signals** · avg_line_length 29 · max_line_length 57 · alphanum_fraction 0.87931
`*_quality_signal` values (schema order): 7 | 58 | 7 | 0.714286 | 0.367347 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.086207 | 58 | 1 | 58 | 58 | 0.924528 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0
Raw `qsc_*` values (same order): 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0
effective 0 · hits 6
**Row 3** · hexsha `2b99f578e50ade47bbe7f6a06a966d7df1df0c99` · size 193 · ext `py` · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | `backtester/statistics/__init__.py` | `unbalancedparentheses/backtester_options` | `46efd30e405f360c560f8eae8b2ee7d26f4532db` | ["MIT"] | 91 | 2020-01-31T10:15:35.000Z | 2022-03-27T19:15:12.000Z |
| max_issues | `backtester/statistics/__init__.py` | `unbalancedparentheses/backtester_options` | `46efd30e405f360c560f8eae8b2ee7d26f4532db` | ["MIT"] | 38 | 2019-05-12T02:00:46.000Z | 2019-12-06T14:54:25.000Z |
| max_forks | `backtester/statistics/__init__.py` | `unbalancedparentheses/backtester_options` | `46efd30e405f360c560f8eae8b2ee7d26f4532db` | ["MIT"] | 20 | 2020-06-12T08:21:30.000Z | 2022-03-28T05:52:59.000Z |

`content`:
```python
from .charts import monthly_returns_heatmap, returns_histogram, returns_chart
from .stats import summary

__all__ = ['monthly_returns_heatmap', 'returns_histogram', 'returns_chart', 'summary']
```
**Quality signals** · avg_line_length 38.6 · max_line_length 86 · alphanum_fraction 0.818653
`*_quality_signal` values (schema order): 23 | 193 | 6.347826 | 0.478261 | 0.191781 | 0.287671 | 0.383562 | 0.671233 | 0.671233 | 0.671233 | 0 | 0 | 0 | 0 | 0 | 0.088083 | 193 | 4 | 87 | 48.25 | 0.829545 | 0 | 0 | 0 | 0 | 0 | 0.310881 | 0.119171 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.666667 | 0 | 0.666667 | 0
Raw `qsc_*` values (same order): 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0
effective 0 · hits 6
**Row 4** · hexsha `2bb76de740a46b0084692ec3d6df55d6b8eb8528` · size 11,529 · ext `py` · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | `code/reasoningtool/kg-construction/tests/UpdateNodesInfoDescTests.py` | `andrewsu/RTX` | `dd1de262d0817f7e6d2f64e5bec7d5009a3a2740` | ["MIT"] | 31 | 2018-03-05T20:01:10.000Z | 2022-02-01T03:31:22.000Z |
| max_issues | `code/reasoningtool/kg-construction/tests/UpdateNodesInfoDescTests.py` | `andrewsu/RTX` | `dd1de262d0817f7e6d2f64e5bec7d5009a3a2740` | ["MIT"] | 1,774 | 2018-03-06T01:55:03.000Z | 2022-03-31T03:09:04.000Z |
| max_forks | `code/reasoningtool/kg-construction/tests/UpdateNodesInfoDescTests.py` | `andrewsu/RTX` | `dd1de262d0817f7e6d2f64e5bec7d5009a3a2740` | ["MIT"] | 19 | 2018-05-10T00:43:19.000Z | 2022-03-08T19:26:16.000Z |

`content`:
```python
import unittest
import json
import random
import os,sys

parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)

from Neo4jConnection import Neo4jConnection
from QueryEBIOLS import QueryEBIOLS
from QueryOMIM import QueryOMIM
from QueryMyGene import QueryMyGene
from QueryMyChem import QueryMyChem
from QueryReactome import QueryReactome
from QueryKEGG import QueryKEGG
from QueryPubChem import QueryPubChem
from QueryHMDB import QueryHMDB

sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../../../")  # code directory
from RTXConfiguration import RTXConfiguration


def random_int_list(start, stop, length):
    start, stop = (int(start), int(stop)) if start <= stop else (int(stop), int(start))
    length = int(abs(length)) if length else 0
    random_list = []
    for i in range(length):
        random_list.append(random.randint(start, stop))
    return random_list


class UpdateNodesInfoDescTestCase(unittest.TestCase):

    rtxConfig = RTXConfiguration()

    def test_update_anatomy_nodes_desc(self):
        conn = Neo4jConnection(self.rtxConfig.neo4j_bolt, self.rtxConfig.neo4j_username, self.rtxConfig.neo4j_password)
        nodes = conn.get_anatomy_nodes()

        # generate random number array
        random_indexes = random_int_list(0, len(nodes)-1, 100)

        for i in random_indexes:
            # retrieve data from API
            node_id = nodes[i]
            desc = QueryEBIOLS.get_anatomy_description(node_id)

            # retrieve data from Neo4j
            node = conn.get_anatomy_node(node_id)
            self.assertIsNotNone(node)
            self.assertIsNotNone(node['n']['id'])
            self.assertIsNotNone(node['n']['description'])
            self.assertEqual(node_id, node['n']['id'])
            if node['n']['description'] != "None":
                self.assertEqual(desc, node['n']['description'])

        conn.close()

    def test_update_phenotype_nodes_desc(self):
        conn = Neo4jConnection(self.rtxConfig.neo4j_bolt, self.rtxConfig.neo4j_username, self.rtxConfig.neo4j_password)
        nodes = conn.get_phenotype_nodes()

        # generate random number array
        random_indexes = random_int_list(0, len(nodes)-1, 100)

        for i in random_indexes:
            # retrieve data from API
            node_id = nodes[i]
            desc = QueryEBIOLS.get_phenotype_description(node_id)

            # retrieve data from Neo4j
            node = conn.get_phenotype_node(node_id)
            self.assertIsNotNone(node)
            self.assertIsNotNone(node['n']['id'])
            self.assertIsNotNone(node['n']['description'])
            self.assertEqual(node_id, node['n']['id'])
            if node['n']['description'] != "None":
                self.assertEqual(desc, node['n']['description'])

        conn.close()

    def test_update_microRNA_nodes_desc(self):
        conn = Neo4jConnection(self.rtxConfig.neo4j_bolt, self.rtxConfig.neo4j_username, self.rtxConfig.neo4j_password)
        nodes = conn.get_microRNA_nodes()

        # generate random number array
        random_indexes = random_int_list(0, len(nodes)-1, 100)

        mg = QueryMyGene()
        for i in random_indexes:
            # retrieve data from API
            node_id = nodes[i]
            desc = mg.get_microRNA_desc(node_id)

            # retrieve data from Neo4j
            node = conn.get_microRNA_node(node_id)
            self.assertIsNotNone(node)
            self.assertIsNotNone(node['n']['id'])
            self.assertIsNotNone(node['n']['description'])
            self.assertEqual(node_id, node['n']['id'])
            if node['n']['description'] != "None":
                self.assertEqual(desc, node['n']['description'])

        conn.close()

    def test_update_pathway_nodes_desc(self):
        conn = Neo4jConnection(self.rtxConfig.neo4j_bolt, self.rtxConfig.neo4j_username, self.rtxConfig.neo4j_password)
        nodes = conn.get_pathway_nodes()

        # generate random number array
        random_indexes = random_int_list(0, len(nodes) - 1, 100)

        for i in random_indexes:
            # retrieve data from API
            node_id = nodes[i]
            desc = QueryReactome.get_pathway_desc(node_id)

            # retrieve data from Neo4j
            node = conn.get_pathway_node(node_id)
            self.assertIsNotNone(node)
            self.assertIsNotNone(node['n']['id'])
            self.assertIsNotNone(node['n']['description'])
            self.assertEqual(node_id, node['n']['id'])
            if node['n']['description'] != "None":
                self.assertEqual(desc, node['n']['description'])

        conn.close()

    def test_update_protein_nodes_desc(self):
        conn = Neo4jConnection(self.rtxConfig.neo4j_bolt, self.rtxConfig.neo4j_username, self.rtxConfig.neo4j_password)
        nodes = conn.get_protein_nodes()

        # generate random number array
        random_indexes = random_int_list(0, len(nodes)-1, 100)

        mg = QueryMyGene()
        for i in random_indexes:
            # retrieve data from API
            node_id = nodes[i]
            desc = mg.get_protein_desc(node_id)

            # retrieve data from Neo4j
            node = conn.get_protein_node(node_id)
            self.assertIsNotNone(node)
            self.assertIsNotNone(node['n']['id'])
            self.assertIsNotNone(node['n']['description'])
            self.assertEqual(node_id, node['n']['id'])
            if node['n']['description'] != "None":
                self.assertEqual(desc, node['n']['description'])

        conn.close()

    def test_update_disease_nodes_desc(self):
        conn = Neo4jConnection(self.rtxConfig.neo4j_bolt, self.rtxConfig.neo4j_username, self.rtxConfig.neo4j_password)
        nodes = conn.get_disease_nodes()

        # generate random number array
        random_indexes = random_int_list(0, len(nodes)-1, 100)

        qo = QueryOMIM()
        for i in random_indexes:
            # retrieve data from API
            node_id = nodes[i]
            if node_id[:4] == "OMIM":
                desc = qo.disease_mim_to_description(node_id)
            elif node_id[:4] == "DOID":
                desc = QueryEBIOLS.get_disease_description(node_id)

            # retrieve data from Neo4j
            node = conn.get_disease_node(node_id)
            self.assertIsNotNone(node)
            self.assertIsNotNone(node['n']['id'])
            self.assertIsNotNone(node['n']['description'])
            self.assertEqual(node_id, node['n']['id'])
            if node['n']['description'] != "None":
                self.assertEqual(desc, node['n']['description'])

        conn.close()

    def test_update_chemical_substance_entity(self):
        conn = Neo4jConnection(self.rtxConfig.neo4j_bolt, self.rtxConfig.neo4j_username, self.rtxConfig.neo4j_password)
        nodes = conn.get_chemical_substance_nodes()

        # generate random number array
        random_indexes = random_int_list(0, len(nodes)-1, 100)

        for i in random_indexes:
            # retrieve data from API
            node_id = nodes[i]
            desc = QueryMyChem.get_chemical_substance_description(node_id)

            # retrieve data from Neo4j
            node = conn.get_chemical_substance_node(node_id)
            self.assertIsNotNone(node)
            self.assertIsNotNone(node['n']['rtx_name'])
            self.assertIsNotNone(node['n']['description'])
            self.assertEqual(node_id, node['n']['rtx_name'])
            if node['n']['description'] != "None":
                self.assertEqual(desc, node['n']['description'])

        conn.close()

    def test_update_bio_process_entity(self):
        conn = Neo4jConnection(self.rtxConfig.neo4j_bolt, self.rtxConfig.neo4j_username, self.rtxConfig.neo4j_password)
        nodes = conn.get_bio_process_nodes()

        # generate random number array
        random_indexes = random_int_list(0, len(nodes)-1, 100)

        for i in random_indexes:
            # retrieve data from API
            node_id = nodes[i]
            desc = QueryEBIOLS.get_bio_process_description(node_id)

            # retrieve data from Neo4j
            node = conn.get_bio_process_node(node_id)
            self.assertIsNotNone(node)
            self.assertIsNotNone(node['n']['id'])
            self.assertIsNotNone(node['n']['description'])
            self.assertEqual(node_id, node['n']['id'])
            if node['n']['description'] != "None":
                self.assertEqual(desc, node['n']['description'])

        conn.close()

    def test_update_cellular_component_desc(self):
        conn = Neo4jConnection(self.rtxConfig.neo4j_bolt, self.rtxConfig.neo4j_username, self.rtxConfig.neo4j_password)
        nodes = conn.get_cellular_component_nodes()

        # generate random number array
        random_indexes = random_int_list(0, len(nodes)-1, 100)

        for i in random_indexes:
            # retrieve data from BioLink API
            node_id = nodes[i]
            desc = QueryEBIOLS.get_cellular_component_description(node_id)

            # retrieve data from Neo4j
            node = conn.get_node(node_id)
            self.assertIsNotNone(node)
            self.assertIsNotNone(node['n']['id'])
            self.assertIsNotNone(node['n']['description'])
            self.assertEqual(node_id, node['n']['id'])
            if node['n']['description'] != "None":
                self.assertEqual(desc, node['n']['description'])

        conn.close()

    def test_update_molecular_function_desc(self):
        conn = Neo4jConnection(self.rtxConfig.neo4j_bolt, self.rtxConfig.neo4j_username, self.rtxConfig.neo4j_password)
        nodes = conn.get_molecular_function_nodes()

        # generate random number array
        random_indexes = random_int_list(0, len(nodes)-1, 100)

        for i in random_indexes:
            # retrieve data from BioLink API
            node_id = nodes[i]
            desc = QueryEBIOLS.get_molecular_function_description(node_id)

            # retrieve data from Neo4j
            node = conn.get_node(node_id)
            self.assertIsNotNone(node)
            self.assertIsNotNone(node['n']['id'])
            self.assertIsNotNone(node['n']['description'])
            self.assertEqual(node_id, node['n']['id'])
            if node['n']['description'] != "None":
                self.assertEqual(desc, node['n']['description'])

        conn.close()

    def test_update_metabolite_desc(self):
        conn = Neo4jConnection(self.rtxConfig.neo4j_bolt, self.rtxConfig.neo4j_username, self.rtxConfig.neo4j_password)
        nodes = conn.get_metabolite_nodes()

        # generate random number array
        random_indexes = random_int_list(0, len(nodes) - 1, 100)

        for i in random_indexes:
            # retrieve data from BioLink API
            node_id = nodes[i]
            pubchem_id = QueryKEGG.map_kegg_compound_to_pub_chem_id(node_id)
            hmdb_url = QueryPubChem.get_description_url(pubchem_id)
            desc = QueryHMDB.get_compound_desc(hmdb_url)

            # retrieve data from Neo4j
            node = conn.get_node(node_id)
            self.assertIsNotNone(node)
            self.assertIsNotNone(node['n']['id'])
            self.assertIsNotNone(node['n']['description'])
            self.assertEqual(node_id, node['n']['id'])
            if node['n']['description'] != "None":
                self.assertEqual(desc, node['n']['description'])

        conn.close()


if __name__ == '__main__':
    unittest.main()
```
**Quality signals** · avg_line_length 36.951923 · max_line_length 119 · alphanum_fraction 0.62564
`*_quality_signal` values (schema order): 1,333 | 11,529 | 5.202551 | 0.083271 | 0.039654 | 0.085652 | 0.076136 | 0.79553 | 0.793655 | 0.793655 | 0.785004 | 0.785004 | 0.785004 | 0 | 0.01362 | 0.261254 | 11,529 | 311 | 120 | 37.07074 | 0.800634 | 0.076763 | 0 | 0.616915 | 0 | 0 | 0.051287 | 0 | 0 | 0 | 0 | 0 | 0.273632 | 1 | 0.059701 | false | 0.054726 | 0.069652 | 0 | 0.144279 | 0
Raw `qsc_*` values (same order): 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0
effective 0 · hits 6
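The high `frac_chars_dupe_*grams` quality signals on this row (≈0.79 for 5-grams and above) reflect how repetitive the test file is: each `test_update_*` method repeats the same fetch-and-assert block. A plausible way to compute such a signal, assuming word-level n-grams and counting the characters covered by any n-gram that occurs more than once (the pipeline's exact definition is not given in this table, so this is a sketch, not the reference implementation):

```python
from collections import Counter

def frac_chars_dupe_ngrams(content: str, n: int) -> float:
    """Fraction of word characters belonging to duplicated word n-grams."""
    words = content.split()
    if len(words) < n:
        return 0.0
    counts = Counter(tuple(words[i:i + n]) for i in range(len(words) - n + 1))
    total_chars = sum(map(len, words))
    # characters covered by n-grams that occur more than once
    dupe_chars = sum(sum(map(len, ng)) * c for ng, c in counts.items() if c > 1)
    return min(1.0, dupe_chars / total_chars) if total_chars else 0.0
```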
**Row 5** · hexsha `2bc5e4715668dbd81dc2dbe3b9c79324b54c3e88` · size 1,468 · ext `py` · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | `api_tests/logs/views/test_log_contributors.py` | `sf2ne/Playground` | `95b2d222d7ac43baca0249acbfc34e043d6a95b3` | ["Apache-2.0"] | null | null | null |
| max_issues | `api_tests/logs/views/test_log_contributors.py` | `sf2ne/Playground` | `95b2d222d7ac43baca0249acbfc34e043d6a95b3` | ["Apache-2.0"] | 13 | 2020-03-24T15:29:41.000Z | 2022-03-11T23:15:28.000Z |
| max_forks | `api_tests/logs/views/test_log_contributors.py` | `sf2ne/Playground` | `95b2d222d7ac43baca0249acbfc34e043d6a95b3` | ["Apache-2.0"] | null | null | null |

`content`:
```python
# -*- coding: utf-8 -*-
import httplib as http

from nose.tools import *  # noqa

from test_log_nodes_list import LogsTestCase


class TestLogContributors(LogsTestCase):

    def test_log_detail_private_logged_in_contributor_can_access_logs(self):
        res = self.app.get(self.private_log_contribs_url, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        json_data = res.json['data']
        assert_equal(json_data[0]['id'], self.user._id)

    def test_log_detail_private_not_logged_in_cannot_access_logs(self):
        res = self.app.get(self.private_log_contribs_url, expect_errors=True)
        assert_equal(res.status_code, 401)

    def test_log_detail_private_non_contributor_cannot_access_logs(self):
        res = self.app.get(self.private_log_contribs_url, auth=self.user_two.auth, expect_errors=True)
        assert_equal(res.status_code, 403)

    def test_log_detail_public_not_logged_in_can_access_logs(self):
        res = self.app.get(self.public_log_contribs_url, expect_errors=True)
        assert_equal(res.status_code, 200)
        json_data = res.json['data']
        assert_equal(json_data[0]['id'], self.user._id)

    def test_log_detail_public_non_contributor_can_access_logs(self):
        res = self.app.get(self.public_log_contribs_url, auth=self.user_two.auth, expect_errors=True)
        assert_equal(res.status_code, 200)
        json_data = res.json['data']
        assert_equal(json_data[0]['id'], self.user._id)
```
**Quality signals** · avg_line_length 41.942857 · max_line_length 102 · alphanum_fraction 0.737057
`*_quality_signal` values (schema order): 223 | 1,468 | 4.452915 | 0.242152 | 0.072508 | 0.050352 | 0.080564 | 0.81571 | 0.734139 | 0.734139 | 0.734139 | 0.734139 | 0.734139 | 0 | 0.01541 | 0.160082 | 1,468 | 34 | 103 | 43.176471 | 0.789943 | 0.017711 | 0 | 0.36 | 0 | 0 | 0.012509 | 0 | 0 | 0 | 0 | 0 | 0.32 | 1 | 0.2 | false | 0 | 0.12 | 0 | 0.36 | 0
Raw `qsc_*` values (same order): 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective 0 · hits 6
**Row 6** · hexsha `a6273360ab0f4043d4e8c469a5b8e90c2f8af159` · size 61 · ext `py` · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | `test/run/t286.py` | `timmartin/skulpt` | `2e3a3fbbaccc12baa29094a717ceec491a8a6750` | ["MIT"] | 2,671 | 2015-01-03T08:23:25.000Z | 2022-03-31T06:15:48.000Z |
| max_issues | `test/run/t286.py` | `csev/skulpt` | `9aa25b7dbf29f23ee8d3140d01a6f4353d12e66f` | ["MIT"] | 972 | 2015-01-05T08:11:00.000Z | 2022-03-29T13:47:15.000Z |
| max_forks | `test/run/t286.py` | `csev/skulpt` | `9aa25b7dbf29f23ee8d3140d01a6f4353d12e66f` | ["MIT"] | 845 | 2015-01-03T19:53:36.000Z | 2022-03-29T18:34:22.000Z |

`content`:
print "Yes" if True else "No"
print "Yes" if False else "No"
**Quality signals** · avg_line_length 20.333333 · max_line_length 30 · alphanum_fraction 0.672131
`*_quality_signal` values (schema order): 12 | 61 | 3.416667 | 0.583333 | 0.390244 | 0.487805 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.196721 | 61 | 2 | 31 | 30.5 | 0.836735 | 0 | 0 | 0 | 0 | 0 | 0.163934 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 1
Raw `qsc_*` values (same order): 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1
effective 0 · hits 6
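This `t286.py` file uses Python 2 `print` statements, so it does not parse as Python 3 source; that would be consistent with several of its Python-specific signals coming back 0 or null here, assuming those signals are derived from a Python 3 AST parse (an assumption, not something the table states). A Python 3 equivalent of the file would be:

```python
print("Yes" if True else "No")
print("Yes" if False else "No")
```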
**Row 7** · hexsha `a63b95d67c397c392424d3857b19286459314527` · size 42 · ext `py` · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | `pysherasync/__init__.py` | `araa47/pysherasync` | `a3aafff7bb81a9887fe48a45810d2fdc10a25331` | ["MIT"] | 8 | 2019-02-16T16:56:51.000Z | 2021-07-28T17:19:11.000Z |
| max_issues | `pysherasync/__init__.py` | `araa47/pysherasync` | `a3aafff7bb81a9887fe48a45810d2fdc10a25331` | ["MIT"] | 2 | 2020-08-11T01:50:37.000Z | 2020-09-14T01:44:46.000Z |
| max_forks | `pysherasync/__init__.py` | `araa47/pysherasync` | `a3aafff7bb81a9887fe48a45810d2fdc10a25331` | ["MIT"] | 1 | 2019-05-20T06:16:15.000Z | 2019-05-20T06:16:15.000Z |

`content`:
```python
from .pysherasync import PusherAsyncClient
```
**Quality signals** · avg_line_length 42 · max_line_length 42 · alphanum_fraction 0.904762
`*_quality_signal` values (schema order): 4 | 42 | 9.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.071429 | 42 | 1 | 42 | 42 | 0.974359 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0
Raw `qsc_*` values (same order): 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0
effective 0 · hits 6
**Row 8** · hexsha `a6ece1869263e493cd077191daf1be7fa5daab86` · size 12,075 · ext `py` · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | `mamba/blockchain/update_folder/commands.py` | `ninhpham0902/akc-mamba` | `3454b8365d69a4c5f543f71760a495296fa0a5e8` | ["MIT"] | 7 | 2020-04-22T02:35:24.000Z | 2022-01-16T17:14:01.000Z |
| max_issues | `mamba/blockchain/update_folder/commands.py` | `ninhpham0902/akc-mamba` | `3454b8365d69a4c5f543f71760a495296fa0a5e8` | ["MIT"] | 9 | 2020-04-07T09:11:08.000Z | 2020-12-29T02:35:12.000Z |
| max_forks | `mamba/blockchain/update_folder/commands.py` | `ninhpham0902/akc-mamba` | `3454b8365d69a4c5f543f71760a495296fa0a5e8` | ["MIT"] | 7 | 2020-07-30T02:27:14.000Z | 2022-02-13T09:58:55.000Z |

`content`:
```python
import click
import os
from settings import settings
from os import path
from shutil import copyfile
from utils import hiss, util


def update_folder():
    hiss.rattle('Update folder crt in EFS')

    # Find efs pod
    pods = settings.k8s.find_pod(namespace="default", keyword="test-efs")
    if not pods:
        return hiss.hiss('cannot find tiller pod')

    all_command = ''
    prepare_cmd = 'rm -rf %s/akc-ca-data/crypto-config-v1;' % settings.EFS_ROOT
    prepare_cmd += 'cd %s/akc-ca-data/;' % settings.EFS_ROOT
    all_command += prepare_cmd

    if settings.ORDERER_ORGS != '':
        # Build orderer command
        orderers = settings.ORDERER_ORGS.split(' ')
        orderer_cmd = ''
        for orderer in orderers:
            # Get domain
            domain = util.get_domain(orderer)
            orderer_cmd += (''
                'mkdir -p crypto-config-v1/ordererOrganizations/'+domain+'/ca;'
                'mkdir -p crypto-config-v1/ordererOrganizations/'+domain+'/msp/admincerts;'
                'mkdir -p crypto-config-v1/ordererOrganizations/'+domain+'/msp/cacerts;'
                'mkdir -p crypto-config-v1/ordererOrganizations/'+domain+'/msp/tlscacerts;'
                'mkdir -p crypto-config-v1/ordererOrganizations/'+domain+'/tlsca;'
                'mkdir -p crypto-config-v1/ordererOrganizations/'+domain+'/users/admin/msp/admincerts;'
                'mkdir -p crypto-config-v1/ordererOrganizations/'+domain+'/users/admin/msp/cacerts;'
                'mkdir -p crypto-config-v1/ordererOrganizations/'+domain+'/users/admin/msp/keystore;'
                'mkdir -p crypto-config-v1/ordererOrganizations/'+domain+'/users/admin/msp/signcerts;'
                'mkdir -p crypto-config-v1/ordererOrganizations/'+domain+'/users/admin/msp/tlscacerts;'
                'mkdir -p crypto-config-v1/ordererOrganizations/'+domain+'/users/admin/tls;'
                '')
            for index in range(int(settings.NUM_ORDERERS)):
                orderer_cmd += (''
                    'mkdir -p crypto-config-v1/ordererOrganizations/'+domain+'/orderers/orderer'+str(index)+'-'+orderer+'.'+domain+'/msp/admincerts;'
                    'mkdir -p crypto-config-v1/ordererOrganizations/'+domain+'/orderers/orderer'+str(index)+'-'+orderer+'.'+domain+'/msp/cacerts;'
                    'mkdir -p crypto-config-v1/ordererOrganizations/'+domain+'/orderers/orderer'+str(index)+'-'+orderer+'.'+domain+'/msp/keystore;'
                    'mkdir -p crypto-config-v1/ordererOrganizations/'+domain+'/orderers/orderer'+str(index)+'-'+orderer+'.'+domain+'/msp/signcerts;'
                    'mkdir -p crypto-config-v1/ordererOrganizations/'+domain+'/orderers/orderer'+str(index)+'-'+orderer+'.'+domain+'/msp/tlscacerts;'
                    'mkdir -p crypto-config-v1/ordererOrganizations/'+domain+'/orderers/orderer'+str(index)+'-'+orderer+'.'+domain+'/tls;'
                    'cp ica-'+orderer+'-ca-chain.pem crypto-config-v1/ordererOrganizations/'+domain+'/orderers/orderer'+str(index)+'-'+orderer+'.'+domain+'/msp/cacerts/ca.'+domain+'-cert.pem;'
                    'cp ica-'+orderer+'-ca-chain.pem crypto-config-v1/ordererOrganizations/'+domain+'/orderers/orderer'+str(index)+'-'+orderer+'.'+domain+'/msp/tlscacerts/tlsca.'+domain+'-cert.pem;'
                    'cp ica-'+orderer+'-ca-chain.pem crypto-config-v1/ordererOrganizations/'+domain+'/orderers/orderer'+str(index)+'-'+orderer+'.'+domain+'/tls/tlsca.'+domain+'-cert.pem;'
                    'cp crypto-config/'+orderer+'.'+domain+'/users/admin/msp/signcerts/cert.pem crypto-config-v1/ordererOrganizations/'+domain+'/orderers/orderer'+str(index)+'-'+orderer+'.'+domain+'/msp/admincerts/cert.pem;'
                    'cp crypto-config/'+orderer+'.'+domain+'/orderers/orderer'+str(index)+'-'+orderer+'.'+domain+'/msp/signcerts/cert.pem crypto-config-v1/ordererOrganizations/'+domain+'/orderers/orderer'+str(index)+'-'+orderer+'.'+domain+'/msp/signcerts/;'
                    'cp crypto-config/'+orderer+'.'+domain+'/orderers/orderer'+str(index)+'-'+orderer+'.'+domain+'/msp/keystore/*_sk crypto-config-v1/ordererOrganizations/'+domain+'/orderers/orderer'+str(index)+'-'+orderer+'.'+domain+'/msp/keystore/key.pem;'
                    'cp crypto-config/'+orderer+'.'+domain+'/orderers/orderer'+str(index)+'-'+orderer+'.'+domain+'/tls/server.crt crypto-config-v1/ordererOrganizations/'+domain+'/orderers/orderer'+str(index)+'-'+orderer+'.'+domain+'/tls/;'
                    'cp crypto-config/'+orderer+'.'+domain+'/orderers/orderer'+str(index)+'-'+orderer+'.'+domain+'/tls/server.key crypto-config-v1/ordererOrganizations/'+domain+'/orderers/orderer'+str(index)+'-'+orderer+'.'+domain+'/tls/server.key;'
                    '')
            orderer_cmd += (''
                'cp ica-'+orderer+'-ca-chain.pem crypto-config-v1/ordererOrganizations/'+domain+'/ca/ca.'+domain+'-cert.pem;'
                'cp ica-'+orderer+'-ca-chain.pem crypto-config-v1/ordererOrganizations/'+domain+'/msp/cacerts/ca.'+domain+'-cert.pem;'
                'cp ica-'+orderer+'-ca-chain.pem crypto-config-v1/ordererOrganizations/'+domain+'/users/admin/msp/cacerts/ca.'+domain+'-cert.pem;'
                'cp ica-'+orderer+'-ca-chain.pem crypto-config-v1/ordererOrganizations/'+domain+'/tlsca/tlsca.'+domain+'-cert.pem;'
                'cp ica-'+orderer+'-ca-chain.pem crypto-config-v1/ordererOrganizations/'+domain+'/msp/tlscacerts/tlsca.'+domain+'-cert.pem;'
                'cp ica-'+orderer+'-ca-chain.pem crypto-config-v1/ordererOrganizations/'+domain+'/users/admin/msp/tlscacerts/tlsca.'+domain+'-cert.pem;'
                'cp ica-'+orderer+'-ca-chain.pem crypto-config-v1/ordererOrganizations/'+domain+'/users/admin/tls/tlsca.'+domain+'-cert.pem;'
                'cp crypto-config/'+orderer+'.'+domain+'/users/admin/msp/signcerts/cert.pem crypto-config-v1/ordererOrganizations/'+domain+'/msp/admincerts/cert.pem;'
                'cp crypto-config/'+orderer+'.'+domain+'/users/admin/msp/signcerts/cert.pem crypto-config-v1/ordererOrganizations/'+domain+'/users/admin/msp/admincerts/cert.pem;'
                'cp crypto-config/'+orderer+'.'+domain+'/users/admin/msp/keystore/*_sk crypto-config-v1/ordererOrganizations/'+domain+'/users/admin/msp/keystore/key.pem;'
                'cp crypto-config/'+orderer+'.'+domain+'/users/admin/msp/signcerts/cert.pem crypto-config-v1/ordererOrganizations/'+domain+'/users/admin/msp/signcerts/cert.pem;'
                'echo "succeed";'
                '')
        all_command += orderer_cmd

    # Build peer command
    peers = settings.PEER_ORGS.split(' ')
    peer_cmd = ''
    for peer in peers:
        # Get domain
        domain = util.get_domain(peer)
        peer_cmd += (''
            'mkdir -p crypto-config-v1/peerOrganizations/'+domain+'/ca;'
            'mkdir -p crypto-config-v1/peerOrganizations/'+domain+'/msp/admincerts;'
            'mkdir -p crypto-config-v1/peerOrganizations/'+domain+'/msp/cacerts;'
            'mkdir -p crypto-config-v1/peerOrganizations/'+domain+'/msp/tlscacerts;'
            'mkdir -p crypto-config-v1/peerOrganizations/'+domain+'/tlsca;'
            'mkdir -p crypto-config-v1/peerOrganizations/'+domain+'/users/admin/msp/admincerts;'
            'mkdir -p crypto-config-v1/peerOrganizations/'+domain+'/users/admin/msp/cacerts;'
            'mkdir -p crypto-config-v1/peerOrganizations/'+domain+'/users/admin/msp/keystore;'
            'mkdir -p crypto-config-v1/peerOrganizations/'+domain+'/users/admin/msp/signcerts;'
            'mkdir -p crypto-config-v1/peerOrganizations/'+domain+'/users/admin/msp/tlscacerts;'
            'mkdir -p crypto-config-v1/peerOrganizations/'+domain+'/users/admin/tls;'
            '')
        for index in range(int(settings.NUM_PEERS)):
            peer_cmd += (''
                'mkdir -p crypto-config-v1/peerOrganizations/'+domain+'/peers/peer'+str(index)+'.'+domain+'/msp/admincerts;'
                'mkdir -p crypto-config-v1/peerOrganizations/'+domain+'/peers/peer'+str(index)+'.'+domain+'/msp/cacerts;'
                'mkdir -p crypto-config-v1/peerOrganizations/'+domain+'/peers/peer'+str(index)+'.'+domain+'/msp/keystore;'
                'mkdir -p crypto-config-v1/peerOrganizations/'+domain+'/peers/peer'+str(index)+'.'+domain+'/msp/signcerts;'
                'mkdir -p crypto-config-v1/peerOrganizations/'+domain+'/peers/peer'+str(index)+'.'+domain+'/msp/tlscacerts;'
                'mkdir -p crypto-config-v1/peerOrganizations/'+domain+'/peers/peer'+str(index)+'.'+domain+'/tls;'
                'cp ica-'+peer+'-ca-chain.pem crypto-config-v1/peerOrganizations/'+domain+'/peers/peer'+str(index)+'.'+domain+'/msp/cacerts/ca.'+domain+'-cert.pem;'
                'cp ica-'+peer+'-ca-chain.pem crypto-config-v1/peerOrganizations/'+domain+'/peers/peer'+str(index)+'.'+domain+'/msp/tlscacerts/tlsca.'+domain+'-cert.pem;'
                'cp ica-'+peer+'-ca-chain.pem crypto-config-v1/peerOrganizations/'+domain+'/peers/peer'+str(index)+'.'+domain+'/tls/tlsca.'+domain+'-cert.pem;'
                'cp crypto-config/'+peer+'.'+domain+'/users/admin/msp/signcerts/cert.pem crypto-config-v1/peerOrganizations/'+domain+'/peers/peer'+str(index)+'.'+domain+'/msp/admincerts/cert.pem;'
                'cp crypto-config/'+peer+'.'+domain+'/peers/peer'+str(index)+'-'+peer+'.'+domain+'/msp/signcerts/cert.pem crypto-config-v1/peerOrganizations/'+domain+'/peers/peer'+str(index)+'.'+domain+'/msp/signcerts/;'
                'cp crypto-config/'+peer+'.'+domain+'/peers/peer'+str(index)+'-'+peer+'.'+domain+'/msp/keystore/*_sk crypto-config-v1/peerOrganizations/'+domain+'/peers/peer'+str(index)+'.'+domain+'/msp/keystore/key.pem;'
                'cp crypto-config/'+peer+'.'+domain+'/peers/peer'+str(index)+'-'+peer+'.'+domain+'/tls/server.crt crypto-config-v1/peerOrganizations/'+domain+'/peers/peer'+str(index)+'.'+domain+'/tls/;'
                'cp crypto-config/'+peer+'.'+domain+'/peers/peer'+str(index)+'-'+peer+'.'+domain+'/tls/server.key crypto-config-v1/peerOrganizations/'+domain+'/peers/peer'+str(index)+'.'+domain+'/tls/server.key;'
                '')
        peer_cmd += (''
            'cp ica-'+peer+'-ca-chain.pem crypto-config-v1/peerOrganizations/'+domain+'/ca/ca.'+domain+'-cert.pem;'
            'cp ica-'+peer+'-ca-chain.pem crypto-config-v1/peerOrganizations/'+domain+'/msp/cacerts/ca.'+domain+'-cert.pem;'
            'cp ica-'+peer+'-ca-chain.pem crypto-config-v1/peerOrganizations/'+domain+'/users/admin/msp/cacerts/ca.'+domain+'-cert.pem;'
            'cp ica-'+peer+'-ca-chain.pem crypto-config-v1/peerOrganizations/'+domain+'/tlsca/tlsca.'+domain+'-cert.pem;'
            'cp ica-'+peer+'-ca-chain.pem crypto-config-v1/peerOrganizations/'+domain+'/msp/tlscacerts/tlsca.'+domain+'-cert.pem;'
            'cp ica-'+peer+'-ca-chain.pem crypto-config-v1/peerOrganizations/'+domain+'/users/admin/msp/tlscacerts/tlsca.'+domain+'-cert.pem;'
            'cp ica-'+peer+'-ca-chain.pem crypto-config-v1/peerOrganizations/'+domain+'/users/admin/tls/tlsca.'+domain+'-cert.pem;'
            'cp crypto-config/'+peer+'.'+domain+'/users/admin/msp/signcerts/cert.pem crypto-config-v1/peerOrganizations/'+domain+'/msp/admincerts/cert.pem;'
            'cp crypto-config/'+peer+'.'+domain+'/users/admin/msp/signcerts/cert.pem crypto-config-v1/peerOrganizations/'+domain+'/users/admin/msp/admincerts/cert.pem;'
            'cp crypto-config/'+peer+'.'+domain+'/users/admin/msp/keystore/* crypto-config-v1/peerOrganizations/'+domain+'/users/admin/msp/keystore/key.pem;'
            'cp crypto-config/'+peer+'.'+domain+'/users/admin/msp/signcerts/cert.pem crypto-config-v1/peerOrganizations/'+domain+'/users/admin/msp/signcerts/cert.pem;'
            '')
    all_command += peer_cmd

    # Exec command
    exec_command = [
        '/bin/bash',
        '-c',
        '%s' % (all_command)]

    result_get_folder = settings.k8s.exec_pod(
        podName=pods[0], namespace="default", command=exec_command)
    hiss.sub_echo(result_get_folder.data)

    return True


@click.command('updatefolder', short_help="Update folder crypto-config-v1 in EFS")
def updatefolder():
    update_folder()
```
**Quality signals** · avg_line_length 83.275862 · max_line_length 258 · alphanum_fraction 0.640745
`*_quality_signal` values (schema order): 1,421 | 12,075 | 5.415201 | 0.068262 | 0.14347 | 0.134633 | 0.159064 | 0.884211 | 0.883171 | 0.875893 | 0.869786 | 0.8564 | 0.823002 | 0 | 0.007568 | 0.157433 | 12,075 | 144 | 259 | 83.854167 | 0.748771 | 0.007288 | 0 | 0.096774 | 0 | 0 | 0.545242 | 0.327796 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016129 | false | 0 | 0.048387 | 0 | 0.080645 | 0
Raw `qsc_*` values (same order): 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective 0 · hits 6
**Row 9** · hexsha `4712d4a398852bf9464bfb726a4829d433094475` · size 210 · ext `py` · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | `simple_steganography/decorators/__init__.py` | `karafra/steg-utility` | `04eef24d7c6baff636522764fc7c8e39f0d2f743` | ["Apache-2.0"] | 1 | 2022-01-26T01:07:25.000Z | 2022-01-26T01:07:25.000Z |
| max_issues | `simple_steganography/decorators/__init__.py` | `karafra/steg-utility` | `04eef24d7c6baff636522764fc7c8e39f0d2f743` | ["Apache-2.0"] | 8 | 2022-01-24T14:11:27.000Z | 2022-03-28T08:55:19.000Z |
| max_forks | `simple_steganography/decorators/__init__.py` | `karafra/steg-utility` | `04eef24d7c6baff636522764fc7c8e39f0d2f743` | ["Apache-2.0"] | null | null | null |

`content`:
```python
#!/usr/bin/env python3
"""Module storing all decorators
------------
Version: 1.0
------------
----------
Since: 1.0
----------
---------------
Author: Karafra
---------------
"""
from .notNone import NotNone
```
**Quality signals** · avg_line_length 14 · max_line_length 32 · alphanum_fraction 0.452381
`*_quality_signal` values (schema order): 20 | 210 | 4.75 | 0.85 | 0.042105 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026882 | 0.114286 | 210 | 15 | 33 | 14 | 0.483871 | 0.819048 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0
Raw `qsc_*` values (same order): 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0
effective 0 · hits 6
**Row 10** · hexsha `5b2ff826c06ba479c23e27511e6e034ac403a7ce` · size 80 · ext `py` · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | `src/threads/__init__.py` | `remvo/zstt-fira` | `79d237369fe5d516ac3a6086ea050ece763beec6` | ["MIT"] | null | null | null |
| max_issues | `src/threads/__init__.py` | `remvo/zstt-fira` | `79d237369fe5d516ac3a6086ea050ece763beec6` | ["MIT"] | null | null | null |
| max_forks | `src/threads/__init__.py` | `remvo/zstt-fira` | `79d237369fe5d516ac3a6086ea050ece763beec6` | ["MIT"] | null | null | null |

`content`:
```python
from .camera_thread import CameraThread
from .serial_thread import SerialThread
```
**Quality signals** · avg_line_length 26.666667 · max_line_length 39 · alphanum_fraction 0.875
`*_quality_signal` values (schema order): 10 | 80 | 6.8 | 0.7 | 0.352941 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.1 | 80 | 2 | 40 | 40 | 0.944444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0
Raw `qsc_*` values (same order): 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0
effective 0 · hits 6
**Row 11** · hexsha `5b536fbbf0d431e562974941df31825e3b12225d` · size 72 · ext `py` · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | `audio_converter/blueprints/multilingual/__init__.py` | `mac641/audio-converter` | `abd9584a7a6b76285654f5647455e37776045d0c` | ["MIT"] | null | null | null |
| max_issues | `audio_converter/blueprints/multilingual/__init__.py` | `mac641/audio-converter` | `abd9584a7a6b76285654f5647455e37776045d0c` | ["MIT"] | null | null | null |
| max_forks | `audio_converter/blueprints/multilingual/__init__.py` | `mac641/audio-converter` | `abd9584a7a6b76285654f5647455e37776045d0c` | ["MIT"] | null | null | null |

`content`:
```python
from audio_converter.blueprints.multilingual.routes import multilingual
```
**Quality signals** · avg_line_length 36 · max_line_length 71 · alphanum_fraction 0.902778
`*_quality_signal` values (schema order): 8 | 72 | 8 | 0.875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.055556 | 72 | 1 | 72 | 72 | 0.941176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 1
Raw `qsc_*` values (same order): 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 1
effective 0 · hits 6
**Row 12** · hexsha `5b609e82a1d097ebe5868042cfea1a4271d47e3b` · size 15,425 · ext `py` · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | `triangles.py` | `Nicolas-Reyland/Marching-Cubes` | `98743e3acf5e15d3f9bacb251f7e3c53e1a25841` | ["MIT"] | 2 | 2021-01-15T14:43:50.000Z | 2021-01-15T15:21:44.000Z |
| max_issues | `triangles.py` | `Nicolas-Reyland/Marching-Cubes` | `98743e3acf5e15d3f9bacb251f7e3c53e1a25841` | ["MIT"] | null | null | null |
| max_forks | `triangles.py` | `Nicolas-Reyland/Marching-Cubes` | `98743e3acf5e15d3f9bacb251f7e3c53e1a25841` | ["MIT"] | null | null | null |

`content` (the triangle table is truncated here; the row ends mid-list in this excerpt):
```python
from ascii_enc_dec import dec as decode


def get_triangles(inside_nodes_index):
    """ Returns the associated triangle from inside-nodes index' """
    # empty list of '0's
    binary = ['0'] * 8
    # fill list with inside index'
    for index in inside_nodes_index:
        binary[index] = '1'
    # string from reversed list (the index' are from the 'end' of the list, should be the beginning)
    binary = ''.join(binary[::-1])
    # decode the binary to base 10
    triangle_table_index = decode(binary, 2)
    # get the right triangle configuration
    return triangle_table[triangle_table_index]


triangle_table = [
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [0, 1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [1, 8, 3, 9, 8, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [9, 2, 10, 0, 2, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [2, 8, 3, 2, 10, 8, 10, 9, 8, -1, -1, -1, -1, -1, -1, -1],
    [3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [0, 11, 2, 8, 11, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [1, 9, 0, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [1, 11, 2, 1, 9, 11, 9, 8, 11, -1, -1, -1, -1, -1, -1, -1],
    [3, 10, 1, 11, 10, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [0, 10, 1, 0, 8, 10, 8, 11, 10, -1, -1, -1, -1, -1, -1, -1],
    [3, 9, 0, 3, 11, 9, 11, 10, 9, -1, -1, -1, -1, -1, -1, -1],
    [9, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [4, 3, 0, 7, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [0, 1, 9, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [4, 1, 9, 4, 7, 1, 7, 3, 1, -1, -1, -1, -1, -1, -1, -1],
    [1, 2, 10, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [3, 4, 7, 3, 0, 4, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1],
    [9, 2, 10, 9, 0, 2, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1],
    [2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, -1, -1, -1, -1],
    [8, 4, 7, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [11, 4, 7, 11, 2, 4, 2, 0, 4, -1, -1, -1, -1, -1, -1, -1],
    [9, 0, 1, 8, 4, 7, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1],
    [4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, -1, -1, -1, -1],
    [3, 10, 1, 3, 11, 10, 7, 8, 4, -1, -1, -1, -1, -1, -1, -1],
    [1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, -1, -1, -1, -1],
    [4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, -1, -1, -1, -1],
    [4, 7, 11, 4, 11, 9, 9, 11, 10, -1, -1, -1, -1, -1, -1, -1],
    [9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [9, 5, 4, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [0, 5, 4, 1, 5, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [8, 5, 4, 8, 3, 5, 3, 1, 5, -1, -1, -1, -1, -1, -1, -1],
    [1, 2, 10, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [3, 0, 8, 1, 2, 10, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1],
    [5, 2, 10, 5, 4, 2, 4, 0, 2, -1, -1, -1, -1, -1, -1, -1],
    [2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, -1, -1, -1, -1],
    [9, 5, 4, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [0, 11, 2, 0, 8, 11, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1],
    [0, 5, 4, 0, 1, 5, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1],
    [2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, -1, -1, -1, -1],
    [10, 3, 11, 10, 1, 3, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1],
    [4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, -1, -1, -1, -1],
    [5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, -1, -1, -1, -1],
    [5, 4, 8, 5, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1],
    [9, 7, 8, 5, 7, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [9, 3, 0, 9, 5, 3, 5, 7, 3, -1, -1, -1, -1, -1, -1, -1],
    [0, 7, 8, 0, 1, 7, 1, 5, 7, -1, -1, -1, -1, -1, -1, -1],
    [1, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [9, 7, 8, 9, 5, 7, 10, 1, 2, -1, -1, -1, -1, -1, -1, -1],
    [10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, -1, -1, -1, -1],
    [8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, -1, -1, -1, -1],
    [2, 10, 5, 2, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1],
    [7, 9, 5, 7, 8, 9, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1],
    [9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, -1, -1, -1, -1],
    [2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, -1, -1, -1, -1],
    [11, 2, 1, 11, 1, 7, 7, 1, 5, -1, -1, -1, -1, -1, -1, -1],
    [9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, -1, -1, -1, -1],
    [5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, -1],
    [11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, -1],
    [11, 10, 5, 7, 11, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [0, 8, 3, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [9, 0, 1, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [1, 8, 3, 1, 9, 8, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1],
    [1, 6, 5, 2, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [1, 6, 5, 1, 2, 6, 3, 0, 8, -1, -1, -1, -1, -1, -1, -1],
    [9, 6, 5, 9, 0, 6, 0, 2, 6, -1, -1, -1, -1, -1, -1, -1],
    [5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, -1, -1, -1, -1],
    [2, 3, 11, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [11, 0, 8, 11, 2, 0, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1],
    [0, 1, 9, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1],
    [5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, -1, -1, -1, -1],
    [6, 3, 11, 6, 5, 3, 5, 1, 3, -1, -1, -1, -1, -1, -1, -1],
    [0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, -1, -1, -1, -1],
    [3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, -1, -1, -1, -1],
    [6, 5, 9, 6, 9, 11, 11, 9, 8, -1, -1, -1, -1, -1, -1, -1],
    [5, 10, 6, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [4, 3, 0, 4, 7, 3, 6, 5, 10, -1, -1, -1, -1, -1, -1, -1],
    [1, 9, 0, 5, 10, 6, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1],
    [10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, -1, -1, -1, -1],
    [6, 1, 2, 6, 5, 1, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1],
    [1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, -1, -1, -1, -1],
    [8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, -1, -1, -1, -1],
    [7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, -1],
    [3, 11, 2, 7, 8, 4, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1],
    [5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, -1, -1, -1, -1],
    [0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1],
    [9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, -1],
    [8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, -1, -1, -1, -1],
    [5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, -1],
    [0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, -1],
    [6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, -1, -1, -1, -1],
    [10, 4, 9, 6, 4, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [4, 10, 6, 4, 9, 10, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1],
    [10, 0, 1, 10, 6, 0, 6, 4, 0, -1, -1, -1, -1, -1, -1, -1],
    [8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, -1, -1, -1, -1],
    [1, 4, 9, 1, 2, 4, 2, 6, 4, -1, -1, -1, -1, -1, -1, -1],
    [3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, -1, -1, -1, -1],
    [0, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [8, 3, 2, 8, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1],
    [10, 4, 9, 10, 6, 4, 11, 2, 3, -1, -1, -1, -1, -1, -1, -1],
    [0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, -1, -1, -1, -1],
    [3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, -1, -1, -1, -1],
    [6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, -1],
    [9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, -1, -1, -1, -1],
    [8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, -1],
    [3, 11, 6, 3, 6, 0, 0, 6, 4, -1, -1, -1, -1, -1, -1, -1],
    [6, 4, 8, 11, 6, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [7, 10, 6, 7, 8, 10, 8, 9, 10, -1, -1, -1, -1, -1, -1, -1],
    [0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, -1, -1, -1, -1],
    [10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, -1, -1, -1, -1],
    [10, 6, 7, 10, 7, 1, 1, 7, 3, -1, -1, -1, -1, -1, -1, -1],
    [1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, -1, -1, -1, -1],
    [2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, -1],
    [7, 8, 0, 7, 0, 6, 6, 0, 2, -1, -1, -1, -1, -1, -1, -1],
    [7, 3, 2, 6, 7, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, -1, -1, -1, -1],
    [2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, -1],
    [1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, -1],
    [11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, -1, -1, -1, -1],
    [8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, -1],
    [0, 9, 1, 11, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, -1, -1, -1, -1],
    [7, 11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [3, 0, 8, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [0, 1, 9, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [8, 1, 9, 8, 3, 1, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1],
    [10, 1, 2, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [1, 2, 10, 3, 0, 8, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1],
    [2, 9, 0, 2, 10, 9, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1],
    [6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, -1, -1, -1, -1],
    [7, 2, 3, 6, 2, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [7, 0, 8, 7, 6, 0, 6, 2, 0, -1, -1, -1, -1, -1, -1, -1],
    [2, 7, 6, 2, 3, 7, 0, 1, 9, -1, -1, -1, -1, -1, -1, -1],
    [1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, -1, -1, -1, -1],
    [10, 7, 6, 10, 1, 7, 1, 3, 7, -1, -1, -1, -1, -1, -1, -1],
    [10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, -1, -1, -1, -1],
    [0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, -1, -1, -1, -1],
    [7, 6, 10, 7, 10, 8, 8, 10, 9, -1, -1, -1, -1, -1, -1, -1],
    [6, 8, 4, 11, 8, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [3, 6, 11, 3, 0, 6, 0, 4, 6, -1, -1, -1, -1, -1, -1, -1],
    [8, 6, 11, 8, 4, 6, 9, 0, 1, -1, -1, -1, -1, -1, -1, -1],
    [9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, -1, -1, -1, -1],
```
[6, 8, 4, 6, 11, 8, 2, 10, 1, -1, -1, -1, -1, -1, -1, -1],
[1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, -1, -1, -1, -1],
[4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, -1, -1, -1, -1],
[10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, -1],
[8, 2, 3, 8, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1],
[0, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, -1, -1, -1, -1],
[1, 9, 4, 1, 4, 2, 2, 4, 6, -1, -1, -1, -1, -1, -1, -1],
[8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, -1, -1, -1, -1],
[10, 1, 0, 10, 0, 6, 6, 0, 4, -1, -1, -1, -1, -1, -1, -1],
[4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, -1],
[10, 9, 4, 6, 10, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[4, 9, 5, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[0, 8, 3, 4, 9, 5, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1],
[5, 0, 1, 5, 4, 0, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1],
[11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, -1, -1, -1, -1],
[9, 5, 4, 10, 1, 2, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1],
[6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, -1, -1, -1, -1],
[7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, -1, -1, -1, -1],
[3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, -1],
[7, 2, 3, 7, 6, 2, 5, 4, 9, -1, -1, -1, -1, -1, -1, -1],
[9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, -1, -1, -1, -1],
[3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, -1, -1, -1, -1],
[6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, -1],
[9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, -1, -1, -1, -1],
[1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, -1],
[4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, -1],
[7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, -1, -1, -1, -1],
[6, 9, 5, 6, 11, 9, 11, 8, 9, -1, -1, -1, -1, -1, -1, -1],
[3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, -1, -1, -1, -1],
[0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, -1, -1, -1, -1],
[6, 11, 3, 6, 3, 5, 5, 3, 1, -1, -1, -1, -1, -1, -1, -1],
[1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, -1, -1, -1, -1],
[0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, -1],
[11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5, -1],
[6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, -1, -1, -1, -1],
[5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, -1, -1, -1, -1],
[9, 5, 6, 9, 6, 0, 0, 6, 2, -1, -1, -1, -1, -1, -1, -1],
[1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, -1],
[1, 5, 6, 2, 1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, -1],
[10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, -1, -1, -1, -1],
[0, 3, 8, 5, 6, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[10, 5, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[11, 5, 10, 7, 5, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[11, 5, 10, 11, 7, 5, 8, 3, 0, -1, -1, -1, -1, -1, -1, -1],
[5, 11, 7, 5, 10, 11, 1, 9, 0, -1, -1, -1, -1, -1, -1, -1],
[10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, -1, -1, -1, -1],
[11, 1, 2, 11, 7, 1, 7, 5, 1, -1, -1, -1, -1, -1, -1, -1],
[0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, -1, -1, -1, -1],
[9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, -1, -1, -1, -1],
[7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, -1],
[2, 5, 10, 2, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1],
[8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, -1, -1, -1, -1],
[9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, -1, -1, -1, -1],
[9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, -1],
[1, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[0, 8, 7, 0, 7, 1, 1, 7, 5, -1, -1, -1, -1, -1, -1, -1],
[9, 0, 3, 9, 3, 5, 5, 3, 7, -1, -1, -1, -1, -1, -1, -1],
[9, 8, 7, 5, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[5, 8, 4, 5, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1],
[5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, -1, -1, -1, -1],
[0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, -1, -1, -1, -1],
[10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, -1],
[2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, -1, -1, -1, -1],
[0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, -1],
[0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, -1],
[9, 4, 5, 2, 11, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, -1, -1, -1, -1],
[5, 10, 2, 5, 2, 4, 4, 2, 0, -1, -1, -1, -1, -1, -1, -1],
[3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, -1],
[5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2, -1, -1, -1, -1],
[8, 4, 5, 8, 5, 3, 3, 5, 1, -1, -1, -1, -1, -1, -1, -1],
[0, 4, 5, 1, 0, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, -1, -1, -1, -1],
[9, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[4, 11, 7, 4, 9, 11, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1],
[0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, -1, -1, -1, -1],
[1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, -1, -1, -1, -1],
[3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, -1],
[4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, -1, -1, -1, -1],
[9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, -1],
[11, 7, 4, 11, 4, 2, 2, 4, 0, -1, -1, -1, -1, -1, -1, -1],
[11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, -1, -1, -1, -1],
[2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, -1, -1, -1, -1],
[9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, -1],
[3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, -1],
[1, 10, 2, 8, 7, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[4, 9, 1, 4, 1, 7, 7, 1, 3, -1, -1, -1, -1, -1, -1, -1],
[4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, -1, -1, -1, -1],
[4, 0, 3, 7, 4, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[4, 8, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[9, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[3, 0, 9, 3, 9, 11, 11, 9, 10, -1, -1, -1, -1, -1, -1, -1],
[0, 1, 10, 0, 10, 8, 8, 10, 11, -1, -1, -1, -1, -1, -1, -1],
[3, 1, 10, 11, 3, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[1, 2, 11, 1, 11, 9, 9, 11, 8, -1, -1, -1, -1, -1, -1, -1],
[3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, -1, -1, -1, -1],
[0, 2, 11, 8, 0, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[3, 2, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[2, 3, 8, 2, 8, 10, 10, 8, 9, -1, -1, -1, -1, -1, -1, -1],
[9, 10, 2, 0, 9, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, -1, -1, -1, -1],
[1, 10, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[1, 3, 8, 9, 1, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[0, 9, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[0, 3, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1]
]
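# This is the classic marching-cubes triangle table: 256 rows, one per cube
# corner configuration, each listing the cube-edge indices (0-11) of the
# triangles to emit, padded with -1 sentinels. A typical consumer walks a row
# in steps of three until it hits -1; a minimal sketch (the `tri_table` and
# `emit_triangle` names are illustrative, not from the original file):
#
#   row = tri_table[cube_index]
#   for i in range(0, len(row), 3):
#       if row[i] == -1:
#           break
#       emit_triangle(row[i], row[i + 1], row[i + 2])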
| 55.089286
| 97
| 0.319287
| 4,186
| 15,425
| 1.173435
| 0.012661
| 0.582655
| 0.732899
| 0.795603
| 0.621539
| 0.608103
| 0.547638
| 0.46987
| 0.457044
| 0.369707
| 0
| 0.401069
| 0.272285
| 15,425
| 280
| 98
| 55.089286
| 0.036526
| 0.01731
| 0
| 0
| 0
| 0
| 0.000132
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.003759
| false
| 0
| 0.003759
| 0
| 0.011278
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5bc3eab7dce91ca5755a0ec673204ac5334ef2ea
| 104
|
py
|
Python
|
jtalk_py/__init__.py
|
tos-kamiya/jtalk.py
|
b291fc335380c1acb00ff91a146ce92f210605b2
|
[
"Unlicense"
] | null | null | null |
jtalk_py/__init__.py
|
tos-kamiya/jtalk.py
|
b291fc335380c1acb00ff91a146ce92f210605b2
|
[
"Unlicense"
] | null | null | null |
jtalk_py/__init__.py
|
tos-kamiya/jtalk.py
|
b291fc335380c1acb00ff91a146ce92f210605b2
|
[
"Unlicense"
] | null | null | null |
import importlib.metadata
__version__ = importlib.metadata.version('jtalk.py')
from .jtalk import main
| 20.8
| 52
| 0.807692
| 13
| 104
| 6.153846
| 0.615385
| 0.425
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096154
| 104
| 4
| 53
| 26
| 0.851064
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5bdec0aa5a65178b04388c7a3745f20c8cc9ec0c
| 97
|
py
|
Python
|
tests/datastructure_example.py
|
mad-skull/DataStructure
|
789bea320d1e3745722cb91e2e17cb621fa27879
|
[
"MIT"
] | 1
|
2021-08-29T17:55:37.000Z
|
2021-08-29T17:55:37.000Z
|
tests/datastructure_example.py
|
mad-skull/EasyDSA
|
789bea320d1e3745722cb91e2e17cb621fa27879
|
[
"MIT"
] | null | null | null |
tests/datastructure_example.py
|
mad-skull/EasyDSA
|
789bea320d1e3745722cb91e2e17cb621fa27879
|
[
"MIT"
] | null | null | null |
from EasyDSA import BinarySearchTree
from EasyDSA import HashMap
from EasyDSA import LinkedList
| 19.4
| 36
| 0.865979
| 12
| 97
| 7
| 0.5
| 0.392857
| 0.607143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134021
| 97
| 4
| 37
| 24.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
750b7fe20b4f75dc678e1477ebba791e247b1936
| 29
|
py
|
Python
|
pyspeed/cpp_pyb11/__init__.py
|
chr1st1ank/python-metrics
|
f8e445994a5f81d4a6e861d6b6f3a5b8676a5894
|
[
"MIT"
] | 1
|
2022-01-04T06:08:11.000Z
|
2022-01-04T06:08:11.000Z
|
pyspeed/cpp_pyb11/__init__.py
|
chr1st1ank/python-metrics
|
f8e445994a5f81d4a6e861d6b6f3a5b8676a5894
|
[
"MIT"
] | null | null | null |
pyspeed/cpp_pyb11/__init__.py
|
chr1st1ank/python-metrics
|
f8e445994a5f81d4a6e861d6b6f3a5b8676a5894
|
[
"MIT"
] | null | null | null |
from .pyspeed_pyb11 import *
| 14.5
| 28
| 0.793103
| 4
| 29
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 0.137931
| 29
| 1
| 29
| 29
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
751847bdb1e443df741881fb297d9192f6b31740
| 36
|
py
|
Python
|
src/segmantic/i2i/__init__.py
|
dyollb/segmantic
|
8fe47340ff0f67812918f7070e3d6080e5d228ac
|
[
"MIT"
] | null | null | null |
src/segmantic/i2i/__init__.py
|
dyollb/segmantic
|
8fe47340ff0f67812918f7070e3d6080e5d228ac
|
[
"MIT"
] | 3
|
2021-09-24T20:32:23.000Z
|
2022-03-14T10:54:13.000Z
|
src/segmantic/i2i/__init__.py
|
dyollb/segmantic
|
8fe47340ff0f67812918f7070e3d6080e5d228ac
|
[
"MIT"
] | 2
|
2021-09-24T11:54:52.000Z
|
2021-10-01T13:01:55.000Z
|
from .translate import translate_3d
| 18
| 35
| 0.861111
| 5
| 36
| 6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03125
| 0.111111
| 36
| 1
| 36
| 36
| 0.90625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
753f741d7e70cf39f54f4571b02bd677a0d18601
| 102
|
py
|
Python
|
ambra_sdk/service/entrypoints/tag.py
|
dyens/sdk-python
|
24bf05268af2832c70120b84fd53bf44862cffec
|
[
"Apache-2.0"
] | null | null | null |
ambra_sdk/service/entrypoints/tag.py
|
dyens/sdk-python
|
24bf05268af2832c70120b84fd53bf44862cffec
|
[
"Apache-2.0"
] | null | null | null |
ambra_sdk/service/entrypoints/tag.py
|
dyens/sdk-python
|
24bf05268af2832c70120b84fd53bf44862cffec
|
[
"Apache-2.0"
] | null | null | null |
from ambra_sdk.service.entrypoints.generated.tag import Tag as GTag
class Tag(GTag):
"""Tag."""
| 17
| 67
| 0.715686
| 15
| 102
| 4.8
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147059
| 102
| 5
| 68
| 20.4
| 0.827586
| 0.039216
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
755190507af692cae8ccce57f694f98af01ee109
| 105
|
py
|
Python
|
candle/metrics/__init__.py
|
ynop/candle
|
1e0687e1f6a9b622033fbd141674d1de964f4465
|
[
"MIT"
] | null | null | null |
candle/metrics/__init__.py
|
ynop/candle
|
1e0687e1f6a9b622033fbd141674d1de964f4465
|
[
"MIT"
] | null | null | null |
candle/metrics/__init__.py
|
ynop/candle
|
1e0687e1f6a9b622033fbd141674d1de964f4465
|
[
"MIT"
] | null | null | null |
from .base import Metric
from .accuracy import BinaryAccuracy
from .accuracy import CategoricalAccuracy
| 21
| 41
| 0.847619
| 12
| 105
| 7.416667
| 0.583333
| 0.269663
| 0.404494
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12381
| 105
| 4
| 42
| 26.25
| 0.967391
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
f33d0eea3272f12108b15c157032de9a105edca5
| 39
|
py
|
Python
|
virtual/walt/virtual/node/__init__.py
|
drakkar-lig/walt-python-packages
|
b778992e241d54b684f54715d83c4aff98a01db7
|
[
"BSD-3-Clause"
] | 4
|
2020-01-14T09:12:56.000Z
|
2022-03-14T14:35:11.000Z
|
virtual/walt/virtual/node/__init__.py
|
drakkar-lig/walt-python-packages
|
b778992e241d54b684f54715d83c4aff98a01db7
|
[
"BSD-3-Clause"
] | 73
|
2016-04-29T13:17:26.000Z
|
2022-03-01T15:06:48.000Z
|
virtual/walt/virtual/node/__init__.py
|
drakkar-lig/walt-python-packages
|
b778992e241d54b684f54715d83c4aff98a01db7
|
[
"BSD-3-Clause"
] | 3
|
2019-03-18T14:27:56.000Z
|
2021-06-03T12:07:02.000Z
|
from walt.virtual.node.node import run
| 19.5
| 38
| 0.820513
| 7
| 39
| 4.571429
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 39
| 1
| 39
| 39
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f389417cb71316580287cbfaf5b356adf76525e0
| 33
|
py
|
Python
|
sentiment_analyzer.py
|
JonLMyers/Inb4-Danger
|
5d6cdbb00d655cf0f91b1313227171fa20d4fea1
|
[
"MIT"
] | 1
|
2019-04-09T02:10:40.000Z
|
2019-04-09T02:10:40.000Z
|
sentiment_analyzer.py
|
JonLMyers/inb4-Danger
|
5d6cdbb00d655cf0f91b1313227171fa20d4fea1
|
[
"MIT"
] | 1
|
2018-06-28T20:00:18.000Z
|
2018-06-28T20:00:18.000Z
|
sentiment_analyzer.py
|
JonLMyers/inb4-Danger
|
5d6cdbb00d655cf0f91b1313227171fa20d4fea1
|
[
"MIT"
] | null | null | null |
def analyze_sentiment():
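# Placeholder stub: no sentiment analysis is implemented yet.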
pass
| 16.5
| 24
| 0.727273
| 4
| 33
| 5.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 33
| 2
| 25
| 16.5
| 0.851852
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
f38b5dc0f9a2d6e4d812a59cbe73c2230062c32d
| 29
|
py
|
Python
|
__init__.py
|
andytwoods/pychonk
|
5d657e82a474cd9efe74da5c84f3d3423b5fc3a0
|
[
"MIT"
] | null | null | null |
__init__.py
|
andytwoods/pychonk
|
5d657e82a474cd9efe74da5c84f3d3423b5fc3a0
|
[
"MIT"
] | 1
|
2020-06-16T12:54:43.000Z
|
2020-07-09T11:36:10.000Z
|
__init__.py
|
andytwoods/pychonk
|
5d657e82a474cd9efe74da5c84f3d3423b5fc3a0
|
[
"MIT"
] | null | null | null |
from src.pychonk import chonk
| 29
| 29
| 0.862069
| 5
| 29
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 29
| 1
| 29
| 29
| 0.961538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f3b310de98e0d5cf0bcc800c7c09c0f3f6833b5d
| 434
|
py
|
Python
|
python/kyu-5/human-readable-time/test_human_readable_time.py
|
ledwindra/codewars
|
0552669a69e801cfe5f9a3696a4d98be63a96951
|
[
"WTFPL"
] | 1
|
2020-11-13T16:55:04.000Z
|
2020-11-13T16:55:04.000Z
|
python/kyu-5/human-readable-time/test_human_readable_time.py
|
ledwindra/codewars
|
0552669a69e801cfe5f9a3696a4d98be63a96951
|
[
"WTFPL"
] | 1
|
2020-01-28T15:48:17.000Z
|
2020-01-28T15:48:17.000Z
|
python/kyu-5/human-readable-time/test_human_readable_time.py
|
ledwindra/codewars
|
0552669a69e801cfe5f9a3696a4d98be63a96951
|
[
"WTFPL"
] | null | null | null |
from human_readable_time import make_readable
class TestHumanReadableTime:
def test_0(self):
assert make_readable(0) == "00:00:00"
def test_1(self):
assert make_readable(5) == "00:00:05"
def test_2(self):
assert make_readable(60) == "00:01:00"
def test_3(self):
assert make_readable(86399) == "23:59:59"
def test_4(self):
assert make_readable(359999) == "99:59:59"
| 24.111111
| 50
| 0.629032
| 64
| 434
| 4.0625
| 0.421875
| 0.276923
| 0.269231
| 0.423077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151976
| 0.241935
| 434
| 18
| 50
| 24.111111
| 0.638298
| 0
| 0
| 0
| 0
| 0
| 0.091954
| 0
| 0
| 0
| 0
| 0
| 0.416667
| 1
| 0.416667
| false
| 0
| 0.083333
| 0
| 0.583333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
f3c72671539c978bafbe0d628964804ec0d27255
| 224
|
py
|
Python
|
exporters/export_formatter/__init__.py
|
scrapinghub/exporters
|
b14f70530826bbbd6163d9e56e74345e762a9189
|
[
"BSD-3-Clause"
] | 41
|
2016-06-16T15:29:39.000Z
|
2021-08-06T03:29:13.000Z
|
exporters/export_formatter/__init__.py
|
bbotella/fluxo
|
c9fb01db1771ada4672bbffd67cb46e1f7802ab9
|
[
"BSD-3-Clause"
] | 52
|
2016-06-20T12:46:57.000Z
|
2018-02-08T12:22:03.000Z
|
exporters/export_formatter/__init__.py
|
bbotella/fluxo
|
c9fb01db1771ada4672bbffd67cb46e1f7802ab9
|
[
"BSD-3-Clause"
] | 10
|
2016-06-23T08:49:36.000Z
|
2018-01-13T10:12:10.000Z
|
from .json_export_formatter import JsonExportFormatter
from .xml_export_formatter import XMLExportFormatter # NOQA
from .csv_export_formatter import CSVExportFormatter # NOQA
DEFAULT_FORMATTER_CLASS = JsonExportFormatter
| 37.333333
| 60
| 0.875
| 24
| 224
| 7.833333
| 0.541667
| 0.239362
| 0.335106
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098214
| 224
| 5
| 61
| 44.8
| 0.930693
| 0.040179
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
45e8d791ce7ee6e1cc6abca54d09c9449d485363
| 40
|
py
|
Python
|
goldencheetahlib/__init__.py
|
AartGoossens/goldencheetahlib
|
ebe57de7d94280674c8440a81f53ac02f0b4eb43
|
[
"MIT"
] | 1
|
2018-09-15T00:46:18.000Z
|
2018-09-15T00:46:18.000Z
|
goldencheetahlib/__init__.py
|
AartGoossens/goldencheetahlib
|
ebe57de7d94280674c8440a81f53ac02f0b4eb43
|
[
"MIT"
] | 8
|
2016-08-17T08:02:03.000Z
|
2017-11-06T18:42:21.000Z
|
goldencheetahlib/__init__.py
|
AartGoossens/goldencheetahlib
|
ebe57de7d94280674c8440a81f53ac02f0b4eb43
|
[
"MIT"
] | 1
|
2019-10-15T13:28:29.000Z
|
2019-10-15T13:28:29.000Z
|
from .client import GoldenCheetahClient
| 20
| 39
| 0.875
| 4
| 40
| 8.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 40
| 1
| 40
| 40
| 0.972222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
45fde292ffc6b3f334fc88f75e7ff38c070ec8c1
| 19,355
|
py
|
Python
|
tests/redis_list_tests.py
|
musabhusaini/aioredis-models
|
8f868f4bf65e1068f8e8412fcc322ccfb65c1ea3
|
[
"MIT"
] | null | null | null |
tests/redis_list_tests.py
|
musabhusaini/aioredis-models
|
8f868f4bf65e1068f8e8412fcc322ccfb65c1ea3
|
[
"MIT"
] | null | null | null |
tests/redis_list_tests.py
|
musabhusaini/aioredis-models
|
8f868f4bf65e1068f8e8412fcc322ccfb65c1ea3
|
[
"MIT"
] | null | null | null |
import unittest
from unittest.mock import MagicMock, AsyncMock, call
from aioredis_models.redis_list import RedisList
class RedisListTests(unittest.IsolatedAsyncioTestCase):
def test_init_succeeds(self):
redis_list = RedisList(MagicMock(), MagicMock())
self.assertIsInstance(redis_list, RedisList)
async def test_length_returns_length(self):
redis = AsyncMock()
key = MagicMock()
redis_list = RedisList(redis, key)
result = await redis_list.length()
redis.llen.assert_called_once_with(key)
self.assertEqual(result, redis.llen.return_value)
async def test_get_range_passes_correct_defaults(self):
redis = AsyncMock()
key = MagicMock()
redis_list = RedisList(redis, key)
result = await redis_list.get_range()
redis.lrange.assert_awaited_once_with(key, 0, -1, encoding='utf-8')
self.assertEqual(result, redis.lrange.return_value)
async def test_get_range_works_correctly(self):
redis = AsyncMock()
key = MagicMock()
redis_list = RedisList(redis, key)
start = MagicMock()
stop = MagicMock()
encoding = MagicMock()
result = await redis_list.get_range(start, stop, encoding=encoding)
redis.lrange.assert_awaited_once_with(key, start, stop, encoding=encoding)
self.assertEqual(result, redis.lrange.return_value)
async def test_enumerate_with_batch_size_zero_gets_the_list_at_once(self):
items = [MagicMock() for _ in range(12)]
redis = AsyncMock()
redis.lrange.return_value = items
key = MagicMock()
redis_list = RedisList(redis, key)
result = [item async for item in redis_list.enumerate()]
self.assertEqual(result, items)
redis.lrange.assert_awaited_once_with(key, 0, -1, encoding='utf-8')
async def test_enumerate_gets_correct_batches(self):
items = [MagicMock() for _ in range(9)]
redis = AsyncMock()
redis.lrange.side_effect = lambda _, start, stop, **__: items[start:stop+1]
key = MagicMock()
encoding = MagicMock()
redis_list = RedisList(redis, key)
result = [item async for item in redis_list.enumerate(batch_size=5, encoding=encoding)]
self.assertEqual(result, items)
redis.lrange.assert_has_awaits([
call(key, 0, 4, encoding=encoding),
call(key, 5, 9, encoding=encoding)
])
async def test_enumerate_when_len_divisible_by_batch_size_gets_correct_batches(self):
items = [MagicMock() for _ in range(10)]
redis = AsyncMock()
redis.lrange.side_effect = lambda _, start, stop, **__: items[start:stop+1]
key = MagicMock()
encoding = MagicMock()
redis_list = RedisList(redis, key)
result = [item async for item in redis_list.enumerate(batch_size=5, encoding=encoding)]
self.assertEqual(result, items)
redis.lrange.assert_has_awaits([
call(key, 0, 4, encoding=encoding),
call(key, 5, 9, encoding=encoding),
call(key, 10, 14, encoding=encoding)
])
async def test_enumerate_with_start_returns_correct_result(self):
items = [MagicMock() for _ in range(10)]
redis = AsyncMock()
redis.lrange.side_effect = lambda _, start, stop, **__: items[start:stop+1]
key = MagicMock()
encoding = MagicMock()
redis_list = RedisList(redis, key)
result = [item async for item in redis_list.enumerate(start=3, batch_size=5, encoding=encoding)]
self.assertEqual(result, items[3:])
redis.lrange.assert_has_awaits([
call(key, 3, 7, encoding=encoding),
call(key, 8, 12, encoding=encoding)
])
async def test_enumerate_with_start_and_no_batch_size_get_all_at_once(self):
items = [MagicMock() for _ in range(10)]
redis = AsyncMock()
start = 3
redis.lrange.return_value = items[start:]
key = MagicMock()
encoding = MagicMock()
redis_list = RedisList(redis, key)
result = [item async for item in redis_list.enumerate(start=start, encoding=encoding)]
self.assertEqual(result, items[start:])
redis.lrange.assert_awaited_once_with(key, 3, -1, encoding=encoding)
async def test_enumerate_with_stop_returns_correct_result(self):
items = [MagicMock() for _ in range(10)]
redis = AsyncMock()
redis.lrange.side_effect = lambda _, start, stop, **__: items[start:stop+1]
key = MagicMock()
encoding = MagicMock()
redis_list = RedisList(redis, key)
result = [item async for item in redis_list.enumerate(stop=6, batch_size=5, encoding=encoding)]
self.assertEqual(result, items[:7])
redis.lrange.assert_has_awaits([
call(key, 0, 4, encoding=encoding),
call(key, 5, 6, encoding=encoding)
])
async def test_enumerate_with_stop_and_no_batch_size_gets_all_at_once(self):
items = [MagicMock() for _ in range(10)]
redis = AsyncMock()
stop = 6
redis.lrange.return_value = items[:stop+1]
key = MagicMock()
encoding = MagicMock()
redis_list = RedisList(redis, key)
result = [item async for item in redis_list.enumerate(stop=stop, encoding=encoding)]
self.assertEqual(result, items[:stop+1])
redis.lrange.assert_awaited_once_with(key, 0, stop, encoding=encoding)
async def test_enumerate_with_start_and_stop_returns_correct_result(self):
items = [MagicMock() for _ in range(11)]
redis = AsyncMock()
redis.lrange.side_effect = lambda _, start, stop, **__: items[start:stop+1]
key = MagicMock()
encoding = MagicMock()
redis_list = RedisList(redis, key)
result = [
item async for item in redis_list.enumerate(
start=3,
stop=9,
batch_size=5,
encoding=encoding
)
]
self.assertEqual(result, items[3:10])
redis.lrange.assert_has_awaits([
call(key, 3, 7, encoding=encoding),
call(key, 8, 9, encoding=encoding)
])
async def test_enumerate_with_start_and_stop_and_no_batch_size_gets_all_at_once(self):
items = [MagicMock() for _ in range(10)]
redis = AsyncMock()
start = 3
stop = 6
redis.lrange.return_value = items[start:stop+1]
key = MagicMock()
encoding = MagicMock()
redis_list = RedisList(redis, key)
result = [
item async for item in redis_list.enumerate(
start=start,
stop=stop,
encoding=encoding
)
]
self.assertEqual(result, items[start:stop+1])
redis.lrange.assert_awaited_once_with(key, 3, 6, encoding=encoding)
async def test_push_with_none_value_does_nothing(self):
key = MagicMock()
redis_list = RedisList(None, key)
result = await redis_list.push(None)
self.assertIsNone(result)
async def test_push_with_none_value_reverse_does_nothing(self):
key = MagicMock()
redis_list = RedisList(None, key)
result = await redis_list.push(None, reverse=True)
self.assertIsNone(result)
async def test_push_with_values_lpushes(self):
redis = AsyncMock()
key = MagicMock()
redis_list = RedisList(redis, key)
values = [MagicMock() for _ in range(8)]
result = await redis_list.push(*values)
redis.lpush.assert_awaited_once_with(key, *values)
self.assertEqual(result, redis.lpush.return_value)
async def test_push_with_values_and_reverse_rpushes(self):
redis = AsyncMock()
key = MagicMock()
redis_list = RedisList(redis, key)
values = [MagicMock() for _ in range(8)]
result = await redis_list.push(*values, reverse=True)
redis.rpush.assert_awaited_once_with(key, *values)
self.assertEqual(result, redis.rpush.return_value)
async def test_pop_with_reverse_and_block_brpops(self):
redis = AsyncMock()
key = MagicMock()
timeout = MagicMock()
encoding = MagicMock()
redis_list = RedisList(redis, key)
result = await redis_list.pop(reverse=True, block=True, timeout_seconds=timeout, encoding=encoding)
redis.brpop.assert_awaited_once_with(key, timeout=timeout, encoding=encoding)
self.assertEqual(result, redis.brpop.return_value)
async def test_pop_with_reverse_and_block_and_default_timeout_brpops_with_zero_timeout(self):
redis = AsyncMock()
key = MagicMock()
encoding = MagicMock()
redis_list = RedisList(redis, key)
result = await redis_list.pop(reverse=True, block=True, encoding=encoding)
redis.brpop.assert_awaited_once_with(key, timeout=0, encoding=encoding)
self.assertEqual(result, redis.brpop.return_value)
async def test_pop_with_reverse_and_default_block_rpops(self):
redis = AsyncMock()
key = MagicMock()
encoding = MagicMock()
redis_list = RedisList(redis, key)
result = await redis_list.pop(reverse=True, encoding=encoding)
redis.rpop.assert_awaited_once_with(key, encoding=encoding)
self.assertEqual(result, redis.rpop.return_value)
async def test_pop_with_reverse_and_no_block_rpops(self):
redis = AsyncMock()
key = MagicMock()
encoding = MagicMock()
redis_list = RedisList(redis, key)
result = await redis_list.pop(reverse=True, block=False, encoding=encoding)
redis.rpop.assert_awaited_once_with(key, encoding=encoding)
self.assertEqual(result, redis.rpop.return_value)
async def test_pop_with_block_blpops(self):
redis = AsyncMock()
key = MagicMock()
timeout = MagicMock()
encoding = MagicMock()
redis_list = RedisList(redis, key)
result = await redis_list.pop(block=True, timeout_seconds=timeout, encoding=encoding)
redis.blpop.assert_awaited_once_with(key, timeout=timeout, encoding=encoding)
self.assertEqual(result, redis.blpop.return_value)
async def test_pop_with_block_and_default_timeout_blpops_with_zero_timeout(self):
redis = AsyncMock()
key = MagicMock()
redis_list = RedisList(redis, key)
result = await redis_list.pop(block=True)
redis.blpop.assert_awaited_once_with(key, timeout=0, encoding='utf-8')
self.assertEqual(result, redis.blpop.return_value)
async def test_pop_with_default_block_lpops(self):
redis = AsyncMock()
key = MagicMock()
redis_list = RedisList(redis, key)
result = await redis_list.pop()
redis.lpop.assert_awaited_once_with(key, encoding='utf-8')
self.assertEqual(result, redis.lpop.return_value)
async def test_pop_with_no_block_lpops(self):
redis = AsyncMock()
key = MagicMock()
encoding = MagicMock()
redis_list = RedisList(redis, key)
result = await redis_list.pop(block=False, encoding=encoding)
redis.lpop.assert_awaited_once_with(key, encoding=encoding)
self.assertEqual(result, redis.lpop.return_value)
async def test_enqueue_with_none_value_does_nothing(self):
key = MagicMock()
redis_list = RedisList(None, key)
result = await redis_list.enqueue(None)
self.assertIsNone(result)
async def test_enqueue_with_values_lpushes(self):
redis = AsyncMock()
key = MagicMock()
redis_list = RedisList(redis, key)
values = [MagicMock() for _ in range(8)]
result = await redis_list.enqueue(*values)
redis.lpush.assert_awaited_once_with(key, *values)
self.assertEqual(result, redis.lpush.return_value)
async def test_dequeue_with_block_brpops(self):
redis = AsyncMock()
key = MagicMock()
timeout = MagicMock()
encoding = MagicMock()
redis_list = RedisList(redis, key)
result = await redis_list.dequeue(block=True, timeout_seconds=timeout, encoding=encoding)
redis.brpop.assert_awaited_once_with(key, timeout=timeout, encoding=encoding)
self.assertEqual(result, redis.brpop.return_value)
async def test_dequeue_with_block_and_default_timeout_brpops_with_zero_timeout(self):
redis = AsyncMock()
key = MagicMock()
encoding = MagicMock()
redis_list = RedisList(redis, key)
result = await redis_list.dequeue(block=True, encoding=encoding)
redis.brpop.assert_awaited_once_with(key, timeout=0, encoding=encoding)
self.assertEqual(result, redis.brpop.return_value)
async def test_dequeue_with_no_block_rpops(self):
redis = AsyncMock()
key = MagicMock()
encoding = MagicMock()
redis_list = RedisList(redis, key)
result = await redis_list.dequeue(encoding=encoding)
redis.rpop.assert_awaited_once_with(key, encoding=encoding)
self.assertEqual(result, redis.rpop.return_value)
async def test_move_with_block_brpoplpushes(self):
redis = AsyncMock()
key = MagicMock()
redis_list = RedisList(redis, key)
destination_key = MagicMock()
timeout = MagicMock()
encoding = MagicMock()
result = await redis_list.move(destination_key, block=True, timeout_seconds=timeout, encoding=encoding)
redis.brpoplpush.assert_awaited_once_with(key, destination_key, timeout=timeout, encoding=encoding)
self.assertEqual(result, redis.brpoplpush.return_value)
async def test_move_with_block_default_timeout_brpoplpushes_with_default_timeout(self):
redis = AsyncMock()
key = MagicMock()
redis_list = RedisList(redis, key)
destination_key = MagicMock()
result = await redis_list.move(destination_key, block=True)
redis.brpoplpush.assert_awaited_once_with(key, destination_key, timeout=0, encoding='utf-8')
self.assertEqual(result, redis.brpoplpush.return_value)
async def test_move_without_block_rpoplpushes(self):
redis = AsyncMock()
key = MagicMock()
redis_list = RedisList(redis, key)
destination_key = MagicMock()
encoding = MagicMock()
result = await redis_list.move(destination_key, block=False, encoding=encoding)
redis.rpoplpush.assert_awaited_once_with(key, destination_key, encoding=encoding)
self.assertEqual(result, redis.rpoplpush.return_value)
async def test_requeue_with_block_brpoplpushes(self):
redis = AsyncMock()
key = MagicMock()
redis_list = RedisList(redis, key)
timeout = MagicMock()
encoding = MagicMock()
result = await redis_list.requeue(block=True, timeout_seconds=timeout, encoding=encoding)
redis.brpoplpush.assert_awaited_once_with(key, key, timeout=timeout, encoding=encoding)
self.assertEqual(result, redis.brpoplpush.return_value)
async def test_requeue_with_block_default_timeout_brpoplpushes_with_default_timeout(self):
redis = AsyncMock()
key = MagicMock()
redis_list = RedisList(redis, key)
result = await redis_list.requeue(block=True)
redis.brpoplpush.assert_awaited_once_with(key, key, timeout=0, encoding='utf-8')
self.assertEqual(result, redis.brpoplpush.return_value)
async def test_requeue_without_block_rpoplpushes(self):
redis = AsyncMock()
key = MagicMock()
encoding = MagicMock()
redis_list = RedisList(redis, key)
result = await redis_list.requeue(block=False, encoding=encoding)
redis.rpoplpush.assert_awaited_once_with(key, key, encoding=encoding)
self.assertEqual(result, redis.rpoplpush.return_value)
async def test_remove_removes(self):
redis = AsyncMock()
key = MagicMock()
redis_list = RedisList(redis, key)
value = MagicMock()
count = MagicMock()
result = await redis_list.remove(value, count)
redis.lrem.assert_called_once_with(key, count, value)
self.assertEqual(result, redis.lrem.return_value)
async def test_remove_with_no_count_removes_with_zero_count(self):
redis = AsyncMock()
key = MagicMock()
redis_list = RedisList(redis, key)
value = MagicMock()
result = await redis_list.remove(value)
redis.lrem.assert_called_once_with(key, 0, value)
self.assertEqual(result, redis.lrem.return_value)
async def test_find_index_uses_correct_defaults(self):
redis = AsyncMock()
redis.lrange.return_value = ['test', 'this', 'for', 'me']
key = MagicMock()
redis_list = RedisList(redis, key)
result = await redis_list.find_index('this')
redis.lrange.assert_awaited_once_with(key, 0, -1, encoding='utf-8')
self.assertEqual(result, 1)
async def test_find_index_when_value_present_returns_index(self):
redis = AsyncMock()
redis.lrange.side_effect = [
['test', 'this'],
['for', 'me']
]
key = MagicMock()
redis_list = RedisList(redis, key)
start = 0
stop = 3
batch_size = 2
encoding = MagicMock()
result = await redis_list.find_index('for', start=start, stop=stop, batch_size=batch_size, encoding=encoding)
self.assertEqual(result, 2)
redis.lrange.assert_has_awaits([
call(key, 0, 1, encoding=encoding),
call(key, 2, 3, encoding=encoding)
])
async def test_find_index_when_value_not_present_returns_none(self):
redis = AsyncMock()
redis.lrange.return_value = ['test', 'this']
key = MagicMock()
redis_list = RedisList(redis, key)
result = await redis_list.find_index('me')
self.assertIsNone(result)
async def test_find_index_with_non_zero_start_adds_start_to_index(self):
redis = AsyncMock()
redis.lrange.return_value = ['test', 'this', 'for', 'me']
key = MagicMock()
redis_list = RedisList(redis, key)
start = 5
result = await redis_list.find_index('me', start=start)
self.assertEqual(result, 8)
async def test_find_index_with_stop_finds_result(self):
redis = AsyncMock()
redis.lrange.side_effect = [
['test', 'this', 'for', 'me'],
['because', 'something', 'happened']
]
key = MagicMock()
redis_list = RedisList(redis, key)
start = 5
stop = 12
result = await redis_list.find_index('something', start=start, stop=stop, batch_size=4)
self.assertEqual(result, 10)
async def test_find_index_with_stop_uses_stop(self):
redis = AsyncMock()
redis.lrange.side_effect = [
['test', 'this', 'for', 'me'],
['because']
]
key = MagicMock()
redis_list = RedisList(redis, key)
start = 5
stop = 9
result = await redis_list.find_index('something', start=start, stop=stop, batch_size=4)
self.assertIsNone(result)
| 35.383912
| 117
| 0.65425
| 2,289
| 19,355
| 5.279598
| 0.061162
| 0.066281
| 0.067025
| 0.09607
| 0.927431
| 0.91055
| 0.887547
| 0.831527
| 0.774266
| 0.745552
| 0
| 0.007961
| 0.247171
| 19,355
| 546
| 118
| 35.448718
| 0.821426
| 0
| 0
| 0.620773
| 0
| 0
| 0.00868
| 0
| 0
| 0
| 0
| 0
| 0.193237
| 1
| 0.002415
| false
| 0.002415
| 0.007246
| 0
| 0.012077
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
45ffb4c59e5b64c897c693a1ae3c5e74bc5be8fc
| 77
|
py
|
Python
|
model_zoo/utils/data/__init__.py
|
samuelstanton/model-zoo
|
77bb52e4a74d0601f13ad5f9e04457f3ed6cb10f
|
[
"MIT"
] | 4
|
2021-05-31T23:21:11.000Z
|
2021-06-03T22:20:17.000Z
|
model_zoo/utils/data/__init__.py
|
samuelstanton/model-zoo
|
77bb52e4a74d0601f13ad5f9e04457f3ed6cb10f
|
[
"MIT"
] | null | null | null |
model_zoo/utils/data/__init__.py
|
samuelstanton/model-zoo
|
77bb52e4a74d0601f13ad5f9e04457f3ed6cb10f
|
[
"MIT"
] | null | null | null |
from .dataset import Dataset
from .seq_dataset import SeqDataset, format_seqs
| 38.5
| 48
| 0.857143
| 11
| 77
| 5.818182
| 0.636364
| 0.40625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103896
| 77
| 2
| 48
| 38.5
| 0.927536
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
345e88ef182032b586494dc6d164c5637541e7d5
| 324
|
py
|
Python
|
elasticapm/transport/http_urllib3.py
|
lyrixderaven/apm-agent-python
|
e21b306da70995ca1582666378b7059495ff1bee
|
[
"BSD-3-Clause"
] | 2
|
2019-02-15T20:23:39.000Z
|
2019-02-15T20:26:06.000Z
|
elasticapm/transport/http_urllib3.py
|
lyrixderaven/apm-agent-python
|
e21b306da70995ca1582666378b7059495ff1bee
|
[
"BSD-3-Clause"
] | null | null | null |
elasticapm/transport/http_urllib3.py
|
lyrixderaven/apm-agent-python
|
e21b306da70995ca1582666378b7059495ff1bee
|
[
"BSD-3-Clause"
] | null | null | null |
import warnings
from elasticapm.transport.http import AsyncTransport as AsyncUrllib3Transport # noqa F401
from elasticapm.transport.http import Transport as Urllib3Transport # noqa F401
warnings.warn(
"The elasticapm.transport.http_urllib3 module has been renamed to elasticapm.transport.http", DeprecationWarning
)
| 36
| 116
| 0.830247
| 38
| 324
| 7.052632
| 0.552632
| 0.283582
| 0.343284
| 0.201493
| 0.246269
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031579
| 0.12037
| 324
| 8
| 117
| 40.5
| 0.908772
| 0.058642
| 0
| 0
| 0
| 0
| 0.298013
| 0.192053
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
cab3ca78ce53bcd311c1d15e5f4ad875c3b9c9ad
| 49
|
py
|
Python
|
monstro/management/templates/project/modules/core/models.py
|
bindlock/monstro
|
f7715426a0933f9ad3d0df73095ef735b20861fc
|
[
"MIT"
] | null | null | null |
monstro/management/templates/project/modules/core/models.py
|
bindlock/monstro
|
f7715426a0933f9ad3d0df73095ef735b20861fc
|
[
"MIT"
] | 6
|
2016-08-31T09:15:55.000Z
|
2017-05-13T12:01:40.000Z
|
monstro/management/templates/project/modules/core/models.py
|
pyvim/monstro
|
f7715426a0933f9ad3d0df73095ef735b20861fc
|
[
"MIT"
] | null | null | null |
from monstro import db
# Create your model here
| 12.25
| 24
| 0.77551
| 8
| 49
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.204082
| 49
| 3
| 25
| 16.333333
| 0.974359
| 0.44898
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cad1af4b6fdf1e4ae8e1ca72bf0490a3e602d391
| 10,346
|
py
|
Python
|
src/models/nn.py
|
ngruver/decon-hnn
|
6e6c7e9962568214e1708fb933b715a39328fc7b
|
[
"Apache-2.0"
] | 6
|
2022-02-14T04:52:59.000Z
|
2022-03-08T05:11:34.000Z
|
src/models/nn.py
|
ngruver/decon-hnn
|
6e6c7e9962568214e1708fb933b715a39328fc7b
|
[
"Apache-2.0"
] | null | null | null |
src/models/nn.py
|
ngruver/decon-hnn
|
6e6c7e9962568214e1708fb933b715a39328fc7b
|
[
"Apache-2.0"
] | null | null | null |
import sys
import torch
import torch.nn as nn
from torchdiffeq import odeint
from .utils import FCsoftplus,FCtanh, Linear, CosSin
from typing import Tuple, Union
class NN(nn.Module):
def __init__(
self,
G,
dof_ndim: int = 1,
hidden_size: int = 256,
num_layers: int = 2,
angular_dims: Tuple = tuple(),
wgrad: bool = True,
**kwargs
):
super().__init__(**kwargs)
if wgrad:
print("NN ignores wgrad")
self.q_ndim = dof_ndim
# We parameterize angular dims in terms of cos(theta), sin(theta)
chs = [2 * self.q_ndim + len(angular_dims)] + num_layers * [hidden_size]
layers = [CosSin(self.q_ndim, angular_dims, only_q=False)] + \
[FCtanh(chs[i], chs[i + 1], zero_bias=False, orthogonal_init=True)
for i in range(num_layers)] + \
[Linear(chs[-1], 2 * self.q_ndim, zero_bias=False, orthogonal_init=True)]
self.net = nn.Sequential(*layers)
print("NN currently assumes time independent ODE")
self.nfe = 0
self.angular_dims = angular_dims
def forward(self, t, z):
""" Computes a batch of `NxD` time derivatives of the state `z` at time `t`
Args:
t: Scalar Tensor of the current time
z: N x 2D Tensor of the N different states in D dimensions
Returns: N x 2D Tensor of the time derivatives
"""
assert (z.ndim == 2)
assert z.size(-1) == 2 * self.q_ndim
self.nfe += 1
return self.net(z)
def _integrate(self, dynamics, z0, ts, tol=1e-4, method="rk4"):
""" Integrates an initial state forward in time according to the learned dynamics
Args:
z0: (bs x 2 x D) sized
Tensor representing initial state. N is the batch size
ts: a length T Tensor representing the time points to evaluate at
tol: integrator tolerance
Returns: a bs x T x 2 x D sized Tensor
"""
assert (z0.ndim == 3) and (ts.ndim == 1)
bs = z0.shape[0]
self.nfe = 0
zt = odeint(dynamics, z0.reshape(bs, -1), ts, rtol=tol, method=method)
zt = zt.permute(1, 0, 2) # T x N x D -> N x T x D
# self._acc_magn = self.acc_magn(zt)
return zt.reshape(bs, len(ts), *z0.shape[1:])
def integrate(self, z0, ts, tol=1e-4, method="rk4"):
""" Integrates an initial state forward in time according to the learned dynamics
Args:
z0: (bs x 2 x D) sized
Tensor representing initial state. N is the batch size
ts: a length T Tensor representing the time points to evaluate at
tol: integrator tolerance
Returns: a bs x T x 2 x D sized Tensor
"""
return self._integrate(lambda t,z: self.forward(t,z), z0, ts, tol, method)
def acc_magn(self, zt):
dz_dt = self.forward(torch.zeros(1)[0], zt.reshape(-1, zt.shape[-1]))
magnitude = dz_dt.chunk(2, dim=-1)[1].pow(2).mean()
return magnitude
# def log_data(self,logger,step,name):
# logger.add_scalars('info',
# {'acc_magn': self._acc_magn.cpu().data.numpy()},
# step)
class mNN(nn.Module):
def __init__(
self,
G,
dof_ndim: int = 1,
hidden_size: int = 256,
num_layers: int = 2,
angular_dims: Tuple = tuple(),
wgrad: bool = True,
**kwargs
):
super().__init__(**kwargs)
if wgrad:
print("NN ignores wgrad")
self.q_ndim = dof_ndim
self.cossin = CosSin(3 * self.q_ndim, angular_dims, only_q=False)
# We parameterize angular dims in terms of cos(theta), sin(theta)
chs = [3 * self.q_ndim + len(angular_dims)] + num_layers * [hidden_size]
layers = [CosSin(2 * self.q_ndim, angular_dims, only_q=False)] + \
[FCtanh(chs[i], chs[i + 1], zero_bias=False, orthogonal_init=True)
for i in range(num_layers)] + \
[Linear(chs[-1], 2 * self.q_ndim, zero_bias=False, orthogonal_init=True)]
self.net = nn.Sequential(*layers)
# wrap = lambda: nn.Sequential(*layers)
# self.swag_model = SWAG(wrap)
print("NN currently assumes time independent ODE")
self.nfe = 0
self.angular_dims = angular_dims
def forward(self, t, z):
""" Computes a batch of `NxD` time derivatives of the state `z` at time `t`
Args:
t: Scalar Tensor of the current time
z: N x 2D Tensor of the N different states in D dimensions
Returns: N x 2D Tensor of the time derivatives
"""
z, m = z
assert (t.ndim == 0) and (z.ndim == 2)
assert z.size(-1) == 2 * self.q_ndim
self.nfe += 1
zm = torch.cat([z, m], dim=1)
dz = self.net(zm)
# if self.training:
# dz[:,:self.q_ndim] = dz[:,:self.q_ndim] + 0.01 * torch.randn_like(dz[:,:self.q_ndim])
dm = torch.zeros_like(m)
return dz, dm
def integrate(self, z0, m, ts, tol=1e-4, method="rk4"):
""" Integrates an initial state forward in time according to the learned dynamics
Args:
z0: (bs x 2 x D) sized
Tensor representing initial state. N is the batch size
ts: a length T Tensor representing the time points to evaluate at
tol: integrator tolerance
Returns: a bs x T x 2 x D sized Tensor
"""
assert (z0.ndim == 3) and (ts.ndim == 1)
bs = z0.shape[0]
self.nfe = 0
zt, _ = odeint(self, (z0.reshape(bs, -1), m), ts, rtol=tol, method=method)
zt = zt.permute(1, 0, 2) # T x N x D -> N x T x D
return zt.reshape(bs, len(ts), *z0.shape[1:])
class ControlNN(nn.Module):
def __init__(
self,
control_policy,
G,
dof_ndim: int = 1,
hidden_size: int = 256,
num_layers: int = 2,
angular_dims: Tuple = tuple(),
wgrad: bool = True,
**kwargs
):
super().__init__(**kwargs)
if wgrad:
print("NN ignores wgrad")
self.q_ndim = dof_ndim
# We parameterize angular dims in terms of cos(theta), sin(theta)
chs = [2 * self.q_ndim + len(angular_dims)] + num_layers * [hidden_size]
layers = [CosSin(self.q_ndim, angular_dims, only_q=False)] + \
[FCtanh(chs[i], chs[i + 1], zero_bias=False, orthogonal_init=True)
for i in range(num_layers)] + \
[Linear(chs[-1], 2 * self.q_ndim, zero_bias=False, orthogonal_init=True)]
self.net = nn.Sequential(*layers)
chs = [1] + num_layers * [hidden_size]
#[CosSin(self.q_ndim, angular_dims, only_q=False)]
layers = [FCtanh(chs[i], chs[i + 1], zero_bias=False, orthogonal_init=True)
for i in range(num_layers)] + \
[Linear(chs[-1], 2 * self.q_ndim, zero_bias=False, orthogonal_init=True)]
self.control_net = nn.Sequential(*layers)
print("NN currently assumes time independent ODE")
self.nfe = 0
self.angular_dims = angular_dims
self.control_policy = control_policy
def forward(self, t, z):
""" Computes a batch of `NxD` time derivatives of the state `z` at time `t`
Args:
t: Scalar Tensor of the current time
z: N x 2D Tensor of the N different states in D dimensions
Returns: N x 2D Tensor of the time derivatives
"""
assert (t.ndim == 0) and (z.ndim == 2)
assert z.size(-1) == 2 * self.q_ndim
self.nfe += 1
u = self.control_policy(t, z).detach()
# dynamics = self.net(torch.cat([z, u], axis=-1))
dynamics = self.net(z) + self.control_net(u)
# print(dynamics)
return dynamics
def _integrate(self, dynamics, z0, ts, tol=1e-4, method="rk4"):
""" Integrates an initial state forward in time according to the learned dynamics
Args:
z0: (bs x 2 x D) sized
Tensor representing initial state. N is the batch size
ts: a length T Tensor representing the time points to evaluate at
tol: integrator tolerance
Returns: a bs x T x 2 x D sized Tensor
"""
assert (z0.ndim == 3) and (ts.ndim == 1)
bs = z0.shape[0]
self.nfe = 0
zt = odeint(dynamics, z0.reshape(bs, -1), ts, rtol=tol, method=method)
zt = zt.permute(1, 0, 2) # T x N x D -> N x T x D
return zt.reshape(bs, len(ts), *z0.shape[1:])
def integrate(self, z0, ts, tol=1e-4, method="rk4"):
""" Integrates an initial state forward in time according to the learned dynamics
Args:
z0: (bs x 2 x D) sized
Tensor representing initial state. N is the batch size
ts: a length T Tensor representing the time points to evaluate at
tol: integrator tolerance
Returns: a bs x T x 2 x D sized Tensor
"""
return self._integrate(lambda t,z: self.forward(t,z), z0, ts, tol, method)
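# Note: the SWAG-related methods below reference `self.swag_model`, which is
# never initialized in this class (the wrapper setup appears only as
# commented-out code in mNN above), so calling them would raise AttributeError.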
def integrate_swag(self, z0, ts, tol=1e-4, method="rk4"):
return self._integrate(lambda t, z: self.swag_model(z), z0, ts, tol, method)
def collect_model(self):
self.swag_model.collect_model(self.net)
def sample(self):
self.swag_model.sample()
class DeltaNN(NN):
def integrate(self, z0, ts, tol=0.0,method=None):
""" Integrates an initial state forward in time according to the learned
dynamics using Euler's method with predicted time derivatives
Args:
z0: (bs x 2 x D) sized
Tensor representing initial state. N is the batch size
ts: a length T Tensor representing the time points to evaluate at
Returns: a bs x T x 2 x D sized Tensor
"""
assert (z0.ndim == 3) and (ts.ndim == 1)
bs = z0.shape[0]
dts = ts[1:] - ts[:-1]
zts = [z0.reshape(bs, -1)]
for dt in dts:
zts.append(zts[-1] + dt * self(ts[0], zts[-1]))
return torch.stack(zts, dim=1).reshape(bs, len(ts), *z0.shape[1:])
| 38.036765
| 99
| 0.564469
| 1,497
| 10,346
| 3.808283
| 0.118904
| 0.018418
| 0.033152
| 0.00842
| 0.820207
| 0.81056
| 0.803543
| 0.794247
| 0.78495
| 0.778635
| 0
| 0.024415
| 0.323023
| 10,346
| 271
| 100
| 38.177122
| 0.789549
| 0.32602
| 0
| 0.695364
| 0
| 0
| 0.029366
| 0
| 0
| 0
| 0
| 0
| 0.066225
| 1
| 0.10596
| false
| 0
| 0.039735
| 0.006623
| 0.245033
| 0.039735
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
cae2c5ebb18de56abf4f674a025ff0c488c35dd3
| 46,316
|
py
|
Python
|
jdcloud_cli/controllers/services/jcq.py
|
Tanc009/jdcloud-cli
|
4e11de77c68501f44e7026c0ad1c24e5d043197e
|
[
"Apache-2.0"
] | 95
|
2018-06-05T10:49:32.000Z
|
2019-12-31T11:07:36.000Z
|
jdcloud_cli/controllers/services/jcq.py
|
Tanc009/jdcloud-cli
|
4e11de77c68501f44e7026c0ad1c24e5d043197e
|
[
"Apache-2.0"
] | 22
|
2018-06-05T10:58:59.000Z
|
2020-07-31T12:13:19.000Z
|
jdcloud_cli/controllers/services/jcq.py
|
Tanc009/jdcloud-cli
|
4e11de77c68501f44e7026c0ad1c24e5d043197e
|
[
"Apache-2.0"
] | 21
|
2018-06-04T12:50:27.000Z
|
2020-11-05T10:55:28.000Z
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from argparse import RawTextHelpFormatter
from jdcloud_cli.cement.ext.ext_argparse import expose
from jdcloud_cli.controllers.base_controller import BaseController
from jdcloud_cli.client_factory import ClientFactory
from jdcloud_cli.parameter_builder import collect_user_args, collect_user_headers
from jdcloud_cli.printer import Printer
from jdcloud_cli.skeleton import Skeleton
class JcqController(BaseController):
class Meta:
label = 'jcq'
help = 'JD Cloud JCQ APIs'
description = '''
Subcommands of the jcq CLI, covering the JCQ-related APIs.
OpenAPI documentation: https://docs.jdcloud.com/cn/message-queue/api/overview
'''
stacked_on = 'base'
stacked_type = 'nested'
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID of the target region """, dest='regionId', required=False)),
(['--topic-name'], dict(help="""(string) topic name """, dest='topicName', required=True)),
(['--input-json'], dict(help='(json) Input parameters as a JSON string or an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' Describe the access point ''',
description='''
Describe the access point.
Example: jdc jcq describe-access-point --topic-name xxx
''',
)
def describe_access_point(self):
client_factory = ClientFactory('jcq')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.jcq.apis.DescribeAccessPointRequest import DescribeAccessPointRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeAccessPointRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID of the target region """, dest='regionId', required=False)),
(['--input-json'], dict(help='(json) Input parameters as a JSON string or an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' List consumerGroupIds ''',
description='''
List the consumerGroupIds.
Example: jdc jcq describe-consumer-group-ids
''',
)
def describe_consumer_group_ids(self):
client_factory = ClientFactory('jcq')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.jcq.apis.DescribeConsumerGroupIdsRequest import DescribeConsumerGroupIdsRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeConsumerGroupIdsRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID of the target region """, dest='regionId', required=False)),
(['--consumer-group-id'], dict(help="""(string) if consumerGroupId is empty, show dead-letter counts across all of the user's subscriptions """, dest='consumerGroupId', required=False)),
(['--page-number'], dict(help="""(int) page number """, dest='pageNumber', type=int, required=False)),
(['--page-size'], dict(help="""(int) page size; default 10; valid range [10, 100] """, dest='pageSize', type=int, required=False)),
(['--input-json'], dict(help='(json) Input parameters as a JSON string or an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 死信消息数(按照用户或者consumerGroupId) ''',
description='''
死信消息数(按照用户或者consumerGroupId)。
示例: jdc jcq describe-dead-letter-numbers
''',
)
def describe_dead_letter_numbers(self):
client_factory = ClientFactory('jcq')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.jcq.apis.DescribeDeadLetterNumbersRequest import DescribeDeadLetterNumbersRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeDeadLetterNumbersRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID of the target region """, dest='regionId', required=False)),
(['--topic-name'], dict(help="""(string) topic name """, dest='topicName', required=True)),
(['--consumer-group-id'], dict(help="""(string) If consumerGroupId is empty, show the dead-letter counts for all subscriptions under this topic """, dest='consumerGroupId', required=False)),
(['--page-number'], dict(help="""(int) Page number """, dest='pageNumber', type=int, required=False)),
(['--page-size'], dict(help="""(int) Page size; defaults to 10; valid range [10, 100] """, dest='pageSize', type=int, required=False)),
(['--input-json'], dict(help='(json) Input parameters given as a JSON string or as an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' Number of dead-letter messages ''',
description='''
Number of dead-letter messages.
Example: jdc jcq describe-dead-letter-numbers-with-topic --topic-name xxx
''',
)
def describe_dead_letter_numbers_with_topic(self):
client_factory = ClientFactory('jcq')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.jcq.apis.DescribeDeadLetterNumbersWithTopicRequest import DescribeDeadLetterNumbersWithTopicRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeDeadLetterNumbersWithTopicRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID of the target region """, dest='regionId', required=False)),
(['--topic-name'], dict(help="""(string) topic name """, dest='topicName', required=True)),
(['--consumer-group-id'], dict(help="""(string) consumerGroupId """, dest='consumerGroupId', required=True)),
(['--page-number'], dict(help="""(int) Page number """, dest='pageNumber', type=int, required=False)),
(['--page-size'], dict(help="""(int) Page size; defaults to 10; valid range [10, 100] """, dest='pageSize', type=int, required=False)),
(['--start-time'], dict(help="""(string) Start time """, dest='startTime', required=True)),
(['--end-time'], dict(help="""(string) End time """, dest='endTime', required=True)),
(['--input-json'], dict(help='(json) Input parameters given as a JSON string or as an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' List the dead-letter queue ''',
description='''
List the dead-letter queue.
Example: jdc jcq list-dead-letters --topic-name xxx --consumer-group-id xxx --start-time xxx --end-time xxx
''',
)
def list_dead_letters(self):
client_factory = ClientFactory('jcq')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.jcq.apis.ListDeadLettersRequest import ListDeadLettersRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = ListDeadLettersRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID of the target region """, dest='regionId', required=False)),
(['--topic-name'], dict(help="""(string) topic name """, dest='topicName', required=True)),
(['--consumer-group-id'], dict(help="""(string) consumerGroupId """, dest='consumerGroupId', required=True)),
(['--message-ids'], dict(help="""(string) messageIds, comma-separated; if omitted, all dead letters are deleted """, dest='messageIds', required=False)),
(['--input-json'], dict(help='(json) Input parameters given as a JSON string or as an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' Delete dead-letter messages ''',
description='''
Delete dead-letter messages.
Example: jdc jcq delete-dead-letters --topic-name xxx --consumer-group-id xxx
''',
)
def delete_dead_letters(self):
client_factory = ClientFactory('jcq')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.jcq.apis.DeleteDeadLettersRequest import DeleteDeadLettersRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DeleteDeadLettersRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID of the target region """, dest='regionId', required=False)),
(['--topic-name'], dict(help="""(string) topic name """, dest='topicName', required=True)),
(['--consumer-group-id'], dict(help="""(string) consumerGroupId """, dest='consumerGroupId', required=True)),
(['--message-ids'], dict(help="""(string) messageIds, comma-separated; if omitted, all dead letters are resent """, dest='messageIds', required=False)),
(['--input-json'], dict(help='(json) Input parameters given as a JSON string or as an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' Resend dead-letter messages ''',
description='''
Resend dead-letter messages.
Example: jdc jcq resend-dead-letters --topic-name xxx --consumer-group-id xxx
''',
)
def resend_dead_letters(self):
client_factory = ClientFactory('jcq')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.jcq.apis.ResendDeadLettersRequest import ResendDeadLettersRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = ResendDeadLettersRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID of the target region """, dest='regionId', required=False)),
(['--topic-name'], dict(help="""(string) topic name """, dest='topicName', required=True)),
(['--start-time'], dict(help="""(string) Start time """, dest='startTime', required=True)),
(['--end-time'], dict(help="""(string) End time """, dest='endTime', required=True)),
(['--page-size'], dict(help="""(int) Page size; defaults to 10; valid range [10, 100] """, dest='pageSize', type=int, required=False)),
(['--page-number'], dict(help="""(int) Page number """, dest='pageNumber', type=int, required=False)),
(['--input-json'], dict(help='(json) Input parameters given as a JSON string or as an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' List messages ''',
description='''
List messages.
Example: jdc jcq describe-messages --topic-name xxx --start-time xxx --end-time xxx
''',
)
def describe_messages(self):
client_factory = ClientFactory('jcq')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.jcq.apis.DescribeMessagesRequest import DescribeMessagesRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeMessagesRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID of the target region """, dest='regionId', required=False)),
(['--topic-name'], dict(help="""(string) topic name """, dest='topicName', required=True)),
(['--message-id'], dict(help="""(string) message Id """, dest='messageId', required=True)),
(['--input-json'], dict(help='(json) Input parameters given as a JSON string or as an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' Query message details ''',
description='''
Query message details.
Example: jdc jcq describe-message --topic-name xxx --message-id xxx
''',
)
def describe_message(self):
client_factory = ClientFactory('jcq')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.jcq.apis.DescribeMessageRequest import DescribeMessageRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeMessageRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID of the target region """, dest='regionId', required=False)),
(['--topic-name'], dict(help="""(string) topic name """, dest='topicName', required=True)),
(['--message-id'], dict(help="""(string) message Id """, dest='messageId', required=True)),
(['--input-json'], dict(help='(json) Input parameters given as a JSON string or as an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' Query the message trace ''',
description='''
Query the message trace.
Example: jdc jcq describe-message-trace --topic-name xxx --message-id xxx
''',
)
def describe_message_trace(self):
client_factory = ClientFactory('jcq')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.jcq.apis.DescribeMessageTraceRequest import DescribeMessageTraceRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeMessageTraceRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID of the target region """, dest='regionId', required=False)),
(['--topic-name'], dict(help="""(string) topic name """, dest='topicName', required=True)),
(['--business-id'], dict(help="""(string) business id """, dest='businessId', required=True)),
(['--input-json'], dict(help='(json) Input parameters given as a JSON string or as an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' Query messages by businessId ''',
description='''
Query messages by businessId.
Example: jdc jcq describe-messages-by-business-id --topic-name xxx --business-id xxx
''',
)
def describe_messages_by_business_id(self):
client_factory = ClientFactory('jcq')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.jcq.apis.DescribeMessagesByBusinessIdRequest import DescribeMessagesByBusinessIdRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeMessagesByBusinessIdRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID of the target region """, dest='regionId', required=False)),
(['--topic-name'], dict(help="""(string) topic name """, dest='topicName', required=True)),
(['--input-json'], dict(help='(json) Input parameters given as a JSON string or as an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' Show which permissions on the current topic are granted to which users ''',
description='''
Show which permissions on the current topic are granted to which users.
Example: jdc jcq describe-permission --topic-name xxx
''',
)
def describe_permission(self):
client_factory = ClientFactory('jcq')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.jcq.apis.DescribePermissionRequest import DescribePermissionRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribePermissionRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID of the target region """, dest='regionId', required=False)),
(['--topic-name'], dict(help="""(string) topic name """, dest='topicName', required=True)),
(['--permission'], dict(help="""(string) Permission type, one of [PUB, SUB, PUBSUB] """, dest='permission', required=True)),
(['--target-user-id'], dict(help="""(string) Target user's UserId """, dest='targetUserId', required=True)),
(['--input-json'], dict(help='(json) Input parameters given as a JSON string or as an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' Grant the target user a specific permission on the current topic ''',
description='''
Grant the target user a specific permission on the current topic.
Example: jdc jcq add-permission --topic-name xxx --permission xxx --target-user-id xxx
''',
)
def add_permission(self):
client_factory = ClientFactory('jcq')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.jcq.apis.AddPermissionRequest import AddPermissionRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = AddPermissionRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID of the target region """, dest='regionId', required=False)),
(['--topic-name'], dict(help="""(string) topic name """, dest='topicName', required=True)),
(['--permission'], dict(help="""(string) Permission type, one of [PUB, SUB, PUBSUB] """, dest='permission', required=True)),
(['--target-user-id'], dict(help="""(string) Target user's UserId """, dest='targetUserId', required=True)),
(['--input-json'], dict(help='(json) Input parameters given as a JSON string or as an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' Revoke a permission the current topic granted to the target user ''',
description='''
Revoke a permission the current topic granted to the target user.
Example: jdc jcq remove-permission --topic-name xxx --permission xxx --target-user-id xxx
''',
)
def remove_permission(self):
client_factory = ClientFactory('jcq')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.jcq.apis.RemovePermissionRequest import RemovePermissionRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = RemovePermissionRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID of the target region """, dest='regionId', required=False)),
(['--topic-name'], dict(help="""(string) topic name """, dest='topicName', required=True)),
(['--consumer-group-filter'], dict(help="""(string) consumerGroupFilter, a filter on consumerGroupId """, dest='consumerGroupFilter', required=False)),
(['--page-size'], dict(help="""(int) Page size; defaults to 10; valid range [10, 100] """, dest='pageSize', type=int, required=False)),
(['--page-number'], dict(help="""(int) Page number """, dest='pageNumber', type=int, required=False)),
(['--input-json'], dict(help='(json) Input parameters given as a JSON string or as an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' List subscriptions ''',
description='''
List subscriptions.
Example: jdc jcq describe-subscriptions --topic-name xxx
''',
)
def describe_subscriptions(self):
client_factory = ClientFactory('jcq')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.jcq.apis.DescribeSubscriptionsRequest import DescribeSubscriptionsRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeSubscriptionsRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID of the target region """, dest='regionId', required=False)),
(['--topic-name'], dict(help="""(string) topic name """, dest='topicName', required=True)),
(['--consumer-group-id'], dict(help="""(string) consumerGroupId """, dest='consumerGroupId', required=True)),
(['--message-invisible-time-in-seconds'], dict(help="""(int) Message invisible time, in seconds """, dest='messageInvisibleTimeInSeconds', type=int, required=False)),
(['--dlq-enable'], dict(help="""(bool) Whether to enable the dead-letter queue [true, false] """, dest='dlqEnable', required=False)),
(['--max-retry-times'], dict(help="""(int) Maximum retry count; required when dlqEnable is true; range [0, 16] """, dest='maxRetryTimes', type=int, required=False)),
(['--input-json'], dict(help='(json) Input parameters given as a JSON string or as an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' Create a subscription ''',
description='''
Create a subscription.
Example: jdc jcq create-subscription --topic-name xxx --consumer-group-id xxx
''',
)
def create_subscription(self):
client_factory = ClientFactory('jcq')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.jcq.apis.CreateSubscriptionRequest import CreateSubscriptionRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = CreateSubscriptionRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID of the target region """, dest='regionId', required=False)),
(['--topic-name'], dict(help="""(string) topic name """, dest='topicName', required=True)),
(['--consumer-group-id'], dict(help="""(string) consumerGroupId """, dest='consumerGroupId', required=True)),
(['--input-json'], dict(help='(json) Input parameters given as a JSON string or as an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' Query subscription details ''',
description='''
Query subscription details.
Example: jdc jcq describe-subscription --topic-name xxx --consumer-group-id xxx
''',
)
def describe_subscription(self):
client_factory = ClientFactory('jcq')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.jcq.apis.DescribeSubscriptionRequest import DescribeSubscriptionRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeSubscriptionRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID of the target region """, dest='regionId', required=False)),
(['--topic-name'], dict(help="""(string) topic name """, dest='topicName', required=True)),
(['--consumer-group-id'], dict(help="""(string) consumerGroupId """, dest='consumerGroupId', required=True)),
(['--max-retry-times'], dict(help="""(int) Maximum retry count """, dest='maxRetryTimes', type=int, required=False)),
(['--message-invisible-time-in-seconds'], dict(help="""(int) Message ack timeout """, dest='messageInvisibleTimeInSeconds', type=int, required=False)),
(['--dlq-enable'], dict(help="""(bool) Whether to enable the dead-letter queue [true, false] """, dest='dlqEnable', required=False)),
(['--input-json'], dict(help='(json) Input parameters given as a JSON string or as an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' Modify a subscription ''',
description='''
Modify a subscription.
Example: jdc jcq modify-subscription-attribute --topic-name xxx --consumer-group-id xxx
''',
)
def modify_subscription_attribute(self):
client_factory = ClientFactory('jcq')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.jcq.apis.ModifySubscriptionAttributeRequest import ModifySubscriptionAttributeRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = ModifySubscriptionAttributeRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID of the target region """, dest='regionId', required=False)),
(['--topic-name'], dict(help="""(string) topic name """, dest='topicName', required=True)),
(['--consumer-group-id'], dict(help="""(string) consumerGroupId """, dest='consumerGroupId', required=True)),
(['--input-json'], dict(help='(json) Input parameters given as a JSON string or as an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' Delete a subscription ''',
description='''
Delete a subscription.
Example: jdc jcq delete-subscription --topic-name xxx --consumer-group-id xxx
''',
)
def delete_subscription(self):
client_factory = ClientFactory('jcq')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.jcq.apis.DeleteSubscriptionRequest import DeleteSubscriptionRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DeleteSubscriptionRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID of the target region """, dest='regionId', required=False)),
(['--topic-name'], dict(help="""(string) topic name """, dest='topicName', required=True)),
(['--consumer-group-id'], dict(help="""(string) consumerGroupId """, dest='consumerGroupId', required=True)),
(['--input-json'], dict(help='(json) Input parameters given as a JSON string or as an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' Clean up messages ''',
description='''
Clean up messages.
Example: jdc jcq clean-messages --topic-name xxx --consumer-group-id xxx
''',
)
def clean_messages(self):
client_factory = ClientFactory('jcq')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.jcq.apis.CleanMessagesRequest import CleanMessagesRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = CleanMessagesRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID of the target region """, dest='regionId', required=False)),
(['--topic-name'], dict(help="""(string) topic name """, dest='topicName', required=True)),
(['--consumer-group-id'], dict(help="""(string) consumerGroupId """, dest='consumerGroupId', required=True)),
(['--time'], dict(help="""(string) Time """, dest='time', required=True)),
(['--input-json'], dict(help='(json) Input parameters given as a JSON string or as an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' Reset the consume offset ''',
description='''
Reset the consume offset.
Example: jdc jcq reset-consume-offset --topic-name xxx --consumer-group-id xxx --time xxx
''',
)
def reset_consume_offset(self):
client_factory = ClientFactory('jcq')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.jcq.apis.ResetConsumeOffsetRequest import ResetConsumeOffsetRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = ResetConsumeOffsetRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID of the target region """, dest='regionId', required=False)),
(['--page-size'], dict(help="""(int) Page size; defaults to 10; valid range [10, 100] """, dest='pageSize', type=int, required=False)),
(['--page-number'], dict(help="""(int) Page number """, dest='pageNumber', type=int, required=False)),
(['--topic-filter'], dict(help="""(string) Filter on topic name, case-insensitive """, dest='topicFilter', required=False)),
(['--tag-filters'], dict(help="""(array: tagFilter) Tag filter conditions """, dest='tagFilters', required=False)),
(['--input-json'], dict(help='(json) Input parameters given as a JSON string or as an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' List topics ''',
description='''
List topics.
Example: jdc jcq describe-topics
''',
)
def describe_topics(self):
client_factory = ClientFactory('jcq')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.jcq.apis.DescribeTopicsRequest import DescribeTopicsRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeTopicsRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID of the target region """, dest='regionId', required=False)),
(['--topic-name'], dict(help="""(string) topic name """, dest='topicName', required=True)),
(['--type'], dict(help="""(string) Type, one of [normal, global_order] """, dest='type', required=True)),
(['--description'], dict(help="""(string) Description, at most 255 characters """, dest='description', required=False)),
(['--input-json'], dict(help='(json) Input parameters given as a JSON string or as an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' Create a topic with the given name ''',
description='''
Create a topic with the given name.
Example: jdc jcq create-topic --topic-name xxx --type xxx
''',
)
def create_topic(self):
client_factory = ClientFactory('jcq')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.jcq.apis.CreateTopicRequest import CreateTopicRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = CreateTopicRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID of the target region """, dest='regionId', required=False)),
(['--topic-name'], dict(help="""(string) topic name """, dest='topicName', required=True)),
(['--input-json'], dict(help='(json) Input parameters given as a JSON string or as an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' Query topic details ''',
description='''
Query topic details.
Example: jdc jcq describe-topic --topic-name xxx
''',
)
def describe_topic(self):
client_factory = ClientFactory('jcq')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.jcq.apis.DescribeTopicRequest import DescribeTopicRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeTopicRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) Region ID of the target region """, dest='regionId', required=False)),
(['--topic-name'], dict(help="""(string) topic name """, dest='topicName', required=True)),
(['--input-json'], dict(help='(json) Input parameters given as a JSON string or as an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) Custom user headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' Delete a single topic ''',
description='''
Delete a single topic.
Example: jdc jcq delete-topic --topic-name xxx
''',
)
def delete_topic(self):
client_factory = ClientFactory('jcq')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.jcq.apis.DeleteTopicRequest import DeleteTopicRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DeleteTopicRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--api'], dict(help="""(string) api name """, choices=['describe-access-point','describe-consumer-group-ids','describe-dead-letter-numbers','describe-dead-letter-numbers-with-topic','list-dead-letters','delete-dead-letters','resend-dead-letters','describe-messages','describe-message','describe-message-trace','describe-messages-by-business-id','describe-permission','add-permission','remove-permission','describe-subscriptions','create-subscription','describe-subscription','modify-subscription-attribute','delete-subscription','clean-messages','reset-consume-offset','describe-topics','create-topic','describe-topic','delete-topic',], required=True)),
],
formatter_class=RawTextHelpFormatter,
help=''' Generate an empty JSON skeleton string for a single API ''',
description='''
Generate an empty JSON skeleton string for a single API.
Example: jdc jcq generate-skeleton --api describe-access-point ''',
)
def generate_skeleton(self):
skeleton = Skeleton('jcq', self.app.pargs.api)
skeleton.show()
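# Illustrative invocations of the commands defined above (placeholder values,
# mirroring the 'xxx' placeholders used in the help text; the topic name and
# type shown are hypothetical):
#   jdc jcq describe-topics
#   jdc jcq create-topic --topic-name my-topic --type normal
#   jdc jcq generate-skeleton --api create-topic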
| [per-file quality-signal columns for the record above omitted as extraction residue] |
--- next record ---
hexsha: 1b0cfdd372833842e780c3b97429e4836cb7a7ed | size: 27202 | ext: py | lang: Python
max_stars: path tests/test_block_blob.py, repo yozik04/azure-storage-python, head bc985392407459717634b46eda7d8f66b7ffb4dc, licenses ["Apache-2.0"], count 1, events 2020-07-29T15:04:40.000Z to 2020-07-29T15:04:40.000Z
max_issues: same path/repo/head and licenses, count 7, events 2017-01-18T00:10:27.000Z to 2017-02-15T04:24:08.000Z
max_forks: same path/repo/head and licenses, count 2, events 2016-08-05T08:41:38.000Z to 2020-12-12T21:11:32.000Z
content:
# coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import os
import unittest
from azure.common import AzureHttpError
from azure.storage.blob import (
BlobBlock,
BlobBlockList,
BlockBlobService,
ContentSettings,
)
from tests.testcase import (
StorageTestCase,
TestMode,
record,
)
#------------------------------------------------------------------------------
TEST_BLOB_PREFIX = 'blob'
FILE_PATH = 'blob_input.temp.dat'
LARGE_BLOB_SIZE = 64 * 1024 + 5
#------------------------------------------------------------------------------
class StorageBlockBlobTest(StorageTestCase):
def setUp(self):
super(StorageBlockBlobTest, self).setUp()
self.bs = self._create_storage_service(BlockBlobService, self.settings)
self.container_name = self.get_resource_name('utcontainer')
if not self.is_playback():
self.bs.create_container(self.container_name)
# test chunking functionality by reducing the threshold
# for chunking and the size of each chunk, otherwise
# the tests would take too long to execute
self.bs.MAX_BLOCK_SIZE = 4 * 1024
self.bs.MAX_SINGLE_PUT_SIZE = 32 * 1024
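# Editorial note: with these reduced limits, LARGE_BLOB_SIZE (64 * 1024 + 5 =
# 65541 bytes) exceeds MAX_SINGLE_PUT_SIZE (32 KiB), so the create_blob_*
# helpers below take the chunked-upload path, splitting the payload into
# ceil(65541 / 4096) = 17 blocks of at most MAX_BLOCK_SIZE bytes each.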
def tearDown(self):
if not self.is_playback():
try:
self.bs.delete_container(self.container_name)
except Exception:
pass
if os.path.isfile(FILE_PATH):
try:
os.remove(FILE_PATH)
except Exception:
pass
return super(StorageBlockBlobTest, self).tearDown()
#--Helpers-----------------------------------------------------------------
def _get_blob_reference(self):
return self.get_resource_name(TEST_BLOB_PREFIX)
def _create_blob(self):
blob_name = self._get_blob_reference()
self.bs.create_blob_from_bytes(self.container_name, blob_name, b'')
return blob_name
def assertBlobEqual(self, container_name, blob_name, expected_data):
actual_data = self.bs.get_blob_to_bytes(container_name, blob_name)
self.assertEqual(actual_data.content, expected_data)
class NonSeekableFile(object):
def __init__(self, wrapped_file):
self.wrapped_file = wrapped_file
def write(self, data):
self.wrapped_file.write(data)
def read(self, count):
return self.wrapped_file.read(count)
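# Editorial note: NonSeekableFile exposes only read/write and deliberately hides
# seek()/tell(). The non-seekable upload tests below pass max_connections=1,
# since a parallel upload would need to seek each worker to its own block offset.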
#--Test cases for block blobs --------------------------------------------
@record
def test_put_block(self):
# Arrange
blob_name = self._create_blob()
# Act
for i in range(5):
resp = self.bs.put_block(self.container_name,
blob_name,
'block {0}'.format(i).encode('utf-8'),
i)
self.assertIsNone(resp)
# Assert
@record
def test_put_block_unicode(self):
# Arrange
blob_name = self._create_blob()
# Act
with self.assertRaises(TypeError):
resp = self.bs.put_block(self.container_name, blob_name, u'啊齄丂狛狜', '1')
# Assert
@record
def test_put_block_with_md5(self):
# Arrange
blob_name = self._create_blob()
# Act
self.bs.put_block(self.container_name,
blob_name,
b'block',
1,
validate_content=True)
# Assert
@record
def test_put_block_list(self):
# Arrange
blob_name = self._get_blob_reference()
self.bs.put_block(self.container_name, blob_name, b'AAA', '1')
self.bs.put_block(self.container_name, blob_name, b'BBB', '2')
self.bs.put_block(self.container_name, blob_name, b'CCC', '3')
# Act
block_list = [BlobBlock(id='1'), BlobBlock(id='2'), BlobBlock(id='3')]
self.bs.put_block_list(self.container_name, blob_name, block_list)
# Assert
blob = self.bs.get_blob_to_bytes(self.container_name, blob_name)
self.assertEqual(blob.content, b'AAABBBCCC')
@record
def test_put_block_list_invalid_block_id(self):
# Arrange
blob_name = self._get_blob_reference()
self.bs.put_block(self.container_name, blob_name, b'AAA', '1')
self.bs.put_block(self.container_name, blob_name, b'BBB', '2')
self.bs.put_block(self.container_name, blob_name, b'CCC', '3')
# Act
try:
block_list = [ BlobBlock(id='1'), BlobBlock(id='2'), BlobBlock(id='4')]
self.bs.put_block_list(self.container_name, blob_name, block_list)
self.fail()
except AzureHttpError as e:
self.assertGreaterEqual(str(e).find('specified block list is invalid'), 0)
# Assert
@record
def test_put_block_list_with_md5(self):
# Arrange
blob_name = self._get_blob_reference()
self.bs.put_block(self.container_name, blob_name, b'AAA', '1')
self.bs.put_block(self.container_name, blob_name, b'BBB', '2')
self.bs.put_block(self.container_name, blob_name, b'CCC', '3')
# Act
block_list = [BlobBlock(id='1'), BlobBlock(id='2'), BlobBlock(id='3')]
self.bs.put_block_list(self.container_name, blob_name, block_list, validate_content=True)
# Assert
@record
def test_get_block_list_no_blocks(self):
# Arrange
blob_name = self._create_blob()
# Act
block_list = self.bs.get_block_list(self.container_name, blob_name, None, 'all')
# Assert
self.assertIsNotNone(block_list)
self.assertIsInstance(block_list, BlobBlockList)
self.assertEqual(len(block_list.uncommitted_blocks), 0)
self.assertEqual(len(block_list.committed_blocks), 0)
@record
def test_get_block_list_uncommitted_blocks(self):
# Arrange
blob_name = self._get_blob_reference()
self.bs.put_block(self.container_name, blob_name, b'AAA', '1')
self.bs.put_block(self.container_name, blob_name, b'BBB', '2')
self.bs.put_block(self.container_name, blob_name, b'CCC', '3')
# Act
block_list = self.bs.get_block_list(self.container_name, blob_name, None, 'all')
# Assert
self.assertIsNotNone(block_list)
self.assertIsInstance(block_list, BlobBlockList)
self.assertEqual(len(block_list.uncommitted_blocks), 3)
self.assertEqual(len(block_list.committed_blocks), 0)
self.assertEqual(block_list.uncommitted_blocks[0].id, '1')
self.assertEqual(block_list.uncommitted_blocks[0].size, 3)
self.assertEqual(block_list.uncommitted_blocks[1].id, '2')
self.assertEqual(block_list.uncommitted_blocks[1].size, 3)
self.assertEqual(block_list.uncommitted_blocks[2].id, '3')
self.assertEqual(block_list.uncommitted_blocks[2].size, 3)
@record
def test_get_block_list_committed_blocks(self):
# Arrange
blob_name = self._get_blob_reference()
self.bs.put_block(self.container_name, blob_name, b'AAA', '1')
self.bs.put_block(self.container_name, blob_name, b'BBB', '2')
self.bs.put_block(self.container_name, blob_name, b'CCC', '3')
block_list = [BlobBlock(id='1'), BlobBlock(id='2'), BlobBlock(id='3')]
self.bs.put_block_list(self.container_name, blob_name, block_list)
# Act
block_list = self.bs.get_block_list(self.container_name, blob_name, None, 'all')
# Assert
self.assertIsNotNone(block_list)
self.assertIsInstance(block_list, BlobBlockList)
self.assertEqual(len(block_list.uncommitted_blocks), 0)
self.assertEqual(len(block_list.committed_blocks), 3)
self.assertEqual(block_list.committed_blocks[0].id, '1')
self.assertEqual(block_list.committed_blocks[0].size, 3)
self.assertEqual(block_list.committed_blocks[1].id, '2')
self.assertEqual(block_list.committed_blocks[1].size, 3)
self.assertEqual(block_list.committed_blocks[2].id, '3')
self.assertEqual(block_list.committed_blocks[2].size, 3)
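# Editorial note: the block-list tests above walk the two-phase block-blob
# protocol: put_block() stages blocks, which get_block_list() reports as
# uncommitted, and put_block_list() commits an explicit ordering, after which
# the same block IDs are reported as committed.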
@record
def test_create_blob_from_bytes_single_put(self):
# Arrange
blob_name = self._get_blob_reference()
data = b'hello world'
# Act
self.bs.create_blob_from_bytes(self.container_name, blob_name, data)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
@record
def test_create_from_bytes_blob_unicode(self):
# Arrange
blob_name = self._get_blob_reference()
# Act
data = u'hello world'
with self.assertRaises(TypeError):
resp = self.bs.create_blob_from_bytes(self.container_name, blob_name, data)
# Assert
def test_create_from_bytes_blob_with_lease_id(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._create_blob()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
lease_id = self.bs.acquire_blob_lease(self.container_name, blob_name)
# Act
self.bs.create_blob_from_bytes(self.container_name, blob_name, data, lease_id=lease_id)
# Assert
blob = self.bs.get_blob_to_bytes(self.container_name, blob_name, lease_id=lease_id)
self.assertEqual(blob.content, data)
def test_create_blob_from_bytes_with_metadata(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
metadata = {'hello': 'world', 'number': '42'}
# Act
self.bs.create_blob_from_bytes(self.container_name, blob_name, data, metadata=metadata)
# Assert
md = self.bs.get_blob_metadata(self.container_name, blob_name)
self.assertDictEqual(md, metadata)
def test_create_blob_from_bytes_with_properties(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
# Act
content_settings = ContentSettings(
content_type='image/png',
content_language='spanish')
self.bs.create_blob_from_bytes(self.container_name, blob_name, data,
content_settings=content_settings)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
properties = self.bs.get_blob_properties(self.container_name, blob_name).properties
self.assertEqual(properties.content_settings.content_type, content_settings.content_type)
self.assertEqual(properties.content_settings.content_language, content_settings.content_language)
def test_create_blob_from_bytes_with_progress(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
# Act
progress = []
def callback(current, total):
progress.append((current, total))
self.bs.create_blob_from_bytes(self.container_name, blob_name, data, progress_callback=callback)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
self.assert_upload_progress(len(data), self.bs.MAX_BLOCK_SIZE, progress)
def test_create_blob_from_bytes_with_index(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
# Act
self.bs.create_blob_from_bytes(self.container_name, blob_name, data, 3)
# Assert
self.assertEqual(data[3:], self.bs.get_blob_to_bytes(self.container_name, blob_name).content)
@record
def test_create_blob_from_bytes_with_index_and_count(self):
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
# Act
self.bs.create_blob_from_bytes(self.container_name, blob_name, data, 3, 5)
# Assert
self.assertEqual(data[3:8], self.bs.get_blob_to_bytes(self.container_name, blob_name).content)
@record
def test_create_blob_from_bytes_with_index_and_count_and_properties(self):
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
# Act
content_settings = ContentSettings(
content_type='image/png',
content_language='spanish')
self.bs.create_blob_from_bytes(self.container_name, blob_name, data, 3, 5, content_settings=content_settings)
# Assert
self.assertEqual(data[3:8], self.bs.get_blob_to_bytes(self.container_name, blob_name).content)
properties = self.bs.get_blob_properties(self.container_name, blob_name).properties
self.assertEqual(properties.content_settings.content_type, content_settings.content_type)
self.assertEqual(properties.content_settings.content_language, content_settings.content_language)
@record
def test_create_blob_from_bytes_non_parallel(self):
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
# Act
self.bs.create_blob_from_bytes(self.container_name, blob_name, data, max_connections=1)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
def test_create_blob_from_path(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
self.bs.create_blob_from_path(self.container_name, blob_name, FILE_PATH)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
@record
def test_create_blob_from_path_non_parallel(self):
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(100)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
self.bs.create_blob_from_path(self.container_name, blob_name, FILE_PATH, max_connections=1)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
def test_create_blob_from_path_with_progress(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
progress = []
def callback(current, total):
progress.append((current, total))
self.bs.create_blob_from_path(self.container_name, blob_name, FILE_PATH,
progress_callback=callback)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
self.assert_upload_progress(len(data), self.bs.MAX_BLOCK_SIZE, progress)
def test_create_blob_from_path_with_properties(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
content_settings = ContentSettings(
content_type='image/png',
content_language='spanish')
self.bs.create_blob_from_path(self.container_name, blob_name, FILE_PATH, content_settings=content_settings)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
properties = self.bs.get_blob_properties(self.container_name, blob_name).properties
self.assertEqual(properties.content_settings.content_type, content_settings.content_type)
self.assertEqual(properties.content_settings.content_language, content_settings.content_language)
def test_create_blob_from_stream_chunked_upload(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
with open(FILE_PATH, 'rb') as stream:
self.bs.create_blob_from_stream(self.container_name, blob_name, stream)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
def test_create_blob_from_stream_non_seekable_chunked_upload_known_size(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
blob_size = len(data) - 66
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
with open(FILE_PATH, 'rb') as stream:
non_seekable_file = StorageBlockBlobTest.NonSeekableFile(stream)
self.bs.create_blob_from_stream(self.container_name, blob_name, non_seekable_file,
count=blob_size, max_connections=1)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data[:blob_size])
def test_create_blob_from_stream_non_seekable_chunked_upload_unknown_size(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
with open(FILE_PATH, 'rb') as stream:
non_seekable_file = StorageBlockBlobTest.NonSeekableFile(stream)
self.bs.create_blob_from_stream(self.container_name, blob_name,
non_seekable_file, max_connections=1)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
def test_create_blob_from_stream_with_progress_chunked_upload(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
progress = []
def callback(current, total):
progress.append((current, total))
with open(FILE_PATH, 'rb') as stream:
self.bs.create_blob_from_stream(self.container_name, blob_name, stream, progress_callback=callback)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
self.assert_upload_progress(len(data), self.bs.MAX_BLOCK_SIZE, progress, unknown_size=True)
def test_create_blob_from_stream_chunked_upload_with_count(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
blob_size = len(data) - 301
with open(FILE_PATH, 'rb') as stream:
resp = self.bs.create_blob_from_stream(self.container_name, blob_name, stream, blob_size)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data[:blob_size])
def test_create_blob_from_stream_chunked_upload_with_count_and_properties(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
content_settings = ContentSettings(
content_type='image/png',
content_language='spanish')
blob_size = len(data) - 301
with open(FILE_PATH, 'rb') as stream:
self.bs.create_blob_from_stream(self.container_name, blob_name, stream,
blob_size, content_settings=content_settings)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data[:blob_size])
properties = self.bs.get_blob_properties(self.container_name, blob_name).properties
self.assertEqual(properties.content_settings.content_type, content_settings.content_type)
self.assertEqual(properties.content_settings.content_language, content_settings.content_language)
def test_create_blob_from_stream_chunked_upload_with_properties(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
content_settings = ContentSettings(
content_type='image/png',
content_language='spanish')
with open(FILE_PATH, 'rb') as stream:
self.bs.create_blob_from_stream(self.container_name, blob_name, stream,
content_settings=content_settings)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
properties = self.bs.get_blob_properties(self.container_name, blob_name).properties
self.assertEqual(properties.content_settings.content_type, content_settings.content_type)
self.assertEqual(properties.content_settings.content_language, content_settings.content_language)
@record
def test_create_blob_from_text(self):
# Arrange
blob_name = self._get_blob_reference()
text = u'hello 啊齄丂狛狜 world'
data = text.encode('utf-8')
# Act
self.bs.create_blob_from_text(self.container_name, blob_name, text)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
@record
def test_create_blob_from_text_with_encoding(self):
# Arrange
blob_name = self._get_blob_reference()
text = u'hello 啊齄丂狛狜 world'
data = text.encode('utf-16')
# Act
self.bs.create_blob_from_text(self.container_name, blob_name, text, 'utf-16')
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
@record
def test_create_blob_from_text_with_encoding_and_progress(self):
# Arrange
blob_name = self._get_blob_reference()
text = u'hello 啊齄丂狛狜 world'
data = text.encode('utf-16')
# Act
progress = []
def callback(current, total):
progress.append((current, total))
self.bs.create_blob_from_text(self.container_name, blob_name, text, 'utf-16',
progress_callback=callback)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
self.assert_upload_progress(len(data), self.bs.MAX_BLOCK_SIZE, progress)
def test_create_blob_from_text_chunked_upload(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_text_data(LARGE_BLOB_SIZE)
encoded_data = data.encode('utf-8')
# Act
self.bs.create_blob_from_text(self.container_name, blob_name, data)
# Assert
self.assertBlobEqual(self.container_name, blob_name, encoded_data)
@record
def test_create_blob_with_md5(self):
# Arrange
blob_name = self._get_blob_reference()
data = b'hello world'
# Act
self.bs.create_blob_from_bytes(self.container_name, blob_name, data,
validate_content=True)
# Assert
def test_create_blob_with_md5_chunked(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
# Act
self.bs.create_blob_from_bytes(self.container_name, blob_name, data,
validate_content=True)
# Assert
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
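#------------------------------------------------------------------------------
# A minimal sketch (an illustration, not part of the suite) of what
# validate_content=True implies in the MD5 tests above: the client computes an
# MD5 over each upload payload and sends it as the Content-MD5 header for the
# service to verify. content_md5 is a hypothetical helper name.
import base64
import hashlib

def content_md5(payload):
    # base64-encoded MD5 digest: the wire format of the Content-MD5 header
    return base64.b64encode(hashlib.md5(payload).digest()).decode('utf-8')

# content_md5(b'hello world') == 'XrY7u+Ae7tCTyyK7j1rNww=='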
| 37.110505
| 117
| 0.647048
| 3,348
| 27,202
| 4.938172
| 0.074373
| 0.060485
| 0.091514
| 0.110506
| 0.867235
| 0.859493
| 0.833122
| 0.807355
| 0.763624
| 0.763624
| 0
| 0.006223
| 0.249724
| 27,202
| 732
| 118
| 37.161202
| 0.803861
| 0.114624
| 0
| 0.644028
| 0
| 0
| 0.018635
| 0
| 0
| 0
| 0
| 0
| 0.163934
| 1
| 0.112412
| false
| 0.004684
| 0.01171
| 0.004684
| 0.177986
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1b2a15737590b3f95ed6b66ea9f56d8e9439f675
| 170
|
py
|
Python
|
tbats/tbats/ParamsOptimizer.py
|
series-temporais/tbats
|
1f2e0b5e769250c8ec0604fd75ef08ebbe251d37
|
[
"MIT"
] | 1
|
2019-07-21T15:38:12.000Z
|
2019-07-21T15:38:12.000Z
|
tbats/tbats/ParamsOptimizer.py
|
arita37/tbats
|
4e726919f08e39e74dd70a592b5258dfc7b25953
|
[
"MIT"
] | null | null | null |
tbats/tbats/ParamsOptimizer.py
|
arita37/tbats
|
4e726919f08e39e74dd70a592b5258dfc7b25953
|
[
"MIT"
] | null | null | null |
from ..abstract import ParamsOptimizer as AbstractParamsOptimizer
class ParamsOptimizer(AbstractParamsOptimizer):
"""See parent class for documentation"""
pass
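# Hedged note: this module only re-binds the abstract optimizer under the
# concrete package path; all behaviour is inherited unchanged, so importing
# ParamsOptimizer from here yields the abstract implementation's methods.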
| 24.285714
| 65
| 0.794118
| 15
| 170
| 9
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141176
| 170
| 6
| 66
| 28.333333
| 0.924658
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
1b35ed499ef89f707969b69e70d8f6f5fb674ee2
| 34
|
py
|
Python
|
src/envs/lbforaging/__init__.py
|
LAMDA-RL/MAIC
|
715ab22b531f9a6867276f85e1d8c1342d8b6d00
|
[
"Apache-2.0"
] | 7
|
2022-02-23T10:41:29.000Z
|
2022-03-16T07:01:58.000Z
|
src/envs/lbforaging/__init__.py
|
LAMDA-RL/MAIC
|
715ab22b531f9a6867276f85e1d8c1342d8b6d00
|
[
"Apache-2.0"
] | null | null | null |
src/envs/lbforaging/__init__.py
|
LAMDA-RL/MAIC
|
715ab22b531f9a6867276f85e1d8c1342d8b6d00
|
[
"Apache-2.0"
] | 4
|
2022-02-22T13:59:19.000Z
|
2022-03-30T16:23:23.000Z
|
from .foraging import ForagingEnv
| 17
| 33
| 0.852941
| 4
| 34
| 7.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.966667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1b3c716408bc6d9eae78c2f5e2b5c485e151e0ab
| 168
|
py
|
Python
|
src/punits/consts/__init__.py
|
ju-sh/punits
|
a3a82d276e0545a89e6505cf3324788a3c067118
|
[
"MIT"
] | null | null | null |
src/punits/consts/__init__.py
|
ju-sh/punits
|
a3a82d276e0545a89e6505cf3324788a3c067118
|
[
"MIT"
] | null | null | null |
src/punits/consts/__init__.py
|
ju-sh/punits
|
a3a82d276e0545a89e6505cf3324788a3c067118
|
[
"MIT"
] | null | null | null |
"""
Makes CONVERSIONS and LABELS visible directly from consts sub-module
"""
from punits.consts.conversions import CONVERSIONS
from punits.consts.labels import LABELS
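# A hedged usage sketch: the re-exports above let callers write
#     from punits.consts import CONVERSIONS, LABELS
# instead of reaching into punits.consts.conversions / punits.consts.labels.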
| 24
| 68
| 0.815476
| 22
| 168
| 6.227273
| 0.545455
| 0.145985
| 0.233577
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119048
| 168
| 6
| 69
| 28
| 0.925676
| 0.404762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1bd76f926d7ad9c7a18ad82037ce08df6f6b07bb
| 28,291
|
py
|
Python
|
src/tests/unit/enterprise_edr/test_enterprise_edr_process.py
|
avanbrunt-cb/carbon-black-cloud-sdk-python
|
f737ceaf6c69c1efea128d8dfb219c738cc7fc33
|
[
"MIT"
] | null | null | null |
src/tests/unit/enterprise_edr/test_enterprise_edr_process.py
|
avanbrunt-cb/carbon-black-cloud-sdk-python
|
f737ceaf6c69c1efea128d8dfb219c738cc7fc33
|
[
"MIT"
] | null | null | null |
src/tests/unit/enterprise_edr/test_enterprise_edr_process.py
|
avanbrunt-cb/carbon-black-cloud-sdk-python
|
f737ceaf6c69c1efea128d8dfb219c738cc7fc33
|
[
"MIT"
] | null | null | null |
"""Testing Process and Tree objects of cbc_sdk.enterprise_edr"""
import pytest
import logging
from cbc_sdk.enterprise_edr import Process, Tree, Event, Query, AsyncProcessQuery
from cbc_sdk.rest_api import CBCloudAPI
from cbc_sdk.errors import ObjectNotFoundError, ApiError
from tests.unit.fixtures.CBCSDKMock import CBCSDKMock
from tests.unit.fixtures.enterprise_edr.mock_process import (GET_PROCESS_SUMMARY_RESP,
GET_PROCESS_SUMMARY_RESP_1,
GET_PROCESS_SUMMARY_RESP_2,
GET_TREE_RESP,
GET_PROCESS_VALIDATION_RESP,
POST_PROCESS_SEARCH_JOB_RESP,
GET_PROCESS_SEARCH_JOB_RESP,
GET_PROCESS_SEARCH_JOB_RESULTS_RESP_1,
GET_PROCESS_SEARCH_PARENT_JOB_RESULTS_RESP)
logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', level=logging.DEBUG, filename='log.txt')
@pytest.fixture(scope="function")
def cb():
"""Create CBCloudAPI singleton"""
return CBCloudAPI(url="https://example.com",
org_key="test",
token="abcd/1234",
ssl_verify=False)
@pytest.fixture(scope="function")
def cbcsdk_mock(monkeypatch, cb):
"""Mocks CBC SDK for unit tests"""
return CBCSDKMock(monkeypatch, cb)
# ==================================== UNIT TESTS BELOW ====================================
def test_process_select(cbcsdk_mock):
"""Testing Process Querying with select()"""
cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/summary", GET_PROCESS_SUMMARY_RESP)
cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/search_validation",
GET_PROCESS_VALIDATION_RESP)
api = cbcsdk_mock.api
guid = 'WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00'
process = api.select(Process, guid)
assert process.summary is not None
assert process.siblings is not None
summary = api.select(Process.Summary, guid)
assert summary is not None
def test_summary_select(cbcsdk_mock):
"""Test querying for a Proc Summary."""
api = cbcsdk_mock.api
guid = 'WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00'
summary = api.select(Process.Summary).where(f"process_guid:{guid}")
assert isinstance(summary, Query)
def test_process_events(cbcsdk_mock):
"""Testing Process.events()."""
api = cbcsdk_mock.api
guid = 'WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00'
process = api.select(Process, guid)
assert isinstance(process.events(), Query)
# create the events query object to compare
events = process.events(event_type="modload")
# emulate the manual select in Process.events()
query = api.select(Event).where(process_guid=guid)
assert [isinstance(q, Query) for q in [events, query]]
# extract and compare the parameters from each Query
events_query_params = events._query_builder._collapse()
query_params = query.and_(event_type="modload")._query_builder._collapse()
expected_params = ("process_guid:WNEXFKQ7\\-0002b226\\-000015bd\\-00000000\\-"
"1d6225bbba74c00 AND event_type:modload")
assert events_query_params == query_params
assert events_query_params == expected_params
def test_process_events_with_criteria_exclusions(cbcsdk_mock):
"""Testing the add_criteria() method when selecting events."""
api = cbcsdk_mock.api
guid = 'WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00'
process = api.select(Process, guid)
assert isinstance(process.events(), Query)
# create the events query object to compare
events = process.events(event_type="modload").add_criteria("crossproc_action", ["ACTION_PROCESS_API_CALL"]).add_exclusions("crossproc_effective_reputation", ["REP_WHITE"])
events.add_criteria("crossproc_action", "SOME_OTHER_CRIT")
# emulate the manual select in Process.events()
query = api.select(Event).where(process_guid=guid).add_criteria("crossproc_action", ["ACTION_PROCESS_API_CALL"]).add_exclusions("crossproc_effective_reputation", ["REP_WHITE"])
query.add_criteria("crossproc_action", "SOME_OTHER_CRIT")
assert [isinstance(q, Query) for q in [events, query]]
# extract and compare the parameters from each Query
events_query_params = events._get_query_parameters()
query_params = query.and_(event_type="modload")._get_query_parameters()
expected_params = {"query": "process_guid:WNEXFKQ7\\-0002b226\\-000015bd\\-00000000\\-"
"1d6225bbba74c00 AND event_type:modload",
"criteria": {
"crossproc_action": ["ACTION_PROCESS_API_CALL",
"SOME_OTHER_CRIT"],
},
"exclusions": {
"crossproc_effective_reputation": ["REP_WHITE"]
},
"process_guid": "WNEXFKQ7\\-0002b226\\-000015bd\\-00000000\\-1d6225bbba74c00"
}
assert events_query_params == query_params
assert events_query_params == expected_params
def test_process_events_exceptions(cbcsdk_mock):
"""Testing raising an Exception when using Query.add_criteria() and Query.add_exclusions()."""
api = cbcsdk_mock.api
guid = 'WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00'
process = api.select(Process, guid)
assert isinstance(process.events(), Query)
# use a criteria value that's not a string or list
with pytest.raises(ApiError):
events = process.events(event_type="modload").add_criteria("crossproc_action", 0)
# use an exclusion value that's not a string or list
with pytest.raises(ApiError):
events = process.events().add_exclusions("crossproc_effective_reputation", 0)
def test_process_with_criteria_exclusions(cbcsdk_mock):
"""Testing AsyncProcessQuery.add_criteria() and AsyncProcessQuery.add_exclusions()."""
api = cbcsdk_mock.api
guid = 'WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00'
# use the update methods
process = api.select(Process).where("event_type:modload").add_criteria("device_id", [1234]).add_exclusions("crossproc_effective_reputation", ["REP_WHITE"])
# mock the search validation
cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/search_validation",
GET_PROCESS_VALIDATION_RESP)
# mock the POST of a search
cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/search_jobs",
POST_PROCESS_SEARCH_JOB_RESP)
# mock the GET to check search status
cbcsdk_mock.mock_request("GET", ("/api/investigate/v1/orgs/test/processes/"
"search_jobs/2c292717-80ed-4f0d-845f-779e09470920"),
GET_PROCESS_SEARCH_JOB_RESP)
# mock the GET to get search results
cbcsdk_mock.mock_request("GET", ("/api/investigate/v2/orgs/test/processes/search_jobs/"
"2c292717-80ed-4f0d-845f-779e09470920/results"),
GET_PROCESS_SEARCH_JOB_RESULTS_RESP_1)
p = process[0]
assert p.process_md5 == 'c7084336325dc8eadfb1e8ff876921c4'
process_q_params = process._get_query_parameters()
expected_params = {"query": "event_type:modload",
"criteria": {
"device_id": [1234]
},
"exclusions": {
"crossproc_effective_reputation": ["REP_WHITE"]
}}
assert process_q_params == expected_params
def test_process_fields(cbcsdk_mock):
"""Testing AsyncProcessQuery.set_fields()."""
api = cbcsdk_mock.api
guid = 'WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00'
# use the update methods
process = api.select(Process).where("event_type:modload").add_criteria("device_id", [1234]).add_exclusions("crossproc_effective_reputation", ["REP_WHITE"])
process = process.set_fields(["parent_hash", "device_policy"])
process_q_params = process._get_query_parameters()
expected_params = {"query": "event_type:modload",
"criteria": {
"device_id": [1234]
},
"exclusions": {
"crossproc_effective_reputation": ["REP_WHITE"]
},
"fields": [
"parent_hash",
"device_policy"
]}
assert process_q_params == expected_params
def test_process_time_range(cbcsdk_mock):
"""Testing AsyncProcessQuery.set_fields()."""
api = cbcsdk_mock.api
guid = 'WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00'
# use the update methods
process = api.select(Process).where("event_type:modload").add_criteria("device_id", [1234]).add_exclusions("crossproc_effective_reputation", ["REP_WHITE"])
process = process.set_time_range(start="2020-01-21T18:34:04Z")
process = process.set_time_range(end="2020-02-21T18:34:04Z")
process = process.set_time_range(window="-1w")
process_q_params = process._get_query_parameters()
expected_params = {"query": "event_type:modload",
"criteria": {
"device_id": [1234]
},
"exclusions": {
"crossproc_effective_reputation": ["REP_WHITE"]
},
"time_range": {
"start": "2020-01-21T18:34:04Z",
"end": "2020-02-21T18:34:04Z",
"window": "-1w"
}}
assert process_q_params == expected_params
def test_process_start_rows(cbcsdk_mock):
"""Testing AsyncProcessQuery.set_start() and AsyncProcessQuery.set_rows()."""
api = cbcsdk_mock.api
guid = 'WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00'
# use the update methods
process = api.select(Process).where("event_type:modload").add_criteria("device_id", [1234]).add_exclusions("crossproc_effective_reputation", ["REP_WHITE"])
process = process.set_start(10)
process = process.set_rows(102)
process_q_params = process._get_query_parameters()
expected_params = {"query": "event_type:modload",
"criteria": {
"device_id": [1234]
},
"exclusions": {
"crossproc_effective_reputation": ["REP_WHITE"]
},
"start": 10,
"rows": 102
}
assert process_q_params == expected_params
def test_process_sort(cbcsdk_mock):
"""Testing AsyncProcessQuery.sort_by()."""
api = cbcsdk_mock.api
guid = 'WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00'
# use the update methods
process = api.select(Process).where("event_type:modload").add_criteria("device_id", [1234]).add_exclusions("crossproc_effective_reputation", ["REP_WHITE"])
process = process.sort_by("process_pid", direction="DESC")
process_q_params = process._get_query_parameters()
expected_params = {"query": "event_type:modload",
"criteria": {
"device_id": [1234]
},
"exclusions": {
"crossproc_effective_reputation": ["REP_WHITE"]
},
"sort": [{
"field": "process_pid",
"order": "DESC"
}]}
assert process_q_params == expected_params
def test_process_events_with_criteria_exclusions(cbcsdk_mock):
"""Testing the add_criteria() method when selecting events."""
api = cbcsdk_mock.api
guid = 'WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00'
process = api.select(Process, guid)
assert isinstance(process.events(), Query)
# create the events query object to compare
events = process.events(event_type="modload").add_criteria("crossproc_action", ["ACTION_PROCESS_API_CALL"]).add_exclusions("crossproc_effective_reputation", ["REP_WHITE"])
events.add_criteria("crossproc_action", "SOME_OTHER_CRIT")
events.add_exclusions("exclusion_key", "exclusion_value")
# emulate the manual select in Process.events()
query = api.select(Event).where(process_guid=guid).add_criteria("crossproc_action", ["ACTION_PROCESS_API_CALL"]).add_exclusions("crossproc_effective_reputation", ["REP_WHITE"])
query.add_criteria("crossproc_action", "SOME_OTHER_CRIT")
query.add_exclusions("exclusion_key", "exclusion_value")
assert [isinstance(q, Query) for q in [events, query]]
# extract and compare the parameters from each Query
events_query_params = events._get_query_parameters()
query_params = query.and_(event_type="modload")._get_query_parameters()
expected_params = {"query": "process_guid:WNEXFKQ7\\-0002b226\\-000015bd\\-00000000\\-"
"1d6225bbba74c00 AND event_type:modload",
"criteria": {
"crossproc_action": ["ACTION_PROCESS_API_CALL",
"SOME_OTHER_CRIT"],
},
"exclusions": {
"crossproc_effective_reputation": ["REP_WHITE"],
"exclusion_key": ["exclusion_value"]
},
"process_guid": "WNEXFKQ7\\-0002b226\\-000015bd\\-00000000\\-1d6225bbba74c00"
}
assert events_query_params == query_params
assert events_query_params == expected_params
@pytest.mark.parametrize('get_summary_response, guid, process_search_results, has_parent_process',
[(GET_PROCESS_SUMMARY_RESP, "test-0002b226-000015bd-00000000-1d6225bbba74c00",
GET_PROCESS_SEARCH_PARENT_JOB_RESULTS_RESP, True),
(GET_PROCESS_SUMMARY_RESP_1, "test-00340b06-00000314-00000000-1d686b9e4d74f52",
None, False),
(GET_PROCESS_SUMMARY_RESP_2, "test-003513bc-0000035c-00000000-1d640200c9a6205",
GET_PROCESS_SEARCH_JOB_RESULTS_RESP_1, True)
])
def test_process_parents(cbcsdk_mock, get_summary_response, guid, process_search_results, has_parent_process):
"""Testing Process.parents property/method."""
api = cbcsdk_mock.api
cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/search_validation",
GET_PROCESS_VALIDATION_RESP)
# query for a Process
process = api.select(Process, guid)
cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/summary", get_summary_response)
# the process has a parent process (manually flagged)
if has_parent_process:
# Process.parents property returns a Process object, or [] if None
assert isinstance(process.parents, Process)
# mock the POST of a search
cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/search_job",
POST_PROCESS_SEARCH_JOB_RESP)
# mock the GET to check search status
cbcsdk_mock.mock_request("GET", ("/api/investigate/v1/orgs/test/processes/"
"search_jobs/2c292717-80ed-4f0d-845f-779e09470920"),
GET_PROCESS_SEARCH_JOB_RESP)
# mock the GET to get search results
cbcsdk_mock.mock_request("GET", ("/api/investigate/v2/orgs/test/processes/search_jobs/"
"2c292717-80ed-4f0d-845f-779e09470920/results"),
process_search_results)
# query for a Process that has a guid == the guid of the parent process
parent_process = api.select(Process).where(process_guid=process.parents.process_guid)
parent_search_results = [process for process in parent_process._perform_query()]
# check that the search for parent_process yields result consistent with the original process's parent
assert parent_search_results[0].process_guid == process.parents.process_guid
else:
# the process has no parent
assert process.parents == []
@pytest.mark.parametrize('get_summary_response, guid, expected_num_children', [
(GET_PROCESS_SUMMARY_RESP, "test-0002b226-000015bd-00000000-1d6225bbba74c00", 0),
(GET_PROCESS_SUMMARY_RESP_1, "test-00340b06-00000314-00000000-1d686b9e4d74f52", 3),
(GET_PROCESS_SUMMARY_RESP_2, "test-003513bc-0000035c-00000000-1d640200c9a6205", 2)])
def test_process_children(cbcsdk_mock, get_summary_response, guid, expected_num_children):
"""Testing Process.children property."""
api = cbcsdk_mock.api
process = api.select(Process, guid)
cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/summary", get_summary_response)
# if there are children, check that Process.children returns the right objects
if isinstance(process.summary.children, list):
assert isinstance(process.children, list)
assert [isinstance(child, Process) for child in process.children]
else:
assert process.children == []
assert len(process.children) == expected_num_children
@pytest.mark.parametrize('get_summary_response, guid, md5', [
(GET_PROCESS_SUMMARY_RESP, "test-0002b226-000015bd-00000000-1d6225bbba74c00", None),
(GET_PROCESS_SUMMARY_RESP_1, "test-00340b06-00000314-00000000-1d686b9e4d74f52", "e83650f70459a027aa596e1a73c961a1"),
(GET_PROCESS_SUMMARY_RESP_2, "test-003513bc-0000035c-00000000-1d640200c9a6205",
"708c8760385810080c4d17fa84d325ca")])
def test_process_md5(cbcsdk_mock, get_summary_response, guid, md5):
"""Testing Process.process_md5 property."""
api = cbcsdk_mock.api
cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/summary", get_summary_response)
process = api.select(Process, guid)
assert process.process_md5 == md5
def test_process_md5_not_found(cbcsdk_mock):
"""Testing error raising when receiving 404 for a Process."""
api = cbcsdk_mock.api
cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/summary",
ObjectNotFoundError(uri='uri_to_get_summ'))
process = api.select(Process, "someNonexistantGuid")
with pytest.raises(ObjectNotFoundError):
process.summary
@pytest.mark.parametrize('get_summary_response, guid, sha256', [
(GET_PROCESS_SUMMARY_RESP, "test-0002b226-000015bd-00000000-1d6225bbba74c00",
"5920199e4fbfa47c1717b863814722148a353e54f8c10912cf1f991a1c86309d"),
(GET_PROCESS_SUMMARY_RESP_1, "test-00340b06-00000314-00000000-1d686b9e4d74f52",
"d5e122606054fa0b03db3ee8cf9ea7701e523875e2bdb87581ad7232ffc9308e"),
(GET_PROCESS_SUMMARY_RESP_2, "test-003513bc-0000035c-00000000-1d640200c9a6205", None)])
def test_process_sha256(cbcsdk_mock, get_summary_response, guid, sha256):
"""Testing Process.process_sha256 property."""
api = cbcsdk_mock.api
cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/summary", get_summary_response)
process = api.select(Process, guid)
assert process.process_sha256 == sha256
@pytest.mark.parametrize('get_summary_response, guid, pids', [
(GET_PROCESS_SUMMARY_RESP, "test-0002b226-000015bd-00000000-1d6225bbba74c00", [5565]),
(GET_PROCESS_SUMMARY_RESP_1, "test-00340b06-00000314-00000000-1d686b9e4d74f52", [788]),
(GET_PROCESS_SUMMARY_RESP_2, "test-003513bc-0000035c-00000000-1d640200c9a6205", [860])])
def test_process_pids(cbcsdk_mock, get_summary_response, guid, pids):
"""Testing Process.process_pids property."""
api = cbcsdk_mock.api
cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/summary", get_summary_response)
process = api.select(Process, guid)
assert process.process_pids == pids
def test_process_select_where(cbcsdk_mock):
"""Testing Process querying with where()."""
api = cbcsdk_mock.api
guid = 'WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00'
process = api.select(Process).where(f"process_guid:{guid}")
assert isinstance(process, AsyncProcessQuery)
def test_tree_select(cbcsdk_mock):
"""Testing Tree Querying"""
cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/tree", GET_TREE_RESP)
api = cbcsdk_mock.api
guid = "WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00"
process = api.select(Process, guid)
tree = process.tree()
children = tree.nodes["children"]
assert len(children) == len(tree.children)
assert len(children) > 0
procTree = api.select(Tree).where(process_guid="WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00")
results = procTree._perform_query()
assert results is not None
assert results["nodes"]["children"] is not None
assert results["incomplete_results"] is False
| 50.072566
| 180
| 0.637517
| 3,005
| 28,291
| 5.721797
| 0.079867
| 0.041875
| 0.028847
| 0.068047
| 0.846691
| 0.827033
| 0.807375
| 0.775852
| 0.772246
| 0.753635
| 0
| 0.089283
| 0.252943
| 28,291
| 564
| 181
| 50.161348
| 0.724249
| 0.107808
| 0
| 0.684706
| 0
| 0
| 0.282613
| 0.188635
| 0
| 0
| 0
| 0
| 0.108235
| 1
| 0.063529
| false
| 0
| 0.016471
| 0
| 0.084706
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1bd8a1ee2e1c7c3be80463ce86a5b7d04f486354
| 305
|
py
|
Python
|
timet.py
|
Tjccs/College-Python
|
66186f898a5c3b23763f3110c9423427236ca4a5
|
[
"MIT"
] | null | null | null |
timet.py
|
Tjccs/College-Python
|
66186f898a5c3b23763f3110c9423427236ca4a5
|
[
"MIT"
] | null | null | null |
timet.py
|
Tjccs/College-Python
|
66186f898a5c3b23763f3110c9423427236ca4a5
|
[
"MIT"
] | null | null | null |
#-*- coding: utf-8 -*-
import time
while True:
print( "########################################################################")
print(time.gmtime().tm_year, time.gmtime().tm_mon, time.gmtime().tm_mday)
print(time.gmtime().tm_hour, time.gmtime().tm_min, time.gmtime().tm_sec)
time.sleep(1)
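# A hedged alternative (illustration only): time.strftime can format the same
# fields that the loop above prints in a single call, e.g.
#     print(time.strftime("%Y %m %d %H:%M:%S", time.gmtime()))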
| 38.125
| 86
| 0.501639
| 37
| 305
| 3.972973
| 0.486486
| 0.408163
| 0.489796
| 0.231293
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007407
| 0.114754
| 305
| 7
| 87
| 43.571429
| 0.537037
| 0.068852
| 0
| 0
| 0
| 0
| 0.254417
| 0.254417
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.166667
| null | null | 0.5
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
9446f5fcad55282baef23797edf756b7e8726deb
| 10,945
|
py
|
Python
|
tests/test_Conv2d_Custom.py
|
neonithinar/hexagdly
|
dcd15bfb7bdabb4f6280f0598f2cf0b923924a81
|
[
"MIT"
] | 67
|
2018-02-10T13:54:16.000Z
|
2022-01-31T05:41:40.000Z
|
tests/test_Conv2d_Custom.py
|
neonithinar/hexagdly
|
dcd15bfb7bdabb4f6280f0598f2cf0b923924a81
|
[
"MIT"
] | 4
|
2018-02-21T16:28:38.000Z
|
2020-05-02T17:01:01.000Z
|
tests/test_Conv2d_Custom.py
|
neonithinar/hexagdly
|
dcd15bfb7bdabb4f6280f0598f2cf0b923924a81
|
[
"MIT"
] | 17
|
2018-05-25T12:30:19.000Z
|
2021-07-19T05:48:47.000Z
|
import numpy as np
import torch
import hexagdly as hex
import pytest
class TestConv2d(object):
def get_in_array(self):
return np.array(
[
[
[
[0, 0, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 1],
]
]
],
dtype=np.float32,
)
def get_kernel_1_stride_1_array(self):
return np.array(
[
[
[
[0, 1, 1, 2, 1, 1],
[0, 1, 1, 2, 2, 1],
[0, 1, 2, 2, 1, 2],
[0, 1, 1, 1, 2, 1],
]
]
],
dtype=np.float32,
)
def get_kernel_1_stride_2_array(self):
return np.array([[[[0, 1, 1], [0, 1, 1]]]], dtype=np.float32)
def get_kernel_1_stride_3_array(self):
return np.array([[[[0, 2]]]], dtype=np.float32)
def get_kernel_2_stride_1_array(self):
return np.array(
[
[
[
[1, 1, 2, 3, 3, 2],
[1, 2, 4, 4, 3, 3],
[2, 2, 3, 4, 5, 2],
[1, 1, 3, 3, 3, 2],
]
]
],
dtype=np.float32,
)
def get_kernel_2_stride_2_array(self):
return np.array([[[[1, 4, 3], [2, 3, 5]]]], dtype=np.float32)
def get_kernel_2_stride_3_array(self):
return np.array([[[[1, 4]]]], dtype=np.float32)
def get_tensors(self, in_channels, kernel_size, stride, bias_bool):
channel_dist = 1000
if bias_bool is False:
bias_value = 0
bias = None
else:
bias_value = 1.0
bias = np.array([1])
# input tensor
array = self.get_in_array()
array = np.concatenate(
[channel * channel_dist * array + array for channel in range(in_channels)],
1,
)
tensor = torch.FloatTensor(array)
# expected output tensor
convolved_array = getattr(
self, "get_kernel_" + str(kernel_size) + "_stride_" + str(stride) + "_array"
)()
convolved_array = np.sum(
np.stack(
[
(channel * channel_dist) * convolved_array + convolved_array
for channel in range(in_channels)
]
),
0,
)
convolved_tensor = torch.FloatTensor(convolved_array) + bias_value
# output tensor of test method
if kernel_size == 1:
kernel = [np.ones((1, in_channels, 3, 1)), np.ones((1, in_channels, 2, 2))]
elif kernel_size == 2:
kernel = [
np.ones((1, in_channels, 5, 1)),
np.ones((1, in_channels, 4, 2)),
np.ones((1, in_channels, 3, 2)),
]
conv2d = hex.Conv2d_CustomKernel(kernel, stride, bias)
return conv2d(tensor), convolved_tensor
def test_in_channels_1_kernel_size_1_stride_1_bias_False(self):
in_channels = 1
kernel_size = 1
stride = 1
bias = False
test_output, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_output, expectation)
def test_in_channels_1_kernel_size_1_stride_2_bias_False(self):
in_channels = 1
kernel_size = 1
stride = 2
bias = False
test_output, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_output, expectation)
def test_in_channels_1_kernel_size_1_stride_3_bias_False(self):
in_channels = 1
kernel_size = 1
stride = 3
bias = False
test_output, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_output, expectation)
def test_in_channels_1_kernel_size_2_stride_1_bias_False(self):
in_channels = 1
kernel_size = 2
stride = 1
bias = False
test_output, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_output, expectation)
def test_in_channels_1_kernel_size_2_stride_2_bias_False(self):
in_channels = 1
kernel_size = 2
stride = 2
bias = False
test_output, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_output, expectation)
def test_in_channels_1_kernel_size_2_stride_3_bias_False(self):
in_channels = 1
kernel_size = 2
stride = 3
bias = False
test_output, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_output, expectation)
def test_in_channels_5_kernel_size_1_stride_1_bias_False(self):
in_channels = 5
kernel_size = 1
stride = 1
bias = False
test_output, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_output, expectation)
def test_in_channels_5_kernel_size_1_stride_2_bias_False(self):
in_channels = 5
kernel_size = 1
stride = 2
bias = False
test_output, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_output, expectation)
def test_in_channels_5_kernel_size_1_stride_3_bias_False(self):
in_channels = 5
kernel_size = 1
stride = 3
bias = False
test_output, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_output, expectation)
def test_in_channels_5_kernel_size_2_stride_1_bias_False(self):
in_channels = 5
kernel_size = 2
stride = 1
bias = False
test_output, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_output, expectation)
def test_in_channels_5_kernel_size_2_stride_2_bias_False(self):
in_channels = 5
kernel_size = 2
stride = 2
bias = False
test_output, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_output, expectation)
def test_in_channels_5_kernel_size_2_stride_3_bias_False(self):
in_channels = 5
kernel_size = 2
stride = 3
bias = False
test_output, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_output, expectation)
def test_in_channels_1_kernel_size_1_stride_1_bias_True(self):
in_channels = 1
kernel_size = 1
stride = 1
bias = True
test_output, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_output, expectation)
def test_in_channels_1_kernel_size_1_stride_2_bias_True(self):
in_channels = 1
kernel_size = 1
stride = 2
bias = True
test_output, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_output, expectation)
def test_in_channels_1_kernel_size_1_stride_3_bias_True(self):
in_channels = 1
kernel_size = 1
stride = 3
bias = True
test_output, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_output, expectation)
def test_in_channels_1_kernel_size_2_stride_1_bias_True(self):
in_channels = 1
kernel_size = 2
stride = 1
bias = True
test_output, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_output, expectation)
def test_in_channels_1_kernel_size_2_stride_2_bias_True(self):
in_channels = 1
kernel_size = 2
stride = 2
bias = True
test_output, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_output, expectation)
def test_in_channels_1_kernel_size_2_stride_3_bias_True(self):
in_channels = 1
kernel_size = 2
stride = 3
bias = True
test_output, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_output, expectation)
def test_in_channels_5_kernel_size_1_stride_1_bias_True(self):
in_channels = 5
kernel_size = 1
stride = 1
bias = True
test_output, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_output, expectation)
def test_in_channels_5_kernel_size_1_stride_2_bias_True(self):
in_channels = 5
kernel_size = 1
stride = 2
bias = True
test_output, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_output, expectation)
def test_in_channels_5_kernel_size_1_stride_3_bias_True(self):
in_channels = 5
kernel_size = 1
stride = 3
bias = True
test_output, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_output, expectation)
def test_in_channels_5_kernel_size_2_stride_1_bias_True(self):
in_channels = 5
kernel_size = 2
stride = 1
bias = True
test_output, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_output, expectation)
def test_in_channels_5_kernel_size_2_stride_2_bias_True(self):
in_channels = 5
kernel_size = 2
stride = 2
bias = True
test_output, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_output, expectation)
def test_in_channels_5_kernel_size_2_stride_3_bias_True(self):
in_channels = 5
kernel_size = 2
stride = 3
bias = True
test_output, expectation = self.get_tensors(
in_channels, kernel_size, stride, bias
)
assert torch.equal(test_output, expectation)
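# A hedged refactoring sketch (illustration, not part of the original suite):
# the 24 tests above vary only four parameters, so stacked parametrize
# decorators could express the same matrix in a single test function.
@pytest.mark.parametrize("bias", [False, True])
@pytest.mark.parametrize("stride", [1, 2, 3])
@pytest.mark.parametrize("kernel_size", [1, 2])
@pytest.mark.parametrize("in_channels", [1, 5])
def test_conv2d_custom_matrix(in_channels, kernel_size, stride, bias):
    test_output, expectation = TestConv2d().get_tensors(
        in_channels, kernel_size, stride, bias
    )
    assert torch.equal(test_output, expectation)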
| 27.56927
| 88
| 0.568387
| 1,347
| 10,945
| 4.265033
| 0.048998
| 0.139252
| 0.167102
| 0.087032
| 0.884943
| 0.880244
| 0.853264
| 0.824891
| 0.810444
| 0.780853
| 0
| 0.040935
| 0.354957
| 10,945
| 396
| 89
| 27.638889
| 0.772805
| 0.005847
| 0
| 0.581169
| 0
| 0
| 0.002298
| 0
| 0
| 0
| 0
| 0
| 0.077922
| 1
| 0.103896
| false
| 0
| 0.012987
| 0.022727
| 0.146104
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
94589d9bebe96029d84ed3a2086a96d584c01dca
| 138
|
py
|
Python
|
tkdet/data/datasets/__init__.py
|
tkhe/tkdetection
|
54e6c112ef2930e755f457e38449736f5743a9ea
|
[
"MIT"
] | 1
|
2020-10-09T02:27:13.000Z
|
2020-10-09T02:27:13.000Z
|
tkdet/data/datasets/__init__.py
|
tkhe/tkdetection
|
54e6c112ef2930e755f457e38449736f5743a9ea
|
[
"MIT"
] | null | null | null |
tkdet/data/datasets/__init__.py
|
tkhe/tkdetection
|
54e6c112ef2930e755f457e38449736f5743a9ea
|
[
"MIT"
] | null | null | null |
from . import builtin
from .cityscapes import *
from .coco import *
from .lvis import *
from .pascal_voc import *
from .visdrone import *
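# Hedged note: `builtin` is imported for its side effects (presumably dataset
# registration at package import time), while the star imports re-export each
# dataset module's public helpers under tkdet.data.datasets.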
| 19.714286
| 25
| 0.746377
| 19
| 138
| 5.368421
| 0.473684
| 0.392157
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 138
| 6
| 26
| 23
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
94846e6efaf0adc91e7abdc01b09360372e6d185
| 27
|
py
|
Python
|
roles/create-secrets/files/test.py
|
thescouser89/ansible-port-louis
|
510de63315cf0f5542f3d80ae316a64221f3d71d
|
[
"MIT"
] | 1
|
2016-05-27T14:29:52.000Z
|
2016-05-27T14:29:52.000Z
|
roles/create-secrets/files/test.py
|
thescouser89/ansible-port-louis
|
510de63315cf0f5542f3d80ae316a64221f3d71d
|
[
"MIT"
] | null | null | null |
roles/create-secrets/files/test.py
|
thescouser89/ansible-port-louis
|
510de63315cf0f5542f3d80ae316a64221f3d71d
|
[
"MIT"
] | null | null | null |
print("I am running here")
| 13.5
| 26
| 0.703704
| 5
| 27
| 3.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.826087
| 0
| 0
| 0
| 0
| 0
| 0.62963
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
948ac61b9f552ccc2e8c2e34fe2afad2891b4563
| 56,497
|
py
|
Python
|
polymer_network_3D.py
|
pkhandag/polymer_network
|
1597c8c69c274a3d16fba7e1cd195590edd21a15
|
[
"MIT"
] | null | null | null |
polymer_network_3D.py
|
pkhandag/polymer_network
|
1597c8c69c274a3d16fba7e1cd195590edd21a15
|
[
"MIT"
] | null | null | null |
polymer_network_3D.py
|
pkhandag/polymer_network
|
1597c8c69c274a3d16fba7e1cd195590edd21a15
|
[
"MIT"
] | null | null | null |
"""
Created in: 2021
Purpose: obtain average segment density and total free energy of a polymer network with nonlocal inter-segment interactions (using the 8-chain model) in 3D
Contact: Pratik Khandagale (pkhandag@andrew.cmu.edu)
"""
#imports
from __future__ import print_function
from fenics import *
from ufl import *
from boxfield import *
from scipy.optimize import fsolve
from numpy.linalg import svd
from sympy import Matrix
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from scipy import linalg, matrix
from scipy.integrate import odeint
from tempfile import TemporaryFile
from dolfin import *
from mshr import *
from mpl_toolkits.mplot3d import Axes3D
from itertools import combinations_with_replacement
from numpy import linalg as LA
from scipy.linalg import sqrtm
from xlwt import Workbook
import numpy as np
import matplotlib.pyplot as plt
import math
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import xlwt
#####################
## inputs in the model (independent parameters)
#####################
# polymer parameters
N_chain = 100  # number of segments in one single polymer chain in the polymer network
constant_used_in_excluded_volume_parameter = 0.005  # positive for segment repulsion; takes a value less than 1
k_B=1.0 # Boltzmann constant (in normalized setting)
Temp=1.0 #temperature of the polymer network (in normalized setting)
## deformation parameters: principal stretches
lambda_1= 1
lambda_2= 1
lambda_3= 1
#computational parameters
no_of_elements=35 #no of finite elements along each X, Y and Z axis
dt=0.01 #Time step along chain contour. To satisfy CFL numerical stability condition, we need dt < (((x_max_box-x_min_box)/nx_V)**2)/G_chain
c_dirac=0.1 #standard deviation of Gaussian used to approximate Dirac delta function in the initial condition for q and q*
#####################
## other parameters in the model
#####################
a_chain=1/sqrt(N_chain) # segment length
G_chain=((a_chain**2)*N_chain)/6 # defining a constant term ((a_chain**2)*N_chain)/6 in PDE as G_chain
V_seg= a_chain**3 #volume of a single segment (volume because 3D)
u0= constant_used_in_excluded_volume_parameter *V_seg ## segment-segment interaction (excluded volume) parameter; this has units of volume (cf. the Dirac potential expression)
T=1.0 # final value of chain parameter 's'
n_t=int(T/dt +1) # number of time steps along the chain contour
round_const= 12 # no of significant digits
tol=2e-16 ## tolerance to form submeshes
delta_H_ratio_threshold=1e-3 ## iteration stopping criteria: threshold for relative change in total free energy
l_RMS= a_chain*(N_chain)**(1/2) ## RMS end-to-end length of one chain
#####################
## initializing variable values
#####################
## initializing total free energies
W = 0 #total free energy
W_entropic = 0 #entropic free energy
W_interaction = 0 #interaction free energy
## initializing values of relative change in total free energy for checking the stopping criteria for Self Consistent Field Theory iteration
delta_H_ratio_array=np.zeros(1000)
delta_H_ratio_array[0]=5
delta_H_ratio=5
###############################################################
## V mesh forming
###############################################################
nx_V= no_of_elements #no of finite elements along x-axis
ny_V= no_of_elements #no of finite elements along y-axis
nz_V= no_of_elements #no of finite elements along z-axis
## mesh box size
x_min_box= round( -3* lambda_1 * ( (1/(sqrt(3)))*l_RMS ), round_const )
y_min_box= round( -3* lambda_2 * ( (1/(sqrt(3)))*l_RMS ), round_const )
z_min_box= round( -3* lambda_3 * ( (1/(sqrt(3)))*l_RMS ), round_const )
x_max_box= round( x_min_box + 6* lambda_1 * ( (1/(sqrt(3)))*l_RMS ), round_const)
y_max_box= round( y_min_box + 6* lambda_2 * ( (1/(sqrt(3)))*l_RMS ), round_const)
z_max_box= round( z_min_box + 6* lambda_3 * ( (1/(sqrt(3)))*l_RMS ), round_const)
## domain volume as constant of proportionality for Q and rho
V_domain= (x_max_box- x_min_box)* (y_max_box- y_min_box)* (z_max_box- z_min_box)
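## hedged sanity check (added for illustration): the comment on dt above quotes
## a CFL-style stability bound; with the box and G_chain now defined it can be
## verified directly.
assert dt < (((x_max_box - x_min_box)/nx_V)**2)/G_chain, 'dt violates the stability bound quoted above'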
#######################################################
## Define periodic boundary condition
class PeriodicBoundary(SubDomain):
def inside(self, x, on_boundary):
# return True if on the left, bottom, or front boundary, but not on the edges shared with the max faces
return bool( (near(x[0], x_min_box) or near(x[1], y_min_box) or near(x[2], z_min_box)) and \
( not ( (near(x[0], x_min_box) and near(x[1], y_max_box)) or \
(near(x[0], x_min_box) and near(x[2], z_max_box)) or \
(near(x[1], y_min_box) and near(x[0], x_max_box)) or \
(near(x[1], y_min_box) and near(x[2], z_max_box)) or \
(near(x[2], z_min_box) and near(x[0], x_max_box)) or \
(near(x[2], z_min_box) and near(x[1], y_max_box)) ) ) and on_boundary )
# return bool( (near(x[0], x_min_box) or near(x[1], y_min_box) or near(x[2], z_min_box)) and on_boundary )
# Map points on the max faces back to the corresponding min faces
def map(self, x, y):
if (near(x[0], x_max_box) and near(x[1], y_max_box) and near(x[2], z_max_box)):
y[0] = x[0] - 2*x_max_box
y[1] = x[1] - 2*y_max_box
y[2] = x[2] - 2*z_max_box
elif (near(x[0], x_max_box) and near(x[1], y_max_box)):
y[0] = x[0] - 2*x_max_box
y[1] = x[1] - 2*y_max_box
y[2] = x[2]
elif (near(x[1], y_max_box) and near(x[2], z_max_box)):
y[0] = x[0]
y[1] = x[1] - 2*y_max_box
y[2] = x[2] - 2*z_max_box
elif (near(x[2], z_max_box) and near(x[0], x_max_box)):
y[0] = x[0] - 2*x_max_box
y[1] = x[1]
y[2] = x[2] - 2*z_max_box
elif near(x[0], x_max_box):
y[0] = x[0] - 2*x_max_box
y[1] = x[1]
y[2] = x[2]
elif near(x[1], y_max_box):
y[0] = x[0]
y[1] = x[1] - 2*y_max_box
y[2] = x[2]
elif near(x[2], z_max_box):
y[0] = x[0]
y[1] = x[1]
y[2] = x[2] - 2*z_max_box
else:
y[0] = 1000.*2*x_max_box
y[1] = 1000.*2*y_max_box
y[2] = 1000.*2*z_max_box
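## hedged illustration of map(): a point on the max-x face is shifted back by
## 2*x_max_box, e.g. (x_max_box, y, z) -> (-x_max_box, y, z) = (x_min_box, y, z),
## the matching point on the min-x face, since the box is centred at the origin.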
## Create mesh and define function space and dof coordinates
mesh = BoxMesh(Point(x_min_box, y_min_box, z_min_box), Point(x_max_box, y_max_box, z_max_box), nx_V, ny_V, nz_V)
V = FunctionSpace(mesh, 'Lagrange', 1, constrained_domain=PeriodicBoundary())
n_mesh = V.dim() #no of dof points, n_mesh=(nx+1)*(ny+1)
d_mesh = mesh.geometry().dim()
dof_coordinates = V.tabulate_dof_coordinates()
dof_coordinates.resize((n_mesh, d_mesh))
dof_x = dof_coordinates[:, 0]
dof_y = dof_coordinates[:, 1]
dof_z = dof_coordinates[:, 2]
################################################################################
## Function for computing q
def q_computation(X, w):
##initial q at t=0
q_n= Function(V)
##initial condition for q
x_cord= X[0]
y_cord= X[1]
z_cord= X[2]
q_0_expression= Expression( ' ( pow( (a_chain*sqrt(N_chain)) , 3 ) ) * (1/(sqrt(2*pi)*c_dirac)) * exp( ( -1/ ( 2*pow(c_dirac,2) ) ) * ( pow(( x[0]- x_cord), 2) + pow(( x[1]- y_cord), 2) + pow(( x[2]- z_cord), 2) ) ) ' , a_chain= a_chain, N_chain=N_chain, c_dirac=c_dirac, x_cord=x_cord, y_cord=y_cord, z_cord=z_cord, degree=2 )
q_0= interpolate(q_0_expression, V)
## write initial condition to file
xdmf_q.write_checkpoint(q_0, "q_label", 0, XDMFFile.Encoding.HDF5, False)
##initialize q value at t=0
q_n.assign(q_0)
######## time stepping for computing q
for n in range(1,n_t):
# print(n)
t=dt*n
#defining q, v
q = TrialFunction(V)
v = TestFunction(V)
#a and L in fem weak form for fenics (with Crank-Nicolson time stepping)
a= G_chain*(dt/2)*dot(grad(q), grad(v))*dx + q*v*dx + dt*w*q*v*dx
L= ( q_n*v*dx - G_chain*(dt/2)*dot(grad(q_n), grad(v))*dx - dt*w*q_n*v*dx)
#solve variational problem
q = Function(V)
solve(a == L, q, solver_parameters={'linear_solver': 'gmres', 'preconditioner': 'ilu'})
#saving solution to file
xdmf_q.write_checkpoint(q, "q_label", t, XDMFFile.Encoding.HDF5, True)
#Update previous solution
q_n.assign(q)
######## end of time stepping
xdmf_q.close()
## returning value of function
return (q)
################################################################################
## Function for computing q_star
def q_star_computation(X, w):
##initial q_star at t=0
q_star_n= Function(V)
##initial condition for q_star
x_cord= X[0]
y_cord= X[1]
z_cord= X[2]
q_star_0_expression= Expression( ' ( pow( (a_chain*sqrt(N_chain)) , 3 ) ) * (1/(sqrt(2*pi)*c_dirac)) * exp( ( -1/ ( 2*pow(c_dirac,2) ) ) * ( pow(( x[0]- x_cord), 2) + pow(( x[1]- y_cord), 2) + pow(( x[2]- z_cord), 2) ) ) ' , a_chain= a_chain, N_chain=N_chain, c_dirac=c_dirac, x_cord=x_cord, y_cord=y_cord, z_cord=z_cord, degree=2 )
q_star_0= interpolate(q_star_0_expression, V)
#write
xdmf_q_star.write_checkpoint(q_star_0, "q_star_label", 0, XDMFFile.Encoding.HDF5, False)
##initialize q_star value at t=0
q_star_n.assign(q_star_0)
######## time stepping for q_star
for n in range(1,n_t):
# print(n)
t=dt*n
######################################### computing q_star
#defining q* and v*
q_star = TrialFunction(V)
v_star = TestFunction(V)
#a_star and L_star in fem weak form for fenics (with Crank-Nicolson time stepping)
a_star= G_chain*(dt/2)*dot(grad(q_star), grad(v_star))*dx + q_star*v_star*dx + dt*w*q_star*v_star*dx
L_star= ( q_star_n*v_star*dx - G_chain*(dt/2)*dot(grad(q_star_n), grad(v_star))*dx - dt*w*q_star_n*v_star*dx)
#solve variational problem
q_star=Function(V)
solve(a_star == L_star, q_star, solver_parameters={'linear_solver': 'gmres', 'preconditioner': 'ilu'})
#saving solution to file
xdmf_q_star.write_checkpoint(q_star, "q_star_label", t, XDMFFile.Encoding.HDF5, True)
#Update previous solution
q_star_n.assign(q_star)
#### end of time stepping for q_star
xdmf_q_star.close()
## returning values from function
return (q_star)
################################################################################
# Function for single chain computation
def single_chain_computation():
##computing Q (Complete Partition Function for single chain)
Q=np.zeros(n_t) # Complete Partition Function Q at each position along the chain
phi_chain=Function(V) # phi function
phi_chain_temp=Function(V) # phi function
phi_chain_numr= phi_chain.vector().get_local()
for i in range(n_t):
# print(i)
q_temp = Function(V)
xdmf_q_call = XDMFFile("q.xdmf")
xdmf_q_call.read_checkpoint(q_temp,"q_label",i)
xdmf_q_call.close()
q_star_temp = Function(V)
xdmf_q_star_call = XDMFFile("q_star.xdmf")
xdmf_q_star_call.read_checkpoint(q_star_temp,"q_star_label", n_t-1-i)
xdmf_q_star_call.close()
Q[i]=assemble((q_temp*q_star_temp)*dx)/V_domain #Q is normalized with dividing by volume of the domain
## computing average segment density for single chain (phi_chain))
q_temp_numr = q_temp.vector().get_local()
q_star_temp_numr = q_star_temp.vector().get_local()
phi_chain_temp_numr= phi_chain_temp.vector().get_local()
phi_chain_temp_numr= q_temp_numr*q_star_temp_numr
phi_chain_numr= phi_chain_numr + phi_chain_temp_numr
Q_chain= Q[round(n_t/2)] #Q at s=0.5
phi_chain_numr= phi_chain_numr *(1/(V_domain*Q_chain))
phi_chain.vector().set_local(phi_chain_numr)
phi_chain.vector().apply('insert')
## returning values from function
return (Q, phi_chain, phi_chain_numr)
################################################################################
#### computation for initial guess of w
###############################################################################
## 8 Chain computation
###############################################################################
###############################################################################
## Chain 1 computation
#### generating random w
w = Function(V)
w.vector().set_local(np.random.random(n_mesh))
## Point 1 computation
x1= - round( lambda_1 * ( (1/(sqrt(3)))*l_RMS ), round_const ) #x component of X1 vector
y1= - round( lambda_2 * ( (1/(sqrt(3)))*l_RMS ), round_const ) #y component of X1 vector
z1= - round( lambda_3 * ( (1/(sqrt(3)))*l_RMS ), round_const ) #z component of X1 vector
X1=np.array([x1,y1,z1]) #position vector of 1st chain start end
# generating vtk files to store q_point_1
# vtkfile_q = File('q_spyder46_3D_solution/q_point_1.pvd')
xdmf_q = XDMFFile("q.xdmf")
q_point_1=Function(V)
q_point_1= q_computation(X1, w)
## Point 0 computation
x0= 0 #x component of X0 vector
y0= 0 #y component of X0 vector
z0= 0 #z component of X0 vector
X0=np.array([x0,y0,z0]) #position vector of 1st chain start end
# generating vtk files to store q_point_0
xdmf_q_star = XDMFFile("q_star.xdmf")
q_point_0 =Function(V)
q_point_0 = q_star_computation(X0, w)
Q1, phi_chain_1, phi_chain_1_numr = single_chain_computation()
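## hedged refactoring note (illustration only): chains 1-8 repeat this block
## with the fixed end placed at each corner of a cube, i.e. the sign patterns
## (sx, sy, sz) from itertools.product((-1, 1), repeat=3); a loop over those
## corners would produce the same eight (Q, phi_chain) pairs with less repetition.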
###############################################################################
## Chain 2 computation
#### generating random w
w = Function(V)
w.vector().set_local(np.random.random(n_mesh))
## Point 2 computation
x2= round( lambda_1 * ( (1/(sqrt(3)))*l_RMS ), round_const ) #x component of X2 vector
y2= - round( lambda_2 * ( (1/(sqrt(3)))*l_RMS ), round_const ) #y component of X2 vector
z2= - round( lambda_3 * ( (1/(sqrt(3)))*l_RMS ), round_const ) #z component of X2 vector
X2=np.array([x2,y2,z2]) #position vector of 2nd chain start end
# generating vtk files to store q_point_2
xdmf_q = XDMFFile("q.xdmf")
q_point_2=Function(V)
q_point_2= q_computation(X2, w)
## Point 0 computation
x0= 0 #x component of X0 vector
y0= 0 #y component of X0 vector
z0= 0 #z component of X0 vector
X0=np.array([x0,y0,z0]) #position vector of 2nd chain start end
# generating vtk files to store q_point_0
xdmf_q_star = XDMFFile("q_star.xdmf")
q_point_0=Function(V)
q_point_0 = q_star_computation(X0, w)
Q2, phi_chain_2, phi_chain_2_numr = single_chain_computation()
###############################################################################
## Chain 3 computation
#### generating random w
w = Function(V)
w.vector().set_local(np.random.random(n_mesh))
## Point 3 computation
x3= round( lambda_1 * ( (1/(sqrt(3)))*l_RMS ), round_const ) #x component of X3 vector
y3= round( lambda_2 * ( (1/(sqrt(3)))*l_RMS ), round_const ) #y component of X3 vector
z3= - round( lambda_3 * ( (1/(sqrt(3)))*l_RMS ), round_const ) #z component of X3 vector
X3=np.array([x3,y3,z3]) #position vector of 3rd chain start end
# generating vtk files to store q_point_3
xdmf_q = XDMFFile("q.xdmf")
q_point_3=Function(V)
q_point_3= q_computation(X3, w)
## Point 0 computation
x0= 0 #x component of X0 vector
y0= 0 #y component of X0 vector
z0= 0 #z component of X0 vector
X0=np.array([x0,y0,z0]) #position vector of 3rd chain start end
# generating vtk files to store q_point_0
xdmf_q_star = XDMFFile("q_star.xdmf")
q_point_0=Function(V)
q_point_0 = q_star_computation(X0, w)
Q3, phi_chain_3, phi_chain_3_numr = single_chain_computation()
###############################################################################
## Chain 4 computation
#### generating random w
w = Function(V)
w.vector().set_local(np.random.random(n_mesh))
## Point 4 computation
x4= - round( lambda_1 * ( (1/(sqrt(3)))*l_RMS ), round_const ) #x component of X4 vector
y4= round( lambda_2 * ( (1/(sqrt(3)))*l_RMS ), round_const ) #y component of X4 vector
z4= - round( lambda_3 * ( (1/(sqrt(3)))*l_RMS ), round_const ) #z component of X4 vector
X4=np.array([x4,y4,z4]) #position vector of 4th chain start end
# generating vtk files to store q_point_4
xdmf_q = XDMFFile("q.xdmf")
q_point_4=Function(V)
q_point_4= q_computation(X4, w)
## Point 0 computation
x0= 0 #x component of X0 vector
y0= 0 #y component of X0 vector
z0= 0 #z component of X0 vector
X0=np.array([x0,y0,z0]) #position vector of 4th chain start end
# generating vtk files to store q_point_0
xdmf_q_star = XDMFFile("q_star.xdmf")
q_point_0=Function(V)
q_point_0 = q_star_computation(X0, w)
Q4, phi_chain_4, phi_chain_4_numr = single_chain_computation()
###############################################################################
## Chain 5 computation
#### generating random w
w = Function(V)
w.vector().set_local(np.random.random(n_mesh))
## Point 5 computation
x5= - round( lambda_1 * ( (1/(sqrt(3)))*l_RMS ), round_const ) #x component of X5 vector
y5= - round( lambda_2 * ( (1/(sqrt(3)))*l_RMS ), round_const ) #y component of X5 vector
z5= round( lambda_3 * ( (1/(sqrt(3)))*l_RMS ), round_const ) #z component of X5 vector
X5=np.array([x5,y5,z5]) #position vector of 5th chain start end
# generating vtk files to store q_point_5
xdmf_q = XDMFFile("q.xdmf")
q_point_5=Function(V)
q_point_5= q_computation(X5, w)
## Point 0 computation
x0= 0 #x component of X0 vector
y0= 0 #y component of X0 vector
z0= 0 #z component of X0 vector
X0=np.array([x0,y0,z0]) #position vector of the 5th chain's other end (origin)
# XDMF file to store q_point_0
xdmf_q_star = XDMFFile("q_star.xdmf")
q_point_0=Function(V)
q_point_0 = q_star_computation(X0, w)
Q5, phi_chain_5, phi_chain_5_numr = single_chain_computation()
###############################################################################
## Chain 6 computation
#### generating random w
w = Function(V)
w.vector().set_local(np.random.random(n_mesh))
## Point 6 computation
x6= round( lambda_1 * ( (1/(sqrt(3)))*l_RMS ), round_const ) #x component of X6 vector
y6= - round( lambda_2 * ( (1/(sqrt(3)))*l_RMS ), round_const ) #y component of X6 vector
z6= round( lambda_3 * ( (1/(sqrt(3)))*l_RMS ), round_const ) #z component of X6 vector
X6=np.array([x6,y6,z6]) #position vector of 6th chain start end
# XDMF file to store q_point_6
xdmf_q = XDMFFile("q.xdmf")
q_point_6=Function(V)
q_point_6= q_computation(X6, w)
## Point 0 computation
x0= 0 #x component of X0 vector
y0= 0 #y component of X0 vector
z0= 0 #z component of X0 vector
X0=np.array([x0,y0,z0]) #position vector of the 6th chain's other end (origin)
# XDMF file to store q_point_0
xdmf_q_star = XDMFFile("q_star.xdmf")
q_point_0=Function(V)
q_point_0 = q_star_computation(X0, w)
Q6, phi_chain_6, phi_chain_6_numr = single_chain_computation()
###############################################################################
## Chain 7 computation
#### generating random w
w = Function(V)
w.vector().set_local(np.random.random(n_mesh))
## Point 7 computation
x7= round( lambda_1 * ( (1/(sqrt(3)))*l_RMS ), round_const ) #x component of X7 vector
y7= round( lambda_2 * ( (1/(sqrt(3)))*l_RMS ), round_const ) #y component of X7 vector
z7= round( lambda_3 * ( (1/(sqrt(3)))*l_RMS ), round_const ) #z component of X7 vector
X7=np.array([x7,y7,z7]) #position vector of 7th chain start end
# XDMF file to store q_point_7
xdmf_q = XDMFFile("q.xdmf")
q_point_7=Function(V)
q_point_7= q_computation(X7, w)
## Point 0 computation
x0= 0 #x component of X0 vector
y0= 0 #y component of X0 vector
z0= 0 #z component of X0 vector
X0=np.array([x0,y0,z0]) #position vector of the 7th chain's other end (origin)
# XDMF file to store q_point_0
xdmf_q_star = XDMFFile("q_star.xdmf")
q_point_0=Function(V)
q_point_0 = q_star_computation(X0, w)
Q7, phi_chain_7, phi_chain_7_numr = single_chain_computation()
###############################################################################
## Chain 8 computation
#### generating random w
w = Function(V)
w.vector().set_local(np.random.random(n_mesh))
## Point 8 computation
x8= - round( lambda_1 * ( (1/(sqrt(3)))*l_RMS ), round_const ) #x component of X8 vector
y8= round( lambda_2 * ( (1/(sqrt(3)))*l_RMS ), round_const ) #y component of X8 vector
z8= round( lambda_3 * ( (1/(sqrt(3)))*l_RMS ), round_const ) #z component of X8 vector
X8=np.array([x8,y8,z8]) #position vector of 8th chain start end
# XDMF file to store q_point_8
xdmf_q = XDMFFile("q.xdmf")
q_point_8=Function(V)
q_point_8= q_computation(X8, w)
## Point 0 computation
x0= 0 #x component of X0 vector
y0= 0 #y component of X0 vector
z0= 0 #z component of X0 vector
X0=np.array([x0,y0,z0]) #position vector of the 8th chain's other end (origin)
# XDMF file to store q_point_0
xdmf_q_star = XDMFFile("q_star.xdmf")
q_point_0=Function(V)
q_point_0 = q_star_computation(X0, w)
Q8, phi_chain_8, phi_chain_8_numr = single_chain_computation()
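# NOTE: a minimal sketch (unused below): the eight chain-end vectors X1..X8 are
# all sign combinations of (lambda_1, lambda_2, lambda_3) scaled by
# l_RMS/sqrt(3), so they could be generated in one loop instead of eight blocks:
import itertools
_corner = np.array([lambda_1, lambda_2, lambda_3]) * ((1/sqrt(3)) * l_RMS)
X_corners = [np.round(np.array(signs) * _corner, round_const)
             for signs in itertools.product((1, -1), repeat=3)]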
###############################################################################
#getting Q at each chain mid point
Q1_mid= Q1[round(n_t/2)]
Q2_mid= Q2[round(n_t/2)]
Q3_mid= Q3[round(n_t/2)]
Q4_mid= Q4[round(n_t/2)]
Q5_mid= Q5[round(n_t/2)]
Q6_mid= Q6[round(n_t/2)]
Q7_mid= Q7[round(n_t/2)]
Q8_mid= Q8[round(n_t/2)]
print('Q1 is')
print(Q1_mid)
print('Q2 is')
print(Q2_mid)
print('Q3 is')
print(Q3_mid)
print('Q4 is')
print(Q4_mid)
print('Q5 is')
print(Q5_mid)
print('Q6 is')
print(Q6_mid)
print('Q7 is')
print(Q7_mid)
print('Q8 is')
print(Q8_mid)
#computing phi_MF and converting it into fem function
phi_MF=Function(V) # phi function
phi_MF_numr=phi_MF.vector().get_local() #phi over mesh nodes (vector of size nx*ny*nz)
phi_MF_numr= phi_chain_1_numr + phi_chain_2_numr + phi_chain_3_numr + phi_chain_4_numr + phi_chain_5_numr +phi_chain_6_numr +phi_chain_7_numr +phi_chain_8_numr
phi_MF.vector().set_local(phi_MF_numr)
phi_MF.vector().apply('insert')
# free energy
H = k_B*Temp*(1/(2*u0))*assemble(w*w*dx) - k_B*Temp* ( math.log(Q1_mid) + math.log(Q2_mid) + math.log(Q3_mid)+ math.log(Q4_mid) + math.log(Q5_mid)+ math.log(Q6_mid)+ math.log(Q7_mid)+ math.log(Q8_mid)) #total free energy
H_entropic= - k_B*Temp* ( math.log(Q1_mid) + math.log(Q2_mid) + math.log(Q3_mid)+ math.log(Q4_mid)+ math.log(Q5_mid)+ math.log(Q6_mid)+ math.log(Q7_mid)+ math.log(Q8_mid) ) #entropic free energy
H_interaction= k_B*Temp*(1/(2*u0))*assemble(w*w*dx) #interaction free energy
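# In symbols: H = (k_B*T/(2*u0)) * integral(w^2 dx) - k_B*T * sum_i ln(Q_i),
# i.e. the interaction-field penalty plus the entropic contribution of the
# eight tethered chains, each evaluated at its mid-contour partition function.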
#############################################################################################
## getting next w
#############################################################################################
# Expressions that shift the dofs spatially along x, y, or z with periodic wrap-around
shift_length = 2* lambda_1 * ( (1/(sqrt(3)))*l_RMS ) #shift along x-axis
class ShiftedExpr_x(UserExpression):
def __init__(self,func,**kwargs):
super().__init__(**kwargs)
self.func = func
def eval(self,values,x):
x0_shift = x[0] - shift_length
if(x0_shift < x_min_box):
x0_shift += (x_max_box- x_min_box)
x_shift = np.array([x0_shift, x[1], x[2]])
values[0] = self.func(x_shift)
def value_shape(self):
return ()
class ShiftedExpr_y(UserExpression):
def __init__(self,func,**kwargs):
super().__init__(**kwargs)
self.func = func
def eval(self,values,x):
x1_shift = x[1] - shift_length
if(x1_shift < y_min_box):
x1_shift += (y_max_box- y_min_box)
x_shift = np.array([x[0], x1_shift, x[2]])
values[0] = self.func(x_shift)
def value_shape(self):
return ()
class ShiftedExpr_z(UserExpression):
def __init__(self,func,**kwargs):
super().__init__(**kwargs)
self.func = func
def eval(self,values,x):
x2_shift = x[2] - shift_length
if(x2_shift < z_min_box):
x2_shift += (z_max_box- z_min_box)
x_shift = np.array([x[0], x[1], x2_shift])
values[0] = self.func(x_shift)
def value_shape(self):
return ()
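# The three classes above differ only in which coordinate they shift; a single
# axis-parametrised expression could replace them. A minimal sketch (unused
# below, assuming the box limits x_min_box..z_max_box defined earlier):
class ShiftedExpr(UserExpression):
    def __init__(self, func, axis, **kwargs):
        super().__init__(**kwargs)
        self.func = func
        self.axis = axis  # 0 -> x, 1 -> y, 2 -> z
        self.lo = (x_min_box, y_min_box, z_min_box)[axis]
        self.hi = (x_max_box, y_max_box, z_max_box)[axis]
    def eval(self, values, x):
        x_shift = np.array([x[0], x[1], x[2]])
        x_shift[self.axis] -= shift_length
        if x_shift[self.axis] < self.lo:
            x_shift[self.axis] += (self.hi - self.lo)
        values[0] = self.func(x_shift)
    def value_shape(self):
        return ()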
## middle layer of boxes (in polymer network schematic)
phi_MF.set_allow_extrapolation(True)
shift_length = -2* lambda_1 * ( (1/(sqrt(3)))*l_RMS ) #shift along x-axis
phi_MF_B13 = interpolate(ShiftedExpr_x(phi_MF),V)
shift_length = 2* lambda_1 * ( (1/(sqrt(3)))*l_RMS ) #shift along x-axis
phi_MF_B15 = interpolate(ShiftedExpr_x(phi_MF),V)
phi_MF_B13.set_allow_extrapolation(True)
shift_length = -2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B10 = interpolate(ShiftedExpr_y(phi_MF_B13),V)
shift_length = 2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B16 = interpolate(ShiftedExpr_y(phi_MF_B13),V)
shift_length = -2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B11 = interpolate(ShiftedExpr_y(phi_MF),V)
shift_length = 2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B17 = interpolate(ShiftedExpr_y(phi_MF),V)
phi_MF_B15.set_allow_extrapolation(True)
shift_length = -2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B12 = interpolate(ShiftedExpr_y(phi_MF_B15),V)
shift_length = 2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B18 = interpolate(ShiftedExpr_y(phi_MF_B15),V)
## bottom layer of boxes (in polymer network schematic)
shift_length = -2* lambda_3 * ( (1/(sqrt(3)))*l_RMS ) #shift along z-axis
phi_MF_B5 = interpolate(ShiftedExpr_z(phi_MF),V)
phi_MF_B5.set_allow_extrapolation(True)
shift_length = -2* lambda_1 * ( (1/(sqrt(3)))*l_RMS ) #shift along x-axis
phi_MF_B4 = interpolate(ShiftedExpr_x(phi_MF_B5),V)
shift_length = 2* lambda_1 * ( (1/(sqrt(3)))*l_RMS ) #shift along x-axis
phi_MF_B6 = interpolate(ShiftedExpr_x(phi_MF_B5),V)
phi_MF_B4.set_allow_extrapolation(True)
shift_length = -2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B1 = interpolate(ShiftedExpr_y(phi_MF_B4),V)
shift_length = 2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B7 = interpolate(ShiftedExpr_y(phi_MF_B4),V)
shift_length = -2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B2 = interpolate(ShiftedExpr_y(phi_MF_B5),V)
shift_length = 2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B8 = interpolate(ShiftedExpr_y(phi_MF_B5),V)
phi_MF_B6.set_allow_extrapolation(True)
shift_length = -2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B3 = interpolate(ShiftedExpr_y(phi_MF_B6),V)
shift_length = 2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B9 = interpolate(ShiftedExpr_y(phi_MF_B6),V)
## top layer of boxes (in polymer network schematic)
shift_length = 2* lambda_3 * ( (1/(sqrt(3)))*l_RMS ) #shift along z-axis
phi_MF_B23 = interpolate(ShiftedExpr_z(phi_MF),V)
phi_MF_B23.set_allow_extrapolation(True)
shift_length = -2* lambda_1 * ( (1/(sqrt(3)))*l_RMS ) #shift along x-axis
phi_MF_B22 = interpolate(ShiftedExpr_x(phi_MF_B23),V)
shift_length = 2* lambda_1 * ( (1/(sqrt(3)))*l_RMS ) #shift along x-axis
phi_MF_B24 = interpolate(ShiftedExpr_x(phi_MF_B23),V)
phi_MF_B22.set_allow_extrapolation(True)
shift_length = -2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B19 = interpolate(ShiftedExpr_y(phi_MF_B22),V)
shift_length = 2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B25 = interpolate(ShiftedExpr_y(phi_MF_B22),V)
shift_length = -2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B20 = interpolate(ShiftedExpr_y(phi_MF_B23),V)
shift_length = 2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B26 = interpolate(ShiftedExpr_y(phi_MF_B23),V)
phi_MF_B24.set_allow_extrapolation(True)
shift_length = -2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B21 = interpolate(ShiftedExpr_y(phi_MF_B24),V)
shift_length = 2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B27 = interpolate(ShiftedExpr_y(phi_MF_B24),V)
## phi_MF_total
phi_MF_total= Function(V)
phi_MF_total_numr=phi_MF_total.vector().get_local()
phi_MF_total_numr= phi_MF_B1.vector().get_local() + phi_MF_B2.vector().get_local() + phi_MF_B3.vector().get_local() + phi_MF_B4.vector().get_local() + phi_MF_B5.vector().get_local() + phi_MF_B6.vector().get_local() + phi_MF_B7.vector().get_local() + phi_MF_B8.vector().get_local() + phi_MF_B9.vector().get_local() + phi_MF_B10.vector().get_local() + phi_MF_B11.vector().get_local() + phi_MF_B12.vector().get_local() + phi_MF_B13.vector().get_local() + phi_MF.vector().get_local() + phi_MF_B15.vector().get_local() + phi_MF_B16.vector().get_local() + phi_MF_B17.vector().get_local() + phi_MF_B18.vector().get_local() + phi_MF_B19.vector().get_local() + phi_MF_B20.vector().get_local() + phi_MF_B21.vector().get_local() + phi_MF_B22.vector().get_local() + phi_MF_B23.vector().get_local() + phi_MF_B24.vector().get_local() + phi_MF_B25.vector().get_local() + phi_MF_B26.vector().get_local() + phi_MF_B27.vector().get_local()
phi_MF_total.vector().set_local(phi_MF_total_numr)
phi_MF_total.vector().apply('insert')
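# phi_MF_total adds the central box density (phi_MF) to its 26 periodic images
# (B1..B13 and B15..B27), i.e. all neighbours in a 3x3x3 stack of boxes.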
#defining w field for next step
w=Function(V)
w_numr= w.vector().get_local()
w_numr= u0* phi_MF_total.vector().get_local()
w.vector().set_local(w_numr)
w.vector().apply('insert')
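# Self-consistent field update: w(x) = u0 * phi_MF_total(x). The loop below
# repeats this update until the relative change in free energy falls below
# delta_H_ratio_threshold.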
############################################################### Iterating for finding equilibrium mean field w
count=0
# while error_w_norm > error_w_norm_threshold:
while abs(delta_H_ratio) > delta_H_ratio_threshold:
count=count+1
print(count)
print(delta_H_ratio)
if count == 51:
print('count=51')
# refine dt and n_t (in the hope of bringing convergence below 1%)
dt= dt*(0.5)
n_t=int(T/dt +1)
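# A smaller dt refines the contour-step discretisation used inside
# q_computation/q_star_computation (n_t = T/dt + 1 contour points), which is
# presumably why it is halved once 50 iterations have not converged.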
###############################################################################
## Point 0 computation
# XDMF file to store q_point_0
xdmf_q_star = XDMFFile("q_star.xdmf")
q_point_0=Function(V)
q_point_0 = q_star_computation(X0, w)
###############################################################################
## Chain computation
###############################################################################
###############################################################################
## Chain 1 computation
# XDMF file to store q_point_1
xdmf_q = XDMFFile("q.xdmf")
q_point_1=Function(V)
q_point_1= q_computation(X1, w)
# chain 1 partition function and segment density
Q1, phi_chain_1, phi_chain_1_numr = single_chain_computation()
###############################################################################
## Chain 2 computation
# XDMF file to store q_point_2
xdmf_q = XDMFFile("q.xdmf")
q_point_2=Function(V)
q_point_2= q_computation(X2, w)
# chain 2 partition function and segment density
Q2, phi_chain_2, phi_chain_2_numr= single_chain_computation()
###############################################################################
## Chain 3 computation
# XDMF file to store q_point_3
xdmf_q = XDMFFile("q.xdmf")
q_point_3=Function(V)
q_point_3= q_computation(X3, w)
# chain 3 partition function and segment density
Q3, phi_chain_3, phi_chain_3_numr= single_chain_computation()
###############################################################################
## Chain 4 computation
# XDMF file to store q_point_4
xdmf_q = XDMFFile("q.xdmf")
q_point_4=Function(V)
q_point_4= q_computation(X4, w)
# chain 4 partition function and segment density
Q4, phi_chain_4, phi_chain_4_numr= single_chain_computation()
###############################################################################
## Chain 5 computation
# XDMF file to store q_point_5
xdmf_q = XDMFFile("q.xdmf")
q_point_5=Function(V)
q_point_5= q_computation(X5, w)
# chain 5 partition function and segment density
Q5, phi_chain_5, phi_chain_5_numr= single_chain_computation()
###############################################################################
## Chain 6 computation
# XDMF file to store q_point_6
xdmf_q = XDMFFile("q.xdmf")
q_point_6=Function(V)
q_point_6= q_computation(X6, w)
# chain 6 partition function and segment density
Q6, phi_chain_6, phi_chain_6_numr= single_chain_computation()
###############################################################################
## Chain 7 computation
# XDMF file to store q_point_7
xdmf_q = XDMFFile("q.xdmf")
q_point_7=Function(V)
q_point_7= q_computation(X7, w)
# chain 7 partition function and segment density
Q7, phi_chain_7, phi_chain_7_numr= single_chain_computation()
###############################################################################
## Chain 8 computation
# XDMF file to store q_point_8
xdmf_q = XDMFFile("q.xdmf")
q_point_8=Function(V)
q_point_8= q_computation(X8, w)
# chain 8 partition function and segment density
Q8, phi_chain_8, phi_chain_8_numr= single_chain_computation()
###############################################################################
##getting Q at each chain mid point
Q1_mid= Q1[round(n_t/2)]
Q2_mid= Q2[round(n_t/2)]
Q3_mid= Q3[round(n_t/2)]
Q4_mid= Q4[round(n_t/2)]
Q5_mid= Q5[round(n_t/2)]
Q6_mid= Q6[round(n_t/2)]
Q7_mid= Q7[round(n_t/2)]
Q8_mid= Q8[round(n_t/2)]
print('Q1 is')
print(Q1_mid)
print('Q2 is')
print(Q2_mid)
print('Q3 is')
print(Q3_mid)
print('Q4 is')
print(Q4_mid)
print('Q5 is')
print(Q5_mid)
print('Q6 is')
print(Q6_mid)
print('Q7 is')
print(Q7_mid)
print('Q8 is')
print(Q8_mid)
#computing phi_MF and converting it into fem function
phi_MF=Function(V) # phi function
phi_MF_numr=phi_MF.vector().get_local() #phi over mesh nodes (vector of size nx*ny*nz)
phi_MF_numr= phi_chain_1_numr + phi_chain_2_numr + phi_chain_3_numr + phi_chain_4_numr + phi_chain_5_numr +phi_chain_6_numr +phi_chain_7_numr +phi_chain_8_numr
phi_MF.vector().set_local(phi_MF_numr)
phi_MF.vector().apply('insert')
# free energy
H = k_B*Temp*(1/(2*u0))*assemble(w*w*dx) - k_B*Temp* ( math.log(Q1_mid) + math.log(Q2_mid) + math.log(Q3_mid)+ math.log(Q4_mid) + math.log(Q5_mid)+ math.log(Q6_mid)+ math.log(Q7_mid)+ math.log(Q8_mid)) #total free energy
H_entropic= - k_B*Temp* ( math.log(Q1_mid) + math.log(Q2_mid) + math.log(Q3_mid)+ math.log(Q4_mid)+ math.log(Q5_mid)+ math.log(Q6_mid)+ math.log(Q7_mid)+ math.log(Q8_mid) ) #entropic free energy
H_interaction= k_B*Temp*(1/(2*u0))*assemble(w*w*dx) #interaction free energy
#############################################################################################
## getting next w
#############################################################################################
## middle layer of boxes
phi_MF.set_allow_extrapolation(True)
shift_length = -2* lambda_1 * ( (1/(sqrt(3)))*l_RMS ) #shift along x-axis
phi_MF_B13 = interpolate(ShiftedExpr_x(phi_MF),V)
shift_length = 2* lambda_1 * ( (1/(sqrt(3)))*l_RMS ) #shift along x-axis
phi_MF_B15 = interpolate(ShiftedExpr_x(phi_MF),V)
phi_MF_B13.set_allow_extrapolation(True)
shift_length = -2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B10 = interpolate(ShiftedExpr_y(phi_MF_B13),V)
shift_length = 2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B16 = interpolate(ShiftedExpr_y(phi_MF_B13),V)
shift_length = -2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B11 = interpolate(ShiftedExpr_y(phi_MF),V)
shift_length = 2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B17 = interpolate(ShiftedExpr_y(phi_MF),V)
phi_MF_B15.set_allow_extrapolation(True)
shift_length = -2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B12 = interpolate(ShiftedExpr_y(phi_MF_B15),V)
shift_length = 2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B18 = interpolate(ShiftedExpr_y(phi_MF_B15),V)
## bottom layer of boxes
shift_length = -2* lambda_3 * ( (1/(sqrt(3)))*l_RMS ) #shift along z-axis
phi_MF_B5 = interpolate(ShiftedExpr_z(phi_MF),V)
phi_MF_B5.set_allow_extrapolation(True)
shift_length = -2* lambda_1 * ( (1/(sqrt(3)))*l_RMS ) #shift along x-axis
phi_MF_B4 = interpolate(ShiftedExpr_x(phi_MF_B5),V)
shift_length = 2* lambda_1 * ( (1/(sqrt(3)))*l_RMS ) #shift along x-axis
phi_MF_B6 = interpolate(ShiftedExpr_x(phi_MF_B5),V)
phi_MF_B4.set_allow_extrapolation(True)
shift_length = -2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B1 = interpolate(ShiftedExpr_y(phi_MF_B4),V)
shift_length = 2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B7 = interpolate(ShiftedExpr_y(phi_MF_B4),V)
shift_length = -2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B2 = interpolate(ShiftedExpr_y(phi_MF_B5),V)
shift_length = 2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B8 = interpolate(ShiftedExpr_y(phi_MF_B5),V)
phi_MF_B6.set_allow_extrapolation(True)
shift_length = -2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B3 = interpolate(ShiftedExpr_y(phi_MF_B6),V)
shift_length = 2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B9 = interpolate(ShiftedExpr_y(phi_MF_B6),V)
## top layer of boxes
shift_length = 2* lambda_3 * ( (1/(sqrt(3)))*l_RMS ) #shift along z-axis
phi_MF_B23 = interpolate(ShiftedExpr_z(phi_MF),V)
phi_MF_B23.set_allow_extrapolation(True)
shift_length = -2* lambda_1 * ( (1/(sqrt(3)))*l_RMS ) #shift along x-axis
phi_MF_B22 = interpolate(ShiftedExpr_x(phi_MF_B23),V)
shift_length = 2* lambda_1 * ( (1/(sqrt(3)))*l_RMS ) #shift along x-axis
phi_MF_B24 = interpolate(ShiftedExpr_x(phi_MF_B23),V)
phi_MF_B22.set_allow_extrapolation(True)
shift_length = -2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B19 = interpolate(ShiftedExpr_y(phi_MF_B22),V)
shift_length = 2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B25 = interpolate(ShiftedExpr_y(phi_MF_B22),V)
shift_length = -2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B20 = interpolate(ShiftedExpr_y(phi_MF_B23),V)
shift_length = 2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B26 = interpolate(ShiftedExpr_y(phi_MF_B23),V)
phi_MF_B24.set_allow_extrapolation(True)
shift_length = -2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B21 = interpolate(ShiftedExpr_y(phi_MF_B24),V)
shift_length = 2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B27 = interpolate(ShiftedExpr_y(phi_MF_B24),V)
## phi_MF_total
phi_MF_total= Function(V)
phi_MF_total_numr=phi_MF_total.vector().get_local()
phi_MF_total_numr= phi_MF_B1.vector().get_local() + phi_MF_B2.vector().get_local() + phi_MF_B3.vector().get_local() + phi_MF_B4.vector().get_local() + phi_MF_B5.vector().get_local() + phi_MF_B6.vector().get_local() + phi_MF_B7.vector().get_local() + phi_MF_B8.vector().get_local() + phi_MF_B9.vector().get_local() + phi_MF_B10.vector().get_local() + phi_MF_B11.vector().get_local() + phi_MF_B12.vector().get_local() + phi_MF_B13.vector().get_local() + phi_MF.vector().get_local() + phi_MF_B15.vector().get_local() + phi_MF_B16.vector().get_local() + phi_MF_B17.vector().get_local() + phi_MF_B18.vector().get_local() + phi_MF_B19.vector().get_local() + phi_MF_B20.vector().get_local() + phi_MF_B21.vector().get_local() + phi_MF_B22.vector().get_local() + phi_MF_B23.vector().get_local() + phi_MF_B24.vector().get_local() + phi_MF_B25.vector().get_local() + phi_MF_B26.vector().get_local() + phi_MF_B27.vector().get_local()
phi_MF_total.vector().set_local(phi_MF_total_numr)
phi_MF_total.vector().apply('insert')
###############################################################
## delta_H_ratio check
###############################################################
if count != 1 and abs((H-H_next)/H_next) < delta_H_ratio_threshold:
H_next=H
H_next_entropic= H_entropic
H_next_interaction= H_interaction
print('iteration ended after break')
break
###############################################################
## getting w for next time step
###############################################################
#defining w field for next step
w_next=Function(V) # next mean-field w
w_next_numr= w_next.vector().get_local()
w_next_numr= u0* phi_MF_total.vector().get_local()
w_next.vector().set_local(w_next_numr)
w_next.vector().apply('insert')
###############################################################################
## computation for w_next
###############################################################################
###############################################################################
## Point 0 computation
# XDMF file to store q_point_0
xdmf_q_star = XDMFFile("q_star.xdmf")
q_point_0=Function(V)
q_point_0 = q_star_computation(X0, w_next)
###############################################################################
## Chain computation
###############################################################################
###############################################################################
## Chain 1 computation
# XDMF file to store q_point_1
xdmf_q = XDMFFile("q.xdmf")
q_point_1=Function(V)
q_point_1= q_computation(X1, w_next)
# chain 1 partition function and segment density
Q1, phi_chain_1, phi_chain_1_numr = single_chain_computation()
###############################################################################
## Chain 2 computation
# XDMF file to store q_point_2
xdmf_q = XDMFFile("q.xdmf")
q_point_2=Function(V)
q_point_2= q_computation(X2, w_next)
# chain 2 partition function and segment density
Q2, phi_chain_2, phi_chain_2_numr= single_chain_computation()
###############################################################################
## Chain 3 computation
# XDMF file to store q_point_3
xdmf_q = XDMFFile("q.xdmf")
q_point_3=Function(V)
q_point_3= q_computation(X3, w_next)
# chain 3 partition function and segment density
Q3, phi_chain_3, phi_chain_3_numr= single_chain_computation()
###############################################################################
## Chain 4 computation
# XDMF file to store q_point_4
xdmf_q = XDMFFile("q.xdmf")
q_point_4=Function(V)
q_point_4= q_computation(X4, w_next)
# chain 4 partition function and segment density
Q4, phi_chain_4, phi_chain_4_numr= single_chain_computation()
###############################################################################
## Chain 5 computation
# XDMF file to store q_point_5
xdmf_q = XDMFFile("q.xdmf")
q_point_5=Function(V)
q_point_5= q_computation(X5, w_next)
# chain 5 partition function and segment density
Q5, phi_chain_5, phi_chain_5_numr= single_chain_computation()
###############################################################################
## Chain 6 computation
# XDMF file to store q_point_6
xdmf_q = XDMFFile("q.xdmf")
q_point_6=Function(V)
q_point_6= q_computation(X6, w_next)
# chain 6 partition function and segment density
Q6, phi_chain_6, phi_chain_6_numr= single_chain_computation()
###############################################################################
## Chain 7 computation
# XDMF file to store q_point_7
xdmf_q = XDMFFile("q.xdmf")
q_point_7=Function(V)
q_point_7= q_computation(X7, w_next)
# chain 7 partition function and segment density
Q7, phi_chain_7, phi_chain_7_numr= single_chain_computation()
###############################################################################
## Chain 8 computation
# XDMF file to store q_point_8
xdmf_q = XDMFFile("q.xdmf")
q_point_8=Function(V)
q_point_8= q_computation(X8, w_next)
# chain 8 partition function and segment density
Q8, phi_chain_8, phi_chain_8_numr= single_chain_computation()
###############################################################################
##getting Q at each chain mid point
Q1_mid= Q1[round(n_t/2)]
Q2_mid= Q2[round(n_t/2)]
Q3_mid= Q3[round(n_t/2)]
Q4_mid= Q4[round(n_t/2)]
Q5_mid= Q5[round(n_t/2)]
Q6_mid= Q6[round(n_t/2)]
Q7_mid= Q7[round(n_t/2)]
Q8_mid= Q8[round(n_t/2)]
print('Q1 is')
print(Q1_mid)
print('Q2 is')
print(Q2_mid)
print('Q3 is')
print(Q3_mid)
print('Q4 is')
print(Q4_mid)
print('Q5 is')
print(Q5_mid)
print('Q6 is')
print(Q6_mid)
print('Q7 is')
print(Q7_mid)
print('Q8 is')
print(Q8_mid)
#computing phi_MF and converting it into fem function
phi_MF=Function(V) # phi function
phi_MF_numr=phi_MF.vector().get_local() #phi over mesh nodes (vector of size nx*ny*nz)
phi_MF_numr= phi_chain_1_numr + phi_chain_2_numr + phi_chain_3_numr + phi_chain_4_numr + phi_chain_5_numr +phi_chain_6_numr +phi_chain_7_numr +phi_chain_8_numr
phi_MF.vector().set_local(phi_MF_numr)
phi_MF.vector().apply('insert')
# free energy
H_next = k_B*Temp*(1/(2*u0))*assemble(w_next*w_next*dx) - k_B*Temp* ( math.log(Q1_mid) + math.log(Q2_mid) + math.log(Q3_mid)+ math.log(Q4_mid) + math.log(Q5_mid)+ math.log(Q6_mid)+ math.log(Q7_mid)+ math.log(Q8_mid)) # total free energy
H_next_entropic= - k_B*Temp* ( math.log(Q1_mid) + math.log(Q2_mid) + math.log(Q3_mid)+ math.log(Q4_mid)+ math.log(Q5_mid)+ math.log(Q6_mid)+ math.log(Q7_mid)+ math.log(Q8_mid) ) #entropic free energy
H_next_interaction= k_B*Temp*(1/(2*u0))*assemble(w_next*w_next*dx) #interaction free energy
# computing relative change in H and H_next
delta_H_ratio=(H_next-H)/H
delta_H_ratio_array[count]=delta_H_ratio
print(delta_H_ratio)
if abs((H_next-H)/H) < delta_H_ratio_threshold:
print('iteration ended after break')
break
#############################################################################################
## getting next w
#############################################################################################
## middle layer of boxes
phi_MF.set_allow_extrapolation(True)
shift_length = -2* lambda_1 * ( (1/(sqrt(3)))*l_RMS ) #shift along x-axis
phi_MF_B13 = interpolate(ShiftedExpr_x(phi_MF),V)
shift_length = 2* lambda_1 * ( (1/(sqrt(3)))*l_RMS ) #shift along x-axis
phi_MF_B15 = interpolate(ShiftedExpr_x(phi_MF),V)
phi_MF_B13.set_allow_extrapolation(True)
shift_length = -2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B10 = interpolate(ShiftedExpr_y(phi_MF_B13),V)
shift_length = 2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B16 = interpolate(ShiftedExpr_y(phi_MF_B13),V)
shift_length = -2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B11 = interpolate(ShiftedExpr_y(phi_MF),V)
shift_length = 2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B17 = interpolate(ShiftedExpr_y(phi_MF),V)
phi_MF_B15.set_allow_extrapolation(True)
shift_length = -2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B12 = interpolate(ShiftedExpr_y(phi_MF_B15),V)
shift_length = 2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B18 = interpolate(ShiftedExpr_y(phi_MF_B15),V)
## bottom layer of boxes
shift_length = -2* lambda_3 * ( (1/(sqrt(3)))*l_RMS ) #shift along z-axis
phi_MF_B5 = interpolate(ShiftedExpr_z(phi_MF),V)
phi_MF_B5.set_allow_extrapolation(True)
shift_length = -2* lambda_1 * ( (1/(sqrt(3)))*l_RMS ) #shift along x-axis
phi_MF_B4 = interpolate(ShiftedExpr_x(phi_MF_B5),V)
shift_length = 2* lambda_1 * ( (1/(sqrt(3)))*l_RMS ) #shift along x-axis
phi_MF_B6 = interpolate(ShiftedExpr_x(phi_MF_B5),V)
phi_MF_B4.set_allow_extrapolation(True)
shift_length = -2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B1 = interpolate(ShiftedExpr_y(phi_MF_B4),V)
shift_length = 2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B7 = interpolate(ShiftedExpr_y(phi_MF_B4),V)
shift_length = -2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B2 = interpolate(ShiftedExpr_y(phi_MF_B5),V)
shift_length = 2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B8 = interpolate(ShiftedExpr_y(phi_MF_B5),V)
phi_MF_B6.set_allow_extrapolation(True)
shift_length = -2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B3 = interpolate(ShiftedExpr_y(phi_MF_B6),V)
shift_length = 2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B9 = interpolate(ShiftedExpr_y(phi_MF_B6),V)
## top layer of boxes
shift_length = 2* lambda_3 * ( (1/(sqrt(3)))*l_RMS ) #shift along z-axis
phi_MF_B23 = interpolate(ShiftedExpr_z(phi_MF),V)
phi_MF_B23.set_allow_extrapolation(True)
shift_length = -2* lambda_1 * ( (1/(sqrt(3)))*l_RMS ) #shift along x-axis
phi_MF_B22 = interpolate(ShiftedExpr_x(phi_MF_B23),V)
shift_length = 2* lambda_1 * ( (1/(sqrt(3)))*l_RMS ) #shift along x-axis
phi_MF_B24 = interpolate(ShiftedExpr_x(phi_MF_B23),V)
phi_MF_B22.set_allow_extrapolation(True)
shift_length = -2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B19 = interpolate(ShiftedExpr_y(phi_MF_B22),V)
shift_length = 2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B25 = interpolate(ShiftedExpr_y(phi_MF_B22),V)
shift_length = -2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B20 = interpolate(ShiftedExpr_y(phi_MF_B23),V)
shift_length = 2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B26 = interpolate(ShiftedExpr_y(phi_MF_B23),V)
phi_MF_B24.set_allow_extrapolation(True)
shift_length = -2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B21 = interpolate(ShiftedExpr_y(phi_MF_B24),V)
shift_length = 2* lambda_2 * ( (1/(sqrt(3)))*l_RMS ) #shift along y-axis
phi_MF_B27 = interpolate(ShiftedExpr_y(phi_MF_B24),V)
## phi_MF_total
phi_MF_total= Function(V)
phi_MF_total_numr=phi_MF_total.vector().get_local()
phi_MF_total_numr= phi_MF_B1.vector().get_local() + phi_MF_B2.vector().get_local() + phi_MF_B3.vector().get_local() + phi_MF_B4.vector().get_local() + phi_MF_B5.vector().get_local() + phi_MF_B6.vector().get_local() + phi_MF_B7.vector().get_local() + phi_MF_B8.vector().get_local() + phi_MF_B9.vector().get_local() + phi_MF_B10.vector().get_local() + phi_MF_B11.vector().get_local() + phi_MF_B12.vector().get_local() + phi_MF_B13.vector().get_local() + phi_MF.vector().get_local() + phi_MF_B15.vector().get_local() + phi_MF_B16.vector().get_local() + phi_MF_B17.vector().get_local() + phi_MF_B18.vector().get_local() + phi_MF_B19.vector().get_local() + phi_MF_B20.vector().get_local() + phi_MF_B21.vector().get_local() + phi_MF_B22.vector().get_local() + phi_MF_B23.vector().get_local() + phi_MF_B24.vector().get_local() + phi_MF_B25.vector().get_local() + phi_MF_B26.vector().get_local() + phi_MF_B27.vector().get_local()
phi_MF_total.vector().set_local(phi_MF_total_numr)
phi_MF_total.vector().apply('insert')
###############################################################
## getting w for next time step
###############################################################
#defining w field for next step
w=Function(V) # mean-field w for the next iteration
w_numr= w.vector().get_local()
w_numr= u0* phi_MF_total.vector().get_local()
w.vector().set_local(w_numr)
w.vector().apply('insert')
## converged free energy
W= H_next # total free energy
W_entropic= H_next_entropic # entropic free energy
W_interaction= H_next_interaction #interaction free energy
############################ End of computation
###############################################
## saving avg segment density results
###############################################
File('phi_MF_B1.pvd') << (phi_MF_B1)
File('phi_MF_B2.pvd') << (phi_MF_B2)
File('phi_MF_B3.pvd') << (phi_MF_B3)
File('phi_MF_B4.pvd') << (phi_MF_B4)
File('phi_MF_B5.pvd') << (phi_MF_B5)
File('phi_MF_B6.pvd') << (phi_MF_B6)
File('phi_MF_B7.pvd') << (phi_MF_B7)
File('phi_MF_B8.pvd') << (phi_MF_B8)
File('phi_MF_B9.pvd') << (phi_MF_B9)
File('phi_MF_B10.pvd') << (phi_MF_B10)
File('phi_MF_B11.pvd') << (phi_MF_B11)
File('phi_MF_B12.pvd') << (phi_MF_B12)
File('phi_MF_B13.pvd') << (phi_MF_B13)
File('phi_MF.pvd') << (phi_MF)
File('phi_MF_B15.pvd') << (phi_MF_B15)
File('phi_MF_B16.pvd') << (phi_MF_B16)
File('phi_MF_B17.pvd') << (phi_MF_B17)
File('phi_MF_B18.pvd') << (phi_MF_B18)
File('phi_MF_B19.pvd') << (phi_MF_B19)
File('phi_MF_B20.pvd') << (phi_MF_B20)
File('phi_MF_B21.pvd') << (phi_MF_B21)
File('phi_MF_B22.pvd') << (phi_MF_B22)
File('phi_MF_B23.pvd') << (phi_MF_B23)
File('phi_MF_B24.pvd') << (phi_MF_B24)
File('phi_MF_B25.pvd') << (phi_MF_B25)
File('phi_MF_B26.pvd') << (phi_MF_B26)
File('phi_MF_B27.pvd') << (phi_MF_B27)
File('phi_MF_total.pvd') << (phi_MF_total)
File('w.pvd') << (w)
File('w_next.pvd') << (w_next)
===============================================================================
Record: tests/parsers/winreg_plugins/run.py
Repo: Defense-Cyber-Crime-Center/plaso @ 4f3a85fbea10637c1cdbf0cde9fc539fdcea9c47
Blob: 849a34ea7a8b9cec92a22de8754495b9d89211d7 (6,351 bytes, Python/.py)
Licenses: ["Apache-2.0"]; stars: 2 (2016-02-18T12:46:29.000Z to 2022-03-13T03:04:59.000Z); forks: 6 (2016-12-18T08:05:36.000Z to 2021-04-06T14:19:11.000Z)
===============================================================================
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the Run Windows Registry plugin."""
import unittest
from plaso.formatters import winreg as _ # pylint: disable=unused-import
from plaso.parsers.winreg_plugins import run
from tests.parsers.winreg_plugins import test_lib
class RunNtuserPluginTest(test_lib.RegistryPluginTestCase):
"""Tests for the Run Windows Registry plugin on the User hive."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._plugin = run.RunUserPlugin()
def testProcess(self):
"""Tests the Process function."""
test_file_entry = self._GetTestFileEntryFromPath([u'NTUSER-RunTests.DAT'])
key_path = u'\\Software\\Microsoft\\Windows\\CurrentVersion\\Run'
winreg_key = self._GetKeyFromFileEntry(test_file_entry, key_path)
event_queue_consumer = self._ParseKeyWithPlugin(
self._plugin, winreg_key, file_entry=test_file_entry)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEqual(len(event_objects), 1)
event_object = event_objects[0]
self.assertEqual(event_object.pathspec, test_file_entry.path_spec)
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
self.assertEqual(event_object.parser, self._plugin.plugin_name)
# Timestamp is: 2012-04-05T17:03:53.992061+00:00
self.assertEqual(event_object.timestamp, 1333645433992061)
expected_msg = (
u'[{0:s}] Sidebar: %ProgramFiles%\\Windows Sidebar\\Sidebar.exe '
u'/autoRun').format(key_path)
expected_msg_short = (
u'[{0:s}] Sidebar: %ProgramFiles%\\Wind...').format(key_path)
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
class RunOnceNtuserPluginTest(test_lib.RegistryPluginTestCase):
"""Tests for the RunOnce Windows Registry plugin on the User hive."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._plugin = run.RunUserPlugin()
def testProcess(self):
"""Tests the Process function."""
test_file_entry = self._GetTestFileEntryFromPath([u'NTUSER-RunTests.DAT'])
key_path = u'\\Software\\Microsoft\\Windows\\CurrentVersion\\RunOnce'
winreg_key = self._GetKeyFromFileEntry(test_file_entry, key_path)
event_queue_consumer = self._ParseKeyWithPlugin(
self._plugin, winreg_key, file_entry=test_file_entry)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEqual(len(event_objects), 1)
event_object = event_objects[0]
self.assertEqual(event_object.pathspec, test_file_entry.path_spec)
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
self.assertEqual(event_object.parser, self._plugin.plugin_name)
# Timestamp is: 2012-04-05T17:03:53.992061+00:00
self.assertEqual(event_object.timestamp, 1333645433992061)
expected_msg = (
u'[{0:s}] mctadmin: C:\\Windows\\System32\\mctadmin.exe').format(
key_path)
expected_msg_short = (
u'[{0:s}] mctadmin: C:\\Windows\\Sys...').format(key_path)
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
class RunSoftwarePluginTest(test_lib.RegistryPluginTestCase):
"""Tests for the Run Windows Registry plugin on the Software hive."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._plugin = run.RunSoftwarePlugin()
def testProcess(self):
"""Tests the Process function."""
test_file_entry = self._GetTestFileEntryFromPath([u'SOFTWARE-RunTests'])
key_path = u'\\Microsoft\\Windows\\CurrentVersion\\Run'
winreg_key = self._GetKeyFromFileEntry(test_file_entry, key_path)
event_queue_consumer = self._ParseKeyWithPlugin(
self._plugin, winreg_key, file_entry=test_file_entry)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEqual(len(event_objects), 3)
event_object = event_objects[0]
self.assertEqual(event_object.pathspec, test_file_entry.path_spec)
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
self.assertEqual(event_object.parser, self._plugin.plugin_name)
# Timestamp is: 2011-09-16T20:57:09.067575+00:00
self.assertEqual(event_object.timestamp, 1316206629067575)
expected_msg = (
u'[{0:s}] VMware Tools: \"C:\\Program Files\\VMware\\VMware Tools'
u'\\VMwareTray.exe\"').format(key_path)
expected_msg_short = (
u'[{0:s}] VMware Tools: \"C:\\Program Files\\VMwar...').format(key_path)
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
self.assertEqual(event_objects[1].timestamp, 1316206629067575)
class RunOnceSoftwarePluginTest(test_lib.RegistryPluginTestCase):
"""Tests for the RunOnce Windows Registry plugin on the Software hive."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._plugin = run.RunSoftwarePlugin()
def testProcess(self):
"""Tests the Process function."""
test_file_entry = self._GetTestFileEntryFromPath([u'SOFTWARE-RunTests'])
key_path = u'\\Microsoft\\Windows\\CurrentVersion\\RunOnce'
winreg_key = self._GetKeyFromFileEntry(test_file_entry, key_path)
event_queue_consumer = self._ParseKeyWithPlugin(
self._plugin, winreg_key, file_entry=test_file_entry)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEqual(len(event_objects), 1)
event_object = event_objects[0]
self.assertEqual(event_object.pathspec, test_file_entry.path_spec)
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
self.assertEqual(event_object.parser, self._plugin.plugin_name)
# Timestamp is: 2012-04-06T14:07:27.750000+00:00
self.assertEqual(event_object.timestamp, 1333721247750000)
expected_msg = (
u'[{0:s}] *WerKernelReporting: %SYSTEMROOT%\\SYSTEM32\\WerFault.exe '
u'-k -rq').format(key_path)
expected_msg_short = (
u'[{0:s}] *WerKernelReporting: %SYSTEMROOT%...').format(key_path)
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
if __name__ == '__main__':
unittest.main()
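# These cases can be run directly (python tests/parsers/winreg_plugins/run.py)
# or through a runner such as `python -m unittest`.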
===============================================================================
Record: main.py
Repo: ysb06/boostcamp-p1-image @ 031ec206e3fd67354eda297196e2b62fa1b60a0a
Blob: 849ad094c038970eb85fee42f96b69d38518c241 (6,914 bytes, Python/.py)
Licenses: ["MIT"]
===============================================================================
import random
import numpy as np
import torch
from torch import optim
from mask_detector.combined_predictor import Predictor_G1, Predictor_M2, Predictor_M3, Predictor_M4
from mask_detector.dataset import (DatasetType, generate_test_datasets,
generate_train_datasets)
from mask_detector.loss import FocalLoss
from mask_detector.models import BaseModel
from mask_detector.trainer import Trainee
def train_model():
print(f"PyTorch version: {torch.__version__}.")
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(f"device: {device}")
seed = 37764
seed_everything(seed)
train_set, valid_set = generate_train_datasets("/opt/ml/input/data", random_seed=seed)
# Combined model 2
# train_mask_classifier(device, seed, train_set, valid_set)
# train_gender_classifier(device, seed, train_set, valid_set)
# train_u30_classifier(device, seed, train_set, valid_set)
# train_o59_classifier(device, seed, train_set, valid_set)
# Single model 1 (efficientnet), 2 (resnext)
# train_general_classifier(device, seed, train_set, valid_set)
# Combined model 3
# Revised to group ages up to 58 into the 60 bracket
# train_mask_classifier(device, seed, train_set, valid_set)
# train_o59_classifier(device, seed, train_set, valid_set)
# train_gender_u30_combined_classifier(device, seed, train_set, valid_set)
# Ensemble model
train_general_classifier(device, seed, train_set, valid_set)
train_mask_classifier(device, seed, train_set, valid_set)
train_gender_classifier(device, seed, train_set, valid_set)
train_u30_classifier(device, seed, train_set, valid_set)
train_o59_classifier(device, seed, train_set, valid_set)
def train_mask_classifier(device, seed, train_set, valid_set):
mask_trainee = Trainee("mask-classifier", device=device)
mask_trainee.batch_size = 512
mask_trainee.epochs = 10
mask_trainee.prepare_dataset(train_set, valid_set, DatasetType.Mask_Combined, random_seed=seed)
mask_trainee.log_interval = int(len(mask_trainee.train_set_loader) / 3)
mask_trainee.model = BaseModel(num_classes=3).to(device)
mask_trainee.criterion = FocalLoss()
mask_trainee.optimizer = optim.Adam(
mask_trainee.model.parameters(), lr=0.0001
)
mask_trainee.scheduler = optim.lr_scheduler.CosineAnnealingLR(
mask_trainee.optimizer,
T_max=50,
eta_min=0
)
mask_trainee.train()
def train_gender_classifier(device, seed, train_set, valid_set):
mask_trainee = Trainee("gender-classifier", device=device)
mask_trainee.batch_size = 512
mask_trainee.epochs = 10
mask_trainee.prepare_dataset(train_set, valid_set, DatasetType.Gender, random_seed=seed)
mask_trainee.log_interval = int(len(mask_trainee.train_set_loader) / 3)
mask_trainee.model = BaseModel(num_classes=2).to(device)
mask_trainee.criterion = FocalLoss()
mask_trainee.optimizer = optim.Adam(
mask_trainee.model.parameters(), lr=0.0001
)
mask_trainee.scheduler = optim.lr_scheduler.CosineAnnealingLR(
mask_trainee.optimizer,
T_max=50,
eta_min=0
)
mask_trainee.train()
def train_u30_classifier(device, seed, train_set, valid_set):
mask_trainee = Trainee("u30-classifier", device=device)
mask_trainee.batch_size = 512
mask_trainee.epochs = 10
mask_trainee.prepare_dataset(train_set, valid_set, DatasetType.Under30Age, random_seed=seed)
mask_trainee.log_interval = int(len(mask_trainee.train_set_loader) / 3)
mask_trainee.model = BaseModel(num_classes=2).to(device)
mask_trainee.criterion = FocalLoss()
mask_trainee.optimizer = optim.Adam(
mask_trainee.model.parameters(), lr=0.0001
)
mask_trainee.scheduler = optim.lr_scheduler.CosineAnnealingLR(
mask_trainee.optimizer,
T_max=50,
eta_min=0
)
mask_trainee.train()
def train_o59_classifier(device, seed, train_set, valid_set):
mask_trainee = Trainee("o59-classifier", device=device)
mask_trainee.batch_size = 512
mask_trainee.epochs = 10
mask_trainee.prepare_dataset(train_set, valid_set, DatasetType.Over59Age, random_seed=seed)
mask_trainee.log_interval = int(len(mask_trainee.train_set_loader) / 3)
mask_trainee.model = BaseModel(num_classes=2).to(device)
mask_trainee.criterion = FocalLoss()
mask_trainee.optimizer = optim.Adam(
mask_trainee.model.parameters(), lr=0.0001
)
mask_trainee.scheduler = optim.lr_scheduler.CosineAnnealingLR(
mask_trainee.optimizer,
T_max=50,
eta_min=0
)
mask_trainee.train()
def train_gender_u30_combined_classifier(device, seed, train_set, valid_set):
mask_trainee = Trainee("gender-u30-classifier", device=device)
mask_trainee.batch_size = 64
mask_trainee.epochs = 10
mask_trainee.prepare_dataset(train_set, valid_set, DatasetType.Gender_U30_Combined, random_seed=seed)
mask_trainee.log_interval = int(len(mask_trainee.train_set_loader) / 3) # log 3 times per epoch
mask_trainee.model = BaseModel(num_classes=4).to(device)
mask_trainee.criterion = FocalLoss()
mask_trainee.optimizer = optim.Adam(
mask_trainee.model.parameters(), lr=0.0001
)
mask_trainee.scheduler = optim.lr_scheduler.CosineAnnealingLR(
mask_trainee.optimizer,
T_max=50,
eta_min=0
)
mask_trainee.train()
def train_general_classifier(device, seed, train_set, valid_set):
mask_trainee = Trainee("gen-classifier", device=device)
mask_trainee.batch_size = 512
mask_trainee.epochs = 16
mask_trainee.prepare_dataset(train_set, valid_set, DatasetType.General, random_seed=0)
mask_trainee.log_interval = int(len(mask_trainee.train_set_loader) / 3)
mask_trainee.model = BaseModel(num_classes=18).to(device)
mask_trainee.criterion = FocalLoss()
mask_trainee.optimizer = optim.Adam(
mask_trainee.model.parameters(), lr=0.0001
)
mask_trainee.scheduler = optim.lr_scheduler.CosineAnnealingLR(
mask_trainee.optimizer,
T_max=50,
eta_min=0
)
mask_trainee.train()
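# The six train_* functions above differ only in task name, dataset type,
# output size, epochs, and batch size. A single parametrised helper could
# express the shared recipe; a minimal sketch reusing only the
# Trainee/BaseModel/FocalLoss calls already made above:
def train_classifier(name, dataset_type, num_classes, device, seed,
                     train_set, valid_set, epochs=10, batch_size=512):
    trainee = Trainee(name, device=device)
    trainee.batch_size = batch_size
    trainee.epochs = epochs
    trainee.prepare_dataset(train_set, valid_set, dataset_type, random_seed=seed)
    trainee.log_interval = int(len(trainee.train_set_loader) / 3)  # log 3 times per epoch
    trainee.model = BaseModel(num_classes=num_classes).to(device)
    trainee.criterion = FocalLoss()
    trainee.optimizer = optim.Adam(trainee.model.parameters(), lr=0.0001)
    trainee.scheduler = optim.lr_scheduler.CosineAnnealingLR(
        trainee.optimizer, T_max=50, eta_min=0)
    trainee.train()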
def predict_from_models():
print(f"PyTorch version: {torch.__version__}.")
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(f"device: {device}")
dataset, answer_board = generate_test_datasets("/opt/ml/input/data")
predictor = Predictor_M4(16, dataset, answer_board, device)
predictor.predict()
# TODO: write a predictor that combines the three models
def seed_everything(seed: int):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if use multi-GPU
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(seed)
random.seed(seed)
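# Note: cudnn.deterministic=True forces deterministic kernels (slower) and
# benchmark=False disables cuDNN autotuning; together with the explicit seeds
# above they make GPU runs reproducible.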
if __name__ == "__main__":
# train_model()
predict_from_models()
===============================================================================
Record: __init__.py
Repo: makikaka/covid19 @ d0b028ef12b9da13d78490bacf4771d557148111
Blob: 84bd48b4e1d18ac26a0e797bb30f628d5ef286b6 (26 bytes, Python/.py)
Licenses: ["WTFPL"]
===============================================================================
import first_project  # the ".py" suffix is not part of a module name
===============================================================================
Record: delimitapp/views.py
Repo: fmariv/delimitapp @ ac328c73dd249284cd1c9411e766cb39ab329de7
Blob: ca43835d4bc826606a9e0a7f61e34f4493d4914e (163 bytes, Python/.py)
Licenses: ["MIT"]
===============================================================================
# Create your views here.
from django.shortcuts import render, redirect
def index(request):
return render(request, '../../delimitapp/templates/index.html')
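# Note: Django normally resolves template names against the configured template
# directories (TEMPLATES['DIRS'] / app template loaders), so the conventional
# argument here would be 'index.html' rather than a filesystem-relative path.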
===============================================================================
Record: test/test_seasonal_water_yield.py
Repo: phargogh/invest-natcap.invest-3 @ ee96055a4fa034d9a95fa8ccc6259ab03264e6c1
Blob: ca630d26d516b01f572de17de3e4e86fe1d04e8a (132 bytes, Python/.py)
Licenses: ["BSD-3-Clause"]
===============================================================================
from invest_natcap.seasonal_water_yield import seasonal_water_yield
if __name__ == '__main__':
seasonal_water_yield.main()
===============================================================================
Record: python/setup.py
Repo: szul/botbuilder-config @ fb9381a06648c7697bf6cb2dc0a350598caaa076
Blob: 04a58e97a12b9693b27bed2508d71cdbae657cc9 (39 bytes, Python/.py)
Licenses: ["MIT"]; stars: 1 (2018-07-23T11:06:05.000Z); issues: 9 (2018-07-14T22:22:59.000Z to 2018-08-24T17:42:38.000Z)
===============================================================================
import os
from setuptools import setup
===============================================================================
Record: src/models.py
Repo: akhilpandey95/scholarlyimpact @ 215ae832c90f0564fa0301e4c3f1c99525617625
Blob: b6c2a170b00663354e9cf569b8cea9d3478d9716 (10,566 bytes, Python/.py)
Licenses: ["MIT"]; issues: 18 (2020-02-20T23:40:26.000Z to 2020-10-20T04:05:43.000Z)
===============================================================================
# This Source Code Form is subject to the terms of the MIT
# License. If a copy of the same was not distributed with this
# file, You can obtain one at
# https://github.com/akhilpandey95/scholarlyimpact/blob/master/LICENSE.
import tensorflow.keras as keras
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense, LSTM, BatchNormalization
from tensorflow.keras.models import Sequential
# feedforward network for predicting if citations exist or not
class PredictCitationsExist(Model):
"""
Class object for predicting if citations for a given
scholarly paper exist or not
Parameters
----------
No arguments
Returns
-------
Neural Network Model
keras.model.Model
"""
# function for preparing the X & Y for the dataset
def __init__(self):
"""
Build the Vanilla style neural network model and compile it
Parameters
----------
No arguments
Returns
-------
Nothing
None
"""
# super class the keras model
super(PredictCitationsExist, self).__init__()
# create the model
self.model = Sequential()
# add the first hidden layer with 512 neurons, selu activation
self.model.add(Dense(512, activation='selu', input_dim=21))
# add the single output layer; sigmoid, since softmax over one unit is constant
self.model.add(Dense(1, activation='sigmoid'))
# use the rmsprop optimizer
self.rms = keras.optimizers.RMSprop(lr=0.001)
# compile the model
self.model.compile(optimizer=self.rms, loss='binary_crossentropy', metrics=['accuracy'])
# function for training the neural network model
def train(self, epochs, batch_size, X_train, X_test, Y_train, Y_test, stopping=True):
"""
Fit the neural network model
Parameters
----------
arg1 | model: keras.model.Model
A compiled keras neural network model to train
arg2 | X_train: numpy.ndarray
The training samples containing all the predictors
arg3 | X_test: numpy.ndarray
The test samples containing all the predictors
arg4 | Y_train: numpy.ndarray
The training samples containing values for the target variable
arg5 | Y_test: numpy.ndarray
The test samples containing values for the target variable
arg6 | stopping: boolean
A flag asserting if early stopping should or shouldn't be used for training
Returns
-------
Neural Network Model
keras.model.Model
"""
try:
if not stopping:
# fit the model
self.model.fit(X_train, Y_train, epochs=epochs, validation_split=0.2, batch_size=batch_size)
else:
# prepare for early stopping on the validation loss
# ('binary_cross_entropy' is not a metric name Keras logs, so it would never trigger)
early_stopping = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0,
patience=40, verbose=0, mode='auto',
baseline=None, restore_best_weights=False)
# fit the model
self.model.fit(X_train, Y_train, epochs=epochs, validation_split=0.2, batch_size=batch_size, callbacks=[early_stopping])
# return the model
return self.model
except:
return keras.models.Model()
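# Minimal usage sketch (illustrative names; assumes numpy arrays with 21
# predictor columns and a binary target):
#   clf = PredictCitationsExist()
#   trained = clf.train(epochs=50, batch_size=32, X_train=X_tr, X_test=X_te,
#                       Y_train=y_tr, Y_test=y_te, stopping=True)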
# feedforward network for predicting if citations more than median or not
class PredictMedianCitationsExist(Model):
"""
Class object for predicting if citations for a given
scholarly paper are more than the median number of
citations or not
Parameters
----------
No arguments
Returns
-------
Neural Network Model
keras.model.Model
"""
# function for preparing the X & Y for the dataset
def __init__(self):
"""
Build the Vanilla style neural network model and compile it
Parameters
----------
No arguments
Returns
-------
Nothing
None
"""
# super class the keras model
super(PredictMedianCitationsExist, self).__init__()
# create the model
self.model = Sequential()
# add the first hidden layer with 64 neurons, sigmoid activation
self.model.add(Dense(64, activation='sigmoid', input_dim=21))
# add the second hidden layer with 128 neurons, selu activation
self.model.add(Dense(128, activation='selu'))
# add the third hidden layer with 64 neurons, sigmoid activation
self.model.add(Dense(64, activation='sigmoid'))
# add the single output layer
self.model.add(Dense(1, activation='sigmoid'))
# use the rmsprop optimizer
self.rms = keras.optimizers.RMSprop(lr=0.001)
# compile the model
self.model.compile(optimizer=self.rms, loss='binary_crossentropy', metrics =['accuracy'])
# function for training the neural network model
def train(self, epochs, batch_size, X_train, X_test, Y_train, Y_test, stopping=True):
"""
Fit the neural network model
Parameters
----------
arg1 | model: keras.model.Model
A compiled keras neural network model to train
arg2 | X_train: numpy.ndarray
The training samples containing all the predictors
arg3 | X_test: numpy.ndarray
The test samples containing all the predictors
arg4 | Y_train: numpy.ndarray
The training samples containing values for the target variable
arg5 | Y_test: numpy.ndarray
The test samples containing values for the target variable
arg6 | stopping: boolean
A flag asserting if early stopping should or shouldn't be used for training
Returns
-------
Neural Network Model
keras.model.Model
"""
try:
if not stopping:
# fit the model
self.model.fit(X_train, Y_train, epochs=epochs, validation_split=0.2, batch_size=batch_size)
else:
# prepare for early stopping
                early_stopping = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0,
                                                               patience=40, verbose=0, mode='auto',
                                                               baseline=None, restore_best_weights=False)
# fit the model
self.model.fit(X_train, Y_train, epochs=epochs, validation_split=0.2, batch_size=batch_size, callbacks=[early_stopping])
# return the model
return self.model
        except Exception:
            # fall back to an empty model if training fails
            return keras.models.Model()
# feedforward network for predicting log(1 + citations)
class PredictLogCitation(Model):
"""
Class object for predicting Log(1 + citations) for a given
scholarly paper
Parameters
----------
No arguments
Returns
-------
Neural Network Model
keras.model.Model
"""
# function for preparing the X & Y for the dataset
def __init__(self):
"""
Build the Vanilla style neural network model and compile it
Parameters
----------
No arguments
Returns
-------
Nothing
None
"""
        # initialize the parent keras Model class
super(PredictLogCitation, self).__init__()
# create the model
self.model = Sequential()
# add the first hidden layer with 32 neurons, relu activation
self.model.add(Dense(32, activation='relu', input_dim=21))
# add the second hidden layer with 64 neurons, relu activation
self.model.add(Dense(64, activation='relu'))
# add the third hidden layer with 64 neurons, relu activation
self.model.add(Dense(64, activation='relu'))
# add the fourth hidden layer with 128 neurons, relu activation
self.model.add(Dense(128, activation='relu'))
# add the fifth hidden layer with 64 neurons, relu activation
self.model.add(Dense(64, activation='relu'))
# add the sixth hidden layer with 64 neurons, relu activation
self.model.add(Dense(64, activation='relu'))
# add the seventh hidden layer with 32 neurons, relu activation
self.model.add(Dense(32, activation='relu'))
# add the single output layer
self.model.add(Dense(1))
        # use the rmsprop optimizer (learning_rate replaces the deprecated lr argument)
        self.rms = keras.optimizers.RMSprop(learning_rate=0.001)
        # compile the model
        self.model.compile(optimizer=self.rms, loss='mean_squared_error', metrics=['mean_absolute_error'])
# function for training the neural network model
def train(self, epochs, batch_size, X_train, X_test, Y_train, Y_test, stopping=True):
"""
Fit the neural network model
Parameters
----------
arg1 | model: keras.model.Model
A compiled keras neural network model to train
arg2 | X_train: numpy.ndarray
The training samples containing all the predictors
arg3 | X_test: numpy.ndarray
The test samples containing all the predictors
arg4 | Y_train: numpy.ndarray
The training samples containing values for the target variable
arg5 | Y_test: numpy.ndarray
The test samples containing values for the target variable
arg6 | stopping: boolean
A flag asserting if early stopping should or shouldn't be used for training
Returns
-------
Neural Network Model
keras.model.Model
"""
try:
if not stopping:
# fit the model
self.model.fit(X_train, Y_train, epochs=epochs, validation_split=0.2, batch_size=batch_size)
else:
# prepare for early stopping
                early_stopping = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0,
                                                               patience=40, verbose=0, mode='auto',
                                                               baseline=None, restore_best_weights=False)
# fit the model
self.model.fit(X_train, Y_train, epochs=epochs, validation_split=0.2, batch_size=batch_size, callbacks=[early_stopping])
# return the model
return self.model
        except Exception:
            # fall back to an empty model if training fails
            return keras.models.Model()
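# Hypothetical usage sketch (not part of the original module): fit the binary
# classifier on synthetic data with the 21 predictors its input layer expects;
# the sample counts and the train/test split below are assumptions.
if __name__ == "__main__":
    import numpy as np
    # synthetic dataset: 1000 samples, 21 predictors, binary target
    X = np.random.rand(1000, 21)
    Y = np.random.randint(0, 2, size=(1000, 1))
    X_train, X_test = X[:800], X[800:]
    Y_train, Y_test = Y[:800], Y[800:]
    # build, compile and fit the classifier with early stopping
    clf = PredictCitationsExist()
    fitted = clf.train(50, 32, X_train, X_test, Y_train, Y_test, stopping=True)
    fitted.summary()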
| 33.865385
| 136
| 0.602593
| 1,230
| 10,566
| 5.086179
| 0.155285
| 0.04172
| 0.05179
| 0.038043
| 0.892583
| 0.883632
| 0.870045
| 0.870045
| 0.870045
| 0.863012
| 0
| 0.016098
| 0.318001
| 10,566
| 311
| 137
| 33.974277
| 0.852068
| 0.457221
| 0
| 0.694444
| 0
| 0
| 0.046953
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.055556
| 0
| 0.263889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b6ce61ffa936b83f56a0eb7796b1c57d4620c96c
| 32
|
py
|
Python
|
commands/operations/__init__.py
|
evandrocoan/Javatar
|
b38d4f9d852565d6dcecb236386628b4e56d9d09
|
[
"MIT"
] | 142
|
2015-01-11T19:43:17.000Z
|
2021-11-15T11:44:56.000Z
|
commands/operations/__init__.py
|
evandroforks/Javatar
|
b38d4f9d852565d6dcecb236386628b4e56d9d09
|
[
"MIT"
] | 46
|
2015-01-02T20:29:37.000Z
|
2018-09-15T05:12:52.000Z
|
commands/operations/__init__.py
|
evandroforks/Javatar
|
b38d4f9d852565d6dcecb236386628b4e56d9d09
|
[
"MIT"
] | 25
|
2015-01-16T01:33:39.000Z
|
2022-01-07T11:12:43.000Z
|
from .organize_imports import *
| 16
| 31
| 0.8125
| 4
| 32
| 6.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 32
| 1
| 32
| 32
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b6f7ac43bd2831f951ec1d436acca6cfd55a4147
| 240
|
py
|
Python
|
jterritory/exceptions/request.py
|
jameysharp/jterritory
|
b41e53ce04fe63db8a5943d2808e4fa9f9b15b32
|
[
"Apache-2.0"
] | 5
|
2021-05-14T18:50:11.000Z
|
2021-05-23T03:08:55.000Z
|
jterritory/exceptions/request.py
|
jameysharp/jterritory
|
b41e53ce04fe63db8a5943d2808e4fa9f9b15b32
|
[
"Apache-2.0"
] | null | null | null |
jterritory/exceptions/request.py
|
jameysharp/jterritory
|
b41e53ce04fe63db8a5943d2808e4fa9f9b15b32
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import annotations
from . import RequestError
class UnknownCapability(RequestError):
pass
class NotJSON(RequestError):
pass
class NotRequest(RequestError):
pass
class Limit(RequestError):
limit: str
| 12
| 38
| 0.75
| 24
| 240
| 7.333333
| 0.5
| 0.272727
| 0.357955
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.191667
| 240
| 19
| 39
| 12.631579
| 0.907216
| 0
| 0
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.3
| 0.2
| 0
| 0.7
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
8e0ca5fbf1affbfc26bbeae7cc776e29130b9e64
| 11,349
|
py
|
Python
|
tests/unit/facters/test_sitemap.py
|
scorphus/holmes-api
|
6b3c76d4299fecf2d8799d7b5c3c6a6442cacd59
|
[
"MIT"
] | null | null | null |
tests/unit/facters/test_sitemap.py
|
scorphus/holmes-api
|
6b3c76d4299fecf2d8799d7b5c3c6a6442cacd59
|
[
"MIT"
] | null | null | null |
tests/unit/facters/test_sitemap.py
|
scorphus/holmes-api
|
6b3c76d4299fecf2d8799d7b5c3c6a6442cacd59
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from mock import Mock, call
from preggy import expect
from holmes.config import Config
from holmes.reviewer import Reviewer
from holmes.facters.sitemap import SitemapFacter
from tests.unit.base import FacterTestCase
from tests.fixtures import PageFactory
class TestSitemapFacter(FacterTestCase):
    def test_get_facts_when_page_is_not_root(self):
page = PageFactory.create(url="http://g1.globo.com/1/")
reviewer = Reviewer(
api_url='http://localhost:2368',
page_uuid=page.uuid,
page_url=page.url,
page_score=0.0,
config=Config(),
validators=[]
)
facter = SitemapFacter(reviewer)
facter.async_get = Mock()
facter.add_fact = Mock()
facter.get_facts()
expect(facter.async_get.call_count).to_equal(0)
expect(facter.add_fact.call_count).to_equal(0)
def test_get_facts(self):
page = PageFactory.create(url="http://g1.globo.com/")
reviewer = Reviewer(
api_url='http://localhost:2368',
page_uuid=page.uuid,
page_url=page.url,
page_score=0.0,
config=Config(),
validators=[]
)
facter = SitemapFacter(reviewer)
facter.async_get = Mock()
facter.get_sitemaps = Mock(return_value=['http://g1.globo.com/sitemap.xml'])
facter.add_fact = Mock()
facter.get_facts()
expect(facter.review.data).to_length(7)
expect(facter.review.data).to_include('sitemap.data')
expect(facter.review.data['sitemap.data']).to_equal({})
expect(facter.review.data).to_include('sitemap.urls')
expect(facter.review.data['sitemap.urls']).to_equal({})
expect(facter.review.data).to_include('sitemap.files')
expect(facter.review.data['sitemap.files']).to_equal(set())
expect(facter.review.data).to_include('sitemap.files.size')
expect(facter.review.data['sitemap.files.size']).to_equal({})
expect(facter.review.data).to_include('sitemap.files.urls')
expect(facter.review.data['sitemap.files.urls']).to_equal({})
expect(facter.review.data).to_include('total.size.sitemap')
expect(facter.review.data['total.size.sitemap']).to_equal(0)
expect(facter.review.data).to_include('total.size.sitemap.gzipped')
expect(facter.review.data['total.size.sitemap.gzipped']).to_equal(0)
expect(facter.add_fact.call_args_list).to_include(
call(key='total.sitemap.indexes', value=0)
)
expect(facter.add_fact.call_args_list).to_include(
call(key='total.sitemap.urls', value=0)
)
expect(facter.add_fact.call_args_list).to_include(
call(key='total.size.sitemap', value=0)
)
expect(facter.add_fact.call_args_list).to_include(
call(key='total.size.sitemap.gzipped', value=0)
)
facter.async_get.assert_called_once_with(
'http://g1.globo.com/robots.txt',
facter.handle_robots_loaded
)
def test_get_sitemaps(self):
page = PageFactory.create(url="http://g1.globo.com/")
reviewer = Reviewer(
api_url='http://localhost:2368',
page_uuid=page.uuid,
page_url=page.url,
page_score=0.0,
config=Config(),
validators=[]
)
facter = SitemapFacter(reviewer)
facter.review.data['robots.response'] = ''
expect(facter.get_sitemaps(Mock(status_code=404))).to_equal(set(['http://g1.globo.com/sitemap.xml']))
def test_get_sitemaps_with_robots_txt(self):
page = PageFactory.create(url="http://g1.globo.com/")
reviewer = Reviewer(
api_url='http://localhost:2368',
page_uuid=page.uuid,
page_url=page.url,
page_score=0.0,
config=Config(),
validators=[]
)
facter = SitemapFacter(reviewer)
response = Mock(status_code=200, text="""
Sitemap: http://g1.globo.com/1.xml
Sitemap: http://g1.globo.com/2.xml
""")
expect(facter.get_sitemaps(response)).to_equal(set([
'http://g1.globo.com/sitemap.xml',
'http://g1.globo.com/1.xml',
'http://g1.globo.com/2.xml'
]))
def test_handle_sitemap_return_404(self):
page = PageFactory.create(url="http://g1.globo.com/")
reviewer = Reviewer(
api_url='http://localhost:2368',
page_uuid=page.uuid,
page_url=page.url,
page_score=0.0,
config=Config(),
validators=[]
)
response = Mock(status_code=404, text='Not found')
facter = SitemapFacter(reviewer)
facter.async_get = Mock()
facter.get_sitemaps = Mock(return_value=['http://g1.globo.com/sitemap.xml'])
facter.get_facts()
facter.async_get = Mock()
facter.handle_sitemap_loaded("http://g1.globo.com/sitemap.xml", response)
expect(facter.review.data['sitemap.data']["http://g1.globo.com/sitemap.xml"]).to_equal(response)
def test_handle_sitemap_index_loaded(self):
page = PageFactory.create(url="http://g1.globo.com/")
reviewer = Reviewer(
api_url='http://localhost:2368',
page_uuid=page.uuid,
page_url=page.url,
page_score=0.0,
config=Config(),
validators=[]
)
content = self.get_file('index_sitemap.xml')
response = Mock(status_code=200, text=content)
facter = SitemapFacter(reviewer)
facter.async_get = Mock()
facter.get_sitemaps = Mock(return_value=['http://g1.globo.com/sitemap.xml'])
facter.get_facts()
facter.async_get = Mock()
facter.handle_sitemap_loaded("http://g1.globo.com/sitemap.xml", response)
expect(facter.review.data['sitemap.files.size']["http://g1.globo.com/sitemap.xml"]).to_equal(0.2607421875)
expect(facter.review.data['sitemap.urls']["http://g1.globo.com/sitemap.xml"]).to_equal(set())
expect(facter.review.facts['total.size.sitemap']['value']).to_equal(0.2607421875)
expect(facter.review.facts['total.size.sitemap.gzipped']['value']).to_equal(0.146484375)
expect(facter.review.data['total.size.sitemap']).to_equal(0.2607421875)
expect(facter.review.data['total.size.sitemap.gzipped']).to_equal(0.146484375)
expect(facter.review.data['sitemap.files.urls']["http://g1.globo.com/sitemap.xml"]).to_equal(2)
expect(facter.async_get.call_args_list).to_include(
call('http://domain.com/1.xml', facter.handle_sitemap_loaded),
)
expect(facter.async_get.call_args_list).to_include(
call('http://domain.com/2.xml', facter.handle_sitemap_loaded),
)
def test_handle_sitemap_url_loaded(self):
page = PageFactory.create(url="http://g1.globo.com/")
reviewer = Reviewer(
api_url='http://localhost:2368',
page_uuid=page.uuid,
page_url=page.url,
page_score=0.0,
config=Config(),
validators=[]
)
reviewer.enqueue = Mock()
content = self.get_file('url_sitemap.xml')
response = Mock(status_code=200, text=content)
facter = SitemapFacter(reviewer)
facter.async_get = Mock()
facter.get_facts()
facter.handle_sitemap_loaded("http://g1.globo.com/sitemap.xml", response)
expect(facter.review.data['sitemap.files.size']["http://g1.globo.com/sitemap.xml"]).to_equal(0.296875)
expect(facter.review.data['sitemap.urls']["http://g1.globo.com/sitemap.xml"]).to_equal(set(['http://domain.com/1.html', 'http://domain.com/2.html']))
expect(facter.review.facts['total.size.sitemap']['value']).to_equal(0.296875)
expect(facter.review.facts['total.size.sitemap.gzipped']['value']).to_equal(0.1494140625)
expect(facter.review.data['total.size.sitemap']).to_equal(0.296875)
expect(facter.review.data['total.size.sitemap.gzipped']).to_equal(0.1494140625)
expect(facter.review.data['sitemap.files.urls']["http://g1.globo.com/sitemap.xml"]).to_equal(2)
expect(facter.review.facts['total.sitemap.urls']['value']).to_equal(2)
def test_handle_robots_loaded(self):
page = PageFactory.create(url="http://g1.globo.com/")
reviewer = Reviewer(
api_url='http://localhost:2368',
page_uuid=page.uuid,
page_url=page.url,
page_score=0.0,
config=Config(),
validators=[]
)
facter = SitemapFacter(reviewer)
facter.async_get = Mock()
facter.get_sitemaps = Mock(return_value=['http://g1.globo.com/sitemap.xml'])
facter.handle_robots_loaded('http://g1.globo.com/robots.txt', Mock())
facter.async_get.assert_called_once_with(
'http://g1.globo.com/sitemap.xml',
facter.handle_sitemap_loaded
)
    def test_gzipped_sitemap(self):
page = PageFactory.create(url="http://g1.globo.com/")
reviewer = Reviewer(
api_url='http://localhost:2368',
page_uuid=page.uuid,
page_url=page.url,
page_score=0.0,
config=Config(),
validators=[]
)
content = self.get_file('index_sitemap.xml.gz')
response = Mock(status_code=200, text=content)
facter = SitemapFacter(reviewer)
facter.async_get = Mock()
facter.get_sitemaps = Mock(return_value=['http://g1.globo.com/sitemap.xml.gz'])
facter.get_facts()
facter.async_get = Mock()
facter.handle_sitemap_loaded("http://g1.globo.com/sitemap.xml.gz", response)
expect(facter.review.data['sitemap.files.size']["http://g1.globo.com/sitemap.xml.gz"]).to_equal(0.2607421875)
expect(facter.review.data['sitemap.urls']["http://g1.globo.com/sitemap.xml.gz"]).to_equal(set())
expect(facter.review.facts['total.size.sitemap']['value']).to_equal(0.2607421875)
expect(facter.review.facts['total.size.sitemap.gzipped']['value']).to_equal(0.146484375)
expect(facter.review.data['total.size.sitemap']).to_equal(0.2607421875)
expect(facter.review.data['total.size.sitemap.gzipped']).to_equal(0.146484375)
expect(facter.review.data['sitemap.files.urls']["http://g1.globo.com/sitemap.xml.gz"]).to_equal(2)
expect(facter.async_get.call_args_list).to_include(
call('http://domain.com/1.xml', facter.handle_sitemap_loaded),
)
expect(facter.async_get.call_args_list).to_include(
call('http://domain.com/2.xml', facter.handle_sitemap_loaded),
)
def test_can_get_fact_definitions(self):
reviewer = Mock()
facter = SitemapFacter(reviewer)
definitions = facter.get_fact_definitions()
expect(definitions).to_length(4)
expect('total.sitemap.indexes' in definitions).to_be_true()
expect('total.sitemap.urls' in definitions).to_be_true()
expect('total.size.sitemap' in definitions).to_be_true()
expect('total.size.sitemap.gzipped' in definitions).to_be_true()
| 38.212121
| 157
| 0.624813
| 1,419
| 11,349
| 4.832276
| 0.077519
| 0.087502
| 0.099752
| 0.075543
| 0.860289
| 0.843955
| 0.80735
| 0.782266
| 0.758203
| 0.696077
| 0
| 0.032584
| 0.223896
| 11,349
| 296
| 158
| 38.341216
| 0.745913
| 0.003348
| 0
| 0.574468
| 0
| 0
| 0.209921
| 0.026704
| 0
| 0
| 0
| 0
| 0.008511
| 1
| 0.042553
| false
| 0
| 0.029787
| 0
| 0.076596
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8e305bb6e28c73d6cdba83972d677f423496c38f
| 27
|
py
|
Python
|
test1.py
|
tjdbsrud/test
|
fabf74f1fa12ef9654101dd0fc6683e9b11c9c8a
|
[
"MIT"
] | null | null | null |
test1.py
|
tjdbsrud/test
|
fabf74f1fa12ef9654101dd0fc6683e9b11c9c8a
|
[
"MIT"
] | null | null | null |
test1.py
|
tjdbsrud/test
|
fabf74f1fa12ef9654101dd0fc6683e9b11c9c8a
|
[
"MIT"
] | null | null | null |
print("This is really YK")
| 13.5
| 26
| 0.703704
| 5
| 27
| 3.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.826087
| 0
| 0
| 0
| 0
| 0
| 0.62963
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
f3cbc98c170a0654e554fcdf5cb2e1abffbfa3ef
| 18,317
|
py
|
Python
|
tests/test_status_checker.py
|
mbhall88/lsf
|
3e179d02441f39dbffb601404708b6c4f9244d9e
|
[
"MIT"
] | 9
|
2019-08-08T09:34:36.000Z
|
2020-07-15T09:19:23.000Z
|
tests/test_status_checker.py
|
mbhall88/lsf
|
3e179d02441f39dbffb601404708b6c4f9244d9e
|
[
"MIT"
] | 19
|
2020-08-25T00:02:41.000Z
|
2022-03-29T10:20:45.000Z
|
tests/test_status_checker.py
|
mbhall88/lsf
|
3e179d02441f39dbffb601404708b6c4f9244d9e
|
[
"MIT"
] | 6
|
2020-11-10T23:56:17.000Z
|
2022-03-15T10:28:55.000Z
|
import unittest
from subprocess import CalledProcessError
from unittest.mock import patch, call
from tests.src.OSLayer import OSLayer, TailError
from tests.src.lsf_status import (
StatusChecker,
BjobsError,
UNKNOWN,
ZOMBIE,
)
def assert_called_n_times_with_same_args(mock, n, args):
assert mock.call_count == n
for mock_call in mock.call_args_list:
call_args, _ = mock_call
assert " ".join(call_args) == args
class TestStatusChecker(unittest.TestCase):
@patch.object(OSLayer, OSLayer.run_process.__name__, return_value=("PEND", ""))
def test___get_status___bjobs_says_process_is_PEND___job_status_is_running(
self, run_process_mock
):
lsf_status_checker = StatusChecker(123, "dummy")
actual = lsf_status_checker.get_status()
expected = "running"
self.assertEqual(actual, expected)
run_process_mock.assert_called_once_with("bjobs -o 'stat' -noheader 123")
@patch.object(OSLayer, OSLayer.run_process.__name__, return_value=("RUN", ""))
def test___get_status___bjobs_says_process_is_RUN___job_status_is_running(
self, run_process_mock
):
lsf_status_checker = StatusChecker(123, "dummy")
actual = lsf_status_checker.get_status()
expected = "running"
self.assertEqual(actual, expected)
run_process_mock.assert_called_once_with("bjobs -o 'stat' -noheader 123")
@patch.object(OSLayer, OSLayer.run_process.__name__, return_value=("PSUSP", ""))
def test___get_status___bjobs_says_process_is_PSUSP___job_status_is_running(
self, run_process_mock
):
lsf_status_checker = StatusChecker(123, "dummy")
actual = lsf_status_checker.get_status()
expected = "running"
self.assertEqual(actual, expected)
run_process_mock.assert_called_once_with("bjobs -o 'stat' -noheader 123")
@patch.object(OSLayer, OSLayer.run_process.__name__, return_value=("USUSP", ""))
def test___get_status___bjobs_says_process_is_USUSP___job_status_is_running(
self, run_process_mock
):
lsf_status_checker = StatusChecker(123, "dummy")
actual = lsf_status_checker.get_status()
expected = "running"
self.assertEqual(actual, expected)
run_process_mock.assert_called_once_with("bjobs -o 'stat' -noheader 123")
@patch.object(OSLayer, OSLayer.run_process.__name__, return_value=("SSUSP", ""))
def test___get_status___bjobs_says_process_is_SSUSP___job_status_is_running(
self, run_process_mock
):
lsf_status_checker = StatusChecker(123, "dummy")
actual = lsf_status_checker.get_status()
expected = "running"
self.assertEqual(actual, expected)
run_process_mock.assert_called_once_with("bjobs -o 'stat' -noheader 123")
@patch.object(OSLayer, OSLayer.run_process.__name__, return_value=("WAIT", ""))
def test___get_status___bjobs_says_process_is_WAIT___job_status_is_running(
self, run_process_mock
):
lsf_status_checker = StatusChecker(123, "dummy")
actual = lsf_status_checker.get_status()
expected = "running"
self.assertEqual(actual, expected)
run_process_mock.assert_called_once_with("bjobs -o 'stat' -noheader 123")
@patch.object(OSLayer, OSLayer.run_process.__name__, return_value=(UNKNOWN, ""))
def test___get_status___status_UNKWN_and_wait_unknown___job_status_is_running(
self, run_process_mock
):
lsf_status_checker = StatusChecker(123, "dummy", kill_unknown=False)
actual = lsf_status_checker.get_status()
expected = lsf_status_checker.RUNNING
self.assertEqual(actual, expected)
run_process_mock.assert_called_once_with("bjobs -o 'stat' -noheader 123")
@patch.object(OSLayer, OSLayer.run_process.__name__, return_value=(UNKNOWN, ""))
def test___get_status___status_UNKWN_and_kill_unknown___job_status_is_running(
self, run_process_mock
):
jobid = 123
lsf_status_checker = StatusChecker(jobid, "dummy", kill_unknown=True)
actual = lsf_status_checker.get_status()
expected = lsf_status_checker.RUNNING
self.assertEqual(actual, expected)
calls = [
call("bjobs -o 'stat' -noheader {}".format(jobid)),
call("bkill -r {}".format(jobid)),
]
run_process_mock.assert_has_calls(calls, any_order=False)
@patch.object(OSLayer, OSLayer.run_process.__name__, return_value=(ZOMBIE, ""))
def test___get_status___status_ZOMBI_and_ignore_zombie___job_status_is_failed(
self, run_process_mock
):
jobid = 123
lsf_status_checker = StatusChecker(jobid, "dummy", kill_zombie=False)
actual = lsf_status_checker.get_status()
expected = lsf_status_checker.FAILED
self.assertEqual(actual, expected)
run_process_mock.assert_called_once_with(
"bjobs -o 'stat' -noheader {}".format(jobid)
)
@patch.object(OSLayer, OSLayer.run_process.__name__, return_value=(ZOMBIE, ""))
def test___get_status___status_ZOMBI_and_kill_zombie___job_status_is_failed(
self, run_process_mock
):
jobid = 123
lsf_status_checker = StatusChecker(jobid, "dummy", kill_zombie=True)
actual = lsf_status_checker.get_status()
expected = lsf_status_checker.FAILED
self.assertEqual(actual, expected)
calls = [
call("bjobs -o 'stat' -noheader {}".format(jobid)),
call("bkill -r {}".format(jobid)),
]
run_process_mock.assert_has_calls(calls, any_order=False)
@patch.object(OSLayer, OSLayer.run_process.__name__, return_value=("EXIT", ""))
def test___get_status___bjobs_says_process_is_EXIT___job_status_is_failed(
self, run_process_mock
):
lsf_status_checker = StatusChecker(123, "dummy")
actual = lsf_status_checker.get_status()
expected = "failed"
self.assertEqual(actual, expected)
run_process_mock.assert_called_once_with("bjobs -o 'stat' -noheader 123")
@patch.object(OSLayer, OSLayer.run_process.__name__, return_value=("POST_ERR", ""))
def test___get_status___bjobs_says_process_is_POST_ERR___job_status_is_failed(
self, run_process_mock
):
lsf_status_checker = StatusChecker(123, "dummy")
actual = lsf_status_checker.get_status()
expected = "failed"
self.assertEqual(actual, expected)
run_process_mock.assert_called_once_with("bjobs -o 'stat' -noheader 123")
@patch.object(OSLayer, OSLayer.run_process.__name__, return_value=("DONE", ""))
def test___get_status___bjobs_says_process_is_DONE___job_status_is_success(
self, run_process_mock
):
lsf_status_checker = StatusChecker(123, "dummy")
actual = lsf_status_checker.get_status()
expected = "success"
self.assertEqual(actual, expected)
run_process_mock.assert_called_once_with("bjobs -o 'stat' -noheader 123")
@patch.object(OSLayer, OSLayer.run_process.__name__, return_value=("POST_DONE", ""))
def test___get_status___bjobs_says_process_is_POST_DONE___job_status_is_success(
self, run_process_mock
):
lsf_status_checker = StatusChecker(123, "dummy")
actual = lsf_status_checker.get_status()
expected = "success"
self.assertEqual(actual, expected)
run_process_mock.assert_called_once_with("bjobs -o 'stat' -noheader 123")
@patch.object(OSLayer, OSLayer.run_process.__name__)
def test_get_status_bjobs_fails_three_times_succeeds_fourth_job_status_is_success(
self, run_process_mock
):
self.count_fail_three_times_and_then_return_DONE = 0
def fail_three_times_and_then_return_DONE(cmd):
self.count_fail_three_times_and_then_return_DONE += 1
if self.count_fail_three_times_and_then_return_DONE == 1:
raise BjobsError
elif self.count_fail_three_times_and_then_return_DONE == 2:
raise KeyError
elif self.count_fail_three_times_and_then_return_DONE == 3:
raise CalledProcessError(1, "bjobs")
elif self.count_fail_three_times_and_then_return_DONE == 4:
return "DONE", ""
else:
assert False
run_process_mock.side_effect = fail_three_times_and_then_return_DONE
lsf_status_checker = StatusChecker(
123, "dummy", wait_between_tries=0.001, max_status_checks=4
)
actual = lsf_status_checker.get_status()
expected = "success"
self.assertEqual(actual, expected)
assert_called_n_times_with_same_args(
run_process_mock, 4, "bjobs -o 'stat' -noheader 123"
)
@patch.object(OSLayer, OSLayer.run_process.__name__)
def test_get_status_bjobs_fails_three_times_PEND_fourth_time_job_status_running(
self, run_process_mock
):
self.count_fail_three_times_and_then_return_PEND = 0
def fail_three_times_and_then_return_PEND(cmd):
self.count_fail_three_times_and_then_return_PEND += 1
if self.count_fail_three_times_and_then_return_PEND == 1:
raise BjobsError
elif self.count_fail_three_times_and_then_return_PEND == 2:
raise KeyError
elif self.count_fail_three_times_and_then_return_PEND == 3:
raise CalledProcessError(1, "bjobs")
elif self.count_fail_three_times_and_then_return_PEND == 4:
return "PEND", ""
else:
assert False
run_process_mock.side_effect = fail_three_times_and_then_return_PEND
lsf_status_checker = StatusChecker(
123, "dummy", wait_between_tries=0.001, max_status_checks=4
)
actual = lsf_status_checker.get_status()
expected = "running"
self.assertEqual(actual, expected)
assert_called_n_times_with_same_args(
run_process_mock, 4, "bjobs -o 'stat' -noheader 123"
)
@patch.object(OSLayer, OSLayer.run_process.__name__)
    def test_get_status_bjobs_fails_once_says_EXIT_on_the_second_call_job_status_is_failed(
        self, run_process_mock
    ):
        self.count_fail_one_time_and_then_return_FAIL = 0
        def fail_one_time_and_then_return_FAIL(cmd):
            self.count_fail_one_time_and_then_return_FAIL += 1
            if self.count_fail_one_time_and_then_return_FAIL == 1:
                raise BjobsError
            elif self.count_fail_one_time_and_then_return_FAIL == 2:
                return "EXIT", ""
            else:
                assert False
        run_process_mock.side_effect = fail_one_time_and_then_return_FAIL
lsf_status_checker = StatusChecker(
123, "dummy", wait_between_tries=0.001, max_status_checks=4
)
actual = lsf_status_checker.get_status()
expected = "failed"
self.assertEqual(actual, expected)
assert_called_n_times_with_same_args(
run_process_mock, 2, "bjobs -o 'stat' -noheader 123"
)
@patch.object(OSLayer, OSLayer.run_process.__name__, side_effect=BjobsError)
@patch.object(
StatusChecker,
StatusChecker._get_tail_of_log_file.__name__,
return_value=["Successfully completed.", "", "Resource usage summary:"],
)
def test_get_status_bjobs_fails_query_status_using_log_job_status_is_success(
self, get_lines_of_log_file_mock, run_process_mock
):
lsf_status_checker = StatusChecker(
123, "dummy", wait_between_tries=0.001, max_status_checks=4
)
actual = lsf_status_checker.get_status()
expected = "success"
self.assertEqual(actual, expected)
assert_called_n_times_with_same_args(
run_process_mock, 4, "bjobs -o 'stat' -noheader 123"
)
get_lines_of_log_file_mock.assert_called_once_with()
@patch.object(OSLayer, OSLayer.run_process.__name__, side_effect=BjobsError)
@patch.object(
StatusChecker,
StatusChecker._get_tail_of_log_file.__name__,
return_value=["Exited with exit code 1.", "", "Resource usage summary:"],
)
def test_get_status_bjobs_fails_query_status_using_log_job_status_is_failed(
self, get_lines_of_log_file_mock, run_process_mock
):
lsf_status_checker = StatusChecker(
123, "dummy", wait_between_tries=0.001, max_status_checks=4
)
actual = lsf_status_checker.get_status()
expected = "failed"
self.assertEqual(actual, expected)
assert_called_n_times_with_same_args(
run_process_mock, 4, "bjobs -o 'stat' -noheader 123"
)
get_lines_of_log_file_mock.assert_called_once_with()
@patch.object(OSLayer, OSLayer.run_process.__name__, side_effect=BjobsError)
@patch.object(
StatusChecker,
StatusChecker._get_tail_of_log_file.__name__,
side_effect=FileNotFoundError,
)
def test_get_status_bjobs_fails_log_file_does_not_exist_job_status_is_failed(
self, get_lines_of_log_file_mock, run_process_mock
):
lsf_status_checker = StatusChecker(
123, "dummy", wait_between_tries=0.001, max_status_checks=4
)
actual = lsf_status_checker.get_status()
expected = lsf_status_checker.FAILED
self.assertEqual(actual, expected)
assert_called_n_times_with_same_args(
run_process_mock, 4, "bjobs -o 'stat' -noheader 123"
)
get_lines_of_log_file_mock.assert_called_once_with()
@patch.object(OSLayer, OSLayer.run_process.__name__, side_effect=BjobsError)
@patch.object(
StatusChecker,
StatusChecker._get_tail_of_log_file.__name__,
return_value=["...", "..."],
)
def test_get_status_bjobs_fails_exit_info_not_yet_written_job_status_is_running(
self, get_lines_of_log_file_mock, run_process_mock
):
lsf_status_checker = StatusChecker(
123, "dummy", wait_between_tries=0.001, max_status_checks=4
)
actual = lsf_status_checker.get_status()
expected = lsf_status_checker.RUNNING
self.assertEqual(actual, expected)
assert_called_n_times_with_same_args(
run_process_mock, 4, "bjobs -o 'stat' -noheader 123"
)
get_lines_of_log_file_mock.assert_called_once_with()
@patch.object(OSLayer, OSLayer.run_process.__name__, side_effect=BjobsError)
@patch.object(
StatusChecker,
StatusChecker._get_tail_of_log_file.__name__,
side_effect=TailError,
)
def test_get_status_checking_log_raises_tail_error_status_is_failed(
self, get_lines_of_log_file_mock, run_process_mock
):
lsf_status_checker = StatusChecker(
123, "dummy", wait_between_tries=0.001, max_status_checks=4
)
actual = lsf_status_checker.get_status()
expected = lsf_status_checker.FAILED
self.assertEqual(actual, expected)
assert_called_n_times_with_same_args(
run_process_mock, 4, "bjobs -o 'stat' -noheader 123"
)
get_lines_of_log_file_mock.assert_called_once_with()
@patch.object(OSLayer, OSLayer.run_process.__name__, side_effect=BjobsError)
@patch.object(
StatusChecker,
StatusChecker._get_tail_of_log_file.__name__,
return_value=["Successfully completed.", ""],
)
def test_get_status_bjobs_fails_resource_line_does_not_exist_job_status_is_running(
self, get_lines_of_log_file_mock, run_process_mock
):
lsf_status_checker = StatusChecker(
123, "dummy", wait_between_tries=0.001, max_status_checks=4
)
actual = lsf_status_checker.get_status()
expected = lsf_status_checker.RUNNING
self.assertEqual(actual, expected)
assert_called_n_times_with_same_args(
run_process_mock, 4, "bjobs -o 'stat' -noheader 123"
)
get_lines_of_log_file_mock.assert_called_once_with()
@patch.object(OSLayer, OSLayer.run_process.__name__, side_effect=BjobsError)
@patch.object(
StatusChecker,
StatusChecker._get_tail_of_log_file.__name__,
return_value=["I am an unknown status line", "", "Resource usage summary:"],
)
    def test_get_status_bjobs_fails_resource_line_not_recognised_job_status_is_failed(
self, get_lines_of_log_file_mock, run_process_mock
):
lsf_status_checker = StatusChecker(
123, "dummy", wait_between_tries=0.001, max_status_checks=4
)
actual = lsf_status_checker.get_status()
expected = lsf_status_checker.FAILED
assert actual == expected
assert_called_n_times_with_same_args(
run_process_mock, 4, "bjobs -o 'stat' -noheader 123"
)
get_lines_of_log_file_mock.assert_called_once_with()
@patch.object(OSLayer, OSLayer.run_process.__name__, return_value=("", ""))
def test____query_status_using_bjobs___empty_stdout___raises_BjobsError(
self, run_process_mock
):
lsf_status_checker = StatusChecker(
123, "dummy", wait_between_tries=0.001, max_status_checks=4
)
self.assertRaises(BjobsError, lsf_status_checker._query_status_using_bjobs)
run_process_mock.assert_called_once_with("bjobs -o 'stat' -noheader 123")
@patch.object(OSLayer, OSLayer.run_process.__name__, return_value=("asd", ""))
def test____query_status_using_bjobs___unknown_job_status___raises_KeyError(
self, run_process_mock
):
lsf_status_checker = StatusChecker(
123, "dummy", wait_between_tries=0.001, max_status_checks=4
)
self.assertRaises(KeyError, lsf_status_checker._query_status_using_bjobs)
run_process_mock.assert_called_once_with("bjobs -o 'stat' -noheader 123")
def test____get_tail_of_log_file(self):
lsf_status_checker = StatusChecker(
123, "test_file.txt", wait_between_tries=0.001, max_status_checks=4
)
actual = lsf_status_checker._get_tail_of_log_file()
expected = ["abcd", "1234"]
self.assertEqual(actual, expected)
if __name__ == "__main__":
unittest.main()
| 42.011468
| 88
| 0.696238
| 2,295
| 18,317
| 4.977342
| 0.064488
| 0.07091
| 0.088243
| 0.068546
| 0.925851
| 0.919286
| 0.906767
| 0.898188
| 0.864484
| 0.836033
| 0
| 0.017339
| 0.219141
| 18,317
| 435
| 89
| 42.108046
| 0.781305
| 0
| 0
| 0.658163
| 0
| 0
| 0.06999
| 0
| 0
| 0
| 0
| 0
| 0.168367
| 1
| 0.079082
| false
| 0
| 0.012755
| 0
| 0.102041
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6d1d3ba8210ad4937fe83f033fb1784b8849c0ba
| 176
|
py
|
Python
|
schemas.py
|
vnurhaqiqi/indonesian-text-summarization-fastapi
|
7742555fd4afe90508280b9492eb876a357f342c
|
[
"MIT"
] | null | null | null |
schemas.py
|
vnurhaqiqi/indonesian-text-summarization-fastapi
|
7742555fd4afe90508280b9492eb876a357f342c
|
[
"MIT"
] | null | null | null |
schemas.py
|
vnurhaqiqi/indonesian-text-summarization-fastapi
|
7742555fd4afe90508280b9492eb876a357f342c
|
[
"MIT"
] | null | null | null |
from pydantic import BaseModel
from typing import Optional
class Corpus(BaseModel):
text: str
num_words: Optional[int] = None
num_sentences: Optional[int] = None
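# Hypothetical usage sketch (not part of the original module): construct a
# Corpus payload; the example text and word count are assumptions.
if __name__ == "__main__":
    doc = Corpus(text="A short example document.", num_words=4)
    # optional fields default to None when omitted
    print(doc.text, doc.num_words, doc.num_sentences)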
| 19.555556
| 39
| 0.738636
| 23
| 176
| 5.565217
| 0.652174
| 0.171875
| 0.234375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.193182
| 176
| 8
| 40
| 22
| 0.901408
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ed8e736948796c3bc2676eac20b82dff154d15a3
| 923
|
py
|
Python
|
Tests/Plot/LamWind/__init__.py
|
IrakozeFD/pyleecan
|
5a93bd98755d880176c1ce8ac90f36ca1b907055
|
[
"Apache-2.0"
] | 95
|
2019-01-23T04:19:45.000Z
|
2022-03-17T18:22:10.000Z
|
Tests/Plot/LamWind/__init__.py
|
IrakozeFD/pyleecan
|
5a93bd98755d880176c1ce8ac90f36ca1b907055
|
[
"Apache-2.0"
] | 366
|
2019-02-20T07:15:08.000Z
|
2022-03-31T13:37:23.000Z
|
Tests/Plot/LamWind/__init__.py
|
IrakozeFD/pyleecan
|
5a93bd98755d880176c1ce8ac90f36ca1b907055
|
[
"Apache-2.0"
] | 74
|
2019-01-24T01:47:31.000Z
|
2022-02-25T05:44:42.000Z
|
# -*- coding: utf-8 -*-
from numpy import zeros, array
# User defined winding matrix for plot test (Nrad=2, Ntan=2)
wind_mat = zeros((2, 2, 6, 4)) # Nrad, Ntan, Zs, qs
wind_mat[0, 0, :, :] = array(
[[1, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, -1, -1, 0], [0, -1, 0, 0, 0, 1]]
).T
wind_mat[1, 0, :, :] = array(
[[0, 0, 0, 0, 0, 0], [-1, 0, -1, 0, 0, -1], [0, 0, 0, 0, 1, 0], [0, 1, 0, 1, 0, 0]]
).T
wind_mat[0, 1, :, :] = array(
[[-1, 0, 0, 0, 1, 0], [0, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, -1, 0, 0, -1]]
).T
wind_mat[1, 1, :, :] = array(
[[0, 0, 0, -1, -1, 0], [1, 0, 0, 0, 0, 1], [0, -1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
).T
# For radial winding
wind_mat2 = zeros((2, 1, 6, 3))
wind_mat2[0, 0, :, :] = array(
[[1, 0, 0, 1, 0, 0], [0, 1, 0, 0, 1, 0], [0, 0, 1, 0, 0, 1]]
).T
wind_mat2[1, 0, :, :] = array(
[[-1, 0, 0, 0, 0, -1], [0, -1, 0, -1, 0, 0], [0, 0, -1, 0, -1, 0]]
).T
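# Hypothetical sanity check (an assumption, not part of the original file):
# the plot-test matrices are indexed as (Nrad, Ntan, Zs, qs).
assert wind_mat.shape == (2, 2, 6, 4)
assert wind_mat2.shape == (2, 1, 6, 3)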
| 28.84375
| 87
| 0.404117
| 208
| 923
| 1.754808
| 0.144231
| 0.361644
| 0.336986
| 0.284932
| 0.493151
| 0.424658
| 0.413699
| 0.364384
| 0.309589
| 0.221918
| 0
| 0.233728
| 0.267606
| 923
| 31
| 88
| 29.774194
| 0.306213
| 0.127844
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.047619
| 0
| 0.047619
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
edb8a8b13cd64f1d686aff3cb249685039d73c20
| 44,023
|
py
|
Python
|
examples/plotter.py
|
Guo-Jian-Wang/cmbNNCS
|
cd55e0a2344aa5182d099cf559bc986ae0351cb7
|
[
"MIT"
] | null | null | null |
examples/plotter.py
|
Guo-Jian-Wang/cmbNNCS
|
cd55e0a2344aa5182d099cf559bc986ae0351cb7
|
[
"MIT"
] | null | null | null |
examples/plotter.py
|
Guo-Jian-Wang/cmbNNCS
|
cd55e0a2344aa5182d099cf559bc986ae0351cb7
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append('..')
sys.path.append('../..')
sys.path.append('../../..')
import coplot.plots as pl
import coplot.plot_settings as pls
import cmbnncs.simulator as simulator
import cmbnncs.utils as utils
import cmbnncs.spherical as spherical
import loader
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.gridspec as gridspec
import healpy as hp
import math
import pymaster as nmt
def change_randn_num(randn_num):
randn_num_change = randn_num.split('.')
randn_num_change = randn_num_change[0]+randn_num_change[1]
return randn_num_change
def change_randn_nums(randn_nums):
rdns = ''
rdns_list = []
for rdn in randn_nums:
rdns = rdns + change_randn_num(rdn)
rdns_list.append(change_randn_num(rdn))
return rdns, rdns_list
def mse(true, predict):
'''mean square error'''
return np.mean( (predict-true)**2 )
def cl2dl(Cl, ell_start, ell_in=None, get_ell=True):
'''
ell_start: 0 or 2, which should depend on Dl
ell_in: the ell of Cl (as the input of this function)
'''
if ell_start==0:
lmax_cl = len(Cl) - 1
elif ell_start==2:
lmax_cl = len(Cl) + 1
ell = np.arange(lmax_cl + 1)
if ell_in is not None:
if ell_start==2:
ell[2:] = ell_in
factor = ell * (ell + 1.) / 2. / np.pi
if ell_start==0:
Dl = np.zeros_like(Cl)
Dl[2:] = Cl[2:] * factor[2:]
ell_2 = ell
elif ell_start==2:
Dl = Cl * factor[2:]
ell_2 = ell[2:]
if get_ell:
return ell_2, Dl
else:
return Dl
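# Minimal worked example of cl2dl (the helper name and values are assumptions,
# not part of the original script): for Cl = 2*pi / (ell*(ell+1)) the
# conversion returns Dl = 1 at every ell >= 2.
def _demo_cl2dl():
    ell_demo = np.arange(2, 10)
    cl_demo = 2. * np.pi / (ell_demo * (ell_demo + 1.))
    ell_out, dl_out = cl2dl(cl_demo, ell_start=2, ell_in=ell_demo)
    assert np.allclose(dl_out, 1.)
    return ell_out, dl_out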
# The function defined below will compute the power spectrum between two
# NmtFields f_a and f_b, using the coupling matrix stored in the
# NmtWorkspace wsp and subtracting the deprojection bias clb.
# Note that the most expensive operations in the MASTER algorithm are
# the computation of the coupling matrix and the deprojection bias. Since
# these two objects are precomputed, this function should be pretty fast!
def compute_master(f_a, f_b, wsp, clb):
# Compute the power spectrum (a la anafast) of the masked fields
# Note that we only use n_iter=0 here to speed up the computation,
# but the default value of 3 is recommended in general.
cl_coupled = nmt.compute_coupled_cell(f_a, f_b)
# Decouple power spectrum into bandpowers inverting the coupling matrix
cl_decoupled = wsp.decouple_cell(cl_coupled, cl_bias=clb)
return cl_decoupled
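# Hypothetical sketch of the precompute-once pattern described above (the map,
# mask and bin choices are assumptions): the coupling matrix is computed one
# time, after which compute_master is cheap to call for each new field.
def _demo_compute_master(cmb_map, mask, nside):
    f = nmt.NmtField(mask, [cmb_map])
    b = nmt.NmtBin.from_nside_linear(nside, nlb=10)
    w = nmt.NmtWorkspace()
    w.compute_coupling_matrix(f, f, b)   # expensive, done once
    cl = compute_master(f, f, w, None)   # cheap, reusable per field
    return b.get_effective_ells(), cl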
def namaster_dl_TT_QQ_UU(cmb_t, mask, bl=None, nside=512, aposize=1, nlb=10,
cl_th=None, cls_th=None, cmb_t_th=None, sim_n=2):
    r'''Calculate Cl * ell*(ell+1)/2/np.pi of TT, QQ, and UU.
    cmb_t : 1-D array with shape (nside**2*12,), the (recovered) CMB I, Q, or U map.
    mask : 1-D array with shape (nside**2*12,), the mask applied to the CMB map.
    bl : 1-D array with shape (3*nside,), the beam applied to the CMB map; the multipoles run from 0 to 3*nside-1, so lmax = 3*nside - 1
    aposize : float or None, apodization scale in degrees.
    nlb : int, the bin size (\delta_\ell) of multipoles; it can be set to ~ 1/fsky
    cl_th : 1-D array, the theoretical TT, QQ, or UU power spectrum, where ell starts from 0.
    cls_th : 2-D array with shape (6, M), the theoretical Cls, where ell starts from 0;
    cls_th[:4, :] corresponds to the TT, EE, BB, and TE power spectra, respectively, and cls_th[4:, :] is 0.
    cmb_t_th : 1-D array with shape (nside**2*12,), the simulated CMB map based on the theoretical power spectrum.
    sim_n : int, the number of simulations.
    '''
if aposize is not None:
mask = nmt.mask_apodization(mask, aposize=aposize, apotype="Smooth")
if cmb_t_th is None:
f_t = nmt.NmtField(mask, [cmb_t], templates=None, beam=bl)
else:
f_t = nmt.NmtField(mask, [cmb_t], templates=[[cmb_t-cmb_t_th]], beam=bl)
#method 1
# b = nmt.NmtBin.from_nside_linear(nside, nlb=nlb, is_Dell=True) #nlb=\delta_ell ~ 1/fsky
# dl_TT = nmt.compute_full_master(f_t, f_t, b)[0]
#method 2
b = nmt.NmtBin.from_nside_linear(nside, nlb=nlb, is_Dell=False) #nlb=\delta_ell ~ 1/fsky
if cl_th is None:
cl_bias = None
else:
cl_00_th = cl_th.reshape(1, -1)
cl_bias = nmt.deprojection_bias(f_t, f_t, cl_00_th)
w = nmt.NmtWorkspace()
w.compute_coupling_matrix(f_t, f_t, b)
cl_master = compute_master(f_t, f_t, w, cl_bias)
ell = b.get_effective_ells()
#get error
if cl_th is not None:
cl_mean = np.zeros_like(cl_master)
cl_std = np.zeros_like(cl_master)
for i in np.arange(sim_n):
print("Simulating %s/%s"%(i+1, sim_n))
t, q, u = hp.synfast(cls_th, nside, pol=True, new=True, verbose=False, pixwin=False)
f0_sim = nmt.NmtField(mask, [t], templates=[[cmb_t-cmb_t_th]])
cl = compute_master(f0_sim, f0_sim, w, cl_bias)
cl_mean += cl
cl_std += cl*cl
cl_mean /= sim_n
cl_std = np.sqrt(cl_std / sim_n - cl_mean*cl_mean)
factor = ell*(ell+1)/2/np.pi
dl_std = factor * cl_std
ell, dl_master = cl2dl(cl_master[0], ell_start=2, ell_in=ell)
hp.mollview(mask, title='Mask')
if cl_th is None:
return ell, dl_master
else:
return ell, dl_master, dl_std[0]
def namaster_dl_EE_BB(cmb_qu, mask, bl=None, nside=512, aposize=1, nlb=10):
    r'''
    cmb_qu : 2-D array with shape (2, nside**2*12), CMB Q and U maps.
    mask : 1-D array with shape (nside**2*12,), the mask applied to the Q and U maps.
    bl : 1-D array with shape (3*nside,), the beam applied to the CMB map; the multipoles run from 0 to 3*nside-1, so lmax = 3*nside - 1
    aposize : float or None, apodization scale in degrees.
    nlb : int, the bin size (\delta_\ell) of multipoles; it can be set to ~ 1/fsky
    '''
if aposize is not None:
mask = nmt.mask_apodization(mask, aposize=aposize, apotype="Smooth")
f_qu = nmt.NmtField(mask, cmb_qu, beam=bl)
b = nmt.NmtBin.from_nside_linear(nside, nlb=nlb, is_Dell=True) #nlb=10, \delta_ell ~ 1/fsky
dl_22 = nmt.compute_full_master(f_qu, f_qu, b)
ell = b.get_effective_ells()
hp.mollview(mask, title='Mask')
#dl_22[0]: EE, dl_22[3]: BB
return ell, dl_22
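# Hypothetical end-to-end sketch (the helper name and parameter values are
# assumptions, not part of the original script): simulate a map from
# theoretical Cls, then bin its TT spectrum with the helper above using a
# trivial full-sky mask.
def _demo_dl_TT(cls_th, nside=256):
    t, q, u = hp.synfast(cls_th, nside, pol=True, new=True, verbose=False, pixwin=False)
    mask = np.ones(nside ** 2 * 12)
    ell, dl_tt = namaster_dl_TT_QQ_UU(t, mask, nside=nside, aposize=None, nlb=10)
    return ell, dl_tt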
class PlotCMBFull(object):
def __init__(self, cmb, cmb_ML, randn_num='', map_type='I', fig_type='test',
map_n=0, input_freqs=[100,143,217,353], out_freq=143, extra_suffix=''):
"""
map_type: 'I', 'Q' or 'U'
fig_type: 'test' or 'obs'
"""
self.cmb = cmb
self.cmb_ML = cmb_ML
self.randn_num = randn_num
self.map_type = map_type
self.fig_type = fig_type
self.map_n = map_n
self.input_freqs = input_freqs
self.freq_num = len(input_freqs)
self.out_freq = out_freq
self.ell = None
self.extra_suffix = extra_suffix
@property
def minmax(self):
if self.map_type=='I':
return 500
else:
return 10
@property
def nside(self):
return int(np.sqrt(len(self.cmb)/12))
@property
def lmax(self):
if self.nside==512:
self.xlim_max = 1500
elif self.nside==256:
self.xlim_max = 760
return 3*self.nside - 1
@property
def randn_marker(self):
return change_randn_num(self.randn_num)
@property
def fig_prefix(self):
if self.fig_type=='obs':
return 'plkcmb'
elif self.fig_type=='test':
return 'simcmb'
def bl_plk(self):
beams = loader.get_planck_beams(nside=self.nside, relative_dir='obs_data')
return beams[str(self.out_freq)][:self.lmax+1]
def bl_fwhm(self, fwhm):
bl = hp.gauss_beam(fwhm*np.pi/10800., lmax=self.lmax)
return bl[:self.lmax+1]
def bl(self, fwhm=None):
if fwhm is None:
print("Using Planck beam file !!!")
return self.bl_plk()
else:
return self.bl_fwhm(fwhm)
@property
def bin_lengh(self):
return 30
@property
def bin_n(self):
return int(math.ceil( (self.lmax-1)/float(self.bin_lengh) ))
def get_plk_fwhm(self):
"""
        The recovered CMB map has a beam with fwhm=9.43 arcmin (for output at 100GHz), while the Planck CMB has a 5 arcmin beam.
        The generated beam map is used to calculate the residual and MSE of the CMB map.
        Note
        ----
        Note that this procedure is not strictly correct: the right way would be to remove the beam from the CMB map and then add the 9.43 arcmin beam,
        but that is not feasible here. Therefore, this operation is only an approximation, since the area over which the beams act is much
        smaller than that of a pixel when nside=256.
"""
if self.out_freq == 100:
self.plk_fwhm = 9.43
elif self.out_freq == 143:
self.plk_fwhm = 7.27
elif self.out_freq == 217:
self.plk_fwhm = 5.01
elif self.out_freq == 70:
self.plk_fwhm = 13.31
elif self.out_freq == 353:
self.plk_fwhm = 4.86
@property
def residual_map(self):
return self.cmb_ML - self.cmb
def mask_plk(self):
print("Using Planck mask !!!")
if self.map_type=='I':
self.mask = np.load('obs_data/mask/COM_Mask_CMB-common-Mask-Int_%s_R3.00.npy'%self.nside)
else:
self.mask = np.load('obs_data/mask/COM_Mask_CMB-common-Mask-Pol_%s_R3.00.npy'%self.nside)
self.fsky = np.count_nonzero(self.mask) / float(len(self.mask))
def mask_manual(self):
self.mask = np.ones(self.nside**2*12)
self.fsky = np.count_nonzero(self.mask) / float(len(self.mask))
def plot_cmb(self, savefig=False, root='figures', hold=False):
if self.fig_type=='obs':
title = 'Planck CMB'
elif self.fig_type=='test':
title = 'Simulated CMB'
matplotlib.rcParams.update({'font.size': 16})
hp.mollview(self.cmb, cmap='jet', min=-self.minmax, max=self.minmax, title=title, hold=hold)
if savefig:
utils.mkdir(root)
plt.savefig(root + '/%s_%s_%s.pdf'%(self.fig_prefix,self.map_type,self.map_n), bbox_inches='tight')
def plot_cmb_ML(self, savefig=False, root='figures', hold=False):
matplotlib.rcParams.update({'font.size': 16})
hp.mollview(self.cmb_ML, cmap='jet', min=-self.minmax, max=self.minmax, title='Recovered CMB', hold=hold)
if savefig:
utils.mkdir(root)
if self.extra_suffix:
plt.savefig(root + '/ML_%s_%s_%s_%s_%s.pdf'%(self.fig_prefix,self.map_type,self.map_n,self.randn_marker,self.extra_suffix), bbox_inches='tight')
else:
plt.savefig(root + '/ML_%s_%s_%s_%s.pdf'%(self.fig_prefix,self.map_type,self.map_n,self.randn_marker), bbox_inches='tight')
def plot_residual(self, savefig=False, root='figures', hold=False):
matplotlib.rcParams.update({'font.size': 16})
hp.mollview(self.residual_map, cmap='jet', min=-self.minmax/10., max=self.minmax/10., title='Residual', hold=hold)
if savefig:
utils.mkdir(root)
if self.extra_suffix:
plt.savefig(root+'/residual_%s_%s_%s_%s_%s.pdf'%(self.fig_prefix,self.map_type,self.map_n,self.randn_marker,self.extra_suffix), bbox_inches='tight')
else:
plt.savefig(root+'/residual_%s_%s_%s_%s.pdf'%(self.fig_prefix,self.map_type,self.map_n,self.randn_marker), bbox_inches='tight')
def get_dl(self, fwhm=None, aposize=1, nlb=None, bin_residual=True):
'''
aposize : float or None
nlb : int or None
'''
if nlb is None:
self.nlb = math.ceil(1/self.fsky)
else:
self.nlb = nlb
self.get_plk_fwhm()
if self.fig_type=='obs':
self.ell, self.dl = namaster_dl_TT_QQ_UU(self.cmb, self.mask, bl=self.bl(fwhm=5.0), nside=self.nside, aposize=aposize, nlb=self.nlb)
self.ell, self.dl_ML = namaster_dl_TT_QQ_UU(self.cmb_ML, self.mask, bl=self.bl(fwhm=self.plk_fwhm), nside=self.nside, aposize=aposize, nlb=self.nlb)
else:
self.ell, self.dl = namaster_dl_TT_QQ_UU(self.cmb, self.mask, bl=self.bl(fwhm=fwhm), nside=self.nside, aposize=aposize, nlb=self.nlb, cl_th=None)
self.ell, self.dl_ML = namaster_dl_TT_QQ_UU(self.cmb_ML, self.mask, bl=self.bl(fwhm=fwhm), nside=self.nside, aposize=aposize, nlb=self.nlb, cl_th=None)
self.mse_map = mse(self.cmb, self.cmb_ML)
        self.mse_dl = mse(self.dl, self.dl_ML)
print('mseSpectra:%s'%self.mse_dl)
#different from sim_tt
self.dl_diff = self.dl_ML - self.dl
if bin_residual:
self.ell_bined = [np.mean(self.ell[i*self.bin_lengh:(i+1)*self.bin_lengh]) for i in range(self.bin_n)]
self.dl_diff_bined = [self.dl_diff[i*self.bin_lengh:(i+1)*self.bin_lengh] for i in range(self.bin_n)]
self.dl_diff_bined_best = [np.mean(self.dl_diff_bined[i]) for i in range(self.bin_n)]
self.dl_diff_bined_err = [np.std(self.dl_diff_bined[i]) for i in range(self.bin_n)]
def plot_dl(self, savefig=False, root='figures', one_panel=True,
show_title=False, title_str=None, show_mse=False,
fwhm=None, aposize=1, nlb=None, bin_residual=True):
if self.ell is None:
self.get_dl(fwhm=fwhm, aposize=aposize, nlb=nlb, bin_residual=bin_residual)
if one_panel:
fig_spectra = plt.figure(figsize=(6*1.2, 4.5*1.2))
fig_spectra.subplots_adjust(hspace=0)
gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
ticks_size = 12
fontsize = 16
else:
gs = gridspec.GridSpec(3, 2, height_ratios=[5.5, 3, 1])
ticks_size = 12
fontsize = 18
if one_panel:
ax_0 = plt.subplot(gs[0])
else:
ax_0 = plt.subplot(gs[3])
ax_0 = pls.PlotSettings().setting(ax=ax_0,labels=[r'$\ell$', r'$D_\ell^{TT}[\mu k^2]$'],
ticks_size=ticks_size,show_xticks=False,minor_locator_N=8,major_locator_N=5)
if self.fig_type=='obs':
ax_0.plot(self.ell, self.dl, label='Planck CMB')
elif self.fig_type=='test':
ax_0.plot(self.ell, self.dl, label='Simulated CMB')
if self.map_type=='I':
ax_0.plot(self.ell, self.dl_ML, label='Recovered CMB')
ax_0.set_xlim(0, self.xlim_max)
ax_0.set_ylim(10, 7100)
else:
ax_0.plot(self.ell, self.dl_ML, label='Recovered CMB')
ax_0.set_xlim(0, self.xlim_max)
if show_mse:
ax_0.text(self.lmax*0.6, max(self.dl)*0.52, r'$MSE_{CMB}:%.2f$'%self.mse_map, fontsize=fontsize)
ax_0.text(self.lmax*0.6, max(self.dl)*0.35, r'$MSE_{D_\ell}:%.2f$'%self.mse_dl, fontsize=fontsize)
ax_0.legend(fontsize=fontsize)
if show_title:
if self.freq_num==1:
if title_str is None:
plt.title('%s frequency: %s'%(self.freq_num, self.input_freqs), fontsize=fontsize)
else:
plt.title(title_str, fontsize=fontsize)
else:
if title_str is None:
plt.title('%s frequencies: %s'%(self.freq_num, self.input_freqs), fontsize=fontsize)
else:
plt.title(title_str, fontsize=fontsize)
if one_panel:
ax_1 = plt.subplot(gs[1])
else:
ax_1 = plt.subplot(gs[5])
ax_1 = pls.PlotSettings().setting(ax=ax_1,labels=[r'$\ell$', r'$\Delta D_\ell^{TT}[\mu k^2]$'],
ticks_size=ticks_size,minor_locator_N=8,major_locator_N=5)
ax_1.plot([0, max(self.ell)], [0,0], '--', color=pl.fiducial_colors[9])
if bin_residual:
ax_1.errorbar(self.ell_bined, self.dl_diff_bined_best, yerr=self.dl_diff_bined_err, fmt='.')
else:
ax_1.plot(self.ell, self.dl_diff, color=pl.fiducial_colors[8])
if not savefig:
plt.plot([768,768], [-280,280])
plt.text(768-50, 20, '768')
plt.plot([1000,1000], [-280,280])
plt.text(1000-50, 20, '1000')
ax_1.set_xlim(0, self.xlim_max)
if self.map_type=='I':
ax_1.set_ylim(-100, 100)
else:
ax_1.set_ylim(-3, 3)
if savefig:
if self.extra_suffix:
pl.savefig(root, 'spectra_%s_%s_%s_%s_%s.pdf'%(self.fig_prefix,self.map_type,self.map_n,self.randn_marker,self.extra_suffix), fig_spectra)
else:
pl.savefig(root, 'spectra_%s_%s_%s_%s.pdf'%(self.fig_prefix,self.map_type,self.map_n,self.randn_marker), fig_spectra)
def plot_all(self, savefig=False, root='figures', fwhm=None, aposize=1, nlb=None, bin_residual=True):
if self.ell is None:
self.get_dl(fwhm=fwhm, aposize=aposize, nlb=nlb, bin_residual=bin_residual)
fig = plt.figure(figsize=(6*1.2*2, 4.5*1.2*2))
fig.subplots_adjust(wspace=0.21, hspace=0)
pls.PlotSettings().setting(location=(2,2,1),set_labels=False)
self.plot_cmb(hold=True)
pls.PlotSettings().setting(location=(2,2,2),set_labels=False)
self.plot_cmb_ML(hold=True)
pls.PlotSettings().setting(location=(2,2,3),set_labels=False)
self.plot_residual(hold=True)
self.plot_dl(one_panel=False, show_title=True, show_mse=True, fwhm=fwhm, aposize=aposize, nlb=nlb, bin_residual=bin_residual)
if savefig:
if self.extra_suffix:
pl.savefig(root, '%s_%s_%s_%s_%s.pdf'%(self.fig_prefix,self.map_type,self.map_n,self.randn_marker,self.extra_suffix), fig)
else:
pl.savefig(root, '%s_%s_%s_%s.pdf'%(self.fig_prefix,self.map_type,self.map_n,self.randn_marker), fig)
def _get_miniPatch(self, Map):
'''
select a 3*3 deg^2 patch
'''
ps = spherical.PixelSize(nside=self.nside)
patch_size = int(3/ps.pixel_length)
map_blocks = spherical.Cut(Map).block_all()
patch_0 = map_blocks[0][:patch_size, :patch_size]
patch_1 = map_blocks[4][:patch_size, :patch_size]
start_pix = (self.nside-patch_size)//2
patch_2 = map_blocks[4][start_pix:start_pix+patch_size, start_pix:start_pix+patch_size]
patch_3 = map_blocks[4][-patch_size:, -patch_size:]
patch_4 = map_blocks[11][-patch_size:, -patch_size:]
return [patch_0, patch_1, patch_2, patch_3, patch_4]
def get_miniPatch(self):
self.cmb_miniBatches = self._get_miniPatch(self.cmb)
self.cmb_ML_miniBatches = self._get_miniPatch(self.cmb_ML)
self.residual_map_miniBatches = self._get_miniPatch(self.residual_map)
def plot_miniPatch(self, savefig=False, root='figures'):
self.get_miniPatch()
fig = plt.figure(figsize=(3*5, 3*3))
fig.subplots_adjust(left=0,bottom=0,right=1,top=1,wspace=0.15,hspace=0.25)
for row in range(3):
for column in range(5):
pls.PlotSettings().setting(location=(3,5,row*5+column+1),set_labels=False,minor_locator_N=1)
# plt.subplot(3,5,row*5+column+1)
if row==0:
im = plt.imshow(self.cmb_miniBatches[column], cmap='jet', vmin=-500, vmax=500)
if self.fig_type=='obs':
plt.title('Planck CMB', fontsize=16)
elif self.fig_type=='test':
plt.title('Simulated CMB', fontsize=16)
elif row==1:
im = plt.imshow(self.cmb_ML_miniBatches[column], cmap='jet', vmin=-500, vmax=500)
plt.title('Recovered CMB', fontsize=16)
if column==4:
cbar_ax = fig.add_axes([1.01, 0.358, 0.01, 0.641])
plt.colorbar(im, cax=cbar_ax)
elif row==2:
im = plt.imshow(self.residual_map_miniBatches[column], cmap='jet', vmin=-50, vmax=50)
plt.title('Residual', fontsize=16)
if column==4:
cbar_ax = fig.add_axes([1.01, 0., 0.01, 0.287])
plt.colorbar(im, cax=cbar_ax)
if savefig:
utils.mkdir(root)
if self.extra_suffix:
plt.savefig(root + '/miniPatch_%s_%s_%s_%s_%s.pdf'%(self.fig_prefix,self.map_type,self.map_n,self.randn_marker,self.extra_suffix), bbox_inches='tight')
else:
plt.savefig(root + '/miniPatch_%s_%s_%s_%s.pdf'%(self.fig_prefix,self.map_type,self.map_n,self.randn_marker), bbox_inches='tight')
#%%
class PlotCMBBlock(object):
def __init__(self, cmb, cmb_ML, randn_num='', map_type='I', fig_type='test',
map_n=0, input_freqs=[100,143,217,353], out_freq=143, block_n=0, extra_suffix=''):
"""
map_type: 'I', 'Q' or 'U'
fig_type: 'test' or 'obs'
"""
self.cmb = cmb
self.cmb_ML = cmb_ML
self.randn_num = randn_num
self.map_type = map_type
self.fig_type = fig_type
self.map_n = map_n
self.input_freqs = input_freqs
self.freq_num = len(input_freqs)
self.out_freq = out_freq
self.block_n = block_n
self.extra_suffix = extra_suffix
self.ell = None
@property
def minmax(self):
if self.map_type=='I':
return 500
else:
return 10
@property
def dl_type(self):
if self.map_type=='I':
return 'TT'
else:
return '%s%s'%(self.map_type, self.map_type)
@property
def nside(self):
return int(len(self.cmb))
@property
def lmax(self):
if self.nside==512:
self.xlim_max = 1500
elif self.nside==256:
self.xlim_max = 760
return 3*self.nside - 1
@property
def randn_marker(self):
return change_randn_num(self.randn_num)
@property
def fig_prefix(self):
if self.fig_type=='obs':
return 'plkcmb'
elif self.fig_type=='test':
return 'simcmb'
def bl_plk(self):
beams = loader.get_planck_beams(nside=self.nside, relative_dir='obs_data')
return beams[str(self.out_freq)][:self.lmax+1]
def bl_fwhm(self, fwhm):
bl = hp.gauss_beam(fwhm*np.pi/10800., lmax=self.lmax)
return bl[:self.lmax+1]
def bl(self, fwhm=None):
if fwhm is None:
print("Using Planck beam file !!!")
return self.bl_plk()
else:
return self.bl_fwhm(fwhm)
@property
def bin_lengh(self):
return 6 #6*nlb = 30, let nlb=5
@property
def bin_n(self):
return int(math.ceil( (self.lmax-1)/float(self.bin_lengh) ))
@property
def residual_map(self):
if self.fig_type=='obs':
return self.cmb_ML - self.cmb_beam
else:
return self.cmb_ML - self.cmb
def mask_plk(self):
if self.map_type=='I':
mask = np.load('obs_data/mask/COM_Mask_CMB-common-Mask-Int_%s_R3.00.npy'%self.nside)
else:
mask = np.load('obs_data/mask/COM_Mask_CMB-common-Mask-Pol_%s_R3.00.npy'%self.nside)
mask_0 = spherical.Cut(mask).block(self.block_n)
self.mask = spherical.Block2Full(mask_0, self.block_n).full()
self.fsky = np.count_nonzero(self.mask) / float(len(self.mask))
def mask_manual(self):
mask_0 = np.ones((self.nside, self.nside))
self.mask = spherical.Block2Full(mask_0, self.block_n).full()
self.fsky = np.count_nonzero(self.mask) / float(len(self.mask))
def plot_cmb(self, savefig=False, root='figures', hold=False, one_panel=True):
if self.fig_type=='obs':
title = 'Planck CMB'
elif self.fig_type=='test':
title = 'Simulated CMB'
if one_panel:
plt.figure()#
matplotlib.rcParams.update({'font.size': 16})
plt.imshow(self.cmb, cmap='jet', vmin=-self.minmax, vmax=self.minmax)
plt.colorbar()
plt.title(title, fontsize=16)
if savefig:
utils.mkdir(root)
if getattr(self, 'use_mask', False): # use_mask is assumed to be set by the caller
plt.savefig(root + '/%s_%s_%s_block%s_mask.pdf'%(self.fig_prefix,self.map_type,self.map_n,self.block_n), bbox_inches='tight')
else:
plt.savefig(root + '/%s_%s_%s_block%s.pdf'%(self.fig_prefix,self.map_type,self.map_n,self.block_n), bbox_inches='tight')
def plot_cmb_ML(self, savefig=False, root='figures', hold=False, one_panel=True):
if one_panel:
plt.figure()#
matplotlib.rcParams.update({'font.size': 16})
plt.imshow(self.cmb_ML, cmap='jet', vmin=-self.minmax, vmax=self.minmax)
plt.colorbar()
plt.title('Recovered CMB', fontsize=16)
if savefig:
utils.mkdir(root)
if self.extra_suffix:
plt.savefig(root + '/ML_%s_%s_%s_%s_block%s_%s.pdf'%(self.fig_prefix,self.map_type,self.map_n,self.randn_marker,self.block_n,self.extra_suffix), bbox_inches='tight')
else:
plt.savefig(root + '/ML_%s_%s_%s_%s_block%s.pdf'%(self.fig_prefix,self.map_type,self.map_n,self.randn_marker,self.block_n), bbox_inches='tight')
def plot_residual(self, savefig=False, root='figures', hold=False, one_panel=True):
if one_panel:
plt.figure()#
matplotlib.rcParams.update({'font.size': 16})
plt.imshow(self.residual_map, cmap='jet', vmin=-self.minmax/50., vmax=self.minmax/50.)
plt.colorbar()
plt.title('Residual', fontsize=16)
if savefig:
utils.mkdir(root)
if self.extra_suffix:
plt.savefig(root+'/residual_%s_%s_%s_%s_block%s_%s.pdf'%(self.fig_prefix,self.map_type,self.map_n,self.randn_marker,self.block_n,self.extra_suffix), bbox_inches='tight')
else:
plt.savefig(root+'/residual_%s_%s_%s_%s_block%s.pdf'%(self.fig_prefix,self.map_type,self.map_n,self.randn_marker,self.block_n), bbox_inches='tight')
def get_dl(self, fwhm=None, aposize=1, nlb=None, bin_residual=True):
'''
aposize : float or None
nlb : int or None
'''
if nlb is None:
self.nlb = math.ceil(1/self.fsky)
else:
self.nlb = nlb
self.cmb_sp = spherical.Block2Full(self.cmb, self.block_n).full()
self.cmb_ML_sp = spherical.Block2Full(self.cmb_ML, self.block_n).full()
self.ell, self.dl = namaster_dl_TT_QQ_UU(self.cmb_sp, self.mask, bl=self.bl(fwhm=fwhm), nside=self.nside, aposize=aposize, nlb=self.nlb)
self.ell, self.dl_ML = namaster_dl_TT_QQ_UU(self.cmb_ML_sp, self.mask, bl=self.bl(fwhm=fwhm), nside=self.nside, aposize=aposize, nlb=self.nlb)
self.mse_map = mse(self.cmb, self.cmb_ML)
self.mse_dl = mse(self.dl, self.dl_ML)
print('mseSpectra: %s'%self.mse_dl)
self.dl_diff = self.dl_ML - self.dl
if bin_residual:
self.ell_bined = [np.mean(self.ell[i*self.bin_lengh:(i+1)*self.bin_lengh]) for i in range(self.bin_n)]
self.dl_diff_bined = [self.dl_diff[i*self.bin_lengh:(i+1)*self.bin_lengh] for i in range(self.bin_n)]
self.dl_diff_bined_best = [np.mean(self.dl_diff_bined[i]) for i in range(self.bin_n)]
self.dl_diff_bined_err = [np.std(self.dl_diff_bined[i]) for i in range(self.bin_n)]
def plot_dl(self, savefig=False, root='figures', one_panel=True,
show_title=False, title_str=None, show_mse=False,
fwhm=None, aposize=1, nlb=None, bin_residual=True):
if self.ell is None:
self.get_dl(fwhm=fwhm, aposize=aposize, nlb=nlb, bin_residual=bin_residual)
if one_panel:
fig_spectra = plt.figure(figsize=(6*1.2, 4.5*1.2))
fig_spectra.subplots_adjust(hspace=0)
gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
ticks_size = 12 + 4
fontsize = 16 + 2
else:
gs = gridspec.GridSpec(3, 2, height_ratios=[5.5, 3, 1])
ticks_size = 12
fontsize = 18
if one_panel:
ax_0 = plt.subplot(gs[0])
else:
ax_0 = plt.subplot(gs[3])
ax_0 = pls.PlotSettings().setting(ax=ax_0,labels=[r'$\ell$', r'$D_\ell^{%s}[\mu k^2]$'%self.dl_type],
ticks_size=ticks_size,show_xticks=False,minor_locator_N=8,major_locator_N=5)
if self.fig_type=='obs':
ax_0.plot(self.ell, self.dl, label='Planck CMB')
elif self.fig_type=='test':
ax_0.plot(self.ell, self.dl, label='Simulated CMB')
if self.map_type=='I':
ax_0.plot(self.ell, self.dl_ML, label='Recovered CMB')
ax_0.set_xlim(0, self.xlim_max)
ax_0.set_ylim(10, 7100)
# if self.fig_type=='obs':
# ax_0.set_ylim(10, 8000)
# else:
# ax_0.set_ylim(10, 7500)
else:
ax_0.plot(self.ell, self.dl_ML, 'r', label='Recovered CMB')
ax_0.set_xlim(0, self.xlim_max)
# ax_0.set_ylim(0.001, 2.6)
if show_mse:
ax_0.text(self.lmax*0.62, max(self.dl)*0.4, r'$MSE_{CMB}:%.2f$'%self.mse_map, fontsize=fontsize)
ax_0.text(self.lmax*0.62, max(self.dl)*0.3, r'$MSE_{D_\ell}:%.2f$'%self.mse_dl, fontsize=fontsize)
ax_0.legend(fontsize=fontsize)
if show_title:
if self.freq_num==1:
if title_str is None:
plt.title('%s frequency: %s'%(self.freq_num, self.input_freqs), fontsize=fontsize)
else:
plt.title(title_str, fontsize=fontsize)
else:
if title_str is None:
plt.title('%s frequencies: %s'%(self.freq_num, self.input_freqs), fontsize=fontsize)
else:
plt.title(title_str, fontsize=fontsize)
if one_panel:
ax_1 = plt.subplot(gs[1])
else:
ax_1 = plt.subplot(gs[5])
ax_1 = pls.PlotSettings().setting(ax=ax_1,labels=[r'$\ell$', r'$\Delta D_\ell^{%s}[\mu k^2]$'%self.dl_type],
ticks_size=ticks_size,minor_locator_N=8,major_locator_N=5)
ax_1.plot([0, max(self.ell)], [0,0], '--', color=pl.fiducial_colors[9])
if bin_residual:
ax_1.errorbar(self.ell_bined, self.dl_diff_bined_best, yerr=self.dl_diff_bined_err, fmt='.')
else:
ax_1.plot(self.ell, self.dl_diff, color=pl.fiducial_colors[8])
if not savefig:
# vertical guide lines at selected multipoles (debugging aid only)
plt.plot([768,768], [-280,280])
plt.plot([1000,1000], [-280,280])
plt.plot([1250,1250], [-280,280])
plt.plot([1300,1300], [-280,280])
ax_1.set_xlim(0, self.xlim_max)
if self.map_type=='I':
ax_1.set_ylim(-100, 100)
else:
ax_1.set_ylim(-0.5, 0.5)
if savefig:
if self.extra_suffix:
pl.savefig(root, 'spectra_%s_%s_%s_%s_block%s_%s.pdf'%(self.fig_prefix,self.map_type,self.map_n,self.randn_marker,self.block_n,self.extra_suffix), fig_spectra)
else:
pl.savefig(root, 'spectra_%s_%s_%s_%s_block%s.pdf'%(self.fig_prefix,self.map_type,self.map_n,self.randn_marker,self.block_n), fig_spectra)
def plot_all(self, savefig=False, root='figures', fwhm=None, aposize=1, nlb=None, bin_residual=True):
if self.ell is None:
self.get_dl(fwhm=fwhm, aposize=aposize, nlb=nlb, bin_residual=bin_residual)
fig = plt.figure(figsize=(6*1.2*2, 4.5*1.2*2))
fig.subplots_adjust(wspace=0.2, hspace=0.2)
pls.PlotSettings().setting(location=(2,2,1),set_labels=False)
self.plot_cmb(hold=True, one_panel=False)
pls.PlotSettings().setting(location=(2,2,2),set_labels=False)
self.plot_cmb_ML(hold=True, one_panel=False)
pls.PlotSettings().setting(location=(2,2,3),set_labels=False)
self.plot_residual(hold=True, one_panel=False)
# pls.PlotSettings().setting(location=(2,2,4),set_labels=False)
self.plot_dl(one_panel=False, show_title=True, show_mse=True, fwhm=fwhm, aposize=aposize, nlb=nlb)
# plt.suptitle(self.case_labels[str(self.case)], fontsize=22)
if savefig:
if self.extra_suffix:
pl.savefig(root, '%s_%s_%s_%s_block%s_%s.pdf'%(self.fig_prefix,self.map_type,self.map_n,self.randn_marker,self.block_n,self.extra_suffix), fig)
else:
pl.savefig(root, '%s_%s_%s_%s_block%s.pdf'%(self.fig_prefix,self.map_type,self.map_n,self.randn_marker,self.block_n), fig)
def _get_miniPatch(self, Map):
'''
select three 3x3 deg^2 patches (left edge, centre, right edge) from the map
'''
ps = spherical.PixelSize(nside=self.nside)
patch_size = int(3/ps.pixel_length)
start_pix = (self.nside-patch_size)//2
patch_0 = Map[start_pix:start_pix+patch_size, :patch_size]
patch_1 = Map[start_pix:start_pix+patch_size, start_pix:start_pix+patch_size]
patch_2 = Map[start_pix:start_pix+patch_size, -patch_size:]
return [patch_0, patch_1, patch_2]
def get_miniPatch(self):
self.cmb_miniBatches = self._get_miniPatch(self.cmb)
self.cmb_ML_miniBatches = self._get_miniPatch(self.cmb_ML)
self.residual_map_miniBatches = self._get_miniPatch(self.residual_map)
def plot_miniPatch(self, savefig=False, root='figures'):
self.get_miniPatch()
fig = plt.figure(figsize=(3*3, 3*3))
fig.subplots_adjust(left=0,bottom=0,right=1,top=1,wspace=0.15,hspace=0.25)
for row in range(3):
for column in range(3):
pls.PlotSettings().setting(location=(3,3,row*3+column+1),set_labels=False,minor_locator_N=1)
# plt.subplot(3,3,row*3+column+1)
if row==0:
im = plt.imshow(self.cmb_miniBatches[column], cmap='jet', vmin=-500, vmax=500)
if self.fig_type=='obs':
plt.title('Planck CMB', fontsize=16)
elif self.fig_type=='test':
plt.title('Simulated CMB', fontsize=16)
elif row==1:
im = plt.imshow(self.cmb_ML_miniBatches[column], cmap='jet', vmin=-500, vmax=500)
plt.title('Recovered CMB', fontsize=16)
if column==2:
cbar_ax = fig.add_axes([1.01, 0.358, 0.015, 0.641])
plt.colorbar(im, cax=cbar_ax)
elif row==2:
im = plt.imshow(self.residual_map_miniBatches[column], cmap='jet', vmin=-10, vmax=10)
plt.title('Residual', fontsize=16)
if column==2:
cbar_ax = fig.add_axes([1.01, 0., 0.015, 0.287])
plt.colorbar(im, cax=cbar_ax)
if savefig:
utils.mkdir(root)
if self.extra_suffix:
plt.savefig(root + '/miniPatch_%s_%s_%s_%s_block%s_%s.pdf'%(self.fig_prefix,self.map_type,self.map_n,self.randn_marker,self.block_n,self.extra_suffix), bbox_inches='tight')
else:
plt.savefig(root + '/miniPatch_%s_%s_%s_%s_block%s.pdf'%(self.fig_prefix,self.map_type,self.map_n,self.randn_marker,self.block_n), bbox_inches='tight')
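# Minimal usage sketch for PlotCMBBlock (illustrative only; `cmb` and `cmb_ML`
# are assumed to be nside x nside map blocks already in memory):
#
#     pb = PlotCMBBlock(cmb, cmb_ML, map_type='I', fig_type='test', block_n=0)
#     pb.mask_manual()            # or pb.mask_plk() for the Planck common mask
#     pb.plot_all(savefig=False)  # maps, residual and D_ell panels in one figure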
#%%
class PlotCMB_EEBB(object):
def __init__(self, cmb_qu, cmb_ML_qu, map_n=0, nside=512, block_n=0, randn_marker='',extra_suffix=''):
self.cmb_qu = cmb_qu
self.cmb_ML_qu = cmb_ML_qu
self.map_n = map_n
self.nside = nside
self.block_n = block_n
self.randn_marker = randn_marker
self.extra_suffix = extra_suffix
self.ell = None
@property
def lmax(self):
# note: also sets xlim_max as a side effect, since the plotting range follows nside
if self.nside==512:
self.xlim_max = 1500
elif self.nside==256:
self.xlim_max = 760
return 3*self.nside - 1
@property
def bin_lengh(self):
return 6 # 6 bandpowers per residual bin; with nlb = 5 this spans Delta ell = 30
@property
def bin_n(self):
return int(math.ceil( (self.lmax-1)/float(self.bin_lengh) ))
@property
def fig_prefix(self):
return 'simcmb'
def mask_manual(self):
mask_0 = np.ones((self.nside, self.nside))
self.mask = spherical.Block2Full(mask_0, self.block_n).full()
self.fsky = np.count_nonzero(self.mask) / float(len(self.mask))
def bl_fwhm(self, fwhm):
# fwhm is in arcmin; pi/10800 rad per arcmin
bl = hp.gauss_beam(fwhm*np.pi/10800., lmax=self.lmax)
return bl[:self.lmax+1]
def get_dl(self, fwhm=None, aposize=1, nlb=None, bin_residual=True):
'''
fwhm : float, beam FWHM in arcmin (required here; there is no Planck-beam fallback)
aposize : float or None
nlb : int or None
'''
# self.get_fiducial_dls()
if nlb is None:
self.nlb = math.ceil(1/self.fsky)
else:
self.nlb = nlb
self.ell, self.dl = namaster_dl_EE_BB(self.cmb_qu, self.mask, bl=self.bl_fwhm(fwhm=fwhm), nside=self.nside, aposize=aposize, nlb=self.nlb)
self.ell, self.dl_ML = namaster_dl_EE_BB(self.cmb_ML_qu, self.mask, bl=self.bl_fwhm(fwhm=fwhm), nside=self.nside, aposize=aposize, nlb=self.nlb)
self.dl_EE, self.dl_BB = self.dl[0], self.dl[3]
self.dl_ML_EE, self.dl_ML_BB = self.dl_ML[0], self.dl_ML[3]
self.diff_EE = self.dl_ML_EE - self.dl_EE
self.diff_BB = self.dl_ML_BB - self.dl_BB
# print(self.diff_BB)
#bined ell & dl residual
if bin_residual:
self.ell_bined = [np.mean(self.ell[i*self.bin_lengh:(i+1)*self.bin_lengh]) for i in range(self.bin_n)]
#residual of EE
self.diff_EE_bined = [self.diff_EE[i*self.bin_lengh:(i+1)*self.bin_lengh] for i in range(self.bin_n)]
self.diff_EE_bined_best = [np.mean(self.diff_EE_bined[i]) for i in range(self.bin_n)]
self.diff_EE_bined_err = [np.std(self.diff_EE_bined[i]) for i in range(self.bin_n)]
#residual of BB
self.diff_BB_bined = [self.diff_BB[i*self.bin_lengh:(i+1)*self.bin_lengh] for i in range(self.bin_n)]
self.diff_BB_bined_best = [np.mean(self.diff_BB_bined[i]) for i in range(self.bin_n)]
self.diff_BB_bined_err = [np.std(self.diff_BB_bined[i]) for i in range(self.bin_n)]
def plot_dl(self, savefig=False, root='figures', dl_type='', bin_residual=True):
'''
dl_type: EE or BB
'''
fig_spectra = plt.figure(figsize=(6*1.*2, 4.5*1.))
fig_spectra.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0.23)
# gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
ticks_size = 12 #+ 4
fontsize = 16 #+ 2
ax_0 = pls.PlotSettings().setting(location=[1,2,1],labels=[r'$\ell$', r'$D_\ell^{%s}[\mu k^2]$'%dl_type],
ticks_size=ticks_size,show_xticks=False,minor_locator_N=8,major_locator_N=5)
ax_0.loglog(self.ell, getattr(self, 'dl_%s'%dl_type), label='Simulated CMB')
ax_0.loglog(self.ell, getattr(self, 'dl_ML_%s'%dl_type), label='Recovered CMB')
ax_0.set_xlim(0, self.xlim_max)
# if dl_type=='EE':
# ax_0.set_ylim(-0.05, 0.05)
# elif dl_type=='BB':
# ax_0.set_ylim(-0.003, 0.003)
ax_0.legend(loc=2, fontsize=fontsize)
ax_1 = pls.PlotSettings().setting(location=[1,2,2],labels=[r'$\ell$', r'$\Delta D_\ell^{%s}[\mu k^2]$'%dl_type],
ticks_size=ticks_size,minor_locator_N=8,major_locator_N=5)
ax_1.plot([0, max(self.ell)], [0,0], '--', color=pl.fiducial_colors[9])
if bin_residual:
ax_1.errorbar(self.ell_bined, getattr(self, 'diff_%s_bined_best'%dl_type), yerr=getattr(self, 'diff_%s_bined_err'%dl_type), fmt='.')
else:
ax_1.plot(self.ell, getattr(self, 'diff_%s'%dl_type), color=pl.fiducial_colors[8])
ax_1.set_xlim(0, self.xlim_max)
if dl_type=='EE':
# pass
ax_1.set_ylim(-0.9, 0.1)
# ax_1.set_ylim(-2e-5, 2e-5) #test, plot CL
elif dl_type=='BB':
ax_1.set_ylim(-0.04, 0.04)
if not savefig:
# vertical guide lines at selected multipoles (debugging aid only)
plt.plot([768,768], [-280,280])
plt.plot([1000,1000], [-280,280])
plt.plot([1250,1250], [-280,280])
plt.plot([1300,1300], [-280,280])
if savefig:
if self.extra_suffix:
pl.savefig(root+'/pdf', 'spectra_%s_%s_%s_%s_block%s_%s.pdf'%(self.fig_prefix,dl_type,self.map_n,self.randn_marker,self.block_n,self.extra_suffix), fig_spectra)
pl.savefig(root+'/jpg', 'spectra_%s_%s_%s_%s_block%s_%s.jpg'%(self.fig_prefix,dl_type,self.map_n,self.randn_marker,self.block_n,self.extra_suffix), fig_spectra)
else:
pl.savefig(root+'/pdf', 'spectra_%s_%s_%s_%s_block%s.pdf'%(self.fig_prefix,dl_type,self.map_n,self.randn_marker,self.block_n), fig_spectra)
pl.savefig(root+'/jpg', 'spectra_%s_%s_%s_%s_block%s.jpg'%(self.fig_prefix,dl_type,self.map_n,self.randn_marker,self.block_n), fig_spectra)
def plot_all(self, savefig=False, root='figures', fwhm=None, aposize=1, nlb=None, bin_residual=True):
if self.ell is None:
self.get_dl(fwhm=fwhm, aposize=aposize, nlb=nlb, bin_residual=bin_residual)
self.plot_dl(savefig=savefig, root=root, dl_type='EE') #EE
self.plot_dl(savefig=savefig, root=root, dl_type='BB') #BB
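# Minimal usage sketch for PlotCMB_EEBB (illustrative only; `cmb_qu` and
# `cmb_ML_qu` are assumed to be (Q, U) map pairs already in memory; fwhm is
# required here since there is no Planck-beam fallback):
#
#     pe = PlotCMB_EEBB(cmb_qu, cmb_ML_qu, nside=512, block_n=0)
#     pe.mask_manual()                    # build the mask and the sky fraction
#     pe.plot_all(fwhm=5, savefig=False)  # EE and BB spectra with residuals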
#%% RMS of the residual maps
def mask_latitude(Map, nside=256, degree=30, inclusive=False, start_southPole=True):
'''
mask the map according to latitude
:param start_southPole: if True, start from the south pole; otherwise from the north pole
'''
npix = hp.nside2npix(nside)
if start_southPole:
theta, phi = hp.pix2ang(nside=nside, ipix=npix-1)
else:
theta, phi = hp.pix2ang(nside=nside, ipix=0)
idx_list = hp.query_disc(nside=nside, vec=hp.ang2vec(theta=theta, phi=phi), radius=degree/180.*np.pi, inclusive=inclusive)
mask = np.zeros(npix)
mask[idx_list] = 1
map_mask = Map * mask
return map_mask, idx_list
def get_RMS(Map, nside=256, degree_bin=10, inclusive=False):
rms_num = 180//degree_bin
rms_all = []
for i in range(rms_num):
mask_1, idx_1 = mask_latitude(Map, nside=nside, degree=degree_bin*i, inclusive=inclusive)
mask_2, idx_2 = mask_latitude(Map, nside=nside, degree=degree_bin*(i+1), inclusive=inclusive)
diff = mask_2 - mask_1
pix_num = len(idx_2) - len(idx_1)
rms_all.append(np.sqrt(np.sum(diff**2)/pix_num)) # RMS over the latitude band
rms_all = np.array(rms_all)
degs = np.arange(-90, 90, degree_bin) + degree_bin/2.
return degs, rms_all
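# Intended call pattern (a sketch; `residual` is assumed to be a full-sky
# HEALPix residual map at the matching nside):
#
#     degs, rms = get_RMS(residual, nside=256, degree_bin=10)
#     plt.plot(degs, rms)  # RMS of the residual as a function of latitude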
#%% calculate cosmic variance
def cosmic_variance(ell, get_std=True):
'''
sigma^2 = (delta_C_ell/C_ell)^2 = 2/(2*ell + 1)
'''
cv = 2/(2*ell + 1)
if get_std:
return np.sqrt(cv)
else:
return cv
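# For example, at ell = 100 the relative cosmic-variance limit on C_ell is
# sqrt(2/(2*100 + 1)) ~ 0.0997, i.e. roughly a 10% uncertainty per multipole:
#
#     ell = np.arange(2, 1501)
#     sigma_rel = cosmic_variance(ell)  # per-ell relative std of C_ell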
# --- example/test_gcd.py | terencehonles/pyannotate | Apache-2.0 ---
# Tests for gcd function.
from gcd import gcd
def test_gcd():
assert gcd(5, 10) == 5
assert gcd(12, 45) == 3
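# hypothetical extra case (not in the original suite): coprime inputs
assert gcd(7, 13) == 1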
# --- res/scripts/client/gui/mods/mod_ScoreViewTools.py | JoshuaEN/World-of-Tanks-ScoreViewTools-Data-Export-Mods | MIT ---
import ScoreViewTools
import ScoreViewTools_Init
print "SVT Loaded!"
def init():
pass
# --- mesh/__init__.py | ThierryBleau/mesh-python-client | BSD-3-Clause ---
from .mesh import MeshClient
# --- mmdeploy/codebase/mmdet/core/post_processing/__init__.py | aegis-rider/mmdeploy | Apache-2.0 ---
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_nms import _multiclass_nms, multiclass_nms
__all__ = ['multiclass_nms', '_multiclass_nms']
# --- test/core/gen/test_gen.py | bcdev/xcube | MIT ---
import os
import unittest
from typing import Any, Dict, Optional, Sequence, Tuple
import numpy as np
import xarray as xr
from test.core.gen.helpers import get_inputdata_path
from xcube.core.dsio import rimraf
from xcube.core.gen.config import get_config_dict
from xcube.core.gen.gen import gen_cube
def clean_up():
files = ['l2c-single.nc', 'l2c-single.zarr', 'l2c.nc', 'l2c.zarr', 'l2c_presorted.zarr', 'l2c_1x80x60.zarr',
'l2c_1x80x80.zarr', 'l2c_packed.zarr', 'l2c_packed_1x80x80.zarr']
for file in files:
rimraf(file)
rimraf(file + '.temp.nc') # May remain from Netcdf4DatasetIO.append()
rimraf(get_inputdata_path("input.txt"))
class DefaultProcessTest(unittest.TestCase):
def setUp(self):
clean_up()
def tearDown(self):
clean_up()
def test_process_inputs_single_nc(self):
status, output = gen_cube_wrapper(
[get_inputdata_path('20170101-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc')], 'l2c-single.nc')
self.assertEqual(True, status)
self.assertTrue('\nstep 9 of 9: creating input slice in l2c-single.nc...\n' in output)
with xr.open_dataset('l2c-single.nc') as dataset:
self.assert_cube_ok(dataset,
expected_time_dim=1,
expected_extra_attrs=dict(date_modified=None,
time_coverage_start='2016-12-31T12:00:00.000000000',
time_coverage_end='2017-01-01T12:00:00.000000000'))
def test_process_inputs_single_nc_processed_vars(self):
status, output = gen_cube_wrapper(
[get_inputdata_path('20170101-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc')],
'l2c-single.nc',
processed_variables=(
('analysed_sst', dict(valid_pixel_expression=None)),
('analysis_error', dict(valid_pixel_expression=None)),
('sea_ice_fraction', dict(valid_pixel_expression=None)),
('water_mask', dict(expression='(mask.sea or mask.lake) and not mask.ice', load=True)),
('ice_mask', dict(expression='mask.sea and mask.ice')),
('analysed_sst', dict(valid_pixel_expression='water_mask')),
('analysis_error', dict(valid_pixel_expression='water_mask')),
('sea_ice_fraction', dict(valid_pixel_expression='ice_mask')),
),
output_variables=(
('analysed_sst', dict(name='SST')),
('analysis_error', dict(name='SST_uncertainty')),
('sea_ice_fraction', None),
),
)
self.assertEqual(True, status)
self.assertTrue('\nstep 9 of 9: creating input slice in l2c-single.nc...\n' in output)
with xr.open_dataset('l2c-single.nc') as dataset:
self.assert_cube_ok(dataset,
expected_time_dim=1,
expected_output_vars=('SST', 'SST_uncertainty', 'sea_ice_fraction'),
expected_extra_attrs=dict(date_modified=None,
time_coverage_start='2016-12-31T12:00:00.000000000',
time_coverage_end='2017-01-01T12:00:00.000000000'))
def test_process_inputs_append_multiple_nc(self):
status, output = gen_cube_wrapper(
[get_inputdata_path('201701??-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc')], 'l2c.nc',
no_sort_mode=False)
self.assertEqual(True, status)
self.assertTrue('\nstep 9 of 9: creating input slice in l2c.nc...\n' in output)
self.assertTrue('\nstep 9 of 9: appending input slice to l2c.nc...\n' in output)
with xr.open_dataset('l2c.nc') as dataset:
self.assert_cube_ok(dataset,
expected_time_dim=3,
expected_extra_attrs=dict(date_modified=None,
time_coverage_start='2016-12-31T12:00:00.000000000',
time_coverage_end='2017-01-03T12:00:00.000000000'))
def test_process_inputs_single_zarr(self):
status, output = gen_cube_wrapper(
[get_inputdata_path('20170101-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc')], 'l2c-single.zarr')
self.assertEqual(True, status)
self.assertTrue('\nstep 9 of 9: creating input slice in l2c-single.zarr...\n' in output)
self.assert_cube_ok(xr.open_zarr('l2c-single.zarr'),
expected_time_dim=1,
expected_extra_attrs=dict(date_modified=None,
time_coverage_start='2016-12-31T12:00:00.000000000',
time_coverage_end='2017-01-01T12:00:00.000000000'))
def test_process_inputs_append_multiple_zarr(self):
status, output = gen_cube_wrapper(
[get_inputdata_path('201701??-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc')], 'l2c.zarr',
no_sort_mode=False)
self.assertEqual(True, status)
self.assertTrue('\nstep 9 of 9: creating input slice in l2c.zarr...\n' in output)
self.assertTrue('\nstep 9 of 9: appending input slice to l2c.zarr...\n' in output)
self.assert_cube_ok(xr.open_zarr('l2c.zarr'),
expected_time_dim=3,
expected_extra_attrs=dict(date_modified=None,
time_coverage_start='2016-12-31T12:00:00.000000000',
time_coverage_end='2017-01-03T12:00:00.000000000'))
def test_process_inputs_insert_multiple_zarr(self):
status, output = gen_cube_wrapper(
[get_inputdata_path('20170102-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc'),
get_inputdata_path('20170103-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc'),
get_inputdata_path('20170101-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc')], 'l2c_presorted.zarr')
self.assertEqual(True, status)
self.assertTrue('\nstep 9 of 9: creating input slice in l2c_presorted.zarr...\n' in output)
self.assertTrue('\nstep 9 of 9: appending input slice to l2c_presorted.zarr...\n' in output)
self.assertFalse('\nstep 9 of 9: inserting input slice before index 0 in l2c_presorted.zarr...\n' in output)
status, output = gen_cube_wrapper(
[get_inputdata_path('20170102-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc'),
get_inputdata_path('20170103-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc'),
get_inputdata_path('20170101-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc')], 'l2c.zarr',
no_sort_mode=True)
self.assertEqual(True, status)
self.assertTrue('\nstep 9 of 9: creating input slice in l2c.zarr...\n' in output)
self.assertTrue('\nstep 9 of 9: appending input slice to l2c.zarr...\n' in output)
self.assertTrue('\nstep 9 of 9: inserting input slice before index 0 in l2c.zarr...\n' in output)
self.assert_cube_ok(xr.open_zarr('l2c.zarr'),
expected_time_dim=3,
expected_extra_attrs=dict(date_modified=None,
time_coverage_start='2016-12-31T12:00:00.000000000',
time_coverage_end='2017-01-03T12:00:00.000000000'))
ds_presorted = xr.open_zarr('l2c_presorted.zarr')
ds_insert = xr.open_zarr('l2c.zarr')
np.testing.assert_allclose(ds_presorted.analysed_sst.values, ds_insert.analysed_sst.values)
def test_process_inputs_replace_multiple_zarr(self):
status, output = gen_cube_wrapper(
[get_inputdata_path('20170101-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc'),
get_inputdata_path('20170102-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc'),
get_inputdata_path('20170103-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc'),
get_inputdata_path('20170102-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc')], 'l2c.zarr',
no_sort_mode=True)
self.assertEqual(True, status)
self.assertTrue('\nstep 9 of 9: creating input slice in l2c.zarr...\n' in output)
self.assertTrue('\nstep 9 of 9: appending input slice to l2c.zarr...\n' in output)
self.assertTrue('\nstep 9 of 9: replacing input slice at index 1 in l2c.zarr...\n' in output)
self.assert_cube_ok(xr.open_zarr('l2c.zarr'),
expected_time_dim=3,
expected_extra_attrs=dict(date_modified=None,
time_coverage_start='2016-12-31T12:00:00.000000000',
time_coverage_end='2017-01-03T12:00:00.000000000'))
self.assertTrue(os.path.exists(os.path.join('l2c.zarr', '.zmetadata')))
def test_input_txt(self):
with open(os.path.join(os.path.dirname(__file__), 'inputdata', "input.txt"), "w+") as f:
for i in range(1, 4):
file_name = f"2017010{i}-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc"
file = get_inputdata_path(file_name)
f.write("%s\n" % file)
status, output = gen_cube_wrapper([get_inputdata_path('input.txt')], 'l2c.zarr', no_sort_mode=False)
self.assertEqual(True, status)
self.assert_cube_ok(xr.open_zarr('l2c.zarr'),
expected_time_dim=3,
expected_extra_attrs=dict(time_coverage_start='2016-12-31T12:00:00.000000000',
time_coverage_end='2017-01-03T12:00:00.000000000'))
self.assertTrue(os.path.exists(os.path.join('l2c.zarr', '.zmetadata')))
def test_process_chunked_zarr(self):
status, output = gen_cube_wrapper(
[get_inputdata_path('201701??-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc')],
'l2c.zarr',
no_sort_mode=False)
self.assertEqual(True, status)
self.assert_cube_ok(xr.open_zarr('l2c.zarr'),
expected_time_dim=3,
expected_extra_attrs=dict(time_coverage_start='2016-12-31T12:00:00.000000000',
time_coverage_end='2017-01-03T12:00:00.000000000'))
status, output = gen_cube_wrapper(
[get_inputdata_path('201701??-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc')],
'l2c_1x80x60.zarr',
output_writer_params={'chunksizes': {'lon': 80, 'lat': 60}},
no_sort_mode=False)
self.assertEqual(True, status)
self.assert_cube_ok(xr.open_zarr('l2c_1x80x60.zarr'),
expected_time_dim=3,
expected_extra_attrs=dict(time_coverage_start='2016-12-31T12:00:00.000000000',
time_coverage_end='2017-01-03T12:00:00.000000000'))
ds_unchunked = xr.open_zarr('l2c.zarr')
ds_chunked = xr.open_zarr('l2c_1x80x60.zarr')
np.testing.assert_allclose(ds_unchunked.analysed_sst.values, ds_chunked.analysed_sst.values)
self.assertEqual(((1, 1, 1), (60, 60, 60), (80, 80, 80, 80)), ds_chunked.analysed_sst.chunks)
status, output = gen_cube_wrapper(
[get_inputdata_path('201701??-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc')],
'l2c_1x80x80.zarr',
output_writer_params={'chunksizes': {'lon': 80, 'lat': 80}},
no_sort_mode=False)
self.assertEqual(True, status)
self.assert_cube_ok(xr.open_zarr('l2c_1x80x80.zarr'),
expected_time_dim=3,
expected_extra_attrs=dict(time_coverage_start='2016-12-31T12:00:00.000000000',
time_coverage_end='2017-01-03T12:00:00.000000000'))
ds_unchunked = xr.open_zarr('l2c.zarr')
ds_chunked = xr.open_zarr('l2c_1x80x80.zarr')
np.testing.assert_allclose(ds_unchunked.analysed_sst.values, ds_chunked.analysed_sst.values)
self.assertEqual(((1, 1, 1), (80, 80, 20), (80, 80, 80, 80)), ds_chunked.analysed_sst.chunks)
def test_process_packed_zarr(self):
status, output = gen_cube_wrapper(
[get_inputdata_path('201701??-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc')],
'l2c.zarr',
no_sort_mode=False)
self.assertEqual(True, status)
self.assert_cube_ok(xr.open_zarr('l2c.zarr'),
expected_time_dim=3,
expected_extra_attrs=dict(time_coverage_start='2016-12-31T12:00:00.000000000',
time_coverage_end='2017-01-03T12:00:00.000000000'))
status, output = gen_cube_wrapper(
[get_inputdata_path('201701??-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc')],
'l2c_packed.zarr',
output_writer_params={'packing': {'analysed_sst': {'scale_factor': 0.07324442274239326,
'add_offset': -300.0,
'dtype': 'uint16',
'_FillValue': 65535}}},
no_sort_mode=False)
self.assertEqual(True, status)
self.assert_cube_ok(xr.open_zarr('l2c_packed.zarr'),
expected_time_dim=3,
expected_extra_attrs=dict(time_coverage_start='2016-12-31T12:00:00.000000000',
time_coverage_end='2017-01-03T12:00:00.000000000'))
ds_unpacked = xr.open_zarr('l2c.zarr')
ds_packed = xr.open_zarr('l2c_packed.zarr')
np.testing.assert_almost_equal(ds_unpacked.analysed_sst.values, ds_packed.analysed_sst.values, decimal=1)
self.assertEqual(0.07324442274239326, ds_packed.analysed_sst.encoding['scale_factor'])
self.assertEqual(-300.0, ds_packed.analysed_sst.encoding['add_offset'])
self.assertEqual('uint16', ds_packed.analysed_sst.encoding['dtype'])
self.assertEqual(65535, ds_packed.analysed_sst.encoding['_FillValue'])
self.assertEqual((1, 180, 320), ds_packed.analysed_sst.encoding['chunks'])
status, output = gen_cube_wrapper(
[get_inputdata_path('201701??-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc')],
'l2c_packed_1x80x80.zarr',
output_writer_params={'chunksizes': {'lon': 80, 'lat': 80},
'packing': {'analysed_sst': {'scale_factor': 0.07324442274239326,
'add_offset': -300.0,
'dtype': 'uint16',
'_FillValue': 65535}}},
no_sort_mode=False)
self.assertEqual(True, status)
self.assert_cube_ok(xr.open_zarr('l2c_packed_1x80x80.zarr'),
expected_time_dim=3,
expected_extra_attrs=dict(time_coverage_start='2016-12-31T12:00:00.000000000',
time_coverage_end='2017-01-03T12:00:00.000000000'))
ds_unpacked = xr.open_zarr('l2c.zarr')
ds_packed = xr.open_zarr('l2c_packed_1x80x80.zarr')
np.testing.assert_almost_equal(ds_unpacked.analysed_sst.values, ds_packed.analysed_sst.values, decimal=1)
self.assertEqual(((1, 1, 1), (80, 80, 20), (80, 80, 80, 80)), ds_packed.analysed_sst.chunks)
self.assertEqual(0.07324442274239326, ds_packed.analysed_sst.encoding['scale_factor'])
self.assertEqual(-300.0, ds_packed.analysed_sst.encoding['add_offset'])
self.assertEqual('uint16', ds_packed.analysed_sst.encoding['dtype'])
self.assertEqual(65535, ds_packed.analysed_sst.encoding['_FillValue'])
self.assertEqual((1, 80, 80), ds_packed.analysed_sst.encoding['chunks'])
def test_insert_packed_zarr(self):
status, output = gen_cube_wrapper(
[get_inputdata_path('20170102-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc'),
get_inputdata_path('20170103-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc'),
get_inputdata_path('20170101-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc')], 'l2c_presorted.zarr',
output_writer_params={'chunksizes': {'lon': 80, 'lat': 80},
'packing': {'analysed_sst': {'scale_factor': 0.07324442274239326,
'add_offset': -300.0,
'dtype': 'uint16',
'_FillValue': 65535}}})
self.assertEqual(True, status)
self.assertTrue('\nstep 9 of 9: creating input slice in l2c_presorted.zarr...\n' in output)
self.assertTrue('\nstep 9 of 9: appending input slice to l2c_presorted.zarr...\n' in output)
self.assertFalse('\nstep 9 of 9: inserting input slice before index 0 in l2c_presorted.zarr...\n' in output)
status, output = gen_cube_wrapper(
[get_inputdata_path('20170102-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc'),
get_inputdata_path('20170103-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc'),
get_inputdata_path('20170101-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc')], 'l2c.zarr',
output_writer_params={'chunksizes': {'lon': 80, 'lat': 80},
'packing': {'analysed_sst': {'scale_factor': 0.07324442274239326,
'add_offset': -300.0,
'dtype': 'uint16',
'_FillValue': 65535}}},
no_sort_mode=True)
self.assertEqual(True, status)
self.assertTrue('\nstep 9 of 9: creating input slice in l2c.zarr...\n' in output)
self.assertTrue('\nstep 9 of 9: appending input slice to l2c.zarr...\n' in output)
self.assertTrue('\nstep 9 of 9: inserting input slice before index 0 in l2c.zarr...\n' in output)
self.assert_cube_ok(xr.open_zarr('l2c.zarr'),
expected_time_dim=3,
expected_extra_attrs=dict(date_modified=None,
time_coverage_start='2016-12-31T12:00:00.000000000',
time_coverage_end='2017-01-03T12:00:00.000000000'))
ds_presorted = xr.open_zarr('l2c_presorted.zarr')
ds_insert = xr.open_zarr('l2c.zarr')
np.testing.assert_allclose(ds_presorted.analysed_sst.values, ds_insert.analysed_sst.values)
def test_replace_packed_zarr(self):
status, output = gen_cube_wrapper(
[get_inputdata_path('20170102-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc'),
get_inputdata_path('20170103-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc'),
get_inputdata_path('20170101-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc')], 'l2c_presorted.zarr',
output_writer_params={'chunksizes': {'lon': 80, 'lat': 80},
'packing': {'analysed_sst': {'scale_factor': 0.07324442274239326,
'add_offset': -300.0,
'dtype': 'uint16',
'_FillValue': 65535}}})
self.assertEqual(True, status)
self.assertTrue('\nstep 9 of 9: creating input slice in l2c_presorted.zarr...\n' in output)
self.assertTrue('\nstep 9 of 9: appending input slice to l2c_presorted.zarr...\n' in output)
self.assertFalse('\nstep 9 of 9: inserting input slice before index 0 in l2c_presorted.zarr...\n' in output)
status, output = gen_cube_wrapper(
[get_inputdata_path('20170101-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc'),
get_inputdata_path('20170102-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc'),
get_inputdata_path('20170103-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc'),
get_inputdata_path('20170102-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc')], 'l2c.zarr',
output_writer_params={'chunksizes': {'lon': 80, 'lat': 80},
'packing': {'analysed_sst': {'scale_factor': 0.07324442274239326,
'add_offset': -300.0,
'dtype': 'uint16',
'_FillValue': 65535}}},
no_sort_mode=True)
self.assertEqual(True, status)
self.assertTrue('\nstep 9 of 9: creating input slice in l2c.zarr...\n' in output)
self.assertTrue('\nstep 9 of 9: appending input slice to l2c.zarr...\n' in output)
self.assertTrue('\nstep 9 of 9: replacing input slice at index 1 in l2c.zarr...\n' in output)
self.assert_cube_ok(xr.open_zarr('l2c.zarr'),
expected_time_dim=3,
expected_extra_attrs=dict(date_modified=None,
time_coverage_start='2016-12-31T12:00:00.000000000',
time_coverage_end='2017-01-03T12:00:00.000000000'))
ds_presorted = xr.open_zarr('l2c_presorted.zarr')
ds_insert = xr.open_zarr('l2c.zarr')
np.testing.assert_allclose(ds_presorted.analysed_sst.values, ds_insert.analysed_sst.values)
def test_process_compressed_zarr(self):
status, output = gen_cube_wrapper(
[get_inputdata_path('201701??-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc')],
'l2c.zarr',
no_sort_mode=False)
self.assertEqual(True, status)
self.assert_cube_ok(xr.open_zarr('l2c.zarr'),
expected_time_dim=3,
expected_extra_attrs=dict(time_coverage_start='2016-12-31T12:00:00.000000000',
time_coverage_end='2017-01-03T12:00:00.000000000'))
status, output = gen_cube_wrapper(
[get_inputdata_path('201701??-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc')],
'l2c_packed.zarr',
output_writer_params={'compressor': {'cname': 'zstd', 'clevel': 1, 'shuffle': 2}},
no_sort_mode=False)
self.assertEqual(True, status)
self.assert_cube_ok(xr.open_zarr('l2c_packed.zarr'),
expected_time_dim=3,
expected_extra_attrs=dict(time_coverage_start='2016-12-31T12:00:00.000000000',
time_coverage_end='2017-01-03T12:00:00.000000000'))
ds_default_compressor = xr.open_zarr('l2c.zarr')
ds_compressed = xr.open_zarr('l2c_packed.zarr')
np.testing.assert_allclose(ds_default_compressor.analysed_sst.values, ds_compressed.analysed_sst.values)
self.assertEqual("Blosc(cname='lz4', clevel=5, shuffle=SHUFFLE, blocksize=0)",
str(ds_default_compressor.analysed_sst.encoding['compressor']))
self.assertEqual("Blosc(cname='zstd', clevel=1, shuffle=BITSHUFFLE, blocksize=0)",
str(ds_compressed.analysed_sst.encoding['compressor']))
def assert_cube_ok(self, cube: xr.Dataset,
expected_time_dim: int,
expected_extra_attrs: Dict[str, Any],
expected_output_vars: Sequence[str] = ('analysed_sst',)):
self.assertEqual({'lat': 180, 'lon': 320, 'bnds': 2, 'time': expected_time_dim}, cube.dims)
self.assertEqual({'lon', 'lat', 'time', 'lon_bnds', 'lat_bnds', 'time_bnds'}, set(cube.coords))
self.assertEqual(set(expected_output_vars), set(cube.data_vars))
expected_attrs = dict(title='Test Cube',
project='xcube',
date_modified=None,
geospatial_lon_min=-4.0,
geospatial_lon_max=12.0,
geospatial_lon_resolution=0.05,
geospatial_lon_units='degrees_east',
geospatial_lat_min=47.0,
geospatial_lat_max=56.0,
geospatial_lat_resolution=0.05,
geospatial_lat_units='degrees_north')
expected_attrs.update(expected_extra_attrs)
for k, v in expected_attrs.items():
self.assertIn(k, cube.attrs)
if v is not None:
self.assertEqual(v, cube.attrs[k], msg=f'key {k!r}')
def test_handle_360_lon(self):
status, output = gen_cube_wrapper(
[get_inputdata_path('20170101120000-UKMO-L4_GHRSST-SSTfnd-OSTIAanom-GLOB-v02.0-fv02.0.nc')],
'l2c-single.zarr', no_sort_mode=False)
self.assertEqual(True, status)
ds = xr.open_zarr('l2c-single.zarr')
self.assertIn('lon', ds.coords)
self.assertFalse(np.any(ds.coords['lon'] > 180.))
def test_illegal_proc(self):
with self.assertRaises(ValueError) as e:
gen_cube_wrapper(
[get_inputdata_path('20170101120000-UKMO-L4_GHRSST-SSTfnd-OSTIAanom-GLOB-v02.0-fv02.0.nc')],
'l2c-single.zarr', no_sort_mode=False, input_processor_name="")
self.assertEqual('input_processor_name must not be empty', f'{e.exception}')
with self.assertRaises(ValueError) as e:
gen_cube_wrapper(
[get_inputdata_path('20170101120000-UKMO-L4_GHRSST-SSTfnd-OSTIAanom-GLOB-v02.0-fv02.0.nc')],
'l2c-single.zarr', no_sort_mode=False, input_processor_name='chris-proba')
self.assertEqual("Unknown input_processor_name 'chris-proba'", f'{e.exception}')
# noinspection PyShadowingBuiltins
def gen_cube_wrapper(input_paths,
output_path,
output_writer_params=None,
no_sort_mode=False,
input_processor_name=None,
processed_variables=None,
output_variables=(('analysed_sst', None),),
) -> Tuple[bool, Optional[str]]:
output = None
def output_monitor(msg):
nonlocal output
if output is None:
output = msg + '\n'
else:
output += msg + '\n'
config = get_config_dict(
input_paths=input_paths,
input_processor_name=input_processor_name,
output_path=output_path,
output_size='320,180',
output_region='-4,47,12,56',
output_resampling='Nearest',
no_sort_mode=no_sort_mode,
)
if processed_variables is not None:
config.update(processed_variables=processed_variables)
if output_variables is not None:
config.update(output_variables=output_variables)
if output_writer_params is not None:
config.update(output_writer_params=output_writer_params)
output_metadata = dict(
title='Test Cube',
project='xcube',
)
return gen_cube(dry_run=False, monitor=output_monitor, output_metadata=output_metadata, **config), output
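# Sketch of a direct call outside unittest (paths are illustrative; the wrapper
# returns a (status, captured_monitor_output) tuple):
#
#     ok, log = gen_cube_wrapper(
#         [get_inputdata_path('20170101-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc')],
#         'l2c-single.zarr')
#     assert ok and 'creating input slice' in log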
# --- voc_xml/__init__.py | JasonDoingGreat/voc_xml | MIT ---
from .gen_label_file import gen_labels
from .insert_labels import insert_labels
# --- fizzbuzz.py | james-prior/dojo-20180118-tdd-practice-jessie-fizzbuzz | MIT ---
class Fizzbuzz:
def __init__(self, max_number):
self.max_number = max_number
def all(self):
if self.max_number == 1:
return (1,)
return (1, 2)
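# For reference, a sketch of where the TDD iterations presumably head, assuming
# the classic FizzBuzz rules (3 -> 'Fizz', 5 -> 'Buzz', 15 -> 'FizzBuzz');
# this completed class is not part of the dojo code above:
#
# class FizzbuzzComplete:
#     def __init__(self, max_number):
#         self.max_number = max_number
#
#     def all(self):
#         result = []
#         for n in range(1, self.max_number + 1):
#             if n % 15 == 0:
#                 result.append('FizzBuzz')
#             elif n % 3 == 0:
#                 result.append('Fizz')
#             elif n % 5 == 0:
#                 result.append('Buzz')
#             else:
#                 result.append(n)
#         return tuple(result)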
# --- pystibmivb/service/__init__.py | danito/pystibmivb-1 | CC-BY-4.0 ---
from .STIBService import STIBService, InvalidLineFilterException, NoScheduleFromAPIException
from .ShapefileService import ShapefileService
# --- src/draw_plots.py | Adeon18/AlgorithmEfficiencyComparison | MIT ---
"""
This module draws plots using the data from the final_results.json file.
Basically a bunch of hardcoded plots (heavy duplication; a shared-helper refactor sketch is appended at the end of the file).
"""
import json
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 22})
def read_json(path):
"""
Read the json file from the path and turn it into dict.
Return the data
"""
with open(path, "r") as f:
data = json.load(f)
return data
def draw_plot_for_random():
"""
Draw the proper MatplotLib plots for the random arrays.
Is a bit hardcoded
"""
data = read_json("data/final_results.json")
selection_sort_times = []
selection_sort_comp = []
insertion_sort_times = []
insertion_sort_comp = []
shell_sort_times = []
shell_sort_comp = []
merge_sort_times = []
merge_sort_comp = []
# exponents: array sizes run from 2**7 to 2**15
sizes = [7, 8, 9, 10, 11, 12, 13, 14, 15]
for i in range(9):
selection_sort_times.append(round(data["selection_sort"]["random_average"][i][0], 5))
selection_sort_comp.append(data["selection_sort"]["random_average"][i][1])
insertion_sort_times.append(round(data["insertion_sort"]["random_average"][i][0], 5))
insertion_sort_comp.append(data["insertion_sort"]["random_average"][i][1])
shell_sort_times.append(round(data["shell_sort"]["random_average"][i][0], 5))
shell_sort_comp.append(data["shell_sort"]["random_average"][i][1])
merge_sort_times.append(round(data["merge_sort"]["random_average"][i][0], 5))
merge_sort_comp.append(data["merge_sort"]["random_average"][i][1])
plt.figure("Random Array Time")
plt.title("Random Array Average Time")
plt.plot(sizes, selection_sort_times, "k", linewidth=3.5, label="Selection Sort")
plt.plot(sizes, insertion_sort_times, "r", linewidth=3.5, label="Insertion Sort")
plt.plot(sizes, shell_sort_times, "b", linewidth=3.5, label="Shell Sort")
plt.plot(sizes, merge_sort_times, "g", linewidth=3.5, label="Merge Sort")
plt.ylabel('Time')
plt.xlabel('Size - 2^X')
plt.yscale("log")
plt.legend(prop={'size': 20}, loc="lower right")
plt.figure("Random Array Comparisons")
plt.title("Random Array Average Comparisons")
plt.plot(sizes, selection_sort_comp, "k", linewidth=3.5, label="Selection Sort")
plt.plot(sizes, insertion_sort_comp, "r", linewidth=3.5, label="Insertion Sort")
plt.plot(sizes, shell_sort_comp, "b", linewidth=3.5, label="Shell Sort")
plt.plot(sizes, merge_sort_comp, "g", linewidth=3.5, label="Merge Sort")
plt.ylabel('Comparisons')
plt.xlabel('Size - 2^X')
plt.yscale("log")
plt.legend(prop={'size': 20}, loc="lower right")
plt.show()
def draw_plot_for_sorted():
"""
Draw the proper MatplotLib plots for the sorted arrays.
Is a bit hardcoded.
"""
data = read_json("data/final_results.json")
selection_sort_times = []
selection_sort_comp = []
insertion_sort_times = []
insertion_sort_comp = []
shell_sort_times = []
shell_sort_comp = []
merge_sort_times = []
merge_sort_comp = []
# exponents: array sizes run from 2**7 to 2**15
sizes = [7, 8, 9, 10, 11, 12, 13, 14, 15]
for i in range(9):
selection_sort_times.append(round(data["selection_sort"]["sorted"][i][0], 5))
selection_sort_comp.append(data["selection_sort"]["sorted"][i][1])
insertion_sort_times.append(round(data["insertion_sort"]["sorted"][i][0], 5))
insertion_sort_comp.append(data["insertion_sort"]["sorted"][i][1])
shell_sort_times.append(round(data["shell_sort"]["sorted"][i][0], 5))
shell_sort_comp.append(data["shell_sort"]["sorted"][i][1])
merge_sort_times.append(round(data["merge_sort"]["sorted"][i][0], 5))
merge_sort_comp.append(data["merge_sort"]["sorted"][i][1])
plt.figure("Sorted Array Time")
plt.title("Sorted Array Time")
plt.plot(sizes, selection_sort_times, "k", linewidth=3.5, label="Selection Sort")
plt.plot(sizes, insertion_sort_times, "r", linewidth=3.5, label="Insertion Sort")
plt.plot(sizes, shell_sort_times, "b", linewidth=3.5, label="Shell Sort")
plt.plot(sizes, merge_sort_times, "g", linewidth=3.5, label="Merge Sort")
plt.ylabel('Time')
plt.xlabel('Size - 2^X')
plt.yscale("log")
plt.legend(prop={'size': 20}, loc="lower right")
plt.figure("Sorted Array Comparisons")
plt.title("Sorted Array Comparisons")
plt.plot(sizes, selection_sort_comp, "k", linewidth=3.5, label="Selection Sort")
plt.plot(sizes, insertion_sort_comp, "r", linewidth=3.5, label="Insertion Sort")
plt.plot(sizes, shell_sort_comp, "b", linewidth=3.5, label="Shell Sort")
plt.plot(sizes, merge_sort_comp, "g", linewidth=3.5, label="Merge Sort")
plt.ylabel('Comparisons')
plt.xlabel('Size - 2^X')
plt.yscale("log")
plt.legend(prop={'size': 20}, loc="lower right")
plt.show()
def draw_plot_for_sorted_inverse():
"""
Draw the proper MatplotLib plots for the inversely sorted arrays.
Is a bit hardcoded
"""
data = read_json("data/final_results.json")
selection_sort_times = []
selection_sort_comp = []
insertion_sort_times = []
insertion_sort_comp = []
shell_sort_times = []
shell_sort_comp = []
merge_sort_times = []
merge_sort_comp = []
# exponents: array sizes run from 2**7 to 2**15
sizes = [7, 8, 9, 10, 11, 12, 13, 14, 15]
for i in range(9):
selection_sort_times.append(round(data["selection_sort"]["sorted_inverse"][i][0], 5))
selection_sort_comp.append(data["selection_sort"]["sorted_inverse"][i][1])
insertion_sort_times.append(round(data["insertion_sort"]["sorted_inverse"][i][0], 5))
insertion_sort_comp.append(data["insertion_sort"]["sorted_inverse"][i][1])
shell_sort_times.append(round(data["shell_sort"]["sorted_inverse"][i][0], 5))
shell_sort_comp.append(data["shell_sort"]["sorted_inverse"][i][1])
merge_sort_times.append(round(data["merge_sort"]["sorted_inverse"][i][0], 5))
merge_sort_comp.append(data["merge_sort"]["sorted_inverse"][i][1])
plt.figure("Sorted Inversely Array Time")
plt.title("Sorted Inversely Array Time")
plt.plot(sizes, selection_sort_times, "k", linewidth=3.5, label="Selection Sort")
plt.plot(sizes, insertion_sort_times, "r", linewidth=3.5, label="Insertion Sort")
plt.plot(sizes, shell_sort_times, "b", linewidth=3.5, label="Shell Sort")
plt.plot(sizes, merge_sort_times, "g", linewidth=3.5, label="Merge Sort")
plt.ylabel('Time')
plt.xlabel('Size - 2^X')
plt.yscale("log")
plt.legend(prop={'size': 20}, loc="lower right")
plt.figure("Sorted Inversely Array Comparisons")
plt.title("Sorted Inversely Array Comparisons")
plt.plot(sizes, selection_sort_comp, "k", linewidth=3.5, label="Selection Sort")
plt.plot(sizes, insertion_sort_comp, "r", linewidth=3.5, label="Insertion Sort")
plt.plot(sizes, shell_sort_comp, "b", linewidth=3.5, label="Shell Sort")
plt.plot(sizes, merge_sort_comp, "g", linewidth=3.5, label="Merge Sort")
plt.ylabel('Comparisons')
plt.xlabel('Size - 2^X')
plt.yscale("log")
plt.legend(prop={'size': 20}, loc="lower right")
plt.show()
def draw_plot_for_123():
"""
Draw the proper MatplotLib plots for the shuffled 123 arrays.
Is a bit hardcoded.
"""
data = read_json("data/final_results.json")
selection_sort_times = []
selection_sort_comp = []
insertion_sort_times = []
insertion_sort_comp = []
shell_sort_times = []
shell_sort_comp = []
merge_sort_times = []
merge_sort_comp = []
# exponents: array sizes run from 2**7 to 2**15
sizes = [7, 8, 9, 10, 11, 12, 13, 14, 15]
for i in range(9):
selection_sort_times.append(round(data["selection_sort"]["with_repetitions"][i][0], 5))
selection_sort_comp.append(data["selection_sort"]["with_repetitions"][i][1])
insertion_sort_times.append(round(data["insertion_sort"]["with_repetitions"][i][0], 5))
insertion_sort_comp.append(data["insertion_sort"]["with_repetitions"][i][1])
shell_sort_times.append(round(data["shell_sort"]["with_repetitions"][i][0], 5))
shell_sort_comp.append(data["shell_sort"]["with_repetitions"][i][1])
merge_sort_times.append(round(data["merge_sort"]["with_repetitions"][i][0], 5))
merge_sort_comp.append(data["merge_sort"]["with_repetitions"][i][1])
plt.figure("{1, 2, 3} Array Time")
plt.title("{1, 2, 3} Array Time")
plt.plot(sizes, selection_sort_times, "k", linewidth=3.5, label="Selection Sort")
plt.plot(sizes, insertion_sort_times, "r", linewidth=3.5, label="Insertion Sort")
plt.plot(sizes, shell_sort_times, "b", linewidth=3.5, label="Shell Sort")
plt.plot(sizes, merge_sort_times, "g", linewidth=3.5, label="Merge Sort")
plt.ylabel('Time')
plt.xlabel('Size - 2^X')
plt.yscale("log")
plt.legend(prop={'size': 20}, loc="lower right")
plt.figure("{1, 2, 3} Array Comparisons")
plt.title("{1, 2, 3} Array Comparisons")
plt.plot(sizes, selection_sort_comp, "k", linewidth=3.5, label="Selection Sort")
plt.plot(sizes, insertion_sort_comp, "r", linewidth=3.5, label="Insertion Sort")
plt.plot(sizes, shell_sort_comp, "b", linewidth=3.5, label="Shell Sort")
plt.plot(sizes, merge_sort_comp, "g", linewidth=3.5, label="Merge Sort")
plt.ylabel('Comparisons')
plt.xlabel('Size - 2^X')
plt.yscale("log")
plt.legend(prop={'size': 20}, loc="lower right")
plt.show()
def draw_plots():
"""
Draw all the plots at once.
"""
draw_plot_for_random()
draw_plot_for_sorted()
draw_plot_for_sorted_inverse()
draw_plot_for_123()
draw_plots()
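# The four draw_plot_for_* functions above differ only in the JSON key and the
# figure titles. A sketch of a shared helper they could collapse into (assuming
# the same final_results.json layout; the name draw_plot_for is hypothetical,
# and only the time panel is shown for brevity):
#
# def draw_plot_for(key, title):
#     data = read_json("data/final_results.json")
#     sizes = [7, 8, 9, 10, 11, 12, 13, 14, 15]  # exponents of 2
#     plt.figure(title + " Time")
#     plt.title(title + " Time")
#     for algo, style in [("selection_sort", "k"), ("insertion_sort", "r"),
#                         ("shell_sort", "b"), ("merge_sort", "g")]:
#         times = [round(data[algo][key][i][0], 5) for i in range(9)]
#         plt.plot(sizes, times, style, linewidth=3.5,
#                  label=algo.replace("_", " ").title())
#     plt.ylabel('Time')
#     plt.xlabel('Size - 2^X')
#     plt.yscale("log")
#     plt.legend(prop={'size': 20}, loc="lower right")
#     plt.show()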
# --- ds_util/__init__.py | airportpeople/dstools | MIT ---
from ._data_eng import *
from ._pysup import *
from ._segment import *
from ._requests import *
# --- fbpic/boundaries/field_buffer_handling.py | lauridsj/fbpic | BSD-3-Clause-LBNL ---
# Copyright 2016, FBPIC contributors
# Authors: Remi Lehe, Manuel Kirchen
# License: 3-Clause-BSD-LBNL
"""
This file is part of the Fourier-Bessel Particle-In-Cell code (FB-PIC)
It defines the structure necessary to handle MPI buffers for the fields.
"""
import numpy as np
# Check if CUDA is available, then import CUDA functions
from fbpic.utils.cuda import cuda_installed
if cuda_installed:
from fbpic.utils.cuda import cuda, cuda_tpb_bpg_2d
from .cuda_methods import \
copy_vec_to_gpu_buffer, \
replace_vec_from_gpu_buffer, \
add_vec_from_gpu_buffer, \
copy_scal_to_gpu_buffer, \
replace_scal_from_gpu_buffer, \
add_scal_from_gpu_buffer, \
copy_pml_to_gpu_buffer, \
replace_pml_from_gpu_buffer
class BufferHandler(object):
"""
Class that handles the buffers when exchanging the fields
between MPI domains.
"""
def __init__( self, n_guard, Nr, Nm, left_proc, right_proc, use_pml ):
"""
Initialize the guard cell buffers for the fields.
These buffers are used in order to group the MPI exchanges.
Parameters
----------
n_guard: int
Number of guard cells
Nr: int
Number of points in the radial direction
Nm: int
Number of azimuthal modes
left_proc, right_proc: int or None
            Rank of the proc to the left and to the right, respectively
(None for open boundary)
use_pml: bool
Whether to use PML fields
"""
# Register parameters
self.Nr = Nr
self.Nm = Nm
self.n_guard = n_guard
self.left_proc = left_proc
self.right_proc = right_proc
# Shortcut
ng = self.n_guard
# Get number of field components for E and B
if use_pml:
n_fld = 5 # e.g. Er, Et, Ez, Er_pml, Et_pml
else:
n_fld = 3 # e.g. Er, Et, Ez
        # Allocate buffer arrays that are sent via MPI to exchange
# the fields between domains (either replacing or adding fields)
# Buffers are allocated for the left and right side of the domain
# Allocate buffers on the CPU
if cuda_installed:
            # Use cuda.pinned_array so that the CPU array is page-locked
            # (it cannot be swapped out to disk, and the GPU can access it via DMA)
alloc_cpu = cuda.pinned_array
else:
# Use regular numpy arrays
alloc_cpu = np.empty
# Allocate buffers of different size, for the different exchange types
self.send_l = {
'E:replace': alloc_cpu( (n_fld*Nm, ng, Nr), dtype=np.complex128),
'B:replace': alloc_cpu( (n_fld*Nm, ng, Nr), dtype=np.complex128),
'J:add' : alloc_cpu( ( 3*Nm, 2*ng, Nr), dtype=np.complex128),
'rho:add' : alloc_cpu( ( Nm, 2*ng, Nr), dtype=np.complex128)}
self.send_r = {
'E:replace': alloc_cpu( (n_fld*Nm, ng, Nr), dtype=np.complex128),
'B:replace': alloc_cpu( (n_fld*Nm, ng, Nr), dtype=np.complex128),
'J:add' : alloc_cpu( ( 3*Nm, 2*ng, Nr), dtype=np.complex128),
'rho:add' : alloc_cpu( ( Nm, 2*ng, Nr), dtype=np.complex128)}
self.recv_l = {
'E:replace': alloc_cpu( (n_fld*Nm, ng, Nr), dtype=np.complex128),
'B:replace': alloc_cpu( (n_fld*Nm, ng, Nr), dtype=np.complex128),
'J:add' : alloc_cpu( ( 3*Nm, 2*ng, Nr), dtype=np.complex128),
'rho:add' : alloc_cpu( ( Nm, 2*ng, Nr), dtype=np.complex128)}
self.recv_r = {
'E:replace': alloc_cpu( (n_fld*Nm, ng, Nr), dtype=np.complex128),
'B:replace': alloc_cpu( (n_fld*Nm, ng, Nr), dtype=np.complex128),
'J:add' : alloc_cpu( ( 3*Nm, 2*ng, Nr), dtype=np.complex128),
'rho:add' : alloc_cpu( ( Nm, 2*ng, Nr), dtype=np.complex128)}
# Allocate buffers on the GPU, for the different exchange types
if cuda_installed:
self.d_send_l = { key: cuda.to_device(value) for key, value in \
self.send_l.items() }
self.d_send_r = { key: cuda.to_device(value) for key, value in \
self.send_r.items() }
self.d_recv_l = { key: cuda.to_device(value) for key, value in \
self.recv_l.items() }
self.d_recv_r = { key: cuda.to_device(value) for key, value in \
self.recv_r.items() }
def handle_vec_buffer(self, grid_r, grid_t, grid_z,
pml_r, pml_t, method, exchange_type,
use_cuda, before_sending=False,
after_receiving=False, gpudirect=False ):
"""
Vector field buffer handling
1) Copies data from the field grids to the MPI sending buffers
-- or --
        2) Replaces or adds the MPI receiving buffers to the field grids
For method 'replace':
Either copy the inner part of the domain to the sending buffer
        for a vector field, or copy the receiving buffer for a vector field
        into the guard cells of the domain.
For method 'add':
Either copy the inner part and the guard region of the domain to the
        sending buffer for a vector field, or add the receiving buffer for the
        vector field to the guard cells and the inner region of the domain.
Depending on whether the field data is initially on the CPU
or on the GPU, this function will do the appropriate exchange
with the device.
Parameters
----------
grid_r, grid_t, grid_z: lists of 2darrays
(One element per azimuthal mode)
The 2d arrays represent the fields on the interpolation grid
pml_r, pml_t: lists of 2darrays, or None
The 2d arrays that represent the PML components (if present)
on the interpolation grid
method: str
Can either be 'replace' or 'add' depending on the type
of field exchange that is needed
exchange_type: str
Can either be 'E:replace', 'B:replace', 'J:add' or 'rho:add'
Determines which buffer array is used.
use_cuda: bool
Whether the simulation runs on GPUs. If True,
the buffers are copied to the GPU arrays after the MPI exchange.
before_sending: bool
Whether to copy the inner part of the domain to the sending buffer
after_receiving: bool
Whether to copy the receiving buffer to the guard cells
gpudirect: bool
- if `gpudirect` is True:
Uses the CUDA GPUDirect feature on clusters
that have a working CUDA-aware MPI implementation.
- if `gpudirect` is False: (default)
Standard MPI communication is performed when using CUDA
for computation. This involves a manual GPU to CPU memory
copy before exchanging information between MPI domains.
"""
# Define region that is copied to or from the buffer
# depending on the method used.
        if method == 'replace':
            nz_start = self.n_guard
            nz_end = 2*self.n_guard
        elif method == 'add':
            nz_start = 0
            nz_end = 2*self.n_guard
        # Whether to send to the left and/or right neighbor
copy_left = (self.left_proc is not None)
copy_right = (self.right_proc is not None)
Nz = grid_r[0].shape[0]
# When using the GPU
if use_cuda:
# Calculate the number of blocks and threads per block
dim_grid_2d, dim_block_2d = cuda_tpb_bpg_2d(
nz_end - nz_start, self.Nr )
if before_sending:
# Copy the inner regions of the domain to the buffers
for m in range(self.Nm):
if pml_r is None:
# Copy only the regular components
copy_vec_to_gpu_buffer[ dim_grid_2d, dim_block_2d ](
self.d_send_l[exchange_type],
self.d_send_r[exchange_type],
grid_r[m], grid_t[m], grid_z[m], m,
copy_left, copy_right, nz_start, nz_end )
else:
# Copy regular components + PML components
copy_pml_to_gpu_buffer[ dim_grid_2d, dim_block_2d ](
self.d_send_l[exchange_type],
self.d_send_r[exchange_type],
grid_r[m], grid_t[m], grid_z[m],
pml_r[m], pml_t[m], m,
copy_left, copy_right, nz_start, nz_end )
# If GPUDirect with CUDA-aware MPI is not used,
# copy the GPU buffers to the sending CPU buffers
if not gpudirect:
if copy_left:
self.d_send_l[exchange_type].copy_to_host(
self.send_l[exchange_type] )
if copy_right:
self.d_send_r[exchange_type].copy_to_host(
self.send_r[exchange_type] )
elif after_receiving:
# If GPUDirect with CUDA-aware MPI is not used,
# copy the CPU receiving buffers to the GPU buffers
if not gpudirect:
if copy_left:
self.d_recv_l[exchange_type].copy_to_device(
self.recv_l[exchange_type] )
if copy_right:
self.d_recv_r[exchange_type].copy_to_device(
self.recv_r[exchange_type] )
if method == 'replace':
# Replace the guard cells of the domain with the buffers
for m in range(self.Nm):
if pml_r is None:
# Copy only the regular components
replace_vec_from_gpu_buffer \
[dim_grid_2d, dim_block_2d](
self.d_recv_l[exchange_type],
self.d_recv_r[exchange_type],
grid_r[m], grid_t[m], grid_z[m], m,
copy_left, copy_right, nz_start, nz_end )
else:
# Copy regular components + PML components
replace_pml_from_gpu_buffer \
[ dim_grid_2d, dim_block_2d ](
self.d_recv_l[exchange_type],
self.d_recv_r[exchange_type],
grid_r[m], grid_t[m], grid_z[m],
pml_r[m], pml_t[m], m,
copy_left, copy_right, nz_start, nz_end )
elif method == 'add':
# Add the buffers to the domain
for m in range(self.Nm):
add_vec_from_gpu_buffer[ dim_grid_2d, dim_block_2d ](
self.d_recv_l[exchange_type],
self.d_recv_r[exchange_type],
grid_r[m], grid_t[m], grid_z[m], m,
copy_left, copy_right, nz_start, nz_end )
# Without GPU
else:
if before_sending:
send_l = self.send_l[exchange_type]
send_r = self.send_r[exchange_type]
# Copy the inner regions of the domain to the buffers
if copy_left:
for m in range(self.Nm):
if pml_r is None:
# Copy only the regular components
send_l[3*m+0,:,:]=grid_r[m][nz_start:nz_end,:]
send_l[3*m+1,:,:]=grid_t[m][nz_start:nz_end,:]
send_l[3*m+2,:,:]=grid_z[m][nz_start:nz_end,:]
else:
# Copy regular components + PML components
send_l[5*m+0,:,:]=grid_r[m][nz_start:nz_end,:]
send_l[5*m+1,:,:]=grid_t[m][nz_start:nz_end,:]
send_l[5*m+2,:,:]=grid_z[m][nz_start:nz_end,:]
send_l[5*m+3,:,:]=pml_r[m][nz_start:nz_end,:]
send_l[5*m+4,:,:]=pml_t[m][nz_start:nz_end,:]
if copy_right:
for m in range(self.Nm):
if pml_r is None:
# Copy only the regular components
send_r[3*m+0,:,:]=grid_r[m][Nz-nz_end:Nz-nz_start,:]
send_r[3*m+1,:,:]=grid_t[m][Nz-nz_end:Nz-nz_start,:]
send_r[3*m+2,:,:]=grid_z[m][Nz-nz_end:Nz-nz_start,:]
else:
# Copy regular components + PML components
send_r[5*m+0,:,:]=grid_r[m][Nz-nz_end:Nz-nz_start,:]
send_r[5*m+1,:,:]=grid_t[m][Nz-nz_end:Nz-nz_start,:]
send_r[5*m+2,:,:]=grid_z[m][Nz-nz_end:Nz-nz_start,:]
send_r[5*m+3,:,:]=pml_r[m][Nz-nz_end:Nz-nz_start,:]
send_r[5*m+4,:,:]=pml_t[m][Nz-nz_end:Nz-nz_start,:]
elif after_receiving:
recv_l = self.recv_l[exchange_type]
recv_r = self.recv_r[exchange_type]
if method == 'replace':
# Replace the guard cells of the domain with the buffers
if copy_left:
if pml_r is None:
# Copy only the regular components
for m in range(self.Nm):
grid_r[m][:nz_end-nz_start,:]=recv_l[3*m+0,:,:]
grid_t[m][:nz_end-nz_start,:]=recv_l[3*m+1,:,:]
grid_z[m][:nz_end-nz_start,:]=recv_l[3*m+2,:,:]
else:
# Copy regular components + PML components
for m in range(self.Nm):
grid_r[m][:nz_end-nz_start,:]=recv_l[5*m+0,:,:]
grid_t[m][:nz_end-nz_start,:]=recv_l[5*m+1,:,:]
grid_z[m][:nz_end-nz_start,:]=recv_l[5*m+2,:,:]
pml_r[m][:nz_end-nz_start,:]=recv_l[5*m+3,:,:]
pml_t[m][:nz_end-nz_start,:]=recv_l[5*m+4,:,:]
if copy_right:
for m in range(self.Nm):
if pml_r is None:
# Copy only the regular components
grid_r[m][-(nz_end-nz_start):,:]=recv_r[3*m+0,:,:]
grid_t[m][-(nz_end-nz_start):,:]=recv_r[3*m+1,:,:]
grid_z[m][-(nz_end-nz_start):,:]=recv_r[3*m+2,:,:]
else:
# Copy regular components + PML components
grid_r[m][-(nz_end-nz_start):,:]=recv_r[5*m+0,:,:]
grid_t[m][-(nz_end-nz_start):,:]=recv_r[5*m+1,:,:]
grid_z[m][-(nz_end-nz_start):,:]=recv_r[5*m+2,:,:]
pml_r[m][-(nz_end-nz_start):,:]=recv_r[5*m+3,:,:]
pml_t[m][-(nz_end-nz_start):,:]=recv_r[5*m+4,:,:]
elif method == 'add':
# Add buffers to the domain
if copy_left:
for m in range(self.Nm):
grid_r[m][:nz_end-nz_start,:]+=recv_l[3*m+0,:,:]
grid_t[m][:nz_end-nz_start,:]+=recv_l[3*m+1,:,:]
grid_z[m][:nz_end-nz_start,:]+=recv_l[3*m+2,:,:]
if copy_right:
for m in range(self.Nm):
grid_r[m][-(nz_end-nz_start):,:]+=recv_r[3*m+0,:,:]
grid_t[m][-(nz_end-nz_start):,:]+=recv_r[3*m+1,:,:]
grid_z[m][-(nz_end-nz_start):,:]+=recv_r[3*m+2,:,:]
def handle_scal_buffer( self, grid, method, exchange_type, use_cuda,
before_sending=False, after_receiving=False,
gpudirect=False ):
"""
Scalar field buffer handling
1) Copies data from the field grid to the MPI sending buffers
-- or --
        2) Replaces or adds the MPI receiving buffers to the field grid
For method 'replace':
Either copy the inner part of the domain to the sending buffer
        for a scalar field, or copy the receiving buffer for a scalar field
        into the guard cells of the domain.
For method 'add':
Either copy the inner part and the guard region of the domain to the
        sending buffer for a scalar field, or add the receiving buffer for the
scalar field to the guard cells and the inner region of the domain.
Depending on whether the field data is initially on the CPU
or on the GPU, this function will do the appropriate exchange
with the device.
Parameters
----------
grid: list of 2darrays
(One element per azimuthal mode)
The 2d arrays represent the fields on the interpolation grid
        method: str
            Can either be 'replace' or 'add' depending on the type
            of field exchange that is needed
        exchange_type: str
            Can either be 'E:replace', 'B:replace', 'J:add' or 'rho:add'
            Determines which buffer array is used.
use_cuda: bool
Whether the simulation runs on GPUs. If True,
the buffers are copied to the GPU arrays after the MPI exchange.
before_sending: bool
Whether to copy the inner part of the domain to the sending buffer
after_receiving: bool
Whether to copy the receiving buffer to the guard cells
gpudirect: bool
- if `gpudirect` is True:
Uses the CUDA GPUDirect feature on clusters
that have a working CUDA-aware MPI implementation.
- if `gpudirect` is False: (default)
Standard MPI communication is performed when using CUDA
for computation. This involves a manual GPU to CPU memory
copy before exchanging information between MPI domains.
"""
# Define region that is copied to or from the buffer
# depending on the method used.
        if method == 'replace':
            nz_start = self.n_guard
            nz_end = 2*self.n_guard
        elif method == 'add':
            nz_start = 0
            nz_end = 2*self.n_guard
        # Whether to send to the left and/or right neighbor
copy_left = (self.left_proc is not None)
copy_right = (self.right_proc is not None)
Nz = grid[0].shape[0]
# When using the GPU
if use_cuda:
# Calculate the number of blocks and threads per block
dim_grid_2d, dim_block_2d = cuda_tpb_bpg_2d(
nz_end - nz_start, self.Nr )
if before_sending:
# Copy the inner regions of the domain to the buffers
for m in range(self.Nm):
copy_scal_to_gpu_buffer[ dim_grid_2d, dim_block_2d ](
self.d_send_l[exchange_type],
self.d_send_r[exchange_type],
grid[m], m, copy_left, copy_right, nz_start, nz_end)
# If GPUDirect with CUDA-aware MPI is not used,
# copy the GPU buffers to the sending CPU buffers
if not gpudirect:
if copy_left:
self.d_send_l[exchange_type].copy_to_host(
self.send_l[exchange_type] )
if copy_right:
self.d_send_r[exchange_type].copy_to_host(
self.send_r[exchange_type] )
elif after_receiving:
# If GPUDirect with CUDA-aware MPI is not used,
# copy the CPU receiving buffers to the GPU buffers
if not gpudirect:
if copy_left:
self.d_recv_l[exchange_type].copy_to_device(
self.recv_l[exchange_type] )
if copy_right:
self.d_recv_r[exchange_type].copy_to_device(
self.recv_r[exchange_type] )
if method == 'replace':
# Replace the guard cells of the domain with the buffers
for m in range(self.Nm):
replace_scal_from_gpu_buffer[dim_grid_2d, dim_block_2d](
self.d_recv_l[exchange_type],
self.d_recv_r[exchange_type],
grid[m], m, copy_left, copy_right, nz_start, nz_end)
elif method == 'add':
# Add the buffers to the domain
for m in range(self.Nm):
add_scal_from_gpu_buffer[ dim_grid_2d, dim_block_2d ](
self.d_recv_l[exchange_type],
self.d_recv_r[exchange_type],
grid[m], m, copy_left, copy_right, nz_start, nz_end)
# Without GPU
else:
if before_sending:
send_l = self.send_l[exchange_type]
send_r = self.send_r[exchange_type]
# Copy the inner regions of the domain to the buffer
if copy_left:
for m in range(self.Nm):
send_l[m,:,:]=grid[m][nz_start:nz_end,:]
if copy_right:
for m in range(self.Nm):
send_r[m,:,:]=grid[m][Nz-nz_end:Nz-nz_start,:]
elif after_receiving:
recv_l = self.recv_l[exchange_type]
recv_r = self.recv_r[exchange_type]
if method == 'replace':
# Replace the guard cells of the domain with the buffers
if copy_left:
for m in range(self.Nm):
grid[m][:nz_end-nz_start,:]=recv_l[m,:,:]
if copy_right:
for m in range(self.Nm):
grid[m][-(nz_end-nz_start):,:]=recv_r[m,:,:]
                elif method == 'add':
# Add buffers to the domain
if copy_left:
for m in range(self.Nm):
grid[m][:nz_end-nz_start,:]+=recv_l[m,:,:]
if copy_right:
for m in range(self.Nm):
grid[m][-(nz_end-nz_start):,:]+=recv_r[m,:,:]
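# Construction sketch (hypothetical sizes; the shapes follow directly from the
# allocations in __init__ above):
#   handler = BufferHandler(n_guard=64, Nr=200, Nm=2,
#                           left_proc=0, right_proc=2, use_pml=False)
#   handler.send_l['E:replace'].shape  -> (3*Nm, n_guard, Nr)   == (6, 64, 200)
#   handler.send_l['J:add'].shape      -> (3*Nm, 2*n_guard, Nr) == (6, 128, 200)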
| 46.207171
| 82
| 0.506725
| 3,021
| 23,196
| 3.681893
| 0.084078
| 0.036501
| 0.023285
| 0.030208
| 0.847523
| 0.821811
| 0.812281
| 0.805538
| 0.795739
| 0.777308
| 0
| 0.013492
| 0.405673
| 23,196
| 501
| 83
| 46.299401
| 0.793341
| 0.293973
| 0
| 0.638376
| 0
| 0
| 0.011637
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.01107
| false
| 0
| 0.01476
| 0
| 0.02952
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a2255d4e5465ab92063c784cba4c5bbd008fde3e
| 132
|
py
|
Python
|
app/config/settings.py
|
SLB974/GrandPyBot-dev
|
7a0268d4ffa58c37eed37253c6afb00874dbabe4
|
[
"MIT"
] | null | null | null |
app/config/settings.py
|
SLB974/GrandPyBot-dev
|
7a0268d4ffa58c37eed37253c6afb00874dbabe4
|
[
"MIT"
] | null | null | null |
app/config/settings.py
|
SLB974/GrandPyBot-dev
|
7a0268d4ffa58c37eed37253c6afb00874dbabe4
|
[
"MIT"
] | null | null | null |
from decouple import config
GOOGLE_API_KEY = config("GOOGLE_API_KEY")
FLASK_DEBUG = config("FLASK_DEBUG", default=True, cast=bool)
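# Example .env file that decouple's config() would read for the two settings
# above (illustrative placeholder values, not real credentials):
#   GOOGLE_API_KEY=replace-with-your-key
#   FLASK_DEBUG=False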
| 26.4
| 60
| 0.795455
| 20
| 132
| 4.95
| 0.65
| 0.242424
| 0.30303
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098485
| 132
| 4
| 61
| 33
| 0.831933
| 0
| 0
| 0
| 0
| 0
| 0.189394
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
bf40a7b284f1a558500a7aaf7953f012d2934715
| 3,573
|
py
|
Python
|
tests/test_quests/test_monster.py
|
Xelaadryth/Xelabot
|
89c1833a4d357d185ee96538e71fbd883d437227
|
[
"MIT"
] | 1
|
2017-02-16T15:35:01.000Z
|
2017-02-16T15:35:01.000Z
|
tests/test_quests/test_monster.py
|
xelaadryth/Xelabot
|
89c1833a4d357d185ee96538e71fbd883d437227
|
[
"MIT"
] | null | null | null |
tests/test_quests/test_monster.py
|
xelaadryth/Xelabot
|
89c1833a4d357d185ee96538e71fbd883d437227
|
[
"MIT"
] | null | null | null |
import unittest
from unittest.mock import patch
from quest.quests import monster
import settings
from tests.test_quests.base_class import TestBase
class TestMonster(TestBase):
quest_constructor = monster.Monster
num_start_players = 1
def test_timeout(self):
self.quest_manager.start_quest(self.quest)
# Simulate timing out and the callback for quest_advance getting called
self.quest_manager.kill_quest_advance_timer()
self.quest_manager.quest_advance()
self.assertEqual(self.player_manager.get_gold(self.player1), self.starting_gold - monster.GOLD_TIMEOUT_PENALTY)
self.assertEqual(self.player_manager.get_exp(self.player1), self.starting_exp)
def test_attack_win_hard(self):
self.quest_manager.start_quest(self.quest)
with patch('quest.quests.monster.randint', return_value=settings.LEVEL_CAP):
self.quest_manager.commands.execute_command(self.player1, '!attack')
self.assertEqual(self.player_manager.get_gold(self.player1),
self.starting_gold + monster.GOLD_RISKY_REWARD_BIG + settings.LEVEL_CAP)
self.assertEqual(self.player_manager.get_exp(self.player1), self.starting_exp + monster.EXP_RISKY_REWARD_BIG)
def test_attack_win(self):
self.quest_manager.start_quest(self.quest)
self.quest_manager.commands.execute_command(self.player1, '!attack')
self.assertEqual(self.player_manager.get_gold(self.player1), self.starting_gold + monster.GOLD_RISKY_REWARD)
self.assertEqual(self.player_manager.get_exp(self.player1), self.starting_exp + monster.EXP_RISKY_REWARD)
def test_attack_lose(self):
self.quest_manager.start_quest(self.quest)
with patch('quest.quests.monster.randint', return_value=-1):
self.quest_manager.commands.execute_command(self.player1, '!attack')
self.assertEqual(self.player_manager.get_gold(self.player1) - 1,
self.starting_gold - monster.GOLD_RISKY_PENALTY)
self.assertEqual(self.player_manager.get_exp(self.player1), self.starting_exp)
def test_attack_lose_hard(self):
self.quest_manager.start_quest(self.quest)
with patch('quest.quests.monster.randint', return_value=-settings.LEVEL_CAP):
self.quest_manager.commands.execute_command(self.player1, '!attack')
self.assertEqual(self.player_manager.get_gold(self.player1) - settings.LEVEL_CAP,
self.starting_gold - monster.GOLD_RISKY_PENALTY)
self.assertEqual(self.player_manager.get_exp(self.player1), self.starting_exp)
def test_flee_win(self):
self.quest_manager.start_quest(self.quest)
with patch('quest.quests.monster.getrandbits', return_value=1):
self.quest_manager.commands.execute_command(self.player1, '!flee')
self.assertEqual(self.player_manager.get_gold(self.player1), self.starting_gold + monster.GOLD_SAFE_REWARD)
self.assertEqual(self.player_manager.get_exp(self.player1), self.starting_exp + monster.EXP_SAFE_REWARD)
def test_flee_lose(self):
self.quest_manager.start_quest(self.quest)
with patch('quest.quests.monster.getrandbits', return_value=0):
self.quest_manager.commands.execute_command(self.player1, '!flee')
self.assertEqual(self.player_manager.get_gold(self.player1), self.starting_gold - monster.GOLD_SAFE_REWARD)
self.assertEqual(self.player_manager.get_exp(self.player1), self.starting_exp)
if __name__ == '__main__':
unittest.main()
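# Hedged reading of the patches above, since monster's internals are not shown
# in this excerpt: quest.quests.monster.randint appears to set the attack roll
# (a roll of settings.LEVEL_CAP wins big, a negative roll loses), while
# getrandbits acts as the coin flip for '!flee' (1 = escape with the safe
# reward, 0 = caught and penalised).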
| 44.111111
| 119
| 0.735236
| 466
| 3,573
| 5.349785
| 0.143777
| 0.079422
| 0.09627
| 0.140393
| 0.815483
| 0.815483
| 0.815483
| 0.815483
| 0.79984
| 0.781789
| 0
| 0.008364
| 0.163448
| 3,573
| 80
| 120
| 44.6625
| 0.825694
| 0.019312
| 0
| 0.351852
| 0
| 0
| 0.055397
| 0.042262
| 0
| 0
| 0
| 0
| 0.259259
| 1
| 0.12963
| false
| 0
| 0.092593
| 0
| 0.277778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bf573aaeb708df700e59eff31afd6777a2a2da17
| 30
|
py
|
Python
|
app/__init__.py
|
iliadmitriev/auth-api
|
efa885b0054a3b3c6394d692a9655614652da147
|
[
"MIT"
] | 3
|
2021-12-26T00:24:22.000Z
|
2022-03-24T05:05:34.000Z
|
app/__init__.py
|
iliadmitriev/auth-api
|
efa885b0054a3b3c6394d692a9655614652da147
|
[
"MIT"
] | 113
|
2021-08-19T11:57:49.000Z
|
2022-03-31T17:24:49.000Z
|
app/__init__.py
|
iliadmitriev/auth-api
|
efa885b0054a3b3c6394d692a9655614652da147
|
[
"MIT"
] | 1
|
2021-11-16T16:00:51.000Z
|
2021-11-16T16:00:51.000Z
|
from app.auth import init_app
| 15
| 29
| 0.833333
| 6
| 30
| 4
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 30
| 1
| 30
| 30
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bf5f3eeecac67935210bfc52ae1f992428c9fa56
| 2,169
|
py
|
Python
|
whats_in_the_cupboard/search/sample_data/__init__.py
|
brandonholderman/whats_in_the_cupboard
|
8f8b0abe8b94547fa488db689261a4f475a24779
|
[
"MIT"
] | null | null | null |
whats_in_the_cupboard/search/sample_data/__init__.py
|
brandonholderman/whats_in_the_cupboard
|
8f8b0abe8b94547fa488db689261a4f475a24779
|
[
"MIT"
] | 10
|
2020-02-11T23:36:20.000Z
|
2022-03-11T23:57:52.000Z
|
whats_in_the_cupboard/search/sample_data/__init__.py
|
brandonholderman/whats_in_the_cupboard
|
8f8b0abe8b94547fa488db689261a4f475a24779
|
[
"MIT"
] | null | null | null |
MOCK_DATA = [{
"id": "1",
"label": "Chicken and Avocado Salad",
"favorites": True,
"image_url": "static/assets/img/chicken_salad.png",
"directions_url": "2014-01-22",
"ingredients": ["Chicken", "Avocado", "Lettuce", "Beans", "Corn", "Lemon", "Olive Oil"],
"calories": 120.0,
},
{
"id": "2",
"label": "Chicken and Avocado Salad",
"favorites": True,
"image_url": "static/assets/img/chicken_salad.png",
"directions_url": "https://natashaskitchen.com/avocado-chicken-salad-recipe/",
"ingredients": ["Chicken", "Avocado", "Lettuce", "Beans", "Corn", "Lemon", "Olive Oil"],
"calories": 120.0,
},
{
"id": "3",
"label": "Chicken and Avocado Salad",
"favorites": True,
"image_url": "static/assets/img/chicken_salad.png",
"directions_url": "https://natashaskitchen.com/avocado-chicken-salad-recipe/",
"ingredients": ["Chicken", "Avocado", "Lettuce", "Beans", "Corn", "Lemon", "Olive Oil"],
"calories": 120.0,
},
{
"id": "4",
"label": "Chicken and Avocado Salad",
"favorites": True,
"image_url": "static/assets/img/chicken_salad.png",
"directions_url": "https://natashaskitchen.com/avocado-chicken-salad-recipe/",
"ingredients": ["Chicken", "Avocado", "Lettuce", "Beans", "Corn", "Lemon", "Olive Oil"],
"calories": 120.0,
},
{
"id": "5",
"label": "Chicken and Avocado Salad",
"favorites": True,
"image_url": "static/assets/img/chicken_salad.png",
"directions_url": "https://natashaskitchen.com/avocado-chicken-salad-recipe/",
"ingredients": ["Chicken", "Avocado", "Lettuce", "Beans", "Corn", "Lemon", "Olive Oil"],
"calories": 120.0,
},
{
"id": "6",
"label": "Chicken and Avocado Salad",
"favorites": True,
"image_url": "static/assets/img/chicken_salad.png",
"directions_url": "https://natashaskitchen.com/avocado-chicken-salad-recipe/",
"ingredients": ["Chicken", "Avocado", "Lettuce", "Beans", "Corn", "Lemon", "Olive Oil"],
"calories": 120.0,
},
]
| 38.052632
| 96
| 0.569387
| 226
| 2,169
| 5.380531
| 0.176991
| 0.108553
| 0.074013
| 0.108553
| 0.980263
| 0.980263
| 0.980263
| 0.980263
| 0.980263
| 0.980263
| 0
| 0.022606
| 0.224988
| 2,169
| 56
| 97
| 38.732143
| 0.700773
| 0
| 0
| 0.636364
| 0
| 0
| 0.586906
| 0.096819
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bf8ddcaf693d215dc06a19d87c8273e707b8f05b
| 1,451
|
py
|
Python
|
evasao/Rewards.py
|
lmlima/DropoutRL
|
00db2e901c320cf12c60c5039561999d45591bd1
|
[
"BSD-3-Clause"
] | null | null | null |
evasao/Rewards.py
|
lmlima/DropoutRL
|
00db2e901c320cf12c60c5039561999d45591bd1
|
[
"BSD-3-Clause"
] | null | null | null |
evasao/Rewards.py
|
lmlima/DropoutRL
|
00db2e901c320cf12c60c5039561999d45591bd1
|
[
"BSD-3-Clause"
] | null | null | null |
class SparsePReward:
"""
SparsePReward
    Sparse reward with positive returns only; it depends only on the current state.
    Returns 1.0 at the last state of the episode if the student completed the course;
    returns 0.0 for every other state.
"""
@staticmethod
def reward(dados, seq_id, seq_number):
seq = dados.loc[[seq_id]]
max_seq_number = seq.index.get_level_values(1).max()
if (seq_number == max_seq_number) and (seq.loc[(seq_id, seq_number)]["FORMA_EVASAO_last"] == "Conclusão"):
return 1.0
return 0.0
class SparseNPReward:
"""
SparseNPReward
    Sparse reward with both positive and negative returns; it depends only on the current state.
    Returns 1.0 at the last state of the episode if the student COMPLETED the course;
    returns -1.0 at the last state of the episode if the student DROPPED OUT of the course;
    returns 0.0 for every other (non-terminal) state.
"""
@staticmethod
def reward(dados, seq_id, seq_number):
seq = dados.loc[[seq_id]]
max_seq_number = seq.index.get_level_values(1).max()
if seq_number == max_seq_number:
if seq.loc[(seq_id, seq_number)]["FORMA_EVASAO_last"] == "Conclusão":
return 1.0
elif seq.loc[(seq_id, seq_number)]["FORMA_EVASAO_last"] == "Desistência":
return -1.0
return 0.0
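# Usage sketch (assumes `dados` is a pandas DataFrame indexed by a
# (seq_id, seq_number) MultiIndex with a "FORMA_EVASAO_last" column, as the
# .loc calls above imply; the names below are illustrative):
#   r = SparseNPReward.reward(dados, seq_id="student_42", seq_number=8)
#   # r is 1.0 or -1.0 only at the terminal step of the episode, else 0.0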
| 33.744186
| 114
| 0.636802
| 199
| 1,451
| 4.482412
| 0.286432
| 0.110987
| 0.044843
| 0.078475
| 0.779148
| 0.779148
| 0.752242
| 0.752242
| 0.752242
| 0.713004
| 0
| 0.020794
| 0.270848
| 1,451
| 42
| 115
| 34.547619
| 0.822306
| 0.375603
| 0
| 0.631579
| 0
| 0
| 0.098644
| 0
| 0
| 0
| 0
| 0.047619
| 0
| 1
| 0.105263
| false
| 0
| 0
| 0
| 0.473684
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bfa9c93763a00161c8d74e93e3a6113f9f7c2ef1
| 59
|
py
|
Python
|
py_tdlib/constructors/log_out.py
|
Mr-TelegramBot/python-tdlib
|
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
|
[
"MIT"
] | 24
|
2018-10-05T13:04:30.000Z
|
2020-05-12T08:45:34.000Z
|
py_tdlib/constructors/log_out.py
|
MrMahdi313/python-tdlib
|
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
|
[
"MIT"
] | 3
|
2019-06-26T07:20:20.000Z
|
2021-05-24T13:06:56.000Z
|
py_tdlib/constructors/log_out.py
|
MrMahdi313/python-tdlib
|
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
|
[
"MIT"
] | 5
|
2018-10-05T14:29:28.000Z
|
2020-08-11T15:04:10.000Z
|
from ..factory import Method
class logOut(Method):
pass
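# Hedged note: the Method base class imported from ..factory presumably turns
# the subclass name ("logOut") into the corresponding TDLib request; the class
# body is empty because logOut takes no parameters.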
| 9.833333
| 28
| 0.745763
| 8
| 59
| 5.5
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169492
| 59
| 5
| 29
| 11.8
| 0.897959
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
44a00d5d8c0a542ddad8e1067e8c9698446439b3
| 110,808
|
py
|
Python
|
gs1/constants/ai_table.py
|
TrustCodes/gs1-compression
|
74c20141ab57025bda21092fbfaa922f8ca0a7ec
|
[
"Apache-2.0"
] | 3
|
2021-03-11T23:35:21.000Z
|
2021-08-04T04:16:12.000Z
|
gs1/constants/ai_table.py
|
TrustCodes/gs1-compression
|
74c20141ab57025bda21092fbfaa922f8ca0a7ec
|
[
"Apache-2.0"
] | null | null | null |
gs1/constants/ai_table.py
|
TrustCodes/gs1-compression
|
74c20141ab57025bda21092fbfaa922f8ca0a7ec
|
[
"Apache-2.0"
] | null | null | null |
"""list of all GS1 Application Identifiers as defined in
GS1 General Specifications v18."""
import re
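# `re` is imported here; its use is not visible in this excerpt, but a natural
# application is matching an entry's "regex" against a candidate value.
# Illustrative sketch (hypothetical usage, not part of the original module):
#   gtin = next(e for e in AI_TABLE if e["ai"] == "01")
#   assert re.fullmatch(gtin["regex"], "09506000134352")  # a 14-digit GTIN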
AI_TABLE = [{"title": "Serial Shipping Container Code (SSCC) ", "label": "SSCC",
"shortcode": "sscc", "ai": "00", "format": "N18", "type": "I",
"fixedLength": True, "checkDigit": "L", "regex": "(\\d{18})"},
{"title": "Global Trade Item Number (GTIN)", "label": "GTIN",
"shortcode": "gtin", "ai": "01", "format": "N14", "type": "I",
"fixedLength": True, "checkDigit": "L",
"qualifiers": ["22", "10", "21"], "regex": "(\\d{12,14}|\\d{8})"},
{"title": "GTIN of contained trade items", "label": "CONTENT",
"ai": "02", "format": "N14", "type": "D", "fixedLength": True,
"checkDigit": "L", "regex": "(\\d{14})"},
{"title": "Batch or lot number", "label": "BATCH/LOT",
"shortcode": "lot", "ai": "10", "format": "X..20", "type": "Q",
"fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,20})"},
{"title": "Production date (YYMMDD)", "label": "PROD DATE",
"ai": "11", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Due date (YYMMDD)", "label": "DUE DATE", "ai": "12",
"format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Packaging date (YYMMDD)", "label": "PACK DATE",
"ai": "13", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"}, {"title": "Best before date (YYMMDD)",
"label": "BEST BEFORE or BEST BY",
"ai": "15", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"},
{"title": "Sell by date (YYMMDD)", "label": "SELL BY", "ai": "16",
"format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Expiration date (YYMMDD)", "label": "USE BY OR EXPIRY",
"shortcode": "exp", "ai": "17", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"},
{"title": "Internal product variant", "label": "VARIANT",
"ai": "20", "format": "N2", "type": "D", "fixedLength": True,
"regex": "(\\d{2})"},
{"title": "Serial number", "label": "SERIAL", "shortcode": "ser",
"ai": "21", "format": "X..20", "type": "Q", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,20})"},
{"title": "Consumer product variant", "label": "CPV",
"shortcode": "cpv", "ai": "22", "format": "X..20", "type": "Q",
"fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,20})"},
{
"title": "Additional product identification assigned by the manufacturer",
"label": "ADDITIONAL ID", "ai": "240", "format": "X..30",
"type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,30})"},
{"title": "Customer part number", "label": "CUST. PART NO.",
"ai": "241", "format": "X..30", "type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,30})"},
{"title": "Made-to-Order variation number", "label": "MTO VARIANT",
"ai": "242", "format": "N..6", "type": "D", "fixedLength": False,
"regex": "(\\d{0,6})"},
{"title": "Packaging component number", "label": "PCN", "ai": "243",
"format": "X..20", "type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,20})"},
{"title": "Secondary serial number", "label": "SECONDARY SERIAL",
"ai": "250", "format": "X..30", "type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,30})"},
{"title": "Reference to source entity", "label": "REF. TO SOURCE ",
"ai": "251", "format": "X..30", "type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,30})"},
{"title": "Global Document Type Identifier (GDTI)", "label": "GDTI",
"shortcode": "gdti", "ai": "253", "format": "N13+X..17",
"type": "I", "fixedLength": False, "checkDigit": "13",
"regex": "(\\d{13})([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,17})"},
{"title": "GLN extension component",
"label": "GLN EXTENSION COMPONENT", "shortcode": "glnx",
"ai": "254", "format": "X..20", "type": "Q", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,20})"},
{"title": "Global Coupon Number (GCN)", "label": "GCN",
"shortcode": "gcn", "ai": "255", "format": "N13+N..12",
"type": "I", "fixedLength": False, "checkDigit": "13",
"regex": "(\\d{13})(\\d{0,12})"},
{"title": "Variable count of items (variable measure trade item)",
"label": "VAR. COUNT", "ai": "30", "format": "N..8", "type": "D",
"fixedLength": False, "regex": "(\\d{0,8})"},
{"title": "Net weight, kilograms (variable measure trade item)",
"label": "NET WEIGHT (kg)", "ai": "3100", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net weight, kilograms (variable measure trade item)",
"label": "NET WEIGHT (kg)", "ai": "3101", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net weight, kilograms (variable measure trade item)",
"label": "NET WEIGHT (kg)", "ai": "3102", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net weight, kilograms (variable measure trade item)",
"label": "NET WEIGHT (kg)", "ai": "3103", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net weight, kilograms (variable measure trade item)",
"label": "NET WEIGHT (kg)", "ai": "3104", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net weight, kilograms (variable measure trade item)",
"label": "NET WEIGHT (kg)", "ai": "3105", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Length or first dimension, metres (variable measure trade item)",
"label": "LENGTH (m)", "ai": "3110", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Length or first dimension, metres (variable measure trade item)",
"label": "LENGTH (m)", "ai": "3111", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Length or first dimension, metres (variable measure trade item)",
"label": "LENGTH (m)", "ai": "3112", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Length or first dimension, metres (variable measure trade item)",
"label": "LENGTH (m)", "ai": "3113", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Length or first dimension, metres (variable measure trade item)",
"label": "LENGTH (m)", "ai": "3114", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Length or first dimension, metres (variable measure trade item)",
"label": "LENGTH (m)", "ai": "3115", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Width, diameter, or second dimension, metres (variable measure trade item)",
"label": "WIDTH (m)", "ai": "3120", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Width, diameter, or second dimension, metres (variable measure trade item)",
"label": "WIDTH (m)", "ai": "3121", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Width, diameter, or second dimension, metres (variable measure trade item)",
"label": "WIDTH (m)", "ai": "3122", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Width, diameter, or second dimension, metres (variable measure trade item)",
"label": "WIDTH (m)", "ai": "3123", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Width, diameter, or second dimension, metres (variable measure trade item)",
"label": "WIDTH (m)", "ai": "3124", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Width, diameter, or second dimension, metres (variable measure trade item)",
"label": "WIDTH (m)", "ai": "3125", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Depth, thickness, height, or third dimension, metres (variable measure trade item)",
"label": "HEIGHT (m)", "ai": "3130", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Depth, thickness, height, or third dimension, metres (variable measure trade item)",
"label": "HEIGHT (m)", "ai": "3131", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Depth, thickness, height, or third dimension, metres (variable measure trade item)",
"label": "HEIGHT (m)", "ai": "3132", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Depth, thickness, height, or third dimension, metres (variable measure trade item)",
"label": "HEIGHT (m)", "ai": "3133", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Depth, thickness, height, or third dimension, metres (variable measure trade item)",
"label": "HEIGHT (m)", "ai": "3134", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Depth, thickness, height, or third dimension, metres (variable measure trade item)",
"label": "HEIGHT (m)", "ai": "3135", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Area, square metres (variable measure trade item)",
"label": "AREA (m^2)", "ai": "3140", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"},
{"title": "Area, square metres (variable measure trade item)",
"label": "AREA (m^2)", "ai": "3141", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"},
{"title": "Area, square metres (variable measure trade item)",
"label": "AREA (m^2)", "ai": "3142", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"},
{"title": "Area, square metres (variable measure trade item)",
"label": "AREA (m^2)", "ai": "3143", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"},
{"title": "Area, square metres (variable measure trade item)",
"label": "AREA (m^2)", "ai": "3144", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"},
{"title": "Area, square metres (variable measure trade item)",
"label": "AREA (m^2)", "ai": "3145", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, litres (variable measure trade item)",
"label": "NET VOLUME (l)", "ai": "3150", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, litres (variable measure trade item)",
"label": "NET VOLUME (l)", "ai": "3151", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, litres (variable measure trade item)",
"label": "NET VOLUME (l)", "ai": "3152", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, litres (variable measure trade item)",
"label": "NET VOLUME (l)", "ai": "3153", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, litres (variable measure trade item)",
"label": "NET VOLUME (l)", "ai": "3154", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, litres (variable measure trade item)",
"label": "NET VOLUME (l)", "ai": "3155", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, cubic metres (variable measure trade item)",
"label": "NET VOLUME (m^3)", "ai": "3160", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, cubic metres (variable measure trade item)",
"label": "NET VOLUME (m^3)", "ai": "3161", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, cubic metres (variable measure trade item)",
"label": "NET VOLUME (m^3)", "ai": "3162", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, cubic metres (variable measure trade item)",
"label": "NET VOLUME (m^3)", "ai": "3163", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, cubic metres (variable measure trade item)",
"label": "NET VOLUME (m^3)", "ai": "3164", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, cubic metres (variable measure trade item)",
"label": "NET VOLUME (m^3)", "ai": "3165", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net weight, pounds (variable measure trade item)",
"label": "NET WEIGHT (lb)", "ai": "3200", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net weight, pounds (variable measure trade item)",
"label": "NET WEIGHT (lb)", "ai": "3201", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net weight, pounds (variable measure trade item)",
"label": "NET WEIGHT (lb)", "ai": "3202", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net weight, pounds (variable measure trade item)",
"label": "NET WEIGHT (lb)", "ai": "3203", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net weight, pounds (variable measure trade item)",
"label": "NET WEIGHT (lb)", "ai": "3204", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net weight, pounds (variable measure trade item)",
"label": "NET WEIGHT (lb)", "ai": "3205", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Length or first dimension, inches (variable measure trade item)",
"label": "LENGTH (in)", "ai": "3210", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Length or first dimension, inches (variable measure trade item)",
"label": "LENGTH (in)", "ai": "3211", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Length or first dimension, inches (variable measure trade item)",
"label": "LENGTH (in)", "ai": "3212", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Length or first dimension, inches (variable measure trade item)",
"label": "LENGTH (in)", "ai": "3213", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Length or first dimension, inches (variable measure trade item)",
"label": "LENGTH (in)", "ai": "3214", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Length or first dimension, inches (variable measure trade item)",
"label": "LENGTH (in)", "ai": "3215", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Length or first dimension, feet (variable measure trade item)",
"label": "LENGTH (ft)", "ai": "3220", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Length or first dimension, feet (variable measure trade item)",
"label": "LENGTH (ft)", "ai": "3221", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Length or first dimension, feet (variable measure trade item)",
"label": "LENGTH (ft)", "ai": "3222", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Length or first dimension, feet (variable measure trade item)",
"label": "LENGTH (ft)", "ai": "3223", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Length or first dimension, feet (variable measure trade item)",
"label": "LENGTH (ft)", "ai": "3224", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Length or first dimension, feet (variable measure trade item)",
"label": "LENGTH (ft)", "ai": "3225", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Length or first dimension, yards (variable measure trade item)",
"label": "LENGTH (yd)", "ai": "3230", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Length or first dimension, yards (variable measure trade item)",
"label": "LENGTH (yd)", "ai": "3231", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Length or first dimension, yards (variable measure trade item)",
"label": "LENGTH (yd)", "ai": "3232", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Length or first dimension, yards (variable measure trade item)",
"label": "LENGTH (yd)", "ai": "3233", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Length or first dimension, yards (variable measure trade item)",
"label": "LENGTH (yd)", "ai": "3234", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Length or first dimension, yards (variable measure trade item)",
"label": "LENGTH (yd)", "ai": "3235", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Width, diameter, or second dimension, inches (variable measure trade item)",
"label": "WIDTH (in)", "ai": "3240", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Width, diameter, or second dimension, inches (variable measure trade item)",
"label": "WIDTH (in)", "ai": "3241", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Width, diameter, or second dimension, inches (variable measure trade item)",
"label": "WIDTH (in)", "ai": "3242", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Width, diameter, or second dimension, inches (variable measure trade item)",
"label": "WIDTH (in)", "ai": "3243", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Width, diameter, or second dimension, inches (variable measure trade item)",
"label": "WIDTH (in)", "ai": "3244", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Width, diameter, or second dimension, inches (variable measure trade item)",
"label": "WIDTH (in)", "ai": "3245", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Width, diameter, or second dimension, feet (variable measure trade item)",
"label": "WIDTH (ft)", "ai": "3250", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Width, diameter, or second dimension, feet (variable measure trade item)",
"label": "WIDTH (ft)", "ai": "3251", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Width, diameter, or second dimension, feet (variable measure trade item)",
"label": "WIDTH (ft)", "ai": "3252", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Width, diameter, or second dimension, feet (variable measure trade item)",
"label": "WIDTH (ft)", "ai": "3253", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Width, diameter, or second dimension, feet (variable measure trade item)",
"label": "WIDTH (ft)", "ai": "3254", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Width, diameter, or second dimension, feet (variable measure trade item)",
"label": "WIDTH (ft)", "ai": "3255", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Width, diameter, or second dimension, yards (variable measure trade item)",
"label": "WIDTH (yd)", "ai": "3260", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Width, diameter, or second dimension, yards (variable measure trade item)",
"label": "WIDTH (yd)", "ai": "3261", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Width, diameter, or second dimension, yards (variable measure trade item)",
"label": "WIDTH (yd)", "ai": "3262", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Width, diameter, or second dimension, yards (variable measure trade item)",
"label": "WIDTH (yd)", "ai": "3263", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Width, diameter, or second dimension, yards (variable measure trade item)",
"label": "WIDTH (yd)", "ai": "3264", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Width, diameter, or second dimension, yards (variable measure trade item)",
"label": "WIDTH (yd)", "ai": "3265", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Depth, thickness, height, or third dimension, inches (variable measure trade item)",
"label": "HEIGHT (in)", "ai": "3270", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Depth, thickness, height, or third dimension, inches (variable measure trade item)",
"label": "HEIGHT (in)", "ai": "3271", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Depth, thickness, height, or third dimension, inches (variable measure trade item)",
"label": "HEIGHT (in)", "ai": "3272", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Depth, thickness, height, or third dimension, inches (variable measure trade item)",
"label": "HEIGHT (in)", "ai": "3273", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Depth, thickness, height, or third dimension, inches (variable measure trade item)",
"label": "HEIGHT (in)", "ai": "3274", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Depth, thickness, height, or third dimension, inches (variable measure trade item)",
"label": "HEIGHT (in)", "ai": "3275", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Depth, thickness, height, or third dimension, feet (variable measure trade item)",
"label": "HEIGHT (ft)", "ai": "3280", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Depth, thickness, height, or third dimension, feet (variable measure trade item)",
"label": "HEIGHT (ft)", "ai": "3281", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Depth, thickness, height, or third dimension, feet (variable measure trade item)",
"label": "HEIGHT (ft)", "ai": "3282", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Depth, thickness, height, or third dimension, feet (variable measure trade item)",
"label": "HEIGHT (ft)", "ai": "3283", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Depth, thickness, height, or third dimension, feet (variable measure trade item)",
"label": "HEIGHT (ft)", "ai": "3284", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Depth, thickness, height, or third dimension, feet (variable measure trade item)",
"label": "HEIGHT (ft)", "ai": "3285", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Depth, thickness, height, or third dimension, yards (variable measure trade item)",
"label": "HEIGHT (yd)", "ai": "3290", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Depth, thickness, height, or third dimension, yards (variable measure trade item)",
"label": "HEIGHT (yd)", "ai": "3291", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Depth, thickness, height, or third dimension, yards (variable measure trade item)",
"label": "HEIGHT (yd)", "ai": "3292", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Depth, thickness, height, or third dimension, yards (variable measure trade item)",
"label": "HEIGHT (yd)", "ai": "3293", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Depth, thickness, height, or third dimension, yards (variable measure trade item)",
"label": "HEIGHT (yd)", "ai": "3294", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Depth, thickness, height, or third dimension, yards (variable measure trade item)",
"label": "HEIGHT (yd)", "ai": "3295", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic weight, kilograms",
"label": "GROSS WEIGHT (kg)", "ai": "3300", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic weight, kilograms",
"label": "GROSS WEIGHT (kg)", "ai": "3301", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic weight, kilograms",
"label": "GROSS WEIGHT (kg)", "ai": "3302", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic weight, kilograms",
"label": "GROSS WEIGHT (kg)", "ai": "3303", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic weight, kilograms",
"label": "GROSS WEIGHT (kg)", "ai": "3304", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic weight, kilograms",
"label": "GROSS WEIGHT (kg)", "ai": "3305", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Length or first dimension, metres",
"label": "LENGTH (m), log", "ai": "3310", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Length or first dimension, metres",
"label": "LENGTH (m), log", "ai": "3311", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Length or first dimension, metres",
"label": "LENGTH (m), log", "ai": "3312", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Length or first dimension, metres",
"label": "LENGTH (m), log", "ai": "3313", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Length or first dimension, metres",
"label": "LENGTH (m), log", "ai": "3314", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Length or first dimension, metres",
"label": "LENGTH (m), log", "ai": "3315", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Width, diameter, or second dimension, metres",
"label": "WIDTH (m), log", "ai": "3320", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Width, diameter, or second dimension, metres",
"label": "WIDTH (m), log", "ai": "3321", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Width, diameter, or second dimension, metres",
"label": "WIDTH (m), log", "ai": "3322", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Width, diameter, or second dimension, metres",
"label": "WIDTH (m), log", "ai": "3323", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Width, diameter, or second dimension, metres",
"label": "WIDTH (m), log", "ai": "3324", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Width, diameter, or second dimension, metres",
"label": "WIDTH (m), log", "ai": "3325", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Depth, thickness, height, or third dimension, metres",
"label": "HEIGHT (m), log", "ai": "3330", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Depth, thickness, height, or third dimension, metres",
"label": "HEIGHT (m), log", "ai": "3331", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Depth, thickness, height, or third dimension, metres",
"label": "HEIGHT (m), log", "ai": "3332", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Depth, thickness, height, or third dimension, metres",
"label": "HEIGHT (m), log", "ai": "3333", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Depth, thickness, height, or third dimension, metres",
"label": "HEIGHT (m), log", "ai": "3334", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Depth, thickness, height, or third dimension, metres",
"label": "HEIGHT (m), log", "ai": "3335", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Area, square metres", "label": "AREA (m^2), log",
"ai": "3340", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Area, square metres", "label": "AREA (m^2), log",
"ai": "3341", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Area, square metres", "label": "AREA (m^2), log",
"ai": "3342", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Area, square metres", "label": "AREA (m^2), log",
"ai": "3343", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Area, square metres", "label": "AREA (m^2), log",
"ai": "3344", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Area, square metres", "label": "AREA (m^2), log",
"ai": "3345", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Logistic volume, litres", "label": "VOLUME (l), log",
"ai": "3350", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Logistic volume, litres", "label": "VOLUME (l), log",
"ai": "3351", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Logistic volume, litres", "label": "VOLUME (l), log",
"ai": "3352", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Logistic volume, litres", "label": "VOLUME (l), log",
"ai": "3353", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Logistic volume, litres", "label": "VOLUME (l), log",
"ai": "3354", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Logistic volume, litres", "label": "VOLUME (l), log",
"ai": "3355", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"}, {"title": "Logistic volume, cubic metres",
"label": "VOLUME (m^3), log", "ai": "3360",
"format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic volume, cubic metres",
"label": "VOLUME (m^3), log", "ai": "3361", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic volume, cubic metres",
"label": "VOLUME (m^3), log", "ai": "3362", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic volume, cubic metres",
"label": "VOLUME (m^3), log", "ai": "3363", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic volume, cubic metres",
"label": "VOLUME (m^3), log", "ai": "3364", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic volume, cubic metres",
"label": "VOLUME (m^3), log", "ai": "3365", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Kilograms per square metre", "label": "KG PER m^2",
"ai": "3370", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Kilograms per square metre", "label": "KG PER m^2",
"ai": "3371", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Kilograms per square metre", "label": "KG PER m^2",
"ai": "3372", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Kilograms per square metre", "label": "KG PER m^2",
"ai": "3373", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Kilograms per square metre", "label": "KG PER m^2",
"ai": "3374", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Kilograms per square metre", "label": "KG PER m^2",
"ai": "3375", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Logistic weight, pounds", "label": "GROSS WEIGHT (lb)",
"ai": "3400", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Logistic weight, pounds", "label": "GROSS WEIGHT (lb)",
"ai": "3401", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Logistic weight, pounds", "label": "GROSS WEIGHT (lb)",
"ai": "3402", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Logistic weight, pounds", "label": "GROSS WEIGHT (lb)",
"ai": "3403", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Logistic weight, pounds", "label": "GROSS WEIGHT (lb)",
"ai": "3404", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Logistic weight, pounds", "label": "GROSS WEIGHT (lb)",
"ai": "3405", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Length or first dimension, inches",
"label": "LENGTH (in), log", "ai": "3410", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Length or first dimension, inches",
"label": "LENGTH (in), log", "ai": "3411", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Length or first dimension, inches",
"label": "LENGTH (in), log", "ai": "3412", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Length or first dimension, inches",
"label": "LENGTH (in), log", "ai": "3413", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Length or first dimension, inches",
"label": "LENGTH (in), log", "ai": "3414", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Length or first dimension, inches",
"label": "LENGTH (in), log", "ai": "3415", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Length or first dimension, feet",
"label": "LENGTH (ft), log", "ai": "3420", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Length or first dimension, feet",
"label": "LENGTH (ft), log", "ai": "3421", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Length or first dimension, feet",
"label": "LENGTH (ft), log", "ai": "3422", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Length or first dimension, feet",
"label": "LENGTH (ft), log", "ai": "3423", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Length or first dimension, feet",
"label": "LENGTH (ft), log", "ai": "3424", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Length or first dimension, feet",
"label": "LENGTH (ft), log", "ai": "3425", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Length or first dimension, yards",
"label": "LENGTH (yd), log", "ai": "3430", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Length or first dimension, yards",
"label": "LENGTH (yd), log", "ai": "3431", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Length or first dimension, yards",
"label": "LENGTH (yd), log", "ai": "3432", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Length or first dimension, yards",
"label": "LENGTH (yd), log", "ai": "3433", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Length or first dimension, yards",
"label": "LENGTH (yd), log", "ai": "3434", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Length or first dimension, yards",
"label": "LENGTH (yd), log", "ai": "3435", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Width, diameter, or second dimension, inches",
"label": "WIDTH (in), log", "ai": "3440", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Width, diameter, or second dimension, inches",
"label": "WIDTH (in), log", "ai": "3441", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Width, diameter, or second dimension, inches",
"label": "WIDTH (in), log", "ai": "3442", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Width, diameter, or second dimension, inches",
"label": "WIDTH (in), log", "ai": "3443", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Width, diameter, or second dimension, inches",
"label": "WIDTH (in), log", "ai": "3444", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Width, diameter, or second dimension, inches",
"label": "WIDTH (in), log", "ai": "3445", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Width, diameter, or second dimension, feet",
"label": "WIDTH (ft), log", "ai": "3450", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Width, diameter, or second dimension, feet",
"label": "WIDTH (ft), log", "ai": "3451", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Width, diameter, or second dimension, feet",
"label": "WIDTH (ft), log", "ai": "3452", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Width, diameter, or second dimension, feet",
"label": "WIDTH (ft), log", "ai": "3453", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Width, diameter, or second dimension, feet",
"label": "WIDTH (ft), log", "ai": "3454", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Width, diameter, or second dimension, feet",
"label": "WIDTH (ft), log", "ai": "3455", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Width, diameter, or second dimension, yard",
"label": "WIDTH (yd), log", "ai": "3460", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Width, diameter, or second dimension, yard",
"label": "WIDTH (yd), log", "ai": "3461", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Width, diameter, or second dimension, yard",
"label": "WIDTH (yd), log", "ai": "3462", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Width, diameter, or second dimension, yard",
"label": "WIDTH (yd), log", "ai": "3463", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Width, diameter, or second dimension, yard",
"label": "WIDTH (yd), log", "ai": "3464", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Width, diameter, or second dimension, yard",
"label": "WIDTH (yd), log", "ai": "3465", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Depth, thickness, height, or third dimension, inches",
"label": "HEIGHT (in), log", "ai": "3470", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Depth, thickness, height, or third dimension, inches",
"label": "HEIGHT (in), log", "ai": "3471", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Depth, thickness, height, or third dimension, inches",
"label": "HEIGHT (in), log", "ai": "3472", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Depth, thickness, height, or third dimension, inches",
"label": "HEIGHT (in), log", "ai": "3473", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Depth, thickness, height, or third dimension, inches",
"label": "HEIGHT (in), log", "ai": "3474", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Depth, thickness, height, or third dimension, inches",
"label": "HEIGHT (in), log", "ai": "3475", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Depth, thickness, height, or third dimension, feet",
"label": "HEIGHT (ft), log", "ai": "3480", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Depth, thickness, height, or third dimension, feet",
"label": "HEIGHT (ft), log", "ai": "3481", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Depth, thickness, height, or third dimension, feet",
"label": "HEIGHT (ft), log", "ai": "3482", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Depth, thickness, height, or third dimension, feet",
"label": "HEIGHT (ft), log", "ai": "3483", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Depth, thickness, height, or third dimension, feet",
"label": "HEIGHT (ft), log", "ai": "3484", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Depth, thickness, height, or third dimension, feet",
"label": "HEIGHT (ft), log", "ai": "3485", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Depth, thickness, height, or third dimension, yards",
"label": "HEIGHT (yd), log", "ai": "3490", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Depth, thickness, height, or third dimension, yards",
"label": "HEIGHT (yd), log", "ai": "3491", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Depth, thickness, height, or third dimension, yards",
"label": "HEIGHT (yd), log", "ai": "3492", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Depth, thickness, height, or third dimension, yards",
"label": "HEIGHT (yd), log", "ai": "3493", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Depth, thickness, height, or third dimension, yards",
"label": "HEIGHT (yd), log", "ai": "3494", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Depth, thickness, height, or third dimension, yards",
"label": "HEIGHT (yd), log", "ai": "3495", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Area, square inches (variable measure trade item)",
"label": "AREA (in^2)", "ai": "3500", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"},
{"title": "Area, square inches (variable measure trade item)",
"label": "AREA (in^2)", "ai": "3501", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"},
{"title": "Area, square inches (variable measure trade item)",
"label": "AREA (in^2)", "ai": "3502", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"},
{"title": "Area, square inches (variable measure trade item)",
"label": "AREA (in^2)", "ai": "3503", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"},
{"title": "Area, square inches (variable measure trade item)",
"label": "AREA (in^2)", "ai": "3504", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"},
{"title": "Area, square inches (variable measure trade item)",
"label": "AREA (in^2)", "ai": "3505", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"},
{"title": "Area, square feet (variable measure trade item)",
"label": "AREA (ft^2)", "ai": "3510", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"},
{"title": "Area, square feet (variable measure trade item)",
"label": "AREA (ft^2)", "ai": "3511", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"},
{"title": "Area, square feet (variable measure trade item)",
"label": "AREA (ft^2)", "ai": "3512", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"},
{"title": "Area, square feet (variable measure trade item)",
"label": "AREA (ft^2)", "ai": "3513", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"},
{"title": "Area, square feet (variable measure trade item)",
"label": "AREA (ft^2)", "ai": "3514", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"},
{"title": "Area, square feet (variable measure trade item)",
"label": "AREA (ft^2)", "ai": "3515", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"},
{"title": "Area, square yards (variable measure trade item)",
"label": "AREA (yd^2)", "ai": "3520", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"},
{"title": "Area, square yards (variable measure trade item)",
"label": "AREA (yd^2)", "ai": "3521", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"},
{"title": "Area, square yards (variable measure trade item)",
"label": "AREA (yd^2)", "ai": "3522", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"},
{"title": "Area, square yards (variable measure trade item)",
"label": "AREA (yd^2)", "ai": "3523", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"},
{"title": "Area, square yards (variable measure trade item)",
"label": "AREA (yd^2)", "ai": "3524", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"},
{"title": "Area, square yards (variable measure trade item)",
"label": "AREA (yd^2)", "ai": "3525", "format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"},
{"title": "Area, square inches", "label": "AREA (in^2), log",
"ai": "3530", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Area, square inches", "label": "AREA (in^2), log",
"ai": "3531", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Area, square inches", "label": "AREA (in^2), log",
"ai": "3532", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Area, square inches", "label": "AREA (in^2), log",
"ai": "3533", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Area, square inches", "label": "AREA (in^2), log",
"ai": "3534", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Area, square inches", "label": "AREA (in^2), log",
"ai": "3535", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Area, square feet", "label": "AREA (ft^2), log",
"ai": "3540", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Area, square feet", "label": "AREA (ft^2), log",
"ai": "3541", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Area, square feet", "label": "AREA (ft^2), log",
"ai": "3542", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Area, square feet", "label": "AREA (ft^2), log",
"ai": "3543", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Area, square feet", "label": "AREA (ft^2), log",
"ai": "3544", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Area, square feet", "label": "AREA (ft^2), log",
"ai": "3545", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Area, square yards", "label": "AREA (yd^2), log",
"ai": "3550", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Area, square yards", "label": "AREA (yd^2), log",
"ai": "3551", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Area, square yards", "label": "AREA (yd^2), log",
"ai": "3552", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Area, square yards", "label": "AREA (yd^2), log",
"ai": "3553", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Area, square yards", "label": "AREA (yd^2), log",
"ai": "3554", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Area, square yards", "label": "AREA (yd^2), log",
"ai": "3555", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Net weight, troy ounces (variable measure trade item)",
"label": "NET WEIGHT (t oz)", "ai": "3560", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net weight, troy ounces (variable measure trade item)",
"label": "NET WEIGHT (t oz)", "ai": "3561", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net weight, troy ounces (variable measure trade item)",
"label": "NET WEIGHT (t oz)", "ai": "3562", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net weight, troy ounces (variable measure trade item)",
"label": "NET WEIGHT (t oz)", "ai": "3563", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net weight, troy ounces (variable measure trade item)",
"label": "NET WEIGHT (t oz)", "ai": "3564", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net weight, troy ounces (variable measure trade item)",
"label": "NET WEIGHT (t oz)", "ai": "3565", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Net weight (or volume), ounces (variable measure trade item)",
"label": "NET VOLUME (oz)", "ai": "3570", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Net weight (or volume), ounces (variable measure trade item)",
"label": "NET VOLUME (oz)", "ai": "3571", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Net weight (or volume), ounces (variable measure trade item)",
"label": "NET VOLUME (oz)", "ai": "3572", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Net weight (or volume), ounces (variable measure trade item)",
"label": "NET VOLUME (oz)", "ai": "3573", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Net weight (or volume), ounces (variable measure trade item)",
"label": "NET VOLUME (oz)", "ai": "3574", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"}, {
"title": "Net weight (or volume), ounces (variable measure trade item)",
"label": "NET VOLUME (oz)", "ai": "3575", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, quarts (variable measure trade item)",
"label": "NET VOLUME (qt)", "ai": "3600", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, quarts (variable measure trade item)",
"label": "NET VOLUME (qt)", "ai": "3601", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, quarts (variable measure trade item)",
"label": "NET VOLUME (qt)", "ai": "3602", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, quarts (variable measure trade item)",
"label": "NET VOLUME (qt)", "ai": "3603", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, quarts (variable measure trade item)",
"label": "NET VOLUME (qt)", "ai": "3604", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, quarts (variable measure trade item)",
"label": "NET VOLUME (qt)", "ai": "3605", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, gallons U.S. (variable measure trade item)",
"label": "NET VOLUME (gal.)", "ai": "3610", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, gallons U.S. (variable measure trade item)",
"label": "NET VOLUME (gal.)", "ai": "3611", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, gallons U.S. (variable measure trade item)",
"label": "NET VOLUME (gal.)", "ai": "3612", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, gallons U.S. (variable measure trade item)",
"label": "NET VOLUME (gal.)", "ai": "3613", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, gallons U.S. (variable measure trade item)",
"label": "NET VOLUME (gal.)", "ai": "3614", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, gallons U.S. (variable measure trade item)",
"label": "NET VOLUME (gal.)", "ai": "3615", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic volume, quarts", "label": "VOLUME (qt), log",
"ai": "3620", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Logistic volume, quarts", "label": "VOLUME (qt), log",
"ai": "3621", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Logistic volume, quarts", "label": "VOLUME (qt), log",
"ai": "3622", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Logistic volume, quarts", "label": "VOLUME (qt), log",
"ai": "3623", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Logistic volume, quarts", "label": "VOLUME (qt), log",
"ai": "3624", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Logistic volume, quarts", "label": "VOLUME (qt), log",
"ai": "3625", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"}, {"title": "Logistic volume, gallons U.S.",
"label": "VOLUME (gal.), log", "ai": "3630",
"format": "N6", "type": "D",
"fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic volume, gallons U.S.",
"label": "VOLUME (gal.), log", "ai": "3631", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic volume, gallons U.S.",
"label": "VOLUME (gal.), log", "ai": "3632", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic volume, gallons U.S.",
"label": "VOLUME (gal.), log", "ai": "3633", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic volume, gallons U.S.",
"label": "VOLUME (gal.), log", "ai": "3634", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic volume, gallons U.S.",
"label": "VOLUME (gal.), log", "ai": "3635", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, cubic inches (variable measure trade item)",
"label": "VOLUME (in^3) ", "ai": "3640", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, cubic inches (variable measure trade item)",
"label": "VOLUME (in^3) ", "ai": "3641", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, cubic inches (variable measure trade item)",
"label": "VOLUME (in^3) ", "ai": "3642", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, cubic inches (variable measure trade item)",
"label": "VOLUME (in^3) ", "ai": "3643", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, cubic inches (variable measure trade item)",
"label": "VOLUME (in^3) ", "ai": "3644", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, cubic inches (variable measure trade item)",
"label": "VOLUME (in^3) ", "ai": "3645", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, cubic feet (variable measure trade item)",
"label": "VOLUME (ft^3) ", "ai": "3650", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, cubic feet (variable measure trade item)",
"label": "VOLUME (ft^3) ", "ai": "3651", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, cubic feet (variable measure trade item)",
"label": "VOLUME (ft^3) ", "ai": "3652", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, cubic feet (variable measure trade item)",
"label": "VOLUME (ft^3) ", "ai": "3653", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, cubic feet (variable measure trade item)",
"label": "VOLUME (ft^3) ", "ai": "3654", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, cubic feet (variable measure trade item)",
"label": "VOLUME (ft^3) ", "ai": "3655", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, cubic yards (variable measure trade item)",
"label": "VOLUME (yd^3) ", "ai": "3660", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, cubic yards (variable measure trade item)",
"label": "VOLUME (yd^3) ", "ai": "3661", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, cubic yards (variable measure trade item)",
"label": "VOLUME (yd^3) ", "ai": "3662", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, cubic yards (variable measure trade item)",
"label": "VOLUME (yd^3) ", "ai": "3663", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, cubic yards (variable measure trade item)",
"label": "VOLUME (yd^3) ", "ai": "3664", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Net volume, cubic yards (variable measure trade item)",
"label": "VOLUME (yd^3) ", "ai": "3665", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic volume, cubic inches",
"label": "VOLUME (in^3), log", "ai": "3670", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic volume, cubic inches",
"label": "VOLUME (in^3), log", "ai": "3671", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic volume, cubic inches",
"label": "VOLUME (in^3), log", "ai": "3672", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic volume, cubic inches",
"label": "VOLUME (in^3), log", "ai": "3673", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic volume, cubic inches",
"label": "VOLUME (in^3), log", "ai": "3674", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic volume, cubic inches",
"label": "VOLUME (in^3), log", "ai": "3675", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic volume, cubic feet",
"label": "VOLUME (ft^3), log", "ai": "3680", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic volume, cubic feet",
"label": "VOLUME (ft^3), log", "ai": "3681", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic volume, cubic feet",
"label": "VOLUME (ft^3), log", "ai": "3682", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic volume, cubic feet",
"label": "VOLUME (ft^3), log", "ai": "3683", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic volume, cubic feet",
"label": "VOLUME (ft^3), log", "ai": "3684", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic volume, cubic feet",
"label": "VOLUME (ft^3), log", "ai": "3685", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic volume, cubic yards",
"label": "VOLUME (yd^3), log", "ai": "3690", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic volume, cubic yards",
"label": "VOLUME (yd^3), log", "ai": "3691", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic volume, cubic yards",
"label": "VOLUME (yd^3), log", "ai": "3692", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic volume, cubic yards",
"label": "VOLUME (yd^3), log", "ai": "3693", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic volume, cubic yards",
"label": "VOLUME (yd^3), log", "ai": "3694", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Logistic volume, cubic yards",
"label": "VOLUME (yd^3), log", "ai": "3695", "format": "N6",
"type": "D", "fixedLength": True, "regex": "(\\d{6})"},
{"title": "Count of trade items", "label": "COUNT", "ai": "37",
"format": "N..8", "type": "D", "fixedLength": False,
"regex": "(\\d{0,8})"}, {
"title": "Applicable amount payable or Coupon value, local currency",
"label": "AMOUNT", "ai": "3900", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{0,15})"}, {
"title": "Applicable amount payable or Coupon value, local currency",
"label": "AMOUNT", "ai": "3901", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{0,15})"}, {
"title": "Applicable amount payable or Coupon value, local currency",
"label": "AMOUNT", "ai": "3902", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{0,15})"}, {
"title": "Applicable amount payable or Coupon value, local currency",
"label": "AMOUNT", "ai": "3903", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{0,15})"}, {
"title": "Applicable amount payable or Coupon value, local currency",
"label": "AMOUNT", "ai": "3904", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{0,15})"}, {
"title": "Applicable amount payable or Coupon value, local currency",
"label": "AMOUNT", "ai": "3905", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{0,15})"}, {
"title": "Applicable amount payable or Coupon value, local currency",
"label": "AMOUNT", "ai": "3906", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{0,15})"}, {
"title": "Applicable amount payable or Coupon value, local currency",
"label": "AMOUNT", "ai": "3907", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{0,15})"}, {
"title": "Applicable amount payable or Coupon value, local currency",
"label": "AMOUNT", "ai": "3908", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{0,15})"}, {
"title": "Applicable amount payable or Coupon value, local currency",
"label": "AMOUNT", "ai": "3909", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{0,15})"},
{"title": "Applicable amount payable with ISO currency code",
"label": "AMOUNT", "ai": "3910", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{3})(\\d{0,15})"},
{"title": "Applicable amount payable with ISO currency code",
"label": "AMOUNT", "ai": "3911", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{3})(\\d{0,15})"},
{"title": "Applicable amount payable with ISO currency code",
"label": "AMOUNT", "ai": "3912", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{3})(\\d{0,15})"},
{"title": "Applicable amount payable with ISO currency code",
"label": "AMOUNT", "ai": "3913", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{3})(\\d{0,15})"},
{"title": "Applicable amount payable with ISO currency code",
"label": "AMOUNT", "ai": "3914", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{3})(\\d{0,15})"},
{"title": "Applicable amount payable with ISO currency code",
"label": "AMOUNT", "ai": "3915", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{3})(\\d{0,15})"},
{"title": "Applicable amount payable with ISO currency code",
"label": "AMOUNT", "ai": "3916", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{3})(\\d{0,15})"},
{"title": "Applicable amount payable with ISO currency code",
"label": "AMOUNT", "ai": "3917", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{3})(\\d{0,15})"},
{"title": "Applicable amount payable with ISO currency code",
"label": "AMOUNT", "ai": "3918", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{3})(\\d{0,15})"},
{"title": "Applicable amount payable with ISO currency code",
"label": "AMOUNT", "ai": "3919", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{3})(\\d{0,15})"}, {
"title": "Applicable amount payable, single monetary area (variable measure trade item)",
"label": "PRICE", "ai": "3920", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{0,15})"}, {
"title": "Applicable amount payable, single monetary area (variable measure trade item)",
"label": "PRICE", "ai": "3921", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{0,15})"}, {
"title": "Applicable amount payable, single monetary area (variable measure trade item)",
"label": "PRICE", "ai": "3922", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{0,15})"}, {
"title": "Applicable amount payable, single monetary area (variable measure trade item)",
"label": "PRICE", "ai": "3923", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{0,15})"}, {
"title": "Applicable amount payable, single monetary area (variable measure trade item)",
"label": "PRICE", "ai": "3924", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{0,15})"}, {
"title": "Applicable amount payable, single monetary area (variable measure trade item)",
"label": "PRICE", "ai": "3925", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{0,15})"}, {
"title": "Applicable amount payable, single monetary area (variable measure trade item)",
"label": "PRICE", "ai": "3926", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{0,15})"}, {
"title": "Applicable amount payable, single monetary area (variable measure trade item)",
"label": "PRICE", "ai": "3927", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{0,15})"}, {
"title": "Applicable amount payable, single monetary area (variable measure trade item)",
"label": "PRICE", "ai": "3928", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{0,15})"}, {
"title": "Applicable amount payable, single monetary area (variable measure trade item)",
"label": "PRICE", "ai": "3929", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{0,15})"}, {
"title": "Applicable amount payable with ISO currency code (variable measure trade item)",
"label": "PRICE", "ai": "3930", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{3})(\\d{0,15})"}, {
"title": "Applicable amount payable with ISO currency code (variable measure trade item)",
"label": "PRICE", "ai": "3931", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{3})(\\d{0,15})"}, {
"title": "Applicable amount payable with ISO currency code (variable measure trade item)",
"label": "PRICE", "ai": "3932", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{3})(\\d{0,15})"}, {
"title": "Applicable amount payable with ISO currency code (variable measure trade item)",
"label": "PRICE", "ai": "3933", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{3})(\\d{0,15})"}, {
"title": "Applicable amount payable with ISO currency code (variable measure trade item)",
"label": "PRICE", "ai": "3934", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{3})(\\d{0,15})"}, {
"title": "Applicable amount payable with ISO currency code (variable measure trade item)",
"label": "PRICE", "ai": "3935", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{3})(\\d{0,15})"}, {
"title": "Applicable amount payable with ISO currency code (variable measure trade item)",
"label": "PRICE", "ai": "3936", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{3})(\\d{0,15})"}, {
"title": "Applicable amount payable with ISO currency code (variable measure trade item)",
"label": "PRICE", "ai": "3937", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{3})(\\d{0,15})"}, {
"title": "Applicable amount payable with ISO currency code (variable measure trade item)",
"label": "PRICE", "ai": "3938", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{3})(\\d{0,15})"}, {
"title": "Applicable amount payable with ISO currency code (variable measure trade item)",
"label": "PRICE", "ai": "3939", "format": "N..15", "type": "D",
"fixedLength": False, "regex": "(\\d{3})(\\d{0,15})"},
{"title": "Percentage discount of a coupon", "label": "PRCNT OFF",
"ai": "3940", "format": "N4", "type": "D", "fixedLength": True,
"regex": "(\\d{4})"},
{"title": "Percentage discount of a coupon", "label": "PRCNT OFF",
"ai": "3941", "format": "N4", "type": "D", "fixedLength": True,
"regex": "(\\d{4})"},
{"title": "Percentage discount of a coupon", "label": "PRCNT OFF",
"ai": "3942", "format": "N4", "type": "D", "fixedLength": True,
"regex": "(\\d{4})"},
{"title": "Percentage discount of a coupon", "label": "PRCNT OFF",
"ai": "3943", "format": "N4", "type": "D", "fixedLength": True,
"regex": "(\\d{4})"}, {"title": "Customer's purchase order number",
"label": "ORDER NUMBER", "ai": "400",
"format": "X..30", "type": "D",
"fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,30})"},
{"title": "Global Identification Number for Consignment (GINC)",
"label": "GINC", "shortcode": "ginc", "ai": "401",
"format": "X..30", "type": "I", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,30})"},
{"title": "Global Shipment Identification Number (GSIN)",
"label": "GSIN", "shortcode": "gsin", "ai": "402", "format": "N17",
"type": "I", "fixedLength": True, "checkDigit": "L",
"regex": "(\\d{17})"},
{"title": "Routing code", "label": "ROUTE", "ai": "403",
"format": "X..30", "type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,30})"},
{"title": "Ship to - Deliver to Global Location Number",
"label": "SHIP TO LOC", "ai": "410", "format": "N13", "type": "D",
"fixedLength": True, "checkDigit": "L", "regex": "(\\d{13})"},
{"title": "Bill to - Invoice to Global Location Number",
"label": "BILL TO ", "ai": "411", "format": "N13", "type": "D",
"fixedLength": True, "checkDigit": "L", "regex": "(\\d{13})"},
{"title": "Purchased from Global Location Number",
"label": "PURCHASE FROM", "ai": "412", "format": "N13",
"type": "D", "fixedLength": True, "checkDigit": "L",
"regex": "(\\d{13})"}, {
"title": "Ship for - Deliver for - Forward to Global Location Number",
"label": "SHIP FOR LOC", "ai": "413", "format": "N13",
"type": "D", "fixedLength": True, "checkDigit": "L",
"regex": "(\\d{13})"}, {
"title": "Identification of a physical location - Global Location Number",
"label": "LOC No", "shortcode": "gln", "ai": "414",
"format": "N13", "type": "I", "fixedLength": True,
"checkDigit": "L", "qualifiers": ["254"], "regex": "(\\d{13})"},
{"title": "Global Location Number of the invoicing party",
"label": "PAY TO", "shortcode": "payto", "ai": "415",
"format": "N13", "type": "I", "fixedLength": True,
"checkDigit": "L", "regex": "(\\d{13})"},
{"title": "GLN of the production or service location",
"label": "PROD/SERV LOC", "ai": "416", "format": "N13",
"type": "D", "fixedLength": True, "checkDigit": "L",
"regex": "(\\d{13})"}, {
"title": "Ship to - Deliver to postal code within a single postal authority",
"label": "SHIP TO POST", "ai": "420", "format": "X..20",
"type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,20})"},
{"title": "Ship to - Deliver to postal code with ISO country code",
"label": "SHIP TO POST", "ai": "421", "format": "N3+X..9",
"type": "D", "fixedLength": False,
"regex": "(\\d{3})([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,9})"},
{"title": "Country of origin of a trade item", "label": "ORIGIN",
"ai": "422", "format": "N3", "type": "D", "fixedLength": True,
"regex": "(\\d{3})"}, {"title": "Country of initial processing",
"label": "COUNTRY - INITIAL PROCESS.",
"ai": "423", "format": "N3+N..12",
"type": "D", "fixedLength": False,
"regex": "(\\d{3})(\\d{0,12})"},
{"title": "Country of processing", "label": "COUNTRY - PROCESS.",
"ai": "424", "format": "N3", "type": "D", "fixedLength": True,
"regex": "(\\d{3})"}, {"title": "Country of disassembly",
"label": "COUNTRY - DISASSEMBLY",
"ai": "425", "format": "N3+N..12",
"type": "D", "fixedLength": False,
"regex": "(\\d{3})(\\d{0,12})"},
{"title": "Country covering full process chain",
"label": "COUNTRY - FULL PROCESS", "ai": "426", "format": "N3",
"type": "D", "fixedLength": True, "regex": "(\\d{3})"},
{"title": "Country subdivision Of origin",
"label": "ORIGIN SUBDIVISION", "ai": "427", "format": "X..3",
"type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,3})"},
{"title": "NATO Stock Number (NSN)", "label": "NSN", "ai": "7001",
"format": "N13", "type": "D", "fixedLength": True,
"regex": "(\\d{13})"},
{"title": "UN/ECE meat carcasses and cuts classification",
"label": "MEAT CUT", "ai": "7002", "format": "X..30", "type": "D",
"fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,30})"},
{"title": "Expiration date and time", "label": "EXPIRY TIME",
"shortcode": "expdt", "ai": "7003", "format": "N10", "type": "D",
"fixedLength": True, "regex": "(\\d{10})"},
{"title": "Active potency", "label": "ACTIVE POTENCY", "ai": "7004",
"format": "N..4", "type": "D", "fixedLength": False,
"regex": "(\\d{0,4})"},
{"title": "Catch area", "label": "CATCH AREA", "ai": "7005",
"format": "X..12", "type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,12})"},
{"title": "First freeze date ", "label": "FIRST FREEZE DATE",
"ai": "7006", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Harvest date", "label": "HARVEST DATE", "ai": "7007",
"format": "N6..12", "type": "D", "fixedLength": False,
"regex": "(\\d{6,12})"}, {"title": "Species for fishery purposes",
"label": "AQUATIC SPECIES", "ai": "7008",
"format": "X..3", "type": "D",
"fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,3})"},
{"title": "Fishing gear type", "label": "FISHING GEAR TYPE",
"ai": "7009", "format": "X..10", "type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,10})"},
{"title": "Production method", "label": "PROD METHOD", "ai": "7010",
"format": "X..2", "type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,2})"},
{"title": "Refurbishment lot ID", "label": "REFURB LOT",
"ai": "7020", "format": "X..20", "type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,20})"},
{"title": "Functional status", "label": "FUNC STAT", "ai": "7021",
"format": "X..20", "type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,20})"},
{"title": "Revision status", "label": "REV STAT", "ai": "7022",
"format": "X..20", "type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,20})"},
{
"title": "Global Individual Asset Identifier (GIAI) of an assembly",
"label": "GIAI - ASSEMBLY", "ai": "7023", "format": "X..30",
"type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,30})"},
{"title": "Number of processor with ISO Country Code",
"label": "PROCESSOR # 0", "ai": "7030", "format": "X..27",
"type": "D", "fixedLength": False,
"regex": "(\\d{3})([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,27})"},
{"title": "Number of processor with ISO Country Code",
"label": "PROCESSOR # 1", "ai": "7031", "format": "X..27",
"type": "D", "fixedLength": False,
"regex": "(\\d{3})([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,27})"},
{"title": "Number of processor with ISO Country Code",
"label": "PROCESSOR # 2", "ai": "7032", "format": "X..27",
"type": "D", "fixedLength": False,
"regex": "(\\d{3})([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,27})"},
{"title": "Number of processor with ISO Country Code",
"label": "PROCESSOR # 3", "ai": "7033", "format": "X..27",
"type": "D", "fixedLength": False,
"regex": "(\\d{3})([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,27})"},
{"title": "Number of processor with ISO Country Code",
"label": "PROCESSOR # 4", "ai": "7034", "format": "X..27",
"type": "D", "fixedLength": False,
"regex": "(\\d{3})([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,27})"},
{"title": "Number of processor with ISO Country Code",
"label": "PROCESSOR # 5", "ai": "7035", "format": "X..27",
"type": "D", "fixedLength": False,
"regex": "(\\d{3})([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,27})"},
{"title": "Number of processor with ISO Country Code",
"label": "PROCESSOR # 6", "ai": "7036", "format": "X..27",
"type": "D", "fixedLength": False,
"regex": "(\\d{3})([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,27})"},
{"title": "Number of processor with ISO Country Code",
"label": "PROCESSOR # 7", "ai": "7037", "format": "X..27",
"type": "D", "fixedLength": False,
"regex": "(\\d{3})([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,27})"},
{"title": "Number of processor with ISO Country Code",
"label": "PROCESSOR # 8", "ai": "7038", "format": "X..27",
"type": "D", "fixedLength": False,
"regex": "(\\d{3})([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,27})"},
{"title": "Number of processor with ISO Country Code",
"label": "PROCESSOR # 9", "ai": "7039", "format": "X..27",
"type": "D", "fixedLength": False,
"regex": "(\\d{3})([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,27})"},
{
"title": "National Healthcare Reimbursement Number (NHRN) - Germany PZN",
"label": "NHRN PZN", "ai": "710", "format": "X..20",
"type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,20})"},
{
"title": "National Healthcare Reimbursement Number (NHRN) - France CIP",
"label": "NHRN CIP", "ai": "711", "format": "X..20",
"type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,20})"},
{
"title": "National Healthcare Reimbursement Number (NHRN) - Spain CN",
"label": "NHRN CN", "ai": "712", "format": "X..20", "type": "D",
"fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,20})"},
{
"title": "National Healthcare Reimbursement Number (NHRN) - Brasil DRN",
"label": "NHRN DRN", "ai": "713", "format": "X..20",
"type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,20})"},
{
"title": "National Healthcare Reimbursement Number (NHRN) - Portugal AIM",
"label": "NHRN AIM", "ai": "714", "format": "X..20",
"type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,20})"},
{"title": "Certification reference # 0", "label": "CERT # 0",
"ai": "7230", "format": "X2+X..28", "type": "D",
"fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{2,30})"},
{"title": "Certification reference # 1", "label": "CERT # 1",
"ai": "7231", "format": "X2+X..28", "type": "D",
"fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{2,30})"},
{"title": "Certification reference # 2", "label": "CERT # 2",
"ai": "7232", "format": "X2+X..28", "type": "D",
"fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{2,30})"},
{"title": "Certification reference # 3", "label": "CERT # 3",
"ai": "7233", "format": "X2+X..28", "type": "D",
"fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{2,30})"},
{"title": "Certification reference # 4", "label": "CERT # 4",
"ai": "7234", "format": "X2+X..28", "type": "D",
"fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{2,30})"},
{"title": "Certification reference # 5", "label": "CERT # 5",
"ai": "7235", "format": "X2+X..28", "type": "D",
"fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{2,30})"},
{"title": "Certification reference # 6", "label": "CERT # 6",
"ai": "7236", "format": "X2+X..28", "type": "D",
"fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{2,30})"},
{"title": "Certification reference # 7", "label": "CERT # 7",
"ai": "7237", "format": "X2+X..28", "type": "D",
"fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{2,30})"},
{"title": "Certification reference # 8", "label": "CERT # 8",
"ai": "7238", "format": "X2+X..28", "type": "D",
"fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{2,30})"},
{"title": "Certification reference # 9", "label": "CERT # 9",
"ai": "7239", "format": "X2+X..28", "type": "D",
"fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{2,30})"},
{
"title": "Roll products (width, length, core diameter, direction, splices)",
"label": "DIMENSIONS", "ai": "8001", "format": "N14",
"type": "D", "fixedLength": True, "regex": "(\\d{14})"},
{"title": "Cellular mobile telephone identifier", "label": "CMT No",
"ai": "8002", "format": "X..20", "type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,20})"},
{"title": "Global Returnable Asset Identifier (GRAI)",
"label": "GRAI", "shortcode": "grai", "ai": "8003",
"format": "N14+X..16", "type": "I", "fixedLength": False,
"checkDigit": "14",
"regex": "(\\d{14})([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,16})"},
{"title": "Global Individual Asset Identifier (GIAI)",
"label": "GIAI", "shortcode": "giai", "ai": "8004",
"format": "X..30", "type": "I", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,30})"},
{"title": "Price per unit of measure", "label": "PRICE PER UNIT",
"ai": "8005", "format": "N6", "type": "D", "fixedLength": True,
"regex": "(\\d{6})"},
{"title": "Identification of an individual trade item piece",
"label": "ITIP", "shortcode": "itip", "ai": "8006",
"format": "N14+N2+N2", "type": "I", "fixedLength": True,
"checkDigit": "14", "qualifiers": ["22", "10", "21"],
"regex": "(\\d{14})(\\d{2})(\\d{2})"},
{"title": "International Bank Account Number (IBAN) ",
"label": "IBAN", "ai": "8007", "format": "X..34", "type": "D",
"fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,34})"},
{"title": "Date and time of production", "label": "PROD TIME",
"ai": "8008", "format": "N8+N..4", "type": "D",
"fixedLength": False, "regex": "(\\d{8})(\\d{0,4})"},
{"title": "Optically Readable Sensor Indicator", "label": "OPT SEN",
"ai": "8009", "format": "X..50", "type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,50})"},
{"title": "Component/Part Identifier (CPID)", "label": "CPID",
"shortcode": "cpid", "ai": "8010", "format": "Y..30", "type": "I",
"fixedLength": False, "qualifiers": ["8011"],
"regex": "([\\x23\\x2D\\x2F\\x30-\\x39\\x41-\\x5A]{0,30})"},
{"title": "Component/Part Identifier serial number (CPID SERIAL)",
"label": "CPID SERIAL", "shortcode": "cpsn", "ai": "8011",
"format": "N..12", "type": "Q", "fixedLength": False,
"regex": "(\\d{0,12})"},
{"title": "Software version", "label": "VERSION", "ai": "8012",
"format": "X..20", "type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,20})"},
{"title": "Global Model Number (GMN)",
"label": "GMN (for medical devices, the default, global data title is BUDI-DI )",
"ai": "8013", "format": "X..30", "type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,30})"},
{"title": "Global Service Relation Number - Provider",
"label": "GSRN - PROVIDER", "shortcode": "gsrnp", "ai": "8017",
"format": "N18", "type": "I", "fixedLength": True,
"checkDigit": "L", "qualifiers": ["8019"], "regex": "(\\d{18})"},
{"title": "Global Service Relation Number - Recipient",
"label": "GSRN - RECIPIENT", "shortcode": "gsrn", "ai": "8018",
"format": "N18", "type": "I", "fixedLength": True,
"checkDigit": "L", "qualifiers": ["8019"], "regex": "(\\d{18})"},
{"title": "Service Relation Instance Number (SRIN)",
"label": "SRIN", "shortcode": "srin", "ai": "8019",
"format": "N..10", "type": "Q", "fixedLength": False,
"regex": "(\\d{0,10})"},
{"title": "Payment slip reference number", "label": "REF No",
"ai": "8020", "format": "X..25", "type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,25})"},
{
"title": "Identification of pieces of a trade item contained in a logistics unit",
"label": "ITIP CONTENT", "ai": "8026", "format": "N14+N2+N2",
"type": "D", "fixedLength": True, "checkDigit": "14",
"regex": "(\\d{14})(\\d{2})(\\d{2})"},
{"title": "Coupon code identification for use in North America",
"ai": "8110", "format": "X..70", "type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,70})"},
{"title": "Loyalty points of a coupon", "label": "POINTS",
"ai": "8111", "format": "N4", "type": "D", "fixedLength": True,
"regex": "(\\d{4})"}, {
"title": "Paperless coupon code identification for use in North America",
"ai": "8112", "format": "X..70", "type": "D",
"fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,70})"},
{"title": "Extended Packaging URL ", "label": "PRODUCT URL",
"ai": "8200", "format": "X..70", "type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,70})"},
{"title": "Information mutually agreed between trading partners",
"label": "INTERNAL", "ai": "90", "format": "X..30", "type": "D",
"fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,30})"},
{"title": "Company internal information", "label": "INTERNAL",
"ai": "91", "format": "X..90", "type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,90})"},
{"title": "Company internal information", "label": "INTERNAL",
"ai": "92", "format": "X..90", "type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,90})"},
{"title": "Company internal information", "label": "INTERNAL",
"ai": "93", "format": "X..90", "type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,90})"},
{"title": "Company internal information", "label": "INTERNAL",
"ai": "94", "format": "X..90", "type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,90})"},
{"title": "Company internal information", "label": "INTERNAL",
"ai": "95", "format": "X..90", "type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,90})"},
{"title": "Company internal information", "label": "INTERNAL",
"ai": "96", "format": "X..90", "type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,90})"},
{"title": "Company internal information", "label": "INTERNAL",
"ai": "97", "format": "X..90", "type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,90})"},
{"title": "Company internal information", "label": "INTERNAL",
"ai": "98", "format": "X..90", "type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,90})"},
{"title": "Company internal information", "label": "INTERNAL",
"ai": "99", "format": "X..90", "type": "D", "fixedLength": False,
"regex": "([\\x21-\\x22\\x25-\\x2F\\x30-\\x39\\x41-\\x5A\\x5F\\x61-\\x7A]{0,90})"}]
def fetch_short_code(application_identifier):
    """Return an (ai, shortcode) pair for one AI table entry."""
    return (application_identifier.get('ai'),
            application_identifier.get('shortcode'))
def fetch_qualifiers(application_identifier):
    """Return an (ai, qualifiers) pair for one AI table entry."""
    return (application_identifier.get('ai'),
            application_identifier.get('qualifiers'))
def fetch_check_digit(application_identifier):
    """Return an (ai, check-digit position) pair for one AI table entry."""
    return (application_identifier.get('ai'),
            application_identifier.get('checkDigit'))
def construct_regex(application_identifier):
    """Return an (ai, compiled anchored pattern) pair for one entry."""
    pattern = "^" + application_identifier.get('regex') + "$"
    return (application_identifier.get('ai'),
            re.compile(pattern))
AI_REGEX = dict(construct_regex(ai) for ai in AI_TABLE)
AI_SHORT_CODE = dict(
    fetch_short_code(ai) for ai in AI_TABLE if ai.get('shortcode')
)
AI_QUALIFIER = dict(
    fetch_qualifiers(ai) for ai in AI_TABLE if ai.get('qualifiers')
)
AI_CHECK_DIGIT_POSITION = dict(
    fetch_check_digit(ai) for ai in AI_TABLE if ai.get('checkDigit')
)
SHORT_CODE_TO_NUMERIC = {value: key for key, value in AI_SHORT_CODE.items()}
IDENTIFIERS = [ai for ai in AI_TABLE if ai.get('type', '') == 'I']
QUALIFIERS = [ai for ai in AI_TABLE if ai.get('type', '') == 'Q']
DATA_ATTRIBUTES = [ai for ai in AI_TABLE if ai.get('type', '') == 'D']
FIXED_LENGTH = [ai for ai in AI_TABLE if ai.get('fixedLength', False)]
VARIABLE_LENGTH = [ai for ai in AI_TABLE if not ai.get('fixedLength', False)]
def get_sub_map(sub_ai_table):
"""Get sub map."""
return {
sub_ai.get('ai'): sub_ai for sub_ai in sub_ai_table
}
IDENTIFIER_MAP = get_sub_map(IDENTIFIERS)
QUALIFIER_MAP = get_sub_map(QUALIFIERS)
ATTRIBUTE_MAP = get_sub_map(DATA_ATTRIBUTES)
FIXED_LENGTH_MAP = get_sub_map(FIXED_LENGTH)
VARIABLE_LENGTH_MAP = get_sub_map(VARIABLE_LENGTH)
AI_MAPS = {
'identifiers': list(IDENTIFIER_MAP.keys()),
'qualifiers': list(QUALIFIER_MAP.keys()),
'dataAttributes': list(ATTRIBUTE_MAP.keys()),
'fixedLength': list(FIXED_LENGTH_MAP.keys()),
'variableLength': list(VARIABLE_LENGTH_MAP.keys()),
}
AI_UNION_KEYS = [key for keys in AI_MAPS.values() for key in keys]
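# A minimal self-check sketch of how the tables above can be consumed;
# the sample values below are illustrative and not taken from this file.
if __name__ == '__main__':
    # AI '8005' (price per unit) is fixed-length N6: exactly six digits.
    assert AI_REGEX['8005'].match('123456')
    assert not AI_REGEX['8005'].match('12345')
    # Short codes map back to their numeric AIs.
    assert SHORT_CODE_TO_NUMERIC['grai'] == '8003'
    # GSRN - Recipient (8018) may be qualified by an SRIN (8019).
    assert AI_QUALIFIER['8018'] == ['8019']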
44ad3cb9ea4912758e3487784da6f1dd05117bfa | 29 | py | Python | pytclfirmware/__init__.py | mpata/pytclfirmware | 841d982e5ad93df51e05cec69fd31c4c7ce7ac2f | ["BSD-2-Clause"] | 1 star (2021-03-16)
from tclfirmware import main
44c2c4ef85e25643833660798c47e43ee5df00f2 | 109 | py | Python | descqa/__init__.py | adam-broussard/descqa | d9681bd393553c31882ec7e28e6c1c7b6e482dd3 | ["BSD-3-Clause"] | 4 stars | 136 issues | 31 forks
"""
DESCQA Validation Tests
"""
from .register import *
from .base import *
from .version import __version__
44d76c61902075027e29fe1b55566d8ca3de6379 | 340 | py | Python | challenges/fifo-animal-shelter-dir/conftest.py | tyler-fishbone/data-structures-and-algorithms | 29790f2672d3ddb0aadf62725f28180b092f4568 | ["MIT"] | 4 issues
import pytest
# from node import Node
from fifo_animal_shelter import AnimalShelter
@pytest.fixture
def three_cats_two_dogs_queue():
return AnimalShelter(['cat', 'dog', 'dog', 'cat', 'dog'])
@pytest.fixture
def empty_shelter_queue():
return AnimalShelter([])
@pytest.fixture
def one_cat_queue():
return AnimalShelter(['cat'])
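# A sketch of a test that would consume these fixtures; it belongs in a
# sibling test module rather than conftest.py, so it is left commented:
#
# def test_fixtures_build_shelters(three_cats_two_dogs_queue,
#                                  empty_shelter_queue, one_cat_queue):
#     assert three_cats_two_dogs_queue is not None
#     assert empty_shelter_queue is not None
#     assert one_cat_queue is not None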
44e913cdc095b3c4ab58bb273e5d7b2a919819af | 15,328 | py | Python | monitor_provider/app.py | bento-dbaas/monitor-provider | 247a26c3c1c5795d94ef203662c404d45274ea7a | ["BSD-3-Clause"] | 4 issues
import logging
import json
from bson import json_util
from traceback import print_exc
from flask import Flask, request, jsonify, make_response
from flask_httpauth import HTTPBasicAuth
from mongoengine import connect
from monitor_provider.providers.constants import VALID_DBMS
from monitor_provider.providers import get_provider_to
from monitor_provider.settings import (
APP_USERNAME,
APP_PASSWORD,
LOGGING_LEVEL,
MONGODB_DB,
MONGODB_PARAMS)
app = Flask(__name__)
auth = HTTPBasicAuth()
connect(MONGODB_DB, **MONGODB_PARAMS)
logging.basicConfig(
level=LOGGING_LEVEL,
format='%(asctime)s %(filename)s(%(lineno)d) %(levelname)s: %(message)s')
@auth.verify_password
def verify_password(username, password):
if APP_USERNAME and username != APP_USERNAME:
return False
if APP_PASSWORD and password != APP_PASSWORD:
return False
return True
@app.route(
"/<string:provider_name>/<string:env>/credential/new",
methods=['POST'])
@auth.login_required
def create_credential(provider_name, env):
data = json.loads(request.data or 'null')
if not data:
logging.error("No data")
        return response_invalid_request("No data")
try:
provider_cls = get_provider_to(provider_name)
provider = provider_cls(env)
success, message = provider.credential_add(data)
except Exception as e:
print_exc() # TODO Improve log
return response_invalid_request(str(e))
if not success:
return response_invalid_request(message)
return response_created(success=success, id=str(message))
@app.route(
"/<string:provider_name>/credentials",
methods=['GET'])
@auth.login_required
def get_all_credential(provider_name):
try:
provider_cls = get_provider_to(provider_name)
provider = provider_cls(None)
return make_response(
json.dumps(
list(map(lambda x: x, provider.credential.all())),
default=json_util.default
)
)
except Exception as e:
print_exc() # TODO Improve log
return response_invalid_request(str(e))
@app.route(
"/<string:provider_name>/<string:env>/credential",
methods=['GET'])
@auth.login_required
def get_credential(provider_name, env):
try:
provider_cls = get_provider_to(provider_name)
provider = provider_cls(env)
credential = provider.credential.get_by(environment=env)
except Exception as e:
print_exc() # TODO Improve log
return response_invalid_request(str(e))
if credential.count() == 0:
return response_not_found('{}/{}'.format(provider_name, env))
return make_response(json.dumps(credential[0], default=json_util.default))
@app.route("/<string:provider_name>/<string:env>/credential", methods=['PUT'])
@auth.login_required
def update_credential(provider_name, env):
return create_credential(provider_name, env)
@app.route(
"/<string:provider_name>/<string:env>/credential",
methods=['DELETE'])
@auth.login_required
def destroy_credential(provider_name, env):
try:
provider_cls = get_provider_to(provider_name)
provider = provider_cls(env)
deleted = provider.credential.delete()
except Exception as e:
print_exc() # TODO Improve log
return response_invalid_request(str(e))
if deleted['n'] > 0:
return response_ok()
return response_not_found("{}-{}".format(provider_name, env))
def response_invalid_request(error, status_code=500):
return _response(status_code, error=error)
def response_not_found(identifier):
error = "Could not found with {}".format(identifier)
return _response(404, error=error)
def response_created(status_code=201, **kwargs):
return _response(status_code, **kwargs)
def response_ok(**kwargs):
if kwargs:
return _response(200, **kwargs)
return _response(200, message="ok")
def _response(status, **kwargs):
content = jsonify(**kwargs)
return make_response(content, status)
@app.route("/<string:provider_name>/<string:env>/service/new",
methods=['POST'])
@auth.login_required
def create_service_monitor(provider_name, env):
data = json.loads(request.data or 'null')
try:
provider_cls = get_provider_to(provider_name)
provider = provider_cls(env)
service = provider.create_service_monitor(**data)
except Exception as e:
print_exc()
return response_invalid_request(str(e))
return response_created(success=True, identifier=service.identifier)
@app.route(
"/<string:provider_name>/<string:env>/service/<string:identifier_or_name>",
methods=['GET'])
@auth.login_required
def get_service_monitor(provider_name, env, identifier_or_name):
try:
provider_cls = get_provider_to(provider_name)
provider = provider_cls(env)
except Exception as e:
print_exc()
return response_invalid_request(str(e))
service = provider.get_service_monitor(identifier_or_name)
if not service:
return response_not_found(identifier_or_name)
return response_ok(**service.get_json)
@app.route(
"/<string:provider_name>/<string:env>/service/<string:identifier>",
methods=['DELETE'])
@auth.login_required
def delete_service_monitor(provider_name, env, identifier):
try:
provider_cls = get_provider_to(provider_name)
provider = provider_cls(env)
provider.delete_service_monitor(identifier)
except Exception as e:
print_exc()
return response_invalid_request(str(e))
return response_ok()
@app.route(
"/<string:provider_name>/<string:env>/host/new",
methods=['POST'])
@auth.login_required
def create_host_monitor(provider_name, env):
data = json.loads(request.data or 'null')
try:
provider_cls = get_provider_to(provider_name)
provider = provider_cls(env)
host = provider.create_host_monitor(**data)
except Exception as e:
print_exc()
return response_invalid_request(str(e))
return response_created(success=True, identifier=host.identifier)
@app.route(
"/<string:provider_name>/<string:env>/host/<string:identifier_or_name>",
methods=['GET'])
@auth.login_required
def get_host_monitor(provider_name, env, identifier_or_name):
try:
provider_cls = get_provider_to(provider_name)
provider = provider_cls(env)
except Exception as e:
print_exc()
return response_invalid_request(str(e))
host = provider.get_host_monitor(identifier_or_name)
if not host:
return response_not_found(identifier_or_name)
return response_ok(**host.get_json)
@app.route(
"/<string:provider_name>/<string:env>/host/<string:identifier>",
methods=['DELETE'])
@auth.login_required
def delete_host_monitor(provider_name, env, identifier):
try:
provider_cls = get_provider_to(provider_name)
provider = provider_cls(env)
provider.delete_host_monitor(identifier)
except Exception as e:
print_exc()
return response_invalid_request(str(e))
return response_ok()
@app.route(
"/<string:provider_name>/<string:env>/web/new",
methods=['POST'])
@auth.login_required
def create_web_monitor(provider_name, env):
data = json.loads(request.data or 'null')
try:
provider_cls = get_provider_to(provider_name)
provider = provider_cls(env)
web = provider.create_web_monitor(**data)
except Exception as e:
print_exc()
return response_invalid_request(str(e))
return response_created(success=True, identifier=web.identifier)
@app.route(
"/<string:provider_name>/<string:env>/web/<string:identifier_or_name>",
methods=['GET'])
@auth.login_required
def get_web_monitor(provider_name, env, identifier_or_name):
try:
provider_cls = get_provider_to(provider_name)
provider = provider_cls(env)
except Exception as e:
print_exc()
return response_invalid_request(str(e))
host = provider.get_web_monitor(identifier_or_name)
if not host:
return response_not_found(identifier_or_name)
return response_ok(**host.get_json)
@app.route(
"/<string:provider_name>/<string:env>/web/<string:identifier>",
methods=['DELETE'])
@auth.login_required
def delete_web_monitor(provider_name, env, identifier):
try:
provider_cls = get_provider_to(provider_name)
provider = provider_cls(env)
provider.delete_web_monitor(identifier)
except Exception as e:
print_exc()
return response_invalid_request(str(e))
return response_ok()
@app.route(
"/<string:provider_name>/<string:env>/database/<string:dbms>/new",
methods=['POST'])
@auth.login_required
def create_database_monitor(provider_name, env, dbms):
if dbms not in VALID_DBMS:
return response_invalid_request(
'Invalid database. Available options are {}'.format(list(VALID_DBMS))
)
data = json.loads(request.data or 'null')
try:
provider_cls = get_provider_to(provider_name)
provider = provider_cls(env)
monitor = provider.create_database_monitor(dbms_name=dbms, **data)
except Exception as e:
print_exc()
return response_invalid_request(str(e))
return response_created(success=True, identifier=monitor.identifier)
@app.route(
"/<string:provider_name>/<string:env>/database/<string:identifier_or_name>",
methods=['GET'])
@auth.login_required
def get_database_monitor(provider_name, env, identifier_or_name):
try:
provider_cls = get_provider_to(provider_name)
provider = provider_cls(env)
except Exception as e:
print_exc()
return response_invalid_request(str(e))
database = provider.get_database_monitor(identifier_or_name)
if not database:
return response_not_found(identifier_or_name)
return response_ok(**database.get_json)
@app.route(
"/<string:provider_name>/<string:env>/database/<string:database_name>",
methods=['DELETE'])
@auth.login_required
def delete_database_monitor(provider_name, env, database_name):
try:
provider_cls = get_provider_to(provider_name)
provider = provider_cls(env)
provider.delete_database_monitor(database_name)
except Exception as e:
print_exc()
return response_invalid_request(str(e))
return response_ok()
@app.route(
"/<string:provider_name>/<string:env>/instance/<string:dbms>/new",
methods=['POST'])
@auth.login_required
def create_instance_monitor(provider_name, env, dbms):
if dbms not in VALID_DBMS:
return response_invalid_request(
'Invalid database. Available options are {}'.format(list(VALID_DBMS))
)
data = json.loads(request.data or 'null')
try:
provider_cls = get_provider_to(provider_name)
provider = provider_cls(env)
monitor = provider.create_instance_monitor(dbms_name=dbms, **data)
except Exception as e:
print_exc()
return response_invalid_request(str(e))
return response_created(success=True, identifier=monitor.identifier)
@app.route(
"/<string:provider_name>/<string:env>/instance/<string:identifier_or_name>",
methods=['GET'])
@auth.login_required
def get_instance_monitor(provider_name, env, identifier_or_name):
try:
provider_cls = get_provider_to(provider_name)
provider = provider_cls(env)
except Exception as e:
print_exc()
return response_invalid_request(str(e))
database = provider.get_instance_monitor(identifier_or_name)
if not database:
return response_not_found(identifier_or_name)
return response_ok(**database.get_json)
@app.route(
"/<string:provider_name>/<string:env>/instance/<string:instance_name>",
methods=['DELETE'])
@auth.login_required
def delete_instance_monitor(provider_name, env, instance_name):
try:
provider_cls = get_provider_to(provider_name)
provider = provider_cls(env)
provider.delete_instance_monitor(instance_name)
except Exception as e:
print_exc()
return response_invalid_request(str(e))
return response_ok()
@app.route(
"/<string:provider_name>/<string:env>/tcp/new",
methods=['POST'])
@auth.login_required
def create_tcp_monitor(provider_name, env):
data = json.loads(request.data or 'null')
try:
provider_cls = get_provider_to(provider_name)
provider = provider_cls(env)
tcp = provider.create_tcp_monitor(**data)
except Exception as e:
print_exc()
return response_invalid_request(str(e))
return response_created(success=True, identifier=tcp.identifier)
@app.route(
"/<string:provider_name>/<string:env>/tcp/<string:identifier_or_name>",
methods=['GET'])
@auth.login_required
def get_tcp_monitor(provider_name, env, identifier_or_name):
try:
provider_cls = get_provider_to(provider_name)
provider = provider_cls(env)
except Exception as e:
print_exc()
return response_invalid_request(str(e))
tcp = provider.get_tcp_monitor(identifier_or_name)
if not tcp:
return response_not_found(identifier_or_name)
return response_ok(**tcp.get_json)
@app.route(
"/<string:provider_name>/<string:env>/tcp/<string:identifier>",
methods=['DELETE'])
@auth.login_required
def delete_tcp_monitor(provider_name, env, identifier):
try:
provider_cls = get_provider_to(provider_name)
provider = provider_cls(env)
provider.delete_tcp_monitor(identifier)
except Exception as e:
print_exc()
return response_invalid_request(str(e))
return response_ok()
@app.route(
"/<string:provider_name>/<string:env>/mysql/new",
methods=['POST'])
@auth.login_required
def create_mysql_monitor(provider_name, env):
data = json.loads(request.data or 'null')
try:
provider_cls = get_provider_to(provider_name)
provider = provider_cls(env)
db = provider.create_mysql_monitor(**data)
except Exception as e:
print_exc()
return response_invalid_request(str(e))
return response_created(success=True, identifier=db.identifier)
@app.route(
"/<string:provider_name>/<string:env>/mysql/<string:identifier_or_name>",
methods=['GET'])
@auth.login_required
def get_mysql_monitor(provider_name, env, identifier_or_name):
try:
provider_cls = get_provider_to(provider_name)
provider = provider_cls(env)
except Exception as e:
print_exc()
return response_invalid_request(str(e))
db = provider.get_mysql_monitor(identifier_or_name)
if not db:
return response_not_found(identifier_or_name)
return response_ok(**db.get_json)
@app.route(
"/<string:provider_name>/<string:env>/mysql/<string:identifier>",
methods=['DELETE'])
@auth.login_required
def delete_mysql_monitor(provider_name, env, identifier):
try:
provider_cls = get_provider_to(provider_name)
provider = provider_cls(env)
provider.delete_mysql_monitor(identifier)
except Exception as e:
print_exc()
return response_invalid_request(str(e))
return response_ok()
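# A hedged client-side sketch for exercising the routes above over HTTP
# basic auth. The host, port, provider name ('zabbix') and credentials
# are assumptions, and `requests` is a third-party package that this
# module does not itself import:
#
# import requests
# resp = requests.post(
#     'http://localhost:5000/zabbix/dev/credential/new',
#     auth=('user', 'password'),
#     json={'user': 'api', 'password': 'secret'},
# )
# print(resp.status_code, resp.json())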
7847458e3464dc1457d950b5e42bda20b8cf9493 | 98 | py | Python | mock applications/mock/mock/ecom/admin.py | SuryaVamsiKrishna/Inner-Pieces | deb9e83af891dac58966230446a5a32fe10e86f2 | ["MIT"] | 1 star (2021-02-17)
from django.contrib import admin
from .models import User_info
admin.site.register(User_info)
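# A hedged alternative using a ModelAdmin subclass (the decorator form is
# standard Django; User_info's fields are not shown here, so list_display
# sticks to the implicit primary key). Left commented because the model
# is already registered above:
#
# @admin.register(User_info)
# class UserInfoAdmin(admin.ModelAdmin):
#     list_display = ('id',)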
788a17b872f97d4b8b49d8c263387fbdbffc3643 | 44 | py | Python | dataloader/__init__.py | lucamocerino/Binary-Neural-Networks-PyTorch-1.0 | aa62f5449e4f64bc821aea4d9921572e8dca8037 | ["MIT"] | 22 stars | 3 issues | 2 forks
from .cifar10 import *
from .mnist import *
15a7ac3cb0ee6367aa863bd5bc307604fa12b7cc | 81 | py | Python | pyinpoly/__init__.py | mvonlanthen/pyinpoly | 32d71af9f22366d49edf1ffc8c164434ca734623 | ["MIT"]
from .py_core import pts_in_polygon, pts_in_polygon_py
# from .rs_core import *
ec64cbb7a6059fc6411f009a12371dd6dc7a19b5 | 134 | py | Python | rubin_sim/maf/maps/__init__.py | RileyWClarke/flarubin | eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a | ["MIT"]
from .baseMap import *
from .dustMap import *
from .galCoordsMap import *
from .stellarDensityMap import *
from .trilegalMap import *
01c23c34ab59b0b4f5dbb504a8616f8adb0b484b | 2,682 | py | Python | tests/unit/converters/test_defaults_converters.py | FlyingBird95/openapi_generator | df4649b9723eb89fa370b02220356b7596794069 | ["MIT"] | 3 stars | 6 issues | 2 forks
import enum
import pytest
from marshmallow import fields
@pytest.mark.parametrize(
"marshmallow_fields",
[
{"field": fields.String(dump_default="abc")},
{"field": fields.String(load_default="abc")},
],
)
@pytest.mark.usefixtures("get_with_marshmallow_schema")
def test_vanilla_converter(http, open_api_documentation):
http.get("/get_with_marshmallow_schema")
configuration = open_api_documentation.get_specification()
properties = configuration["components"]["schemas"]["GeneratedSchema"]["properties"]
assert properties["field"] == {"type": "string", "default": "abc"}
@pytest.mark.parametrize(
"marshmallow_fields", [{"field": fields.List(fields.String(), dump_default=[])}]
)
@pytest.mark.usefixtures("get_with_marshmallow_schema")
def test_list_converter(http, open_api_documentation):
http.get("/get_with_marshmallow_schema")
configuration = open_api_documentation.get_specification()
properties = configuration["components"]["schemas"]["GeneratedSchema"]["properties"]
assert properties["field"] == {"type": "array", "default": []}
@pytest.mark.parametrize(
"marshmallow_fields", [{"field": fields.String(dump_default=lambda: "abc")}]
)
@pytest.mark.usefixtures("get_with_marshmallow_schema")
def test_callable_converter(http, open_api_documentation):
http.get("/get_with_marshmallow_schema")
configuration = open_api_documentation.get_specification()
properties = configuration["components"]["schemas"]["GeneratedSchema"]["properties"]
assert properties["field"] == {"type": "string", "default": "abc"}
class MyEnum(enum.Enum):
first_value = "first_value"
second_value = "second_value"
@pytest.mark.parametrize(
"marshmallow_fields", [{"field": fields.String(dump_default=MyEnum.first_value)}]
)
@pytest.mark.usefixtures("get_with_marshmallow_schema")
def test_enum_converter(http, open_api_documentation):
http.get("/get_with_marshmallow_schema")
configuration = open_api_documentation.get_specification()
properties = configuration["components"]["schemas"]["GeneratedSchema"]["properties"]
assert properties["field"] == {"type": "string", "default": "first_value"}
@pytest.mark.parametrize(
"marshmallow_fields", [{"field": fields.String(dump_default=None)}]
)
@pytest.mark.usefixtures("get_with_marshmallow_schema")
def test_none_converter(http, open_api_documentation):
http.get("/get_with_marshmallow_schema")
configuration = open_api_documentation.get_specification()
properties = configuration["components"]["schemas"]["GeneratedSchema"]["properties"]
assert properties["field"] == {"type": "string", "default": None}
01d0a30188cb6c88d7f05715ea2d1544aeb02a64 | 106 | py | Python | django-like/app_one/views.py | mirokrastev/flask-structures | fb7ad464c3df85be2e66229b2fd99a8da903b3c9 | ["MIT"] | 1 star (2021-05-06)
def app_one_index():
return 'App One Index works!'
def main_index():
return 'Main Index works!'
01d36f3f09bd6ccb3209827e385ae45a8eb0b3bd | 263 | py | Python | waves_litecoin_gateway/test/__init__.py | jansenmarc/WavesGatewayLTCExample | 14aaf9de5740ce32d175efa413f0060561421c93 | ["MIT"] | 8 stars | 6 issues | 12 forks
"""WavesGatewayLTCExample Tests"""
from .test_litecoin_gateway import *
from .test_litecoin_chain_query_service import *
from .test_litecoin_integer_converter_service import *
from .test_litecoin_transaction_service import *
bf1ba2388b9f9b460c81cf746f12eabc3d87f520 | 14,959 | py | Python | tests/app/views/test_suppliers.py | pebblecode/cirrus-buyer-frontend | 506c45eab09fa9538c0eb05643e24feecdcca56f | ["MIT"]
# coding: utf-8
import mock
from nose.tools import assert_equal, assert_true, assert_false
from ...helpers import BaseApplicationTest
from dmapiclient import APIError
class TestSuppliersPage(BaseApplicationTest):
def setup(self):
super(TestSuppliersPage, self).setup()
self._data_api_client = mock.patch(
'app.main.suppliers.data_api_client'
).start()
self.suppliers_by_prefix = self._get_suppliers_by_prefix_fixture_data() # noqa
self.suppliers_by_prefix_page_2 = self._get_suppliers_by_prefix_fixture_data_page_2() # noqa
self.suppliers_by_prefix_next_and_prev = self._get_suppliers_by_prefix_fixture_with_next_and_prev() # noqa
self.supplier = self._get_supplier_fixture_data() # noqa
self.supplier_with_minimum_data = self._get_supplier_with_minimum_fixture_data() # noqa
self._data_api_client.find_suppliers.return_value = self.suppliers_by_prefix # noqa
self._data_api_client.get_supplier.return_value = self.supplier # noqa
def teardown(self):
self._data_api_client.stop()
def test_should_call_api_with_correct_params(self):
self.client.get('/g-cloud/suppliers')
self._data_api_client.find_suppliers.assert_called_once_with('A', 1, 'g-cloud')
def test_should_show_suppliers_prefixed_by_a_default(self):
res = self.client.get('/g-cloud/suppliers')
assert_equal(200, res.status_code)
assert_true(
self._strip_whitespace('<li class="selected"><span class="visuallyhidden">Suppliers starting with </span><strong>A</strong></li>') # noqa
in self._strip_whitespace(res.get_data(as_text=True)))
def test_should_show_suppliers_prefixed_by_a_param(self):
res = self.client.get('/g-cloud/suppliers?prefix=M')
self._data_api_client.find_suppliers.assert_called_once_with('M', 1, 'g-cloud')
assert_equal(200, res.status_code)
assert_true(
self._strip_whitespace('<li class="selected"><span class="visuallyhidden">Suppliers starting with </span><strong>M</strong></li>') # noqa
in self._strip_whitespace(res.get_data(as_text=True)))
def test_should_use_uppercase_prefix(self):
res = self.client.get('/g-cloud/suppliers?prefix=b')
assert_equal(200, res.status_code)
assert_true(
self._strip_whitespace('<li class="selected"><span class="visuallyhidden">Suppliers starting with </span><strong>B</strong></li>') # noqa
in self._strip_whitespace(res.get_data(as_text=True)))
def test_should_use_default_if_invalid(self):
res = self.client.get('/g-cloud/suppliers?prefix=+')
self._data_api_client.find_suppliers.assert_called_once_with('A', 1, 'g-cloud')
assert_equal(200, res.status_code)
assert_true(
self._strip_whitespace('<li class="selected"><span class="visuallyhidden">Suppliers starting with </span><strong>A</strong></li>') # noqa
in self._strip_whitespace(res.get_data(as_text=True)))
def test_should_use_default_if_multichar_prefix(self):
res = self.client.get('/g-cloud/suppliers?prefix=Prefix')
self._data_api_client.find_suppliers.assert_called_once_with('A', 1, 'g-cloud')
assert_equal(200, res.status_code)
assert_true(
self._strip_whitespace('<li class="selected"><span class="visuallyhidden">Suppliers starting with </span><strong>A</strong></li>') # noqa
in self._strip_whitespace(res.get_data(as_text=True)))
def test_should_use_number_range_prefix(self):
res = self.client.get('/g-cloud/suppliers?prefix=other')
self._data_api_client.find_suppliers.assert_called_once_with(u'other', 1, 'g-cloud')
assert_equal(200, res.status_code)
assert_true(
self._strip_whitespace(u'<li class="selected"><span class="visuallyhidden">Suppliers starting with </span><strong>1–9</strong></li>') # noqa
in self._strip_whitespace(res.get_data(as_text=True)))
def test_should_show_supplier_names_link_and_description(self):
res = self.client.get('/g-cloud/suppliers')
assert_equal(200, res.status_code)
supplier_html = self._strip_whitespace('''
<div class="search-result">
<h2 class="search-result-title">
<a href="/g-cloud/supplier/586559">ABM UNITED KINGDOM LTD</a>
</h2>
<p class="search-result-excerpt">
We specialise in the development of intelligence and investigative software across law enforcement agencies, public sector and commercial organisations. We provide solutions to clients across the globe, including the United Kingdom, Australia, USA, Canada and Europe.
</p>
</div>''') # noqa
assert_true(
supplier_html
in self._strip_whitespace(res.get_data(as_text=True)))
def test_should_show_a_t_z_nav(self):
res = self.client.get('/g-cloud/suppliers')
assert_equal(200, res.status_code)
supplier_html = self._strip_whitespace(u'''
<li class="selected"><span class="visuallyhidden">Suppliers starting with </span><strong>A</strong></li>
<li><span class="visuallyhidden">Suppliers starting with </span><a href="/g-cloud/suppliers?prefix=B">B</a></li>
<li><span class="visuallyhidden">Suppliers starting with </span><a href="/g-cloud/suppliers?prefix=C">C</a></li>
<li><span class="visuallyhidden">Suppliers starting with </span><a href="/g-cloud/suppliers?prefix=D">D</a></li>
<li><span class="visuallyhidden">Suppliers starting with </span><a href="/g-cloud/suppliers?prefix=E">E</a></li>
<li><span class="visuallyhidden">Suppliers starting with </span><a href="/g-cloud/suppliers?prefix=F">F</a></li>
<li><span class="visuallyhidden">Suppliers starting with </span><a href="/g-cloud/suppliers?prefix=G">G</a></li>
<li><span class="visuallyhidden">Suppliers starting with </span><a href="/g-cloud/suppliers?prefix=H">H</a></li>
<li><span class="visuallyhidden">Suppliers starting with </span><a href="/g-cloud/suppliers?prefix=I">I</a></li>
<li><span class="visuallyhidden">Suppliers starting with </span><a href="/g-cloud/suppliers?prefix=J">J</a></li>
<li><span class="visuallyhidden">Suppliers starting with </span><a href="/g-cloud/suppliers?prefix=K">K</a></li>
<li><span class="visuallyhidden">Suppliers starting with </span><a href="/g-cloud/suppliers?prefix=L">L</a></li>
<li><span class="visuallyhidden">Suppliers starting with </span><a href="/g-cloud/suppliers?prefix=M">M</a></li>
<li><span class="visuallyhidden">Suppliers starting with </span><a href="/g-cloud/suppliers?prefix=N">N</a></li>
<li><span class="visuallyhidden">Suppliers starting with </span><a href="/g-cloud/suppliers?prefix=O">O</a></li>
<li><span class="visuallyhidden">Suppliers starting with </span><a href="/g-cloud/suppliers?prefix=P">P</a></li>
<li><span class="visuallyhidden">Suppliers starting with </span><a href="/g-cloud/suppliers?prefix=Q">Q</a></li>
<li><span class="visuallyhidden">Suppliers starting with </span><a href="/g-cloud/suppliers?prefix=R">R</a></li>
<li><span class="visuallyhidden">Suppliers starting with </span><a href="/g-cloud/suppliers?prefix=S">S</a></li>
<li><span class="visuallyhidden">Suppliers starting with </span><a href="/g-cloud/suppliers?prefix=T">T</a></li>
<li><span class="visuallyhidden">Suppliers starting with </span><a href="/g-cloud/suppliers?prefix=U">U</a></li>
<li><span class="visuallyhidden">Suppliers starting with </span><a href="/g-cloud/suppliers?prefix=V">V</a></li>
<li><span class="visuallyhidden">Suppliers starting with </span><a href="/g-cloud/suppliers?prefix=W">W</a></li>
<li><span class="visuallyhidden">Suppliers starting with </span><a href="/g-cloud/suppliers?prefix=X">X</a></li>
<li><span class="visuallyhidden">Suppliers starting with </span><a href="/g-cloud/suppliers?prefix=Y">Y</a></li>
<li><span class="visuallyhidden">Suppliers starting with </span><a href="/g-cloud/suppliers?prefix=Z">Z</a></li>
<li><span class="visuallyhidden">Suppliers starting with </span><a href="/g-cloud/suppliers?prefix=other">1–9</a></li>
''') # noqa
assert_true(
supplier_html
in self._strip_whitespace(res.get_data(as_text=True)))
def test_should_show_no_suppliers_page_if_api_returns_404(self):
self._data_api_client.find_suppliers.side_effect = APIError(mock.Mock(status_code=404))
res = self.client.get('/g-cloud/suppliers')
assert_equal(404, res.status_code)
def test_should_show_next_page_on_supplier_list(self):
res = self.client.get('/g-cloud/suppliers')
assert_equal(200, res.status_code)
html_tag = '<li class="next">'
html_link = '<a href="/g-cloud/suppliers?'
html_prefix = 'prefix=A'
html_page = 'page=2'
assert_true(
html_tag
in res.get_data(as_text=True)
)
assert_true(
html_prefix
in res.get_data(as_text=True)
)
assert_true(
html_link
in res.get_data(as_text=True)
)
assert_true(
html_page
in res.get_data(as_text=True)
)
def test_should_show_next_nav_on_supplier_list(self):
self._data_api_client.find_suppliers.return_value = self.suppliers_by_prefix_page_2 # noqa
res = self.client.get('/g-cloud/suppliers?page=2')
self._data_api_client.find_suppliers.assert_called_once_with('A', 2, 'g-cloud')
assert_equal(200, res.status_code)
html_tag = '<li class="previous">'
html_link = '<a href="/g-cloud/suppliers?'
html_prefix = 'prefix=A'
html_page = 'page=1'
assert_true(
html_tag
in res.get_data(as_text=True)
)
assert_true(
html_prefix
in res.get_data(as_text=True)
)
assert_true(
html_link
in res.get_data(as_text=True)
)
assert_true(
html_page
in res.get_data(as_text=True)
)
def test_should_show_next_and_prev_nav_on_supplier_list(self):
self._data_api_client.find_suppliers.return_value = self.suppliers_by_prefix_next_and_prev # noqa
res = self.client.get('/g-cloud/suppliers?page=2')
assert_equal(200, res.status_code)
previous_html_tag = '<li class="previous">'
previous_html_link = '<a href="/g-cloud/suppliers?'
previous_html_prefix = 'prefix=A'
previous_html_page = 'page=1'
assert_true(
previous_html_tag
in res.get_data(as_text=True)
)
assert_true(
previous_html_prefix
in res.get_data(as_text=True)
)
assert_true(
previous_html_link
in res.get_data(as_text=True)
)
assert_true(
previous_html_page
in res.get_data(as_text=True)
)
next_html_tag = '<li class="next">'
next_html_link = '<a href="/g-cloud/suppliers?'
next_html_prefix = 'prefix=A'
next_html_page = 'page=3'
assert_true(
next_html_tag
in res.get_data(as_text=True)
)
assert_true(
next_html_link
in res.get_data(as_text=True)
)
assert_true(
next_html_prefix
in res.get_data(as_text=True)
)
assert_true(
next_html_page
in res.get_data(as_text=True)
)
def test_should_have_supplier_details_on_supplier_page(self):
res = self.client.get('/g-cloud/supplier/92191')
assert_equal(200, res.status_code)
assert_true(
'<h1>ExampleCompanyLimited</h1>'
in self._strip_whitespace(res.get_data(as_text=True))
)
assert_true(
"Example Company Limited is an innovation station sensation; we deliver software so bleeding edge you literally won't be able to run any of it on your systems." # noqa
in res.get_data(as_text=True))
def test_should_show_supplier_with_no_desc_or_clients(self):
self._data_api_client.get_supplier.return_value = self.supplier_with_minimum_data # noqa
res = self.client.get('/g-cloud/supplier/92191')
assert_equal(200, res.status_code)
assert_true(
'<h1>ExampleCompanyLimited</h1>'
in self._strip_whitespace(res.get_data(as_text=True)))
assert_false(
self._strip_whitespace("<h2>Clients</h2>")
in self._strip_whitespace(res.get_data(as_text=True)))
def test_should_have_supplier_contact_details_on_supplier_page(self):
res = self.client.get('/g-cloud/supplier/92191')
assert_equal(200, res.status_code)
assert_true(
self._strip_whitespace('<span itemprop="name">John Example</span>')
in self._strip_whitespace(res.get_data(as_text=True)))
assert_true(
self._strip_whitespace('<span itemprop="telephone">07309404738</span>')
in self._strip_whitespace(res.get_data(as_text=True)))
email_html = '''<a href="mailto:j@examplecompany.biz"
data-event-category="Email a supplier"
data-event-label="Example Company Limited">j@examplecompany.biz</a>'''
assert_true(
self._strip_whitespace(email_html)
in self._strip_whitespace(res.get_data(as_text=True)))
def test_should_have_minimum_supplier_contact_details_on_supplier_page(self):
self._data_api_client.get_supplier.return_value = self.supplier_with_minimum_data # noqa
res = self.client.get('/g-cloud/supplier/92191')
assert_equal(200, res.status_code)
assert_true(
self._strip_whitespace('<span itemprop="name">John Example</span>')
in self._strip_whitespace(res.get_data(as_text=True)))
email_html = '''<a href="mailto:j@examplecompany.biz"
data-event-category="Email a supplier"
data-event-label="Example Company Limited">j@examplecompany.biz</a>'''
assert_true(
self._strip_whitespace(email_html)
in self._strip_whitespace(res.get_data(as_text=True)))
def test_should_not_show_web_address(self):
res = self.client.get('/g-cloud/supplier/92191')
assert_false(
'www.examplecompany.biz'
in res.get_data(as_text=True)
)
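# `_strip_whitespace` is inherited from BaseApplicationTest (not shown
# here); a minimal sketch of the behaviour these assertions depend on:
#
# import re
#
# def _strip_whitespace(self, text):
#     return re.sub(r'\s+', '', text)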
175029925396a5dc7295af6c34143fd78920b3bb | 34 | py | Python | ScoutingWebsite/Scouting2016/models.py | ArcticWarriors/scouting-app | 3411dfc6ddca3728889460cc372716847fff5939 | ["MIT"] | 4 stars | 9 issues | 3 forks
from Scouting2016.model import *
1764faa2867d123cc063ee9062c708402e06d673 | 89 | py | Python | extlibs/freetype-2.5.2/src/tools/PaxHeaders.20920/chktrcmp.py | halak/bibim | ad01efa8aac4f074f64bf033ac0f1ed382060334 | ["curl"] | 3 stars | 8 issues | 1 fork
30 mtime=1384255857.233983368
29 atime=1386526222.15050211
30 ctime=1384255857.233983368
bd60ea080bf8aeb228b2b913a747537eb5631098 | 1,529 | py | Python | xarrayutils/test/test_build_grids.py | cspencerjones/xarrayutils | 6c33e83b830b7586693366c520a54c1122194d50 | ["MIT"] | 40 stars | 88 issues | 14 forks
import pytest
xgcm = pytest.importorskip("xgcm")
from xarrayutils.build_grids import rebuild_grid
from numpy.testing import assert_allclose
from .datasets import datagrid_dimtest, datagrid_dimtest_ll
@pytest.mark.parametrize(
"test_coord",
["i", "j", "i_g", "j_g", "XC", "XG", "YC", "YG", "dxC", "dxG", "dyC", "dyG"],
)
# TODO This should be able to read all coord variables from the dataset
# so it's not hardcoded, but I can't get it to work
def test_rebuild_grid(datagrid_dimtest, test_coord):
a = datagrid_dimtest
coords = a.coords.keys()
coords_stripped = [x for x in coords if x not in ["i", "j", "XC", "YC"]]
stripped = a.drop(coords_stripped)
b = rebuild_grid(stripped, x_wrap=360.0, y_wrap=180.0, ll_dist=False)
assert b[test_coord].dims == a[test_coord].dims
assert_allclose(b[test_coord].data, a[test_coord].data)
@pytest.mark.parametrize(
"test_coord",
["i", "j", "i_g", "j_g", "XC", "XG", "YC", "YG", "dxC", "dxG", "dyC", "dyG"],
)
# TODO This should be able to read all coord variables from the dataset
# so it's not hardcoded, but I can't get it to work
def test_rebuild_grid_ll(datagrid_dimtest_ll, test_coord):
a = datagrid_dimtest_ll
coords = a.coords.keys()
coords_stripped = [x for x in coords if x not in ["i", "j", "XC", "YC"]]
stripped = a.drop(coords_stripped)
b = rebuild_grid(stripped, x_wrap=360.0, y_wrap=180.0, ll_dist=True)
assert b[test_coord].dims == a[test_coord].dims
assert_allclose(b[test_coord].data, a[test_coord].data)
bdda90522cb67974c7cdb832fc86f3d4987d72bf | 226 | py | Python | PC/ContextualAssistance/InputOutput/Speak.py | limvi-licef/GoalFormulationAssistanceAR | ed9c33c874a555b1b9e6638ac08d449a685488bc | ["Apache-2.0"] | 1 fork (2020-08-14)
# coding: utf-8
import InputOutput as io
############################################################################
class Speak(io.Output):
"""
Speak output that inherits from Output class.
"""
pass
bdf33276fff846e5432b955d99253c0a9a8db878 | 592 | py | Python | aula11a.py | marvincosmo/Python-Curso-em-Video | 47ee3dd6423835e7bca159ffd7ee796423569176 | ["MIT"]
""" Teste de cores no terminal"""
print('\33[30;41m Teste \33[m', end=' ')
print('\33[4;33;44m Teste \33[m', end=' ')
print('\33[1;35;43m Teste \33[m', end=' ')
print('\33[30;42m Teste \33[m', end=' ')
print('\33[m Teste ', end=' ')
print('\33[37;107m Teste \33[m', end=' ')
print('\33[7;30m Teste \33[m', end=' ')
print('\33[97;40m Teste \33[m\n')
print('\33[1;31;43m Olá, Mundo! \33[m', end=' ')
print('\33[4;30;45m Olá, Mundo! \33[m', end=' ')
print('\33[7;30m Olá, Mundo! \33[m', end=' ')
print('\33[0;33;44m Olá, Mundo! \33[m', end=' ')
print('\33[7;33;44m Olá, Mundo! \33[m', end=' ')
da1bbfe97ad25b77f45f2af9a2ac7acfe7b388cd | 21,657 | py | Python | tests/test_predefined_dynamical_decoupling.py | robo2323/python-open-controls | bdd499c3c04cd0485d7804dbdc2f83cdf6984f0e | ["Apache-2.0"]
# Copyright 2019 Q-CTRL Pty Ltd & Q-CTRL Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
========================
Tests for Predefined DDS
========================
"""
import numpy as np
import pytest
from qctrlopencontrols.exceptions.exceptions import ArgumentsValueError
from qctrlopencontrols import new_predefined_dds
from qctrlopencontrols.dynamic_decoupling_sequences import (
SPIN_ECHO, CARR_PURCELL, CARR_PURCELL_MEIBOOM_GILL,
WALSH_SINGLE_AXIS, PERIODIC_SINGLE_AXIS,
UHRIG_SINGLE_AXIS, QUADRATIC, X_CONCATENATED,
XY_CONCATENATED)
def test_ramsey():
"""Tests Ramsey sequence
"""
duration = 10.
sequence = new_predefined_dds(
scheme='Ramsey',
duration=duration)
_offsets = np.array([])
_rabi_rotations = np.array([])
_azimuthal_angles = np.array([])
_detuning_rotations = np.array([])
assert np.allclose(_offsets, sequence.offsets)
assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
assert np.allclose(_detuning_rotations, sequence.detuning_rotations)
sequence = new_predefined_dds(
scheme='Ramsey',
duration=duration,
pre_post_rotation=True)
_rabi_rotations = np.array([np.pi/2, np.pi/2])
_azimuthal_angles = np.array([0., 0.])
_detuning_rotations = np.array([0., 0.])
assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
assert np.allclose(_detuning_rotations, sequence.detuning_rotations)
def test_spin_echo():
"""
Test for Spin Echo Sequence
"""
duration = 10.
sequence = new_predefined_dds(
scheme=SPIN_ECHO,
duration=duration)
_offsets = np.array([duration/2.])
_rabi_rotations = np.array([np.pi])
_azimuthal_angles = np.array([0])
_detuning_rotations = np.array([0])
assert np.allclose(_offsets, sequence.offsets)
assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
assert np.allclose(_detuning_rotations, sequence.detuning_rotations)
sequence = new_predefined_dds(
scheme=SPIN_ECHO,
duration=duration,
pre_post_rotation=True)
_offsets = np.array([0, duration / 2., duration])
_rabi_rotations = np.array([np.pi/2, np.pi, np.pi/2])
_azimuthal_angles = np.array([0, 0, 0])
_detuning_rotations = np.array([0, 0, 0])
assert np.allclose(_offsets, sequence.offsets)
assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
assert np.allclose(_detuning_rotations, sequence.detuning_rotations)
def test_carr_purcell():
"""
Test for Carr-Purcell (CP) sequence
"""
duration = 10.
number_of_offsets = 4
sequence = new_predefined_dds(
scheme=CARR_PURCELL,
duration=duration,
number_of_offsets=number_of_offsets)
_spacing = duration/number_of_offsets
_offsets = np.array([_spacing*0.5, _spacing*0.5+_spacing,
_spacing*0.5+2*_spacing, _spacing*0.5+3*_spacing])
_rabi_rotations = np.array([np.pi, np.pi, np.pi, np.pi])
_azimuthal_angles = np.array([0, 0, 0, 0])
_detuning_rotations = np.array([0, 0, 0, 0])
assert np.allclose(_offsets, sequence.offsets)
assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
assert np.allclose(_detuning_rotations, sequence.detuning_rotations)
sequence = new_predefined_dds(
scheme=CARR_PURCELL,
duration=duration,
number_of_offsets=number_of_offsets,
pre_post_rotation=True)
_offsets = np.array([0, _spacing * 0.5, _spacing * 0.5 + _spacing,
_spacing * 0.5 + 2 * _spacing, _spacing * 0.5 + 3 * _spacing,
duration])
_rabi_rotations = np.array([np.pi/2, np.pi, np.pi, np.pi, np.pi, np.pi/2])
_azimuthal_angles = np.array([0, 0, 0, 0, 0, 0])
_detuning_rotations = np.array([0, 0, 0, 0, 0, 0])
assert np.allclose(_offsets, sequence.offsets)
assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
assert np.allclose(_detuning_rotations, sequence.detuning_rotations)
def test_carr_purcell_meiboom_gill_sequence():  # pylint: disable=invalid-name
    """
    Test for the Carr-Purcell-Meiboom-Gill (CPMG) sequence
    """
duration = 10.
number_of_offsets = 4
sequence = new_predefined_dds(
scheme=CARR_PURCELL_MEIBOOM_GILL,
duration=duration,
number_of_offsets=number_of_offsets)
_spacing = duration/number_of_offsets
_offsets = np.array([_spacing*0.5, _spacing*0.5+_spacing,
_spacing*0.5+2*_spacing, _spacing*0.5+3*_spacing])
_rabi_rotations = np.array([np.pi, np.pi, np.pi, np.pi])
_azimuthal_angles = np.array([np.pi/2, np.pi/2, np.pi/2, np.pi/2])
_detuning_rotations = np.array([0, 0, 0, 0])
assert np.allclose(_offsets, sequence.offsets)
assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
assert np.allclose(_detuning_rotations, sequence.detuning_rotations)
sequence = new_predefined_dds(
scheme=CARR_PURCELL_MEIBOOM_GILL,
duration=duration,
number_of_offsets=number_of_offsets,
pre_post_rotation=True)
_offsets = np.array([0, _spacing * 0.5, _spacing * 0.5 + _spacing,
_spacing * 0.5 + 2 * _spacing, _spacing * 0.5 + 3 * _spacing, duration])
_rabi_rotations = np.array([np.pi/2, np.pi, np.pi, np.pi, np.pi, np.pi/2])
_azimuthal_angles = np.array([0, np.pi / 2, np.pi / 2, np.pi / 2, np.pi / 2, 0])
_detuning_rotations = np.array([0, 0, 0, 0, 0, 0])
assert np.allclose(_offsets, sequence.offsets)
assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
assert np.allclose(_detuning_rotations, sequence.detuning_rotations)
def test_uhrig_single_axis_sequence():
"""
Test for Uhrig Single Axis Sequence
"""
duration = 10.
number_of_offsets = 4
sequence = new_predefined_dds(
scheme=UHRIG_SINGLE_AXIS,
duration=duration,
number_of_offsets=number_of_offsets)
constant = 0.5 / (number_of_offsets+1)
_delta_positions = [duration*(np.sin(np.pi*(k+1)*constant))**2
for k in range(number_of_offsets)]
_offsets = np.array(_delta_positions)
_rabi_rotations = np.array([np.pi, np.pi, np.pi, np.pi])
_azimuthal_angles = np.array([np.pi/2, np.pi/2, np.pi/2, np.pi/2])
_detuning_rotations = np.array([0, 0, 0, 0])
assert np.allclose(_offsets, sequence.offsets)
assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
assert np.allclose(_detuning_rotations, sequence.detuning_rotations)
sequence = new_predefined_dds(
scheme=UHRIG_SINGLE_AXIS,
duration=duration,
number_of_offsets=number_of_offsets,
pre_post_rotation=True)
_offsets = np.array(_delta_positions)
_offsets = np.insert(_offsets,
[0, _offsets.shape[0]], # pylint: disable=unsubscriptable-object
[0, duration])
_rabi_rotations = np.array([np.pi/2, np.pi, np.pi, np.pi, np.pi, np.pi/2])
_azimuthal_angles = np.array([0., np.pi / 2, np.pi / 2, np.pi / 2, np.pi / 2, 0.])
_detuning_rotations = np.array([0., 0, 0, 0, 0, 0.])
assert np.allclose(_offsets, sequence.offsets)
assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
assert np.allclose(_detuning_rotations, sequence.detuning_rotations)
def test_periodic_single_axis_sequence(): # pylint: disable=invalid-name
"""
Test for Periodic Single Axis Sequence
"""
duration = 10.
number_of_offsets = 4
sequence = new_predefined_dds(
scheme=PERIODIC_SINGLE_AXIS,
duration=duration,
number_of_offsets=number_of_offsets)
constant = 1 / (number_of_offsets+1)
# prepare the offsets for delta comb
_delta_positions = [duration*k * constant for k in range(1, number_of_offsets + 1)]
_offsets = np.array(_delta_positions)
_rabi_rotations = np.array([np.pi, np.pi, np.pi, np.pi])
_azimuthal_angles = np.array([0, 0, 0, 0])
_detuning_rotations = np.array([0, 0, 0, 0])
assert np.allclose(_offsets, sequence.offsets)
assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
assert np.allclose(_detuning_rotations, sequence.detuning_rotations)
sequence = new_predefined_dds(
scheme=PERIODIC_SINGLE_AXIS,
duration=duration,
number_of_offsets=number_of_offsets,
pre_post_rotation=True)
_offsets = np.array(_delta_positions)
_offsets = np.insert(_offsets,
[0, _offsets.shape[0]], # pylint: disable=unsubscriptable-object
[0, duration])
_rabi_rotations = np.array([np.pi/2, np.pi, np.pi, np.pi, np.pi, np.pi/2])
_azimuthal_angles = np.array([0, 0, 0, 0, 0, 0])
_detuning_rotations = np.array([0, 0, 0, 0, 0, 0])
assert np.allclose(_offsets, sequence.offsets)
assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
assert np.allclose(_detuning_rotations, sequence.detuning_rotations)
def test_walsh_single_axis_sequence():
    """
    Test for Walsh Single Axis Sequence
    """
duration = 10.
paley_order = 20
sequence = new_predefined_dds(
scheme=WALSH_SINGLE_AXIS,
duration=duration,
paley_order=paley_order)
hamming_weight = 5
samples = 2 ** hamming_weight
relative_offset = np.arange(1. / (2 * samples), 1., 1. / samples)
binary_string = np.binary_repr(paley_order)
binary_order = [int(binary_string[i]) for i in range(hamming_weight)]
walsh_array = np.ones([samples])
for i in range(hamming_weight):
walsh_array *= np.sign(np.sin(2 ** (i + 1) * np.pi
* relative_offset)) ** binary_order[hamming_weight - 1 - i]
walsh_relative_offsets = []
for i in range(samples - 1):
if walsh_array[i] != walsh_array[i + 1]:
walsh_relative_offsets.append((i + 1) * (1. / samples))
    walsh_relative_offsets = np.array(walsh_relative_offsets, dtype=float)  # np.float was removed in NumPy 1.24
_offsets = duration * walsh_relative_offsets
_offsets = np.array(_offsets)
_rabi_rotations = np.pi * np.ones(_offsets.shape)
_azimuthal_angles = np.zeros(_offsets.shape)
_detuning_rotations = np.zeros(_offsets.shape)
assert np.allclose(_offsets, sequence.offsets)
assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
assert np.allclose(_detuning_rotations, sequence.detuning_rotations)
sequence = new_predefined_dds(
scheme=WALSH_SINGLE_AXIS,
duration=duration,
paley_order=paley_order,
pre_post_rotation=True)
_offsets = np.insert(_offsets,
[0, _offsets.shape[0]], # pylint: disable=unsubscriptable-object
[0, duration])
_rabi_rotations = np.insert(_rabi_rotations, [0, _rabi_rotations.shape[0]],
[np.pi/2, np.pi/2])
_azimuthal_angles = np.zeros(_offsets.shape)
_detuning_rotations = np.zeros(_offsets.shape)
assert np.allclose(_offsets, sequence.offsets)
assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
assert np.allclose(_detuning_rotations, sequence.detuning_rotations)
def test_quadratic_sequence():
"""
Test for Quadratic Sequence
"""
duration = 10.
number_inner_offsets = 4
number_outer_offsets = 4
sequence = new_predefined_dds(
scheme=QUADRATIC, duration=duration,
number_inner_offsets=number_inner_offsets,
number_outer_offsets=number_outer_offsets)
_offsets = np.zeros((number_outer_offsets+1, number_inner_offsets + 1))
constant = 0.5 / (number_outer_offsets + 1)
_delta_positions = [duration * (np.sin(np.pi * (k + 1) * constant)) ** 2
for k in range(number_outer_offsets)]
_outer_offsets = np.array(_delta_positions)
_offsets[0:number_outer_offsets, -1] = _outer_offsets
_outer_offsets = np.insert(
_outer_offsets,
[0, _outer_offsets.shape[0]], # pylint: disable=unsubscriptable-object
[0, duration])
_inner_durations = _outer_offsets[1:] - _outer_offsets[0:-1]
constant = 0.5 / (number_inner_offsets+1)
_delta_positions = [(np.sin(np.pi * (k + 1) * constant)) ** 2
for k in range(number_inner_offsets)]
_delta_positions = np.array(_delta_positions)
for inner_sequence_idx in range(_inner_durations.shape[0]):
_inner_deltas = _inner_durations[inner_sequence_idx] * _delta_positions
_inner_deltas = _outer_offsets[inner_sequence_idx] + _inner_deltas
_offsets[inner_sequence_idx, 0:number_inner_offsets] = _inner_deltas
_rabi_rotations = np.zeros(_offsets.shape)
_detuning_rotations = np.zeros(_offsets.shape)
_rabi_rotations[0:number_outer_offsets, -1] = np.pi
_detuning_rotations[0:(number_outer_offsets+1), 0:number_inner_offsets] = np.pi
_offsets = np.reshape(_offsets, (-1,))
_rabi_rotations = np.reshape(_rabi_rotations, (-1,))
_detuning_rotations = np.reshape(_detuning_rotations, (-1,))
_offsets = _offsets[0:-1]
_rabi_rotations = _rabi_rotations[0:-1]
_detuning_rotations = _detuning_rotations[0:-1]
_azimuthal_angles = np.zeros(_offsets.shape)
assert np.allclose(_offsets, sequence.offsets)
assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
assert np.allclose(_detuning_rotations, sequence.detuning_rotations)
sequence = new_predefined_dds(
scheme=QUADRATIC, duration=duration,
number_inner_offsets=number_inner_offsets,
number_outer_offsets=number_outer_offsets,
pre_post_rotation=True)
_offsets = np.insert(_offsets, [0, _offsets.shape[0]], [0, duration])
_rabi_rotations = np.insert(_rabi_rotations, [0, _rabi_rotations.shape[0]],
[np.pi/2, np.pi/2])
_detuning_rotations = np.insert(_detuning_rotations, [0, _detuning_rotations.shape[0]],
[0, 0])
_azimuthal_angles = np.zeros(_offsets.shape)
assert np.allclose(_offsets, sequence.offsets)
assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
assert np.allclose(_detuning_rotations, sequence.detuning_rotations)
def test_xconcatenated_sequence():
"""
Test X-CDD Sequence
"""
duration = 10.
concatenation_order = 3
sequence = new_predefined_dds(
scheme=X_CONCATENATED,
duration=duration,
concatenation_order=concatenation_order)
_spacing = duration/(2**concatenation_order)
_offsets = [_spacing, 3*_spacing, 4 * _spacing, 5 * _spacing, 7 * _spacing]
_offsets = np.array(_offsets)
_rabi_rotations = np.pi * np.ones(_offsets.shape)
_azimuthal_angles = np.zeros(_offsets.shape)
_detuning_rotations = np.zeros(_offsets.shape)
assert np.allclose(_offsets, sequence.offsets)
assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
assert np.allclose(_detuning_rotations, sequence.detuning_rotations)
sequence = new_predefined_dds(
scheme=X_CONCATENATED,
duration=duration,
concatenation_order=concatenation_order,
pre_post_rotation=True)
_offsets = np.insert(
_offsets,
[0, _offsets.shape[0]], # pylint: disable=unsubscriptable-object
[0, duration])
_rabi_rotations = np.insert(
_rabi_rotations,
[0, _rabi_rotations.shape[0]], # pylint: disable=unsubscriptable-object
[np.pi/2, np.pi/2])
_azimuthal_angles = np.zeros(_offsets.shape)
_detuning_rotations = np.zeros(_offsets.shape)
assert np.allclose(_offsets, sequence.offsets)
assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
assert np.allclose(_detuning_rotations, sequence.detuning_rotations)
def test_xyconcatenated_sequence():
"""
Test XY4-CDD Sequence
"""
duration = 10.
concatenation_order = 2
sequence = new_predefined_dds(
scheme=XY_CONCATENATED,
duration=duration,
concatenation_order=concatenation_order)
_spacing = duration / (2 ** (concatenation_order*2))
_offsets = [_spacing, 2*_spacing, 3 * _spacing, 4 * _spacing,
5 * _spacing, 6 * _spacing, 7 * _spacing, 9 * _spacing,
10 * _spacing, 11 * _spacing, 12 * _spacing, 13 * _spacing,
14 * _spacing, 15 * _spacing]
_offsets = np.array(_offsets)
_rabi_rotations = [np.pi, np.pi, np.pi, 0., np.pi, np.pi, np.pi,
np.pi, np.pi, np.pi, 0, np.pi, np.pi, np.pi]
_rabi_rotations = np.array(_rabi_rotations)
_azimuthal_angles = [0, np.pi/2, 0, 0, 0, np.pi/2, 0, 0, np.pi/2, 0, 0, 0, np.pi/2, 0]
_azimuthal_angles = np.array(_azimuthal_angles)
_detuning_rotations = [0, 0, 0, np.pi, 0, 0, 0, 0, 0, 0, np.pi, 0, 0, 0]
_detuning_rotations = np.array(_detuning_rotations)
assert np.allclose(_offsets, sequence.offsets)
assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
assert np.allclose(_detuning_rotations, sequence.detuning_rotations)
sequence = new_predefined_dds(
scheme=XY_CONCATENATED,
duration=duration,
concatenation_order=concatenation_order,
pre_post_rotation=True)
_offsets = np.insert(_offsets,
[0, _offsets.shape[0]], # pylint: disable=unsubscriptable-object
[0, duration])
_rabi_rotations = np.insert(
_rabi_rotations,
[0, _rabi_rotations.shape[0]], # pylint: disable=unsubscriptable-object
[np.pi/2, np.pi/2])
_azimuthal_angles = np.insert(
_azimuthal_angles,
[0, _azimuthal_angles.shape[0]], # pylint: disable=unsubscriptable-object
[0, 0])
_detuning_rotations = np.insert(
_detuning_rotations,
[0, _detuning_rotations.shape[0]], # pylint: disable=unsubscriptable-object
[0, 0])
assert np.allclose(_offsets, sequence.offsets)
assert np.allclose(_rabi_rotations, sequence.rabi_rotations)
assert np.allclose(_azimuthal_angles, sequence.azimuthal_angles)
assert np.allclose(_detuning_rotations, sequence.detuning_rotations)
def test_attribute_values():
"""
Test for the correctness of the attribute values
"""
# Check that errors are raised correctly
# duration cannot be <= 0
with pytest.raises(ArgumentsValueError):
_ = new_predefined_dds(scheme=SPIN_ECHO, duration=-2)
    # number_of_offsets cannot be <= 0
    with pytest.raises(ArgumentsValueError):
        _ = new_predefined_dds(
            scheme=CARR_PURCELL_MEIBOOM_GILL, duration=2,
            number_of_offsets=-1)
    # for QDD, none of the offset counts can be <= 0
    with pytest.raises(ArgumentsValueError):
        _ = new_predefined_dds(
            scheme=QUADRATIC, duration=2,
            number_inner_offsets=-1, number_outer_offsets=2)
    with pytest.raises(ArgumentsValueError):
        _ = new_predefined_dds(
            scheme=QUADRATIC, duration=2,
            number_inner_offsets=1, number_outer_offsets=-2)
    with pytest.raises(ArgumentsValueError):
        _ = new_predefined_dds(
            scheme=QUADRATIC, duration=2,
            number_inner_offsets=-1, number_outer_offsets=-2)
    # for X-CDD and XY-CDD, duration and concatenation_order cannot be <= 0
    with pytest.raises(ArgumentsValueError):
        _ = new_predefined_dds(
            scheme=X_CONCATENATED, duration=2,
            concatenation_order=-1)
    with pytest.raises(ArgumentsValueError):
        _ = new_predefined_dds(
            scheme=X_CONCATENATED, duration=-2,
            concatenation_order=1)
    with pytest.raises(ArgumentsValueError):
        _ = new_predefined_dds(
            scheme=X_CONCATENATED, duration=-2,
            concatenation_order=-1)
    with pytest.raises(ArgumentsValueError):
        _ = new_predefined_dds(
            scheme=XY_CONCATENATED, duration=2,
            concatenation_order=-1)
    with pytest.raises(ArgumentsValueError):
        _ = new_predefined_dds(
            scheme=XY_CONCATENATED, duration=-2,
            concatenation_order=1)
    with pytest.raises(ArgumentsValueError):
        _ = new_predefined_dds(
            scheme=XY_CONCATENATED, duration=-2,
            concatenation_order=-1)
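As a quick numeric sanity check of the Carr-Purcell spacing these tests assert (plain NumPy; new_predefined_dds itself is the library under test and is not re-implemented here):
import numpy as np
duration, number_of_offsets = 10., 4
spacing = duration / number_of_offsets
# pulses sit at the midpoints of equal sub-intervals: (k + 0.5) * spacing
offsets = (np.arange(number_of_offsets) + 0.5) * spacing
print(offsets)  # [1.25 3.75 6.25 8.75] -- the _offsets expected in test_carr_purcell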
| 37.147513
| 97
| 0.682227
| 2,706
| 21,657
| 5.132299
| 0.072801
| 0.02765
| 0.091014
| 0.021889
| 0.806668
| 0.78737
| 0.769657
| 0.7518
| 0.728471
| 0.706005
| 0
| 0.02191
| 0.211802
| 21,657
| 582
| 98
| 37.21134
| 0.791681
| 0.078173
| 0
| 0.685504
| 0
| 0
| 0.000607
| 0
| 0
| 0
| 0
| 0
| 0.194103
| 1
| 0.027027
| false
| 0
| 0.012285
| 0
| 0.039312
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
da62732ab8cafe6f3dc3a826eec094e383270dcc
| 117
|
py
|
Python
|
Hackerrank-Solutions/Hackerrank-Python-Solutions/Numpy/Floor, Ceil and Rint.py
|
HetDaftary/Competitive-Coding-Solutions
|
a683fa11895410c6eef07b1a68054f3e90aa596b
|
[
"MIT"
] | null | null | null |
Hackerrank-Solutions/Hackerrank-Python-Solutions/Numpy/Floor, Ceil and Rint.py
|
HetDaftary/Competitive-Coding-Solutions
|
a683fa11895410c6eef07b1a68054f3e90aa596b
|
[
"MIT"
] | null | null | null |
Hackerrank-Solutions/Hackerrank-Python-Solutions/Numpy/Floor, Ceil and Rint.py
|
HetDaftary/Competitive-Coding-Solutions
|
a683fa11895410c6eef07b1a68054f3e90aa596b
|
[
"MIT"
] | null | null | null |
import numpy
numpy.set_printoptions(sign=' ')
a = numpy.array(input().split(), float)  # assumed completion: read space-separated floats from stdin
print(numpy.floor(a))
print(numpy.ceil(a))
print(numpy.rint(a))
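For example, with "1.1 2.2 3.3" on stdin the completed script would print roughly the following (a sketch assuming the input line filled in above; sign=' ' pads positive values with a leading space):
[ 1.  2.  3.]
[ 2.  3.  4.]
[ 1.  2.  3.]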
| 13
| 32
| 0.709402
| 19
| 117
| 4.315789
| 0.526316
| 0.219512
| 0.402439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 117
| 9
| 33
| 13
| 0.780952
| 0
| 0
| 0
| 0
| 0
| 0.008475
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.166667
| null | null | 0.666667
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
e5bda1362919be958c9ed6e5b41a6803a83faa77
| 92
|
py
|
Python
|
app/main/blueprint.py
|
ds-vologdin/market-form-flask
|
28a46e4cb9077f6b3f7bc0e2727bb51d401fd68c
|
[
"MIT"
] | 13
|
2019-05-24T20:52:31.000Z
|
2022-02-02T10:27:54.000Z
|
app/main/blueprint.py
|
ds-vologdin/market-form-flask
|
28a46e4cb9077f6b3f7bc0e2727bb51d401fd68c
|
[
"MIT"
] | null | null | null |
app/main/blueprint.py
|
ds-vologdin/market-form-flask
|
28a46e4cb9077f6b3f7bc0e2727bb51d401fd68c
|
[
"MIT"
] | 7
|
2019-11-05T09:25:41.000Z
|
2021-11-16T15:48:37.000Z
|
from flask import Blueprint
blueprint = Blueprint('main', __name__)
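# imported last so the routes module can register its view functions on this blueprint without a circular import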
from . import routes
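A sketch of how this blueprint would be wired into the application (the import path is assumed from this repo layout):
from flask import Flask
from app.main.blueprint import blueprint as main_blueprint  # path assumed
app = Flask(__name__)
app.register_blueprint(main_blueprint)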
| 13.142857
| 39
| 0.76087
| 11
| 92
| 6
| 0.636364
| 0.545455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.163043
| 92
| 6
| 40
| 15.333333
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0.043478
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
f912886fc01c5c7999d40ab7acd01f5feb703e57
| 1,227
|
py
|
Python
|
user/vistas/widgets/contact-form.py
|
ZerpaTechnology/occoa
|
a8c0bd2657bc058801a883109c0ec0d608d04ccc
|
[
"Apache-2.0"
] | null | null | null |
user/vistas/widgets/contact-form.py
|
ZerpaTechnology/occoa
|
a8c0bd2657bc058801a883109c0ec0d608d04ccc
|
[
"Apache-2.0"
] | null | null | null |
user/vistas/widgets/contact-form.py
|
ZerpaTechnology/occoa
|
a8c0bd2657bc058801a883109c0ec0d608d04ccc
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
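# NOTE: 'doc' is assumed to be a string buffer defined by the surrounding occoa view code before this widget file runs.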
doc+="""<section class="well1"> <div class="container"> <h2>Feedback</h2> <form method="post" action="bat/rd-mailform.php" class="mailform off2"> <input type="hidden" name="form-type" value="contact"> <fieldset class="row"> <label class="grid_4"> <input type="text" name="name" placeholder="Your Name:" data-constraints="@LettersOnly @NotEmpty"> </label> <label class="grid_4"> <input type="text" name="phone" placeholder="Telephone:" data-constraints="@Phone"> </label> <label class="grid_4"> <input type="text" name="email" placeholder="Email:" data-constraints="@Email @NotEmpty"> </label> <label class="grid_12"> <textarea name="message" placeholder="Message:" data-constraints="@NotEmpty"></textarea> </label> <div class="mfControls grid_12"> <button type="submit" class="btn">Sumbit comment</button> </div> </fieldset> </form> </div> </section>"""
| 409
| 1,185
| 0.511002
| 118
| 1,227
| 5.271186
| 0.449153
| 0.057878
| 0.090032
| 0.072347
| 0.226688
| 0.170418
| 0.170418
| 0.170418
| 0.118971
| 0
| 0
| 0.014493
| 0.325183
| 1,227
| 3
| 1,185
| 409
| 0.736715
| 0.03097
| 0
| 0
| 0
| 1
| 0.988215
| 0.1633
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
006db3dc66eca7b96cae38d9c15e5556d1337947
| 191
|
py
|
Python
|
aws_admin/scripts.py
|
crccheck/django-aws-admin
|
c53252cacb0d983454441a6eb299f39845a0108f
|
[
"Apache-2.0"
] | null | null | null |
aws_admin/scripts.py
|
crccheck/django-aws-admin
|
c53252cacb0d983454441a6eb299f39845a0108f
|
[
"Apache-2.0"
] | null | null | null |
aws_admin/scripts.py
|
crccheck/django-aws-admin
|
c53252cacb0d983454441a6eb299f39845a0108f
|
[
"Apache-2.0"
] | null | null | null |
from aws_admin.utils import pull_vpcs, pull_ec2, pull_security_groups
if __name__ == '__main__':
    import django
    django.setup()
pull_vpcs()
pull_ec2()
pull_security_groups()
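For django.setup() to succeed, the Django settings module must be resolvable first; a minimal sketch (the settings path is hypothetical):
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'myproject.settings')  # hypothetical project settings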
| 21.222222
| 69
| 0.727749
| 26
| 191
| 4.692308
| 0.576923
| 0.131148
| 0.196721
| 0.245902
| 0.540984
| 0.540984
| 0.540984
| 0
| 0
| 0
| 0
| 0.012739
| 0.17801
| 191
| 8
| 70
| 23.875
| 0.764331
| 0
| 0
| 0
| 0
| 0
| 0.041885
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
008dc627e3f13ae5991ef6695f47e8356e0ff4b4
| 3,091
|
py
|
Python
|
apis/event_type_api.py
|
tacklebox-webhooks/python
|
d2581110ab701467f5d584d0fd8ebb5f4c43a7aa
|
[
"MIT"
] | null | null | null |
apis/event_type_api.py
|
tacklebox-webhooks/python
|
d2581110ab701467f5d584d0fd8ebb5f4c43a7aa
|
[
"MIT"
] | null | null | null |
apis/event_type_api.py
|
tacklebox-webhooks/python
|
d2581110ab701467f5d584d0fd8ebb5f4c43a7aa
|
[
"MIT"
] | null | null | null |
from .error import ERROR_TYPES, Validation, new_error
from .http_request import HttpRequest
from .http_client import HttpClient
class EventTypeApi:
def __init__(self, config):
self.base_url = config['base_url']
self.http_client = HttpClient(config['api_key'])
self.validator = Validation()
def list_event_types(self, service_id):
if not self.validator.is_valid_id(service_id):
return new_error(
ERROR_TYPES['missing_parameter'],
"The list_event_types method must be invoked with a non-empty string service_id argument."
)
path = f"services/{service_id}/event_types"
request = HttpRequest("GET", self.base_url, path)
return self.http_client.send(request)
def create_event_type(self, service_id, event_type_data):
if not self.validator.is_valid_id(service_id):
return new_error(
ERROR_TYPES['missing_parameter'],
"The create_event_types method must be invoked with a non-empty string service_id argument."
)
elif not self.validator.is_valid_data(event_type_data):
return new_error(
ERROR_TYPES['missing_parameter'],
"The create_event_types method must be invoked with an event_type_data object that contains a non-empty string name property."
)
path = f"services/{service_id}/event_types"
request = HttpRequest("POST", self.base_url, path, event_type_data)
return self.http_client.send(request)
def delete_event_type(self, service_id, event_type_id):
        if not self.validator.is_valid_id(service_id):  # normalized to the validator method used above
return new_error(
ERROR_TYPES['missing_parameter'],
"The delete_event_type method must be invoked with a non-empty string service_id argument."
)
        elif not self.validator.is_valid_id(event_type_id):
return new_error(
ERROR_TYPES['missing_parameter'],
"The delete_event_type method must be invoked with a non-empty string event_type_id argument."
)
path = f"services/{service_id}/event_types/{event_type_id}"
request = HttpRequest("DELETE", self.base_url, path)
return self.http_client.send(request)
def get_event_type(self, service_id, event_type_id):
        if not self.validator.is_valid_id(service_id):
return new_error(
ERROR_TYPES['missing_parameter'],
"The get_event_type method must be invoked with a non-empty string service_id argument."
)
        elif not self.validator.is_valid_id(event_type_id):
return new_error(
ERROR_TYPES['missing_parameter'],
"The get_event_type method must be invoked with a non-empty string event_type_id argument."
)
path = f"services/{service_id}/event_types/{event_type_id}"
request = HttpRequest("GET", self.base_url, path)
return self.http_client.send(request)
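A minimal usage sketch; the config keys match the constructor above, while the URL, key, and service id are placeholder values:
config = {'base_url': 'https://api.example.com/', 'api_key': 'YOUR_API_KEY'}
api = EventTypeApi(config)
response = api.list_event_types('my-service-id')  # issues GET services/my-service-id/event_types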
| 44.797101
| 142
| 0.649951
| 398
| 3,091
| 4.743719
| 0.153266
| 0.100106
| 0.058263
| 0.070445
| 0.809322
| 0.805614
| 0.805614
| 0.770127
| 0.770127
| 0.726695
| 0
| 0
| 0.27208
| 3,091
| 68
| 143
| 45.455882
| 0.839111
| 0
| 0
| 0.508475
| 0
| 0
| 0.314461
| 0.053057
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084746
| false
| 0
| 0.050847
| 0
| 0.338983
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
00cc2c93140f953d3bf806475ac9310bfbe6b27c
| 81
|
py
|
Python
|
toolkit_cxc/path.py
|
XiaochenCui/toolkit_cxc
|
d89ab835b8ae3329f70516488e145c403091d844
|
[
"MIT"
] | null | null | null |
toolkit_cxc/path.py
|
XiaochenCui/toolkit_cxc
|
d89ab835b8ae3329f70516488e145c403091d844
|
[
"MIT"
] | null | null | null |
toolkit_cxc/path.py
|
XiaochenCui/toolkit_cxc
|
d89ab835b8ae3329f70516488e145c403091d844
|
[
"MIT"
] | null | null | null |
import os
import sys
def add_current_path():
    """Append the current working directory to sys.path."""
    sys.path.append(os.getcwd())
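A usage sketch: call it before importing code that lives in the working directory (the module name is hypothetical).
add_current_path()
import local_module  # hypothetical module that now resolves from the current directory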
| 10.125
| 32
| 0.703704
| 13
| 81
| 4.230769
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.17284
| 81
| 7
| 33
| 11.571429
| 0.820896
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
dadb34553a2ee60197172a219f755595a5468a65
| 95
|
py
|
Python
|
abnorm/adapters/django3p1.py
|
trashnroll/django-abnorm
|
aa94e895c1b692d0122d2e7ad3f7d37e09e6febe
|
[
"MIT"
] | 12
|
2018-04-05T09:00:28.000Z
|
2020-01-21T13:31:45.000Z
|
abnorm/adapters/django3p1.py
|
trashnroll/django-abnorm
|
aa94e895c1b692d0122d2e7ad3f7d37e09e6febe
|
[
"MIT"
] | 6
|
2018-06-25T03:49:34.000Z
|
2019-12-28T12:14:20.000Z
|
abnorm/adapters/django3p1.py
|
trashnroll/django-abnorm
|
aa94e895c1b692d0122d2e7ad3f7d37e09e6febe
|
[
"MIT"
] | 2
|
2018-06-06T18:09:03.000Z
|
2018-10-11T14:21:10.000Z
|
from .django3p0 import SpecificDjango as Django3p0
class SpecificDjango(Django3p0):
    # Django 3.1 requires no adapter changes beyond the 3.0 behaviour.
    pass
| 15.833333
| 50
| 0.8
| 10
| 95
| 7.6
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075
| 0.157895
| 95
| 5
| 51
| 19
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
dae67d341309fbdb620c4da94c85daac4f3d92e8
| 129
|
py
|
Python
|
pages/themes/beginners/practicalities/examples/nonPEP8_styled.py
|
ProgressBG-Python-Course/ProgressBG-VC2-Python
|
03b892a42ee1fad3d4f97e328e06a4b1573fd356
|
[
"MIT"
] | null | null | null |
pages/themes/beginners/practicalities/examples/nonPEP8_styled.py
|
ProgressBG-Python-Course/ProgressBG-VC2-Python
|
03b892a42ee1fad3d4f97e328e06a4b1573fd356
|
[
"MIT"
] | null | null | null |
pages/themes/beginners/practicalities/examples/nonPEP8_styled.py
|
ProgressBG-Python-Course/ProgressBG-VC2-Python
|
03b892a42ee1fad3d4f97e328e06a4b1573fd356
|
[
"MIT"
] | null | null | null |
def print_list(my_list):
for i in my_list:
print(i)
my_list = [
1, 2, 3,
4, 5, 6,]
print_list(my_list)
| 16.125
| 24
| 0.534884
| 24
| 129
| 2.625
| 0.541667
| 0.380952
| 0.349206
| 0.47619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.069767
| 0.333333
| 129
| 8
| 25
| 16.125
| 0.662791
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0
| 0.142857
| 0.428571
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
97215e661d483c7047606fe418b2fe866c1f30f8
| 7,437
|
py
|
Python
|
alarconpy/create_map.py
|
apalarcon/alarconpy
|
1decf4bbec562e654038367439f5ac6345ebfdd4
|
[
"MIT"
] | 2
|
2020-06-20T01:30:22.000Z
|
2022-03-26T22:54:45.000Z
|
alarconpy/create_map.py
|
apalarcon/alarconpy
|
1decf4bbec562e654038367439f5ac6345ebfdd4
|
[
"MIT"
] | null | null | null |
alarconpy/create_map.py
|
apalarcon/alarconpy
|
1decf4bbec562e654038367439f5ac6345ebfdd4
|
[
"MIT"
] | 2
|
2020-11-05T21:37:10.000Z
|
2021-12-07T00:43:07.000Z
|
"""
Autor: Albenis Pérez Alarcón
Last Update: abril 19, 2019
apalarcon1991@gmail.com
"""
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import matplotlib.ticker as mticker
import matplotlib.gridspec as gridspec
import sys
import os
from alarconpy.paths import *
import numpy as np
from math import floor, ceil
os.environ["CARTOPY_USER_BACKGROUNDS"]=cartopy_BM()
def get_map(lower_left_corner=(-85,19),upper_right_corner=(-73,30),dlon=None,bg="None",res="medium",cr="10m",landcolor="#bfbfbf",oceancolor="#b8dffe",fontsize=15,id_=111):
"""
To create a map with Cartopy
Author: Albenis Pérez Alarcón
contact:apalarcon1991@gmail.com
Parameters
-------------
lower_left_corner: coordinate like (lon,lat)
upper_right_corner: coordinate like (lon,lat)
bg: string to define map background_img
        available options:
            None: use an empty map
            BM: use the Blue Marble background; available resolutions: low, medium, high, full
            product: use the vegetation background; available resolutions: low, high
            topo: use the topography background; available resolutions: low, high
            stock: default cartopy background at its default resolution
            define_color: set specific colors for land and ocean, given by landcolor and oceancolor
    res: string to set the background resolution
    cr: string, coastline resolution
        available options: 110m or 10m
fontsize:float
to set fontsize to plot draw_labels
id_: float
position in figure (111 is default to plot in all figure)
Return a created map
"""
min_lon,min_lat=lower_left_corner
max_lon,max_lat=upper_right_corner
crs = ccrs.PlateCarree()
mapa=plt.subplot(id_,projection=ccrs.PlateCarree())
if cr=="110m" or cr=="10m":
mapa.add_feature(cfeature.COASTLINE.with_scale(cr), linewidth=1)
else:
        raise ValueError('Available coast resolutions are "110m" and "10m"')
mapa.add_feature(cfeature.STATES, linewidth=0.25)
mapa.set_extent([min_lon,max_lon,min_lat,max_lat], crs=ccrs.PlateCarree())
    if dlon is None:
if abs(min_lon-max_lon)<=2:
paso_h=0.5
elif 3<abs(min_lon-max_lon)<=8:
paso_h=2
elif 8<abs(min_lon-max_lon)<=30:
paso_h=5
elif 30<abs(min_lon-max_lon)<=100:
paso_h=10
else:
paso_h=15
else:
paso_h=dlon
#
if bg!="None":
if bg=="BM":
if res=="low" or res=="medium" or res=="high" or res=="full":
mapa.background_img(name=bg, resolution=res)
else:
raise ValueError('aviable resolutions to BM background are low, medium, high, full')
elif bg=="product":
if res=="low" or res=="high" :
mapa.background_img(name=bg, resolution=res)
else:
raise ValueError('aviable resolutions to vegetation background are low, high')
elif bg=="topo":
if res=="low" or res=="high" :
mapa.background_img(name=bg, resolution=res)
else:
raise ValueError('aviable resolutions to topography background are low, high')
elif bg=="stock":
mapa.stock_img()
elif bg=="define_color":
mapa.add_feature(cfeature.LAND,color=landcolor) #If I comment this => all ok, but I need
mapa.add_feature(cfeature.OCEAN,color=oceancolor)
else:
raise ValueError('aviable backgrounds are BM, topo, product')
gl = mapa.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,linewidth=0.5, color='black', alpha=1, linestyle='--')
gl.xlabels_top = False
gl.ylabels_left = True
gl.ylabels_right = False
gl.xlines = True
lons=np.arange(floor(min_lon-paso_h),ceil(max_lon+paso_h),paso_h)
gl.xlocator = mticker.FixedLocator(lons)
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.xlabel_style = {'size': fontsize, 'color': 'black'}
gl.ylabel_style = {'size': fontsize,'color': 'black'}
return mapa
def get_map_all(lower_left_corner=(255,19),upper_right_corner=(290,30),dlon=None,dlat=None,bg="None",
res="medium",cr="10m",landcolor="#bfbfbf",oceancolor="#b8dffe",fontsize=15,id_=111,center=180):
"""
To create a map with Cartopy
Author: Albenis Pérez Alarcón
contact:apalarcon1991@gmail.com
Parameters
-------------
lower_left_corner: coordinate like (lon,lat)
upper_right_corner: coordinate like (lon,lat)
bg: string to define map background_img
        available options:
            None: use an empty map
            BM: use the Blue Marble background; available resolutions: low, medium, high, full
            product: use the vegetation background; available resolutions: low, high
            topo: use the topography background; available resolutions: low, high
            stock: default cartopy background at its default resolution
            define_color: set specific colors for land and ocean, given by landcolor and oceancolor
    res: string to set the background resolution
    cr: string, coastline resolution
        available options: 110m or 10m
fontsize:float
to set fontsize to plot draw_labels
id_: float
position in figure (111 is default to plot in all figure)
Return a created map
"""
min_lon,min_lat=lower_left_corner
max_lon,max_lat=upper_right_corner
crs = ccrs.PlateCarree()
mapa=plt.subplot(id_,projection=ccrs.PlateCarree(center))
if cr=="110m" or cr=="10m":
mapa.add_feature(cfeature.COASTLINE.with_scale(cr), linewidth=1)
else:
        raise ValueError('Available coast resolutions are "110m" and "10m"')
mapa.add_feature(cfeature.STATES, linewidth=0.25)
mapa.set_extent([min_lon,max_lon,min_lat,max_lat], crs=ccrs.PlateCarree())
    if dlon is None:
if abs(min_lon-max_lon)<=2:
paso_h=0.5
elif 3<abs(min_lon-max_lon)<=8:
paso_h=2
elif 8<abs(min_lon-max_lon)<=30:
paso_h=5
elif 30<abs(min_lon-max_lon)<=100:
paso_h=10
else:
paso_h=15
else:
paso_h=dlon
#
    if dlat is None:
        dlat = 5
if bg!="None":
if bg=="BM":
if res=="low" or res=="medium" or res=="high" or res=="full":
mapa.background_img(name=bg, resolution=res)
else:
raise ValueError('aviable resolutions to BM background are low, medium, high, full')
elif bg=="product":
if res=="low" or res=="high" :
mapa.background_img(name=bg, resolution=res)
else:
raise ValueError('aviable resolutions to vegetation background are low, high')
elif bg=="topo":
if res=="low" or res=="high" :
mapa.background_img(name=bg, resolution=res)
else:
raise ValueError('aviable resolutions to topography background are low, high')
elif bg=="stock":
mapa.stock_img()
elif bg=="define_color":
mapa.add_feature(cfeature.LAND,color=landcolor) #If I comment this => all ok, but I need
mapa.add_feature(cfeature.OCEAN,color=oceancolor)
else:
raise ValueError('aviable backgrounds are BM, topo, product')
gl = mapa.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,linewidth=0.5, color='black', alpha=1, linestyle='--')
lons=np.arange(min_lon,max_lon,paso_h)
gl_lon_info=[]
for clons in lons:
if clons<180:
gl_lon_info=np.append(gl_lon_info,clons)
else:
gl_lon_info=np.append(gl_lon_info,clons-360)
#gl_lon_info=[160,180,-20,-40,-60,-80,-100,-120,-140,-160,-180,-200,-220,-240,-260]
gl_loc=[True,False,False,True]
gl.ylabels_left = gl_loc[0]
gl.ylabels_right = gl_loc[1]
gl.xlabels_top = gl_loc[2]
gl.xlabels_bottom = gl_loc[3]
gl.xlocator = mticker.FixedLocator(gl_lon_info)
gl.ylocator = mticker.MultipleLocator(dlat)
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.xlabel_style = {'size': fontsize, 'color': 'k'}
gl.ylabel_style = {'size': fontsize, 'color': 'k'}
return mapa,crs
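A minimal usage sketch for get_map; the extent mirrors the function defaults and the save call is standard Matplotlib (the output filename is hypothetical):
import matplotlib.pyplot as plt
from alarconpy.create_map import get_map
fig = plt.figure(figsize=(10, 8))
mapa = get_map(lower_left_corner=(-85, 19), upper_right_corner=(-73, 30), cr="110m", fontsize=12)
plt.savefig("region_map.png")  # hypothetical output name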
| 28.714286
| 171
| 0.726772
| 1,151
| 7,437
| 4.56212
| 0.179844
| 0.015235
| 0.018854
| 0.025138
| 0.807084
| 0.804799
| 0.793373
| 0.793373
| 0.793373
| 0.781565
| 0
| 0.030806
| 0.14885
| 7,437
| 258
| 172
| 28.825581
| 0.798736
| 0.277128
| 0
| 0.685315
| 0
| 0
| 0.154253
| 0.004537
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013986
| false
| 0
| 0.076923
| 0
| 0.104895
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|