code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
"""
Downloads tweets between two dates.
"""
from __future__ import annotations
import datetime
import sys
from argparse import ArgumentParser, Namespace, RawDescriptionHelpFormatter
from pathlib import Path
from py_executable_checklist.workflow import run_workflow
from twitter_utils import setup_logging
from twitter_utils.browser_session import BrowserSession
from twitter_utils.workflows.workflow_steps import (
CloseBrowserSession,
CreateBrowserSession,
GetAllTweetsBetweenDateRange,
WriteTweetsToDirectory,
)
def parse_args(args: list[str]) -> Namespace:
    """Parse command-line arguments for the tweet download workflow.

    Args:
        args: Raw argument strings, typically ``sys.argv[1:]``.

    Returns:
        Parsed arguments as an ``argparse.Namespace``.
    """
    arg_parser = ArgumentParser(description=__doc__, formatter_class=RawDescriptionHelpFormatter)
    arg_parser.add_argument(
        "-q", "--query", required=True, type=str, help="A twitter handle or a hash tag. Use '#' to start a hash tag."
    )
    # Both date bounds are parsed straight into datetime.date objects.
    arg_parser.add_argument("-s", "--since", required=True, type=datetime.date.fromisoformat,
                            help="Search from this date. Format YYYY-MM-DD")
    arg_parser.add_argument("-u", "--until", required=True, type=datetime.date.fromisoformat,
                            help="Search until this date. Format YYYY-MM-DD")
    arg_parser.add_argument("-b", "--browser", required=False, type=str, default="firefox",
                            help="Browser to use for web scraping. Default: firefox")
    arg_parser.add_argument("-o", "--output-directory", required=True, type=Path,
                            help="Directory to save tweets to")
    arg_parser.add_argument("-v", "--verbose", action="store_true", default=False, dest="verbose",
                            help="Display context variables at each step")
    return arg_parser.parse_args(args=args)
def workflow_steps() -> list:
    """Return the ordered list of workflow step classes for a download run."""
    # Session setup first, teardown last; scraping and persistence in between.
    return [CreateBrowserSession, GetAllTweetsBetweenDateRange, WriteTweetsToDirectory, CloseBrowserSession]
def tweets_between_workflow(context: dict) -> None:
    """Execute every workflow step against the shared *context* dictionary."""
    steps = workflow_steps()
    run_workflow(context, steps)
def main() -> None:  # pragma: no cover
    """Script entry point: parse CLI arguments and launch the workflow."""
    setup_logging()
    parsed_args = parse_args(sys.argv[1:])
    # Seed the workflow context with the parsed arguments, then attach a
    # live browser session for the scraping steps to share.
    context = vars(parsed_args)
    context["browser_session"] = BrowserSession(parsed_args.browser)
    tweets_between_workflow(context)


if __name__ == "__main__":  # pragma: no cover
    main()
|
[
"twitter_utils.setup_logging",
"twitter_utils.browser_session.BrowserSession",
"argparse.ArgumentParser"
] |
[((594, 679), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'RawDescriptionHelpFormatter'}), '(description=__doc__, formatter_class=RawDescriptionHelpFormatter\n )\n', (608, 679), False, 'from argparse import ArgumentParser, Namespace, RawDescriptionHelpFormatter\n'), ((2120, 2135), 'twitter_utils.setup_logging', 'setup_logging', ([], {}), '()\n', (2133, 2135), False, 'from twitter_utils import setup_logging\n'), ((2247, 2282), 'twitter_utils.browser_session.BrowserSession', 'BrowserSession', (['parsed_args.browser'], {}), '(parsed_args.browser)\n', (2261, 2282), False, 'from twitter_utils.browser_session import BrowserSession\n')]
|
#
# Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved.
#
# Licensed under the MIT License. See the LICENSE accompanying this file
# for the specific language governing permissions and limitations under
# the License.
#
import mount_efs
from .. import utils
from botocore.exceptions import ClientError, NoCredentialsError
from mock import MagicMock
try:
import ConfigParser
except ImportError:
from configparser import ConfigParser
DEFAULT_CLOUDWATCH_LOG_GROUP = '/aws/efs/utils'
DEFAULT_CLOUDWATCH_ENABLED = 'true'
DEFAULT_CLOUDWATCH_DISABLED = 'false'
DEFAULT_RETENTION_DAYS = 14

# Fixture identifiers shared by all cloudwatch-log tests below.
FS_ID = 'fs-deadbeef'
INSTANCE = 'i-12345678'
DEFAULT_CLOUDWATCH_LOG_STREAM = '%s - %s - mount.log' % (FS_ID, INSTANCE)

# Expected agent dict produced by a fully successful bootstrap run.
MOCK_AGENT = {
    'client': 'fake-agent',
    'log_group_name': DEFAULT_CLOUDWATCH_LOG_GROUP,
    'log_stream_name': DEFAULT_CLOUDWATCH_LOG_STREAM,
}
def _get_mock_config(enabled, log_group_name, retention_in_days):
def config_get_side_effect(section, field):
if section == mount_efs.CLOUDWATCH_LOG_SECTION and field == 'log_group_name':
return log_group_name
elif section == mount_efs.CLOUDWATCH_LOG_SECTION and field == 'retention_in_days':
return retention_in_days
else:
raise ValueError('Unexpected arguments')
def config_getboolean_side_effect(section, field):
if section == mount_efs.CLOUDWATCH_LOG_SECTION and field == 'enabled':
return True if enabled == 'true' else False
else:
raise ValueError('Unexpected arguments')
mock_config = MagicMock()
mock_config.get.side_effect = config_get_side_effect
mock_config.getboolean.side_effect = config_getboolean_side_effect
return mock_config
"""
cloudwatch-log config unit tests
"""
def test_get_cloudwatchlog_config_without_fsid_with_instance_id(mocker):
    """Without an fs id the log stream name is derived from the instance id alone."""
    mock_config = _get_mock_config(DEFAULT_CLOUDWATCH_ENABLED, DEFAULT_CLOUDWATCH_LOG_GROUP, DEFAULT_RETENTION_DAYS)
    assert mount_efs.check_if_cloudwatch_log_enabled(mock_config) == True
    mocker.patch('mount_efs.get_instance_identity_info_from_instance_metadata', return_value=INSTANCE)
    agent_config = mount_efs.get_cloudwatchlog_config(mock_config)
    assert agent_config.get('log_group_name') == DEFAULT_CLOUDWATCH_LOG_GROUP
    assert agent_config.get('retention_days') == DEFAULT_RETENTION_DAYS
    assert agent_config.get('log_stream_name') == '%s - mount.log' % INSTANCE
def test_get_cloudwatchlog_config_with_fsid_with_instance_id(mocker):
    """With both fs id and instance id the stream name combines the two."""
    mock_config = _get_mock_config(DEFAULT_CLOUDWATCH_ENABLED, DEFAULT_CLOUDWATCH_LOG_GROUP, DEFAULT_RETENTION_DAYS)
    assert mount_efs.check_if_cloudwatch_log_enabled(mock_config) == True
    mocker.patch('mount_efs.get_instance_identity_info_from_instance_metadata', return_value=INSTANCE)
    agent_config = mount_efs.get_cloudwatchlog_config(mock_config, FS_ID)
    assert agent_config.get('log_group_name') == DEFAULT_CLOUDWATCH_LOG_GROUP
    assert agent_config.get('retention_days') == DEFAULT_RETENTION_DAYS
    assert agent_config.get('log_stream_name') == '%s - %s - mount.log' % (FS_ID, INSTANCE)
def test_get_cloudwatchlog_config_with_fsid_without_instance_id(mocker):
    """If instance metadata is unavailable the stream name uses the fs id alone."""
    mock_config = _get_mock_config(DEFAULT_CLOUDWATCH_ENABLED, DEFAULT_CLOUDWATCH_LOG_GROUP, DEFAULT_RETENTION_DAYS)
    assert mount_efs.check_if_cloudwatch_log_enabled(mock_config) == True
    mocker.patch('mount_efs.get_instance_identity_info_from_instance_metadata', return_value=None)
    agent_config = mount_efs.get_cloudwatchlog_config(mock_config, FS_ID)
    assert agent_config.get('log_group_name') == DEFAULT_CLOUDWATCH_LOG_GROUP
    assert agent_config.get('retention_days') == DEFAULT_RETENTION_DAYS
    assert agent_config.get('log_stream_name') == '%s - mount.log' % (FS_ID)
def test_get_cloudwatchlog_config_without_fsid_without_instance_id(mocker):
    """With neither fs id nor instance id the stream name falls back to 'default'."""
    mock_config = _get_mock_config(DEFAULT_CLOUDWATCH_ENABLED, DEFAULT_CLOUDWATCH_LOG_GROUP, DEFAULT_RETENTION_DAYS)
    assert mount_efs.check_if_cloudwatch_log_enabled(mock_config) == True
    mocker.patch('mount_efs.get_instance_identity_info_from_instance_metadata', return_value=None)
    agent_config = mount_efs.get_cloudwatchlog_config(mock_config)
    assert agent_config.get('log_group_name') == DEFAULT_CLOUDWATCH_LOG_GROUP
    assert agent_config.get('retention_days') == DEFAULT_RETENTION_DAYS
    assert agent_config.get('log_stream_name') == 'default - mount.log'
def test_botocore_not_called_when_feature_not_enabled(mocker):
    """When the feature is disabled, bootstrapping bails out before creating a botocore client."""
    mock_config = _get_mock_config(DEFAULT_CLOUDWATCH_DISABLED, DEFAULT_CLOUDWATCH_LOG_GROUP, DEFAULT_RETENTION_DAYS)
    assert mount_efs.check_if_cloudwatch_log_enabled(mock_config) == False
    botocore_client_mock = mocker.patch('mount_efs.get_botocore_client')
    agent = mount_efs.bootstrap_cloudwatch_logging(mock_config, FS_ID)
    utils.assert_not_called(botocore_client_mock)
    assert agent == None
def test_cloudwatchlog_agent_none_when_botocore_agent_is_none(mocker):
    """With the feature enabled a client is requested; if none is returned the agent is None."""
    mock_config = _get_mock_config(DEFAULT_CLOUDWATCH_ENABLED, DEFAULT_CLOUDWATCH_LOG_GROUP, DEFAULT_RETENTION_DAYS)
    botocore_client_mock = mocker.patch('mount_efs.get_botocore_client', return_value=None)
    agent = mount_efs.bootstrap_cloudwatch_logging(mock_config, FS_ID)
    utils.assert_called_once(botocore_client_mock)
    assert agent == None
"""
bootstrap cloud watch log unit tests
"""
def test_bootstrap_cloudwatch_log(mocker):
    """Happy path: log group, retention policy and log stream are all created once."""
    mock_config = _get_mock_config(DEFAULT_CLOUDWATCH_ENABLED, DEFAULT_CLOUDWATCH_LOG_GROUP, DEFAULT_RETENTION_DAYS)
    mocker.patch('mount_efs.get_instance_identity_info_from_instance_metadata', return_value=INSTANCE)
    client_mock = mocker.patch('mount_efs.get_botocore_client', return_value='fake-agent')
    group_mock = mocker.patch('mount_efs.create_cloudwatch_log_group', return_value=True)
    retention_mock = mocker.patch('mount_efs.put_cloudwatch_log_retention_policy', return_value=True)
    stream_mock = mocker.patch('mount_efs.create_cloudwatch_log_stream', return_value=True)
    agent = mount_efs.bootstrap_cloudwatch_logging(mock_config, FS_ID)
    for mock in (client_mock, group_mock, retention_mock, stream_mock):
        utils.assert_called_once(mock)
    assert agent == MOCK_AGENT
def test_bootstrap_cloudwatch_log_create_log_group_failed(mocker):
    """A failed log-group creation short-circuits the remaining setup calls."""
    mock_config = _get_mock_config(DEFAULT_CLOUDWATCH_ENABLED, DEFAULT_CLOUDWATCH_LOG_GROUP, DEFAULT_RETENTION_DAYS)
    mocker.patch('mount_efs.get_instance_identity_info_from_instance_metadata', return_value=INSTANCE)
    client_mock = mocker.patch('mount_efs.get_botocore_client', return_value='fake-agent')
    group_mock = mocker.patch('mount_efs.create_cloudwatch_log_group', return_value=False)
    retention_mock = mocker.patch('mount_efs.put_cloudwatch_log_retention_policy')
    stream_mock = mocker.patch('mount_efs.create_cloudwatch_log_stream')
    agent = mount_efs.bootstrap_cloudwatch_logging(mock_config, FS_ID)
    utils.assert_called_once(client_mock)
    utils.assert_called_once(group_mock)
    utils.assert_not_called(retention_mock)
    utils.assert_not_called(stream_mock)
    assert agent == None
def test_bootstrap_cloudwatch_log_put_retention_days_failed(mocker):
    """A failed retention-policy call stops before the log stream is created."""
    mock_config = _get_mock_config(DEFAULT_CLOUDWATCH_ENABLED, DEFAULT_CLOUDWATCH_LOG_GROUP, DEFAULT_RETENTION_DAYS)
    mocker.patch('mount_efs.get_instance_identity_info_from_instance_metadata', return_value=INSTANCE)
    client_mock = mocker.patch('mount_efs.get_botocore_client', return_value='fake-agent')
    group_mock = mocker.patch('mount_efs.create_cloudwatch_log_group', return_value=True)
    retention_mock = mocker.patch('mount_efs.put_cloudwatch_log_retention_policy', return_value=False)
    stream_mock = mocker.patch('mount_efs.create_cloudwatch_log_stream')
    agent = mount_efs.bootstrap_cloudwatch_logging(mock_config, FS_ID)
    for mock in (client_mock, group_mock, retention_mock):
        utils.assert_called_once(mock)
    utils.assert_not_called(stream_mock)
    assert agent == None
def test_bootstrap_cloudwatch_log_create_log_stream_failed(mocker):
    """A failed log-stream creation yields no agent even though earlier setup ran."""
    mock_config = _get_mock_config(DEFAULT_CLOUDWATCH_ENABLED, DEFAULT_CLOUDWATCH_LOG_GROUP, DEFAULT_RETENTION_DAYS)
    mocker.patch('mount_efs.get_instance_identity_info_from_instance_metadata', return_value=INSTANCE)
    client_mock = mocker.patch('mount_efs.get_botocore_client', return_value='fake-agent')
    group_mock = mocker.patch('mount_efs.create_cloudwatch_log_group', return_value=True)
    retention_mock = mocker.patch('mount_efs.put_cloudwatch_log_retention_policy', return_value=True)
    stream_mock = mocker.patch('mount_efs.create_cloudwatch_log_stream', return_value=False)
    agent = mount_efs.bootstrap_cloudwatch_logging(mock_config, FS_ID)
    for mock in (client_mock, group_mock, retention_mock, stream_mock):
        utils.assert_called_once(mock)
    assert agent == None
"""
botocore client unit tests
"""
def test_botocore_none_if_botocore_not_present(mocker):
    """When botocore is unavailable, get_botocore_client returns None."""
    config = _get_mock_config(DEFAULT_CLOUDWATCH_ENABLED, DEFAULT_CLOUDWATCH_LOG_GROUP, DEFAULT_RETENTION_DAYS)
    # Fix: the original mutated the module-level flag without restoring it,
    # leaking state into any test that runs afterwards. Restore it in finally.
    original_flag = mount_efs.BOTOCORE_PRESENT
    mount_efs.BOTOCORE_PRESENT = False
    try:
        client = mount_efs.get_botocore_client(config, 'logs')
    finally:
        mount_efs.BOTOCORE_PRESENT = original_flag
    assert client == None
def _test_botocore_client_established(mocker, iam_name):
    """Verify a botocore 'logs' client is created for the given IAM role name.

    ``iam_name`` may be a role name string or None (no attached role); both
    paths must still yield the client produced by the botocore session.
    """
    config = _get_mock_config(DEFAULT_CLOUDWATCH_ENABLED, DEFAULT_CLOUDWATCH_LOG_GROUP, DEFAULT_RETENTION_DAYS)
    # Fix: save/restore the module-level flag so this helper cannot leak
    # BOTOCORE_PRESENT=True into tests that expect botocore to be missing.
    original_flag = mount_efs.BOTOCORE_PRESENT
    mount_efs.BOTOCORE_PRESENT = True
    mocker.patch('mount_efs.get_target_region', return_value='us-east-1')
    mocker.patch('mount_efs.get_iam_role_name', return_value=iam_name)
    mocker.patch('mount_efs.get_aws_security_credentials_from_instance_metadata',
                 return_value=({
                     'AccessKeyId': '123456',
                     'SecretAccessKey': '123456',
                     'Token': '<PASSWORD>'
                 }, ''))
    boto_session_mock = MagicMock()
    boto_session_mock.create_client.return_value = 'fake-client'
    mocker.patch('botocore.session.get_session', return_value=boto_session_mock)
    try:
        client = mount_efs.get_botocore_client(config, 'logs')
    finally:
        mount_efs.BOTOCORE_PRESENT = original_flag
    assert client == 'fake-client'
# get_botocore_client must succeed whether or not an IAM role name is attached.
def test_botocore_client_established_if_iam_name_is_present(mocker):
    _test_botocore_client_established(mocker, 'default')
def test_botocore_client_established_if_iam_name_is_not_present(mocker):
    _test_botocore_client_established(mocker, None)
"""
create_log_group api call exception unit tests
"""
def _test_create_log_group_client_error(mocker, exception, desired_result=False):
    """Drive create_cloudwatch_log_group into a ClientError with the given error code."""
    error_response = {'Error': {'Code': exception}}
    mocker.patch('mount_efs.cloudwatch_create_log_group_helper',
                 side_effect=[ClientError(error_response, 'CreateLogGroup')])
    assert mount_efs.create_cloudwatch_log_group(MOCK_AGENT, DEFAULT_CLOUDWATCH_LOG_GROUP) == desired_result
def test_create_log_group_no_credentials_error(mocker):
    """Missing AWS credentials must not raise; group creation just reports failure."""
    mocker.patch('mount_efs.cloudwatch_create_log_group_helper', side_effect=[NoCredentialsError()])
    assert mount_efs.create_cloudwatch_log_group(MOCK_AGENT, DEFAULT_CLOUDWATCH_LOG_GROUP) == False
# CreateLogGroup error matrix: ResourceAlreadyExists counts as success (the
# group is usable), every other client error leaves the result False.
def test_create_log_group_resource_already_exist(mocker):
    _test_create_log_group_client_error(mocker, 'ResourceAlreadyExistsException', True)
def test_create_log_group_limit_exceed(mocker):
    _test_create_log_group_client_error(mocker, 'LimitExceededException')
def test_create_log_group_operation_aborted(mocker):
    _test_create_log_group_client_error(mocker, 'OperationAbortedException')
def test_create_log_group_invalid_parameter(mocker):
    _test_create_log_group_client_error(mocker, 'InvalidParameterException')
def test_create_log_group_service_unavailable_exception(mocker):
    _test_create_log_group_client_error(mocker, 'ServiceUnavailableException')
def test_create_log_group_access_denied_exception(mocker):
    _test_create_log_group_client_error(mocker, 'AccessDeniedException')
# An error code outside the known set must still be handled gracefully.
def test_create_log_group_unexpected_client_error(mocker):
    _test_create_log_group_client_error(mocker, 'Unknown exception')
"""
put_retention_policy api call exception unit tests
"""
def _test_put_retention_policy_client_error(mocker, exception, desired_result=False):
    """Drive put_cloudwatch_log_retention_policy into a ClientError with the given code."""
    error_response = {'Error': {'Code': exception}}
    mocker.patch('mount_efs.cloudwatch_put_retention_policy_helper',
                 side_effect=[ClientError(error_response, 'PutRetentionPolicy')])
    result = mount_efs.put_cloudwatch_log_retention_policy(MOCK_AGENT['client'],
                                                           DEFAULT_CLOUDWATCH_LOG_GROUP,
                                                           DEFAULT_RETENTION_DAYS)
    assert result == desired_result
def test_put_retention_policy_no_credentials_error(mocker):
    """Missing AWS credentials must not raise; the retention call reports failure."""
    mocker.patch('mount_efs.cloudwatch_put_retention_policy_helper', side_effect=[NoCredentialsError()])
    result = mount_efs.put_cloudwatch_log_retention_policy(MOCK_AGENT['client'],
                                                           DEFAULT_CLOUDWATCH_LOG_GROUP,
                                                           DEFAULT_RETENTION_DAYS)
    assert result == False
# PutRetentionPolicy error matrix: there is no "already exists" success case
# here — every client error (known or unknown) leaves the result False.
def test_put_retention_policy_resource_not_found(mocker):
    _test_put_retention_policy_client_error(mocker, 'ResourceNotFoundException')
def test_put_retention_policy_operation_aborted(mocker):
    _test_put_retention_policy_client_error(mocker, 'OperationAbortedException')
def test_put_retention_policy_invalid_parameter(mocker):
    _test_put_retention_policy_client_error(mocker, 'InvalidParameterException')
def test_put_retention_policy_service_unavailable_exception(mocker):
    _test_put_retention_policy_client_error(mocker, 'ServiceUnavailableException')
def test_put_retention_policy_access_denied_exception(mocker):
    _test_put_retention_policy_client_error(mocker, 'AccessDeniedException')
def test_put_retention_policy_unexpected_client_error(mocker):
    _test_put_retention_policy_client_error(mocker, 'Unknown exception')
"""
create_log_stream api call exception unit tests
"""
def _test_create_log_stream_client_error(mocker, exception, desired_result=False):
    """Drive create_cloudwatch_log_stream into a ClientError with the given error code."""
    error_response = {'Error': {'Code': exception}}
    mocker.patch('mount_efs.cloudwatch_create_log_stream_helper',
                 side_effect=[ClientError(error_response, 'CreateLogStream')])
    result = mount_efs.create_cloudwatch_log_stream(MOCK_AGENT['client'], DEFAULT_CLOUDWATCH_LOG_GROUP,
                                                    DEFAULT_CLOUDWATCH_LOG_STREAM)
    assert result == desired_result
def test_create_log_stream_no_credentials_error(mocker):
    """Missing AWS credentials must not raise; stream creation just reports failure."""
    mocker.patch('mount_efs.cloudwatch_create_log_stream_helper', side_effect=[NoCredentialsError()])
    result = mount_efs.create_cloudwatch_log_stream(MOCK_AGENT['client'], DEFAULT_CLOUDWATCH_LOG_GROUP,
                                                    DEFAULT_CLOUDWATCH_LOG_STREAM)
    assert result == False
# CreateLogStream error matrix: ResourceAlreadyExists counts as success (the
# stream is usable), every other client error leaves the result False.
def test_create_log_stream_resource_already_exist(mocker):
    _test_create_log_stream_client_error(mocker, 'ResourceAlreadyExistsException', True)
def test_create_log_stream_resource_not_found(mocker):
    _test_create_log_stream_client_error(mocker, 'ResourceNotFoundException')
def test_create_log_stream_invalid_parameter(mocker):
    _test_create_log_stream_client_error(mocker, 'InvalidParameterException')
def test_create_log_stream_service_unavailable_exception(mocker):
    _test_create_log_stream_client_error(mocker, 'ServiceUnavailableException')
def test_create_log_stream_access_denied_exception(mocker):
    _test_create_log_stream_client_error(mocker, 'AccessDeniedException')
def test_create_log_stream_unexpected_client_error(mocker):
    _test_create_log_stream_client_error(mocker, 'Unknown exception')
"""
put_log_events api call exception unit tests
"""
def _test_put_log_events_client_error(mocker, exception, desired_result=False):
    """Drive publish_cloudwatch_log into a PutLogEvents ClientError with the given code."""
    error_response = {'Error': {'Code': exception}}
    # A sequence token is still fetched before the publish attempt fails.
    mocker.patch('mount_efs.get_log_stream_next_token', return_value='ABCDEF')
    mocker.patch('mount_efs.cloudwatch_put_log_events_helper',
                 side_effect=[ClientError(error_response, 'PutLogEvents')])
    assert mount_efs.publish_cloudwatch_log(MOCK_AGENT, 'Test') == desired_result
def test_put_log_events_no_credentials_error(mocker):
    """Missing AWS credentials must not raise; publishing just reports failure."""
    mocker.patch('mount_efs.get_log_stream_next_token', return_value='ABCDEF')
    mocker.patch('mount_efs.cloudwatch_put_log_events_helper', side_effect=[NoCredentialsError()])
    assert mount_efs.publish_cloudwatch_log(MOCK_AGENT, 'Test') == False
# PutLogEvents error matrix: every client error (known or unknown) makes
# publish_cloudwatch_log report failure instead of raising.
def test_put_log_events_resource_not_found(mocker):
    _test_put_log_events_client_error(mocker, 'ResourceNotFoundException')
def test_put_log_events_invalid_sequence_token(mocker):
    _test_put_log_events_client_error(mocker, 'InvalidSequenceTokenException')
def test_put_log_events_invalid_parameter(mocker):
    _test_put_log_events_client_error(mocker, 'InvalidParameterException')
def test_put_log_events_data_already_accepted(mocker):
    _test_put_log_events_client_error(mocker, 'DataAlreadyAcceptedException')
def test_put_log_events_unrecognized_client(mocker):
    _test_put_log_events_client_error(mocker, 'UnrecognizedClientException')
def test_put_log_events_service_unavailable_exception(mocker):
    _test_put_log_events_client_error(mocker, 'ServiceUnavailableException')
def test_put_log_events_access_denied_exception(mocker):
    _test_put_log_events_client_error(mocker, 'AccessDeniedException')
def test_put_log_events_unexpected_client_error(mocker):
    _test_put_log_events_client_error(mocker, 'Unknown exception')
"""
describe_log_stream api call exception unit tests
"""
def _test_get_log_stream_next_token_client_error(mocker, exception, desired_result=None):
    """Drive get_log_stream_next_token into a DescribeLogStream ClientError with the given code."""
    error_response = {'Error': {'Code': exception}}
    mocker.patch('mount_efs.cloudwatch_describe_log_streams_helper',
                 side_effect=[ClientError(error_response, 'DescribeLogStream')])
    assert mount_efs.get_log_stream_next_token(MOCK_AGENT) == desired_result
def test_get_log_stream_next_token_no_credentials_error(mocker):
    """Missing AWS credentials yield no sequence token instead of raising."""
    mocker.patch('mount_efs.cloudwatch_describe_log_streams_helper', side_effect=[NoCredentialsError()])
    assert mount_efs.get_log_stream_next_token(MOCK_AGENT) == None
def test_get_log_stream_next_token_resource_not_found(mocker):
    # Fix: this previously delegated to _test_put_log_events_client_error, so it
    # exercised PutLogEvents instead of the DescribeLogStream path it is named for.
    _test_get_log_stream_next_token_client_error(mocker, 'ResourceNotFoundException')
# DescribeLogStream error matrix: every client error (known or unknown) makes
# get_log_stream_next_token return None instead of raising.
def test_get_log_stream_next_token_invalid_parameter(mocker):
    _test_get_log_stream_next_token_client_error(mocker, 'InvalidParameterException')
def test_get_log_stream_next_token_service_unavailable_exception(mocker):
    _test_get_log_stream_next_token_client_error(mocker, 'ServiceUnavailableException')
def test_get_log_stream_next_token_access_denied_exception(mocker):
    _test_get_log_stream_next_token_client_error(mocker, 'AccessDeniedException')
def test_get_log_stream_next_token_unexpected_client_error(mocker):
    _test_get_log_stream_next_token_client_error(mocker, 'Unknown exception')
def _test_get_log_stream_token_response(mocker, response, desired_token=None):
    """Feed a canned describe_log_streams response and check the extracted token."""
    mocker.patch('mount_efs.cloudwatch_describe_log_streams_helper', return_value=response)
    assert mount_efs.get_log_stream_next_token(MOCK_AGENT) == desired_token
# Malformed responses (empty stream list, missing key, or no response object)
# must be swallowed and produce no token rather than raising.
def test_get_log_stream_token_index_error(mocker):
    response = {
        'logStreams': []
    }
    _test_get_log_stream_token_response(mocker, response)
def test_get_log_stream_token_key_error(mocker):
    response = {}
    _test_get_log_stream_token_response(mocker, response)
def test_get_log_stream_token_type_error(mocker):
    response = None
    _test_get_log_stream_token_response(mocker, response)
# A well-formed response yields the first stream's uploadSequenceToken.
def test_get_log_stream_token_return_correct(mocker):
    token = 'ABCDEF'
    response = {
        'logStreams': [
            {
                'uploadSequenceToken': token
            }
        ]
    }
    _test_get_log_stream_token_response(mocker, response, token)
|
[
"mount_efs.bootstrap_cloudwatch_logging",
"mount_efs.check_if_cloudwatch_log_enabled",
"mount_efs.create_cloudwatch_log_group",
"botocore.exceptions.ClientError",
"mount_efs.create_cloudwatch_log_stream",
"mount_efs.get_cloudwatchlog_config",
"mount_efs.get_botocore_client",
"botocore.exceptions.NoCredentialsError",
"mount_efs.get_log_stream_next_token",
"mount_efs.put_cloudwatch_log_retention_policy",
"mount_efs.publish_cloudwatch_log",
"mock.MagicMock"
] |
[((1609, 1620), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (1618, 1620), False, 'from mock import MagicMock\n'), ((2014, 2063), 'mount_efs.check_if_cloudwatch_log_enabled', 'mount_efs.check_if_cloudwatch_log_enabled', (['config'], {}), '(config)\n', (2055, 2063), False, 'import mount_efs\n'), ((2221, 2263), 'mount_efs.get_cloudwatchlog_config', 'mount_efs.get_cloudwatchlog_config', (['config'], {}), '(config)\n', (2255, 2263), False, 'import mount_efs\n'), ((2711, 2760), 'mount_efs.check_if_cloudwatch_log_enabled', 'mount_efs.check_if_cloudwatch_log_enabled', (['config'], {}), '(config)\n', (2752, 2760), False, 'import mount_efs\n'), ((2918, 2967), 'mount_efs.get_cloudwatchlog_config', 'mount_efs.get_cloudwatchlog_config', (['config', 'FS_ID'], {}), '(config, FS_ID)\n', (2952, 2967), False, 'import mount_efs\n'), ((3432, 3481), 'mount_efs.check_if_cloudwatch_log_enabled', 'mount_efs.check_if_cloudwatch_log_enabled', (['config'], {}), '(config)\n', (3473, 3481), False, 'import mount_efs\n'), ((3635, 3684), 'mount_efs.get_cloudwatchlog_config', 'mount_efs.get_cloudwatchlog_config', (['config', 'FS_ID'], {}), '(config, FS_ID)\n', (3669, 3684), False, 'import mount_efs\n'), ((4137, 4186), 'mount_efs.check_if_cloudwatch_log_enabled', 'mount_efs.check_if_cloudwatch_log_enabled', (['config'], {}), '(config)\n', (4178, 4186), False, 'import mount_efs\n'), ((4340, 4382), 'mount_efs.get_cloudwatchlog_config', 'mount_efs.get_cloudwatchlog_config', (['config'], {}), '(config)\n', (4374, 4382), False, 'import mount_efs\n'), ((4964, 5013), 'mount_efs.check_if_cloudwatch_log_enabled', 'mount_efs.check_if_cloudwatch_log_enabled', (['config'], {}), '(config)\n', (5005, 5013), False, 'import mount_efs\n'), ((5146, 5199), 'mount_efs.bootstrap_cloudwatch_logging', 'mount_efs.bootstrap_cloudwatch_logging', (['config', 'FS_ID'], {}), '(config, FS_ID)\n', (5184, 5199), False, 'import mount_efs\n'), ((5707, 5760), 'mount_efs.bootstrap_cloudwatch_logging', 
'mount_efs.bootstrap_cloudwatch_logging', (['config', 'FS_ID'], {}), '(config, FS_ID)\n', (5745, 5760), False, 'import mount_efs\n'), ((6609, 6662), 'mount_efs.bootstrap_cloudwatch_logging', 'mount_efs.bootstrap_cloudwatch_logging', (['config', 'FS_ID'], {}), '(config, FS_ID)\n', (6647, 6662), False, 'import mount_efs\n'), ((7620, 7673), 'mount_efs.bootstrap_cloudwatch_logging', 'mount_efs.bootstrap_cloudwatch_logging', (['config', 'FS_ID'], {}), '(config, FS_ID)\n', (7658, 7673), False, 'import mount_efs\n'), ((8644, 8697), 'mount_efs.bootstrap_cloudwatch_logging', 'mount_efs.bootstrap_cloudwatch_logging', (['config', 'FS_ID'], {}), '(config, FS_ID)\n', (8682, 8697), False, 'import mount_efs\n'), ((9687, 9740), 'mount_efs.bootstrap_cloudwatch_logging', 'mount_efs.bootstrap_cloudwatch_logging', (['config', 'FS_ID'], {}), '(config, FS_ID)\n', (9725, 9740), False, 'import mount_efs\n'), ((10254, 10299), 'mount_efs.get_botocore_client', 'mount_efs.get_botocore_client', (['config', '"""logs"""'], {}), "(config, 'logs')\n", (10283, 10299), False, 'import mount_efs\n'), ((11040, 11051), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (11049, 11051), False, 'from mock import MagicMock\n'), ((11212, 11257), 'mount_efs.get_botocore_client', 'mount_efs.get_botocore_client', (['config', '"""logs"""'], {}), "(config, 'logs')\n", (11241, 11257), False, 'import mount_efs\n'), ((11944, 12023), 'mount_efs.create_cloudwatch_log_group', 'mount_efs.create_cloudwatch_log_group', (['MOCK_AGENT', 'DEFAULT_CLOUDWATCH_LOG_GROUP'], {}), '(MOCK_AGENT, DEFAULT_CLOUDWATCH_LOG_GROUP)\n', (11981, 12023), False, 'import mount_efs\n'), ((12244, 12323), 'mount_efs.create_cloudwatch_log_group', 'mount_efs.create_cloudwatch_log_group', (['MOCK_AGENT', 'DEFAULT_CLOUDWATCH_LOG_GROUP'], {}), '(MOCK_AGENT, DEFAULT_CLOUDWATCH_LOG_GROUP)\n', (12281, 12323), False, 'import mount_efs\n'), ((13715, 13840), 'mount_efs.put_cloudwatch_log_retention_policy', 'mount_efs.put_cloudwatch_log_retention_policy', 
(["MOCK_AGENT['client']", 'DEFAULT_CLOUDWATCH_LOG_GROUP', 'DEFAULT_RETENTION_DAYS'], {}), "(MOCK_AGENT['client'],\n DEFAULT_CLOUDWATCH_LOG_GROUP, DEFAULT_RETENTION_DAYS)\n", (13760, 13840), False, 'import mount_efs\n'), ((14195, 14320), 'mount_efs.put_cloudwatch_log_retention_policy', 'mount_efs.put_cloudwatch_log_retention_policy', (["MOCK_AGENT['client']", 'DEFAULT_CLOUDWATCH_LOG_GROUP', 'DEFAULT_RETENTION_DAYS'], {}), "(MOCK_AGENT['client'],\n DEFAULT_CLOUDWATCH_LOG_GROUP, DEFAULT_RETENTION_DAYS)\n", (14240, 14320), False, 'import mount_efs\n'), ((15670, 15795), 'mount_efs.create_cloudwatch_log_stream', 'mount_efs.create_cloudwatch_log_stream', (["MOCK_AGENT['client']", 'DEFAULT_CLOUDWATCH_LOG_GROUP', 'DEFAULT_CLOUDWATCH_LOG_STREAM'], {}), "(MOCK_AGENT['client'],\n DEFAULT_CLOUDWATCH_LOG_GROUP, DEFAULT_CLOUDWATCH_LOG_STREAM)\n", (15708, 15795), False, 'import mount_efs\n'), ((16072, 16197), 'mount_efs.create_cloudwatch_log_stream', 'mount_efs.create_cloudwatch_log_stream', (["MOCK_AGENT['client']", 'DEFAULT_CLOUDWATCH_LOG_GROUP', 'DEFAULT_CLOUDWATCH_LOG_STREAM'], {}), "(MOCK_AGENT['client'],\n DEFAULT_CLOUDWATCH_LOG_GROUP, DEFAULT_CLOUDWATCH_LOG_STREAM)\n", (16110, 16197), False, 'import mount_efs\n'), ((17588, 17640), 'mount_efs.publish_cloudwatch_log', 'mount_efs.publish_cloudwatch_log', (['MOCK_AGENT', '"""Test"""'], {}), "(MOCK_AGENT, 'Test')\n", (17620, 17640), False, 'import mount_efs\n'), ((17936, 17988), 'mount_efs.publish_cloudwatch_log', 'mount_efs.publish_cloudwatch_log', (['MOCK_AGENT', '"""Test"""'], {}), "(MOCK_AGENT, 'Test')\n", (17968, 17988), False, 'import mount_efs\n'), ((19489, 19536), 'mount_efs.get_log_stream_next_token', 'mount_efs.get_log_stream_next_token', (['MOCK_AGENT'], {}), '(MOCK_AGENT)\n', (19524, 19536), False, 'import mount_efs\n'), ((19756, 19803), 'mount_efs.get_log_stream_next_token', 'mount_efs.get_log_stream_next_token', (['MOCK_AGENT'], {}), '(MOCK_AGENT)\n', (19791, 19803), False, 'import mount_efs\n'), ((20768, 20815), 
'mount_efs.get_log_stream_next_token', 'mount_efs.get_log_stream_next_token', (['MOCK_AGENT'], {}), '(MOCK_AGENT)\n', (20803, 20815), False, 'import mount_efs\n'), ((11885, 11922), 'botocore.exceptions.ClientError', 'ClientError', (['response', 'operation_name'], {}), '(response, operation_name)\n', (11896, 11922), False, 'from botocore.exceptions import ClientError, NoCredentialsError\n'), ((12202, 12222), 'botocore.exceptions.NoCredentialsError', 'NoCredentialsError', ([], {}), '()\n', (12220, 12222), False, 'from botocore.exceptions import ClientError, NoCredentialsError\n'), ((13656, 13693), 'botocore.exceptions.ClientError', 'ClientError', (['response', 'operation_name'], {}), '(response, operation_name)\n', (13667, 13693), False, 'from botocore.exceptions import ClientError, NoCredentialsError\n'), ((14153, 14173), 'botocore.exceptions.NoCredentialsError', 'NoCredentialsError', ([], {}), '()\n', (14171, 14173), False, 'from botocore.exceptions import ClientError, NoCredentialsError\n'), ((15611, 15648), 'botocore.exceptions.ClientError', 'ClientError', (['response', 'operation_name'], {}), '(response, operation_name)\n', (15622, 15648), False, 'from botocore.exceptions import ClientError, NoCredentialsError\n'), ((16030, 16050), 'botocore.exceptions.NoCredentialsError', 'NoCredentialsError', ([], {}), '()\n', (16048, 16050), False, 'from botocore.exceptions import ClientError, NoCredentialsError\n'), ((17529, 17566), 'botocore.exceptions.ClientError', 'ClientError', (['response', 'operation_name'], {}), '(response, operation_name)\n', (17540, 17566), False, 'from botocore.exceptions import ClientError, NoCredentialsError\n'), ((17894, 17914), 'botocore.exceptions.NoCredentialsError', 'NoCredentialsError', ([], {}), '()\n', (17912, 17914), False, 'from botocore.exceptions import ClientError, NoCredentialsError\n'), ((19437, 19474), 'botocore.exceptions.ClientError', 'ClientError', (['response', 'operation_name'], {}), '(response, operation_name)\n', (19448, 
19474), False, 'from botocore.exceptions import ClientError, NoCredentialsError\n'), ((19721, 19741), 'botocore.exceptions.NoCredentialsError', 'NoCredentialsError', ([], {}), '()\n', (19739, 19741), False, 'from botocore.exceptions import ClientError, NoCredentialsError\n')]
|
import unittest
from unittest.case import expectedFailure
import spydrnet as sdn
from spydrnet_physical.util.get_names import get_names
class TestDefinition(unittest.TestCase):
    def setUp(self):
        # Fresh netlist -> library -> (anonymous) definition for every test.
        self.netlist = sdn.Netlist("test_netlist")
        self.library = self.netlist.create_library("test_lib")
        self.definition = self.library.create_definition()
    def test_create_feedthroughs_ports(self):
        ''' Test feedthrough port creation '''
        cable = sdn.Cable("cable1")
        cable.create_wires(4)
        # Default naming combines the cable name with the "feed" suffix and an
        # _in/_out direction marker (asserted below).
        port1, port2 = self.definition.create_feedthroughs_ports(cable, "feed")
        self.assertIsInstance(port1, sdn.Port)
        self.assertIsInstance(port2, sdn.Port)
        self.assertSetEqual(set(get_names(self.definition.get_ports())),
                            {"cable1_feed_in", "cable1_feed_out"})
        # Cables with matching names appear on the definition alongside the ports.
        self.assertSetEqual(set(get_names(self.definition.get_cables())),
                            {'cable1_feed_in', 'cable1_feed_out'})
def test_create_feedthroughs_ports_2(self):
''' Test feedthrough port creation with lambda naming '''
cable = sdn.Cable("cable1")
cable.create_wires(4)
def get_port_name(x): return "inport" if x is sdn.IN else "outport"
port1, port2 = self.definition.create_feedthroughs_ports(
cable,
get_port_names=get_port_name)
self.assertIsInstance(port1, sdn.Port)
self.assertIsInstance(port2, sdn.Port)
self.assertSetEqual(set(get_names(self.definition.get_ports())),
{"inport", "outport"})
def test_create_feedthrough(self):
''' This checks bus feedthrough from single instance '''
module1 = self.library.create_definition("module1")
module2 = self.library.create_definition("module2")
driver_port = module1.create_port("driver", direction=sdn.OUT, pins=4)
load_port = module1.create_port("load", direction=sdn.IN, pins=4)
# Create instances
inst0 = self.definition.create_child("inst0", reference=module1)
inst1 = self.definition.create_child("inst1", reference=module1)
ft_inst = self.definition.create_child("ft_inst", reference=module2)
# Create cable
cable = self.definition.create_cable("cable", wires=4)
cable.connect_instance_port(inst0, driver_port)
cable.connect_instance_port(inst1, load_port)
# Create Feedthrough
new_cables = self.definition.create_feedthrough(ft_inst, cable)
# Check correctness of connections
for new_cable in new_cables:
self.assertTrue(isinstance(new_cable, sdn.Cable),
"Return value should be cable")
new_cables = new_cables[0]
self.assertEqual(new_cables.size, 4,
"New cable should have same dimensions")
self.assertSetEqual(set(map(lambda p: p.name, module2.ports)),
{"cable_ft_out", "cable_ft_in"})
self.assertSetEqual(set(map(lambda p: p.name, self.definition.get_cables())),
{"cable", "cable_ft_in_0"})
self.assertSetEqual(set(('cable_ft_in_0', 'cable')),
set(get_names(ft_inst.get_cables(selection="OUTSIDE"))),
"Checks if both the cable are connected to feedthoguh instance")
self.assertSetEqual(set(('cable',)),
set(get_names(inst0.get_cables(selection="OUTSIDE"))),
"Checks if original wire name is still same ")
self.assertSetEqual(set(('cable_ft_in_0',)),
set(get_names(inst1.get_cables(selection="OUTSIDE"))),
"Checks if feethrough wire name is as expected ")
def test_combine_ports(self):
''' Creates 3 port on the given definition and combines them '''
port1 = self.definition.create_port(pins=1)
port2 = self.definition.create_port(pins=1)
port3 = self.definition.create_port(pins=1)
cable1 = self.definition.create_cable(wires=1)
cable1.connect_port(port1)
wire1 = cable1.wires[0]
cable2 = self.definition.create_cable(wires=1)
cable2.connect_port(port2)
wire2 = cable2.wires[0]
cable3 = self.definition.create_cable(wires=1)
cable3.connect_port(port3)
wire3 = cable3.wires[0]
new_port, new_cable = self.definition.combine_ports(
"merged_port", [port1, port2, port3])
self.assertIsInstance(new_port, sdn.Port)
self.assertEqual(new_port.size, 3)
self.assertIsInstance(new_cable, sdn.Cable)
self.assertEqual(new_cable.size, 3)
self.assertEqual(len(self.definition.ports), 1)
self.assertEqual(new_port.pins[0].wire, wire1)
self.assertEqual(new_port.pins[1].wire, wire2)
self.assertEqual(new_port.pins[2].wire, wire3)
def test_merge_instance(self):
def2 = self.library.create_definition("def2")
def3 = self.library.create_definition("def3")
inst2 = self.definition.create_child("inst2", reference=def2)
inst3 = self.definition.create_child("inst3", reference=def3)
new_m, inst, pin_map = self.definition.merge_instance([inst2, inst3])
self.assertTrue(new_m.name, "def2_def3_merged")
self.assertTrue(inst.name, "def2_def3_merged_1")
self.assertTrue(inst.reference, new_m)
self.assertEqual(set(get_names(self.definition.get_instances())),
{"def2_def3_merged_1", })
self.assertEqual(set(get_names(new_m.get_instances())),
{"inst2", "inst3"})
def test_flatten_instance(self):
submodule = self.library.create_definition("submodule")
sm_in1 = submodule.create_port("sm_in1", direction=sdn.IN, pins=1)
sm_in2 = submodule.create_port("sm_in2", direction=sdn.IN, pins=4)
sm_out1 = submodule.create_port("sm_out1", direction=sdn.OUT, pins=1)
sm_out2 = submodule.create_port("sm_out2", direction=sdn.OUT, pins=4)
module = self.library.create_definition("module")
in1 = module.create_port("in1", direction=sdn.IN, pins=1)
in2 = module.create_port("in2", direction=sdn.IN, pins=4)
out1 = module.create_port("out1", direction=sdn.OUT, pins=1)
out2 = module.create_port("out2", direction=sdn.OUT, pins=4)
sm_inst = module.create_child("submodule_inst", reference=submodule)
in1_c = module.create_cable("in1", wires=1)
in1_c.connect_instance_port(sm_inst, sm_in1)
in2_c = module.create_cable("in2", wires=4)
in2_c.connect_instance_port(sm_inst, sm_in2)
out1_c = module.create_cable("out1", wires=1)
out1_c.connect_instance_port(sm_inst, sm_out1)
out2_c = module.create_cable("out2", wires=4)
out2_c.connect_instance_port(sm_inst, sm_out2)
inst1 = self.definition.create_child("inst1", reference=module)
win1 = self.definition.create_cable("win1", wires=1)
win1.connect_instance_port(inst1, in1)
win2 = self.definition.create_cable("win2", wires=4)
win2.connect_instance_port(inst1, in2)
wout1 = self.definition.create_cable("wout1", wires=1)
wout1.connect_instance_port(inst1, out1)
wout2 = self.definition.create_cable("wout2", wires=4)
wout2.connect_instance_port(inst1, out2)
self.definition.flatten_instance(inst1)
self.assertEqual(len(self.definition.children), 1)
self.assertEqual(self.definition.children[0].name,
"inst1_submodule_inst")
@expectedFailure
def test_get_connectivity_network():
assert 1 == 2
@expectedFailure
def test_merge_multiple_instance(self):
# TODO: Wrte test for checking merge multiple instances
self.definition.merge_multiple_instance()
|
[
"spydrnet.Netlist",
"spydrnet.Cable"
] |
[((224, 251), 'spydrnet.Netlist', 'sdn.Netlist', (['"""test_netlist"""'], {}), "('test_netlist')\n", (235, 251), True, 'import spydrnet as sdn\n'), ((484, 503), 'spydrnet.Cable', 'sdn.Cable', (['"""cable1"""'], {}), "('cable1')\n", (493, 503), True, 'import spydrnet as sdn\n'), ((1121, 1140), 'spydrnet.Cable', 'sdn.Cable', (['"""cable1"""'], {}), "('cable1')\n", (1130, 1140), True, 'import spydrnet as sdn\n')]
|
#!/usr/bin/env python
import os
import sys
import django
from memory_profiler import profile
@profile(precision=8)
def no_cache():
    """Memory profile: build ~10k Hashid objects without a shared encoder.

    Each Hashid re-derives its encoder from the salt/min_length, so this is
    the expensive baseline for the comparison against ``with_cache``.
    """
    from hashid_field.hashid import Hashid
    instances = []
    for value in range(1, 10_000):
        instances.append(Hashid(value, salt="asdf", min_length=7))
    return instances
@profile(precision=8)
def with_cache():
    """Memory profile: build ~10k Hashid objects sharing one Hashids encoder.

    The pre-built ``Hashids`` instance is passed to every ``Hashid``, so the
    encoder state is constructed once instead of once per object.
    """
    from hashid_field.hashid import Hashid
    from hashids import Hashids
    shared_encoder = Hashids(salt="asdf", min_length=7)
    return [Hashid(value, hashids=shared_encoder) for value in range(1, 10_000)]
if __name__ == "__main__":
    def main() -> None:
        """Print environment info, boot Django, then run both profiled builds."""
        print("Python:", sys.version)
        print("Django:", django.get_version(django.VERSION))
        # hashid_field requires a configured Django settings module.
        os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.test_settings'
        django.setup()
        no_cache()
        with_cache()

    main()
|
[
"django.setup",
"memory_profiler.profile",
"hashids.Hashids",
"hashid_field.hashid.Hashid",
"django.get_version"
] |
[((97, 117), 'memory_profiler.profile', 'profile', ([], {'precision': '(8)'}), '(precision=8)\n', (104, 117), False, 'from memory_profiler import profile\n'), ((282, 302), 'memory_profiler.profile', 'profile', ([], {'precision': '(8)'}), '(precision=8)\n', (289, 302), False, 'from memory_profiler import profile\n'), ((410, 444), 'hashids.Hashids', 'Hashids', ([], {'salt': '"""asdf"""', 'min_length': '(7)'}), "(salt='asdf', min_length=7)\n", (417, 444), False, 'from hashids import Hashids\n'), ((726, 740), 'django.setup', 'django.setup', ([], {}), '()\n', (738, 740), False, 'import django\n'), ((194, 230), 'hashid_field.hashid.Hashid', 'Hashid', (['i'], {'salt': '"""asdf"""', 'min_length': '(7)'}), "(i, salt='asdf', min_length=7)\n", (200, 230), False, 'from hashid_field.hashid import Hashid\n'), ((462, 488), 'hashid_field.hashid.Hashid', 'Hashid', (['i'], {'hashids': 'hashids'}), '(i, hashids=hashids)\n', (468, 488), False, 'from hashid_field.hashid import Hashid\n'), ((621, 655), 'django.get_version', 'django.get_version', (['django.VERSION'], {}), '(django.VERSION)\n', (639, 655), False, 'import django\n')]
|
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
# DQM analyzer monitoring the ECAL pi0/eta calibration (AlCaReco) streams.
# All cut values below are selection thresholds consumed by DQMSourcePi0;
# energies/pt are presumably in GeV (ECAL convention) -- confirm in the
# DQMSourcePi0 plugin before relying on units.
EcalPi0MonDQM = DQMEDAnalyzer('DQMSourcePi0',
    # Run on every event (no prescale); histograms go under FolderName.
    prescaleFactor = cms.untracked.int32(1),
    FolderName = cms.untracked.string('AlCaReco/EcalPi0'),
    # Input rechit collections from the HLT AlCa regional reconstruction:
    # EB = barrel, EE = endcap, for the pi0 and eta streams respectively.
    AlCaStreamEBpi0Tag = cms.untracked.InputTag("hltAlCaPi0RegRecHits","pi0EcalRecHitsEB"),
    AlCaStreamEEpi0Tag = cms.untracked.InputTag("hltAlCaPi0RegRecHits","pi0EcalRecHitsEE"),
    AlCaStreamEBetaTag = cms.untracked.InputTag("hltAlCaEtaRegRecHits","etaEcalRecHitsEB"),
    AlCaStreamEEetaTag = cms.untracked.InputTag("hltAlCaEtaRegRecHits","etaEcalRecHitsEE"),
    # Which detector/particle combinations to monitor.
    isMonEEpi0 = cms.untracked.bool(True),
    isMonEBpi0 = cms.untracked.bool(True),
    isMonEEeta = cms.untracked.bool(True),
    isMonEBeta = cms.untracked.bool(True),
    # Optional dump of the histograms to a local ROOT file.
    SaveToFile = cms.untracked.bool(False),
    FileName = cms.untracked.string('MonitorAlCaEcalPi0.root'),
    # Clustering: seed-crystal thresholds (barrel/endcap) and the fixed
    # cluster size in crystals (eta x phi).
    clusSeedThr = cms.double( 0.5 ),
    clusSeedThrEndCap = cms.double( 1.0 ),
    clusEtaSize = cms.int32( 3 ),
    clusPhiSize = cms.int32( 3 ),
    # Minimum single-crystal energy entering a cluster (negative values
    # admit noise-level hits).
    seleXtalMinEnergy = cms.double( -0.15 ),
    seleXtalMinEnergyEndCap = cms.double( -0.75 ),
    # pi0 selection, barrel: photon/pi0 pt, invariant-mass window,
    # shower-shape (S4/S9) and isolation cuts.
    selePtGamma = cms.double(1 ),
    selePtPi0 = cms.double( 2. ),
    seleMinvMaxPi0 = cms.double( 0.22 ),
    seleMinvMinPi0 = cms.double( 0.06 ),
    seleS4S9Gamma = cms.double( 0.83 ),
    selePi0Iso = cms.double( 0.5 ),
    ptMinForIsolation = cms.double( 1 ),
    selePi0BeltDR = cms.double( 0.2 ),
    selePi0BeltDeta = cms.double( 0.05 ),
    # pi0 selection, endcap (same cut set, looser/tighter as noted).
    selePtGammaEndCap = cms.double( 0.8 ),
    selePtPi0EndCap = cms.double( 3.0 ),
    seleS4S9GammaEndCap = cms.double( 0.9 ),
    seleMinvMaxPi0EndCap = cms.double( 0.3 ),
    seleMinvMinPi0EndCap = cms.double( 0.05 ),
    ptMinForIsolationEndCap = cms.double( 0.5 ),
    selePi0IsoEndCap = cms.double( 0.5 ),
    selePi0BeltDREndCap = cms.double( 0.2 ),
    selePi0BeltDetaEndCap = cms.double( 0.05 ),
    # eta selection, barrel; S9/S25 adds a wider shower-shape ratio cut.
    selePtGammaEta = cms.double(1.2),
    selePtEta = cms.double(4.0),
    seleS4S9GammaEta = cms.double(0.9),
    seleS9S25GammaEta = cms.double(0.8),
    seleMinvMaxEta = cms.double(0.8),
    seleMinvMinEta = cms.double(0.3),
    ptMinForIsolationEta = cms.double(1.0),
    seleEtaIso = cms.double(0.5),
    seleEtaBeltDR = cms.double(0.3),
    seleEtaBeltDeta = cms.double(0.1),
    # NOTE(review): presumably the pi0-candidate mass band used inside the
    # eta selection -- confirm against the DQMSourcePi0 implementation.
    massLowPi0Cand = cms.double(0.104),
    massHighPi0Cand = cms.double(0.163),
    # eta selection, endcap.
    selePtGammaEtaEndCap = cms.double(1.5),
    selePtEtaEndCap = cms.double(5),
    seleS4S9GammaEtaEndCap = cms.double(0.9),
    seleS9S25GammaEtaEndCap = cms.double(0.85),
    seleMinvMaxEtaEndCap = cms.double(0.8),
    seleMinvMinEtaEndCap = cms.double(0.3),
    ptMinForIsolationEtaEndCap = cms.double(0.5),
    seleEtaIsoEndCap = cms.double(0.5),
    seleEtaBeltDREndCap = cms.double(0.3),
    seleEtaBeltDetaEndCap = cms.double(0.1),
    # Log-weighted cluster-position calculation parameters (T0 per region,
    # weight floor W0, radiation-length scale X0).
    posCalcParameters = cms.PSet( T0_barl      = cms.double(5.7),
                                  T0_endc      = cms.double(3.1),
                                  T0_endcPresh = cms.double(1.2),
                                  LogWeighted  = cms.bool(True),
                                  W0           = cms.double(4.2),
                                  X0           = cms.double(0.89)
                                  )
)
|
[
"FWCore.ParameterSet.Config.untracked.int32",
"FWCore.ParameterSet.Config.double",
"FWCore.ParameterSet.Config.untracked.string",
"FWCore.ParameterSet.Config.untracked.bool",
"FWCore.ParameterSet.Config.int32",
"FWCore.ParameterSet.Config.bool",
"FWCore.ParameterSet.Config.untracked.InputTag"
] |
[((166, 188), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(1)'], {}), '(1)\n', (185, 188), True, 'import FWCore.ParameterSet.Config as cms\n'), ((207, 247), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""AlCaReco/EcalPi0"""'], {}), "('AlCaReco/EcalPi0')\n", (227, 247), True, 'import FWCore.ParameterSet.Config as cms\n'), ((275, 341), 'FWCore.ParameterSet.Config.untracked.InputTag', 'cms.untracked.InputTag', (['"""hltAlCaPi0RegRecHits"""', '"""pi0EcalRecHitsEB"""'], {}), "('hltAlCaPi0RegRecHits', 'pi0EcalRecHitsEB')\n", (297, 341), True, 'import FWCore.ParameterSet.Config as cms\n'), ((367, 433), 'FWCore.ParameterSet.Config.untracked.InputTag', 'cms.untracked.InputTag', (['"""hltAlCaPi0RegRecHits"""', '"""pi0EcalRecHitsEE"""'], {}), "('hltAlCaPi0RegRecHits', 'pi0EcalRecHitsEE')\n", (389, 433), True, 'import FWCore.ParameterSet.Config as cms\n'), ((459, 525), 'FWCore.ParameterSet.Config.untracked.InputTag', 'cms.untracked.InputTag', (['"""hltAlCaEtaRegRecHits"""', '"""etaEcalRecHitsEB"""'], {}), "('hltAlCaEtaRegRecHits', 'etaEcalRecHitsEB')\n", (481, 525), True, 'import FWCore.ParameterSet.Config as cms\n'), ((551, 617), 'FWCore.ParameterSet.Config.untracked.InputTag', 'cms.untracked.InputTag', (['"""hltAlCaEtaRegRecHits"""', '"""etaEcalRecHitsEE"""'], {}), "('hltAlCaEtaRegRecHits', 'etaEcalRecHitsEE')\n", (573, 617), True, 'import FWCore.ParameterSet.Config as cms\n'), ((636, 660), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(True)'], {}), '(True)\n', (654, 660), True, 'import FWCore.ParameterSet.Config as cms\n'), ((679, 703), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(True)'], {}), '(True)\n', (697, 703), True, 'import FWCore.ParameterSet.Config as cms\n'), ((722, 746), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(True)'], {}), '(True)\n', (740, 746), True, 'import FWCore.ParameterSet.Config as cms\n'), ((765, 789), 
'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(True)'], {}), '(True)\n', (783, 789), True, 'import FWCore.ParameterSet.Config as cms\n'), ((810, 835), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(False)'], {}), '(False)\n', (828, 835), True, 'import FWCore.ParameterSet.Config as cms\n'), ((852, 899), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""MonitorAlCaEcalPi0.root"""'], {}), "('MonitorAlCaEcalPi0.root')\n", (872, 899), True, 'import FWCore.ParameterSet.Config as cms\n'), ((920, 935), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.5)'], {}), '(0.5)\n', (930, 935), True, 'import FWCore.ParameterSet.Config as cms\n'), ((963, 978), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(1.0)'], {}), '(1.0)\n', (973, 978), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1000, 1012), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(3)'], {}), '(3)\n', (1009, 1012), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1034, 1046), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(3)'], {}), '(3)\n', (1043, 1046), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1074, 1091), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(-0.15)'], {}), '(-0.15)\n', (1084, 1091), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1125, 1142), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(-0.75)'], {}), '(-0.75)\n', (1135, 1142), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1164, 1177), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(1)'], {}), '(1)\n', (1174, 1177), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1196, 1211), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(2.0)'], {}), '(2.0)\n', (1206, 1211), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1235, 1251), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.22)'], {}), '(0.22)\n', (1245, 1251), True, 'import FWCore.ParameterSet.Config as cms\n'), 
((1276, 1292), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.06)'], {}), '(0.06)\n', (1286, 1292), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1316, 1332), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.83)'], {}), '(0.83)\n', (1326, 1332), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1353, 1368), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.5)'], {}), '(0.5)\n', (1363, 1368), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1396, 1409), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(1)'], {}), '(1)\n', (1406, 1409), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1433, 1448), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.2)'], {}), '(0.2)\n', (1443, 1448), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1474, 1490), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.05)'], {}), '(0.05)\n', (1484, 1490), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1519, 1534), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.8)'], {}), '(0.8)\n', (1529, 1534), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1560, 1575), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(3.0)'], {}), '(3.0)\n', (1570, 1575), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1605, 1620), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.9)'], {}), '(0.9)\n', (1615, 1620), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1651, 1666), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.3)'], {}), '(0.3)\n', (1661, 1666), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1697, 1713), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.05)'], {}), '(0.05)\n', (1707, 1713), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1747, 1762), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.5)'], {}), '(0.5)\n', (1757, 1762), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1789, 1804), 'FWCore.ParameterSet.Config.double', 
'cms.double', (['(0.5)'], {}), '(0.5)\n', (1799, 1804), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1835, 1850), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.2)'], {}), '(0.2)\n', (1845, 1850), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1883, 1899), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.05)'], {}), '(0.05)\n', (1893, 1899), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1925, 1940), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(1.2)'], {}), '(1.2)\n', (1935, 1940), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1958, 1973), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(4.0)'], {}), '(4.0)\n', (1968, 1973), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1999, 2014), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.9)'], {}), '(0.9)\n', (2009, 2014), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2041, 2056), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.8)'], {}), '(0.8)\n', (2051, 2056), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2079, 2094), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.8)'], {}), '(0.8)\n', (2089, 2094), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2117, 2132), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.3)'], {}), '(0.3)\n', (2127, 2132), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2161, 2176), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(1.0)'], {}), '(1.0)\n', (2171, 2176), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2195, 2210), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.5)'], {}), '(0.5)\n', (2205, 2210), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2232, 2247), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.3)'], {}), '(0.3)\n', (2242, 2247), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2271, 2286), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.1)'], {}), '(0.1)\n', (2281, 2286), True, 
'import FWCore.ParameterSet.Config as cms\n'), ((2309, 2326), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.104)'], {}), '(0.104)\n', (2319, 2326), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2350, 2367), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.163)'], {}), '(0.163)\n', (2360, 2367), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2397, 2412), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(1.5)'], {}), '(1.5)\n', (2407, 2412), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2436, 2449), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(5)'], {}), '(5)\n', (2446, 2449), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2481, 2496), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.9)'], {}), '(0.9)\n', (2491, 2496), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2529, 2545), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.85)'], {}), '(0.85)\n', (2539, 2545), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2574, 2589), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.8)'], {}), '(0.8)\n', (2584, 2589), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2618, 2633), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.3)'], {}), '(0.3)\n', (2628, 2633), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2668, 2683), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.5)'], {}), '(0.5)\n', (2678, 2683), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2708, 2723), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.5)'], {}), '(0.5)\n', (2718, 2723), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2751, 2766), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.3)'], {}), '(0.3)\n', (2761, 2766), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2796, 2811), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.1)'], {}), '(0.1)\n', (2806, 2811), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2894, 2909), 
'FWCore.ParameterSet.Config.double', 'cms.double', (['(5.7)'], {}), '(5.7)\n', (2904, 2909), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2960, 2975), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(3.1)'], {}), '(3.1)\n', (2970, 2975), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3034, 3049), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(1.2)'], {}), '(1.2)\n', (3044, 3049), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3100, 3114), 'FWCore.ParameterSet.Config.bool', 'cms.bool', (['(True)'], {}), '(True)\n', (3108, 3114), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3165, 3180), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(4.2)'], {}), '(4.2)\n', (3175, 3180), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3231, 3247), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.89)'], {}), '(0.89)\n', (3241, 3247), True, 'import FWCore.ParameterSet.Config as cms\n')]
|
import datetime
class SimpleLogger:
    """Minimal process-wide singleton logger writing timestamped records.

    Usage: ``SimpleLogger.instance().open_log(path)`` then ``write_log(...)``
    and finally ``close_log()``.
    """

    # Shared file handle; None until open_log() is called.
    log_file = None

    @staticmethod
    def instance():
        """Return the single shared SimpleLogger, creating it on first use."""
        if '_instance' not in SimpleLogger.__dict__:
            SimpleLogger._instance = SimpleLogger()
        return SimpleLogger._instance

    def open_log(self, path):
        """Open *path* for writing (truncates any existing file)."""
        self.log_file = open(path, 'w', encoding='utf-8')

    def write_log(self, log_record):
        """Write one timestamped record, terminated by a newline.

        BUG FIX: the original emitted records with no trailing newline (so
        successive records ran together on one line) and misused
        ``writelines`` on a plain string, which writes it character by
        character.
        """
        now = str(datetime.datetime.now())
        record = '%s: %s\n' % (now, log_record)
        self.log_file.write(record)

    def close_log(self):
        """Close the underlying log file."""
        self.log_file.close()
if __name__ == '__main__':
    # Both handles must resolve to the very same singleton object.
    first = SimpleLogger.instance()
    second = SimpleLogger.instance()
    assert first is second
    # Either handle may drive the shared log file.
    first.open_log('test.log')
    second.write_log('Hello World')
    first.close_log()
|
[
"datetime.datetime.now"
] |
[((367, 390), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (388, 390), False, 'import datetime\n')]
|
import discord
from .vars import ban, feedback, support, unban, warns
def perms_dict(ctx):
    """Return ``(admin_roles, overwrite_dict)`` for admin-only channels.

    Admin roles are the guild's unmanaged roles holding ``manage_guild``.
    The overwrite map grants them (and the bot itself) read access while
    hiding the channel from ``@everyone``.
    """
    admin_roles = []
    for role in ctx.guild.roles:
        if role.permissions.manage_guild and not role.managed:
            admin_roles.append(role)
    overwrite_dict = {
        role: discord.PermissionOverwrite(read_messages=True)
        for role in admin_roles
    }
    overwrite_dict[ctx.guild.default_role] = discord.PermissionOverwrite(read_messages=False)
    overwrite_dict[ctx.guild.me] = discord.PermissionOverwrite(read_messages=True)
    return admin_roles, overwrite_dict
async def channel_creation(ctx):
    """Locate the bot's admin channels, creating the category if any is missing.

    Returns a 7-tuple ``(feed_channel, support_channel, support_channel_roles,
    ban_channel, unban_channel, warns_channel, botask)`` where each element is
    the matching channel/role object, or ``False`` when it does not exist.
    """
    admin_roles, overwrite_dict = perms_dict(ctx)

    def _channel_by_topic(topic):
        # Single lookup per topic (the original queried discord.utils.get
        # twice); normalise a missing channel (None) to False to match the
        # module's convention.
        return discord.utils.get(ctx.guild.text_channels, topic=topic) or False

    category = discord.utils.get(ctx.guild.categories, name="Admin / Feedback") or False
    feed_channel = _channel_by_topic(feedback)
    ban_channel = _channel_by_topic(ban)
    unban_channel = _channel_by_topic(unban)
    warns_channel = _channel_by_topic(warns)
    support_channel = _channel_by_topic(support)
    support_channel_roles = discord.utils.get(ctx.guild.roles, name="SupportRequired") or False
    # BUG FIX: the original tested isinstance(unban, bool), isinstance(ban,
    # bool) and isinstance(warns, bool) -- the imported topic constants, not
    # the looked-up channels -- so a missing ban/unban/warns channel never
    # triggered category creation.
    missing_any = any(
        isinstance(channel, bool)
        for channel in (feed_channel, ban_channel, unban_channel,
                        warns_channel, support_channel)
    )
    if missing_any and isinstance(category, bool):
        category = await ctx.guild.create_category(
            "Admin / Feedback", overwrites=overwrite_dict,
            reason="To log the admin and feedback events")
    # Bot Setup
    # BUG FIX: the original assigned botask only when the channel was absent
    # (so it was always None); return the existing channel, or False.
    botask = discord.utils.get(category.channels, name="bot-setup") or False
    return feed_channel, support_channel, support_channel_roles, ban_channel, unban_channel, warns_channel, botask
|
[
"discord.utils.get",
"discord.PermissionOverwrite"
] |
[((839, 897), 'discord.utils.get', 'discord.utils.get', (['ctx.guild.text_channels'], {'topic': 'feedback'}), '(ctx.guild.text_channels, topic=feedback)\n', (856, 897), False, 'import discord\n'), ((1072, 1125), 'discord.utils.get', 'discord.utils.get', (['ctx.guild.text_channels'], {'topic': 'ban'}), '(ctx.guild.text_channels, topic=ban)\n', (1089, 1125), False, 'import discord\n'), ((1295, 1350), 'discord.utils.get', 'discord.utils.get', (['ctx.guild.text_channels'], {'topic': 'unban'}), '(ctx.guild.text_channels, topic=unban)\n', (1312, 1350), False, 'import discord\n'), ((1527, 1582), 'discord.utils.get', 'discord.utils.get', (['ctx.guild.text_channels'], {'topic': 'warns'}), '(ctx.guild.text_channels, topic=warns)\n', (1544, 1582), False, 'import discord\n'), ((1761, 1818), 'discord.utils.get', 'discord.utils.get', (['ctx.guild.text_channels'], {'topic': 'support'}), '(ctx.guild.text_channels, topic=support)\n', (1778, 1818), False, 'import discord\n'), ((293, 340), 'discord.PermissionOverwrite', 'discord.PermissionOverwrite', ([], {'read_messages': '(True)'}), '(read_messages=True)\n', (320, 340), False, 'import discord\n'), ((723, 787), 'discord.utils.get', 'discord.utils.get', (['ctx.guild.categories'], {'name': '"""Admin / Feedback"""'}), "(ctx.guild.categories, name='Admin / Feedback')\n", (740, 787), False, 'import discord\n'), ((655, 719), 'discord.utils.get', 'discord.utils.get', (['ctx.guild.categories'], {'name': '"""Admin / Feedback"""'}), "(ctx.guild.categories, name='Admin / Feedback')\n", (672, 719), False, 'import discord\n'), ((922, 980), 'discord.utils.get', 'discord.utils.get', (['ctx.guild.text_channels'], {'topic': 'feedback'}), '(ctx.guild.text_channels, topic=feedback)\n', (939, 980), False, 'import discord\n'), ((1149, 1202), 'discord.utils.get', 'discord.utils.get', (['ctx.guild.text_channels'], {'topic': 'ban'}), '(ctx.guild.text_channels, topic=ban)\n', (1166, 1202), False, 'import discord\n'), ((1376, 1431), 'discord.utils.get', 
'discord.utils.get', (['ctx.guild.text_channels'], {'topic': 'unban'}), '(ctx.guild.text_channels, topic=unban)\n', (1393, 1431), False, 'import discord\n'), ((1608, 1663), 'discord.utils.get', 'discord.utils.get', (['ctx.guild.text_channels'], {'topic': 'warns'}), '(ctx.guild.text_channels, topic=warns)\n', (1625, 1663), False, 'import discord\n'), ((1846, 1903), 'discord.utils.get', 'discord.utils.get', (['ctx.guild.text_channels'], {'topic': 'support'}), '(ctx.guild.text_channels, topic=support)\n', (1863, 1903), False, 'import discord\n'), ((2071, 2129), 'discord.utils.get', 'discord.utils.get', (['ctx.guild.roles'], {'name': '"""SupportRequired"""'}), "(ctx.guild.roles, name='SupportRequired')\n", (2088, 2129), False, 'import discord\n'), ((2009, 2067), 'discord.utils.get', 'discord.utils.get', (['ctx.guild.roles'], {'name': '"""SupportRequired"""'}), "(ctx.guild.roles, name='SupportRequired')\n", (2026, 2067), False, 'import discord\n'), ((2522, 2576), 'discord.utils.get', 'discord.utils.get', (['category.channels'], {'name': '"""bot-setup"""'}), "(category.channels, name='bot-setup')\n", (2539, 2576), False, 'import discord\n'), ((2595, 2649), 'discord.utils.get', 'discord.utils.get', (['category.channels'], {'name': '"""bot-setup"""'}), "(category.channels, name='bot-setup')\n", (2612, 2649), False, 'import discord\n'), ((392, 440), 'discord.PermissionOverwrite', 'discord.PermissionOverwrite', ([], {'read_messages': '(False)'}), '(read_messages=False)\n', (419, 440), False, 'import discord\n'), ((465, 512), 'discord.PermissionOverwrite', 'discord.PermissionOverwrite', ([], {'read_messages': '(True)'}), '(read_messages=True)\n', (492, 512), False, 'import discord\n')]
|
import subprocess
from pathlib import Path
import pytest
from bldr.bldr import BLDR
from ..testutil import copytree, extract_deb
@pytest.fixture
def quilt_project_path(tmp_path: Path, asset_dir: Path) -> Path:
    """Build a throwaway git repo shaped like a quilt-patched Debian project.

    Branch layout: ``upstream`` holds the pristine sources, ``ubuntu`` adds
    the ``debian/`` packaging on top, and ``master`` points at ``ubuntu``.
    """
    project_dir = tmp_path / 'quilt_project'
    project_dir.mkdir()

    def git(*args: str) -> None:
        # Every git invocation runs inside the freshly created repository.
        subprocess.check_call(['git', *args], cwd=project_dir)

    git('init')
    git('checkout', '-b', 'upstream')
    copytree(asset_dir.joinpath('test-quilt-proj-onedir', 'upstream'), project_dir)
    git('add', '--all')
    git('commit', '--no-verify', '--message', 'Imported upstream')
    git('checkout', '-b', 'ubuntu')
    copytree(asset_dir.joinpath('test-quilt-proj-onedir', 'debian'), project_dir)
    git('add', '--all')
    git('commit', '--no-verify', '--message', 'Imported debian')
    git('checkout', '-b', 'master')
    git('reset', '--hard', 'ubuntu')
    return project_dir
def test_quilt_project_build(local_repo_dir: Path, quilt_project_path: Path, docker_from: str, tmp_path: Path):
    """End-to-end: build the quilt project and verify the patched payload."""
    builder = BLDR(
        local_repo_dir=local_repo_dir,
        source_dir=quilt_project_path.parent,
        docker_from=docker_from,
    )
    builder.build(quilt_project_path)

    built_debs = list(local_repo_dir.glob('**/quilt-proj*.deb'))
    extract_dir = tmp_path / 'extracted'
    extract_dir.mkdir()
    extract_deb(built_debs[0], extract_dir)

    doc_dir = extract_dir.joinpath('usr', 'share', 'doc', 'feeling')
    assert doc_dir.joinpath('alone').read_text() == "Hello, friend!\n", "The patched file should be correct."
    assert doc_dir.joinpath('lonely').read_text() == "I am a test.\n", "The patched file should be correct."
|
[
"bldr.bldr.BLDR",
"subprocess.check_call"
] |
[((307, 368), 'subprocess.check_call', 'subprocess.check_call', (["['git', 'init']"], {'cwd': 'quilt_project_dir'}), "(['git', 'init'], cwd=quilt_project_dir)\n", (328, 368), False, 'import subprocess\n'), ((373, 461), 'subprocess.check_call', 'subprocess.check_call', (["['git', 'checkout', '-b', 'upstream']"], {'cwd': 'quilt_project_dir'}), "(['git', 'checkout', '-b', 'upstream'], cwd=\n quilt_project_dir)\n", (394, 461), False, 'import subprocess\n'), ((552, 621), 'subprocess.check_call', 'subprocess.check_call', (["['git', 'add', '--all']"], {'cwd': 'quilt_project_dir'}), "(['git', 'add', '--all'], cwd=quilt_project_dir)\n", (573, 621), False, 'import subprocess\n'), ((626, 742), 'subprocess.check_call', 'subprocess.check_call', (["['git', 'commit', '--no-verify', '--message', 'Imported upstream']"], {'cwd': 'quilt_project_dir'}), "(['git', 'commit', '--no-verify', '--message',\n 'Imported upstream'], cwd=quilt_project_dir)\n", (647, 742), False, 'import subprocess\n'), ((744, 830), 'subprocess.check_call', 'subprocess.check_call', (["['git', 'checkout', '-b', 'ubuntu']"], {'cwd': 'quilt_project_dir'}), "(['git', 'checkout', '-b', 'ubuntu'], cwd=\n quilt_project_dir)\n", (765, 830), False, 'import subprocess\n'), ((918, 987), 'subprocess.check_call', 'subprocess.check_call', (["['git', 'add', '--all']"], {'cwd': 'quilt_project_dir'}), "(['git', 'add', '--all'], cwd=quilt_project_dir)\n", (939, 987), False, 'import subprocess\n'), ((992, 1106), 'subprocess.check_call', 'subprocess.check_call', (["['git', 'commit', '--no-verify', '--message', 'Imported debian']"], {'cwd': 'quilt_project_dir'}), "(['git', 'commit', '--no-verify', '--message',\n 'Imported debian'], cwd=quilt_project_dir)\n", (1013, 1106), False, 'import subprocess\n'), ((1108, 1194), 'subprocess.check_call', 'subprocess.check_call', (["['git', 'checkout', '-b', 'master']"], {'cwd': 'quilt_project_dir'}), "(['git', 'checkout', '-b', 'master'], cwd=\n quilt_project_dir)\n", (1129, 1194), False, 
'import subprocess\n'), ((1194, 1281), 'subprocess.check_call', 'subprocess.check_call', (["['git', 'reset', '--hard', 'ubuntu']"], {'cwd': 'quilt_project_dir'}), "(['git', 'reset', '--hard', 'ubuntu'], cwd=\n quilt_project_dir)\n", (1215, 1281), False, 'import subprocess\n'), ((1432, 1534), 'bldr.bldr.BLDR', 'BLDR', ([], {'local_repo_dir': 'local_repo_dir', 'source_dir': 'quilt_project_path.parent', 'docker_from': 'docker_from'}), '(local_repo_dir=local_repo_dir, source_dir=quilt_project_path.parent,\n docker_from=docker_from)\n', (1436, 1534), False, 'from bldr.bldr import BLDR\n')]
|
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
import matplotlib.pyplot as plt
def many2many(n_gestures=2, n_frames=300, n_features=21, rnn_units=32):
    """Model that predicts a label for every frame of a gesture sequence.

    Arguments:
        n_gestures -- int, size of gesture vocabulary
        n_frames -- int, number of frames per training example
        n_features -- int, number of features per frame
        rnn_units -- int, size of LSTM hidden state

    Returns:
        An uncompiled tf.keras.Model mapping (n_frames, n_features) inputs
        to a per-frame softmax over n_gestures classes.

    Note:
        Not bidirectional.
    """
    inputs = tf.keras.Input(shape=(n_frames, n_features))
    # return_sequences=True keeps one output per time step so that every
    # frame receives its own label (many-to-many).
    x = layers.LSTM(rnn_units, return_sequences=True)(inputs)
    outputs = layers.Dense(n_gestures, activation='softmax')(x)
    # Fix: the model was previously mislabeled 'many2one'.
    model = tf.keras.Model(inputs=inputs, outputs=outputs, name='many2many')
    model.summary()
    return model
def many2one_model(n_gestures=2, n_frames=35, n_features=21, rnn_units=4, bidirectional=True, n_lstm_layers=1, n_dense_layers=1, dense_size=4, recurrent_dropout=0.0):
    """Model that predicts a single label for one gesture sequence.

    Arguments:
        n_gestures -- int, size of gesture vocabulary. 2 indicates gesture/non gesture only
        n_frames -- int, number of frames per training example
        n_features -- int, number of features per frame
        rnn_units -- int, size of LSTM hidden state
        bidirectional -- bool, wrap each LSTM in a Bidirectional layer
        n_lstm_layers -- int, number of stacked LSTM layers
        n_dense_layers -- int, number of Dense layers after the LSTMs
        dense_size -- int, width of the intermediate Dense layers
        recurrent_dropout -- float, recurrent dropout rate for every LSTM
    """
    inputs = tf.keras.Input(shape=(n_frames, n_features))
    x = inputs
    for layer_idx in range(n_lstm_layers):
        # Only the final LSTM collapses the sequence into a single vector;
        # earlier layers must emit the full sequence for the next LSTM.
        keep_sequence = layer_idx != n_lstm_layers - 1
        if bidirectional:
            x = layers.Bidirectional(
                layers.LSTM(rnn_units, return_sequences=keep_sequence,
                            recurrent_dropout=recurrent_dropout))(x)
        else:
            x = layers.LSTM(rnn_units, return_sequences=keep_sequence,
                            stateful=False,
                            recurrent_dropout=recurrent_dropout)(x)
    for _ in range(n_dense_layers - 1):
        x = layers.Dense(dense_size, activation='relu')(x)
    outputs = layers.Dense(n_gestures, activation='softmax')(x)
    model = tf.keras.Model(inputs=inputs, outputs=outputs, name='many2one')
    model.summary()
    return model
def plt_metric(history, metric='loss'):
    """Plot a training metric and its validation counterpart.

    Arguments:
        history -- history of a keras model
        metric -- str, metric to be plotted
    """
    # Training curve first, then validation, so the legend order matches.
    for key in (metric, 'val_' + metric):
        plt.plot(history.history[key])
    plt.title('model ' + metric)
    plt.ylabel(metric)
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
def plt_pred(y, pred):
    """Plot ground-truth labels against predicted labels for one example."""
    # Drop singleton dims, then take the argmax over the class axis.
    predicted = np.squeeze(pred).argmax(axis=-1)
    plt.plot(predicted)
    plt.plot(y)
    plt.title('predicted vs labels')
    plt.ylabel('label')
    plt.xlabel('time step')
    plt.legend(['predicted', 'labels'], loc='upper left')
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Input",
"matplotlib.pyplot.legend",
"tensorflow.keras.Model",
"tensorflow.keras.layers.LSTM",
"numpy.squeeze",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((526, 570), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(n_frames, n_features)'}), '(shape=(n_frames, n_features))\n', (540, 570), True, 'import tensorflow as tf\n'), ((847, 910), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputs', 'outputs': 'outputs', 'name': '"""many2one"""'}), "(inputs=inputs, outputs=outputs, name='many2one')\n", (861, 910), True, 'import tensorflow as tf\n'), ((1520, 1564), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(n_frames, n_features)'}), '(shape=(n_frames, n_features))\n', (1534, 1564), True, 'import tensorflow as tf\n'), ((2672, 2735), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputs', 'outputs': 'outputs', 'name': '"""many2one"""'}), "(inputs=inputs, outputs=outputs, name='many2one')\n", (2686, 2735), True, 'import tensorflow as tf\n'), ((2988, 3021), 'matplotlib.pyplot.plot', 'plt.plot', (['history.history[metric]'], {}), '(history.history[metric])\n', (2996, 3021), True, 'import matplotlib.pyplot as plt\n'), ((3026, 3068), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_' + metric]"], {}), "(history.history['val_' + metric])\n", (3034, 3068), True, 'import matplotlib.pyplot as plt\n'), ((3073, 3101), 'matplotlib.pyplot.title', 'plt.title', (["('model ' + metric)"], {}), "('model ' + metric)\n", (3082, 3101), True, 'import matplotlib.pyplot as plt\n'), ((3106, 3124), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['metric'], {}), '(metric)\n', (3116, 3124), True, 'import matplotlib.pyplot as plt\n'), ((3129, 3148), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (3139, 3148), True, 'import matplotlib.pyplot as plt\n'), ((3153, 3200), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'test']"], {'loc': '"""upper left"""'}), "(['train', 'test'], loc='upper left')\n", (3163, 3200), True, 'import matplotlib.pyplot as plt\n'), ((3344, 3360), 'matplotlib.pyplot.plot', 'plt.plot', (['labels'], {}), '(labels)\n', (3352, 
3360), True, 'import matplotlib.pyplot as plt\n'), ((3365, 3376), 'matplotlib.pyplot.plot', 'plt.plot', (['y'], {}), '(y)\n', (3373, 3376), True, 'import matplotlib.pyplot as plt\n'), ((3381, 3413), 'matplotlib.pyplot.title', 'plt.title', (['"""predicted vs labels"""'], {}), "('predicted vs labels')\n", (3390, 3413), True, 'import matplotlib.pyplot as plt\n'), ((3418, 3437), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""label"""'], {}), "('label')\n", (3428, 3437), True, 'import matplotlib.pyplot as plt\n'), ((3442, 3465), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time step"""'], {}), "('time step')\n", (3452, 3465), True, 'import matplotlib.pyplot as plt\n'), ((3470, 3523), 'matplotlib.pyplot.legend', 'plt.legend', (["['predicted', 'labels']"], {'loc': '"""upper left"""'}), "(['predicted', 'labels'], loc='upper left')\n", (3480, 3523), True, 'import matplotlib.pyplot as plt\n'), ((701, 746), 'tensorflow.keras.layers.LSTM', 'layers.LSTM', (['rnn_units'], {'return_sequences': '(True)'}), '(rnn_units, return_sequences=True)\n', (712, 746), False, 'from tensorflow.keras import layers\n'), ((763, 809), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['n_gestures'], {'activation': '"""softmax"""'}), "(n_gestures, activation='softmax')\n", (775, 809), False, 'from tensorflow.keras import layers\n'), ((2588, 2634), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['n_gestures'], {'activation': '"""softmax"""'}), "(n_gestures, activation='softmax')\n", (2600, 2634), False, 'from tensorflow.keras import layers\n'), ((3313, 3329), 'numpy.squeeze', 'np.squeeze', (['pred'], {}), '(pred)\n', (3323, 3329), True, 'import numpy as np\n'), ((2527, 2570), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['dense_size'], {'activation': '"""relu"""'}), "(dense_size, activation='relu')\n", (2539, 2570), False, 'from tensorflow.keras import layers\n'), ((2227, 2330), 'tensorflow.keras.layers.LSTM', 'layers.LSTM', (['rnn_units'], {'return_sequences': '(False)', 'stateful': 
'(False)', 'recurrent_dropout': 'recurrent_dropout'}), '(rnn_units, return_sequences=False, stateful=False,\n recurrent_dropout=recurrent_dropout)\n', (2238, 2330), False, 'from tensorflow.keras import layers\n'), ((2368, 2470), 'tensorflow.keras.layers.LSTM', 'layers.LSTM', (['rnn_units'], {'return_sequences': '(True)', 'stateful': '(False)', 'recurrent_dropout': 'recurrent_dropout'}), '(rnn_units, return_sequences=True, stateful=False,\n recurrent_dropout=recurrent_dropout)\n', (2379, 2470), False, 'from tensorflow.keras import layers\n'), ((1865, 1953), 'tensorflow.keras.layers.LSTM', 'layers.LSTM', (['rnn_units'], {'return_sequences': '(False)', 'recurrent_dropout': 'recurrent_dropout'}), '(rnn_units, return_sequences=False, recurrent_dropout=\n recurrent_dropout)\n', (1876, 1953), False, 'from tensorflow.keras import layers\n'), ((2012, 2099), 'tensorflow.keras.layers.LSTM', 'layers.LSTM', (['rnn_units'], {'return_sequences': '(True)', 'recurrent_dropout': 'recurrent_dropout'}), '(rnn_units, return_sequences=True, recurrent_dropout=\n recurrent_dropout)\n', (2023, 2099), False, 'from tensorflow.keras import layers\n')]
|
# Generated by Django 3.2.10 on 2021-12-20 03:53
from django.db import migrations
class Migration(migrations.Migration):
    # Builds on the initial schema of the 'referral' app.
    dependencies = [
        ('referral', '0001_initial'),
    ]
    operations = [
        # Metadata-only change: order Clearance querysets by descending pk.
        migrations.AlterModelOptions(
            name='clearance',
            options={'ordering': ['-pk']},
        ),
    ]
|
[
"django.db.migrations.AlterModelOptions"
] |
[((218, 295), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""clearance"""', 'options': "{'ordering': ['-pk']}"}), "(name='clearance', options={'ordering': ['-pk']})\n", (246, 295), False, 'from django.db import migrations\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on 26/04/2016
Versão 1.0
@author: Ricieri (ELP)
Python 3.4.4
"""
"""
Reviewed on 15/10/2020
Versão 1.0 rev.A - rounded printing values to 3 decimal places and displays '°C' instead of 'ºC'.
@author: Marcelo (ELP)
Python 3.8.6
"""
"""
Reviewed on 06/05/2021
Versão 1.0 rev.B - Added FAC_DCDC_EMA variables.
@author: Marcelo (ELT)
Python 3.9.5
"""
import struct
import glob
import serial
import time
import csv
import math
import numpy as np
import matplotlib.pyplot as plt
import os
from datetime import datetime
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
======================================================================
Listas de Entidades BSMP
A posição da entidade na lista corresponde ao seu ID BSMP
======================================================================
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
UDC_FIRMWARE_VERSION = "0.42 2021-05-06"
ListVar = ['iLoad1','iLoad2','iMod1','iMod2','iMod3','iMod4','vLoad',
'vDCMod1','vDCMod2','vDCMod3','vDCMod4','vOutMod1','vOutMod2',
'vOutMod3','vOutMod4','temp1','temp2','temp3','temp4','ps_OnOff',
'ps_OpMode','ps_Remote','ps_OpenLoop','ps_SoftInterlocks',
'ps_HardInterlocks','iRef','wfmRef_Gain','wfmRef_Offset','sigGen_Enable','sigGen_Type',
'sigGen_Ncycles','sigGenPhaseStart','sigGen_PhaseEnd','sigGen_Freq',
'sigGen_Amplitude','sigGen_Offset','sigGen_Aux','dp_ID','dp_Class','dp_Coeffs','ps_Model',
'wfmRef_PtrBufferStart','wfmRef_PtrBufferEnd','wfmRef_PtrBufferK','wfmRef_SyncMode']
ListCurv = ['wfmRef_Curve','sigGen_SweepAmp','samplesBuffer','fullwfmRef_Curve','wfmRef_Blocks','samplesBuffer_blocks']
ListFunc = ['TurnOn','TurnOff','OpenLoop','ClosedLoop','OpMode','RemoteInterface',
'SetISlowRef','ConfigWfmRef','ConfigSigGen', 'EnableSigGen',
'DisableSigGen','ConfigDPModule','WfmRefUpdate','ResetInterlocks','ConfigPSModel',
'ConfigHRADC','ConfigHRADCOpMode','EnableHRADCSampling','DisableHRADCSampling','ResetWfmRef',
'SetRSAddress','EnableSamplesBuffer','DisableSamplesBuffer','SetISlowRefx4','SelectHRADCBoard','SelectTestSource',
'ResetHRADCBoards','Config_nHRADC','ReadHRADC_UFM','WriteHRADC_UFM','EraseHRADC_UFM','ReadHRADC_BoardData']
ListTestFunc = ['UdcIoExpanderTest', 'UdcLedTest', 'UdcBuzzerTest', 'UdcEepromTest', 'UdcFlashTest', 'UdcRamTest',
'UdcRtcTest', 'UdcSensorTempTest', 'UdcIsoPlaneTest', 'UdcAdcTest', 'UdcUartTest', 'UdcLoopBackTest',
'UdcComTest', 'UdcI2cIsoTest']
ListHRADCInputType = ['Vin_bipolar','Vin_unipolar_p','Vin_unipolar_n','Iin_bipolar','Iin_unipolar_p',
'Iin_unipolar_n','Vref_bipolar_p','Vref_bipolar_n','GND','Vref_unipolar_p',
'Vref_unipolar_n','GND_unipolar','Temp','Reserved0','Reserved1','Reserved2']
ListPSModels = ['FBP_100kHz', 'FBP_Parallel_100kHz', 'FAC_ACDC_10kHz', 'FAC_DCDC_20kHz',
'FAC_Full_ACDC_10kHz', 'FAC_Full_DCDC_20kHz', 'FAP_ACDC',
'FAP_DCDC_20kHz', 'TEST_HRPWM', 'TEST_HRADC', 'JIGA_HRADC',
'FAP_DCDC_15kHz_225A', 'FBPx4_100kHz', 'FAP_6U_DCDC_20kHz',
'JIGA_BASTIDOR']
ListPSModels_v2_1 = ['Empty','FBP','FBP_DCLink','FAC_ACDC','FAC_DCDC',
'FAC_2S_ACDC','FAC_2S_DCDC','FAC_2P4S_ACDC','FAC_2P4S_DCDC',
'FAP','FAP_4P','FAC_DCDC_EMA','FAP_2P2S','FAP_IMAS',
'FAC_2P_ACDC_IMAS','FAC_2P_DCDC_IMAS','Invalid','Invalid',
'Invalid','Invalid','Invalid','Invalid','Invalid','Invalid',
'Invalid','Invalid','Invalid','Invalid','Invalid','Invalid',
'Invalid','Uninitialized']
ListVar_v2_1 = ['ps_status','ps_setpoint','ps_reference','firmware_version',
'counter_set_slowref','counter_sync_pulse','siggen_enable',
'siggen_type','siggen_num_cycles','siggen_n','siggen_freq',
'siggen_amplitude','siggen_offset','siggen_aux_param',
'wfmref_selected','wfmref_sync_mode','wfmref_gain',
'wfmref_offset','p_wfmref_start','p_wfmref_end','p_wfmref_idx']
#ListCurv_v2_1 = ['wfmref','buf_samples_ctom','buf_samples_mtoc']
ListCurv_v2_1 = ['wfmref_data_0','wfmref_data_1','buf_samples_ctom']
ListFunc_v2_1 = ['turn_on','turn_off','open_loop','closed_loop','select_op_mode',
'reset_interlocks','set_command_interface',
'set_serial_termination','unlock_udc','lock_udc',
'cfg_source_scope','cfg_freq_scope','cfg_duration_scope',
'enable_scope','disable_scope','sync_pulse','set_slowref',
'set_slowref_fbp','set_slowref_readback_mon',
'set_slowref_fbp_readback_mon','set_slowref_readback_ref',
'set_slowref_fbp_readback_ref','reset_counters','cfg_wfmref',
'select_wfmref','get_wfmref_size','reset_wfmref','cfg_siggen',
'set_siggen','enable_siggen','disable_siggen','set_param','get_param',
'save_param_eeprom','load_param_eeprom', 'save_param_bank',
'load_param_bank','set_dsp_coeffs','get_dsp_coeff',
'save_dsp_coeffs_eeprom', 'load_dsp_coeffs_eeprom',
'save_dsp_modules_eeprom', 'load_dsp_modules_eeprom','reset_udc']
ListOpMode_v2_1 = ['Off','Interlock','Initializing','SlowRef','SlowRefSync',
'Cycle','RmpWfm','MigWfm','FastRef']
ListSigGenTypes_v2_1 = ['Sine','DampedSine','Trapezoidal','DampedSquaredSine',
'Square']
ListParameters = ['PS_Name','PS_Model','Num_PS_Modules','Command_Interface',
'RS485_Baudrate','RS485_Address','RS485_Termination',
'UDCNet_Address','Ethernet_IP','Ethernet_Subnet_Mask',
'Buzzer_Volume','Freq_ISR_Controller','Freq_TimeSlicer',
'Control_Loop_State','Max_Ref','Min_Ref','Max_Ref_OpenLoop',
'Min_Ref_OpenLoop',
'PWM_Freq','PWM_DeadTime','PWM_Max_Duty','PWM_Min_Duty',
'PWM_Max_Duty_OpenLoop','PWM_Min_Duty_OpenLoop',
'PWM_Lim_Duty_Share','HRADC_Num_Boards','HRADC_Freq_SPICLK',
'HRADC_Freq_Sampling','HRADC_Enable_Heater',
'HRADC_Enable_Monitor','HRADC_Type_Transducer',
'HRADC_Gain_Transducer','HRADC_Offset_Transducer','SigGen_Type',
'SigGen_Num_Cycles','SigGen_Freq','SigGen_Amplitude',
'SigGen_Offset','SigGen_Aux_Param','WfmRef_ID_WfmRef',
'WfmRef_SyncMode','WfmRef_Frequency','WfmRef_Gain',
'WfmRef_Offset','Analog_Var_Max','Analog_Var_Min',
'Hard_Interlocks_Debounce_Time','Hard_Interlocks_Reset_Time',
'Soft_Interlocks_Debounce_Time','Soft_Interlocks_Reset_Time',
'Scope_Sampling_Frequency','Scope_Source','','','','','','',
'','','','','Password','Enable_Onboard_EEPROM']
ListBCBFunc = ['ClearPof', 'SetPof', 'ReadPof', 'EnableBuzzer', 'DisableBuzzer',
'SendUartData', 'GetUartData', 'SendCanData', 'GetCanData',
'GetI2cData']
# struct format strings for a complete serial message, per C variable type.
# NOTE(review): fields appear to be addr, cmd, size ('H'), value, checksum —
# confirm against the BSMP message layout used in SerialDRS.
typeFormat = {'uint8_t': 'BBHBB', 'uint16_t': 'BBHHB', 'uint32_t': 'BBHIB',
              'float': 'BBHfB'}
# struct codes used to pack values before splitting them into 16-bit words
# (see SerialDRS._convertToUint16List).
bytesFormat = {'Uint16': 'H', 'Uint32': 'L', 'Uint64': 'Q', 'float': 'f'}
# NOTE(review): presumably the total serial message size in bytes for each
# C type — verify against the read routines.
typeSize = {'uint8_t': 6, 'uint16_t': 7, 'uint32_t': 9, 'float': 9}
# Curve transfer layout: blocks per curve (FBP / FAx) and samples per block.
num_blocks_curves_fbp = [4, 4, 4]
num_blocks_curves_fax = [16, 16, 16]
size_curve_block = [1024, 1024, 1024]
# Byte offsets of calibration fields inside the HRADC UFM memory
# (presumed from the key names — confirm against firmware).
ufmOffset = {'serial': 0, 'calibdate': 4, 'variant': 9, 'rburden': 10,
             'calibtemp': 12, 'vin_gain': 14, 'vin_offset': 16,
             'iin_gain': 18, 'iin_offset': 20, 'vref_p': 22, 'vref_n': 24,
             'gnd': 26}
# HRADC board variants and the input-signal names they can sample.
hradcVariant = ['HRADC-FBP','HRADC-FAX-A','HRADC-FAX-B','HRADC-FAX-C','HRADC-FAX-D']
hradcInputTypes = ['GND', 'Vref_bipolar_p', 'Vref_bipolar_n', 'Temp',
                   'Vin_bipolar_p', 'Vin_bipolar_n', 'Iin_bipolar_p','Iin_bipolar_n']
# DSP configuration: per-class module counts and coefficient counts.
NUM_MAX_COEFFS_DSP = 12
num_dsp_classes = 7
num_dsp_modules = [4, 4, 4, 6, 8, 4, 2, 2]
num_coeffs_dsp_modules = [0, 1, 1, 4, 8, 16, 2]
dsp_classes_names = ["DSP_Error", "DSP_SRLim", "DSP_LPF","DSP_PI",
                     "DSP_IIR_2P2Z", "DSP_IIR_3P3Z", "DSP_VdcLink_FeedForward",
                     "DSP_Vect_Product"]
# FBP
list_fbp_soft_interlocks = ['Heat-Sink Overtemperature']
list_fbp_hard_interlocks = ['Load Overcurrent',
'Load Overvoltage',
'DCLink Overvoltage',
'DCLink Undervoltage',
'DCLink Relay Fault',
'DCLink Fuse Fault',
'MOSFETs Driver Fault',
'Welded Relay Fault']
# FBP DC-Link
list_fbp_dclink_hard_interlocks = ['Power_Module_1_Fault',
'Power_Module_2_Fault',
'Power_Module_3_Fault',
'Total_Output_Overvoltage',
'Power_Module_1_Overvoltage',
'Power_Module_2_Overvoltage',
'Power_Module_3_Overvoltage',
'Total_Output_Undervoltage',
'Power_Module_1_Undervoltage',
'Power_Module_2_Undervoltage',
'Power_Module_3_Undervoltage',
'Smoke_Detector','External_Interlock']
# FAC ACDC
list_fac_acdc_soft_interlocks = []
list_fac_acdc_hard_interlocks = ['CapBank Overvoltage',
'Rectifier Overvoltage',
'Rectifier Undervoltage',
'Rectifier Overcurrent',
'Welded Contactor Fault',
'Opened Contactor Fault',
'IIB Input Stage Interlock',
'IIB Command Interlock']
list_fac_acdc_iib_is_interlocks = ['Rectifier Overvoltage',
'Input Overcurrent',
'IGBT Overtemperature',
'IGBT Overtemperature HW',
'Driver Overvoltage',
'Driver Overcurrent',
'Top Driver Error',
'Bottom Driver Error',
'Inductors Overtemperature',
'Heat-Sink Overtemperature',
'Board IIB Overtemperature',
'Module Overhumidity']
list_fac_acdc_iib_is_alarms = ['Rectifier Overvoltage',
'Input Overcurrent',
'IGBT Overtemperature',
'Driver Overvoltage',
'Driver Overcurrent',
'Inductors Overtemperature',
'Heat-Sink Overtemperature',
'Board IIB Overtemperature',
'Module Overhumidity']
list_fac_acdc_iib_cmd_interlocks = ['Capbank Overvoltage',
'Output Overvoltage',
'External Boards Overvoltage',
'Auxiliary Board Overcurrent',
'IDB Board Overcurrent',
'Rectifier Inductor Overtemperature',
'Rectifier Heat-Sink Overtemperature',
'AC Mains Overcurrent',
'Emergency Button',
'AC Mains Undervoltage',
'AC Mains Overvoltage',
'Ground Leakage Overcurrent',
'Board IIB Overtemperature',
'Module Overhumidity']
list_fac_acdc_iib_cmd_alarms = ['Capbank Overvoltage',
'Output Overvoltage',
'External Boards Overvoltage',
'Auxiliary Board Overcurrent',
'IDB Board Overcurrent',
'Rectifier Inductor Overtemperature',
'Rectifier Heat-Sink Overtemperature',
'Ground Leakage Overcurrent',
'Board IIB Overtemperature',
'Module Overhumidity']
# FAC DCDC
list_fac_dcdc_soft_interlocks = ['DCCT 1 Fault',
'DCCT 2 Fault',
'DCCT High Difference',
'Load Feedback 1 Fault',
'Load Feedback 2 Fault']
list_fac_dcdc_hard_interlocks = ['Load Overcurrent',
'CapBank Overvoltage',
'CapBank Undervoltage',
'IIB Interlock',
'External Interlock',
'Rack Interlock']
list_fac_dcdc_iib_interlocks = ['Input Overvoltage',
'Input Overcurrent',
'Output Overcurrent',
'IGBT 1 Overtemperature',
'IGBT 1 Overtemperature HW',
'IGBT 2 Overtemperature',
'IGBT 2 Overtemperature HW',
'Driver Overvoltage',
'Driver 1 Overcurrent',
'Driver 2 Overcurrent',
'Top Driver 1 Error',
'Bottom Driver 1 Error',
'Top Driver 2 Error',
'Bottom Driver 2 Error',
'Inductors Overtemperature',
'Heat-Sink Overtemperature',
'Ground Leakage Overcurrent',
'Board IIB Overtemperature',
'Module Overhumidity']
list_fac_dcdc_iib_alarms = ['Input Overvoltage',
'Input Overcurrent',
'Output Overcurrent',
'IGBT 1 Overtemperature',
'IGBT 2 Overtemperature',
'Driver Overvoltage',
'Driver 1 Overcurrent',
'Driver 2 Overcurrent',
'Inductors Overtemperature',
'Heat-Sink Overtemperature',
'Ground Leakage Overcurrent',
'Board IIB Overtemperature',
'Module Overhumidity']
# FAC-2S AC/DC
list_fac_2s_acdc_hard_interlocks = ['CapBank Overvoltage',
'Rectifier Overvoltage',
'Rectifier Undervoltage',
'Rectifier Overcurrent',
'Welded Contactor Fault',
'Opened Contactor Fault',
'IIB Input Stage Interlock',
'IIB Command Interlock']
list_fac_2s_acdc_iib_is_interlocks = list_fac_acdc_iib_is_interlocks
list_fac_2s_acdc_iib_cmd_interlocks = list_fac_acdc_iib_cmd_interlocks
list_fac_2s_acdc_iib_is_alarms = list_fac_acdc_iib_is_alarms
list_fac_2s_acdc_iib_cmd_alarms = list_fac_acdc_iib_cmd_alarms
# FAC-2S DC/DC
list_fac_2s_dcdc_soft_interlocks = ['DCCT 1 Fault',
'DCCT 2 Fault',
'DCCT High Difference',
'Load Feedback 1 Fault',
'Load Feedback 2 Fault']
list_fac_2s_dcdc_hard_interlocks = ['Load Overcurrent',
'Module 1 CapBank Overvoltage',
'Module 2 CapBank Overvoltage',
'Module 1 CapBank Undervoltage',
'Module 2 CapBank Undervoltage',
'IIB Mod 1 Itlk',
'IIB Mod 2 Itlk',
'External Interlock',
'Rack Interlock']
list_fac_2s_dcdc_iib_interlocks = list_fac_dcdc_iib_interlocks
list_fac_2s_dcdc_iib_alarms = list_fac_dcdc_iib_alarms
# FAC-2P4S AC/DC
list_fac_2p4s_acdc_hard_interlocks = ['CapBank Overvoltage',
'Rectifier Overvoltage',
'Rectifier Undervoltage',
'Rectifier Overcurrent',
'Welded Contactor Fault',
'Opened Contactor Fault',
'IIB Input Stage Interlock',
'IIB Command Interlock']
list_fac_2p4s_acdc_iib_is_interlocks = list_fac_acdc_iib_is_interlocks
list_fac_2p4s_acdc_iib_cmd_interlocks = list_fac_acdc_iib_cmd_interlocks
list_fac_2p4s_acdc_iib_is_alarms = list_fac_acdc_iib_is_alarms
list_fac_2p4s_acdc_iib_cmd_alarms = list_fac_acdc_iib_cmd_alarms
# FAC-2P4S DC/DC
list_fac_2p4s_dcdc_soft_interlocks = ['DCCT 1 Fault',
'DCCT 2 Fault',
'DCCT High Difference',
'Load Feedback 1 Fault',
'Load Feedback 2 Fault',
'Arm 1 Overcurrent',
'Arm 2 Overcurrent',
'Arms High Difference',
'Complementary PS Interlock']
list_fac_2p4s_dcdc_hard_interlocks = ['Load Overcurrent',
'Module 1 CapBank Overvoltage',
'Module 2 CapBank Overvoltage',
'Module 3 CapBank Overvoltage',
'Module 4 CapBank Overvoltage',
'Module 5 CapBank Overvoltage',
'Module 6 CapBank Overvoltage',
'Module 7 CapBank Overvoltage',
'Module 8 CapBank Overvoltage',
'Module 1 CapBank Undervoltage',
'Module 2 CapBank Undervoltage',
'Module 3 CapBank Undervoltage',
'Module 4 CapBank Undervoltage',
'Module 5 CapBank Undervoltage',
'Module 6 CapBank Undervoltage',
'Module 7 CapBank Undervoltage',
'Module 8 CapBank Undervoltage',
'IIB 1 Itlk',
'IIB 2 Itlk',
'IIB 3 Itlk',
'IIB 4 Itlk',
'IIB 5 Itlk',
'IIB 6 Itlk',
'IIB 7 Itlk',
'IIB 8 Itlk']
list_fac_2p4s_dcdc_iib_interlocks = list_fac_dcdc_iib_interlocks
list_fac_2p4s_dcdc_iib_alarms = list_fac_dcdc_iib_alarms
# FAP
list_fap_soft_interlocks = ['DCCT 1 Fault',
'DCCT 2 Fault',
'DCCT High Difference',
'Load Feedback 1 Fault',
'Load Feedback 2 Fault',
'IGBTs Current High Difference']
list_fap_hard_interlocks = ['Load Overcurrent',
'Load Overvoltage',
'DCLink Overvoltage',
'DCLink Undervoltage',
'Welded Contactor Fault',
'Opened Contactor Fault',
'IGBT 1 Overcurrent',
'IGBT 2 Overcurrent',
'IIB Itlk']
list_fap_iib_interlocks = ['Input Overvoltage',
'Output Overvoltage',
'IGBT 1 Overcurrent',
'IGBT 2 Overcurrent',
'IGBT 1 Overtemperature',
'IGBT 2 Overtemperature',
'Driver Overvoltage',
'Driver 1 Overcurrent',
'Driver 2 Overcurrent',
'Driver 1 Error',
'Driver 2 Error',
'Inductors Overtemperature',
'Heat-Sink Overtemperature',
'DCLink Contactor Fault',
'Contact Sticking of Contactor',
'External Interlock',
'Rack Interlock',
'High Leakage Current',
'Board IIB Overtemperature',
'Module Overhumidity']
list_fap_iib_alarms = ['Input Overvoltage',
'Output Overvoltage',
'IGBT 1 Overcurrent',
'IGBT 2 Overcurrent',
'IGBT 1 Overtemperature',
'IGBT 2 Overtemperature',
'Driver Overvoltage',
'Driver 1 Overcurrent',
'Driver 2 Overcurrent',
'Inductors Overtemperature',
'Heat-Sink Overtemperature',
'High Leakage Current',
'Board IIB Overtemperature',
'Module Overhumidity']
# FAP-4P
list_fap_4p_soft_interlocks = ['DCCT 1 Fault',
'DCCT 2 Fault',
'DCCT High Difference',
'Load Feedback 1 Fault',
'Load Feedback 2 Fault',
'IGBTs Current High Difference']
list_fap_4p_hard_interlocks = ['Load Overcurrent',
'Load Overvoltage',
'IGBT 1 Mod 1 Overcurrent',
'IGBT 2 Mod 1 Overcurrent',
'IGBT 1 Mod 2 Overcurrent',
'IGBT 2 Mod 2 Overcurrent',
'IGBT 1 Mod 3 Overcurrent',
'IGBT 2 Mod 3 Overcurrent',
'IGBT 1 Mod 4 Overcurrent',
'IGBT 2 Mod 4 Overcurrent',
'Welded Contactor Mod 1 Fault',
'Welded Contactor Mod 2 Fault',
'Welded Contactor Mod 3 Fault',
'Welded Contactor Mod 4 Fault',
'Opened Contactor Mod 1 Fault',
'Opened Contactor Mod 2 Fault',
'Opened Contactor Mod 3 Fault',
'Opened Contactor Mod 4 Fault',
'DCLink Mod 1 Overvoltage',
'DCLink Mod 2 Overvoltage',
'DCLink Mod 3 Overvoltage',
'DCLink Mod 4 Overvoltage',
'DCLink Mod 1 Undervoltage',
'DCLink Mod 2 Undervoltage',
'DCLink Mod 3 Undervoltage',
'DCLink Mod 4 Undervoltage',
'IIB Mod 1 Itlk',
'IIB Mod 2 Itlk',
'IIB Mod 3 Itlk',
'IIB Mod 4 Itlk']
list_fap_4p_iib_interlocks = list_fap_iib_interlocks
list_fap_4p_iib_alarms = list_fap_iib_alarms
# FAC DCDC EMA
list_fac_dcdc_ema_soft_interlocks = ['DCCT Fault',
'Load Feedback Fault']
list_fac_dcdc_ema_hard_interlocks = ['Load Overcurrent',
'DCLink Overvoltage',
'DCLink Undervoltage',
'Emergency Button',
'Load Waterflow',
'Load Overtemperature',
'IIB Itlk']
list_fac_dcdc_ema_iib_interlocks = ['Input Overvoltage',
'Input Overcurrent',
'Output Overcurrent',
'IGBT 1 Overtemperature',
'IGBT 1 Overtemperature HW',
'IGBT 2 Overtemperature',
'IGBT 2 Overtemperature HW',
'Driver Overvoltage',
'Driver 1 Overcurrent',
'Driver 2 Overcurrent',
'Top Driver 1 Error',
'Bottom Driver 1 Error',
'Top Driver 2 Error',
'Bottom Driver 2 Error',
'Inductors Overtemperature',
'Heat-Sink Overtemperature',
'Ground Leakage Overcurrent',
'Board IIB Overtemperature',
'Module Overhumidity']
list_fac_dcdc_ema_iib_alarms = ['Input Overvoltage',
'Input Overcurrent',
'Output Overcurrent',
'IGBT 1 Overtemperature',
'IGBT 2 Overtemperature',
'Driver Overvoltage',
'Driver 1 Overcurrent',
'Driver 2 Overcurrent',
'Inductors Overtemperature',
'Heat-Sink Overtemperature',
'Ground Leakage Overcurrent',
'Board IIB Overtemperature',
'Module Overhumidity']
# FAP-2P2S
list_fap_2p2s_soft_interlocks = ['DCCT 1 Fault',
'DCCT 2 Fault',
'DCCT High Difference',
'Load Feedback 1 Fault',
'Load Feedback 2 Fault',
'Arms High Difference',
'IGBTs Current High Difference',
'Complementary PS Interlock']
list_fap_2p2s_hard_interlocks = ['Load Overcurrent',
'IGBT 1 Mod 1 Overcurrent',
'IGBT 2 Mod 1 Overcurrent',
'IGBT 1 Mod 2 Overcurrent',
'IGBT 2 Mod 2 Overcurrent',
'IGBT 1 Mod 3 Overcurrent',
'IGBT 2 Mod 3 Overcurrent',
'IGBT 1 Mod 4 Overcurrent',
'IGBT 2 Mod 4 Overcurrent',
'Welded Contactor Mod 1 Fault',
'Welded Contactor Mod 2 Fault',
'Welded Contactor Mod 3 Fault',
'Welded Contactor Mod 4 Fault',
'Opened Contactor Mod 1 Fault',
'Opened Contactor Mod 2 Fault',
'Opened Contactor Mod 3 Fault',
'Opened Contactor Mod 4 Fault',
'DCLink Mod 1 Overvoltage',
'DCLink Mod 2 Overvoltage',
'DCLink Mod 3 Overvoltage',
'DCLink Mod 4 Overvoltage',
'DCLink Mod 1 Undervoltage',
'DCLink Mod 2 Undervoltage',
'DCLink Mod 3 Undervoltage',
'DCLink Mod 4 Undervoltage',
'IIB Mod 1 Itlk',
'IIB Mod 2 Itlk',
'IIB Mod 3 Itlk',
'IIB Mod 4 Itlk',
'Arm 1 Overcurrent',
'Arm 2 Overcurrent']
list_fap_2p2s_iib_interlocks = list_fap_iib_interlocks
list_fap_2p2s_iib_alarms = list_fap_iib_alarms
# FAP 225A
list_fap_225A_soft_interlocks = ['IGBTs Current High Difference']
list_fap_225A_hard_interlocks = ['Load Overcurrent',
'DCLink Contactor Fault',
'IGBT 1 Overcurrent',
'IGBT 2 Overcurrent']
# FAC-2P ACDC
list_fac_2p_acdc_imas_soft_interlocks = []
list_fac_2p_acdc_imas_hard_interlocks = ['CapBank Overvoltage',
'Rectifier Overcurrent',
'AC Mains Contactor Fault',
'Module A Interlock',
'Module B Interlock',
'DCDC Interlock']
# FAC-2P DCDC
list_fac_2p_dcdc_imas_soft_interlocks = []
list_fac_2p_dcdc_imas_hard_interlocks = ['Load Overcurrent',
'Module 1 CapBank_Overvoltage',
'Module 2 CapBank_Overvoltage',
'Module 1 CapBank_Undervoltage',
'Module 2 CapBank_Undervoltage',
'Arm 1 Overcurrent',
'Arm 2 Overcurrent',
'Arms High_Difference',
'ACDC Interlock']
class SerialDRS(object):
    # NOTE(review): 'ser' is a *class* attribute, so every SerialDRS
    # instance shares the same serial.Serial object — confirm this sharing
    # is intentional (the commented-out per-instance version in __init__
    # suggests it may not be).
    ser = serial.Serial()
    def __init__(self):
        """Initialize BSMP addressing bytes, command codes and name tables."""
        #self.ser=serial.Serial()
        # BSMP device addresses, each a single latin-1 encoded byte.
        self.MasterAdd = '\x00'
        self.SlaveAdd = '\x01'
        self.BCastAdd = '\xFF'
        # BSMP command identifiers and fixed payload-size fields.
        self.ComWriteVar = '\x20'
        self.WriteFloatSizePayload = '\x00\x05'
        self.WriteDoubleSizePayload = '\x00\x03'
        self.ComReadVar = '\x10\x00\x01'
        self.ComRequestCurve = '\x40'
        self.ComSendWfmRef = '\x41'
        self.ComFunction = '\x50'
        self.DP_MODULE_MAX_COEFF = 16
        # DP class and interlock name tables (labels intentionally kept in
        # Portuguese — they are part of the runtime-facing vocabulary).
        self.ListDPClass = ['ELP_Error','ELP_SRLim','ELP_LPF','ELP_PI_dawu','ELP_IIR_2P2Z','ELP_IIR_3P3Z',
                            'DCL_PID','DCL_PI','DCL_DF13','DCL_DF22','DCL_23']
        self.ListHardInterlocks = ['Sobrecorrente', 'Interlock Externo', 'Falha AC',
            'Falha ACDC', 'Falha DCDC','Sobretensao','Falha Resistor Precarga','Falha Carga Capacitores Saída',
            'Botão de Emergência', 'OUT_OVERVOLTAGE', 'IN_OVERVOLTAGE','ARM1_OVERCURRENT','ARM2_OVERCURRENT',
            'IN_OVERCURRENT','DRIVER1_FAULT','DRIVER2_FAULT','OUT1_OVERCURRENT','OUT2_OVERCURRENT','OUT1_OVERVOLTAGE',
            'OUT2_OVERVOLTAGE','LEAKAGE_OVERCURRENT','AC_OVERCURRENT']
        self.ListSoftInterlocks = ['IGBT1_OVERTEMP','IGBT2_OVERTEMP','L1_OVERTEMP','L2_OVERTEMP','HEATSINK_OVERTEMP','WATER_OVERTEMP',
                      'RECTFIER1_OVERTEMP','RECTFIER2_OVERTEMP','AC_TRANSF_OVERTEMP','WATER_FLUX_FAULT','OVER_HUMIDITY_FAULT']
        print("\n pyDRS - compatible UDC firmware version: " + UDC_FIRMWARE_VERSION + "\n")
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
======================================================================
Funções Internas da Classe
======================================================================
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
# Converte float para hexadecimal
def float_to_hex(self, value):
hex_value = struct.pack('f', value)
return hex_value.decode('ISO-8859-1')
# Converte lista de float para hexadecimal
def float_list_to_hex(self, value_list):
hex_list = b''
for value in value_list:
hex_list = hex_list + struct.pack('f', value)
return hex_list.decode('ISO-8859-1')
def format_list_size(self, in_list, max_size):
out_list = in_list[0:max_size]
if max_size > len(in_list):
for i in range(max_size - len(in_list)):
out_list.append(0)
return out_list
# Converte double para hexadecimal
def double_to_hex(self,value):
hex_value = struct.pack('H',value)
return hex_value.decode('ISO-8859-1')
# Converte unsigned int para hexadecimal
def uint32_to_hex(self,value):
hex_value = struct.pack('I',value)
return hex_value.decode('ISO-8859-1')
# Converte indice para hexadecimal
def index_to_hex(self,value):
hex_value = struct.pack('B',value)
return hex_value.decode('ISO-8859-1')
# Converte payload_size para hexadecimal
def size_to_hex(self,value):
hex_value = struct.pack('>H',value)
return hex_value.decode('ISO-8859-1')
# Função Checksum
def checksum(self, packet):
b=bytearray(packet.encode('ISO-8859-1'))
csum =(256-sum(b))%256
hcsum = struct.pack('B',csum)
send_msg = packet + hcsum.decode(encoding='ISO-8859-1')
return send_msg
    # Variable-read command.
    def read_var(self,var_id):
        """Send a BSMP read-variable request for *var_id*.

        Flushes any stale bytes from the serial input buffer first; the
        caller is responsible for reading the response afterwards.
        """
        send_msg = self.checksum(self.SlaveAdd+self.ComReadVar+var_id)
        self.ser.reset_input_buffer()
        self.ser.write(send_msg.encode('ISO-8859-1'))
    def is_open(self):
        """Return True if the underlying serial port is currently open."""
        return self.ser.isOpen()
def _convertToUint16List(self, val, format):
val_16 = []
val_b = struct.pack(bytesFormat[format],val)
print(val_b)
for i in range(0,len(val_b),2):
val_16.append(struct.unpack('H',val_b[i:i+2])[0])
print(val_16)
return val_16
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
======================================================================
Métodos de Chamada de Entidades Funções BSMP
O retorno do método são os bytes de retorno da mensagem
======================================================================
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
    def TurnOn_FAx(self):
        """Send BSMP function 'TurnOn' (v1 FAx protocol, ID-only payload).

        Returns up to 6 bytes read back from the serial port.
        """
        payload_size = self.size_to_hex(1) #Payload: ID
        send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('TurnOn'))
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        return self.ser.read(6)
def TurnOn(self,ps_modules):
payload_size = self.size_to_hex(1+2) #Payload: ID + ps_modules
hex_modules = self.double_to_hex(ps_modules)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('TurnOn'))+hex_modules
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def turn_on(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('turn_on'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def TurnOff_FAx(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('TurnOff'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def TurnOff(self,ps_modules):
payload_size = self.size_to_hex(1+2) #Payload: ID + ps_modules
hex_modules = self.double_to_hex(ps_modules)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('TurnOff'))+hex_modules
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def turn_off(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('turn_off'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def open_loop(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('open_loop'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def closed_loop(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('closed_loop'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def OpenLoop(self,ps_modules):
payload_size = self.size_to_hex(1+2) #Payload: ID + ps_modules
hex_modules = self.double_to_hex(ps_modules)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('OpenLoop'))+hex_modules
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ClosedLoop(self,ps_modules):
payload_size = self.size_to_hex(1+2) #Payload: ID + ps_modules
hex_modules = self.double_to_hex(ps_modules)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ClosedLoop'))+hex_modules
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def OpenLoop_FAx(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('OpenLoop'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ClosedLoop_FAx(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ClosedLoop'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def OpMode(self,op_mode):
payload_size = self.size_to_hex(1+2) #Payload: ID + ps_opmode
hex_opmode = self.double_to_hex(op_mode)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('OpMode'))+hex_opmode
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def RemoteInterface(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('RemoteInterface'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def SetISlowRef(self,setpoint):
payload_size = self.size_to_hex(1+4) #Payload: ID + iSlowRef
hex_setpoint = self.float_to_hex(setpoint)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('SetISlowRef'))+hex_setpoint
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ConfigWfmRef(self,gain,offset):
payload_size = self.size_to_hex(1+4+4) #Payload: ID + gain + offset
hex_gain = self.float_to_hex(gain)
hex_offset = self.float_to_hex(offset)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ConfigWfmRef'))+hex_gain+hex_offset
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ConfigSigGen(self,sigType,nCycles,phaseStart,phaseEnd):
payload_size = self.size_to_hex(1+2+2+4+4) #Payload: ID + type + nCycles + phaseStart + phaseEnd
hex_sigType = self.double_to_hex(sigType)
hex_nCycles = self.double_to_hex(nCycles)
hex_phaseStart = self.float_to_hex(phaseStart)
hex_phaseEnd = self.float_to_hex(phaseEnd)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ConfigSigGen'))+hex_sigType+hex_nCycles+hex_phaseStart+hex_phaseEnd
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def EnableSigGen(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('EnableSigGen'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def DisableSigGen(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('DisableSigGen'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ConfigDPModule(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ConfigDPModule'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
    def ConfigDPModuleFull(self,dp_id,dp_class,dp_coeffs):
        """Write a DP module's ID, class and coefficients, then apply them
        with ConfigDPModule()."""
        self.Write_dp_ID(dp_id)
        self.Write_dp_Class(dp_class)
        self.Write_dp_Coeffs(dp_coeffs)
        self.ConfigDPModule()
def WfmRefUpdate(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('WfmRefUpdate'))
send_msg = self.checksum(self.BCastAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
def ResetInterlocks(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ResetInterlocks'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def reset_interlocks(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('reset_interlocks'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ConfigPSModel(self,ps_model):
payload_size = self.size_to_hex(1+2) #Payload: ID + ps_Model
hex_model = self.double_to_hex(ps_model)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ConfigPSModel'))+hex_model
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ConfigHRADC(self,hradcID,freqSampling,inputType,enableHeater,enableMonitor):
payload_size = self.size_to_hex(1+2+4+2+2+2) #Payload: ID + hradcID + freqSampling + inputType + enableHeater + enableMonitor
hex_hradcID = self.double_to_hex(hradcID)
hex_freq = self.float_to_hex(freqSampling)
hex_type = self.double_to_hex(ListHRADCInputType.index(inputType))
hex_enHeater = self.double_to_hex(enableHeater)
hex_enMonitor = self.double_to_hex(enableMonitor)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ConfigHRADC'))+hex_hradcID+hex_freq+hex_type+hex_enHeater+hex_enMonitor
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ConfigHRADCOpMode(self,hradcID,opMode):
payload_size = self.size_to_hex(1+2+2) #Payload: ID + hradcID + opMode
hex_hradcID = self.double_to_hex(hradcID)
hex_opMode = self.double_to_hex(opMode)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ConfigHRADCOpMode'))+hex_hradcID+hex_opMode
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def EnableHRADCSampling(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('EnableHRADCSampling'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def DisableHRADCSampling(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('DisableHRADCSampling'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ResetWfmRef(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ResetWfmRef'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def SetRSAddress(self,rs_address):
payload_size = self.size_to_hex(1+2) #Payload: ID + rs_address
hex_add = self.double_to_hex(rs_address)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('SetRSAddress'))+hex_add
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def EnableSamplesBuffer(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('EnableSamplesBuffer'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def DisableSamplesBuffer(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('DisableSamplesBuffer'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def SelectHRADCBoard(self,hradcID):
payload_size = self.size_to_hex(1+2) #Payload: ID
hex_hradcID = self.double_to_hex(hradcID)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('SelectHRADCBoard'))+hex_hradcID
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def SelectTestSource(self,inputType):
payload_size = self.size_to_hex(1+2) #Payload: inputType
hex_type = self.double_to_hex(ListHRADCInputType.index(inputType))
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('SelectTestSource'))+hex_type
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ResetHRADCBoards(self, enable):
payload_size = self.size_to_hex(1+2) #Payload: ID+enable(2)
hex_enable = self.double_to_hex(enable)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ResetHRADCBoards'))+hex_enable
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def Config_nHRADC(self,nHRADC):
payload_size = self.size_to_hex(1+2) #Payload: nHRADC
hex_nhradc = self.double_to_hex(nHRADC)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('Config_nHRADC'))+hex_nhradc
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ReadHRADC_UFM(self,hradcID,ufmadd):
payload_size = self.size_to_hex(1+2+2) #Payload: ID + hradcID + ufmadd
hex_hradcID = self.double_to_hex(hradcID)
hex_ufmadd = self.double_to_hex(ufmadd)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ReadHRADC_UFM'))+hex_hradcID+hex_ufmadd
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def WriteHRADC_UFM(self,hradcID,ufmadd,ufmdata):
payload_size = self.size_to_hex(1+2+2+2) #Payload: ID + hradcID + ufmadd + ufmdata
hex_hradcID = self.double_to_hex(hradcID)
hex_ufmadd = self.double_to_hex(ufmadd)
hex_ufmdata = self.double_to_hex(ufmdata)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('WriteHRADC_UFM'))+hex_hradcID+hex_ufmadd+hex_ufmdata
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def EraseHRADC_UFM(self,hradcID):
payload_size = self.size_to_hex(1+2) #Payload: ID + hradcID
hex_hradcID = self.double_to_hex(hradcID)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('EraseHRADC_UFM'))+hex_hradcID
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def InitHRADC_BoardData(self, serial = 12345678, day = 1, mon = 1,
year = 2017, hour = 12, minutes = 30,
variant = 'HRADC-FBP', rburden = 20, calibtemp = 40,
vin_gain = 1, vin_offset = 0, iin_gain = 1,
iin_offset = 0, vref_p = 5, vref_n = -5, gnd = 0):
boardData = {'serial': serial, 'variant': variant, 'rburden': rburden,
'tm_mday': day, 'tm_mon': mon, 'tm_year': year,
'tm_hour': hour, 'tm_min': minutes, 'calibtemp': calibtemp,
'vin_gain': vin_gain, 'vin_offset': vin_offset,
'iin_gain': iin_gain, 'iin_offset': iin_offset,
'vref_p': vref_p, 'vref_n': vref_n, 'gnd': gnd}
return boardData
def WriteHRADC_BoardData(self,hradcID,boardData):
print('Configurando placa em UFM mode...')
self.ConfigHRADCOpMode(hradcID,1)
time.sleep(0.5)
print('\nEnviando serial number...')
ufmdata_16 = self._convertToUint16List(boardData['serial'],'Uint64')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['serial'],ufmdata_16[i])
time.sleep(0.1)
print('\nEnviando variante...')
ufmdata_16 = self._convertToUint16List(hradcVariant.index(boardData['variant']),'Uint16')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['variant'],ufmdata_16[i])
time.sleep(0.1)
print('\nEnviando rburden...')
ufmdata_16 = self._convertToUint16List(boardData['rburden'],'float')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['rburden'],ufmdata_16[i])
time.sleep(0.1)
print('\nEnviando calibdate...')
ufmdata_16 = self._convertToUint16List(boardData['tm_mday'],'Uint16')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['calibdate'],ufmdata_16[i])
time.sleep(0.1)
# Month
ufmdata_16 = self._convertToUint16List(boardData['tm_mon'],'Uint16')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['calibdate']+1,ufmdata_16[i])
time.sleep(0.1)
# Year
ufmdata_16 = self._convertToUint16List(boardData['tm_year'],'Uint16')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['calibdate']+2,ufmdata_16[i])
time.sleep(0.1)
# Hour
ufmdata_16 = self._convertToUint16List(boardData['tm_hour'],'Uint16')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['calibdate']+3,ufmdata_16[i])
time.sleep(0.1)
# Minutes
ufmdata_16 = self._convertToUint16List(boardData['tm_min'],'Uint16')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['calibdate']+4,ufmdata_16[i])
time.sleep(0.1)
print('\nEnviando calibtemp...')
ufmdata_16 = self._convertToUint16List(boardData['calibtemp'],'float')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['calibtemp'],ufmdata_16[i])
time.sleep(0.1)
print('\nEnviando vin_gain...')
ufmdata_16 = self._convertToUint16List(boardData['vin_gain'],'float')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['vin_gain'],ufmdata_16[i])
time.sleep(0.1)
print('\nEnviando vin_offset...')
ufmdata_16 = self._convertToUint16List(boardData['vin_offset'],'float')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['vin_offset'],ufmdata_16[i])
time.sleep(0.1)
print('\nEnviando iin_gain...')
ufmdata_16 = self._convertToUint16List(boardData['iin_gain'],'float')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['iin_gain'],ufmdata_16[i])
time.sleep(0.1)
print('\nEnviando iin_offset...')
ufmdata_16 = self._convertToUint16List(boardData['iin_offset'],'float')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['iin_offset'],ufmdata_16[i])
time.sleep(0.1)
print('\nEnviando vref_p...')
ufmdata_16 = self._convertToUint16List(boardData['vref_p'],'float')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['vref_p'],ufmdata_16[i])
time.sleep(0.1)
print('\nEnviando vref_n...')
ufmdata_16 = self._convertToUint16List(boardData['vref_n'],'float')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['vref_n'],ufmdata_16[i])
time.sleep(0.1)
print('\nEnviando gnd...')
ufmdata_16 = self._convertToUint16List(boardData['gnd'],'float')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['gnd'],ufmdata_16[i])
time.sleep(0.1)
print('Colocando a placa em Sampling mode...')
self.ConfigHRADCOpMode(hradcID,0)
    def ReadHRADC_BoardData(self,hradcID):
        """Read the UFM board data of HRADC slot *hradcID* and return it as a dict.

        Puts the board in UFM mode, requests 'ReadHRADC_BoardData', then reads
        BSMP variable 50+hradcID (61-byte frame) and unpacks it. If unpacking
        the fields fails (uninitialized board), prompts the user for a serial
        number and returns a default dict. Always returns the board to
        sampling mode before returning.
        """
        print('Configurando placa em UFM mode...')
        print(self.ConfigHRADCOpMode(hradcID,1))
        time.sleep(0.5)
        print('Extraindo dados da placa...')
        payload_size = self.size_to_hex(1+2) #Payload: ID + hradcID
        hex_hradcID = self.double_to_hex(hradcID)
        send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ReadHRADC_BoardData'))+hex_hradcID
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        print(self.ser.read(6))
        print('Lendo dados da placa...')
        # Board data is exposed as BSMP variable 50 + slot number.
        self.read_var(self.index_to_hex(50+hradcID))
        reply_msg = self.ser.read(1+1+2+56+1)
        print(reply_msg)
        print(len(reply_msg))
        val = struct.unpack('BBHLLHHHHHHfffffffffB',reply_msg)
        try:
            # serial is split into two 32-bit halves: val[3] (low) + val[4] (high).
            # NOTE(review): bare except below — likely an IndexError from
            # hradcVariant[val[10]] on a blank board; narrow when confirmed.
            boardData = self.InitHRADC_BoardData(val[3]+val[4]*pow(2,32),val[5],
                                                 val[6],val[7],val[8],val[9],
                                                 hradcVariant[val[10]],val[11],
                                                 val[12],val[13],val[14],val[15],
                                                 val[16],val[17],val[18],val[19])
        except:
            print('\n### Placa não inicializada ###\n')
            boardData = self.InitHRADC_BoardData(serial = int(input('Digite o S/N: ')))
            print('\n')
        print('Colocando a placa em Sampling mode...')
        print(self.ConfigHRADCOpMode(hradcID,0))
        time.sleep(0.5)
        return boardData
def UpdateHRADC_BoardData(self,hradcID):
variant = len(hradcVariant)
while variant >= len(hradcVariant) or variant < 0:
variant = int(input("Enter HRADC variant number:\n 0: HRADC-FBP\n 1: HRADC-FAX-A\n 2: HRADC-FAX-B\n 3: HRADC-FAX-C\n 4: HRADC-FAX-D\n\n>>> "))
variant = hradcVariant[variant]
boardData = self.ReadHRADC_BoardData(hradcID)
boardData['variant'] = variant
boardData['vin_offset'] = np.float32(0)
boardData['iin_offset'] = np.float32(0)
if variant == 'HRADC-FBP':
boardData['rburden'] = np.float32(20)
boardData['vin_gain'] = np.float32(1)
boardData['iin_gain'] = np.float32(1)
print(boardData['vin_gain'])
print(boardData['variant'])
elif variant == 'HRADC-FAX-A':
boardData['rburden'] = np.float32(0)
boardData['vin_gain'] = np.float32(6.0/5.0)
boardData['iin_gain'] = np.float32(6.0/5.0)
print(boardData['vin_gain'])
print(boardData['variant'])
elif variant == 'HRADC-FAX-B':
boardData['rburden'] = np.float32(0)
boardData['vin_gain'] = np.float32(1)
boardData['iin_gain'] = np.float32(1)
print(boardData['vin_gain'])
print(boardData['variant'])
elif variant == 'HRADC-FAX-C':
boardData['rburden'] = np.float32(5)
boardData['vin_gain'] = np.float32(1)
boardData['iin_gain'] = np.float32(1)
print(boardData['vin_gain'])
print(boardData['variant'])
elif variant == 'HRADC-FAX-D':
boardData['rburden'] = np.float32(1)
boardData['vin_gain'] = np.float32(1)
boardData['iin_gain'] = np.float32(1)
print(boardData['vin_gain'])
print(boardData['variant'])
print('\n\nBoard data from HRADC of slot #' + str(hradcID) + ' is about to be overwritten by the following data:')
print(boardData)
i = input('\n Do you want to proceed? [y/n]: ')
if i is 'Y' or i is 'y':
self.ConfigHRADCOpMode(hradcID,1)
time.sleep(0.1)
self.EraseHRADC_UFM(hradcID)
time.sleep(0.5)
self.ResetHRADCBoards(1)
time.sleep(0.5)
self.ResetHRADCBoards(0)
time.sleep(1.5)
self.WriteHRADC_BoardData(hradcID,boardData)
boardData_new = self.ReadHRADC_BoardData(hradcID)
print(boardData_new)
print(boardData)
if boardData_new == boardData:
print('\n\n ### Operation was successful !!! ### \n\n')
else:
print('\n\n ### Operation failed !!! ### \n\n')
return [boardData, boardData_new]
def GetHRADCs_BoardData(self,numHRADC):
boardData_list = []
for i in range(numHRADC):
boardData_list.append(self.ReadHRADC_BoardData(i))
return boardData_list
def UdcEepromTest(self, rw, data=None):
if data is not None:
payload_size = self.size_to_hex(12)
hex_rw = self.double_to_hex(rw)
hex_byte_0 = self.double_to_hex(data[0])
hex_byte_1 = self.double_to_hex(data[1])
hex_byte_2 = self.double_to_hex(data[2])
hex_byte_3 = self.double_to_hex(data[3])
hex_byte_4 = self.double_to_hex(data[4])
hex_byte_5 = self.double_to_hex(data[5])
hex_byte_6 = self.double_to_hex(data[6])
hex_byte_7 = self.double_to_hex(data[7])
hex_byte_8 = self.double_to_hex(data[8])
hex_byte_9 = self.double_to_hex(data[9])
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcEepromTest'))+hex_rw[0]+ \
hex_byte_0[0] + hex_byte_1[0] + hex_byte_2[0] + hex_byte_3[0] + hex_byte_4[0] + hex_byte_5[0]+ \
hex_byte_6[0] + hex_byte_7[0] + hex_byte_8[0] + hex_byte_9[0]
print(send_packet.encode('ISO-8859-1'))
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(15)
def UdcFlashTest(self, rw):
payload_size = self.size_to_hex(2)
hex_rw = self.double_to_hex(rw)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcFlashTest'))+hex_rw[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def UdcRamTest(self, rw):
payload_size = self.size_to_hex(2)
hex_rw = self.double_to_hex(rw)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcRamTest'))+hex_rw[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def UdcAdcTest(self, rw, channel):
payload_size = self.size_to_hex(3)
hex_rw = self.double_to_hex(rw)
hex_channel = self.double_to_hex(channel)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcAdcTest'))+hex_rw[0]+hex_channel[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def UdcSensorTempTest(self, rw):
payload_size = self.size_to_hex(2)
hex_rw = self.double_to_hex(rw)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcSensorTempTest'))+hex_rw[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def UdcRtcTest(self, rw):
payload_size = self.size_to_hex(2)
hex_rw = self.double_to_hex(rw)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcRtcTest'))+hex_rw[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def UdcUartTest(self, rw):
payload_size = self.size_to_hex(2)
hex_rw = self.double_to_hex(rw)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcUartTest'))+hex_rw[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def UdcIoExpanderTest(self, rw):
payload_size = self.size_to_hex(2)
hex_rw = self.double_to_hex(rw)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcIoExpanderTest'))+hex_rw[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
# def UdcEthernetTest(self, rw):
# payload_size = self.size_to_hex(2)
# hex_rw = self.double_to_hex(rw)
# send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcEthernetTest'))+hex_rw
# self.ser.write(send_packet.encode('ISO-8859-1'))
# return self.ser.read()
def UdcIsoPlaneTest(self, rw):
payload_size = self.size_to_hex(2)
hex_rw = self.double_to_hex(rw)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcIsoPlaneTest'))+hex_rw[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def UdcLoopBackTest(self, rw, channel):
payload_size = self.size_to_hex(3)
hex_rw = self.double_to_hex(rw)
hex_channel = self.double_to_hex(channel)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcLoopBackTest'))+hex_rw[0]+hex_channel[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def UdcLedTest(self, rw):
payload_size = self.size_to_hex(2)
hex_rw = self.double_to_hex(rw)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcLedTest'))+hex_rw[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def UdcBuzzerTest(self, rw):
payload_size = self.size_to_hex(2)
hex_rw = self.double_to_hex(rw)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcBuzzerTest'))+hex_rw[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def UdcComTest(self, rw, val):
payload_size = self.size_to_hex(3)
hex_rw = self.double_to_hex(rw)
hex_value = self.double_to_hex(val)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcComTest'))+hex_rw[0]+hex_value[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
time.sleep(0.2)
return self.ser.read(6)
def UdcI2cIsoTest(self, rw, val):
payload_size = self.size_to_hex(3)
hex_rw = self.double_to_hex(rw)
hex_value = self.double_to_hex(val)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcI2cIsoTest'))+hex_rw[0]+hex_value[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def SetISlowRefx4(self, iRef1 = 0, iRef2 = 0, iRef3 = 0, iRef4 = 0):
payload_size = self.size_to_hex(1+4*4) #Payload: ID + 4*iRef
hex_iRef1 = self.float_to_hex(iRef1)
hex_iRef2 = self.float_to_hex(iRef2)
hex_iRef3 = self.float_to_hex(iRef3)
hex_iRef4 = self.float_to_hex(iRef4)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('SetISlowRefx4'))+hex_iRef1+hex_iRef2+hex_iRef3+hex_iRef4
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def SetPof(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListBCBFunc.index('SetPof'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ClearPof(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListBCBFunc.index('ClearPof'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ReadPof(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListBCBFunc.index('ReadPof'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def EnableBuzzer(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListBCBFunc.index('EnableBuzzer'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def DisableBuzzer(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListBCBFunc.index('DisableBuzzer'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def SendUartData(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListBCBFunc.index('SendUartData'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def GetUartData(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListBCBFunc.index('GetUartData'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def SendCanData(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListBCBFunc.index('SendCanData'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def GetCanData(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListBCBFunc.index('GetCanData'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def GetI2cData(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListBCBFunc.index('GetI2cData'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
    def read_ps_status(self):
        """Read the v2.1 'ps_status' BSMP variable and decode it into a dict.

        Bit layout of the 16-bit status word (val[3]):
          bits 0-3  : operation state  -> ListOpMode_v2_1
          bit  4    : open-loop flag
          bits 5-6  : interface
          bit  7    : active flag
          bits 8-12 : PS model         -> ListPSModels_v2_1
          bit  13   : unlocked flag
        """
        self.read_var(self.index_to_hex(ListVar_v2_1.index('ps_status')))
        reply_msg = self.ser.read(7)
        # Frame: address, command, size (uint16), status word (uint16), checksum.
        val = struct.unpack('BBHHB',reply_msg)
        status = {}
        status['state'] = ListOpMode_v2_1[(val[3] & 0b0000000000001111)]
        status['open_loop'] = (val[3] & 0b0000000000010000) >> 4
        status['interface'] = (val[3] & 0b0000000001100000) >> 5
        status['active'] = (val[3] & 0b0000000010000000) >> 7
        status['model'] = ListPSModels_v2_1[(val[3] & 0b0001111100000000) >> 8]
        status['unlocked'] = (val[3] & 0b0010000000000000) >> 13
        #print(status)
        return status
def set_ps_name(self,ps_name):
if type(ps_name) == str:
for n in range(len(ps_name)):
self.set_param('PS_Name', n, float(ord(ps_name[n])))
for i in range(n+1,64):
self.set_param('PS_Name', i, float(ord(" ")))
    def get_ps_name(self):
        """Read the 64-character 'PS_Name' parameter and return it, trimmed at
        the first run of trailing blanks.

        NOTE(review): the comparison below slices the LAST THREE characters but
        compares them to a shorter literal — confirm whether the literal should
        be three spaces (whitespace may have been collapsed in this copy).
        """
        ps_name = ""
        for n in range(64):
            ps_name = ps_name + chr(int(self.get_param('PS_Name', n)))
            if ps_name[-3:] == ' ':
                ps_name = ps_name[:n-2]
                break
        return ps_name
def set_slowref(self,setpoint):
payload_size = self.size_to_hex(1+4) #Payload: ID + iSlowRef
hex_setpoint = self.float_to_hex(setpoint)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('set_slowref'))+hex_setpoint
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def set_slowref_fbp(self, iRef1 = 0, iRef2 = 0, iRef3 = 0, iRef4 = 0):
payload_size = self.size_to_hex(1+4*4) #Payload: ID + 4*iRef
hex_iRef1 = self.float_to_hex(iRef1)
hex_iRef2 = self.float_to_hex(iRef2)
hex_iRef3 = self.float_to_hex(iRef3)
hex_iRef4 = self.float_to_hex(iRef4)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('set_slowref_fbp'))+hex_iRef1+hex_iRef2+hex_iRef3+hex_iRef4
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def set_slowref_readback_mon(self,setpoint):
payload_size = self.size_to_hex(1+4) #Payload: ID + iSlowRef
hex_setpoint = self.float_to_hex(setpoint)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('set_slowref_readback_mon'))+hex_setpoint
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
    def set_slowref_fbp_readback_mon(self, iRef1 = 0, iRef2 = 0, iRef3 = 0, iRef4 = 0):
        """Send 'set_slowref_fbp_readback_mon' with four float references.

        On success (21-byte reply) return the four float monitor readbacks as a
        list; a short 6-byte reply (error frame) is returned raw instead.
        """
        payload_size = self.size_to_hex(1+4*4) #Payload: ID + 4*iRef
        hex_iRef1 = self.float_to_hex(iRef1)
        hex_iRef2 = self.float_to_hex(iRef2)
        hex_iRef3 = self.float_to_hex(iRef3)
        hex_iRef4 = self.float_to_hex(iRef4)
        send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('set_slowref_fbp_readback_mon'))+hex_iRef1+hex_iRef2+hex_iRef3+hex_iRef4
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        reply_msg = self.ser.read(21)
        if(len(reply_msg) == 6):
            return reply_msg
        else:
            val = struct.unpack('BBHffffB',reply_msg)
            return [val[3],val[4],val[5],val[6]]
    def set_slowref_readback_ref(self,setpoint):
        """Send 'set_slowref_readback_ref' with a float setpoint and return the float reference readback from the 9-byte reply."""
        payload_size = self.size_to_hex(1+4) #Payload: ID + iSlowRef
        hex_setpoint = self.float_to_hex(setpoint)
        send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('set_slowref_readback_ref'))+hex_setpoint
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        reply_msg = self.ser.read(9)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def set_slowref_fbp_readback_ref(self, iRef1 = 0, iRef2 = 0, iRef3 = 0, iRef4 = 0):
        """Send 'set_slowref_fbp_readback_ref' with four float references.

        On success (21-byte reply) return the four float reference readbacks as
        a list; a short 6-byte reply (error frame) is returned raw instead.
        """
        payload_size = self.size_to_hex(1+4*4) #Payload: ID + 4*iRef
        hex_iRef1 = self.float_to_hex(iRef1)
        hex_iRef2 = self.float_to_hex(iRef2)
        hex_iRef3 = self.float_to_hex(iRef3)
        hex_iRef4 = self.float_to_hex(iRef4)
        send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('set_slowref_fbp_readback_ref'))+hex_iRef1+hex_iRef2+hex_iRef3+hex_iRef4
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        reply_msg = self.ser.read(21)
        if(len(reply_msg) == 6):
            return reply_msg
        else:
            val = struct.unpack('BBHffffB',reply_msg)
            return [val[3],val[4],val[5],val[6]]
    def set_param(self, param_id, n, value):
        """Write one element of a UDC parameter.

        param_id may be a name (looked up in ListParameters) or a numeric id;
        n is the array index and value the float to store. Prints a warning if
        the UDC answers with error code 8 (invalid parameter). Returns the raw
        6-byte reply. NOTE(review): a param_id that is neither str nor int
        leaves hex_id unset and raises NameError.
        """
        payload_size = self.size_to_hex(1+2+2+4) #Payload: ID + param id + [n] + value
        if type(param_id) == str:
            hex_id = self.double_to_hex(ListParameters.index(param_id))
        if type(param_id) == int:
            hex_id = self.double_to_hex(param_id)
        hex_n = self.double_to_hex(n)
        hex_value = self.float_to_hex(value)
        send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('set_param'))+hex_id+hex_n+hex_value
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        reply_msg = self.ser.read(6)
        if reply_msg[4] == 8:
            print('Invalid parameter')
        return reply_msg
    def get_param(self, param_id, n = 0):
        """Read one element of a UDC parameter.

        param_id may be a name (looked up in ListParameters) or a numeric id.
        Returns the float value, or NaN when the reply is not the expected
        9 bytes (e.g. invalid parameter / timeout).
        """
        payload_size = self.size_to_hex(1+2+2) #Payload: ID + param id + [n]
        if type(param_id) == str:
            hex_id = self.double_to_hex(ListParameters.index(param_id))
        if type(param_id) == int:
            hex_id = self.double_to_hex(param_id)
        hex_n = self.double_to_hex(n)
        send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('get_param'))+hex_id+hex_n
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        # Discard any stale bytes so the unpack below sees only this reply.
        self.ser.reset_input_buffer()
        self.ser.write(send_msg.encode('ISO-8859-1'))
        reply_msg = self.ser.read(9)
        if len(reply_msg) == 9:
            val = struct.unpack('BBHfB',reply_msg)
            return val[3]
        else:
            #print('Invalid parameter')
            return float('nan')
    def save_param_eeprom(self, param_id, n = 0, type_memory = 2):
        """Persist one parameter element to EEPROM (type_memory selects the memory bank).

        param_id may be a name (ListParameters) or a numeric id. Prints a
        warning on error code 8 and returns the raw 6-byte reply.
        """
        payload_size = self.size_to_hex(1+2+2+2) #Payload: ID + param id + [n] + memory type
        if type(param_id) == str:
            hex_id = self.double_to_hex(ListParameters.index(param_id))
        if type(param_id) == int:
            hex_id = self.double_to_hex(param_id)
        hex_n = self.double_to_hex(n)
        hex_type = self.double_to_hex(type_memory)
        send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('save_param_eeprom'))+hex_id+hex_n+hex_type
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        reply_msg = self.ser.read(6)
        if reply_msg[4] == 8:
            print('Invalid parameter')
        return reply_msg
    def load_param_eeprom(self, param_id, n = 0, type_memory = 2):
        """Load one parameter element from EEPROM (type_memory selects the memory bank).

        param_id may be a name (ListParameters) or a numeric id. Prints a
        warning on error code 8 and returns the raw 6-byte reply.
        """
        payload_size = self.size_to_hex(1+2+2+2) #Payload: ID + param id + [n] + memory type
        if type(param_id) == str:
            hex_id = self.double_to_hex(ListParameters.index(param_id))
        if type(param_id) == int:
            hex_id = self.double_to_hex(param_id)
        hex_n = self.double_to_hex(n)
        hex_type = self.double_to_hex(type_memory)
        send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('load_param_eeprom'))+hex_id+hex_n+hex_type
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        reply_msg = self.ser.read(6)
        if reply_msg[4] == 8:
            print('Invalid parameter')
        return reply_msg
    def save_param_bank(self, type_memory = 2):
        """Persist the whole parameter bank to EEPROM; return the raw 6-byte reply."""
        payload_size = self.size_to_hex(1+2) #Payload: ID + memory type
        hex_type = self.double_to_hex(type_memory)
        send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('save_param_bank'))+hex_type
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        return self.ser.read(6)
    def load_param_bank(self, type_memory = 2):
        """Load the whole parameter bank from EEPROM; return the raw 6-byte reply."""
        payload_size = self.size_to_hex(1+2) #Payload: ID + memory type
        hex_type = self.double_to_hex(type_memory)
        send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('load_param_bank'))+hex_type
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        return self.ser.read(6)
def set_param_bank(self, param_file):
fbp_param_list = []
with open(param_file,newline='') as f:
reader = csv.reader(f)
for row in reader:
fbp_param_list.append(row)
for param in fbp_param_list:
if str(param[0]) == 'PS_Name':
print(str(param[0]) + "[0]: " + str(param[1]))
print(self.set_ps_name(str(param[1])))
else:
for n in range(64):
try:
print(str(param[0]) + "["+ str(n) + "]: " + str(param[n+1]))
print(self.set_param(str(param[0]),n,float(param[n+1])))
except:
break
#self.save_param_bank()
    def get_param_bank(self, list_param = ListParameters, timeout = 0.5, print_modules = True):
        """Read every parameter in list_param (all 64 elements each) into a list of rows.

        Each row is [param_name, v0, v1, ...]; reading a parameter stops at the
        first NaN (end of valid elements). 'PS_Name' is read as a single string
        and also switches the serial timeout to `timeout`. The original serial
        timeout is restored before returning. Rows are printed when
        print_modules is True.
        NOTE(review): list_param defaults to the module-level ListParameters
        list; it is only iterated, never mutated.
        """
        timeout_old = self.ser.timeout
        #self.ser.timeout = 0.05
        param_bank = []
        for param_name in list_param:
            param_row = [param_name]
            for n in range(64):
                if param_name == 'PS_Name':
                    p = self.get_ps_name()
                    param_row.append(p)
                    #if(print_modules):
                    #print('PS_Name: ' + p)
                    self.ser.timeout = timeout
                    break
                else:
                    p = self.get_param(param_name,n)
                    if math.isnan(p):
                        break
                    param_row.append(p)
                    #if(print_modules):
                    #print(param_name + "[" + str(n) + "]: " + str(p))
            if(print_modules):
                print(param_row)
            param_bank.append(param_row)
        self.ser.timeout = timeout_old
        return param_bank
    def store_param_bank_csv(self, bank):
        """Interactively save a parameter bank (list of rows) to '<name>.csv'; the filename is asked via input()."""
        filename = input('Digite o nome do arquivo: ')
        with open( filename + '.csv', 'w', newline='') as f:
            writer = csv.writer(f, delimiter=',')
            for param_row in bank:
                writer.writerow(param_row)
    def enable_onboard_eeprom(self):
        """Enable the onboard EEPROM by writing 0 to 'Enable_Onboard_EEPROM' and persisting it (0 = enabled per firmware convention — TODO confirm)."""
        self.set_param('Enable_Onboard_EEPROM',0,0)
        self.save_param_eeprom('Enable_Onboard_EEPROM',0,2)
    def disable_onboard_eeprom(self):
        """Disable the onboard EEPROM by writing 1 to 'Enable_Onboard_EEPROM' and persisting it (1 = disabled per firmware convention — TODO confirm)."""
        self.set_param('Enable_Onboard_EEPROM',0,1)
        self.save_param_eeprom('Enable_Onboard_EEPROM',0,2)
    def set_dsp_coeffs(self, dsp_class, dsp_id, coeffs_list = [0,0,0,0,0,0,0,0,0,0,0,0]):
        """Write a coefficient list to one DSP module (dsp_class/dsp_id); return the raw 6-byte reply.

        The list is zero-padded to NUM_MAX_COEFFS_DSP before sending.
        NOTE(review): mutable default list — safe here only because it is read,
        never mutated.
        """
        coeffs_list_full = self.format_list_size(coeffs_list, NUM_MAX_COEFFS_DSP)
        payload_size = self.size_to_hex(1+2+2+4*NUM_MAX_COEFFS_DSP)
        hex_dsp_class= self.double_to_hex(dsp_class)
        hex_dsp_id = self.double_to_hex(dsp_id)
        hex_coeffs = self.float_list_to_hex(coeffs_list_full)
        send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('set_dsp_coeffs'))+hex_dsp_class+hex_dsp_id+hex_coeffs
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        return self.ser.read(6)
    def get_dsp_coeff(self, dsp_class, dsp_id, coeff):
        """Read a single DSP coefficient (float) from module dsp_class/dsp_id."""
        payload_size = self.size_to_hex(1+2+2+2)
        hex_dsp_class= self.double_to_hex(dsp_class)
        hex_dsp_id = self.double_to_hex(dsp_id)
        hex_coeff = self.double_to_hex(coeff)
        send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('get_dsp_coeff'))+hex_dsp_class+hex_dsp_id+hex_coeff
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        # Flush stale bytes before the transaction so the reply aligns.
        self.ser.reset_input_buffer()
        self.ser.write(send_msg.encode('ISO-8859-1'))
        reply_msg = self.ser.read(9)
        #print(reply_msg)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def save_dsp_coeffs_eeprom(self, dsp_class, dsp_id, type_memory = 2):
        """Persist the coefficients of one DSP module to EEPROM; return the raw 6-byte reply."""
        payload_size = self.size_to_hex(1+2+2+2)
        hex_dsp_class= self.double_to_hex(dsp_class)
        hex_dsp_id = self.double_to_hex(dsp_id)
        hex_type = self.double_to_hex(type_memory)
        send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('save_dsp_coeffs_eeprom'))+hex_dsp_class+hex_dsp_id+hex_type
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        return self.ser.read(6)
    def load_dsp_coeffs_eeprom(self, dsp_class, dsp_id, type_memory = 2):
        """Load the coefficients of one DSP module from EEPROM; return the raw 6-byte reply."""
        payload_size = self.size_to_hex(1+2+2+2)
        hex_dsp_class= self.double_to_hex(dsp_class)
        hex_dsp_id = self.double_to_hex(dsp_id)
        hex_type = self.double_to_hex(type_memory)
        send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('load_dsp_coeffs_eeprom'))+hex_dsp_class+hex_dsp_id+hex_type
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        return self.ser.read(6)
    def save_dsp_modules_eeprom(self, type_memory = 2):
        """Persist all DSP modules to EEPROM; return the raw 6-byte reply."""
        payload_size = self.size_to_hex(1+2) #Payload: ID + memory type
        hex_type = self.double_to_hex(type_memory)
        send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('save_dsp_modules_eeprom'))+hex_type
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        return self.ser.read(6)
    def load_dsp_modules_eeprom(self, type_memory = 2):
        """Load all DSP modules from EEPROM; return the raw 6-byte reply."""
        payload_size = self.size_to_hex(1+2) #Payload: ID + memory type
        hex_type = self.double_to_hex(type_memory)
        send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('load_dsp_modules_eeprom'))+hex_type
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        return self.ser.read(6)
    def reset_udc(self):
        """Reset the UDC board firmware after an interactive Y/N confirmation; the command is only sent on 'Y'/'y'."""
        reply = input('\nEste comando realiza o reset do firmware da placa UDC, e por isso, so e executado caso a fonte esteja desligada. \nCaso deseje apenas resetar interlocks, utilize o comando reset_interlocks(). \n\nTem certeza que deseja prosseguir? [Y/N]: ')
        if reply == 'Y' or reply == 'y':
            payload_size = self.size_to_hex(1) #Payload: ID
            send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('reset_udc'))
            send_msg = self.checksum(self.SlaveAdd+send_packet)
            self.ser.write(send_msg.encode('ISO-8859-1'))
    def run_bsmp_func(self,id_func,print_msg = 0):
        """Run an arbitrary BSMP function by numeric id; optionally print and always return the raw 6-byte reply."""
        payload_size = self.size_to_hex(1) #Payload: ID
        send_packet = self.ComFunction+payload_size+self.index_to_hex(id_func)
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        reply_msg = self.ser.read(6)
        if print_msg:
            print(reply_msg)
        return reply_msg
def run_bsmp_func_all_ps(self,p_func,add_list,arg = None,delay = 0.5, print_reply = 1):
old_add = self.GetSlaveAdd()
for add in add_list:
self.SetSlaveAdd(add)
if arg == None:
r = p_func()
else:
r = p_func(arg)
if print_reply:
print('\n Add ' + str(add))
print(r)
time.sleep(delay)
self.SetSlaveAdd(old_add)
    def cfg_source_scope(self,p_source):
        """Configure the scope data source (uint32 pointer/id); return the raw 6-byte reply."""
        payload_size = self.size_to_hex(1+4) #Payload: ID + p_source
        hex_op_mode = self.uint32_to_hex(p_source)
        send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('cfg_source_scope'))+hex_op_mode
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        return self.ser.read(6)
    def cfg_freq_scope(self,freq):
        """Configure the scope sampling frequency (float); return the raw 6-byte reply."""
        payload_size = self.size_to_hex(1+4) #Payload: ID + freq
        hex_op_mode = self.float_to_hex(freq)
        send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('cfg_freq_scope'))+hex_op_mode
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        return self.ser.read(6)
    def cfg_duration_scope(self,duration):
        """Configure the scope acquisition duration (float); return the raw 6-byte reply."""
        payload_size = self.size_to_hex(1+4) #Payload: ID + duration
        hex_op_mode = self.float_to_hex(duration)
        send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('cfg_duration_scope'))+hex_op_mode
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        return self.ser.read(6)
    def enable_scope(self):
        """Enable the scope acquisition; return the raw 6-byte reply."""
        payload_size = self.size_to_hex(1) #Payload: ID
        send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('enable_scope'))
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        return self.ser.read(6)
    def disable_scope(self):
        """Disable the scope acquisition; return the raw 6-byte reply."""
        payload_size = self.size_to_hex(1) #Payload: ID
        send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('disable_scope'))
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        return self.ser.read(6)
    def get_scope_vars(self):
        """Print the scope configuration (frequency, duration, source) read from BSMP variables 25-27."""
        print('\n### Scope Variables ###\n')
        print('Frequency: ' + str((round(self.read_bsmp_variable(25,'float'),3))))
        print('Duration: ' + str((round(self.read_bsmp_variable(26,'float'),3))))
        print('Source Data: ' + str((round(self.read_bsmp_variable(27,'uint32_t'),3))))
    def sync_pulse(self):
        """Issue a software sync pulse; return the raw 6-byte reply."""
        payload_size = self.size_to_hex(1) #Payload: ID
        send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('sync_pulse'))
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        return self.ser.read(6)
    def select_op_mode(self,op_mode):
        """Select the operation mode by name (looked up in ListOpMode_v2_1); return the raw 6-byte reply."""
        payload_size = self.size_to_hex(1+2) #Payload: ID + enable
        hex_op_mode = self.double_to_hex(ListOpMode_v2_1.index(op_mode))
        send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('select_op_mode'))+hex_op_mode
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        return self.ser.read(6)
    def set_serial_termination(self,term_enable):
        """Enable/disable the RS-485 serial termination resistor; return the raw 6-byte reply."""
        payload_size = self.size_to_hex(1+2) #Payload: ID + enable
        hex_enable = self.double_to_hex(term_enable)
        send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('set_serial_termination'))+hex_enable
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        return self.ser.read(6)
    def set_command_interface(self,interface):
        """Select the UDC command interface (numeric code); return the raw 6-byte reply."""
        payload_size = self.size_to_hex(1+2) #Payload: ID + enable
        hex_interface = self.double_to_hex(interface)
        send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('set_command_interface'))+hex_interface
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        return self.ser.read(6)
    def unlock_udc(self,password):
        """Unlock the UDC with a numeric password; return the raw 6-byte reply."""
        payload_size = self.size_to_hex(1+2) #Payload: ID + password
        hex_password = self.double_to_hex(password)
        send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('unlock_udc'))+hex_password
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        return self.ser.read(6)
    def lock_udc(self,password):
        """Lock the UDC with a numeric password; return the raw 6-byte reply."""
        payload_size = self.size_to_hex(1+2) #Payload: ID + password
        hex_password = self.double_to_hex(password)
        send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('lock_udc'))+hex_password
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        return self.ser.read(6)
    def reset_counters(self):
        """Reset the UDC event counters; return the raw 6-byte reply."""
        payload_size = self.size_to_hex(1) #Payload: ID
        send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('reset_counters'))
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        return self.ser.read(6)
    def cfg_siggen(self,sig_type,num_cycles,freq,amplitude,offset,aux0,aux1,aux2,aux3):
        """Fully configure the signal generator.

        sig_type is a name looked up in ListSigGenTypes_v2_1; num_cycles is an
        integer; freq/amplitude/offset and the four aux parameters are floats.
        Returns the raw 6-byte reply.
        """
        payload_size = self.size_to_hex(1+2+2+4+4+4+4*4)
        hex_sig_type = self.double_to_hex(ListSigGenTypes_v2_1.index(sig_type))
        hex_num_cycles = self.double_to_hex(num_cycles)
        hex_freq = self.float_to_hex(freq)
        hex_amplitude = self.float_to_hex(amplitude)
        hex_offset = self.float_to_hex(offset)
        hex_aux0 = self.float_to_hex(aux0)
        hex_aux1 = self.float_to_hex(aux1)
        hex_aux2 = self.float_to_hex(aux2)
        hex_aux3 = self.float_to_hex(aux3)
        send_packet = self.ComFunction + payload_size + self.index_to_hex(ListFunc_v2_1.index('cfg_siggen')) + hex_sig_type + hex_num_cycles + hex_freq + hex_amplitude + hex_offset + hex_aux0 + hex_aux1 + hex_aux2 + hex_aux3
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        return self.ser.read(6)
    def set_siggen(self,freq,amplitude,offset):
        """Update the signal generator's frequency, amplitude and offset (floats); return the raw 6-byte reply."""
        payload_size = self.size_to_hex(1+4+4+4)
        hex_freq = self.float_to_hex(freq)
        hex_amplitude = self.float_to_hex(amplitude)
        hex_offset = self.float_to_hex(offset)
        send_packet = self.ComFunction + payload_size + self.index_to_hex(ListFunc_v2_1.index('set_siggen')) + hex_freq + hex_amplitude + hex_offset
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        return self.ser.read(6)
    def enable_siggen(self):
        """Enable the signal generator; return the raw 6-byte reply."""
        payload_size = self.size_to_hex(1) #Payload: ID
        send_packet = self.ComFunction+payload_size + self.index_to_hex(ListFunc_v2_1.index('enable_siggen'))
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        return self.ser.read(6)
    def disable_siggen(self):
        """Disable the signal generator; return the raw 6-byte reply."""
        payload_size = self.size_to_hex(1) #Payload: ID
        send_packet = self.ComFunction+payload_size + self.index_to_hex(ListFunc_v2_1.index('disable_siggen'))
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        return self.ser.read(6)
    def cfg_wfmref(self, idx, sync_mode, frequency, gain = 1, offset = 0):
        """Configure a waveform reference: curve index, sync mode, frequency, gain and offset; return the raw 6-byte reply."""
        payload_size = self.size_to_hex(1+2+2+4+4+4) #Payload: ID + idx + sync_mode + frequency + gain + offset
        hex_idx = self.double_to_hex(idx)
        hex_mode = self.double_to_hex(sync_mode)
        hex_freq = self.float_to_hex(frequency)
        hex_gain = self.float_to_hex(gain)
        hex_offset = self.float_to_hex(offset)
        send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('cfg_wfmref'))+hex_idx+hex_mode+hex_freq+hex_gain+hex_offset
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        return self.ser.read(6)
    def select_wfmref(self,idx):
        """Select the active waveform reference curve by index; return the raw 6-byte reply."""
        payload_size = self.size_to_hex(1+2) #Payload: ID + idx
        hex_idx = self.double_to_hex(idx)
        send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('select_wfmref'))+hex_idx
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        return self.ser.read(6)
    def reset_wfmref(self):
        """Reset the waveform reference pointer; return the raw 6-byte reply."""
        payload_size = self.size_to_hex(1) #Payload: ID
        send_packet = self.ComFunction+payload_size + self.index_to_hex(ListFunc_v2_1.index('reset_wfmref'))
        send_msg = self.checksum(self.SlaveAdd+send_packet)
        self.ser.write(send_msg.encode('ISO-8859-1'))
        return self.ser.read(6)
    def get_wfmref_vars(self,curve_id):
        """Print the waveform-reference state for `curve_id`.

        Length and index are derived from the start/end/current buffer pointers
        (BSMP vars 19-21 offset by curve_id*3); pointer deltas are divided by 2
        because each sample occupies two bytes.
        """
        print('\n### WfmRef ' + str(curve_id) + ' Variables ###\n')
        print('Length: ' + str((round(self.read_bsmp_variable(20+curve_id*3,'uint32_t'),3)-round(self.read_bsmp_variable(19+curve_id*3,'uint32_t'),3))/2+1))
        print('Index: ' + str((round(self.read_bsmp_variable(21+curve_id*3,'uint32_t'),3)-round(self.read_bsmp_variable(19+curve_id*3,'uint32_t'),3))/2+1))
        print('WfmRef Selected: ' + str(round(self.read_bsmp_variable(14,'uint16_t'),3)))
        print('Sync Mode: ' + str(round(self.read_bsmp_variable(15,'uint16_t'),3)))
        print('Frequency: ' + str(round(self.read_bsmp_variable(16,'float'),3)) + " Hz")
        print('Gain: ' + str(round(self.read_bsmp_variable(17,'float'),3)))
        print('Offset: ' + str(round(self.read_bsmp_variable(18,'float'),3)))
def read_csv_file(self,filename, type = 'float'):
csv_list = []
with open(filename, newline = '') as f:
reader = csv.reader(f)
for row in reader:
if type == 'float':
row_converted = float(row[0])
elif type == 'string' or type == 'str':
row_converted = str(row[0])
csv_list.append(row_converted)
print('Length of list: ' + str(len(csv_list)) + '\n')
return csv_list
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
======================================================================
Métodos de Leitura de Valores das Variáveis BSMP
O retorno do método são os valores double/float da respectiva variavel
======================================================================
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
    def read_bsmp_variable(self,id_var,type_var,print_msg = 0):
        """Read BSMP variable `id_var` and decode it using the typeSize/typeFormat tables for `type_var`; return the decoded value."""
        self.read_var(self.index_to_hex(id_var))
        reply_msg = self.ser.read(typeSize[type_var])
        if print_msg:
            print(reply_msg)
        val = struct.unpack(typeFormat[type_var],reply_msg)
        return val[3]
    def read_bsmp_variable_gen(self,id_var,size_bytes,print_msg = 0):
        """Read BSMP variable `id_var` and return the raw reply bytes (size_bytes payload + 5 framing bytes), undecoded."""
        self.read_var(self.index_to_hex(id_var))
        reply_msg = self.ser.read(size_bytes+5)
        if print_msg:
            print(reply_msg)
        return reply_msg
    def read_udc_arm_version(self):
        """Return the ARM firmware version string (bytes 4-19 of BSMP variable 3)."""
        self.read_var(self.index_to_hex(3))
        reply_msg = self.ser.read(133)
        val = struct.unpack('16s',reply_msg[4:20])
        return val[0].decode('utf-8')
    def read_udc_c28_version(self):
        """Return the C28 (DSP) firmware version string (bytes 20-35 of BSMP variable 3)."""
        self.read_var(self.index_to_hex(3))
        reply_msg = self.ser.read(133)
        val = struct.unpack('16s',reply_msg[20:36])
        return val[0].decode('utf-8')
    def read_udc_version(self):
        """Print both ARM and C28 firmware version strings."""
        print('\n ARM: ' + self.read_udc_arm_version())
        print(' C28: ' + self.read_udc_c28_version())
    def Read_iLoad1(self):
        """Read float variable 'iLoad1' (load current 1); also prints the raw reply."""
        self.read_var(self.index_to_hex(ListVar.index('iLoad1')))
        reply_msg = self.ser.read(9)
        print(reply_msg)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def Read_iLoad2(self):
        """Read float variable 'iLoad2' (load current 2)."""
        self.read_var(self.index_to_hex(ListVar.index('iLoad2')))
        reply_msg = self.ser.read(9)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def Read_iMod1(self):
        """Read float variable 'iMod1' (module 1 current)."""
        self.read_var(self.index_to_hex(ListVar.index('iMod1')))
        reply_msg = self.ser.read(9)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def Read_iMod2(self):
        """Read float variable 'iMod2' (module 2 current)."""
        self.read_var(self.index_to_hex(ListVar.index('iMod2')))
        reply_msg = self.ser.read(9)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def Read_iMod3(self):
        """Read float variable 'iMod3' (module 3 current)."""
        self.read_var(self.index_to_hex(ListVar.index('iMod3')))
        reply_msg = self.ser.read(9)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def Read_iMod4(self):
        """Read float variable 'iMod4' (module 4 current)."""
        self.read_var(self.index_to_hex(ListVar.index('iMod4')))
        reply_msg = self.ser.read(9)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def Read_vLoad(self):
        """Read float variable 'vLoad' (load voltage)."""
        self.read_var(self.index_to_hex(ListVar.index('vLoad')))
        reply_msg = self.ser.read(9)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def Read_vDCMod1(self):
        """Read float variable 'vDCMod1' (module 1 DC-link voltage); also prints the raw reply."""
        self.read_var(self.index_to_hex(ListVar.index('vDCMod1')))
        reply_msg = self.ser.read(9)
        print(reply_msg)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def Read_vDCMod2(self):
        """Read float variable 'vDCMod2' (module 2 DC-link voltage); also prints the raw reply."""
        self.read_var(self.index_to_hex(ListVar.index('vDCMod2')))
        reply_msg = self.ser.read(9)
        print(reply_msg)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def Read_vDCMod3(self):
        """Read float variable 'vDCMod3' (module 3 DC-link voltage); also prints the raw reply."""
        self.read_var(self.index_to_hex(ListVar.index('vDCMod3')))
        reply_msg = self.ser.read(9)
        print(reply_msg)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def Read_vDCMod4(self):
        """Read float variable 'vDCMod4' (module 4 DC-link voltage); also prints the raw reply."""
        self.read_var(self.index_to_hex(ListVar.index('vDCMod4')))
        reply_msg = self.ser.read(9)
        print(reply_msg)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def Read_vOutMod1(self):
        """Read float variable 'vOutMod1' (module 1 output voltage); also prints the raw reply."""
        self.read_var(self.index_to_hex(ListVar.index('vOutMod1')))
        reply_msg = self.ser.read(9)
        print(reply_msg)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def Read_vOutMod2(self):
        """Read float variable 'vOutMod2' (module 2 output voltage); also prints the raw reply."""
        self.read_var(self.index_to_hex(ListVar.index('vOutMod2')))
        reply_msg = self.ser.read(9)
        print(reply_msg)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def Read_vOutMod3(self):
        """Read float variable 'vOutMod3' (module 3 output voltage); also prints the raw reply."""
        self.read_var(self.index_to_hex(ListVar.index('vOutMod3')))
        reply_msg = self.ser.read(9)
        print(reply_msg)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def Read_vOutMod4(self):
        """Read float variable 'vOutMod4' (module 4 output voltage); also prints the raw reply."""
        self.read_var(self.index_to_hex(ListVar.index('vOutMod4')))
        reply_msg = self.ser.read(9)
        print(reply_msg)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def Read_temp1(self):
        """Read float variable 'temp1' (temperature sensor 1)."""
        self.read_var(self.index_to_hex(ListVar.index('temp1')))
        reply_msg = self.ser.read(9)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def Read_temp2(self):
        """Read float variable 'temp2' (temperature sensor 2)."""
        self.read_var(self.index_to_hex(ListVar.index('temp2')))
        reply_msg = self.ser.read(9)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def Read_temp3(self):
        """Read float variable 'temp3' (temperature sensor 3)."""
        self.read_var(self.index_to_hex(ListVar.index('temp3')))
        reply_msg = self.ser.read(9)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def Read_temp4(self):
        """Read float variable 'temp4' (temperature sensor 4)."""
        self.read_var(self.index_to_hex(ListVar.index('temp4')))
        reply_msg = self.ser.read(9)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def Read_ps_OnOff(self):
        """Read uint16 variable 'ps_OnOff' (power supply on/off state)."""
        self.read_var(self.index_to_hex(ListVar.index('ps_OnOff')))
        reply_msg = self.ser.read(7)
        val = struct.unpack('BBHHB',reply_msg)
        return val[3]
    def Read_ps_OpMode(self):
        """Read uint16 variable 'ps_OpMode' (operation mode)."""
        self.read_var(self.index_to_hex(ListVar.index('ps_OpMode')))
        reply_msg = self.ser.read(7)
        val = struct.unpack('BBHHB',reply_msg)
        return val[3]
    def Read_ps_Remote(self):
        """Read uint16 variable 'ps_Remote' (local/remote control state)."""
        self.read_var(self.index_to_hex(ListVar.index('ps_Remote')))
        reply_msg = self.ser.read(7)
        val = struct.unpack('BBHHB',reply_msg)
        return val[3]
    def Read_ps_OpenLoop(self):
        """Read uint16 variable 'ps_OpenLoop' (open/closed loop state)."""
        self.read_var(self.index_to_hex(ListVar.index('ps_OpenLoop')))
        reply_msg = self.ser.read(7)
        val = struct.unpack('BBHHB',reply_msg)
        return val[3]
    def Read_ps_SoftInterlocks(self):
        """Read the uint32 soft-interlock bitmask, print the name of every active bit, and return the raw bitmask."""
        op_bin = 1
        ActiveSoftInterlocks = []
        # One description per bit position; 'N/A' marks unused bits.
        SoftInterlocksList = ['N/A', 'Sobre-tensao na carga 1', 'N/A',\
                              'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A',\
                              'Sobre-tensao na carga 2', 'N/A', \
                              'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A',\
                              'Sobre-tensao na carga 3', 'N/A', \
                              'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A',\
                              'Sobre-tensao na carga 4', 'N/A', \
                              'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A']
        self.read_var(self.index_to_hex(ListVar.index('ps_SoftInterlocks')))
        reply_msg = self.ser.read(9)
        print(reply_msg)
        val = struct.unpack('BBHIB',reply_msg)
        print('Soft Interlocks ativos:')
        # Only iterate up to the highest set bit of the mask.
        for i in range(len('{0:b}'.format(val[3]))):
            if (val[3] & (op_bin << i)) == 2**i:
                ActiveSoftInterlocks.append(SoftInterlocksList[i])
                print(SoftInterlocksList[i])
        print('---------------------------------------------------------------')
        return val[3]
def Read_ps_HardInterlocks(self):
op_bin = 1
ActiveHardInterlocks = []
HardInterlocksList = ['Sobre-corrente na carga 1', 'N/A', \
'Sobre-tensao no DC-Link do modulo 1', \
'Sub-tensao no DC-Link do modulo 1', \
'Falha no rele de entrada do DC-Link do modulo 1', \
'Falha no fusivel de entrada do DC-Link do modulo 1', \
'Falha nos drivers do modulo 1', \
'Sobre-temperatura no modulo 1', \
'Sobre-corrente na carga 2', 'N/A', \
'Sobre-tensao no DC-Link do modulo 2', \
'Sub-tensao no DC-Link do modulo 2', \
'Falha no rele de entrada do DC-Link do modulo 2', \
'Falha no fusivel de entrada do DC-Link do modulo 2', \
'Falha nos drivers do modulo 2', \
'Sobre-temperatura no modulo 2', \
'Sobre-corrente na carga 3', 'N\A', \
'Sobre-tensao no DC-Link do modulo 3', \
'Sub-tensao no DC-Link do modulo 3', \
'Falha no rele de entrada no DC-Link do modulo 3', \
'Falha no fusivel de entrada do DC-Link do modulo 3', \
'Falha nos drivers do modulo 3', \
'Sobre-temperatura no modulo 3', \
'Sobre-corrente na carga 4', 'N/A', \
'Sobre-tensao no DC-Link do modulo 4', \
'Sub-tensao no DC-Link do modulo 4', \
'Falha no rele de entrada do DC-Link do modulo 4', \
'Falha no fusivel de entrada do DC-Link do modulo 4', \
'Falha nos drivers do modulo 4', \
'Sobre-temperatura no modulo 4']
self.read_var(self.index_to_hex(ListVar.index('ps_HardInterlocks')))
reply_msg = self.ser.read(9)
print(reply_msg)
val = struct.unpack('BBHIB',reply_msg)
print('Hard Interlocks ativos:')
for i in range(len('{0:b}'.format(val[3]))):
if (val[3] & (op_bin << i)) == 2**i:
ActiveHardInterlocks.append(HardInterlocksList[i])
print(HardInterlocksList[i])
print('---------------------------------------------------------------')
return val[3]
    def Read_iRef(self):
        """Read float variable 'iRef' (current reference)."""
        self.read_var(self.index_to_hex(ListVar.index('iRef')))
        reply_msg = self.ser.read(9)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def Read_wfmRef_Gain(self):
        """Read float variable 'wfmRef_Gain'."""
        self.read_var(self.index_to_hex(ListVar.index('wfmRef_Gain')))
        reply_msg = self.ser.read(9)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def Read_wfmRef_Offset(self):
        """Read float variable 'wfmRef_Offset'."""
        self.read_var(self.index_to_hex(ListVar.index('wfmRef_Offset')))
        reply_msg = self.ser.read(9)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def Read_sigGen_Enable(self):
        """Read uint16 variable 'sigGen_Enable'."""
        self.read_var(self.index_to_hex(ListVar.index('sigGen_Enable')))
        reply_msg = self.ser.read(7)
        val = struct.unpack('BBHHB',reply_msg)
        return val[3]
    def Read_sigGen_Type(self):
        """Read uint16 variable 'sigGen_Type'."""
        self.read_var(self.index_to_hex(ListVar.index('sigGen_Type')))
        reply_msg = self.ser.read(7)
        val = struct.unpack('BBHHB',reply_msg)
        return val[3]
    def Read_sigGen_Ncycles(self):
        """Read uint16 variable 'sigGen_Ncycles'."""
        self.read_var(self.index_to_hex(ListVar.index('sigGen_Ncycles')))
        reply_msg = self.ser.read(7)
        val = struct.unpack('BBHHB',reply_msg)
        return val[3]
    def Read_sigGen_PhaseStart(self):
        """Read float variable 'sigGen_PhaseStart'."""
        self.read_var(self.index_to_hex(ListVar.index('sigGen_PhaseStart')))
        reply_msg = self.ser.read(9)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def Read_sigGen_PhaseEnd(self):
        """Read float variable 'sigGen_PhaseEnd'."""
        self.read_var(self.index_to_hex(ListVar.index('sigGen_PhaseEnd')))
        reply_msg = self.ser.read(9)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def Read_sigGen_Freq(self):
        """Read float variable 'sigGen_Freq'."""
        self.read_var(self.index_to_hex(ListVar.index('sigGen_Freq')))
        reply_msg = self.ser.read(9)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def Read_sigGen_Amplitude(self):
        """Read float variable 'sigGen_Amplitude'."""
        self.read_var(self.index_to_hex(ListVar.index('sigGen_Amplitude')))
        reply_msg = self.ser.read(9)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def Read_sigGen_Offset(self):
        """Read float variable 'sigGen_Offset'."""
        self.read_var(self.index_to_hex(ListVar.index('sigGen_Offset')))
        reply_msg = self.ser.read(9)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def Read_sigGen_Aux(self):
        """Read float variable 'sigGen_Aux'."""
        self.read_var(self.index_to_hex(ListVar.index('sigGen_Aux')))
        reply_msg = self.ser.read(9)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def Read_dp_ID(self):
        """Read uint16 variable 'dp_ID' (DSP module id)."""
        self.read_var(self.index_to_hex(ListVar.index('dp_ID')))
        reply_msg = self.ser.read(7)
        val = struct.unpack('BBHHB',reply_msg)
        return val[3]
    def Read_dp_Class(self):
        """Read uint16 variable 'dp_Class' (DSP module class)."""
        self.read_var(self.index_to_hex(ListVar.index('dp_Class')))
        reply_msg = self.ser.read(7)
        val = struct.unpack('BBHHB',reply_msg)
        return val[3]
    def Read_dp_Coeffs(self):
        """Read variable 'dp_Coeffs' and return its 16 float coefficients as a list."""
        self.read_var(self.index_to_hex(ListVar.index('dp_Coeffs')))
        reply_msg = self.ser.read(69)
        val = struct.unpack('BBHffffffffffffffffB',reply_msg)
        return [val[3],val[4],val[5],val[6],val[7],val[8],val[9],val[10],val[11],val[12],val[13],val[14],val[15],val[16],val[17],val[18]]
    def Read_ps_Model(self):
        """Read uint16 variable 'ps_Model'; returns the full unpacked tuple (model code at index 3), unlike the other readers."""
        self.read_var(self.index_to_hex(ListVar.index('ps_Model')))
        reply_msg = self.ser.read(7)
        val = struct.unpack('BBHHB',reply_msg)
        return val
    def read_ps_model(self):
        """Return the power-supply model name, mapping Read_ps_Model()'s code through ListPSModels."""
        reply_msg = self.Read_ps_Model()
        return ListPSModels[reply_msg[3]]
    def Read_wfmRef_PtrBufferStart(self):
        """Read uint32 variable 'wfmRef_PtrBufferStart' (buffer start pointer)."""
        self.read_var(self.index_to_hex(ListVar.index('wfmRef_PtrBufferStart')))
        reply_msg = self.ser.read(9)
        val = struct.unpack('BBHIB',reply_msg)
        return val[3]
    def Read_wfmRef_PtrBufferEnd(self):
        """Read uint32 variable 'wfmRef_PtrBufferEnd' (buffer end pointer)."""
        self.read_var(self.index_to_hex(ListVar.index('wfmRef_PtrBufferEnd')))
        reply_msg = self.ser.read(9)
        val = struct.unpack('BBHIB',reply_msg)
        return val[3]
    def Read_wfmRef_PtrBufferK(self):
        """Read uint32 variable 'wfmRef_PtrBufferK' (current buffer position pointer)."""
        self.read_var(self.index_to_hex(ListVar.index('wfmRef_PtrBufferK')))
        reply_msg = self.ser.read(9)
        val = struct.unpack('BBHIB',reply_msg)
        return val[3]
    def Read_wfmRef_SyncMode(self):
        """Read uint16 variable 'wfmRef_SyncMode'."""
        self.read_var(self.index_to_hex(ListVar.index('wfmRef_SyncMode')))
        reply_msg = self.ser.read(7)
        val = struct.unpack('BBHHB',reply_msg)
        return val[3]
    def Read_iRef1(self):
        """Read float BSMP variable 45 (iRef1)."""
        self.read_var(self.index_to_hex(45))
        reply_msg = self.ser.read(9)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def Read_iRef2(self):
        """Read float BSMP variable 46 (iRef2)."""
        self.read_var(self.index_to_hex(46))
        reply_msg = self.ser.read(9)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
    def Read_iRef3(self):
        """Read float BSMP variable 47 (iRef3)."""
        self.read_var(self.index_to_hex(47))
        reply_msg = self.ser.read(9)
        val = struct.unpack('BBHfB',reply_msg)
        return val[3]
def Read_iRef4(self):
self.read_var(self.index_to_hex(48))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_counterSetISlowRefx4(self):
self.read_var(self.index_to_hex(49))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
======================================================================
Métodos de Escrita de Valores das Variáveis BSMP
O retorno do método são os bytes de retorno da mensagem
======================================================================
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
def Write_sigGen_Freq(self,float_value):
hex_float = self.float_to_hex(float_value)
send_packet = self.ComWriteVar+self.WriteFloatSizePayload+self.index_to_hex(ListVar.index('sigGen_Freq'))+hex_float
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(5)
def Write_sigGen_Amplitude(self,float_value):
hex_float = self.float_to_hex(float_value)
send_packet = self.ComWriteVar+self.WriteFloatSizePayload+self.index_to_hex(ListVar.index('sigGen_Amplitude'))+hex_float
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(5)
def Write_sigGen_Offset(self,float_value):
hex_float = self.float_to_hex(float_value)
send_packet = self.ComWriteVar+self.WriteFloatSizePayload+self.index_to_hex(ListVar.index('sigGen_Offset'))+hex_float
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(5)
def Write_sigGen_Aux(self,float_value):
hex_float = self.float_to_hex(float_value)
send_packet = self.ComWriteVar+self.WriteFloatSizePayload+self.index_to_hex(ListVar.index('sigGen_Aux'))+hex_float
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(5)
def Write_dp_ID(self,double_value):
hex_double = self.double_to_hex(double_value)
send_packet = self.ComWriteVar+self.WriteDoubleSizePayload+self.index_to_hex(ListVar.index('dp_ID'))+hex_double
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(5)
def Write_dp_Class(self,double_value):
hex_double = self.double_to_hex(double_value)
send_packet = self.ComWriteVar+self.WriteDoubleSizePayload+self.index_to_hex(ListVar.index('dp_Class'))+hex_double
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(5)
def Write_dp_Coeffs(self,list_float):
hex_float_list = []
#list_full = list_float[:]
#while(len(list_full) < self.DP_MODULE_MAX_COEFF):
# list_full.append(0)
list_full = [0 for i in range(self.DP_MODULE_MAX_COEFF)]
list_full[:len(list_float)] = list_float[:]
for float_value in list_full:
hex_float = self.float_to_hex(float(float_value))
hex_float_list.append(hex_float)
str_float_list = ''.join(hex_float_list)
payload_size = self.size_to_hex(1+4*self.DP_MODULE_MAX_COEFF) #Payload: ID + 16floats
send_packet = self.ComWriteVar+payload_size+self.index_to_hex(ListVar.index('dp_Coeffs'))+str_float_list
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(5)
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
======================================================================
Métodos de Escrita de Curvas BSMP
O retorno do método são os bytes de retorno da mensagem
======================================================================
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
def Send_wfmRef_Curve(self,block_idx,data):
block_hex = struct.pack('>H',block_idx).decode('ISO-8859-1')
val = []
for k in range(0,len(data)):
val.append(self.float_to_hex(float(data[k])))
payload_size = struct.pack('>H', (len(val)*4)+3).decode('ISO-8859-1')
curva_hex = ''.join(val)
send_packet = self.ComSendWfmRef+payload_size+self.index_to_hex(ListCurv.index('wfmRef_Curve'))+block_hex+curva_hex
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(5)
def Recv_wfmRef_Curve(self,block_idx):
block_hex = struct.pack('>H',block_idx).decode('ISO-8859-1')
payload_size = self.size_to_hex(1+2) #Payload: ID+Block_index
send_packet = self.ComRequestCurve+payload_size+self.index_to_hex(ListCurv.index('wfmRef_Curve'))+block_hex
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
recv_msg = self.ser.read(1+1+2+1+2+8192+1) #Address+Command+Size+ID+Block_idx+data+checksum
val = []
for k in range(7,len(recv_msg)-1,4):
val.append(struct.unpack('f',recv_msg[k:k+4]))
return val
def Recv_samplesBuffer(self):
block_hex = struct.pack('>H',0).decode('ISO-8859-1')
payload_size = self.size_to_hex(1+2) #Payload: ID+Block_index
send_packet = self.ComRequestCurve+payload_size+self.index_to_hex(ListCurv.index('samplesBuffer'))+block_hex
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
recv_msg = self.ser.read(1+1+2+1+2+16384+1) #Address+Command+Size+ID+Block_idx+data+checksum
val = []
try:
for k in range(7,len(recv_msg)-1,4):
val.extend(struct.unpack('f',recv_msg[k:k+4]))
except:
pass
return val
def Send_fullwfmRef_Curve(self,block_idx,data):
block_hex = struct.pack('>H',block_idx).decode('ISO-8859-1')
val = []
for k in range(0,len(data)):
val.append(self.float_to_hex(float(data[k])))
payload_size = struct.pack('>H', (len(val)*4)+3).decode('ISO-8859-1')
curva_hex = ''.join(val)
send_packet = self.ComSendWfmRef+payload_size+self.index_to_hex(ListCurv.index('fullwfmRef_Curve'))+block_hex+curva_hex
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(5)
def Recv_fullwfmRef_Curve(self,block_idx):
block_hex = struct.pack('>H',block_idx).decode('ISO-8859-1')
payload_size = self.size_to_hex(1+2) #Payload: ID+Block_index
send_packet = self.ComRequestCurve+payload_size+self.index_to_hex(ListCurv.index('fullwfmRef_Curve'))+block_hex
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
recv_msg = self.ser.read(1+1+2+1+2+16384+1) #Address+Command+Size+ID+Block_idx+data+checksum
val = []
for k in range(7,len(recv_msg)-1,4):
val.append(struct.unpack('f',recv_msg[k:k+4]))
return val
def Recv_samplesBuffer_blocks(self,block_idx):
block_hex = struct.pack('>H',block_idx).decode('ISO-8859-1')
payload_size = self.size_to_hex(1+2) #Payload: ID+Block_index
send_packet = self.ComRequestCurve+payload_size+self.index_to_hex(ListCurv.index('samplesBuffer_blocks'))+block_hex
send_msg = self.checksum(self.SlaveAdd+send_packet)
#t0 = time.time()
self.ser.write(send_msg.encode('ISO-8859-1'))
recv_msg = self.ser.read(1+1+2+1+2+1024+1) #Address+Command+Size+ID+Block_idx+data+checksum
#print(time.time()-t0)
#print(recv_msg)
val = []
for k in range(7,len(recv_msg)-1,4):
val.extend(struct.unpack('f',recv_msg[k:k+4]))
return val
def Recv_samplesBuffer_allblocks(self):
buff = []
#self.DisableSamplesBuffer()
for i in range(0,16):
#t0 = time.time()
buff.extend(self.Recv_samplesBuffer_blocks(i))
#print(time.time()-t0)
#self.EnableSamplesBuffer()
return buff
def read_curve_block(self,curve_id,block_id):
block_hex = struct.pack('>H',block_id).decode('ISO-8859-1')
payload_size = self.size_to_hex(1+2) #Payload: curve_id + block_id
send_packet = self.ComRequestCurve+payload_size+self.index_to_hex(curve_id)+block_hex
send_msg = self.checksum(self.SlaveAdd+send_packet)
#t0 = time.time()
self.ser.reset_input_buffer()
self.ser.write(send_msg.encode('ISO-8859-1'))
recv_msg = self.ser.read(1+1+2+1+2+size_curve_block[curve_id]+1) #Address+Command+Size+ID+Block_idx+data+checksum
#print(time.time()-t0)
#print(recv_msg)
val = []
for k in range(7,len(recv_msg)-1,4):
val.extend(struct.unpack('f',recv_msg[k:k+4]))
return val
def write_curve_block(self,curve_id,block_id,data):
block_hex = struct.pack('>H',block_id).decode('ISO-8859-1')
val = []
for k in range(0,len(data)):
val.append(self.float_to_hex(float(data[k])))
payload_size = struct.pack('>H', (len(val)*4)+3).decode('ISO-8859-1')
curva_hex = ''.join(val)
send_packet = self.ComSendWfmRef+payload_size+self.index_to_hex(curve_id)+block_hex+curva_hex
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(5)
def write_wfmref(self,curve,data):
#curve = ListCurv_v2_1.index('wfmref')
block_size = int(size_curve_block[curve]/4)
print(block_size)
blocks = [data[x:x+block_size] for x in range(0, len(data), block_size)]
ps_status = self.read_ps_status()
wfmref_selected = self.read_bsmp_variable(14,'uint16_t')
if( (wfmref_selected == curve) and (ps_status['state'] == 'RmpWfm' or ps_status['state'] == 'MigWfm') ):
print("\n The specified curve ID is currently selected and PS is on " + ps_status['state'] + " state. Choose a different curve ID to proceed.\n")
else:
for block_id in range(len(blocks)):
self.write_curve_block(curve, block_id, blocks[block_id])
print(blocks[block_id])
def read_buf_samples_ctom(self):
buf = []
curve_id = ListCurv_v2_1.index('buf_samples_ctom')
ps_status = self.read_ps_status()
if ps_status['model'] == 'FBP':
for i in range(num_blocks_curves_fbp[curve_id]):
buf.extend(self.read_curve_block(curve_id,i))
else:
for i in range(num_blocks_curves_fax[curve_id]):
buf.extend(self.read_curve_block(curve_id,i))
return buf
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
======================================================================
Funções Serial
======================================================================
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
def Connect(self,port='COM2',baud=6000000):
try:
SerialDRS.ser = serial.Serial(port,baud,timeout=1) #port format should be 'COM'+number
return True
except:
return False
def Disconnect(self):
if (self.ser.isOpen()):
try:
self.ser.close()
return True
except:
return False
def SetSlaveAdd(self,address):
self.SlaveAdd = struct.pack('B',address).decode('ISO-8859-1')
def GetSlaveAdd(self):
return struct.unpack('B',self.SlaveAdd.encode())[0]
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
======================================================================
Funções auxiliares
======================================================================
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
    def read_vars_common(self, print_all = False):
        """Print status common to all PS models: model, state, loop state,
        setpoint and reference. With print_all=True, also print the raw
        status dict, slowref/sync counters, and siggen/wfmref/scope vars."""
        loop_state = ["Closed Loop","Open Loop"]
        ps_status = self.read_ps_status()
        if ps_status['open_loop'] == 0:
            # Closed loop: AC/DC rectifier models regulate a voltage, the
            # remaining models regulate a current.
            if (ps_status['model'] == 'FAC_ACDC') or (ps_status['model'] == 'FAC_2S_ACDC') or (ps_status['model'] == 'FAC_2P4S_ACDC'):
                setpoint_unit = " V"
            else:
                setpoint_unit = " A"
        else:
            # Open loop: the setpoint is a duty-cycle percentage.
            setpoint_unit = " %"
        print("\nPS Model: " + ps_status['model'])
        print("State: " + ps_status['state'])
        print("Loop State: " + loop_state[ps_status['open_loop']])
        # BSMP vars 1 and 2 are the setpoint and reference, respectively.
        print("\nSetpoint: " + str(round(self.read_bsmp_variable(1,'float'),3)) + setpoint_unit)
        print("Reference: " + str(round(self.read_bsmp_variable(2,'float'),3)) + setpoint_unit)
        if print_all:
            print(self.read_ps_status())
            print("\nCounter set_slowref: " + str(round(self.read_bsmp_variable(4,'uint32_t'),3)))
            print("Counter sync pulse: " + str(round(self.read_bsmp_variable(5,'uint32_t'),3)))
            self.get_siggen_vars()
            self.get_wfmref_vars(0)
            self.get_wfmref_vars(1)
            self.get_scope_vars()
def decode_interlocks(self,reg_interlocks,list_interlocks):
active_interlocks = []
for i in range(32):
if(reg_interlocks & (1 << i)):
active_interlocks.append(list_interlocks[i])
print('\t' + list_interlocks[i])
return active_interlocks
    def read_vars_fbp(self, n = 1, dt = 0.5):
        """Print FBP status `n` times, sleeping `dt` seconds between reads:
        common vars, decoded soft/hard interlocks, and analog measurements."""
        # NOTE(review): the bare except silently ends the monitoring loop on
        # ANY error, including Ctrl-C — consider narrowing it.
        try:
            for i in range(n):
                print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
                self.read_vars_common()
                soft_itlks = self.read_bsmp_variable(31,'uint32_t')
                print("\nSoft Interlocks: " + str(soft_itlks))
                if(soft_itlks):
                    self.decode_interlocks(soft_itlks, list_fbp_soft_interlocks)
                    print('')
                hard_itlks = self.read_bsmp_variable(32,'uint32_t')
                print("Hard Interlocks: " + str(hard_itlks))
                if(hard_itlks):
                    self.decode_interlocks(hard_itlks, list_fbp_hard_interlocks)
                # Resistance and power are derived from vars 33/34 (I and V).
                print("\nLoad Current: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " A")
                print("Load Voltage: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " V")
                print("Load Resistance: " + str(abs(round(self.read_bsmp_variable(34,'float') / self.read_bsmp_variable(33,'float'),3))) + " Ohm")
                print("Load Power: " + str(abs(round(self.read_bsmp_variable(34,'float') * self.read_bsmp_variable(33,'float'),3))) + " W")
                print("DC-Link Voltage: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " V")
                print("Heat-Sink Temp: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " °C")
                print("Duty-Cycle: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " %")
                time.sleep(dt)
        except:
            pass
    def read_vars_fbp_dclink(self, n = 1, dt = 0.5):
        """Print FBP DC-link status `n` times, sleeping `dt` seconds between
        reads: common vars, hard interlocks, module voltages and pot tap."""
        # NOTE(review): the bare except silently ends the monitoring loop on
        # ANY error, including Ctrl-C — consider narrowing it.
        try:
            for i in range(n):
                print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
                self.read_vars_common()
                hard_itlks = self.read_bsmp_variable(32,'uint32_t')
                print("\nHard Interlocks: " + str(hard_itlks))
                if(hard_itlks):
                    self.decode_interlocks(hard_itlks, list_fbp_dclink_hard_interlocks)
                print("\nModules status: " + str(round(self.read_bsmp_variable(33,'uint32_t'),3)))
                print("DC-Link Voltage: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " V")
                print("PS1 Voltage: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " V")
                print("PS2 Voltage: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " V")
                print("PS3 Voltage: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " V")
                print("Dig Pot Tap: " + str(round(self.read_bsmp_variable(38,'uint8_t'),3)))
                time.sleep(dt)
        except:
            pass
    def read_vars_fac_acdc(self, n = 1, dt = 0.5, iib = 1):
        """Print FAC AC/DC status `n` times, sleeping `dt` seconds between
        reads. With iib truthy, also dump the IIB IS and IIB Cmd boards."""
        # NOTE(review): the try/except wrapper is commented out here, unlike
        # the sibling read_vars_* methods — errors propagate to the caller.
        #try:
        for i in range(n):
            print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
            self.read_vars_common()
            soft_itlks = self.read_bsmp_variable(31,'uint32_t')
            print("\nSoft Interlocks: " + str(soft_itlks))
            if(soft_itlks):
                self.decode_interlocks(soft_itlks, list_fac_acdc_soft_interlocks)
                print('')
            hard_itlks = self.read_bsmp_variable(32,'uint32_t')
            print("Hard Interlocks: " + str(hard_itlks))
            if(hard_itlks):
                self.decode_interlocks(hard_itlks, list_fac_acdc_hard_interlocks)
            iib_is_itlks = self.read_bsmp_variable(45,'uint32_t')
            print("\nIIB IS Interlocks: " + str(iib_is_itlks))
            if(iib_is_itlks):
                self.decode_interlocks(iib_is_itlks, list_fac_acdc_iib_is_interlocks)
            iib_is_alarms = self.read_bsmp_variable(46,'uint32_t')
            print("IIB IS Alarms: " + str(iib_is_alarms))
            if(iib_is_alarms):
                self.decode_interlocks(iib_is_alarms, list_fac_acdc_iib_is_alarms)
            iib_cmd_itlks = self.read_bsmp_variable(57,'uint32_t')
            print("\nIIB Cmd Interlocks: " + str(iib_cmd_itlks))
            if(iib_cmd_itlks):
                self.decode_interlocks(iib_cmd_itlks, list_fac_acdc_iib_cmd_interlocks)
            iib_cmd_alarms = self.read_bsmp_variable(58,'uint32_t')
            print("IIB Cmd Alarms: " + str(iib_cmd_alarms))
            if(iib_cmd_alarms):
                self.decode_interlocks(iib_cmd_alarms, list_fac_acdc_iib_cmd_alarms)
            print("\nCapBank Voltage: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " V")
            print("Rectifier Current: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
            print("Duty-Cycle: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " %")
            if(iib):
                # IIB IS board measurements (BSMP vars 36-46).
                print("\nIIB IS Input Current: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " A")
                print("IIB IS Input Voltage: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " V")
                print("IIB IS IGBT Temp: " + str(round(self.read_bsmp_variable(38,'float'),3)) + " °C")
                print("IIB IS Driver Voltage: " + str(round(self.read_bsmp_variable(39,'float'),3)) + " V")
                print("IIB IS Driver Current: " + str(round(self.read_bsmp_variable(40,'float'),3)) + " A")
                print("IIB IS Inductor Temp: " + str(round(self.read_bsmp_variable(41,'float'),3)) + " °C")
                print("IIB IS Heat-Sink Temp: " + str(round(self.read_bsmp_variable(42,'float'),3)) + " °C")
                print("IIB IS Board Temp: " + str(round(self.read_bsmp_variable(43,'float'),3)) + " °C")
                print("IIB IS Board RH: " + str(round(self.read_bsmp_variable(44,'float'),3)) + " %")
                print("IIB IS Interlocks: " + str(round(self.read_bsmp_variable(45,'uint32_t'),3)))
                print("IIB IS Alarms: " + str(round(self.read_bsmp_variable(46,'uint32_t'),3)))
                # IIB Cmd board measurements (BSMP vars 47-58).
                print("\nIIB Cmd Load Voltage: " + str(round(self.read_bsmp_variable(47,'float'),3)) + " V")
                print("IIB Cmd CapBank Voltage: " + str(round(self.read_bsmp_variable(48,'float'),3)) + " V")
                print("IIB Cmd Rectifier Inductor Temp: " + str(round(self.read_bsmp_variable(49,'float'),3)) + " °C")
                print("IIB Cmd Rectifier Heat-Sink Temp: " + str(round(self.read_bsmp_variable(50,'float'),3)) + " °C")
                print("IIB Cmd External Boards Voltage: " + str(round(self.read_bsmp_variable(51,'float'),3)) + " V")
                print("IIB Cmd Auxiliary Board Current: " + str(round(self.read_bsmp_variable(52,'float'),3)) + " A")
                print("IIB Cmd IDB Board Current: " + str(round(self.read_bsmp_variable(53,'float'),3)) + " A")
                print("IIB Cmd Ground Leakage Current: " + str(round(self.read_bsmp_variable(54,'float'),3)) + " A")
                print("IIB Cmd Board Temp: " + str(round(self.read_bsmp_variable(55,'float'),3)) + " °C")
                print("IIB Cmd Board RH: " + str(round(self.read_bsmp_variable(56,'float'),3)) + " %")
                print("IIB Cmd Interlocks: " + str(round(self.read_bsmp_variable(57,'uint32_t'),3)))
                print("IIB Cmd Alarms: " + str(round(self.read_bsmp_variable(58,'uint32_t'),3)))
            time.sleep(dt)
        #except:
        #    pass
    def read_vars_fac_dcdc(self, n = 1, dt = 0.5, iib = 1):
        """Print FAC DC/DC status `n` times, sleeping `dt` seconds between
        reads. With iib truthy, also dump the IIB board measurements."""
        # NOTE(review): the bare except silently ends the monitoring loop on
        # ANY error, including Ctrl-C — consider narrowing it.
        try:
            for i in range(n):
                print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
                self.read_vars_common()
                print("\nSync Pulse Counter: " + str(round(self.read_bsmp_variable(5,'uint32_t'),3)))
                # Index derived from wfmRef pointer K (var 20) minus buffer
                # start (var 18), two bytes per sample.
                print("WfmRef Index: " + str( (round(self.read_bsmp_variable(20,'uint32_t'),3) - round(self.read_bsmp_variable(18,'uint32_t'),3))/2 + 1))
                soft_itlks = self.read_bsmp_variable(31,'uint32_t')
                print("\nSoft Interlocks: " + str(soft_itlks))
                if(soft_itlks):
                    self.decode_interlocks(soft_itlks, list_fac_dcdc_soft_interlocks)
                    print('')
                hard_itlks = self.read_bsmp_variable(32,'uint32_t')
                print("Hard Interlocks: " + str(hard_itlks))
                if(hard_itlks):
                    self.decode_interlocks(hard_itlks, list_fac_dcdc_hard_interlocks)
                iib_itlks = self.read_bsmp_variable(51,'uint32_t')
                print("\nIIB Interlocks: " + str(iib_itlks))
                if(iib_itlks):
                    self.decode_interlocks(iib_itlks, list_fac_dcdc_iib_interlocks)
                iib_alarms = self.read_bsmp_variable(52,'uint32_t')
                print("IIB Alarms: " + str(iib_alarms))
                if(iib_alarms):
                    self.decode_interlocks(iib_alarms, list_fac_dcdc_iib_alarms)
                print("\nLoad Current: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " A")
                print("Load Current DCCT 1: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
                print("Load Current DCCT 2: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " A")
                print("\nCapBank Voltage: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " V")
                print("\nDuty-Cycle: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " %")
                if(iib):
                    # IIB board measurements (BSMP vars 38-52).
                    print("\nIIB CapBank Voltage: " + str(round(self.read_bsmp_variable(38,'float'),3)) + " V")
                    print("IIB Input Current: " + str(round(self.read_bsmp_variable(39,'float'),3)) + " A")
                    print("IIB Output Current: " + str(round(self.read_bsmp_variable(40,'float'),3)) + " A")
                    print("IIB IGBT Leg 1 Temp: " + str(round(self.read_bsmp_variable(41,'float'),3)) + " °C")
                    print("IIB IGBT Leg 2 Temp: " + str(round(self.read_bsmp_variable(42,'float'),3)) + " °C")
                    print("IIB Inductor Temp: " + str(round(self.read_bsmp_variable(43,'float'),3)) + " °C")
                    print("IIB Heat-Sink Temp: " + str(round(self.read_bsmp_variable(44,'float'),3)) + " °C")
                    print("IIB Driver Voltage: " + str(round(self.read_bsmp_variable(45,'float'),3)) + " V")
                    print("IIB Driver Current 1: " + str(round(self.read_bsmp_variable(46,'float'),3)) + " A")
                    print("IIB Driver Current 2: " + str(round(self.read_bsmp_variable(47,'float'),3)) + " A")
                    print("IIB Ground Leakage Current: " + str(round(self.read_bsmp_variable(48,'float'),3)) + " A")
                    print("IIB Board Temp: " + str(round(self.read_bsmp_variable(49,'float'),3)) + " °C")
                    print("IIB Board RH: " + str(round(self.read_bsmp_variable(50,'float'),3)) + " %")
                    print("IIB Interlocks: " + str(round(self.read_bsmp_variable(51,'uint32_t'),3)))
                    print("IIB Alarms: " + str(round(self.read_bsmp_variable(52,'uint32_t'),3)))
                time.sleep(dt)
        except:
            pass
    def read_vars_fac_dcdc_ema(self, n = 1, dt = 0.5, iib = 0):
        """Print FAC DC/DC EMA status `n` times, sleeping `dt` seconds between
        reads. With iib truthy, also dump the IIB board measurements."""
        # NOTE(review): the bare except silently ends the monitoring loop on
        # ANY error, including Ctrl-C — consider narrowing it.
        try:
            for i in range(n):
                print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
                self.read_vars_common()
                soft_itlks = self.read_bsmp_variable(31,'uint32_t')
                print("\nSoft Interlocks: " + str(soft_itlks))
                if(soft_itlks):
                    self.decode_interlocks(soft_itlks, list_fac_dcdc_ema_soft_interlocks)
                    print('')
                hard_itlks = self.read_bsmp_variable(32,'uint32_t')
                print("Hard Interlocks: " + str(hard_itlks))
                if(hard_itlks):
                    self.decode_interlocks(hard_itlks, list_fac_dcdc_ema_hard_interlocks)
                iib_itlks = self.read_bsmp_variable(49,'uint32_t')
                print("IIB Interlocks: " + str(iib_itlks))
                if(iib_itlks):
                    self.decode_interlocks(iib_itlks, list_fac_dcdc_ema_iib_interlocks)
                iib_alarms = self.read_bsmp_variable(50,'uint32_t')
                print("IIB Alarms: " + str(iib_alarms))
                if(iib_alarms):
                    self.decode_interlocks(iib_alarms, list_fac_dcdc_ema_iib_alarms)
                print("\nLoad Current: " + str(round(self.read_bsmp_variable(33,'float'),3)))
                print("DC-Link Voltage: " + str(round(self.read_bsmp_variable(34,'float'),3)))
                print("\nDuty-Cycle: " + str(round(self.read_bsmp_variable(35,'float'),3)))
                if(iib):
                    # IIB board measurements (BSMP vars 36-50).
                    print("\nIIB Input Voltage: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " V")
                    print("IIB Input Current: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " A")
                    print("IIB Output Current: " + str(round(self.read_bsmp_variable(38,'float'),3)) + " A")
                    print("IIB IGBT 1 Temp: " + str(round(self.read_bsmp_variable(39,'float'),3)) + " °C")
                    print("IIB IGBT 2 Temp: " + str(round(self.read_bsmp_variable(40,'float'),3)) + " °C")
                    print("IIB Inductor Temp: " + str(round(self.read_bsmp_variable(41,'float'),3)) + " °C")
                    print("IIB Heat-Sink Temp: " + str(round(self.read_bsmp_variable(42,'float'),3)) + " °C")
                    print("IIB Driver Voltage: " + str(round(self.read_bsmp_variable(43,'float'),3)) + " V")
                    print("IIB Driver Current 1: " + str(round(self.read_bsmp_variable(44,'float'),3)) + " A")
                    print("IIB Driver Current 2: " + str(round(self.read_bsmp_variable(45,'float'),3)) + " A")
                    print("IIB Ground Leakage Current: " + str(round(self.read_bsmp_variable(46,'float'),3)) + " A")
                    print("IIB Board Temp: " + str(round(self.read_bsmp_variable(47,'float'),3)) + " °C")
                    print("IIB Board RH: " + str(round(self.read_bsmp_variable(48,'float'),3)) + " %")
                    print("IIB Interlocks: " + str(round(self.read_bsmp_variable(49,'uint32_t'),3)))
                    print("IIB Alarms: " + str(round(self.read_bsmp_variable(50,'uint32_t'),3)))
                time.sleep(dt)
        except:
            pass
    def read_vars_fac_2s_acdc(self, n = 1, add_mod_a = 2, dt = 0.5, iib = 0):
        """Print FAC 2S AC/DC status for both modules, `n` times with `dt`
        seconds between reads.

        Module A is addressed at `add_mod_a`, module B at `add_mod_a`+1; the
        previous slave address is restored afterwards (also on error). With
        iib truthy, IIB IS and IIB Cmd board measurements are printed too.
        """
        old_add = self.GetSlaveAdd()
        # NOTE(review): the bare except restores the address but otherwise
        # swallows every error, including Ctrl-C — consider narrowing it.
        try:
            for i in range(n):
                self.SetSlaveAdd(add_mod_a)
                print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
                self.read_vars_common()
                print('\n *** MODULE A ***')
                soft_itlks = self.read_bsmp_variable(31,'uint32_t')
                print("\nSoft Interlocks: " + str(soft_itlks))
                if(soft_itlks):
                    self.decode_interlocks(soft_itlks, list_fac_2s_acdc_soft_interlocks)
                    print('')
                hard_itlks = self.read_bsmp_variable(32,'uint32_t')
                print("Hard Interlocks: " + str(hard_itlks))
                if(hard_itlks):
                    self.decode_interlocks(hard_itlks, list_fac_2s_acdc_hard_interlocks)
                iib_is_itlks = self.read_bsmp_variable(45,'uint32_t')
                print("\nIIB IS Interlocks: " + str(iib_is_itlks))
                if(iib_is_itlks):
                    self.decode_interlocks(iib_is_itlks, list_fac_2s_acdc_iib_is_interlocks)
                iib_is_alarms = self.read_bsmp_variable(46,'uint32_t')
                print("IIB IS Alarms: " + str(iib_is_alarms))
                if(iib_is_alarms):
                    self.decode_interlocks(iib_is_alarms, list_fac_2s_acdc_iib_is_alarms)
                iib_cmd_itlks = self.read_bsmp_variable(57,'uint32_t')
                print("\nIIB Cmd Interlocks: " + str(iib_cmd_itlks))
                if(iib_cmd_itlks):
                    self.decode_interlocks(iib_cmd_itlks, list_fac_2s_acdc_iib_cmd_interlocks)
                iib_cmd_alarms = self.read_bsmp_variable(58,'uint32_t')
                print("IIB Cmd Alarms: " + str(iib_cmd_alarms))
                if(iib_cmd_alarms):
                    self.decode_interlocks(iib_cmd_alarms, list_fac_2s_acdc_iib_cmd_alarms)
                print("\nCapBank Voltage: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " V")
                print("Rectifier Current: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
                print("Duty-Cycle: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " %")
                if(iib):
                    # Module A: IIB IS (vars 36-46) and IIB Cmd (vars 47-58).
                    print("\nIIB IS Input Current: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " A")
                    print("IIB IS Input Voltage: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " V")
                    print("IIB IS IGBT Temp: " + str(round(self.read_bsmp_variable(38,'float'),3)) + " °C")
                    print("IIB IS Driver Voltage: " + str(round(self.read_bsmp_variable(39,'float'),3)) + " V")
                    print("IIB IS Driver Current: " + str(round(self.read_bsmp_variable(40,'float'),3)) + " A")
                    print("IIB IS Inductor Temp: " + str(round(self.read_bsmp_variable(41,'float'),3)) + " °C")
                    print("IIB IS Heat-Sink Temp: " + str(round(self.read_bsmp_variable(42,'float'),3)) + " °C")
                    print("IIB IS Board Temp: " + str(round(self.read_bsmp_variable(43,'float'),3)) + " °C")
                    print("IIB IS Board RH: " + str(round(self.read_bsmp_variable(44,'float'),3)) + " %")
                    print("IIB IS Interlocks: " + str(round(self.read_bsmp_variable(45,'uint32_t'),3)))
                    print("IIB IS Alarms: " + str(round(self.read_bsmp_variable(46,'uint32_t'),3)))
                    print("\nIIB Cmd Load Voltage: " + str(round(self.read_bsmp_variable(47,'float'),3)) + " V")
                    print("IIB Cmd CapBank Voltage: " + str(round(self.read_bsmp_variable(48,'float'),3)) + " V")
                    print("IIB Cmd Rectifier Inductor Temp: " + str(round(self.read_bsmp_variable(49,'float'),3)) + " °C")
                    print("IIB Cmd Rectifier Heat-Sink Temp: " + str(round(self.read_bsmp_variable(50,'float'),3)) + " °C")
                    print("IIB Cmd External Boards Voltage: " + str(round(self.read_bsmp_variable(51,'float'),3)) + " V")
                    print("IIB Cmd Auxiliary Board Current: " + str(round(self.read_bsmp_variable(52,'float'),3)) + " A")
                    print("IIB Cmd IDB Board Current: " + str(round(self.read_bsmp_variable(53,'float'),3)) + " A")
                    print("IIB Cmd Ground Leakage Current: " + str(round(self.read_bsmp_variable(54,'float'),3)) + " A")
                    print("IIB Cmd Board Temp: " + str(round(self.read_bsmp_variable(55,'float'),3)) + " °C")
                    print("IIB Cmd Board RH: " + str(round(self.read_bsmp_variable(56,'float'),3)) + " %")
                    print("IIB Cmd Interlocks: " + str(round(self.read_bsmp_variable(57,'uint32_t'),3)))
                    print("IIB Cmd Alarms: " + str(round(self.read_bsmp_variable(58,'uint32_t'),3)))
                # Switch to module B and repeat the same readout.
                self.SetSlaveAdd(add_mod_a+1)
                print('\n *** MODULE B ***')
                soft_itlks = self.read_bsmp_variable(31,'uint32_t')
                print("\nSoft Interlocks: " + str(soft_itlks))
                if(soft_itlks):
                    self.decode_interlocks(soft_itlks, list_fac_2s_acdc_soft_interlocks)
                    print('')
                hard_itlks = self.read_bsmp_variable(32,'uint32_t')
                print("Hard Interlocks: " + str(hard_itlks))
                if(hard_itlks):
                    self.decode_interlocks(hard_itlks, list_fac_2s_acdc_hard_interlocks)
                iib_is_itlks = self.read_bsmp_variable(45,'uint32_t')
                print("\nIIB IS Interlocks: " + str(iib_is_itlks))
                if(iib_is_itlks):
                    self.decode_interlocks(iib_is_itlks, list_fac_2s_acdc_iib_is_interlocks)
                iib_is_alarms = self.read_bsmp_variable(46,'uint32_t')
                print("IIB IS Alarms: " + str(iib_is_alarms))
                if(iib_is_alarms):
                    self.decode_interlocks(iib_is_alarms, list_fac_2s_acdc_iib_is_alarms)
                iib_cmd_itlks = self.read_bsmp_variable(57,'uint32_t')
                print("\nIIB Cmd Interlocks: " + str(iib_cmd_itlks))
                if(iib_cmd_itlks):
                    self.decode_interlocks(iib_cmd_itlks, list_fac_2s_acdc_iib_cmd_interlocks)
                iib_cmd_alarms = self.read_bsmp_variable(58,'uint32_t')
                print("IIB Cmd Alarms: " + str(iib_cmd_alarms))
                if(iib_cmd_alarms):
                    self.decode_interlocks(iib_cmd_alarms, list_fac_2s_acdc_iib_cmd_alarms)
                print("\nCapBank Voltage: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " V")
                print("Rectifier Current: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
                print("Duty-Cycle: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " %")
                if(iib):
                    # Module B: IIB IS (vars 36-46) and IIB Cmd (vars 47-58).
                    print("\nIIB IS Input Current: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " A")
                    print("IIB IS Input Voltage: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " V")
                    print("IIB IS IGBT Temp: " + str(round(self.read_bsmp_variable(38,'float'),3)) + " °C")
                    print("IIB IS Driver Voltage: " + str(round(self.read_bsmp_variable(39,'float'),3)) + " V")
                    print("IIB IS Driver Current: " + str(round(self.read_bsmp_variable(40,'float'),3)) + " A")
                    print("IIB IS Inductor Temp: " + str(round(self.read_bsmp_variable(41,'float'),3)) + " °C")
                    print("IIB IS Heat-Sink Temp: " + str(round(self.read_bsmp_variable(42,'float'),3)) + " °C")
                    print("IIB IS Board Temp: " + str(round(self.read_bsmp_variable(43,'float'),3)) + " °C")
                    print("IIB IS Board RH: " + str(round(self.read_bsmp_variable(44,'float'),3)) + " %")
                    print("IIB IS Interlocks: " + str(round(self.read_bsmp_variable(45,'uint32_t'),3)))
                    print("IIB IS Alarms: " + str(round(self.read_bsmp_variable(46,'uint32_t'),3)))
                    print("\nIIB Cmd Load Voltage: " + str(round(self.read_bsmp_variable(47,'float'),3)) + " V")
                    print("IIB Cmd CapBank Voltage: " + str(round(self.read_bsmp_variable(48,'float'),3)) + " V")
                    print("IIB Cmd Rectifier Inductor Temp: " + str(round(self.read_bsmp_variable(49,'float'),3)) + " °C")
                    print("IIB Cmd Rectifier Heat-Sink Temp: " + str(round(self.read_bsmp_variable(50,'float'),3)) + " °C")
                    print("IIB Cmd External Boards Voltage: " + str(round(self.read_bsmp_variable(51,'float'),3)) + " V")
                    print("IIB Cmd Auxiliary Board Current: " + str(round(self.read_bsmp_variable(52,'float'),3)) + " A")
                    print("IIB Cmd IDB Board Current: " + str(round(self.read_bsmp_variable(53,'float'),3)) + " A")
                    print("IIB Cmd Ground Leakage Current: " + str(round(self.read_bsmp_variable(54,'float'),3)) + " A")
                    print("IIB Cmd Board Temp: " + str(round(self.read_bsmp_variable(55,'float'),3)) + " °C")
                    print("IIB Cmd Board RH: " + str(round(self.read_bsmp_variable(56,'float'),3)) + " %")
                    print("IIB Cmd Interlocks: " + str(round(self.read_bsmp_variable(57,'uint32_t'),3)))
                    print("IIB Cmd Alarms: " + str(round(self.read_bsmp_variable(58,'uint32_t'),3)))
                time.sleep(dt)
            self.SetSlaveAdd(old_add)
        except:
            self.SetSlaveAdd(old_add)
def read_vars_fac_2s_dcdc(self, n = 1, com_add = 1, dt = 0.5, iib = 0):
    """Print FAC 2S DC/DC power-supply variables `n` times, every `dt` seconds.

    Args:
        n: number of measurement cycles to print.
        com_add: BSMP slave address of the supply.
        dt: delay between measurements, in seconds.
        iib: IIB module whose analog section is printed (0 disables it).
    """
    old_add = self.GetSlaveAdd()
    # Each IIB module occupies a bank of 14 consecutive BSMP variables.
    iib_offset = 14*(iib-1)
    try:
        for i in range(n):
            self.SetSlaveAdd(com_add)
            print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
            self.read_vars_common()
            print("\nSync Pulse Counter: " + str(round(self.read_bsmp_variable(5,'uint32_t'),3)))
            soft_itlks = self.read_bsmp_variable(31,'uint32_t')
            print("\nSoft Interlocks: " + str(soft_itlks))
            if(soft_itlks):
                self.decode_interlocks(soft_itlks, list_fac_2s_dcdc_soft_interlocks)
                print('')
            hard_itlks = self.read_bsmp_variable(32,'uint32_t')
            print("Hard Interlocks: " + str(hard_itlks))
            if(hard_itlks):
                self.decode_interlocks(hard_itlks, list_fac_2s_dcdc_hard_interlocks)
            print("\nLoad Current: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " A")
            print("Load Current DCCT 1: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
            print("Load Current DCCT 2: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " A")
            print("\nCapBank Voltage 1: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " V")
            print("CapBank Voltage 2: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " V")
            print("\nDuty-Cycle 1: " + str(round(self.read_bsmp_variable(38,'float'),3)) + " %")
            print("Duty-Cycle 2: " + str(round(self.read_bsmp_variable(39,'float'),3)) + " %")
            if(iib):
                print("\nIIB CapBank Voltage: " + str(round(self.read_bsmp_variable(40 + iib_offset,'float'),3)) + " V")
                print("IIB Input Current: " + str(round(self.read_bsmp_variable(41 + iib_offset,'float'),3)) + " A")
                print("IIB Output Current: " + str(round(self.read_bsmp_variable(42 + iib_offset,'float'),3)) + " A")
                print("IIB IGBT Leg 1 Temp: " + str(round(self.read_bsmp_variable(43 + iib_offset,'float'),3)) + " °C")
                print("IIB IGBT Leg 2 Temp: " + str(round(self.read_bsmp_variable(44 + iib_offset,'float'),3)) + " °C")
                print("IIB Inductor Temp: " + str(round(self.read_bsmp_variable(45 + iib_offset,'float'),3)) + " °C")
                print("IIB Heat-Sink Temp: " + str(round(self.read_bsmp_variable(46 + iib_offset,'float'),3)) + " °C")
                print("IIB Driver Voltage: " + str(round(self.read_bsmp_variable(47 + iib_offset,'float'),3)) + " V")
                print("IIB Driver Current 1: " + str(round(self.read_bsmp_variable(48 + iib_offset,'float'),3)) + " A")
                print("IIB Driver Current 2: " + str(round(self.read_bsmp_variable(49 + iib_offset,'float'),3)) + " A")
                print("IIB Board Temp: " + str(round(self.read_bsmp_variable(50 + iib_offset,'float'),3)) + " °C")
                print("IIB Board RH: " + str(round(self.read_bsmp_variable(51 + iib_offset,'float'),3)) + " %")
                iib_itlks = self.read_bsmp_variable(52 + iib_offset,'uint32_t')
                print("\nIIB Interlocks: " + str(iib_itlks))
                if(iib_itlks):
                    self.decode_interlocks(iib_itlks, list_fac_2s_dcdc_iib_interlocks)
                iib_alarms = self.read_bsmp_variable(53 + iib_offset,'uint32_t')
                print("IIB Alarms: " + str(iib_alarms))
                if(iib_alarms):
                    self.decode_interlocks(iib_alarms, list_fac_2s_dcdc_iib_alarms)
            time.sleep(dt)
        self.SetSlaveAdd(old_add)
    except Exception as e:
        # Narrowed from a bare `except:` (which also swallowed Ctrl-C) and the
        # error is now reported instead of silently discarded; the original
        # slave address is still restored, matching read_vars_fap_4p's style.
        print(e)
        self.SetSlaveAdd(old_add)
def read_vars_fac_2p4s_acdc(self, n = 1, add_mod_a = 1, dt = 0.5, iib = 0):
    # The 2P4S AC/DC unit shares the 2S AC/DC variable map, so this simply
    # delegates to read_vars_fac_2s_acdc with the same arguments.
    self.read_vars_fac_2s_acdc(n, add_mod_a, dt, iib)
def read_vars_fac_2p4s_dcdc(self, n = 1, com_add = 1, dt = 0.5, iib = 0):
    """Print FAC 2P4S DC/DC power-supply variables `n` times, every `dt` seconds.

    Args:
        n: number of measurement cycles to print.
        com_add: BSMP slave address of the supply.
        dt: delay between measurements, in seconds.
        iib: when truthy, also print both IIB modules' analog sections.
    """
    old_add = self.GetSlaveAdd()
    try:
        for i in range(n):
            self.SetSlaveAdd(com_add)
            print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
            self.read_vars_common()
            print("\nSync Pulse Counter: " + str(round(self.read_bsmp_variable(5,'uint32_t'),3)))
            soft_itlks = self.read_bsmp_variable(31,'uint32_t')
            print("\nSoft Interlocks: " + str(soft_itlks))
            if(soft_itlks):
                self.decode_interlocks(soft_itlks, list_fac_2p4s_dcdc_soft_interlocks)
                print('')
            hard_itlks = self.read_bsmp_variable(32,'uint32_t')
            print("Hard Interlocks: " + str(hard_itlks))
            if(hard_itlks):
                self.decode_interlocks(hard_itlks, list_fac_2p4s_dcdc_hard_interlocks)
            print("\nLoad Current: " + str(round(self.read_bsmp_variable(33,'float'),3)))
            print("Load Current DCCT 1: " + str(round(self.read_bsmp_variable(34,'float'),3)))
            print("Load Current DCCT 2: " + str(round(self.read_bsmp_variable(35,'float'),3)))
            print("\nArm Current 1: " + str(round(self.read_bsmp_variable(36,'float'),3)))
            print("Arm Current 2: " + str(round(self.read_bsmp_variable(37,'float'),3)))
            print("\nCapBank Voltage 1: " + str(round(self.read_bsmp_variable(38,'float'),3)))
            print("CapBank Voltage 2: " + str(round(self.read_bsmp_variable(39,'float'),3)))
            print("CapBank Voltage 3: " + str(round(self.read_bsmp_variable(40,'float'),3)))
            print("CapBank Voltage 4: " + str(round(self.read_bsmp_variable(41,'float'),3)))
            print("CapBank Voltage 5: " + str(round(self.read_bsmp_variable(42,'float'),3)))
            print("CapBank Voltage 6: " + str(round(self.read_bsmp_variable(43,'float'),3)))
            print("CapBank Voltage 7: " + str(round(self.read_bsmp_variable(44,'float'),3)))
            print("CapBank Voltage 8: " + str(round(self.read_bsmp_variable(45,'float'),3)))
            print("\nDuty-Cycle 1: " + str(round(self.read_bsmp_variable(46,'float'),3)))
            print("Duty-Cycle 2: " + str(round(self.read_bsmp_variable(47,'float'),3)))
            print("Duty-Cycle 3: " + str(round(self.read_bsmp_variable(48,'float'),3)))
            print("Duty-Cycle 4: " + str(round(self.read_bsmp_variable(49,'float'),3)))
            print("Duty-Cycle 5: " + str(round(self.read_bsmp_variable(50,'float'),3)))
            print("Duty-Cycle 6: " + str(round(self.read_bsmp_variable(51,'float'),3)))
            print("Duty-Cycle 7: " + str(round(self.read_bsmp_variable(52,'float'),3)))
            print("Duty-Cycle 8: " + str(round(self.read_bsmp_variable(53,'float'),3)))
            if(iib):
                # First IIB module: BSMP variables 54..67.
                print("\nIIB CapBank Voltage: " + str(round(self.read_bsmp_variable(54,'float'),3)) + " V")
                print("IIB Input Current: " + str(round(self.read_bsmp_variable(55,'float'),3)) + " A")
                print("IIB Output Current: " + str(round(self.read_bsmp_variable(56,'float'),3)) + " A")
                print("IIB IGBT Leg 1 Temp: " + str(round(self.read_bsmp_variable(57,'float'),3)) + " °C")
                print("IIB IGBT Leg 2 Temp: " + str(round(self.read_bsmp_variable(58,'float'),3)) + " °C")
                print("IIB Inductor Temp: " + str(round(self.read_bsmp_variable(59,'float'),3)) + " °C")
                print("IIB Heat-Sink Temp: " + str(round(self.read_bsmp_variable(60,'float'),3)) + " °C")
                print("IIB Driver Voltage: " + str(round(self.read_bsmp_variable(61,'float'),3)) + " V")
                print("IIB Driver Current 1: " + str(round(self.read_bsmp_variable(62,'float'),3)) + " A")
                print("IIB Driver Current 2: " + str(round(self.read_bsmp_variable(63,'float'),3)) + " A")
                print("IIB Board Temp: " + str(round(self.read_bsmp_variable(64,'float'),3)) + " °C")
                print("IIB Board RH: " + str(round(self.read_bsmp_variable(65,'float'),3)) + " %")
                iib_itlks = self.read_bsmp_variable(66,'uint32_t')
                print("\nIIB Interlocks: " + str(iib_itlks))
                if(iib_itlks):
                    self.decode_interlocks(iib_itlks, list_fac_2p4s_dcdc_iib_interlocks)
                iib_alarms = self.read_bsmp_variable(67,'uint32_t')
                print("IIB Alarms: " + str(iib_alarms))
                if(iib_alarms):
                    self.decode_interlocks(iib_alarms, list_fac_2p4s_dcdc_iib_alarms)
                # Second IIB module: BSMP variables 68..81.
                print("\nIIB CapBank Voltage: " + str(round(self.read_bsmp_variable(68,'float'),3)) + " V")
                print("IIB Input Current: " + str(round(self.read_bsmp_variable(69,'float'),3)) + " A")
                print("IIB Output Current: " + str(round(self.read_bsmp_variable(70,'float'),3)) + " A")
                print("IIB IGBT Leg 1 Temp: " + str(round(self.read_bsmp_variable(71,'float'),3)) + " °C")
                print("IIB IGBT Leg 2 Temp: " + str(round(self.read_bsmp_variable(72,'float'),3)) + " °C")
                print("IIB Inductor Temp: " + str(round(self.read_bsmp_variable(73,'float'),3)) + " °C")
                print("IIB Heat-Sink Temp: " + str(round(self.read_bsmp_variable(74,'float'),3)) + " °C")
                print("IIB Driver Voltage: " + str(round(self.read_bsmp_variable(75,'float'),3)) + " V")
                print("IIB Driver Current 1: " + str(round(self.read_bsmp_variable(76,'float'),3)) + " A")
                print("IIB Driver Current 2: " + str(round(self.read_bsmp_variable(77,'float'),3)) + " A")
                print("IIB Board Temp: " + str(round(self.read_bsmp_variable(78,'float'),3)) + " °C")
                print("IIB Board RH: " + str(round(self.read_bsmp_variable(79,'float'),3)) + " %")
                iib_itlks = self.read_bsmp_variable(80,'uint32_t')
                print("\nIIB Interlocks: " + str(iib_itlks))
                if(iib_itlks):
                    self.decode_interlocks(iib_itlks, list_fac_2p4s_dcdc_iib_interlocks)
                iib_alarms = self.read_bsmp_variable(81,'uint32_t')
                print("IIB Alarms: " + str(iib_alarms))
                if(iib_alarms):
                    self.decode_interlocks(iib_alarms, list_fac_2p4s_dcdc_iib_alarms)
            time.sleep(dt)
        self.SetSlaveAdd(old_add)
    except Exception as e:
        # Narrowed from a bare `except:`; log the failure instead of silently
        # discarding it, and still restore the original slave address.
        print(e)
        self.SetSlaveAdd(old_add)
def read_vars_fap(self, n = 1, com_add = 1, dt = 0.5, iib = 1):
    """Print FAP power-supply variables `n` times, every `dt` seconds.

    Args:
        n: number of measurement cycles to print.
        com_add: BSMP slave address of the supply.
        dt: delay between measurements, in seconds.
        iib: when truthy (default), also print the IIB analog section.
    """
    old_add = self.GetSlaveAdd()
    try:
        for i in range(n):
            self.SetSlaveAdd(com_add)
            print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
            self.read_vars_common()
            soft_itlks = self.read_bsmp_variable(31,'uint32_t')
            print("\nSoft Interlocks: " + str(soft_itlks))
            if(soft_itlks):
                self.decode_interlocks(soft_itlks, list_fap_soft_interlocks)
                print('')
            hard_itlks = self.read_bsmp_variable(32,'uint32_t')
            print("Hard Interlocks: " + str(hard_itlks))
            if(hard_itlks):
                self.decode_interlocks(hard_itlks, list_fap_hard_interlocks)
            iib_itlks = self.read_bsmp_variable(56,'uint32_t')
            print("\nIIB Interlocks: " + str(iib_itlks))
            if(iib_itlks):
                self.decode_interlocks(iib_itlks, list_fap_iib_interlocks)
            iib_alarms = self.read_bsmp_variable(57,'uint32_t')
            print("\nIIB Alarms: " + str(iib_alarms))
            if(iib_alarms):
                self.decode_interlocks(iib_alarms, list_fap_iib_alarms)
            iload = self.read_bsmp_variable(33,'float')
            print("\nLoad Current: " + str(round(iload,3)) + " A")
            print("Load Current DCCT 1: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
            print("Load Current DCCT 2: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " A")
            # Resistance = V_load / I_load; guard against division by zero
            # when the supply is off.
            if iload != 0:
                print("\nLoad Resistance: " + str(abs(round(self.read_bsmp_variable(43,'float') / iload ,3))) + " Ohm")
            else:
                print("\nLoad Resistance: 0 Ohm")
            print("Load Power: " + str(abs(round(self.read_bsmp_variable(43,'float') * self.read_bsmp_variable(33,'float'),3))) + " W")
            print("\nDC-Link Voltage: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " V")
            print("\nIGBT 1 Current: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " A")
            print("IGBT 2 Current: " + str(round(self.read_bsmp_variable(38,'float'),3)) + " A")
            print("\nIGBT 1 Duty-Cycle: " + str(round(self.read_bsmp_variable(39,'float'),3)) + " %")
            print("IGBT 2 Duty-Cycle: " + str(round(self.read_bsmp_variable(40,'float'),3)) + " %")
            print("Differential Duty-Cycle: " + str(round(self.read_bsmp_variable(41,'float'),3)) + " %")
            if(iib):
                print("\nIIB Input Voltage: " + str(round(self.read_bsmp_variable(42,'float'),3)) + " V")
                print("IIB Output Voltage: " + str(round(self.read_bsmp_variable(43,'float'),3)) + " V")
                print("IIB IGBT 1 Current: " + str(round(self.read_bsmp_variable(44,'float'),3)) + " A")
                print("IIB IGBT 2 Current: " + str(round(self.read_bsmp_variable(45,'float'),3)) + " A")
                print("IIB IGBT 1 Temp: " + str(round(self.read_bsmp_variable(46,'float'),3)) + " °C")
                print("IIB IGBT 2 Temp: " + str(round(self.read_bsmp_variable(47,'float'),3)) + " °C")
                print("IIB Driver Voltage: " + str(round(self.read_bsmp_variable(48,'float'),3)) + " V")
                print("IIB Driver Current 1: " + str(round(self.read_bsmp_variable(49,'float'),3)) + " A")
                print("IIB Driver Current 2: " + str(round(self.read_bsmp_variable(50,'float'),3)) + " A")
                print("IIB Inductor Temp: " + str(round(self.read_bsmp_variable(51,'float'),3)) + " °C")
                print("IIB Heat-Sink Temp: " + str(round(self.read_bsmp_variable(52,'float'),3)) + " °C")
                print("IIB Ground Leakage Current: " + str(round(self.read_bsmp_variable(53,'float'),3)) + " A")
                print("IIB Board Temp: " + str(round(self.read_bsmp_variable(54,'float'),3)) + " °C")
                print("IIB Board RH: " + str(round(self.read_bsmp_variable(55,'float'),3)) + " %")
                print("IIB Interlocks: " + str(round(self.read_bsmp_variable(56,'uint32_t'),3)))
                print("IIB Alarms: " + str(round(self.read_bsmp_variable(57,'uint32_t'),3)))
            time.sleep(dt)
        self.SetSlaveAdd(old_add)
    except Exception as e:
        # Narrowed from a bare `except:`; log the failure instead of silently
        # discarding it, and still restore the original slave address.
        print(e)
        self.SetSlaveAdd(old_add)
def read_vars_fap_4p(self, n = 1, com_add = 1, dt = 0.5, iib = 0):
    # Print FAP 4P variables `n` times, every `dt` seconds, for the supply at
    # slave address `com_add`. Interlocks/alarms of all four IIB modules are
    # always shown; `iib` selects which module's full analog section is
    # printed (0 = none).
    old_add = self.GetSlaveAdd()
    # Each IIB module occupies a bank of 16 consecutive BSMP variables.
    iib_offset = 16*(iib-1)
    try:
        for i in range(n):
            self.SetSlaveAdd(com_add)
            print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
            self.read_vars_common()
            soft_itlks = self.read_bsmp_variable(31,'uint32_t')
            print("\nSoft Interlocks: " + str(soft_itlks))
            if(soft_itlks):
                self.decode_interlocks(soft_itlks, list_fap_4p_soft_interlocks)
                print('')
            hard_itlks = self.read_bsmp_variable(32,'uint32_t')
            print("Hard Interlocks: " + str(hard_itlks))
            if(hard_itlks):
                self.decode_interlocks(hard_itlks, list_fap_4p_hard_interlocks)
            # Interlock/alarm words for all four IIB modules (vars 72/73 + j*16).
            for j in range(4):
                iib_itlks = self.read_bsmp_variable(72 + j*16,'uint32_t')
                print("\nIIB " + str(j+1) + " Interlocks: " + str(iib_itlks))
                if(iib_itlks):
                    self.decode_interlocks(iib_itlks, list_fap_4p_iib_interlocks)
                iib_alarms = self.read_bsmp_variable(73 + j*16,'uint32_t')
                print("IIB " + str(j+1) + " Alarms: " + str(iib_alarms))
                if(iib_alarms):
                    self.decode_interlocks(iib_alarms, list_fap_4p_iib_alarms)
            print("\n Mean Load Current: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " A")
            print("Load Current 1: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
            print("Load Current 2: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " A")
            print("Load Voltage: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " V")
            print("\nIGBT 1 Current Mod 1: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " A")
            print("IGBT 2 Current Mod 1: " + str(round(self.read_bsmp_variable(38,'float'),3)) + " A")
            print("IGBT 1 Current Mod 2: " + str(round(self.read_bsmp_variable(39,'float'),3)) + " A")
            print("IGBT 2 Current Mod 2: " + str(round(self.read_bsmp_variable(40,'float'),3)) + " A")
            print("IGBT 1 Current Mod 3: " + str(round(self.read_bsmp_variable(41,'float'),3)) + " A")
            print("IGBT 2 Current Mod 3: " + str(round(self.read_bsmp_variable(42,'float'),3)) + " A")
            print("IGBT 1 Current Mod 4: " + str(round(self.read_bsmp_variable(43,'float'),3)) + " A")
            print("IGBT 2 Current Mod 4: " + str(round(self.read_bsmp_variable(44,'float'),3)) + " A")
            print("\nDC-Link Voltage Mod 1: " + str(round(self.read_bsmp_variable(45,'float'),3)) + " V")
            print("DC-Link Voltage Mod 2: " + str(round(self.read_bsmp_variable(46,'float'),3)) + " V")
            print("DC-Link Voltage Mod 3: " + str(round(self.read_bsmp_variable(47,'float'),3)) + " V")
            print("DC-Link Voltage Mod 4: " + str(round(self.read_bsmp_variable(48,'float'),3)) + " V")
            print("\nMean Duty-Cycle: " + str(round(self.read_bsmp_variable(49,'float'),3)) + " %")
            print("IGBT 1 Duty-Cycle Mod 1: " + str(round(self.read_bsmp_variable(50,'float'),3)) + " %")
            print("IGBT 2 Duty-Cycle Mod 1: " + str(round(self.read_bsmp_variable(51,'float'),3)) + " %")
            print("IGBT 1 Duty-Cycle Mod 2: " + str(round(self.read_bsmp_variable(52,'float'),3)) + " %")
            print("IGBT 2 Duty-Cycle Mod 2: " + str(round(self.read_bsmp_variable(53,'float'),3)) + " %")
            print("IGBT 1 Duty-Cycle Mod 3: " + str(round(self.read_bsmp_variable(54,'float'),3)) + " %")
            print("IGBT 2 Duty-Cycle Mod 3: " + str(round(self.read_bsmp_variable(55,'float'),3)) + " %")
            print("IGBT 1 Duty-Cycle Mod 4: " + str(round(self.read_bsmp_variable(56,'float'),3)) + " %")
            print("IGBT 2 Duty-Cycle Mod 4: " + str(round(self.read_bsmp_variable(57,'float'),3)) + " %")
            # Full analog section of the selected IIB module (vars 58..73
            # shifted by iib_offset).
            if not iib == 0:
                print("\nIIB " + str(iib) + " Input Voltage: " + str(round(self.read_bsmp_variable(58 + iib_offset,'float'),3)) + " V")
                print("IIB " + str(iib) + " Output Voltage: " + str(round(self.read_bsmp_variable(59 + iib_offset,'float'),3)) + " V")
                print("IIB " + str(iib) + " IGBT 1 Current: " + str(round(self.read_bsmp_variable(60 + iib_offset,'float'),3)) + " A")
                print("IIB " + str(iib) + " IGBT 2 Current: " + str(round(self.read_bsmp_variable(61 + iib_offset,'float'),3)) + " A")
                print("IIB " + str(iib) + " IGBT 1 Temp: " + str(round(self.read_bsmp_variable(62 + iib_offset,'float'),3)) + " °C")
                print("IIB " + str(iib) + " IGBT 2 Temp: " + str(round(self.read_bsmp_variable(63 + iib_offset,'float'),3)) + " °C")
                print("IIB " + str(iib) + " Driver Voltage: " + str(round(self.read_bsmp_variable(64 + iib_offset,'float'),3)) + " V")
                print("IIB " + str(iib) + " Driver Current 1: " + str(round(self.read_bsmp_variable(65 + iib_offset,'float'),3)) + " A")
                print("IIB " + str(iib) + " Driver Current 2: " + str(round(self.read_bsmp_variable(66 + iib_offset,'float'),3)) + " A")
                print("IIB " + str(iib) + " Inductor Temp: " + str(round(self.read_bsmp_variable(67 + iib_offset,'float'),3)) + " °C")
                print("IIB " + str(iib) + " Heat-Sink Temp: " + str(round(self.read_bsmp_variable(68 + iib_offset,'float'),3)) + " °C")
                print("IIB " + str(iib) + " Ground Leakage Current: " + str(round(self.read_bsmp_variable(69 + iib_offset,'float'),3)) + " A")
                print("IIB " + str(iib) + " Board Temp: " + str(round(self.read_bsmp_variable(70 + iib_offset,'float'),3)) + " °C")
                print("IIB " + str(iib) + " Board RH: " + str(round(self.read_bsmp_variable(71 + iib_offset,'float'),3)) + " %")
                print("IIB " + str(iib) + " Interlocks: " + str(round(self.read_bsmp_variable(72 + iib_offset,'uint32_t'),3)))
                print("IIB " + str(iib) + " Alarms: " + str(round(self.read_bsmp_variable(73 + iib_offset,'uint32_t'),3)))
            time.sleep(dt)
        self.SetSlaveAdd(old_add)
    except Exception as e:
        # Restore the original slave address on any failure and show the error.
        print(e)
        self.SetSlaveAdd(old_add)
def read_vars_fap_2p2s(self, n = 1, com_add = 1, dt = 0.5, iib = 0):
    # Print FAP 2P2S variables `n` times, every `dt` seconds, for the supply
    # at slave address `com_add`. Interlocks/alarms of all four IIB modules
    # are always shown; `iib` selects which module's full analog section is
    # printed (0 = none).
    old_add = self.GetSlaveAdd()
    # Each IIB module occupies a bank of 16 consecutive BSMP variables.
    iib_offset = 16*(iib-1)
    try:
        for i in range(n):
            self.SetSlaveAdd(com_add)
            print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
            self.read_vars_common()
            soft_itlks = self.read_bsmp_variable(31,'uint32_t')
            print("\nSoft Interlocks: " + str(soft_itlks))
            if(soft_itlks):
                self.decode_interlocks(soft_itlks, list_fap_2p2s_soft_interlocks)
                print('')
            hard_itlks = self.read_bsmp_variable(32,'uint32_t')
            print("Hard Interlocks: " + str(hard_itlks))
            if(hard_itlks):
                self.decode_interlocks(hard_itlks, list_fap_2p2s_hard_interlocks)
            # Interlock/alarm words for all four IIB modules (vars 78/79 + j*16).
            # NOTE(review): decoding reuses the FAP 4P IIB lists
            # (list_fap_4p_iib_interlocks / _alarms) — presumably the maps are
            # identical for 2P2S; confirm against the firmware variable map.
            for j in range(4):
                iib_itlks = self.read_bsmp_variable(78 + j*16,'uint32_t')
                print("\nIIB " + str(j+1) + " Interlocks: " + str(iib_itlks))
                if(iib_itlks):
                    self.decode_interlocks(iib_itlks, list_fap_4p_iib_interlocks)
                iib_alarms = self.read_bsmp_variable(79 + j*16,'uint32_t')
                print("IIB " + str(j+1) + " Alarms: " + str(iib_alarms))
                if(iib_alarms):
                    self.decode_interlocks(iib_alarms, list_fap_4p_iib_alarms)
            print("\nMean Load Current: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " A")
            print("Load Current 1: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
            print("Load Current 2: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " A")
            print("\nArm Current 1: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " A")
            print("Arm Current 2: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " A")
            print("\nIGBT 1 Current Mod 1: " + str(round(self.read_bsmp_variable(38,'float'),3)) + " A")
            print("IGBT 2 Current Mod 1: " + str(round(self.read_bsmp_variable(39,'float'),3)) + " A")
            print("IGBT 1 Current Mod 2: " + str(round(self.read_bsmp_variable(40,'float'),3)) + " A")
            print("IGBT 2 Current Mod 2: " + str(round(self.read_bsmp_variable(41,'float'),3)) + " A")
            print("IGBT 1 Current Mod 3: " + str(round(self.read_bsmp_variable(42,'float'),3)) + " A")
            print("IGBT 2 Current Mod 3: " + str(round(self.read_bsmp_variable(43,'float'),3)) + " A")
            print("IGBT 1 Current Mod 4: " + str(round(self.read_bsmp_variable(44,'float'),3)) + " A")
            print("IGBT 2 Current Mod 4: " + str(round(self.read_bsmp_variable(45,'float'),3)) + " A")
            print("\nDC-Link Voltage Mod 1: " + str(round(self.read_bsmp_variable(50,'float'),3)) + " V")
            print("DC-Link Voltage Mod 2: " + str(round(self.read_bsmp_variable(51,'float'),3)) + " V")
            print("DC-Link Voltage Mod 3: " + str(round(self.read_bsmp_variable(52,'float'),3)) + " V")
            print("DC-Link Voltage Mod 4: " + str(round(self.read_bsmp_variable(53,'float'),3)) + " V")
            print("\nMean Duty-Cycle: " + str(round(self.read_bsmp_variable(54,'float'),3)) + " %")
            print("Differential Duty-Cycle: " + str(round(self.read_bsmp_variable(55,'float'),3)) + " %")
            print("\nIGBT 1 Duty-Cycle Mod 1: " + str(round(self.read_bsmp_variable(56,'float'),3)) + " %")
            print("IGBT 2 Duty-Cycle Mod 1: " + str(round(self.read_bsmp_variable(57,'float'),3)) + " %")
            print("IGBT 1 Duty-Cycle Mod 2: " + str(round(self.read_bsmp_variable(58,'float'),3)) + " %")
            print("IGBT 2 Duty-Cycle Mod 2: " + str(round(self.read_bsmp_variable(59,'float'),3)) + " %")
            print("IGBT 1 Duty-Cycle Mod 3: " + str(round(self.read_bsmp_variable(60,'float'),3)) + " %")
            print("IGBT 2 Duty-Cycle Mod 3: " + str(round(self.read_bsmp_variable(61,'float'),3)) + " %")
            print("IGBT 1 Duty-Cycle Mod 4: " + str(round(self.read_bsmp_variable(62,'float'),3)) + " %")
            print("IGBT 2 Duty-Cycle Mod 4: " + str(round(self.read_bsmp_variable(63,'float'),3)) + " %")
            # Full analog section of the selected IIB module (vars 64..79
            # shifted by iib_offset).
            if not iib == 0:
                print("\nIIB " + str(iib) + " Input Voltage: " + str(round(self.read_bsmp_variable(64 + iib_offset,'float'),3)) + " V")
                print("IIB " + str(iib) + " Output Voltage: " + str(round(self.read_bsmp_variable(65 + iib_offset,'float'),3)) + " V")
                print("IIB " + str(iib) + " IGBT 1 Current: " + str(round(self.read_bsmp_variable(66 + iib_offset,'float'),3)) + " A")
                print("IIB " + str(iib) + " IGBT 2 Current: " + str(round(self.read_bsmp_variable(67 + iib_offset,'float'),3)) + " A")
                print("IIB " + str(iib) + " IGBT 1 Temp: " + str(round(self.read_bsmp_variable(68 + iib_offset,'float'),3)) + " °C")
                print("IIB " + str(iib) + " IGBT 2 Temp: " + str(round(self.read_bsmp_variable(69 + iib_offset,'float'),3)) + " °C")
                print("IIB " + str(iib) + " Driver Voltage: " + str(round(self.read_bsmp_variable(70 + iib_offset,'float'),3)) + " V")
                print("IIB " + str(iib) + " Driver Current 1: " + str(round(self.read_bsmp_variable(71 + iib_offset,'float'),3)) + " A")
                print("IIB " + str(iib) + " Driver Current 2: " + str(round(self.read_bsmp_variable(72 + iib_offset,'float'),3)) + " A")
                print("IIB " + str(iib) + " Inductor Temp: " + str(round(self.read_bsmp_variable(73 + iib_offset,'float'),3)) + " °C")
                print("IIB " + str(iib) + " Heat-Sink Temp: " + str(round(self.read_bsmp_variable(74 + iib_offset,'float'),3)) + " °C")
                print("IIB " + str(iib) + " Ground Leakage Current: " + str(round(self.read_bsmp_variable(75 + iib_offset,'float'),3)) + " A")
                print("IIB " + str(iib) + " Board Temp: " + str(round(self.read_bsmp_variable(76 + iib_offset,'float'),3)) + " °C")
                print("IIB " + str(iib) + " Board RH: " + str(round(self.read_bsmp_variable(77 + iib_offset,'float'),3)) + " %")
                print("IIB " + str(iib) + " Interlocks: " + str(round(self.read_bsmp_variable(78 + iib_offset,'uint32_t'),3)))
                print("IIB " + str(iib) + " Alarms: " + str(round(self.read_bsmp_variable(79 + iib_offset,'uint32_t'),3)))
            time.sleep(dt)
        self.SetSlaveAdd(old_add)
    except Exception as e:
        # Restore the original slave address on any failure and show the error.
        print(e)
        self.SetSlaveAdd(old_add)
def read_vars_fap_225A(self, n = 1, com_add = 1, dt = 0.5):
    """Print FAP 225A power-supply variables `n` times, every `dt` seconds.

    Args:
        n: number of measurement cycles to print.
        com_add: BSMP slave address of the supply.
        dt: delay between measurements, in seconds.
    """
    old_add = self.GetSlaveAdd()
    try:
        for i in range(n):
            self.SetSlaveAdd(com_add)
            print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
            self.read_vars_common()
            soft_itlks = self.read_bsmp_variable(31,'uint32_t')
            print("\nSoft Interlocks: " + str(soft_itlks))
            if(soft_itlks):
                self.decode_interlocks(soft_itlks, list_fap_225A_soft_interlocks)
                print('')
            hard_itlks = self.read_bsmp_variable(32,'uint32_t')
            print("Hard Interlocks: " + str(hard_itlks))
            if(hard_itlks):
                self.decode_interlocks(hard_itlks, list_fap_225A_hard_interlocks)
            print("\nLoad Current: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " A")
            print("\nIGBT 1 Current: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
            print("IGBT 2 Current: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " A")
            print("\nIGBT 1 Duty-Cycle: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " %")
            print("IGBT 2 Duty-Cycle: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " %")
            print("Differential Duty-Cycle: " + str(round(self.read_bsmp_variable(38,'float'),3)) + " %")
            time.sleep(dt)
        self.SetSlaveAdd(old_add)
    except Exception as e:
        # Narrowed from a bare `except:`; log the failure instead of silently
        # discarding it, and still restore the original slave address.
        print(e)
        self.SetSlaveAdd(old_add)
def read_vars_fbp_2s_ufjf(self, n = 1, com_add = 1, dt = 0.5):
    """Print FBP 2S (UFJF) power-supply variables `n` times, every `dt` seconds.

    Args:
        n: number of measurement cycles to print.
        com_add: BSMP slave address of the supply.
        dt: delay between measurements, in seconds.
    """
    old_add = self.GetSlaveAdd()
    try:
        for i in range(n):
            self.SetSlaveAdd(com_add)
            print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
            self.read_vars_common()
            soft_itlks = self.read_bsmp_variable(31,'uint32_t')
            print("\nSoft Interlocks: " + str(soft_itlks))
            if(soft_itlks):
                self.decode_interlocks(soft_itlks, list_fbp_soft_interlocks)
                print('')
            hard_itlks = self.read_bsmp_variable(32,'uint32_t')
            print("Hard Interlocks: " + str(hard_itlks))
            if(hard_itlks):
                self.decode_interlocks(hard_itlks, list_fbp_hard_interlocks)
            print("\nLoad Current: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " A")
            print("Load Error: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
            print("\nMod 1 Load Voltage: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " V")
            print("Mod 3 Load Voltage: " + str(round(self.read_bsmp_variable(40,'float'),3)) + " V")
            # NOTE(review): vars 32 and 36 are also read above as interlock
            # word / Mod 1 load voltage; the duty-cycle indices below look
            # suspicious — verify against the UFJF firmware variable map.
            print("\nMod 1 Duty-Cycle: " + str(round(self.read_bsmp_variable(32,'float'),3)) + " %")
            print("Mod 3 Duty-Cycle: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " %")
            time.sleep(dt)
        self.SetSlaveAdd(old_add)
    except Exception as e:
        # Narrowed from a bare `except:`; log the failure instead of silently
        # discarding it, and still restore the original slave address.
        print(e)
        self.SetSlaveAdd(old_add)
def read_vars_fac_2p_acdc_imas(self, n = 1, add_mod_a = 2, dt = 0.5, iib = 0):
    """Print FAC 2P AC/DC (IMAS) variables of modules A and B, `n` times.

    Module A lives at slave address `add_mod_a`, module B at `add_mod_a + 1`.

    Args:
        n: number of measurement cycles to print.
        add_mod_a: BSMP slave address of module A.
        dt: delay between measurements, in seconds.
        iib: accepted for signature compatibility; not used by this reader.
    """
    old_add = self.GetSlaveAdd()
    try:
        for i in range(n):
            self.SetSlaveAdd(add_mod_a)
            print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
            self.read_vars_common()
            print('\n *** MODULE A ***')
            soft_itlks = self.read_bsmp_variable(31,'uint32_t')
            print("\nSoft Interlocks: " + str(soft_itlks))
            if(soft_itlks):
                self.decode_interlocks(soft_itlks, list_fac_2p_acdc_imas_soft_interlocks)
                print('')
            hard_itlks = self.read_bsmp_variable(32,'uint32_t')
            print("Hard Interlocks: " + str(hard_itlks))
            if(hard_itlks):
                self.decode_interlocks(hard_itlks, list_fac_2p_acdc_imas_hard_interlocks)
            print("\nCapBank Voltage: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " V")
            print("Rectifier Current: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
            print("Duty-Cycle: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " %")
            self.SetSlaveAdd(add_mod_a+1)
            print('\n *** MODULE B ***')
            soft_itlks = self.read_bsmp_variable(31,'uint32_t')
            print("\nSoft Interlocks: " + str(soft_itlks))
            if(soft_itlks):
                self.decode_interlocks(soft_itlks, list_fac_2p_acdc_imas_soft_interlocks)
                print('')
            hard_itlks = self.read_bsmp_variable(32,'uint32_t')
            print("Hard Interlocks: " + str(hard_itlks))
            if(hard_itlks):
                self.decode_interlocks(hard_itlks, list_fac_2p_acdc_imas_hard_interlocks)
            print("\nCapBank Voltage: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " V")
            print("Rectifier Current: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
            print("Duty-Cycle: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " %")
            time.sleep(dt)
    finally:
        # Equivalent to the old `except: restore; raise` pattern: the original
        # slave address is always restored and any exception still propagates.
        self.SetSlaveAdd(old_add)
def read_vars_fac_2p_dcdc_imas(self, n = 1, com_add = 1, dt = 0.5, iib = 0):
    """Print FAC 2P DC/DC (IMAS) power-supply variables `n` times, every `dt` s.

    Args:
        n: number of measurement cycles to print.
        com_add: BSMP slave address of the supply.
        dt: delay between measurements, in seconds.
        iib: accepted for signature compatibility; not used by this reader.
    """
    old_add = self.GetSlaveAdd()
    try:
        for i in range(n):
            self.SetSlaveAdd(com_add)
            print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
            self.read_vars_common()
            print("\nSync Pulse Counter: " + str(round(self.read_bsmp_variable(5,'uint32_t'),3)))
            soft_itlks = self.read_bsmp_variable(31,'uint32_t')
            print("\nSoft Interlocks: " + str(soft_itlks))
            if(soft_itlks):
                self.decode_interlocks(soft_itlks, list_fac_2p_dcdc_imas_soft_interlocks)
                print('')
            hard_itlks = self.read_bsmp_variable(32,'uint32_t')
            print("Hard Interlocks: " + str(hard_itlks))
            if(hard_itlks):
                self.decode_interlocks(hard_itlks, list_fac_2p_dcdc_imas_hard_interlocks)
            print("\nLoad Current: " + str(round(self.read_bsmp_variable(33,'float'),3)) + ' A')
            print("Load Current Error: " + str(round(self.read_bsmp_variable(34,'float'),3)) + ' A')
            print("\nArm 1 Current: " + str(round(self.read_bsmp_variable(35,'float'),3)) + ' A')
            print("Arm 2 Current: " + str(round(self.read_bsmp_variable(36,'float'),3)) + ' A')
            print("Arms Current Diff: " + str(round(self.read_bsmp_variable(37,'float'),3)) + ' A')
            print("\nCapBank Voltage 1: " + str(round(self.read_bsmp_variable(38,'float'),3)) + ' V')
            print("CapBank Voltage 2: " + str(round(self.read_bsmp_variable(39,'float'),3)) + ' V')
            print("\nDuty-Cycle 1: " + str(round(self.read_bsmp_variable(40,'float'),3)) + ' %')
            print("Duty-Cycle 2: " + str(round(self.read_bsmp_variable(41,'float'),3)) + ' %')
            print("Differential Duty-Cycle: " + str(round(self.read_bsmp_variable(42,'float'),3)) + ' %')
            time.sleep(dt)
    finally:
        # Equivalent to the old `except: restore; raise` pattern: the original
        # slave address is always restored and any exception still propagates.
        self.SetSlaveAdd(old_add)
def check_param_bank(self, param_file):
    """Validate and apply a parameter bank read from a CSV file.

    Each CSV row is ``name, value0, value1, ...``. Rows that violate known
    limits are reported and skipped; every other row is written to the UDC
    via set_param, one entry per index, stopping at the first missing or
    non-numeric column.
    """
    fbp_param_list = []
    max_sampling_freq = 600000
    with open(param_file,newline='') as f:
        reader = csv.reader(f)
        for row in reader:
            fbp_param_list.append(row)
    for param in fbp_param_list:
        # csv.reader yields strings, so values must be converted before any
        # numeric comparison (the old code compared str > int — a TypeError
        # on Python 3).
        if str(param[0]) == 'Num_PS_Modules' and float(param[1]) > 4:
            print("Invalid " + str(param[0]) + ": " + str(param[1]) + ". Maximum is 4")
        elif str(param[0]) == 'Freq_ISR_Controller' and float(param[1]) > max_sampling_freq:
            # Compare against the declared sampling-frequency limit (the old
            # code used a hard-coded 6000000 and printed "Maximum is 4").
            print("Invalid " + str(param[0]) + ": " + str(param[1]) + ". Maximum is " + str(max_sampling_freq))
        else:
            for n in range(64):
                try:
                    print(str(param[0]) + "["+ str(n) + "]: " + str(param[n+1]))
                    print(self.set_param(str(param[0]),n,float(param[n+1])))
                except (IndexError, ValueError):
                    # Ran past the row's last column (or hit a non-numeric
                    # cell): this parameter is done.
                    break
def set_param_bank(self, param_file):
    """Write a full parameter bank from a CSV file to the UDC.

    Each CSV row is ``name, value0, value1, ...``. 'PS_Name' is applied via
    set_ps_name; every other parameter is written index by index until the
    row runs out of columns. Nothing is persisted to EEPROM here.
    """
    fbp_param_list = []
    with open(param_file,newline='') as f:
        reader = csv.reader(f)
        for row in reader:
            fbp_param_list.append(row)
    for param in fbp_param_list:
        if str(param[0]) == 'PS_Name':
            print(str(param[0]) + "[0]: " + str(param[1]))
            print(self.set_ps_name(str(param[1])))
        else:
            for n in range(64):
                try:
                    print(str(param[0]) + "["+ str(n) + "]: " + str(param[n+1]))
                    print(self.set_param(str(param[0]),n,float(param[n+1])))
                except (IndexError, ValueError):
                    # End of this row's values (or a non-numeric cell);
                    # narrowed from a bare `except:` so Ctrl-C still aborts.
                    break
    #self.save_param_bank()
def get_default_ramp_waveform(self, interval=500, nrpts=4000, ti=None, fi=None, forms=None):
    # Thin wrapper around siriuspy's ramp-waveform generator; all arguments
    # are forwarded unchanged. The import is local so siriuspy is only
    # required when this method is actually called.
    from siriuspy.magnet.util import get_default_ramp_waveform
    return get_default_ramp_waveform(interval, nrpts, ti, fi, forms)
def save_ramp_waveform(self, ramp):
    """Prompt for a file name and store `ramp` as one ';'-separated CSV row."""
    filename = input('Digite o nome do arquivo: ')
    with open(filename + '.csv', 'w', newline='') as output_file:
        csv.writer(output_file, delimiter=';').writerow(ramp)
def save_ramp_waveform_col(self, ramp):
    """Prompt for a file name and store `ramp` as a CSV column (one value per row)."""
    filename = input('Digite o nome do arquivo: ')
    with open(filename + '.csv', 'w', newline='') as output_file:
        csv.writer(output_file).writerows([value] for value in ramp)
def read_vars_fac_n(self, n = 1, dt = 0.5):
    """Print variables of a FAC pair — DC/DC at address 1, AC/DC at address 2 —
    `n` times, every `dt` seconds.
    """
    old_add = self.GetSlaveAdd()
    try:
        for i in range(n):
            print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
            self.SetSlaveAdd(1)
            self.read_vars_fac_dcdc()
            print('\n-----------------------\n')
            self.SetSlaveAdd(2)
            self.read_vars_fac_acdc()
            time.sleep(dt)
        self.SetSlaveAdd(old_add)
    except Exception as e:
        # Narrowed from a bare `except:`; log the failure instead of silently
        # discarding it, and still restore the original slave address.
        print(e)
        self.SetSlaveAdd(old_add)
def get_step_buffer_fbp_ufjf(self, net1, net2, i_0, i_f, dly):
    # Capture the supply's response to a current step (i_0 -> i_f) on two
    # monitored nets, plot both traces and return them.
    # Route the monitored nets to analog variables 4 and 5.
    self.set_param('Analog_Var_Max',4,net1)
    self.set_param('Analog_Var_Max',5,net2)
    # Settle at the initial current before arming the sample buffer.
    self.set_slowref(i_0)
    time.sleep(0.5)
    self.enable_buf_samples()
    time.sleep(dly)
    # Apply the step, then freeze the acquisition and read it back.
    self.set_slowref(i_f)
    self.disable_buf_samples()
    buf = self.read_buf_samples_ctom()
    # The 4096-sample buffer is interleaved: even indices -> net1,
    # odd indices -> net2.
    buf1 = buf[0:4096:2]
    buf2 = buf[1:4096:2]
    fig = plt.figure()
    ax1 = fig.add_subplot(2,1,1)
    ax2 = fig.add_subplot(2,1,2)
    ax1.plot(buf1)
    ax1.grid()
    ax2.plot(buf2)
    ax2.grid()
    fig.show()
    return [buf1,buf2]
def set_buf_samples_freq(self, fs):
    # Set the buffer-sampling time-slicer (channel 1) to `fs`, persist it to
    # EEPROM and reset the UDC so the new rate takes effect.
    self.set_param('Freq_TimeSlicer',1,fs)
    self.save_param_eeprom('Freq_TimeSlicer',1)
    self.reset_udc()
def calc_pi(self, r_load, l_load, f_bw, v_dclink, send_drs = 0, dsp_id = 0):
kp = 2*3.1415*f_bw*l_load/v_dclink
ki = kp*r_load/l_load
print('\n Kp = ' + str(kp))
print(' Ki = ' + str(ki) + '\n')
if send_drs:
self.set_dsp_coeffs(3,dsp_id,[kp,ki,0.95,-0.95])
return [kp,ki]
    def config_dsp_modules_drs_fap_tests(self):
        # Configure the DSP PI controllers used during FAP bench tests:
        # class 3 / id 0 gets the load gains, class 3 / id 1 the share gains
        # (each coefficient list ends with two limit values, cf. calc_pi()).
        kp_load = 0
        ki_load = 20.95
        kp_share = 0.000032117
        ki_share = 0.0012
        # NOTE(review): these calls go through a module-level `drs` object
        # rather than `self`, unlike every other method in this class —
        # confirm this is intentional and that `drs` targets the intended
        # power supply.
        drs.set_dsp_coeffs(3,0,[kp_load,ki_load,0.6,0])
        drs.set_dsp_coeffs(3,1,[kp_share,ki_share,0.0015,-0.0015])
        drs.save_dsp_modules_eeprom()
def set_prbs_sampling_freq(self,freq, type_memory):
self.set_param('Freq_TimeSlicer',0,freq)
self.set_param('Freq_TimeSlicer',1,freq)
self.save_param_bank(type_memory)
def get_dsp_modules_bank(self, list_dsp_classes = [1,2,3,4,5,6], print_modules = 1):
dsp_modules_bank = []
for dsp_class in list_dsp_classes:
for dsp_id in range(num_dsp_modules[dsp_class]):
dsp_module = [dsp_classes_names[dsp_class], dsp_class, dsp_id]
for dsp_coeff in range(num_coeffs_dsp_modules[dsp_class]):
try:
coeff = self.get_dsp_coeff(dsp_class,dsp_id,dsp_coeff)
if dsp_class == 3 and dsp_coeff == 1:
coeff *= self.get_param('Freq_ISR_Controller',0)
dsp_module.append(coeff)
except:
dsp_module.append('nan')
dsp_modules_bank.append(dsp_module)
if(print_modules):
print(dsp_module)
return dsp_modules_bank
def store_dsp_modules_bank_csv(self, bank):
filename = input('Digite o nome do arquivo: ')
with open( filename + '.csv', 'w', newline='') as f:
writer = csv.writer(f, delimiter=',')
for dsp_module in bank:
writer.writerow(dsp_module)
def set_dsp_modules_bank(self, dsp_modules_file, save_eeprom = 0):
dsp_modules_row = []
with open(dsp_modules_file,newline='') as f:
reader = csv.reader(f)
for dsp_module in reader:
if not dsp_module == []:
if not dsp_module[0][0] == '#':
list_coeffs = []
for coeff in dsp_module[3:3+num_coeffs_dsp_modules[int(dsp_module[1])]]:
list_coeffs.append(float(coeff))
print(str(int(dsp_module[1])) + ' ' + str(int(dsp_module[2])) + ' ' + str(list_coeffs))
self.set_dsp_coeffs(int(dsp_module[1]),int(dsp_module[2]),list_coeffs)
if(save_eeprom):
self.save_dsp_modules_eeprom()
else:
print('\n *** Aviso: Os coeficientes configurados não foram salvos na memória EEPROM. Caso deseje salvar, utilize o argumento save_eeprom = 1')
def set_param_bank(self, param_file):
fbp_param_list = []
with open(param_file,newline='') as f:
reader = csv.reader(f)
for row in reader:
fbp_param_list.append(row)
for param in fbp_param_list:
if str(param[0]) == 'PS_Name':
print(str(param[0]) + "[0]: " + str(param[1]))
print(self.set_ps_name(str(param[1])))
else:
for n in range(64):
try:
print(str(param[0]) + "["+ str(n) + "]: " + str(param[n+1]))
print(self.set_param(str(param[0]),n,float(param[n+1])))
except:
break
#self.save_param_bank()
    def select_param_bank(self, cfg_dsp_modules = 0):
        """Interactively select and upload a parameter bank to a controller.

        Walks the user through choosing the installation area (rack room,
        transport lines or power-supply room), the power-supply model and
        its physical position, builds the path of the matching parameter
        CSV under ``../ps_parameters/`` and sends it to the controller at
        the serial address typed by the user, saving it to EEPROM and
        resetting the UDC afterwards.

        Args:
            cfg_dsp_modules: if 1 and the selected model is FBP in the rack
                room, also upload the matching DSP-coefficient bank.
        """
        add = int(input('\n Digite o endereco serial atual do controlador a ser configurado: '))
        oldadd = self.GetSlaveAdd()
        self.SetSlaveAdd(add)
        # NOTE(review): `areas`, `ps_folders` and `la_fap` are defined but
        # never used below — presumably leftovers; confirm before removing.
        areas = ['IA','LA','PA']
        ps_models = ['fbp','fbp_dclink','fap','fap_4p','fap_2p4s','fac','fac_2s']
        ps_folders = ['fbp','fbp_dclink','fap','fap',]
        la_fap = ['TB-Fam:PS-B','TS-01:PS-QF1A','TS-01:PS-QF1B','TS-02:PS-QD2',
                  'TS-02:PS-QF2','TS-03:PS-QF3','TS-04:PS-QD4A','TS-04:PS-QD4B',
                  'TS-04:PS-QF4']
        # --- Area selection -------------------------------------------------
        print('\n Selecione area: \n')
        print(' 0: Sala de racks')
        print(' 1: Linhas de transporte')
        print(' 2: Sala de fontes\n')
        area = int(input(' Digite o numero correspondente: '))
        if area == 0:
            # Rack room (IA): file is keyed by sector, rack and crate position.
            sector = input('\n Digite o setor da sala de racks [1 a 20]: ')
            if int(sector) < 10:
                sector = '0' + sector
            rack = input('\n Escolha o rack em que a fonte se encontra [1/2/3]: ')
            #if (rack != '1') and (rack != '2'):
            # Rack 3 exists only in sector 09.
            if not ((rack == '1') or (rack == '2') or (sector == '09' and rack == '3')):
                print(' \n *** RACK INEXISTENTE ***\n')
                return
            print('\n Escolha o tipo de fonte: \n')
            print(' 0: FBP')
            print(' 1: FBP-DCLink\n')
            ps_model = int(input(' Digite o numero correspondente: '))
            if ps_model == 0:
                crate = '_crate_' + input('\n Digite a posicao do bastidor, de cima para baixo. Leve em conta os bastidores que ainda nao foram instalados : ')
            elif ps_model == 1:
                crate = ''
            else:
                print(' \n *** TIPO DE FONTE INEXISTENTE ***\n')
                return
            file_dir = '../ps_parameters/IA-' + sector + '/' + ps_models[ps_model] + '/'
            file_name = 'parameters_' + ps_models[ps_model] + '_IA-' + sector + 'RaPS0' + rack + crate + '.csv'
            file_path = file_dir + file_name
            print('\n Banco de parametros a ser utilizado: ' + file_path)
        elif area == 1:
            # Transport lines (LA): FBP/FBP-DCLink keyed by crate; FAP chosen
            # from the files present on disk.
            print('\n Escolha o tipo de fonte: \n')
            print(' 0: FBP')
            print(' 1: FBP-DCLink')
            print(' 2: FAP\n')
            ps_model = int(input(' Digite o numero correspondente: '))
            if ps_model == 0 or ps_model == 1:
                crate = input('\n Digite a posicao do bastidor, de cima para baixo. Leve em conta os bastidores que ainda nao foram instalados : ')
                ps_name = '_LA-RaPS06_crate_' + crate
                file_dir = '../ps_parameters/LA/' + ps_models[ps_model] + '/'
                file_name = 'parameters_' + ps_models[ps_model] + ps_name + '.csv'
                file_path = file_dir + file_name
            elif ps_model == 2:
                ps_list = []
                file_dir = '../ps_parameters/LA/fap/'
                for entry in os.listdir(file_dir):
                    if os.path.isfile(os.path.join(file_dir, entry)):
                        ps_list.append(entry)
                print('\n ### Lista de fontes FAP da linha de transporte ### \n')
                for idx, ps in enumerate(ps_list):
                    print(' ' + str(idx) + ': ' + ps)
                ps_idx = int(input('\n Escolha o índice da fonte correspondente: '))
                file_path = file_dir + ps_list[ps_idx]
            else:
                print(' \n *** TIPO DE FONTE INEXISTENTE ***\n')
                return
            print('\n Banco de parametros a ser utilizado: ' + file_path)
        elif area == 2:
            # Power-supply room (PA): FAC/FAP chosen from the files on disk.
            print('\n Escolha o tipo de fonte: \n')
            print(' 0: FAC')
            print(' 1: FAP\n')
            ps_model = int(input(' Digite o numero correspondente: '))
            if ps_model == 0:
                ps_list = []
                file_dir = '../ps_parameters/PA/fac/'
                for entry in os.listdir(file_dir):
                    if os.path.isfile(os.path.join(file_dir, entry)):
                        ps_list.append(entry)
                print('\n ### Lista de bastidores de controle FAC da sala de fontes ### \n')
                for idx, ps in enumerate(ps_list):
                    print(' ', idx, ': ', ps)
                ps_idx = int(input('\n Escolha o índice da fonte correspondente: '))
                file_path = file_dir + ps_list[ps_idx]
            elif ps_model == 1:
                ps_list = []
                file_dir = '../ps_parameters/PA/fap/'
                for entry in os.listdir(file_dir):
                    if os.path.isfile(os.path.join(file_dir, entry)):
                        ps_list.append(entry)
                print('\n ### Lista de bastidores de controle FAP da sala de fontes ### \n')
                for idx, ps in enumerate(ps_list):
                    print(' ', idx, ': ', ps)
                ps_idx = int(input('\n Escolha o índice da fonte correspondente: '))
                file_path = file_dir + ps_list[ps_idx]
            else:
                print(' \n *** TIPO DE FONTE INEXISTENTE ***\n')
                return
            print('\n Banco de parametros a ser utilizado: ' + file_path)
        else:
            print(' \n *** SALA INEXISTENTE ***\n')
            return
        # --- Confirmation and upload ---------------------------------------
        r = input('\n Tem certeza que deseja prosseguir? [Y/N]: ')
        if (r != 'Y') and (r != 'y'):
            print(' \n *** OPERAÇÃO CANCELADA ***\n')
            return
        self.SetSlaveAdd(add)
        if ps_model == 0 and cfg_dsp_modules == 1:
            # Optionally upload the matching DSP-coefficient bank (FBP only).
            print('\n Enviando parametros de controle para controlador ...')
            dsp_file_dir = '../dsp_parameters/IA-' + sector + '/' + ps_models[ps_model] + '/'
            dsp_file_name = 'dsp_parameters_' + ps_models[ps_model] + '_IA-' + sector + 'RaPS0' + rack + crate + '.csv'
            dsp_file_path = dsp_file_dir + dsp_file_name
            self.set_dsp_modules_bank(dsp_file_path)
            print('\n Gravando parametros de controle na memoria ...')
            time.sleep(1)
            self.save_dsp_modules_eeprom()
        print('\n Enviando parametros de operacao para controlador ...\n')
        time.sleep(1)
        self.set_param_bank(file_path)
        print('\n Gravando parametros de operacao na memoria EEPROM onboard ...')
        self.save_param_bank(2)
        time.sleep(5)
        print('\n Resetando UDC ...')
        self.reset_udc()
        time.sleep(2)
        print('\n Pronto! Não se esqueça de utilizar o novo endereço serial para se comunicar com esta fonte! :)\n')
        self.SetSlaveAdd(oldadd)
def get_siggen_vars(self):
print('\n### SigGen Variables ###\n')
print('Enable: ' + str((round(self.read_bsmp_variable(6,'uint16_t'),3))))
print('Type: ' + ListSigGenTypes_v2_1[int(round(self.read_bsmp_variable(7,'uint16_t'),3))])
print('Num Cycles: ' + str(round(self.read_bsmp_variable(8,'uint16_t'),3)))
print('Index: ' + str(round(self.read_bsmp_variable(9,'float'),3)))
print('Frequency: ' + str(round(self.read_bsmp_variable(10,'float'),3)))
print('Amplitude: ' + str(round(self.read_bsmp_variable(11,'float'),3)))
print('Offset: ' + str(round(self.read_bsmp_variable(12,'float'),3)))
self.read_var(self.index_to_hex(13))
reply_msg = self.ser.read(21)
val = struct.unpack('BBHffffB',reply_msg)
print('Aux Param 0: ' + str(val[3]))
print('Aux Param 1: ' + str(val[4]))
print('Aux Param 2: ' + str(val[5]))
print('Aux Param 3: ' + str(val[6]))
    def firmware_initialization(self):
        """First-time firmware setup for a freshly flashed UDC.

        Reads status and firmware version, unlocks the UDC, enables the
        onboard EEPROM, sets the password parameter, and then runs the
        interactive parameter-bank selection. The call order matters: the
        UDC must be unlocked before any parameter can be written.
        """
        print("\n ### Inicialização de firmware ### \n")
        print("\n Lendo status...")
        print(self.read_ps_status())
        print("\n Lendo versão de firmware...")
        self.read_udc_version()
        print("\n Desbloqueando UDC...")
        # 0xFFFF — presumably the default unlock key of a blank UDC; confirm.
        print(self.unlock_udc(0xFFFF))
        print("\n Habilitando EEPROM onboard...")
        self.enable_onboard_eeprom()
        print("\n Alterando senha...")
        # Set the password parameter to 0xCAFE and persist it (memory type 2).
        print(self.set_param('Password',0,0xCAFE))
        print(self.save_param_eeprom('Password',0,2))
        print("\n Configurando banco de parâmetros...")
        self.select_param_bank()
        print("\n ### Fim da inicialização de firmware ### \n")
def cfg_hensys_ps_model(self):
list_files = ['fbp_dclink/parameters_fbp_dclink_hensys.csv',
'fac/parameters_fac_acdc_hensys.csv',
'fac/parameters_fac_dcdc_hensys.csv',
'fac/parameters_fac_2s_acdc_hensys.csv',
'fac/parameters_fac_2s_dcdc_hensys.csv',
'fac/parameters_fac_2p4s_acdc_hensys.csv',
'fac/parameters_fac_2p4s_dcdc_hensys.csv',
'fap/parameters_fap_hensys.csv',
'fap/parameters_fap_2p2s_hensys.csv',
'fap/parameters_fap_4p_hensys.csv']
print('\n Desbloqueando UDC ...')
print(self.unlock_udc(0xCAFE))
print('\n *** Escolha o modelo de fonte a ser configurado ***\n')
print(' 0: FBP-DClink')
print(' 1: FAC-ACDC')
print(' 2: FAC-DCDC')
print(' 3: FAC-2S-ACDC')
print(' 4: FAC-2S-DCDC')
print(' 5: FAC-2P4S-ACDC')
print(' 6: FAC-2P4S-DCDC')
print(' 7: FAP')
print(' 8: FAP-2P2S')
print(' 9: FAP-4P')
model_idx = int(input('\n Digite o índice correspondente: '))
file_path = '../ps_parameters/development/' + list_files[model_idx]
print('\n Banco de parametros a ser utilizado: ' + file_path)
r = input('\n Tem certeza que deseja prosseguir? [Y/N]: ')
if (r != 'Y') and (r != 'y'):
print(' \n *** OPERAÇÃO CANCELADA ***\n')
return
print('\n Enviando parametros de operacao para controlador ...\n')
time.sleep(1)
self.set_param_bank(file_path)
print('\n Gravando parametros de operacao na memoria EEPROM onboard ...')
self.save_param_bank(2)
time.sleep(5)
print('\n Resetando UDC ...')
self.reset_udc()
time.sleep(2)
print('\n Pronto! Nao se esqueca de utilizar o novo endereco serial para se comunicar com esta fonte! :)\n')
def test_bid_board(self, password):
r = input("\n Antes de iniciar, certifique-se que o bastidor foi energizado sem a placa BID.\n Para prosseguir, conecte a placa BID a ser testada e pressione qualquer tecla... ")
print("\n Desbloqueando UDC ...")
print(self.unlock_udc(password))
print("\n Carregando banco de parametros da memoria onboard ...")
print(self.load_param_bank(type_memory = 2))
print("\n Banco de parametros da memoria onboard:\n")
max_param = ListParameters.index('Scope_Source')
param_bank_onboard = []
for param in ListParameters[0:max_param]:
val = self.get_param(param,0)
print(param + ':',val)
param_bank_onboard.append(val)
print("\n Salvando banco de parametros na memoria offboard ...")
print(self.save_param_bank(type_memory = 1))
time.sleep(5)
print("\n Resetando UDC ...")
self.reset_udc()
time.sleep(3)
self.read_ps_status()
print("\n Desbloqueando UDC ...")
print(self.unlock_udc(password))
print("\n Carregando banco de parametros da memoria offboard ...")
print(self.load_param_bank(type_memory = 1))
self.read_ps_status()
print("\n Verificando banco de parametros offboard apos reset ... \n")
try:
param_bank_offboard = []
for param in ListParameters[0:max_param]:
val = self.get_param(param,0)
print(param, val)
param_bank_offboard.append(val)
if param_bank_onboard == param_bank_offboard:
print("\n Placa BID aprovada!\n")
else:
print("\n Placa BID reprovada!\n")
except:
print(" Placa BID reprovada!\n")
    def upload_parameters_bid(self, password):
        """Copy parameters and DSP coefficients from the offboard (BID)
        memory into the UDC onboard memory.

        Per the printed messages, ``type_memory=1`` is the offboard (BID)
        memory and ``type_memory=2`` the onboard one.

        Args:
            password: UDC unlock password.
        """
        print("\n Desbloqueando UDC ...")
        print(self.unlock_udc(password))
        print("\n Carregando banco de parametros da memoria offboard ...")
        print(self.load_param_bank(type_memory = 1))
        time.sleep(1)
        print("\n Salvando banco de parametros na memoria onboard ...")
        print(self.save_param_bank(type_memory = 2))
        time.sleep(5)
        print("\n Carregando coeficientes de controle da memoria offboard ...")
        print(self.load_dsp_modules_eeprom(type_memory = 1))
        time.sleep(1)
        print("\n Salvando coeficientes de controle na memoria onboard ...\n")
        print(self.save_dsp_modules_eeprom(type_memory = 2))
    def download_parameters_bid(self,password):
        """Copy parameters and DSP coefficients from the UDC onboard memory
        into the offboard (BID) memory — the inverse of
        ``upload_parameters_bid``.

        Per the printed messages, ``type_memory=2`` is the onboard memory
        and ``type_memory=1`` the offboard (BID) one.

        Args:
            password: UDC unlock password.
        """
        print("\n Desbloqueando UDC ...")
        print(self.unlock_udc(password))
        print("\n Carregando banco de parametros da memoria onboard ...")
        print(self.load_param_bank(type_memory = 2))
        time.sleep(1)
        print("\n Salvando banco de parametros na memoria offboard ...")
        print(self.save_param_bank(type_memory = 1))
        time.sleep(5)
        print("\n Carregando coeficientes de controle da memoria onboard ...")
        print(self.load_dsp_modules_eeprom(type_memory = 2))
        time.sleep(1)
        print("\n Salvando coeficientes de controle na memoria offboard ...")
        print(self.save_dsp_modules_eeprom(type_memory = 1))
|
[
"serial.Serial",
"siriuspy.magnet.util.get_default_ramp_waveform",
"math.isnan",
"csv.reader",
"csv.writer",
"numpy.float32",
"struct.unpack",
"struct.pack",
"time.sleep",
"matplotlib.pyplot.figure",
"os.path.join",
"os.listdir"
] |
[((31867, 31882), 'serial.Serial', 'serial.Serial', ([], {}), '()\n', (31880, 31882), False, 'import serial\n'), ((34022, 34045), 'struct.pack', 'struct.pack', (['"""f"""', 'value'], {}), "('f', value)\n", (34033, 34045), False, 'import struct\n'), ((34679, 34702), 'struct.pack', 'struct.pack', (['"""H"""', 'value'], {}), "('H', value)\n", (34690, 34702), False, 'import struct\n'), ((34850, 34873), 'struct.pack', 'struct.pack', (['"""I"""', 'value'], {}), "('I', value)\n", (34861, 34873), False, 'import struct\n'), ((35013, 35036), 'struct.pack', 'struct.pack', (['"""B"""', 'value'], {}), "('B', value)\n", (35024, 35036), False, 'import struct\n'), ((35181, 35205), 'struct.pack', 'struct.pack', (['""">H"""', 'value'], {}), "('>H', value)\n", (35192, 35205), False, 'import struct\n'), ((35402, 35424), 'struct.pack', 'struct.pack', (['"""B"""', 'csum'], {}), "('B', csum)\n", (35413, 35424), False, 'import struct\n'), ((35886, 35923), 'struct.pack', 'struct.pack', (['bytesFormat[format]', 'val'], {}), '(bytesFormat[format], val)\n', (35897, 35923), False, 'import struct\n'), ((53741, 53756), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (53751, 53756), False, 'import time\n'), ((58197, 58212), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (58207, 58212), False, 'import time\n'), ((58868, 58917), 'struct.unpack', 'struct.unpack', (['"""BBHLLHHHHHHfffffffffB"""', 'reply_msg'], {}), "('BBHLLHHHHHHfffffffffB', reply_msg)\n", (58881, 58917), False, 'import struct\n'), ((59626, 59641), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (59636, 59641), False, 'import time\n'), ((60145, 60158), 'numpy.float32', 'np.float32', (['(0)'], {}), '(0)\n', (60155, 60158), True, 'import numpy as np\n'), ((60193, 60206), 'numpy.float32', 'np.float32', (['(0)'], {}), '(0)\n', (60203, 60206), True, 'import numpy as np\n'), ((68649, 68664), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (68659, 68664), False, 'import time\n'), ((73319, 73352), 
'struct.unpack', 'struct.unpack', (['"""BBHHB"""', 'reply_msg'], {}), "('BBHHB', reply_msg)\n", (73332, 73352), False, 'import struct\n'), ((75957, 75990), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (75970, 75990), False, 'import struct\n'), ((77353, 77386), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (77366, 77386), False, 'import struct\n'), ((86458, 86491), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (86471, 86491), False, 'import struct\n'), ((101204, 101250), 'struct.unpack', 'struct.unpack', (['typeFormat[type_var]', 'reply_msg'], {}), '(typeFormat[type_var], reply_msg)\n', (101217, 101250), False, 'import struct\n'), ((101650, 101687), 'struct.unpack', 'struct.unpack', (['"""16s"""', 'reply_msg[4:20]'], {}), "('16s', reply_msg[4:20])\n", (101663, 101687), False, 'import struct\n'), ((101859, 101897), 'struct.unpack', 'struct.unpack', (['"""16s"""', 'reply_msg[20:36]'], {}), "('16s', reply_msg[20:36])\n", (101872, 101897), False, 'import struct\n'), ((102252, 102285), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (102265, 102285), False, 'import struct\n'), ((102452, 102485), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (102465, 102485), False, 'import struct\n'), ((102650, 102683), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (102663, 102683), False, 'import struct\n'), ((102848, 102881), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (102861, 102881), False, 'import struct\n'), ((103046, 103079), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (103059, 103079), False, 'import struct\n'), ((103244, 103277), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 
'reply_msg'], {}), "('BBHfB', reply_msg)\n", (103257, 103277), False, 'import struct\n'), ((103442, 103475), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (103455, 103475), False, 'import struct\n'), ((103669, 103702), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (103682, 103702), False, 'import struct\n'), ((103896, 103929), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (103909, 103929), False, 'import struct\n'), ((104123, 104156), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (104136, 104156), False, 'import struct\n'), ((104350, 104383), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (104363, 104383), False, 'import struct\n'), ((104579, 104612), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (104592, 104612), False, 'import struct\n'), ((104808, 104841), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (104821, 104841), False, 'import struct\n'), ((105037, 105070), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (105050, 105070), False, 'import struct\n'), ((105266, 105299), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (105279, 105299), False, 'import struct\n'), ((105464, 105497), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (105477, 105497), False, 'import struct\n'), ((105662, 105695), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (105675, 105695), False, 'import struct\n'), ((105860, 105893), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (105873, 105893), False, 'import 
struct\n'), ((106058, 106091), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (106071, 106091), False, 'import struct\n'), ((106262, 106295), 'struct.unpack', 'struct.unpack', (['"""BBHHB"""', 'reply_msg'], {}), "('BBHHB', reply_msg)\n", (106275, 106295), False, 'import struct\n'), ((106468, 106501), 'struct.unpack', 'struct.unpack', (['"""BBHHB"""', 'reply_msg'], {}), "('BBHHB', reply_msg)\n", (106481, 106501), False, 'import struct\n'), ((106674, 106707), 'struct.unpack', 'struct.unpack', (['"""BBHHB"""', 'reply_msg'], {}), "('BBHHB', reply_msg)\n", (106687, 106707), False, 'import struct\n'), ((106884, 106917), 'struct.unpack', 'struct.unpack', (['"""BBHHB"""', 'reply_msg'], {}), "('BBHHB', reply_msg)\n", (106897, 106917), False, 'import struct\n'), ((107761, 107794), 'struct.unpack', 'struct.unpack', (['"""BBHIB"""', 'reply_msg'], {}), "('BBHIB', reply_msg)\n", (107774, 107794), False, 'import struct\n'), ((110757, 110790), 'struct.unpack', 'struct.unpack', (['"""BBHIB"""', 'reply_msg'], {}), "('BBHIB', reply_msg)\n", (110770, 110790), False, 'import struct\n'), ((111290, 111323), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (111303, 111323), False, 'import struct\n'), ((111500, 111533), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (111513, 111533), False, 'import struct\n'), ((111714, 111747), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (111727, 111747), False, 'import struct\n'), ((111928, 111961), 'struct.unpack', 'struct.unpack', (['"""BBHHB"""', 'reply_msg'], {}), "('BBHHB', reply_msg)\n", (111941, 111961), False, 'import struct\n'), ((112138, 112171), 'struct.unpack', 'struct.unpack', (['"""BBHHB"""', 'reply_msg'], {}), "('BBHHB', reply_msg)\n", (112151, 112171), False, 'import struct\n'), ((112354, 112387), 'struct.unpack', 'struct.unpack', 
(['"""BBHHB"""', 'reply_msg'], {}), "('BBHHB', reply_msg)\n", (112367, 112387), False, 'import struct\n'), ((112576, 112609), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (112589, 112609), False, 'import struct\n'), ((112794, 112827), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (112807, 112827), False, 'import struct\n'), ((113004, 113037), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (113017, 113037), False, 'import struct\n'), ((113224, 113257), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (113237, 113257), False, 'import struct\n'), ((113438, 113471), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (113451, 113471), False, 'import struct\n'), ((113646, 113679), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (113659, 113679), False, 'import struct\n'), ((113844, 113877), 'struct.unpack', 'struct.unpack', (['"""BBHHB"""', 'reply_msg'], {}), "('BBHHB', reply_msg)\n", (113857, 113877), False, 'import struct\n'), ((114048, 114081), 'struct.unpack', 'struct.unpack', (['"""BBHHB"""', 'reply_msg'], {}), "('BBHHB', reply_msg)\n", (114061, 114081), False, 'import struct\n'), ((114255, 114303), 'struct.unpack', 'struct.unpack', (['"""BBHffffffffffffffffB"""', 'reply_msg'], {}), "('BBHffffffffffffffffB', reply_msg)\n", (114268, 114303), False, 'import struct\n'), ((114590, 114623), 'struct.unpack', 'struct.unpack', (['"""BBHHB"""', 'reply_msg'], {}), "('BBHHB', reply_msg)\n", (114603, 114623), False, 'import struct\n'), ((114930, 114963), 'struct.unpack', 'struct.unpack', (['"""BBHIB"""', 'reply_msg'], {}), "('BBHIB', reply_msg)\n", (114943, 114963), False, 'import struct\n'), ((115156, 115189), 'struct.unpack', 'struct.unpack', (['"""BBHIB"""', 'reply_msg'], {}), "('BBHIB', 
reply_msg)\n", (115169, 115189), False, 'import struct\n'), ((115378, 115411), 'struct.unpack', 'struct.unpack', (['"""BBHIB"""', 'reply_msg'], {}), "('BBHIB', reply_msg)\n", (115391, 115411), False, 'import struct\n'), ((115596, 115629), 'struct.unpack', 'struct.unpack', (['"""BBHHB"""', 'reply_msg'], {}), "('BBHHB', reply_msg)\n", (115609, 115629), False, 'import struct\n'), ((115774, 115807), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (115787, 115807), False, 'import struct\n'), ((115952, 115985), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (115965, 115985), False, 'import struct\n'), ((116130, 116163), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (116143, 116163), False, 'import struct\n'), ((116308, 116341), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (116321, 116341), False, 'import struct\n'), ((116501, 116534), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (116514, 116534), False, 'import struct\n'), ((196523, 196580), 'siriuspy.magnet.util.get_default_ramp_waveform', 'get_default_ramp_waveform', (['interval', 'nrpts', 'ti', 'fi', 'forms'], {}), '(interval, nrpts, ti, fi, forms)\n', (196548, 196580), False, 'from siriuspy.magnet.util import get_default_ramp_waveform\n'), ((197888, 197903), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (197898, 197903), False, 'import time\n'), ((197946, 197961), 'time.sleep', 'time.sleep', (['dly'], {}), '(dly)\n', (197956, 197961), False, 'import time\n'), ((198142, 198154), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (198152, 198154), True, 'import matplotlib.pyplot as plt\n'), ((209448, 209461), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (209458, 209461), False, 'import time\n'), ((209623, 209636), 'time.sleep', 'time.sleep', 
(['(5)'], {}), '(5)\n', (209633, 209636), False, 'import time\n'), ((209718, 209731), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (209728, 209731), False, 'import time\n'), ((210666, 210702), 'struct.unpack', 'struct.unpack', (['"""BBHffffB"""', 'reply_msg'], {}), "('BBHffffB', reply_msg)\n", (210679, 210702), False, 'import struct\n'), ((213327, 213340), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (213337, 213340), False, 'import time\n'), ((213511, 213524), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (213521, 213524), False, 'import time\n'), ((213605, 213618), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (213615, 213618), False, 'import time\n'), ((214706, 214719), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (214716, 214719), False, 'import time\n'), ((214809, 214822), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (214819, 214822), False, 'import time\n'), ((216003, 216016), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (216013, 216016), False, 'import time\n'), ((216159, 216172), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (216169, 216172), False, 'import time\n'), ((216331, 216344), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (216341, 216344), False, 'import time\n'), ((216778, 216791), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (216788, 216791), False, 'import time\n'), ((216935, 216948), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (216945, 216948), False, 'import time\n'), ((217106, 217119), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (217116, 217119), False, 'import time\n'), ((54010, 54025), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (54020, 54025), False, 'import time\n'), ((54296, 54311), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (54306, 54311), False, 'import time\n'), ((54560, 54575), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (54570, 54575), False, 'import time\n'), ((54829, 54844), 'time.sleep', 'time.sleep', (['(0.1)'], 
{}), '(0.1)\n', (54839, 54844), False, 'import time\n'), ((55073, 55088), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (55083, 55088), False, 'import time\n'), ((55317, 55332), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (55327, 55332), False, 'import time\n'), ((55561, 55576), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (55571, 55576), False, 'import time\n'), ((55807, 55822), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (55817, 55822), False, 'import time\n'), ((56077, 56092), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (56087, 56092), False, 'import time\n'), ((56344, 56359), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (56354, 56359), False, 'import time\n'), ((56617, 56632), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (56627, 56632), False, 'import time\n'), ((56884, 56899), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (56894, 56899), False, 'import time\n'), ((57157, 57172), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (57167, 57172), False, 'import time\n'), ((57418, 57433), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (57428, 57433), False, 'import time\n'), ((57679, 57694), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (57689, 57694), False, 'import time\n'), ((57931, 57946), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (57941, 57946), False, 'import time\n'), ((60286, 60300), 'numpy.float32', 'np.float32', (['(20)'], {}), '(20)\n', (60296, 60300), True, 'import numpy as np\n'), ((60337, 60350), 'numpy.float32', 'np.float32', (['(1)'], {}), '(1)\n', (60347, 60350), True, 'import numpy as np\n'), ((60387, 60400), 'numpy.float32', 'np.float32', (['(1)'], {}), '(1)\n', (60397, 60400), True, 'import numpy as np\n'), ((61932, 61947), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (61942, 61947), False, 'import time\n'), ((62001, 62016), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (62011, 62016), False, 
'import time\n'), ((62066, 62081), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (62076, 62081), False, 'import time\n'), ((62131, 62146), 'time.sleep', 'time.sleep', (['(1.5)'], {}), '(1.5)\n', (62141, 62146), False, 'import time\n'), ((76776, 76812), 'struct.unpack', 'struct.unpack', (['"""BBHffffB"""', 'reply_msg'], {}), "('BBHffffB', reply_msg)\n", (76789, 76812), False, 'import struct\n'), ((78172, 78208), 'struct.unpack', 'struct.unpack', (['"""BBHffffB"""', 'reply_msg'], {}), "('BBHffffB', reply_msg)\n", (78185, 78208), False, 'import struct\n'), ((79790, 79823), 'struct.unpack', 'struct.unpack', (['"""BBHfB"""', 'reply_msg'], {}), "('BBHfB', reply_msg)\n", (79803, 79823), False, 'import struct\n'), ((82684, 82697), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (82694, 82697), False, 'import csv\n'), ((84703, 84731), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (84713, 84731), False, 'import csv\n'), ((90091, 90108), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (90101, 90108), False, 'import time\n'), ((100152, 100165), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (100162, 100165), False, 'import csv\n'), ((128091, 128127), 'serial.Serial', 'serial.Serial', (['port', 'baud'], {'timeout': '(1)'}), '(port, baud, timeout=1)\n', (128104, 128127), False, 'import serial\n'), ((138194, 138208), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (138204, 138208), False, 'import time\n'), ((194755, 194768), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (194765, 194768), False, 'import csv\n'), ((195713, 195726), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (195723, 195726), False, 'import csv\n'), ((196763, 196791), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '""";"""'}), "(f, delimiter=';')\n", (196773, 196791), False, 'import csv\n'), ((197020, 197033), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (197030, 197033), False, 'import csv\n'), ((200531, 200559), 'csv.writer', 
'csv.writer', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (200541, 200559), False, 'import csv\n'), ((200831, 200844), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (200841, 200844), False, 'import csv\n'), ((201842, 201855), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (201852, 201855), False, 'import csv\n'), ((209295, 209308), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (209305, 209308), False, 'import time\n'), ((34276, 34299), 'struct.pack', 'struct.pack', (['"""f"""', 'value'], {}), "('f', value)\n", (34287, 34299), False, 'import struct\n'), ((60565, 60578), 'numpy.float32', 'np.float32', (['(0)'], {}), '(0)\n', (60575, 60578), True, 'import numpy as np\n'), ((60615, 60636), 'numpy.float32', 'np.float32', (['(6.0 / 5.0)'], {}), '(6.0 / 5.0)\n', (60625, 60636), True, 'import numpy as np\n'), ((60671, 60692), 'numpy.float32', 'np.float32', (['(6.0 / 5.0)'], {}), '(6.0 / 5.0)\n', (60681, 60692), True, 'import numpy as np\n'), ((120611, 120639), 'struct.pack', 'struct.pack', (['""">H"""', 'block_idx'], {}), "('>H', block_idx)\n", (120622, 120639), False, 'import struct\n'), ((121233, 121261), 'struct.pack', 'struct.pack', (['""">H"""', 'block_idx'], {}), "('>H', block_idx)\n", (121244, 121261), False, 'import struct\n'), ((121772, 121809), 'struct.unpack', 'struct.unpack', (['"""f"""', 'recv_msg[k:k + 4]'], {}), "('f', recv_msg[k:k + 4])\n", (121785, 121809), False, 'import struct\n'), ((121885, 121905), 'struct.pack', 'struct.pack', (['""">H"""', '(0)'], {}), "('>H', 0)\n", (121896, 121905), False, 'import struct\n'), ((122600, 122628), 'struct.pack', 'struct.pack', (['""">H"""', 'block_idx'], {}), "('>H', block_idx)\n", (122611, 122628), False, 'import struct\n'), ((123230, 123258), 'struct.pack', 'struct.pack', (['""">H"""', 'block_idx'], {}), "('>H', block_idx)\n", (123241, 123258), False, 'import struct\n'), ((123774, 123811), 'struct.unpack', 'struct.unpack', (['"""f"""', 'recv_msg[k:k + 4]'], {}), "('f', recv_msg[k:k + 
4])\n", (123787, 123811), False, 'import struct\n'), ((123904, 123932), 'struct.pack', 'struct.pack', (['""">H"""', 'block_idx'], {}), "('>H', block_idx)\n", (123915, 123932), False, 'import struct\n'), ((124533, 124570), 'struct.unpack', 'struct.unpack', (['"""f"""', 'recv_msg[k:k + 4]'], {}), "('f', recv_msg[k:k + 4])\n", (124546, 124570), False, 'import struct\n'), ((124972, 124999), 'struct.pack', 'struct.pack', (['""">H"""', 'block_id'], {}), "('>H', block_id)\n", (124983, 124999), False, 'import struct\n'), ((125635, 125672), 'struct.unpack', 'struct.unpack', (['"""f"""', 'recv_msg[k:k + 4]'], {}), "('f', recv_msg[k:k + 4])\n", (125648, 125672), False, 'import struct\n'), ((125775, 125802), 'struct.pack', 'struct.pack', (['""">H"""', 'block_id'], {}), "('>H', block_id)\n", (125786, 125802), False, 'import struct\n'), ((128473, 128498), 'struct.pack', 'struct.pack', (['"""B"""', 'address'], {}), "('B', address)\n", (128484, 128498), False, 'import struct\n'), ((132213, 132227), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (132223, 132227), False, 'import time\n'), ((133450, 133464), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (133460, 133464), False, 'import time\n'), ((142148, 142162), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (142158, 142162), False, 'import time\n'), ((145538, 145552), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (145548, 145552), False, 'import time\n'), ((155491, 155505), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (155501, 155505), False, 'import time\n'), ((159636, 159650), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (159646, 159650), False, 'import time\n'), ((166918, 166932), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (166928, 166932), False, 'import time\n'), ((171657, 171671), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (171667, 171671), False, 'import time\n'), ((178404, 178418), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (178414, 178418), False, 'import 
time\n'), ((185441, 185455), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (185451, 185455), False, 'import time\n'), ((187216, 187230), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (187226, 187230), False, 'import time\n'), ((189443, 189457), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (189453, 189457), False, 'import time\n'), ((191974, 191988), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (191984, 191988), False, 'import time\n'), ((194368, 194382), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (194378, 194382), False, 'import time\n'), ((197567, 197581), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (197577, 197581), False, 'import time\n'), ((36010, 36044), 'struct.unpack', 'struct.unpack', (['"""H"""', 'val_b[i:i + 2]'], {}), "('H', val_b[i:i + 2])\n", (36023, 36044), False, 'import struct\n'), ((60859, 60872), 'numpy.float32', 'np.float32', (['(0)'], {}), '(0)\n', (60869, 60872), True, 'import numpy as np\n'), ((60909, 60922), 'numpy.float32', 'np.float32', (['(1)'], {}), '(1)\n', (60919, 60922), True, 'import numpy as np\n'), ((60959, 60972), 'numpy.float32', 'np.float32', (['(1)'], {}), '(1)\n', (60969, 60972), True, 'import numpy as np\n'), ((84085, 84098), 'math.isnan', 'math.isnan', (['p'], {}), '(p)\n', (84095, 84098), False, 'import math\n'), ((122439, 122476), 'struct.unpack', 'struct.unpack', (['"""f"""', 'recv_msg[k:k + 4]'], {}), "('f', recv_msg[k:k + 4])\n", (122452, 122476), False, 'import struct\n'), ((61129, 61142), 'numpy.float32', 'np.float32', (['(5)'], {}), '(5)\n', (61139, 61142), True, 'import numpy as np\n'), ((61179, 61192), 'numpy.float32', 'np.float32', (['(1)'], {}), '(1)\n', (61189, 61192), True, 'import numpy as np\n'), ((61229, 61242), 'numpy.float32', 'np.float32', (['(1)'], {}), '(1)\n', (61239, 61242), True, 'import numpy as np\n'), ((205777, 205797), 'os.listdir', 'os.listdir', (['file_dir'], {}), '(file_dir)\n', (205787, 205797), False, 'import os\n'), ((206922, 206942), 'os.listdir', 
'os.listdir', (['file_dir'], {}), '(file_dir)\n', (206932, 206942), False, 'import os\n'), ((61411, 61424), 'numpy.float32', 'np.float32', (['(1)'], {}), '(1)\n', (61421, 61424), True, 'import numpy as np\n'), ((61461, 61474), 'numpy.float32', 'np.float32', (['(1)'], {}), '(1)\n', (61471, 61474), True, 'import numpy as np\n'), ((61511, 61524), 'numpy.float32', 'np.float32', (['(1)'], {}), '(1)\n', (61521, 61524), True, 'import numpy as np\n'), ((207630, 207650), 'os.listdir', 'os.listdir', (['file_dir'], {}), '(file_dir)\n', (207640, 207650), False, 'import os\n'), ((205837, 205866), 'os.path.join', 'os.path.join', (['file_dir', 'entry'], {}), '(file_dir, entry)\n', (205849, 205866), False, 'import os\n'), ((206982, 207011), 'os.path.join', 'os.path.join', (['file_dir', 'entry'], {}), '(file_dir, entry)\n', (206994, 207011), False, 'import os\n'), ((207690, 207719), 'os.path.join', 'os.path.join', (['file_dir', 'entry'], {}), '(file_dir, entry)\n', (207702, 207719), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import irispy.iris_tools as iris_tools
import numpy as np
import numpy.testing as np_test
# 2x3 sample detector frame (floats, incl. negatives) shared by the
# DN <-> photon conversion tests; values are test fixtures — do not change.
source_data = np.array([[ 0.563, 1.132, -1.343],
             [-0.719, 1.441, 1.566]])
# 2x3 integer sample frame used by the conversion and
# intensity-uncertainty tests; values are test fixtures — do not change.
source_data1 = np.array([[1, 2, 3],
             [4, 5, 6]])
def test_convert_DN_to_photons_NUV():
    """Converting the sample DN frame for detector 'NUV' gives the
    expected photon counts."""
    expected = np.array([[10.134, 20.376, -24.174],
                         [-12.942, 25.938, 28.188]])
    actual = iris_tools.convert_DN_to_photons(source_data, 'NUV')
    np_test.assert_allclose(actual, expected)
def test_convert_DN_to_photons_FUV():
    """Converting the sample DN frame for detector 'FUV' gives the
    expected photon counts."""
    expected = np.array([[2.252, 4.528, -5.372],
                         [-2.876, 5.764, 6.264]])
    actual = iris_tools.convert_DN_to_photons(source_data, 'FUV')
    np_test.assert_allclose(actual, expected)
def test_convert_DN_to_photons_SJI():
    """Converting the integer sample frame for detector 'SJI' gives the
    expected photon counts."""
    expected = np.array([[18, 36, 54],
                         [72, 90, 108]])
    actual = iris_tools.convert_DN_to_photons(source_data1, 'SJI')
    np_test.assert_allclose(actual, expected)
def test_convert_photons_to_DN_NUV():
    """Converting the integer photon frame back to DN for 'NUV' gives the
    expected values."""
    expected = np.array([[0.05555556, 0.11111111, 0.16666667],
                         [0.22222222, 0.27777778, 0.33333333]])
    actual = iris_tools.convert_photons_to_DN(source_data1, 'NUV')
    np_test.assert_allclose(actual, expected)
def test_convert_photons_to_DN_FUV():
    """Converting the integer photon frame back to DN for 'FUV' gives the
    expected values."""
    expected = np.array([[0.25, 0.5, 0.75],
                         [1.0, 1.25, 1.5]])
    actual = iris_tools.convert_photons_to_DN(source_data1, 'FUV')
    np_test.assert_allclose(actual, expected)
def test_convert_photons_to_DN_SJI():
    """Converting the integer photon frame back to DN for 'SJI' gives the
    expected values."""
    expected = np.array([[0.05555556, 0.11111111, 0.16666667],
                         [0.22222222, 0.27777778, 0.33333333]])
    actual = iris_tools.convert_photons_to_DN(source_data1, 'SJI')
    np_test.assert_allclose(actual, expected)
def test_calculate_intensity_fractional_uncertainty_photons_NUV():
    """Fractional uncertainty of the integer sample frame in 'photons'
    units for detector 'NUV'."""
    expected = np.array([[21.62313576, 10.82312339, 7.223111057],
                         [5.423098745, 4.34308646, 3.623074202]])
    actual = iris_tools.calculate_intensity_fractional_uncertainty(
        source_data1, 'photons', 'NUV')
    np_test.assert_allclose(expected, actual)
def test_calculate_intensity_fractional_uncertainty_photons_FUV():
    """Fractional uncertainty of the integer sample frame in 'photons'
    units for detector 'FUV'."""
    expected = np.array([[12.44025723, 6.240192305, 4.173461127],
                         [3.140063694, 2.52, 2.106603375]])
    actual = iris_tools.calculate_intensity_fractional_uncertainty(
        source_data1, 'photons', 'FUV')
    np_test.assert_allclose(expected, actual)
def test_calculate_intensity_fractional_uncertainty_photons_SJI():
    """Fractional uncertainty of the integer sample frame in 'photons'
    units for detector 'SJI'."""
    expected = np.array([[21.62313576, 10.82312339, 7.223111057],
                         [5.423098745, 4.34308646, 3.623074202]])
    actual = iris_tools.calculate_intensity_fractional_uncertainty(
        source_data1, 'photons', 'SJI')
    np_test.assert_allclose(expected, actual)
def test_calculate_intensity_fractional_uncertainty_data_not_recognised():
    """An unrecognised data unit (None) must raise ValueError.

    Uses the context-manager form of ``pytest.raises``; the original
    asserted the ExceptionInfo returned by the legacy callable form,
    which is always truthy and therefore a no-op assertion.
    """
    with pytest.raises(ValueError):
        iris_tools.calculate_intensity_fractional_uncertainty(
            source_data1, None, 'FUV')
def test_get_iris_response_response_version():
    """An unsupported response_version must raise ValueError.

    Rewritten from the legacy callable form of ``pytest.raises`` (whose
    return value was pointlessly asserted) to the context-manager form.
    """
    with pytest.raises(ValueError):
        iris_tools.get_iris_response(response_version=4)
def test_get_iris_response_not_equal_to_one():
    """pre_launch=True combined with response_version=3 must raise ValueError.

    Rewritten from the legacy callable form of ``pytest.raises`` (whose
    return value was pointlessly asserted) to the context-manager form.
    """
    with pytest.raises(ValueError):
        iris_tools.get_iris_response(pre_launch=True, response_version=3)
def test_get_iris_response_response_file():
    """A bogus response_file path must raise KeyError.

    Rewritten from the legacy callable form of ``pytest.raises`` (whose
    return value was pointlessly asserted) to the context-manager form.
    """
    with pytest.raises(KeyError):
        iris_tools.get_iris_response(response_file="hello.py")
# def test_get_iris_response():
# """
# """
# def test_gaussian1d_on_linear_bg():
# """
# """
# def test_calculate_orbital_wavelength_variation():
# """
# """
|
[
"irispy.iris_tools.calculate_intensity_fractional_uncertainty",
"pytest.raises",
"irispy.iris_tools.convert_DN_to_photons",
"numpy.array",
"irispy.iris_tools.convert_photons_to_DN",
"numpy.testing.assert_allclose"
] |
[((181, 239), 'numpy.array', 'np.array', (['[[0.563, 1.132, -1.343], [-0.719, 1.441, 1.566]]'], {}), '([[0.563, 1.132, -1.343], [-0.719, 1.441, 1.566]])\n', (189, 239), True, 'import numpy as np\n'), ((265, 297), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (273, 297), True, 'import numpy as np\n'), ((372, 436), 'numpy.array', 'np.array', (['[[10.134, 20.376, -24.174], [-12.942, 25.938, 28.188]]'], {}), '([[10.134, 20.376, -24.174], [-12.942, 25.938, 28.188]])\n', (380, 436), True, 'import numpy as np\n'), ((471, 523), 'irispy.iris_tools.convert_DN_to_photons', 'iris_tools.convert_DN_to_photons', (['source_data', '"""NUV"""'], {}), "(source_data, 'NUV')\n", (503, 523), True, 'import irispy.iris_tools as iris_tools\n'), ((526, 581), 'numpy.testing.assert_allclose', 'np_test.assert_allclose', (['photons_count', 'expected_output'], {}), '(photons_count, expected_output)\n', (549, 581), True, 'import numpy.testing as np_test\n'), ((651, 709), 'numpy.array', 'np.array', (['[[2.252, 4.528, -5.372], [-2.876, 5.764, 6.264]]'], {}), '([[2.252, 4.528, -5.372], [-2.876, 5.764, 6.264]])\n', (659, 709), True, 'import numpy as np\n'), ((744, 796), 'irispy.iris_tools.convert_DN_to_photons', 'iris_tools.convert_DN_to_photons', (['source_data', '"""FUV"""'], {}), "(source_data, 'FUV')\n", (776, 796), True, 'import irispy.iris_tools as iris_tools\n'), ((799, 854), 'numpy.testing.assert_allclose', 'np_test.assert_allclose', (['photons_count', 'expected_output'], {}), '(photons_count, expected_output)\n', (822, 854), True, 'import numpy.testing as np_test\n'), ((924, 963), 'numpy.array', 'np.array', (['[[18, 36, 54], [72, 90, 108]]'], {}), '([[18, 36, 54], [72, 90, 108]])\n', (932, 963), True, 'import numpy as np\n'), ((990, 1043), 'irispy.iris_tools.convert_DN_to_photons', 'iris_tools.convert_DN_to_photons', (['source_data1', '"""SJI"""'], {}), "(source_data1, 'SJI')\n", (1022, 1043), True, 'import irispy.iris_tools as iris_tools\n'), 
((1046, 1101), 'numpy.testing.assert_allclose', 'np_test.assert_allclose', (['photons_count', 'expected_output'], {}), '(photons_count, expected_output)\n', (1069, 1101), True, 'import numpy.testing as np_test\n'), ((1170, 1261), 'numpy.array', 'np.array', (['[[0.05555556, 0.11111111, 0.16666667], [0.22222222, 0.27777778, 0.33333333]]'], {}), '([[0.05555556, 0.11111111, 0.16666667], [0.22222222, 0.27777778, \n 0.33333333]])\n', (1178, 1261), True, 'import numpy as np\n'), ((1280, 1333), 'irispy.iris_tools.convert_photons_to_DN', 'iris_tools.convert_photons_to_DN', (['source_data1', '"""NUV"""'], {}), "(source_data1, 'NUV')\n", (1312, 1333), True, 'import irispy.iris_tools as iris_tools\n'), ((1336, 1380), 'numpy.testing.assert_allclose', 'np_test.assert_allclose', (['DN', 'expected_output'], {}), '(DN, expected_output)\n', (1359, 1380), True, 'import numpy.testing as np_test\n'), ((1450, 1497), 'numpy.array', 'np.array', (['[[0.25, 0.5, 0.75], [1.0, 1.25, 1.5]]'], {}), '([[0.25, 0.5, 0.75], [1.0, 1.25, 1.5]])\n', (1458, 1497), True, 'import numpy as np\n'), ((1528, 1581), 'irispy.iris_tools.convert_photons_to_DN', 'iris_tools.convert_photons_to_DN', (['source_data1', '"""FUV"""'], {}), "(source_data1, 'FUV')\n", (1560, 1581), True, 'import irispy.iris_tools as iris_tools\n'), ((1584, 1628), 'numpy.testing.assert_allclose', 'np_test.assert_allclose', (['DN', 'expected_output'], {}), '(DN, expected_output)\n', (1607, 1628), True, 'import numpy.testing as np_test\n'), ((1698, 1789), 'numpy.array', 'np.array', (['[[0.05555556, 0.11111111, 0.16666667], [0.22222222, 0.27777778, 0.33333333]]'], {}), '([[0.05555556, 0.11111111, 0.16666667], [0.22222222, 0.27777778, \n 0.33333333]])\n', (1706, 1789), True, 'import numpy as np\n'), ((1819, 1872), 'irispy.iris_tools.convert_photons_to_DN', 'iris_tools.convert_photons_to_DN', (['source_data1', '"""SJI"""'], {}), "(source_data1, 'SJI')\n", (1851, 1872), True, 'import irispy.iris_tools as iris_tools\n'), ((1875, 1930), 
'numpy.testing.assert_allclose', 'np_test.assert_allclose', (['photons_count', 'expected_output'], {}), '(photons_count, expected_output)\n', (1898, 1930), True, 'import numpy.testing as np_test\n'), ((2029, 2124), 'numpy.array', 'np.array', (['[[21.62313576, 10.82312339, 7.223111057], [5.423098745, 4.34308646, \n 3.623074202]]'], {}), '([[21.62313576, 10.82312339, 7.223111057], [5.423098745, 4.34308646,\n 3.623074202]])\n', (2037, 2124), True, 'import numpy as np\n'), ((2154, 2243), 'irispy.iris_tools.calculate_intensity_fractional_uncertainty', 'iris_tools.calculate_intensity_fractional_uncertainty', (['source_data1', '"""photons"""', '"""NUV"""'], {}), "(source_data1,\n 'photons', 'NUV')\n", (2207, 2243), True, 'import irispy.iris_tools as iris_tools\n'), ((2241, 2303), 'numpy.testing.assert_allclose', 'np_test.assert_allclose', (['expected_output', 'calculated_intensity'], {}), '(expected_output, calculated_intensity)\n', (2264, 2303), True, 'import numpy.testing as np_test\n'), ((2401, 2491), 'numpy.array', 'np.array', (['[[12.44025723, 6.240192305, 4.173461127], [3.140063694, 2.52, 2.106603375]]'], {}), '([[12.44025723, 6.240192305, 4.173461127], [3.140063694, 2.52, \n 2.106603375]])\n', (2409, 2491), True, 'import numpy as np\n'), ((2544, 2633), 'irispy.iris_tools.calculate_intensity_fractional_uncertainty', 'iris_tools.calculate_intensity_fractional_uncertainty', (['source_data1', '"""photons"""', '"""FUV"""'], {}), "(source_data1,\n 'photons', 'FUV')\n", (2597, 2633), True, 'import irispy.iris_tools as iris_tools\n'), ((2631, 2693), 'numpy.testing.assert_allclose', 'np_test.assert_allclose', (['expected_output', 'calculated_intensity'], {}), '(expected_output, calculated_intensity)\n', (2654, 2693), True, 'import numpy.testing as np_test\n'), ((2791, 2886), 'numpy.array', 'np.array', (['[[21.62313576, 10.82312339, 7.223111057], [5.423098745, 4.34308646, \n 3.623074202]]'], {}), '([[21.62313576, 10.82312339, 7.223111057], [5.423098745, 4.34308646,\n 
3.623074202]])\n', (2799, 2886), True, 'import numpy as np\n'), ((2916, 3005), 'irispy.iris_tools.calculate_intensity_fractional_uncertainty', 'iris_tools.calculate_intensity_fractional_uncertainty', (['source_data1', '"""photons"""', '"""SJI"""'], {}), "(source_data1,\n 'photons', 'SJI')\n", (2969, 3005), True, 'import irispy.iris_tools as iris_tools\n'), ((3003, 3065), 'numpy.testing.assert_allclose', 'np_test.assert_allclose', (['expected_output', 'calculated_intensity'], {}), '(expected_output, calculated_intensity)\n', (3026, 3065), True, 'import numpy.testing as np_test\n'), ((3161, 3273), 'pytest.raises', 'pytest.raises', (['ValueError', 'iris_tools.calculate_intensity_fractional_uncertainty', 'source_data1', 'None', '"""FUV"""'], {}), "(ValueError, iris_tools.\n calculate_intensity_fractional_uncertainty, source_data1, None, 'FUV')\n", (3174, 3273), False, 'import pytest\n'), ((3336, 3411), 'pytest.raises', 'pytest.raises', (['ValueError', 'iris_tools.get_iris_response'], {'response_version': '(4)'}), '(ValueError, iris_tools.get_iris_response, response_version=4)\n', (3349, 3411), False, 'import pytest\n'), ((3479, 3575), 'pytest.raises', 'pytest.raises', (['ValueError', 'iris_tools.get_iris_response'], {'pre_launch': '(True)', 'response_version': '(3)'}), '(ValueError, iris_tools.get_iris_response, pre_launch=True,\n response_version=3)\n', (3492, 3575), False, 'import pytest\n'), ((3636, 3715), 'pytest.raises', 'pytest.raises', (['KeyError', 'iris_tools.get_iris_response'], {'response_file': '"""hello.py"""'}), "(KeyError, iris_tools.get_iris_response, response_file='hello.py')\n", (3649, 3715), False, 'import pytest\n')]
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from marionette import expected
from marionette import Wait
from marionette.by import By
from gaiatest.apps.base import Base
class SettingsForm(Base):
    """Contacts app settings panel: ordering, import/export of contacts.

    Fixes a stray semicolon on ``_import_settings_header`` and factors the
    repeated wait-until-present/displayed-then-tap sequences into private
    helpers; public method behavior is unchanged.
    """

    _settings_view_locator = (By.ID, 'view-settings')
    _loading_overlay_locator = (By.ID, 'loading-overlay')
    _settings_close_button_locator = (By.ID, 'settings-close')
    _order_by_last_name_locator = (By.CSS_SELECTOR, 'p[data-l10n-id="contactsOrderBy"]')
    _order_by_last_name_switch_locator = (By.CSS_SELECTOR, 'input[name="order.lastname"]')
    _import_from_sim_button_locator = (By.CSS_SELECTOR, "li[id*='import-sim-option'] button")
    _import_from_sdcard_locator = (By.CSS_SELECTOR, 'button.icon-sd')
    _import_from_gmail_button_locator = (By.CSS_SELECTOR, 'button.icon-gmail')
    _import_from_windows_live_button_locator = (By.CSS_SELECTOR, 'button.icon-live')
    _import_settings_header = (By.ID, 'import-settings-header')
    _export_to_sd_button_locator = (By.CSS_SELECTOR, 'button[data-l10n-id="memoryCard"]')
    _import_contacts_locator = (By.CSS_SELECTOR, 'button[data-l10n-id="importContactsButton"]')
    _export_contacts_locator = (By.CSS_SELECTOR, 'button[data-l10n-id="exportContactsButton"]')
    _gmail_contacts_imported_locator = (By.CSS_SELECTOR, '.icon.icon-gmail > p > span')
    _import_settings_locator = (By.ID, 'import-settings')
    _select_contacts_locator = (By.ID, 'selectable-form')

    def __init__(self, marionette):
        Base.__init__(self, marionette)
        # Wait until the settings view has finished sliding into place.
        view = self.marionette.find_element(*self._settings_view_locator)
        Wait(self.marionette).until(lambda m: view.location['y'] == 0)

    def _wait_for_displayed(self, locator):
        """Wait until the element at *locator* is present and displayed; return it."""
        element = Wait(self.marionette).until(
            expected.element_present(*locator))
        Wait(self.marionette).until(expected.element_displayed(element))
        return element

    def _wait_for_import_settings_panel(self):
        """Block until the import/export settings panel has slid fully into view."""
        panel = self.marionette.find_element(*self._import_settings_locator)
        Wait(self.marionette).until(lambda m: panel.location['x'] == 0)

    def _wait_for_status_message(self):
        """Wait for the contacts status banner to appear and then disappear."""
        from gaiatest.apps.contacts.app import Contacts
        status_message = self._wait_for_displayed(Contacts._status_message_locator)
        Wait(self.marionette).until(expected.element_not_displayed(status_message))

    def tap_order_by_last_name(self):
        """Toggle the "order by last name" setting."""
        # Original code uses click() here (not tap()); preserved as-is.
        self._wait_for_displayed(self._order_by_last_name_locator).click()

    @property
    def order_by_last_name(self):
        """Whether the "order by last name" switch is currently selected."""
        return self.marionette.find_element(*self._order_by_last_name_switch_locator).is_selected()

    def tap_import_contacts(self):
        """Open the import-contacts panel and wait for it to appear."""
        self._wait_for_displayed(self._import_contacts_locator).tap()
        self._wait_for_import_settings_panel()

    def tap_export_contacts(self):
        """Open the export-contacts panel and wait for it to appear."""
        self._wait_for_displayed(self._export_contacts_locator).tap()
        self._wait_for_import_settings_panel()

    def tap_import_from_sim(self):
        """Start a SIM import and wait for the status banner to clear."""
        self._wait_for_displayed(self._import_from_sim_button_locator).tap()
        self._wait_for_status_message()

    @property
    def gmail_imported_contacts(self):
        """Text of the Gmail imported-contacts counter."""
        return self.marionette.find_element(*self._gmail_contacts_imported_locator).text

    def tap_import_from_gmail(self):
        """Start a Gmail import; returns the Gmail login page object."""
        self._wait_for_displayed(self._import_from_gmail_button_locator).tap()
        from gaiatest.apps.contacts.regions.gmail import GmailLogin
        return GmailLogin(self.marionette)

    def tap_import_from_sdcard(self):
        """Start an SD-card import and wait for the status banner to clear."""
        self._wait_for_displayed(self._import_from_sdcard_locator).tap()
        self._wait_for_status_message()

    def tap_export_to_sd(self):
        """Start an SD-card export and wait for the contact-selection form."""
        self._wait_for_displayed(self._export_to_sd_button_locator).tap()
        select_contacts = self.marionette.find_element(*self._select_contacts_locator)
        Wait(self.marionette).until(lambda m: select_contacts.location['y'] == 0)

    def tap_done(self):
        """Close the settings panel; returns the Contacts app page object."""
        close = self.marionette.find_element(*self._settings_close_button_locator)
        close.tap()
        Wait(self.marionette).until(expected.element_not_displayed(close))
        from gaiatest.apps.contacts.app import Contacts
        return Contacts(self.marionette)

    def tap_back_from_import_contacts(self):
        """Dismiss the import-contacts panel via its header."""
        header = self.marionette.find_element(*self._import_settings_header)
        # TODO: remove tap with coordinates after Bug 1061698 is fixed
        header.tap(25, 25)
        Wait(self.marionette).until(expected.element_not_displayed(header))
|
[
"marionette.Wait",
"gaiatest.apps.contacts.app.Contacts",
"gaiatest.apps.contacts.regions.gmail.GmailLogin",
"marionette.expected.element_present",
"marionette.expected.element_not_displayed",
"gaiatest.apps.base.Base.__init__",
"marionette.expected.element_displayed"
] |
[((1633, 1664), 'gaiatest.apps.base.Base.__init__', 'Base.__init__', (['self', 'marionette'], {}), '(self, marionette)\n', (1646, 1664), False, 'from gaiatest.apps.base import Base\n'), ((4244, 4271), 'gaiatest.apps.contacts.regions.gmail.GmailLogin', 'GmailLogin', (['self.marionette'], {}), '(self.marionette)\n', (4254, 4271), False, 'from gaiatest.apps.contacts.regions.gmail import GmailLogin\n'), ((5625, 5650), 'gaiatest.apps.contacts.app.Contacts', 'Contacts', (['self.marionette'], {}), '(self.marionette)\n', (5633, 5650), False, 'from gaiatest.apps.contacts.app import Contacts\n'), ((1910, 1969), 'marionette.expected.element_present', 'expected.element_present', (['*self._order_by_last_name_locator'], {}), '(*self._order_by_last_name_locator)\n', (1934, 1969), False, 'from marionette import expected\n'), ((2007, 2044), 'marionette.expected.element_displayed', 'expected.element_displayed', (['last_name'], {}), '(last_name)\n', (2033, 2044), False, 'from marionette import expected\n'), ((2324, 2380), 'marionette.expected.element_present', 'expected.element_present', (['*self._import_contacts_locator'], {}), '(*self._import_contacts_locator)\n', (2348, 2380), False, 'from marionette import expected\n'), ((2418, 2461), 'marionette.expected.element_displayed', 'expected.element_displayed', (['import_contacts'], {}), '(import_contacts)\n', (2444, 2461), False, 'from marionette import expected\n'), ((2765, 2821), 'marionette.expected.element_present', 'expected.element_present', (['*self._export_contacts_locator'], {}), '(*self._export_contacts_locator)\n', (2789, 2821), False, 'from marionette import expected\n'), ((2859, 2902), 'marionette.expected.element_displayed', 'expected.element_displayed', (['export_contacts'], {}), '(export_contacts)\n', (2885, 2902), False, 'from marionette import expected\n'), ((3206, 3269), 'marionette.expected.element_present', 'expected.element_present', (['*self._import_from_sim_button_locator'], {}), 
'(*self._import_from_sim_button_locator)\n', (3230, 3269), False, 'from marionette import expected\n'), ((3307, 3350), 'marionette.expected.element_displayed', 'expected.element_displayed', (['import_from_sim'], {}), '(import_from_sim)\n', (3333, 3350), False, 'from marionette import expected\n'), ((3504, 3563), 'marionette.expected.element_present', 'expected.element_present', (['*Contacts._status_message_locator'], {}), '(*Contacts._status_message_locator)\n', (3528, 3563), False, 'from marionette import expected\n'), ((3601, 3643), 'marionette.expected.element_displayed', 'expected.element_displayed', (['status_message'], {}), '(status_message)\n', (3627, 3643), False, 'from marionette import expected\n'), ((3681, 3727), 'marionette.expected.element_not_displayed', 'expected.element_not_displayed', (['status_message'], {}), '(status_message)\n', (3711, 3727), False, 'from marionette import expected\n'), ((3979, 4044), 'marionette.expected.element_present', 'expected.element_present', (['*self._import_from_gmail_button_locator'], {}), '(*self._import_from_gmail_button_locator)\n', (4003, 4044), False, 'from marionette import expected\n'), ((4082, 4127), 'marionette.expected.element_displayed', 'expected.element_displayed', (['import_from_gmail'], {}), '(import_from_gmail)\n', (4108, 4127), False, 'from marionette import expected\n'), ((4381, 4440), 'marionette.expected.element_present', 'expected.element_present', (['*self._import_from_sdcard_locator'], {}), '(*self._import_from_sdcard_locator)\n', (4405, 4440), False, 'from marionette import expected\n'), ((4478, 4524), 'marionette.expected.element_displayed', 'expected.element_displayed', (['import_from_sdcard'], {}), '(import_from_sdcard)\n', (4504, 4524), False, 'from marionette import expected\n'), ((4681, 4740), 'marionette.expected.element_present', 'expected.element_present', (['*Contacts._status_message_locator'], {}), '(*Contacts._status_message_locator)\n', (4705, 4740), False, 'from marionette import 
expected\n'), ((4778, 4820), 'marionette.expected.element_displayed', 'expected.element_displayed', (['status_message'], {}), '(status_message)\n', (4804, 4820), False, 'from marionette import expected\n'), ((4858, 4904), 'marionette.expected.element_not_displayed', 'expected.element_not_displayed', (['status_message'], {}), '(status_message)\n', (4888, 4904), False, 'from marionette import expected\n'), ((5007, 5067), 'marionette.expected.element_present', 'expected.element_present', (['*self._export_to_sd_button_locator'], {}), '(*self._export_to_sd_button_locator)\n', (5031, 5067), False, 'from marionette import expected\n'), ((5105, 5149), 'marionette.expected.element_displayed', 'expected.element_displayed', (['export_to_sdcard'], {}), '(export_to_sdcard)\n', (5131, 5149), False, 'from marionette import expected\n'), ((5515, 5552), 'marionette.expected.element_not_displayed', 'expected.element_not_displayed', (['close'], {}), '(close)\n', (5545, 5552), False, 'from marionette import expected\n'), ((5908, 5946), 'marionette.expected.element_not_displayed', 'expected.element_not_displayed', (['header'], {}), '(header)\n', (5938, 5946), False, 'from marionette import expected\n'), ((1747, 1768), 'marionette.Wait', 'Wait', (['self.marionette'], {}), '(self.marionette)\n', (1751, 1768), False, 'from marionette import Wait\n'), ((1869, 1890), 'marionette.Wait', 'Wait', (['self.marionette'], {}), '(self.marionette)\n', (1873, 1890), False, 'from marionette import Wait\n'), ((1979, 2000), 'marionette.Wait', 'Wait', (['self.marionette'], {}), '(self.marionette)\n', (1983, 2000), False, 'from marionette import Wait\n'), ((2283, 2304), 'marionette.Wait', 'Wait', (['self.marionette'], {}), '(self.marionette)\n', (2287, 2304), False, 'from marionette import Wait\n'), ((2390, 2411), 'marionette.Wait', 'Wait', (['self.marionette'], {}), '(self.marionette)\n', (2394, 2411), False, 'from marionette import Wait\n'), ((2588, 2609), 'marionette.Wait', 'Wait', 
(['self.marionette'], {}), '(self.marionette)\n', (2592, 2609), False, 'from marionette import Wait\n'), ((2724, 2745), 'marionette.Wait', 'Wait', (['self.marionette'], {}), '(self.marionette)\n', (2728, 2745), False, 'from marionette import Wait\n'), ((2831, 2852), 'marionette.Wait', 'Wait', (['self.marionette'], {}), '(self.marionette)\n', (2835, 2852), False, 'from marionette import Wait\n'), ((3029, 3050), 'marionette.Wait', 'Wait', (['self.marionette'], {}), '(self.marionette)\n', (3033, 3050), False, 'from marionette import Wait\n'), ((3165, 3186), 'marionette.Wait', 'Wait', (['self.marionette'], {}), '(self.marionette)\n', (3169, 3186), False, 'from marionette import Wait\n'), ((3279, 3300), 'marionette.Wait', 'Wait', (['self.marionette'], {}), '(self.marionette)\n', (3283, 3300), False, 'from marionette import Wait\n'), ((3463, 3484), 'marionette.Wait', 'Wait', (['self.marionette'], {}), '(self.marionette)\n', (3467, 3484), False, 'from marionette import Wait\n'), ((3573, 3594), 'marionette.Wait', 'Wait', (['self.marionette'], {}), '(self.marionette)\n', (3577, 3594), False, 'from marionette import Wait\n'), ((3653, 3674), 'marionette.Wait', 'Wait', (['self.marionette'], {}), '(self.marionette)\n', (3657, 3674), False, 'from marionette import Wait\n'), ((3938, 3959), 'marionette.Wait', 'Wait', (['self.marionette'], {}), '(self.marionette)\n', (3942, 3959), False, 'from marionette import Wait\n'), ((4054, 4075), 'marionette.Wait', 'Wait', (['self.marionette'], {}), '(self.marionette)\n', (4058, 4075), False, 'from marionette import Wait\n'), ((4340, 4361), 'marionette.Wait', 'Wait', (['self.marionette'], {}), '(self.marionette)\n', (4344, 4361), False, 'from marionette import Wait\n'), ((4450, 4471), 'marionette.Wait', 'Wait', (['self.marionette'], {}), '(self.marionette)\n', (4454, 4471), False, 'from marionette import Wait\n'), ((4640, 4661), 'marionette.Wait', 'Wait', (['self.marionette'], {}), '(self.marionette)\n', (4644, 4661), False, 'from marionette 
import Wait\n'), ((4750, 4771), 'marionette.Wait', 'Wait', (['self.marionette'], {}), '(self.marionette)\n', (4754, 4771), False, 'from marionette import Wait\n'), ((4830, 4851), 'marionette.Wait', 'Wait', (['self.marionette'], {}), '(self.marionette)\n', (4834, 4851), False, 'from marionette import Wait\n'), ((4966, 4987), 'marionette.Wait', 'Wait', (['self.marionette'], {}), '(self.marionette)\n', (4970, 4987), False, 'from marionette import Wait\n'), ((5077, 5098), 'marionette.Wait', 'Wait', (['self.marionette'], {}), '(self.marionette)\n', (5081, 5098), False, 'from marionette import Wait\n'), ((5277, 5298), 'marionette.Wait', 'Wait', (['self.marionette'], {}), '(self.marionette)\n', (5281, 5298), False, 'from marionette import Wait\n'), ((5487, 5508), 'marionette.Wait', 'Wait', (['self.marionette'], {}), '(self.marionette)\n', (5491, 5508), False, 'from marionette import Wait\n'), ((5880, 5901), 'marionette.Wait', 'Wait', (['self.marionette'], {}), '(self.marionette)\n', (5884, 5901), False, 'from marionette import Wait\n')]
|
import random
import pytz
import json
from django.dispatch import receiver
from django.db.models.signals import pre_save
from django_celery_beat.models import CrontabSchedule, PeriodicTask
from .models import Agent
# Map three-letter day abbreviations to crontab day_of_week numbers
# (Sunday == 0), matching the keys used in Agent.backup_schedule.
days = {
    "sun": 0,
    "mon": 1,
    "tue": 2,
    "wed": 3,
    "thu": 4,
    "fri": 5,
    "sat": 6,
}
@receiver(pre_save, sender=Agent)
def handle_schedules(sender, instance: Agent, **kwargs):
    """
    Fires every time an Agent is about to be saved.

    When an existing Agent's backup_schedule has changed, deletes the
    periodic tasks associated with the old schedule and recreates them
    from the new schedule; then syncs the enabled flag of all of this
    agent's backup tasks with instance.backups_enabled.

    NOTE(review): on first save (instance.pk is None) this handler does
    nothing — tasks appear to be created only on a later save that
    changes the schedule. Confirm that is intentional.
    """
    if instance.pk is not None:
        # Compare the persisted schedule against the incoming one.
        schedule = sender.objects.get(pk=instance.pk).backup_schedule
        if not schedule == instance.backup_schedule:
            # schedule has been changed
            # Drop every task created for the previous schedule before
            # recreating; task names are keyed on hostname + pk.
            tasks = PeriodicTask.objects.filter(
                name__startswith=f"{instance.hostname}-{instance.pk}-backup"
            )
            if tasks:
                tasks.delete()
            # One periodic task per weekday that has at least one hour set.
            for day, times in instance.backup_schedule.items():
                day_of_week = days.get(day)
                hours = ",".join(str(x) for x in times)
                if hours:
                    schedule, _ = CrontabSchedule.objects.get_or_create(
                        # Random minute 0-4 spreads agents' start times
                        # so backups don't all fire at the same second.
                        minute=str(random.randint(0, 4)),
                        hour=hours,
                        day_of_week=day_of_week,
                        day_of_month="*",
                        month_of_year="*",
                        timezone=pytz.timezone("America/Los_Angeles"),
                    )
                    PeriodicTask.objects.create(
                        crontab=schedule,
                        name=f"{instance.hostname}-{instance.pk}-backup-day{day_of_week}",
                        task="core.tasks.incremental_backup_task",
                        enabled=instance.backups_enabled,
                        # Task receives the agent pk as its sole argument.
                        args=json.dumps([instance.pk]),
                    )
        # Regardless of schedule changes, keep the enabled flag of all
        # of this agent's backup tasks in sync with backups_enabled.
        tasks = PeriodicTask.objects.filter(
            name__startswith=f"{instance.hostname}-{instance.pk}-backup"
        )
        for task in tasks:
            task.enabled = instance.backups_enabled
            task.save(update_fields=["enabled"])
|
[
"random.randint",
"django.dispatch.receiver",
"json.dumps",
"django_celery_beat.models.PeriodicTask.objects.filter",
"pytz.timezone"
] |
[((331, 363), 'django.dispatch.receiver', 'receiver', (['pre_save'], {'sender': 'Agent'}), '(pre_save, sender=Agent)\n', (339, 363), False, 'from django.dispatch import receiver\n'), ((2024, 2118), 'django_celery_beat.models.PeriodicTask.objects.filter', 'PeriodicTask.objects.filter', ([], {'name__startswith': 'f"""{instance.hostname}-{instance.pk}-backup"""'}), "(name__startswith=\n f'{instance.hostname}-{instance.pk}-backup')\n", (2051, 2118), False, 'from django_celery_beat.models import CrontabSchedule, PeriodicTask\n'), ((860, 954), 'django_celery_beat.models.PeriodicTask.objects.filter', 'PeriodicTask.objects.filter', ([], {'name__startswith': 'f"""{instance.hostname}-{instance.pk}-backup"""'}), "(name__startswith=\n f'{instance.hostname}-{instance.pk}-backup')\n", (887, 954), False, 'from django_celery_beat.models import CrontabSchedule, PeriodicTask\n'), ((1561, 1597), 'pytz.timezone', 'pytz.timezone', (['"""America/Los_Angeles"""'], {}), "('America/Los_Angeles')\n", (1574, 1597), False, 'import pytz\n'), ((1958, 1983), 'json.dumps', 'json.dumps', (['[instance.pk]'], {}), '([instance.pk])\n', (1968, 1983), False, 'import json\n'), ((1335, 1355), 'random.randint', 'random.randint', (['(0)', '(4)'], {}), '(0, 4)\n', (1349, 1355), False, 'import random\n')]
|
#!/usr/bin/env python
'''
Copyright 2017 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import time
import yaml
import os
import sys
from theia import TheiaSniffer, TheiaEncryptedSender, TheiaProxy
if __name__ == '__main__':
    # Load the agent configuration.
    with open('/etc/theia/agent.yaml') as c:
        # SECURITY NOTE: yaml.load without an explicit Loader can construct
        # arbitrary Python objects; prefer yaml.safe_load if the config file
        # never needs custom tags.
        conf = yaml.load(c)
    if 'server_key' not in conf or not conf['server_key']:
        print("Missing encryption key\nCreate semetric key using python theia-genkey.py")
        print("NOTE: This must match receiving server's key")
        sys.exit(1)
    # Start the local proxy process.
    proxy = TheiaProxy(conf)
    proxy.start()
    # Start the configured number of encrypted sender workers.
    senders = []
    for i in xrange(0, conf['threads']):
        sender = TheiaEncryptedSender(
            conf,
            conf['server_key'],
            send_url = ["tcp://{}:{}".format(
                conf['destination']['name'],
                conf['destination']['port']
            )]
        )
        sender.start()
        senders.append(sender)
    # One sniffer per configured network interface.
    sniffers = {}
    for i in conf['interfaces']:
        sniff = TheiaSniffer(conf, i)
        sniff.start()
        sniffers[i] = sniff
    try:
        # Supervision loop: every 5 seconds restart any worker that died.
        while True:
            if not proxy.is_alive():
                proxy = TheiaProxy(conf)
                proxy.start()
            # Iterate over a snapshot: the list is mutated inside the loop
            # (BUGFIX: the original iterated `senders` directly while
            # appending/removing, which skips elements).
            for s in list(senders):
                if not s.is_alive():
                    sender = TheiaEncryptedSender(
                        conf,
                        conf['server_key'],
                        send_url = ["tcp://{}:{}".format(
                            conf['destination']['name'],
                            conf['destination']['port']
                        )]
                    )
                    sender.start()
                    senders.append(sender)
                    senders.remove(s)
            for s in sniffers.keys():
                if not sniffers[s].is_alive():
                    sniff = TheiaSniffer(conf, s)
                    sniff.start()
                    # BUGFIX: was `sniffers[i] = sniff`, which overwrote the
                    # slot of the last interface from the start-up loop
                    # instead of the interface `s` being restarted.
                    sniffers[s] = sniff
            time.sleep(5)
    except:
        # Tear everything down on any error or Ctrl-C, then exit.
        for s in sniffers.keys():
            # BUGFIX: was `sniff[s].terminate()`, which indexed a single
            # sniffer object; terminate the worker stored in the dict.
            sniffers[s].terminate()
        for s in senders:
            s.terminate()
        proxy.terminate()
        sys.exit()
|
[
"yaml.load",
"theia.TheiaProxy",
"time.sleep",
"theia.TheiaSniffer",
"sys.exit"
] |
[((1066, 1082), 'theia.TheiaProxy', 'TheiaProxy', (['conf'], {}), '(conf)\n', (1076, 1082), False, 'from theia import TheiaSniffer, TheiaEncryptedSender, TheiaProxy\n'), ((807, 819), 'yaml.load', 'yaml.load', (['c'], {}), '(c)\n', (816, 819), False, 'import yaml\n'), ((1041, 1052), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1049, 1052), False, 'import sys\n'), ((1553, 1574), 'theia.TheiaSniffer', 'TheiaSniffer', (['conf', 'i'], {}), '(conf, i)\n', (1565, 1574), False, 'from theia import TheiaSniffer, TheiaEncryptedSender, TheiaProxy\n'), ((2532, 2545), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (2542, 2545), False, 'import time\n'), ((2737, 2747), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2745, 2747), False, 'import sys\n'), ((1717, 1733), 'theia.TheiaProxy', 'TheiaProxy', (['conf'], {}), '(conf)\n', (1727, 1733), False, 'from theia import TheiaSniffer, TheiaEncryptedSender, TheiaProxy\n'), ((2423, 2444), 'theia.TheiaSniffer', 'TheiaSniffer', (['conf', 's'], {}), '(conf, s)\n', (2435, 2444), False, 'from theia import TheiaSniffer, TheiaEncryptedSender, TheiaProxy\n')]
|
# -*- coding: utf-8 -*-
import abc
import six
from . import tracing
YDB_AUTH_TICKET_HEADER = "x-ydb-auth-ticket"
@six.add_metaclass(abc.ABCMeta)
class AbstractCredentials(object):
    """
    An abstract class that provides auth metadata
    (key/value pairs attached to outgoing requests for authentication).
    """
@six.add_metaclass(abc.ABCMeta)
class Credentials(object):
    """Base class for credential providers; also carries a tracer."""

    def __init__(self, tracer=None):
        """
        :param tracer: optional tracing.Tracer; a no-op tracer is
            created when none is supplied.
        """
        if tracer is None:
            tracer = tracing.Tracer(None)
        self.tracer = tracer

    @abc.abstractmethod
    def auth_metadata(self):
        """
        :return: An iterable with auth metadata
        """
class AnonymousCredentials(Credentials):
    """Credentials for anonymous access: no auth metadata is attached."""

    @staticmethod
    def auth_metadata():
        """Return an empty metadata list."""
        return list()
class AuthTokenCredentials(Credentials):
    """Credentials backed by a pre-issued auth token."""

    def __init__(self, token):
        """
        :param token: token value sent under the x-ydb-auth-ticket header
        """
        self._token = token

    def auth_metadata(self):
        """Return the single metadata pair carrying the stored token."""
        ticket_entry = (YDB_AUTH_TICKET_HEADER, self._token)
        return [ticket_entry]
class AccessTokenCredentials(Credentials):
    """Credentials backed by a pre-issued access token."""

    def __init__(self, token):
        """
        :param token: access token sent under the x-ydb-auth-ticket header
        """
        self._token = token

    def auth_metadata(self):
        """Return the metadata pair carrying the stored access token."""
        return [(YDB_AUTH_TICKET_HEADER, self._token)]
|
[
"six.add_metaclass"
] |
[((117, 147), 'six.add_metaclass', 'six.add_metaclass', (['abc.ABCMeta'], {}), '(abc.ABCMeta)\n', (134, 147), False, 'import six\n'), ((252, 282), 'six.add_metaclass', 'six.add_metaclass', (['abc.ABCMeta'], {}), '(abc.ABCMeta)\n', (269, 282), False, 'import six\n')]
|
from RLTest import Env
def test_ts_del_uncompressed():
    """ts.del over the whole key range empties an uncompressed series."""
    # The series holds 101 samples at timestamps 0..100.
    sample_len = 101
    with Env().getClusterConnectionIfNeeded() as conn:
        conn.execute_command("ts.create", 'test_key', 'uncompressed')
        for ts in range(sample_len):
            assert conn.execute_command("ts.add", 'test_key', ts, '1') == ts
        expected_value = '1'.encode('ascii')
        samples = conn.execute_command('ts.range', 'test_key', 0, 100)
        for idx, sample in enumerate(samples):
            assert sample == [idx, expected_value]
        conn.execute_command('ts.del', 'test_key', 0, 100)
        samples = conn.execute_command('ts.range', 'test_key', 0, 100)
        assert len(samples) == 0
def test_ts_del_uncompressed_in_range():
    """ts.del over a sub-range removes only the covered samples."""
    sample_len = 101
    with Env().getClusterConnectionIfNeeded() as conn:
        conn.execute_command("ts.create", 'test_key', 'uncompressed')
        for ts in range(sample_len):
            assert conn.execute_command("ts.add", 'test_key', ts, '1') == ts
        expected_value = '1'.encode('ascii')
        samples = conn.execute_command('ts.range', 'test_key', 0, 100)
        for idx, sample in enumerate(samples):
            assert sample == [idx, expected_value]
        # Delete the 11 samples with timestamps 50..60 inclusive.
        conn.execute_command('ts.del', 'test_key', 50, 60)
        samples = conn.execute_command('ts.range', 'test_key', 0, 100)
        assert len(samples) == 90
def test_ts_del_compressed():
    """ts.del over the whole key range empties a (default) compressed series."""
    sample_len = 101
    with Env().getClusterConnectionIfNeeded() as conn:
        conn.execute_command("ts.create", 'test_key')
        for ts in range(sample_len):
            assert conn.execute_command("ts.add", 'test_key', ts, '1') == ts
        expected_value = '1'.encode('ascii')
        samples = conn.execute_command('ts.range', 'test_key', 0, 100)
        for idx, sample in enumerate(samples):
            assert sample == [idx, expected_value]
        conn.execute_command('ts.del', 'test_key', 0, 100)
        samples = conn.execute_command('ts.range', 'test_key', 0, 100)
        assert len(samples) == 0
def test_ts_del_compressed_multi_chunk():
    """ts.del spanning multiple chunks keeps only samples past the range."""
    sample_len = 1001
    with Env().getClusterConnectionIfNeeded() as conn:
        conn.execute_command("ts.create", 'test_key')
        for ts in range(sample_len):
            assert conn.execute_command("ts.add", 'test_key', ts, '1') == ts
        expected_value = '1'.encode('ascii')
        samples = conn.execute_command('ts.range', 'test_key', 0, sample_len - 1)
        for idx, sample in enumerate(samples):
            assert sample == [idx, expected_value]
        # Remove timestamps 0..999; only the sample at 1000 survives.
        conn.execute_command('ts.del', 'test_key', 0, 999)
        samples = conn.execute_command('ts.range', 'test_key', 0, sample_len - 1)
        assert len(samples) == 1
def test_ts_del_compressed_out_range():
    """ts.del whose range extends beyond the stored samples removes them all."""
    sample_len = 101
    with Env().getClusterConnectionIfNeeded() as conn:
        conn.execute_command("ts.create", 'test_key')
        for ts in range(sample_len):
            assert conn.execute_command("ts.add", 'test_key', ts + 100, '1') == ts + 100
        expected_value = '1'.encode('ascii')
        samples = conn.execute_command('ts.range', 'test_key', 0 + 100, sample_len + 100 - 1)
        for idx, sample in enumerate(samples):
            assert sample == [idx + 100, expected_value]
        # The delete range [0, 500] is a superset of the stored timestamps.
        conn.execute_command('ts.del', 'test_key', 0, 500)
        samples = conn.execute_command('ts.range', 'test_key', 0 + 100, sample_len + 100 - 1)
        assert len(samples) == 0
|
[
"RLTest.Env"
] |
[((113, 118), 'RLTest.Env', 'Env', ([], {}), '()\n', (116, 118), False, 'from RLTest import Env\n'), ((733, 738), 'RLTest.Env', 'Env', ([], {}), '()\n', (736, 738), False, 'from RLTest import Env\n'), ((1371, 1376), 'RLTest.Env', 'Env', ([], {}), '()\n', (1374, 1376), False, 'from RLTest import Env\n'), ((1977, 1982), 'RLTest.Env', 'Env', ([], {}), '()\n', (1980, 1982), False, 'from RLTest import Env\n'), ((2602, 2607), 'RLTest.Env', 'Env', ([], {}), '()\n', (2605, 2607), False, 'from RLTest import Env\n')]
|
"""
Copyright (C) 2020, <NAME>, https://www.gagolewski.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import scipy.stats
def load_data(fname, preprocess):
    """
    Load the data matrix from *fname* and preprocess it.

    Constant columns are dropped, a tiny amount of white noise is added
    to break exact ties, and the matrix is rescaled according to
    *preprocess* ("scale_standard", "scale_robust" or "original").
    Returns a float32, C-ordered array.
    """
    mat = np.loadtxt(fname, ndmin=2)
    # Drop constant columns -- they carry no information.
    mat = mat[:, mat.var(axis=0) > 0]
    # Perturb with tiny white noise (sd = 1e-6 of the pooled sd).
    mat += np.random.normal(0.0, mat.std(ddof=1)*1e-6, size=mat.shape)
    if preprocess == "scale_standard":
        # Standardise each column: subtract mean, divide by sample sd.
        col_sd = mat.std(axis=0, ddof=1)
        mat = (mat - mat.mean(axis=0)) / col_sd
    elif preprocess == "scale_robust":
        # Robust scaling: centre on the median, divide by 1.4826*MAD.
        col_mad = np.median(np.abs(mat - np.median(mat, axis=0)), axis=0)
        col_mad = col_mad / scipy.stats.norm().ppf(0.75)  # i.e., col_mad*1.4826
        col_mad[col_mad < 1e-12] = 1.0  # don't scale columns of zero MAD
        mat = (mat - np.median(mat, axis=0)) / col_mad
    elif preprocess == "original":
        # Scale all columns proportionally by the pooled sample sd.
        pooled_sd = mat.std(axis=None, ddof=1)
        mat = (mat - mat.mean(axis=0)) / pooled_sd
    else:
        raise Exception("unknown `preprocess`")
    # Work with float32 to halve memory for the downstream benchmark.
    return mat.astype(np.float32, order="C", copy=False)
|
[
"numpy.median",
"numpy.loadtxt"
] |
[((1269, 1295), 'numpy.loadtxt', 'np.loadtxt', (['fname'], {'ndmin': '(2)'}), '(fname, ndmin=2)\n', (1279, 1295), True, 'import numpy as np\n'), ((1840, 1860), 'numpy.median', 'np.median', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (1849, 1860), True, 'import numpy as np\n'), ((1675, 1695), 'numpy.median', 'np.median', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (1684, 1695), True, 'import numpy as np\n')]
|
import numpy as np
import random
class ExperienceMemory(object):
    """Fixed-size replay buffer for RL transitions.

    Stores (state, action, reward, next_state, terminal) tuples in
    pre-allocated numpy arrays and overwrites the oldest entries once
    the capacity is exhausted (ring-buffer behaviour).
    """

    def __init__(self, capacity, stateLength):
        """
        :param capacity: maximum number of transitions kept at once
        :param stateLength: number of features per state vector
        """
        self.__capacity = capacity
        self.__usedCapacity = 0
        self.__stateLength = stateLength
        self.__writePosition = 0
        self.__writePositionReseted = False
        # Pre-allocated storage, one row (or scalar slot) per transition.
        self.__ids = np.zeros(self.__capacity, dtype='uint64')
        self.__states = np.zeros((self.__capacity, self.__stateLength), dtype='float32')
        self.__actions = np.zeros(self.__capacity, dtype='uint8')
        self.__rewards = np.zeros(self.__capacity, dtype='float32')
        self.__nextStates = np.zeros((self.__capacity, self.__stateLength), dtype='float32')
        self.__nextStateIsTerminalStates = np.zeros(self.__capacity, dtype='bool')
        # Monotonically increasing id handed out by store().
        self.__sampleCounter = 0

    def store(self, state, action, reward, nextState, nextStateIsTerminalState):
        """Record one transition and return its unique experience id."""
        slot = self.__writePosition
        experienceId = self.__sampleCounter
        self.__ids[slot] = experienceId
        self.__states[slot] = state
        self.__actions[slot] = action
        self.__rewards[slot] = reward
        self.__nextStates[slot] = nextState
        self.__nextStateIsTerminalStates[slot] = nextStateIsTerminalState
        self.__sampleCounter += 1
        # The used capacity grows until the write cursor wraps once.
        if not self.__writePositionReseted:
            self.__usedCapacity += 1
        slot += 1
        if slot == self.__capacity:
            slot = 0
            self.__writePositionReseted = True
        self.__writePosition = slot
        return experienceId

    def size(self):
        """Return how many transitions are currently stored."""
        return self.__usedCapacity

    def sample(self, numberOfSamples):
        """Draw ``numberOfSamples`` distinct transitions uniformly at random.

        Returns six aligned arrays (ids, states, actions, rewards, next
        states, terminal flags), or six ``None`` values when the buffer
        does not yet hold enough transitions.
        """
        if numberOfSamples > self.__usedCapacity:
            return None, None, None, None, None, None
        chosen = random.sample(range(self.__usedCapacity), numberOfSamples)
        return (self.__ids[chosen], self.__states[chosen], self.__actions[chosen],
                self.__rewards[chosen], self.__nextStates[chosen],
                self.__nextStateIsTerminalStates[chosen])
|
[
"numpy.zeros"
] |
[((320, 361), 'numpy.zeros', 'np.zeros', (['self.__capacity'], {'dtype': '"""uint64"""'}), "(self.__capacity, dtype='uint64')\n", (328, 361), True, 'import numpy as np\n'), ((388, 452), 'numpy.zeros', 'np.zeros', (['(self.__capacity, self.__stateLength)'], {'dtype': '"""float32"""'}), "((self.__capacity, self.__stateLength), dtype='float32')\n", (396, 452), True, 'import numpy as np\n'), ((478, 518), 'numpy.zeros', 'np.zeros', (['self.__capacity'], {'dtype': '"""uint8"""'}), "(self.__capacity, dtype='uint8')\n", (486, 518), True, 'import numpy as np\n'), ((546, 588), 'numpy.zeros', 'np.zeros', (['self.__capacity'], {'dtype': '"""float32"""'}), "(self.__capacity, dtype='float32')\n", (554, 588), True, 'import numpy as np\n'), ((619, 683), 'numpy.zeros', 'np.zeros', (['(self.__capacity, self.__stateLength)'], {'dtype': '"""float32"""'}), "((self.__capacity, self.__stateLength), dtype='float32')\n", (627, 683), True, 'import numpy as np\n'), ((727, 766), 'numpy.zeros', 'np.zeros', (['self.__capacity'], {'dtype': '"""bool"""'}), "(self.__capacity, dtype='bool')\n", (735, 766), True, 'import numpy as np\n')]
|
# coding=utf-8
u"""
Description: Generate new basic template from the result and add
it to data base.
User: Jerry.Fang
Date: 13-12-12
"""
from xlrd import open_workbook
from model.session import *
from template.match_rule import MatchRule
from template.logger import logger
from model.basic_element import BasicElement
from extractor.data_extractor import DataExtractor
from extractor.data_extractor import template_redis
from template.basic_template_generator import BasicTemplate
import copy
##
#Description: Open result file and add new template to database.
class TemplateUpdater():
    """Reads a result .xls workbook, adds any new basic-element templates it
    contains to the database, and serves template add/update/delete/retrieve
    requests coming from the API."""
    def __init__(self):
        # Path of the result workbook and the xlrd handle opened from it.
        self.input_file = u''
        self._input_file_handle = []
        pass
    # Set input file's path.
    def set_input_file(self, file_path):
        """Remember the path of the result .xls file to process."""
        self.input_file = file_path
        logger.info('Set input result file path: %s .' % file_path)
    # Open source xls file.
    def open_xls_input_file(self):
        """Open the configured workbook; terminates the process on failure."""
        try:
            self._input_file_handle = open_workbook(self.input_file)
            logger.info('Open file successfully: "%s" ' % self.input_file)
        except Exception as exc:
            logger.error('Can not open file: "%s" Info: %s' % (self.input_file, exc))
            exit()
    #record old attribute rule to dictionary.
    @staticmethod
    def record_old_attr_rule_in_dic(class_id, template_dic_pool, old_attr_rule_dic):
        """Copy every attribute value containing u'attr_rule' for class_id into
        old_attr_rule_dic, keyed by attribute name."""
        if class_id in template_dic_pool.keys():
            for attr_name in template_dic_pool[class_id].keys():
                for attr_val in template_dic_pool[class_id][attr_name]:
                    if u'attr_rule' in attr_val:
                        old_attr_rule_dic[attr_name] = attr_val
    # Merge strategy: if attr_val in pool, use new rule; if attr_val not in pool, keep old rule.
    @staticmethod
    def merge_old_attr_rule_with_new_attr_rule(class_id, template_dic_pool, old_attr_rule_dic):
        """Append every recorded old rule to the (possibly regenerated) pool
        entry for class_id, creating missing dict slots as needed."""
        if class_id not in template_dic_pool.keys():
            template_dic_pool[class_id] = dict()
        for attr_name in old_attr_rule_dic.keys():
            if attr_name not in template_dic_pool[class_id].keys():
                template_dic_pool[class_id][attr_name] = []
            template_dic_pool[class_id][attr_name].append(old_attr_rule_dic[attr_name])
    # Rebuild attribute rule by class id.
    @staticmethod
    def rebuild_attr_rule_by_class_id(class_id, template_dic_pool):
        """Drop and regenerate the attribute rules of class_id, preserving the
        previously recorded rules, then submit the result to the database."""
        # record old attribute rule.
        old_attr_rule_dic = dict()
        TemplateUpdater.record_old_attr_rule_in_dic(class_id, template_dic_pool, old_attr_rule_dic)
        # Clean attribute rule by class id in database.
        BasicTemplate.clean_attr_rule_by_class_id(class_id)
        # Regenerate attribute rules by class id.
        del template_dic_pool[class_id]
        template_redis.delete(class_id)
        DataExtractor.find_template_pool_in_db(class_id, template_dic_pool)
        if class_id in template_dic_pool.keys():
            BasicTemplate.gen_attr_rule_in_template(template_dic_pool[class_id])
        # Merge old attr rule with new rule.
        TemplateUpdater.merge_old_attr_rule_with_new_attr_rule(class_id, template_dic_pool, old_attr_rule_dic)
        # Submit change to database.
        TemplateUpdater.submit_attr_rule_by_class_id(class_id, template_dic_pool)
    # Submit indicate attribute rule to database.
    @staticmethod
    def submit_attr_rule_by_class_id(class_id, template_dic_pool):
        """Persist every u'attr_rule' value of class_id as BasicElement rows."""
        if class_id in template_dic_pool.keys():
            with get_session() as session:
                for attr_name in template_dic_pool[class_id].keys():
                    for i in range(0, len(template_dic_pool[class_id][attr_name])):
                        if u'attr_rule' in template_dic_pool[class_id][attr_name][i]:
                            m_lv1_id, m_lv2_id = MatchRule.decode_class_id(class_id)
                            # Reuse the names of an existing element of the same
                            # class, if any; otherwise store empty names.
                            be = session.query(BasicElement).filter_by(lv1_id=m_lv1_id, lv2_id=m_lv2_id).first()
                            if be:
                                lv1_name = be.lv1_name
                                lv2_name = be.lv2_name
                            else:
                                lv1_name = u''
                                lv2_name = u''
                            # Divide and store attribute rule to elements list.
                            m_basic_element_list = []
                            BasicTemplate.store_attr_rule_to_ele_list(
                                template_dic_pool[class_id][attr_name][i],
                                m_lv1_id, m_lv2_id, lv1_name, lv2_name, attr_name,
                                m_basic_element_list
                            )
                            session.add_all(m_basic_element_list)
                session.commit()
    # Generate class id <-> name map by class id.
    @staticmethod
    def gen_class_id_name_map_by_class_id(class_id, template_dic_pool, class_name_dic):
        """Populate class_name_dic[class_id] with [lv1_name, lv2_name, dirty-flag].
        The flag (index 2) marks whether new elements were later found for it."""
        if class_id not in class_name_dic.keys():
            class_name_dic[class_id] = []
            with get_session() as session:
                lv1_id, lv2_id = MatchRule.decode_class_id(class_id)
                if class_id in template_dic_pool.keys():
                    be = session.query(BasicElement).filter_by(lv1_id=lv1_id, lv2_id=lv2_id).first()
                    if be:
                        class_name_dic[class_id].append(be.lv1_name)
                        class_name_dic[class_id].append(be.lv2_name)
                        class_name_dic[class_id].append(False)
                    else:
                        class_name_dic[class_id].append(u'未定义一级种类')
                        class_name_dic[class_id].append(u'未定义二级种类')
                        class_name_dic[class_id].append(False)
                        logger.warning('Find a unnamed template! class id: %d' % class_id)
    # Read attribute value and insert new template to list wait for updating.
    @staticmethod
    def insert_new_element_to_update_list(
            rd_line, attr_start_idx, attr_end_idx, attr_name_list, class_name_dic,
            class_id, template_dic_pool, m_basic_element_update_list
    ):
        """Scan the attribute cells of one result row and append every attribute
        name/value pair not yet present in the template pool (and not already
        queued) to m_basic_element_update_list."""
        for attr_val_block in rd_line[attr_start_idx:attr_end_idx]:
            if u'' == attr_val_block:
                continue
            # A cell may hold several tab-separated values.
            attr_val_list = attr_val_block.split(u'\t')
            for attr_val in attr_val_list:
                if u'' != attr_val:
                    # Check whether attribute is new.
                    attr_name = attr_name_list[rd_line.index(attr_val_block)]
                    is_new_basic_element = False
                    if attr_name not in template_dic_pool[class_id].keys():
                        is_new_basic_element = True
                        logger.debug('Find new attribute name: %s' % attr_name)
                    elif attr_val not in template_dic_pool[class_id][attr_name]:
                        is_new_basic_element = True
                        logger.debug('Find new attribute value: %s' % attr_val)
                    # Insert to list wait for updating.
                    if is_new_basic_element:
                        m_basic_element = BasicElement()
                        m_lv1_id, m_lv2_id = MatchRule.decode_class_id(class_id)
                        m_basic_element.lv1_id = m_lv1_id
                        m_basic_element.lv2_id = m_lv2_id
                        m_basic_element.lv1_name = class_name_dic[class_id][0]
                        m_basic_element.lv2_name = class_name_dic[class_id][1]
                        m_basic_element.attr_name = attr_name
                        m_basic_element.attr_val = attr_val
                        # Mark the class dirty so its rules are rebuilt later.
                        class_name_dic[class_id][2] = True
                        # Check whether the new element is already in the update list.
                        is_already_record = False
                        for m_record_be in m_basic_element_update_list:
                            if m_basic_element.lv1_id == m_record_be.lv1_id and \
                                    m_basic_element.lv2_id == m_record_be.lv2_id and \
                                    m_basic_element.attr_name == m_record_be.attr_name and \
                                    m_basic_element.attr_val == m_record_be.attr_val:
                                is_already_record = True
                                break
                        # Insert to update list .
                        if False is is_already_record:
                            m_basic_element_update_list.append(m_basic_element)
    ##
    # Updater main processing.
    def update_templates(self):
        """Walk every sheet of the workbook, collect new basic elements, write
        them to the database and rebuild the rules of every touched class."""
        # Open file handle.
        self.open_xls_input_file()
        # Generate table names' list.
        sheet_names_list = self._input_file_handle.sheet_names()
        # Product template pool.
        template_dic_pool = {}
        # New elements waiting to be written to the database.
        m_basic_element_update_list = []
        # Read all sheets by name.
        class_name_dic = {}
        for sheet_name in sheet_names_list:
            sheet_handle = self._input_file_handle.sheet_by_name(sheet_name)
            row_num = sheet_handle.nrows
            attr_start_idx = 0
            attr_end_idx = 0
            attr_name_list = []
            class_id_idx = 0
            # Read all lines.
            for rd_row_idx in range(0, row_num):
                # Read one line and replace all special words.
                rd_line = sheet_handle.row_values(rd_row_idx)
                for i in range(0, len(rd_line)):
                    rd_line[i] = rd_line[i].replace(u' ', u'\t').replace(u'、', u'\t')
                MatchRule.replace_special_word(rd_line)
                # Row 0 holds the column marking where attribute data starts.
                # Get attribute name or value start index and end index.
                if 0 == rd_row_idx:
                    attr_start_idx = rd_line.index(u'解析结果')
                    logger.info('Find attribute start index = %d.' % attr_start_idx)
                    continue
                # Row 1 holds the attribute names, the end marker and the
                # class-id column.
                # Get attribute name list and attribute and index.
                if 1 == rd_row_idx:
                    attr_end_idx = rd_line.index(u'无法解析部分')
                    attr_name_list = rd_line[:]
                    class_id_idx = rd_line.index(u'类别编码')
                    logger.info('Find attribute end index = %d. class_id index = %d' % (attr_end_idx, class_id_idx))
                    continue
                # Find template and store in template pool.
                class_id = MatchRule.encode_class_id(int(rd_line[class_id_idx]), 0, u'xls')
                DataExtractor.find_template_pool_in_db(class_id, template_dic_pool)
                if class_id not in template_dic_pool.keys():
                    logger.info('Can not find template %s' % class_id)
                    continue
                # Generate class name <-> id map function.
                TemplateUpdater.gen_class_id_name_map_by_class_id(
                    class_id, template_dic_pool, class_name_dic
                )
                # Read attribute value and insert new template to list wait for updating.
                self.insert_new_element_to_update_list(
                    rd_line, attr_start_idx, attr_end_idx, attr_name_list, class_name_dic,
                    class_id, template_dic_pool, m_basic_element_update_list
                )
        #Update all new basic elements to database.
        with get_session() as session:
            logger.info('Add new %d elements to table.' % len(m_basic_element_update_list))
            session.add_all(m_basic_element_update_list)
            session.commit()
        # Rebuild all attribute rule by class id when flag is True.
        for class_id in class_name_dic.keys():
            if class_name_dic[class_id][2]:
                # Rebuild attribute rule by indicated class id.
                template_redis.delete(class_id)
                TemplateUpdater.rebuild_attr_rule_by_class_id(class_id, template_dic_pool)
        # Clean redis buffer.
        template_redis.flushdb()
    ##
    # Delete template by API indication.
    @staticmethod
    def del_template_by_api_ind(ind_info):
        """Delete templates as requested by the API: either every value of an
        attribute name (empty unique_id) or one element by unique_id; then
        rebuild the class's rules and drop its redis cache entry."""
        # delete all template attribute name.
        if u'' == ind_info[u'unique_id']:
            with get_session() as session:
                be_pool = session.query(BasicElement).filter_by(
                    lv1_id=int(ind_info[u'first_type_code']),
                    lv2_id=int(ind_info[u'second_type_code']),
                    attr_name=ind_info[u'attr_name']
                )
                if be_pool.first():
                    be_pool.delete()
                    session.commit()
                    logger.info('Delete all attribute value of unique_id %s ok.' % ind_info[u'unique_id'])
        # delete indication unique template.
        else:
            with get_session() as session:
                be = session.query(BasicElement).filter_by(
                    unique_id=int(ind_info[u'unique_id'])
                ).first()
                if be:
                    session.delete(be)
                    session.commit()
                    logger.info('Delete unique_id %s ok.' % ind_info[u'unique_id'])
        # Rebuild attribute rule by class id.
        template_dic_pool = {}
        class_id = MatchRule.encode_class_id(
            int(ind_info[u'first_type_code']), int(ind_info[u'second_type_code'])
        )
        DataExtractor.find_template_pool_in_db(class_id, template_dic_pool)
        if class_id in template_dic_pool.keys():
            TemplateUpdater.rebuild_attr_rule_by_class_id(class_id, template_dic_pool)
            logger.info('Rebuild attribute rule ok.')
        # Clean template redis.
        class_id = MatchRule.encode_class_id(int(ind_info[u'first_type_code']), int(ind_info[u'second_type_code']))
        template_redis.delete(class_id)
    ##
    # Update template by API indication.
    @staticmethod
    def update_template_by_api_ind(ind_info):
        """Rename an attribute (empty unique_id: attr_val holds the new name)
        or update a single element's fields by unique_id; rebuild rules and
        drop the redis cache entry afterwards."""
        # Check indication information.
        if ind_info[u'first_type_code'] == u''\
                or ind_info[u'second_type_code'] == u'' or ind_info[u'attr_name'] == u''\
                or ind_info[u'attr_val'] == u'':
            logger.error('indication is empty!')
            return
        # Update attribute name.
        if ind_info[u'unique_id'] == u'':
            with get_session() as session:
                be_pool = session.query(BasicElement).filter_by(
                    lv1_id=int(ind_info[u'first_type_code']),
                    lv2_id=int(ind_info[u'second_type_code']),
                    attr_name=ind_info[u'attr_name']
                ).all()
                if len(be_pool) > 0:
                    for be in be_pool:
                        be.attr_name = ind_info[u'attr_val']
                    session.commit()
                    logger.info('update attribute name %s -> %s ok.' %
                                (ind_info[u'attr_name'], ind_info[u'attr_val']))
        # Update template to database.
        else:
            with get_session() as session:
                be = session.query(BasicElement).filter_by(
                    unique_id=int(ind_info[u'unique_id'])
                ).first()
                if be:
                    be.lv1_id = int(ind_info[u'first_type_code'])
                    be.lv2_id = int(ind_info[u'second_type_code'])
                    be.attr_name = ind_info[u'attr_name']
                    be.attr_val = ind_info[u'attr_val']
                    session.commit()
                    logger.info('update unique_id %s ok.' % ind_info[u'unique_id'])
        # Rebuild attribute rule by class id when attribute value is not attr_rule.
        if u'attr_rule' not in ind_info[u'attr_val']:
            template_dic_pool = {}
            class_id = MatchRule.encode_class_id(
                int(ind_info[u'first_type_code']), int(ind_info[u'second_type_code'])
            )
            DataExtractor.find_template_pool_in_db(class_id, template_dic_pool)
            if class_id in template_dic_pool.keys():
                TemplateUpdater.rebuild_attr_rule_by_class_id(class_id, template_dic_pool)
                logger.info('Rebuild attribute rule ok.')
        # Clean template redis.
        class_id = MatchRule.encode_class_id(int(ind_info[u'first_type_code']), int(ind_info[u'second_type_code']))
        template_redis.delete(class_id)
    ##
    # Retrieve template information by API indication.
    @staticmethod
    def retrieve_template_by_api_ind(ind_info, all_result):
        """Fill all_result with one page of attribute names (empty attr_name)
        or one page of values plus every u'attr_rule' entry of an attribute."""
        # Check indication information.
        if ind_info[u'first_type_code'] == u'' or ind_info[u'second_type_code'] == u''\
                or ind_info[u'page'] == u'' or ind_info[u'page_size'] == u'':
            logger.error('Input is empty!')
            return
        # Calculate start index and end index.
        start_idx = (int(ind_info[u'page'])-1) * int(ind_info[u'page_size'])
        end_idx = int(ind_info[u'page']) * int(ind_info[u'page_size'])
        idx = 0
        # Retrieve all attribute name.
        if ind_info[u'attr_name'] == u'':
            # Search data from database.
            # find template.
            template_dic_pool = {}
            class_id = MatchRule.encode_class_id(int(ind_info[u'first_type_code']), int(ind_info[u'second_type_code']))
            DataExtractor.find_template_pool_in_db(class_id, template_dic_pool)
            if class_id in template_dic_pool.keys():
                # write to result buffer.
                all_result[u'total'] = len(template_dic_pool[class_id].keys())
                all_result[u'rows'][u'attr_name'] = []
                attr_name_list = template_dic_pool[class_id].keys()
                for idx in range(start_idx, end_idx):
                    if idx >= len(attr_name_list) or idx < 0:
                        break
                    else:
                        all_result[u'rows'][u'attr_name'].append(attr_name_list[idx])
                        logger.debug('Retrieve attribute name : %s' % attr_name_list[idx])
                logger.info('Retrieve %d attribute name OK!' % (idx + 1 - start_idx))
        # Retrieve attribute value.
        else:
            # Search data from database.
            with get_session() as session:
                be_pool = session.query(BasicElement).filter_by(
                    lv1_id=int(ind_info[u'first_type_code']),
                    lv2_id=int(ind_info[u'second_type_code']),
                    attr_name=ind_info[u'attr_name']
                ).all()
                if len(be_pool) > 0:
                    # Divide two list. one is without attr_rule; other is attr_rule list.
                    be_list_without_rule = []
                    be_list_rule = []
                    for be in be_pool:
                        if u'attr_rule' not in be.attr_val:
                            be_list_without_rule.append(be)
                        else:
                            be_list_rule.append(be)
                    # Write data to result buffer except attr_rule element.
                    all_result[u'total'] = len(be_list_without_rule)
                    for idx in range(start_idx, end_idx):
                        if idx >= len(be_list_without_rule) or idx < 0:
                            break
                        elif u'attr_rule' not in be_list_without_rule[idx].attr_val:
                            all_result[u'rows'][be_list_without_rule[idx].attr_val] = be_list_without_rule[idx].unique_id
                            logger.debug('Retrieve attribute value : %s, %d'
                                         % (be_list_without_rule[idx].attr_val, be_list_without_rule[idx].unique_id))
                    # Write attribute rule to result buffer.
                    all_result[u'rows'][u'attr_rule'] = []
                    for idx in range(0, len(be_list_rule)):
                        if u'attr_rule' in be_list_rule[idx].attr_val:
                            all_result[u'rows'][u'attr_rule'].append(be_list_rule[idx].attr_val)
                            all_result[u'rows'][u'attr_rule'].append(be_list_rule[idx].unique_id)
                            logger.info('Retrieve %s attribute rule OK!' % be_list_rule[idx].attr_val)
# Script entry point: process the test result workbook.
if __name__ == '__main__':
    updater = TemplateUpdater()
    updater.set_input_file(u'../../InputFile_test/result.xls')
    updater.update_templates()
|
[
"template.match_rule.MatchRule.replace_special_word",
"template.logger.logger.debug",
"template.basic_template_generator.BasicTemplate.store_attr_rule_to_ele_list",
"model.basic_element.BasicElement",
"xlrd.open_workbook",
"template.basic_template_generator.BasicTemplate.clean_attr_rule_by_class_id",
"template.match_rule.MatchRule.decode_class_id",
"template.logger.logger.warning",
"template.logger.logger.info",
"extractor.data_extractor.template_redis.flushdb",
"template.logger.logger.error",
"extractor.data_extractor.DataExtractor.find_template_pool_in_db",
"extractor.data_extractor.template_redis.delete",
"template.basic_template_generator.BasicTemplate.gen_attr_rule_in_template"
] |
[((823, 882), 'template.logger.logger.info', 'logger.info', (["('Set input result file path: %s .' % file_path)"], {}), "('Set input result file path: %s .' % file_path)\n", (834, 882), False, 'from template.logger import logger\n'), ((2643, 2694), 'template.basic_template_generator.BasicTemplate.clean_attr_rule_by_class_id', 'BasicTemplate.clean_attr_rule_by_class_id', (['class_id'], {}), '(class_id)\n', (2684, 2694), False, 'from template.basic_template_generator import BasicTemplate\n'), ((2794, 2825), 'extractor.data_extractor.template_redis.delete', 'template_redis.delete', (['class_id'], {}), '(class_id)\n', (2815, 2825), False, 'from extractor.data_extractor import template_redis\n'), ((2834, 2901), 'extractor.data_extractor.DataExtractor.find_template_pool_in_db', 'DataExtractor.find_template_pool_in_db', (['class_id', 'template_dic_pool'], {}), '(class_id, template_dic_pool)\n', (2872, 2901), False, 'from extractor.data_extractor import DataExtractor\n'), ((11998, 12022), 'extractor.data_extractor.template_redis.flushdb', 'template_redis.flushdb', ([], {}), '()\n', (12020, 12022), False, 'from extractor.data_extractor import template_redis\n'), ((13916, 13947), 'extractor.data_extractor.template_redis.delete', 'template_redis.delete', (['class_id'], {}), '(class_id)\n', (13937, 13947), False, 'from extractor.data_extractor import template_redis\n'), ((16570, 16601), 'extractor.data_extractor.template_redis.delete', 'template_redis.delete', (['class_id'], {}), '(class_id)\n', (16591, 16601), False, 'from extractor.data_extractor import template_redis\n'), ((998, 1028), 'xlrd.open_workbook', 'open_workbook', (['self.input_file'], {}), '(self.input_file)\n', (1011, 1028), False, 'from xlrd import open_workbook\n'), ((1041, 1103), 'template.logger.logger.info', 'logger.info', (['(\'Open file successfully: "%s" \' % self.input_file)'], {}), '(\'Open file successfully: "%s" \' % self.input_file)\n', (1052, 1103), False, 'from template.logger import logger\n'), 
((2963, 3031), 'template.basic_template_generator.BasicTemplate.gen_attr_rule_in_template', 'BasicTemplate.gen_attr_rule_in_template', (['template_dic_pool[class_id]'], {}), '(template_dic_pool[class_id])\n', (3002, 3031), False, 'from template.basic_template_generator import BasicTemplate\n'), ((14291, 14327), 'template.logger.logger.error', 'logger.error', (['"""indication is empty!"""'], {}), "('indication is empty!')\n", (14303, 14327), False, 'from template.logger import logger\n'), ((16957, 16988), 'template.logger.logger.error', 'logger.error', (['"""Input is empty!"""'], {}), "('Input is empty!')\n", (16969, 16988), False, 'from template.logger import logger\n'), ((17540, 17607), 'extractor.data_extractor.DataExtractor.find_template_pool_in_db', 'DataExtractor.find_template_pool_in_db', (['class_id', 'template_dic_pool'], {}), '(class_id, template_dic_pool)\n', (17578, 17607), False, 'from extractor.data_extractor import DataExtractor\n'), ((1149, 1222), 'template.logger.logger.error', 'logger.error', (['(\'Can not open file: "%s" Info: %s\' % (self.input_file, exc))'], {}), '(\'Can not open file: "%s" Info: %s\' % (self.input_file, exc))\n', (1161, 1222), False, 'from template.logger import logger\n'), ((5129, 5164), 'template.match_rule.MatchRule.decode_class_id', 'MatchRule.decode_class_id', (['class_id'], {}), '(class_id)\n', (5154, 5164), False, 'from template.match_rule import MatchRule\n'), ((9646, 9685), 'template.match_rule.MatchRule.replace_special_word', 'MatchRule.replace_special_word', (['rd_line'], {}), '(rd_line)\n', (9676, 9685), False, 'from template.match_rule import MatchRule\n'), ((10555, 10622), 'extractor.data_extractor.DataExtractor.find_template_pool_in_db', 'DataExtractor.find_template_pool_in_db', (['class_id', 'template_dic_pool'], {}), '(class_id, template_dic_pool)\n', (10593, 10622), False, 'from extractor.data_extractor import DataExtractor\n'), ((11836, 11867), 'extractor.data_extractor.template_redis.delete', 
'template_redis.delete', (['class_id'], {}), '(class_id)\n', (11857, 11867), False, 'from extractor.data_extractor import template_redis\n'), ((18271, 18340), 'template.logger.logger.info', 'logger.info', (["('Retrieve %d attribute name OK!' % (idx + 1 - start_idx))"], {}), "('Retrieve %d attribute name OK!' % (idx + 1 - start_idx))\n", (18282, 18340), False, 'from template.logger import logger\n'), ((9876, 9940), 'template.logger.logger.info', 'logger.info', (["('Find attribute start index = %d.' % attr_start_idx)"], {}), "('Find attribute start index = %d.' % attr_start_idx)\n", (9887, 9940), False, 'from template.logger import logger\n'), ((10260, 10361), 'template.logger.logger.info', 'logger.info', (["('Find attribute end index = %d. class_id index = %d' % (attr_end_idx,\n class_id_idx))"], {}), "('Find attribute end index = %d. class_id index = %d' % (\n attr_end_idx, class_id_idx))\n", (10271, 10361), False, 'from template.logger import logger\n'), ((10704, 10754), 'template.logger.logger.info', 'logger.info', (["('Can not find template %s' % class_id)"], {}), "('Can not find template %s' % class_id)\n", (10715, 10754), False, 'from template.logger import logger\n'), ((12656, 12747), 'template.logger.logger.info', 'logger.info', (["('Delete all attribute value of unique_id %s ok.' % ind_info[u'unique_id'])"], {}), "('Delete all attribute value of unique_id %s ok.' % ind_info[\n u'unique_id'])\n", (12667, 12747), False, 'from template.logger import logger\n'), ((13109, 13172), 'template.logger.logger.info', 'logger.info', (["('Delete unique_id %s ok.' % ind_info[u'unique_id'])"], {}), "('Delete unique_id %s ok.' 
% ind_info[u'unique_id'])\n", (13120, 13172), False, 'from template.logger import logger\n'), ((13469, 13536), 'extractor.data_extractor.DataExtractor.find_template_pool_in_db', 'DataExtractor.find_template_pool_in_db', (['class_id', 'template_dic_pool'], {}), '(class_id, template_dic_pool)\n', (13507, 13536), False, 'from extractor.data_extractor import DataExtractor\n'), ((13717, 13758), 'template.logger.logger.info', 'logger.info', (['"""Rebuild attribute rule ok."""'], {}), "('Rebuild attribute rule ok.')\n", (13728, 13758), False, 'from template.logger import logger\n'), ((14927, 15030), 'template.logger.logger.info', 'logger.info', (["('update attribute name %s -> %s ok.' % (ind_info[u'attr_name'], ind_info[\n u'attr_val']))"], {}), "('update attribute name %s -> %s ok.' % (ind_info[u'attr_name'],\n ind_info[u'attr_val']))\n", (14938, 15030), False, 'from template.logger import logger\n'), ((15627, 15690), 'template.logger.logger.info', 'logger.info', (["('update unique_id %s ok.' % ind_info[u'unique_id'])"], {}), "('update unique_id %s ok.' % ind_info[u'unique_id'])\n", (15638, 15690), False, 'from template.logger import logger\n'), ((5800, 5866), 'template.logger.logger.warning', 'logger.warning', (["('Find a unnamed template! class id: %d' % class_id)"], {}), "('Find a unnamed template! 
class id: %d' % class_id)\n", (5814, 5866), False, 'from template.logger import logger\n'), ((6765, 6820), 'template.logger.logger.debug', 'logger.debug', (["('Find new attribute name: %s' % attr_name)"], {}), "('Find new attribute name: %s' % attr_name)\n", (6777, 6820), False, 'from template.logger import logger\n'), ((7178, 7192), 'model.basic_element.BasicElement', 'BasicElement', ([], {}), '()\n', (7190, 7192), False, 'from model.basic_element import BasicElement\n'), ((7238, 7273), 'template.match_rule.MatchRule.decode_class_id', 'MatchRule.decode_class_id', (['class_id'], {}), '(class_id)\n', (7263, 7273), False, 'from template.match_rule import MatchRule\n'), ((16111, 16178), 'extractor.data_extractor.DataExtractor.find_template_pool_in_db', 'DataExtractor.find_template_pool_in_db', (['class_id', 'template_dic_pool'], {}), '(class_id, template_dic_pool)\n', (16149, 16178), False, 'from extractor.data_extractor import DataExtractor\n'), ((16371, 16412), 'template.logger.logger.info', 'logger.info', (['"""Rebuild attribute rule ok."""'], {}), "('Rebuild attribute rule ok.')\n", (16382, 16412), False, 'from template.logger import logger\n'), ((18188, 18254), 'template.logger.logger.debug', 'logger.debug', (["('Retrieve attribute name : %s' % attr_name_list[idx])"], {}), "('Retrieve attribute name : %s' % attr_name_list[idx])\n", (18200, 18254), False, 'from template.logger import logger\n'), ((3825, 3860), 'template.match_rule.MatchRule.decode_class_id', 'MatchRule.decode_class_id', (['class_id'], {}), '(class_id)\n', (3850, 3860), False, 'from template.match_rule import MatchRule\n'), ((4409, 4575), 'template.basic_template_generator.BasicTemplate.store_attr_rule_to_ele_list', 'BasicTemplate.store_attr_rule_to_ele_list', (['template_dic_pool[class_id][attr_name][i]', 'm_lv1_id', 'm_lv2_id', 'lv1_name', 'lv2_name', 'attr_name', 'm_basic_element_list'], {}), '(template_dic_pool[class_id][\n attr_name][i], m_lv1_id, m_lv2_id, lv1_name, lv2_name, attr_name,\n 
m_basic_element_list)\n', (4450, 4575), False, 'from template.basic_template_generator import BasicTemplate\n'), ((6978, 7033), 'template.logger.logger.debug', 'logger.debug', (["('Find new attribute value: %s' % attr_val)"], {}), "('Find new attribute value: %s' % attr_val)\n", (6990, 7033), False, 'from template.logger import logger\n'), ((20383, 20457), 'template.logger.logger.info', 'logger.info', (["('Retrieve %s attribute rule OK!' % be_list_rule[idx].attr_val)"], {}), "('Retrieve %s attribute rule OK!' % be_list_rule[idx].attr_val)\n", (20394, 20457), False, 'from template.logger import logger\n'), ((19741, 19871), 'template.logger.logger.debug', 'logger.debug', (["('Retrieve attribute value : %s, %d' % (be_list_without_rule[idx].attr_val,\n be_list_without_rule[idx].unique_id))"], {}), "('Retrieve attribute value : %s, %d' % (be_list_without_rule[\n idx].attr_val, be_list_without_rule[idx].unique_id))\n", (19753, 19871), False, 'from template.logger import logger\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Gating(nn.Module):
    '''
    Convolutional gating network that maps an image to per-expert
    log-probabilities.

    NOTE(review): the previous docstring described an FCN for scene
    coordinate regression; the module actually scores `num_experts`
    experts -- confirm intended description.
    '''

    def __init__(self, num_experts, capacity=1):
        '''
        Constructor.

        num_experts -- number of experts to produce a score for
        capacity -- width multiplier for the internal feature maps
        '''
        super(Gating, self).__init__()
        self.capacity = capacity
        # Layer creation order is kept stable so that seeded parameter
        # initialisation reproduces the original values.
        self.conv1 = nn.Conv2d(3, 8, 3, 1, 1)
        self.conv2 = nn.Conv2d(8, 16, 3, 2, 1)
        self.conv3 = nn.Conv2d(16, 32, 3, 2, 1)
        self.conv4 = nn.Conv2d(32, 64*capacity, 3, 2, 1)
        self.res1_conv1 = nn.Conv2d(64*capacity, 64*capacity, 3, 1, 1)
        self.res1_conv2 = nn.Conv2d(64*capacity, 64*capacity, 1, 1, 0)
        self.res1_conv3 = nn.Conv2d(64*capacity, 64*capacity, 3, 1, 1)
        # "Fully connected" head implemented as 1x1 convolutions.
        self.fc1 = nn.Conv2d(64*capacity, 64*capacity**2, 1, 1, 0)
        self.fc2 = nn.Conv2d(64*capacity**2, 64*capacity**2, 1, 1, 0)
        self.fc3 = nn.Conv2d(64*capacity**2, num_experts, 1, 1, 0)

    def forward(self, inputs):
        '''
        Forward pass.

        inputs -- 4D data tensor (BxCxHxW)
        Returns a (B x num_experts) tensor of log-probabilities.
        '''
        out = self.conv1(inputs).relu()
        out = self.conv2(out).relu()
        out = self.conv3(out).relu()
        out = self.conv4(out).relu()
        out = self.res1_conv1(out).relu()
        out = self.res1_conv2(out).relu()
        out = self.res1_conv3(out).relu()
        if self.capacity == 1:
            out = out.tanh()
        # Global average pool to 1x1 before the 1x1-conv head.
        out = F.avg_pool2d(out, out.shape[2:])
        out = self.fc1(out).relu()
        out = self.fc2(out).relu()
        out = F.log_softmax(self.fc3(out), dim=1)
        return out[:, :, 0, 0]
|
[
"torch.nn.Conv2d",
"torch.nn.functional.log_softmax",
"torch.tanh"
] |
[((318, 342), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(8)', '(3)', '(1)', '(1)'], {}), '(3, 8, 3, 1, 1)\n', (327, 342), True, 'import torch.nn as nn\n'), ((358, 383), 'torch.nn.Conv2d', 'nn.Conv2d', (['(8)', '(16)', '(3)', '(2)', '(1)'], {}), '(8, 16, 3, 2, 1)\n', (367, 383), True, 'import torch.nn as nn\n'), ((399, 425), 'torch.nn.Conv2d', 'nn.Conv2d', (['(16)', '(32)', '(3)', '(2)', '(1)'], {}), '(16, 32, 3, 2, 1)\n', (408, 425), True, 'import torch.nn as nn\n'), ((441, 478), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(64 * capacity)', '(3)', '(2)', '(1)'], {}), '(32, 64 * capacity, 3, 2, 1)\n', (450, 478), True, 'import torch.nn as nn\n'), ((498, 546), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64 * capacity)', '(64 * capacity)', '(3)', '(1)', '(1)'], {}), '(64 * capacity, 64 * capacity, 3, 1, 1)\n', (507, 546), True, 'import torch.nn as nn\n'), ((563, 611), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64 * capacity)', '(64 * capacity)', '(1)', '(1)', '(0)'], {}), '(64 * capacity, 64 * capacity, 1, 1, 0)\n', (572, 611), True, 'import torch.nn as nn\n'), ((628, 676), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64 * capacity)', '(64 * capacity)', '(3)', '(1)', '(1)'], {}), '(64 * capacity, 64 * capacity, 3, 1, 1)\n', (637, 676), True, 'import torch.nn as nn\n'), ((687, 740), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64 * capacity)', '(64 * capacity ** 2)', '(1)', '(1)', '(0)'], {}), '(64 * capacity, 64 * capacity ** 2, 1, 1, 0)\n', (696, 740), True, 'import torch.nn as nn\n'), ((748, 806), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64 * capacity ** 2)', '(64 * capacity ** 2)', '(1)', '(1)', '(0)'], {}), '(64 * capacity ** 2, 64 * capacity ** 2, 1, 1, 0)\n', (757, 806), True, 'import torch.nn as nn\n'), ((812, 863), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64 * capacity ** 2)', 'num_experts', '(1)', '(1)', '(0)'], {}), '(64 * capacity ** 2, num_experts, 1, 1, 0)\n', (821, 863), True, 'import torch.nn as nn\n'), ((1345, 1368), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['x'], {'dim': '(1)'}), 
'(x, dim=1)\n', (1358, 1368), True, 'import torch.nn.functional as F\n'), ((1214, 1227), 'torch.tanh', 'torch.tanh', (['x'], {}), '(x)\n', (1224, 1227), False, 'import torch\n')]
|
#!/usr/bin/env python
import argparse
import sys
from typing import List, Optional
import bdfparser
import numpy as np
DRAWING_CHARS = "8,10,176-223"
def main():
    """Convert the BDF font named on the command line to a raw DOS font."""
    args = parse_args()
    font = bdfparser.Font(args.bdf_file)

    # Choose glyphs from the font and convert them to bitmaps.
    bitmaps = [get_bitmap_for_character(font, i) for i in range(256)]

    # Settle on the glyph height: explicit flag wins, else tallest glyph.
    rows = args.height or max(len(b) for b in bitmaps if b is not None)

    # Substitute a blank bitmap for every character without a glyph.
    fallback_bitmap = np.zeros((rows, 8), dtype=np.uint8)
    for i in range(256):
        if bitmaps[i] is None:
            print(f"Warning: no glyph for char {i}", file=sys.stderr)
            bitmaps[i] = fallback_bitmap

    # Normalise all bitmaps to 8 x rows, extending the marked characters.
    bitmaps = [
        resize(b, 8, rows, i in args.extend_chars)
        for i, b in enumerate(bitmaps)
    ]

    # Write the packed glyphs to disk.
    with open(args.output_file, "wb") as f:
        f.write(b"".join(to_bytes(b) for b in bitmaps))
def parse_args():
    """Parse command-line arguments for the BDF-to-DOS-font converter."""
    parser = argparse.ArgumentParser(
        description="A tool for converting BDF fonts into DOS font format"
    )
    # Positional input/output paths.
    parser.add_argument(
        "bdf_file", type=str, metavar="BDF-FILE",
        help="BDF font file to convert"
    )
    parser.add_argument(
        "output_file", type=str, metavar="OUTPUT-FILE",
        help="Filename of resulting DOS font file"
    )
    parser.add_argument(
        "--height", type=int, metavar="ROWS",
        help="Target height. Glyphs that are too short will be padded to fit."
    )
    # NOTE: argparse applies `type` to string defaults, so DRAWING_CHARS is
    # run through parse_byte_ranges even when the flag is absent.
    parser.add_argument(
        "--extend-chars", "-x",
        type=parse_byte_ranges, metavar="CHARS",
        default=DRAWING_CHARS,
        help=f"""
            For the given character codes, enlarge the glyphs so that they
            touch the edges of the bounding box. Only has an effect if the
            bounding box is larger than the glyph size. If flag is not present,
            defaults to "{DRAWING_CHARS}" (mostly CP437's box/line chars).
        """
    )
    return parser.parse_args()
def parse_byte_ranges(s):
    """Parses strings like "1,3-5" into set(1,3,4,5)."""
    values = set()
    for term in s.split(","):
        endpoints = [int(p) for p in term.split("-")]
        if len(endpoints) not in (1, 2):
            raise ValueError(
                f"""Couldn't parse "{term}" as byte or as a range of bytes"""
            )
        # A single value is a degenerate range; a pair may be given in
        # either order.
        lo, hi = min(endpoints), max(endpoints)
        if lo < 0:
            raise ValueError(f"Value out of range: {lo}")
        if hi > 255:
            raise ValueError(f"Value out of range: {hi}")
        values.update(range(lo, hi + 1))
    return values
def get_bitmap_for_character(
    font: bdfparser.Font,
    char: int
) -> Optional[np.array]:
    """Returns a bitmap from the font that can represent the given CP437 code.

    If no suitable glyph can be found, returns None.
    """
    available = font.glyphs.keys()
    # Try each candidate codepoint in preference order.
    for codepoint in get_codepoints_for_cp437(char):
        if codepoint in available:
            return to_bitmap(font.glyphbycp(codepoint))
    return None
def get_codepoints_for_cp437(x) -> List[int]:
    """Returns possible Unicode codepoints for the given CP437 character.

    This function returns a list because that allows for potential fallback
    codepoints if the font does not have complete coverage. Currently, though,
    this implementation only returns 1 codepoint for each character.
    """
    # Printable ASCII maps to itself.
    if 32 <= x <= 126:
        return [x]
    # Control chars (0-31) and extended chars (127-255) come from the
    # classic CP437 glyph tables below.
    LOWER = " ☺☻♥♦♣♠•◘○◙♂♀♪♫☼►◄↕‼¶§▬↨↑↓→←∟↔▲▼"
    UPPER = "⌂" \
        "ÇüéâäàåçêëèïîìÄÅÉæÆôöòûùÿÖÜ¢£¥₧ƒ" \
        "áíóúñѪº¿⌐¬½¼¡«»░▒▓│┤╡╢╖╕╣║╗╝╜╛┐" \
        "└┴┬├─┼╞╟╚╔╩╦╠═╬╧╨╤╥╙╘╒╓╫╪┘┌█▄▌▐▀" \
        "αßΓπΣσµτΦϴΩδ∞∅∈∩≡±≥≤⌠⌡÷≈°∙·√ⁿ²■ "
    glyph = LOWER[x] if x < 32 else UPPER[x - 127]
    return [ord(glyph)]
def to_bitmap(glyph: bdfparser.Glyph) -> np.array:
"""Converts a Glyph into a 2D array of zeros and ones."""
lines = glyph.draw().todata() # Array of strings like "10101"
return np.array(
[[int(bit) for bit in line] for line in lines],
dtype=np.uint8
)
def resize(bitmap: np.array, new_width, new_height, extend=False):
    """Pad (or crop) *bitmap* to new_width x new_height.

    Padding/cropping is split as evenly as possible between opposite
    edges. The array is rotated 90 degrees four times so that each edge
    in turn becomes the base, where lines are added or removed; after
    the four rotations the original orientation is restored.

    With extend=True, new lines repeat the pattern found at that edge
    instead of being blank.
    """
    rows, cols = bitmap.shape

    def halve(total):
        first = total // 2
        return first, total - first

    pad_top, pad_bottom = halve(new_height - rows)
    pad_left, pad_right = halve(new_width - cols)
    for delta in (pad_left, pad_top, pad_right, pad_bottom):
        bitmap = np.rot90(bitmap)
        if delta < 0:
            # Crop lines from the base of the rotated array.
            bitmap = bitmap[:delta]
        elif delta > 0:
            # Append lines to the base of the rotated array.
            filler_shape = (delta, bitmap.shape[1])
            if extend:
                period = max(get_pattern_length(bitmap), 1)
                filler = np.resize(bitmap[-period:], filler_shape)
            else:
                filler = np.zeros(filler_shape, dtype=np.uint8)
            bitmap = np.concatenate([bitmap, filler])
    return bitmap
def get_pattern_length(bitmap: np.array, max_length=4):
    """Measure the length of any repeating pattern at the bottom of the array.

    For example, if the bottom rows were of the form ...ABCDECDE, this
    function would return 3, because the three rows CDE repeat.

    This function returns the length of the longest pattern it finds, not to
    surpass max_length. Returns 0 if no repeating pattern is found.

    BUG FIX: the loop previously stopped at length 2
    (``range(max_length, 1, -1)``), so a repeated single row was never
    detected and the function returned 0, contradicting this docstring.
    The only in-file caller (`resize`) wraps the result in ``max(..., 1)``,
    so its behaviour is unchanged.
    """
    height, _ = bitmap.shape
    # A pattern of length L needs 2*L rows to be confirmed.
    max_length = min(max_length, height//2)
    for length in range(max_length, 0, -1):
        tail = bitmap[-length:]
        preceding = bitmap[-2*length:-length]
        if np.array_equal(tail, preceding):
            return length
    return 0
def to_bytes(bitmap: np.array):
    """Pack each 8-pixel row of the bitmap into one byte, MSB first."""
    rows, cols = bitmap.shape
    assert(1 <= rows <= 32)
    assert(cols == 8)

    def pack(row):
        # Leftmost pixel becomes the most significant bit.
        value = 0
        for i, bit in enumerate(row):
            value |= int(bit) << (7 - i)
        return value

    return bytes(pack(row) for row in bitmap)
# Entry point: run the converter only when executed as a script.
if __name__ == "__main__":
    main()
|
[
"argparse.ArgumentParser",
"numpy.resize",
"numpy.zeros",
"numpy.rot90",
"bdfparser.Font",
"numpy.array_equal",
"numpy.concatenate"
] |
[((201, 230), 'bdfparser.Font', 'bdfparser.Font', (['args.bdf_file'], {}), '(args.bdf_file)\n', (215, 230), False, 'import bdfparser\n'), ((604, 641), 'numpy.zeros', 'np.zeros', (['(height, 8)'], {'dtype': 'np.uint8'}), '((height, 8), dtype=np.uint8)\n', (612, 641), True, 'import numpy as np\n'), ((1128, 1224), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""A tool for converting BDF fonts into DOS font format"""'}), "(description=\n 'A tool for converting BDF fonts into DOS font format')\n", (1151, 1224), False, 'import argparse\n'), ((4864, 4880), 'numpy.rot90', 'np.rot90', (['bitmap'], {}), '(bitmap)\n', (4872, 4880), True, 'import numpy as np\n'), ((6156, 6176), 'numpy.array_equal', 'np.array_equal', (['a', 'b'], {}), '(a, b)\n', (6170, 6176), True, 'import numpy as np\n'), ((5478, 5513), 'numpy.concatenate', 'np.concatenate', (['[bitmap, new_lines]'], {}), '([bitmap, new_lines])\n', (5492, 5513), True, 'import numpy as np\n'), ((5333, 5368), 'numpy.resize', 'np.resize', (['pattern', 'new_lines_shape'], {}), '(pattern, new_lines_shape)\n', (5342, 5368), True, 'import numpy as np\n'), ((5415, 5456), 'numpy.zeros', 'np.zeros', (['new_lines_shape'], {'dtype': 'np.uint8'}), '(new_lines_shape, dtype=np.uint8)\n', (5423, 5456), True, 'import numpy as np\n')]
|
"""
Various functions for inspecting and restructuring effects.
"""
from __future__ import print_function
import sys
from characteristic import attributes
from . import Effect, guard, ParallelEffects
import six
# `attributes` (from characteristic) generates __eq__/__repr__ based on the
# 'intent' attribute; apply_with_init=False keeps the hand-written __init__.
@attributes(['intent'], apply_with_init=False)
class StubIntent(object):
    """
    An intent which wraps another intent, to flag that the intent should
    be automatically resolved by :func:`resolve_stub`.

    This intent is intentionally not performable by any default mechanism.
    """
    def __init__(self, intent):
        # The wrapped intent that resolve_stub will actually perform.
        self.intent = intent
def resolve_effect(effect, result, is_error=False):
    """
    Supply a result for an effect, allowing its callbacks to run.

    :param effect: the Effect whose callbacks should be run.
    :param result: the value (or, with is_error=True, the exc_info triple)
        to feed to the first applicable callback.
    :param is_error: whether `result` represents a failure.

    The return value of the last callback is returned, unless any callback
    returns another Effect, in which case an Effect representing that
    operation plus the remaining callbacks will be returned.

    This allows you to test your code in a somewhat "channel"-oriented
    way:

    eff = do_thing()
    next_eff = resolve_effect(eff, first_result)
    next_eff = resolve_effect(next_eff, second_result)
    result = resolve_effect(next_eff, third_result)

    Equivalently, if you don't care about intermediate results:

    result = resolve_effect(
        resolve_effect(
            resolve_effect(
                do_thing(),
                first_result),
            second_result),
        third_result)

    NOTE: parallel effects have no special support. They can be resolved with
    a sequence, and if they're returned from another effect's callback they
    will be returned just like any other effect.
    """
    for i, (callback, errback) in enumerate(effect.callbacks):
        # Pick the success or error handler for the current outcome.
        cb = errback if is_error else callback
        if cb is None:
            # No handler for this outcome type; try the next pair.
            continue
        # guard() runs cb(result) and reports (raised?, value-or-exc_info);
        # note is_error is rebound here and steers the next iteration.
        is_error, result = guard(cb, result)
        if type(result) is Effect:
            # The callback returned a nested Effect: suspend here and hand
            # back a new Effect carrying the remaining callbacks.
            return Effect(
                result.intent,
                callbacks=result.callbacks + effect.callbacks[i + 1:])
    if is_error:
        # The final outcome was an exception; re-raise it with traceback.
        six.reraise(*result)
    return result
def fail_effect(effect, exception):
    """
    Resolve an effect with an exception, so its error handler will be run.
    """
    # Raise and immediately catch so a real traceback is attached to the
    # exc_info triple handed to the errback chain.
    try:
        raise exception
    except BaseException:
        return resolve_effect(effect, sys.exc_info(), is_error=True)
def resolve_stub(effect):
    """
    Automatically perform a single effect whose intent is a StubIntent.

    Prefer :func:`resolve_stubs`, which also handles chains of stub
    effects.
    """
    stub = effect.intent
    if type(stub) is not StubIntent:
        raise TypeError("resolve_stub can only resolve stubs, not %r"
                        % (effect,))
    is_error, outcome = guard(stub.intent.perform_effect, None)
    return resolve_effect(effect, outcome, is_error=is_error)
def resolve_stubs(effect):
    """
    Repeatedly perform stub effects until a non-Effect value, or an Effect
    with a non-stub intent, is reached, and return that.

    Parallel effects whose children are all stubs are resolved by
    recursively invoking resolve_stubs on each child.
    """
    if type(effect) is not Effect:
        raise TypeError("effect must be Effect: %r" % (effect,))

    while type(effect) is Effect:
        intent = effect.intent
        if type(intent) is StubIntent:
            effect = resolve_stub(effect)
            continue
        if type(intent) is ParallelEffects:
            children = intent.effects
            if all(isinstance(child.intent, StubIntent)
                   for child in children):
                effect = resolve_effect(
                    effect, [resolve_stubs(child) for child in children])
                continue
        # Non-stub intent (or a parallel with non-stub children): stop.
        break
    return effect
|
[
"six.reraise",
"sys.exc_info",
"characteristic.attributes"
] |
[((219, 264), 'characteristic.attributes', 'attributes', (["['intent']"], {'apply_with_init': '(False)'}), "(['intent'], apply_with_init=False)\n", (229, 264), False, 'from characteristic import attributes\n'), ((2067, 2087), 'six.reraise', 'six.reraise', (['*result'], {}), '(*result)\n', (2078, 2087), False, 'import six\n'), ((2318, 2332), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (2330, 2332), False, 'import sys\n')]
|
import argparse
from athene.utils.config import Config
def _main():
    """Parse the output path from the command line and save the config."""
    cli = argparse.ArgumentParser()
    cli.add_argument('output', help='/path/to/file/to/save/config')
    Config.save_config(cli.parse_args().output)


if __name__ == '__main__':
    _main()
|
[
"athene.utils.config.Config.save_config",
"argparse.ArgumentParser"
] |
[((97, 122), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (120, 122), False, 'import argparse\n'), ((229, 260), 'athene.utils.config.Config.save_config', 'Config.save_config', (['args.output'], {}), '(args.output)\n', (247, 260), False, 'from athene.utils.config import Config\n')]
|
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from toontown.cogdominium import CogdoBarrelRoomConsts
import random
class DistributedCogdoBarrelAI(DistributedObjectAI):
    """AI-side barrel object for the cogdominium barrel room."""

    notify = DirectNotifyGlobal.directNotify.newCategory("DistributedCogdoBarrelAI")

    def __init__(self, air, index):
        DistributedObjectAI.__init__(self, air)
        self.index = index
        self.state = CogdoBarrelRoomConsts.StateAvailable
        # Laff points granted on grab; rolled when the barrel is taken.
        self.brLaff = 0

    def requestGrab(self):
        """Handle a client grab: consume the barrel and heal the toon."""
        toonup = CogdoBarrelRoomConsts.ToonUp
        if self.state != CogdoBarrelRoomConsts.StateAvailable:
            return
        self.state = CogdoBarrelRoomConsts.StateUsed
        self.sendUpdate("setState", [CogdoBarrelRoomConsts.StateUsed])
        self.sendUpdate("setGrab", [self.air.getAvatarIdFromSender()])
        self.brLaff = random.randint(toonup[0], toonup[1])
        self.recieveToonUp()

    def getIndex(self):
        return self.index

    def getState(self):
        return self.state

    def recieveToonUp(self):
        # (sic) misspelt name kept for wire/API compatibility.
        avatar = self.air.doId2do.get(self.air.getAvatarIdFromSender())
        if avatar:
            avatar.toonUp(self.brLaff)
|
[
"direct.directnotify.DirectNotifyGlobal.directNotify.newCategory",
"random.randint",
"direct.distributed.DistributedObjectAI.DistributedObjectAI.__init__"
] |
[((258, 329), 'direct.directnotify.DirectNotifyGlobal.directNotify.newCategory', 'DirectNotifyGlobal.directNotify.newCategory', (['"""DistributedCogdoBarrelAI"""'], {}), "('DistributedCogdoBarrelAI')\n", (301, 329), False, 'from direct.directnotify import DirectNotifyGlobal\n'), ((375, 414), 'direct.distributed.DistributedObjectAI.DistributedObjectAI.__init__', 'DistributedObjectAI.__init__', (['self', 'air'], {}), '(self, air)\n', (403, 414), False, 'from direct.distributed.DistributedObjectAI import DistributedObjectAI\n'), ((894, 930), 'random.randint', 'random.randint', (['toonup[0]', 'toonup[1]'], {}), '(toonup[0], toonup[1])\n', (908, 930), False, 'import random\n')]
|
import sys
h, a = (int(token) for token in sys.stdin.readline().split())


def main():
    # Number of attacks of strength a needed to deal h damage
    # (ceiling division).
    return (h + a - 1) // a


if __name__ == '__main__':
    print(main())
|
[
"sys.stdin.readline"
] |
[((30, 50), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (48, 50), False, 'import sys\n')]
|
# Copyright (c) 2016-2017, the ElectrumX authors
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
'''Backend database abstraction.
The abstraction needs to be improved to not heavily penalise LMDB.
'''
import os
from functools import partial
from lib.util import subclasses, increment_byte_string
def db_class(name):
    '''Returns a DB engine class.

    The lookup is case-insensitive over the subclasses of Storage;
    the chosen engine's module is imported before the class is returned.
    '''
    # FIX: the loop variable used to shadow this function's own name;
    # also hoist the invariant name.lower() out of the loop.
    wanted = name.lower()
    for candidate in subclasses(Storage):
        if candidate.__name__.lower() == wanted:
            candidate.import_module()
            return candidate
    raise RuntimeError('unrecognised DB engine "{}"'.format(name))
class Storage(object):
    '''Abstract base class of the DB backend abstraction.'''
    def __init__(self, name, for_sync):
        # A database that does not yet exist is about to be created,
        # so it is always opened in "for sync" mode.
        self.is_new = not os.path.exists(name)
        self.for_sync = for_sync or self.is_new
        self.open(name, create=self.is_new)
    @classmethod
    def import_module(cls):
        '''Import the DB engine module.'''
        raise NotImplementedError
    def open(self, name, create):
        '''Open an existing database or create a new one.'''
        raise NotImplementedError
    def close(self):
        '''Close an existing database.'''
        raise NotImplementedError
    def get(self, key):
        '''Return the value stored under `key` (backend-defined for
        missing keys).'''
        raise NotImplementedError
    def put(self, key, value):
        '''Store `value` under `key`.'''
        raise NotImplementedError
    def write_batch(self):
        '''Return a context manager that provides `put` and `delete`.

        Changes should only be committed when the context manager
        closes without an exception.
        '''
        raise NotImplementedError
    def iterator(self, prefix=b'', reverse=False):
        '''Return an iterator that yields (key, value) pairs from the
        database sorted by key.

        If `prefix` is set, only keys starting with `prefix` will be
        included.  If `reverse` is True the items are returned in
        reverse order.
        '''
        raise NotImplementedError
class LevelDB(Storage):
    '''LevelDB database engine.'''

    @classmethod
    def import_module(cls):
        '''Load plyvel on demand and remember it on the class.'''
        import plyvel
        cls.module = plyvel

    def open(self, name, create):
        '''Open or create the DB, then forward the Storage API to plyvel.'''
        max_files = 512 if self.for_sync else 128
        self.db = self.module.DB(name, create_if_missing=create,
                                 max_open_files=max_files, compression=None)
        # plyvel's handle already implements the whole Storage interface,
        # so expose its bound methods directly.
        self.close = self.db.close
        self.get = self.db.get
        self.put = self.db.put
        self.iterator = self.db.iterator
        self.write_batch = partial(self.db.write_batch, transaction=True,
                                   sync=True)
class RocksDB(Storage):
    '''RocksDB database engine.'''
    @classmethod
    def import_module(cls):
        import rocksdb
        cls.module = rocksdb
    def open(self, name, create):
        mof = 512 if self.for_sync else 128
        # Resolve "no" to rocksdb.CompressionType.no_compression.
        compression = "no"
        compression = getattr(self.module.CompressionType,
                              compression + "_compression")
        options = self.module.Options(create_if_missing=create,
                                      compression=compression,
                                      use_fsync=True,
                                      target_file_size_base=33554432,
                                      max_open_files=mof)
        self.db = self.module.DB(name, options)
        self.get = self.db.get
        self.put = self.db.put
    def close(self):
        # PyRocksDB doesn't provide a close method; hopefully this is enough
        self.db = self.get = self.put = None
        import gc
        gc.collect()
    class WriteBatch(object):
        # Context manager that commits the batch only on a clean exit.
        def __init__(self, db):
            self.batch = RocksDB.module.WriteBatch()
            self.db = db
        def __enter__(self):
            return self.batch
        def __exit__(self, exc_type, exc_val, exc_tb):
            if not exc_val:
                self.db.write(self.batch)
    def write_batch(self):
        return RocksDB.WriteBatch(self.db)
    class Iterator(object):
        # Prefix-bounded (key, value) iterator over the whole DB.
        def __init__(self, db, prefix, reverse):
            self.it = db.iteritems()
            self.reverse = reverse
            self.prefix = prefix
            # Whether we are at the first item
            self.first = True
        def __iter__(self):
            prefix = self.prefix
            if self.reverse:
                # Seek just past the prefixed range, then walk backwards.
                prefix = increment_byte_string(prefix)
                self.it = reversed(self.it)
            self.it.seek(prefix)
            return self
        def __next__(self):
            k, v = self.it.__next__()
            # NOTE(review): a reversed seek can land one key beyond the
            # prefixed range; the first non-matching key is skipped --
            # confirm against the rocksdb iterator semantics.
            if self.first and self.reverse and not k.startswith(self.prefix):
                k, v = self.it.__next__()
            self.first = False
            if not k.startswith(self.prefix):
                # We're already ahead of the prefix
                raise StopIteration
            return k, v
    def iterator(self, prefix=b'', reverse=False):
        return RocksDB.Iterator(self.db, prefix, reverse)
class LMDB(Storage):
    '''LMDB database engine.

    (The docstring previously said "RocksDB" -- copy/paste slip.)
    '''

    @classmethod
    def import_module(cls):
        import lmdb
        cls.module = lmdb

    def open(self, name, create):
        # I don't see anything equivalent to max_open_files for for_sync
        # NOTE(review): `name` is unused -- the environment always opens in
        # the current directory; confirm this is intentional.
        self.env = LMDB.module.Environment('.', subdir=True, create=create,
                                           max_dbs=32, map_size=5 * 10 ** 10)
        self.db = self.env.open_db(create=create)

    def close(self):
        self.env.close()

    def get(self, key):
        with self.env.begin(db=self.db) as tx:
            return tx.get(key)

    def put(self, key, value):
        with self.env.begin(db=self.db, write=True) as tx:
            tx.put(key, value)

    def write_batch(self):
        # A write transaction doubles as the batch context manager.
        return self.env.begin(db=self.db, write=True)

    def iterator(self, prefix=b'', reverse=False):
        return LMDB.Iterator(self.db, self.env, prefix, reverse)

    class Iterator:
        '''Prefix-bounded (key, value) iterator over a read transaction.'''

        def __init__(self, db, env, prefix, reverse):
            self.transaction = env.begin(db=db)
            self.transaction.__enter__()
            self.db = db
            self.prefix = prefix
            self.reverse = reverse
            self._stop = False

        def __iter__(self):
            self.iterator = LMDB.module.Cursor(self.db, self.transaction)
            prefix = self.prefix
            if self.reverse:
                # Go to the first value after the prefix
                prefix = increment_byte_string(prefix)
            self.iterator.set_range(prefix)
            if not self.iterator.key().startswith(self.prefix) and self.reverse:
                # Go back to the first item starting with the prefix
                self.iterator.prev()
            return self

        def __next__(self):
            k, v = self.iterator.item()
            if not k.startswith(self.prefix) or self._stop:
                # We're already ahead of the prefix.
                # BUG FIX: __exit__ requires the (exc_type, exc_value,
                # traceback) triple; calling it with no arguments raised
                # TypeError instead of closing the transaction.
                self.transaction.__exit__(None, None, None)
                raise StopIteration
            # (renamed from `next`, which shadowed the builtin)
            advance = self.iterator.next \
                if not self.reverse else self.iterator.prev
            # Stop after the next value if we're at the end of the DB
            self._stop = not advance()
            return k, v
[
"functools.partial",
"os.path.exists",
"gc.collect",
"lib.util.subclasses",
"lib.util.increment_byte_string"
] |
[((458, 477), 'lib.util.subclasses', 'subclasses', (['Storage'], {}), '(Storage)\n', (468, 477), False, 'from lib.util import subclasses, increment_byte_string\n'), ((2551, 2608), 'functools.partial', 'partial', (['self.db.write_batch'], {'transaction': '(True)', 'sync': '(True)'}), '(self.db.write_batch, transaction=True, sync=True)\n', (2558, 2608), False, 'from functools import partial\n'), ((3617, 3629), 'gc.collect', 'gc.collect', ([], {}), '()\n', (3627, 3629), False, 'import gc\n'), ((818, 838), 'os.path.exists', 'os.path.exists', (['name'], {}), '(name)\n', (832, 838), False, 'import os\n'), ((4404, 4433), 'lib.util.increment_byte_string', 'increment_byte_string', (['prefix'], {}), '(prefix)\n', (4425, 4433), False, 'from lib.util import subclasses, increment_byte_string\n'), ((6488, 6517), 'lib.util.increment_byte_string', 'increment_byte_string', (['prefix'], {}), '(prefix)\n', (6509, 6517), False, 'from lib.util import subclasses, increment_byte_string\n')]
|
from sqlalchemy import select
from sqlalchemy.sql.functions import count
from sqlutil.sqlalchemy_methods import (
get_sqlalchemy_base_engine,
get_rows,
get_tables_by_reflection,
count_rows,
)
from tqdm import tqdm
from util import data_io, util_methods
if __name__ == "__main__":
# file = "sqlite:////home/tilo/tilo-tub/code/DrQA/data/wikipedia/docs.db"
file = "sqlite:////home/tilo/code/DrQA/data/wikipedia/docs.db"
base, engine = get_sqlalchemy_base_engine(file)
tables = get_tables_by_reflection(base.metadata, engine)
docs_table = tables["documents"]
with engine.connect() as conn:
num_rows = count_rows(engine, docs_table)
num_batches = 8
batch_size = num_rows // num_batches + 1
it = iter(tqdm(get_rows(conn, select([docs_table]))))
def row_gen():
for k in range(batch_size):
try:
d = next(it)
except StopIteration as e: # there is no next element in iterator
break
yield d
for batch_idx in range(num_batches):
data_io.write_jsonl(
f"drqa_wikipedia_{batch_idx}.jsonl.gz", row_gen(), mode="ab"
)
|
[
"sqlutil.sqlalchemy_methods.count_rows",
"sqlutil.sqlalchemy_methods.get_sqlalchemy_base_engine",
"sqlalchemy.select",
"sqlutil.sqlalchemy_methods.get_tables_by_reflection"
] |
[((461, 493), 'sqlutil.sqlalchemy_methods.get_sqlalchemy_base_engine', 'get_sqlalchemy_base_engine', (['file'], {}), '(file)\n', (487, 493), False, 'from sqlutil.sqlalchemy_methods import get_sqlalchemy_base_engine, get_rows, get_tables_by_reflection, count_rows\n'), ((507, 554), 'sqlutil.sqlalchemy_methods.get_tables_by_reflection', 'get_tables_by_reflection', (['base.metadata', 'engine'], {}), '(base.metadata, engine)\n', (531, 554), False, 'from sqlutil.sqlalchemy_methods import get_sqlalchemy_base_engine, get_rows, get_tables_by_reflection, count_rows\n'), ((647, 677), 'sqlutil.sqlalchemy_methods.count_rows', 'count_rows', (['engine', 'docs_table'], {}), '(engine, docs_table)\n', (657, 677), False, 'from sqlutil.sqlalchemy_methods import get_sqlalchemy_base_engine, get_rows, get_tables_by_reflection, count_rows\n'), ((789, 809), 'sqlalchemy.select', 'select', (['[docs_table]'], {}), '([docs_table])\n', (795, 809), False, 'from sqlalchemy import select\n')]
|
import tensorflow as tf
import numpy as np
import math
class Position_Encoder(object):
def __init__(self, emb_size, max_len=5000):
self.emb_size = emb_size
self.max_len = max_len
pe = np.zeros([max_len, emb_size], np.float32)
position = np.expand_dims(np.arange(0, max_len), 1).astype(np.float32)
div_term = np.exp(np.arange(0 ,emb_size, 2).astype(np.float32) * -(math.log(10000.0) / emb_size))
pe[:, 0::2] = np.sin(position * div_term)
pe[:, 1::2] = np.cos(position * div_term)
pe = np.expand_dims(pe, 1)
self.pe = tf.Variable(pe, trainable=False)
def __call__(self, inputs, seq_length):
with tf.variable_scope('position_encoder'):
embs = tf.transpose(inputs, [1, 0, 2])
max_time = tf.shape(embs)[0]
batch_size = tf.shape(embs)[1]
embs = embs * tf.sqrt(float(self.emb_size))
embs_ta = tf.TensorArray(dtype=tf.float32, size=max_time)
embs_ta = embs_ta.unstack(embs)
output_ta = tf.TensorArray(dtype=tf.float32, dynamic_size=True, size=0)
t0 = tf.constant(0, dtype=tf.int32)
f0 = tf.zeros([batch_size], dtype=tf.bool)
mask = tf.expand_dims(tf.cast(tf.sequence_mask(seq_length), tf.float32), -1)
def loop_fn(t, output_ta, f):
cur_emb = embs_ta.read(t)
output = tf.concat([cur_emb, tf.tile(self.pe[t], [batch_size, 1])], -1)
output_ta = output_ta.write(t, output)
f = tf.greater_equal(t + 1, seq_length)
return t + 1, output_ta, f
_, output_ta, _ = tf.while_loop(
cond=lambda _1, _2, f: tf.logical_not(tf.reduce_all(f)),
body=loop_fn,
loop_vars=(t0, output_ta, f0)
)
embs = tf.transpose(output_ta.stack(), [1, 0, 2])
embs *= mask
return embs
class Cnn_extractor(object):
def __init__(self, hidden_dim):
self.hidden_dim = hidden_dim
self.sw0 = tf.layers.Conv1D(self.hidden_dim, 1, padding='same')
self.bn0 = tf.layers.BatchNormalization()
self.sw1 = tf.layers.Conv1D(self.hidden_dim, 1, padding='same')
self.bn1 = tf.layers.BatchNormalization()
self.sw2 = tf.layers.Conv1D(self.hidden_dim, 2, padding='same')
self.bn2 = tf.layers.BatchNormalization()
self.sw2_2 = tf.layers.Conv1D(self.hidden_dim, 2, padding='same')
self.bn2_2 = tf.layers.BatchNormalization()
self.sw3 = tf.layers.Conv1D(self.hidden_dim, 3, padding='same')
self.bn3 = tf.layers.BatchNormalization()
self.sw3_2 = tf.layers.Conv1D(self.hidden_dim, 3, padding='same')
self.bn3_2 = tf.layers.BatchNormalization()
self.sw3_3 = tf.layers.Conv1D(self.hidden_dim, 3, padding='same')
self.bn3_3 = tf.layers.BatchNormalization()
def __call__(self, input):
with tf.variable_scope('cnn_extractor'):
input = self.sw0(input)
input = tf.nn.selu(input)
input = self.bn0(input)
sw1 = self.sw1(input)
sw1 = tf.nn.selu(sw1)
sw1 = self.bn1(sw1)
sw2 = self.sw2(input)
sw2 = tf.nn.selu(sw2)
sw2 = self.bn2(sw2)
sw2 = self.sw2_2(sw2)
sw2 = tf.nn.selu(sw2)
sw2 = self.bn2_2(sw2)
sw3 = self.sw3(input)
sw3 = tf.nn.selu(sw3)
sw3 = self.bn3(sw3)
sw3 = self.sw3_2(sw3)
sw3 = tf.nn.selu(sw3)
sw3 = self.bn3_2(sw3)
sw3 = self.sw3_3(sw3)
sw3 = tf.nn.selu(sw3)
sw3 = self.bn3_3(sw3)
cnn_output = tf.concat([sw1, sw2, sw3], -1)
cnn_output = tf.layers.dense(cnn_output, self.hidden_dim, activation=tf.nn.selu)
return tf.nn.dropout(cnn_output, keep_prob=0.5)
class Attention(object):
def __init__(self, hidden_dim, num_tags):
super(Attention, self).__init__()
self.hidden_dim = hidden_dim
self.num_tags = num_tags
self.attn_dense = tf.layers.Dense(self.hidden_dim, use_bias=False,
kernel_initializer=tf.contrib.layers.xavier_initializer())
self.attn_linear = tf.layers.Dense(self.hidden_dim, use_bias=True, activation=tf.nn.tanh,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
bias_initializer=tf.zeros_initializer())
self.__init_embs()
def __init_embs(self):
with tf.variable_scope('tag_embedding'):
self._tag_embeddings = tf.get_variable(name='_tag_embeddings', shape=[self.num_tags, 25], dtype=tf.float32)
def __call__(self, input, sequence_lengths):
with tf.variable_scope('attention'):
tag_embeddings = tf.nn.embedding_lookup(params=self._tag_embeddings,
ids=tf.constant(list(range(self.num_tags)), dtype=tf.int32),
name='tag_embeddings')
query = tf.transpose(input, [1, 0, 2])
max_time = tf.shape(query)[0]
batch_size = tf.shape(query)[1]
context = tf.tile(tf.expand_dims(tag_embeddings, 0),
[batch_size, 1, 1])
query_ta = tf.TensorArray(dtype=tf.float32, size=max_time)
query_ta = query_ta.unstack(query)
attn_ta = tf.TensorArray(dtype=tf.float32, dynamic_size=True, size=0)
output_ta = tf.TensorArray(dtype=tf.float32, dynamic_size=True, size=0)
t0 = tf.constant(0, dtype=tf.int32)
f0 = tf.zeros([batch_size], dtype=tf.bool)
def loop_fn(t, attn_ta, output_ta, f):
cur_q = query_ta.read(t)
gamma_h = self.attn_dense(context)
gamma_h = tf.squeeze(tf.matmul(gamma_h, tf.expand_dims(cur_q, -1)), -1)
weights = tf.nn.softmax(gamma_h, -1)
c_t = tf.squeeze(tf.matmul(tf.expand_dims(weights, 1), context), 1)
output = self.attn_linear(tf.concat([c_t, cur_q], -1))
attn_ta = attn_ta.write(t, gamma_h)
output_ta = output_ta.write(t, output)
f = tf.greater_equal(t + 1, sequence_lengths)
return t + 1, attn_ta, output_ta, f
_, attn_ta, output_ta, _ = tf.while_loop(
cond=lambda _1, _2, _3, f: tf.logical_not(tf.reduce_all(f)),
body=loop_fn,
loop_vars=(t0, attn_ta, output_ta, f0)
)
self.attn_cnn_outputs = tf.transpose(output_ta.stack(), [1, 0, 2])
attn_weights = tf.transpose(attn_ta.stack(), [1, 0, 2])
return attn_weights, self.attn_cnn_outputs
|
[
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.reduce_all",
"tensorflow.Variable",
"numpy.sin",
"numpy.arange",
"tensorflow.greater_equal",
"tensorflow.get_variable",
"tensorflow.nn.softmax",
"tensorflow.variable_scope",
"tensorflow.concat",
"tensorflow.nn.selu",
"math.log",
"tensorflow.layers.BatchNormalization",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.tile",
"numpy.cos",
"tensorflow.sequence_mask",
"tensorflow.layers.Conv1D",
"tensorflow.zeros_initializer",
"tensorflow.expand_dims",
"tensorflow.layers.dense",
"numpy.zeros",
"numpy.expand_dims",
"tensorflow.shape",
"tensorflow.zeros",
"tensorflow.TensorArray",
"tensorflow.nn.dropout"
] |
[((213, 254), 'numpy.zeros', 'np.zeros', (['[max_len, emb_size]', 'np.float32'], {}), '([max_len, emb_size], np.float32)\n', (221, 254), True, 'import numpy as np\n'), ((462, 489), 'numpy.sin', 'np.sin', (['(position * div_term)'], {}), '(position * div_term)\n', (468, 489), True, 'import numpy as np\n'), ((512, 539), 'numpy.cos', 'np.cos', (['(position * div_term)'], {}), '(position * div_term)\n', (518, 539), True, 'import numpy as np\n'), ((553, 574), 'numpy.expand_dims', 'np.expand_dims', (['pe', '(1)'], {}), '(pe, 1)\n', (567, 574), True, 'import numpy as np\n'), ((593, 625), 'tensorflow.Variable', 'tf.Variable', (['pe'], {'trainable': '(False)'}), '(pe, trainable=False)\n', (604, 625), True, 'import tensorflow as tf\n'), ((2073, 2125), 'tensorflow.layers.Conv1D', 'tf.layers.Conv1D', (['self.hidden_dim', '(1)'], {'padding': '"""same"""'}), "(self.hidden_dim, 1, padding='same')\n", (2089, 2125), True, 'import tensorflow as tf\n'), ((2145, 2175), 'tensorflow.layers.BatchNormalization', 'tf.layers.BatchNormalization', ([], {}), '()\n', (2173, 2175), True, 'import tensorflow as tf\n'), ((2195, 2247), 'tensorflow.layers.Conv1D', 'tf.layers.Conv1D', (['self.hidden_dim', '(1)'], {'padding': '"""same"""'}), "(self.hidden_dim, 1, padding='same')\n", (2211, 2247), True, 'import tensorflow as tf\n'), ((2267, 2297), 'tensorflow.layers.BatchNormalization', 'tf.layers.BatchNormalization', ([], {}), '()\n', (2295, 2297), True, 'import tensorflow as tf\n'), ((2317, 2369), 'tensorflow.layers.Conv1D', 'tf.layers.Conv1D', (['self.hidden_dim', '(2)'], {'padding': '"""same"""'}), "(self.hidden_dim, 2, padding='same')\n", (2333, 2369), True, 'import tensorflow as tf\n'), ((2389, 2419), 'tensorflow.layers.BatchNormalization', 'tf.layers.BatchNormalization', ([], {}), '()\n', (2417, 2419), True, 'import tensorflow as tf\n'), ((2441, 2493), 'tensorflow.layers.Conv1D', 'tf.layers.Conv1D', (['self.hidden_dim', '(2)'], {'padding': '"""same"""'}), "(self.hidden_dim, 2, padding='same')\n", 
(2457, 2493), True, 'import tensorflow as tf\n'), ((2515, 2545), 'tensorflow.layers.BatchNormalization', 'tf.layers.BatchNormalization', ([], {}), '()\n', (2543, 2545), True, 'import tensorflow as tf\n'), ((2565, 2617), 'tensorflow.layers.Conv1D', 'tf.layers.Conv1D', (['self.hidden_dim', '(3)'], {'padding': '"""same"""'}), "(self.hidden_dim, 3, padding='same')\n", (2581, 2617), True, 'import tensorflow as tf\n'), ((2637, 2667), 'tensorflow.layers.BatchNormalization', 'tf.layers.BatchNormalization', ([], {}), '()\n', (2665, 2667), True, 'import tensorflow as tf\n'), ((2689, 2741), 'tensorflow.layers.Conv1D', 'tf.layers.Conv1D', (['self.hidden_dim', '(3)'], {'padding': '"""same"""'}), "(self.hidden_dim, 3, padding='same')\n", (2705, 2741), True, 'import tensorflow as tf\n'), ((2763, 2793), 'tensorflow.layers.BatchNormalization', 'tf.layers.BatchNormalization', ([], {}), '()\n', (2791, 2793), True, 'import tensorflow as tf\n'), ((2815, 2867), 'tensorflow.layers.Conv1D', 'tf.layers.Conv1D', (['self.hidden_dim', '(3)'], {'padding': '"""same"""'}), "(self.hidden_dim, 3, padding='same')\n", (2831, 2867), True, 'import tensorflow as tf\n'), ((2889, 2919), 'tensorflow.layers.BatchNormalization', 'tf.layers.BatchNormalization', ([], {}), '()\n', (2917, 2919), True, 'import tensorflow as tf\n'), ((684, 721), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""position_encoder"""'], {}), "('position_encoder')\n", (701, 721), True, 'import tensorflow as tf\n'), ((742, 773), 'tensorflow.transpose', 'tf.transpose', (['inputs', '[1, 0, 2]'], {}), '(inputs, [1, 0, 2])\n', (754, 773), True, 'import tensorflow as tf\n'), ((936, 983), 'tensorflow.TensorArray', 'tf.TensorArray', ([], {'dtype': 'tf.float32', 'size': 'max_time'}), '(dtype=tf.float32, size=max_time)\n', (950, 983), True, 'import tensorflow as tf\n'), ((1052, 1111), 'tensorflow.TensorArray', 'tf.TensorArray', ([], {'dtype': 'tf.float32', 'dynamic_size': '(True)', 'size': '(0)'}), '(dtype=tf.float32, dynamic_size=True, 
size=0)\n', (1066, 1111), True, 'import tensorflow as tf\n'), ((1129, 1159), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.int32'}), '(0, dtype=tf.int32)\n', (1140, 1159), True, 'import tensorflow as tf\n'), ((1177, 1214), 'tensorflow.zeros', 'tf.zeros', (['[batch_size]'], {'dtype': 'tf.bool'}), '([batch_size], dtype=tf.bool)\n', (1185, 1214), True, 'import tensorflow as tf\n'), ((2965, 2999), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""cnn_extractor"""'], {}), "('cnn_extractor')\n", (2982, 2999), True, 'import tensorflow as tf\n'), ((3057, 3074), 'tensorflow.nn.selu', 'tf.nn.selu', (['input'], {}), '(input)\n', (3067, 3074), True, 'import tensorflow as tf\n'), ((3163, 3178), 'tensorflow.nn.selu', 'tf.nn.selu', (['sw1'], {}), '(sw1)\n', (3173, 3178), True, 'import tensorflow as tf\n'), ((3263, 3278), 'tensorflow.nn.selu', 'tf.nn.selu', (['sw2'], {}), '(sw2)\n', (3273, 3278), True, 'import tensorflow as tf\n'), ((3363, 3378), 'tensorflow.nn.selu', 'tf.nn.selu', (['sw2'], {}), '(sw2)\n', (3373, 3378), True, 'import tensorflow as tf\n'), ((3465, 3480), 'tensorflow.nn.selu', 'tf.nn.selu', (['sw3'], {}), '(sw3)\n', (3475, 3480), True, 'import tensorflow as tf\n'), ((3565, 3580), 'tensorflow.nn.selu', 'tf.nn.selu', (['sw3'], {}), '(sw3)\n', (3575, 3580), True, 'import tensorflow as tf\n'), ((3667, 3682), 'tensorflow.nn.selu', 'tf.nn.selu', (['sw3'], {}), '(sw3)\n', (3677, 3682), True, 'import tensorflow as tf\n'), ((3755, 3785), 'tensorflow.concat', 'tf.concat', (['[sw1, sw2, sw3]', '(-1)'], {}), '([sw1, sw2, sw3], -1)\n', (3764, 3785), True, 'import tensorflow as tf\n'), ((3811, 3878), 'tensorflow.layers.dense', 'tf.layers.dense', (['cnn_output', 'self.hidden_dim'], {'activation': 'tf.nn.selu'}), '(cnn_output, self.hidden_dim, activation=tf.nn.selu)\n', (3826, 3878), True, 'import tensorflow as tf\n'), ((3898, 3938), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['cnn_output'], {'keep_prob': '(0.5)'}), '(cnn_output, keep_prob=0.5)\n', (3911, 
3938), True, 'import tensorflow as tf\n'), ((4653, 4687), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""tag_embedding"""'], {}), "('tag_embedding')\n", (4670, 4687), True, 'import tensorflow as tf\n'), ((4724, 4813), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""_tag_embeddings"""', 'shape': '[self.num_tags, 25]', 'dtype': 'tf.float32'}), "(name='_tag_embeddings', shape=[self.num_tags, 25], dtype=tf\n .float32)\n", (4739, 4813), True, 'import tensorflow as tf\n'), ((4873, 4903), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""attention"""'], {}), "('attention')\n", (4890, 4903), True, 'import tensorflow as tf\n'), ((5204, 5234), 'tensorflow.transpose', 'tf.transpose', (['input', '[1, 0, 2]'], {}), '(input, [1, 0, 2])\n', (5216, 5234), True, 'import tensorflow as tf\n'), ((5459, 5506), 'tensorflow.TensorArray', 'tf.TensorArray', ([], {'dtype': 'tf.float32', 'size': 'max_time'}), '(dtype=tf.float32, size=max_time)\n', (5473, 5506), True, 'import tensorflow as tf\n'), ((5576, 5635), 'tensorflow.TensorArray', 'tf.TensorArray', ([], {'dtype': 'tf.float32', 'dynamic_size': '(True)', 'size': '(0)'}), '(dtype=tf.float32, dynamic_size=True, size=0)\n', (5590, 5635), True, 'import tensorflow as tf\n'), ((5660, 5719), 'tensorflow.TensorArray', 'tf.TensorArray', ([], {'dtype': 'tf.float32', 'dynamic_size': '(True)', 'size': '(0)'}), '(dtype=tf.float32, dynamic_size=True, size=0)\n', (5674, 5719), True, 'import tensorflow as tf\n'), ((5737, 5767), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.int32'}), '(0, dtype=tf.int32)\n', (5748, 5767), True, 'import tensorflow as tf\n'), ((5785, 5822), 'tensorflow.zeros', 'tf.zeros', (['[batch_size]'], {'dtype': 'tf.bool'}), '([batch_size], dtype=tf.bool)\n', (5793, 5822), True, 'import tensorflow as tf\n'), ((797, 811), 'tensorflow.shape', 'tf.shape', (['embs'], {}), '(embs)\n', (805, 811), True, 'import tensorflow as tf\n'), ((840, 854), 'tensorflow.shape', 'tf.shape', (['embs'], {}), 
'(embs)\n', (848, 854), True, 'import tensorflow as tf\n'), ((1551, 1586), 'tensorflow.greater_equal', 'tf.greater_equal', (['(t + 1)', 'seq_length'], {}), '(t + 1, seq_length)\n', (1567, 1586), True, 'import tensorflow as tf\n'), ((4261, 4299), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (4297, 4299), True, 'import tensorflow as tf\n'), ((4461, 4499), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (4497, 4499), True, 'import tensorflow as tf\n'), ((4561, 4583), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (4581, 4583), True, 'import tensorflow as tf\n'), ((5258, 5273), 'tensorflow.shape', 'tf.shape', (['query'], {}), '(query)\n', (5266, 5273), True, 'import tensorflow as tf\n'), ((5302, 5317), 'tensorflow.shape', 'tf.shape', (['query'], {}), '(query)\n', (5310, 5317), True, 'import tensorflow as tf\n'), ((5351, 5384), 'tensorflow.expand_dims', 'tf.expand_dims', (['tag_embeddings', '(0)'], {}), '(tag_embeddings, 0)\n', (5365, 5384), True, 'import tensorflow as tf\n'), ((6081, 6107), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['gamma_h', '(-1)'], {}), '(gamma_h, -1)\n', (6094, 6107), True, 'import tensorflow as tf\n'), ((6390, 6431), 'tensorflow.greater_equal', 'tf.greater_equal', (['(t + 1)', 'sequence_lengths'], {}), '(t + 1, sequence_lengths)\n', (6406, 6431), True, 'import tensorflow as tf\n'), ((289, 310), 'numpy.arange', 'np.arange', (['(0)', 'max_len'], {}), '(0, max_len)\n', (298, 310), True, 'import numpy as np\n'), ((1257, 1285), 'tensorflow.sequence_mask', 'tf.sequence_mask', (['seq_length'], {}), '(seq_length)\n', (1273, 1285), True, 'import tensorflow as tf\n'), ((6234, 6261), 'tensorflow.concat', 'tf.concat', (['[c_t, cur_q]', '(-1)'], {}), '([c_t, cur_q], -1)\n', (6243, 6261), True, 'import tensorflow as tf\n'), ((360, 385), 'numpy.arange', 'np.arange', (['(0)', 'emb_size', '(2)'], {}), '(0, emb_size, 
2)\n', (369, 385), True, 'import numpy as np\n'), ((409, 426), 'math.log', 'math.log', (['(10000.0)'], {}), '(10000.0)\n', (417, 426), False, 'import math\n'), ((1433, 1469), 'tensorflow.tile', 'tf.tile', (['self.pe[t]', '[batch_size, 1]'], {}), '(self.pe[t], [batch_size, 1])\n', (1440, 1469), True, 'import tensorflow as tf\n'), ((6023, 6048), 'tensorflow.expand_dims', 'tf.expand_dims', (['cur_q', '(-1)'], {}), '(cur_q, -1)\n', (6037, 6048), True, 'import tensorflow as tf\n'), ((6151, 6177), 'tensorflow.expand_dims', 'tf.expand_dims', (['weights', '(1)'], {}), '(weights, 1)\n', (6165, 6177), True, 'import tensorflow as tf\n'), ((1730, 1746), 'tensorflow.reduce_all', 'tf.reduce_all', (['f'], {}), '(f)\n', (1743, 1746), True, 'import tensorflow as tf\n'), ((6597, 6613), 'tensorflow.reduce_all', 'tf.reduce_all', (['f'], {}), '(f)\n', (6610, 6613), True, 'import tensorflow as tf\n')]
|
# Copyright 2012 Red Hat, Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""logging utilities for translation
"""
from logging import handlers
from oslo.i18n import _translate
class TranslationHandler(handlers.MemoryHandler):
"""Handler that translates records before logging them.
The TranslationHandler takes a locale and a target logging.Handler object
to forward LogRecord objects to after translating them. This handler
depends on Message objects being logged, instead of regular strings.
The handler can be configured declaratively in the
``logging.conf`` as follows::
[handlers]
keys = translatedlog, translator
[handler_translatedlog]
class = handlers.WatchedFileHandler
args = ('/var/log/api-localized.log',)
formatter = context
[handler_translator]
class = oslo.i18n.log.TranslationHandler
target = translatedlog
args = ('zh_CN',)
If the specified locale is not available in the system, the handler will
log in the default locale.
"""
def __init__(self, locale=None, target=None):
"""Initialize a TranslationHandler
:param locale: locale to use for translating messages
:param target: logging.Handler object to forward
LogRecord objects to after translation
"""
# NOTE(luisg): In order to allow this handler to be a wrapper for
# other handlers, such as a FileHandler, and still be able to
# configure it using logging.conf, this handler has to extend
# MemoryHandler because only the MemoryHandlers' logging.conf
# parsing is implemented such that it accepts a target handler.
handlers.MemoryHandler.__init__(self, capacity=0, target=target)
self.locale = locale
def setFormatter(self, fmt):
self.target.setFormatter(fmt)
def emit(self, record):
# We save the message from the original record to restore it
# after translation, so other handlers are not affected by this
original_msg = record.msg
original_args = record.args
try:
self._translate_and_log_record(record)
finally:
record.msg = original_msg
record.args = original_args
def _translate_and_log_record(self, record):
record.msg = _translate.translate(record.msg, self.locale)
# In addition to translating the message, we also need to translate
# arguments that were passed to the log method that were not part
# of the main message e.g., log.info(_('Some message %s'), this_one))
record.args = _translate.translate_args(record.args, self.locale)
self.target.emit(record)
|
[
"oslo.i18n._translate.translate",
"logging.handlers.MemoryHandler.__init__",
"oslo.i18n._translate.translate_args"
] |
[((2307, 2371), 'logging.handlers.MemoryHandler.__init__', 'handlers.MemoryHandler.__init__', (['self'], {'capacity': '(0)', 'target': 'target'}), '(self, capacity=0, target=target)\n', (2338, 2371), False, 'from logging import handlers\n'), ((2944, 2989), 'oslo.i18n._translate.translate', '_translate.translate', (['record.msg', 'self.locale'], {}), '(record.msg, self.locale)\n', (2964, 2989), False, 'from oslo.i18n import _translate\n'), ((3241, 3292), 'oslo.i18n._translate.translate_args', '_translate.translate_args', (['record.args', 'self.locale'], {}), '(record.args, self.locale)\n', (3266, 3292), False, 'from oslo.i18n import _translate\n')]
|
# Copyright 2016 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals, absolute_import
from django.contrib.humanize.templatetags.humanize import naturaltime
from django.utils import timezone
import datetime, math
def sortable_time_str(d):
return d.strftime('%Y%m%d%H%M%S%f')
def display_time_str(d):
#return d.strftime('%H:%M:%S %m/%d/%y')
return naturaltime(d)
def human_time_str(d):
#return d.strftime('%H:%M:%S %m/%d/%y')
return naturaltime(d)
def get_local_timestamp():
return math.floor((timezone.localtime(
timezone.now()) - timezone.make_aware(datetime.datetime.fromtimestamp(0))).total_seconds())
def get_local_time():
return timezone.localtime(timezone.now())
def std_time_str(d):
return d.strftime('%H:%M:%S %m/%d/%y')
|
[
"django.utils.timezone.now",
"django.contrib.humanize.templatetags.humanize.naturaltime",
"datetime.datetime.fromtimestamp"
] |
[((926, 940), 'django.contrib.humanize.templatetags.humanize.naturaltime', 'naturaltime', (['d'], {}), '(d)\n', (937, 940), False, 'from django.contrib.humanize.templatetags.humanize import naturaltime\n'), ((1020, 1034), 'django.contrib.humanize.templatetags.humanize.naturaltime', 'naturaltime', (['d'], {}), '(d)\n', (1031, 1034), False, 'from django.contrib.humanize.templatetags.humanize import naturaltime\n'), ((1259, 1273), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1271, 1273), False, 'from django.utils import timezone\n'), ((1114, 1128), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1126, 1128), False, 'from django.utils import timezone\n'), ((1152, 1186), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['(0)'], {}), '(0)\n', (1183, 1186), False, 'import datetime, math\n')]
|
# General Module imports-----------------------------------
from datetime import datetime, date, time
import yaml
import json
# General Django Imports----------------------------------
from django.shortcuts import render_to_response
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.template import RequestContext
from django.contrib.auth.models import User
from django.template import Template, Context
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
#from django.core.context_processors import csrf
#from django.views.decorators.csrf import csrf_exempt
#from django.views.decorators.cache import never_cache
#from django.views.decorators.csrf import csrf_protect
#from django.views.decorators.debug import sensitive_post_parameters
#from django.core import serializers
##from django.core.serializers import json
#from django.core.serializers.json import DjangoJSONEncoder
# Application Specific Model Imports-----------------------
import AuShadha.settings as settings
from AuShadha.settings import APP_ROOT_URL
from AuShadha.core.views.dijit_tree import DijitTreeNode, DijitTree
from patient.models import PatientDetail
class PatientTree( object ):
"""
Defines the Dijit UI for Patient Tree
"""
def __init__(self,kwargs):
self.request = kwargs['request']
self.variables = RequestContext(self.request, kwargs)
if not getattr(self.variables['patient_detail_obj'],'urls',None):
self.variables['patient_detail_obj'].save()
try:
d = open('patient/dijit_widgets/tree.yaml','r')
f = d.read()
d.close()
pane_template = Template( f )
rendered_pane = pane_template.render(self.variables)
self.yaml_file = yaml.load( rendered_pane )
except( IOError ):
raise Http404("No template file to render the pane ! ")
try:
self.user = self.request.user
except(AttributeError,ValueError,NameError,TypeError):
raise Exception("Invalid User or no user supplied")
def __unicode__(self):
return self.__call__()
def __call__(self):
y = self.yaml_file
patient_tree_node = DijitTree()
history_node = DijitTreeNode( y['history'])
preventives_node = DijitTreeNode( y['preventives'])
demographics_node = DijitTreeNode( y['demographics'])
medication_list_node = DijitTreeNode( y['medications'])
#admission_node = DijitTreeNode( y['admissions'])
visit_node= DijitTreeNode( y['visits'])
# icd10_node = DijitTreeNode( y['icd_10'] )
# icd10_pcs_node = DijitTreeNode( y['icd_10_pcs'] )
# fda_drug_db_node = DijitTreeNode( y['fda_drug_db'] )
#procedure_node = DijitTreeNode( y['procedures'] )
#imaging_node = DijitTreeNode( y['imaging'] )
#investigation_node= DijitTreeNode( y['investigation'] )
patient_tree_node.add_child_node( history_node )
patient_tree_node.add_child_node( preventives_node )
patient_tree_node.add_child_node( demographics_node )
patient_tree_node.add_child_node( medication_list_node )
#patient_tree_node.add_child_node( admission_node )
patient_tree_node.add_child_node( visit_node )
# patient_tree_node.add_child_node( icd10_node )
# patient_tree_node.add_child_node( icd10_pcs_node )
# patient_tree_node.add_child_node( fda_drug_db_node )
#patient_tree_node.add_child_node( procedure_node )
#patient_tree_node.add_child_node( imaging_node )
#patient_tree_node.add_child_node( investigation_node )
jsondata = patient_tree_node.to_json()
return jsondata
@login_required
def render_patient_tree(request,patient_id = None):
if request.method == "GET" and request.is_ajax():
if patient_id:
patient_id = int( patient_id )
else:
try:
patient_id = int( request.GET.get('patient_id') )
except (KeyError, NameError, ValueError,AttributeError):
raise Http404("Bad Request: Invalid Request Parameters")
try:
patient_detail_obj = PatientDetail.objects.get(pk = patient_id)
if not getattr(patient_detail_obj,'urls',None):
patient_detail_obj.save()
d = {'request' : request,
'patient_detail_obj': patient_detail_obj
}
tree = PatientTree(d)()
print(tree)
return HttpResponse(tree, content_type="application/json")
except (PatientDetail.DoesNotExist):
raise Http404("Bad Request: Patient Does Not Exist")
else:
raise Http404("Bad Request")
|
[
"patient.models.PatientDetail.objects.get",
"yaml.load",
"django.http.HttpResponse",
"AuShadha.core.views.dijit_tree.DijitTree",
"django.http.Http404",
"AuShadha.core.views.dijit_tree.DijitTreeNode",
"django.template.Template",
"django.template.RequestContext"
] |
[((1400, 1436), 'django.template.RequestContext', 'RequestContext', (['self.request', 'kwargs'], {}), '(self.request, kwargs)\n', (1414, 1436), False, 'from django.template import RequestContext\n'), ((2223, 2234), 'AuShadha.core.views.dijit_tree.DijitTree', 'DijitTree', ([], {}), '()\n', (2232, 2234), False, 'from AuShadha.core.views.dijit_tree import DijitTreeNode, DijitTree\n'), ((2258, 2285), 'AuShadha.core.views.dijit_tree.DijitTreeNode', 'DijitTreeNode', (["y['history']"], {}), "(y['history'])\n", (2271, 2285), False, 'from AuShadha.core.views.dijit_tree import DijitTreeNode, DijitTree\n'), ((2313, 2344), 'AuShadha.core.views.dijit_tree.DijitTreeNode', 'DijitTreeNode', (["y['preventives']"], {}), "(y['preventives'])\n", (2326, 2344), False, 'from AuShadha.core.views.dijit_tree import DijitTreeNode, DijitTree\n'), ((2373, 2405), 'AuShadha.core.views.dijit_tree.DijitTreeNode', 'DijitTreeNode', (["y['demographics']"], {}), "(y['demographics'])\n", (2386, 2405), False, 'from AuShadha.core.views.dijit_tree import DijitTreeNode, DijitTree\n'), ((2437, 2468), 'AuShadha.core.views.dijit_tree.DijitTreeNode', 'DijitTreeNode', (["y['medications']"], {}), "(y['medications'])\n", (2450, 2468), False, 'from AuShadha.core.views.dijit_tree import DijitTreeNode, DijitTree\n'), ((2546, 2572), 'AuShadha.core.views.dijit_tree.DijitTreeNode', 'DijitTreeNode', (["y['visits']"], {}), "(y['visits'])\n", (2559, 2572), False, 'from AuShadha.core.views.dijit_tree import DijitTreeNode, DijitTree\n'), ((4573, 4595), 'django.http.Http404', 'Http404', (['"""Bad Request"""'], {}), "('Bad Request')\n", (4580, 4595), False, 'from django.http import Http404, HttpResponse, HttpResponseRedirect\n'), ((1692, 1703), 'django.template.Template', 'Template', (['f'], {}), '(f)\n', (1700, 1703), False, 'from django.template import Template, Context\n'), ((1792, 1816), 'yaml.load', 'yaml.load', (['rendered_pane'], {}), '(rendered_pane)\n', (1801, 1816), False, 'import yaml\n'), ((4104, 4144), 
'patient.models.PatientDetail.objects.get', 'PatientDetail.objects.get', ([], {'pk': 'patient_id'}), '(pk=patient_id)\n', (4129, 4144), False, 'from patient.models import PatientDetail\n'), ((4391, 4442), 'django.http.HttpResponse', 'HttpResponse', (['tree'], {'content_type': '"""application/json"""'}), "(tree, content_type='application/json')\n", (4403, 4442), False, 'from django.http import Http404, HttpResponse, HttpResponseRedirect\n'), ((1860, 1909), 'django.http.Http404', 'Http404', (['"""No template file to render the pane ! """'], {}), "('No template file to render the pane ! ')\n", (1867, 1909), False, 'from django.http import Http404, HttpResponse, HttpResponseRedirect\n'), ((4500, 4546), 'django.http.Http404', 'Http404', (['"""Bad Request: Patient Does Not Exist"""'], {}), "('Bad Request: Patient Does Not Exist')\n", (4507, 4546), False, 'from django.http import Http404, HttpResponse, HttpResponseRedirect\n'), ((4017, 4067), 'django.http.Http404', 'Http404', (['"""Bad Request: Invalid Request Parameters"""'], {}), "('Bad Request: Invalid Request Parameters')\n", (4024, 4067), False, 'from django.http import Http404, HttpResponse, HttpResponseRedirect\n')]
|
import os
import unittest
import numpy as np
import numpy.random as rnd
import tensorflow as tf
from pymanopt.function import TensorFlow
from . import _backend_tests
# Silence TensorFlow's C++ startup/log chatter (3 = errors only) for test runs.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
class TestUnaryFunction(_backend_tests.TestUnaryFunction):
    """Unary cost f(x) = sum(x**2) wired through the TensorFlow backend."""

    def setUp(self):
        super().setUp()
        var = tf.Variable(tf.zeros(self.n, dtype=np.float64), name="x")

        @TensorFlow(var)
        def objective(x):
            # Squared Euclidean norm of the single argument.
            return tf.reduce_sum(x ** 2)

        self.cost = objective
class TestNaryFunction(_backend_tests.TestNaryFunction):
    """Binary cost f(x, y) = <x, y> wired through the TensorFlow backend."""

    def setUp(self):
        super().setUp()
        dim = self.n
        var_x = tf.Variable(tf.zeros(dim, dtype=np.float64), name="x")
        var_y = tf.Variable(tf.zeros(dim, dtype=np.float64), name="y")

        @TensorFlow(var_x, var_y)
        def objective(x, y):
            # Inner product of the two vector arguments.
            return tf.tensordot(x, y, axes=1)

        self.cost = objective
class TestNaryParameterGrouping(_backend_tests.TestNaryParameterGrouping):
    """Three grouped arguments: f(x, y, z) = sum(x**2 + y + z**3)."""

    def setUp(self):
        super().setUp()
        dim = self.n
        var_x = tf.Variable(tf.zeros(dim, dtype=np.float64), name="x")
        var_y = tf.Variable(tf.zeros(dim, dtype=np.float64), name="y")
        var_z = tf.Variable(tf.zeros(dim, dtype=np.float64), name="z")

        @TensorFlow(var_x, var_y, var_z)
        def objective(x, y, z):
            return tf.reduce_sum(x ** 2 + y + z ** 3)

        self.cost = objective
class TestVector(_backend_tests.TestVector):
    """Vector argument: cost exp(sum(X**2)) via the TensorFlow backend."""

    def setUp(self):
        super().setUp()
        var = tf.Variable(tf.zeros(self.n, dtype=np.float64))

        @TensorFlow(var)
        def objective(X):
            return tf.exp(tf.reduce_sum(X ** 2))

        self.cost = objective
class TestMatrix(_backend_tests.TestMatrix):
    """Matrix argument: cost exp(sum(X**2)) via the TensorFlow backend."""

    def setUp(self):
        super().setUp()
        shape = (self.m, self.n)
        var = tf.Variable(tf.zeros(shape, dtype=np.float64))

        @TensorFlow(var)
        def objective(X):
            return tf.exp(tf.reduce_sum(X ** 2))

        self.cost = objective
class TestTensor3(_backend_tests.TestTensor3):
    """Rank-3 tensor argument: cost exp(sum(X**2)) via the TensorFlow backend."""

    def setUp(self):
        super().setUp()
        shape = [self.n1, self.n2, self.n3]
        var = tf.Variable(tf.zeros(shape, dtype=np.float64))

        @TensorFlow(var)
        def objective(X):
            return tf.exp(tf.reduce_sum(X ** 2))

        self.cost = objective
class TestMixed(_backend_tests.TestMixed):
    """Mixed-rank arguments: sum of exp(sum(arg**2)) over three tensors."""

    def setUp(self):
        super().setUp()
        var_x = tf.Variable(tf.zeros(self.n1, dtype=np.float64))
        var_y = tf.Variable(tf.zeros([self.n2, self.n3], dtype=np.float64))
        var_z = tf.Variable(tf.zeros([self.n4, self.n5, self.n6], dtype=np.float64))

        @TensorFlow(var_x, var_y, var_z)
        def objective(x, y, z):
            return (tf.exp(tf.reduce_sum(x ** 2)) +
                    tf.exp(tf.reduce_sum(y ** 2)) +
                    tf.exp(tf.reduce_sum(z ** 2)))

        self.cost = objective
class TestUserProvidedSession(unittest.TestCase):
    """A user-supplied session must actually be used by the wrapped cost."""

    def test_user_session(self):
        class ExplodingSession:
            # Any run() call proves the session was used.
            def run(*args, **kwargs):
                raise RuntimeError

        dim = 10
        var = tf.Variable(tf.zeros(dim, dtype=tf.float64), name="x")

        @TensorFlow(var, session=ExplodingSession())
        def objective(x):
            return tf.reduce_sum(x)

        with self.assertRaises(RuntimeError):
            objective(rnd.randn(dim))
|
[
"tensorflow.reduce_sum",
"numpy.random.randn",
"pymanopt.function.TensorFlow",
"tensorflow.zeros",
"tensorflow.tensordot"
] |
[((397, 410), 'pymanopt.function.TensorFlow', 'TensorFlow', (['x'], {}), '(x)\n', (407, 410), False, 'from pymanopt.function import TensorFlow\n'), ((764, 780), 'pymanopt.function.TensorFlow', 'TensorFlow', (['x', 'y'], {}), '(x, y)\n', (774, 780), False, 'from pymanopt.function import TensorFlow\n'), ((1225, 1244), 'pymanopt.function.TensorFlow', 'TensorFlow', (['x', 'y', 'z'], {}), '(x, y, z)\n', (1235, 1244), False, 'from pymanopt.function import TensorFlow\n'), ((1530, 1543), 'pymanopt.function.TensorFlow', 'TensorFlow', (['X'], {}), '(X)\n', (1540, 1543), False, 'from pymanopt.function import TensorFlow\n'), ((1842, 1855), 'pymanopt.function.TensorFlow', 'TensorFlow', (['X'], {}), '(X)\n', (1852, 1855), False, 'from pymanopt.function import TensorFlow\n'), ((2187, 2200), 'pymanopt.function.TensorFlow', 'TensorFlow', (['X'], {}), '(X)\n', (2197, 2200), False, 'from pymanopt.function import TensorFlow\n'), ((2709, 2728), 'pymanopt.function.TensorFlow', 'TensorFlow', (['x', 'y', 'z'], {}), '(x, y, z)\n', (2719, 2728), False, 'from pymanopt.function import TensorFlow\n'), ((341, 375), 'tensorflow.zeros', 'tf.zeros', (['self.n'], {'dtype': 'np.float64'}), '(self.n, dtype=np.float64)\n', (349, 375), True, 'import tensorflow as tf\n'), ((451, 472), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(x ** 2)'], {}), '(x ** 2)\n', (464, 472), True, 'import tensorflow as tf\n'), ((648, 677), 'tensorflow.zeros', 'tf.zeros', (['n'], {'dtype': 'np.float64'}), '(n, dtype=np.float64)\n', (656, 677), True, 'import tensorflow as tf\n'), ((713, 742), 'tensorflow.zeros', 'tf.zeros', (['n'], {'dtype': 'np.float64'}), '(n, dtype=np.float64)\n', (721, 742), True, 'import tensorflow as tf\n'), ((824, 850), 'tensorflow.tensordot', 'tf.tensordot', (['x', 'y'], {'axes': '(1)'}), '(x, y, axes=1)\n', (836, 850), True, 'import tensorflow as tf\n'), ((1044, 1073), 'tensorflow.zeros', 'tf.zeros', (['n'], {'dtype': 'np.float64'}), '(n, dtype=np.float64)\n', (1052, 1073), True, 'import tensorflow 
as tf\n'), ((1109, 1138), 'tensorflow.zeros', 'tf.zeros', (['n'], {'dtype': 'np.float64'}), '(n, dtype=np.float64)\n', (1117, 1138), True, 'import tensorflow as tf\n'), ((1174, 1203), 'tensorflow.zeros', 'tf.zeros', (['n'], {'dtype': 'np.float64'}), '(n, dtype=np.float64)\n', (1182, 1203), True, 'import tensorflow as tf\n'), ((1291, 1325), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(x ** 2 + y + z ** 3)'], {}), '(x ** 2 + y + z ** 3)\n', (1304, 1325), True, 'import tensorflow as tf\n'), ((1489, 1518), 'tensorflow.zeros', 'tf.zeros', (['n'], {'dtype': 'np.float64'}), '(n, dtype=np.float64)\n', (1497, 1518), True, 'import tensorflow as tf\n'), ((1796, 1830), 'tensorflow.zeros', 'tf.zeros', (['(m, n)'], {'dtype': 'np.float64'}), '((m, n), dtype=np.float64)\n', (1804, 1830), True, 'import tensorflow as tf\n'), ((2135, 2175), 'tensorflow.zeros', 'tf.zeros', (['[n1, n2, n3]'], {'dtype': 'np.float64'}), '([n1, n2, n3], dtype=np.float64)\n', (2143, 2175), True, 'import tensorflow as tf\n'), ((2539, 2569), 'tensorflow.zeros', 'tf.zeros', (['n1'], {'dtype': 'np.float64'}), '(n1, dtype=np.float64)\n', (2547, 2569), True, 'import tensorflow as tf\n'), ((2595, 2631), 'tensorflow.zeros', 'tf.zeros', (['[n2, n3]'], {'dtype': 'np.float64'}), '([n2, n3], dtype=np.float64)\n', (2603, 2631), True, 'import tensorflow as tf\n'), ((2657, 2697), 'tensorflow.zeros', 'tf.zeros', (['[n4, n5, n6]'], {'dtype': 'np.float64'}), '([n4, n5, n6], dtype=np.float64)\n', (2665, 2697), True, 'import tensorflow as tf\n'), ((3163, 3192), 'tensorflow.zeros', 'tf.zeros', (['n'], {'dtype': 'tf.float64'}), '(n, dtype=tf.float64)\n', (3171, 3192), True, 'import tensorflow as tf\n'), ((3291, 3307), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['x'], {}), '(x)\n', (3304, 3307), True, 'import tensorflow as tf\n'), ((1591, 1612), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(X ** 2)'], {}), '(X ** 2)\n', (1604, 1612), True, 'import tensorflow as tf\n'), ((1903, 1924), 'tensorflow.reduce_sum', 'tf.reduce_sum', 
(['(X ** 2)'], {}), '(X ** 2)\n', (1916, 1924), True, 'import tensorflow as tf\n'), ((2248, 2269), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(X ** 2)'], {}), '(X ** 2)\n', (2261, 2269), True, 'import tensorflow as tf\n'), ((3372, 3384), 'numpy.random.randn', 'rnd.randn', (['n'], {}), '(n)\n', (3381, 3384), True, 'import numpy.random as rnd\n'), ((2887, 2908), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(z ** 2)'], {}), '(z ** 2)\n', (2900, 2908), True, 'import tensorflow as tf\n'), ((2783, 2804), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(x ** 2)'], {}), '(x ** 2)\n', (2796, 2804), True, 'import tensorflow as tf\n'), ((2835, 2856), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(y ** 2)'], {}), '(y ** 2)\n', (2848, 2856), True, 'import tensorflow as tf\n')]
|
"""
ios.py
Handle arguments, configuration file
@author: K.Edeline
"""
import sys
import argparse
import configparser
import logging
import logging.config
import os
class IOManager():
   """
   Handle command-line arguments, configuration file parsing and logging
   setup for the DxAgent applications (DXAgent, DXTop, DXWeb).

   Extend me: load_ios() dispatches on the concrete subclass name to
   decide which argument set, configuration and logger to load.
   """
   def __init__(self, child=None, parse_args=True):
      self.child = child
      self.parse_args = parse_args
      self.args = None
      self.config = None
      self.logger = None
   def load_ios(self):
      """
      Load all ios (arguments, configuration, logging) for the concrete
      subclass. No-op when parse_args is False or the class name is not
      one of the known applications.
      """
      if not self.parse_args:
         return
      child_class = type(self).__name__
      if child_class == "DXTop":
         self.arguments_dxtop()
         self.log()
      elif child_class == "DXAgent":
         self.arguments_dxagent()
         # Only the long-running "start" command needs config and logging.
         if "start" in self.args.cmd:
            self.configuration_dxagent()
            self.log()
      elif child_class == "DXWeb":
         self.arguments_dxweb()
         self.configuration_dxweb()
         self.log()
   ########################################################
   # ARGPARSE
   ########################################################
   def arguments_dxweb(self):
      """
      Parse dxweb arguments and store the Namespace in self.args.
      """
      parser = argparse.ArgumentParser(description='Diagnostic Agent web app')
      parser.add_argument('-c' , '--config', type=str, default="./dxagent.ini",
         help='configuration file location')
      parser.add_argument('-l' , '--log-file', type=str, default="dxweb.log",
         help='log file location (default: dxweb.log)')
      parser.add_argument('-t' , '--target', type=str, default=None,
         help='gNMI target(default from dxagent.ini)')
      parser.add_argument('-k' , '--certs-dir', type=str,
         default="./certs/",
         help='certificate/key files location')
      parser.add_argument('-v' , '--verbose', action='store_true',
         help='increase output level')
      self.args = parser.parse_args()
      return self.args
   def arguments_dxtop(self):
      """
      Parse dxtop arguments and store the Namespace in self.args.
      """
      parser = argparse.ArgumentParser(description='Diagnostic Agent console app')
      parser.add_argument('-l' , '--log-file', type=str, default="dxtop.log",
         help='log file location (default: dxtop.log)')
      parser.add_argument('-v' , '--verbose', action='store_true',
         help='increase output level')
      self.args = parser.parse_args()
      return self.args
   def arguments_dxagent(self):
      """
      Parse dxagent arguments and store the Namespace in self.args.
      Relative paths are converted to absolute ones.
      """
      parser = argparse.ArgumentParser(description='Diagnostic Agent')
      parser.add_argument('cmd', type=str,
         choices=["start", "stop", "restart", "status"],
         )
      parser.add_argument('-l' , '--log-file', type=str,
         default="/var/log/dxagent.log",
         help='log file location (default: dxagent.log)')
      parser.add_argument('-c' , '--config', type=str, default="./dxagent.ini",
         help='configuration file location')
      parser.add_argument('-r' , '--ressources-dir', type=str,
         default="./res/",
         help='configuration file location')
      parser.add_argument('-k' , '--certs-dir', type=str,
         default="./certs/",
         help='certificate/key files location')
      parser.add_argument('-s' , '--disable-shm', action='store_true',
         help='disable shared memory segment '
              '(cannot use dxtop)')
      parser.add_argument('-v' , '--verbose', action='store_true',
         help='increase output level')
      self.args = parser.parse_args()
      # retrieve absolute paths
      self.args.config = os.path.abspath(self.args.config)
      self.args.ressources_dir = os.path.abspath(self.args.ressources_dir)
      self.args.certs_dir = os.path.abspath(self.args.certs_dir)
      return self.args
   ########################################################
   # CONFIGPARSER
   ########################################################
   def configuration_dxweb(self):
      """
      Parse the dxweb configuration file; requires arguments_dxweb() first.

      Raises IOManagerException when arguments were not parsed yet.
      """
      if self.args is None or self.args.config is None:
         # BUG FIX: was `raise IOSException(...)`, an undefined name.
         raise IOManagerException("Arguments not found")
      self.config = configparser.ConfigParser()
      parsed = self.config.read(self.args.config)
      if not parsed:
         print("Configuration file not found:", self.args.config)
         sys.exit(1)
      # parse gnmi target url (command line overrides the config file)
      if self.args.target:
         self.gnmi_target = self.args.target
      else:
         self.gnmi_target = self.config["gnmi"].get("target")
      return self.config
   def configuration_dxagent(self):
      """
      Parse the dxagent configuration file; requires arguments_dxagent()
      first.

      Raises IOManagerException when arguments were not parsed yet.
      """
      if self.args is None or self.args.config is None:
         # BUG FIX: was `raise IOSException(...)`, an undefined name.
         raise IOManagerException("Arguments not found")
      self.config = configparser.ConfigParser()
      parsed = self.config.read(self.args.config)
      if not parsed:
         print("Configuration file not found:", self.args.config)
         sys.exit(1)
      # set default configuration directory
      if "config_directory" not in self.config["virtualbox"]:
         default_config_dir = "/home/{}/.config".format(
            self.config["virtualbox"]["vbox_user"])
         self.config["virtualbox"]["config_directory"] = default_config_dir
      # parse gnmi target url
      self.gnmi_target = self.config["gnmi"].get("target")
      # parse VPP gNMI nodes (comma-separated list in the config file)
      self.vpp_gnmi_nodes = []
      vpp_gnmi_nodes = self.config["vpp"].get("gnmi_nodes")
      if vpp_gnmi_nodes:
         self.vpp_gnmi_nodes = [node.strip() for node in vpp_gnmi_nodes.split(",")]
      # parse ioam gNMI nodes (comma-separated list in the config file)
      self.ioam_gnmi_nodes = []
      ioam_gnmi_nodes = self.config["ioam"].get("gnmi_nodes")
      if ioam_gnmi_nodes:
         self.ioam_gnmi_nodes = [node.strip() for node in ioam_gnmi_nodes.split(",")]
      return self.config
   ########################################################
   # LOGGING
   ########################################################
   def log(self):
      """
      Load the logging facility: a file handler whose level depends on
      --verbose, plus convenience bound methods (self.info, ...).

      Raises IOManagerException when arguments were not parsed yet.
      """
      if self.args is None:
         raise IOManagerException("Arguments not found")
      # create logger
      self.logger = logging.getLogger(self.child.__class__.__name__)
      self.logger.setLevel(logging.DEBUG)
      # log file handler
      fh = logging.FileHandler(self.args.log_file if self.args.log_file.startswith("/") else "./"+self.args.log_file)
      fh.setLevel(logging.DEBUG if self.args.verbose else logging.INFO)
      # add formatter to handlers
      formatter = logging.Formatter("%(asctime)s %(message)s",
                                    "%m-%d %H:%M:%S")
      fh.setFormatter(formatter)
      self.logger.addHandler(fh)
      # log functions
      self.debug = self.logger.debug
      self.info = self.logger.info
      # BUG FIX: Logger.warn is a deprecated alias; bind warning instead
      # (the public attribute name self.warn is preserved for callers).
      self.warn = self.logger.warning
      self.error = self.logger.error
      self.critical = self.logger.critical
      # Disable logging from other modules
      self.logger.propagate = False
      return self.logger
class IOManagerException(Exception):
   """
   Raised by IOManager when arguments/configuration are missing.
   Stores the offending value and reprs it when stringified.
   """
   def __init__(self, value):
      self.value = value
   def __str__(self):
      return f"{self.value!r}"
|
[
"os.path.abspath",
"argparse.ArgumentParser",
"logging.Formatter",
"sys.exit",
"configparser.ConfigParser",
"logging.getLogger"
] |
[((1175, 1238), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Diagnostic Agent web app"""'}), "(description='Diagnostic Agent web app')\n", (1198, 1238), False, 'import argparse\n'), ((2126, 2193), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Diagnostic Agent console app"""'}), "(description='Diagnostic Agent console app')\n", (2149, 2193), False, 'import argparse\n'), ((2628, 2683), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Diagnostic Agent"""'}), "(description='Diagnostic Agent')\n", (2651, 2683), False, 'import argparse\n'), ((3934, 3967), 'os.path.abspath', 'os.path.abspath', (['self.args.config'], {}), '(self.args.config)\n', (3949, 3967), False, 'import os\n'), ((4001, 4042), 'os.path.abspath', 'os.path.abspath', (['self.args.ressources_dir'], {}), '(self.args.ressources_dir)\n', (4016, 4042), False, 'import os\n'), ((4071, 4107), 'os.path.abspath', 'os.path.abspath', (['self.args.certs_dir'], {}), '(self.args.certs_dir)\n', (4086, 4107), False, 'import os\n'), ((4491, 4518), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (4516, 4518), False, 'import configparser\n'), ((5128, 5155), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (5153, 5155), False, 'import configparser\n'), ((6585, 6633), 'logging.getLogger', 'logging.getLogger', (['self.child.__class__.__name__'], {}), '(self.child.__class__.__name__)\n', (6602, 6633), False, 'import logging\n'), ((6945, 7007), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(message)s"""', '"""%m-%d %H:%M:%S"""'], {}), "('%(asctime)s %(message)s', '%m-%d %H:%M:%S')\n", (6962, 7007), False, 'import logging\n'), ((4670, 4681), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4678, 4681), False, 'import sys\n'), ((5307, 5318), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5315, 5318), False, 'import sys\n')]
|
import unittest
import os
import names
import uuid
import requests
import json
class TestDeleteUser(unittest.TestCase):
    """Integration tests for DELETE /user/<id> against a locally running API."""

    BASE_URL = 'http://localhost:5000'

    def setUp(self):
        self.API_URL = self.BASE_URL + '/user'

    def tearDown(self):
        pass

    def test_delete_user_sunny_day(self):
        # Create a user so there is something to delete.
        full_name = names.get_full_name()
        create_response = requests.post(self.API_URL, json={'name': full_name})
        self.assertIsNotNone(create_response)
        self.assertTrue(create_response.status_code == 201)
        created = create_response.json()
        self.assertIsNotNone(created)
        self.assertIsNotNone(created['id'])
        self.assertTrue(len(created['id']) > 0)
        user_id = created['id']
        # Delete the user just created.
        delete_response = requests.delete(self.API_URL + '/' + user_id)
        self.assertIsNotNone(delete_response)
        self.assertTrue(delete_response.status_code == 204)

    def test_delete_user_nonexisting_user(self):
        missing_id = str(uuid.uuid4())
        response = requests.delete(self.API_URL + '/' + missing_id)
        # check response code and content
        self.assertIsNotNone(response)
        self.assertTrue(response.status_code == 404)
        body = response.json()
        self.assertIsNotNone(body)
        self.assertIsNotNone(body['error'])
        self.assertTrue(body['error'] == 'Not found')
# Allow running this test module directly (python <file>) as well as via a runner.
if __name__ == '__main__':
    unittest.main()
|
[
"unittest.main",
"uuid.uuid4",
"requests.delete",
"requests.post",
"names.get_full_name"
] |
[((1459, 1474), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1472, 1474), False, 'import unittest\n'), ((352, 373), 'names.get_full_name', 'names.get_full_name', ([], {}), '()\n', (371, 373), False, 'import names\n'), ((455, 496), 'requests.post', 'requests.post', (['self.API_URL'], {'json': 'payload'}), '(self.API_URL, json=payload)\n', (468, 496), False, 'import requests\n'), ((815, 855), 'requests.delete', 'requests.delete', (["(self.API_URL + '/' + id)"], {}), "(self.API_URL + '/' + id)\n", (830, 855), False, 'import requests\n'), ((1069, 1126), 'requests.delete', 'requests.delete', (["(self.API_URL + '/' + nonexisting_user_id)"], {}), "(self.API_URL + '/' + nonexisting_user_id)\n", (1084, 1126), False, 'import requests\n'), ((1036, 1048), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1046, 1048), False, 'import uuid\n')]
|
import torch
class Stitcher:
    """Stitch windowed two-source separation masks into full-length masks.

    Consecutive analysis windows are aligned by comparing masked-energy
    continuity across their overlap (get_stitch), then the per-window masks
    are de-permuted, sharpened winner-take-all and averaged over overlapping
    frames (get_connect).
    """

    def __init__(self, stitching_config, sr=16000):
        self.eval_win = stitching_config["eval_win"]
        self.eval_hop = stitching_config["eval_hop"]
        self.fft_hop = stitching_config["hop_size"]
        self.sr = sr
        # Overlap between consecutive windows, in spectrogram frames
        # (16ms window per the original implementation note).
        self.stitch_margin = int(
            (self.eval_win * 10 - self.eval_hop * 10) / 10 * self.sr / self.fft_hop
        )

    def get_stitch(self, x, masks):
        """
        Use the masks as the stitching rule.
        x: magnitude spectrogram per window (single channel expected)
        masks: per-window masks, F x T x 3 (2 sources + 1 noise)
        Returns one [0, 1] / [1, 0] permutation per window transition.
        """
        margin = self.stitch_margin
        permutations = []
        for idx in range(len(masks) - 1):
            # Source channels only (drop noise), reordered to 2 x F x T.
            prev_sources = masks[idx][:, :, :-1].permute(2, 0, 1)
            next_sources = masks[idx + 1][:, :, :-1].permute(2, 0, 1)
            energy_prev = prev_sources * torch.abs(x[idx])
            energy_next = next_sources * torch.abs(x[idx + 1])
            # Similarity of each (next i, prev j) pairing over the overlap.
            sim = torch.zeros((2, 2))
            for i in range(2):
                for j in range(2):
                    diff = (
                        energy_prev[j, :, -margin:]
                        - energy_next[i, :, :margin]
                    )
                    sim[i, j] = -torch.sum(torch.pow(torch.abs(diff), 0.5))
            keep_score = sim[0, 0] + sim[1, 1]
            swap_score = sim[0, 1] + sim[1, 0]
            permutations.append([0, 1] if keep_score >= swap_score else [1, 0])
        return permutations

    def get_connect(self, PERM, mask):
        """Merge per-window masks into three full-length masks.

        PERM: per-transition permutations from get_stitch
        mask: per-window F x T x 3 masks
        Returns (source1, source2, noise) masks averaged over overlaps.
        """
        # A [1, 0] permutation flips which channel is "source 1" from then on;
        # accumulate that flip state across all windows.
        flip = 0
        flips = [0]
        for perm in PERM:
            if perm[0] == 1:
                flip = 1 - flip
            flips.append(flip)
        src1, src2, noise = [], [], []
        for state, m in zip(flips, mask):
            if state == 0:
                src1.append(m[:, :, 0])
                src2.append(m[:, :, 1])
            else:
                src1.append(m[:, :, 1])
                src2.append(m[:, :, 0])
            noise.append(m[:, :, 2])
        # Winner-take-all: keep the dominant channel per bin, floor the rest.
        for i in range(len(mask)):
            stacked = torch.stack((src1[i], src2[i], noise[i]), dim=2)  # F x T x 3
            peak = torch.amax(stacked, dim=2, keepdim=True)
            stacked = torch.where(
                stacked == peak, stacked, torch.tensor(1e-4, dtype=torch.float32)
            )
            src1[i] = stacked[:, :, 0]
            src2[i] = stacked[:, :, 1]
            noise[i] = stacked[:, :, 2]
        # Overlap-add the windows and normalize by the coverage count.
        hop = int(self.eval_hop * self.sr / self.fft_hop)
        F, win = src1[0].shape
        total_len = int(hop * (len(mask) - 1) + win)
        out1 = torch.zeros((F, total_len))
        out2 = torch.zeros((F, total_len))
        out_noise = torch.zeros((F, total_len))
        coverage = torch.zeros((1, total_len))
        for i in range(len(mask)):
            segment = mask[i]
            start = hop * i
            if segment.shape[1] < win:
                end = start + segment.shape[1]
            else:
                end = start + win
            out1[:, start:end] += src1[i]
            out2[:, start:end] += src2[i]
            out_noise[:, start:end] += noise[i]
            coverage[:, start:end] += 1
        coverage[coverage == 0] = 1
        return (out1 / coverage, out2 / coverage, out_noise / coverage)
|
[
"torch.stack",
"torch.zeros",
"torch.amax",
"torch.abs",
"torch.tensor"
] |
[((3009, 3032), 'torch.zeros', 'torch.zeros', (['(F, all_L)'], {}), '((F, all_L))\n', (3020, 3032), False, 'import torch\n'), ((3049, 3072), 'torch.zeros', 'torch.zeros', (['(F, all_L)'], {}), '((F, all_L))\n', (3060, 3072), False, 'import torch\n'), ((3093, 3116), 'torch.zeros', 'torch.zeros', (['(F, all_L)'], {}), '((F, all_L))\n', (3104, 3116), False, 'import torch\n'), ((3137, 3160), 'torch.zeros', 'torch.zeros', (['(1, all_L)'], {}), '((1, all_L))\n', (3148, 3160), False, 'import torch\n'), ((1118, 1137), 'torch.zeros', 'torch.zeros', (['(2, 2)'], {}), '((2, 2))\n', (1129, 1137), False, 'import torch\n'), ((2512, 2543), 'torch.stack', 'torch.stack', (['(r1, r2, n)'], {'dim': '(2)'}), '((r1, r2, n), dim=2)\n', (2523, 2543), False, 'import torch\n'), ((2577, 2611), 'torch.amax', 'torch.amax', (['m'], {'dim': '(2)', 'keepdim': '(True)'}), '(m, dim=2, keepdim=True)\n', (2587, 2611), False, 'import torch\n'), ((965, 980), 'torch.abs', 'torch.abs', (['x[n]'], {}), '(x[n])\n', (974, 980), False, 'import torch\n'), ((1020, 1039), 'torch.abs', 'torch.abs', (['x[n + 1]'], {}), '(x[n + 1])\n', (1029, 1039), False, 'import torch\n'), ((2655, 2696), 'torch.tensor', 'torch.tensor', (['(0.0001)'], {'dtype': 'torch.float32'}), '(0.0001, dtype=torch.float32)\n', (2667, 2696), False, 'import torch\n'), ((1464, 1476), 'torch.abs', 'torch.abs', (['d'], {}), '(d)\n', (1473, 1476), False, 'import torch\n')]
|
# This file contains content licensed by https://github.com/chaiyujin/glow-pytorch/blob/master/LICENSE
import torch
import torch.nn as nn
from models.modules import thops
from models.modules.layers import Conv2d, Conv2dZeros
class AffineCoupling(nn.Module):
    """Glow-style affine coupling layer.

    Splits the input channel-wise into (a, b); a small conv net conditioned
    on `a` predicts a shift and scale that transform `b`, keeping the flow
    invertible and its log-determinant cheap to compute.
    """
    def __init__(self, in_channels, out_channels, hidden_channels):
        """Build the shift/scale prediction network.

        The final Conv2dZeros layer starts the coupling as an identity map
        (zero shift, scale sigmoid(2) at init).
        """
        super().__init__()
        self.NN = nn.Sequential(
            Conv2d(in_channels, hidden_channels), # ActNorm
            nn.ReLU(inplace=False),
            Conv2d(hidden_channels, hidden_channels, kernel_size=[1, 1]), # ActNorm
            nn.ReLU(inplace=False),
            Conv2dZeros(hidden_channels, out_channels) # w/o ActNorm
        )
    def forward(self, inp, logdet=None, reverse=False):
        """Apply (or invert, when reverse=True) the affine coupling.

        Returns (z, logdet) where logdet is the running log-determinant
        accumulated with this layer's contribution.
        NOTE(review): assumes logdet is a tensor/number when passed in;
        the default None would fail at `logdet + d_logdet` — confirm callers
        always supply it.
        """
        a, b = thops.split_feature(inp, "split")
        h = self.NN(a)
        shift, scale = thops.split_feature(h, "cross")
        # +2 bias keeps sigmoid near 1 at init, so the layer starts ~identity.
        scale = torch.sigmoid(scale + 2.)
        if not reverse: # Normal flow
            # NOTE(review): in-place update of `b` — assumes
            # thops.split_feature returns tensors safe to mutate; confirm.
            b += shift
            b *= scale
            d_logdet = thops.sum(torch.log(scale), dim=[1, 2, 3])
        else:
            # Inverse transform: undo scale then shift, negate the logdet.
            b = b / scale
            b = b - shift
            d_logdet = -thops.sum(torch.log(scale), dim=[1, 2, 3])
        logdet = logdet + d_logdet
        z = thops.cat_feature(a, b)
        return z, logdet
|
[
"torch.nn.ReLU",
"models.modules.thops.cat_feature",
"models.modules.thops.split_feature",
"torch.sigmoid",
"models.modules.layers.Conv2d",
"models.modules.layers.Conv2dZeros",
"torch.log"
] |
[((818, 851), 'models.modules.thops.split_feature', 'thops.split_feature', (['inp', '"""split"""'], {}), "(inp, 'split')\n", (837, 851), False, 'from models.modules import thops\n'), ((898, 929), 'models.modules.thops.split_feature', 'thops.split_feature', (['h', '"""cross"""'], {}), "(h, 'cross')\n", (917, 929), False, 'from models.modules import thops\n'), ((946, 972), 'torch.sigmoid', 'torch.sigmoid', (['(scale + 2.0)'], {}), '(scale + 2.0)\n', (959, 972), False, 'import torch\n'), ((1308, 1331), 'models.modules.thops.cat_feature', 'thops.cat_feature', (['a', 'b'], {}), '(a, b)\n', (1325, 1331), False, 'from models.modules import thops\n'), ((402, 438), 'models.modules.layers.Conv2d', 'Conv2d', (['in_channels', 'hidden_channels'], {}), '(in_channels, hidden_channels)\n', (408, 438), False, 'from models.modules.layers import Conv2d, Conv2dZeros\n'), ((492, 514), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(False)'}), '(inplace=False)\n', (499, 514), True, 'import torch.nn as nn\n'), ((528, 588), 'models.modules.layers.Conv2d', 'Conv2d', (['hidden_channels', 'hidden_channels'], {'kernel_size': '[1, 1]'}), '(hidden_channels, hidden_channels, kernel_size=[1, 1])\n', (534, 588), False, 'from models.modules.layers import Conv2d, Conv2dZeros\n'), ((618, 640), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(False)'}), '(inplace=False)\n', (625, 640), True, 'import torch.nn as nn\n'), ((654, 696), 'models.modules.layers.Conv2dZeros', 'Conv2dZeros', (['hidden_channels', 'out_channels'], {}), '(hidden_channels, out_channels)\n', (665, 696), False, 'from models.modules.layers import Conv2d, Conv2dZeros\n'), ((1094, 1110), 'torch.log', 'torch.log', (['scale'], {}), '(scale)\n', (1103, 1110), False, 'import torch\n'), ((1227, 1243), 'torch.log', 'torch.log', (['scale'], {}), '(scale)\n', (1236, 1243), False, 'import torch\n')]
|
'''
merge_sort divides the length-n input in half until single elements remain
(the same divide structure as a BST), then merges the halves back together.
Dividing costs log n levels and each merge level does at most ~n comparisons,
so even the worst case runs in O(n log n).

That matches quick_sort with a well-chosen pivot; quick_sort is preferred
when no auxiliary storage can be used (merge sort needs temporary lists).
time complexity : O(nlogn)
NOTE: originally failed to compile on the Baekjoon online judge.
'''
import sys
from sys import stdin
# Raise the default recursion limit so deep recursive splits don't crash.
sys.setrecursionlimit(1500)
def merge_sort(arr):
    """Sort *arr* ascending with top-down merge sort; returns a new list
    (or *arr* itself when it has fewer than two elements).

    Fixes: removed the unused local `pivot` and computes the midpoint once
    instead of recomputing len(arr)//2 for each half.
    """
    if len(arr) <= 1:
        return arr
    mid = len(arr) // 2
    left = merge_sort(arr[:mid])
    right = merge_sort(arr[mid:])
    return merge(left, right)
def merge(left, right):
    """Merge two already-sorted lists into one sorted list (stable:
    ties are taken from *left* first)."""
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # One side is exhausted; the remainder of the other is already sorted.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
def sort(arr):
    """Public entry point: sort *arr* via merge sort (returns a new list)."""
    return merge_sort(arr)
'''
arr = []
N = int(stdin.readline())
for x in range(N):
arr.append(int(stdin.readline()))
for y in sort(arr):
print(y)
'''
|
[
"sys.setrecursionlimit"
] |
[((334, 361), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(1500)'], {}), '(1500)\n', (355, 361), False, 'import sys\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Data pipeline utilities."""
# pylint: disable=invalid-name,dangerous-default-value
# pylint: disable=too-many-arguments
from typing import Dict, List, Tuple
import pandas as pd
import pandera as pa
import prefect
from prefect import task
import src.aggregate_data as ad
import src.city_neighbourhoods as cn
import src.city_pub_data as cpd
import src.trips as bt
from src.process_trips import process_trips_data
from src.stations_metadata import get_stations_metadata, transform_metadata
from src.utils import export_df_to_multiple_csv_files
@task
def get_bikeshare_stations_metadata(
    open_tor_data_url: str,
    stations_params: Dict[str, str],
    stations_cols_wanted: List[str],
) -> pd.DataFrame:
    """Retrieve and process bikeshare stations metadata.

    Args:
        open_tor_data_url: URL of the open-data endpoint to query.
        stations_params: query parameters for the stations dataset request.
        stations_cols_wanted: columns to keep after transformation.

    Returns:
        Transformed bikeshare station metadata.
    """
    logger = prefect.context.get("logger")
    df = get_stations_metadata(open_tor_data_url, stations_params)
    df = transform_metadata(df, stations_cols_wanted)
    logger.info(f"Retrieved {len(df):,} rows of bikeshare station metadata.")
    return df
@task
def get_bikeshare_trips_data(
    trips_data_glob_str: str,
    trips_nan_cols: List[str],
    trips_duplicated_cols: List[str],
) -> pd.DataFrame:
    """Retrieve and process bikeshare trips data.

    Args:
        trips_data_glob_str: glob pattern locating the raw trips files.
        trips_nan_cols: columns whose NaNs are handled during processing.
        trips_duplicated_cols: columns used to drop duplicate trips.

    Returns:
        Cleaned bikeshare trips data.
    """
    logger = prefect.context.get("logger")
    df = bt.load_trips_data(trips_data_glob_str)
    df = process_trips_data(df, trips_nan_cols, trips_duplicated_cols)
    logger.info(f"Retrieved {len(df):,} rows of bikeshare trips data.")
    return df
@task
def get_city_cultural_hotspots_data(
    open_tor_data_url: str, ch_params: Dict[str, str]
) -> pd.DataFrame:
    """Retrieve the cultural hotspots open dataset.

    Args:
        open_tor_data_url: URL of the open-data endpoint to query.
        ch_params: query parameters for the cultural-hotspots request.

    Returns:
        Citywide cultural hotspot data.
    """
    logger = prefect.context.get("logger")
    df = cpd.get_cultural_hotspots(open_tor_data_url, ch_params)
    logger.info(
        f"Retrieved {len(df):,} rows of citywide cultural hotspot data."
    )
    return df
@task
def get_city_points_of_interest_data(
    open_tor_data_url: str, poi_params: Dict[str, str]
) -> pd.DataFrame:
    """Retrieve the points-of-interest open dataset.

    Args:
        open_tor_data_url: URL of the open-data endpoint to query.
        poi_params: query parameters for the points-of-interest request.

    Returns:
        Citywide points-of-interest data.
    """
    logger = prefect.context.get("logger")
    df = cpd.get_poi_data(open_tor_data_url, poi_params)
    logger.info(
        f"Retrieved {len(df):,} rows of citywide points-of-interest data."
    )
    return df
@task
def get_city_neighbourhood_boundary_data(
    open_tor_data_url: str,
    neigh_boundary_params: Dict[str, str],
    neigh_cols_to_show: List[str],
) -> pd.DataFrame:
    """Retrieve the city neighbourhood boundary open dataset.

    Args:
        open_tor_data_url: URL of the open-data endpoint to query.
        neigh_boundary_params: query parameters for the boundary request.
        neigh_cols_to_show: columns to keep in the returned frame.

    Returns:
        Neighbourhood boundary (land area) data — a GeoDataFrame-like frame
        per the `gdf` naming; confirm against cpd implementation.
    """
    logger = prefect.context.get("logger")
    gdf = cpd.get_neighbourhood_boundary_land_area_data(
        open_tor_data_url, neigh_boundary_params, neigh_cols_to_show
    )
    logger.info(
        f"Retrieved {len(gdf):,} rows of city neighbourhood boundary data."
    )
    return gdf
@task
def get_city_public_transit_locations_data(
    open_tor_data_url: str, pt_params: Dict[str, str]
) -> pd.DataFrame:
    """Retrieve the city public transit locations open dataset.

    Args:
        open_tor_data_url: URL of the open-data endpoint to query.
        pt_params: query parameters for the public-transit request.

    Returns:
        Public transit location data.
    """
    logger = prefect.context.get("logger")
    df = cpd.get_public_transit_locations(open_tor_data_url, pt_params)
    logger.info(
        f"Retrieved {len(df):,} rows of city public transit location data."
    )
    return df
@task
def get_city_college_university_locations_data() -> pd.DataFrame:
    """Retrieve city college and university location data.

    Returns:
        College/university location data (source is internal to
        cpd.get_coll_univ_locations; no external URL is needed).
    """
    logger = prefect.context.get("logger")
    df = cpd.get_coll_univ_locations()
    logger.info(
        f"Retrieved {len(df):,} rows of city college-univ location data."
    )
    return df
@task
def get_neighbourhood_profile_data(
    open_tor_data_url: str, neigh_profile_params: Dict[str, str]
) -> pd.DataFrame:
    """Retrieve the city neighbourhood profiles open dataset.

    Args:
        open_tor_data_url: URL of the open-data endpoint to query.
        neigh_profile_params: query parameters for the profiles request.

    Returns:
        Neighbourhood profile (demographics) data.
    """
    logger = prefect.context.get("logger")
    df = cn.get_neighbourhood_profile_data(
        open_tor_data_url, neigh_profile_params
    )
    logger.info(
        f"Retrieved {len(df):,} rows of city neighbourhood profile data."
    )
    return df
@task(nout=6)
def aggregate_data(
    gdf: pd.DataFrame,
    df_poi: pd.DataFrame,
    dfch_essentials: pd.DataFrame,
    df_coll_univ: pd.DataFrame,
    df_pt_slice: pd.DataFrame,
    df_neigh_demog: pd.DataFrame,
    df_stations: pd.DataFrame,
) -> Tuple[pd.DataFrame]:
    """Combine neighbourhood stats and hourly bikeshare trips.

    Each city dataset (points of interest, cultural hotspots, college/univ
    locations, public transit stops, bikeshare stations) is tagged with the
    neighbourhood its lat/lon falls into, the per-neighbourhood statistics
    are aggregated and merged with demographics, and the bikeshare station
    metadata is joined against those neighbourhood stats.

    Args:
        gdf: neighbourhood boundaries (must carry AREA_NAME, geometry,
            Shape__Area columns).
        df_poi / dfch_essentials: point data keyed by ID with
            POI_LATITUDE/POI_LONGITUDE columns.
        df_coll_univ / df_pt_slice / df_stations: point data keyed by
            institution_id / stop_id / station_id with lat/lon columns.
        df_neigh_demog: neighbourhood demographic profiles.

    Returns:
        Six frames (nout=6): the five neighbourhood-tagged inputs'
        derivatives plus the stations frame merged with neighbourhood stats.
    """
    geo_cols = ["AREA_NAME", "geometry", "Shape__Area"]
    logger = prefect.context.get("logger")
    # Add neighbourhood to points-of-interest data
    # (pa.check_io validates the output frame against the pandera schema)
    df_poi_new = pa.check_io(out=ad.poi_new_schema)(
        cn.get_data_with_neighbourhood
    )(
        gdf[geo_cols],
        df_poi.rename(
            columns={
                "POI_LATITUDE": "lat",
                "POI_LONGITUDE": "lon",
            }
        )[["ID", "NAME", "lat", "lon"]],
        "lat",
        "lon",
        "ID",
        use_prefect=True,
    )
    logger.info("Added neighbourhood to city points-of-interest data")
    # Add neighbourhood to cultural hotspots data
    dfch_essentials_new = pa.check_output(ad.ch_essentials_new_schema)(
        cn.get_data_with_neighbourhood
    )(
        gdf[geo_cols],
        dfch_essentials.rename(
            columns={
                "POI_LATITUDE": "lat",
                "POI_LONGITUDE": "lon",
            }
        )[["ID", "NAME", "lat", "lon"]],
        "lat",
        "lon",
        "ID",
        use_prefect=True,
    )
    logger.info("Added neighbourhood to city cultural hotspots data")
    # Add neighbourhood to college and university location data
    df_coll_univ_new = pa.check_output(ad.coll_univ_schema_new)(
        cn.get_data_with_neighbourhood
    )(
        gdf[geo_cols],
        df_coll_univ,
        "lat",
        "lon",
        "institution_id",
        use_prefect=True,
    )
    logger.info(
        "Added neighbourhood to city college and university locations data"
    )
    # Add neighbourhood to public transit locations data
    df_pt_slice_new = pa.check_output(ad.pub_trans_locations_schema_new)(
        cn.get_data_with_neighbourhood
    )(
        gdf[geo_cols],
        df_pt_slice,
        "lat",
        "lon",
        "stop_id",
        use_prefect=True,
    )
    logger.info("Added neighbourhood to city public transit locations data")
    # Aggregate above neighbourhood stats and combine with demographics
    df_neigh_stats = ad.combine_neigh_stats(
        gdf,
        df_pt_slice_new,
        df_coll_univ_new,
        dfch_essentials_new,
        df_poi_new,
        df_neigh_demog,
    )
    logger.info("Aggregated statistics per city neighbourhood")
    # Add neighbourhood to stations locations
    df_stations_new = pa.check_output(ad.stations_schema_merged)(
        cn.get_data_with_neighbourhood
    )(
        gdf[geo_cols],
        df_stations,
        "lat",
        "lon",
        "station_id",
        use_prefect=True,
    )
    logger.info("Added stations to bikeshare station metadata")
    # Add stations to combined+aggregated neighbourhood stats
    df_stations_new = ad.combine_stations_metadata_neighbourhood(
        df_stations_new, df_neigh_stats
    )
    logger.info(
        "Combined stats and bikeshare station metadata per neighbourhood"
    )
    return [
        df_poi_new,
        dfch_essentials_new,
        df_coll_univ_new,
        df_pt_slice_new,
        df_neigh_stats,
        df_stations_new,
    ]
@task
def combine_trips_neighbourhood_data(
    df: pd.DataFrame, cols: List[str], df_stations_new: pd.DataFrame
) -> pd.DataFrame:
    """Merge hourly ridership aggregates with per-neighbourhood statistics.

    Parameters
    ----------
    df : pd.DataFrame
        Trip-level data to aggregate per station and hour.
    cols : List[str]
        Columns used by the aggregation helper.
    df_stations_new : pd.DataFrame
        Station metadata already enriched with neighbourhood stats.

    Returns
    -------
    pd.DataFrame
        Hourly trips per station joined with neighbourhood statistics.
    """
    logger = prefect.context.get("logger")
    merged = ad.combine_hourly_trips_per_station(df, cols, df_stations_new)
    logger.info(
        "Created aggregation of hourly trips per station with "
        "neighbourhood stats"
    )
    return merged
@task
def export_aggregated_data_multiple_csvs(
    df: pd.DataFrame,
    cols_to_export: List[str],
    nrows_per_staged_csv_file: int,
) -> None:
    """Split a single DataFrame into multiple staged CSV files.

    The exporter is wrapped with a pandera input-schema check so the
    DataFrame is validated before any file is written.
    """
    # Validate the input schema, then stage the CSVs locally.
    validated_export = pa.check_input(ad.hourly_trips_by_station_merged_schema)(
        export_df_to_multiple_csv_files
    )
    validated_export(
        df,
        cols_to_export,
        "local_stage",
        nrows_per_staged_csv_file,
        use_prefect=True,
    )
|
[
"src.trips.load_trips_data",
"pandera.check_output",
"src.aggregate_data.combine_neigh_stats",
"src.city_pub_data.get_poi_data",
"src.city_pub_data.get_public_transit_locations",
"src.city_pub_data.get_coll_univ_locations",
"src.city_neighbourhoods.get_neighbourhood_profile_data",
"src.city_pub_data.get_neighbourhood_boundary_land_area_data",
"src.aggregate_data.combine_hourly_trips_per_station",
"src.stations_metadata.get_stations_metadata",
"src.city_pub_data.get_cultural_hotspots",
"pandera.check_io",
"src.aggregate_data.combine_stations_metadata_neighbourhood",
"pandera.check_input",
"prefect.context.get",
"prefect.task",
"src.process_trips.process_trips_data",
"src.stations_metadata.transform_metadata"
] |
[((4022, 4034), 'prefect.task', 'task', ([], {'nout': '(6)'}), '(nout=6)\n', (4026, 4034), False, 'from prefect import task\n'), ((836, 865), 'prefect.context.get', 'prefect.context.get', (['"""logger"""'], {}), "('logger')\n", (855, 865), False, 'import prefect\n'), ((875, 932), 'src.stations_metadata.get_stations_metadata', 'get_stations_metadata', (['open_tor_data_url', 'stations_params'], {}), '(open_tor_data_url, stations_params)\n', (896, 932), False, 'from src.stations_metadata import get_stations_metadata, transform_metadata\n'), ((942, 986), 'src.stations_metadata.transform_metadata', 'transform_metadata', (['df', 'stations_cols_wanted'], {}), '(df, stations_cols_wanted)\n', (960, 986), False, 'from src.stations_metadata import get_stations_metadata, transform_metadata\n'), ((1301, 1330), 'prefect.context.get', 'prefect.context.get', (['"""logger"""'], {}), "('logger')\n", (1320, 1330), False, 'import prefect\n'), ((1340, 1379), 'src.trips.load_trips_data', 'bt.load_trips_data', (['trips_data_glob_str'], {}), '(trips_data_glob_str)\n', (1358, 1379), True, 'import src.trips as bt\n'), ((1389, 1450), 'src.process_trips.process_trips_data', 'process_trips_data', (['df', 'trips_nan_cols', 'trips_duplicated_cols'], {}), '(df, trips_nan_cols, trips_duplicated_cols)\n', (1407, 1450), False, 'from src.process_trips import process_trips_data\n'), ((1719, 1748), 'prefect.context.get', 'prefect.context.get', (['"""logger"""'], {}), "('logger')\n", (1738, 1748), False, 'import prefect\n'), ((1758, 1813), 'src.city_pub_data.get_cultural_hotspots', 'cpd.get_cultural_hotspots', (['open_tor_data_url', 'ch_params'], {}), '(open_tor_data_url, ch_params)\n', (1783, 1813), True, 'import src.city_pub_data as cpd\n'), ((2109, 2138), 'prefect.context.get', 'prefect.context.get', (['"""logger"""'], {}), "('logger')\n", (2128, 2138), False, 'import prefect\n'), ((2148, 2195), 'src.city_pub_data.get_poi_data', 'cpd.get_poi_data', (['open_tor_data_url', 'poi_params'], {}), 
'(open_tor_data_url, poi_params)\n', (2164, 2195), True, 'import src.city_pub_data as cpd\n'), ((2557, 2586), 'prefect.context.get', 'prefect.context.get', (['"""logger"""'], {}), "('logger')\n", (2576, 2586), False, 'import prefect\n'), ((2597, 2708), 'src.city_pub_data.get_neighbourhood_boundary_land_area_data', 'cpd.get_neighbourhood_boundary_land_area_data', (['open_tor_data_url', 'neigh_boundary_params', 'neigh_cols_to_show'], {}), '(open_tor_data_url,\n neigh_boundary_params, neigh_cols_to_show)\n', (2642, 2708), True, 'import src.city_pub_data as cpd\n'), ((3034, 3063), 'prefect.context.get', 'prefect.context.get', (['"""logger"""'], {}), "('logger')\n", (3053, 3063), False, 'import prefect\n'), ((3073, 3135), 'src.city_pub_data.get_public_transit_locations', 'cpd.get_public_transit_locations', (['open_tor_data_url', 'pt_params'], {}), '(open_tor_data_url, pt_params)\n', (3105, 3135), True, 'import src.city_pub_data as cpd\n'), ((3398, 3427), 'prefect.context.get', 'prefect.context.get', (['"""logger"""'], {}), "('logger')\n", (3417, 3427), False, 'import prefect\n'), ((3437, 3466), 'src.city_pub_data.get_coll_univ_locations', 'cpd.get_coll_univ_locations', ([], {}), '()\n', (3464, 3466), True, 'import src.city_pub_data as cpd\n'), ((3780, 3809), 'prefect.context.get', 'prefect.context.get', (['"""logger"""'], {}), "('logger')\n", (3799, 3809), False, 'import prefect\n'), ((3819, 3893), 'src.city_neighbourhoods.get_neighbourhood_profile_data', 'cn.get_neighbourhood_profile_data', (['open_tor_data_url', 'neigh_profile_params'], {}), '(open_tor_data_url, neigh_profile_params)\n', (3852, 3893), True, 'import src.city_neighbourhoods as cn\n'), ((4428, 4457), 'prefect.context.get', 'prefect.context.get', (['"""logger"""'], {}), "('logger')\n", (4447, 4457), False, 'import prefect\n'), ((6365, 6480), 'src.aggregate_data.combine_neigh_stats', 'ad.combine_neigh_stats', (['gdf', 'df_pt_slice_new', 'df_coll_univ_new', 'dfch_essentials_new', 'df_poi_new', 
'df_neigh_demog'], {}), '(gdf, df_pt_slice_new, df_coll_univ_new,\n dfch_essentials_new, df_poi_new, df_neigh_demog)\n', (6387, 6480), True, 'import src.aggregate_data as ad\n'), ((7032, 7107), 'src.aggregate_data.combine_stations_metadata_neighbourhood', 'ad.combine_stations_metadata_neighbourhood', (['df_stations_new', 'df_neigh_stats'], {}), '(df_stations_new, df_neigh_stats)\n', (7074, 7107), True, 'import src.aggregate_data as ad\n'), ((7605, 7634), 'prefect.context.get', 'prefect.context.get', (['"""logger"""'], {}), "('logger')\n", (7624, 7634), False, 'import prefect\n'), ((7667, 7729), 'src.aggregate_data.combine_hourly_trips_per_station', 'ad.combine_hourly_trips_per_station', (['df', 'cols', 'df_stations_new'], {}), '(df, cols, df_stations_new)\n', (7702, 7729), True, 'import src.aggregate_data as ad\n'), ((4526, 4560), 'pandera.check_io', 'pa.check_io', ([], {'out': 'ad.poi_new_schema'}), '(out=ad.poi_new_schema)\n', (4537, 4560), True, 'import pandera as pa\n'), ((5034, 5078), 'pandera.check_output', 'pa.check_output', (['ad.ch_essentials_new_schema'], {}), '(ad.ch_essentials_new_schema)\n', (5049, 5078), True, 'import pandera as pa\n'), ((5571, 5611), 'pandera.check_output', 'pa.check_output', (['ad.coll_univ_schema_new'], {}), '(ad.coll_univ_schema_new)\n', (5586, 5611), True, 'import pandera as pa\n'), ((5971, 6021), 'pandera.check_output', 'pa.check_output', (['ad.pub_trans_locations_schema_new'], {}), '(ad.pub_trans_locations_schema_new)\n', (5986, 6021), True, 'import pandera as pa\n'), ((6665, 6707), 'pandera.check_output', 'pa.check_output', (['ad.stations_schema_merged'], {}), '(ad.stations_schema_merged)\n', (6680, 6707), True, 'import pandera as pa\n'), ((8112, 8168), 'pandera.check_input', 'pa.check_input', (['ad.hourly_trips_by_station_merged_schema'], {}), '(ad.hourly_trips_by_station_merged_schema)\n', (8126, 8168), True, 'import pandera as pa\n')]
|
# For using stack in python
# https://www.geeksforgeeks.org/stack-in-python/
# https://www.youtube.com/watch?v=zwb3GmNAtFk&ab_channel=codebasics << NOTE good resource
from Abstract_Data_Type.StackADT import StackADT
def insert_into_sorted_stack(stack, element) -> None:
    """Insert `element` into an ascending-sorted stack (largest on top),
    keeping the stack sorted.

    Raises:
        ValueError: if `stack` is None.
    """
    if stack is None:
        raise ValueError("Invalid Stack")
    # Set aside every item strictly larger than the new element.
    holding = []
    while stack.size() > 0 and stack.peek() > element:
        holding.append(stack.pop())
    stack.push(element)
    # Put the larger items back on top, preserving their relative order.
    while holding:
        stack.push(holding.pop())
def sort_stack(stack) -> None:
    """Sort the stack in place so elements are ascending from bottom to top.

    Raises:
        ValueError: if `stack` is None.
    """
    if stack is None:
        raise ValueError("Invalid Stack")
    # Drain everything, then re-insert each element into sorted position.
    drained = []
    while stack.size() > 0:
        drained.append(stack.pop())
    for item in drained:
        insert_into_sorted_stack(stack, item)
if __name__ == "__main__":
    demo_stack = StackADT([5, 1, 0, 2])
    demo_stack.print()  # show the unsorted contents
    sort_stack(demo_stack)
    demo_stack.print()  # show the sorted contents
|
[
"Abstract_Data_Type.StackADT.StackADT"
] |
[((987, 1009), 'Abstract_Data_Type.StackADT.StackADT', 'StackADT', (['[5, 1, 0, 2]'], {}), '([5, 1, 0, 2])\n', (995, 1009), False, 'from Abstract_Data_Type.StackADT import StackADT\n')]
|
import KratosMultiphysics
import KratosMultiphysics.GeoMechanicsApplication as KratosGeo
def Factory(settings, Model):
    """Entry point used by Kratos to instantiate the process from project parameters.

    Args:
        settings: a KratosMultiphysics.Parameters object encapsulating the
            process configuration (must contain a "Parameters" sub-block).
        Model: the Kratos model container holding the model parts.

    Returns:
        GapClosureInterfaceActivationProcess built from settings["Parameters"].

    Raises:
        Exception: if `settings` is not a Parameters object.
    """
    # isinstance is the idiomatic type check and, unlike `type(x) != T`,
    # also accepts subclasses of Parameters.
    if not isinstance(settings, KratosMultiphysics.Parameters):
        raise Exception("expected input shall be a Parameters object, encapsulating a json string")
    return GapClosureInterfaceActivationProcess(Model, settings["Parameters"])
## All the python processes should be derived from "python_process"
class GapClosureInterfaceActivationProcess(KratosMultiphysics.Process):
    """Python wrapper that builds KratosGeo.GapClosureInterfaceProcess and
    forwards the solution-step hooks to it."""

    def __init__(self, Model, settings):
        """Construct the underlying process for the configured model part.

        Args:
            Model: Kratos model container; indexed by model_part_name.
            settings: Parameters block with model_part_name,
                gap_width_threshold and consider_gap_closure.
        """
        KratosMultiphysics.Process.__init__(self)
        model_part = Model[settings["model_part_name"].GetString()]
        # Forward only the settings the underlying process understands.
        params = KratosMultiphysics.Parameters("{}")
        for key in ("model_part_name", "gap_width_threshold", "consider_gap_closure"):
            params.AddValue(key, settings[key])
        self.process = KratosGeo.GapClosureInterfaceProcess(model_part, params)

    def ExecuteInitialize(self):
        """Delegate initialization to the wrapped process."""
        self.process.ExecuteInitialize()

    def ExecuteInitializeSolutionStep(self):
        """Delegate per-step initialization to the wrapped process."""
        self.process.ExecuteInitializeSolutionStep()

    def ExecuteFinalizeSolutionStep(self):
        """Delegate per-step finalization to the wrapped process."""
        self.process.ExecuteFinalizeSolutionStep()

    def ExecuteFinalize(self):
        """Delegate finalization to the wrapped process."""
        self.process.ExecuteFinalize()
|
[
"KratosMultiphysics.Parameters",
"KratosMultiphysics.Process.__init__",
"KratosMultiphysics.GeoMechanicsApplication.GapClosureInterfaceProcess"
] |
[((548, 589), 'KratosMultiphysics.Process.__init__', 'KratosMultiphysics.Process.__init__', (['self'], {}), '(self)\n', (583, 589), False, 'import KratosMultiphysics\n'), ((677, 712), 'KratosMultiphysics.Parameters', 'KratosMultiphysics.Parameters', (['"""{}"""'], {}), "('{}')\n", (706, 712), False, 'import KratosMultiphysics\n'), ((967, 1023), 'KratosMultiphysics.GeoMechanicsApplication.GapClosureInterfaceProcess', 'KratosGeo.GapClosureInterfaceProcess', (['model_part', 'params'], {}), '(model_part, params)\n', (1003, 1023), True, 'import KratosMultiphysics.GeoMechanicsApplication as KratosGeo\n')]
|
import numpy as np
import robodk
import time
import queue
from scipy.spatial.transform import Rotation, Slerp
from PIL import Image
from robolink import *
from matplotlib import pyplot as plt
import multiprocessing
from constants import BELT_VELOCITY
# Maximum random yaw perturbation (radians) applied when spawning a box.
BOX_RANDOM_ANGLE = np.pi / 8.0
# Maximum random offset along x for the box spawn position (pose units;
# poses are in millimeters per the comment in SimulationLoop).
BOX_X_RANDOM = 50.0
# Gravitational acceleration, m/s^2 (negative = downward).
GRAVITY = -9.81
class SimulationLoop:
    """Drives the RoboDK box-on-conveyor simulation.

    Each tick moves the box along the belt, applies gravity, and respawns
    it when it reaches the belt end. Commands ('pause', 'reset_box',
    'close') arrive as (method_name, *args) tuples on a queue; all state
    mutations happen under write_lock.
    """
    # Y coordinate below which the box is considered off the belt and is
    # respawned (pose units; poses are in millimeters per the comment below).
    CONVEYOR_BELT_END = 100.0
    def __init__(self, queue, lock):
        """Connect to RoboDK and store the command queue and write lock.

        queue: multiprocessing.Queue of (method_name, *args) command tuples.
        lock: lock guarding all simulation-state mutations.
        """
        self.sleep_for = 1.0 / 60.0  # target ~60 steps per second
        self.link = Robolink()
        # Belt motion is along -Y; scaled by 1000 to mm/s in _step_simulation.
        self.box_velocity = np.array([0.0, -BELT_VELOCITY, 0.0])
        self.paused = False
        self.done = False
        self.previous_sim_time = None
        self.queue = queue
        self.box = self.link.Item('Box')
        self.write_lock = lock
    def run(self):
        """Main loop: poll commands and step the simulation until close()."""
        self.link.setSimulationSpeed(1.0)
        self.previous_sim_time = self.link.SimulationTime()
        while not self.done:
            self._read_queue()
            if self.paused:
                time.sleep(0.05)
                continue
            self._step_simulation()
            time.sleep(self.sleep_for)
    def _read_queue(self):
        """Apply at most one queued command; msg is (method_name, *args)."""
        try:
            msg = self.queue.get(False)  # non-blocking poll
            try:
                self.write_lock.acquire()
                # Dispatch to the named method ('pause', 'reset_box', 'close').
                getattr(self, msg[0])(*msg[1:])
            finally:
                self.write_lock.release()
        except queue.Empty:
            # No pending command this tick.
            pass
    def _step_simulation(self):
        """Advance the box one time step: belt motion, gravity, respawn."""
        current_time = self.link.SimulationTime()
        diff = current_time - self.previous_sim_time
        try:
            self.write_lock.acquire()
            self.previous_sim_time = current_time
            if self.box.Parent().Name() != 'picking_setup':
                # Box is in the robot's hand. Don't do anything.
                return
            current_pose = np.array(self.box.Pose().Rows())
            if current_pose[1, 3] < self.CONVEYOR_BELT_END:
                # Box reached the end of the belt; respawn it.
                self.reset_box()
                return
            if self.box.Parent().Name() == "picking_setup":
                # On conveyor belt. Let's move it.
                current_pose[:3, 3] += diff * self.box_velocity * 1000.0 # Pose is in millimeters.
                if current_pose[2, 3] > 5.0:
                    # Box is above the belt surface: let it fall, clamped at z=0.
                    z = current_pose[2, 3]
                    current_pose[2, 3] = max(0.0, z + diff * GRAVITY * 1000.0)
            self.box.setPose(robodk.Mat(current_pose.tolist()))
        finally:
            self.write_lock.release()
    def reset_box(self):
        """Delete the current box and spawn a fresh one at the belt start.

        The new box gets a randomized yaw, x position and scale so each
        spawn differs slightly.
        """
        gripper = self.link.Item('Gripper')
        gripper.DetachAll()
        try:
            box = self.link.Item('Box')
            if box.Name() == "Box":
                box.Delete()
        except Exception as e:
            # Best effort: the box may already be gone.
            print(e)
        box_template = self.link.Item('BoxTemplate')
        box_template.Copy()
        self.box = self.link.Paste(self.link.Item('picking_setup'))
        self.box.setName("Box")
        self.box.setParent(self.link.Item('picking_setup'))
        box_pose = np.array(self.box.Pose().Rows())
        # Random yaw around -90 degrees.
        box_pose[:3, :3] = Rotation.from_rotvec([0.0, 0.0,
            -np.pi / 2.0 + np.random.uniform(-BOX_RANDOM_ANGLE, BOX_RANDOM_ANGLE)
        ]).as_matrix()
        box_pose[0, 3] = 200.0 + np.random.uniform(-BOX_X_RANDOM, BOX_X_RANDOM)
        box_pose[1, 3] = 1800.0
        box_pose[2, 3] = 0.0
        self.box.setPose(robodk.Mat(box_pose.tolist()))
        # Random per-axis scale between (0.7, 0.7, 0.1) and (1, 1, 1).
        self.box.Scale(np.random.uniform(np.array([0.7, 0.7, 0.1]), np.ones(3)).tolist())
    def pause(self, value):
        """Pause (True) or resume (False) stepping."""
        self.paused = value
        if not self.paused:
            # Reset the time base so the step after resume has no huge dt.
            self.previous_sim_time = self.link.SimulationTime()
    def close(self):
        """Request loop termination; run() exits after the current iteration."""
        self.done = True
def simulation_loop(queue, lock):
    """Process entry point: build a SimulationLoop and run it until close.

    Args:
        queue: multiprocessing.Queue carrying (method_name, *args) commands.
        lock: multiprocessing.Lock guarding simulation-state writes.
    """
    # run() returns None, so the previous `loop = ...` assignment was a
    # dead binding; just invoke it.
    SimulationLoop(queue, lock).run()
class Simulation:
    """Facade that runs SimulationLoop in a daemon process and forwards
    commands to it through a multiprocessing queue."""

    def __init__(self):
        """Spawn the background simulation process."""
        self.queue = multiprocessing.Queue()
        self.write_lock = multiprocessing.Lock()
        worker_args = (self.queue, self.write_lock)
        # Daemon process: it dies automatically with the parent.
        self.background_thread = multiprocessing.Process(
            target=simulation_loop, args=worker_args, daemon=True)
        self.background_thread.start()

    def reset_box(self):
        """Ask the loop to respawn the box."""
        self.queue.put(('reset_box',))

    def pause(self, value):
        """Ask the loop to pause (True) or resume (False)."""
        self.queue.put(('pause', value))

    def close(self):
        """Ask the loop to terminate."""
        self.queue.put(('close',))
|
[
"numpy.random.uniform",
"multiprocessing.Lock",
"numpy.ones",
"time.sleep",
"numpy.array",
"multiprocessing.Queue",
"multiprocessing.Process"
] |
[((504, 540), 'numpy.array', 'np.array', (['[0.0, -BELT_VELOCITY, 0.0]'], {}), '([0.0, -BELT_VELOCITY, 0.0])\n', (512, 540), True, 'import numpy as np\n'), ((3787, 3810), 'multiprocessing.Queue', 'multiprocessing.Queue', ([], {}), '()\n', (3808, 3810), False, 'import multiprocessing\n'), ((3837, 3859), 'multiprocessing.Lock', 'multiprocessing.Lock', ([], {}), '()\n', (3857, 3859), False, 'import multiprocessing\n'), ((3893, 3994), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'simulation_loop', 'args': '(self.queue, self.write_lock)', 'daemon': '(True)'}), '(target=simulation_loop, args=(self.queue, self.\n write_lock), daemon=True)\n', (3916, 3994), False, 'import multiprocessing\n'), ((1048, 1074), 'time.sleep', 'time.sleep', (['self.sleep_for'], {}), '(self.sleep_for)\n', (1058, 1074), False, 'import time\n'), ((3193, 3239), 'numpy.random.uniform', 'np.random.uniform', (['(-BOX_X_RANDOM)', 'BOX_X_RANDOM'], {}), '(-BOX_X_RANDOM, BOX_X_RANDOM)\n', (3210, 3239), True, 'import numpy as np\n'), ((958, 974), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (968, 974), False, 'import time\n'), ((3398, 3423), 'numpy.array', 'np.array', (['[0.7, 0.7, 0.1]'], {}), '([0.7, 0.7, 0.1])\n', (3406, 3423), True, 'import numpy as np\n'), ((3425, 3435), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (3432, 3435), True, 'import numpy as np\n'), ((3082, 3136), 'numpy.random.uniform', 'np.random.uniform', (['(-BOX_RANDOM_ANGLE)', 'BOX_RANDOM_ANGLE'], {}), '(-BOX_RANDOM_ANGLE, BOX_RANDOM_ANGLE)\n', (3099, 3136), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Vesting tokens."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import os
import json
import math
import http.client
import urllib.parse
class VestingBasicsTest (BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [["-txindex=1"]]
def setup_chain(self):
super().setup_chain()
#Append rpcauth to bitcoin.conf before initialization
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
rpcuser = "rpcuser=rpcuser💻"
rpcpassword = "<PASSWORD>password=<PASSWORD>🔑"
with open(os.path.join(self.options.tmpdir+"/node0", "litecoin.conf"), 'a', encoding='utf8') as f:
f.write(rpcauth+"\n")
def run_test(self):
self.log.info("Preparing the workspace...")
# mining 1000 blocks, total budget: 14949.77187643 LTC
for i in range(0,2):
self.nodes[0].generate(500)
blocks = 500*(i+1)
self.log.info(str(blocks)+" blocks mined...")
################################################################################
# Checking RPC tl_sendvesting and related (starting in block 1000) #
################################################################################
url = urllib.parse.urlparse(self.nodes[0].url)
#Old authpair
authpair = url.username + ':' + url.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
addresses = []
accounts = ["john", "doe", "another", "mark", "tango"]
# for graphs for addresses[1]
vested = []
unvested = []
volume_ltc = []
#vested ALL for addresses[4]
bvested = []
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
self.log.info("watching LTC general balance")
params = str([""]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "getbalance",params)
self.log.info(out)
assert_equal(out['error'], None)
adminAddress = 'QgKxFUBgR8y4xFy3s9ybpbDvYNKr4HTKPb'
privkey = '<KEY>'
self.log.info("importing admin address")
params = str([privkey]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "importprivkey",params)
# self.log.info(out)
assert_equal(out['error'], None)
self.log.info("watching private key of admin address")
params = str([adminAddress]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "dumpprivkey",params)
# self.log.info(out)
assert_equal(out['error'], None)
self.log.info("Creating addresses")
addresses = tradelayer_createAddresses(accounts, conn, headers)
addresses.append(adminAddress)
# self.log.info(addresses)
self.log.info("Funding addresses with LTC")
amount = 5
tradelayer_fundingAddresses(addresses, amount, conn, headers)
self.nodes[0].generate(1)
self.log.info("Checking the LTC balance in every account")
tradelayer_checkingBalance(accounts, amount, conn, headers)
self.log.info("Funding addresses[3] with 12000 LTC")
amount = 2000
params = str([addresses[3], amount]).replace("'",'"')
for i in range(0,6):
out = tradelayer_HTTP(conn, headers, False, "sendtoaddress",params)
# self.log.info(out)
assert_equal(out['error'], None)
self.nodes[0].generate(1)
self.log.info("Creating new tokens (sendissuancefixed)")
array = [0]
params = str([addresses[2],2,0,"lihki","","","90000000",array]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_sendissuancefixed",params)
# self.log.info(out)
self.log.info("Self Attestation for addresses")
tradelayer_selfAttestation(addresses,conn, headers)
self.log.info("Checking attestations")
out = tradelayer_HTTP(conn, headers, False, "tl_list_attestation")
# self.log.info(out)
result = []
registers = out['result']
for addr in addresses:
for i in registers:
if i['att sender'] == addr and i['att receiver'] == addr and i['kyc_id'] == 0:
result.append(True)
assert_equal(result, [True, True, True, True, True, True])
self.log.info("Checking vesting tokens property")
params = str([3])
out = tradelayer_HTTP(conn, headers, True, "tl_getproperty",params)
# self.log.info(out)
assert_equal(out['result']['propertyid'],3)
assert_equal(out['result']['name'],'Vesting Tokens')
assert_equal(out['result']['data'],'Divisible Tokens')
assert_equal(out['result']['url'],'www.tradelayer.org')
assert_equal(out['result']['divisible'],True)
assert_equal(out['result']['totaltokens'],'1500000.00000000')
self.log.info("Checking the property")
params = str([4])
out = tradelayer_HTTP(conn, headers, True, "tl_getproperty",params)
assert_equal(out['error'], None)
# self.log.info(out)
assert_equal(out['result']['propertyid'],4)
assert_equal(out['result']['name'],'lihki')
assert_equal(out['result']['data'],'')
assert_equal(out['result']['url'],'')
assert_equal(out['result']['divisible'],True)
assert_equal(out['result']['totaltokens'],'90000000.00000000')
self.log.info("sendvesting from adminAddress to first address")
params = str([adminAddress, addresses[0], "2000"]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_sendvesting",params)
# self.log.info(out)
assert_equal(out['error'], None)
self.nodes[0].generate(1)
self.log.info("Checking tokens in receiver address")
params = str([addresses[0], 3]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'],'2000.00000000')
assert_equal(out['result']['reserve'],'0.00000000')
self.log.info("Checking unvested ALLs ")
params = str([addresses[0]]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getunvested",params)
# self.log.info(out)
assert_equal(out['result']['unvested'],'2000.00000000')
self.log.info("Checking the time lock of one year")
self.log.info("sendvesting from first to second address")
params = str([addresses[0], addresses[1], "1000"]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_sendvesting",params)
# self.log.info(out)
assert_equal(out['error'], None)
self.nodes[0].generate(1)
self.log.info("Checking tokens in receiver address")
params = str([addresses[1], 3]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'],'0.00000000')
assert_equal(out['result']['reserve'],'0.00000000')
self.log.info("Checking unvested ALLs ")
params = str([addresses[1]]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getunvested",params)
# self.log.info(out)
assert_equal(out['result']['unvested'],'0.00000000')
out = tradelayer_HTTP(conn, headers, True, "tl_getinfo")
block = out['result']['block']
self.log.info("block height :"+str(block))
self.log.info("Waiting for one year")
for i in range(20):
self.nodes[0].generate(1)
self.log.info("sendvesting from first to second address, again")
params = str([addresses[0], addresses[1], "1000"]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_sendvesting",params)
# self.log.info(out)
assert_equal(out['error'], None)
self.nodes[0].generate(1)
self.log.info("sendvesting from first to 5th addresses")
params = str([addresses[0], addresses[4], "500"]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_sendvesting",params)
# self.log.info(out)
assert_equal(out['error'], None)
self.nodes[0].generate(1)
self.log.info("Restarting for the node, in order to test persistence")
self.restart_node(0) #stop and start
url = urllib.parse.urlparse(self.nodes[0].url)
#New authpair
authpair = url.username + ':' + url.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
self.log.info("Checking tokens in receiver addresses")
params = str([addresses[1], 3]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'],'1000.00000000')
assert_equal(out['result']['reserve'],'0.00000000')
params = str([addresses[4], 3]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'],'500.00000000')
assert_equal(out['result']['reserve'],'0.00000000')
self.log.info("Checking unvested ALLs ")
params = str([addresses[1]]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getunvested",params)
# self.log.info(out)
assert_equal(out['result']['unvested'],'1000.00000000')
params = str([addresses[4]]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getunvested",params)
# self.log.info(out)
assert_equal(out['result']['unvested'],'500.00000000')
# 200 LTC implies release 7.5% of ALLs from unvested to balance
# NOTE: In regtest 200 LTC volume is equivalent to 20000 (x100) LTCs in testnet or mainnet
self.log.info("Creating LTC volume in DEx")
self.log.info("Sending a DEx sell tokens offer")
params = str([addresses[2], 4, "1000", "200", 250, "0.00001", "2", 1]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_senddexoffer",params)
assert_equal(out['error'], None)
# self.log.info(out)
self.nodes[0].generate(1)
self.log.info("Checking the offer in DEx")
params = str([addresses[2]]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_getactivedexsells",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result'][0]['propertyid'], 4)
assert_equal(out['result'][0]['action'], 2)
assert_equal(out['result'][0]['seller'], addresses[2])
assert_equal(out['result'][0]['ltcsdesired'], '200.00000000')
assert_equal(out['result'][0]['amountavailable'], '1000.00000000')
assert_equal(out['result'][0]['unitprice'], '0.20000000')
assert_equal(out['result'][0]['minimumfee'], '0.00001000')
self.log.info("Accepting the full offer")
params = str([addresses[3], addresses[2], 4, "1000"]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_senddexaccept",params)
assert_equal(out['error'], None)
# self.log.info(out)
self.nodes[0].generate(1)
self.log.info("Checking the offer status")
params = str([addresses[2]]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_getactivedexsells",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result'][0]['propertyid'], 4)
assert_equal(out['result'][0]['action'], 2)
assert_equal(out['result'][0]['seller'], addresses[2])
assert_equal(out['result'][0]['ltcsdesired'], '0.00000000')
assert_equal(out['result'][0]['amountavailable'], '0.00000000')
assert_equal(out['result'][0]['unitprice'], '0.20000000')
assert_equal(out['result'][0]['minimumfee'], '0.00001000')
assert_equal(out['result'][0]['accepts'][0]['buyer'], addresses[3])
assert_equal(out['result'][0]['accepts'][0]['amountdesired'], '1000.00000000')
assert_equal(out['result'][0]['accepts'][0]['ltcstopay'], '200.00000000')
self.log.info("Paying the tokens")
params = str([addresses[3], addresses[2], "200"]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_send_dex_payment",params)
# self.log.info(out)
self.nodes[0].generate(1)
self.log.info("Checking token balance in buyer address")
params = str([addresses[3], 4]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'], '1000.00000000')
assert_equal(out['result']['reserve'],'0.00000000')
self.log.info("Checking LTC Volume")
params = str([4, 1, 3000]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_get_ltcvolume",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['volume'], '200.00000000')
volume0 = float(out['result']['volume'])
self.nodes[0].generate(1)
self.log.info("Checking vesting in related address")
params = str([addresses[1], 1]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'],'75.25749000') # 7.5% of vesting (NOTE: check the round up)
assert_equal(out['result']['reserve'],'0.00000000')
vested0 = float(out['result']['balance'])
params = str([addresses[4], 1]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
# assert_equal(out['result']['balance'],'75.25749000') # 7.5% of vesting (NOTE: check the round up)
assert_equal(out['result']['reserve'],'0.00000000')
vested1 = float(out['result']['balance'])
bvested.append(vested1)
self.log.info("Checking unvested ALLs ")
params = str([addresses[1]]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getunvested",params)
# self.log.info(out)
assert_equal(out['result']['unvested'],'924.74251000')
unvested0 = float(out['result']['unvested'])
volume_ltc.append(volume0)
vested.append(vested0)
unvested.append(unvested0)
self.log.info("Checking vesting info")
out = tradelayer_HTTP(conn, headers, False, "tl_getvesting_info",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['propertyid'], 3)
assert_equal(out['result']['name'], 'Vesting Tokens')
assert_equal(out['result']['data'], 'Divisible Tokens')
assert_equal(out['result']['url'], 'www.tradelayer.org')
assert_equal(out['result']['divisible'], True)
assert_equal(out['result']['issuer'], 'QgKxFUBgR8y4xFy3s9ybpbDvYNKr4HTKPb')
assert_equal(out['result']['activation block'], 100)
assert_equal(out['result']['litecoin volume'], '200.00000000')
assert_equal(out['result']['vested percentage'], '7.52574900')
assert_equal(out['result']['last vesting block'], 1037)
assert_equal(out['result']['total vested'], '150.51498000')
assert_equal(out['result']['owners'], 3)
assert_equal(out['result']['total tokens'], '1500000.00000000')
assert_equal(out['result']['kyc_ids allowed'], '[]')
# 400 LTC implies release 15.05% of ALLs from unvested to balance
# Remember: 400 LTCs in regtest is 40000 (x100) LTCs in testnet/mainnet
self.log.info("Sending a DEx sell tokens offer")
params = str([addresses[2], 4, "1000000", "200000", 250, "0.00001", "2", 1]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_senddexoffer",params)
assert_equal(out['error'], None)
# self.log.info(out)
self.nodes[0].generate(1)
self.log.info("Checking the offer in DEx")
params = str([addresses[2]]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_getactivedexsells",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result'][0]['propertyid'], 4)
assert_equal(out['result'][0]['action'], 2)
assert_equal(out['result'][0]['seller'], addresses[2])
assert_equal(out['result'][0]['ltcsdesired'], '200000.00000000')
assert_equal(out['result'][0]['amountavailable'], '1000000.00000000')
assert_equal(out['result'][0]['unitprice'], '0.20000000')
assert_equal(out['result'][0]['minimumfee'], '0.00001000')
self.log.info("Accepting the part of the offer")
params = str([addresses[3], addresses[2], 4, "1000"]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_senddexaccept",params)
assert_equal(out['error'], None)
# self.log.info(out)
self.nodes[0].generate(1)
self.log.info("Checking the offer status")
params = str([addresses[2]]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_getactivedexsells",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result'][0]['accepts'][0]['buyer'], addresses[3])
assert_equal(out['result'][0]['accepts'][0]['amountdesired'], '1000.00000000')
assert_equal(out['result'][0]['accepts'][0]['ltcstopay'], '200.00000000')
self.log.info("Paying the tokens")
params = str([addresses[3], addresses[2], "200"]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_send_dex_payment",params)
# self.log.info(out)
self.nodes[0].generate(1)
self.log.info("Checking token balance in buyer address")
params = str([addresses[3], 4]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'], '2000.00000000')
assert_equal(out['result']['reserve'],'0.00000000')
self.log.info("Checking LTC Volume")
params = str([4, 1, 99999]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_get_ltcvolume",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['volume'], '400.00000000')
volume1 = float(out['result']['volume'])
self.nodes[0].generate(2)
self.log.info("Checking vesting in related addresses")
params = str([addresses[1], 1]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'],'150.51498000') # 15.05% of vesting (NOTE: check the round up)
assert_equal(out['result']['reserve'],'0.00000000')
vested1 = float(out['result']['balance'])
params = str([addresses[4], 1]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
# assert_equal(out['result']['balance'],'150.51498000') # 15.05% of vesting (NOTE: check the round up)
assert_equal(out['result']['reserve'],'0.00000000')
vested2 = float(out['result']['balance'])
bvested.append(vested2)
self.log.info("Checking unvested ALLs ")
params = str([addresses[1]]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getunvested",params)
# self.log.info(out)
assert_equal(out['result']['unvested'],'849.48502000')
unvested1 = float(out['result']['unvested'])
volume_ltc.append(volume1)
vested.append(vested1)
unvested.append(unvested1)
self.log.info("Checking vesting info")
out = tradelayer_HTTP(conn, headers, False, "tl_getvesting_info",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['propertyid'], 3)
assert_equal(out['result']['name'], 'Vesting Tokens')
assert_equal(out['result']['data'], 'Divisible Tokens')
assert_equal(out['result']['url'], 'www.tradelayer.org')
assert_equal(out['result']['divisible'], True)
assert_equal(out['result']['issuer'], 'QgKxFUBgR8y4xFy3s9ybpbDvYNKr4HTKPb')
assert_equal(out['result']['activation block'], 100)
assert_equal(out['result']['litecoin volume'], '400.00000000')
assert_equal(out['result']['vested percentage'], '15.05149900')
assert_equal(out['result']['last vesting block'], 1041)
assert_equal(out['result']['total vested'], '301.02996000')
assert_equal(out['result']['owners'], 3)
assert_equal(out['result']['total tokens'], '1500000.00000000')
assert_equal(out['result']['kyc_ids allowed'], '[]')
# Adding 200 LTCs in each step
for i in range(0,20):
self.log.info("Loop number:"+str(i))
# self.log.info("Checking the offer in DEx")
# params = str([addresses[2]]).replace("'",'"')
# out = tradelayer_HTTP(conn, headers, True, "tl_getactivedexsells",params)
# self.log.info(out)
self.log.info("Accepting the part of the offer")
params = str([addresses[3], addresses[2], 4, "1000"]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_senddexaccept",params)
assert_equal(out['error'], None)
# self.log.info(out)
self.nodes[0].generate(1)
self.log.info("Checking the offer status")
params = str([addresses[2]]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_getactivedexsells",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result'][0]['accepts'][0]['buyer'], addresses[3])
assert_equal(out['result'][0]['accepts'][0]['amountdesired'], '1000.00000000')
assert_equal(out['result'][0]['accepts'][0]['ltcstopay'], '200.00000000')
self.log.info("Paying the tokens")
params = str([addresses[3], addresses[2], "200"]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_send_dex_payment",params)
# self.log.info(out)
self.nodes[0].generate(1)
time.sleep(0.35)
self.log.info("Checking token balance in buyer address")
params = str([addresses[3], 4]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
nresult = 2000 + 1000 * (i + 1)
sresult = str(nresult)+'.00000000'
assert_equal(out['result']['balance'], sresult)
assert_equal(out['result']['reserve'],'0.00000000')
self.log.info("Checking LTC Volume")
params = str([4, 1, 99999]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_get_ltcvolume",params)
# self.log.info(out)
assert_equal(out['error'], None)
nvolume = 400 + 200 * (i + 1)
svolume = str(nvolume)+'.00000000'
assert_equal(out['result']['volume'], svolume)
volume1 = float(out['result']['volume'])
self.nodes[0].generate(1)
self.log.info("Checking vesting in in addresses[1]")
params = str([addresses[1], 1]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['reserve'],'0.00000000')
vested1 = float(out['result']['balance'])
self.log.info("Checking unvested ALLs in addresses[1]")
params = str([addresses[1]]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getunvested",params)
# self.log.info(out)
unvested1 = float(out['result']['unvested'])
assert_equal(unvested1 + vested1, 1000)
volume_ltc.append(volume1)
vested.append(vested1)
unvested.append(unvested1)
self.log.info("Checking vesting in addresses[4]")
params = str([addresses[4], 1]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['reserve'],'0.00000000')
vested2 = float(out['result']['balance'])
bvested.append(vested2)
self.log.info("Checking unvested ALLs in addresses[4]")
params = str([addresses[4]]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getunvested",params)
# self.log.info(out)
unvested2 = float(out['result']['unvested'])
assert_equal(unvested2 + vested2, 500)
time.sleep(0.2)
self.log.info("Checking LTC Volume")
params = str([4, 1, 99999]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_get_ltcvolume",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['volume'], '4400.00000000')
# At this volume the vesting must be 41.08 %
self.log.info("Checking final vesting in addresses[1]")
params = str([addresses[1], 1]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'],'410.86307000')
self.log.info("Checking final unvested ALLs in addresses[1]")
params = str([addresses[1]]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getunvested",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['unvested'], '589.13693000')
self.log.info("Checking final vesting in addresses[4]")
params = str([addresses[4], 1]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'],'205.43153500')
self.log.info("Checking final unvested ALLs in addresses[4]")
params = str([addresses[4]]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getunvested",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['unvested'], '294.56846500')
# pl.plot(volume_ltc, vested,'-b', label='vested amount for addresses[1]')
# pl.plot(volume_ltc, bvested,'-r', label='vested amount for addresses[3]')
# pl.legend(loc='upper left')
# pl.show()
conn.close()
self.stop_nodes()
# Script entry point: run the vesting integration test when invoked directly.
if __name__ == '__main__':
    VestingBasicsTest ().main ()
|
[
"os.path.join"
] |
[((960, 1021), 'os.path.join', 'os.path.join', (["(self.options.tmpdir + '/node0')", '"""litecoin.conf"""'], {}), "(self.options.tmpdir + '/node0', 'litecoin.conf')\n", (972, 1021), False, 'import os\n')]
|
#
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
"""
Handshake tests using Openssl 0.9.8 s_client against s2nd
"""
import argparse
import os
import sys
import subprocess
import itertools
import multiprocessing
import threading
import uuid
import re
import string
from os import environ
from multiprocessing.pool import ThreadPool
from s2n_test_constants import *
from time import sleep
# Prefix openssl s_client prints before the negotiated cipher suite name;
# used to locate that line in captured s_client output.
S_CLIENT_NEGOTIATED_CIPHER_PREFIX="Cipher : "

# Maps s2n protocol-version constants (from s2n_test_constants) to the
# matching openssl s_client command-line flag.
PROTO_VERS_TO_S_CLIENT_ARG = {
    S2N_TLS10 : "-tls1",
    S2N_TLS11 : "-tls1_1",
    S2N_TLS12 : "-tls1_2",
}

# Module-level flag: when True, s2nd is launched with corked IO (-C).
# Overridden from the --use_corked_io CLI option in main().
use_corked_io=False
def cleanup_processes(*processes):
    """Kill every given child process and reap it so no zombies remain."""
    for proc in processes:
        proc.kill()
        proc.wait()
def validate_version(expected_version, output):
    """Return 0 if s2nd's output reports the expected protocol version, else -1.

    A falsy expected_version means the default negotiated version, S2N_TLS10.
    """
    wanted = ACTUAL_VERSION_STR.format(expected_version or S2N_TLS10)
    if any(wanted in line for line in output.splitlines()):
        return 0
    return -1
def validate_data_transfer(expected_data, s_client_out, s2nd_out):
    """
    Verify that the application data written between s_client and s2nd is encrypted and decrypted successfuly.

    :param str expected_data: payload expected to appear in both outputs
    :param str s_client_out: captured stdout of openssl s_client
    :param str s2nd_out: captured stdout of s2nd
    :return: 0 when the payload is found in both outputs, -1 otherwise
    """
    # The original duplicated the same line-scan twice with a `found` flag;
    # factored into a helper.
    if not _found_in_output(expected_data, s2nd_out):
        print ("Did not find " + expected_data + " in output from s2nd")
        return -1
    if not _found_in_output(expected_data, s_client_out):
        print ("Did not find " + expected_data + " in output from s_client")
        return -1
    return 0

def _found_in_output(expected_data, output):
    # True when any line of *output* contains *expected_data* as a substring.
    return any(expected_data in line for line in output.splitlines())
def find_expected_cipher(expected_cipher, s_client_out):
    """
    Make sure s_client and s2nd negotiate the cipher suite we expect

    :param str expected_cipher: openssl name of the expected suite
    :param str s_client_out: captured stdout of openssl s_client
    :return: 0 if the "Cipher :" line names the expected suite, -1 otherwise
    """
    # Fixed: dropped the unused `s_client_out_len` local and the
    # unreachable `break` that followed `return 0` in the original.
    full_expected_string = S_CLIENT_NEGOTIATED_CIPHER_PREFIX + expected_cipher
    for line in s_client_out.splitlines():
        if full_expected_string in line:
            return 0
    print("Failed to find " + expected_cipher + " in s_client output")
    return -1
def read_process_output_until(process, marker):
    """Accumulate *process* stdout line by line until a line contains *marker*.

    Returns everything read so far, including the marker line.  Also
    returns at EOF: once the child exits, readline() yields b'' forever,
    so the original's loop (which only returned on a marker hit) would
    spin indefinitely if the marker never appeared.  Its trailing
    `return output` after `while True` was unreachable dead code.

    :param process: object with a binary .stdout stream (e.g. subprocess.Popen)
    :param str marker: substring that terminates the read
    :return: all text read, decoded as UTF-8
    """
    output = ""
    while True:
        line = process.stdout.readline().decode("utf-8")
        output += line
        # Empty string == EOF; looping further would never terminate.
        if marker in line or not line:
            return output
def try_handshake(endpoint, port, cipher, ssl_version, server_name=None, strict_hostname=False, server_cert=None, server_key=None,
                  server_cert_key_list=None, expected_server_cert=None, server_cipher_pref=None, ocsp=None, sig_algs=None, curves=None, resume=False, no_ticket=False,
                  prefer_low_latency=False, enter_fips_mode=False, client_auth=None, client_cert=DEFAULT_CLIENT_CERT_PATH,
                  client_key=DEFAULT_CLIENT_KEY_PATH, expected_cipher=None, expected_extensions=None):
    """
    Attempt to handshake against s2nd listening on `endpoint` and `port` using Openssl s_client

    :param int endpoint: endpoint for s2nd to listen on
    :param int port: port for s2nd to listen on
    :param str cipher: ciphers for Openssl s_client to offer. See https://www.openssl.org/docs/man1.0.2/apps/ciphers.html
    :param int ssl_version: SSL version for s_client to use
    :param str server_name: server_name value for s_client to send
    :param bool strict_hostname: whether s_client should strictly check to see if server certificate matches the server_name
    :param str server_cert: path to certificate for s2nd to use
    :param str server_key: path to private key for s2nd to use
    :param list server_cert_key_list: a list of (cert_path, key_path) tuples for multicert tests.
    :param str expected_server_cert: Path to the expected server certificate should be sent to s_client.
    :param str ocsp: path to OCSP response file for stapling
    :param str sig_algs: Signature algorithms for s_client to offer
    :param str curves: Elliptic curves for s_client to offer
    :param bool resume: True if s_client should try to reconnect to s2nd and reuse the same TLS session. False for normal negotiation.
    :param bool no_ticket: True if s2n server should not use session ticket to resume the same TLS session.
    :param bool prefer_low_latency: True if s2nd should use 1500 for max outgoing record size. False for default max.
    :param bool enter_fips_mode: True if s2nd should enter libcrypto's FIPS mode. Libcrypto must be built with a FIPS module to enter FIPS mode.
    :param bool client_auth: True if the test should try and use client authentication
    :param str client_cert: Path to the client's cert file
    :param str client_key: Path to the client's private key file
    :param str expected_cipher: the cipher we expect to negotiate
    :param list expected_extensions: list of expected extensions that s_client should receive.
    :return: 0 on successfully negotiation(s), -1 on failure
    """
    # Override certificate for ECDSA if unspecified. We can remove this when we
    # support multiple certificates
    if server_cert is None and server_cert_key_list is None and "ECDSA" in cipher:
        server_cert = TEST_ECDSA_CERT
        server_key = TEST_ECDSA_KEY

    # Fire up s2nd, building the command line flag by flag from the options.
    s2nd_cmd = ["../../bin/s2nd"]

    if server_cert is not None:
        s2nd_cmd.extend(["--cert", server_cert])
    if server_key is not None:
        s2nd_cmd.extend(["--key", server_key])
    if server_cert_key_list is not None:
        for cert_key_path in server_cert_key_list:
            cert_path = cert_key_path[0]
            key_path = cert_key_path[1]
            s2nd_cmd.extend(["--cert", cert_path])
            s2nd_cmd.extend(["--key", key_path])
    if ocsp is not None:
        s2nd_cmd.extend(["--ocsp", ocsp])
    if prefer_low_latency == True:
        s2nd_cmd.append("--prefer-low-latency")
    if client_auth is not None:
        s2nd_cmd.append("-m")
        s2nd_cmd.extend(["-t", client_cert])
    # NOTE: reads the module-level use_corked_io flag, not a parameter.
    if use_corked_io:
        s2nd_cmd.append("-C")

    s2nd_cmd.extend([str(endpoint), str(port)])

    s2nd_ciphers = "test_all_tls12"
    if server_cipher_pref is not None:
        s2nd_ciphers = server_cipher_pref
    if enter_fips_mode == True:
        s2nd_ciphers = "test_all_fips"
        s2nd_cmd.append("--enter-fips-mode")
    s2nd_cmd.append("-c")
    s2nd_cmd.append(s2nd_ciphers)
    if no_ticket:
        s2nd_cmd.append("-T")

    s2nd = subprocess.Popen(s2nd_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)

    # Make sure s2nd has started
    s2nd.stdout.readline()

    s_client_cmd = ["openssl", "s_client", "-connect", str(endpoint) + ":" + str(port)]
    if ssl_version is not None:
        s_client_cmd.append(PROTO_VERS_TO_S_CLIENT_ARG[ssl_version])
    if cipher is not None:
        s_client_cmd.extend(["-cipher", cipher])

    # For verifying extensions that s2nd sends expected extensions
    s_client_cmd.append("-tlsextdebug")

    # Fire up s_client
    s_client = subprocess.Popen(s_client_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

    s_client_out = ""
    s2nd_out = ""

    openssl_connect_marker = "CONNECTED"
    openssl_reconnect_marker = "drop connection and then reconnect"
    end_of_msg_marker = "__end_of_msg__"

    # Wait until openssl and s2n have finished the handshake and are connected to each other
    s_client_out += read_process_output_until(s_client, openssl_connect_marker)
    s2nd_out += read_process_output_until(s2nd, openssl_connect_marker)

    if resume == True:
        for i in range(0,5):
            # Wait for openssl to resume connection 5 times in a row, and verify resumption works.
            s_client_out += read_process_output_until(s_client, openssl_reconnect_marker)
            s2nd_out += read_process_output_until(s2nd, openssl_connect_marker)

    # Unique payload per run so stale output from an earlier run cannot
    # satisfy the data-transfer check.
    data_to_validate = cipher + " " + str(uuid.uuid4())

    # Write the data to openssl towards s2n server
    msg = (data_to_validate + "\n" + end_of_msg_marker + "\n\n").encode("utf-8")
    s_client.stdin.write(msg)
    s_client.stdin.flush()

    # Write the data to s2n towards openssl client
    s2nd.stdin.write(msg)
    s2nd.stdin.flush()

    # Wait for the Data transfer to complete between OpenSSL and s2n
    s_client_out += read_process_output_until(s_client, end_of_msg_marker)
    s2nd_out += read_process_output_until(s2nd, end_of_msg_marker)

    cleanup_processes(s2nd, s_client)

    # Post-mortem validation of the captured output; first failure wins.
    if validate_data_transfer(data_to_validate, s_client_out, s2nd_out) != 0:
        return -1

    if validate_version(ssl_version, s2nd_out) != 0:
        return -1

    if resume is True:
        if validate_resume(s2nd_out) != 0:
            return -1

    if ocsp is not None:
        if validate_ocsp(s_client_out) != 0:
            return -1

    if expected_cipher is not None:
        if find_expected_cipher(expected_cipher, s_client_out) != 0:
            return -1

    if strict_hostname is True:
        if validate_hostname(s_client_out) != 0:
            return -1

    if expected_server_cert is not None:
        if validate_selected_certificate(s_client_out, expected_server_cert) != 0:
            return -1

    if expected_extensions is not None:
        for extension in expected_extensions:
            if extension.s_client_validate(s_client_out) != 0:
                return -1

    return 0
def cert_path_to_str(cert_path):
    """Convert a certificate path into a short label for test output.

    Example: "./test_certs/rsa_2048_sha256_client_cert.pem" => "RSA-2048-SHA256"
    """
    basename = cert_path[cert_path.rfind('/') + 1:]
    parts = basename.split('_')[:3]
    return '-'.join(parts).upper()
def print_result(result_prefix, return_code):
    """Print PASSED/FAILED after *result_prefix*; colorized when stdout is a tty."""
    passed = (return_code == 0)
    if sys.stdout.isatty():
        suffix = "\033[32;1mPASSED\033[0m" if passed else "\033[31;1mFAILED\033[0m"
    else:
        suffix = "PASSED" if passed else "FAILED"
    print(result_prefix + suffix)
def create_thread_pool():
    """Build a ThreadPool sized for the integration-test workload."""
    # Multiply by 4 to increase parallelization between integration tests
    pool_size = multiprocessing.cpu_count() * 4
    print("\tCreating ThreadPool of size: " + str(pool_size))
    return ThreadPool(processes=pool_size)
def run_handshake_test(host, port, ssl_version, cipher, fips_mode, no_ticket, use_client_auth, client_cert_path, client_key_path):
    """Run one handshake combination and print a PASS/FAIL line.

    Returns 0 on success (or when the combination is skipped), -1 on failure.
    """
    # Skip the cipher if openssl can't test it. 3DES/RC4 are disabled by default in 1.1.1
    if not cipher.openssl_1_1_1_compatible:
        return 0
    if ssl_version and ssl_version < cipher.min_tls_vers:
        return 0

    if use_client_auth is not None and client_cert_path is not None:
        client_cert_str = cert_path_to_str(client_cert_path)
    else:
        client_cert_str = str(use_client_auth)

    outcome = try_handshake(host, port, cipher.openssl_name, ssl_version,
                            no_ticket=no_ticket, enter_fips_mode=fips_mode,
                            client_auth=use_client_auth,
                            client_cert=client_cert_path,
                            client_key=client_key_path)

    prefix = "Cipher: %-30s ClientCert: %-16s Vers: %-8s ... " % (
        cipher.openssl_name, client_cert_str, S2N_PROTO_VERS_TO_STR[ssl_version])
    print_result(prefix, outcome)
    return outcome
def handshake_test(host, port, test_ciphers, fips_mode, no_ticket=False, use_client_auth=None, use_client_cert=None, use_client_key=None):
    """
    Basic handshake tests using all valid combinations of supported cipher suites and TLS versions.
    """
    print("\n\tRunning handshake tests:")

    failed = 0
    for ssl_version in [S2N_TLS10, None]:
        print("\n\tTesting ciphers using client version: " + S2N_PROTO_VERS_TO_STR[ssl_version])
        # Only test non ECC ciphers, openssl 0.9.8 has trouble with ECDHE.
        # Only test 1.0/SSLv3 ciphers since 0.9.8 only supports those.
        eligible = [c for c in test_ciphers
                    if "ECDHE" not in c.openssl_name and c.min_tls_vers < S2N_TLS11]
        # Each cipher gets its own port (base + offset) to avoid collisions.
        outcomes = [
            run_handshake_test(host, port + offset, ssl_version, suite, fips_mode,
                               no_ticket, use_client_auth, use_client_cert, use_client_key)
            for offset, suite in enumerate(eligible)
        ]
        if any(rc != 0 for rc in outcomes):
            failed = 1
    return failed
def main():
    """Parse CLI options and run the handshake test suite against s2nd.

    Returns the number of failed test groups (0 on full success), which the
    entry-point guard passes to sys.exit().
    """
    # BUG FIX: without this declaration the assignment below created a
    # function-local `use_corked_io` that shadowed the module-level flag
    # read by try_handshake(), so --use_corked_io silently had no effect.
    global use_corked_io

    parser = argparse.ArgumentParser(description='Runs TLS server integration tests against s2nd using Openssl s_client')
    parser.add_argument('host', help='The host for s2nd to bind to')
    parser.add_argument('port', type=int, help='The port for s2nd to bind to')
    parser.add_argument('--use_corked_io', action='store_true', help='Turn corked IO on/off')
    parser.add_argument('--libcrypto', default='openssl-1.1.1', choices=S2N_LIBCRYPTO_CHOICES,
            help="""The Libcrypto that s2n was built with. s2n supports different cipher suites depending on
                    libcrypto version. Defaults to openssl-1.1.1.""")
    args = parser.parse_args()

    use_corked_io = args.use_corked_io

    # Retrieve the test ciphers to use based on the libcrypto version s2n was built with
    test_ciphers = S2N_LIBCRYPTO_TO_TEST_CIPHERS[args.libcrypto]
    host = args.host
    port = args.port

    fips_mode = False
    if environ.get("S2N_TEST_IN_FIPS_MODE") is not None:
        fips_mode = True
        print("\nRunning s2nd in FIPS mode.")

    print("\nRunning tests with: " + os.popen('openssl version').read())
    if use_corked_io == True:
        print("Corked IO is on")

    failed = 0
    failed += handshake_test(host, port, test_ciphers, fips_mode)
    return failed
# Script entry point: exit status is the number of failed test groups (0 == success).
if __name__ == "__main__":
    sys.exit(main())
|
[
"subprocess.Popen",
"uuid.uuid4",
"multiprocessing.pool.ThreadPool",
"argparse.ArgumentParser",
"os.popen",
"os.environ.get",
"sys.stdout.isatty",
"multiprocessing.cpu_count"
] |
[((6792, 6865), 'subprocess.Popen', 'subprocess.Popen', (['s2nd_cmd'], {'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE'}), '(s2nd_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n', (6808, 6865), False, 'import subprocess\n'), ((7341, 7449), 'subprocess.Popen', 'subprocess.Popen', (['s_client_cmd'], {'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), '(s_client_cmd, stdin=subprocess.PIPE, stdout=subprocess.\n PIPE, stderr=subprocess.STDOUT)\n', (7357, 7449), False, 'import subprocess\n'), ((10617, 10654), 'multiprocessing.pool.ThreadPool', 'ThreadPool', ([], {'processes': 'threadpool_size'}), '(processes=threadpool_size)\n', (10627, 10654), False, 'from multiprocessing.pool import ThreadPool\n'), ((12814, 12927), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Runs TLS server integration tests against s2nd using Openssl s_client"""'}), "(description=\n 'Runs TLS server integration tests against s2nd using Openssl s_client')\n", (12837, 12927), False, 'import argparse\n'), ((10101, 10120), 'sys.stdout.isatty', 'sys.stdout.isatty', ([], {}), '()\n', (10118, 10120), False, 'import sys\n'), ((10234, 10253), 'sys.stdout.isatty', 'sys.stdout.isatty', ([], {}), '()\n', (10251, 10253), False, 'import sys\n'), ((10429, 10456), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (10454, 10456), False, 'import multiprocessing\n'), ((13775, 13811), 'os.environ.get', 'environ.get', (['"""S2N_TEST_IN_FIPS_MODE"""'], {}), "('S2N_TEST_IN_FIPS_MODE')\n", (13786, 13811), False, 'from os import environ\n'), ((8248, 8260), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (8258, 8260), False, 'import uuid\n'), ((13934, 13961), 'os.popen', 'os.popen', (['"""openssl version"""'], {}), "('openssl version')\n", (13942, 13961), False, 'import os\n')]
|
import codecs
import json
import markdownify
# Source directory holding the exported thoughts.json, and the destination
# directory where the generated markdown files are written.
strSourcePath = 'd:\\工作\\b2t\\'
strMDPath = 'd:\\工作\\b2t\\markdown\\'
def readjson(strFileName):
    """Load and return the JSON document at *strFileName*, or None on error.

    Fixes two defects in the original: it ignored its parameter and always
    read the hard-coded thoughts.json path (the sole caller passes exactly
    that path, so behavior is unchanged in practice), and it never closed
    the file handle.
    """
    try:
        with codecs.open(strFileName, 'r', 'utf-8') as jsonfin:
            return json.load(jsonfin)
    except Exception as e:
        print(e)
        return None
def scanjson(listjson):
    '''
    Walk the exported thought objects and build two mappings:
      * thought id -> (filename-safe title, html content)
      * thought id -> list of linked thought ids (from tmap.edges)
    '''
    # Filename-illegal characters and their (mostly fullwidth) replacements.
    substitutions = (
        ('\\', u'·'), ('/', u'、'), ('*', '_'), ('?', u'?'), ('"', u'“'),
        ('<', '《'), ('>', '》'), ('|', '_'), (':', u':'),
    )
    dictTupleFileContents = {}
    dictJsonLinks = {}
    for thought in listjson:
        title = str(thought['title'])
        for bad, good in substitutions:
            title = title.replace(bad, good)
        content = thought.get('text', '')
        # NOTE(review): edges are only processed for thoughts that carry a
        # tmap.id (strID would otherwise be undefined) -- confirm against
        # the exporter that id-less thoughts never have edges.
        if 'tmap.id' in thought:
            strID = thought['tmap.id']
            dictTupleFileContents.setdefault(strID, (title, content))
            edges_raw = thought.get('tmap.edges', '')
            if len(edges_raw) > 2:
                for _, edge in json.loads(edges_raw).items():
                    dictJsonLinks.setdefault(strID, []).append(dict(edge)['to'])
    return dictTupleFileContents, dictJsonLinks
def writeFileContent(strFileName, strContentHTML):
    """Convert *strContentHTML* to markdown and append it to <strMDPath><strFileName>.md."""
    markdown_text = markdownify.markdownify(strContentHTML, heading_style="ATX")
    with codecs.open(strMDPath + strFileName + '.md', 'a', 'utf-8') as mdout:
        mdout.write(markdown_text)
def insertlink(strFileName, listLinks):
    """Append one wiki-style [[link]] line per entry to <strMDPath><strFileName>.md."""
    # NOTE(review): '\n\r' is an unusual line ending (LF then CR); kept
    # as-is to preserve the output byte-for-byte.
    lines = ['[[%s]]\n\r' % link for link in listLinks]
    with codecs.open(strMDPath + strFileName + '.md', 'a', 'utf-8') as mdout:
        mdout.writelines(lines)
# Full path of the exported thoughts file consumed below.
strFileName = strSourcePath + 'thoughts.json'

if __name__ == "__main__":
    # Load the exported thoughts and build the two lookup tables.
    jsonThought = readjson(strFileName)
    dictTupleFileContents, dictJsonLinks = scanjson(jsonThought)
    for item in dict(dictTupleFileContents).items():
        strID = item[0]
        tupleContent = item[1]
        # The sanitized title doubles as the markdown filename.
        strFileName = tupleContent[0]
        strContent = tupleContent[1]
        listLinkIDs = []
        listLinks = []
        # Resolve each linked thought id to its (sanitized) title.
        if dict(dictJsonLinks).__contains__(strID):
            listLinkIDs = dict(dictJsonLinks)[strID]
        for strLinkID in listLinkIDs:
            strLink = dict(dictTupleFileContents)[strLinkID][0]
            listLinks.append(strLink)
        # One markdown file per thought: content first, then outgoing [[links]].
        writeFileContent(strFileName, strContent)
        insertlink(strFileName, listLinks)
    pass
|
[
"json.load",
"codecs.open",
"markdownify.markdownify",
"json.loads"
] |
[((1903, 1963), 'markdownify.markdownify', 'markdownify.markdownify', (['strContentHTML'], {'heading_style': '"""ATX"""'}), "(strContentHTML, heading_style='ATX')\n", (1926, 1963), False, 'import markdownify\n'), ((1977, 2035), 'codecs.open', 'codecs.open', (["(strMDPath + strFileName + '.md')", '"""a"""', '"""utf-8"""'], {}), "(strMDPath + strFileName + '.md', 'a', 'utf-8')\n", (1988, 2035), False, 'import codecs\n'), ((2160, 2218), 'codecs.open', 'codecs.open', (["(strMDPath + strFileName + '.md')", '"""a"""', '"""utf-8"""'], {}), "(strMDPath + strFileName + '.md', 'a', 'utf-8')\n", (2171, 2218), False, 'import codecs\n'), ((182, 237), 'codecs.open', 'codecs.open', (['"""d:\\\\工作\\\\b2t\\\\thoughts.json"""', '"""r"""', '"""utf-8"""'], {}), "('d:\\\\工作\\\\b2t\\\\thoughts.json', 'r', 'utf-8')\n", (193, 237), False, 'import codecs\n'), ((261, 279), 'json.load', 'json.load', (['jsonfin'], {}), '(jsonfin)\n', (270, 279), False, 'import json\n'), ((1436, 1473), 'json.loads', 'json.loads', (["jsonthought['tmap.edges']"], {}), "(jsonthought['tmap.edges'])\n", (1446, 1473), False, 'import json\n')]
|
import pytest
from calculator.calculator import Calculator
def test_add():
    """Adding to a fresh (zero) calculator yields the addend."""
    calc = Calculator()
    assert calc.add(15) == 15
def test_subtract():
    """Subtracting from an initial value of 20 yields the difference."""
    calc = Calculator(20)
    assert calc.subtract(15) == 5
def test_multiply():
    """Multiplying an initial value of 2 by 15 yields 30."""
    calc = Calculator(2)
    assert calc.multiply(15) == 30
def test_divide():
    """Dividing by zero leaves the current value unchanged (no exception)."""
    calc = Calculator(50)
    assert calc.divide(0) == 50
def test_nth_root():
    """The 2nd root (square root) of 100 is 10."""
    calc = Calculator(100)
    assert calc.nth_root(2) == 10
def test_reset():
    """Resetting returns the calculator to zero."""
    calc = Calculator(10)
    assert calc.reset() == 0
|
[
"calculator.calculator.Calculator"
] |
[((99, 111), 'calculator.calculator.Calculator', 'Calculator', ([], {}), '()\n', (109, 111), False, 'from calculator.calculator import Calculator\n'), ((214, 228), 'calculator.calculator.Calculator', 'Calculator', (['(20)'], {}), '(20)\n', (224, 228), False, 'from calculator.calculator import Calculator\n'), ((335, 348), 'calculator.calculator.Calculator', 'Calculator', (['(2)'], {}), '(2)\n', (345, 348), False, 'from calculator.calculator import Calculator\n'), ((454, 468), 'calculator.calculator.Calculator', 'Calculator', (['(50)'], {}), '(50)\n', (464, 468), False, 'from calculator.calculator import Calculator\n'), ((573, 588), 'calculator.calculator.Calculator', 'Calculator', (['(100)'], {}), '(100)\n', (583, 588), False, 'from calculator.calculator import Calculator\n'), ((692, 706), 'calculator.calculator.Calculator', 'Calculator', (['(10)'], {}), '(10)\n', (702, 706), False, 'from calculator.calculator import Calculator\n')]
|
#!/usr/bin/env python3
# Copyright (C) 2015 <NAME> <<EMAIL>>
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Generates the simdpp/dispatch/collect_macros_generated.h file
# Use as $ ./tools/gen_dispatcher_collect_macros.py > simdpp/dispatch/collect_macros_generated.h
from gen_common import output_template
# Number of SIMDPP_DISPATCH_ARCH<N> slots the generated header supports.
num_archs = 15

# Per-architecture template. output_template() substitutes $num$ with the
# slot number and $n$ with a macro line-continuation; the emitted block maps
# SIMDPP_DISPATCH_ARCH$num$ to a namespace id plus register/declare macros.
single_arch_template = '''
#ifdef SIMDPP_DISPATCH_ARCH$num$
#define SIMDPP_ARCH_PP_LIST SIMDPP_DISPATCH_ARCH$num$
#include <simdpp/detail/preprocess_single_arch.h>
// Use the results of preprocess_single_arch.h to define
// SIMDPP_DISPATCH_$num$_NAMESPACE
#if SIMDPP_ARCH_PP_NS_USE_NULL
#define SIMDPP_DISPATCH_$num$_NS_ID_NULL SIMDPP_INSN_ID_NULL
#else
#define SIMDPP_DISPATCH_$num$_NS_ID_NULL
#endif
#if SIMDPP_ARCH_PP_NS_USE_SSE2
#define SIMDPP_DISPATCH_$num$_NS_ID_SSE2 SIMDPP_INSN_ID_SSE2
#else
#define SIMDPP_DISPATCH_$num$_NS_ID_SSE2
#endif
#if SIMDPP_ARCH_PP_NS_USE_SSE3
#define SIMDPP_DISPATCH_$num$_NS_ID_SSE3 SIMDPP_INSN_ID_SSE3
#else
#define SIMDPP_DISPATCH_$num$_NS_ID_SSE3
#endif
#if SIMDPP_ARCH_PP_NS_USE_SSSE3
#define SIMDPP_DISPATCH_$num$_NS_ID_SSSE3 SIMDPP_INSN_ID_SSSE3
#else
#define SIMDPP_DISPATCH_$num$_NS_ID_SSSE3
#endif
#if SIMDPP_ARCH_PP_NS_USE_SSE4_1
#define SIMDPP_DISPATCH_$num$_NS_ID_SSE4_1 SIMDPP_INSN_ID_SSE4_1
#else
#define SIMDPP_DISPATCH_$num$_NS_ID_SSE4_1
#endif
#if SIMDPP_ARCH_PP_NS_USE_AVX
#define SIMDPP_DISPATCH_$num$_NS_ID_AVX SIMDPP_INSN_ID_AVX
#else
#define SIMDPP_DISPATCH_$num$_NS_ID_AVX
#endif
#if SIMDPP_ARCH_PP_NS_USE_AVX2
#define SIMDPP_DISPATCH_$num$_NS_ID_AVX2 SIMDPP_INSN_ID_AVX2
#else
#define SIMDPP_DISPATCH_$num$_NS_ID_AVX2
#endif
#if SIMDPP_ARCH_PP_NS_USE_FMA3
#define SIMDPP_DISPATCH_$num$_NS_ID_FMA3 SIMDPP_INSN_ID_FMA3
#else
#define SIMDPP_DISPATCH_$num$_NS_ID_FMA3
#endif
#if SIMDPP_ARCH_PP_NS_USE_FMA4
#define SIMDPP_DISPATCH_$num$_NS_ID_FMA4 SIMDPP_INSN_ID_FMA4
#else
#define SIMDPP_DISPATCH_$num$_NS_ID_FMA4
#endif
#if SIMDPP_ARCH_PP_NS_USE_XOP
#define SIMDPP_DISPATCH_$num$_NS_ID_XOP SIMDPP_INSN_ID_XOP
#else
#define SIMDPP_DISPATCH_$num$_NS_ID_XOP
#endif
#if SIMDPP_ARCH_PP_NS_USE_AVX512F
#define SIMDPP_DISPATCH_$num$_NS_ID_AVX512F SIMDPP_INSN_ID_AVX512F
#else
#define SIMDPP_DISPATCH_$num$_NS_ID_AVX512F
#endif
#if SIMDPP_ARCH_PP_NS_USE_NEON
#define SIMDPP_DISPATCH_$num$_NS_ID_NEON SIMDPP_INSN_ID_NEON
#else
#define SIMDPP_DISPATCH_$num$_NS_ID_NEON
#endif
#if SIMDPP_ARCH_PP_NS_USE_NEON_FLT_SP
#define SIMDPP_DISPATCH_$num$_NS_ID_NEON_FLT_SP SIMDPP_INSN_ID_NEON_FLT_SP
#else
#define SIMDPP_DISPATCH_$num$_NS_ID_NEON_FLT_SP
#endif
#if SIMDPP_ARCH_PP_NS_USE_ALTIVEC
#define SIMDPP_DISPATCH_$num$_NS_ID_ALTIVEC SIMDPP_INSN_ID_ALTIVEC
#else
#define SIMDPP_DISPATCH_$num$_NS_ID_ALTIVEC
#endif
#define SIMDPP_DISPATCH_$num$_NAMESPACE SIMDPP_PP_PASTE15(arch, $n$
SIMDPP_DISPATCH_$num$_NS_ID_NULL, $n$
SIMDPP_DISPATCH_$num$_NS_ID_SSE2, $n$
SIMDPP_DISPATCH_$num$_NS_ID_SSE3, $n$
SIMDPP_DISPATCH_$num$_NS_ID_SSSE3, $n$
SIMDPP_DISPATCH_$num$_NS_ID_SSE4_1, $n$
SIMDPP_DISPATCH_$num$_NS_ID_AVX, $n$
SIMDPP_DISPATCH_$num$_NS_ID_AVX2, $n$
SIMDPP_DISPATCH_$num$_NS_ID_AVX512F, $n$
SIMDPP_DISPATCH_$num$_NS_ID_FMA3, $n$
SIMDPP_DISPATCH_$num$_NS_ID_FMA4, $n$
SIMDPP_DISPATCH_$num$_NS_ID_XOP, $n$
SIMDPP_DISPATCH_$num$_NS_ID_NEON, $n$
SIMDPP_DISPATCH_$num$_NS_ID_NEON_FLT_SP, $n$
SIMDPP_DISPATCH_$num$_NS_ID_ALTIVEC)
#define SIMDPP_DISPATCH_$num$_FN_REGISTER(ARRAY,NAME,FUN_TYPE) $n$
ARRAY[$num$-1] = SIMDPP_DISPATCH_$num$_NAMESPACE::register_fn_##NAME((FUN_TYPE)(NULL));
#define SIMDPP_DISPATCH_$num$_FN_DECLARE(NAME,FUN_TYPE) $n$
namespace SIMDPP_DISPATCH_$num$_NAMESPACE { $n$
::simdpp::detail::FnVersion register_fn_##NAME(FUN_TYPE); }
#undef SIMDPP_ARCH_PP_LIST
#else
#define SIMDPP_DISPATCH_$num$_FN_REGISTER(ARRAY,NAME,FUN_TYPE)
#define SIMDPP_DISPATCH_$num$_FN_DECLARE(NAME,FUN_TYPE)
#endif'''

# One-line-per-arch entries for the aggregate DECLARE/COLLECT macros below.
single_fn_register_template = ' SIMDPP_DISPATCH_$num$_FN_REGISTER(ARRAY,NAME,FUN_TYPE) $n$'
single_fn_declare_template = ' SIMDPP_DISPATCH_$num$_FN_DECLARE(NAME,FUN_TYPE) $n$'
# print the actual file: a fixed header, then the per-arch blocks, then the
# two aggregate macros that fan out to every arch slot.
print('''/* Copyright (C) 2015 <NAME> <<EMAIL>>
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
// This file is generated automatically. See tools/gen_dispatcher_collect_macros.py
#ifndef LIBSIMDPP_DISPATCH_COLLECT_MACROS_GENERATED_H
#define LIBSIMDPP_DISPATCH_COLLECT_MACROS_GENERATED_H
#ifndef LIBSIMDPP_SIMD_H
#error "This file must be included through simd.h"
#endif
#if SIMDPP_EMIT_DISPATCHER
''')
print('#define SIMDPP_DISPATCH_MAX_ARCHS ' + str(num_archs) + '\n')

# One fully-expanded arch block per slot (1-based numbering).
for i in range(1, num_archs+1):
    vars = { 'num' : str(i) }
    output_template(single_arch_template, vars)

print('''
#define SIMDPP_DISPATCH_DECLARE_FUNCTIONS(NAME,FUN_TYPE) \\''')
for i in range(1, num_archs+1):
    vars = { 'num' : str(i) }
    output_template(single_fn_declare_template, vars)

print('''
#define SIMDPP_DISPATCH_COLLECT_FUNCTIONS(ARRAY,NAME,FUN_TYPE) \\''')
for i in range(1, num_archs+1):
    vars = { 'num' : str(i) }
    output_template(single_fn_register_template, vars)

print('''
#endif // SIMDPP_EMIT_DISPATCHER
#endif
''')
|
[
"gen_common.output_template"
] |
[((5743, 5786), 'gen_common.output_template', 'output_template', (['single_arch_template', 'vars'], {}), '(single_arch_template, vars)\n', (5758, 5786), False, 'from gen_common import output_template\n'), ((5948, 5997), 'gen_common.output_template', 'output_template', (['single_fn_declare_template', 'vars'], {}), '(single_fn_declare_template, vars)\n', (5963, 5997), False, 'from gen_common import output_template\n'), ((6159, 6209), 'gen_common.output_template', 'output_template', (['single_fn_register_template', 'vars'], {}), '(single_fn_register_template, vars)\n', (6174, 6209), False, 'from gen_common import output_template\n')]
|
import datetime
from django.template import Library
from django.utils.html import format_html, format_html_join
# Template-tag registry for this module; Django requires a module-level
# `register` Library instance for the @simple_tag decorators below.
register = Library()
@register.simple_tag
def stars(score):
    """Render one font-awesome star <span> per point of *score*.

    ``None`` yields an empty string; float scores are truncated to int first.
    """
    if score is None:
        return ''
    if isinstance(score, float):
        score = int(score)
    # Each generated item is a plain string; format_html_join unpacks it as
    # the positional args for the format string (original behavior kept).
    return format_html_join(
        '\n',
        '<span class="fa fa-star" value="{}"></span>',
        (str(index) for index in range(score))
    )
@register.simple_tag
def estimated_delivery_date(days):
    """Return a French delivery-window string: today+*days* to 12 days later."""
    today = datetime.datetime.now().date()
    window_start = today + datetime.timedelta(days=days)
    window_end = window_start + datetime.timedelta(days=12)
    return f"le {window_start.day}/{window_start.month} \
et le {window_end.day}/{window_end.month}"
|
[
"django.template.Library",
"datetime.timedelta",
"datetime.datetime.now"
] |
[((125, 134), 'django.template.Library', 'Library', ([], {}), '()\n', (132, 134), False, 'from django.template import Library\n'), ((605, 634), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'days'}), '(days=days)\n', (623, 634), False, 'import datetime\n'), ((672, 699), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(12)'}), '(days=12)\n', (690, 699), False, 'import datetime\n'), ((535, 558), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (556, 558), False, 'import datetime\n')]
|
import markdown
import Famcy
import json
class displayTag(Famcy.FamcyBlock):
    """Famcy block rendering a title/content paragraph pair.

    The block body is a div containing an h3 (title) and an h4 (content),
    refreshed from ``self.value`` on each render.
    """
    def __init__(self):
        self.value = displayTag.generate_template_content()
        super(displayTag, self).__init__()
        self.init_block()

    @classmethod
    def generate_template_content(cls):
        """Default value payload for a freshly created block."""
        return {"title": "displayTag", "content": "displayTag content"}

    def init_block(self):
        """Build the DOM skeleton: a div holding an h3 and an h4."""
        container = Famcy.div()
        container["id"] = self.id
        container["className"] = "displayTag"
        container.addElement(Famcy.h3())
        container.addElement(Famcy.h4())
        self.body = container

    def render_inner(self):
        """Copy the current value into the two headings and return the body."""
        title_node = self.body.children[0]
        content_node = self.body.children[1]
        title_node.innerHTML = self.value["title"]
        content_node.innerHTML = self.value["content"]
        return self.body
|
[
"Famcy.h3",
"Famcy.h4",
"Famcy.div"
] |
[((511, 522), 'Famcy.div', 'Famcy.div', ([], {}), '()\n', (520, 522), False, 'import Famcy\n'), ((622, 632), 'Famcy.h3', 'Famcy.h3', ([], {}), '()\n', (630, 632), False, 'import Famcy\n'), ((651, 661), 'Famcy.h4', 'Famcy.h4', ([], {}), '()\n', (659, 661), False, 'import Famcy\n')]
|
import pandas as pd
from pybaseball.statcast_batter import statcast_batter, statcast_batter_exitvelo_barrels
def test_statcast_batter_exitvelo_barrels() -> None:
    """Smoke test: the 2019 exit-velocity/barrels leaderboard downloads
    with the expected shape (250 players x 19 columns)."""
    data: pd.DataFrame = statcast_batter_exitvelo_barrels(2019)
    assert data is not None
    assert not data.empty
    n_rows, n_cols = data.shape
    assert n_cols == 19
    assert n_rows == 250
def test_statcast_batter() -> None:
    """Smoke test: one player's 2019 statcast log downloads with the
    expected shape (2418 pitches x 89 columns)."""
    data: pd.DataFrame = statcast_batter('2019-01-01', '2019-12-31', 642715)
    assert data is not None
    assert not data.empty
    n_rows, n_cols = data.shape
    assert n_cols == 89
    assert n_rows == 2418
|
[
"pybaseball.statcast_batter.statcast_batter_exitvelo_barrels",
"pybaseball.statcast_batter.statcast_batter"
] |
[((192, 230), 'pybaseball.statcast_batter.statcast_batter_exitvelo_barrels', 'statcast_batter_exitvelo_barrels', (['(2019)'], {}), '(2019)\n', (224, 230), False, 'from pybaseball.statcast_batter import statcast_batter, statcast_batter_exitvelo_barrels\n'), ((422, 473), 'pybaseball.statcast_batter.statcast_batter', 'statcast_batter', (['"""2019-01-01"""', '"""2019-12-31"""', '(642715)'], {}), "('2019-01-01', '2019-12-31', 642715)\n", (437, 473), False, 'from pybaseball.statcast_batter import statcast_batter, statcast_batter_exitvelo_barrels\n')]
|
# ===============================================================================
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.api import Property
from traitsui.api import UItem, Item, HGroup, VGroup, EnumEditor
# ============= standard library imports ========================
# ============= local library imports ==========================
from traitsui.editors import TabularEditor
from traitsui.tabular_adapter import TabularAdapter
from pychron.core.helpers.traitsui_shortcuts import okcancel_view
from pychron.envisage.icon_button_editor import icon_button_editor
class SubviewAdapter(TabularAdapter):
    """Tabular adapter listing plot-option subview names in one column."""
    # Single unlabeled column; cell text is computed by _get_name_text.
    columns = [('', 'name')]
    # Computed property backing the 'name' column.
    name_text = Property
    font = '10'
    def _get_name_text(self):
        # Row items are the entries of 'subview_names' (presumably plain
        # name strings — see the TabularEditor in view()); show them as-is.
        return self.item
def view(title):
    """Build the plot-options dialog: a toolbar row (option selector plus
    save/save-as/add/delete/factory-reset buttons) above the subview list
    and the subview editor pane."""
    option_selector = Item('selected', show_label=False,
                           editor=EnumEditor(name='names'),
                           tooltip='List of available plot options')
    save_button = icon_button_editor('controller.save_options', 'disk',
                                      tooltip='Save changes to options')
    save_as_button = icon_button_editor('controller.save_as_options', 'save_as',
                                         tooltip='Save options with a new name')
    add_button = icon_button_editor('controller.add_options',
                                     'add',
                                     tooltip='Add new plot options')
    delete_button = icon_button_editor('controller.delete_options',
                                        'delete',
                                        tooltip='Delete current plot options',
                                        enabled_when='delete_enabled')
    factory_button = icon_button_editor('controller.factory_default', 'edit-bomb',
                                         enabled_when='selected',
                                         tooltip='Apply factory defaults')
    agrp = HGroup(option_selector, save_button, save_as_button,
                  add_button, delete_button, factory_button)
    # Left pane: read-only list of subview names driving the selection.
    sgrp = UItem('subview_names',
                 width=-120,
                 editor=TabularEditor(editable=False,
                                      adapter=SubviewAdapter(),
                                      selected='selected_subview'))
    # Right pane: editor for the currently selected subview.
    ogrp = UItem('subview', style='custom')
    return okcancel_view(VGroup(agrp, HGroup(sgrp, ogrp)),
                         width=800,
                         height=750,
                         resizable=True,
                         title=title)
# ============= EOF =============================================
|
[
"traitsui.api.HGroup",
"traitsui.api.EnumEditor",
"traitsui.api.VGroup",
"traitsui.api.UItem",
"pychron.envisage.icon_button_editor.icon_button_editor"
] |
[((2827, 2859), 'traitsui.api.UItem', 'UItem', (['"""subview"""'], {'style': '"""custom"""'}), "('subview', style='custom')\n", (2832, 2859), False, 'from traitsui.api import UItem, Item, HGroup, VGroup, EnumEditor\n'), ((2888, 2906), 'traitsui.api.HGroup', 'HGroup', (['sgrp', 'ogrp'], {}), '(sgrp, ogrp)\n', (2894, 2906), False, 'from traitsui.api import UItem, Item, HGroup, VGroup, EnumEditor\n'), ((1633, 1726), 'pychron.envisage.icon_button_editor.icon_button_editor', 'icon_button_editor', (['"""controller.save_options"""', '"""disk"""'], {'tooltip': '"""Save changes to options"""'}), "('controller.save_options', 'disk', tooltip=\n 'Save changes to options')\n", (1651, 1726), False, 'from pychron.envisage.icon_button_editor import icon_button_editor\n'), ((1778, 1882), 'pychron.envisage.icon_button_editor.icon_button_editor', 'icon_button_editor', (['"""controller.save_as_options"""', '"""save_as"""'], {'tooltip': '"""Save options with a new name"""'}), "('controller.save_as_options', 'save_as', tooltip=\n 'Save options with a new name')\n", (1796, 1882), False, 'from pychron.envisage.icon_button_editor import icon_button_editor\n'), ((1934, 2022), 'pychron.envisage.icon_button_editor.icon_button_editor', 'icon_button_editor', (['"""controller.add_options"""', '"""add"""'], {'tooltip': '"""Add new plot options"""'}), "('controller.add_options', 'add', tooltip=\n 'Add new plot options')\n", (1952, 2022), False, 'from pychron.envisage.icon_button_editor import icon_button_editor\n'), ((2111, 2243), 'pychron.envisage.icon_button_editor.icon_button_editor', 'icon_button_editor', (['"""controller.delete_options"""', '"""delete"""'], {'tooltip': '"""Delete current plot options"""', 'enabled_when': '"""delete_enabled"""'}), "('controller.delete_options', 'delete', tooltip=\n 'Delete current plot options', enabled_when='delete_enabled')\n", (2129, 2243), False, 'from pychron.envisage.icon_button_editor import icon_button_editor\n'), ((2369, 2494), 
'pychron.envisage.icon_button_editor.icon_button_editor', 'icon_button_editor', (['"""controller.factory_default"""', '"""edit-bomb"""'], {'enabled_when': '"""selected"""', 'tooltip': '"""Apply factory defaults"""'}), "('controller.factory_default', 'edit-bomb', enabled_when=\n 'selected', tooltip='Apply factory defaults')\n", (2387, 2494), False, 'from pychron.envisage.icon_button_editor import icon_button_editor\n'), ((2930, 2948), 'traitsui.api.VGroup', 'VGroup', (['agrp', 'bgrp'], {}), '(agrp, bgrp)\n', (2936, 2948), False, 'from traitsui.api import UItem, Item, HGroup, VGroup, EnumEditor\n'), ((1523, 1547), 'traitsui.api.EnumEditor', 'EnumEditor', ([], {'name': '"""names"""'}), "(name='names')\n", (1533, 1547), False, 'from traitsui.api import UItem, Item, HGroup, VGroup, EnumEditor\n')]
|
import os.path
import muninn.util as util
class StorageBackend(object):
    """Abstract interface for muninn storage backends.

    Concrete backends implement the NotImplementedError methods below.
    Subclasses that need scratch space for product transfers set
    ``self._tmp_root``; when it is unset (None), temporary directories fall
    back to the system default location.
    """

    def __init__(self):
        # Whether the backend can store products as symbolic links.
        self.supports_symlinks = False
        # Prefix prepended to product paths within the storage.
        self.global_prefix = ''
        # Root for temporary working directories (None -> system default).
        # BUG FIX: this attribute was never initialised here, so
        # get_tmp_root() raised AttributeError on backends (or subclasses)
        # that did not set it themselves.
        self._tmp_root = None

    def get_tmp_root(self, product):
        """Return a per-product temporary directory under ``_tmp_root``
        (creating it if needed), or None when no temp root is configured."""
        if not self._tmp_root:
            return None
        tmp_root = os.path.join(self._tmp_root, product.core.archive_path)
        util.make_path(tmp_root)
        return tmp_root

    def run_for_product(self, product, fn, use_enclosing_directory):
        """Retrieve the product into a throw-away directory, call ``fn`` on
        the list of retrieved file paths, and return its result.

        The temporary directory is removed when the context exits.
        """
        tmp_root = self.get_tmp_root(product)
        product_path = self.product_path(product)
        with util.TemporaryDirectory(dir=tmp_root, prefix=".run_for_product-",
                                     suffix="-%s" % product.core.uuid.hex) as tmp_path:
            self.get(product, product_path, tmp_path, use_enclosing_directory)
            paths = [os.path.join(tmp_path, basename)
                     for basename in os.listdir(tmp_path)]
            return fn(paths)

    def prepare(self):
        # Prepare storage for use.
        raise NotImplementedError()

    def exists(self):
        # Check that storage exists.
        raise NotImplementedError()

    def initialize(self, configuration):
        # Initialize storage.
        raise NotImplementedError()

    def destroy(self):
        # Destroy storage
        raise NotImplementedError()

    def product_path(self, product):  # TODO refactor away?
        # Product path within storage
        raise NotImplementedError()

    # TODO lower-granularity put/get/delete
    def put(self, paths, properties, use_enclosing_directory, use_symlinks=None,
            retrieve_files=None, run_for_product=None):
        # Place product file(s) into storage
        raise NotImplementedError()

    def get(self, product, product_path, target_path, use_enclosing_directory, use_symlinks=None):
        # Retrieve product file(s) from storage
        raise NotImplementedError()

    def size(self, product_path):
        # Return product storage size
        raise NotImplementedError()

    def delete(self, product_path, properties):
        # Delete product file(s) from storage
        raise NotImplementedError()

    def move(self, product, archive_path, paths=None):
        # Move product
        raise NotImplementedError()
|
[
"muninn.util.TemporaryDirectory",
"muninn.util.make_path"
] |
[((326, 350), 'muninn.util.make_path', 'util.make_path', (['tmp_root'], {}), '(tmp_root)\n', (340, 350), True, 'import muninn.util as util\n'), ((558, 666), 'muninn.util.TemporaryDirectory', 'util.TemporaryDirectory', ([], {'dir': 'tmp_root', 'prefix': '""".run_for_product-"""', 'suffix': "('-%s' % product.core.uuid.hex)"}), "(dir=tmp_root, prefix='.run_for_product-', suffix=\n '-%s' % product.core.uuid.hex)\n", (581, 666), True, 'import muninn.util as util\n')]
|
# Lint as: python3
# Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for generic image dataset creation."""
import os
from delf.python.datasets import utils
class ImagesFromList():
  """Generic image dataset backed by a list of relative paths.

  Images may differ in size; each one is loaded lazily through the
  configured loader callable.
  """

  def __init__(self, root, image_paths, imsize=None, bounding_boxes=None,
               loader=utils.default_loader):
    """Initializes the dataset.

    Args:
      root: String, root directory path.
      image_paths: List, relative image paths as strings.
      imsize: Integer, defines the maximum size of longer image side.
      bounding_boxes: List of (x1,y1,x2,y2) tuples to crop the query images.
      loader: Callable, a function to load an image given its path.

    Raises:
      ValueError: Raised if `image_paths` list is empty.
    """
    full_filenames = [os.path.join(root, relative_path)
                      for relative_path in image_paths]
    if not full_filenames:
      raise ValueError("Dataset contains 0 images.")

    self.root = root
    self.images = image_paths
    self.imsize = imsize
    self.images_filenames = full_filenames
    self.bounding_boxes = bounding_boxes
    self.loader = loader

  def __getitem__(self, index):
    """Loads the image at the given `index`.

    Args:
      index: Integer, image index.

    Returns:
      image: Tensor, loaded image.
    """
    path = self.images_filenames[index]
    if self.bounding_boxes is None:
      return self.loader(path, self.imsize)
    return self.loader(path, self.imsize, self.bounding_boxes[index])

  def __len__(self):
    """Implements the built-in function len().

    Returns:
      len: Number of images in the dataset.
    """
    return len(self.images_filenames)
|
[
"os.path.join"
] |
[((1571, 1601), 'os.path.join', 'os.path.join', (['root', 'image_path'], {}), '(root, image_path)\n', (1583, 1601), False, 'import os\n')]
|
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
import threading
from soma import aims
import os
import sys
from optparse import OptionParser
import threading
import tempfile
import shutil
import soma.subprocess
import time
import six
from six.moves import zip
def aims_test_thread_read(filenames, verbose=True):
    """Read every file in *filenames* concurrently, one thread per file.

    Prints a summary of how many objects loaded and in how long, and raises
    RuntimeError when the shared success counter did not reach
    len(filenames) (i.e. at least one thread failed before incrementing it).
    """
    class Loadfile(object):
        # Callable thread target: load one file, then bump the shared counter.
        def __init__(self, filename, lock, objnum, verbose):
            self._filename = filename
            self.lock = lock
            self.objnum = objnum
            self.verbose = verbose
        def __call__(self):
            if self.verbose:
                print('reading %s...' % self._filename)
            obj = aims.read(self._filename)
            if self.verbose:
                print('read %s: %s' % (self._filename, str(type(obj))))
            # Several threads increment the counter; guard it with the lock.
            self.lock.acquire()
            self.objnum[0] += 1
            self.lock.release()
    aims.carto.PluginLoader.load() # do this once in main thread
    threads = []
    lock = threading.RLock()
    # objnum is a list, not an int, because the counter has to be shared
    # between all threads: a list is, an int is not
    objnum = [0]
    starttime = time.time()
    for fname in filenames:
        thread = threading.Thread(
            target=Loadfile(fname, lock, objnum, verbose))
        thread.start()
        threads.append(thread)
    # Wait for every reader to finish before inspecting the counter.
    for thread in threads:
        thread.join()
    duration = time.time() - starttime
    print('finished. Read %d / %d objects in %.3f seconds.'
          % (objnum[0], len(filenames), duration))
    nmissing = len(filenames) - objnum[0]
    if nmissing != 0:
        print('Not all objects were loaded, %d missing.' % nmissing)
        raise RuntimeError('Not all objects were loaded, %d missing.'
                           % nmissing)
def _convertFileFormat(aimsobj, directory, prefix, format, is_soma=False):
    """Write *aimsobj* into *directory* in the given *format* and verify it.

    Every extension registered for the format is tried (an empty extension,
    when present, is moved to the end).  After each write the output is
    re-checked with aims.Finder to confirm it really is in the requested
    format; on any failure the directory is wiped and recreated so the next
    attempt starts clean.

    Returns the filename of the verified output, or None when the format has
    no extensions or no attempt produced a verifiable file.
    """
    if is_soma:
        exts = somaio_extensions(aimsobj, format)
    else:
        exts = aims.Finder.extensions(format)
    if len(exts) == 0:
        return None
    # Move the empty extension (if any) to the end of the candidate list.
    exts2 = [x for x in exts if x != '']
    if len(exts) != len(exts2):
        exts2.append('')
    exts = exts2
    del exts2
    formatok = False
    for ext in exts:
        if ext == '':
            newfilename = os.path.join(directory, prefix)
        else:
            newfilename = os.path.join(directory,
                                       '.'.join((prefix, ext)))
        try:
            aims.write(aimsobj, newfilename, format=format)
            if not os.path.exists(newfilename):
                # Some writers change the output name: pick up whatever
                # non-.minf file matching the extension appeared instead.
                for f in os.listdir(directory):
                    if not f.endswith('.minf') \
                            and (ext == '' or f.endswith('.' + ext)):
                        newfilename = os.path.join(directory, f)
                        break
                else:
                    # Nothing was written at all: reset and try the next ext.
                    shutil.rmtree(directory)
                    os.mkdir(directory)
                    continue
            f = aims.Finder()
            if f.check(newfilename) and f.format() == format:
                formatok = True
                break
            else:
                # Output readable but not in the requested format: reset.
                shutil.rmtree(directory)
                os.mkdir(directory)
        except Exception:
            # BUG FIX: was a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit; only writer errors are expected.
            shutil.rmtree(directory)
            os.mkdir(directory)
            continue
    if formatok:
        return newfilename
    return None
def somaio_formats(aimsobj):
    """Return {format_name: [extensions]} for the soma-io write formats of
    *aimsobj*.

    Falls back to the generic Object dictionary for GenericObject instances.
    Returns an empty dict when no format dictionary exists for the type.
    (BUG FIX: the original implicitly returned None here, which crashed the
    only caller, test_all_formats(), when it iterated the result;
    somaio_extensions() already returns [] in the same situation, so {}
    keeps the two helpers consistent.)
    """
    try:
        fclass = getattr(aims.carto,
                         'FormatDictionary_%s' % aims.typeCode(aimsobj))
    except Exception:
        # BUG FIX: narrowed from a bare 'except:' so Ctrl-C still aborts.
        if isinstance(aimsobj, aims.carto.GenericObject):
            fclass = aims.carto.FormatDictionary_Object
        else:
            return {}
    formats = fclass.writeFormats()
    exts = fclass.writeExtensions()
    # Invert the extension -> formats mapping into format -> extensions.
    ext_by_format = dict([(f, []) for f in formats])
    for ext, flist in six.iteritems(exts):
        for f in flist:
            ext_by_format[f].append(ext)
    return ext_by_format
def somaio_extensions(aimsobj, format):
    """Return the list of soma-io file extensions that *format* can write
    for *aimsobj*.

    Falls back to the generic Object dictionary for GenericObject instances;
    returns [] when the object type has no soma-io format dictionary.
    """
    try:
        fclass = getattr(aims.carto,
                         'FormatDictionary_%s' % aims.typeCode(aimsobj))
    except Exception:
        # BUG FIX: narrowed from a bare 'except:' so Ctrl-C still aborts.
        if isinstance(aimsobj, aims.carto.GenericObject):
            fclass = aims.carto.FormatDictionary_Object
        else:
            return []
    exts = fclass.writeExtensions()
    exts_for_format = [ext for ext, formats in six.iteritems(exts)
                       if format in formats]
    return exts_for_format
def test_all_formats(filename, number=30, separate_process=False):
    """Convert *filename* into every writable format and thread-test each one.

    Each format is written to a temporary directory via _convertFileFormat(),
    then read *number* times concurrently — optionally in a subprocess so a
    hard crash does not kill the whole test loop.

    Returns (unsafe, safe): two dicts mapping (objectType, dataType) to the
    lists of formats that failed / passed; 'unsafe' is empty on full success.
    """
    f = aims.Finder()
    if not f.check(filename):
        # BUG FIX: was "'%f is not readable' % filename", which raises
        # TypeError when formatting a string filename with %f.
        raise IOError('%s is not readable' % filename)
    ot = f.objectType(), f.dataType()
    aimsobj = aims.read(filename)
    formats = aims.IOObjectTypesDictionary.formats(*ot)
    soma_io_formats = somaio_formats(aimsobj)
    success = True
    unsafe_formats = []
    safe_formats = []
    all_formats = list(zip(formats, [False] * len(formats))) \
        + [(f, True) for f in soma_io_formats]
    for format, is_soma in all_formats:
        # JP2 writer in Qt (4.8.1 at least) systematically crashes.
        # BUG FIX: the original tested "format in ('JP2')" — a plain string,
        # so any substring of 'JP2' ('J', 'P', '2', 'JP', 'P2') was also
        # silently skipped.  A one-element tuple restores the intent.
        if format in ('JP2',):
            continue
        print('testing: %s / %s, format: %s' % (ot[0], ot[1], format))
        try:
            directory = tempfile.mkdtemp(prefix='aims_thread_test')
            newfilename = _convertFileFormat(aimsobj, directory, 'aims_test',
                                             format, is_soma)
            if not newfilename:
                print('could not generate format', format)
                continue
            print('testing read on %s...' % newfilename)
            try:
                if separate_process:
                    # Crash isolation: a hard crash only kills the child.
                    soma.subprocess.check_call([sys.executable, '-m',
                        'soma.aims.tests.test_pyaims_thread_read', '-i',
                        newfilename, '-n', str(number), '--silent'])
                else:
                    aims_test_thread_read([newfilename] * number,
                                          verbose=False)
                print('Passed.')
                safe_formats.append(format)
            except Exception:
                # BUG FIX: narrowed from a bare 'except:' so Ctrl-C still
                # aborts the whole run instead of marking a format unsafe.
                print('format %s is unsafe.' % format)
                success = False
                unsafe_formats.append(format)
        finally:
            shutil.rmtree(directory)
    print('All done for %s / %s. Success =' % ot, success)
    if not success:
        return {ot: unsafe_formats}, {ot: safe_formats}
    return {}, {ot: safe_formats}
if __name__ == '__main__':
    # Command-line driver: either thread-test the given files directly, or
    # (with -a) convert each file to every known format and test them all.
    parser = OptionParser(
        description='Perform tests of threaded concurrent loading of aims objects in pyaims')
    parser.add_option('-i', '--input', dest='infiles',
                      help='files to be read concurrently', action='append', default=[])
    parser.add_option('-n', '--number', dest='number', type='int',
                      help='number of times each file should be read at the same time. Default: 30 if one input filename, 1 otherwise', default=0)
    parser.add_option('-a', '--all', dest='all', action='store_true',
                      default=False,
                      help='test all possible formats for each input file (convert to all of them and test)')
    parser.add_option('-s', '--subprocess', dest='subprocess',
                      action='store_true', default=False,
                      help='use subprocesses to run formats tests (with -a option). By default, they run in a single process, so a thread-related crash will end all tests (but will be easier to trace with a debugger).')
    parser.add_option('--silent', dest='silent', action='store_true',
                      default=False,
                      help='be less verbose in per-file tests (no -a option)')
    parser.add_option('-l', '--loop', dest='loop',
                      action='store_true', help='loop the execution endlessly (until it crashes). Useful for debugging rare crashes')
    options, args = parser.parse_args()
    # Positional arguments are accepted as additional input files.
    filenames = options.infiles + args
    if len(filenames) == 0:
        print('no input files.')
        parser.parse_args(['-h'])
    # Default repetition count: 30 for a single file (or -a mode), else 1.
    if options.number == 0:
        if len(filenames) == 1 or options.all:
            num = 30
        else:
            num = 1
    else:
        num = options.number
    # import libxml2
    # libxml2.newTextReaderFilename( '/tmp/ra_head.gii.minf' )
    # import xml.parsers.expat
    # open( '/tmp/xml.xml', 'w' ).write( '<?xml version="1.0" encoding="utf-8" ?><grop></grop>' )
    # p = xml.parsers.expat.ParserCreate()
    # p.ParseFile( open( '/tmp/xml.xml' ) )
    from soma.qt_gui.qt_backend import QtGui
    app = QtGui.QApplication(sys.argv)
    doit = True
    while doit:
        if options.all:
            unsafe_formats = {}
            safe_formats = {}
            for filename in filenames:
                tested_formats = test_all_formats(filename, num,
                                                 separate_process=options.subprocess)
                unsafe_formats.update(tested_formats[0])
                safe_formats.update(tested_formats[1])
            if len(unsafe_formats) != 0:
                print('Results:')
                print('unsafe formats:')
                print(unsafe_formats)
                print('safe formats:')
                print(safe_formats)
                raise RuntimeError('Some tests failed.')
            else:
                print('OK.')
                print('safe formats:')
                print(safe_formats)
        else:
            # NOTE(review): under --loop this re-multiplies the list on every
            # iteration, so the workload grows geometrically — confirm whether
            # that escalation is intentional.
            filenames = filenames * num
            aims_test_thread_read(filenames, verbose=not options.silent)
        if not options.loop:
            doit = False
|
[
"soma.qt_gui.qt_backend.QtGui.QApplication",
"os.mkdir",
"soma.aims.typeCode",
"soma.aims.Finder",
"optparse.OptionParser",
"shutil.rmtree",
"threading.RLock",
"os.path.exists",
"soma.aims.carto.PluginLoader.load",
"soma.aims.Finder.extensions",
"time.time",
"soma.aims.write",
"soma.aims.read",
"tempfile.mkdtemp",
"soma.aims.IOObjectTypesDictionary.formats",
"six.iteritems",
"os.path.join",
"os.listdir"
] |
[((954, 984), 'soma.aims.carto.PluginLoader.load', 'aims.carto.PluginLoader.load', ([], {}), '()\n', (982, 984), False, 'from soma import aims\n'), ((1045, 1062), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (1060, 1062), False, 'import threading\n'), ((1222, 1233), 'time.time', 'time.time', ([], {}), '()\n', (1231, 1233), False, 'import time\n'), ((3913, 3932), 'six.iteritems', 'six.iteritems', (['exts'], {}), '(exts)\n', (3926, 3932), False, 'import six\n'), ((4599, 4612), 'soma.aims.Finder', 'aims.Finder', ([], {}), '()\n', (4610, 4612), False, 'from soma import aims\n'), ((4750, 4769), 'soma.aims.read', 'aims.read', (['filename'], {}), '(filename)\n', (4759, 4769), False, 'from soma import aims\n'), ((4784, 4825), 'soma.aims.IOObjectTypesDictionary.formats', 'aims.IOObjectTypesDictionary.formats', (['*ot'], {}), '(*ot)\n', (4820, 4825), False, 'from soma import aims\n'), ((6706, 6809), 'optparse.OptionParser', 'OptionParser', ([], {'description': '"""Perform tests of threaded concurrent loading of aims objects in pyaims"""'}), "(description=\n 'Perform tests of threaded concurrent loading of aims objects in pyaims')\n", (6718, 6809), False, 'from optparse import OptionParser\n'), ((8803, 8831), 'soma.qt_gui.qt_backend.QtGui.QApplication', 'QtGui.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (8821, 8831), False, 'from soma.qt_gui.qt_backend import QtGui\n'), ((1476, 1487), 'time.time', 'time.time', ([], {}), '()\n', (1485, 1487), False, 'import time\n'), ((2021, 2051), 'soma.aims.Finder.extensions', 'aims.Finder.extensions', (['format'], {}), '(format)\n', (2043, 2051), False, 'from soma import aims\n'), ((726, 751), 'soma.aims.read', 'aims.read', (['self._filename'], {}), '(self._filename)\n', (735, 751), False, 'from soma import aims\n'), ((2314, 2345), 'os.path.join', 'os.path.join', (['directory', 'prefix'], {}), '(directory, prefix)\n', (2326, 2345), False, 'import os\n'), ((2499, 2546), 'soma.aims.write', 'aims.write', (['aimsobj', 
'newfilename'], {'format': 'format'}), '(aimsobj, newfilename, format=format)\n', (2509, 2546), False, 'from soma import aims\n'), ((3011, 3024), 'soma.aims.Finder', 'aims.Finder', ([], {}), '()\n', (3022, 3024), False, 'from soma import aims\n'), ((4430, 4449), 'six.iteritems', 'six.iteritems', (['exts'], {}), '(exts)\n', (4443, 4449), False, 'import six\n'), ((5314, 5357), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'prefix': '"""aims_thread_test"""'}), "(prefix='aims_thread_test')\n", (5330, 5357), False, 'import tempfile\n'), ((6469, 6493), 'shutil.rmtree', 'shutil.rmtree', (['directory'], {}), '(directory)\n', (6482, 6493), False, 'import shutil\n'), ((2566, 2593), 'os.path.exists', 'os.path.exists', (['newfilename'], {}), '(newfilename)\n', (2580, 2593), False, 'import os\n'), ((2620, 2641), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (2630, 2641), False, 'import os\n'), ((3230, 3254), 'shutil.rmtree', 'shutil.rmtree', (['directory'], {}), '(directory)\n', (3243, 3254), False, 'import shutil\n'), ((3271, 3290), 'os.mkdir', 'os.mkdir', (['directory'], {}), '(directory)\n', (3279, 3290), False, 'import os\n'), ((3319, 3343), 'shutil.rmtree', 'shutil.rmtree', (['directory'], {}), '(directory)\n', (3332, 3343), False, 'import shutil\n'), ((3356, 3375), 'os.mkdir', 'os.mkdir', (['directory'], {}), '(directory)\n', (3364, 3375), False, 'import os\n'), ((3583, 3605), 'soma.aims.typeCode', 'aims.typeCode', (['aimsobj'], {}), '(aimsobj)\n', (3596, 3605), False, 'from soma import aims\n'), ((4161, 4183), 'soma.aims.typeCode', 'aims.typeCode', (['aimsobj'], {}), '(aimsobj)\n', (4174, 4183), False, 'from soma import aims\n'), ((2901, 2925), 'shutil.rmtree', 'shutil.rmtree', (['directory'], {}), '(directory)\n', (2914, 2925), False, 'import shutil\n'), ((2946, 2965), 'os.mkdir', 'os.mkdir', (['directory'], {}), '(directory)\n', (2954, 2965), False, 'import os\n'), ((2802, 2828), 'os.path.join', 'os.path.join', (['directory', 'f'], {}), '(directory, 
f)\n', (2814, 2828), False, 'import os\n')]
|
from vistautils.iter_utils import only
from adam.language import TokenSequenceLinguisticDescription
from adam.learner import LearningExample, MemorizingLanguageLearner
from adam.perception import (
BagOfFeaturesPerceptualRepresentationFrame,
PerceptualRepresentation,
)
def test_pipeline():
    """End-to-end check of the memorizing learner: after observing one
    ("red", "truck") example it describes exactly that perception and
    nothing smaller ("red" or "truck" alone)."""
    def perception_of(*features):
        # Fresh perceptual representation for the given feature tuple.
        return PerceptualRepresentation(
            [BagOfFeaturesPerceptualRepresentationFrame(features)]
        )

    learner: MemorizingLanguageLearner[
        BagOfFeaturesPerceptualRepresentationFrame, TokenSequenceLinguisticDescription
    ] = MemorizingLanguageLearner()
    learner.observe(
        LearningExample(
            perception=perception_of("red", "truck"),
            linguistic_description=TokenSequenceLinguisticDescription(("red", "truck")),
        )
    )
    # shouldn't be able to describe "red" or "truck" alone
    assert not learner.describe(perception_of("red"))
    assert not learner.describe(perception_of("truck"))
    # but should be able to describe "red truck"
    descriptions = learner.describe(perception_of("red", "truck"))
    assert len(descriptions) == 1
    assert only(descriptions).as_token_sequence() == ("red", "truck")
|
[
"vistautils.iter_utils.only",
"adam.language.TokenSequenceLinguisticDescription",
"adam.perception.BagOfFeaturesPerceptualRepresentationFrame",
"adam.learner.MemorizingLanguageLearner"
] |
[((730, 757), 'adam.learner.MemorizingLanguageLearner', 'MemorizingLanguageLearner', ([], {}), '()\n', (755, 757), False, 'from adam.learner import LearningExample, MemorizingLanguageLearner\n'), ((1434, 1462), 'vistautils.iter_utils.only', 'only', (['red_truck_descriptions'], {}), '(red_truck_descriptions)\n', (1438, 1462), False, 'from vistautils.iter_utils import only\n'), ((524, 576), 'adam.language.TokenSequenceLinguisticDescription', 'TokenSequenceLinguisticDescription', (["('red', 'truck')"], {}), "(('red', 'truck'))\n", (558, 576), False, 'from adam.language import TokenSequenceLinguisticDescription\n'), ((1284, 1344), 'adam.perception.BagOfFeaturesPerceptualRepresentationFrame', 'BagOfFeaturesPerceptualRepresentationFrame', (["('red', 'truck')"], {}), "(('red', 'truck'))\n", (1326, 1344), False, 'from adam.perception import BagOfFeaturesPerceptualRepresentationFrame, PerceptualRepresentation\n'), ((950, 1002), 'adam.perception.BagOfFeaturesPerceptualRepresentationFrame', 'BagOfFeaturesPerceptualRepresentationFrame', (["('red',)"], {}), "(('red',))\n", (992, 1002), False, 'from adam.perception import BagOfFeaturesPerceptualRepresentationFrame, PerceptualRepresentation\n'), ((1078, 1132), 'adam.perception.BagOfFeaturesPerceptualRepresentationFrame', 'BagOfFeaturesPerceptualRepresentationFrame', (["('truck',)"], {}), "(('truck',))\n", (1120, 1132), False, 'from adam.perception import BagOfFeaturesPerceptualRepresentationFrame, PerceptualRepresentation\n'), ((412, 472), 'adam.perception.BagOfFeaturesPerceptualRepresentationFrame', 'BagOfFeaturesPerceptualRepresentationFrame', (["('red', 'truck')"], {}), "(('red', 'truck'))\n", (454, 472), False, 'from adam.perception import BagOfFeaturesPerceptualRepresentationFrame, PerceptualRepresentation\n')]
|
# python
import lx, lxifc, lxu.command, modo, tagger, random
CMD_NAME = tagger.CMD_PTAG_SELECTION_FCL
global_tags = None
global_poly_count = 0
def list_commands():
    """Build the Form Command List (FCL) of select-by-tag commands for the
    tags present on the current polygon selection.

    Side effects: repopulates the module-level global_tags (three sets:
    material / part / pick) and global_poly_count via MeshEditorClass.
    """
    timer = tagger.DebugTimer()
    global global_tags
    global global_poly_count
    fcl = []
    # Index 0: material tags, 1: part tags, 2: pick (selection-set) tags.
    global_tags = [
        set(),
        set(),
        set()
    ]
    global_poly_count = 0
    mesh_editor = MeshEditorClass()
    mesh_read_successful = mesh_editor.do_mesh_read()
    selmode = tagger.selection.get_mode()
    # Nothing selected (or wrong component mode): show a no-op placeholder.
    if global_poly_count == 0 or selmode not in ['polygon', 'edge', 'vertex']:
        fcl.append("%s {%s}" % (tagger.CMD_NOOP, tagger.LABEL_NO_POLYS))
        timer.end()
        return fcl
    elif global_poly_count > tagger.MAX_FCL_POLY_INSPECT:
        fcl.append("%s {%s}" % (tagger.CMD_NOOP, tagger.LABEL_MAX_POLY))
        timer.end()
        return fcl
    if sum([len(tags) for tags in global_tags]) == 0:
        fcl.append("%s {%s}" % (tagger.CMD_NOOP, tagger.LABEL_NO_TAGS))
        timer.end()
        return fcl
    # NOTE(review): len(global_tags) is always 3 (three tag-type sets), so
    # this guard only triggers when MAX_FCL < 3; it likely meant to compare
    # the total number of tags against MAX_FCL — confirm intent.
    if len(global_tags) > tagger.MAX_FCL:
        fcl.append("%s {%s}" % (tagger.CMD_NOOP, tagger.LABEL_MAX_FCL))
        timer.end()
        return fcl
    for n in range(len(global_tags)):
        if not global_tags[n]:
            continue
        for tag in sorted(global_tags[n]):
            # Map the tag-type index to the matching select-all command.
            tagType = [tagger.MATERIAL, tagger.PART, tagger.PICK][n]
            if tagType == tagger.MATERIAL:
                command = tagger.CMD_SELECT_ALL_BY_MATERIAL
            elif tagType == tagger.PART:
                command = tagger.CMD_SELECT_ALL_BY_PART
            elif tagType == tagger.PICK:
                command = tagger.CMD_SELECT_ALL_BY_SET
            fcl.append("%s {%s}" % (command, tag))
    timer.end()
    return fcl
class CommandClass(tagger.CommanderClass):
    """MODO query command exposing the tag-based selection FCL.

    The single query argument presents list_commands() as a dynamic
    Form Command List, refreshed on selection and tagger events.
    """
    #_commander_default_values = []

    def commander_arguments(self):
        query_argument = {
            'name': tagger.QUERY,
            'label': tagger.LABEL_QUERY,
            'datatype': 'integer',
            'default': '',
            'values_list_type': 'fcl',
            'values_list': list_commands,
            'flags': ['query'],
        }
        return [query_argument]

    def commander_notifiers(self):
        return [
            ("select.event", "polygon +ldt"),
            ("select.event", "item +ldt"),
            ("tagger.notifier", ""),
        ]
# Register the command with MODO under CMD_NAME.
lx.bless(CommandClass, CMD_NAME)
class MeshEditorClass(tagger.MeshEditorClass):
    def mesh_read_action(self):
        """Collect material/part/pick tags from the selected polygons into the
        module-level global_tags sets, incrementing global_poly_count per
        polygon and stopping once tagger.MAX_FCL_POLY_INSPECT is exceeded.
        """
        global global_tags
        global global_poly_count
        stringTag = lx.object.StringTag()
        stringTag.set(self.polygon_accessor)
        selected_polys = self.get_selected_polys()
        for poly in selected_polys:
            global_poly_count += 1
            if global_poly_count > tagger.MAX_FCL_POLY_INSPECT:
                # Caller (list_commands) shows a "too many polygons" placeholder.
                break
            self.polygon_accessor.Select(poly)
            material = stringTag.Get(lx.symbol.i_POLYTAG_MATERIAL)
            if material:
                global_tags[0].add(material)
            part = stringTag.Get(lx.symbol.i_POLYTAG_PART)
            if part:
                global_tags[1].add(part)
            pick = stringTag.Get(lx.symbol.i_POLYTAG_PICK)
            if pick:
                # A polygon can belong to several ";"-separated selection sets.
                global_tags[2].update(pick.split(";"))
|
[
"tagger.DebugTimer",
"tagger.selection.get_mode",
"lx.bless",
"lx.object.StringTag"
] |
[((2404, 2436), 'lx.bless', 'lx.bless', (['CommandClass', 'CMD_NAME'], {}), '(CommandClass, CMD_NAME)\n', (2412, 2436), False, 'import lx, lxifc, lxu.command, modo, tagger, random\n'), ((180, 199), 'tagger.DebugTimer', 'tagger.DebugTimer', ([], {}), '()\n', (197, 199), False, 'import lx, lxifc, lxu.command, modo, tagger, random\n'), ((471, 498), 'tagger.selection.get_mode', 'tagger.selection.get_mode', ([], {}), '()\n', (496, 498), False, 'import lx, lxifc, lxu.command, modo, tagger, random\n'), ((2600, 2621), 'lx.object.StringTag', 'lx.object.StringTag', ([], {}), '()\n', (2619, 2621), False, 'import lx, lxifc, lxu.command, modo, tagger, random\n')]
|
# noinspection PyPep8Naming
from controls.composite import CompositeControl
class SizeControl(CompositeControl):
    """Composite control exposing integer Width/Height parameters (default 3x3)."""

    @classmethod
    def get_fields(cls):
        """Return the parameter templates describing this control's fields."""
        from functions import ParameterTemplate
        return [
            ParameterTemplate("Width", "int", default=3),
            ParameterTemplate("Height", "int", default=3),
        ]
|
[
"functions.ParameterTemplate"
] |
[((236, 280), 'functions.ParameterTemplate', 'ParameterTemplate', (['"""Width"""', '"""int"""'], {'default': '(3)'}), "('Width', 'int', default=3)\n", (253, 280), False, 'from functions import ParameterTemplate\n'), ((294, 339), 'functions.ParameterTemplate', 'ParameterTemplate', (['"""Height"""', '"""int"""'], {'default': '(3)'}), "('Height', 'int', default=3)\n", (311, 339), False, 'from functions import ParameterTemplate\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author: <NAME>
@Contact: <EMAIL>
@File: model.py
@Time: 2018/10/13 6:35 PM
Modified by
@Author: <NAME>
@Contact: <EMAIL>
@Time: 2020/3/9 9:32 PM
"""
import os
import sys
import copy
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
def knn(x, k):
    """Indices of the k nearest neighbours of every point.

    x: (batch, dims, num_points). Returns (batch, num_points, k) int64
    indices; each point is its own nearest neighbour (distance 0).
    """
    dot = torch.matmul(x.transpose(2, 1), x)
    sq_norm = torch.sum(x ** 2, dim=1, keepdim=True)
    # This is -||xi - xj||^2, so topk() picks the *smallest* distances.
    neg_sq_dist = 2 * dot - sq_norm - sq_norm.transpose(2, 1)
    return neg_sq_dist.topk(k=k, dim=2)[1]


def get_graph_feature(x, k=20, spatial_dim=None):
    """Build DGCNN edge features.

    The kNN graph is computed on the first *spatial_dim* channels when given,
    otherwise on all channels. For each point, stacks
    (neighbour - point, point) along the channel axis.

    x: (batch, num_dims, num_points) -> (batch, 2*num_dims, num_points, k)
    """
    bsz, num_dims, npts = x.size(0), x.size(1), x.size(2)
    graph_input = x if spatial_dim is None else x[:, :spatial_dim]
    idx = knn(graph_input, k=k)
    # Offset per-batch neighbour indices into the flattened point table.
    base = torch.arange(0, bsz, device=x.device).view(-1, 1, 1) * npts
    flat_idx = (idx + base).view(-1)
    points = x.transpose(2, 1).contiguous()  # (bsz, npts, num_dims)
    neighbours = points.view(bsz * npts, -1)[flat_idx, :]
    neighbours = neighbours.view(bsz, npts, k, num_dims)
    centers = points.view(bsz, npts, 1, num_dims).repeat(1, 1, k, 1)
    edge_feat = torch.cat((neighbours - centers, centers), dim=3)
    return edge_feat.permute(0, 3, 1, 2).contiguous()
class Net(nn.Module):
    """DGCNN-style per-point classifier: three stacked EdgeConv stages whose
    outputs are concatenated, embedded, max-pooled into a global descriptor,
    then combined with the per-point features for classification."""
    def __init__(self,spatial_dim=3,num_in_feat=3,k=20,dim_embed=1024,num_classes=10):
        """spatial_dim: leading input channels used for the kNN graph;
        num_in_feat: input channels per point; k: neighbours per point;
        dim_embed: width of the global embedding; num_classes: output channels.
        """
        super(Net, self).__init__()
        self.spatial_dim=spatial_dim
        self.num_in_feat=num_in_feat
        self.num_classes=num_classes
        self.k = k
        self.dim_embed=dim_embed
        self.bn1 = nn.BatchNorm2d(64)
        self.bn2 = nn.BatchNorm2d(64)
        self.bn3 = nn.BatchNorm2d(64)
        self.bn4 = nn.BatchNorm2d(64)
        self.bn5 = nn.BatchNorm2d(64)
        self.bn6 = nn.BatchNorm1d(self.dim_embed)
        self.bn7 = nn.BatchNorm1d(512)
        self.bn8 = nn.BatchNorm1d(256)
        self.conv1 = nn.Sequential(nn.Conv2d(2*self.num_in_feat, 64, kernel_size=1, bias=False),
                                   self.bn1,
                                   nn.LeakyReLU(negative_slope=0.2))
        self.conv2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1, bias=False),
                                   self.bn2,
                                   nn.LeakyReLU(negative_slope=0.2))
        self.conv3 = nn.Sequential(nn.Conv2d(64*2, 64, kernel_size=1, bias=False),
                                   self.bn3,
                                   nn.LeakyReLU(negative_slope=0.2))
        self.conv4 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1, bias=False),
                                   self.bn4,
                                   nn.LeakyReLU(negative_slope=0.2))
        self.conv5 = nn.Sequential(nn.Conv2d(64*2, 64, kernel_size=1, bias=False),
                                   self.bn5,
                                   nn.LeakyReLU(negative_slope=0.2))
        self.conv6 = nn.Sequential(nn.Conv1d(64*3,self.dim_embed, kernel_size=1, bias=False),
                                   self.bn6,
                                   nn.LeakyReLU(negative_slope=0.2))
        self.conv7 = nn.Sequential(nn.Conv1d(self.dim_embed+64*3, 512, kernel_size=1, bias=False),
                                   self.bn7,
                                   nn.LeakyReLU(negative_slope=0.2))
        self.conv8 = nn.Sequential(nn.Conv1d(512, 256, kernel_size=1, bias=False),
                                   self.bn8,
                                   nn.LeakyReLU(negative_slope=0.2))
        self.conv9 = nn.Conv1d(256,self.num_classes, kernel_size=1, bias=False)
    def forward(self, x):
        """x: (batch_size, num_in_feat, num_points) -> per-point logits of
        shape (batch_size, num_classes, num_points)."""
        batch_size = x.size(0)
        num_points = x.size(2)
        x = get_graph_feature(x, k=self.k,spatial_dim=self.spatial_dim)      # (batch_size, 9, num_points) -> (batch_size, 9*2, num_points, k)
        x = self.conv1(x)                       # (batch_size, 9*2, num_points, k) -> (batch_size, 64, num_points, k)
        x = self.conv2(x)                       # (batch_size, 64, num_points, k) -> (batch_size, 64, num_points, k)
        x1 = x.max(dim=-1, keepdim=False)[0]    # (batch_size, 64, num_points, k) -> (batch_size, 64, num_points)
        x = get_graph_feature(x1, k=self.k)     # (batch_size, 64, num_points) -> (batch_size, 64*2, num_points, k)
        x = self.conv3(x)                       # (batch_size, 64*2, num_points, k) -> (batch_size, 64, num_points, k)
        x = self.conv4(x)                       # (batch_size, 64, num_points, k) -> (batch_size, 64, num_points, k)
        x2 = x.max(dim=-1, keepdim=False)[0]    # (batch_size, 64, num_points, k) -> (batch_size, 64, num_points)
        x = get_graph_feature(x2, k=self.k)     # (batch_size, 64, num_points) -> (batch_size, 64*2, num_points, k)
        x = self.conv5(x)                       # (batch_size, 64*2, num_points, k) -> (batch_size, 64, num_points, k)
        x3 = x.max(dim=-1, keepdim=False)[0]    # (batch_size, 64, num_points, k) -> (batch_size, 64, num_points)
        x = torch.cat((x1, x2, x3), dim=1)      # (batch_size, 64*3, num_points)
        x = self.conv6(x)                       # (batch_size, 64*3, num_points) -> (batch_size, self.dim_embed, num_points)
        x = x.max(dim=-1, keepdim=True)[0]      # (batch_size, self.dim_embed, num_points) -> (batch_size, self.dim_embed, 1)
        x = x.repeat(1, 1, num_points)          # (batch_size, 1024, num_points)
        x = torch.cat((x, x1, x2, x3), dim=1)   # (batch_size, 1024+64*3, num_points)
        x = self.conv7(x)                       # (batch_size, 1024+64*3, num_points) -> (batch_size, 512, num_points)
        x = self.conv8(x)                       # (batch_size, 512, num_points) -> (batch_size, 256, num_points)
        x = self.conv9(x)                       # (batch_size, 256, num_points) -> (batch_size, num_classes, num_points)
        return x
|
[
"torch.nn.Conv1d",
"torch.nn.BatchNorm1d",
"torch.cat",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"torch.arange",
"torch.nn.LeakyReLU",
"torch.sum"
] |
[((438, 476), 'torch.sum', 'torch.sum', (['(x ** 2)'], {'dim': '(1)', 'keepdim': '(True)'}), '(x ** 2, dim=1, keepdim=True)\n', (447, 476), False, 'import torch\n'), ((1869, 1887), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (1883, 1887), True, 'import torch.nn as nn\n'), ((1907, 1925), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (1921, 1925), True, 'import torch.nn as nn\n'), ((1945, 1963), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (1959, 1963), True, 'import torch.nn as nn\n'), ((1983, 2001), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (1997, 2001), True, 'import torch.nn as nn\n'), ((2021, 2039), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (2035, 2039), True, 'import torch.nn as nn\n'), ((2059, 2089), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['self.dim_embed'], {}), '(self.dim_embed)\n', (2073, 2089), True, 'import torch.nn as nn\n'), ((2109, 2128), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(512)'], {}), '(512)\n', (2123, 2128), True, 'import torch.nn as nn\n'), ((2148, 2167), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(256)'], {}), '(256)\n', (2162, 2167), True, 'import torch.nn as nn\n'), ((3803, 3862), 'torch.nn.Conv1d', 'nn.Conv1d', (['(256)', 'self.num_classes'], {'kernel_size': '(1)', 'bias': '(False)'}), '(256, self.num_classes, kernel_size=1, bias=False)\n', (3812, 3862), True, 'import torch.nn as nn\n'), ((5272, 5302), 'torch.cat', 'torch.cat', (['(x1, x2, x3)'], {'dim': '(1)'}), '((x1, x2, x3), dim=1)\n', (5281, 5302), False, 'import torch\n'), ((5687, 5720), 'torch.cat', 'torch.cat', (['(x, x1, x2, x3)'], {'dim': '(1)'}), '((x, x1, x2, x3), dim=1)\n', (5696, 5720), False, 'import torch\n'), ((2204, 2266), 'torch.nn.Conv2d', 'nn.Conv2d', (['(2 * self.num_in_feat)', '(64)'], {'kernel_size': '(1)', 'bias': '(False)'}), '(2 * self.num_in_feat, 64, kernel_size=1, bias=False)\n', (2213, 2266), True, 'import torch.nn as nn\n'), 
((2346, 2378), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.2)'}), '(negative_slope=0.2)\n', (2358, 2378), True, 'import torch.nn as nn\n'), ((2415, 2459), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)'], {'kernel_size': '(1)', 'bias': '(False)'}), '(64, 64, kernel_size=1, bias=False)\n', (2424, 2459), True, 'import torch.nn as nn\n'), ((2541, 2573), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.2)'}), '(negative_slope=0.2)\n', (2553, 2573), True, 'import torch.nn as nn\n'), ((2610, 2658), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64 * 2)', '(64)'], {'kernel_size': '(1)', 'bias': '(False)'}), '(64 * 2, 64, kernel_size=1, bias=False)\n', (2619, 2658), True, 'import torch.nn as nn\n'), ((2738, 2770), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.2)'}), '(negative_slope=0.2)\n', (2750, 2770), True, 'import torch.nn as nn\n'), ((2807, 2851), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)'], {'kernel_size': '(1)', 'bias': '(False)'}), '(64, 64, kernel_size=1, bias=False)\n', (2816, 2851), True, 'import torch.nn as nn\n'), ((2933, 2965), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.2)'}), '(negative_slope=0.2)\n', (2945, 2965), True, 'import torch.nn as nn\n'), ((3002, 3050), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64 * 2)', '(64)'], {'kernel_size': '(1)', 'bias': '(False)'}), '(64 * 2, 64, kernel_size=1, bias=False)\n', (3011, 3050), True, 'import torch.nn as nn\n'), ((3130, 3162), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.2)'}), '(negative_slope=0.2)\n', (3142, 3162), True, 'import torch.nn as nn\n'), ((3199, 3259), 'torch.nn.Conv1d', 'nn.Conv1d', (['(64 * 3)', 'self.dim_embed'], {'kernel_size': '(1)', 'bias': '(False)'}), '(64 * 3, self.dim_embed, kernel_size=1, bias=False)\n', (3208, 3259), True, 'import torch.nn as nn\n'), ((3338, 3370), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.2)'}), '(negative_slope=0.2)\n', (3350, 3370), True, 'import 
torch.nn as nn\n'), ((3407, 3473), 'torch.nn.Conv1d', 'nn.Conv1d', (['(self.dim_embed + 64 * 3)', '(512)'], {'kernel_size': '(1)', 'bias': '(False)'}), '(self.dim_embed + 64 * 3, 512, kernel_size=1, bias=False)\n', (3416, 3473), True, 'import torch.nn as nn\n'), ((3551, 3583), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.2)'}), '(negative_slope=0.2)\n', (3563, 3583), True, 'import torch.nn as nn\n'), ((3620, 3666), 'torch.nn.Conv1d', 'nn.Conv1d', (['(512)', '(256)'], {'kernel_size': '(1)', 'bias': '(False)'}), '(512, 256, kernel_size=1, bias=False)\n', (3629, 3666), True, 'import torch.nn as nn\n'), ((3748, 3780), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.2)'}), '(negative_slope=0.2)\n', (3760, 3780), True, 'import torch.nn as nn\n'), ((899, 943), 'torch.arange', 'torch.arange', (['(0)', 'batch_size'], {'device': 'x.device'}), '(0, batch_size, device=x.device)\n', (911, 943), False, 'import torch\n'), ((1407, 1441), 'torch.cat', 'torch.cat', (['(feature - x, x)'], {'dim': '(3)'}), '((feature - x, x), dim=3)\n', (1416, 1441), False, 'import torch\n')]
|
import os

fileDirectoryRoot = 'root'
localfilelist = []


def generateList():
    """Rebuild localfilelist with every file and directory found under
    fileDirectoryRoot, then print each entry.

    Entries are '/'-prefixed paths relative to the root; directory entries
    carry a trailing '/'.
    """
    del localfilelist[:]
    for current_dir, subdirs, filenames in os.walk(fileDirectoryRoot):
        rel = os.path.relpath(current_dir, fileDirectoryRoot)
        prefix = '' if rel == '.' else '/' + rel
        for name in filenames:
            localfilelist.append(prefix + '/' + name)
        for name in subdirs:
            localfilelist.append(prefix + '/' + name + '/')
    for entry in localfilelist:
        print(entry)


generateList()
|
[
"os.walk",
"os.path.relpath"
] |
[((123, 149), 'os.walk', 'os.walk', (['fileDirectoryRoot'], {}), '(fileDirectoryRoot)\n', (130, 149), False, 'import os\n'), ((226, 266), 'os.path.relpath', 'os.path.relpath', (['root', 'fileDirectoryRoot'], {}), '(root, fileDirectoryRoot)\n', (241, 266), False, 'import os\n')]
|
import idautils
import idaapi
import idc
def instructions(start_ea, end_ea):
    """
    Returns the list of instruction addresses in the given address range (including).
    """
    return [head for head in idautils.Heads(start_ea, end_ea)]


def basic_blocks(func_addr):
    """
    Generator that yields tuples of start and end addresses of all basic blocks in the given function.
    """
    func = idaapi.get_func(func_addr)
    flowchart = idaapi.FlowChart(func)
    for bb in flowchart:
        yield bb.startEA, bb.endEA


def func_instructions(func_addr):
    """
    Generator that yields the instruction addresses of all instructions in the given function.
    """
    for bb_start, bb_end in basic_blocks(func_addr):
        for addr in instructions(bb_start, bb_end):
            yield addr


def func_last_instr(func_addr):
    """
    Returns the address of the last instruction of the given function.
    """
    all_addrs = list(func_instructions(func_addr))
    return all_addrs[-1]


def func_mnemonics(func_addr):
    """
    Generator that yields the mnemonics of all instructions in the given functions.
    """
    for bb_start, bb_end in basic_blocks(func_addr):
        for mnem in mnemonics(bb_start, bb_end):
            yield mnem


def mnemonics(start_ea, end_ea):
    """
    Generator that yields all mnemonics in the given address range (including).
    """
    for addr in instructions(start_ea, end_ea):
        yield idc.GetMnem(addr)


def count_mnemonics(start_ea, end_ea, needle_mnemonics):
    """
    Counts how many times the mnemonics in needle_mnemonics occur in the given address range (including).
    """
    return sum(1 for mnem in mnemonics(start_ea, end_ea) if mnem in needle_mnemonics)
|
[
"idautils.Heads",
"idaapi.FlowChart",
"idc.GetMnem",
"idaapi.get_func"
] |
[((389, 415), 'idaapi.get_func', 'idaapi.get_func', (['func_addr'], {}), '(func_addr)\n', (404, 415), False, 'import idaapi\n'), ((427, 446), 'idaapi.FlowChart', 'idaapi.FlowChart', (['f'], {}), '(f)\n', (443, 446), False, 'import idaapi\n'), ((197, 229), 'idautils.Heads', 'idautils.Heads', (['start_ea', 'end_ea'], {}), '(start_ea, end_ea)\n', (211, 229), False, 'import idautils\n'), ((1435, 1458), 'idc.GetMnem', 'idc.GetMnem', (['instr_addr'], {}), '(instr_addr)\n', (1446, 1458), False, 'import idc\n')]
|
# coding=utf-8
# Copyright 2021 RigL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for weight_symmetry.prune."""
import glob
from os import path
from absl.testing import absltest
from absl.testing import flagsaver
from rigl.experimental.jax import prune
class PruneTest(absltest.TestCase):
  """Tests for the weight_symmetry pruning training driver.

  The three schedule variants previously triplicated the same
  run-in-tempdir-and-check-events logic; it now lives in one helper.
  """

  def _run_and_check_events(self, **eval_flags):
    """Runs prune.main under *eval_flags* in a fresh experiment dir.

    Supplies the experiment_dir flag automatically and asserts that exactly
    one TF events file was written.
    """
    experiment_dir = self.create_tempdir().full_path
    eval_flags['experiment_dir'] = experiment_dir
    with flagsaver.flagsaver(**eval_flags):
      prune.main([])
    outfile = path.join(experiment_dir, '*', 'events.out.tfevents.*')
    files = glob.glob(outfile)
    self.assertTrue(len(files) == 1 and path.exists(files[0]))

  def test_prune_fixed_schedule(self):
    """Tests training/pruning driver with a fixed global sparsity."""
    self._run_and_check_events(epochs=1, pruning_rate=0.95)

  def test_prune_global_pruning_schedule(self):
    """Tests training/pruning driver with a global sparsity schedule."""
    self._run_and_check_events(
        epochs=10, pruning_schedule='[(5, 0.33), (7, 0.66), (9, 0.95)]')

  def test_prune_local_pruning_schedule(self):
    """Tests training/pruning driver with a single layer sparsity schedule."""
    self._run_and_check_events(
        epochs=10, pruning_schedule='{1:[(5, 0.33), (7, 0.66), (9, 0.95)]}')
# Allow running this test module directly; absltest parses flags and runs.
if __name__ == '__main__':
  absltest.main()
|
[
"absl.testing.absltest.main",
"rigl.experimental.jax.prune.main",
"absl.testing.flagsaver.flagsaver",
"os.path.exists",
"glob.glob",
"os.path.join"
] |
[((2509, 2524), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (2522, 2524), False, 'from absl.testing import absltest\n'), ((1118, 1151), 'absl.testing.flagsaver.flagsaver', 'flagsaver.flagsaver', ([], {}), '(**eval_flags)\n', (1137, 1151), False, 'from absl.testing import flagsaver\n'), ((1159, 1173), 'rigl.experimental.jax.prune.main', 'prune.main', (['[]'], {}), '([])\n', (1169, 1173), False, 'from rigl.experimental.jax import prune\n'), ((1191, 1246), 'os.path.join', 'path.join', (['experiment_dir', '"""*"""', '"""events.out.tfevents.*"""'], {}), "(experiment_dir, '*', 'events.out.tfevents.*')\n", (1200, 1246), False, 'from os import path\n'), ((1261, 1279), 'glob.glob', 'glob.glob', (['outfile'], {}), '(outfile)\n', (1270, 1279), False, 'import glob\n'), ((1680, 1713), 'absl.testing.flagsaver.flagsaver', 'flagsaver.flagsaver', ([], {}), '(**eval_flags)\n', (1699, 1713), False, 'from absl.testing import flagsaver\n'), ((1721, 1735), 'rigl.experimental.jax.prune.main', 'prune.main', (['[]'], {}), '([])\n', (1731, 1735), False, 'from rigl.experimental.jax import prune\n'), ((1753, 1808), 'os.path.join', 'path.join', (['experiment_dir', '"""*"""', '"""events.out.tfevents.*"""'], {}), "(experiment_dir, '*', 'events.out.tfevents.*')\n", (1762, 1808), False, 'from os import path\n'), ((1823, 1841), 'glob.glob', 'glob.glob', (['outfile'], {}), '(outfile)\n', (1832, 1841), False, 'import glob\n'), ((2251, 2284), 'absl.testing.flagsaver.flagsaver', 'flagsaver.flagsaver', ([], {}), '(**eval_flags)\n', (2270, 2284), False, 'from absl.testing import flagsaver\n'), ((2292, 2306), 'rigl.experimental.jax.prune.main', 'prune.main', (['[]'], {}), '([])\n', (2302, 2306), False, 'from rigl.experimental.jax import prune\n'), ((2324, 2379), 'os.path.join', 'path.join', (['experiment_dir', '"""*"""', '"""events.out.tfevents.*"""'], {}), "(experiment_dir, '*', 'events.out.tfevents.*')\n", (2333, 2379), False, 'from os import path\n'), ((2394, 2412), 'glob.glob', 
'glob.glob', (['outfile'], {}), '(outfile)\n', (2403, 2412), False, 'import glob\n'), ((1323, 1344), 'os.path.exists', 'path.exists', (['files[0]'], {}), '(files[0])\n', (1334, 1344), False, 'from os import path\n'), ((1885, 1906), 'os.path.exists', 'path.exists', (['files[0]'], {}), '(files[0])\n', (1896, 1906), False, 'from os import path\n'), ((2456, 2477), 'os.path.exists', 'path.exists', (['files[0]'], {}), '(files[0])\n', (2467, 2477), False, 'from os import path\n')]
|
import pytest
from streamsets.testframework.decorators import stub
# Auto-generated per-attribute test skeleton for a StreamSets destination
# stage: every test is a @stub placeholder awaiting a real implementation;
# the parametrized ones enumerate the attribute values that need coverage.
@stub
def test_buffer_size_in_bytes(sdc_builder, sdc_executor):
    pass
@stub
@pytest.mark.parametrize('stage_attributes', [{'job_type': 'AVRO_PARQUET'}])
def test_compression_codec(sdc_builder, sdc_executor, stage_attributes):
    pass
@stub
def test_data_time_zone(sdc_builder, sdc_executor):
    pass
@stub
@pytest.mark.parametrize('stage_attributes', [{'job_type': 'AVRO_PARQUET'}])
def test_dictionary_page_size(sdc_builder, sdc_executor, stage_attributes):
    pass
@stub
def test_files_prefix(sdc_builder, sdc_executor):
    pass
@stub
def test_files_suffix(sdc_builder, sdc_executor):
    pass
@stub
@pytest.mark.parametrize('stage_attributes', [{'job_type': 'AVRO_PARQUET'}])
def test_job_type(sdc_builder, sdc_executor, stage_attributes):
    pass
@stub
@pytest.mark.parametrize('stage_attributes', [{'job_type': 'AVRO_PARQUET'}])
def test_max_padding_size(sdc_builder, sdc_executor, stage_attributes):
    pass
@stub
@pytest.mark.parametrize('stage_attributes', [{'on_record_error': 'DISCARD'},
                                              {'on_record_error': 'STOP_PIPELINE'},
                                              {'on_record_error': 'TO_ERROR'}])
def test_on_record_error(sdc_builder, sdc_executor, stage_attributes):
    pass
@stub
@pytest.mark.parametrize('stage_attributes', [{'job_type': 'AVRO_PARQUET'}])
def test_page_size(sdc_builder, sdc_executor, stage_attributes):
    pass
@stub
def test_preconditions(sdc_builder, sdc_executor):
    pass
@stub
def test_rate_per_second(sdc_builder, sdc_executor):
    pass
@stub
def test_required_fields(sdc_builder, sdc_executor):
    pass
@stub
@pytest.mark.parametrize('stage_attributes', [{'job_type': 'AVRO_PARQUET'}])
def test_row_group_size(sdc_builder, sdc_executor, stage_attributes):
    pass
@stub
def test_temporary_file_directory(sdc_builder, sdc_executor):
    pass
|
[
"pytest.mark.parametrize"
] |
[((152, 227), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""stage_attributes"""', "[{'job_type': 'AVRO_PARQUET'}]"], {}), "('stage_attributes', [{'job_type': 'AVRO_PARQUET'}])\n", (175, 227), False, 'import pytest\n'), ((388, 463), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""stage_attributes"""', "[{'job_type': 'AVRO_PARQUET'}]"], {}), "('stage_attributes', [{'job_type': 'AVRO_PARQUET'}])\n", (411, 463), False, 'import pytest\n'), ((692, 767), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""stage_attributes"""', "[{'job_type': 'AVRO_PARQUET'}]"], {}), "('stage_attributes', [{'job_type': 'AVRO_PARQUET'}])\n", (715, 767), False, 'import pytest\n'), ((850, 925), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""stage_attributes"""', "[{'job_type': 'AVRO_PARQUET'}]"], {}), "('stage_attributes', [{'job_type': 'AVRO_PARQUET'}])\n", (873, 925), False, 'import pytest\n'), ((1016, 1168), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""stage_attributes"""', "[{'on_record_error': 'DISCARD'}, {'on_record_error': 'STOP_PIPELINE'}, {\n 'on_record_error': 'TO_ERROR'}]"], {}), "('stage_attributes', [{'on_record_error': 'DISCARD'},\n {'on_record_error': 'STOP_PIPELINE'}, {'on_record_error': 'TO_ERROR'}])\n", (1039, 1168), False, 'import pytest\n'), ((1346, 1421), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""stage_attributes"""', "[{'job_type': 'AVRO_PARQUET'}]"], {}), "('stage_attributes', [{'job_type': 'AVRO_PARQUET'}])\n", (1369, 1421), False, 'import pytest\n'), ((1713, 1788), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""stage_attributes"""', "[{'job_type': 'AVRO_PARQUET'}]"], {}), "('stage_attributes', [{'job_type': 'AVRO_PARQUET'}])\n", (1736, 1788), False, 'import pytest\n')]
|
from django.views.decorators.csrf import csrf_exempt
import string
import random
import json
import os
import datetime
import time
from random import randint
from django.http import HttpResponseBadRequest
from .base_simulator import handle_request
from scripts.dblog import append_log, db_log
# Offset added to generated DB ids (groupId / aclId) in run(); 0 keeps them zero-based.
first_db_record = 0
def write_file(out_filename, content):
    """Write *content* to scripts/<out_filename>, replacing any existing file."""
    target = os.path.join("scripts", out_filename)
    with open(target, 'w') as handle:
        handle.write(content)
def read_file(in_filename):
    """Return scripts/<in_filename> as a list of lines (newlines stripped)."""
    source = os.path.join("scripts", in_filename)
    with open(source, 'r+') as handle:
        return handle.read().splitlines()
def read_file_all(in_filename):
    """Return the full text of scripts/<in_filename> as one string."""
    source = os.path.join("scripts", in_filename)
    with open(source, 'r+') as handle:
        return handle.read()
def read_json_file(in_filename, log):
    """Parse scripts/<in_filename> as JSON and return the result.

    Retries forever (sleeping 1s between attempts) because another process
    may be mid-write; each failed attempt is appended to *log*.
    """
    attempts = 0
    while True:
        try:
            raw = read_file_all(in_filename)
            return json.loads(raw)
        except Exception as exc:
            attempts += 1
            time.sleep(1)
            append_log(log, "dashboard_monitor::read_json_file::", attempts, exc)
def random_words(size):
    """Return *size* words drawn at random (with replacement) from scripts/words.txt."""
    vocabulary = read_file("words.txt")
    return [random.choice(vocabulary) for _ in range(size)]
def string_num_generator(size):
    """Return a random string of *size* decimal digits."""
    return ''.join(random.choice(string.digits) for _ in range(size))
def string_generator(size):
    """Return a random alphanumeric string (digits + upper + lower) of length *size*."""
    alphabet = string.digits + string.ascii_uppercase + string.ascii_lowercase
    return ''.join(random.choice(alphabet) for _ in range(size))
def _random_port_spec():
    """Random port expression for one side of a rule.

    One of: "any", a single port, a range "lo-hi", or a comma-separated list
    of 2-10 ports. (Previously this logic was duplicated verbatim for the
    source and destination sides of every rule.)
    """
    style = random.choice([0, 1, 2, 3])
    if style == 1:
        count = randint(2, 10)
        return ",".join(str(randint(1, 65535)) for _ in range(count))
    elif style == 2:
        low = randint(1, 65500)
        high = randint(low, 65535)
        return str(low) + "-" + str(high)
    elif style == 3:
        return str(randint(1, 65500))
    return "any"


def get_rules():
    """Generate a random list of 2-6 firewall-style ACL rules.

    Each rule has policy, protocol, srcPort and dstPort keys. Once an
    "any" or "icmp" protocol rule has been emitted, later rules are
    restricted to tcp/udp; icmp/any rules always use "any" ports.
    """
    rules = []
    has_anyany = False
    for _ in range(randint(2, 6)):
        src = _random_port_spec()
        dst = _random_port_spec()
        rule = {"policy": random.choice(["allow", "deny"])}
        if has_anyany:
            rule["protocol"] = random.choice(["tcp", "udp"])
        else:
            rule["protocol"] = random.choice(["any", "tcp", "udp", "icmp"])
        if rule["protocol"] in ("icmp", "any"):
            has_anyany = True
            rule["srcPort"] = "any"
            rule["dstPort"] = "any"
        else:
            rule["srcPort"] = src
            rule["dstPort"] = dst
        rules.append(rule)
    return rules
def run(orgs, tags, acls, policies):
    """Fabricate random Meraki-dashboard-style seed data.

    Creates *orgs* organizations; per organization: *tags* extra tag groups
    (on top of the two built-in Unknown/MerakiInternal groups), *acls* ACLs
    and *policies* policy bindings, all with random names/values. Results are
    dumped to scripts/orgs.json, groups.json, acls.json and bindings.json.
    The count arguments may be ints or int-parsable strings.
    """
    # Seed the PRNG from the millisecond timestamp with its bytes swapped.
    t = int(time.time() * 1000.0)
    random.seed(((t & 0xff000000) >> 24) +
                ((t & 0x00ff0000) >> 8) +
                ((t & 0x0000ff00) << 8) +
                ((t & 0x000000ff) << 24))
    neworgs = []
    newtags = {}
    newacls = {}
    newpolicies = {}
    isotime = datetime.datetime.now().isoformat()
    for o in range(0, int(orgs)):
        w = random_words(2)
        org_name = (w[0] + " " + w[1]).title()
        org_id = string_num_generator(18)
        org_code = string_generator(7)
        org_url = "{{url}}/o/" + org_code + "/manage/organization/overview"
        neworgs.append({"id": org_id, "name": org_name, "url": org_url})
        # Tag values 0 and 2 (Unknown / MerakiInternal) are reserved built-ins.
        used_tags = [0, 2]
        used_names = ["Unknown", "MerakiInternal"]
        newtags[org_id] = []
        t0_desc = "Unknown group applies when a policy is specified for unsuccessful group classification"
        t2_desc = "MerakiInternal group is used by Meraki devices for internal and dashboard communication"
        newtags[org_id].append({"groupId": 0 + first_db_record, "value": 0, "name": "Unknown", "description": t0_desc,
                                "versionNum": 0, "networkObjectId": None, "createdAt": isotime,
                                "updatedAt": isotime})
        newtags[org_id].append({"groupId": 1 + first_db_record, "value": 2, "name": "MerakiInternal",
                                "description": t2_desc, "versionNum": 0, "networkObjectId": None,
                                "createdAt": isotime, "updatedAt": isotime})
        for t in range(0, int(tags)):
            # Draw a two-word name and a tag value not used yet in this org.
            while True:
                tw = random_words(6)
                tag_name = (tw[0] + " " + tw[1]).title()
                if tag_name not in used_names:
                    used_names.append(tag_name)
                    break
            tag_desc = (tw[2] + " " + tw[3] + " " + tw[4] + " " + tw[5]).title()
            while True:
                tag_num = randint(3, 65529)
                if tag_num not in used_tags:
                    break
            used_tags.append(tag_num)
            newtags[org_id].append({"groupId": t + 2, "value": tag_num, "name": tag_name, "description": tag_desc,
                                    "versionNum": 1, "networkObjectId": None, "createdAt": isotime,
                                    "updatedAt": isotime})
        newacls[org_id] = []
        for a in range(0, int(acls)):
            while True:
                tw = random_words(6)
                acl_name = (tw[0] + " " + tw[1]).title()
                if acl_name not in used_names:
                    used_names.append(acl_name)
                    break
            acl_desc = (tw[2] + " " + tw[3] + " " + tw[4] + " " + tw[5]).title()
            acl_ver = random.choice(["ipv4", "ipv6", "agnostic"])
            acl_rules = get_rules()
            newacls[org_id].append({"aclId": a + first_db_record, "name": acl_name, "description": acl_desc,
                                    "ipVersion": acl_ver, "rules": acl_rules, "versionNum": 1, "createdAt": isotime,
                                    "updatedAt": isotime})
        newpolicies[org_id] = []
        for b in range(0, int(policies)):
            while True:
                tw = random_words(6)
                pol_name = (tw[0] + " " + tw[1]).title()
                if pol_name not in used_names:
                    used_names.append(pol_name)
                    break
            pol_desc = (tw[2] + " " + tw[3] + " " + tw[4] + " " + tw[5]).title()
            pol_catch = random.choice(["global", "deny all", "allow all"])
            pol_acls = []
            apply_acl = random.choice([True, False])
            if apply_acl:
                # Attach up to 2-9 existing ACLs; duplicate draws are skipped.
                for x in range(0, randint(2, 9)):
                    newpol = random.choice(newacls[org_id])["aclId"]
                    if newpol not in pol_acls:
                        pol_acls.append(newpol)
            pol_src = random.choice(newtags[org_id])["groupId"]
            pol_dst = random.choice(newtags[org_id])["groupId"]
            newpolicies[org_id].append({"name": pol_name, "description": pol_desc, "monitorModeEnabled": False,
                                        "versionNum": 1, "catchAllRule": pol_catch, "bindingEnabled": True,
                                        "aclIds": pol_acls, "updatedAt": isotime, "srcGroupId": pol_src,
                                        "dstGroupId": pol_dst})
    write_file("orgs.json", json.dumps(neworgs, indent=4))
    write_file("groups.json", json.dumps(newtags, indent=4))
    write_file("acls.json", json.dumps(newacls, indent=4))
    write_file("bindings.json", json.dumps(newpolicies, indent=4))
@csrf_exempt
def parse_url(request):
log = []
baseurl = "/".join(request.build_absolute_uri().split("/")[:3])
p = request.path.replace("/meraki/api/v1/organizations/", "").replace("/meraki/api/v1/organizations", "")
arr = p.split("/")
isotime = datetime.datetime.now().isoformat()
org_id = arr[0]
fixedvals = {"organizations": {"id": "{{id-num:18}}", "url": "{{url}}/o/{{id-mix:7}}/manage/organization/overview"},
"groups": {"groupId": "{{length}}", "versionNum": 1, "createdAt": isotime, "updatedAt": isotime},
"acls": {"aclId": "{{length}}", "versionNum": 1, "createdAt": isotime, "updatedAt": isotime},
"bindings": {"versionNum": 1, "updatedAt": isotime}}
postvals = {"organizations": {"name": None},
"groups": {"name": None, "description": None, "value": None, "networkObjectId": None},
"acls": {"name": None, "description": None, "ipVersion": None, "rules": None},
"bindings": {"srcGroupId": None, "dstGroupId": None, "name": None, "description": None, "aclIds": None,
"catchAllRule": None, "bindingEnabled": None, "monitorModeEnabled": None}}
info = {"organizations": {"id": "id", "unique": [{"id": []}]},
"groups": {"id": "groupId", "unique": [{"value": [], "groupId": []}]},
"acls": {"id": "aclId", "unique": [{"name": [], "aclId": []}]},
"bindings": {"none_as_delete_key": "aclIds", "put_unique": ["srcGroupId", "dstGroupId"],
"unique_results": []}}
append_log(log, "dashboard_simulator::", request.path)
ret = None
try:
if len(arr) == 1:
file_type = "orgs.json"
full_dataset = []
dataset = read_json_file(file_type, log)
if arr[0] == "":
elem_id = None
else:
elem_id = arr[0]
endpoint = "organizations"
else:
file_type = arr[2] + ".json"
full_dataset = read_json_file(file_type, log)
dataset = full_dataset.pop(org_id, [])
if len(arr) == 3 or request.method == "POST":
elem_id = None
else:
elem_id = arr[3]
endpoint = arr[2]
if endpoint == "bindings" and (request.method == "POST" or request.method == "DELETE"):
append_log(log, "dashboard_monitor::bindings::Unsupported Method")
db_log("dashboard_simulator", log)
return HttpResponseBadRequest("Unsupported Method")
if request.body:
jd = json.loads(request.body)
else:
jd = None
updated_data, ret = handle_request(request.method, jd, baseurl, endpoint, elem_id, dataset, fixedvals, postvals,
info)
if updated_data:
if isinstance(full_dataset, list):
write_file(file_type, json.dumps(full_dataset + [updated_data], indent=4))
else:
full_dataset[org_id] = updated_data
write_file(file_type, json.dumps(full_dataset, indent=4))
except Exception as e:
append_log(log, "dashboard_simulator::Exception.", e)
db_log("dashboard_simulator", log)
return ret
|
[
"random.randint",
"json.loads",
"django.http.HttpResponseBadRequest",
"random.choice",
"json.dumps",
"time.time",
"datetime.datetime.now",
"time.sleep",
"scripts.dblog.append_log",
"random.seed",
"os.path.join",
"scripts.dblog.db_log"
] |
[((1597, 1610), 'random.randint', 'randint', (['(2)', '(6)'], {}), '(2, 6)\n', (1604, 1610), False, 'from random import randint\n'), ((3411, 3517), 'random.seed', 'random.seed', (['(((t & 4278190080) >> 24) + ((t & 16711680) >> 8) + ((t & 65280) << 8) + ((\n t & 255) << 24))'], {}), '(((t & 4278190080) >> 24) + ((t & 16711680) >> 8) + ((t & 65280) <<\n 8) + ((t & 255) << 24))\n', (3422, 3517), False, 'import random\n'), ((9647, 9701), 'scripts.dblog.append_log', 'append_log', (['log', '"""dashboard_simulator::"""', 'request.path'], {}), "(log, 'dashboard_simulator::', request.path)\n", (9657, 9701), False, 'from scripts.dblog import append_log, db_log\n'), ((11333, 11367), 'scripts.dblog.db_log', 'db_log', (['"""dashboard_simulator"""', 'log'], {}), "('dashboard_simulator', log)\n", (11339, 11367), False, 'from scripts.dblog import append_log, db_log\n'), ((1202, 1220), 'random.choice', 'random.choice', (['lst'], {}), '(lst)\n', (1215, 1220), False, 'import random\n'), ((1703, 1730), 'random.choice', 'random.choice', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (1716, 1730), False, 'import random\n'), ((2263, 2290), 'random.choice', 'random.choice', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (2276, 2290), False, 'import random\n'), ((2829, 2861), 'random.choice', 'random.choice', (["['allow', 'deny']"], {}), "(['allow', 'deny'])\n", (2842, 2861), False, 'import random\n'), ((7836, 7865), 'json.dumps', 'json.dumps', (['neworgs'], {'indent': '(4)'}), '(neworgs, indent=4)\n', (7846, 7865), False, 'import json\n'), ((7897, 7926), 'json.dumps', 'json.dumps', (['newtags'], {'indent': '(4)'}), '(newtags, indent=4)\n', (7907, 7926), False, 'import json\n'), ((7956, 7985), 'json.dumps', 'json.dumps', (['newacls'], {'indent': '(4)'}), '(newacls, indent=4)\n', (7966, 7985), False, 'import json\n'), ((8019, 8052), 'json.dumps', 'json.dumps', (['newpolicies'], {'indent': '(4)'}), '(newpolicies, indent=4)\n', (8029, 8052), False, 'import json\n'), ((370, 407), 'os.path.join', 
'os.path.join', (['"""scripts"""', 'out_filename'], {}), "('scripts', out_filename)\n", (382, 407), False, 'import os\n'), ((503, 539), 'os.path.join', 'os.path.join', (['"""scripts"""', 'in_filename'], {}), "('scripts', in_filename)\n", (515, 539), False, 'import os\n'), ((650, 686), 'os.path.join', 'os.path.join', (['"""scripts"""', 'in_filename'], {}), "('scripts', in_filename)\n", (662, 686), False, 'import os\n'), ((885, 898), 'json.loads', 'json.loads', (['r'], {}), '(r)\n', (895, 898), False, 'import json\n'), ((1338, 1358), 'random.choice', 'random.choice', (['chars'], {}), '(chars)\n', (1351, 1358), False, 'import random\n'), ((1506, 1526), 'random.choice', 'random.choice', (['chars'], {}), '(chars)\n', (1519, 1526), False, 'import random\n'), ((1802, 1816), 'random.randint', 'randint', (['(2)', '(10)'], {}), '(2, 10)\n', (1809, 1816), False, 'from random import randint\n'), ((2362, 2376), 'random.randint', 'randint', (['(2)', '(10)'], {}), '(2, 10)\n', (2369, 2376), False, 'from random import randint\n'), ((2916, 2945), 'random.choice', 'random.choice', (["['tcp', 'udp']"], {}), "(['tcp', 'udp'])\n", (2929, 2945), False, 'import random\n'), ((2991, 3035), 'random.choice', 'random.choice', (["['any', 'tcp', 'udp', 'icmp']"], {}), "(['any', 'tcp', 'udp', 'icmp'])\n", (3004, 3035), False, 'import random\n'), ((3385, 3396), 'time.time', 'time.time', ([], {}), '()\n', (3394, 3396), False, 'import time\n'), ((3663, 3686), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3684, 3686), False, 'import datetime\n'), ((6134, 6177), 'random.choice', 'random.choice', (["['ipv4', 'ipv6', 'agnostic']"], {}), "(['ipv4', 'ipv6', 'agnostic'])\n", (6147, 6177), False, 'import random\n'), ((6920, 6970), 'random.choice', 'random.choice', (["['global', 'deny all', 'allow all']"], {}), "(['global', 'deny all', 'allow all'])\n", (6933, 6970), False, 'import random\n'), ((7021, 7049), 'random.choice', 'random.choice', (['[True, False]'], {}), '([True, 
False])\n', (7034, 7049), False, 'import random\n'), ((8322, 8345), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8343, 8345), False, 'import datetime\n'), ((10700, 10724), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (10710, 10724), False, 'import json\n'), ((11274, 11327), 'scripts.dblog.append_log', 'append_log', (['log', '"""dashboard_simulator::Exception."""', 'e'], {}), "(log, 'dashboard_simulator::Exception.', e)\n", (11284, 11327), False, 'from scripts.dblog import append_log, db_log\n'), ((993, 1006), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1003, 1006), False, 'import time\n'), ((1019, 1088), 'scripts.dblog.append_log', 'append_log', (['log', '"""dashboard_monitor::read_json_file::"""', 'fail_count', 'e'], {}), "(log, 'dashboard_monitor::read_json_file::', fail_count, e)\n", (1029, 1088), False, 'from scripts.dblog import append_log, db_log\n'), ((1996, 2013), 'random.randint', 'randint', (['(1)', '(65500)'], {}), '(1, 65500)\n', (2003, 2013), False, 'from random import randint\n'), ((2034, 2057), 'random.randint', 'randint', (['s_start', '(65535)'], {}), '(s_start, 65535)\n', (2041, 2057), False, 'from random import randint\n'), ((2556, 2573), 'random.randint', 'randint', (['(1)', '(65500)'], {}), '(1, 65500)\n', (2563, 2573), False, 'from random import randint\n'), ((2594, 2617), 'random.randint', 'randint', (['d_start', '(65535)'], {}), '(d_start, 65535)\n', (2601, 2617), False, 'from random import randint\n'), ((5318, 5335), 'random.randint', 'randint', (['(3)', '(65529)'], {}), '(3, 65529)\n', (5325, 5335), False, 'from random import randint\n'), ((7312, 7342), 'random.choice', 'random.choice', (['newtags[org_id]'], {}), '(newtags[org_id])\n', (7325, 7342), False, 'import random\n'), ((7376, 7406), 'random.choice', 'random.choice', (['newtags[org_id]'], {}), '(newtags[org_id])\n', (7389, 7406), False, 'import random\n'), ((10471, 10537), 'scripts.dblog.append_log', 'append_log', (['log', 
'"""dashboard_monitor::bindings::Unsupported Method"""'], {}), "(log, 'dashboard_monitor::bindings::Unsupported Method')\n", (10481, 10537), False, 'from scripts.dblog import append_log, db_log\n'), ((10554, 10588), 'scripts.dblog.db_log', 'db_log', (['"""dashboard_simulator"""', 'log'], {}), "('dashboard_simulator', log)\n", (10560, 10588), False, 'from scripts.dblog import append_log, db_log\n'), ((10612, 10656), 'django.http.HttpResponseBadRequest', 'HttpResponseBadRequest', (['"""Unsupported Method"""'], {}), "('Unsupported Method')\n", (10634, 10656), False, 'from django.http import HttpResponseBadRequest\n'), ((2157, 2174), 'random.randint', 'randint', (['(1)', '(65500)'], {}), '(1, 65500)\n', (2164, 2174), False, 'from random import randint\n'), ((2717, 2734), 'random.randint', 'randint', (['(1)', '(65500)'], {}), '(1, 65500)\n', (2724, 2734), False, 'from random import randint\n'), ((7110, 7123), 'random.randint', 'randint', (['(2)', '(9)'], {}), '(2, 9)\n', (7117, 7123), False, 'from random import randint\n'), ((11042, 11093), 'json.dumps', 'json.dumps', (['(full_dataset + [updated_data])'], {'indent': '(4)'}), '(full_dataset + [updated_data], indent=4)\n', (11052, 11093), False, 'import json\n'), ((11203, 11237), 'json.dumps', 'json.dumps', (['full_dataset'], {'indent': '(4)'}), '(full_dataset, indent=4)\n', (11213, 11237), False, 'import json\n'), ((1890, 1907), 'random.randint', 'randint', (['(1)', '(65535)'], {}), '(1, 65535)\n', (1897, 1907), False, 'from random import randint\n'), ((2450, 2467), 'random.randint', 'randint', (['(1)', '(65535)'], {}), '(1, 65535)\n', (2457, 2467), False, 'from random import randint\n'), ((7155, 7185), 'random.choice', 'random.choice', (['newacls[org_id]'], {}), '(newacls[org_id])\n', (7168, 7185), False, 'import random\n')]
|
"""Generator for modifications and modifications lists.
The objects in this module are generator used to generate the `Mod`
and the `ModList` based on precise parametrization.
The `ModOption`-derived classes are used to generates 1 option (e.g. an
integer, a string, ...) based on a type of option (e.g. a sequence, a range,
...).
The `ModGenerator` contains multiple `ModOption` and generates 1 `Mod`
object by enumerating all the different combination of options.
The `ModListGenerator` contains multiple `ModGenerator` and generates 1
`ModList` object by enumerating all the different combination of mods.
"""
import abc
import importlib
import os
import inflection
from fragscapy.modlist import ModList
from fragscapy.modifications.mod import Mod
# Package where the modifications are stored (and loaded from)
MOD_PACKAGE = 'fragscapy.modifications'
# Directory where the modifications are stored
MOD_DIR = 'modifications'
class ModGeneratorError(ValueError):
"""Error with the mods generation."""
class ModOption(abc.ABC):
"""Abstract generator for an option in a modification.
This class can generates a single option in a mod (i.e. 1 of the
parameter passed to the constructor of the mod). Any subclass should
implement a `.get_option(i)` and a `.nb_options()` methods. It can then
be used to generate 1 instance of the option (based on the parameter
given on init).
It can be used as a generator or as list-like object.
Args:
mod_name: The name of the modification (used only for errors
messages).
opt_name: The name of this option (used only for errors messages).
Attributes:
mod_name: The name of the modification.
opt_name: The name of this option.
Examples:
>>> for opt in mod_option: # Used in for-loops
... print(opt)
>>> n = len(mod_option) # Size = number of different instances
>>> opt = mod_option[n-1] # Retrieve last instance
"""
def __init__(self, mod_name, opt_name):
self.opt_name = opt_name
self.mod_name = mod_name
@abc.abstractmethod
def get_option(self, i):
"""Returns the i-th instance of the option.
The result must be deterministic, constant for a given `i`. E.g.
asking for `.get_option(10)` must always output the same result.
Args:
i: the number of the configuration.
Raises:
ModGeneratorError: `i` is out of bounds (i<0 or i>=len).
Returns:
The i-th option.
"""
raise NotImplementedError
def inbound_or_raise(self, i):
"""Raises a `ModGeneratorError` if is out of bound (i<0 or i>=len)."""
if not isinstance(i, int):
self._raise_error("Index is not an integer, got '{}'".format(i))
if i < 0 or i >= self.nb_options():
self._raise_error(
"Index should be between 0 and {}, got '{}'".format(
self.nb_options()-1, i
)
)
@abc.abstractmethod
def nb_options(self):
"""Returns the number of possible options for this generator."""
raise NotImplementedError
def _raise_error(self, msg):
"""Raises a `ModGeneratorError` along with indication of the option and
the name of the mod."""
raise ModGeneratorError("Error with option '{}' of mod '{}': {}".format(
self.opt_name, self.mod_name, msg))
def __len__(self):
return self.nb_options()
def __getitem__(self, i):
return self.get_option(i)
def __iter__(self):
return (self.get_option(i) for i in range(self.nb_options()))
def __str__(self):
return "{}".format(self.opt_name)
def __repr__(self):
return "{}".format(self.__class__.__name__)
class ModOptionRange(ModOption):
"""Modification option generator for range of integer.
Its behavior is the same as the built-in python function `range`.
The argument is a list of 1, 2 or 3 integers (positives or negatives are
supported). If 1 integer is passed, the range goes from 0 to arg[0] with a
step of 1. If 2 integers are passed, the range goes from arg[0] to arg[1]
with a step of 1. If 3 integers are passed, the range goes from arg[0] to
arg[1] with a step of arg[2].
Args:
mod_name: The name of the mod (used only for error messages).
args: A list of 1, 2 or 3 integers.
Attributes:
mod_name: The name of the modification.
opt_name: The name of this option ('range').
start: The start of the range.
stop: The stop of the range.
step: The step if the range.
Raises:
ModGeneratorError: See the message for details.
Examples:
>>> list(ModOptionRange("foo", [1]))
[0, 1]
>>> list(ModOptionRange("foo", [5,8]))
[5, 6, 7, 8]
>>> list(ModOptionRange("foo", [-10,-1]))
[-10, -9, -8, -7, -6, -5, -4, -3, -2, -1]
>>> list(ModOptionRange("foo", [-10,-1, 3]))
[-10, -7, -4, -1]
"""
def __init__(self, mod_name, args):
super(ModOptionRange, self).__init__(mod_name, "range")
# Parsing of options
self.start = 0
self.stop = None
self.step = 1
if not args:
self._raise_error("Too few arguments, got none")
elif len(args) == 1:
self.stop = self._int(args, 0)
elif len(args) == 2:
self.start = self._int(args, 0)
self.stop = self._int(args, 1)
elif len(args) == 3:
self.start = self._int(args, 0)
self.stop = self._int(args, 1)
self.step = self._int(args, 2)
else:
self._raise_error("Too much arguments, got '{}'".format(args))
# Checking validity of options
if self.step == 0:
self._raise_error("'step' can't be 0")
if self.step > 0 and self.start > self.stop:
self._raise_error(
"'start' ('{}') can't be bigger than 'stop' ('{}')".format(
self.start, self.stop
)
)
if self.step < 0 and self.start < self.stop:
self._raise_error(
"'start' ('{}') can't be smaller than 'stop' ('{}')".format(
self.start, self.stop
)
)
def _int(self, l, i):
"""Small function to cast the i-th value of l to an integer or raises
a ModGeneratorError if not possible."""
try:
return int(l[i])
except ValueError:
self._raise_error(
"Can't cast argument n°{} to int, got '{}'".format(i, l[0])
)
def get_option(self, i):
"""See `ModOption.get_option`."""
self.inbound_or_raise(i)
return self.start + self.step * i
def nb_options(self):
"""See `ModOption.nb_options`."""
return (self.stop - self.start)//self.step + 1
def __str__(self):
return "range {} {} {}".format(self.start, self.stop, self.step)
def __repr__(self):
return "ModOptionRange({}, [{}, {}, {}])".format(
self.mod_name, self.start, self.stop, self.step
)
class ModOptionSequenceStr(ModOption):
"""Modification option generator for a sequence of strings.
The argument is a list of strings which will be the different
values used in the same order.
Args:
mod_name: The name of the mod (used only for error messages).
args: The list of arguments to parametrize the generator.
Attributes:
mod_name: The name of the modification.
opt_name: The name of this option ('seq_str').
seq: The sequence of strings.
Raises:
ModGeneratorError: See the message for details.
Examples:
>>> list(ModOptionSequenceStr("foo", ["a", "b", "c", "d"]))
['a', 'b', 'c', 'd']
"""
def __init__(self, mod_name, args):
super(ModOptionSequenceStr, self).__init__(mod_name, "seq_str")
# Verify there is at least 1 element
if not args:
self._raise_error("No string in sequence")
self.seq = args
def get_option(self, i):
"""See `ModOption.get_option`."""
self.inbound_or_raise(i)
return self.seq[i]
def nb_options(self):
"""See `Option.nb_options`."""
return len(self.seq)
def __str__(self):
return "seq_str {}".format(" ".join(self.seq))
def __repr__(self):
return "ModOptionSequenceStr({}, {})".format(self.mod_name, self.seq)
class ModOptionSequenceInt(ModOption):
"""Modification option generator for a sequence of integers.
The argument is a list of integers which will be the different
values used in the same order.
Args:
mod_name: The name of the mod (used only for error messages).
args: A list of integers.
Attributes:
mod_name: The name of the modification.
opt_name: The name of this option ('seq_int').
seq: The sequence of integers.
Raises:
ModGeneratorError: See the message for details.
Examples:
>>> list(ModOptionSequenceInt("foo", [1, 10, 2, 20, 3, 30]))
[1, 10, 2, 20, 3, 30]
"""
def __init__(self, mod_name, args):
super(ModOptionSequenceInt, self).__init__(mod_name, "seq_int")
# Verify there is at least 1 element
if not args:
self._raise_error("No number in sequence")
self.seq = list()
for arg in args:
try:
self.seq.append(int(arg))
except ValueError:
self._raise_error("Non-int argument, got '{}'".format(arg))
def get_option(self, i):
"""See `ModOption.get_option`."""
self.inbound_or_raise(i)
return self.seq[i]
def nb_options(self):
"""See `ModOption.nb_options`."""
return len(self.seq)
def __str__(self):
return "seq_int {}".format(" ".join(str(n) for n in self.seq))
def __repr__(self):
return "ModOptionSequenceInt({}, {})".format(self.mod_name, self.seq)
class ModOptionSequenceFloat(ModOption):
"""Modification option generator for a sequence of floats.
The argument is a list of floats which will be the different
values used in the same order.
Args:
mod_name: The name of the mod (used only for error messages).
args: A list of floats.
Attributes:
mod_name: The name of the modification.
opt_name: The name of this option ('seq_float').
seq: The sequence of floats.
Raises:
ModGeneratorError: See the message for details.
Examples:
>>> list(ModOptionSequenceFloat("foo", [1, 10.5, 2.4, 20, 3, 30.48]))
[1.0, 10.5, 2.4, 20, 3, 30.48]
"""
def __init__(self, mod_name, args):
super(ModOptionSequenceFloat, self).__init__(mod_name, "seq_float")
# Verify there is at least 1 element
if not args:
self._raise_error("No number in sequence")
self.seq = list()
for arg in args:
try:
self.seq.append(float(arg))
except ValueError:
self._raise_error("Non-float argument, got '{}'".format(arg))
def get_option(self, i):
"""See `ModOption.get_option`."""
self.inbound_or_raise(i)
return self.seq[i]
def nb_options(self):
"""See `ModOption.nb_options`."""
return len(self.seq)
def __str__(self):
return "seq_float {}".format(" ".join(str(n) for n in self.seq))
def __repr__(self):
return "ModOptionSequenceFloat({}, {})".format(self.mod_name, self.seq)
class ModOptionStr(ModOption):
"""Modification option generator with 1 possibility: a string.
The args is a list (for consistency with other mod options) with a single
element: the string.
Args:
mod_name: The name of the mod (used only for error messages).
args: A list with 1 string.
Attributes:
mod_name: The name of the modification.
opt_name: The name of this option ('str').
s: The string.
Raises:
ModGeneratorError: See the message for details.
Examples:
>>> list(ModOptionStr("foo", ["bar"]))
["bar"]
"""
def __init__(self, mod_name, args):
super(ModOptionStr, self).__init__(mod_name, "str")
# Verify there is exactly 1 argument
if len(args) != 1:
self._raise_error(
"There should be only 1 element, got '{}'".format(args)
)
self.s = args[0]
def get_option(self, i):
"""See `ModOption.get_option`."""
self.inbound_or_raise(i)
return self.s
def nb_options(self):
"""Returns always 1 because there is ony 1 instance possible. See
`ModOption.nb_options` for more info."""
return 1
def __str__(self):
return "str {}".format(self.s)
def __repr__(self):
return "ModOptionStr({}, [{}])".format(self.mod_name, self.s)
class ModOptionInt(ModOption):
"""Modification option generator with 1 possibility: an int.
The args is a list (for consistency with other mod options) with a single
element: the integer.
Args:
mod_name: The name of the mod (used only for error messages).
args: A list with 1 int.
Attributes:
mod_name: The name of the modification.
opt_name: The name of this option ('int').
n: The integer.
Raises:
ModGeneratorError: See the message for details.
Examples:
>>> list(ModOptionInt("foo", [18]))
[18]
"""
def __init__(self, mod_name, args):
super(ModOptionInt, self).__init__(mod_name, "int")
# Verify there is exactly 1 argument
if len(args) != 1:
self._raise_error(
"There should be only 1 element, got '{}'".format(args)
)
try:
self.n = int(args[0])
except ValueError:
self._raise_error("Can't cast '{}' to an integer".format(args[0]))
def get_option(self, i):
"""See `ModOption.get_option`."""
self.inbound_or_raise(i)
return self.n
def nb_options(self):
"""Returns always 1 because there is ony 1 instance possible. See
`ModOption.nb_options` for more info."""
return 1
def __str__(self):
return "int {}".format(self.n)
def __repr__(self):
return "ModOptionInt({}, [{}])".format(self.mod_name, self.n)
class ModOptionFloat(ModOption):
"""Modification option generator with 1 possibility: a float.
The args is a list (for consistency with other mod options) with a single
element: the float.
Args:
mod_name: The name of the mod (used only for error messages).
args: A list with 1 float.
Attributes:
mod_name: The name of the modification.
opt_name: The name of this option ('float').
n: The integer.
Raises:
ModGeneratorError: See the message for details.
Examples:
>>> list(ModOptionFloat("foo", [18]))
[18.0]
>>> list(ModOptionFloat("foo", [42.58]))
[42.58]
"""
def __init__(self, mod_name, args):
super(ModOptionFloat, self).__init__(mod_name, "float")
# Verify there is exactly 1 argument
if len(args) != 1:
self._raise_error(
"There should be only 1 element, got '{}'".format(args)
)
try:
self.n = float(args[0])
except ValueError:
self._raise_error("Can't cast '{}' to a float".format(args[0]))
def get_option(self, i):
"""See `ModOption.get_option`."""
self.inbound_or_raise(i)
return self.n
def nb_options(self):
"""Returns always 1 because there is ony 1 instance possible. See
`ModOption.nb_options` for more info."""
return 1
def __str__(self):
return "float {}".format(self.n)
def __repr__(self):
return "ModOptionFloat({}, [{}])".format(self.mod_name, self.n)
class ModOptionNone(ModOption):
"""Modification option generator with 1 possibility: None.
The args is a list (for consistency with other mod options) with no
elements.
Args:
mod_name: The name of the mod (used only for error messages).
args: A list with 0 elements.
Attributes:
mod_name: The name of the modification.
opt_name: The name of this option ('none').
Raises:
ModGeneratorError: See the message for details.
Examples:
>>> list(ModOptionNone("foo", []))
[None]
"""
def __init__(self, mod_name, args):
super(ModOptionNone, self).__init__(mod_name, "none")
# Verify there is exactly 1 argument
if args:
self._raise_error(
"There should be no element, got '{}'".format(args)
)
def get_option(self, i):
"""See `ModOption.get_option`."""
self.inbound_or_raise(i)
def nb_options(self):
"""Returns always 1 because there is ony 1 instance possible. See
`ModOption.nb_options` for more info."""
return 1
def __str__(self):
return "none"
def __repr__(self):
return "ModOptionNone({}, [])".format(self.mod_name)
class ModGenerator(object):
"""Generator for a modification.
It can also generate 'None' instead of a modification. This means that the
modification should not be used (e.g. not included in a modlist). This
"possibility" makes the length of the generator 1 bigger than what could be
expected.
For dynamic and evolution purposes, the `Mod` object is imported based on
the `mod_name` given. It can then be used to generate all the possible
mods with all the possible combinations for the options, as described in
`mod_opts`.
Args:
mod_name: The name of the modification (for importing the correct mod
and improve error messages).
mod_opts: A list with the options to use to build `ModOption`
objects.
optional: True if the mod is optional (the modlist can be generated
without this mod). Default is 'False'.
Attributes:
mod_name: The name of the modification (for importing the correct mod
and improve error messages).
optional: Is the mod optional.
Examples:
It can be used as a generator or as list-like object.
>>> for mod in ModGenerator("echo", ["seq_str foo bar"]):
... print(repr(mod))
None
Echo<string: foo>
Echo<string: bar>
>>> print(ModGenerator("ipv6_frag", ["range 1280 6000 50"])[50])
Ipv6Frag 3730
>>> len(ModGenerator("select", [0, 2, "seq_int 3 4 5", "range 7 20"]))
43
"""
def __init__(self, mod_name, mod_opts, optional=False):
self.mod_name = mod_name
self._mod = get_mod(mod_name)
self.optional = optional
self._mod_opts = list()
for opt in mod_opts:
# Find the right ModOption or default to Str or Int
if isinstance(opt, str):
opt_args = opt.split()
opt_type = opt_args[0]
if opt_type == "range":
self._mod_opts.append(
ModOptionRange(mod_name, opt_args[1:])
)
elif opt_type == "seq_str":
self._mod_opts.append(
ModOptionSequenceStr(mod_name, opt_args[1:])
)
elif opt_type == "seq_int":
self._mod_opts.append(
ModOptionSequenceInt(mod_name, opt_args[1:])
)
elif opt_type == "seq_float":
self._mod_opts.append(
ModOptionSequenceFloat(mod_name, opt_args[1:])
)
elif opt_type == "str":
self._mod_opts.append(
ModOptionStr(mod_name, opt_args[1:])
)
elif opt_type == "int":
self._mod_opts.append(
ModOptionInt(mod_name, opt_args[1:])
)
elif opt_type == "float":
self._mod_opts.append(
ModOptionFloat(mod_name, opt_args[1:])
)
elif opt_type == "none":
self._mod_opts.append(
ModOptionNone(mod_name, opt_args[1:])
)
else: # By default consider it as a string
self._mod_opts.append(
ModOptionStr(mod_name, [opt])
)
else: # By default consider it as an int
self._mod_opts.append(
ModOptionInt(mod_name, [opt])
)
def get_mod(self, i):
"""Returns the i-th instance of the mod.
The result must be deterministic, constant for a given `i`. E.g.
asking for `.get_mod(10)` must always output the same result.
Args:
i: the number of the configuration.
Raises:
ModGeneratorError: `i` is out of bounds (i<0 or i>=len).
Returns:
The i-th `Mod` instance or 'None' if the instance of the mod is
the one with no mod at all. By implementation 'None' is always
returned by the 0-th instance.
"""
# Check the correctness of i
if not isinstance(i, int):
raise ModGeneratorError(
"Index is not an integer, got '{}'".format(i)
)
if i < 0 or i >= self.nb_mods():
raise ModGeneratorError(
"Error with mod '{}': 'i' should be between 0 and {}, got '{}'"
.format(self.mod_name, self.nb_mods()-1, i)
)
# Handle the 'no-mod' possibility
if self.optional:
if i == 0:
return None
i -= 1
# Generate one of the other possibility
opts = list()
for opt in self._mod_opts:
opts.append(opt[i % len(opt)])
i -= i % len(opt)
i //= len(opt)
return self._mod(*opts)
def nb_mods(self):
"""Returns the number of different mods possible.
It is basically the multiplication of the length of the different
`ModOption` it is composed of. And 1 more possibility for the
'no-mod' possibility.
"""
ret = 1
for opt in self._mod_opts:
ret *= len(opt)
if self.optional:
ret += 1 # The 'no-mod' possibility
return ret
def __getitem__(self, i):
return self.get_mod(i)
def __len__(self):
return self.nb_mods()
def __iter__(self):
return (self.get_mod(i) for i in range(self.nb_mods()))
def __str__(self):
return (
"{{ \n"
" \"mod_name\": \"{}\",\n"
" \"mod_opts\": [{}],\n"
" \"optional\": \"{}\"\n"
"}}"
).format(
self.mod_name,
", ".join("\""+str(opt)+"\"" for opt in self._mod_opts),
str(self.optional)
)
def __repr__(self):
return "ModGenerator({}, opts=[{}]{})".format(
self.mod_name,
", ".join(opt.opt_name for opt in self._mod_opts),
", optional=True" if self.optional else ""
)
class ModListGenerator(object):
"""Generator for a modification list.
The `ModList` object is created based on the specifications for each of
its mods as it come from the `Config` object.
It simply creates a `ModGenerator` for each of the defined mod, store
them and use them to generate 1 modlist instance (i.e. A ModList with
1 mod instance from the ModGenerator for each mod).
Args:
mods: A list of mods where each element is a dictionary containing the
key 'mod_name' with the name of the mod and the key 'mod_opts'
with the list of options to use to build the ModGenerator.
Examples:
>>> modlist_gen = ModListGenerator([
... {"mod_name": "ipv6_frag", "mod_opts": ["seq_str 1280 1500"]},
... {"mod_name": "echo", "mod_opts": ["seq_str foo bar fuz ball"]},
... {"mod_name": "select", "mod_opts": [1, 2, 3, 4, 5]}
... ])
>>> print(repr(modlist_gen))
ModListGenerator(mods=[ipv6_frag, echo, select])
>>> len(modlist_gen)
30
>>> modlist_gen[5]
ModList [
- Ipv6Frag<fragsize: 1500>
- Echo<string: fuz>
]
>>> modlist_gen[25]
ModList [
- Ipv6Frag<fragsize=1280>
- Echo<string=fuz>
- Select<sequence=[1, 2, 3, 4, 5]>
]
"""
def __init__(self, mods):
self._mod_generators = [
ModGenerator(mod['mod_name'], mod['mod_opts'], mod['optional'])
for mod in mods
]
def get_modlist(self, i):
"""Returns the i-th instance of the modlist.
The result must be deterministic, constant for a given `i`. E.g.
asking for `.get_modlist(10)` must always output the same result.
Args:
i: the number of the configuration.
Raises:
ModGeneratorError: `i` is out of bounds (i<0 or i>=len).
Returns:
The i-th `ModList` instance.
"""
if not isinstance(i, int):
raise ModGeneratorError(
"Index is not an integer, got '{}'".format(i)
)
if i < 0 or i >= self.nb_modlists():
raise ModGeneratorError(
"Index should be between 0 and {}, got '{}'"
.format(self.nb_modlists()-1, i)
)
modlist = ModList()
for mod_generator in self._mod_generators:
mod = mod_generator[i % len(mod_generator)]
if mod is not None:
modlist.append(mod)
i -= i % len(mod_generator)
i //= len(mod_generator)
return modlist
def nb_modlists(self):
"""Returns the number of different modlsits possible.
It is basically the multiplication of the length of the different
`ModGenerator` it is composed of.
"""
ret = 1
for mod_generator in self._mod_generators:
ret *= len(mod_generator)
return ret
    def __getitem__(self, i):
        # modlist_gen[i] delegates to get_modlist (same bounds checks).
        return self.get_modlist(i)
    def __len__(self):
        # len(modlist_gen) == total number of distinct modlists.
        return self.nb_modlists()
    def __iter__(self):
        # Lazily yield every possible modlist, in index order.
        return (self.get_modlist(i) for i in range(self.nb_modlists()))
    def __str__(self):
        # Bracketed, comma-separated dump; nested newlines are re-indented.
        return "[\n {}\n]".format(
            ",\n ".join(str(mod_gen).replace('\n', '\n ')
                        for mod_gen in self._mod_generators)
        )
    def __repr__(self):
        # Compact form listing only the mod names (see class doctest).
        return "ModListGenerator(mods=[{}])".format(
            ", ".join(mod_gen.mod_name for mod_gen in self._mod_generators)
        )
def get_all_mods():
    """Retrieves all the available mods using `importlib` and `os.listdir`.

    Scans the mods directory next to this file for importable `*.py`
    modules, skipping the package plumbing files.

    Returns:
        A list of python classes which are all the modifications found and
        that can be used. All the objects returned are subclass of `Mod`.
    """
    mods_path = os.path.join(os.path.dirname(__file__), MOD_DIR)
    found = []
    for filename in os.listdir(mods_path):
        if not filename.endswith('.py'):
            continue
        if filename in ('__init__.py', 'mod.py'):
            continue
        try:
            found.append(get_mod(filename[:-3]))
        except ImportError:
            # The mod could not be loaded or was not a subclass of Mod
            continue
    return found
def get_mod(mod_name):
    """Imports a mod from its name using `importlib`.

    Args:
        mod_name: The name of the mod (snake_case of CamelCase are accepted).

    Returns:
        The python class which corresponds to the modification.

    Raises:
        ImportError: The class was not found or it is not a subclass of `Mod`.

    Examples:
        >>> get_mod("DropOne")
        <class 'fragscapy.modifications.drop_one.DropOne'>
        >>> get_mod("drop_one")
        <class 'fragscapy.modifications.drop_one.DropOne'>
    """
    # Module path uses snake_case, class name uses CamelCase.
    module_path = "{}.{}".format(MOD_PACKAGE, inflection.underscore(mod_name))
    class_name = inflection.camelize(mod_name)
    module = importlib.import_module(module_path)
    try:
        mod_class = getattr(module, class_name)
    except AttributeError:  # There is no class named correctly
        raise ImportError(
            "No class named {} in module {}"
            .format(class_name, module_path)
        )
    if not issubclass(mod_class, Mod):
        raise ImportError(
            "{}.{} is not a subclass of `fragscapy.modifications.mod.Mod`"
            .format(module_path, class_name)
        )
    return mod_class
|
[
"inflection.underscore",
"fragscapy.modlist.ModList",
"importlib.import_module",
"os.path.dirname",
"os.path.join",
"inflection.camelize"
] |
[((27649, 27674), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (27664, 27674), False, 'import os\n'), ((28760, 28789), 'inflection.camelize', 'inflection.camelize', (['mod_name'], {}), '(mod_name)\n', (28779, 28789), False, 'import inflection\n'), ((28801, 28834), 'importlib.import_module', 'importlib.import_module', (['pkg_name'], {}), '(pkg_name)\n', (28824, 28834), False, 'import importlib\n'), ((26161, 26170), 'fragscapy.modlist.ModList', 'ModList', ([], {}), '()\n', (26168, 26170), False, 'from fragscapy.modlist import ModList\n'), ((27728, 27758), 'os.path.join', 'os.path.join', (['dirname', 'MOD_DIR'], {}), '(dirname, MOD_DIR)\n', (27740, 27758), False, 'import os\n'), ((28712, 28743), 'inflection.underscore', 'inflection.underscore', (['mod_name'], {}), '(mod_name)\n', (28733, 28743), False, 'import inflection\n')]
|
#Copyright (C) 2021 <NAME>, <NAME>, University of California, Berkeley
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import tensorflow as tf
import functools
from augmentation import _augment, _augment_deformnet
def _parse_function(example_proto):
    """Parse one serialized TFRecord example into an (image, label) pair.

    Both tensors are decoded from sparse features and reshaped to
    (depth, height, width, 1); the label is cast to int32.
    (TF1-style ops: VarLenFeature / parse_single_example.)
    """
    feature_spec = {
        "X": tf.VarLenFeature(tf.float32),
        "S": tf.VarLenFeature(tf.int64),
        "shape0": tf.FixedLenFeature((), tf.int64),
        "shape1": tf.FixedLenFeature((), tf.int64),
        "shape2": tf.FixedLenFeature((), tf.int64),
    }
    parsed = tf.parse_single_example(example_proto, feature_spec)
    # Dynamic volume shape (depth, height, width) plus a channel axis.
    dims = [tf.cast(parsed["shape%d" % axis], tf.int32) for axis in range(3)]
    volume_shape = tf.stack(dims + [1])
    img = tf.reshape(tf.sparse_tensor_to_dense(parsed["X"]), volume_shape)
    label = tf.reshape(tf.sparse_tensor_to_dense(parsed["S"]), volume_shape)
    return img, tf.cast(label, tf.int32)
def _parse_function_all(mode):
    """Return a parser for a single TFRecord field, selected by `mode`.

    Supported modes:
        'img'       -> float volume "X", reshaped to (depth, height, width, 1)
        'seg'       -> int64 volume "S", reshaped to (depth, height, width, 1)
        'mesh_<id>' -> float mesh "Y_<id>", reshaped to (num_nodes, 6)
        'transform' -> 4x4 float matrix
        'spacing'   -> length-3 float vector

    Any other mode raises ValueError when the returned parser is invoked.
    """
    def __parse(example_proto):
        shape_keys = ("shape0", "shape1", "shape2")
        def _dense_volume(key, dtype):
            # Decode a sparse 3-D volume plus its shape components into a
            # dense (depth, height, width, 1) tensor.
            spec = {key: tf.VarLenFeature(dtype)}
            spec.update({k: tf.FixedLenFeature((), tf.int64)
                         for k in shape_keys})
            parsed = tf.parse_single_example(example_proto, spec)
            dims = [tf.cast(parsed[k], tf.int32) for k in shape_keys]
            return tf.reshape(tf.sparse_tensor_to_dense(parsed[key]),
                              tf.stack(dims + [1]))
        def _dense_vector(key, new_shape):
            # Decode a sparse float feature and reshape it to a static shape.
            parsed = tf.parse_single_example(
                example_proto, {key: tf.VarLenFeature(tf.float32)})
            return tf.reshape(tf.sparse_tensor_to_dense(parsed[key]),
                              new_shape)
        if mode == 'img':
            return _dense_volume("X", tf.float32)
        if mode == 'seg':
            return _dense_volume("S", tf.int64)
        if 'mesh' in mode:
            # Meshes are flat float arrays of 6 values per node.
            key = "Y_" + mode.split('_')[-1]
            parsed = tf.parse_single_example(
                example_proto, {key: tf.VarLenFeature(tf.float32)})
            mesh = tf.sparse_tensor_to_dense(parsed[key])
            node_num = tf.cast(tf.shape(mesh)[0] / 6, tf.int32)
            return tf.reshape(mesh, tf.stack([node_num, 6]))
        if mode == 'transform':
            return _dense_vector("Transform", [4, 4])
        if mode == 'spacing':
            return _dense_vector("Spacing", [3])
        raise ValueError('invalid name')
    return __parse
def get_baseline_dataset(filenames, preproc_fn=functools.partial(_augment),
                         threads=5,
                         batch_size=1,
                         shuffle=True):
    """Build an infinitely-repeating tf.data pipeline of (image, label) pairs.

    Args:
        filenames: TFRecord file paths to read.
        preproc_fn: augmentation applied to each (img, label) pair.
        threads: parallelism for both the interleaved reads and the maps.
        batch_size: size of the output batches (also the prefetch depth).
        shuffle: whether to shuffle examples with a 384-element buffer.

    Returns:
        A batched, prefetching `tf.data.Dataset` that repeats forever.
    """
    # (Fixed: removed unused local `num_x = len(filenames)`.)
    # Read the TFRecord files in parallel, interleaving their records.
    files = tf.data.Dataset.from_tensor_slices(filenames)
    dataset = files.apply(tf.contrib.data.parallel_interleave(
        tf.data.TFRecordDataset, cycle_length=threads))
    # Decode then augment every example, taking advantage of multithreading.
    dataset = dataset.map(_parse_function, num_parallel_calls=threads)
    dataset = dataset.map(preproc_fn, num_parallel_calls=threads)
    if shuffle:
        dataset = dataset.shuffle(384)
    # It's necessary to repeat our data for all epochs.
    dataset = dataset.repeat().batch(batch_size)
    dataset = dataset.prefetch(buffer_size=batch_size)
    return dataset
def get_baseline_dataset_deformnet(filenames, preproc_fn=functools.partial(_augment_deformnet),
                                   threads=1,
                                   batch_size=0,
                                   mesh_ids=(2,),  # default is LV blood pool 2
                                   shuffle=True,
                                   if_seg=True,
                                   shuffle_buffer=10000,
                                   num_gcn_blocks=3):
    """Build a tf.data pipeline pairing input volumes with mesh targets.

    Args:
        filenames: TFRecord file paths to read.
        preproc_fn: augmentation applied to each (input, outputs) element.
        threads: parallelism for the interleaved TFRecord reads.
        batch_size: batch size; values <= 0 disable batching.
        mesh_ids: structure ids whose meshes become targets. (Fixed: the
            default was a mutable list `[2]`; now an equivalent tuple.)
        shuffle: whether to shuffle the *file list* before reading.
        if_seg: also emit the segmentation volume as the first target.
        shuffle_buffer: buffer size used when shuffling filenames.
        num_gcn_blocks: the mesh targets are replicated once per GCN block.

    Returns:
        A repeating `tf.data.Dataset` of (image, (targets, ...)) tuples.
    """
    # (Fixed: removed unused local `num_x = len(filenames)`.)
    files = tf.data.Dataset.from_tensor_slices(filenames)
    if shuffle:
        files = files.shuffle(shuffle_buffer)
    dataset = files.apply(tf.contrib.data.parallel_interleave(
        tf.data.TFRecordDataset, cycle_length=threads))
    # Decode the input volume and one mesh stream per requested id.
    dataset_input = dataset.map(_parse_function_all('img'))
    mesh_list = [dataset.map(_parse_function_all('mesh_' + str(i)))
                 for i in mesh_ids]
    # Every GCN block supervises the same meshes, so replicate the streams.
    out_list = mesh_list * num_gcn_blocks
    if if_seg:
        out_list = [dataset.map(_parse_function_all('seg'))] + out_list
    dataset_output = tf.data.Dataset.zip(tuple(out_list))
    dataset = tf.data.Dataset.zip((dataset_input, dataset_output))
    dataset = dataset.map(preproc_fn)
    dataset = dataset.repeat()
    if batch_size > 0:
        dataset = dataset.batch(batch_size, drop_remainder=True)
    return dataset
|
[
"functools.partial",
"tensorflow.sparse_tensor_to_dense",
"tensorflow.reshape",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.stack",
"tensorflow.parse_single_example",
"tensorflow.cast",
"tensorflow.contrib.data.parallel_interleave",
"tensorflow.shape",
"tensorflow.data.Dataset.zip",
"tensorflow.FixedLenFeature",
"tensorflow.VarLenFeature"
] |
[((1026, 1074), 'tensorflow.parse_single_example', 'tf.parse_single_example', (['example_proto', 'features'], {}), '(example_proto, features)\n', (1049, 1074), True, 'import tensorflow as tf\n'), ((1084, 1131), 'tensorflow.sparse_tensor_to_dense', 'tf.sparse_tensor_to_dense', (["parsed_features['X']"], {}), "(parsed_features['X'])\n", (1109, 1131), True, 'import tensorflow as tf\n'), ((1142, 1186), 'tensorflow.cast', 'tf.cast', (["parsed_features['shape0']", 'tf.int32'], {}), "(parsed_features['shape0'], tf.int32)\n", (1149, 1186), True, 'import tensorflow as tf\n'), ((1198, 1242), 'tensorflow.cast', 'tf.cast', (["parsed_features['shape1']", 'tf.int32'], {}), "(parsed_features['shape1'], tf.int32)\n", (1205, 1242), True, 'import tensorflow as tf\n'), ((1253, 1297), 'tensorflow.cast', 'tf.cast', (["parsed_features['shape2']", 'tf.int32'], {}), "(parsed_features['shape2'], tf.int32)\n", (1260, 1297), True, 'import tensorflow as tf\n'), ((1308, 1355), 'tensorflow.sparse_tensor_to_dense', 'tf.sparse_tensor_to_dense', (["parsed_features['S']"], {}), "(parsed_features['S'])\n", (1333, 1355), True, 'import tensorflow as tf\n'), ((1491, 1515), 'tensorflow.cast', 'tf.cast', (['label', 'tf.int32'], {}), '(label, tf.int32)\n', (1498, 1515), True, 'import tensorflow as tf\n'), ((4264, 4291), 'functools.partial', 'functools.partial', (['_augment'], {}), '(_augment)\n', (4281, 4291), False, 'import functools\n'), ((4506, 4551), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['filenames'], {}), '(filenames)\n', (4540, 4551), True, 'import tensorflow as tf\n'), ((5261, 5298), 'functools.partial', 'functools.partial', (['_augment_deformnet'], {}), '(_augment_deformnet)\n', (5278, 5298), False, 'import functools\n'), ((5717, 5762), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['filenames'], {}), '(filenames)\n', (5751, 5762), True, 'import tensorflow as tf\n'), ((6645, 6697), 'tensorflow.data.Dataset.zip', 
'tf.data.Dataset.zip', (['(dataset_input, dataset_output)'], {}), '((dataset_input, dataset_output))\n', (6664, 6697), True, 'import tensorflow as tf\n'), ((755, 783), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', (['tf.float32'], {}), '(tf.float32)\n', (771, 783), True, 'import tensorflow as tf\n'), ((804, 830), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', (['tf.int64'], {}), '(tf.int64)\n', (820, 830), True, 'import tensorflow as tf\n'), ((856, 888), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.int64'], {}), '((), tf.int64)\n', (874, 888), True, 'import tensorflow as tf\n'), ((914, 946), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.int64'], {}), '((), tf.int64)\n', (932, 946), True, 'import tensorflow as tf\n'), ((972, 1004), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.int64'], {}), '((), tf.int64)\n', (990, 1004), True, 'import tensorflow as tf\n'), ((1380, 1415), 'tensorflow.stack', 'tf.stack', (['[depth, height, width, 1]'], {}), '([depth, height, width, 1])\n', (1388, 1415), True, 'import tensorflow as tf\n'), ((1444, 1479), 'tensorflow.stack', 'tf.stack', (['[depth, height, width, 1]'], {}), '([depth, height, width, 1])\n', (1452, 1479), True, 'import tensorflow as tf\n'), ((4576, 4663), 'tensorflow.contrib.data.parallel_interleave', 'tf.contrib.data.parallel_interleave', (['tf.data.TFRecordDataset'], {'cycle_length': 'threads'}), '(tf.data.TFRecordDataset, cycle_length=\n threads)\n', (4611, 4663), True, 'import tensorflow as tf\n'), ((5851, 5938), 'tensorflow.contrib.data.parallel_interleave', 'tf.contrib.data.parallel_interleave', (['tf.data.TFRecordDataset'], {'cycle_length': 'threads'}), '(tf.data.TFRecordDataset, cycle_length=\n threads)\n', (5886, 5938), True, 'import tensorflow as tf\n'), ((1903, 1951), 'tensorflow.parse_single_example', 'tf.parse_single_example', (['example_proto', 'features'], {}), '(example_proto, features)\n', (1926, 1951), True, 'import tensorflow as tf\n'), ((1970, 
2017), 'tensorflow.sparse_tensor_to_dense', 'tf.sparse_tensor_to_dense', (["parsed_features['X']"], {}), "(parsed_features['X'])\n", (1995, 2017), True, 'import tensorflow as tf\n'), ((2038, 2082), 'tensorflow.cast', 'tf.cast', (["parsed_features['shape0']", 'tf.int32'], {}), "(parsed_features['shape0'], tf.int32)\n", (2045, 2082), True, 'import tensorflow as tf\n'), ((2104, 2148), 'tensorflow.cast', 'tf.cast', (["parsed_features['shape1']", 'tf.int32'], {}), "(parsed_features['shape1'], tf.int32)\n", (2111, 2148), True, 'import tensorflow as tf\n'), ((2169, 2213), 'tensorflow.cast', 'tf.cast', (["parsed_features['shape2']", 'tf.int32'], {}), "(parsed_features['shape2'], tf.int32)\n", (2176, 2213), True, 'import tensorflow as tf\n'), ((1653, 1681), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', (['tf.float32'], {}), '(tf.float32)\n', (1669, 1681), True, 'import tensorflow as tf\n'), ((1707, 1739), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.int64'], {}), '((), tf.int64)\n', (1725, 1739), True, 'import tensorflow as tf\n'), ((1765, 1797), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.int64'], {}), '((), tf.int64)\n', (1783, 1797), True, 'import tensorflow as tf\n'), ((1823, 1855), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.int64'], {}), '((), tf.int64)\n', (1841, 1855), True, 'import tensorflow as tf\n'), ((2248, 2283), 'tensorflow.stack', 'tf.stack', (['[depth, height, width, 1]'], {}), '([depth, height, width, 1])\n', (2256, 2283), True, 'import tensorflow as tf\n'), ((2610, 2658), 'tensorflow.parse_single_example', 'tf.parse_single_example', (['example_proto', 'features'], {}), '(example_proto, features)\n', (2633, 2658), True, 'import tensorflow as tf\n'), ((2677, 2724), 'tensorflow.sparse_tensor_to_dense', 'tf.sparse_tensor_to_dense', (["parsed_features['S']"], {}), "(parsed_features['S'])\n", (2702, 2724), True, 'import tensorflow as tf\n'), ((2745, 2789), 'tensorflow.cast', 'tf.cast', 
(["parsed_features['shape0']", 'tf.int32'], {}), "(parsed_features['shape0'], tf.int32)\n", (2752, 2789), True, 'import tensorflow as tf\n'), ((2811, 2855), 'tensorflow.cast', 'tf.cast', (["parsed_features['shape1']", 'tf.int32'], {}), "(parsed_features['shape1'], tf.int32)\n", (2818, 2855), True, 'import tensorflow as tf\n'), ((2876, 2920), 'tensorflow.cast', 'tf.cast', (["parsed_features['shape2']", 'tf.int32'], {}), "(parsed_features['shape2'], tf.int32)\n", (2883, 2920), True, 'import tensorflow as tf\n'), ((2362, 2388), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', (['tf.int64'], {}), '(tf.int64)\n', (2378, 2388), True, 'import tensorflow as tf\n'), ((2414, 2446), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.int64'], {}), '((), tf.int64)\n', (2432, 2446), True, 'import tensorflow as tf\n'), ((2472, 2504), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.int64'], {}), '((), tf.int64)\n', (2490, 2504), True, 'import tensorflow as tf\n'), ((2530, 2562), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.int64'], {}), '((), tf.int64)\n', (2548, 2562), True, 'import tensorflow as tf\n'), ((2955, 2990), 'tensorflow.stack', 'tf.stack', (['[depth, height, width, 1]'], {}), '([depth, height, width, 1])\n', (2963, 2990), True, 'import tensorflow as tf\n'), ((3198, 3246), 'tensorflow.parse_single_example', 'tf.parse_single_example', (['example_proto', 'features'], {}), '(example_proto, features)\n', (3221, 3246), True, 'import tensorflow as tf\n'), ((3266, 3324), 'tensorflow.sparse_tensor_to_dense', 'tf.sparse_tensor_to_dense', (["parsed_features['Y_' + mesh_id]"], {}), "(parsed_features['Y_' + mesh_id])\n", (3291, 3324), True, 'import tensorflow as tf\n'), ((3123, 3151), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', (['tf.float32'], {}), '(tf.float32)\n', (3139, 3151), True, 'import tensorflow as tf\n'), ((3422, 3445), 'tensorflow.stack', 'tf.stack', (['[node_num, 6]'], {}), '([node_num, 6])\n', (3430, 3445), True, 
'import tensorflow as tf\n'), ((3601, 3649), 'tensorflow.parse_single_example', 'tf.parse_single_example', (['example_proto', 'features'], {}), '(example_proto, features)\n', (3624, 3649), True, 'import tensorflow as tf\n'), ((3674, 3729), 'tensorflow.sparse_tensor_to_dense', 'tf.sparse_tensor_to_dense', (["parsed_features['Transform']"], {}), "(parsed_features['Transform'])\n", (3699, 3729), True, 'import tensorflow as tf\n'), ((3754, 3783), 'tensorflow.reshape', 'tf.reshape', (['transform', '[4, 4]'], {}), '(transform, [4, 4])\n', (3764, 3783), True, 'import tensorflow as tf\n'), ((3541, 3569), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', (['tf.float32'], {}), '(tf.float32)\n', (3557, 3569), True, 'import tensorflow as tf\n'), ((3938, 3986), 'tensorflow.parse_single_example', 'tf.parse_single_example', (['example_proto', 'features'], {}), '(example_proto, features)\n', (3961, 3986), True, 'import tensorflow as tf\n'), ((4009, 4062), 'tensorflow.sparse_tensor_to_dense', 'tf.sparse_tensor_to_dense', (["parsed_features['Spacing']"], {}), "(parsed_features['Spacing'])\n", (4034, 4062), True, 'import tensorflow as tf\n'), ((4085, 4109), 'tensorflow.reshape', 'tf.reshape', (['spacing', '[3]'], {}), '(spacing, [3])\n', (4095, 4109), True, 'import tensorflow as tf\n'), ((3355, 3369), 'tensorflow.shape', 'tf.shape', (['mesh'], {}), '(mesh)\n', (3363, 3369), True, 'import tensorflow as tf\n'), ((3878, 3906), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', (['tf.float32'], {}), '(tf.float32)\n', (3894, 3906), True, 'import tensorflow as tf\n')]
|
'''
Date: 2021-08-04 22:49:53
LastEditors: <NAME>,<EMAIL>
LastEditTime: 2021-09-18 12:46:46
FilePath: \Python\listcom.py
'''
import serial #导入模块
import serial.tools.list_ports
# Enumerate the serial ports available on this machine and print each one,
# or a notice when none exist.
port_list = list(serial.tools.list_ports.comports())
print(port_list)
if not port_list:  # idiomatic emptiness test (was: len(port_list) == 0)
    print('无可用串口')
else:
    # Iterate the ports directly instead of indexing via range(len(...)).
    for port in port_list:
        print(port)
|
[
"serial.tools.list_ports.comports"
] |
[((208, 242), 'serial.tools.list_ports.comports', 'serial.tools.list_ports.comports', ([], {}), '()\n', (240, 242), False, 'import serial\n')]
|
"""Implementation of TA 836 Record"""
from datetime import datetime, timedelta
from itertools import combinations
from typing import Tuple
from schwifty import BIC, IBAN
from swissdta.constants import ChargesRule, IdentificationBankAddress, IdentificationPurpose, FillSide, PaymentType
from swissdta.fields import AlphaNumeric, Amount, Currency, Date, Iban, Numeric
from swissdta.records.record import DTARecord
from swissdta.util import remove_whitespace, is_swiss_iban
class DTARecord836(DTARecord): # pylint: disable=too-many-instance-attributes
    """TA 836 Record implementation.
    Payments with an IBAN in Switzerland and abroad, in all currencies.
    This type of transaction can only be used if
    the beneficiary's account number corresponds
    to the IBAN standard for the country concerned.
    The constructor of this class should not accept record
    values. All fields should be set after initialization and all
    field attributes must use a subclass of `dta.fields.Field`.
    Attributes:
        reference: 11 characters transaction no. defined by the
            ordering party; must be unique within a data file. The
            first r characters sender id are added automatically.
        client_account: Account to be debited (Only IBAN
            is accepted, despite the fact that the
            standard accepts both with or without IBAN)
        value_date: The date at which the payment should be processed
        currency: The currency for the amount of the payment
        amount: The actual amount of the payment
        conversion_rate: Only indicated if previously agreed
            on the basis of the bank's foreign exchange rate.
            A maximum of 6 decimal places is permitted.
        client_address1: Ordering party's address (first 35 characters)
        client_address2: Ordering party's address (middle 35 characters)
        client_address3: Ordering party's address (last 35 characters)
        bank_address_type: Identification bank address,
            use ``IdentificationBankAddress`` for the values.
        bank_address1: Beneficiary's institution
            When option ``IdentificationBankAddress.BIC_ADDRESS`` or
            ``IdentificationBankAddress.SWIFTH_ADDRESS`` (``'A'``):
            8- or 11-digit BIC address (=SWIFT address)
            When option
            ``IdentificationBankAddress.BENEFICIARY_ADDRESS``:
            Name and address of the beneficiary's institution
            If Field 58 contains a CH or LI IBAN, no details on the
            financial institution are required. In this case, option
            ``IdentificationBankAddress.BENEFICIARY_ADDRESS`` (``'D'``)
            must be chosen in disc format and the address field
            completed with blanks.
        bank_address2: Beneficiary's institution
            When option ``IdentificationBankAddress.BIC_ADDRESS`` or
            ``IdentificationBankAddress.SWIFTH_ADDRESS`` (``'A'``):
            Must be blank and bank_address1 must be a 8- or 11-digit
            BIC address (=SWIFT address). When option
            ``IdentificationBankAddress.BENEFICIARY_ADDRESS``:
            Name and address of the beneficiary's institution
            If Field 58 contains a CH or LI IBAN, no details on the
            financial institution are required. In this case, option
            ``IdentificationBankAddress.BENEFICIARY_ADDRESS`` (``'D'``)
            must be chosen in disc format and the address field
            completed with blanks.
        recipient_iban: The beneficiary's IBAN
        recipient_name: Name of the beneficiary
        recipient_address1: Address of the beneficiary (first 35 characters)
        recipient_address2: Address of the beneficiary (last 35 characters)
        identification_purpose: Identification of purpose,
            use ``IdentificationPurpose`` for the values.
        purpose1: Purpose of the payment
            Structured reference number:
            1 line of 20 positions fixed (without blanks),
            commencing with 2-digit check-digit (PP), rest blank
            Unstructured, free text: first of
            up to 3 lines of 35 characters
        purpose2: Purpose of the payment
            Structured reference number: Must be blank
            Unstructured, free text: second of
            up to 3 lines of 35 characters
        purpose3: Purpose of the payment
            Structured reference number: Must be blank
            Unstructured, free text: third of
            up to 3 lines of 35 characters
        charges_rules: Rules for charges, use ``ChargesRule`` for the values
    """
    # Field descriptors (swissdta.fields): each enforces its fixed length,
    # fill/truncation behaviour and, where given, the allowed values.
    reference = AlphaNumeric(length=11, fillchar='0', fillside=FillSide.LEFT)
    client_account = Iban(length=24)
    value_date = Date()
    currency = Currency()
    amount = Amount(length=15)
    conversion_rate = Amount(length=12)
    client_address1 = AlphaNumeric(length=35, truncate=True)
    client_address2 = AlphaNumeric(length=35, truncate=True)
    client_address3 = AlphaNumeric(length=35, truncate=True)
    bank_address_type = AlphaNumeric(length=1, allowed_values=IdentificationBankAddress)
    bank_address1 = AlphaNumeric(length=35)
    bank_address2 = AlphaNumeric(length=35)
    recipient_iban = Iban(length=34)
    recipient_name = AlphaNumeric(length=35, truncate=True)
    recipient_address1 = AlphaNumeric(length=35, truncate=True)
    recipient_address2 = AlphaNumeric(length=35, truncate=True)
    identification_purpose = AlphaNumeric(length=1, allowed_values=IdentificationPurpose)
    purpose1 = AlphaNumeric(length=35)
    purpose2 = AlphaNumeric(length=35)
    purpose3 = AlphaNumeric(length=35)
    charges_rules = Numeric(length=1, allowed_values=ChargesRule)
    # Serialization layout: five CRLF-separated segments (01..05); each
    # trailing ``{padding:<N}`` blank-pads its segment to the fixed width.
    _template = (
        '01{header}{reference}{client_account}{value_date}{currency}{amount}{padding:<11}\r\n'
        '02{conversion_rate}{client_address1}{client_address2}{client_address3}{padding:<9}\r\n'
        '03{bank_address_type}{bank_address1}{bank_address2}{recipient_iban}{padding:<21}\r\n'
        '04{recipient_name}{recipient_address1}{recipient_address2}{padding:<21}\r\n'
        '05{identification_purpose}{purpose1}{purpose2}{purpose3}{charges_rules}{padding:<19}'
    )
    def __init__(self):
        # Mark the shared header (set up by DTARecord) as a TA 836 record.
        super().__init__()
        self.header.transaction_type = 836
    @property
    def client_address(self) -> Tuple[str, str, str]:
        """The 3 lines of the client address as a tuple of 3 strings."""
        return self.client_address1, self.client_address2, self.client_address3
    @client_address.setter
    def client_address(self, client_address: Tuple[str, str, str]) -> None:
        self.client_address1, self.client_address2, self.client_address3 = client_address
    @property
    def bank_address(self) -> Tuple[str, str]:
        """The 2 lines of the bank address as a tuple of 2 strings."""
        return self.bank_address1, self.bank_address2
    @bank_address.setter
    def bank_address(self, bank_address: Tuple[str, str]) -> None:
        self.bank_address1, self.bank_address2 = bank_address
    @property
    def recipient_address(self) -> Tuple[str, str]:
        """The 2 lines of the recipient address as a tuple of 2 strings."""
        return self.recipient_address1, self.recipient_address2
    @recipient_address.setter
    def recipient_address(self, recipient_address: Tuple[str, str]) -> None:
        self.recipient_address1, self.recipient_address2 = recipient_address
    @property
    def purpose(self) -> Tuple[str, str, str]:
        """The 3 lines of the purpose as a tuple of 3 strings."""
        return self.purpose1, self.purpose2, self.purpose3
    @purpose.setter
    def purpose(self, purpose: Tuple[str, str, str]) -> None:
        self.purpose1, self.purpose2, self.purpose3 = purpose
    def generate(self) -> str:
        """Generate a TA 836 record as a string.
        The returned value is a simple string. Make sure
        to encode it to the ISO Latincode 8859-1 format
        in accordance with the DTA Standard and Formats.
        Returns: A TA 836 record as a string.
        """
        return self._template.format(
            header=self.header.generate(),
            # First 5 positions must contain a valid DTA identification (sender id).
            # Remaining 11 positions must contain a transaction reference number.
            # The generation of the full (16x) reference from the valid DTA identification is done automatically here
            reference=f'{self.header.sender_id}{self.reference}',
            client_account=self.client_account,
            value_date=self.value_date,
            currency=self.currency,
            amount=self.amount,
            conversion_rate=self.conversion_rate,
            client_address1=self.client_address1,
            client_address2=self.client_address2,
            client_address3=self.client_address3,
            bank_address_type=self.bank_address_type,
            bank_address1=self.bank_address1,
            bank_address2=self.bank_address2,
            recipient_iban=self.recipient_iban,
            recipient_name=self.recipient_name,
            recipient_address1=self.recipient_address1,
            recipient_address2=self.recipient_address2,
            identification_purpose=self.identification_purpose,
            purpose1=self.purpose1,
            purpose2=self.purpose2,
            purpose3=self.purpose3,
            charges_rules=self.charges_rules,
            padding=''
        )
    def validate(self) -> None: # pylint: disable=too-complex, too-many-branches
        """Validate the field's value of the record."""
        super().validate()
        # --- header constraints specific to TA 836 ---
        if self.header.processing_date != '000000':
            self.header.add_error('processing_date', "NOT PERMITTED: header processing date must be '000000'.")
        if self.header.recipient_clearing.strip():
            self.header.add_error('recipient_clearing',
                                  "NOT ALLOWED: beneficiary's bank clearing number must be blank.")
        if self.header.transaction_type != '836':
            self.header.add_error('transaction_type', "INVALID: Transaction type must be TA 836.")
        if self.header.payment_type not in {str(payment_type.value) for payment_type in PaymentType}:
            self.header.add_error('payment_type', "INVALID: Payment type must be 0 or 1 TA 836.")
        if not remove_whitespace(self.reference):
            self.add_error('reference', "MISSING TRANSACTION NUMBER: Reference may not be blank.")
        # --- ordering party's account: must be a valid Swiss/Liechtenstein IBAN ---
        try:
            client_iban = IBAN(self.client_account, allow_invalid=False)
        except ValueError: # Will throw ValueError if it is not a valid IBAN
            self.add_error(
                'client_account',
                "IBAN INVALID: Client account must be a valid with a 21 digit Swiss IBAN (CH resp. LI) ."
            )
        else:
            if not is_swiss_iban(client_iban):
                self.add_error(
                    'client_account',
                    "IBAN INVALID: Client account must be a valid with a 21 digit Swiss IBAN (CH resp. LI) ."
                )
        # Bank clearing is at pos 5-9 in IBAN
        if self.client_account[4:9].lstrip('0') != self.header.client_clearing.strip():
            self.add_error('client_account',
                           "IID IN IBAN NOT IDENTICAL WITH BC-NO: IID in IBAN (pos. 5 to 9) must concur with the "
                           "ordering party's BC no.")
        # --- value date must fall in the [-10 days, +60 days] window ---
        now = datetime.now()
        ten_days_ago = now - timedelta(days=10)
        sixty_days_ahead = now + timedelta(days=60)
        try:
            value_date = datetime.strptime(self.value_date, Date.DATE_FORMAT)
        except ValueError:
            self.add_error('value_date', "INVALID: Must contain a valid date.")
        else:
            if value_date < ten_days_ago:
                self.add_error('value_date', "EXPIRED: value date may not be elapsed more than 10 calendar days.")
            elif value_date > sixty_days_ahead:
                self.add_error('value_date', "TOO FAR AHEAD: value date may not exceed the reading in date + 60 days.")
        # --- decimal-place limits: 2 for CHF, 3 for foreign currencies ---
        # NOTE(review): assumes the formatted amount always contains a ','
        # decimal separator; a value without one would raise IndexError here
        # -- confirm the Amount field's formatting guarantees this.
        decimal_places = len(self.amount.strip().split(',', maxsplit=1)[1])
        if self.currency == 'CHF' and decimal_places > 2:
            self.add_error('currency',
                           "MORE THAN 2 DECIMAL PLACES: Amount may not contain more than 2 decimal places.")
        elif self.currency != 'CHF' and decimal_places > 3:
            self.add_error(
                'currency',
                " MORE THAN 3 DECIMAL PLACES: Amount may not contain more than 3 decimal places (foreign currencies)."
            )
        if not any(self.client_address):
            self.add_error('client_address', "INCOMPLETE: Ordering party address, at least one line must exist.")
        # NOTE(review): compares the stored field value against the enum
        # member; confirm the AlphaNumeric descriptor yields the enum (not
        # its one-character string value), otherwise this branch can never
        # trigger.
        if self.bank_address_type == IdentificationBankAddress.SWIFT_ADDRESS:
            try:
                BIC(self.bank_address1).validate()
            except ValueError:
                self.add_error(
                    'bank_address_type',
                    f"INCORRECT FIELD IDENTIFICATION: bank address type {IdentificationBankAddress.SWIFT_ADDRESS} "
                    f"may only be used if an 8 or 11 character BIC address (SWIFT) exists."
                )
        # No specification on how to validate a bank's address if the `bank_address_type` is not SWIFT.
        # True when no pair of address lines is both non-blank, i.e. fewer
        # than two lines are actually filled in.
        if all(not line1.strip() or not line2.strip() for line1, line2 in combinations(self.client_address, 2)):
            self.add_error('client_address', "INCOMPLETE: At least two address lines must exist.")
        if any('/C/' in address for address in self.client_address):
            self.add_error('client_address', "INVALID: /C/ may not be present for TA 836.")
        # XXX Missing validation of IPI reference if identification purpose is structured (I)
|
[
"swissdta.fields.Numeric",
"swissdta.fields.Iban",
"swissdta.util.is_swiss_iban",
"schwifty.BIC",
"swissdta.util.remove_whitespace",
"swissdta.fields.Currency",
"datetime.datetime.now",
"swissdta.fields.Amount",
"schwifty.IBAN",
"datetime.datetime.strptime",
"datetime.timedelta",
"itertools.combinations",
"swissdta.fields.AlphaNumeric",
"swissdta.fields.Date"
] |
[((4669, 4730), 'swissdta.fields.AlphaNumeric', 'AlphaNumeric', ([], {'length': '(11)', 'fillchar': '"""0"""', 'fillside': 'FillSide.LEFT'}), "(length=11, fillchar='0', fillside=FillSide.LEFT)\n", (4681, 4730), False, 'from swissdta.fields import AlphaNumeric, Amount, Currency, Date, Iban, Numeric\n'), ((4752, 4767), 'swissdta.fields.Iban', 'Iban', ([], {'length': '(24)'}), '(length=24)\n', (4756, 4767), False, 'from swissdta.fields import AlphaNumeric, Amount, Currency, Date, Iban, Numeric\n'), ((4785, 4791), 'swissdta.fields.Date', 'Date', ([], {}), '()\n', (4789, 4791), False, 'from swissdta.fields import AlphaNumeric, Amount, Currency, Date, Iban, Numeric\n'), ((4807, 4817), 'swissdta.fields.Currency', 'Currency', ([], {}), '()\n', (4815, 4817), False, 'from swissdta.fields import AlphaNumeric, Amount, Currency, Date, Iban, Numeric\n'), ((4831, 4848), 'swissdta.fields.Amount', 'Amount', ([], {'length': '(15)'}), '(length=15)\n', (4837, 4848), False, 'from swissdta.fields import AlphaNumeric, Amount, Currency, Date, Iban, Numeric\n'), ((4872, 4889), 'swissdta.fields.Amount', 'Amount', ([], {'length': '(12)'}), '(length=12)\n', (4878, 4889), False, 'from swissdta.fields import AlphaNumeric, Amount, Currency, Date, Iban, Numeric\n'), ((4912, 4950), 'swissdta.fields.AlphaNumeric', 'AlphaNumeric', ([], {'length': '(35)', 'truncate': '(True)'}), '(length=35, truncate=True)\n', (4924, 4950), False, 'from swissdta.fields import AlphaNumeric, Amount, Currency, Date, Iban, Numeric\n'), ((4973, 5011), 'swissdta.fields.AlphaNumeric', 'AlphaNumeric', ([], {'length': '(35)', 'truncate': '(True)'}), '(length=35, truncate=True)\n', (4985, 5011), False, 'from swissdta.fields import AlphaNumeric, Amount, Currency, Date, Iban, Numeric\n'), ((5034, 5072), 'swissdta.fields.AlphaNumeric', 'AlphaNumeric', ([], {'length': '(35)', 'truncate': '(True)'}), '(length=35, truncate=True)\n', (5046, 5072), False, 'from swissdta.fields import AlphaNumeric, Amount, Currency, Date, Iban, 
Numeric\n'), ((5098, 5162), 'swissdta.fields.AlphaNumeric', 'AlphaNumeric', ([], {'length': '(1)', 'allowed_values': 'IdentificationBankAddress'}), '(length=1, allowed_values=IdentificationBankAddress)\n', (5110, 5162), False, 'from swissdta.fields import AlphaNumeric, Amount, Currency, Date, Iban, Numeric\n'), ((5183, 5206), 'swissdta.fields.AlphaNumeric', 'AlphaNumeric', ([], {'length': '(35)'}), '(length=35)\n', (5195, 5206), False, 'from swissdta.fields import AlphaNumeric, Amount, Currency, Date, Iban, Numeric\n'), ((5227, 5250), 'swissdta.fields.AlphaNumeric', 'AlphaNumeric', ([], {'length': '(35)'}), '(length=35)\n', (5239, 5250), False, 'from swissdta.fields import AlphaNumeric, Amount, Currency, Date, Iban, Numeric\n'), ((5272, 5287), 'swissdta.fields.Iban', 'Iban', ([], {'length': '(34)'}), '(length=34)\n', (5276, 5287), False, 'from swissdta.fields import AlphaNumeric, Amount, Currency, Date, Iban, Numeric\n'), ((5310, 5348), 'swissdta.fields.AlphaNumeric', 'AlphaNumeric', ([], {'length': '(35)', 'truncate': '(True)'}), '(length=35, truncate=True)\n', (5322, 5348), False, 'from swissdta.fields import AlphaNumeric, Amount, Currency, Date, Iban, Numeric\n'), ((5374, 5412), 'swissdta.fields.AlphaNumeric', 'AlphaNumeric', ([], {'length': '(35)', 'truncate': '(True)'}), '(length=35, truncate=True)\n', (5386, 5412), False, 'from swissdta.fields import AlphaNumeric, Amount, Currency, Date, Iban, Numeric\n'), ((5438, 5476), 'swissdta.fields.AlphaNumeric', 'AlphaNumeric', ([], {'length': '(35)', 'truncate': '(True)'}), '(length=35, truncate=True)\n', (5450, 5476), False, 'from swissdta.fields import AlphaNumeric, Amount, Currency, Date, Iban, Numeric\n'), ((5507, 5567), 'swissdta.fields.AlphaNumeric', 'AlphaNumeric', ([], {'length': '(1)', 'allowed_values': 'IdentificationPurpose'}), '(length=1, allowed_values=IdentificationPurpose)\n', (5519, 5567), False, 'from swissdta.fields import AlphaNumeric, Amount, Currency, Date, Iban, Numeric\n'), ((5583, 5606), 
'swissdta.fields.AlphaNumeric', 'AlphaNumeric', ([], {'length': '(35)'}), '(length=35)\n', (5595, 5606), False, 'from swissdta.fields import AlphaNumeric, Amount, Currency, Date, Iban, Numeric\n'), ((5622, 5645), 'swissdta.fields.AlphaNumeric', 'AlphaNumeric', ([], {'length': '(35)'}), '(length=35)\n', (5634, 5645), False, 'from swissdta.fields import AlphaNumeric, Amount, Currency, Date, Iban, Numeric\n'), ((5661, 5684), 'swissdta.fields.AlphaNumeric', 'AlphaNumeric', ([], {'length': '(35)'}), '(length=35)\n', (5673, 5684), False, 'from swissdta.fields import AlphaNumeric, Amount, Currency, Date, Iban, Numeric\n'), ((5705, 5750), 'swissdta.fields.Numeric', 'Numeric', ([], {'length': '(1)', 'allowed_values': 'ChargesRule'}), '(length=1, allowed_values=ChargesRule)\n', (5712, 5750), False, 'from swissdta.fields import AlphaNumeric, Amount, Currency, Date, Iban, Numeric\n'), ((11539, 11553), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (11551, 11553), False, 'from datetime import datetime, timedelta\n'), ((10435, 10468), 'swissdta.util.remove_whitespace', 'remove_whitespace', (['self.reference'], {}), '(self.reference)\n', (10452, 10468), False, 'from swissdta.util import remove_whitespace, is_swiss_iban\n'), ((10609, 10655), 'schwifty.IBAN', 'IBAN', (['self.client_account'], {'allow_invalid': '(False)'}), '(self.client_account, allow_invalid=False)\n', (10613, 10655), False, 'from schwifty import BIC, IBAN\n'), ((11583, 11601), 'datetime.timedelta', 'timedelta', ([], {'days': '(10)'}), '(days=10)\n', (11592, 11601), False, 'from datetime import datetime, timedelta\n'), ((11635, 11653), 'datetime.timedelta', 'timedelta', ([], {'days': '(60)'}), '(days=60)\n', (11644, 11653), False, 'from datetime import datetime, timedelta\n'), ((11692, 11744), 'datetime.datetime.strptime', 'datetime.strptime', (['self.value_date', 'Date.DATE_FORMAT'], {}), '(self.value_date, Date.DATE_FORMAT)\n', (11709, 11744), False, 'from datetime import datetime, timedelta\n'), 
((10949, 10975), 'swissdta.util.is_swiss_iban', 'is_swiss_iban', (['client_iban'], {}), '(client_iban)\n', (10962, 10975), False, 'from swissdta.util import remove_whitespace, is_swiss_iban\n'), ((13534, 13570), 'itertools.combinations', 'combinations', (['self.client_address', '(2)'], {}), '(self.client_address, 2)\n', (13546, 13570), False, 'from itertools import combinations\n'), ((12990, 13013), 'schwifty.BIC', 'BIC', (['self.bank_address1'], {}), '(self.bank_address1)\n', (12993, 13013), False, 'from schwifty import BIC, IBAN\n')]
|
import matplotlib.pyplot as plt
import seaborn as sea
sea.set(style = 'whitegrid')
iris = sea.load_dataset('iris')
ax = sea.stripplot(x = 'species', y = 'sepal_length', data = iris)
plt.title('Graph')
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"seaborn.load_dataset",
"seaborn.stripplot",
"seaborn.set"
] |
[((55, 81), 'seaborn.set', 'sea.set', ([], {'style': '"""whitegrid"""'}), "(style='whitegrid')\n", (62, 81), True, 'import seaborn as sea\n'), ((91, 115), 'seaborn.load_dataset', 'sea.load_dataset', (['"""iris"""'], {}), "('iris')\n", (107, 115), True, 'import seaborn as sea\n'), ((121, 176), 'seaborn.stripplot', 'sea.stripplot', ([], {'x': '"""species"""', 'y': '"""sepal_length"""', 'data': 'iris'}), "(x='species', y='sepal_length', data=iris)\n", (134, 176), True, 'import seaborn as sea\n'), ((184, 202), 'matplotlib.pyplot.title', 'plt.title', (['"""Graph"""'], {}), "('Graph')\n", (193, 202), True, 'import matplotlib.pyplot as plt\n'), ((203, 213), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (211, 213), True, 'import matplotlib.pyplot as plt\n')]
|
from django.urls import path
from User import admin, views
urlpatterns = [
path('wxlogin', views.wxLogin),
path('githublogin', views.githubLogin),
path('repo_search', views.repo_search),
path('repo_request', views.repo_request),
path('reply_request', views.reply_request),
path('request_info', views.request_info),
path('test', views.test),
]
|
[
"django.urls.path"
] |
[((80, 110), 'django.urls.path', 'path', (['"""wxlogin"""', 'views.wxLogin'], {}), "('wxlogin', views.wxLogin)\n", (84, 110), False, 'from django.urls import path\n'), ((116, 154), 'django.urls.path', 'path', (['"""githublogin"""', 'views.githubLogin'], {}), "('githublogin', views.githubLogin)\n", (120, 154), False, 'from django.urls import path\n'), ((160, 198), 'django.urls.path', 'path', (['"""repo_search"""', 'views.repo_search'], {}), "('repo_search', views.repo_search)\n", (164, 198), False, 'from django.urls import path\n'), ((204, 244), 'django.urls.path', 'path', (['"""repo_request"""', 'views.repo_request'], {}), "('repo_request', views.repo_request)\n", (208, 244), False, 'from django.urls import path\n'), ((250, 292), 'django.urls.path', 'path', (['"""reply_request"""', 'views.reply_request'], {}), "('reply_request', views.reply_request)\n", (254, 292), False, 'from django.urls import path\n'), ((298, 338), 'django.urls.path', 'path', (['"""request_info"""', 'views.request_info'], {}), "('request_info', views.request_info)\n", (302, 338), False, 'from django.urls import path\n'), ((344, 368), 'django.urls.path', 'path', (['"""test"""', 'views.test'], {}), "('test', views.test)\n", (348, 368), False, 'from django.urls import path\n')]
|
import torch
import torch.nn as nn
from ltr.models.layers.blocks import conv_block
class ConvGRUCell(nn.Module):
def __init__(self, input_dim, hidden_dim, kernel_size, padding_mode='zeros'):
" Referenced from https://github.com/happyjin/ConvGRU-pytorch"
super(ConvGRUCell, self).__init__()
self.hidden_dim = hidden_dim
if padding_mode == 'zeros':
if not isinstance(kernel_size, (list, tuple)):
kernel_size = (kernel_size, kernel_size)
padding = kernel_size[0] // 2, kernel_size[1] // 2
self.conv_reset = nn.Conv2d(input_dim + hidden_dim, self.hidden_dim, kernel_size, padding=padding)
self.conv_update = nn.Conv2d(input_dim + hidden_dim, self.hidden_dim, kernel_size, padding=padding)
self.conv_state_new = nn.Conv2d(input_dim+hidden_dim, self.hidden_dim, kernel_size, padding=padding)
else:
self.conv_reset = conv_block(input_dim + hidden_dim, hidden_dim, kernel_size=kernel_size, stride=1,
padding=int(kernel_size // 2), batch_norm=False, relu=False,
padding_mode=padding_mode)
self.conv_update = conv_block(input_dim + hidden_dim, hidden_dim, kernel_size=kernel_size, stride=1,
padding=int(kernel_size // 2), batch_norm=False, relu=False,
padding_mode=padding_mode)
self.conv_state_new = conv_block(input_dim + hidden_dim, hidden_dim, kernel_size=kernel_size, stride=1,
padding=int(kernel_size // 2), batch_norm=False, relu=False,
padding_mode=padding_mode)
def forward(self, input, state_cur):
input_state_cur = torch.cat([input, state_cur], dim=1)
reset_gate = torch.sigmoid(self.conv_reset(input_state_cur))
update_gate = torch.sigmoid(self.conv_update(input_state_cur))
input_state_cur_reset = torch.cat([input, reset_gate*state_cur], dim=1)
state_new = torch.tanh(self.conv_state_new(input_state_cur_reset))
state_next = (1.0 - update_gate) * state_cur + update_gate * state_new
return state_next
|
[
"torch.nn.Conv2d",
"torch.cat"
] |
[((1848, 1884), 'torch.cat', 'torch.cat', (['[input, state_cur]'], {'dim': '(1)'}), '([input, state_cur], dim=1)\n', (1857, 1884), False, 'import torch\n'), ((2059, 2108), 'torch.cat', 'torch.cat', (['[input, reset_gate * state_cur]'], {'dim': '(1)'}), '([input, reset_gate * state_cur], dim=1)\n', (2068, 2108), False, 'import torch\n'), ((596, 681), 'torch.nn.Conv2d', 'nn.Conv2d', (['(input_dim + hidden_dim)', 'self.hidden_dim', 'kernel_size'], {'padding': 'padding'}), '(input_dim + hidden_dim, self.hidden_dim, kernel_size, padding=padding\n )\n', (605, 681), True, 'import torch.nn as nn\n'), ((708, 793), 'torch.nn.Conv2d', 'nn.Conv2d', (['(input_dim + hidden_dim)', 'self.hidden_dim', 'kernel_size'], {'padding': 'padding'}), '(input_dim + hidden_dim, self.hidden_dim, kernel_size, padding=padding\n )\n', (717, 793), True, 'import torch.nn as nn\n'), ((824, 909), 'torch.nn.Conv2d', 'nn.Conv2d', (['(input_dim + hidden_dim)', 'self.hidden_dim', 'kernel_size'], {'padding': 'padding'}), '(input_dim + hidden_dim, self.hidden_dim, kernel_size, padding=padding\n )\n', (833, 909), True, 'import torch.nn as nn\n')]
|
from flask import Blueprint
home_blueprint = Blueprint('home_blueprint', __name__)
from . import views
|
[
"flask.Blueprint"
] |
[((46, 83), 'flask.Blueprint', 'Blueprint', (['"""home_blueprint"""', '__name__'], {}), "('home_blueprint', __name__)\n", (55, 83), False, 'from flask import Blueprint\n')]
|
#
# 为 GUI 封装的函数 不可直接运行
# Author: Xiaohei
# Updatetime: 2021-12-01
#
import cv2
import os
import numpy
import pickle
from enhance import image_enhance
def get_descriptors(img):
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
img = clahe.apply(img)
img = image_enhance.image_enhance(img)
img = numpy.array(img, dtype=numpy.uint8)
# Threshold
ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
# Normalize to 0 and 1 range
img[img == 255] = 1
# Harris corners
harris_corners = cv2.cornerHarris(img, 3, 3, 0.04)
harris_normalized = cv2.normalize(harris_corners, 0, 255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32FC1)
threshold_harris = 125
# Extract keypoints
keypoints = []
for x in range(0, harris_normalized.shape[0]):
for y in range(0, harris_normalized.shape[1]):
if harris_normalized[x][y] > threshold_harris:
keypoints.append(cv2.KeyPoint(y, x, 1))
# Define descriptor
orb = cv2.ORB_create()
# Compute descriptors
_, des = orb.compute(img, keypoints)
return keypoints, des
def match(des1, path, name_lst):
avg_lst = []
if name_lst:
for name in name_lst:
with open("{}/{}".format(path, name), "rb+") as f:
des2 = pickle.load(f)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = sorted(bf.match(des1, des2), key=lambda match: match.distance)
score = 0
for match in matches:
score += match.distance
avg = score / len(matches)
avg_lst.append(avg)
return avg_lst
else:
return None
def run_app(image_path, data_path):
img1 = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
if img1 is not None:
img1 = cv2.resize(img1, dsize=(256, 364))
kp1, des1 = get_descriptors(img1)
else:
raise Exception("Invalid image path!")
address_lst = [name for name in os.listdir(data_path) if os.path.isfile(os.path.join(data_path, name))]
name_lst = list(address_lst)
avgs = match(des1, data_path, name_lst)
score_threshold = 40
if avgs is not None:
if min(avgs) < score_threshold:
flag = True
name = name_lst[avgs.index(min(avgs))]
else:
flag = False
name = name_lst[avgs.index(min(avgs))]
name1 = image_path.replace("\\", "/").split("/")[-1].split(".")[0]
# name1 = input("Input a name to save the fingerprint: ")
if name1:
with open("{}/{}".format(data_path, name1), "wb+") as f:
pickle.dump(des1, f)
else:
flag = False
name = "None"
name1 = image_path.replace("\\", "/").split("/")[-1].split(".")[0]
# name1 = input("Input a name to save the fingerprint: ")
if name1:
with open("{}/{}".format(data_path, name1), "wb+") as f:
pickle.dump(des1, f)
return flag, name
|
[
"os.listdir",
"pickle.dump",
"enhance.image_enhance.image_enhance",
"cv2.threshold",
"cv2.BFMatcher",
"cv2.normalize",
"cv2.imread",
"pickle.load",
"numpy.array",
"cv2.ORB_create",
"cv2.KeyPoint",
"cv2.createCLAHE",
"os.path.join",
"cv2.cornerHarris",
"cv2.resize"
] |
[((191, 242), 'cv2.createCLAHE', 'cv2.createCLAHE', ([], {'clipLimit': '(2.0)', 'tileGridSize': '(8, 8)'}), '(clipLimit=2.0, tileGridSize=(8, 8))\n', (206, 242), False, 'import cv2\n'), ((280, 312), 'enhance.image_enhance.image_enhance', 'image_enhance.image_enhance', (['img'], {}), '(img)\n', (307, 312), False, 'from enhance import image_enhance\n'), ((323, 358), 'numpy.array', 'numpy.array', (['img'], {'dtype': 'numpy.uint8'}), '(img, dtype=numpy.uint8)\n', (334, 358), False, 'import numpy\n'), ((391, 460), 'cv2.threshold', 'cv2.threshold', (['img', '(127)', '(255)', '(cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)'], {}), '(img, 127, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)\n', (404, 460), False, 'import cv2\n'), ((562, 595), 'cv2.cornerHarris', 'cv2.cornerHarris', (['img', '(3)', '(3)', '(0.04)'], {}), '(img, 3, 3, 0.04)\n', (578, 595), False, 'import cv2\n'), ((620, 709), 'cv2.normalize', 'cv2.normalize', (['harris_corners', '(0)', '(255)'], {'norm_type': 'cv2.NORM_MINMAX', 'dtype': 'cv2.CV_32FC1'}), '(harris_corners, 0, 255, norm_type=cv2.NORM_MINMAX, dtype=cv2.\n CV_32FC1)\n', (633, 709), False, 'import cv2\n'), ((1032, 1048), 'cv2.ORB_create', 'cv2.ORB_create', ([], {}), '()\n', (1046, 1048), False, 'import cv2\n'), ((1769, 1813), 'cv2.imread', 'cv2.imread', (['image_path', 'cv2.IMREAD_GRAYSCALE'], {}), '(image_path, cv2.IMREAD_GRAYSCALE)\n', (1779, 1813), False, 'import cv2\n'), ((1855, 1889), 'cv2.resize', 'cv2.resize', (['img1'], {'dsize': '(256, 364)'}), '(img1, dsize=(256, 364))\n', (1865, 1889), False, 'import cv2\n'), ((1362, 1410), 'cv2.BFMatcher', 'cv2.BFMatcher', (['cv2.NORM_HAMMING'], {'crossCheck': '(True)'}), '(cv2.NORM_HAMMING, crossCheck=True)\n', (1375, 1410), False, 'import cv2\n'), ((2026, 2047), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (2036, 2047), False, 'import os\n'), ((1330, 1344), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1341, 1344), False, 'import pickle\n'), ((2066, 2095), 'os.path.join', 
'os.path.join', (['data_path', 'name'], {}), '(data_path, name)\n', (2078, 2095), False, 'import os\n'), ((3014, 3034), 'pickle.dump', 'pickle.dump', (['des1', 'f'], {}), '(des1, f)\n', (3025, 3034), False, 'import pickle\n'), ((974, 995), 'cv2.KeyPoint', 'cv2.KeyPoint', (['y', 'x', '(1)'], {}), '(y, x, 1)\n', (986, 995), False, 'import cv2\n'), ((2696, 2716), 'pickle.dump', 'pickle.dump', (['des1', 'f'], {}), '(des1, f)\n', (2707, 2716), False, 'import pickle\n')]
|
from django.db import models
from json_field import JSONField
class Service(models.Model):
name = models.CharField(max_length=64)
secret = models.CharField(max_length=128)
service_id = models.CharField(max_length=128)
ips = JSONField(default=[])
validate_ip = models.BooleanField(default=True)
def __str__(self):
return self.name
class Message(models.Model):
message = models.CharField(max_length=64)
sender = models.CharField(max_length=64, db_index=True)
country = models.CharField(max_length=2)
price = models.FloatField()
price_wo_vat = models.FloatField()
currency = models.CharField(max_length=3)
service_id = models.CharField(max_length=128)
message_id = models.CharField(max_length=128)
keyword = models.CharField(max_length=64)
shortcode = models.CharField(max_length=64)
operator = models.CharField(max_length=128)
billing_type = models.CharField(max_length=2)
status = models.CharField(max_length=64)
test = models.CharField(max_length=16)
sig = models.CharField(max_length=128)
def __str__(self):
return '{} - {} from {} on {}'.format(
self.keyword,
self.message,
self.sender,
self.shortcode,
)
class Payment(models.Model):
service = models.ForeignKey(Service, related_name='payments')
message = models.OneToOneField(Message)
pin = models.CharField(max_length=16, unique=True)
used = models.BooleanField(default=False)
def __str__(self):
return '{} - {}'.format(self.id, self.pin)
|
[
"django.db.models.OneToOneField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.FloatField",
"django.db.models.BooleanField",
"json_field.JSONField"
] |
[((104, 135), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (120, 135), False, 'from django.db import models\n'), ((149, 181), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (165, 181), False, 'from django.db import models\n'), ((199, 231), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (215, 231), False, 'from django.db import models\n'), ((242, 263), 'json_field.JSONField', 'JSONField', ([], {'default': '[]'}), '(default=[])\n', (251, 263), False, 'from json_field import JSONField\n'), ((282, 315), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (301, 315), False, 'from django.db import models\n'), ((410, 441), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (426, 441), False, 'from django.db import models\n'), ((455, 501), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'db_index': '(True)'}), '(max_length=64, db_index=True)\n', (471, 501), False, 'from django.db import models\n'), ((516, 546), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(2)'}), '(max_length=2)\n', (532, 546), False, 'from django.db import models\n'), ((559, 578), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (576, 578), False, 'from django.db import models\n'), ((598, 617), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (615, 617), False, 'from django.db import models\n'), ((633, 663), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(3)'}), '(max_length=3)\n', (649, 663), False, 'from django.db import models\n'), ((681, 713), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (697, 713), False, 'from django.db import models\n'), ((731, 763), 
'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (747, 763), False, 'from django.db import models\n'), ((778, 809), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (794, 809), False, 'from django.db import models\n'), ((826, 857), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (842, 857), False, 'from django.db import models\n'), ((873, 905), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (889, 905), False, 'from django.db import models\n'), ((925, 955), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(2)'}), '(max_length=2)\n', (941, 955), False, 'from django.db import models\n'), ((969, 1000), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (985, 1000), False, 'from django.db import models\n'), ((1012, 1043), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(16)'}), '(max_length=16)\n', (1028, 1043), False, 'from django.db import models\n'), ((1054, 1086), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (1070, 1086), False, 'from django.db import models\n'), ((1318, 1369), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Service'], {'related_name': '"""payments"""'}), "(Service, related_name='payments')\n", (1335, 1369), False, 'from django.db import models\n'), ((1384, 1413), 'django.db.models.OneToOneField', 'models.OneToOneField', (['Message'], {}), '(Message)\n', (1404, 1413), False, 'from django.db import models\n'), ((1424, 1468), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(16)', 'unique': '(True)'}), '(max_length=16, unique=True)\n', (1440, 1468), False, 'from django.db import models\n'), ((1480, 1514), 'django.db.models.BooleanField', 'models.BooleanField', 
([], {'default': '(False)'}), '(default=False)\n', (1499, 1514), False, 'from django.db import models\n')]
|
from decimal import Decimal
from flask import render_template, url_for, abort
from flask_classy import FlaskView, route
from flask_login import login_required
from sqlalchemy import and_
from werkzeug.utils import redirect
from OrderSystem import db, sentry
from OrderSystem import forms
from OrderSystem.routing.CRUDBase import CRUDBase
from OrderSystem.sql.ORM import Budget, Subteam, Order
from OrderSystem.utilities.Helpers import flash_errors
from OrderSystem.utilities.Permissions import update_order_status_access_required
from OrderSystem.utilities.ServerLogger import log_event
class Budgets(FlaskView, CRUDBase):
"""
The Budgets system provides team members with a way to view how much money their subteam still has available, as
well as drilling down into individual subteams and specific orders
"""
route_base = ""
BUDGET_FULL_THRESH = 0.75 # 75%
BUDGET_MEDIUM_THRESH = 0.50 # 50%
BUDGET_LOW_THRESH = 0.25 # 25%
def create(self):
"""
No implementation
"""
pass
@route('/<int:fiscal_year>')
@login_required
def index(self, fiscal_year):
"""
Shows the user an overview of the budgets for subteams this year
@return: List of subteams color-coded with their amount of money remaining
"""
subteams = db.session.query(Subteam).all()
ids = []
names = []
css_classes = []
cash_left = []
started_with = []
for subteam in subteams:
try:
budget = db.session.query(Budget).filter(
and_(Budget.fiscal_year == fiscal_year, Budget.subteam_id == subteam.id)).first()
curr_orders = db.session.query(Order).filter(
and_(Order.fiscal_year == fiscal_year, Order.part_for_subteam == subteam.id,
Order.pending_approval == False))
dollars_left = Decimal(budget.dollar_amount)
for order in curr_orders:
dollars_left -= Decimal(order.total)
# Decide what class to use
if (dollars_left / budget.dollar_amount) > self.BUDGET_FULL_THRESH:
css_class = "budget-full"
elif self.BUDGET_MEDIUM_THRESH < (dollars_left / budget.dollar_amount) < self.BUDGET_FULL_THRESH:
css_class = "budget-low"
elif self.BUDGET_LOW_THRESH < (dollars_left / budget.dollar_amount) < self.BUDGET_MEDIUM_THRESH:
css_class = "budget-verylow"
elif 0 < (dollars_left / budget.dollar_amount) < self.BUDGET_LOW_THRESH:
css_class = "budget-critical"
else:
css_class = "budget-empty"
ids.append(subteam.id)
names.append(subteam.name)
css_classes.append(css_class)
cash_left.append('{0:.2f}'.format(dollars_left))
started_with.append('{0:.2f}'.format(budget.dollar_amount))
except:
ids.append(subteam.id)
names.append(subteam.name)
css_classes.append("")
cash_left.append(0)
started_with.append(0)
return render_template('settings/budgets/index.html', subteams=names, cash_left=cash_left,
started_with=started_with, css_classes=css_classes, fiscal_year=fiscal_year,
ids=ids, page="budgets",
thresholds=[self.BUDGET_FULL_THRESH, self.BUDGET_MEDIUM_THRESH, self.BUDGET_LOW_THRESH])
@route('/<int:fiscal_year>/<int:subteam_id>/set', methods=['GET', 'POST'])
@update_order_status_access_required
def update(self, fiscal_year, subteam_id):
"""
Changes the amount of money that a subteam is marked as having available
@param subteam_id: The database-given ID of the subteam to update the budget of
@param fiscal_year: The current FRC season
@return: Redirect to Budgets index
"""
try:
form = forms.SetBudgetForm()
existing_budget = db.session.query(Budget).filter(
and_(Budget.subteam_id == subteam_id, Budget.fiscal_year == fiscal_year)).first()
if form.validate_on_submit():
if existing_budget is None:
# Subteam didn't have a budget previously set
db.session.add(Budget(subteam_id, form.amount.data, fiscal_year))
else:
# Subteam had an existing budget; update the previous one instead of creating a new DB row
existing_budget.dollar_amount = form.amount.data
db.session.commit()
return redirect(url_for('Budgets:index', fiscal_year=fiscal_year))
else:
flash_errors(form)
return render_template('settings/budgets/set.html', form=form, page="budgets")
except Exception as e:
log_event("ERROR", e)
sentry.captureException()
abort(500)
def delete(self):
"""
No implementation
"""
pass
@route('/<int:fiscal_year>/<int:subteam_id>')
@login_required
def view_orders_by_subteam(self, fiscal_year, subteam_id):
"""
Shows a list of orders for the given subteam
@param subteam_id: The database-given ID of the subteam to update the budget of
@param fiscal_year: The current FRC season
@return: List of all orders for the given subteam, along with the member who ordered the part, and other info
"""
orders_by_subteam = db.session.query(Order).filter(
and_(Order.fiscal_year == fiscal_year, Order.part_for_subteam == subteam_id,
Order.pending_approval == False))
subteam = db.session.query(Subteam).filter(Subteam.id == subteam_id).first()
subtotal = 0
credit = 0
shipping = 0
total = 0
for order in orders_by_subteam:
subtotal += order.part_total_price
credit += order.credit
shipping += order.part_shipping_cost
total += order.total
return render_template('settings/budgets/orders-by-subteam.html', orders_by_subteam=orders_by_subteam,
total=total, credit=credit, shipping=shipping, subtotal=subtotal, subteam=subteam,
fiscal_year=fiscal_year, page="budgets")
|
[
"OrderSystem.utilities.ServerLogger.log_event",
"OrderSystem.sql.ORM.Budget",
"decimal.Decimal",
"sqlalchemy.and_",
"OrderSystem.db.session.query",
"flask.abort",
"OrderSystem.db.session.commit",
"flask_classy.route",
"OrderSystem.utilities.Helpers.flash_errors",
"OrderSystem.sentry.captureException",
"flask.url_for",
"OrderSystem.forms.SetBudgetForm",
"flask.render_template"
] |
[((1056, 1083), 'flask_classy.route', 'route', (['"""/<int:fiscal_year>"""'], {}), "('/<int:fiscal_year>')\n", (1061, 1083), False, 'from flask_classy import FlaskView, route\n'), ((3650, 3723), 'flask_classy.route', 'route', (['"""/<int:fiscal_year>/<int:subteam_id>/set"""'], {'methods': "['GET', 'POST']"}), "('/<int:fiscal_year>/<int:subteam_id>/set', methods=['GET', 'POST'])\n", (3655, 3723), False, 'from flask_classy import FlaskView, route\n'), ((5242, 5286), 'flask_classy.route', 'route', (['"""/<int:fiscal_year>/<int:subteam_id>"""'], {}), "('/<int:fiscal_year>/<int:subteam_id>')\n", (5247, 5286), False, 'from flask_classy import FlaskView, route\n'), ((3276, 3564), 'flask.render_template', 'render_template', (['"""settings/budgets/index.html"""'], {'subteams': 'names', 'cash_left': 'cash_left', 'started_with': 'started_with', 'css_classes': 'css_classes', 'fiscal_year': 'fiscal_year', 'ids': 'ids', 'page': '"""budgets"""', 'thresholds': '[self.BUDGET_FULL_THRESH, self.BUDGET_MEDIUM_THRESH, self.BUDGET_LOW_THRESH]'}), "('settings/budgets/index.html', subteams=names, cash_left=\n cash_left, started_with=started_with, css_classes=css_classes,\n fiscal_year=fiscal_year, ids=ids, page='budgets', thresholds=[self.\n BUDGET_FULL_THRESH, self.BUDGET_MEDIUM_THRESH, self.BUDGET_LOW_THRESH])\n", (3291, 3564), False, 'from flask import render_template, url_for, abort\n'), ((6291, 6523), 'flask.render_template', 'render_template', (['"""settings/budgets/orders-by-subteam.html"""'], {'orders_by_subteam': 'orders_by_subteam', 'total': 'total', 'credit': 'credit', 'shipping': 'shipping', 'subtotal': 'subtotal', 'subteam': 'subteam', 'fiscal_year': 'fiscal_year', 'page': '"""budgets"""'}), "('settings/budgets/orders-by-subteam.html',\n orders_by_subteam=orders_by_subteam, total=total, credit=credit,\n shipping=shipping, subtotal=subtotal, subteam=subteam, fiscal_year=\n fiscal_year, page='budgets')\n", (6306, 6523), False, 'from flask import render_template, url_for, 
abort\n'), ((4133, 4154), 'OrderSystem.forms.SetBudgetForm', 'forms.SetBudgetForm', ([], {}), '()\n', (4152, 4154), False, 'from OrderSystem import forms\n'), ((4952, 5023), 'flask.render_template', 'render_template', (['"""settings/budgets/set.html"""'], {'form': 'form', 'page': '"""budgets"""'}), "('settings/budgets/set.html', form=form, page='budgets')\n", (4967, 5023), False, 'from flask import render_template, url_for, abort\n'), ((5777, 5890), 'sqlalchemy.and_', 'and_', (['(Order.fiscal_year == fiscal_year)', '(Order.part_for_subteam == subteam_id)', '(Order.pending_approval == False)'], {}), '(Order.fiscal_year == fiscal_year, Order.part_for_subteam == subteam_id,\n Order.pending_approval == False)\n', (5781, 5890), False, 'from sqlalchemy import and_\n'), ((1338, 1363), 'OrderSystem.db.session.query', 'db.session.query', (['Subteam'], {}), '(Subteam)\n', (1354, 1363), False, 'from OrderSystem import db, sentry\n'), ((1943, 1972), 'decimal.Decimal', 'Decimal', (['budget.dollar_amount'], {}), '(budget.dollar_amount)\n', (1950, 1972), False, 'from decimal import Decimal\n'), ((4775, 4794), 'OrderSystem.db.session.commit', 'db.session.commit', ([], {}), '()\n', (4792, 4794), False, 'from OrderSystem import db, sentry\n'), ((4913, 4931), 'OrderSystem.utilities.Helpers.flash_errors', 'flash_errors', (['form'], {}), '(form)\n', (4925, 4931), False, 'from OrderSystem.utilities.Helpers import flash_errors\n'), ((5067, 5088), 'OrderSystem.utilities.ServerLogger.log_event', 'log_event', (['"""ERROR"""', 'e'], {}), "('ERROR', e)\n", (5076, 5088), False, 'from OrderSystem.utilities.ServerLogger import log_event\n'), ((5101, 5126), 'OrderSystem.sentry.captureException', 'sentry.captureException', ([], {}), '()\n', (5124, 5126), False, 'from OrderSystem import db, sentry\n'), ((5139, 5149), 'flask.abort', 'abort', (['(500)'], {}), '(500)\n', (5144, 5149), False, 'from flask import render_template, url_for, abort\n'), ((5733, 5756), 'OrderSystem.db.session.query', 
'db.session.query', (['Order'], {}), '(Order)\n', (5749, 5756), False, 'from OrderSystem import db, sentry\n'), ((1775, 1888), 'sqlalchemy.and_', 'and_', (['(Order.fiscal_year == fiscal_year)', '(Order.part_for_subteam == subteam.id)', '(Order.pending_approval == False)'], {}), '(Order.fiscal_year == fiscal_year, Order.part_for_subteam == subteam.id,\n Order.pending_approval == False)\n', (1779, 1888), False, 'from sqlalchemy import and_\n'), ((2051, 2071), 'decimal.Decimal', 'Decimal', (['order.total'], {}), '(order.total)\n', (2058, 2071), False, 'from decimal import Decimal\n'), ((4827, 4876), 'flask.url_for', 'url_for', (['"""Budgets:index"""'], {'fiscal_year': 'fiscal_year'}), "('Budgets:index', fiscal_year=fiscal_year)\n", (4834, 4876), False, 'from flask import render_template, url_for, abort\n'), ((1723, 1746), 'OrderSystem.db.session.query', 'db.session.query', (['Order'], {}), '(Order)\n', (1739, 1746), False, 'from OrderSystem import db, sentry\n'), ((4235, 4307), 'sqlalchemy.and_', 'and_', (['(Budget.subteam_id == subteam_id)', '(Budget.fiscal_year == fiscal_year)'], {}), '(Budget.subteam_id == subteam_id, Budget.fiscal_year == fiscal_year)\n', (4239, 4307), False, 'from sqlalchemy import and_\n'), ((4505, 4554), 'OrderSystem.sql.ORM.Budget', 'Budget', (['subteam_id', 'form.amount.data', 'fiscal_year'], {}), '(subteam_id, form.amount.data, fiscal_year)\n', (4511, 4554), False, 'from OrderSystem.sql.ORM import Budget, Subteam, Order\n'), ((5924, 5949), 'OrderSystem.db.session.query', 'db.session.query', (['Subteam'], {}), '(Subteam)\n', (5940, 5949), False, 'from OrderSystem import db, sentry\n'), ((1610, 1682), 'sqlalchemy.and_', 'and_', (['(Budget.fiscal_year == fiscal_year)', '(Budget.subteam_id == subteam.id)'], {}), '(Budget.fiscal_year == fiscal_year, Budget.subteam_id == subteam.id)\n', (1614, 1682), False, 'from sqlalchemy import and_\n'), ((4186, 4210), 'OrderSystem.db.session.query', 'db.session.query', (['Budget'], {}), '(Budget)\n', (4202, 
4210), False, 'from OrderSystem import db, sentry\n'), ((1557, 1581), 'OrderSystem.db.session.query', 'db.session.query', (['Budget'], {}), '(Budget)\n', (1573, 1581), False, 'from OrderSystem import db, sentry\n')]
|
"""
Test colorings for edgeless graphs.
Copyright 2020. <NAME>.
"""
from pytest import mark
from common import create_edgeless, parameters, len_iter, check_surjective
@mark.parametrize('vertices,colors', parameters(7, 8))
def test_edgeless(vertices: int, colors: int):
    """Check that an edgeless graph admits exactly colors**vertices proper colorings."""
    graph = create_edgeless(vertices)
    total = len_iter(graph.colorings(colors))
    # With no edges every vertex is unconstrained, so any assignment is proper.
    assert total == colors ** vertices
    assert check_surjective(graph, colors, total)
|
[
"common.create_edgeless",
"common.parameters",
"common.check_surjective",
"common.len_iter"
] |
[((325, 350), 'common.create_edgeless', 'create_edgeless', (['vertices'], {}), '(vertices)\n', (340, 350), False, 'from common import create_edgeless, parameters, len_iter, check_surjective\n'), ((411, 430), 'common.len_iter', 'len_iter', (['colorings'], {}), '(colorings)\n', (419, 430), False, 'from common import create_edgeless, parameters, len_iter, check_surjective\n'), ((489, 535), 'common.check_surjective', 'check_surjective', (['graph', 'colors', 'num_colorings'], {}), '(graph, colors, num_colorings)\n', (505, 535), False, 'from common import create_edgeless, parameters, len_iter, check_surjective\n'), ((207, 223), 'common.parameters', 'parameters', (['(7)', '(8)'], {}), '(7, 8)\n', (217, 223), False, 'from common import create_edgeless, parameters, len_iter, check_surjective\n')]
|
import os

# Evaluation runs for the five GridDistortion folds of ricord1a / UNet++ / timm-regnetx_002.
# The commands differ only in the fold index embedded in the config filename.
COMMAND_TEMPLATE = ("python main.py --configs "
                    "configs/eval_ricord1a_unetplusplus_timm-regnetx_002_{}_GridDistortion.yml")

for fold in range(5):
    os.system(COMMAND_TEMPLATE.format(fold))
|
[
"os.system"
] |
[((538, 550), 'os.system', 'os.system', (['l'], {}), '(l)\n', (547, 550), False, 'import os\n')]
|
"""
Django settings for pirauber_project project.
Generated by 'django-admin startproject' using Django 2.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import django_heroku
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Reading .env file
env = environ.Env()
environ.Env.read_env()
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# No default is supplied here, so the environment variable must be set.
SECRET_KEY = env('DJANGO_SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
# Defaults to False unless DJANGO_DEBUG is explicitly truthy.
DEBUG = env.bool("DJANGO_DEBUG", False)
ALLOWED_HOSTS = ['.herokuapp.com', 'localhost', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    # 3rd party
    'storages',
    'crispy_forms',
    'allauth',
    'allauth.account',
    'phonenumber_field',
    'intl_tel_input',
    'django_tables2',
    # Local
    'users.apps.UsersConfig',
    'pages.apps.PagesConfig',
    'rides.apps.RidesConfig',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pirauber_project.urls'
# Project-level templates live in <BASE_DIR>/templates; app templates are
# discovered automatically via APP_DIRS.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'pirauber_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# DATABASE_URL (e.g. set by Heroku) wins; falls back to a local postgres DB.
DATABASES = {
    "default": env.db("DATABASE_URL", default="postgres:///pirauber")
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'pt-BR'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCALE_PATHS = [
    os.path.join(BASE_DIR, 'locale'),
]
# Activate Django-Heroku.
# NOTE(review): django_heroku.settings(locals()) mutates settings in place
# (database, static files, logging) -- confirm it is intended to run before
# the static/media settings below are finalized.
django_heroku.settings(locals())
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), ]
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Staging and Production settings
if not DEBUG:
    # Security Settings
    # NOTE(review): SECURE_HSTS_PRELOAD with only a one-hour HSTS max-age
    # (SECURE_HSTS_SECONDS = 3600) looks inconsistent -- preload lists
    # typically require a much longer max-age; confirm intended value.
    SECURE_BROWSER_XSS_FILTER = True
    X_FRAME_OPTIONS = 'DENY'
    SECURE_SSL_REDIRECT = True
    SECURE_HSTS_SECONDS = 3600
    SECURE_HSTS_INCLUDE_SUBDOMAINS = True
    SECURE_HSTS_PRELOAD = True
    SECURE_CONTENT_TYPE_NOSNIFF = True
    SESSION_COOKIE_SECURE = True
    CSRF_COOKIE_SECURE = True
    # AWS Settings -- credentials come from the environment, never from code.
    AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
    AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
    AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
    AWS_S3_CUSTOM_DOMAIN = f'{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com'
    AWS_S3_OBJECT_PARAMETERS = {'CacheControl': 'max-age=86400', }
    AWS_DEFAULT_ACL = None
    # S3 static settings -- overrides the local STATIC_URL defined above.
    STATIC_LOCATION = 'static'
    STATIC_URL = f'https://{AWS_S3_CUSTOM_DOMAIN}/{STATIC_LOCATION}/'
    STATICFILES_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
    # S3 public media settings -- overrides the local MEDIA_URL defined above.
    PUBLIC_MEDIA_LOCATION = 'media'
    MEDIA_URL = f'https://{AWS_S3_CUSTOM_DOMAIN}/{PUBLIC_MEDIA_LOCATION}/'
    DEFAULT_FILE_STORAGE = 'pirauber_project.storage_backends.MediaStorage'
# Custom User
AUTH_USER_MODEL = 'users.CustomUser'
# Crispy Forms
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# django-allauth config -- email-only authentication (no usernames).
LOGIN_REDIRECT_URL = 'home'
ACCOUNT_LOGOUT_REDIRECT_URL = 'home'
ACCOUNT_SESSION_REMEMBER = True
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_UNIQUE_EMAIL = True
ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = False
SITE_ID = 1
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend',
)
# Emails are printed to the console; swap for a real backend in production.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
ACCOUNT_FORMS = {
    'login': 'pirauber_project.forms.CustomLoginForm',
    'signup': 'pirauber_project.forms.CustomSignupForm',
}
# phonenumber_field config
PHONENUMBER_DEFAULT_REGION = 'BR'
|
[
"os.path.abspath",
"os.path.join",
"environ.Env.read_env",
"environ.Env"
] |
[((535, 548), 'environ.Env', 'environ.Env', ([], {}), '()\n', (546, 548), False, 'import environ\n'), ((549, 571), 'environ.Env.read_env', 'environ.Env.read_env', ([], {}), '()\n', (569, 571), False, 'import environ\n'), ((3677, 3714), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""staticfiles"""'], {}), "(BASE_DIR, 'staticfiles')\n", (3689, 3714), False, 'import os\n'), ((3959, 3995), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""mediafiles"""'], {}), "(BASE_DIR, 'mediafiles')\n", (3971, 3995), False, 'import os\n'), ((3439, 3471), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""locale"""'], {}), "(BASE_DIR, 'locale')\n", (3451, 3471), False, 'import os\n'), ((3735, 3767), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""static"""'], {}), "(BASE_DIR, 'static')\n", (3747, 3767), False, 'import os\n'), ((480, 505), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (495, 505), False, 'import os\n'), ((2071, 2106), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""templates"""'], {}), "(BASE_DIR, 'templates')\n", (2083, 2106), False, 'import os\n')]
|
"""
Registering all the views in flask.
Includes both html and json end points.
"""
import flask
from geo.db import connection
import geo.views.form as form
import geo.views.resources as resources
import geo.views.new_resources as new_resources
import geo.views.moderation as moderation
import geo.views.moderation_submit as moderationsubmit
import geo.views.map
import geo.views.form_submit as formsubmit
import geo.views.type_summary as type_summary
import geo.views.country_summary as country_summary
import geo.views.user as user
import geo.views.index as index
# json services
import geo.views.json.location as location
import geo.views.json.menu as menu
import geo.views.json.summarydata as summarydata
import geo.views.json.linechart as linechart
import geo.views.json.get_resources as get_resources
import geo.views.json.add_ai as add_ai
import geo.views.show_ai as show_ai
import geo.views.allunits as allunits
# static html
import geo.views.static.partners as partners
app = flask.Flask(__name__)
# SECURITY NOTE(review): the session secret key is hardcoded in source --
# it should be loaded from the environment or a secrets store instead.
app.secret_key = '\<KEY>>(\xb5\x92a\x87\xbf\xca3\xc9F\xec\xe3\x06aQ0\x19\xb1\xbf\xd0\xae\x8b\x8a5\xfbW\xab\x18\x08uV\x94)\xa0\x99\xfb\x0b1\x0f\xa2n\xba\xa3mya\xf8\xdfR\'F@\xd9\xb2\x10S\xf4r~\xae\x94\x1c\x7f\xd1J\x86\x1ar.m"\xdc\x18\x85\x80\xb8\x18\x1cG\x81\x1e]\xb3E\x01i\xf4\xd9_\x18\xfar\xbe`\xaa\xa7+3\x92\xe8Q'
#app.config['SERVER_NAME'] = "http://globalenergyobservatory.org/dev"
# A single shared DB connection object is injected into every view module
# via its module-level `db` attribute before the blueprints are registered.
conn = connection.Db()
form.db = conn
resources.db = conn
new_resources.db = conn
menu.db = conn
geo.views.map.db = conn
location.db = conn
formsubmit.db = conn
type_summary.db = conn
country_summary.db = conn
summarydata.db = conn
linechart.db = conn
user.db = conn
index.db = conn
moderation.db = conn
moderationsubmit.db = conn
get_resources.db = conn
add_ai.db = conn
show_ai.db = conn
allunits.db = conn
# html
app.register_blueprint(form.mod)
app.register_blueprint(resources.mod)
app.register_blueprint(new_resources.mod)
app.register_blueprint(moderation.mod)
app.register_blueprint(geo.views.map.mod)
app.register_blueprint(formsubmit.mod)
app.register_blueprint(type_summary.mod)
app.register_blueprint(country_summary.mod)
app.register_blueprint(user.mod)
app.register_blueprint(index.mod)
app.register_blueprint(moderationsubmit.mod)
app.register_blueprint(add_ai.mod)
app.register_blueprint(show_ai.mod)
app.register_blueprint(allunits.mod)
# json services
app.register_blueprint(summarydata.mod)
app.register_blueprint(location.mod)
app.register_blueprint(menu.mod)
app.register_blueprint(linechart.mod)
app.register_blueprint(get_resources.mod)
# static html
app.register_blueprint(partners.mod)
|
[
"flask.Flask",
"geo.db.connection.Db"
] |
[((989, 1010), 'flask.Flask', 'flask.Flask', (['__name__'], {}), '(__name__)\n', (1000, 1010), False, 'import flask\n'), ((1404, 1419), 'geo.db.connection.Db', 'connection.Db', ([], {}), '()\n', (1417, 1419), False, 'from geo.db import connection\n')]
|
__author__ = 'aymgal'
# implementations of proximal operators adapted to sparsity
import numpy as np
from slitronomy.Util import util
def prox_sparsity_wavelets(coeffs_input, step, level_const=None, level_pixels=None, l_norm=1):
    """Threshold every wavelet scale except the last (coarse) one.

    Applies a hard (l0) or soft (l1) threshold of magnitude
    step * level_const[s] * level_pixels[s] to each fine scale s of the
    input coefficient cube. Returns the input untouched when step == 0.
    """
    if l_norm not in [0, 1]:
        raise ValueError("Sparsity proximal operator only defined with l0- and l1-norms")
    if step == 0:
        return coeffs_input
    coeffs = np.copy(coeffs_input)
    threshold_op = util.hard_threshold if l_norm == 0 else util.soft_threshold
    n_scales = coeffs.shape[0]
    # Skip the coarsest scale (index n_scales - 1): it carries the smooth residual.
    for scale in range(n_scales - 1):
        thresh = step
        if level_const is not None:
            thresh = thresh * level_const[scale]
        if level_pixels is not None:
            thresh = thresh * level_pixels[scale, :, :]
        coeffs[scale, :, :] = threshold_op(coeffs[scale, :, :], thresh)
    return coeffs
def prox_positivity(image_input):
    """Proximal operator of the positivity constraint: clip negatives to zero.

    Returns a new array; the input array is never modified.
    """
    return np.maximum(image_input, 0)
def full_prox_sparsity_positivity(image, transform, inverse_transform,
                                  weights, noise_levels, thresh, thresh_increm,
                                  n_scales, l_norm, formulation, force_positivity):
    """
    returns the proximal operator of the regularisation term

        g = lambda * |Phi^T HG|_0
    or
        g = lambda * |Phi^T HG|_1

    In the 'analysis' formulation `image` is thresholded in transformed
    (wavelet) space and mapped back; in the 'synthesis' formulation `image`
    is already a coefficient cube and is thresholded directly.
    NOTE(review): any other `formulation` value leaves `coeffs` undefined
    and raises a NameError -- confirm callers only pass these two values.
    """
    # Per-scale threshold in noise units; the first (finest) scale may get an
    # extra increment since it concentrates small-scale artefacts.
    level_const = thresh * np.ones(n_scales)
    level_const[0] += thresh_increm  # possibly a stronger threshold for first decomposition levels (small scales features)
    level_pixels = weights * noise_levels
    if formulation == 'analysis':
        coeffs = transform(image)
    elif formulation == 'synthesis':
        coeffs = image
    # apply proximal operator
    step = 1  # because threshold is already expressed in data units
    coeffs_proxed = prox_sparsity_wavelets(coeffs, step=step,
                                           level_const=level_const,
                                           level_pixels=level_pixels,
                                           l_norm=l_norm)
    if formulation == 'analysis':
        image_proxed = inverse_transform(coeffs_proxed)
    elif formulation == 'synthesis':
        image_proxed = coeffs_proxed
    if force_positivity and formulation == 'analysis':
        image_proxed = prox_positivity(image_proxed)
    # TODO: apply positivity also in 'synthesis' formulation (i.e. to coeffs in starlet space?)
    return image_proxed
|
[
"slitronomy.Util.util.soft_threshold",
"slitronomy.Util.util.hard_threshold",
"numpy.ones",
"numpy.copy"
] |
[((526, 547), 'numpy.copy', 'np.copy', (['coeffs_input'], {}), '(coeffs_input)\n', (533, 547), True, 'import numpy as np\n'), ((1118, 1138), 'numpy.copy', 'np.copy', (['image_input'], {}), '(image_input)\n', (1125, 1138), True, 'import numpy as np\n'), ((1599, 1616), 'numpy.ones', 'np.ones', (['n_scales'], {}), '(n_scales)\n', (1606, 1616), True, 'import numpy as np\n'), ((917, 961), 'slitronomy.Util.util.hard_threshold', 'util.hard_threshold', (['coeffs[s, :, :]', 'thresh'], {}), '(coeffs[s, :, :], thresh)\n', (936, 961), False, 'from slitronomy.Util import util\n'), ((1006, 1050), 'slitronomy.Util.util.soft_threshold', 'util.soft_threshold', (['coeffs[s, :, :]', 'thresh'], {}), '(coeffs[s, :, :], thresh)\n', (1025, 1050), False, 'from slitronomy.Util import util\n')]
|
import unittest
import jsonmask
fixture = {
"kind": "plus#activity",
"etag": "\"DOKFJGXi7L9ogpHc3dzouWOBEEg/ZiaatWNPRL3cQ-I-WbeQPR_yVa0\"",
"title": "Congratulations! You have successfully fetched an explicit public activity. The attached video is your...",
"published": "2011-09-08T21:17:41.232Z",
"updated": "2011-10-04T17:25:26.000Z",
"id": "z12gtjhq3qn2xxl2o224exwiqruvtda0i",
"url": "https://plus.google.com/102817283354809142195/posts/F97fqZwJESL",
"actor": {
"id": "102817283354809142195",
"displayName": "<NAME>",
"url": "https://plus.google.com/102817283354809142195",
"image": {
"url": "https://lh4.googleusercontent.com/-yth5HLY4Qi4/AAAAAAAAAAI/AAAAAAAAPVs/fAq4PVOVBdc/photo.jpg?sz=50"
}
},
"verb": "post",
"object": {
"objectType": "note",
"content": "Congratulations! You have successfully fetched an explicit public activity. The attached video is your reward. :)",
"url": "https://plus.google.com/102817283354809142195/posts/F97fqZwJESL",
"replies": {
"totalItems": 16,
"selfLink": "https://www.googleapis.com/plus/v1/activities/z12gtjhq3qn2xxl2o224exwiqruvtda0i/comments"
},
"plusoners": {
"totalItems": 44,
"selfLink": "https://www.googleapis.com/plus/v1/activities/z12gtjhq3qn2xxl2o224exwiqruvtda0i/people/plusoners"
},
"resharers": {
"totalItems": 1,
"selfLink": "https://www.googleapis.com/plus/v1/activities/z12gtjhq3qn2xxl2o224exwiqruvtda0i/people/resharers"
},
"attachments": [{
"objectType": "video",
"displayName": "<NAME> - Never Gonna Give You Up",
"content": "Music video by <NAME> performing Never Gonna Give You Up. YouTube view counts pre-VEVO: 2,573,462 (C) 1987 PWL",
"url": "http://www.youtube.com/watch?v=dQw4w9WgXcQ",
"image": {
"url": "https://lh3.googleusercontent.com/proxy/ex1bQ9_TpVClePgZxFmCPVxYeJUHW5dixt53FLmup-q44pd1mwO6rPIPti6tDWbjitBclMm5Ou595xPEMKq2b8Qu3mQ_TzX0kOqksE8o1w=w506-h284-n",
"type": "image/jpeg",
"height": 284,
"width": 506
},
"embed": {
"url": "http://www.youtube.com/v/dQw4w9WgXcQ&hl=en&fs=1&autoplay=1",
"type": "application/x-shockwave-flash"
}
}]
},
"provider": {
"title": "Google+"
},
"access": {
"kind": "plus#acl",
"description": "Public",
"items": [{
"type": "public"
}]
}
}
filter_tests = [{
"m": 'a', "o": None, "e": None
}, {
"m": 'a', "o": {"b": 1}, "e": None
}, {
"m": 'a', "o": [{"b": 1}], "e": None
}, {
"m": None, "o": {"a": 1}, "e": {"a": 1}
}, {
"m": '', "o": {"a": 1}, "e": {"a": 1}
}, {
"m": 'a', "o": {"a": 1, "b": 1}, "e": {"a": 1}
}, {
"m": 'notEmptyStr', "o": {"notEmptyStr": ''}, "e": {"notEmptyStr": ''}
}, {
"m": 'notEmptyNum', "o": {"notEmptyNum": 0}, "e": {"notEmptyNum": 0}
}, {
"m": 'a,b', "o": {"a": 1, "b": 1, "c": 1}, "e": {"a": 1, "b": 1}
}, {
"m": 'obj/s', "o": {"obj": {"s": 1, "t": 2}, "b": 1}, "e": {"obj": {"s": 1}}
}, {
"m": 'arr/s', "o": {"arr": [{"s": 1, "t": 2}, {"s": 2, "t": 3}], "b": 1}, "e": {"arr": [{"s": 1}, {"s": 2}]}
}, {
"m": 'a/s/g,b', "o": {"a": {"s": {"g": 1, "z": 1}}, "t": 2, "b": 1}, "e": {"a": {"s": {"g": 1}}, "b": 1}
}, {
"m": 'a/*/g', "o": {"a": {"s": {"g": 3}, "t": {"g": 4}, "u": {"z": 1}}, "b": 1}, "e": {"a": {"s": {"g": 3}, "t": {"g": 4}}}
}, {
"m": 'a/*', "o": {"a": {"s": {"g": 3}, "t": {"g": 4}, "u": {"z": 1}}, "b": 3}, "e": {"a": {"s": {"g": 3}, "t": {"g": 4}, "u": {"z": 1}}}
}, {
"m": 'a(g)', "o": {"a": [{"g": 1, "d": 2}, {"g": 2, "d": 3}]}, "e": {"a": [{"g": 1}, {"g": 2}]}
}, {
"m": 'a,c', "o": {"a": [], "c": {}}, "e": {"a": [], "c": {}}
}, {
"m": 'b(d/*/z)', "o": {"b": [{"d": {"g": {"z": 22}, "b": 34}}]}, "e": {"b": [{"d": {"g": {"z": 22}}}]
}
}, {
"m": 'url,obj(url,a/url)', "o": {"url": 1, "id": '1', "obj": {"url": 'h', "a": [{"url": 1, "z": 2}], "c": 3}}, "e": {"url": 1, "obj": {"url": 'h', "a": [{"url": 1}]}}
}, {
"m": 'kind', "o": fixture, "e": {"kind": 'plus#activity'}
}, {
"m": 'object(objectType)', "o": fixture, "e": {"object": {"objectType": 'note'}}
}, {
"m": 'url,object(content,attachments/url)', "o": fixture, "e": {
"url": 'https://plus.google.com/102817283354809142195/posts/F97fqZwJESL', "object": {
"content": 'Congratulations! You have successfully fetched an explicit public activity. The attached video is your reward. :)', "attachments": [{"url": 'http://www.youtube.com/watch?v=dQw4w9WgXcQ'}]
}
}
}, {
"m": 'i', "o": [{"i": 1, "o": 2}, {"i": 2, "o": 2}], "e": [{"i": 1}, {"i": 2}]
}]
compiler_tests = {
'a': {"a": {"type": 'object'}}, 'a,b,c': {
"a": {"type": 'object'}, "b": {"type": 'object'}, "c": {"type": 'object'}
}, 'a/*/c': {
"a": {"type": 'object', "properties": {
'*': {"type": 'object', "properties": {
"c": {"type": 'object'}
}}
}}
}, 'a,b(d/*/g,b),c': {
"a": {"type": 'object'}, "b": {"type": 'array', "properties": {
"d": {"type": 'object', "properties": {
'*': {"type": 'object', "properties": {
"g": {"type": 'object'}
}}
}}, "b": {"type": 'object'}
}}, "c": {"type": 'object'}
}
}
# Filter tests
filter_test_compiled_mask = {
"a": {"type": 'object'},
"b": {
"type": 'array',
"properties": {
"d": {
"type": 'object',
"properties": {
'*': {
"type": 'object',
"properties": {
"z": {"type": 'object'}
}
}
}
},
"b": {
"type": 'array',
"properties": {
"g": {"type": 'object'}
}
}
}
},
"c": {"type": 'object'}
}
filter_test_object = {
"a": 11,
"n": 00,
"b": [{
"d": {"g": {"z": 22}, "b": 34, "c": {"a": 32}},
"b": [{"z": 33}],
"k": 99
}],
"c": 44,
"g": 99
}
filter_test_expected = {
"a": 11,
"b": [{
"d": {
"g": {
"z": 22
}
}
}],
"c": 44
}
class TestCase(unittest.TestCase):
    """Container for the hand-written filter test; parametrized tests are attached below via setattr."""
    def test_filter(self):
        # Applies the pre-compiled mask fixture to the object fixture and
        # compares against the expected filtered result.
        actual = jsonmask.apply_mask(filter_test_object, filter_test_compiled_mask)
        self.assertEqual(filter_test_expected, actual)
def make_test(test):
    """Build an unbound test method asserting that masking *o* with *m* yields *e*."""
    expected, original, mask = test['e'], test['o'], test['m']

    def _test(self):
        self.assertEqual(expected, jsonmask.Mask(mask)(original))

    _test.__doc__ = 'm = %s original = %s expected = %s\n' % (mask, original, expected)
    return _test
def make_compiler_test(sel, expected_compiled):
    """Build an unbound test method asserting that compiling *sel* yields *expected_compiled*."""
    def _test(self):
        compiled = jsonmask.compile_mask(sel)
        self.assertEqual(expected_compiled, compiled)
    _test.__doc__ = 'sel = %s expected = %s' % (sel, expected_compiled)
    return _test
# Attach one generated test method to TestCase per fixture entry, so each
# fixture shows up as an individual test in the unittest report.
for i, test in enumerate(filter_tests):
    setattr(TestCase, 'test_filter_%s' % i, make_test(test))
for i, (sel, expected) in enumerate(compiler_tests.items()):
    setattr(TestCase, 'test_compiler_%s' %
            i, make_compiler_test(sel, expected))
if __name__ == '__main__':
    unittest.main()
|
[
"unittest.main",
"jsonmask.compile_mask",
"jsonmask.Mask",
"jsonmask.apply_mask"
] |
[((7481, 7496), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7494, 7496), False, 'import unittest\n'), ((6597, 6663), 'jsonmask.apply_mask', 'jsonmask.apply_mask', (['filter_test_object', 'filter_test_compiled_mask'], {}), '(filter_test_object, filter_test_compiled_mask)\n', (6616, 6663), False, 'import jsonmask\n'), ((7073, 7099), 'jsonmask.compile_mask', 'jsonmask.compile_mask', (['sel'], {}), '(sel)\n', (7094, 7099), False, 'import jsonmask\n'), ((6845, 6861), 'jsonmask.Mask', 'jsonmask.Mask', (['m'], {}), '(m)\n', (6858, 6861), False, 'import jsonmask\n')]
|
# python 3
import sys
import os
from java import jclass
from inspect import isfunction
from venv import logger
class CompileMixin:
    """Mixin that turns Python source-code strings into callable functions."""

    @staticmethod
    def _compile(code, funcName):
        """Execute *code* and return the function it defines under *funcName*.

        Args:
            code: Python source text defining at least one function.
            funcName: name of the function to extract from the executed namespace.

        Returns:
            The function object bound to *funcName*.

        Raises:
            ValueError: if *funcName* is missing or is not a plain function.
            Exception: whatever *code* itself raises while executing (logged, re-raised).

        SECURITY: exec() runs arbitrary code -- only ever feed trusted sources.
        """
        ns = {}
        try:
            exec(code, ns)
        except Exception as e:
            logger.error("code: `{0}` 编译时出错,exception: {1}".format(code, e))
            raise  # bare raise preserves the original traceback
        # Use .get() so a missing name reaches the explicit "function not
        # found" ValueError below instead of escaping as a bare KeyError.
        candidate = ns.get(funcName)
        if not isfunction(candidate):
            logger.error("code: `{0}` 没有找到可用的函数".format(code))
            raise ValueError("Code Error , function not found")
        return candidate
def call(source, funcName, params):
    """Compile *source*, print the resulting function object and invoke it with *params*."""
    compiled = CompileMixin._compile(source, funcName)
    print("func: ", compiled)
    return compiled(params)
def call1(source, funcName, params):
    """Compile *source*, call *funcName* and wrap the result in a Java ResultOfCall.

    The compiled function must return a pair shaped like [Object[], Object[]]:
    element 0 becomes the 'complete' payload, element 1 the 'intermission' one.
    """
    # The function must return a value shaped as [Object[], Object[]]
    func = CompileMixin._compile(source, funcName)
    #print("func: ", func)
    # Bridge into the JVM: instantiate the Java result container.
    ResultOfCall = jclass("com.mrl.communicate.middle.ResultOfCall")
    result = ResultOfCall()
    resultRaw = func(params)
    result.setComplete(resultRaw[0])
    result.setIntermission(resultRaw[1])
    return result
|
[
"inspect.isfunction",
"java.jclass"
] |
[((988, 1037), 'java.jclass', 'jclass', (['"""com.mrl.communicate.middle.ResultOfCall"""'], {}), "('com.mrl.communicate.middle.ResultOfCall')\n", (994, 1037), False, 'from java import jclass\n'), ((441, 468), 'inspect.isfunction', 'isfunction', (['may_be_function'], {}), '(may_be_function)\n', (451, 468), False, 'from inspect import isfunction\n')]
|
from abc import ABC
from copy import copy
from typing import List, Iterable, Optional, Union
from cardbuilder.common import Language
from cardbuilder.input.word import Word, WordForm
class WordList(ABC):
    """Abstract base for all word lists.

    Wraps a sequence of Word objects and mimics the built-in list protocol:
    iteration, len(), integer indexing, and slicing (slicing yields a shallow
    copy of the WordList holding only the sliced words).
    """

    def __init__(self, word_input_forms: Iterable[str], language: Language, additional_forms: Optional[List[WordForm]]):
        """
        Args:
            word_input_forms: Raw input form of each word in the list.
            language: Language shared by every word in the list.
            additional_forms: Extra forms (e.g. conjugations) each word can be retrieved under.
        """
        self.words = [Word(form, language, additional_forms) for form in word_input_forms]

    def __getitem__(self, index: Union[int, slice]) -> Union[Word, 'WordList']:
        if isinstance(index, slice):
            sliced = copy(self)
            sliced.words = self.words[index]
            return sliced
        if isinstance(index, int):
            return self.words[index]
        raise TypeError('WordList indices must be either integers or slices')

    def __iter__(self):
        return iter(self.words)

    def __len__(self):
        return len(self.words)

    def __repr__(self):
        return repr(self.words)
|
[
"copy.copy",
"cardbuilder.input.word.Word"
] |
[((856, 900), 'cardbuilder.input.word.Word', 'Word', (['input_form', 'language', 'additional_forms'], {}), '(input_form, language, additional_forms)\n', (860, 900), False, 'from cardbuilder.input.word import Word, WordForm\n'), ((1175, 1185), 'copy.copy', 'copy', (['self'], {}), '(self)\n', (1179, 1185), False, 'from copy import copy\n')]
|
import os
import pickle
EXTENSION = ".citygraph"
def _fix_path(path):
    """Default *path* to the current directory and verify it is an existing folder.

    Raises FileNotFoundError when the directory does not exist.
    """
    folder = os.getcwd() if path is None else path
    if not os.path.isdir(folder):
        raise FileNotFoundError("CityGraph.city_io: " + folder + " does not exist")
    return folder
def _get_abs_path(city, path):
    """Return the absolute file name storing *city* inside directory *path*.

    Args:
        city: either a city name (str) or an instance of City (its .name is used).
        path: absolute path to a folder with read/write access.
    """
    if isinstance(city, str):
        city_name = city
    else:
        city_name = city.name
    # os.path.join handles separators correctly on every platform
    # (the previous os.sep.join could produce doubled separators).
    return os.path.join(path, city_name) + EXTENSION
def is_saved(city, path=None):
    """
    Returns True if a city of the same name as already been saved.

    :param city: city or city name
    :type city: :py:class:`City<city_graph.city.City>` or str
    :param str path: path of the folder where cities are saved. default: current directory
    :returns: True if a city of the same name has already been saved.
    :rtype: bool
    """
    # _fix_path defaults to the CWD and validates the directory exists.
    folder = _fix_path(path)
    return os.path.isfile(_get_abs_path(city, folder))
def save(city, path=None, overwrite=False):
    """
    Save the city in a file.

    :param obj or str city: city to save (:py:class:`City<city_graph.city.City>` or str)
    :param str path: path of the folder where cities are saved. default: currrent directory
    :param bool overwrite: if True, will overwrite any saved city of the same name.
                           default: False
    :returns: the path of the file into which the city was saved
    :raises: :py:class:`FileNotFoundError`: if path does not exist
    :raises: :py:class:`FileExists`: if overwrite is False
             and a city of the same name has already been saved.
    """
    # set path to current directory if None.
    # raise Exception if path does not exist
    path = _fix_path(path)
    # path to the file
    path = _get_abs_path(city, path)
    # refuse to clobber an existing file unless explicitly allowed
    if os.path.exists(path):
        if not overwrite:
            # single formatted message (previously three separate args
            # rendered as a tuple, and "already" was misspelled)
            raise FileExistsError(
                "CityGraph.city_io: can not save in " + path + " (already exists)")
    with open(path, "wb") as f:
        pickle.dump(city, f)
    return path
def load(city_name, path=None):
    """
    :param str city_name: name of the city to load
    :param str path: path of the folder where cities are saved. default: current directory
    :raises: :py:class:`FileNotFoundError`: if no city of this name has been saved
    :returns: An instance of :py:class:`City<city_graph.city.City>`
    """
    if not is_saved(city_name, path):
        # Use city_name in the message: `path` may be None here (its default),
        # and "None" + str concatenation would raise a misleading TypeError.
        raise FileNotFoundError("loading city: " + city_name + " does not exist")
    path = _get_abs_path(city_name, _fix_path(path))
    with open(path, "rb") as f:
        city = pickle.load(f)
    return city
|
[
"pickle.dump",
"os.getcwd",
"os.path.isdir",
"os.path.exists",
"os.path.isfile",
"pickle.load",
"os.sep.join"
] |
[((1309, 1329), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (1323, 1329), False, 'import os\n'), ((2197, 2217), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2211, 2217), False, 'import os\n'), ((254, 265), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (263, 265), False, 'import os\n'), ((277, 296), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (290, 296), False, 'import os\n'), ((662, 692), 'os.sep.join', 'os.sep.join', (['[path, city_name]'], {}), '([path, city_name])\n', (673, 692), False, 'import os\n'), ((2383, 2403), 'pickle.dump', 'pickle.dump', (['city', 'f'], {}), '(city, f)\n', (2394, 2403), False, 'import pickle\n'), ((2981, 2995), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2992, 2995), False, 'import pickle\n')]
|
import factory
import random
from faker import Faker
from titlecase import titlecase
from django.contrib.auth import get_user_model
from django.utils.text import slugify
from django.utils.timezone import get_current_timezone
from .models import Category, Post, RelatedLinkGroup, RelatedLink, Blog, Comment, AuthorPage
def gen_headline():
    """Generate a short Title Case headline with no trailing period.

    faker has no headline provider, so take a short fake text, strip the
    final period and title-case it.
    """
    raw = Faker().text(max_nb_chars=48)
    return titlecase(raw.rstrip('.'))
def gen_html_content():
    """Generate fake HTML body text: each fake paragraph wrapped in <p> tags.

    faker has no raw-HTML provider, so wrap the output of fake.paragraphs().
    """
    fake = Faker()
    return "".join("<p>{}</p>\n\n".format(graf) for graf in fake.paragraphs())
def gen_comment_body():
    """Generate a fake comment body: three sentences separated by blank lines.

    faker only returns sentences as a list, so join them ourselves.
    """
    return "\n\n".join(Faker().sentences(3))
def gen_tags():
    """Return 1-5 distinct random tags drawn from a fixed pool of 15 names.

    A fixed pool keeps the tag table small instead of creating a bazillion
    unique random tags across generated posts.
    """
    pool = [
        'Linux', 'Mac OS', 'Windows', 'Python', 'Perl', 'Rust', 'Go',
        'JavaScript', 'Java', 'Swift', 'C++', 'PHP', 'CSS', 'SASS', 'SQL', ]
    count = random.randint(1, 5)
    return random.sample(pool, count)
class CategoryFactory(factory.django.DjangoModelFactory):
    """Build Category rows; reuses an existing row when the slug already exists."""
    class Meta:
        model = Category
        django_get_or_create = ('slug', )
    title = factory.Faker('catch_phrase')
    slug = factory.LazyAttribute(lambda o: slugify(o.title)[:20])  # derived from title, capped at 20 chars
class PostFactory(factory.django.DjangoModelFactory):
    """Build Post rows: fake headline/HTML body, a random existing author, and random tags."""
    class Meta:
        model = Post
    title = factory.LazyAttribute(lambda o: gen_headline())
    slug = factory.LazyAttribute(lambda o: slugify(o.title)[:48])  # derived from title, capped at 48 chars
    content = factory.LazyAttribute(lambda o: gen_html_content())
    # Pick a random already-existing user; assumes users exist before posts are built.
    author = factory.LazyAttribute(lambda o: get_user_model().objects.all().order_by('?').first())
    summary = factory.Faker('text')
    pub_date = factory.Faker('date_time_this_decade', tzinfo=get_current_timezone())
    @factory.post_generation
    # Attach the tags produced by gen_tags() (1-5 random tags) to this post
    def add_tags(self, build, extracted, **kwargs):
        for tag in gen_tags():
            self.tags.add(tag)
class CommentFactory(factory.django.DjangoModelFactory):
    """Build pre-approved Comment rows with fake author details."""
    # MUST be called with a Post object as parent.
    class Meta:
        model = Comment
    approved = factory.LazyAttribute(lambda o: True)  # generated comments skip moderation
    body = factory.LazyAttribute(lambda o: gen_comment_body())
    email = factory.Faker('safe_email')
    name = factory.Faker('name')
    website = factory.Faker('url')
    ip_address = factory.Faker('ipv4')
    user_agent = factory.Faker('user_agent')
class RelatedLinkFactory(factory.django.DjangoModelFactory):
    """Build one RelatedLink (fake site title + URL) belonging to a RelatedLinkGroup."""
    class Meta:
        model = RelatedLink
    # Must pass in a RelatedLinkGroup as group= when instantiating
    # i.e. Most useful when instantiating a RelatedLinkGroupFactory
    site_title = factory.Faker('company')
    site_url = factory.Faker('url')
class RelatedLinkGroupFactory(factory.django.DjangoModelFactory):
    """Build a RelatedLinkGroup (by slug) populated with five generated RelatedLinks."""
    class Meta:
        model = RelatedLinkGroup
        django_get_or_create = ('slug', )
    @factory.post_generation
    # Create five related links attached to this group
    def related_links(self, build, extracted, **kwargs):
        RelatedLinkFactory.create_batch(5, group=self)
class BlogFactory(factory.django.DjangoModelFactory):
    """Build Blog rows with a fake company-style title and tagline."""
    class Meta:
        model = Blog
    title = factory.Faker('company')
    slug = factory.LazyAttribute(lambda o: slugify(o.title)[:30])  # derived from title, capped at 30 chars
    tagline = factory.Faker('bs')
class AuthorPageFactory(factory.django.DjangoModelFactory):
    """Build an AuthorPage for a random existing user with a fake about text."""
    class Meta:
        model = AuthorPage
    # Pick a random already-existing user; assumes users exist before pages are built.
    user = factory.LazyAttribute(lambda o: get_user_model().objects.all().order_by('?').first())
    about = factory.Faker('text')
|
[
"factory.Faker",
"django.utils.timezone.get_current_timezone",
"random.randint",
"faker.Faker",
"django.contrib.auth.get_user_model",
"django.utils.text.slugify",
"factory.LazyAttribute"
] |
[((458, 465), 'faker.Faker', 'Faker', ([], {}), '()\n', (463, 465), False, 'from faker import Faker\n'), ((650, 657), 'faker.Faker', 'Faker', ([], {}), '()\n', (655, 657), False, 'from faker import Faker\n'), ((903, 910), 'faker.Faker', 'Faker', ([], {}), '()\n', (908, 910), False, 'from faker import Faker\n'), ((1564, 1593), 'factory.Faker', 'factory.Faker', (['"""catch_phrase"""'], {}), "('catch_phrase')\n", (1577, 1593), False, 'import factory\n'), ((2059, 2080), 'factory.Faker', 'factory.Faker', (['"""text"""'], {}), "('text')\n", (2072, 2080), False, 'import factory\n'), ((2525, 2562), 'factory.LazyAttribute', 'factory.LazyAttribute', (['(lambda o: True)'], {}), '(lambda o: True)\n', (2546, 2562), False, 'import factory\n'), ((2638, 2665), 'factory.Faker', 'factory.Faker', (['"""safe_email"""'], {}), "('safe_email')\n", (2651, 2665), False, 'import factory\n'), ((2677, 2698), 'factory.Faker', 'factory.Faker', (['"""name"""'], {}), "('name')\n", (2690, 2698), False, 'import factory\n'), ((2713, 2733), 'factory.Faker', 'factory.Faker', (['"""url"""'], {}), "('url')\n", (2726, 2733), False, 'import factory\n'), ((2751, 2772), 'factory.Faker', 'factory.Faker', (['"""ipv4"""'], {}), "('ipv4')\n", (2764, 2772), False, 'import factory\n'), ((2790, 2817), 'factory.Faker', 'factory.Faker', (['"""user_agent"""'], {}), "('user_agent')\n", (2803, 2817), False, 'import factory\n'), ((3078, 3102), 'factory.Faker', 'factory.Faker', (['"""company"""'], {}), "('company')\n", (3091, 3102), False, 'import factory\n'), ((3118, 3138), 'factory.Faker', 'factory.Faker', (['"""url"""'], {}), "('url')\n", (3131, 3138), False, 'import factory\n'), ((3573, 3597), 'factory.Faker', 'factory.Faker', (['"""company"""'], {}), "('company')\n", (3586, 3597), False, 'import factory\n'), ((3678, 3697), 'factory.Faker', 'factory.Faker', (['"""bs"""'], {}), "('bs')\n", (3691, 3697), False, 'import factory\n'), ((3913, 3934), 'factory.Faker', 'factory.Faker', (['"""text"""'], {}), "('text')\n", 
(3926, 3934), False, 'import factory\n'), ((1386, 1406), 'random.randint', 'random.randint', (['(1)', '(5)'], {}), '(1, 5)\n', (1400, 1406), False, 'import random\n'), ((2142, 2164), 'django.utils.timezone.get_current_timezone', 'get_current_timezone', ([], {}), '()\n', (2162, 2164), False, 'from django.utils.timezone import get_current_timezone\n'), ((1637, 1653), 'django.utils.text.slugify', 'slugify', (['o.title'], {}), '(o.title)\n', (1644, 1653), False, 'from django.utils.text import slugify\n'), ((1857, 1873), 'django.utils.text.slugify', 'slugify', (['o.title'], {}), '(o.title)\n', (1864, 1873), False, 'from django.utils.text import slugify\n'), ((3641, 3657), 'django.utils.text.slugify', 'slugify', (['o.title'], {}), '(o.title)\n', (3648, 3657), False, 'from django.utils.text import slugify\n'), ((1991, 2007), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (2005, 2007), False, 'from django.contrib.auth import get_user_model\n'), ((3847, 3863), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (3861, 3863), False, 'from django.contrib.auth import get_user_model\n')]
|
from __future__ import print_function
import argparse
import httplib
import json
import re
# VERSION 0.1
# FROM https://github.com/jongho/kafka-burrow-telegraf-reporter
# This code was written with inspiration from kafka_jolokia_reporter.py (https://github.com/paksu/kafka-jolokia-telegraf-collector)
def get_http_response(conn, path):
conn.request("GET", path)
response = conn.getresponse()
assert response.status == 200
return response.read()
def get_clusters_from_burrow(conn):
path="/v2/kafka"
#print(path)
response = json.loads(get_http_response(conn, path))
if 'clusters' not in response:
return []
return response['clusters']
def get_consumers_from_burrow(conn, cluster):
path="/v2/kafka/{}/consumer".format(cluster)
#print(path)
response = json.loads(get_http_response(conn, path))
if 'consumers' not in response:
return []
return response['consumers']
def get_consumer_lag_status_from_burrow(conn, cluster, consumer):
path="/v2/kafka/{}/consumer/{}/lag".format(cluster, consumer)
#print(path)
response = json.loads(get_http_response(conn, path))
if 'status' not in response:
return {}
return response['status']
def fetch_consumer_lags_from_burrow(host, port):
conn = httplib.HTTPConnection(host, port)
consumer_lags = []
for cluster in get_clusters_from_burrow(conn):
for consumer in get_consumers_from_burrow(conn, cluster):
consumer_lags.append(get_consumer_lag_status_from_burrow(conn, cluster, consumer))
conn.close()
return consumer_lags
def get_formated_str(dictionary, keys, prefix=''):
return ','.join(['{}{}={}'.format(prefix, k, dictionary[k]) for k in keys])
def translate_lag_data(lag_data):
"""
Parses a Kafka Consumer Lag data from burrow and converts it to set of InfluxDB Line protocol
Currently supports at least Kafka 0.10.2 and Burrow (https://github.com/linkedin/Burrow 2017-03-07 commit)
https://github.com/linkedin/Burrow
https://docs.influxdata.com/influxdb/v1.2/write_protocols/line_protocol_reference/
EXAMPLE:
- FROM:
{
"cluster": "test",
"complete": true,
"group": "TestGroup",
"maxlag": null,
"partition_count": 1,
"partitions": [
{
"end": {
"lag": 0,
"max_offset": 14132620,
"offset": 14132620,
"timestamp": 1491449760344
},
"partition": 0,
"start": {
"lag": 0,
"max_offset": 14132620,
"offset": 14132620,
"timestamp": 1491449751328
},
"status": "OK",
"topic": "Common-Test"
},
...
],
"status": "OK",
"totallag": 0
}
- TO:
kafka.consumer.lag,cluster=test,group=TestGroup complete=True,totallag=0,partition_count=1
kafka.consumer.tp.lag,cluster=test,group=TestGroup,topic=Common-Test,partition=0 start.lag=0,start.max_offset=14132620,start.offset=14132620,start.timestamp=1491449751328,end.lag=0,end.max_offset=14132620,end.offset=14132620,end.timestamp=1491449751328
...
"""
metrics = []
# kafka.consumer.lag
lag_measurement = 'kafka.consumer.lag'
lag_tags = get_formated_str(lag_data, ['cluster', 'group'])
lag_fields = get_formated_str(lag_data, ['complete', 'totallag', 'partition_count'])
#print("lag_tags: {}".format(lag_tags))
#print("lag_fields: {}".format(lag_fields))
metrics.append("{},{} {}".format(lag_measurement, lag_tags, lag_fields))
# kafka.consumer.tp.lag
tp_lag_measurement = 'kafka.consumer.tp.lag'
for tp_lag_data in lag_data['partitions']:
#print("tp_lag_data: {}".format(tp_lag_data))
tg_lag_tags = lag_tags + ',' + get_formated_str(tp_lag_data, ['topic', 'partition'])
tg_lag_fields = get_formated_str(tp_lag_data['start'], ['lag', 'max_offset', 'offset', 'timestamp'], 'start.') + ',' + \
get_formated_str(tp_lag_data['end'], ['lag', 'max_offset', 'offset', 'timestamp'], 'end.')
metrics.append("{},{} {}".format(tp_lag_measurement, tg_lag_tags, tg_lag_fields))
return metrics
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Kafka Burrow Reporter')
parser.add_argument('--burrow-host', default='localhost', help='Burrow host')
parser.add_argument('--burrow-port', type=int, default=8000, help='Burrow port')
args = parser.parse_args()
for lag_data in fetch_consumer_lags_from_burrow(args.burrow_host, args.burrow_port):
for line in translate_lag_data(lag_data):
print(line)
|
[
"httplib.HTTPConnection",
"argparse.ArgumentParser"
] |
[((1299, 1333), 'httplib.HTTPConnection', 'httplib.HTTPConnection', (['host', 'port'], {}), '(host, port)\n', (1321, 1333), False, 'import httplib\n'), ((4514, 4574), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Kafka Burrow Reporter"""'}), "(description='Kafka Burrow Reporter')\n", (4537, 4574), False, 'import argparse\n')]
|
import os
import uuid
import paramiko
import requests
def random_filename(filename):
ext = os.path.splitext(filename)[1]
new_filename = uuid.uuid4().hex + ext
return new_filename
def sftp_upload(host, port, username, password, local, remote):
sf = paramiko.Transport(host, port)
sf.connect(username=username, password=password)
sftp = paramiko.SFTPClient.from_transport(sf)
try:
if os.path.isdir(local): # 判断本地参数是目录还是文件
for f in os.listdir(local): # 遍历本地目录
sftp.put(os.path.join(local + f), os.path.join(remote + f)) # 上传目录中的文件
else:
sftp.put(local, remote) # 上传文件
except Exception as e:
print('upload exception:', e)
sf.close()
|
[
"os.listdir",
"uuid.uuid4",
"os.path.isdir",
"paramiko.Transport",
"os.path.splitext",
"os.path.join",
"paramiko.SFTPClient.from_transport"
] |
[((269, 299), 'paramiko.Transport', 'paramiko.Transport', (['host', 'port'], {}), '(host, port)\n', (287, 299), False, 'import paramiko\n'), ((364, 402), 'paramiko.SFTPClient.from_transport', 'paramiko.SFTPClient.from_transport', (['sf'], {}), '(sf)\n', (398, 402), False, 'import paramiko\n'), ((98, 124), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (114, 124), False, 'import os\n'), ((423, 443), 'os.path.isdir', 'os.path.isdir', (['local'], {}), '(local)\n', (436, 443), False, 'import os\n'), ((147, 159), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (157, 159), False, 'import uuid\n'), ((483, 500), 'os.listdir', 'os.listdir', (['local'], {}), '(local)\n', (493, 500), False, 'import os\n'), ((537, 560), 'os.path.join', 'os.path.join', (['(local + f)'], {}), '(local + f)\n', (549, 560), False, 'import os\n'), ((562, 586), 'os.path.join', 'os.path.join', (['(remote + f)'], {}), '(remote + f)\n', (574, 586), False, 'import os\n')]
|
#
# キャプチャー画像を推定する
# キャプチャー画像を100x100にリサイズする
#
#---------------------------------------------------------
#import keras
import tensorflow as tf
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.layers import *
from tensorflow.python.keras.models import load_model
import numpy as np
import os
import serial
import time
from PIL import Image
import cv2
# 学習済みモデルのロード
model = load_model("./original_img.h5")
model.summary()
#['c', 'p', 'g', 'b']-> [0, 1, 2, 3]
# 動作確認用
img_arr = []
image = Image.open('./img/g/0.png')
image = image.convert("RGB")
image = image.resize((100, 100))
data = np.asarray(image)
img_arr.append(data)
img_arr = np.array(img_arr)
img_arr = img_arr.astype('float32')/255
img_arr.shape[:]
y_pred = model.predict(img_arr)
print(y_pred)
#['c', 'p', 'g', 'b']-> [0, 1, 2, 3]
# ビデオ初期化
#img_cap = cv2.VideoCapture(0)
img_cap = cv2.VideoCapture(1)
#with serial.Serial('/dev/cu.usbmodem14301', timeout=0.1) as ser:
while True:
# ビデオ画像の処理
img_arr = []
ret, img_base = img_cap.read()
xp = int(img_base.shape[1]) #1920
yp = int(img_base.shape[0]) #1080
cx = int(xp/2)
cy = int(yp/2)
#print(xp, " + ", yp)
resize = 100
img_crop = cv2.resize(img_base[cy-500:cy+500, cx-500:cx+500], (resize, resize))
cv2.imshow('Images for CNN', img_crop)
imgCV_RGB = img_crop[:, :, ::-1]
img_pil = Image.fromarray(imgCV_RGB)
data = np.asarray(img_pil)
img_arr.append(data)
img_arr = np.array(img_arr)
img_arr = img_arr.astype('float32')/255
img_arr.shape[:]
# 予測
#['c', 'p', 'g', 'b']-> [0, 1, 2, 3]
y_pred = model.predict(img_arr)
#print(y_pred)
# 結果の表示
if y_pred[0].argmax() == 0:
if (y_pred[0][0] > 0.7):
print("")
elif y_pred[0].argmax() == 1:
if (y_pred[0][1] > 0.7):
print("グー!!")
elif y_pred[0].argmax() == 2:
if (y_pred[0][2] > 0.7):
print("チョキ!!")
elif y_pred[0].argmax() == 3:
if (y_pred[0][3] > 0.7):
print("パー!!")
if cv2.waitKey(10) == 27:
break
# ビデオ開放
cv2.destroyAllWindows()
|
[
"tensorflow.python.keras.models.load_model",
"cv2.waitKey",
"numpy.asarray",
"cv2.imshow",
"PIL.Image.open",
"cv2.VideoCapture",
"numpy.array",
"PIL.Image.fromarray",
"cv2.destroyAllWindows",
"cv2.resize"
] |
[((405, 436), 'tensorflow.python.keras.models.load_model', 'load_model', (['"""./original_img.h5"""'], {}), "('./original_img.h5')\n", (415, 436), False, 'from tensorflow.python.keras.models import load_model\n'), ((520, 547), 'PIL.Image.open', 'Image.open', (['"""./img/g/0.png"""'], {}), "('./img/g/0.png')\n", (530, 547), False, 'from PIL import Image\n'), ((617, 634), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (627, 634), True, 'import numpy as np\n'), ((666, 683), 'numpy.array', 'np.array', (['img_arr'], {}), '(img_arr)\n', (674, 683), True, 'import numpy as np\n'), ((879, 898), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(1)'], {}), '(1)\n', (895, 898), False, 'import cv2\n'), ((2113, 2136), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2134, 2136), False, 'import cv2\n'), ((1223, 1299), 'cv2.resize', 'cv2.resize', (['img_base[cy - 500:cy + 500, cx - 500:cx + 500]', '(resize, resize)'], {}), '(img_base[cy - 500:cy + 500, cx - 500:cx + 500], (resize, resize))\n', (1233, 1299), False, 'import cv2\n'), ((1296, 1334), 'cv2.imshow', 'cv2.imshow', (['"""Images for CNN"""', 'img_crop'], {}), "('Images for CNN', img_crop)\n", (1306, 1334), False, 'import cv2\n'), ((1387, 1413), 'PIL.Image.fromarray', 'Image.fromarray', (['imgCV_RGB'], {}), '(imgCV_RGB)\n', (1402, 1413), False, 'from PIL import Image\n'), ((1426, 1445), 'numpy.asarray', 'np.asarray', (['img_pil'], {}), '(img_pil)\n', (1436, 1445), True, 'import numpy as np\n'), ((1485, 1502), 'numpy.array', 'np.array', (['img_arr'], {}), '(img_arr)\n', (1493, 1502), True, 'import numpy as np\n'), ((2067, 2082), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (2078, 2082), False, 'import cv2\n')]
|
import os
import logging
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
logging.getLogger('tensorflow').disabled = True
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
from tqdm import tqdm, tqdm_notebook
from augment import CTAugment
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
import math
from error import test_error
import logging
class OurCosineDecay(tf.keras.experimental.CosineDecay):
def __call__(self, step):
with ops.name_scope_v2(self.name or "CosineDecay"):
initial_learning_rate = ops.convert_to_tensor_v2(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
decay_steps = math_ops.cast(self.decay_steps, dtype)
global_step_recomp = math_ops.cast(step, dtype)
global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
completed_fraction = global_step_recomp / decay_steps
cosine_decayed = math_ops.cos(
constant_op.constant(7 / 16 * math.pi) * completed_fraction)
decayed = (1 - self.alpha) * cosine_decayed + self.alpha
return math_ops.multiply(initial_learning_rate, decayed)
def training(model, full_x_l, full_x_u, full_y_l, hparams, n_classes, file_name, log_interval=200):
def weak_transformation(x):
x = tf.image.random_flip_left_right(x)
max_shift = tf.cast(x.shape[1] * 0.125, dtype=tf.dtypes.int32)
shift = tf.random.uniform([x.shape[0], 2], minval=-max_shift, maxval=max_shift, dtype=tf.dtypes.int32)
return tfa.image.translate(x, tf.cast(shift, tf.dtypes.float32))
def pseudolabel(class_dist):
argmax = tf.math.argmax(class_dist, axis=1)
return tf.one_hot(argmax, class_dist.shape[1])
def threshold_gate(one_hot, logits, threshold):
max_probs = tf.math.multiply(one_hot, tf.nn.softmax(logits))
return tf.cast(max_probs > threshold, max_probs.dtype) # * max_probs
def sample_labeled_data(ds=full_x_l, y=full_y_l, batch_size=hparams['B']):
total_samples = ds.shape[0]
if total_samples >= batch_size:
choices = np.random.choice(np.arange(total_samples), batch_size, replace=False)
else:
choices = np.random.choice(np.arange(total_samples), batch_size, replace=True)
x_l = ds[choices, :, :, :]
y_l = y[choices]
return x_l, y_l
def step(x_l, y_l, x_u):
with tf.GradientTape() as tape:
# labeled data
x_l_weak = weak_transformation(x_l)
output_l_weak = model(x_l_weak, True)
loss_l = loss_fn_l(y_l, output_l_weak)
# update CTAugment weights
x_l_strong, choices, bins = cta.augment_batch(x_l)
output_l_strong = model(x_l_strong, True)
cta.update_weights_batch(y_l, output_l_strong, choices, bins)
# unlabeled data
x_u_weak = weak_transformation(x_u)
output_u_weak = model(x_u_weak, True)
y_u = pseudolabel(output_u_weak)
y_u = threshold_gate(y_u, output_u_weak, hparams['tau'])
x_u_strong, choices, bins = cta.augment_batch(x_u)
output_u_strong = model(x_u_strong, True)
loss_u = loss_fn_u(y_u, output_u_strong)
# add losses together
loss = loss_l + hparams['lamda'] * loss_u
gradients = tape.gradient(loss, model.trainable_weights)
optimizer.apply_gradients(zip(gradients, model.trainable_weights))
schedule = OurCosineDecay(hparams['eta'], hparams['K'])
#optimizer = tf.keras.optimizers.SGD(schedule, momentum=hparams['beta'], nesterov=hparams['nesterov'])
optimizer = tfa.optimizers.SGDW(hparams['weight_decay'],
schedule, momentum=hparams['beta'],
nesterov=hparams['nesterov'])
loss_fn_u = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
loss_fn_l = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
loss_fn_u = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
loss_fn_l = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
cta = CTAugment(hparams['cta_classes'], hparams['cta_decay'], hparams['cta_threshold'], hparams['cta_depth'])
# ds_l = tf.data.Dataset.from_tensor_slices((full_x_l, full_y_l))
ds_u = tf.data.Dataset.from_tensor_slices(full_x_u)
# split into batches
# ds_l = ds_l.batch(hparams['B']).prefetch(-1)
ds_u = ds_u.batch(int(hparams['mu'] * hparams['B'])).prefetch(-1)
# if type casting needed: x = tf.cast(x, tf.float32)
training_step = 0
epoch = 0
best_training_accuracy = 0
# for epoch in range(hparams['epochs']):
# for (x_l, y_l), x_u in tqdm(zip(ds_l, ds_u), desc='epoch {}/{}'.format(epoch + 1, hparams['epochs']),
# total=val_interval, ncols=100, ascii=True):
# training_step += 1
# step(x_l, y_l, x_u)
#for epoch in range(hparams['epochs']):
while training_step < hparams['K']:
epoch += 1
for x_u in tqdm(ds_u, desc='epoch {}'.format(epoch),
total=hparams['total'], ncols=100, ascii=True):
training_step += 1
x_l, y_l = sample_labeled_data()
step(x_l, y_l, x_u)
if training_step >= hparams['K']:
break
err = test_error(model, full_x_l, full_y_l)
logging.info('epoch: {}, labeled accuracy: {}'.format(epoch, err))
if err > best_training_accuracy:
best_training_accuracy = err
tf.keras.models.save_model(model, filepath=file_name)
return model
|
[
"tensorflow.python.framework.constant_op.constant",
"tensorflow.keras.losses.CategoricalCrossentropy",
"numpy.arange",
"error.test_error",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.nn.softmax",
"tensorflow.one_hot",
"tensorflow.random.uniform",
"tensorflow.python.ops.math_ops.minimum",
"tensorflow.cast",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.framework.ops.convert_to_tensor_v2",
"tensorflow.python.framework.ops.name_scope_v2",
"tensorflow.image.random_flip_left_right",
"augment.CTAugment",
"tensorflow.math.argmax",
"tensorflow_addons.optimizers.SGDW",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.python.ops.math_ops.multiply",
"tensorflow.keras.models.save_model",
"tensorflow.GradientTape",
"logging.getLogger"
] |
[((67, 98), 'logging.getLogger', 'logging.getLogger', (['"""tensorflow"""'], {}), "('tensorflow')\n", (84, 98), False, 'import logging\n'), ((3844, 3959), 'tensorflow_addons.optimizers.SGDW', 'tfa.optimizers.SGDW', (["hparams['weight_decay']", 'schedule'], {'momentum': "hparams['beta']", 'nesterov': "hparams['nesterov']"}), "(hparams['weight_decay'], schedule, momentum=hparams[\n 'beta'], nesterov=hparams['nesterov'])\n", (3863, 3959), True, 'import tensorflow_addons as tfa\n'), ((4044, 4101), 'tensorflow.keras.losses.CategoricalCrossentropy', 'tf.keras.losses.CategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (4083, 4101), True, 'import tensorflow as tf\n'), ((4118, 4181), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (4163, 4181), True, 'import tensorflow as tf\n'), ((4199, 4256), 'tensorflow.keras.losses.CategoricalCrossentropy', 'tf.keras.losses.CategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (4238, 4256), True, 'import tensorflow as tf\n'), ((4273, 4336), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (4318, 4336), True, 'import tensorflow as tf\n'), ((4348, 4456), 'augment.CTAugment', 'CTAugment', (["hparams['cta_classes']", "hparams['cta_decay']", "hparams['cta_threshold']", "hparams['cta_depth']"], {}), "(hparams['cta_classes'], hparams['cta_decay'], hparams[\n 'cta_threshold'], hparams['cta_depth'])\n", (4357, 4456), False, 'from augment import CTAugment\n'), ((4534, 4578), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['full_x_u'], {}), '(full_x_u)\n', (4568, 4578), True, 'import tensorflow as tf\n'), ((1469, 1503), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['x'], {}), '(x)\n', (1500, 
1503), True, 'import tensorflow as tf\n'), ((1524, 1574), 'tensorflow.cast', 'tf.cast', (['(x.shape[1] * 0.125)'], {'dtype': 'tf.dtypes.int32'}), '(x.shape[1] * 0.125, dtype=tf.dtypes.int32)\n', (1531, 1574), True, 'import tensorflow as tf\n'), ((1591, 1689), 'tensorflow.random.uniform', 'tf.random.uniform', (['[x.shape[0], 2]'], {'minval': '(-max_shift)', 'maxval': 'max_shift', 'dtype': 'tf.dtypes.int32'}), '([x.shape[0], 2], minval=-max_shift, maxval=max_shift,\n dtype=tf.dtypes.int32)\n', (1608, 1689), True, 'import tensorflow as tf\n'), ((1810, 1844), 'tensorflow.math.argmax', 'tf.math.argmax', (['class_dist'], {'axis': '(1)'}), '(class_dist, axis=1)\n', (1824, 1844), True, 'import tensorflow as tf\n'), ((1860, 1899), 'tensorflow.one_hot', 'tf.one_hot', (['argmax', 'class_dist.shape[1]'], {}), '(argmax, class_dist.shape[1])\n', (1870, 1899), True, 'import tensorflow as tf\n'), ((2037, 2084), 'tensorflow.cast', 'tf.cast', (['(max_probs > threshold)', 'max_probs.dtype'], {}), '(max_probs > threshold, max_probs.dtype)\n', (2044, 2084), True, 'import tensorflow as tf\n'), ((5651, 5688), 'error.test_error', 'test_error', (['model', 'full_x_l', 'full_y_l'], {}), '(model, full_x_l, full_y_l)\n', (5661, 5688), False, 'from error import test_error\n'), ((557, 602), 'tensorflow.python.framework.ops.name_scope_v2', 'ops.name_scope_v2', (["(self.name or 'CosineDecay')"], {}), "(self.name or 'CosineDecay')\n", (574, 602), False, 'from tensorflow.python.framework import ops\n'), ((640, 727), 'tensorflow.python.framework.ops.convert_to_tensor_v2', 'ops.convert_to_tensor_v2', (['self.initial_learning_rate'], {'name': '"""initial_learning_rate"""'}), "(self.initial_learning_rate, name=\n 'initial_learning_rate')\n", (664, 727), False, 'from tensorflow.python.framework import ops\n'), ((814, 852), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['self.decay_steps', 'dtype'], {}), '(self.decay_steps, dtype)\n', (827, 852), False, 'from tensorflow.python.ops import 
math_ops\n'), ((887, 913), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['step', 'dtype'], {}), '(step, dtype)\n', (900, 913), False, 'from tensorflow.python.ops import math_ops\n'), ((947, 996), 'tensorflow.python.ops.math_ops.minimum', 'math_ops.minimum', (['global_step_recomp', 'decay_steps'], {}), '(global_step_recomp, decay_steps)\n', (963, 996), False, 'from tensorflow.python.ops import math_ops\n'), ((1272, 1321), 'tensorflow.python.ops.math_ops.multiply', 'math_ops.multiply', (['initial_learning_rate', 'decayed'], {}), '(initial_learning_rate, decayed)\n', (1289, 1321), False, 'from tensorflow.python.ops import math_ops\n'), ((1724, 1757), 'tensorflow.cast', 'tf.cast', (['shift', 'tf.dtypes.float32'], {}), '(shift, tf.dtypes.float32)\n', (1731, 1757), True, 'import tensorflow as tf\n'), ((1999, 2020), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (2012, 2020), True, 'import tensorflow as tf\n'), ((2582, 2599), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (2597, 2599), True, 'import tensorflow as tf\n'), ((5875, 5928), 'tensorflow.keras.models.save_model', 'tf.keras.models.save_model', (['model'], {'filepath': 'file_name'}), '(model, filepath=file_name)\n', (5901, 5928), True, 'import tensorflow as tf\n'), ((2295, 2319), 'numpy.arange', 'np.arange', (['total_samples'], {}), '(total_samples)\n', (2304, 2319), True, 'import numpy as np\n'), ((2401, 2425), 'numpy.arange', 'np.arange', (['total_samples'], {}), '(total_samples)\n', (2410, 2425), True, 'import numpy as np\n'), ((1122, 1160), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(7 / 16 * math.pi)'], {}), '(7 / 16 * math.pi)\n', (1142, 1160), False, 'from tensorflow.python.framework import constant_op\n')]
|
import cv2
import numpy
def colorDetection(image):
#Converts image HSV type image
hsvImage = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
testImage = image
#Ranges for color detection
lowerYellow = numpy.array([20, 100, 100])
upperYellow = numpy.array([30,255, 255])
lowerBlue = numpy.array([85,100,100])
upperBlue = numpy.array([124,255,255])
lowerRed = numpy.array([0,100,100])
upperRed = numpy.array([19,255,255])
#Ranges applied to the hsvImage
yMask = cv2.inRange(hsvImage, lowerYellow, upperYellow)
bMask = cv2.inRange(hsvImage, lowerBlue, upperBlue)
rMask = cv2.inRange(hsvImage, lowerRed, upperRed)
#Finding the contours on the image
yContours, yHierarchy = cv2.findContours(yMask, cv2.cv.CV_RETR_TREE,
cv2.cv.CV_CHAIN_APPROX_SIMPLE)
bContours, bHierarchy = cv2.findContours(bMask, cv2.cv.CV_RETR_TREE,
cv2.cv.CV_CHAIN_APPROX_SIMPLE)
rContours, rHierarchy = cv2.findContours(rMask, cv2.cv.CV_RETR_TREE,
cv2.cv.CV_CHAIN_APPROX_SIMPLE)
#Given at least one yellow contour
if len(yContours) > 0:
# Find the index of the largest contour
yAreas = [cv2.contourArea(i) for i in yContours]
yMaxIndex = numpy.argmax(yAreas)
yCnt = yContours[yMaxIndex]
#Find coordinate for boundary rectangle
yx,yy,yw,yh = cv2.boundingRect(yCnt)
#Draw rectangle
cv2.rectangle(testImage,(yx-15,yy-15),(yx+yw+15,yy+yh+15),(0,255,255),0)
#Given at least one blue contour
if len(bContours) > 0:
# Find the index of the largest contour
bAreas = [cv2.contourArea(i) for i in bContours]
bMaxIndex = numpy.argmax(bAreas)
bCnt = bContours[bMaxIndex]
#Find coordinate for boundary rectangle
bx,by,bw,bh = cv2.boundingRect(bCnt)
#Draw rectangle
cv2.rectangle(testImage,(bx-15,by-15),(bx+bw+15,by+bh+15),(255,0,0),0)
#Given at least one red contour
if len(rContours) > 0:
# Find the index of the largest contour
rAreas = [cv2.contourArea(i) for i in rContours]
rMaxIndex = numpy.argmax(rAreas)
rCnt = rContours[rMaxIndex]
#Find coordinate for boundary rectangle
rx,ry,rw,rh = cv2.boundingRect(rCnt)
#Draw rectangle
cv2.rectangle(testImage,(rx-15,ry-15),(rx+rw+15,ry+rh+15),(0,0,255),0)
# #Displaying the masks individually and the final image
# cv2.imshow('Yellow Mask', yMask)
# cv2.imshow('Blue Mask', bMask)
# cv2.imshow('Red Mask', rMask)
# cv2.imshow('Altered image', testImage)
return testImage
def main():
#Default Camera (cv2.videocapture(-1) the parameter indexes your cameras)
camera = cv2.VideoCapture(-1)
while camera.isOpened():
_, image = camera.read()
cv2.imshow('Original', image)
rectImg = colorDetection(image)
cv2.imshow('Color Detector', rectImg)
cv2.waitKey(5)
if __name__ == '__main__':
main()
|
[
"cv2.contourArea",
"numpy.argmax",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.imshow",
"cv2.VideoCapture",
"numpy.array",
"cv2.rectangle",
"cv2.boundingRect",
"cv2.inRange",
"cv2.findContours"
] |
[((103, 141), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2HSV'], {}), '(image, cv2.COLOR_BGR2HSV)\n', (115, 141), False, 'import cv2\n'), ((214, 241), 'numpy.array', 'numpy.array', (['[20, 100, 100]'], {}), '([20, 100, 100])\n', (225, 241), False, 'import numpy\n'), ((260, 287), 'numpy.array', 'numpy.array', (['[30, 255, 255]'], {}), '([30, 255, 255])\n', (271, 287), False, 'import numpy\n'), ((303, 330), 'numpy.array', 'numpy.array', (['[85, 100, 100]'], {}), '([85, 100, 100])\n', (314, 330), False, 'import numpy\n'), ((345, 373), 'numpy.array', 'numpy.array', (['[124, 255, 255]'], {}), '([124, 255, 255])\n', (356, 373), False, 'import numpy\n'), ((387, 413), 'numpy.array', 'numpy.array', (['[0, 100, 100]'], {}), '([0, 100, 100])\n', (398, 413), False, 'import numpy\n'), ((427, 454), 'numpy.array', 'numpy.array', (['[19, 255, 255]'], {}), '([19, 255, 255])\n', (438, 454), False, 'import numpy\n'), ((502, 549), 'cv2.inRange', 'cv2.inRange', (['hsvImage', 'lowerYellow', 'upperYellow'], {}), '(hsvImage, lowerYellow, upperYellow)\n', (513, 549), False, 'import cv2\n'), ((562, 605), 'cv2.inRange', 'cv2.inRange', (['hsvImage', 'lowerBlue', 'upperBlue'], {}), '(hsvImage, lowerBlue, upperBlue)\n', (573, 605), False, 'import cv2\n'), ((618, 659), 'cv2.inRange', 'cv2.inRange', (['hsvImage', 'lowerRed', 'upperRed'], {}), '(hsvImage, lowerRed, upperRed)\n', (629, 659), False, 'import cv2\n'), ((728, 803), 'cv2.findContours', 'cv2.findContours', (['yMask', 'cv2.cv.CV_RETR_TREE', 'cv2.cv.CV_CHAIN_APPROX_SIMPLE'], {}), '(yMask, cv2.cv.CV_RETR_TREE, cv2.cv.CV_CHAIN_APPROX_SIMPLE)\n', (744, 803), False, 'import cv2\n'), ((864, 939), 'cv2.findContours', 'cv2.findContours', (['bMask', 'cv2.cv.CV_RETR_TREE', 'cv2.cv.CV_CHAIN_APPROX_SIMPLE'], {}), '(bMask, cv2.cv.CV_RETR_TREE, cv2.cv.CV_CHAIN_APPROX_SIMPLE)\n', (880, 939), False, 'import cv2\n'), ((1000, 1075), 'cv2.findContours', 'cv2.findContours', (['rMask', 'cv2.cv.CV_RETR_TREE', 'cv2.cv.CV_CHAIN_APPROX_SIMPLE'], 
{}), '(rMask, cv2.cv.CV_RETR_TREE, cv2.cv.CV_CHAIN_APPROX_SIMPLE)\n', (1016, 1075), False, 'import cv2\n'), ((2787, 2807), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(-1)'], {}), '(-1)\n', (2803, 2807), False, 'import cv2\n'), ((1300, 1320), 'numpy.argmax', 'numpy.argmax', (['yAreas'], {}), '(yAreas)\n', (1312, 1320), False, 'import numpy\n'), ((1427, 1449), 'cv2.boundingRect', 'cv2.boundingRect', (['yCnt'], {}), '(yCnt)\n', (1443, 1449), False, 'import cv2\n'), ((1482, 1578), 'cv2.rectangle', 'cv2.rectangle', (['testImage', '(yx - 15, yy - 15)', '(yx + yw + 15, yy + yh + 15)', '(0, 255, 255)', '(0)'], {}), '(testImage, (yx - 15, yy - 15), (yx + yw + 15, yy + yh + 15),\n (0, 255, 255), 0)\n', (1495, 1578), False, 'import cv2\n'), ((1745, 1765), 'numpy.argmax', 'numpy.argmax', (['bAreas'], {}), '(bAreas)\n', (1757, 1765), False, 'import numpy\n'), ((1872, 1894), 'cv2.boundingRect', 'cv2.boundingRect', (['bCnt'], {}), '(bCnt)\n', (1888, 1894), False, 'import cv2\n'), ((1927, 2021), 'cv2.rectangle', 'cv2.rectangle', (['testImage', '(bx - 15, by - 15)', '(bx + bw + 15, by + bh + 15)', '(255, 0, 0)', '(0)'], {}), '(testImage, (bx - 15, by - 15), (bx + bw + 15, by + bh + 15),\n (255, 0, 0), 0)\n', (1940, 2021), False, 'import cv2\n'), ((2187, 2207), 'numpy.argmax', 'numpy.argmax', (['rAreas'], {}), '(rAreas)\n', (2199, 2207), False, 'import numpy\n'), ((2314, 2336), 'cv2.boundingRect', 'cv2.boundingRect', (['rCnt'], {}), '(rCnt)\n', (2330, 2336), False, 'import cv2\n'), ((2369, 2463), 'cv2.rectangle', 'cv2.rectangle', (['testImage', '(rx - 15, ry - 15)', '(rx + rw + 15, ry + rh + 15)', '(0, 0, 255)', '(0)'], {}), '(testImage, (rx - 15, ry - 15), (rx + rw + 15, ry + rh + 15),\n (0, 0, 255), 0)\n', (2382, 2463), False, 'import cv2\n'), ((2879, 2908), 'cv2.imshow', 'cv2.imshow', (['"""Original"""', 'image'], {}), "('Original', image)\n", (2889, 2908), False, 'import cv2\n'), ((2958, 2995), 'cv2.imshow', 'cv2.imshow', (['"""Color Detector"""', 'rectImg'], {}), "('Color 
Detector', rectImg)\n", (2968, 2995), False, 'import cv2\n'), ((3004, 3018), 'cv2.waitKey', 'cv2.waitKey', (['(5)'], {}), '(5)\n', (3015, 3018), False, 'import cv2\n'), ((1241, 1259), 'cv2.contourArea', 'cv2.contourArea', (['i'], {}), '(i)\n', (1256, 1259), False, 'import cv2\n'), ((1686, 1704), 'cv2.contourArea', 'cv2.contourArea', (['i'], {}), '(i)\n', (1701, 1704), False, 'import cv2\n'), ((2128, 2146), 'cv2.contourArea', 'cv2.contourArea', (['i'], {}), '(i)\n', (2143, 2146), False, 'import cv2\n')]
|
#!/usr/bin/env python3
import requests
from pprint import pprint as pp # part of the standard library
from datetime import date
# import webbrowser
## define some constants
NASAAPI = 'https://api.nasa.gov/planetary/apod?'
with open('nasa_api_key', 'r') as file:
MYKEY = "&api_key=" + file.read().replace('\n', '')
## pretty print json
def main():
"""run-time code"""
## Variables
today = date.today()
d = "&date=" + today.isoformat()
print("The default request is for today's date. Would you like to request a particular date? Yes/No:")
answer = input()
if answer == 'Yes' or answer == 'yes':
d = "&date=" + input("Enter the date in the format YEAR-MM-DD:")
nasaapiobj = requests.get(NASAAPI + MYKEY + d) # call the webservice
nasaread = nasaapiobj.json() # parse the JSON blob returned
# Show converted json
print(nasaread) # show converted JSON without pprint
input('\nThis is converted json. Press ENTER to continue.') # pause for enter
# Show Pretty Print json
pp(nasaread) # this is pretty print in action
# pprint.pprint(convertedjson) # if you do a simple import pprint, the result is a long usage
input('\nThis is pretty printed JSON. Press ENTER to continue.') # pause for ENTER
# Print the description of the photo we are about to view
print(nasaread['explanation']) # display the value for the key explanation
print("Link to the APOD:", nasaread.get('hdurl',"No HD URL for today!"))
#input('\nPress ENTER to view this photo of the day') # pause for ENTER
# webbrowser.open(nasaread['hdurl']) # open in the webbrowser
main()
|
[
"pprint.pprint",
"datetime.date.today",
"requests.get"
] |
[((408, 420), 'datetime.date.today', 'date.today', ([], {}), '()\n', (418, 420), False, 'from datetime import date\n'), ((720, 753), 'requests.get', 'requests.get', (['(NASAAPI + MYKEY + d)'], {}), '(NASAAPI + MYKEY + d)\n', (732, 753), False, 'import requests\n'), ((1040, 1052), 'pprint.pprint', 'pp', (['nasaread'], {}), '(nasaread)\n', (1042, 1052), True, 'from pprint import pprint as pp\n')]
|
from model.contact import Contact
from model.group import Group
import random
def test_delete_contact_in_group(app, orm):
if len(orm.get_group_list()) == 0:
app.group.create(Group(name="test"))
if len(orm.get_contact_list()) == 0:
app.contact.add_contact(Contact(firstname="asdfg", middlename="asdfg", lastname="asdfg", nickname="asdfg", title="asdfg",
company="asdfg", address="asdfg", homephone="565656", mobilephone="677565", workphone="76876687", fax="67678678",
email="<EMAIL>", email2="<EMAIL>",email3="<EMAIL>", homepage="<EMAIL>",
day="7", month="August", year="2000",
address2="trtyyt", secondaryphone="75757", notes="uiygfhjkf"))
contact = random.choice(orm.get_contact_list())
group = random.choice(orm.get_group_list())
if len(orm.get_groups_this_contacts(contact)) == 0:
app.contact.add_contact_in_group(contact, group)
groups_this_contacts_before = orm.get_groups_this_contacts(contact)
group_for_del = random.choice(groups_this_contacts_before)
app.contact.delete_contact_in_group(contact, group_for_del)
groups_this_contacts_after = orm.get_groups_this_contacts(contact)
groups_this_contacts_before.remove(group)
assert sorted(groups_this_contacts_before, key=Contact.id_or_max) == sorted(groups_this_contacts_after, key=Contact.id_or_max)
|
[
"model.contact.Contact",
"random.choice",
"model.group.Group"
] |
[((1074, 1116), 'random.choice', 'random.choice', (['groups_this_contacts_before'], {}), '(groups_this_contacts_before)\n', (1087, 1116), False, 'import random\n'), ((187, 205), 'model.group.Group', 'Group', ([], {'name': '"""test"""'}), "(name='test')\n", (192, 205), False, 'from model.group import Group\n'), ((280, 687), 'model.contact.Contact', 'Contact', ([], {'firstname': '"""asdfg"""', 'middlename': '"""asdfg"""', 'lastname': '"""asdfg"""', 'nickname': '"""asdfg"""', 'title': '"""asdfg"""', 'company': '"""asdfg"""', 'address': '"""asdfg"""', 'homephone': '"""565656"""', 'mobilephone': '"""677565"""', 'workphone': '"""76876687"""', 'fax': '"""67678678"""', 'email': '"""<EMAIL>"""', 'email2': '"""<EMAIL>"""', 'email3': '"""<EMAIL>"""', 'homepage': '"""<EMAIL>"""', 'day': '"""7"""', 'month': '"""August"""', 'year': '"""2000"""', 'address2': '"""trtyyt"""', 'secondaryphone': '"""75757"""', 'notes': '"""uiygfhjkf"""'}), "(firstname='asdfg', middlename='asdfg', lastname='asdfg', nickname=\n 'asdfg', title='asdfg', company='asdfg', address='asdfg', homephone=\n '565656', mobilephone='677565', workphone='76876687', fax='67678678',\n email='<EMAIL>', email2='<EMAIL>', email3='<EMAIL>', homepage='<EMAIL>',\n day='7', month='August', year='2000', address2='trtyyt', secondaryphone\n ='75757', notes='uiygfhjkf')\n", (287, 687), False, 'from model.contact import Contact\n')]
|
import torch
import torch.nn as nn
from torch import Tensor
class Displacement(nn.Module):
r"""
Displacement Layer computes the displacement vector for each point in the source image, with its corresponding point
(or points) in target image.
The output is a displacement matrix constructed from all displacement vectors.
This metric measures the shift from source point to predicted target point, and can be applied for matching
accuracy.
Together with displacement matrix d, this function will also return a grad_mask, which helps to filter out dummy
nodes in practice.
.. math::
\mathbf{d}_i = \sum_{j \in V_2} \left( \mathbf{S}_{i, j} P_{2j} \right)- P_{1i}
Proposed by `"<NAME> al. Deep Learning of Graph Matching. CVPR 2018."
<http://openaccess.thecvf.com/content_cvpr_2018/html/Zanfir_Deep_Learning_of_CVPR_2018_paper.html>`_
"""
def __init__(self):
super(Displacement, self).__init__()
def forward(self, s: Tensor, P_src: Tensor, P_tgt: Tensor, ns_gt: Tensor=None):
r"""
:param s: :math:`(b\times n_1 \times n_2)` permutation or doubly stochastic matrix. :math:`b`: batch size.
:math:`n_1`: number of nodes in source image. :math:`n_2`: number of nodes in target image
:param P_src: :math:`(b\times n_1 \times 2)` point set on source image
:param P_tgt: :math:`(b\times n_2 \times 2)` point set on target image
:param ns_gt: :math:`(b)` number of exact pairs. We support batched instances with different number of nodes,
therefore ``ns_gt`` is required to specify the exact number of nodes of each instance in the batch.
:return: displacement matrix d,
mask for dummy nodes grad_mask. If ``ns_gt=None``, it will not be calculated and None is returned.
"""
if ns_gt is None:
max_n = s.shape[1]
P_src = P_src[:, 0:max_n, :]
grad_mask = None
else:
grad_mask = torch.zeros_like(P_src)
for b, n in enumerate(ns_gt):
grad_mask[b, 0:n] = 1
d = torch.matmul(s, P_tgt) - P_src
return d, grad_mask
|
[
"torch.matmul",
"torch.zeros_like"
] |
[((1993, 2016), 'torch.zeros_like', 'torch.zeros_like', (['P_src'], {}), '(P_src)\n', (2009, 2016), False, 'import torch\n'), ((2110, 2132), 'torch.matmul', 'torch.matmul', (['s', 'P_tgt'], {}), '(s, P_tgt)\n', (2122, 2132), False, 'import torch\n')]
|
# Copyright 2019-2021 Wingify Software Pvt. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import json
import sys
from vwo.services.segmentor.segment_evaluator import SegmentEvaluator
with open("tests/data/segmentor_test_cases.json") as json_file:
segmentor_test_cases = json.load(json_file)
class TestSpecialCharacters(unittest.TestCase):
def setUp(self):
self.segment_evaluator = SegmentEvaluator()
self.test_cases = segmentor_test_cases.get("special_characters")
def test_test_special_character_pound(self):
test_case = self.test_cases.get("test_special_character_pound")
if sys.version_info[0] < 3:
test_case["custom_variables"]["eq"] = test_case["custom_variables"]["eq"].encode("utf-8")
self.assertIs(
self.segment_evaluator.evaluate(test_case.get("dsl"), test_case.get("custom_variables")),
test_case.get("expectation"),
)
|
[
"vwo.services.segmentor.segment_evaluator.SegmentEvaluator",
"json.load"
] |
[((796, 816), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (805, 816), False, 'import json\n'), ((921, 939), 'vwo.services.segmentor.segment_evaluator.SegmentEvaluator', 'SegmentEvaluator', ([], {}), '()\n', (937, 939), False, 'from vwo.services.segmentor.segment_evaluator import SegmentEvaluator\n')]
|
# Django imports
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.urls import reverse
# Internal imports
from .models import Book, Chapter
# External imports
from django.utils import timezone
# * Function to list the five latest books
def index(request):
# We get a list (maximum of 10 books as of this version) with all books that have
# been published before now, ordered by their publishing time
latest_books_list = Book.objects.filter(published_at__lte=timezone.now()).order_by('-published_at')[:10]
context = {'latest_books_list': latest_books_list}
return render(request, 'catalog/index.html', context)
# * Function to show all details of a specific book, including a list of chapters
def details(request, book_id):
try:
# The book needs the right ID, and to be published before now().
book = Book.objects.get(pk=book_id, published_at__lte=timezone.now())
# The list of chapters already come from the book's chapter_set, so it only needs
# to filter the chapters based on publishing time
chapters = book.chapter_set.filter(published_at__lte=timezone.now())
except (Book.DoesNotExist) as e:
raise Http404("Error: ", e)
context = {'book': book, 'chapters': chapters}
return render(request, 'catalog/details.html', context)
# * Function to view a specific chapter
def chapter(request, book_id, chapter_id):
try:
# The book needs the right ID, and to be published before now().
book = Book.objects.get(pk=book_id, published_at__lte=timezone.now())
# This is just one chapter so it needs the ID to differentiate from the others in the
# book's chapter_set, and also the publishing time check
chapter = book.chapter_set.get(pk=chapter_id, published_at__lte=timezone.now())
except (Book.DoesNotExist, Chapter.DoesNotExist) as e:
raise Http404("Error: ", e)
context = {'chapter': chapter}
return render(request, 'catalog/chapter.html', context)
# * Function to vote in a specific chapter
def vote(request, book_id, chapter_id):
try:
# The book needs the right ID, and to be published before now().
book = Book.objects.get(pk=book_id, published_at__lte=timezone.now())
# This is just one chapter so it needs the ID to differentiate from the others in the
# book's chapter_set, and also the publishing time check
chapter = book.chapter_set.get(pk=chapter_id, published_at__lte=timezone.now())
except (Book.DoesNotExist, Chapter.DoesNotExist) as e:
raise Http404("Error: ", e)
try:
selected_choice = request.POST['choice'] # Gets the choice from the POST data
except (KeyError):
# Redisplay the question voting form because there was no vote (KeyError from lack of vote in POST).
context = {'chapter': chapter, 'error_message': "You didn't vote."}
return render(request, 'catalog/chapter.html', context)
else:
# Check for the selected vote, 1 for positive and 0 for negative vote
if selected_choice == "1":
chapter.score += 1
elif selected_choice == "0":
chapter.score -= 1
chapter.save() # TODO: Look into F(), at Django docs. Better performance
# ! Always return an HttpResponseRedirect after successfully dealing with POST data.
# This prevents data from being posted twice if a user hits the Back button.
return HttpResponseRedirect(reverse('catalog:details', args=(book.id,)))
|
[
"django.shortcuts.render",
"django.urls.reverse",
"django.http.Http404",
"django.utils.timezone.now"
] |
[((652, 698), 'django.shortcuts.render', 'render', (['request', '"""catalog/index.html"""', 'context'], {}), "(request, 'catalog/index.html', context)\n", (658, 698), False, 'from django.shortcuts import render\n'), ((1336, 1384), 'django.shortcuts.render', 'render', (['request', '"""catalog/details.html"""', 'context'], {}), "(request, 'catalog/details.html', context)\n", (1342, 1384), False, 'from django.shortcuts import render\n'), ((2020, 2068), 'django.shortcuts.render', 'render', (['request', '"""catalog/chapter.html"""', 'context'], {}), "(request, 'catalog/chapter.html', context)\n", (2026, 2068), False, 'from django.shortcuts import render\n'), ((1251, 1272), 'django.http.Http404', 'Http404', (['"""Error: """', 'e'], {}), "('Error: ', e)\n", (1258, 1272), False, 'from django.http import HttpResponse, HttpResponseRedirect, Http404\n'), ((1951, 1972), 'django.http.Http404', 'Http404', (['"""Error: """', 'e'], {}), "('Error: ', e)\n", (1958, 1972), False, 'from django.http import HttpResponse, HttpResponseRedirect, Http404\n'), ((2635, 2656), 'django.http.Http404', 'Http404', (['"""Error: """', 'e'], {}), "('Error: ', e)\n", (2642, 2656), False, 'from django.http import HttpResponse, HttpResponseRedirect, Http404\n'), ((2976, 3024), 'django.shortcuts.render', 'render', (['request', '"""catalog/chapter.html"""', 'context'], {}), "(request, 'catalog/chapter.html', context)\n", (2982, 3024), False, 'from django.shortcuts import render\n'), ((3544, 3587), 'django.urls.reverse', 'reverse', (['"""catalog:details"""'], {'args': '(book.id,)'}), "('catalog:details', args=(book.id,))\n", (3551, 3587), False, 'from django.urls import reverse\n'), ((957, 971), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (969, 971), False, 'from django.utils import timezone\n'), ((1183, 1197), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1195, 1197), False, 'from django.utils import timezone\n'), ((1613, 1627), 'django.utils.timezone.now', 
'timezone.now', ([], {}), '()\n', (1625, 1627), False, 'from django.utils import timezone\n'), ((1861, 1875), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1873, 1875), False, 'from django.utils import timezone\n'), ((2297, 2311), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (2309, 2311), False, 'from django.utils import timezone\n'), ((2545, 2559), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (2557, 2559), False, 'from django.utils import timezone\n'), ((538, 552), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (550, 552), False, 'from django.utils import timezone\n')]
|
from app import create_app
from config import Config
app = create_app(Config)
|
[
"app.create_app"
] |
[((61, 79), 'app.create_app', 'create_app', (['Config'], {}), '(Config)\n', (71, 79), False, 'from app import create_app\n')]
|
# -*- coding: utf-8 -*-
__all__ = ["KeplerOp"]
import theano
import theano.tensor as tt
from theano import gof
from ..build_utils import get_cache_version, get_compile_args, get_header_dirs
class KeplerOp(gof.COp):
__props__ = ()
func_file = "./kepler.cc"
func_name = "APPLY_SPECIFIC(kepler)"
def __init__(self, **kwargs):
super(KeplerOp, self).__init__(self.func_file, self.func_name)
def c_code_cache_version(self):
return get_cache_version()
def c_headers(self, compiler):
return ["exoplanet/theano_helpers.h", "exoplanet/kepler.h"]
def c_header_dirs(self, compiler):
return get_header_dirs(eigen=False)
def c_compile_args(self, compiler):
return get_compile_args(compiler)
def make_node(self, mean_anom, eccen):
in_args = [
tt.as_tensor_variable(mean_anom),
tt.as_tensor_variable(eccen),
]
return gof.Apply(self, in_args, [in_args[0].type(), in_args[0].type()])
def infer_shape(self, node, shapes):
return shapes[0], shapes[0]
def grad(self, inputs, gradients):
M, e = inputs
sinf, cosf = self(M, e)
bM = tt.zeros_like(M)
be = tt.zeros_like(M)
# e * cos(f)
ecosf = e * cosf
# 1 - e^2
ome2 = 1 - e ** 2
# Partials
dfdM = (1 + ecosf) ** 2 / ome2 ** 1.5
dfde = (2 + ecosf) * sinf / ome2
if not isinstance(gradients[0].type, theano.gradient.DisconnectedType):
bM += gradients[0] * cosf * dfdM
be += gradients[0] * cosf * dfde
if not isinstance(gradients[1].type, theano.gradient.DisconnectedType):
bM -= gradients[1] * sinf * dfdM
be -= gradients[1] * sinf * dfde
return [bM, be]
def R_op(self, inputs, eval_points):
if eval_points[0] is None:
return eval_points
return self.grad(inputs, eval_points)
|
[
"theano.tensor.zeros_like",
"theano.tensor.as_tensor_variable"
] |
[((1187, 1203), 'theano.tensor.zeros_like', 'tt.zeros_like', (['M'], {}), '(M)\n', (1200, 1203), True, 'import theano.tensor as tt\n'), ((1217, 1233), 'theano.tensor.zeros_like', 'tt.zeros_like', (['M'], {}), '(M)\n', (1230, 1233), True, 'import theano.tensor as tt\n'), ((835, 867), 'theano.tensor.as_tensor_variable', 'tt.as_tensor_variable', (['mean_anom'], {}), '(mean_anom)\n', (856, 867), True, 'import theano.tensor as tt\n'), ((881, 909), 'theano.tensor.as_tensor_variable', 'tt.as_tensor_variable', (['eccen'], {}), '(eccen)\n', (902, 909), True, 'import theano.tensor as tt\n')]
|
import unittest
from jmilkfansblog.controllers import admin
from jmilkfansblog.controllers import rest_api
from jmilkfansblog import create_app
from jmilkfansblog.models import db
class TestURLs(unittest.TestCase):
"""Unit test for route functions."""
def setUp(self):
# Destroy the Flask-Admin and Flask-Result object after delete app
# object
admin._views = []
rest_api.resource = []
app = create_app('jmilkfansblog.config.TestConfig')
self.client = app.test_client()
# Using Test app for db
db.app = app
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"jmilkfansblog.models.db.create_all",
"jmilkfansblog.create_app",
"jmilkfansblog.models.db.session.remove",
"jmilkfansblog.models.db.drop_all"
] |
[((716, 731), 'unittest.main', 'unittest.main', ([], {}), '()\n', (729, 731), False, 'import unittest\n'), ((445, 490), 'jmilkfansblog.create_app', 'create_app', (['"""jmilkfansblog.config.TestConfig"""'], {}), "('jmilkfansblog.config.TestConfig')\n", (455, 490), False, 'from jmilkfansblog import create_app\n'), ((593, 608), 'jmilkfansblog.models.db.create_all', 'db.create_all', ([], {}), '()\n', (606, 608), False, 'from jmilkfansblog.models import db\n'), ((642, 661), 'jmilkfansblog.models.db.session.remove', 'db.session.remove', ([], {}), '()\n', (659, 661), False, 'from jmilkfansblog.models import db\n'), ((670, 683), 'jmilkfansblog.models.db.drop_all', 'db.drop_all', ([], {}), '()\n', (681, 683), False, 'from jmilkfansblog.models import db\n')]
|
"""melive URL Configuration
See:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
import debug_toolbar
from django.conf.urls import url
from django.contrib import admin
from django.contrib.sitemaps import views
from django.contrib.sitemaps.views import sitemap
from django.urls import include, path
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import TemplateView
from graphene_django.views import GraphQLView
from core.sitemap import B40_Sitemap, UserMNG_Sitemap
sitemaps = {"core": B40_Sitemap, "userMng": UserMNG_Sitemap}
urlpatterns = [
path("admin/", admin.site.urls),
path("", include("core.urls")),
path("", include("userMng.urls")),
path("oauth/", include("social_django.urls", namespace="social")),
url(
r"^robots.txt$", TemplateView.as_view(template_name="robots.txt", content_type="text/plain"), name="robots_file"
),
path("sitemap.xml", sitemap, {"sitemaps": sitemaps}, name="django.contrib.sitemaps.views.sitemap"),
path("__debug__/", include(debug_toolbar.urls)),
# expose graphql server api, incl. GraphQL IDE - and
# disable CSRF token requirement because for now it is PUBLIC API
# TODO it should be protected
path("graphql", csrf_exempt(GraphQLView.as_view(graphiql=True))),
]
|
[
"graphene_django.views.GraphQLView.as_view",
"django.views.generic.TemplateView.as_view",
"django.urls.path",
"django.urls.include"
] |
[((1048, 1079), 'django.urls.path', 'path', (['"""admin/"""', 'admin.site.urls'], {}), "('admin/', admin.site.urls)\n", (1052, 1079), False, 'from django.urls import include, path\n'), ((1368, 1471), 'django.urls.path', 'path', (['"""sitemap.xml"""', 'sitemap', "{'sitemaps': sitemaps}"], {'name': '"""django.contrib.sitemaps.views.sitemap"""'}), "('sitemap.xml', sitemap, {'sitemaps': sitemaps}, name=\n 'django.contrib.sitemaps.views.sitemap')\n", (1372, 1471), False, 'from django.urls import include, path\n'), ((1094, 1114), 'django.urls.include', 'include', (['"""core.urls"""'], {}), "('core.urls')\n", (1101, 1114), False, 'from django.urls import include, path\n'), ((1130, 1153), 'django.urls.include', 'include', (['"""userMng.urls"""'], {}), "('userMng.urls')\n", (1137, 1153), False, 'from django.urls import include, path\n'), ((1175, 1224), 'django.urls.include', 'include', (['"""social_django.urls"""'], {'namespace': '"""social"""'}), "('social_django.urls', namespace='social')\n", (1182, 1224), False, 'from django.urls import include, path\n'), ((1261, 1336), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""robots.txt"""', 'content_type': '"""text/plain"""'}), "(template_name='robots.txt', content_type='text/plain')\n", (1281, 1336), False, 'from django.views.generic import TemplateView\n'), ((1491, 1518), 'django.urls.include', 'include', (['debug_toolbar.urls'], {}), '(debug_toolbar.urls)\n', (1498, 1518), False, 'from django.urls import include, path\n'), ((1714, 1748), 'graphene_django.views.GraphQLView.as_view', 'GraphQLView.as_view', ([], {'graphiql': '(True)'}), '(graphiql=True)\n', (1733, 1748), False, 'from graphene_django.views import GraphQLView\n')]
|
from flask import Blueprint
from .constants import GET, COUNTER
RESOURCE = 'health'
PATH = f'/api/v1/{RESOURCE}'
api = Blueprint(RESOURCE, __name__, url_prefix=PATH)
@api.route('/', methods=[GET])
def get():
COUNTER.labels(GET, PATH).inc()
return {'status': 'ok'}
|
[
"flask.Blueprint"
] |
[((121, 167), 'flask.Blueprint', 'Blueprint', (['RESOURCE', '__name__'], {'url_prefix': 'PATH'}), '(RESOURCE, __name__, url_prefix=PATH)\n', (130, 167), False, 'from flask import Blueprint\n')]
|
"""
client.py
"""
# Standard library
import socket
import re
import pickle
import sys
# Third party
import pygame
# Local source
import game_functions as gf
import square
# Server port, IPv4 will be prompted
PORT = 26256
# Server data constraints
HEADER_SIZE = 16
FORMAT_TYPE = 'utf-8'
def main():
server_ip = ipPrompt()
# Create and connect client socket
client = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
client.connect((server_ip,PORT))
# Report to client that connection has been established with server
print(f"[CLIENT] You have connected to the server @ {server_ip}")
# Initialize and manage pygame settings
print("[CLIENT] Launching game window...")
pygame.init()
pygame.display.set_caption("Socket Squares")
clock = pygame.time.Clock()
# Declare pygame screen and resolution
screen = pygame.display.set_mode((800,600))
# Done
print("[CLIENT] Receiving character data...")
header_data = client.recv(HEADER_SIZE).decode(FORMAT_TYPE)
if header_data:
header_data = int(header_data)
my_square = client.recv(header_data)
my_square = pickle.loads(my_square)
my_square = square.PlayerSquare(my_square, screen)
print("[CLIENT] Character data received.")
# List of all current player squares
player_squares = [None,None,None,None,None,None,None,None]
clock.tick(60)
while True:
gf.check_events(screen, my_square)
player_squares = pickleSwap(my_square, client)
gf.update_screen(screen, my_square, player_squares)
else:
# Exit cleanly
client.close()
print("You have disconnected from the server. Now exiting...")
pygame.quit()
sys.exit()
def ipPrompt():
# Prompt user for IPv4, determine if given IPv4 is "valid" using regex. Don't continue until pass regex
temp_ipv4 = ""
regex_passed = None
while not regex_passed:
temp_ipv4 = input("\nEnter the IPv4 Address of a server to connect to: ")
regex_passed = re.search("^[0-9]{1,3}\.{1}[0-9]{1,3}\.{1}[0-9]{1,3}\.{1}[0-9]{1,3}$", temp_ipv4)
if not regex_passed:
print("Invalid IPv4. Please try again following the format: X.X.X.X")
return temp_ipv4
def printArray(given_array):
for item in given_array:
if item is not None:
pass
def pickleSwap(data, client):
# Turn coordinates of player square into a tuple, send to server and receive all square updates
alldata = pickle.dumps((data.center_x, data.center_y))
send_length = f"{len(alldata):<{HEADER_SIZE}}"
send_length = str(send_length).encode(FORMAT_TYPE)
client.send(send_length)
client.send(alldata)
# Receive player_squares list
squares = client.recv(HEADER_SIZE)
squares = int(squares)
squares = client.recv(squares)
squares = pickle.loads(squares)
printArray(squares)
return squares
main()
|
[
"pickle.loads",
"pygame.quit",
"pygame.display.set_mode",
"socket.socket",
"game_functions.update_screen",
"pygame.init",
"square.PlayerSquare",
"game_functions.check_events",
"pygame.display.set_caption",
"pygame.time.Clock",
"re.search",
"sys.exit",
"pickle.dumps"
] |
[((383, 432), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (396, 432), False, 'import socket\n'), ((708, 721), 'pygame.init', 'pygame.init', ([], {}), '()\n', (719, 721), False, 'import pygame\n'), ((726, 770), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Socket Squares"""'], {}), "('Socket Squares')\n", (752, 770), False, 'import pygame\n'), ((783, 802), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (800, 802), False, 'import pygame\n'), ((860, 895), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(800, 600)'], {}), '((800, 600))\n', (883, 895), False, 'import pygame\n'), ((1145, 1168), 'pickle.loads', 'pickle.loads', (['my_square'], {}), '(my_square)\n', (1157, 1168), False, 'import pickle\n'), ((1185, 1223), 'square.PlayerSquare', 'square.PlayerSquare', (['my_square', 'screen'], {}), '(my_square, screen)\n', (1204, 1223), False, 'import square\n'), ((2519, 2563), 'pickle.dumps', 'pickle.dumps', (['(data.center_x, data.center_y)'], {}), '((data.center_x, data.center_y))\n', (2531, 2563), False, 'import pickle\n'), ((2876, 2897), 'pickle.loads', 'pickle.loads', (['squares'], {}), '(squares)\n', (2888, 2897), False, 'import pickle\n'), ((1421, 1455), 'game_functions.check_events', 'gf.check_events', (['screen', 'my_square'], {}), '(screen, my_square)\n', (1436, 1455), True, 'import game_functions as gf\n'), ((1529, 1580), 'game_functions.update_screen', 'gf.update_screen', (['screen', 'my_square', 'player_squares'], {}), '(screen, my_square, player_squares)\n', (1545, 1580), True, 'import game_functions as gf\n'), ((1718, 1731), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (1729, 1731), False, 'import pygame\n'), ((1740, 1750), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1748, 1750), False, 'import sys\n'), ((2054, 2142), 're.search', 're.search', (['"""^[0-9]{1,3}\\\\.{1}[0-9]{1,3}\\\\.{1}[0-9]{1,3}\\\\.{1}[0-9]{1,3}$"""', 'temp_ipv4'], {}), 
"('^[0-9]{1,3}\\\\.{1}[0-9]{1,3}\\\\.{1}[0-9]{1,3}\\\\.{1}[0-9]{1,3}$',\n temp_ipv4)\n", (2063, 2142), False, 'import re\n')]
|