input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
"""Imports"""
import json
import pytest
import demistomock as demisto
from netaddr import IPAddress
IOC_RES_LEN = 38
'''Tests'''
@pytest.mark.helper_commands
class TestHelperFunctions:
@pytest.mark.get_outbound_ioc_values
def test_get_outbound_ioc_values_1(self, mocker):
    """On-demand request is served straight from the cached integration context."""
    from ExportIndicators import get_outbound_ioc_values, RequestArguments
    with open('ExportIndicators_test/TestHelperFunctions/iocs_cache_values_text.json', 'r') as cache_file:
        cached_iocs = json.load(cache_file)
    mocker.patch.object(demisto, 'getIntegrationContext', return_value={"last_output": cached_iocs})
    req_args = RequestArguments(query='', out_format='text', limit=50, offset=0)
    result = get_outbound_ioc_values(on_demand=True, request_args=req_args)
    # every returned row must come from the cached values
    assert all(row in cached_iocs for row in result)
@pytest.mark.get_outbound_ioc_values
def test_get_outbound_ioc_values_2(self, mocker):
    """Not on-demand and the refresh window has not elapsed: cached values are reused."""
    import CommonServerPython as CSP
    # parse_date_range must be patched before ExportIndicators is first imported
    mocker.patch.object(CSP, 'parse_date_range', return_value=(1578383899, 1578383899))
    import ExportIndicators as ei
    with open('ExportIndicators_test/TestHelperFunctions/iocs_cache_values_text.json', 'r') as cache_file:
        cached_iocs = json.load(cache_file)
    mocker.patch.object(demisto, 'getIntegrationContext', return_value={"last_output": cached_iocs})
    mocker.patch.object(ei, 'refresh_outbound_context', return_value=cached_iocs)
    mocker.patch.object(demisto, 'getLastRun', return_value={'last_run': 1578383898000})
    req_args = ei.RequestArguments(query='', out_format='text', limit=50, offset=0)
    result = ei.get_outbound_ioc_values(on_demand=False, request_args=req_args, cache_refresh_rate='1 minute')
    assert all(row in cached_iocs for row in result)
@pytest.mark.get_outbound_ioc_values
def test_get_outbound_ioc_values_3(self, mocker):
    """Not on-demand and the refresh window has elapsed: the context is refreshed."""
    import CommonServerPython as CSP
    # parse_date_range must be patched before ExportIndicators is first imported
    mocker.patch.object(CSP, 'parse_date_range', return_value=(1578383898, 1578383898))
    import ExportIndicators as ei
    with open('ExportIndicators_test/TestHelperFunctions/iocs_cache_values_text.json', 'r') as cache_file:
        cached_iocs = json.load(cache_file)
    mocker.patch.object(demisto, 'getIntegrationContext', return_value={"last_output": cached_iocs})
    mocker.patch.object(ei, 'refresh_outbound_context', return_value=cached_iocs)
    mocker.patch.object(demisto, 'getLastRun', return_value={'last_run': 1578383898000})
    req_args = ei.RequestArguments(query='', out_format='text', limit=50, offset=0)
    result = ei.get_outbound_ioc_values(on_demand=False, request_args=req_args, cache_refresh_rate='1 minute')
    assert all(row in cached_iocs for row in result)
@pytest.mark.get_outbound_ioc_values
def test_get_outbound_ioc_values_4(self, mocker):
    """Changed request limit (1 -> 50) forces a refresh of the cached values."""
    import CommonServerPython as CSP
    mocker.patch.object(CSP, 'parse_date_range', return_value=(1578383898, 1578383898))
    import ExportIndicators as ei
    with open('ExportIndicators_test/TestHelperFunctions/iocs_cache_values_text.json', 'r') as cache_file:
        cached_iocs = json.load(cache_file)
    mocker.patch.object(demisto, 'getIntegrationContext', return_value={
        "last_output": cached_iocs,
        "last_limit": 1,
        "last_offset": 0,
        "last_query": "type:ip",
        "last_format": "text",
    })
    mocker.patch.object(ei, 'refresh_outbound_context', return_value=cached_iocs)
    mocker.patch.object(demisto, 'getLastRun', return_value={'last_run': 1578383898000})
    req_args = ei.RequestArguments(query='type:ip', out_format='text', limit=50, offset=0)
    result = ei.get_outbound_ioc_values(on_demand=False, request_args=req_args, cache_refresh_rate='1 minute')
    assert all(row in cached_iocs for row in result)
@pytest.mark.get_outbound_ioc_values
def test_get_outbound_ioc_values_5(self, mocker):
    """Changed request offset (1 -> 0) forces a refresh of the cached values."""
    import CommonServerPython as CSP
    mocker.patch.object(CSP, 'parse_date_range', return_value=(1578383898, 1578383898))
    import ExportIndicators as ei
    with open('ExportIndicators_test/TestHelperFunctions/iocs_cache_values_text.json', 'r') as cache_file:
        cached_iocs = json.load(cache_file)
    mocker.patch.object(demisto, 'getIntegrationContext', return_value={
        "last_output": cached_iocs,
        "last_limit": 50,
        "last_offset": 1,
        "last_query": "type:ip",
        "last_format": "text",
    })
    mocker.patch.object(ei, 'refresh_outbound_context', return_value=cached_iocs)
    mocker.patch.object(demisto, 'getLastRun', return_value={'last_run': 1578383898000})
    req_args = ei.RequestArguments(query='type:ip', out_format='text', limit=50, offset=0)
    result = ei.get_outbound_ioc_values(on_demand=False, request_args=req_args, cache_refresh_rate='1 minute')
    assert all(row in cached_iocs for row in result)
@pytest.mark.get_outbound_ioc_values
def test_get_outbound_ioc_values_6(self, mocker):
    """Changed request query ('type:URL' -> 'type:ip') forces a refresh of the cached values."""
    import CommonServerPython as CSP
    mocker.patch.object(CSP, 'parse_date_range', return_value=(1578383898, 1578383898))
    import ExportIndicators as ei
    with open('ExportIndicators_test/TestHelperFunctions/iocs_cache_values_text.json', 'r') as cache_file:
        cached_iocs = json.load(cache_file)
    mocker.patch.object(demisto, 'getIntegrationContext', return_value={
        "last_output": cached_iocs,
        "last_limit": 50,
        "last_offset": 0,
        "last_query": "type:URL",
        "last_format": "text",
    })
    mocker.patch.object(ei, 'refresh_outbound_context', return_value=cached_iocs)
    mocker.patch.object(demisto, 'getLastRun', return_value={'last_run': 1578383898000})
    req_args = ei.RequestArguments(query='type:ip', out_format='text', limit=50, offset=0)
    result = ei.get_outbound_ioc_values(on_demand=False, request_args=req_args, cache_refresh_rate='1 minute')
    assert all(row in cached_iocs for row in result)
@pytest.mark.list_to_str
def test_list_to_str_1(self):
    """Non-list inputs (int, dict) must raise AttributeError."""
    from ExportIndicators import list_to_str
    for bad_value in (2, {'invalid': 'invalid'}):
        with pytest.raises(AttributeError):
            list_to_str(bad_value)
@pytest.mark.list_to_str
def test_list_to_str_2(self):
    """Empty or missing input yields an empty string."""
    from ExportIndicators import list_to_str
    for empty_value in (None, [], {}):
        assert list_to_str(empty_value) == ''
@pytest.mark.list_to_str
def test_list_to_str_3(self):
    """Values are joined with the given delimiter and optional mapping function."""
    from ExportIndicators import list_to_str
    items = [1, 2, 3, 4]
    assert list_to_str(items) == '1,2,3,4'
    assert list_to_str(items, '.') == '1.2.3.4'
    assert list_to_str(items, map_func=lambda item: f'{item}a') == '1a,2a,3a,4a'
@pytest.mark.get_params_port
def test_get_params_port_1(self):
    """A non-numeric port value raises DemistoException."""
    from CommonServerPython import DemistoException
    from ExportIndicators import get_params_port
    with pytest.raises(DemistoException):
        get_params_port({'longRunningPort': 'invalid'})
@pytest.mark.get_params_port
def test_get_params_port_2(self):
    """An empty port value raises ValueError."""
    from ExportIndicators import get_params_port
    with pytest.raises(ValueError):
        get_params_port({'longRunningPort': ''})
@pytest.mark.get_params_port
def test_get_params_port_3(self):
    """A numeric string port is converted to an int."""
    from ExportIndicators import get_params_port
    assert get_params_port({'longRunningPort': '80'}) == 80
@pytest.mark.refresh_outbound_context
def test_refresh_outbound_context_1(self, mocker):
    """out_format='text': every indicator value appears in the refreshed context."""
    import ExportIndicators as ei
    with open('ExportIndicators_test/TestHelperFunctions/demisto_iocs.json', 'r') as iocs_file:
        indicators = json.load(iocs_file)
    mocker.patch.object(ei, 'find_indicators_with_limit', return_value=indicators)
    req_args = ei.RequestArguments(query='', out_format='text', limit=38)
    output = ei.refresh_outbound_context(req_args)
    assert all(indicator.get('value') in output for indicator in indicators)
@pytest.mark.refresh_outbound_context
def test_refresh_outbound_context_2(self, mocker):
    """out_format='XSOAR json': output is a JSON string equal to the raw indicators."""
    import ExportIndicators as ei
    with open('ExportIndicators_test/TestHelperFunctions/demisto_iocs.json', 'r') as iocs_file:
        indicators = json.load(iocs_file)
    mocker.patch.object(ei, 'find_indicators_with_limit', return_value=indicators)
    req_args = ei.RequestArguments(query='', out_format='XSOAR json', limit=38)
    output = ei.refresh_outbound_context(req_args)
    assert isinstance(output, str)
    assert json.loads(output) == indicators
@pytest.mark.refresh_outbound_context
def test_refresh_outbound_context_3(self, mocker):
    """out_format='XSOAR csv': each expected CSV line appears in the output."""
    import ExportIndicators as ei
    with open('ExportIndicators_test/TestHelperFunctions/demisto_iocs.json', 'r') as iocs_file:
        indicators = json.load(iocs_file)
    mocker.patch.object(ei, 'find_indicators_with_limit', return_value=indicators)
    req_args = ei.RequestArguments(query='', out_format='XSOAR csv', limit=38)
    output = ei.refresh_outbound_context(req_args)
    with open('ExportIndicators_test/TestHelperFunctions/iocs_out_csv.txt', 'r') as expected_file:
        expected_lines = expected_file.read().split('\n')
    assert all(line in output for line in expected_lines)
@pytest.mark.refresh_outbound_context
def test_refresh_outbound_context_4(self, mocker):
    """out_format='XSOAR json-seq': output matches the expected file exactly."""
    import ExportIndicators as ei
    with open('ExportIndicators_test/TestHelperFunctions/demisto_iocs.json', 'r') as iocs_file:
        indicators = json.load(iocs_file)
    mocker.patch.object(ei, 'find_indicators_with_limit', return_value=indicators)
    req_args = ei.RequestArguments(query='', out_format='XSOAR json-seq', limit=38)
    output = ei.refresh_outbound_context(req_args)
    with open('ExportIndicators_test/TestHelperFunctions/iocs_out_json_seq.txt', 'r') as expected_file:
        assert expected_file.read() == output
@pytest.mark.refresh_outbound_context
def test_refresh_outbound_context_5(self, mocker):
    """out_format='json': output parses to the expected JSON document."""
    import ExportIndicators as ei
    with open('ExportIndicators_test/TestHelperFunctions/demisto_url_iocs.json', 'r') as iocs_file:
        indicators = json.load(iocs_file)
    mocker.patch.object(ei, 'find_indicators_with_limit', return_value=indicators)
    req_args = ei.RequestArguments(query='', out_format='json', limit=2)
    output = json.loads(ei.refresh_outbound_context(req_args))
    with open('ExportIndicators_test/TestHelperFunctions/iocs_out_json.json', 'r') as expected_file:
        assert json.load(expected_file) == output
@pytest.mark.refresh_outbound_context
def test_refresh_outbound_context_6(self, mocker):
    """out_format='json-seq' (legacy): each expected line appears in the output."""
    import ExportIndicators as ei
    with open('ExportIndicators_test/TestHelperFunctions/demisto_iocs.json', 'r') as iocs_file:
        indicators = json.load(iocs_file)
    mocker.patch.object(ei, 'find_indicators_with_limit', return_value=indicators)
    req_args = ei.RequestArguments(query='', out_format='json-seq', limit=38)
    output = ei.refresh_outbound_context(req_args)
    with open('ExportIndicators_test/TestHelperFunctions/iocs_out_json_seq_old.txt', 'r') as expected_file:
        expected_lines = expected_file.read().split('\n')
    assert all(line in output for line in expected_lines)
@pytest.mark.refresh_outbound_context
def test_refresh_outbound_context_7(self, mocker):
    """out_format='csv' (legacy): each expected CSV line appears in the output."""
    import ExportIndicators as ei
    with open('ExportIndicators_test/TestHelperFunctions/demisto_iocs.json', 'r') as iocs_file:
        indicators = json.load(iocs_file)
    mocker.patch.object(ei, 'find_indicators_with_limit', return_value=indicators)
    req_args = ei.RequestArguments(query='', out_format='csv', limit=38)
    output = ei.refresh_outbound_context(req_args)
    with open('ExportIndicators_test/TestHelperFunctions/iocs_out_csv_old.txt', 'r') as expected_file:
        expected_lines = expected_file.read().split('\n')
    assert all(line in output for line in expected_lines)
@pytest.mark.find_indicators_with_limit
def test_find_indicators_with_limit_1(self, mocker):
    """The returned indicator list is truncated to the requested limit."""
    import ExportIndicators as ei
    with open('ExportIndicators_test/TestHelperFunctions/demisto_iocs.json', 'r') as iocs_file:
        indicators = json.load(iocs_file)
    mocker.patch.object(ei, 'find_indicators_with_limit_loop', return_value=(indicators, 1))
    found = ei.find_indicators_with_limit(indicator_query='', limit=30, offset=0)
    assert len(found) == 30
@pytest.mark.find_indicators_with_limit
def test_find_indicators_with_limit_and_offset_1(self, mocker):
    """Limit is honoured and the offset skips the leading indicators."""
    import ExportIndicators as ei
    with open('ExportIndicators_test/TestHelperFunctions/demisto_iocs.json', 'r') as iocs_file:
        indicators = json.load(iocs_file)
    mocker.patch.object(ei, 'find_indicators_with_limit_loop', return_value=(indicators, 1))
    found = ei.find_indicators_with_limit(indicator_query='', limit=30, offset=1)
    assert len(found) == 30
    # offset=1 skips the first indicator, so the second one leads the result
    assert found[0].get('value') == '172.16.58.3'
@pytest.mark.find_indicators_with_limit_loop
def test_find_indicators_with_limit_loop_1(self, mocker):
    """The search loop runs and stops once the last page is reached."""
    import ExportIndicators as ei
    with open('ExportIndicators_test/TestHelperFunctions/demisto_iocs.json', 'r') as iocs_file:
        search_result = {'iocs': json.load(iocs_file)}
    mocker.patch.object(demisto, 'searchIndicators', return_value=search_result)
    _, next_page = ei.find_indicators_with_limit_loop(indicator_query='', limit=50)
    # next page advanced to 1, proving the loop body executed
    assert next_page == 1
@pytest.mark.find_indicators_with_limit_loop
def test_find_indicators_with_limit_loop_2(self, mocker):
    """The search loop runs and stops once the requested limit is reached."""
    import ExportIndicators as ei
    with open('ExportIndicators_test/TestHelperFunctions/demisto_iocs.json', 'r') as iocs_file:
        search_result = {'iocs': json.load(iocs_file)}
    mocker.patch.object(demisto, 'searchIndicators', return_value=search_result)
    ei.PAGE_SIZE = IOC_RES_LEN
    _, next_page = ei.find_indicators_with_limit_loop(indicator_query='', limit=30,
                                                      last_found_len=IOC_RES_LEN)
    # next page advanced to 1, proving the loop body executed
    assert next_page == 1
@pytest.mark.create_values_for_returned_dict
def test_create_values_for_returned_dict_1(self):
    """XSOAR CSV output: every produced CSV line exists in the expected file."""
    from ExportIndicators import create_values_for_returned_dict, FORMAT_XSOAR_CSV, RequestArguments, CTX_VALUES_KEY
    with open('ExportIndicators_test/TestHelperFunctions/demisto_iocs.json', 'r') as iocs_file:
        indicators = json.load(iocs_file)
    req_args = RequestArguments(query='', out_format=FORMAT_XSOAR_CSV)
    values_dict, _ = create_values_for_returned_dict(indicators, req_args)
    csv_out = values_dict.get(CTX_VALUES_KEY)
    with open('ExportIndicators_test/TestHelperFunctions/iocs_out_csv.txt', 'r') as expected_file:
        expected_csv = expected_file.read()
    assert all(line in expected_csv for line in csv_out.split('\n'))
@pytest.mark.create_values_for_returned_dict
def test_create_values_for_returned_dict_2(self):
    """XSOAR JSON output: the serialized values round-trip to the raw indicators."""
    from ExportIndicators import create_values_for_returned_dict, FORMAT_XSOAR_JSON, CTX_VALUES_KEY, RequestArguments
    with open('ExportIndicators_test/TestHelperFunctions/demisto_iocs.json', 'r') as iocs_file:
        indicators = json.load(iocs_file)
    req_args = RequestArguments(query='', out_format=FORMAT_XSOAR_JSON)
    values_dict, _ = create_values_for_returned_dict(indicators, req_args)
    assert json.loads(values_dict.get(CTX_VALUES_KEY)) == indicators
@pytest.mark.create_values_for_returned_dict
def test_create_values_for_returned_dict_3(self):
    """XSOAR JSON-SEQ output: every line parses to one of the raw indicators."""
    from ExportIndicators import create_values_for_returned_dict, FORMAT_XSOAR_JSON_SEQ, CTX_VALUES_KEY, RequestArguments
    with open('ExportIndicators_test/TestHelperFunctions/demisto_iocs.json', 'r') as iocs_file:
        indicators = json.load(iocs_file)
    req_args = RequestArguments(query='', out_format=FORMAT_XSOAR_JSON_SEQ)
    values_dict, _ = create_values_for_returned_dict(indicators, req_args)
    json_seq_out = values_dict.get(CTX_VALUES_KEY)
    assert all(json.loads(line) in indicators for line in json_seq_out.split('\n'))
@pytest.mark.create_values_for_returned_dict
def test_create_values_for_returned_dict_4(self):
    """TEXT output: every produced line exists in the expected cached values."""
    from ExportIndicators import create_values_for_returned_dict, FORMAT_TEXT, CTX_VALUES_KEY, RequestArguments
    with open('ExportIndicators_test/TestHelperFunctions/demisto_iocs.json', 'r') as iocs_file:
        indicators = json.load(iocs_file)
    req_args = RequestArguments(query='', out_format=FORMAT_TEXT)
    values_dict, _ = create_values_for_returned_dict(indicators, req_args)
    text_out = values_dict.get(CTX_VALUES_KEY)
    with open('ExportIndicators_test/TestHelperFunctions/iocs_cache_values_text.json', 'r') as expected_file:
        expected_values = json.load(expected_file)
    assert all(line in expected_values for line in text_out.split('\n'))
@pytest.mark.create_values_out_dict
def test_create_values_for_returned_dict_5(self):
    """JSON output: the serialized values equal the expected JSON document."""
    from ExportIndicators import create_values_for_returned_dict, FORMAT_JSON, CTX_VALUES_KEY, RequestArguments
    with open('ExportIndicators_test/TestHelperFunctions/demisto_url_iocs.json', 'r') as iocs_file:
        indicators = json.load(iocs_file)
    req_args = RequestArguments(query='', out_format=FORMAT_JSON)
    values_dict, _ = create_values_for_returned_dict(indicators, req_args)
    json_out = json.loads(values_dict.get(CTX_VALUES_KEY))
    with open('ExportIndicators_test/TestHelperFunctions/iocs_out_json.json', 'r') as expected_file:
        assert json.load(expected_file) == json_out
@pytest.mark.create_values_out_dict
def test_create_values_for_returned_dict_6(self):
"""Test JSON_SEQ out"""
from ExportIndicators import create_values_for_returned_dict, FORMAT_JSON_SEQ, CTX_VALUES_KEY, RequestArguments
with open('ExportIndicators_test/TestHelperFunctions/demisto_url_iocs.json', 'r') as iocs_json_f:
iocs_json = json.loads(iocs_json_f.read())
request_args = RequestArguments(query='', out_format=FORMAT_JSON_SEQ)
returned_dict, | |
# coding: utf-8
import pprint
import re
import six
class CreateListenerOption:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
# Attribute names whose values must be masked when logging/serializing
# (none for this model).
sensitive_list = []

# Maps each attribute name to its OpenAPI type, used by the SDK's
# generic (de)serialization machinery.
openapi_types = {
    'admin_state_up': 'bool',
    'client_ca_tls_container_ref': 'str',
    'default_pool_id': 'str',
    'default_tls_container_ref': 'str',
    'description': 'str',
    'http2_enable': 'bool',
    'insert_headers': 'ListenerInsertHeaders',
    'loadbalancer_id': 'str',
    'name': 'str',
    'project_id': 'str',
    'protocol': 'str',
    'protocol_port': 'int',
    'sni_container_refs': 'list[str]',
    'tags': 'list[Tag]',
    'tls_ciphers_policy': 'str',
    'enable_member_retry': 'bool',
    'keepalive_timeout': 'int',
    'client_timeout': 'int',
    'member_timeout': 'int',
    'ipgroup': 'CreateListenerIpGroupOption',
    'transparent_client_ip_enable': 'bool'
}

# Maps each attribute name to the JSON key used in the API request body.
attribute_map = {
    'admin_state_up': 'admin_state_up',
    'client_ca_tls_container_ref': 'client_ca_tls_container_ref',
    'default_pool_id': 'default_pool_id',
    'default_tls_container_ref': 'default_tls_container_ref',
    'description': 'description',
    'http2_enable': 'http2_enable',
    'insert_headers': 'insert_headers',
    'loadbalancer_id': 'loadbalancer_id',
    'name': 'name',
    'project_id': 'project_id',
    'protocol': 'protocol',
    'protocol_port': 'protocol_port',
    'sni_container_refs': 'sni_container_refs',
    'tags': 'tags',
    'tls_ciphers_policy': 'tls_ciphers_policy',
    'enable_member_retry': 'enable_member_retry',
    'keepalive_timeout': 'keepalive_timeout',
    'client_timeout': 'client_timeout',
    'member_timeout': 'member_timeout',
    'ipgroup': 'ipgroup',
    'transparent_client_ip_enable': 'transparent_client_ip_enable'
}
def __init__(self, admin_state_up=None, client_ca_tls_container_ref=None, default_pool_id=None, default_tls_container_ref=None, description=None, http2_enable=None, insert_headers=None, loadbalancer_id=None, name=None, project_id=None, protocol=None, protocol_port=None, sni_container_refs=None, tags=None, tls_ciphers_policy=None, enable_member_retry=None, keepalive_timeout=None, client_timeout=None, member_timeout=None, ipgroup=None, transparent_client_ip_enable=None):
    """CreateListenerOption - a model defined in huaweicloud sdk.

    Request body for creating a load-balancer listener. All values are
    stored in private ``_``-prefixed attributes and exposed through
    property getters/setters.

    Note: ``loadbalancer_id``, ``protocol`` and ``protocol_port`` are
    assigned unconditionally below (required fields); every other
    parameter is only assigned when not None (optional fields).
    """
    # Backing fields for the property accessors; populated below.
    self._admin_state_up = None
    self._client_ca_tls_container_ref = None
    self._default_pool_id = None
    self._default_tls_container_ref = None
    self._description = None
    self._http2_enable = None
    self._insert_headers = None
    self._loadbalancer_id = None
    self._name = None
    self._project_id = None
    self._protocol = None
    self._protocol_port = None
    self._sni_container_refs = None
    self._tags = None
    self._tls_ciphers_policy = None
    self._enable_member_retry = None
    self._keepalive_timeout = None
    self._client_timeout = None
    self._member_timeout = None
    self._ipgroup = None
    self._transparent_client_ip_enable = None
    # No polymorphic discriminator for this model.
    self.discriminator = None
    if admin_state_up is not None:
        self.admin_state_up = admin_state_up
    if client_ca_tls_container_ref is not None:
        self.client_ca_tls_container_ref = client_ca_tls_container_ref
    if default_pool_id is not None:
        self.default_pool_id = default_pool_id
    if default_tls_container_ref is not None:
        self.default_tls_container_ref = default_tls_container_ref
    if description is not None:
        self.description = description
    if http2_enable is not None:
        self.http2_enable = http2_enable
    if insert_headers is not None:
        self.insert_headers = insert_headers
    # Required field: always assigned, even if None was passed.
    self.loadbalancer_id = loadbalancer_id
    if name is not None:
        self.name = name
    if project_id is not None:
        self.project_id = project_id
    # Required fields: always assigned, even if None was passed.
    self.protocol = protocol
    self.protocol_port = protocol_port
    if sni_container_refs is not None:
        self.sni_container_refs = sni_container_refs
    if tags is not None:
        self.tags = tags
    if tls_ciphers_policy is not None:
        self.tls_ciphers_policy = tls_ciphers_policy
    if enable_member_retry is not None:
        self.enable_member_retry = enable_member_retry
    if keepalive_timeout is not None:
        self.keepalive_timeout = keepalive_timeout
    if client_timeout is not None:
        self.client_timeout = client_timeout
    if member_timeout is not None:
        self.member_timeout = member_timeout
    if ipgroup is not None:
        self.ipgroup = ipgroup
    if transparent_client_ip_enable is not None:
        self.transparent_client_ip_enable = transparent_client_ip_enable
@property
def admin_state_up(self):
    """Gets the admin_state_up of this CreateListenerOption.

    Administrative state of the listener. Only ``true`` may be set; the
    value has no practical effect.

    :return: The admin_state_up of this CreateListenerOption.
    :rtype: bool
    """
    return self._admin_state_up

@admin_state_up.setter
def admin_state_up(self, admin_state_up):
    """Sets the admin_state_up of this CreateListenerOption.

    Administrative state of the listener. Only ``true`` may be set; the
    value has no practical effect.

    :param admin_state_up: The admin_state_up of this CreateListenerOption.
    :type: bool
    """
    self._admin_state_up = admin_state_up
@property
def client_ca_tls_container_ref(self):
    """Gets the client_ca_tls_container_ref of this CreateListenerOption.

    ID of the CA certificate used by the listener.

    :return: The client_ca_tls_container_ref of this CreateListenerOption.
    :rtype: str
    """
    return self._client_ca_tls_container_ref

@client_ca_tls_container_ref.setter
def client_ca_tls_container_ref(self, client_ca_tls_container_ref):
    """Sets the client_ca_tls_container_ref of this CreateListenerOption.

    ID of the CA certificate used by the listener.

    :param client_ca_tls_container_ref: The client_ca_tls_container_ref of this CreateListenerOption.
    :type: str
    """
    self._client_ca_tls_container_ref = client_ca_tls_container_ref
@property
def default_pool_id(self):
    """Gets the default_pool_id of this CreateListenerOption.

    ID of the listener's default backend server group. Requests that
    match no forwarding policy are forwarded to this group.

    :return: The default_pool_id of this CreateListenerOption.
    :rtype: str
    """
    return self._default_pool_id

@default_pool_id.setter
def default_pool_id(self, default_pool_id):
    """Sets the default_pool_id of this CreateListenerOption.

    ID of the listener's default backend server group. Requests that
    match no forwarding policy are forwarded to this group.

    :param default_pool_id: The default_pool_id of this CreateListenerOption.
    :type: str
    """
    self._default_pool_id = default_pool_id
@property
def default_tls_container_ref(self):
    """Gets the default_tls_container_ref of this CreateListenerOption.

    ID of the server certificate used by the listener.

    :return: The default_tls_container_ref of this CreateListenerOption.
    :rtype: str
    """
    return self._default_tls_container_ref

@default_tls_container_ref.setter
def default_tls_container_ref(self, default_tls_container_ref):
    """Sets the default_tls_container_ref of this CreateListenerOption.

    ID of the server certificate used by the listener.

    :param default_tls_container_ref: The default_tls_container_ref of this CreateListenerOption.
    :type: str
    """
    self._default_tls_container_ref = default_tls_container_ref
@property
def description(self):
    """Gets the description of this CreateListenerOption.

    Description of the listener.

    :return: The description of this CreateListenerOption.
    :rtype: str
    """
    return self._description

@description.setter
def description(self, description):
    """Sets the description of this CreateListenerOption.

    Description of the listener.

    :param description: The description of this CreateListenerOption.
    :type: str
    """
    self._description = description
@property
def http2_enable(self):
    """Gets the http2_enable of this CreateListenerOption.

    Whether HTTP/2 is enabled. Takes effect only when the listener
    protocol is TERMINATED_HTTPS.

    :return: The http2_enable of this CreateListenerOption.
    :rtype: bool
    """
    return self._http2_enable

@http2_enable.setter
def http2_enable(self, http2_enable):
    """Sets the http2_enable of this CreateListenerOption.

    Whether HTTP/2 is enabled. Takes effect only when the listener
    protocol is TERMINATED_HTTPS.

    :param http2_enable: The http2_enable of this CreateListenerOption.
    :type: bool
    """
    self._http2_enable = http2_enable
@property
def insert_headers(self):
    """Gets the insert_headers of this CreateListenerOption.

    :return: The insert_headers of this CreateListenerOption.
    :rtype: ListenerInsertHeaders
    """
    return self._insert_headers

@insert_headers.setter
def insert_headers(self, insert_headers):
    """Sets the insert_headers of this CreateListenerOption.

    :param insert_headers: The insert_headers of this CreateListenerOption.
    :type: ListenerInsertHeaders
    """
    self._insert_headers = insert_headers
@property
def loadbalancer_id(self):
    """Gets the loadbalancer_id of this CreateListenerOption.

    ID of the load balancer that the listener is associated with.

    :return: The loadbalancer_id of this CreateListenerOption.
    :rtype: str
    """
    return self._loadbalancer_id

@loadbalancer_id.setter
def loadbalancer_id(self, loadbalancer_id):
    """Sets the loadbalancer_id of this CreateListenerOption.

    ID of the load balancer that the listener is associated with.

    :param loadbalancer_id: The loadbalancer_id of this CreateListenerOption.
    :type: str
    """
    self._loadbalancer_id = loadbalancer_id
@property
def name(self):
    """Gets the name of this CreateListenerOption.

    Listener name.

    :return: The name of this CreateListenerOption.
    :rtype: str
    """
    return self._name

@name.setter
def name(self, name):
    """Sets the name of this CreateListenerOption.

    Listener name.

    :param name: The name of this CreateListenerOption.
    :type: str
    """
    self._name = name
@property
def project_id(self):
    """Gets the project_id of this CreateListenerOption.

    ID of the project that the listener belongs to.

    :return: The project_id of this CreateListenerOption.
    :rtype: str
    """
    return self._project_id

@project_id.setter
def project_id(self, project_id):
    """Sets the project_id of this CreateListenerOption.

    ID of the project that the listener belongs to.

    :param project_id: The project_id of this CreateListenerOption.
    :type: str
    """
    self._project_id = project_id
@property
def protocol(self):
    """Gets the protocol of this CreateListenerOption.

    Listening protocol. TCP, HTTP, UDP and TERMINATED_HTTPS are supported.

    :return: The protocol of this CreateListenerOption.
    :rtype: str
    """
    return self._protocol

@protocol.setter
def protocol(self, protocol):
    """Sets the protocol of this CreateListenerOption.

    Listening protocol. TCP, HTTP, UDP and TERMINATED_HTTPS are supported.

    :param protocol: The protocol of this CreateListenerOption.
    :type: str
    """
    self._protocol = protocol
@property
def protocol_port(self):
    """Gets the protocol_port of this CreateListenerOption.

    Port that the listener listens on.

    :return: The protocol_port of this CreateListenerOption.
    :rtype: int
    """
    return self._protocol_port

@protocol_port.setter
def protocol_port(self, protocol_port):
    """Sets the protocol_port of this CreateListenerOption.

    Port that the listener listens on.

    :param protocol_port: The protocol_port of this CreateListenerOption.
    :type: int
    """
    self._protocol_port = protocol_port
@property
def sni_container_refs(self):
    """Gets the sni_container_refs of this CreateListenerOption.

    IDs of the SNI certificates (server certificates with domain names)
    used by the listener. Certificate domain names must not repeat, and
    the total number of domain names must not exceed 30.

    :return: The sni_container_refs of this CreateListenerOption.
    :rtype: list[str]
    """
    return self._sni_container_refs

@sni_container_refs.setter
def sni_container_refs(self, sni_container_refs):
    """Sets the sni_container_refs of this CreateListenerOption.

    IDs of the SNI certificates (server certificates with domain names)
    used by the listener. Certificate domain names must not repeat, and
    the total number of domain names must not exceed 30.

    :param sni_container_refs: The sni_container_refs of this CreateListenerOption.
    :type: list[str]
    """
    self._sni_container_refs = sni_container_refs
@property
def tags(self):
    """Gets the tags of this CreateListenerOption.

    List of tags.

    :return: The tags of this CreateListenerOption.
    :rtype: list[Tag]
    """
    return self._tags

@tags.setter
def tags(self, tags):
    """Sets the tags of this CreateListenerOption.

    List of tags.

    :param tags: The tags of this CreateListenerOption.
    :type: list[Tag]
    """
    self._tags = tags
@property
def tls_ciphers_policy(self):
    """Gets the tls_ciphers_policy of this CreateListenerOption.

    TLS security policy used by the listener. Valid only for
    TERMINATED_HTTPS listeners; default is tls-1-0. Allowed values:
    tls-1-0-inherit, tls-1-0, tls-1-1, tls-1-2, tls-1-2-strict,
    tls-1-2-fs.

    :return: The tls_ciphers_policy of this CreateListenerOption.
    :rtype: str
    """
    return self._tls_ciphers_policy

@tls_ciphers_policy.setter
def tls_ciphers_policy(self, tls_ciphers_policy):
    """Sets the tls_ciphers_policy of this CreateListenerOption.

    TLS security policy used by the listener. Valid only for
    TERMINATED_HTTPS listeners; default is tls-1-0. Allowed values:
    tls-1-0-inherit, tls-1-0, tls-1-1, tls-1-2, tls-1-2-strict,
    tls-1-2-fs.

    :param tls_ciphers_policy: The tls_ciphers_policy of this CreateListenerOption.
    :type: str
    """
    self._tls_ciphers_policy = tls_ciphers_policy
@property
def enable_member_retry(self):
    """Gets the enable_member_retry of this CreateListenerOption.

    Retry behaviour toward backend servers; only supported when the
    protocol is HTTP or HTTPS. NOTE(review): the original (Chinese) doc
    literally reads "whether to disable backend retry", while the field
    name suggests "enable" — confirm the polarity against the ELB API
    reference.

    :return: The enable_member_retry of this CreateListenerOption.
    :rtype: bool
    """
    return self._enable_member_retry

@enable_member_retry.setter
def enable_member_retry(self, enable_member_retry):
    """Sets the enable_member_retry of this CreateListenerOption.

    Retry behaviour toward backend servers; only supported when the
    protocol is HTTP or HTTPS. NOTE(review): see getter — confirm the
    flag's polarity against the ELB API reference.

    :param enable_member_retry: The enable_member_retry of this CreateListenerOption.
    :type: bool
    """
    self._enable_member_retry = enable_member_retry
@property
def keepalive_timeout(self):
    """Gets the keepalive_timeout of this CreateListenerOption.

    Idle timeout. For TCP listeners: 10-900 s, default 300 s. For
    HTTP/TERMINATED_HTTPS listeners this is the client connection idle
    timeout: 1-300 s, default 15 s. Not supported for UDP listeners.

    :return: The keepalive_timeout of this CreateListenerOption.
    :rtype: int
    """
    return self._keepalive_timeout

@keepalive_timeout.setter
def keepalive_timeout(self, keepalive_timeout):
    """Sets the keepalive_timeout of this CreateListenerOption.

    Idle timeout. For TCP listeners: 10-900 s, default 300 s. For
    HTTP/TERMINATED_HTTPS listeners this is the client connection idle
    timeout: 1-300 s, default 15 s. Not supported for UDP listeners.

    :param keepalive_timeout: The keepalive_timeout of this CreateListenerOption.
    :type: int
    """
    self._keepalive_timeout = keepalive_timeout
@property
def client_timeout(self):
    """Gets the client_timeout of this CreateListenerOption.

    Timeout for waiting for a client request; configurable only for
    HTTP and TERMINATED_HTTPS listeners. Range 1-60 s, default 60 s.
    Not supported for TCP or UDP listeners.

    :return: The client_timeout of this CreateListenerOption.
    :rtype: int
    """
    return self._client_timeout

@client_timeout.setter
def client_timeout(self, client_timeout):
    """Sets the client_timeout of this CreateListenerOption.

    Timeout for waiting for a client request; configurable only for
    HTTP and TERMINATED_HTTPS listeners. Range 1-60 s, default 60 s.
    Not supported for TCP or UDP listeners.

    :param client_timeout: The client_timeout of this CreateListenerOption.
    :type: int
    """
    self._client_timeout = client_timeout
@property
def member_timeout(self):
    """Gets the member_timeout of this CreateListenerOption.

    Timeout waiting for a backend-server response, configurable only on
    HTTP and TERMINATED_HTTPS listeners. Range 1-300s, default 60s. Not
    supported by TCP or UDP listeners.

    :return: The member_timeout of this CreateListenerOption.
    :rtype: int
    """
    return self._member_timeout
@member_timeout.setter
def member_timeout(self, value):
    """Set the timeout for waiting on a backend-server response.

    Configurable only on HTTP and TERMINATED_HTTPS listeners. Range 1-300s,
    default 60s. Not supported by TCP or UDP listeners.

    :param value: The member_timeout of this CreateListenerOption.
    :type: int
    """
    self._member_timeout = value
@property
def ipgroup(self):
"""Gets the ipgroup of this CreateListenerOption.
:return: The ipgroup | |
a child or children
as a father (mother), in place of the natural or adoptive
father (mother). [thefreedictionary]"""),
'lino_xl.lib.humanlinks.Link' : _("""A link between two persons."""),
'lino_xl.lib.humanlinks.Link.parent' : _("""Pointer to the person who is "parent"."""),
'lino_xl.lib.humanlinks.Link.child' : _("""Pointer to the person who is "child"."""),
'lino_xl.lib.humanlinks.Link.type' : _("""The type of link. Pointer to LinkTypes."""),
'lino_xl.lib.humanlinks.LinksByHuman' : _("""Show all links for which this human is either parent or child."""),
'lino_xl.lib.humanlinks.LinksByHuman.master' : _("""alias of lino_xl.lib.contacts.models.Person"""),
'lino_xl.lib.humanlinks.LinksByHuman.model' : _("""alias of Link"""),
'lino_xl.lib.ledger.fields.DcAmountField' : _("""An editable virtual PriceField to get and set both database fields
amount and dc at once. It may be used only on
models which also defines these two fields."""),
'lino_xl.lib.ledger.roles.VoucherSupervisor' : _("""Somebody who can edit vouchers which have been written by other
users."""),
'lino_xl.lib.lists.Plugin' : _("""See lino.core.Plugin."""),
'lino_xl.lib.notes.Plugin' : _("""See lino.core.Plugin."""),
'lino_xl.lib.online.users.UserStates' : _("""The list of possible choices for the state field
of a User."""),
'lino_xl.lib.online.users.MarkUserActive' : _("""Activate this user. This requires that the user has confirmed their
verifcation code, and that a username and password are set."""),
'lino_xl.lib.online.users.RegisterUser' : _("""Fill a form in order to register as a new system user."""),
'lino_xl.lib.online.users.NewUsers' : _("""List of new users to be confirmed by the system admin."""),
'lino_xl.lib.online.users.CheckedSubmitInsert' : _("""Like the standard lino.core.actions.SubmitInsert, but
checks certain things before accepting the new user."""),
'lino_xl.lib.online.users.VerifyUser' : _("""Enter your verification code."""),
'lino_xl.lib.online.users.User' : _("""Adds the following database fields to the User model."""),
'lino_xl.lib.online.users.User.callme_mode' : _("""Whether other users can see my contact data."""),
'lino_xl.lib.online.users.User.verification_code' : _("""A random string set for every new user. Used for
online_registration."""),
'lino_xl.lib.online.users.User.user_state' : _("""The registration state of this user."""),
'lino_xl.lib.outbox.RecipientTypes' : _("""A list of possible values for the type field of a
Recipient."""),
'lino_xl.lib.outbox.MailableType' : _("""Mixin for Models that serve as type of a Mailable.
Concrete examples are cal.EventType, cal.GuestRole,
notes.NoteType."""),
'lino_xl.lib.outbox.MailableType.templates_group' : _("""Should contain a string "<app_label>/<Model>" of the Mailable
being typed by this MailableType. Example:"""),
'lino_xl.lib.outbox.CreateMail' : _("""Creates an outbox mail and displays it."""),
'lino_xl.lib.outbox.Mailable' : _("""Mixin for models that provide a "Post" button. A Mailable model
must also inherit from mixins.Printable or some subclass
thereof."""),
'lino_xl.lib.outbox.Recipient' : _("""Abstract base for inbox.Recipient and outbox.Recipient."""),
'lino_xl.lib.outbox.SendMail' : _("""Sends an outbox.Mail as an email."""),
'lino_xl.lib.outbox.SentByPartner' : _("""Shows the Mails that have been sent to a given Partner."""),
'lino_xl.lib.outbox.SentByPartner.master' : _("""alias of lino_xl.lib.contacts.models.Partner"""),
'lino_xl.lib.outbox.SentByPartner.model' : _("""alias of Mail"""),
'lino_xl.lib.postings.Plugin' : _("""See lino.core.Plugin."""),
'lino_xl.lib.postings.CreatePostings' : _("""Creates a series of new Postings from this Postable.
The Postable gives the list of recipients, and there will
be one Posting for each recipient."""),
'lino_xl.lib.postings.Postable' : _("""Mixin for models that provide a "Post" button."""),
'lino_xl.lib.postings.PostingStates' : _("""List of possible values for the state field of a
Posting."""),
'lino_xl.lib.postings.Posting' : _("""A Posting is the fact that a letter or other item
has been sent using snail mail."""),
'lino_xl.lib.products.Plugin' : _("""See lino.core.Plugin."""),
'lino_xl.lib.products.ProductCat' : _("""A product category is a way to group products."""),
'lino_xl.lib.products.Product' : _("""A product is something you can sell or buy."""),
'lino_xl.lib.properties.Plugin' : _("""See lino.core.Plugin."""),
'lino_xl.lib.properties.DoYouLike' : _("""A list of possible answers to questions of type "How much do you
like ...?"."""),
'lino_xl.lib.properties.HowWell' : _("""A list of possible answers to questions of type "How well ...?":
"not at all", "a bit", "moderate", "quite well" and "very well" """),
'lino_xl.lib.properties.PropType' : _("""The type of the values that a property accepts.
Each PropType may (or may not) imply a list of choices."""),
'lino_xl.lib.properties.PropChoice' : _("""A Choice for a given PropType. text is the text to be displayed
in combo boxes."""),
'lino_xl.lib.properties.PropGroup' : _("""A Property Group defines a list of Properties that fit together
under a common name. Examples of Property Groups: Skills, Soft
Skills, Obstacles There will be one menu entry per Group."""),
'lino_xl.lib.properties.PropertyOccurence' : _("""A Property Occurence is when a Property occurs, possibly having a
certain value."""),
'lino_xl.lib.properties.ChoicesByType' : _("""Lists all PropChoices for a given PropType."""),
'lino_xl.lib.properties.ChoicesByType.master' : _("""alias of PropType"""),
'lino_xl.lib.properties.ChoicesByType.model' : _("""alias of PropChoice"""),
'lino_xl.lib.reception.Plugin' : _("""See lino.core.Plugin."""),
'lino_xl.lib.reception.Plugin.required_user_groups' : _("""The required user groups for viewing actors of this plugin."""),
'lino_xl.lib.reception.CheckinVisitor' : _("""Mark this visitor as arrived."""),
'lino_xl.lib.reception.ReceiveVisitor' : _("""The "Receive" action on a Guest."""),
'lino_xl.lib.reception.CheckoutVisitor' : _("""The "Checkout" action on a Guest."""),
'lino_xl.lib.reception.AppointmentsByPartner' : _("""Show the participations in upcoming calendar events for a given
partner."""),
'lino_xl.lib.reception.AppointmentsByPartner.model' : _("""alias of lino_xl.lib.cal.models.Guest"""),
'lino_xl.lib.reception.AppointmentsByPartner.master' : _("""alias of lino_xl.lib.contacts.models.Person"""),
'lino_xl.lib.reception.ExpectedGuests' : _("""General table of all expected guests."""),
'lino_xl.lib.reception.ExpectedGuests.model' : _("""alias of lino_xl.lib.cal.models.Guest"""),
'lino_xl.lib.reception.Visitors' : _("""Common base class for the following tables:"""),
'lino_xl.lib.reception.Visitors.model' : _("""alias of lino_xl.lib.cal.models.Guest"""),
'lino_xl.lib.reception.BusyVisitors' : _("""Show busy visitors (with any user)."""),
'lino_xl.lib.reception.BusyVisitors.model' : _("""alias of lino_xl.lib.cal.models.Guest"""),
'lino_xl.lib.reception.WaitingVisitors' : _("""Show waiting visitors (for any user)."""),
'lino_xl.lib.reception.WaitingVisitors.model' : _("""alias of lino_xl.lib.cal.models.Guest"""),
'lino_xl.lib.reception.GoneVisitors' : _("""Show gone visitors (for any user)."""),
'lino_xl.lib.reception.GoneVisitors.model' : _("""alias of lino_xl.lib.cal.models.Guest"""),
'lino_xl.lib.reception.MyWaitingVisitors' : _("""Show visitors waiting for me."""),
'lino_xl.lib.reception.MyWaitingVisitors.model' : _("""alias of lino_xl.lib.cal.models.Guest"""),
'lino_xl.lib.reception.MyBusyVisitors' : _("""Show the visitors with whom I am busy."""),
'lino_xl.lib.reception.MyBusyVisitors.model' : _("""alias of lino_xl.lib.cal.models.Guest"""),
'lino_xl.lib.reception.MyGoneVisitors' : _("""Show my visitors who have gone."""),
'lino_xl.lib.reception.MyGoneVisitors.model' : _("""alias of lino_xl.lib.cal.models.Guest"""),
'lino_xl.lib.reception.workflows.CloseMeeting' : _("""Close the meeting (mark it as "took place") and check out all
guests. Ask confirmation naming the guests who need to check out."""),
'lino_xl.lib.rooms.Plugin' : _("""See lino.core.Plugin."""),
'lino_xl.lib.sales.Plugin' : _("""See lino.core.plugin.Plugin."""),
'lino_xl.lib.sepa.Plugin' : _("""See lino.core.plugin.Plugin."""),
'lino_xl.lib.sepa.fields.UppercaseTextFieldElement' : _("""A CharFieldElement which accepts only upper-case characters."""),
'lino_xl.lib.sepa.fields.UppercaseTextField' : _("""A custom CharField that accepts only uppercase caracters."""),
'lino_xl.lib.sepa.fields.BICField' : _("""Database field used to store a BIC."""),
'lino_xl.lib.sepa.fields.IBANField' : _("""Database field used to store an IBAN."""),
'lino_xl.lib.sepa.BankAccount' : _("""Adds a field bank_account and its chooser."""),
'lino_xl.lib.sepa.Payable' : _("""Model mixin for database objects that are considered payable
transactions. To be combined with some mixin which defines a
field partner."""),
'lino_xl.lib.sepa.Payable.payment_term' : _("""See lino_xl.lib.ledger.mixins.PartnerRelated.payment_term"""),
'lino_xl.lib.sepa.Payable.title' : _("""A char field with a description for this transaction."""),
'lino_xl.lib.sepa.BankAccountChecker' : _("""Checks for the following data problems:"""),
'lino_xl.lib.sepa.BankAccountChecker.model' : _("""alias of BankAccount"""),
'lino_xl.lib.sepa.Account' : _("""A bank account related to a given Partner."""),
'lino_xl.lib.sepa.Account.statements' : _("""A virtual field which displays the date of the last imported
statement for this account. Clicking on this date will open
the B2C account <lino_cosi.lib.b2c.models.Account> with same
IBAN number."""),
'lino_xl.lib.sepa.AccountsByPartner' : _("""Show the bank account(s) defined for a given partner. To be
included to a detail window on partner."""),
'lino_xl.lib.sepa.AccountsByPartner.master' : _("""alias of lino_xl.lib.contacts.models.Partner"""),
'lino_xl.lib.sepa.AccountsByPartner.model' : _("""alias of Account"""),
'lino_xl.lib.sepa.roles.SepaUser' : _("""Can see imported statements and movements per partner."""),
'lino_xl.lib.sepa.roles.SepaStaff' : _("""Can see imported statements and movements also globally in the
:menuselection`Explorer` menu."""),
'lino_xl.lib.skills.SuggestedTicketsByEndUser' : _("""Shows the tickets of other users which need help on a faculty for
which I am competent."""),
'lino_xl.lib.skills.SuggestedTicketsByEndUser.master' : _("""alias of lino_xl.lib.contacts.models.Person"""),
'lino_xl.lib.skills.SuggestedTicketsByEndUser.model' : _("""alias of lino_xl.lib.tickets.models.Ticket"""),
'lino_xl.lib.skills.Competence' : _("""A skill offer is when a given user is declared to have a
given skill."""),
'lino_xl.lib.skills.Demand' : _("""A Skill demand is when a given demander declares to need a
given skill."""),
'lino_xl.lib.skills.Demand.importance' : _("""How important this skill is for this demand."""),
'lino_xl.lib.skills.Skill' : _("""A skill is a knowledge or ability which can be
required in order to work e.g. on some ticket, and which
individual users can have (offer) or not."""),
'lino_xl.lib.stars.Plugin' : _("""See lino.core.plugin.Plugin."""),
'lino_xl.lib.stars.Star' : _("""Represents the fact that a given database object is starred by a
given User."""),
'lino_xl.lib.stars.Star.owner' : _("""The starred database object"""),
'lino_xl.lib.stars.Star.user' : _("""The starring user (pointer to :class:lino.modlib.users.models.User`"""),
'lino_xl.lib.stars.Star.master' : _("""The starred object that caused this stared object"""),
'lino_xl.lib.teams.Plugin' : _("""See lino.core.Plugin."""),
'lino_xl.lib.tickets.roles.Searcher' : _("""A user who can see all tickets."""),
'lino_xl.lib.tickets.roles.Triager' : _("""A user who is responsible for triaging new tickets."""),
'lino_xl.lib.tickets.roles.Reporter' : _("""A user who can create new tickets and edit their own tickets."""),
'lino_xl.lib.tickets.roles.TicketsStaff' : _("""Can configure tickets functionality."""),
'lino_xl.lib.tim2lino.Plugin' : _("""See lino.core.plugin.Plugin."""),
'lino_xl.lib.tim2lino.Plugin.languages' : _("""The language distribution used in the database to import. Mandatory
parameter. No default value."""),
'lino_xl.lib.tim2lino.Plugin.use_dbfread' : _("""Whether to use <NAME>'s dbfread package to read the file."""),
'lino_xl.lib.tim2lino.Plugin.use_dbf_py' : _("""Whether to use <NAME>'s dbf package to read the file."""),
'lino_xl.lib.tim2lino.Plugin.dbf_table_ext' : _("""The file extension of TIM tables. Meaningful values are '.DBF' or
.FOX."""),
'lino_xl.lib.topics.Plugin' : _("""See lino.core.plugin.Plugin."""),
'lino_xl.lib.topics.AddInterestField' : _("""An editable virtual field used for adding an interest to the
object."""),
'lino_xl.lib.topics.TopicGroup' : _("""Currently not used."""),
'lino_xl.lib.topics.Interest' : _("""An interest is the fact that a given partner is interested in a
given topic."""),
'lino_xl.lib.topics.Topic' : _("""A topic is something somebody can be | |
= self.owner_id
result['DomainName'] = self.domain_name
result['StartTime'] = self.start_time
result['EndTime'] = self.end_time
result['Interval'] = self.interval
result['IspNameEn'] = self.isp_name_en
result['LocationNameEn'] = self.location_name_en
return result
def from_map(self, map={}):
    """Populate the request fields from a wire-format dict and return self."""
    for attr, key in (
        ('owner_id', 'OwnerId'),
        ('domain_name', 'DomainName'),
        ('start_time', 'StartTime'),
        ('end_time', 'EndTime'),
        ('interval', 'Interval'),
        ('isp_name_en', 'IspNameEn'),
        ('location_name_en', 'LocationNameEn'),
    ):
        setattr(self, attr, map.get(key))
    return self
class DescribeVodDomainTrafficDataResponse(TeaModel):
    """Response model for the DescribeVodDomainTrafficData API."""

    def __init__(self, request_id=None, domain_name=None, start_time=None, end_time=None, data_interval=None, traffic_data_per_interval=None):
        self.request_id = request_id
        self.domain_name = domain_name
        self.start_time = start_time
        self.end_time = end_time
        self.data_interval = data_interval
        # Nested DescribeVodDomainTrafficDataResponseTrafficDataPerInterval.
        self.traffic_data_per_interval = traffic_data_per_interval

    def validate(self):
        """Require every field; recurse into the nested interval model."""
        for value, label in (
            (self.request_id, 'request_id'),
            (self.domain_name, 'domain_name'),
            (self.start_time, 'start_time'),
            (self.end_time, 'end_time'),
            (self.data_interval, 'data_interval'),
            (self.traffic_data_per_interval, 'traffic_data_per_interval'),
        ):
            self.validate_required(value, label)
        if self.traffic_data_per_interval:
            self.traffic_data_per_interval.validate()

    def to_map(self):
        """Serialize to the wire-format dict."""
        nested = self.traffic_data_per_interval
        return {
            'RequestId': self.request_id,
            'DomainName': self.domain_name,
            'StartTime': self.start_time,
            'EndTime': self.end_time,
            'DataInterval': self.data_interval,
            'TrafficDataPerInterval': nested.to_map() if nested is not None else None,
        }

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        for attr, key in (
            ('request_id', 'RequestId'),
            ('domain_name', 'DomainName'),
            ('start_time', 'StartTime'),
            ('end_time', 'EndTime'),
            ('data_interval', 'DataInterval'),
        ):
            setattr(self, attr, map.get(key))
        raw = map.get('TrafficDataPerInterval')
        if raw is not None:
            self.traffic_data_per_interval = DescribeVodDomainTrafficDataResponseTrafficDataPerInterval().from_map(raw)
        else:
            self.traffic_data_per_interval = None
        return self
class DescribeVodDomainTrafficDataResponseTrafficDataPerIntervalDataModule(TeaModel):
    """One per-interval traffic sample (total, domestic/overseas and HTTPS splits)."""

    def __init__(self, time_stamp=None, value=None, domestic_value=None, overseas_value=None, https_value=None, https_domestic_value=None, https_overseas_value=None):
        self.time_stamp = time_stamp
        self.value = value
        self.domestic_value = domestic_value
        self.overseas_value = overseas_value
        self.https_value = https_value
        self.https_domestic_value = https_domestic_value
        self.https_overseas_value = https_overseas_value

    def validate(self):
        """Require every field."""
        for name in ('time_stamp', 'value', 'domestic_value', 'overseas_value',
                     'https_value', 'https_domestic_value', 'https_overseas_value'):
            self.validate_required(getattr(self, name), name)

    def to_map(self):
        """Serialize to the wire-format dict."""
        return {
            'TimeStamp': self.time_stamp,
            'Value': self.value,
            'DomesticValue': self.domestic_value,
            'OverseasValue': self.overseas_value,
            'HttpsValue': self.https_value,
            'HttpsDomesticValue': self.https_domestic_value,
            'HttpsOverseasValue': self.https_overseas_value,
        }

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        for attr, key in (
            ('time_stamp', 'TimeStamp'),
            ('value', 'Value'),
            ('domestic_value', 'DomesticValue'),
            ('overseas_value', 'OverseasValue'),
            ('https_value', 'HttpsValue'),
            ('https_domestic_value', 'HttpsDomesticValue'),
            ('https_overseas_value', 'HttpsOverseasValue'),
        ):
            setattr(self, attr, map.get(key))
        return self
class DescribeVodDomainTrafficDataResponseTrafficDataPerInterval(TeaModel):
    """Container for the repeated ``DataModule`` entries of a
    DescribeVodDomainTrafficData response.

    Attributes:
        data_module: list of ...TrafficDataPerIntervalDataModule entries,
            one per time interval.
    """

    def __init__(self, data_module=None):
        # Bug fix: the original ignored the constructor argument and always
        # reset data_module to []. Honour the argument; build a fresh list
        # only when nothing is passed (avoids a shared mutable default).
        self.data_module = data_module if data_module is not None else []

    def validate(self):
        """Require data_module and validate each non-empty entry."""
        self.validate_required(self.data_module, 'data_module')
        if self.data_module:
            for entry in self.data_module:
                if entry:
                    entry.validate()

    def to_map(self):
        """Serialize to the wire-format dict.

        Mirrors the generated-code convention of emitting an explicit None
        (rather than omitting the key) when the list itself is None.
        """
        result = {}
        if self.data_module is not None:
            result['DataModule'] = [entry.to_map() if entry else None
                                    for entry in self.data_module]
        else:
            result['DataModule'] = None
        return result

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self.

        Preserved quirk of the generated models: a missing/None 'DataModule'
        key sets data_module to None, not [].
        """
        raw = map.get('DataModule')
        if raw is not None:
            self.data_module = [
                DescribeVodDomainTrafficDataResponseTrafficDataPerIntervalDataModule().from_map(item)
                for item in raw
            ]
        else:
            self.data_module = None
        return self
class DescribeVodDomainBpsDataRequest(TeaModel):
    """Request model for DescribeVodDomainBpsData (bandwidth statistics)."""

    def __init__(self, owner_id=None, domain_name=None, start_time=None, end_time=None, interval=None, isp_name_en=None, location_name_en=None):
        self.owner_id = owner_id
        self.domain_name = domain_name
        self.start_time = start_time
        self.end_time = end_time
        self.interval = interval
        self.isp_name_en = isp_name_en
        self.location_name_en = location_name_en

    def validate(self):
        """This request has no required fields."""
        pass

    def to_map(self):
        """Serialize to the wire-format dict."""
        return {
            'OwnerId': self.owner_id,
            'DomainName': self.domain_name,
            'StartTime': self.start_time,
            'EndTime': self.end_time,
            'Interval': self.interval,
            'IspNameEn': self.isp_name_en,
            'LocationNameEn': self.location_name_en,
        }

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        for attr, key in (
            ('owner_id', 'OwnerId'),
            ('domain_name', 'DomainName'),
            ('start_time', 'StartTime'),
            ('end_time', 'EndTime'),
            ('interval', 'Interval'),
            ('isp_name_en', 'IspNameEn'),
            ('location_name_en', 'LocationNameEn'),
        ):
            setattr(self, attr, map.get(key))
        return self
class DescribeVodDomainBpsDataResponse(TeaModel):
    """Response model for DescribeVodDomainBpsData (bandwidth statistics)."""

    def __init__(self, request_id=None, domain_name=None, start_time=None, end_time=None, location_name_en=None, isp_name_en=None, data_interval=None, bps_data_per_interval=None):
        self.request_id = request_id
        self.domain_name = domain_name
        self.start_time = start_time
        self.end_time = end_time
        self.location_name_en = location_name_en
        self.isp_name_en = isp_name_en
        self.data_interval = data_interval
        # Nested DescribeVodDomainBpsDataResponseBpsDataPerInterval.
        self.bps_data_per_interval = bps_data_per_interval

    def validate(self):
        """Require every field; recurse into the nested interval model."""
        for value, label in (
            (self.request_id, 'request_id'),
            (self.domain_name, 'domain_name'),
            (self.start_time, 'start_time'),
            (self.end_time, 'end_time'),
            (self.location_name_en, 'location_name_en'),
            (self.isp_name_en, 'isp_name_en'),
            (self.data_interval, 'data_interval'),
            (self.bps_data_per_interval, 'bps_data_per_interval'),
        ):
            self.validate_required(value, label)
        if self.bps_data_per_interval:
            self.bps_data_per_interval.validate()

    def to_map(self):
        """Serialize to the wire-format dict."""
        nested = self.bps_data_per_interval
        return {
            'RequestId': self.request_id,
            'DomainName': self.domain_name,
            'StartTime': self.start_time,
            'EndTime': self.end_time,
            'LocationNameEn': self.location_name_en,
            'IspNameEn': self.isp_name_en,
            'DataInterval': self.data_interval,
            'BpsDataPerInterval': nested.to_map() if nested is not None else None,
        }

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        for attr, key in (
            ('request_id', 'RequestId'),
            ('domain_name', 'DomainName'),
            ('start_time', 'StartTime'),
            ('end_time', 'EndTime'),
            ('location_name_en', 'LocationNameEn'),
            ('isp_name_en', 'IspNameEn'),
            ('data_interval', 'DataInterval'),
        ):
            setattr(self, attr, map.get(key))
        raw = map.get('BpsDataPerInterval')
        if raw is not None:
            self.bps_data_per_interval = DescribeVodDomainBpsDataResponseBpsDataPerInterval().from_map(raw)
        else:
            self.bps_data_per_interval = None
        return self
class DescribeVodDomainBpsDataResponseBpsDataPerIntervalDataModule(TeaModel):
    """One per-interval bandwidth sample (total, domestic/overseas and HTTPS splits)."""

    def __init__(self, time_stamp=None, value=None, domestic_value=None, overseas_value=None, https_value=None, https_domestic_value=None, https_overseas_value=None):
        self.time_stamp = time_stamp
        self.value = value
        self.domestic_value = domestic_value
        self.overseas_value = overseas_value
        self.https_value = https_value
        self.https_domestic_value = https_domestic_value
        self.https_overseas_value = https_overseas_value

    def validate(self):
        """Require every field."""
        for name in ('time_stamp', 'value', 'domestic_value', 'overseas_value',
                     'https_value', 'https_domestic_value', 'https_overseas_value'):
            self.validate_required(getattr(self, name), name)

    def to_map(self):
        """Serialize to the wire-format dict."""
        return {
            'TimeStamp': self.time_stamp,
            'Value': self.value,
            'DomesticValue': self.domestic_value,
            'OverseasValue': self.overseas_value,
            'HttpsValue': self.https_value,
            'HttpsDomesticValue': self.https_domestic_value,
            'HttpsOverseasValue': self.https_overseas_value,
        }

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        for attr, key in (
            ('time_stamp', 'TimeStamp'),
            ('value', 'Value'),
            ('domestic_value', 'DomesticValue'),
            ('overseas_value', 'OverseasValue'),
            ('https_value', 'HttpsValue'),
            ('https_domestic_value', 'HttpsDomesticValue'),
            ('https_overseas_value', 'HttpsOverseasValue'),
        ):
            setattr(self, attr, map.get(key))
        return self
class DescribeVodDomainBpsDataResponseBpsDataPerInterval(TeaModel):
    """Container for the repeated ``DataModule`` entries of a
    DescribeVodDomainBpsData response.

    Attributes:
        data_module: list of ...BpsDataPerIntervalDataModule entries,
            one per time interval.
    """

    def __init__(self, data_module=None):
        # Bug fix: the original ignored the constructor argument and always
        # reset data_module to []. Honour the argument; build a fresh list
        # only when nothing is passed (avoids a shared mutable default).
        self.data_module = data_module if data_module is not None else []

    def validate(self):
        """Require data_module and validate each non-empty entry."""
        self.validate_required(self.data_module, 'data_module')
        if self.data_module:
            for entry in self.data_module:
                if entry:
                    entry.validate()

    def to_map(self):
        """Serialize to the wire-format dict.

        Mirrors the generated-code convention of emitting an explicit None
        (rather than omitting the key) when the list itself is None.
        """
        result = {}
        if self.data_module is not None:
            result['DataModule'] = [entry.to_map() if entry else None
                                    for entry in self.data_module]
        else:
            result['DataModule'] = None
        return result

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self.

        Preserved quirk of the generated models: a missing/None 'DataModule'
        key sets data_module to None, not [].
        """
        raw = map.get('DataModule')
        if raw is not None:
            self.data_module = [
                DescribeVodDomainBpsDataResponseBpsDataPerIntervalDataModule().from_map(item)
                for item in raw
            ]
        else:
            self.data_module = None
        return self
class DescribeVodDomainUsageDataRequest(TeaModel):
    """Request model for DescribeVodDomainUsageData."""

    def __init__(self, owner_id=None, domain_name=None, start_time=None, end_time=None, type=None, area=None, field=None):
        self.owner_id = owner_id
        self.domain_name = domain_name
        self.start_time = start_time
        self.end_time = end_time
        self.type = type
        self.area = area
        self.field = field

    def validate(self):
        """start_time, end_time and field are mandatory."""
        for value, label in (
            (self.start_time, 'start_time'),
            (self.end_time, 'end_time'),
            (self.field, 'field'),
        ):
            self.validate_required(value, label)

    def to_map(self):
        """Serialize to the wire-format dict."""
        return {
            'OwnerId': self.owner_id,
            'DomainName': self.domain_name,
            'StartTime': self.start_time,
            'EndTime': self.end_time,
            'Type': self.type,
            'Area': self.area,
            'Field': self.field,
        }

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        for attr, key in (
            ('owner_id', 'OwnerId'),
            ('domain_name', 'DomainName'),
            ('start_time', 'StartTime'),
            ('end_time', 'EndTime'),
            ('type', 'Type'),
            ('area', 'Area'),
            ('field', 'Field'),
        ):
            setattr(self, attr, map.get(key))
        return self
class DescribeVodDomainUsageDataResponse(TeaModel):
    """Response model for DescribeVodDomainUsageData."""

    def __init__(self, request_id=None, domain_name=None, start_time=None, end_time=None, type=None, area=None, data_interval=None, usage_data_per_interval=None):
        self.request_id = request_id
        self.domain_name = domain_name
        self.start_time = start_time
        self.end_time = end_time
        self.type = type
        self.area = area
        self.data_interval = data_interval
        # Nested DescribeVodDomainUsageDataResponseUsageDataPerInterval.
        self.usage_data_per_interval = usage_data_per_interval

    def validate(self):
        """Require every field; recurse into the nested interval model."""
        for value, label in (
            (self.request_id, 'request_id'),
            (self.domain_name, 'domain_name'),
            (self.start_time, 'start_time'),
            (self.end_time, 'end_time'),
            (self.type, 'type'),
            (self.area, 'area'),
            (self.data_interval, 'data_interval'),
            (self.usage_data_per_interval, 'usage_data_per_interval'),
        ):
            self.validate_required(value, label)
        if self.usage_data_per_interval:
            self.usage_data_per_interval.validate()

    def to_map(self):
        """Serialize to the wire-format dict."""
        nested = self.usage_data_per_interval
        return {
            'RequestId': self.request_id,
            'DomainName': self.domain_name,
            'StartTime': self.start_time,
            'EndTime': self.end_time,
            'Type': self.type,
            'Area': self.area,
            'DataInterval': self.data_interval,
            'UsageDataPerInterval': nested.to_map() if nested is not None else None,
        }

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        for attr, key in (
            ('request_id', 'RequestId'),
            ('domain_name', 'DomainName'),
            ('start_time', 'StartTime'),
            ('end_time', 'EndTime'),
            ('type', 'Type'),
            ('area', 'Area'),
            ('data_interval', 'DataInterval'),
        ):
            setattr(self, attr, map.get(key))
        raw = map.get('UsageDataPerInterval')
        if raw is not None:
            self.usage_data_per_interval = DescribeVodDomainUsageDataResponseUsageDataPerInterval().from_map(raw)
        else:
            self.usage_data_per_interval = None
        return self
class DescribeVodDomainUsageDataResponseUsageDataPerIntervalDataModule(TeaModel):
    """One per-interval usage sample: a timestamp and its value."""

    def __init__(self, time_stamp=None, value=None):
        self.time_stamp = time_stamp
        self.value = value

    def validate(self):
        """Both fields are required."""
        for name in ('time_stamp', 'value'):
            self.validate_required(getattr(self, name), name)

    def to_map(self):
        """Serialize to the wire-format dict."""
        return {'TimeStamp': self.time_stamp, 'Value': self.value}

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        self.time_stamp = map.get('TimeStamp')
        self.value = map.get('Value')
        return self
class DescribeVodDomainUsageDataResponseUsageDataPerInterval(TeaModel):
    """Container for the repeated ``DataModule`` entries of a
    DescribeVodDomainUsageData response.

    Attributes:
        data_module: list of ...UsageDataPerIntervalDataModule entries,
            one per time interval.
    """

    def __init__(self, data_module=None):
        # Bug fix: the original ignored the constructor argument and always
        # reset data_module to []. Honour the argument; build a fresh list
        # only when nothing is passed (avoids a shared mutable default).
        self.data_module = data_module if data_module is not None else []

    def validate(self):
        """Require data_module and validate each non-empty entry."""
        self.validate_required(self.data_module, 'data_module')
        if self.data_module:
            for entry in self.data_module:
                if entry:
                    entry.validate()

    def to_map(self):
        """Serialize to the wire-format dict.

        Mirrors the generated-code convention of emitting an explicit None
        (rather than omitting the key) when the list itself is None.
        """
        result = {}
        if self.data_module is not None:
            result['DataModule'] = [entry.to_map() if entry else None
                                    for entry in self.data_module]
        else:
            result['DataModule'] = None
        return result

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self.

        Preserved quirk of the generated models: a missing/None 'DataModule'
        key sets data_module to None, not [].
        """
        raw = map.get('DataModule')
        if raw is not None:
            self.data_module = [
                DescribeVodDomainUsageDataResponseUsageDataPerIntervalDataModule().from_map(item)
                for item in raw
            ]
        else:
            self.data_module = None
        return self
class DescribeVodCertificateListRequest(TeaModel):
    """Request model for DescribeVodCertificateList."""

    def __init__(self, owner_id=None, security_token=None, domain_name=None):
        self.owner_id = owner_id
        self.security_token = security_token
        self.domain_name = domain_name

    def validate(self):
        """This request has no required fields."""
        pass

    def to_map(self):
        """Serialize to the wire-format dict."""
        return {
            'OwnerId': self.owner_id,
            'SecurityToken': self.security_token,
            'DomainName': self.domain_name,
        }

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        for attr, key in (
            ('owner_id', 'OwnerId'),
            ('security_token', 'SecurityToken'),
            ('domain_name', 'DomainName'),
        ):
            setattr(self, attr, map.get(key))
        return self
class DescribeVodCertificateListResponse(TeaModel):
    """Response model for DescribeVodCertificateList."""

    def __init__(self, request_id=None, certificate_list_model=None):
        self.request_id = request_id
        # Nested DescribeVodCertificateListResponseCertificateListModel.
        self.certificate_list_model = certificate_list_model

    def validate(self):
        """Require both fields; recurse into the nested model."""
        self.validate_required(self.request_id, 'request_id')
        self.validate_required(self.certificate_list_model, 'certificate_list_model')
        if self.certificate_list_model:
            self.certificate_list_model.validate()

    def to_map(self):
        """Serialize to the wire-format dict."""
        nested = self.certificate_list_model
        return {
            'RequestId': self.request_id,
            'CertificateListModel': nested.to_map() if nested is not None else None,
        }

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        self.request_id = map.get('RequestId')
        raw = map.get('CertificateListModel')
        if raw is not None:
            self.certificate_list_model = DescribeVodCertificateListResponseCertificateListModel().from_map(raw)
        else:
            self.certificate_list_model = None
        return self
class DescribeVodCertificateListResponseCertificateListModelCertListCert(TeaModel):
def __init__(self, cert_name=None, cert_id=None, fingerprint=None, common=None, issuer=None, last_time=None):
self.cert_name = cert_name
self.cert_id = cert_id
self.fingerprint = fingerprint
self.common = common
self.issuer = issuer
self.last_time = last_time
def validate(self):
self.validate_required(self.cert_name, 'cert_name')
self.validate_required(self.cert_id, 'cert_id')
self.validate_required(self.fingerprint, 'fingerprint')
self.validate_required(self.common, 'common')
self.validate_required(self.issuer, 'issuer')
self.validate_required(self.last_time, 'last_time')
def to_map(self):
result = {}
result['CertName'] = self.cert_name
result['CertId'] = self.cert_id
result['Fingerprint'] = self.fingerprint
result['Common'] = self.common
result['Issuer'] = self.issuer
result['LastTime'] = self.last_time
return result
def from_map(self, map={}):
self.cert_name = map.get('CertName')
self.cert_id = map.get('CertId')
self.fingerprint = | |
<gh_stars>10-100
from logging import Logger
import requests
import time
try:
# web3 4.0
from eth_account.internal.transactions import assert_valid_fields
except ImportError:
# web3 5.0
from eth_account._utils.transactions import assert_valid_fields
from sto.ethereum.utils import mk_contract_address, get_constructor_arguments
from sto.models.broadcastaccount import _BroadcastAccount, _PreparedTransaction
from sto.models.utils import now
from eth_account import Account
from eth_utils import to_checksum_address
from sqlalchemy.orm import Session, Query
from typing import Optional, Iterable
from web3 import Web3
from web3.contract import Contract
from sto.models.implementation import _BroadcastAccount
class NetworkAndDatabaseNonceOutOfSync(Exception):
    """Signals that the on-chain transaction count disagrees with the locally tracked nonce.

    Intended to be used by ensure_accounts_in_sync() when the network reports a
    different transaction count than our database row records.
    """
    pass
class AddressConfigurationMismatch(Exception):
    """Raised when a prepared transaction belongs to a different broadcast account
    than the one currently configured (see EthereumStoredTXService.broadcast)."""
    pass
class CouldNotVerifyOnEtherScan(Exception):
    """Signals a failure of the EtherScan contract source verification flow
    (see verify_on_etherscan below)."""
    pass
class EthereumStoredTXService:
    """A transaction service that writes entries to a local database before trying to broadcast them to the blockchain.

    Transactions are first persisted as ``prepared_tx_model`` rows so that nonces
    and unsigned payloads survive process restarts.  Broadcasting
    (:meth:`broadcast`) and result polling (:meth:`update_status`) are separate,
    replayable steps operating on those rows.
    """

    #: Can't trust auto estimate
    SPECIAL_GAS_LIMIT_FOR_CONTRACT_DEPLOYMENT = 3500000  # Number from Ethereum tester, cannot exceed this

    #: Can't trust auto estimate
    SPECIAL_GAS_LIMIT_FOR_NORMAL_TX = 666111

    def __init__(self, network: str, dbsession: Session, web3: Web3, private_key_hex: str, gas_price, gas_limit, broadcast_account_model, prepared_tx_model):
        """
        :param network: Network name, e.g. "kovan"
        :param dbsession: SQLAlchemy session used for all persistence
        :param web3: Connected Web3 instance
        :param private_key_hex: Hex encoded private key of the broadcasting account
        :param gas_price: Gas price in wei, or falsy to use the 20 GWei default
        :param gas_limit: Fixed gas limit, or falsy to fall back to the hardcoded per-tx-type limits
        :param broadcast_account_model: SQLAlchemy model class for broadcast accounts (caller supplied)
        :param prepared_tx_model: SQLAlchemy model class for prepared transactions (caller supplied)
        """
        assert isinstance(web3, Web3)

        self.network = network  # "kovan"
        self.dbsession = dbsession
        self.web3 = web3

        self.private_key_hex = private_key_hex
        self.account = Account.privateKeyToAccount(private_key_hex)

        # SQLAlchemy models, allow caller to supply their own
        self.broadcast_account_model = broadcast_account_model
        self.prepared_tx_model = prepared_tx_model

        self.gas_price = gas_price or 20*10**9  # Default 20 GWei
        assert self.gas_price > 1*10**9, "Are you sure you want less than 1 GWei gas price?"

        if gas_limit:
            assert type(gas_limit) == int
        self.gas_limit = gas_limit

    @property
    def address(self):
        """Checksummed address of the broadcasting account."""
        return self.account.address

    def get_or_create_broadcast_account(self):
        """Fetch the database row tracking this (network, address) pair, creating it on first use.

        :return: broadcast_account_model instance
        """
        # Some early prototype sanity checks
        assert self.address.startswith("0x")
        assert self.network in ("kovan", "ethereum", "testing", "ropsten")  # TODO: Sanity check - might want to remove this

        account = self.dbsession.query(self.broadcast_account_model).filter_by(network=self.network, address=self.address).one_or_none()
        if not account:
            account = self.broadcast_account_model(network=self.network, address=self.address)
            self.dbsession.add(account)
            self.dbsession.flush()

        return account

    def get_next_nonce(self):
        """Next unused nonce according to our local database (not the network)."""
        broadcast_account = self.get_or_create_broadcast_account()
        return broadcast_account.current_nonce

    def ensure_accounts_in_sync(self):
        """Make sure that our internal nonce and external nonce looks correct.

        :raise NetworkAndDatabaseNonceOutOfSync: when the network transaction
            count differs from our locally tracked nonce
        """
        broadcast_account = self.get_or_create_broadcast_account()
        tx_count = self.web3.eth.getTransactionCount(broadcast_account.address)

        if tx_count != broadcast_account.current_nonce:
            # BUGFIX: the exception was previously constructed but never raised,
            # so out-of-sync nonces were silently ignored.
            raise NetworkAndDatabaseNonceOutOfSync("Nonced out of sync. Network: {}, database: {}. Maybe you have a pending broadcasts propagating?".format(tx_count, broadcast_account.current_nonce))

    def allocate_transaction(self,
                             broadcast_account: _BroadcastAccount,
                             receiver: Optional[str],
                             contract_address: Optional[str],
                             contract_deployment: bool,
                             nonce: int,
                             note: str,
                             unsigned_payload: dict,
                             gas_price: Optional[int],
                             gas_limit: Optional[int],
                             external_id: Optional[str]=None,
                             ) -> _PreparedTransaction:
        """Put a transaction to the pending queue of the current broadcast list.

        Persists the unsigned payload and advances the account's local nonce.

        :param broadcast_account: Account that will sign and broadcast this tx
        :param receiver: Receiver address ("0x...") or None for contract deployments
        :param contract_address: Target (or to-be-created) contract address
        :param contract_deployment: True when this tx creates a contract
        :param nonce: Must equal the account's current local nonce
        :param note: Human readable bookkeeping description
        :param unsigned_payload: Web3 transaction dict, validated with assert_valid_fields()
        :param external_id: Optional idempotency key (see distribute_tokens)
        :return: Newly created, unflushed prepared_tx_model instance
        """
        if receiver:
            assert receiver.startswith("0x")

        assert contract_deployment in (True, False)
        assert broadcast_account.current_nonce == nonce
        assert type(unsigned_payload) == dict
        assert_valid_fields(unsigned_payload)

        tx = self.prepared_tx_model()  # type: _PreparedTransaction
        tx.nonce = nonce
        tx.human_readable_description = note
        tx.receiver = receiver
        tx.contract_address = contract_address
        tx.contract_deployment = contract_deployment
        tx.unsigned_payload = unsigned_payload
        tx.external_id = external_id

        broadcast_account.txs.append(tx)
        broadcast_account.current_nonce += 1

        return tx

    def generate_tx_data(self, nonce: int, contract_tx=False) -> dict:
        """Generate transaction control parameters.

        :param nonce: Nonce to embed in the transaction dict
        :param contract_tx: We use a special hardcoded gas estimate when deploying
            contracts. Kovan misestimates the cost of deploying SecurityToken and
            thus the transaction always fails with the auto estimate.
        :return: Partial Web3 transaction dict (nonce, gas, gasPrice)
        """
        # See TRANSACTION_VALID_VALUES
        tx_data = {}
        tx_data["nonce"] = nonce

        # Mikko's rule of thumb estimator because local accounts do not estimate gas too well
        if self.gas_limit:
            tx_data["gas"] = self.gas_limit
        elif contract_tx:
            tx_data["gas"] = EthereumStoredTXService.SPECIAL_GAS_LIMIT_FOR_CONTRACT_DEPLOYMENT
        else:
            tx_data["gas"] = EthereumStoredTXService.SPECIAL_GAS_LIMIT_FOR_NORMAL_TX

        if self.gas_price:
            tx_data["gasPrice"] = self.gas_price

        return tx_data

    def deploy_contract(self, contract_name: str, abi: dict, note: str, constructor_args=None) -> _PreparedTransaction:
        """Deploys a contract.

        Derives the future contract address from (sender, nonce) and queues the
        deployment transaction together with the metadata needed for later
        EtherScan source verification.

        :param contract_name: Key into the ABI bundle
        :param abi: Postprocessed ABI data bundle (must include "source")
        :param note: Human readable bookkeeping description
        :param constructor_args: Keyword arguments passed to the contract constructor
        """
        if not constructor_args:
            constructor_args = {}

        abi_data = abi[contract_name]
        assert "source" in abi_data, "We need to have special postprocessed ABI data bundle, as we need the contract source code for EtherScan verification"

        contract_class = Contract.factory(
            web3=self.web3,
            abi=abi_data["abi"],
            bytecode=abi_data["bytecode"],
            bytecode_runtime=abi_data["bytecode_runtime"],
        )

        broadcast_account = self.get_or_create_broadcast_account()
        next_nonce = self.get_next_nonce()

        # Creates a dict for signing
        tx_data = self.generate_tx_data(next_nonce, contract_tx=True)
        constructed_txn = contract_class.constructor(**constructor_args).buildTransaction(tx_data)

        constructor_arguments = get_constructor_arguments(contract_class, kwargs=constructor_args)

        # The deployed contract address is deterministic: hash of (sender, nonce)
        derived_contract_address = mk_contract_address(self.address, next_nonce)
        derived_contract_address = to_checksum_address(derived_contract_address.lower())

        constructed_txn["to"] = ""  # Otherwise database serializer complains about bytes string

        tx = self.allocate_transaction(
            broadcast_account=broadcast_account,
            receiver=None,
            contract_address=derived_contract_address,
            contract_deployment=True,
            nonce=next_nonce,
            note=note,
            unsigned_payload=constructed_txn,
            gas_price=self.gas_price,
            gas_limit=self.gas_limit,
        )
        self.dbsession.flush()  # Populate other_data

        tx.abi = abi_data
        tx.constructor_arguments = constructor_arguments

        # NOTE(review): assumes the model derives these from abi_data on flush -- confirm
        assert tx.compiler_version
        assert tx.flattened_source_code

        return tx

    def get_contract_proxy(self, contract_name: str, abi: dict, address: str, use_bytecode: bool=True) -> Contract:
        """Get web3.Contract to interact directly with the network.

        :param use_bytecode: Also attach deploy/runtime bytecode to the contract class
        """
        abi_data = abi[contract_name]

        contract_class = Contract.factory(
            web3=self.web3,
            abi=abi_data["abi"],
        )

        if use_bytecode:
            # BUGFIX: trailing commas previously turned both assignments into
            # one-element tuples, corrupting the bytecode attributes.
            contract_class.bytecode = abi_data["bytecode"]
            contract_class.bytecode_runtime = abi_data["bytecode_runtime"]

        return contract_class(address=to_checksum_address(address))

    def interact_with_contract(self, contract_name: str, abi: dict, address: str, note: str, func_name: str, args=None, receiver=None, use_bytecode=True) -> _PreparedTransaction:
        """Does a transaction against a contract.

        Queues a call of ``func_name(**args)`` on the contract at ``address``.
        """
        assert address.startswith("0x")

        if not args:
            args = {}

        contract = self.get_contract_proxy(contract_name, abi, address, use_bytecode=use_bytecode)

        broadcast_account = self.get_or_create_broadcast_account()
        next_nonce = self.get_next_nonce()

        func = contract.get_function_by_name(func_name)

        tx_data = self.generate_tx_data(next_nonce)
        constructed_txn = func(**args).buildTransaction(tx_data)

        tx = self.allocate_transaction(
            broadcast_account=broadcast_account,
            receiver=receiver,
            contract_address=address,
            contract_deployment=False,
            nonce=next_nonce,
            note=note,
            unsigned_payload=constructed_txn,
            gas_price=self.gas_price,
            gas_limit=self.gas_limit,
        )
        self.dbsession.flush()
        return tx

    def is_distributed(self, external_id: str, contract_address: str) -> bool:
        """Prevent us sending the same transaction twice.

        :return: True when a prepared tx with this (external_id, contract_address) already exists
        """
        existing = self.dbsession.query(self.prepared_tx_model).filter_by(external_id=external_id, contract_address=contract_address).one_or_none()
        return existing is not None

    def distribute_tokens(self, external_id: str, receiver_address: str, raw_amount: int, token_address: str, abi: dict, note: str, contract_name="ERC20", func_name="transfer", receiver=None) -> _PreparedTransaction:
        """Send out tokens.

        :param external_id: Idempotency key; re-using one raises RuntimeError
        :param raw_amount: Token amount in raw (smallest) units, >= 1
        :raise RuntimeError: if this (external_id, token_address) pair was already queued
        """
        assert receiver_address.startswith("0x")
        assert token_address.startswith("0x")
        assert type(raw_amount) == int
        assert raw_amount >= 1

        # Prevent us sending the same transaction twice
        # (consistency: reuse is_distributed instead of duplicating its query)
        if self.is_distributed(external_id, token_address):
            raise RuntimeError("Already distributed token:{} id:{}".format(token_address, external_id))

        contract = self.get_contract_proxy(contract_name, abi, token_address)

        broadcast_account = self.get_or_create_broadcast_account()
        next_nonce = self.get_next_nonce()

        args = [receiver_address, raw_amount]
        func = getattr(contract.functions, func_name)

        tx_data = self.generate_tx_data(next_nonce)
        constructed_txn = func(*args).buildTransaction(tx_data)

        tx = self.allocate_transaction(
            broadcast_account=broadcast_account,
            receiver=receiver_address,
            contract_address=token_address,
            contract_deployment=False,
            nonce=next_nonce,
            note=note,
            unsigned_payload=constructed_txn,
            gas_price=self.gas_price,
            gas_limit=self.gas_limit,
            external_id=external_id,
        )
        self.dbsession.flush()
        return tx

    def get_raw_token_balance(self, token_address: str, abi: dict, contract_name="ERC20Basic", func_name="balanceOf") -> int:
        """Check that we have enough token balance for distribute operations.

        :return: Balance of the broadcasting account in raw token units
        """
        assert token_address.startswith("0x")

        contract = self.get_contract_proxy(contract_name, abi, token_address)
        broadcast_account = self.get_or_create_broadcast_account()

        args = {
            "who": broadcast_account.address,
        }

        func = getattr(contract.functions, func_name)
        result = func(**args).call()
        return result

    def get_pending_broadcasts(self) -> Query:
        """All transactions that need to be broadcasted."""
        return self.dbsession.query(self.prepared_tx_model).filter_by(broadcasted_at=None).order_by(self.prepared_tx_model.nonce).join(self.broadcast_account_model).filter_by(network=self.network)

    def get_pending_broadcasts_for_address(self, address: str) -> Query:
        """All transactions that need to be broadcasted for a specific broadcasting account."""
        # BUGFIX: previously filtered on self.address, silently ignoring the
        # `address` argument this method exists to honor.
        return self.dbsession.query(self.prepared_tx_model).filter_by(broadcasted_at=None).order_by(self.prepared_tx_model.nonce).join(self.broadcast_account_model).filter_by(network=self.network).filter(self.broadcast_account_model.address == address)

    def get_unmined_txs(self) -> Query:
        """All transactions that do not yet have a block assigned."""
        return self.dbsession.query(self.prepared_tx_model).filter(self.prepared_tx_model.txid != None).filter_by(result_block_num=None).join(self.broadcast_account_model).filter_by(network=self.network)

    def get_last_transactions(self, limit: int) -> Query:
        """Fetch latest transactions, newest first."""
        assert type(limit) == int
        return self.dbsession.query(self.prepared_tx_model).order_by(self.prepared_tx_model.created_at.desc()).limit(limit)

    def broadcast(self, tx: _PreparedTransaction):
        """Push transactions to Ethereum network.

        Signs the stored payload with our private key, records the tx hash and
        broadcast timestamp, then submits the raw transaction.

        :raise AddressConfigurationMismatch: if tx was prepared for another account
        """
        if tx.broadcast_account.address != self.address:
            raise AddressConfigurationMismatch("Could not broadcast {} due to address mismatch. A pendign transaction was created for account {}, but we are using configured account {}".format(tx.human_readable_description, tx.broadcast_account.address, self.address))

        tx_data = tx.unsigned_payload
        signed = self.web3.eth.account.signTransaction(tx_data, self.private_key_hex)
        tx.txid = signed.hash.hex()
        self.web3.eth.sendRawTransaction(signed.rawTransaction)
        tx.broadcasted_at = now()
        return tx

    def update_status(self, tx: _PreparedTransaction):
        """Update tx status from Ethereum network.

        Polls the transaction receipt; when mined, records block number and
        success/failure.
        """
        assert tx.txid

        # https://web3py.readthedocs.io/en/stable/web3.eth.html#web3.eth.Eth.getTransactionReceipt
        receipt = self.web3.eth.getTransactionReceipt(tx.txid)
        if receipt:
            tx.result_block_num = receipt["blockNumber"]
            # https://ethereum.stackexchange.com/a/6003/620
            if receipt["status"] == 0:
                tx.result_transaction_success = False
                tx.result_transaction_reason = "Transaction failed"  # TODO: Need some logic to separate failure modes
            else:
                tx.result_transaction_success = True
            tx.result_fetched_at = now()

        return tx

    @classmethod
    def print_transactions(cls, txs: Iterable[_PreparedTransaction]):
        """Print transaction status to the console"""
        from tabulate import tabulate  # https://bitbucket.org/astanin/python-tabulate
        import colorama  # https://pypi.org/project/colorama/

        colorama.init()

        table = []
        for tx in txs:
            status = tx.get_status()
            # Colour-code each lifecycle state for quick scanning
            if status == "waiting":
                status = colorama.Fore.BLUE + status + colorama.Fore.RESET
            elif status == "broadcasted":
                status = colorama.Fore.YELLOW + status + colorama.Fore.RESET
            elif status == "mining":
                status = colorama.Fore.YELLOW + status + colorama.Fore.RESET
            elif status in ("success", "verified"):
                status = colorama.Fore.GREEN + status + colorama.Fore.RESET
                status += ":" + str(tx.result_block_num)
            elif status == "failed":
                status = colorama.Fore.RED + status + colorama.Fore.RESET
                status += ":" + str(tx.result_block_num)
            else:
                raise RuntimeError("Does not compute")

            table.append((tx.txid, status, tx.nonce, tx.get_from(), tx.get_to(), tx.human_readable_description[0:64]))

        print(tabulate(table, headers=["TXID", "Status and block", "Nonce", "From", "To", "Note"]))
def verify_on_etherscan(logger: Logger, network: str, tx: _PreparedTransaction, api_key: str, session, timeout=120):
"""Verify a contrcact deployment on Etherscan.
Uses https://etherscan.io/apis#contracts
"""
assert network in ("ethereum", "kovan", "ropsten", "rinkerby")
if network != "ethereum":
url = "https://api-{}.etherscan.io/api".format(network)
else:
url = "https://api.etherscan.io/api"
assert tx.result_transaction_success
assert tx.contract_deployment
source = tx.flattened_source_code
assert source.strip(), "Source code missing"
compiler = tx.compiler_version
address = tx.contract_address
constructor_arguments = tx.constructor_arguments
contract_name = tx.contract_name
data = {
"apikey": api_key,
"module": "contract",
"contractaddress": address,
"action": "verifysourcecode",
"sourceCode": source,
"contractname": contract_name,
"compilerversion": "v" + compiler, # https://etherscan.io/solcversions
"constructorArguements": constructor_arguments[2:], # Remove leading 0x
"optimizationUsed": 1, # TODO: Hardcoded
"runs": 500, # TODO: Hardcoded
}
info_data = data.copy()
del info_data["sourceCode"] # Too verbose
del info_data["apikey"] # Security
logger.info("Calling EtherScan API as: %s", info_data)
#
# Step 1: EtherScan validates input and gives us a ticket id to track the submission status
#
resp | |
<reponame>lesyk/Evolife<filename>Other/Ants/Ants.py
#!/usr/bin/env python
##############################################################################
# EVOLIFE www.dessalles.fr/Evolife <NAME> #
# Telecom ParisTech 2014 www.dessalles.fr #
##############################################################################
##############################################################################
# Ants #
##############################################################################
""" Collective foraging:
Though individual agents follow erratic paths to find food,
the collective may discover optimal paths.
"""
# In this story, 'ants' move in search for food
# In the absence of pheromone, ants move randomly for some time,
# and then head back toward the colony.
# When they find food, they return to the colony while laying down pheromone.
# If they find pheromone, ants tend to follow it.
####### NOTE: this is just a sketch. The programme must be completed to
####### display appropriate behaviour
import sys
from time import sleep
import random
import threading
import time
sys.path.append('..')
sys.path.append('../../..')
import Evolife.Scenarii.Parameters as EPar
import Evolife.Ecology.Observer as EO
import Evolife.Ecology.Individual as EI
import Evolife.Ecology.Group as EG
import Evolife.Ecology.Population as EP
import Evolife.QtGraphics.Evolife_Window as EW
import Evolife.Tools.Tools as ET
import Landscapes
print ET.boost()	# significantly accelerates python on some platforms (Python 2 print statement)
# two functions to convert from complex numbers into (x,y) coordinates
c2t = lambda c: (int(round(c.real)),int(round(c.imag))) # converts a complex into a couple
t2c = lambda (x,y): complex(x,y) # converts a couple into a complex (Python 2 tuple-parameter syntax)

#################################################
# Aspect of ants, food and pheromons on display
#################################################
# Each aspect is (colour, size) as consumed by the Evolife display layer
AntAspect = ('black', 6)	# 6 = size
AntAspectWhenLaden = ('red1', 7)	# 7 = size
FoodAspect = ('yellow', 14)
FoodDepletedAspect = ('brown', 14)
PPAspect = (17, 2)	# 17th colour
NPAspect = ('blue', 2)
class Ant_Observer(EO.Observer):
    """ Stores global variables for observation
    """
    def __init__(self, Scenario):
        EO.Observer.__init__(self, Scenario)
        self.CurrentChanges = []	# buffer of display changes not yet handed to the GUI
        self.recordInfo('CurveNames', [('yellow', 'Year (each ant moves once a year on average)\n\t\tx\n\t\tAmount of food collected')])
        self.FoodCollected = 0	# running total of food units brought back

    def recordChanges(self, Info):
        """ Buffers one display change until the GUI asks for it.
        Info is a couple (InfoName, Position) and Position == (x,y) or a longer tuple.
        """
        self.CurrentChanges.append(Info)

    def get_info(self, Slot):
        """ Called when display is required """
        if Slot != 'PlotOrders':
            return EO.Observer.get_info(self, Slot)
        # one curve point: (simulated year, food collected so far)
        Year = self.StepId // Gbl.Parameter('PopulationSize')
        return [('yellow', (Year, self.FoodCollected))]

    def get_data(self, Slot):
        """ Hands the buffered position changes over and resets the buffer """
        if Slot != 'Positions':
            return EO.Observer.get_data(self, Slot)
        Buffered = self.CurrentChanges
        self.CurrentChanges = []
        return tuple(Buffered)
class LandCell(Landscapes.LandCell):
    """ Defines what's in one location on the ground
    """
    # Cell content is defined as a triple (Food, NegativePheromon, PositivePheromon)
    def __init__(self, F=0, NP=0, PP=0):
        self.VoidCell = (0, 0, 0) # content of a void cell
        self.setContent((F,NP,PP))
    def clean(self):
        """ Removes all pheromone from the cell, keeping only the food amount """
        return self.setContent((self.Content()[0],0,0))
    def food(self, addendum=0):
        """ Returns the food amount, optionally first changing it by 'addendum' """
        (F,NP,PP) = self.Content()
        if addendum: self.setContent((F + addendum, NP, PP))
        return F + addendum
    def np(self, addendum=0):
        """ Returns the repulsive ('negative') pheromone level,
        optionally first changing it by 'addendum' (capped by limit()) """
        (F,NP,PP) = self.Content()
        if addendum: self.setContent((F, self.limit(NP + addendum), PP))
        return NP + addendum
    def pp(self, addendum=0):
        """ Returns the attractive ('positive') pheromone level,
        optionally first changing it by 'addendum' (capped by limit()) """
        (F,NP,PP) = self.Content()
        if addendum: self.setContent((F, NP, self.limit(PP + addendum)))
        return PP + addendum
    def limit(self, Pheromone):
        # pheromone deposits saturate at the 'Saturation' parameter
        return min(Pheromone, Gbl.Parameter('Saturation'))
    # def __add__(self, Other):
        # # redefines the '+' operation between cells
        # return LandCell(self.food()+Other.food(),
                        # self.limit(self.np() + Other.np()),
                        # self.limit(self.pp() + Other.pp())
    def evaporate(self):
        """ Decreases both pheromone levels by 'Evaporation';
        returns True when the cell holds no pheromone any more """
        # Pheromone evaporation should be programmed about here
        if self.np() > 0:
            self.np(-Gbl.Parameter('Evaporation')) # Repulsive ('negative') pheromone
        if self.pp() > 0:
            self.pp(-Gbl.Parameter('Evaporation')) # Attractive ('positive') Pheromone
        if self.np() <= 0 and self.pp() <= 0:
            self.clean()
            return True
        return False
    def add_food(self):
        """ Re-adds one food unit at this location, then sleeps and re-triggers itself.
        NOTE(review): this method reads self.Name and self.locate(), which FoodSource
        defines but LandCell does not -- it looks misplaced on this class. It also
        blocks the calling thread with time.sleep and recurses via the bare global
        name 'add_food', and depends on module globals Observer, Land, Gbl and
        BackgroundProcesses. Confirm intended behaviour before relying on it.
        """
        #print "food added to " + str(self)
        Observer.recordChanges((self.Name, self.locate() + FoodAspect))
        Land.food(self.locate(), +1)
        #print "food added to " + str(self)
        time.sleep(Gbl.Parameter('FoodRenewTime'))
        if BackgroundProcesses == True:
            add_food(self)
class FoodSource:
    """ Location where food is available
    """
    def __init__(self, Name):
        self.Name = Name
        self.FoodAmount = 0	# total food currently attributed to this source
        self.Location = (-1,-1)	# set later through locate()
        self.Radius = (Gbl.Parameter('FoodSourceSize')+1)//2
        self.Distribution = Gbl.Parameter('FoodQuantity') // ((2*self.Radius+1) ** 2)	# food units per covered cell
        self.Area = []	# cell positions covered by this source
        # adding background processing of food addition
        # NOTE(review): 'add_food' is referenced here as a module-level name, but it
        # appears defined as a method of LandCell above -- this lookup likely fails
        # at runtime; confirm where add_food is meant to live.
        t = threading.Timer(Gbl.Parameter('FoodRenewTime'), add_food, [self])
        t.start()
    def locate(self, Location = None):
        """ Returns the source's position, optionally setting it first """
        if Location:
            self.Location = Location
        return self.Location
    def __repr__(self):
        return "[%s, %d, %s...]" % (self.Name, self.FoodAmount, str(self.Area)[:22])
class Landscape(Landscapes.Landscape):
    """ A 2-D grid with cells that contains food or pheromone
    """
    def __init__(self, Size, NbFoodSources):
        """ Creates the grid and scatters NbFoodSources food sources on it """
        Landscapes.Landscape.__init__(self, Size, CellType=LandCell)
        # Positioning Food Sources
        self.FoodSourceNumber = NbFoodSources
        self.FoodSources = []
        # deadline used by the population to decide when the search has failed
        self.LastFood = time.time() + Gbl.Parameter('TimeRunningOutOfFood')
        for FSn in range(self.FoodSourceNumber):
            FS = FoodSource('FS%d' % FSn)
            FS.locate((random.randint(0,Size-1),random.randint(0,Size-1)))
            self.FoodSources.append(FS)
            for Pos in self.neighbours(FS.locate(), Radius=FS.Radius):
                FS.Area.append(Pos)
                self.food(Pos, FS.Distribution)	# Cell contains a certain amount of food
            Observer.recordChanges((FS.Name, FS.locate() + FoodAspect))	# to display food sources

    def food(self, Pos, delta=0):
        """ Returns the food amount at Pos, optionally changing it by 'delta'
        while keeping the owning FoodSource's bookkeeping in sync """
        if delta:
            # let the food source know
            for FS in self.FoodSources:
                if Pos in FS.Area:
                    FS.FoodAmount += delta
                    if FS.FoodAmount <= 0:
                        Observer.recordChanges((FS.Name, FS.locate() + FoodDepletedAspect))	# to display food sources
        return self.Cell(Pos).food(delta)	# adds food

    def foodQuantity(self):
        """ Total food remaining over all sources """
        return sum([FS.FoodAmount for FS in self.FoodSources])

    def npheromone(self, Pos, delta=0):
        """ Returns (and optionally increases) repulsive pheromone at Pos """
        if delta:
            self.ActiveCells.append(Pos)
            Observer.recordChanges(('NP%d_%d' % Pos, Pos + NPAspect))	# for ongoing display of negative pheromone
        return self.Cell(Pos).np(delta)	# adds repulsive pheromone

    def ppheromone(self, Pos, delta=0):
        """ Returns (and optionally increases) attractive pheromone at Pos """
        if delta:
            self.ActiveCells.append(Pos)
            Observer.recordChanges(('PP%d_%d' % Pos, Pos + PPAspect))	# for ongoing display of positive pheromone
        return self.Cell(Pos).pp(delta)	# adds attractive pheromone

    def evaporation(self):
        """ Evaporates pheromone on all active cells, retiring cells that become clean """
        for Pos in self.ActiveCells.list()[:]:
            if self.Cell(Pos).evaporate():	# no pheromone left
                # call 'erase' for updating display when there is no pheromone left
                self.erase(Pos)	# for ongoing display
                self.ActiveCells.remove(Pos)

    def erase(self, Pos):
        " says to Observer that there is no pheromon left at that location "
        Observer.recordChanges(('NP%d_%d' % Pos, Pos + (-1,)))	# negative colour means erase from display
        Observer.recordChanges(('PP%d_%d' % Pos, Pos + (-1,)))	# negative colour means erase from display

    def update_(self):
        """ Scans ground for food and pheromone - May be used for statistics """
        # BUGFIX: the original bound Food, NPher and PPher to ONE shared list
        # ('Food = NPher = PPher = []'), referenced an undefined name 'Pos'
        # instead of 'Position', called the global 'Land' instead of self, and
        # read non-existent attributes Cell.Food/NPheromone/PPheromone instead
        # of using the LandCell accessors food()/np()/pp().
        Food = []
        NPher = []
        PPher = []
        for (Position, Cell) in self.travel():
            if Cell.food(): Food.append((Position, Cell.food()))
            if Cell.np(): NPher.append((Position, Cell.np()))
            if Cell.pp(): PPher.append((Position, Cell.pp()))
        return (Food, NPher, PPher)
class Ant(EI.Individual):
    """ Defines individual agents
    """
    def __init__(self, Scenario, IdNb, InitialPosition):
        EI.Individual.__init__(self, Scenario, ID=IdNb)
        self.Colony = InitialPosition # Location of the colony nest
        self.location = InitialPosition	# current (x,y) position on the landscape
        self.PPStock = Gbl.Parameter('PPMax')	# positive pheromone left to lay on the way home
        self.Action = 'Move'	# either 'Move' (foraging) or 'BackHome' (returning)
        self.moves()
    def Sniff(self):
        """ Looks for the next place to go:
        food wins outright, otherwise the neighbouring cell with the best
        (positive - negative) pheromone balance. Returns None if nothing found. """
        Neighbourhood = Land.neighbours(self.location, self.Scenario.Parameter('SniffingDistance'))
        random.shuffle(Neighbourhood) # to avoid anisotropy
        acceptable = None
        best = -Gbl.Parameter('Saturation') # best == pheromone balance found so far
        for NewPos in Neighbourhood:
            # looking for position devoid of negative pheromon
            if NewPos == self.location: continue
            if Land.food(NewPos) > 0:
                # Food is always good to take
                acceptable = NewPos
                break
            found = Land.ppheromone(NewPos)	# attractiveness of positive pheromone
            found -= Land.npheromone(NewPos)	# repulsiveness of negative pheromone
            if found > best:
                acceptable = NewPos
                best = found
        return acceptable
    def returnHome(self):
        """ The ant heads straight toward the colony, laying decreasing amounts
        of positive pheromone; switches back to foraging once home is reached """
        Direction = t2c(self.Colony) - t2c(self.location)	# complex number operation
        Distance = abs(Direction)
        if Distance >= Gbl.Parameter('SniffingDistance'):
            # Negative pheromone
            if Gbl.Parameter('NPDeposit'):
                Land.npheromone(self.location, Gbl.Parameter('NPDeposit'))	# marking current position as visited with negative pheromone
            # Positive pheromone
            Land.ppheromone(self.location, self.PPStock)	# marking current position as interesting with positive pheromone
            Direction /= Distance	# normed vector
            # MaxSteps = int(Gbl.Parameter('LandSize') / 2 / Distance)	#
            Decrease = int(self.PPStock / Distance)	# Linear decrease
            self.PPStock -= Decrease
            # if self.PPStock <= Gbl.Parameter('PPMin'):	self.PPStock = Gbl.Parameter('PPMin')	# always lay some amount of positive pheromone
            self.location = c2t(t2c(self.location) + 2 * Direction)	# complex number operation
            self.location = Land.ToricConversion(self.location)	# landscape is a tore
            Observer.recordChanges((self.ID, self.location + AntAspectWhenLaden))	# for ongoing display of ants
        else:
            # Home reached
            self.PPStock = Gbl.Parameter('PPMax')
            self.Action = 'Move'
            #print Land.LastFood
            Land.LastFood = time.time()	# record the time food was last brought home
    def moves(self):
        """ Basic behavior: move by looking for neighbouring unvisited cells.
        If food is in sight, return straight back home.
        Lay down negative pheromone on visited cells.
        Lay down positive pheromone on returning home.
        """
        if self.Action == 'BackHome':
            self.returnHome()
        else:
            NextPos = self.Sniff()
            # print self.ID, 'in', self.location, 'sniffs', NextPos
            if NextPos is None or random.randint(0,100) < Gbl.Parameter('Exploration'):
                # either all neighbouring cells have been visited or in the mood for exploration
                NextPos = c2t(t2c(self.location) + complex(random.randint(-1,1),random.randint(-1,1)))
                NextPos = Land.ToricConversion(NextPos)
            # Marking the old location as visited
            if Gbl.Parameter('NPDeposit'):
                Land.npheromone(self.location, Gbl.Parameter('NPDeposit'))
                # Observer.recordChanges(('NP%d_%d' % self.location, self.location + NPAspect))	# for ongoing display of negative pheromone
            self.location = NextPos
            if Land.food(self.location) > 0:
                Land.food(self.location, -1)	# consume one unit of food
                Observer.FoodCollected += 1
                self.Action = 'BackHome'	# return when having found food
            Observer.recordChanges((self.ID, self.location + AntAspect))	# for ongoing display of ants
    def position(self):
        # NOTE(review): 'self.Position' is never assigned in this class (only
        # 'self.location' is), and c2t expects a complex number while location is
        # a tuple -- unless the parent EI.Individual sets Position, this method
        # likely fails; it presumably should return self.location. Confirm.
        return c2t(self.Position)
class Group(EG.Group):
    # The group is a container for individuals.
    # Individuals are stored in self.members
    def __init__(self, Scenario, ColonyPosition, ID=1, Size=100):
        """ Remembers the colony position, then defers to the parent constructor """
        self.ColonyPosition = ColonyPosition
        # BUGFIX: the parent class is EG.Group (Ecology.Group); the original code
        # called EP.Group.__init__, reaching into the Population module instead.
        EG.Group.__init__(self, Scenario, ID=ID, Size=Size)
    def createIndividual(self, ID=None, Newborn=True):
        """ Factory hook: creates one agent of the local 'Ant' class at the colony """
        return Ant(self.Scenario, self.free_ID(Prefix='A'), self.ColonyPosition)	# call to local class 'Ant'
class Population(EP.Population):
    " defines the population of agents "
    def __init__(self, Scenario, Observer, ColonyPosition):
        self.ColonyPosition = ColonyPosition	# must be set before parent init (used by createGroup)
        EP.Population.__init__(self, Scenario, Observer)
        " creates a population of ant agents "
        self.AllMoved = 0 	# counts the number of times all agents have moved on average
        self.SimulationEnd = 400 * self.popSize	# countdown once food is exhausted
        # allows to run on the simulation beyond stop condition
    def createGroup(self, ID=0, Size=0):
        """ Factory hook: builds the local 'Group' class around the colony position """
        return Group(self.Scenario, self.ColonyPosition, ID=ID, Size=Size)	# Call to local class 'Group'
    def One_Decision(self):
        """ This function is repeatedly called by the simulation thread.
        One ant is randomly chosen and decides what it does.
        Returns True to keep the simulation running, False to stop it.
        """
        EP.Population.one_year(self)	# performs statistics
        ant = self.selectIndividual()
        ant.moves()
        Moves = self.year // self.popSize	# One step = all Walkers have moved once on average
        # print (self.year, self.AllMoved, Moves),
        if Moves > self.AllMoved:
            # one full "round": evaporate pheromone once per average population move
            Land.evaporation()
            self.AllMoved = Moves
        if (Land.foodQuantity() <= 0):
            self.SimulationEnd -= 1
        # Python 2 print statement; shows seconds since food was last found
        print time.time() - Land.LastFood
        # stop when no food has been found for longer than 'StartTimeForSearching'
        return False if time.time() - Land.LastFood > Gbl.Parameter('StartTimeForSearching') else True
        #return self.SimulationEnd > 0 # stops the simulation when True
if __name__ == "__main__":
print __doc__
#############################
# Global objects #
#############################
BackgroundProcesses = True #flag for background processes
Gbl = EPar.Parameters('_Params.evo') # Loading global parameter values
Observer = Ant_Observer(Gbl) # Observer contains statistics
Land = Landscape(Gbl.Parameter('LandSize'), Gbl.Parameter('NbFoodSources'))
Pop = Population(Gbl, | |
<filename>data_processor.py
import numpy as np
import json
import glob
import copy
import os.path
import re
from tqdm import tqdm
from sklearn.feature_extraction import FeatureHasher
def read_label_csv(path):
    """Read a "filename,label" CSV into a dict mapping file name to int label.

    The first line is treated as a header row and skipped.
    """
    with open(path, "r", encoding='ISO-8859-1') as csv_file:
        rows = csv_file.readlines()[1:]
    table = dict()
    for row in rows:
        name, raw_label = row.strip().split(",")
        table[name] = int(raw_label)
    return table
def read_json(path):
    """Parse the JSON file at *path* and return the resulting object."""
    with open(path, "r") as handle:
        text = handle.read()
    return json.loads(text)
def parse_set(path):
    """Read *path* and return the set of its lines, trailing whitespace stripped.

    Uses a context manager so the file handle is released even if reading
    raises (the original left the handle open on any exception between
    open() and close()).
    """
    with open(path, "r") as f:
        return {line.rstrip() for line in f}
class PeminerParser:
    """Vectorizes a PEMINER JSON report through feature hashing."""

    def __init__(self, path):
        self.report = read_json(path)
        self.vector = []

    def hashing(self):
        """Hash every report key/value pair into a fixed 188-dimensional vector."""
        hasher = FeatureHasher(n_features=188)
        digest = hasher.transform([dict(self.report)])
        return digest.toarray().tolist()[0]

    def process_report(self):
        # Use the whole report, compressed via feature hashing.
        # (Original note, translated from Korean: "uses all the data,
        # but with feature hashing applied".)
        self.vector += self.hashing()
        return self.vector
class EmberParser:
    '''
    Vectorizes an EMBER-style JSON report into a flat feature vector.
    (Original note, translated from Korean: it is recommended to also
    vectorize features not used in this example.)
    '''
    def __init__(self, path):
        # Parsed JSON report; the final vector is assembled by process_report().
        self.report = read_json(path)
        self.vector = []
    def get_histogram_info(self):
        """ Byte-value histogram normalized so the entries sum to 1. """
        histogram = np.array(self.report["histogram"])
        total = histogram.sum()
        vector = histogram / total
        return vector.tolist()
    def get_string_info(self):
        """ String statistics plus the printable-character distribution,
        normalized by the number of printable characters. """
        strings = self.report["strings"]

        # avoid division by zero when the binary has no printable characters
        hist_divisor = float(strings['printables']) if strings['printables'] > 0 else 1.0
        vector = [
            strings['numstrings'],
            strings['avlength'],
            strings['printables'],
            strings['entropy'],
            strings['paths'],
            strings['urls'],
            strings['registry'],
            strings['MZ']
        ]
        vector += (np.asarray(strings['printabledist']) / hist_divisor).tolist()
        return vector
    def get_general_file_info(self):
        """ Scalar file-level features (sizes, counts, presence flags). """
        general = self.report["general"]
        vector = [
            general['size'], general['vsize'], general['has_debug'], general['exports'], general['imports'],
            general['has_relocations'], general['has_resources'], general['has_signature'], general['has_tls'],
            general['symbols']
        ]
        return vector
    def get_coff_info(self):
        # 1 when the COFF timestamp is zero, else 0
        if self.report["header"]["coff"]["timestamp"] == 0:
            return [1]
        else:
            return [0]
    def get_data_directories_info(self):
        # except runtime header
        # NOTE(review): despite the comment above, this sums the virtual addresses
        # of CLR_RUNTIME_HEADER entries and divides by the total number of data
        # directories -- confirm whether this is the intended feature.
        dd = self.report["datadirectories"]
        val1 = 0
        for i in dd:
            if i["name"] == "CLR_RUNTIME_HEADER":
                val1 += i["virtual_address"]
        return [val1 / max(1, len(dd))]
    def get_clr_runtime_header_info(self):
        """ [virtual_address, size] of the CLR runtime header, zeros when absent. """
        dd = self.report["datadirectories"]
        val1 = 0
        val2 = 0
        for i in dd:
            if i["name"] == "CLR_RUNTIME_HEADER":
                val1 = i["virtual_address"]
                val2 = i["size"]
        return [val1, val2]
    def get_section_info(self):
        """ Hashes section names and properties into 24 buckets.
        NOTE(review): numerical_val (mean size/entropy/vsize) is computed but never
        appended to the result ('ret' stays empty), so only the hashed digest is
        returned -- looks like dead code or an unfinished feature; confirm.
        """
        sections = self.report["section"]["sections"]
        h = FeatureHasher(n_features=24)
        feat_length = len(sections)
        numerical_val = [0, 0, 0]
        numerical_arg = ["size", "entropy", "vsize"]
        ret = []
        hash_props = dict()
        for i in sections:
            for j in range(3):
                numerical_val[j] += i[numerical_arg[j]]
            for j in i["props"]:
                if j not in hash_props:
                    hash_props[j] = 1
                else:
                    hash_props[j] += 1
            hash_props[i["name"]] = 1
        for i in range(3):
            numerical_val[i] /= max(1, feat_length)
        digest = h.transform([hash_props])
        return ret + digest.toarray().tolist()[0]
    def get_imports_title_info(self):
        """ Hashes imported library names into 27 buckets and sums the digests. """
        feats = 27
        ret = np.zeros(feats)
        imports = self.report["imports"]
        h = FeatureHasher(n_features=feats, input_type="string")
        hash_list = []
        for i in imports.keys():
            hash_list.append(i)
        # no imports at all: return the zero vector
        if len(hash_list) == 0:
            return ret.tolist()
        digest = h.transform(hash_list).toarray()
        for i in range(digest.shape[0]):
            ret = np.add(ret, digest[i])
        return ret.tolist()
    def process_report(self):
        """ Concatenates all sub-vectors into the final feature vector. """
        vector = []
        vector += self.get_general_file_info()
        vector += self.get_histogram_info()
        vector += self.get_string_info()
        vector += self.get_clr_runtime_header_info()
        vector += self.get_coff_info()
        vector += self.get_section_info()
        vector += self.get_imports_title_info()
        # (Original note below, translated from Korean: "add features here")
        '''
        특징 추가
        '''
        return vector
class PestudioParser:
    """Extracts a feature vector from a PEStudio JSON report.

    (Original note, translated from Korean: it is recommended to select
    which features to use and to vectorize those.)
    """
    def __init__(self, path):
        # Placeholder string PEStudio emits for missing values.
        self.na = "n/a"
        self.report = read_json(path)
        self.vector = []
        # Reference sets used for scoring indicators and imports.
        self.severity_tags = parse_set("util_pestudio/severity_tag.txt")
        self.mal_api1 = parse_set("util_pestudio/API_used_in_Malware_1.txt")
        self.mal_api2 = parse_set("util_pestudio/API_used_in_Malware_2.txt")
    def get_overview_info(self):
        """Return [version, mal_sig, bad_desc] flags from the overview section.

        * version: 1 if the file-version has suspiciously many components.
        * mal_sig: 0.5 for packer signatures (sfx/upx), 1 for known-bad ones.
        * bad_desc: 1 for description strings seen in known malware samples.
        """
        version = 0
        mal_sig = 0
        bad_desc = 0
        val = self.report["image"]["overview"]["signature"].lower()
        ver = self.report["image"]["overview"]["file-version"]
        desc = self.report["image"]["overview"]["description"]
        if val.find("sfx") != -1:
            mal_sig = 0.5
        if val.find("upx") != -1:
            mal_sig = 0.5
        if val.find("trojan") != -1:
            mal_sig = 1
        if val.find("huigezi") != -1:
            mal_sig = 1
        if desc == "Mira Malware":
            bad_desc = 1
        if desc == "Remote Service Application" and ver == "1, 0, 0, 1":
            bad_desc = 1
        if desc == "If you are seeing this, you should already know.":
            bad_desc = 1
        # More than 7 dot/comma-separated parts is abnormal for a version.
        if ver is not None and len(re.split("[,.]", ver)) > 7:
            version = 1
        return [version, mal_sig, bad_desc]
    def get_indicator_vector(self):
        """Return [count of severity-tagged indicators, count of severity-1 ones]."""
        ret = [0, 0]
        indicators = self.report["image"]["indicators"]
        if indicators["@hint"] == self.na or indicators["@hint"] == '0':
            return [0, 0]
        for i in indicators["indicator"]:
            if i["@xml-id"] in self.severity_tags:
                ret[0] += 1
            if int(i["@severity"]) == 1:
                ret[1] += 1
        return ret
    def get_compiler_stamp_info(self):
        """Return [1] if the compiler timestamp year is later than 2018, else [0]."""
        if "file-header" not in self.report["image"]:
            return [0]
        token = self.report["image"]["file-header"]["compiler-stamp"]
        # The year is the last whitespace-separated token of the stamp string.
        if int(token.split(" ")[-1]) > 2018:
            return [1]
        else:
            return [0]
    def get_seh_info(self):
        """Return [1] if structured exception handling is reported, else [0]."""
        if "optional-header" not in self.report["image"]:
            return [0]
        val = self.report["image"]["optional-header"]["structured-exception-handling"]
        if val == "true":
            return [1]
        else:
            return [0]
    def get_section_info(self):
        """Return [writable-.text flag, .rsrc entropy, .rsrc file ratio].

        PEStudio emits a dict for a single section and a list for several,
        so both shapes are handled.
        """
        ret = [0, 0, 0]
        if "sections" not in self.report["image"]:
            return ret
        token = self.report["image"]["sections"]["section"]
        if isinstance(token, dict):
            # Bug fix: match the list branch below -- only a writable .text
            # section counts (previously any truthy section name matched).
            if token["@name"] == ".text" and token["@writable"] == 'x':
                ret[0] += 1
            if token["@name"] == ".rsrc":
                if token["@entropy"] != self.na:
                    ret[1] = float(token["@entropy"])
                if token["@file-ratio"] != self.na:
                    ret[2] = float(token["@file-ratio"])
        else:
            for i in token:
                if i["@name"] == ".text" and i["@writable"] == "x":
                    ret[0] += 1
                if i["@name"] == ".rsrc":
                    if i["@entropy"] != self.na:
                        ret[1] = float(i["@entropy"])
                    if i["@file-ratio"] != self.na:
                        ret[2] = float(i["@file-ratio"])
        return ret
    def count_libraries_blacklist(self):
        """Return [number of blacklisted libraries] (dict/list shapes handled)."""
        if "libraries" not in self.report["image"]:
            return [0]
        ret = 0
        token = self.report["image"]["libraries"]
        if token["@hint"] == self.na or "library" not in token:
            return [0]
        elif isinstance(token["library"], dict):
            if token["library"]["@blacklist"] == 'x':
                ret += 1
        else:
            for i in token["library"]:
                if i["@blacklist"] == "x":
                    ret += 1
        return [ret]
    def get_imports_vector(self):
        """Return [count of known-malicious imports, count of blacklisted imports]."""
        mal_set = self.mal_api1 | self.mal_api2
        blacklist = 0
        malicious = 0
        if "imports" not in self.report["image"] or "import" not in self.report["image"]["imports"]:
            return [0, 0]
        token = self.report["image"]["imports"]["import"]
        if isinstance(token, dict):
            if token["@blacklist"] == 'x':
                blacklist = 1
            if token["@name"] in mal_set:
                malicious = 1
        else:
            for i in token:
                if i["@blacklist"] == 'x':
                    blacklist += 1
                # Simplified from a continue/else pair with the same effect.
                if i["@name"] in mal_set:
                    malicious += 1
        return [malicious, blacklist]
    def get_tls_callback_info(self):
        """Score TLS callbacks: UPX-section callbacks weigh 1, null ones 0.5,
        anything else 0.1. Returns [0] when no callbacks are reported."""
        if "tls-callbacks" not in self.report["image"]:
            return [0]
        upx_w = 1
        null_w = 0.5
        tls = self.report["image"]["tls-callbacks"]
        if tls == self.na or tls["@hint"] == self.na:
            return [0]
        val = int(tls["@hint"])
        ret = 0
        if val == 1:
            if tls["callback"] is None:
                return [null_w]
            token = tls["callback"]["@section"].lower()
            if token.startswith("upx"):
                ret += upx_w
            else:
                ret += 0.1
        else:
            for i in tls["callback"]:
                if i is None:
                    ret += null_w
                    continue
                token = i["@section"].lower()
                if token.startswith("upx"):
                    ret += upx_w
                else:
                    ret += 0.1
        return [ret]
    def get_certification_info(self):
        """Return a certificate score: 0.5 when absent (n/a), 1 when expired,
        0 otherwise or when the report has no certificate entry."""
        if "certificate" not in self.report["image"]:
            return [0]
        if self.report["image"]["certificate"] == self.na:
            return [0.5]
        if self.report["image"]["certificate"] == "expired":
            return [1]
        else:
            return [0]
    def get_overlay_vector(self):
        """Return [no_overlay, hint_ratio, file_ratio, mal_token] overlay features.

        hint_ratio is parsed from a "k/n" fraction in the overlay hint;
        mal_token flags either a missing overlay or a small K I S A overlay.
        """
        if "overlay" not in self.report["image"]:
            return [0, 0, 0, 0]
        no_overlay = 0
        hint_ratio = 0
        file_ratio = 0
        mal_token = 0
        # A plain string overlay entry means there is no real overlay.
        if isinstance(self.report["image"]["overlay"], str):
            no_overlay = 1
            mal_token = 1
            return [no_overlay, 0, 0, mal_token]
        regex_fraction = re.compile(r'[1-9][0-9]*/[1-9][0-9]*')
        hint_str = self.report["image"]["overlay"]["@hint"]
        # "file-ratio" is a percentage string like "12.3%"; strip the '%'.
        file_ratio = self.report["image"]["overlay"]["file-ratio"][:-1]
        file_ratio = float(file_ratio) / 100
        m = regex_fraction.search(hint_str)
        if m is None:
            txt = self.report["image"]["overlay"]["first-bytes-text"]
            if isinstance(txt, str):
                is_kisa = txt.find("K I S A") != -1
                if is_kisa and file_ratio < 0.03:
                    mal_token = 1
        else:
            val = m.group(0)
            val = val.split('/')
            hint_ratio = abs(int(val[0]) / int(val[1]))
        return [no_overlay, hint_ratio, file_ratio, mal_token]
    def signature_detection_info(self):
        """Return [number of blacklisted strings reported by PEStudio]."""
        bl_pestudio = int(self.report["image"]["strings"]["@bl"])
        return [bl_pestudio]
    def process_report(self):
        """Build the PEStudio feature vector by concatenating the sub-vectors."""
        ret = []
        ret += self.get_overview_info()
        ret += self.get_indicator_vector()
        ret += self.get_section_info()
        ret += self.get_compiler_stamp_info()
        ret += self.get_seh_info()
        ret += self.count_libraries_blacklist()
        ret += self.get_imports_vector()
        # ret += self.get_tls_callback_info()
        # ret += self.get_certification_info()
        # ret += self.signature_detection_info()
        ret += self.get_overlay_vector()
        return ret
def get_train_path(petype: str):
    """Return the training-data directory for PETYPE, or None if invalid.

    Parameters
    ----------
    petype : str
        One of "EMBER", "PEMINER" or "PESTUDIO".
    """
    # Set literal instead of three add() calls (idiom).
    if petype not in {"EMBER", "PEMINER", "PESTUDIO"}:
        print("Invalid path!")
        return
    return f"데이터/{petype}/학습데이터/"
def get_valid_path(petype: str):
    """Return the validation-data directory for PETYPE, or None if invalid.

    Parameters
    ----------
    petype : str
        One of "EMBER", "PEMINER" or "PESTUDIO".
    """
    # Set literal instead of three add() calls (idiom).
    if petype not in {"EMBER", "PEMINER", "PESTUDIO"}:
        print("Invalid path!")
        return
    return f"데이터/{petype}/검증데이터/"
def get_test_path(petype: str):
    """Return the test-data directory for PETYPE, or None if invalid.

    Parameters
    ----------
    petype : str
        One of "EMBER", "PEMINER" or "PESTUDIO".
    """
    # Set literal instead of three add() calls (idiom).
    if petype not in {"EMBER", "PEMINER", "PESTUDIO"}:
        print("Invalid path!")
        return
    return f"데이터/{petype}/테스트데이터/"
def process_dataset(datatype: str, _pestudio=False):
basepath = ""
labels = None
if datatype == "TRAIN":
labels = read_label_csv("데이터/학습데이터_정답.csv")
basepath = get_train_path("PEMINER")
elif datatype == "VALID":
labels = read_label_csv("데이터/검증데이터_정답.csv")
basepath = get_valid_path("PEMINER")
else:
print("레이블이 없는 데이터입니다.")
return dict()
datadict = dict()
print("Start handling PEMINER")
for fname in tqdm(glob.glob(basepath + '*')):
# Erase ".json"
key = fname[:-5].split('/')[-1]
# Insert feature vector
datadict[key] = copy.deepcopy(PeminerParser(fname).process_report())
print("PEMINER finished")
if datatype == "TRAIN":
basepath = get_train_path("EMBER")
else:
basepath = get_valid_path("EMBER")
print("Start handling EMBER")
for fname in tqdm(glob.glob(basepath + '*')):
key = fname[:-5].split('/')[-1]
datadict[key] += copy.deepcopy(EmberParser(fname).process_report())
print("EMBER finished")
if _pestudio:
if datatype == "TRAIN":
basepath = get_train_path("PESTUDIO")
else:
basepath = get_valid_path("PESTUDIO")
print("Start handling PESTUDIO")
for i in tqdm(labels.keys()):
path = basepath + i + ".json"
# print(path)
if not os.path.isfile(path):
del datadict[i]
else:
datadict[i] += copy.deepcopy(PestudioParser(path).process_report())
print("PESTUDIO finished")
ret | |
[4.4270e+00, 1.5150e+00, 1.9412e+01],
[5.4930e+00, 2.4339e+01, 1.8848e+01],
[5.2230e+00, 2.3079e+01, 1.9656e+01],
[5.4960e+00, 2.1839e+01, 1.8816e+01],
[5.6050e+00, 2.4950e+00, 2.1551e+01],
[3.9530e+00, 1.3860e+00, 1.8622e+01],
[4.8760e+00, 2.4386e+01, 1.8089e+01],
[4.2980e+00, 2.3074e+01, 1.9952e+01],
[5.7910e+00, 2.3069e+01, 2.0443e+01],
[5.3100e+00, 2.1040e+01, 1.9334e+01],
[4.9150e+00, 2.1834e+01, 1.8037e+01],
[5.2960e+00, 1.3090e+01, 7.3150e+00],
[4.8690e+00, 1.5421e+01, 8.9330e+00],
[4.1040e+00, 1.5085e+01, 7.8270e+00],
[4.4270e+00, 1.3915e+01, 7.0120e+00],
[5.4930e+00, 1.1939e+01, 6.4480e+00],
[5.2230e+00, 1.0679e+01, 7.2560e+00],
[5.4960e+00, 9.4390e+00, 6.4160e+00],
[5.6050e+00, 1.4895e+01, 9.1510e+00],
[3.9530e+00, 1.3786e+01, 6.2220e+00],
[4.8760e+00, 1.1986e+01, 5.6890e+00],
[4.2980e+00, 1.0674e+01, 7.5520e+00],
[5.7910e+00, 1.0669e+01, 8.0430e+00],
[5.3100e+00, 8.6400e+00, 6.9340e+00],
[4.9150e+00, 9.4340e+00, 5.6370e+00],
[1.7696e+01, 6.9000e-01, 7.3150e+00],
[1.7269e+01, 3.0210e+00, 8.9330e+00],
[1.6504e+01, 2.6850e+00, 7.8270e+00],
[1.6827e+01, 1.5150e+00, 7.0120e+00],
[1.7893e+01, 2.4339e+01, 6.4480e+00],
[1.7623e+01, 2.3079e+01, 7.2560e+00],
[1.7896e+01, 2.1839e+01, 6.4160e+00],
[1.8005e+01, 2.4950e+00, 9.1510e+00],
[1.6353e+01, 1.3860e+00, 6.2220e+00],
[1.7276e+01, 2.4386e+01, 5.6890e+00],
[1.6698e+01, 2.3074e+01, 7.5520e+00],
[1.8191e+01, 2.3069e+01, 8.0430e+00],
[1.7710e+01, 2.1040e+01, 6.9340e+00],
[1.7315e+01, 2.1834e+01, 5.6370e+00],
[1.7696e+01, 1.3090e+01, 1.9715e+01],
[1.7269e+01, 1.5421e+01, 2.1333e+01],
[1.6504e+01, 1.5085e+01, 2.0227e+01],
[1.6827e+01, 1.3915e+01, 1.9412e+01],
[1.7893e+01, 1.1939e+01, 1.8848e+01],
[1.7623e+01, 1.0679e+01, 1.9656e+01],
[1.7896e+01, 9.4390e+00, 1.8816e+01],
[1.8005e+01, 1.4895e+01, 2.1551e+01],
[1.6353e+01, 1.3786e+01, 1.8622e+01],
[1.7276e+01, 1.1986e+01, 1.8089e+01],
[1.6698e+01, 1.0674e+01, 1.9952e+01],
[1.8191e+01, 1.0669e+01, 2.0443e+01],
[1.7710e+01, 8.6400e+00, 1.9334e+01],
[1.7315e+01, 9.4340e+00, 1.8037e+01],
[7.1040e+00, 1.3090e+01, 5.0850e+00],
[7.5310e+00, 1.5421e+01, 3.4670e+00],
[8.2960e+00, 1.5085e+01, 4.5730e+00],
[7.9730e+00, 1.3915e+01, 5.3880e+00],
[6.9070e+00, 1.1939e+01, 5.9520e+00],
[7.1770e+00, 1.0679e+01, 5.1440e+00],
[6.9040e+00, 9.4390e+00, 5.9840e+00],
[6.7950e+00, 1.4895e+01, 3.2490e+00],
[8.4470e+00, 1.3786e+01, 6.1780e+00],
[7.5240e+00, 1.1986e+01, 6.7110e+00],
[8.1020e+00, 1.0674e+01, 4.8480e+00],
[6.6090e+00, 1.0669e+01, 4.3570e+00],
[7.0900e+00, 8.6400e+00, 5.4660e+00],
[7.4850e+00, 9.4340e+00, 6.7630e+00],
[7.1040e+00, 6.9000e-01, 1.7485e+01],
[7.5310e+00, 3.0210e+00, 1.5867e+01],
[8.2960e+00, 2.6850e+00, 1.6973e+01],
[7.9730e+00, 1.5150e+00, 1.7788e+01],
[6.9070e+00, 2.4339e+01, 1.8352e+01],
[7.1770e+00, 2.3079e+01, 1.7544e+01],
[6.9040e+00, 2.1839e+01, 1.8384e+01],
[6.7950e+00, 2.4950e+00, 1.5649e+01],
[8.4470e+00, 1.3860e+00, 1.8578e+01],
[7.5240e+00, 2.4386e+01, 1.9111e+01],
[8.1020e+00, 2.3074e+01, 1.7248e+01],
[6.6090e+00, 2.3069e+01, 1.6757e+01],
[7.0900e+00, 2.1040e+01, 1.7866e+01],
[7.4850e+00, 2.1834e+01, 1.9163e+01],
[1.9504e+01, 1.3090e+01, 1.7485e+01],
[1.9931e+01, 1.5421e+01, 1.5867e+01],
[2.0696e+01, 1.5085e+01, 1.6973e+01],
[2.0373e+01, 1.3915e+01, 1.7788e+01],
[1.9307e+01, 1.1939e+01, 1.8352e+01],
[1.9577e+01, 1.0679e+01, 1.7544e+01],
[1.9304e+01, 9.4390e+00, 1.8384e+01],
[1.9195e+01, 1.4895e+01, 1.5649e+01],
[2.0847e+01, 1.3786e+01, 1.8578e+01],
[1.9924e+01, 1.1986e+01, 1.9111e+01],
[2.0502e+01, 1.0674e+01, 1.7248e+01],
[1.9009e+01, 1.0669e+01, 1.6757e+01],
[1.9490e+01, 8.6400e+00, 1.7866e+01],
[1.9885e+01, 9.4340e+00, 1.9163e+01],
[1.9504e+01, 6.9000e-01, 5.0850e+00],
[1.9931e+01, 3.0210e+00, 3.4670e+00],
[2.0696e+01, 2.6850e+00, 4.5730e+00],
[2.0373e+01, 1.5150e+00, 5.3880e+00],
[1.9307e+01, 2.4339e+01, 5.9520e+00],
[1.9577e+01, 2.3079e+01, 5.1440e+00],
[1.9304e+01, 2.1839e+01, 5.9840e+00],
[1.9195e+01, 2.4950e+00, 3.2490e+00],
[2.0847e+01, 1.3860e+00, 6.1780e+00],
[1.9924e+01, 2.4386e+01, 6.7110e+00],
[2.0502e+01, 2.3074e+01, 4.8480e+00],
[1.9009e+01, 2.3069e+01, 4.3570e+00],
[1.9490e+01, 2.1040e+01, 5.4660e+00],
[1.9885e+01, 2.1834e+01, 6.7630e+00],
[1.7696e+01, 2.4110e+01, 1.7485e+01],
[1.7269e+01, 2.1779e+01, 1.5867e+01],
[1.6504e+01, 2.2115e+01, 1.6973e+01],
[1.6827e+01, 2.3285e+01, 1.7788e+01],
[1.7893e+01, 4.6100e-01, 1.8352e+01],
[1.7623e+01, 1.7210e+00, 1.7544e+01],
[1.7896e+01, 2.9610e+00, 1.8384e+01],
[1.8005e+01, 2.2305e+01, 1.5649e+01],
[1.6353e+01, 2.3414e+01, 1.8578e+01],
[1.7276e+01, 4.1400e-01, 1.9111e+01],
[1.6698e+01, 1.7260e+00, 1.7248e+01],
[1.8191e+01, 1.7310e+00, 1.6757e+01],
[1.7710e+01, 3.7600e+00, 1.7866e+01],
[1.7315e+01, 2.9660e+00, 1.9163e+01],
[1.7696e+01, 1.1710e+01, 5.0850e+00],
[1.7269e+01, 9.3790e+00, 3.4670e+00],
[1.6504e+01, 9.7150e+00, 4.5730e+00],
[1.6827e+01, 1.0885e+01, 5.3880e+00],
[1.7893e+01, 1.2861e+01, 5.9520e+00],
[1.7623e+01, 1.4121e+01, 5.1440e+00],
[1.7896e+01, 1.5361e+01, 5.9840e+00],
[1.8005e+01, 9.9050e+00, 3.2490e+00],
[1.6353e+01, 1.1014e+01, 6.1780e+00],
[1.7276e+01, 1.2814e+01, 6.7110e+00],
[1.6698e+01, 1.4126e+01, 4.8480e+00],
[1.8191e+01, 1.4131e+01, 4.3570e+00],
[1.7710e+01, 1.6160e+01, 5.4660e+00],
[1.7315e+01, 1.5366e+01, 6.7630e+00],
[5.2960e+00, 2.4110e+01, 5.0850e+00],
[4.8690e+00, 2.1779e+01, 3.4670e+00],
[4.1040e+00, 2.2115e+01, 4.5730e+00],
[4.4270e+00, 2.3285e+01, 5.3880e+00],
[5.4930e+00, 4.6100e-01, 5.9520e+00],
[5.2230e+00, 1.7210e+00, 5.1440e+00],
[5.4960e+00, 2.9610e+00, 5.9840e+00],
[5.6050e+00, 2.2305e+01, 3.2490e+00],
[3.9530e+00, 2.3414e+01, 6.1780e+00],
[4.8760e+00, 4.1400e-01, 6.7110e+00],
[4.2980e+00, 1.7260e+00, 4.8480e+00],
[5.7910e+00, 1.7310e+00, 4.3570e+00],
[5.3100e+00, 3.7600e+00, 5.4660e+00],
[4.9150e+00, 2.9660e+00, 6.7630e+00],
[5.2960e+00, 1.1710e+01, 1.7485e+01],
[4.8690e+00, 9.3790e+00, 1.5867e+01],
[4.1040e+00, 9.7150e+00, 1.6973e+01],
[4.4270e+00, 1.0885e+01, 1.7788e+01],
[5.4930e+00, 1.2861e+01, 1.8352e+01],
[5.2230e+00, 1.4121e+01, 1.7544e+01],
[5.4960e+00, 1.5361e+01, 1.8384e+01],
[5.6050e+00, 9.9050e+00, 1.5649e+01],
[3.9530e+00, 1.1014e+01, 1.8578e+01],
[4.8760e+00, 1.2814e+01, 1.9111e+01],
[4.2980e+00, 1.4126e+01, 1.7248e+01],
[5.7910e+00, 1.4131e+01, 1.6757e+01],
[5.3100e+00, 1.6160e+01, 1.7866e+01],
[4.9150e+00, 1.5366e+01, 1.9163e+01],
[1.9504e+01, 1.1710e+01, 7.3150e+00],
[1.9931e+01, 9.3790e+00, 8.9330e+00],
[2.0696e+01, 9.7150e+00, 7.8270e+00],
[2.0373e+01, 1.0885e+01, 7.0120e+00],
[1.9307e+01, 1.2861e+01, 6.4480e+00],
[1.9577e+01, 1.4121e+01, 7.2560e+00],
[1.9304e+01, 1.5361e+01, 6.4160e+00],
[1.9195e+01, 9.9050e+00, 9.1510e+00],
[2.0847e+01, 1.1014e+01, 6.2220e+00],
[1.9924e+01, 1.2814e+01, 5.6890e+00],
[2.0502e+01, 1.4126e+01, 7.5520e+00],
[1.9009e+01, 1.4131e+01, 8.0430e+00],
[1.9490e+01, 1.6160e+01, 6.9340e+00],
[1.9885e+01, 1.5366e+01, 5.6370e+00],
[1.9504e+01, 2.4110e+01, 1.9715e+01],
[1.9931e+01, 2.1779e+01, 2.1333e+01],
[2.0696e+01, 2.2115e+01, 2.0227e+01],
[2.0373e+01, 2.3285e+01, 1.9412e+01],
[1.9307e+01, 4.6100e-01, 1.8848e+01],
[1.9577e+01, 1.7210e+00, 1.9656e+01],
[1.9304e+01, 2.9610e+00, 1.8816e+01],
[1.9195e+01, 2.2305e+01, 2.1551e+01],
[2.0847e+01, 2.3414e+01, 1.8622e+01],
[1.9924e+01, 4.1400e-01, 1.8089e+01],
[2.0502e+01, 1.7260e+00, 1.9952e+01],
[1.9009e+01, 1.7310e+00, 2.0443e+01],
[1.9490e+01, 3.7600e+00, 1.9334e+01],
[1.9885e+01, 2.9660e+00, 1.8037e+01],
[7.1040e+00, 1.1710e+01, 1.9715e+01],
[7.5310e+00, 9.3790e+00, 2.1333e+01],
[8.2960e+00, 9.7150e+00, 2.0227e+01],
[7.9730e+00, 1.0885e+01, 1.9412e+01],
[6.9070e+00, 1.2861e+01, 1.8848e+01],
[7.1770e+00, 1.4121e+01, 1.9656e+01],
[6.9040e+00, 1.5361e+01, 1.8816e+01],
[6.7950e+00, 9.9050e+00, 2.1551e+01],
[8.4470e+00, 1.1014e+01, 1.8622e+01],
[7.5240e+00, 1.2814e+01, 1.8089e+01],
[8.1020e+00, 1.4126e+01, 1.9952e+01],
[6.6090e+00, 1.4131e+01, 2.0443e+01],
[7.0900e+00, 1.6160e+01, 1.9334e+01],
[7.4850e+00, 1.5366e+01, 1.8037e+01],
[7.1040e+00, 2.4110e+01, 7.3150e+00],
[7.5310e+00, 2.1779e+01, 8.9330e+00],
[8.2960e+00, 2.2115e+01, 7.8270e+00],
[7.9730e+00, 2.3285e+01, 7.0120e+00],
[6.9070e+00, 4.6100e-01, 6.4480e+00],
[7.1770e+00, 1.7210e+00, 7.2560e+00],
[6.9040e+00, 2.9610e+00, 6.4160e+00],
[6.7950e+00, 2.2305e+01, 9.1510e+00],
[8.4470e+00, 2.3414e+01, 6.2220e+00],
[7.5240e+00, 4.1400e-01, 5.6890e+00],
[8.1020e+00, 1.7260e+00, 7.5520e+00],
[6.6090e+00, 1.7310e+00, 8.0430e+00],
[7.0900e+00, 3.7600e+00, 6.9340e+00],
[7.4850e+00, 2.9660e+00, 5.6370e+00],
[2.3896e+01, 1.1150e+00, 1.7910e+01],
[2.3469e+01, 2.7330e+00, 1.5579e+01],
[2.2704e+01, 1.6270e+00, 1.5915e+01],
[2.3027e+01, 8.1200e-01, 1.7085e+01],
[2.4093e+01, 2.4800e-01, 1.9061e+01],
[2.3823e+01, 1.0560e+00, 2.0321e+01],
[2.4096e+01, 2.1600e-01, 2.1561e+01],
[2.4205e+01, 2.9510e+00, 1.6105e+01],
[2.2553e+01, 2.2000e-02, 1.7214e+01],
[2.3476e+01, 2.4289e+01, 1.9014e+01],
[2.2898e+01, 1.3520e+00, 2.0326e+01],
[2.4391e+01, 1.8430e+00, 2.0331e+01],
[2.3910e+01, 7.3400e-01, 2.2360e+01],
[2.3515e+01, 2.4237e+01, 2.1566e+01],
[2.3896e+01, 1.3515e+01, 5.5100e+00],
[2.3469e+01, 1.5133e+01, 3.1790e+00],
[2.2704e+01, 1.4027e+01, 3.5150e+00],
[2.3027e+01, 1.3212e+01, 4.6850e+00],
[2.4093e+01, 1.2648e+01, 6.6610e+00],
[2.3823e+01, 1.3456e+01, 7.9210e+00],
[2.4096e+01, 1.2616e+01, 9.1610e+00],
[2.4205e+01, 1.5351e+01, 3.7050e+00],
[2.2553e+01, 1.2422e+01, 4.8140e+00],
[2.3476e+01, 1.1889e+01, 6.6140e+00],
[2.2898e+01, 1.3752e+01, 7.9260e+00],
[2.4391e+01, 1.4243e+01, 7.9310e+00],
[2.3910e+01, 1.3134e+01, 9.9600e+00],
[2.3515e+01, 1.1837e+01, 9.1660e+00],
[1.1496e+01, 1.1150e+00, 5.5100e+00],
[1.1069e+01, 2.7330e+00, 3.1790e+00],
[1.0304e+01, 1.6270e+00, 3.5150e+00],
[1.0627e+01, 8.1200e-01, 4.6850e+00],
[1.1693e+01, 2.4800e-01, 6.6610e+00],
[1.1423e+01, 1.0560e+00, 7.9210e+00],
[1.1696e+01, 2.1600e-01, 9.1610e+00],
[1.1805e+01, 2.9510e+00, 3.7050e+00],
[1.0153e+01, 2.2000e-02, 4.8140e+00],
[1.1076e+01, 2.4289e+01, 6.6140e+00],
[1.0498e+01, 1.3520e+00, 7.9260e+00],
[1.1991e+01, 1.8430e+00, 7.9310e+00],
[1.1510e+01, 7.3400e-01, 9.9600e+00],
[1.1115e+01, 2.4237e+01, 9.1660e+00],
[1.1496e+01, 1.3515e+01, 1.7910e+01],
[1.1069e+01, 1.5133e+01, 1.5579e+01],
[1.0304e+01, 1.4027e+01, 1.5915e+01],
[1.0627e+01, 1.3212e+01, 1.7085e+01],
[1.1693e+01, 1.2648e+01, 1.9061e+01],
[1.1423e+01, 1.3456e+01, 2.0321e+01],
[1.1696e+01, 1.2616e+01, 2.1561e+01],
[1.1805e+01, 1.5351e+01, 1.6105e+01],
[1.0153e+01, 1.2422e+01, 1.7214e+01],
[1.1076e+01, 1.1889e+01, 1.9014e+01],
[1.0498e+01, 1.3752e+01, 2.0326e+01],
[1.1991e+01, 1.4243e+01, 2.0331e+01],
[1.1510e+01, 1.3134e+01, 2.2360e+01],
[1.1115e+01, 1.1837e+01, 2.1566e+01],
[9.0400e-01, 1.1285e+01, 5.5100e+00],
[1.3310e+00, 9.6670e+00, 3.1790e+00],
[2.0960e+00, 1.0773e+01, 3.5150e+00],
[1.7730e+00, 1.1588e+01, 4.6850e+00],
[7.0700e-01, 1.2152e+01, 6.6610e+00],
[9.7700e-01, 1.1344e+01, 7.9210e+00],
[7.0400e-01, 1.2184e+01, 9.1610e+00],
[5.9500e-01, 9.4490e+00, 3.7050e+00],
[2.2470e+00, 1.2378e+01, 4.8140e+00],
[1.3240e+00, 1.2911e+01, 6.6140e+00],
[1.9020e+00, 1.1048e+01, 7.9260e+00],
[4.0900e-01, 1.0557e+01, 7.9310e+00],
[8.9000e-01, 1.1666e+01, 9.9600e+00],
[1.2850e+00, 1.2963e+01, 9.1660e+00],
[9.0400e-01, 2.3685e+01, 1.7910e+01],
[1.3310e+00, 2.2067e+01, 1.5579e+01],
[2.0960e+00, 2.3173e+01, 1.5915e+01],
[1.7730e+00, 2.3988e+01, 1.7085e+01],
[7.0700e-01, 2.4552e+01, 1.9061e+01],
[9.7700e-01, 2.3744e+01, 2.0321e+01],
[7.0400e-01, 2.4584e+01, 2.1561e+01],
[5.9500e-01, 2.1849e+01, 1.6105e+01],
[2.2470e+00, 2.4778e+01, 1.7214e+01],
[1.3240e+00, 5.1100e-01, 1.9014e+01],
[1.9020e+00, 2.3448e+01, 2.0326e+01],
[4.0900e-01, 2.2957e+01, 2.0331e+01],
[8.9000e-01, 2.4066e+01, 2.2360e+01],
[1.2850e+00, 5.6300e-01, 2.1566e+01],
[1.3304e+01, 1.1285e+01, 1.7910e+01],
[1.3731e+01, 9.6670e+00, 1.5579e+01],
[1.4496e+01, 1.0773e+01, 1.5915e+01],
[1.4173e+01, 1.1588e+01, 1.7085e+01],
[1.3107e+01, 1.2152e+01, 1.9061e+01],
[1.3377e+01, 1.1344e+01, 2.0321e+01],
[1.3104e+01, 1.2184e+01, 2.1561e+01],
[1.2995e+01, 9.4490e+00, 1.6105e+01],
[1.4647e+01, 1.2378e+01, 1.7214e+01],
[1.3724e+01, 1.2911e+01, 1.9014e+01],
[1.4302e+01, 1.1048e+01, 2.0326e+01],
[1.2809e+01, 1.0557e+01, 2.0331e+01],
[1.3290e+01, 1.1666e+01, 2.2360e+01],
[1.3685e+01, 1.2963e+01, 2.1566e+01],
[1.3304e+01, 2.3685e+01, 5.5100e+00],
[1.3731e+01, 2.2067e+01, 3.1790e+00],
[1.4496e+01, 2.3173e+01, 3.5150e+00],
[1.4173e+01, 2.3988e+01, 4.6850e+00],
[1.3107e+01, 2.4552e+01, 6.6610e+00],
[1.3377e+01, 2.3744e+01, 7.9210e+00],
[1.3104e+01, 2.4584e+01, 9.1610e+00],
[1.2995e+01, 2.1849e+01, 3.7050e+00],
[1.4647e+01, 2.4778e+01, 4.8140e+00],
[1.3724e+01, 5.1100e-01, 6.6140e+00],
[1.4302e+01, 2.3448e+01, 7.9260e+00],
[1.2809e+01, 2.2957e+01, 7.9310e+00],
[1.3290e+01, 2.4066e+01, 9.9600e+00],
[1.3685e+01, 5.6300e-01, 9.1660e+00],
[1.1496e+01, 2.3685e+01, 1.9290e+01],
[1.1069e+01, 2.2067e+01, 2.1621e+01],
[1.0304e+01, 2.3173e+01, 2.1285e+01],
[1.0627e+01, 2.3988e+01, 2.0115e+01],
[1.1693e+01, 2.4552e+01, 1.8139e+01],
[1.1423e+01, 2.3744e+01, 1.6879e+01],
[1.1696e+01, 2.4584e+01, 1.5639e+01],
[1.1805e+01, 2.1849e+01, 2.1095e+01],
[1.0153e+01, 2.4778e+01, 1.9986e+01],
[1.1076e+01, 5.1100e-01, 1.8186e+01],
[1.0498e+01, 2.3448e+01, 1.6874e+01],
[1.1991e+01, 2.2957e+01, 1.6869e+01],
[1.1510e+01, 2.4066e+01, 1.4840e+01],
[1.1115e+01, 5.6300e-01, 1.5634e+01],
[1.1496e+01, 1.1285e+01, 6.8900e+00],
[1.1069e+01, 9.6670e+00, 9.2210e+00],
[1.0304e+01, 1.0773e+01, 8.8850e+00],
[1.0627e+01, 1.1588e+01, 7.7150e+00],
[1.1693e+01, 1.2152e+01, 5.7390e+00],
[1.1423e+01, 1.1344e+01, 4.4790e+00],
[1.1696e+01, 1.2184e+01, 3.2390e+00],
[1.1805e+01, 9.4490e+00, 8.6950e+00],
[1.0153e+01, 1.2378e+01, 7.5860e+00],
[1.1076e+01, 1.2911e+01, 5.7860e+00],
[1.0498e+01, 1.1048e+01, 4.4740e+00],
[1.1991e+01, 1.0557e+01, 4.4690e+00],
[1.1510e+01, 1.1666e+01, 2.4400e+00],
[1.1115e+01, 1.2963e+01, 3.2340e+00],
[2.3896e+01, 2.3685e+01, 6.8900e+00],
[2.3469e+01, 2.2067e+01, 9.2210e+00],
[2.2704e+01, 2.3173e+01, 8.8850e+00],
[2.3027e+01, 2.3988e+01, 7.7150e+00],
[2.4093e+01, 2.4552e+01, 5.7390e+00],
[2.3823e+01, 2.3744e+01, 4.4790e+00],
[2.4096e+01, 2.4584e+01, 3.2390e+00],
[2.4205e+01, 2.1849e+01, 8.6950e+00],
[2.2553e+01, 2.4778e+01, 7.5860e+00],
[2.3476e+01, 5.1100e-01, 5.7860e+00],
[2.2898e+01, 2.3448e+01, 4.4740e+00],
[2.4391e+01, 2.2957e+01, 4.4690e+00],
[2.3910e+01, 2.4066e+01, 2.4400e+00],
[2.3515e+01, 5.6300e-01, 3.2340e+00],
[2.3896e+01, 1.1285e+01, 1.9290e+01],
[2.3469e+01, 9.6670e+00, 2.1621e+01],
[2.2704e+01, 1.0773e+01, 2.1285e+01],
[2.3027e+01, 1.1588e+01, 2.0115e+01],
[2.4093e+01, 1.2152e+01, 1.8139e+01],
[2.3823e+01, 1.1344e+01, 1.6879e+01],
[2.4096e+01, 1.2184e+01, 1.5639e+01],
[2.4205e+01, 9.4490e+00, 2.1095e+01],
[2.2553e+01, 1.2378e+01, 1.9986e+01],
[2.3476e+01, 1.2911e+01, 1.8186e+01],
[2.2898e+01, 1.1048e+01, 1.6874e+01],
[2.4391e+01, 1.0557e+01, 1.6869e+01],
[2.3910e+01, 1.1666e+01, 1.4840e+01],
[2.3515e+01, 1.2963e+01, 1.5634e+01],
[1.3304e+01, 1.3515e+01, 6.8900e+00],
[1.3731e+01, 1.5133e+01, 9.2210e+00],
[1.4496e+01, 1.4027e+01, 8.8850e+00],
[1.4173e+01, 1.3212e+01, 7.7150e+00],
[1.3107e+01, 1.2648e+01, 5.7390e+00],
[1.3377e+01, 1.3456e+01, 4.4790e+00],
[1.3104e+01, 1.2616e+01, 3.2390e+00],
[1.2995e+01, 1.5351e+01, 8.6950e+00],
[1.4647e+01, 1.2422e+01, 7.5860e+00],
[1.3724e+01, 1.1889e+01, 5.7860e+00],
[1.4302e+01, 1.3752e+01, 4.4740e+00],
[1.2809e+01, 1.4243e+01, 4.4690e+00],
[1.3290e+01, 1.3134e+01, 2.4400e+00],
[1.3685e+01, 1.1837e+01, 3.2340e+00],
[1.3304e+01, 1.1150e+00, 1.9290e+01],
[1.3731e+01, 2.7330e+00, 2.1621e+01],
[1.4496e+01, 1.6270e+00, 2.1285e+01],
[1.4173e+01, 8.1200e-01, 2.0115e+01],
[1.3107e+01, 2.4800e-01, 1.8139e+01],
[1.3377e+01, 1.0560e+00, 1.6879e+01],
[1.3104e+01, 2.1600e-01, 1.5639e+01],
[1.2995e+01, 2.9510e+00, 2.1095e+01],
[1.4647e+01, 2.2000e-02, 1.9986e+01],
[1.3724e+01, 2.4289e+01, 1.8186e+01],
[1.4302e+01, 1.3520e+00, 1.6874e+01],
[1.2809e+01, 1.8430e+00, 1.6869e+01],
[1.3290e+01, 7.3400e-01, 1.4840e+01],
[1.3685e+01, 2.4237e+01, 1.5634e+01],
[9.0400e-01, 1.3515e+01, 1.9290e+01],
[1.3310e+00, 1.5133e+01, 2.1621e+01],
[2.0960e+00, 1.4027e+01, 2.1285e+01],
[1.7730e+00, 1.3212e+01, 2.0115e+01],
[7.0700e-01, 1.2648e+01, 1.8139e+01],
[9.7700e-01, 1.3456e+01, 1.6879e+01],
[7.0400e-01, 1.2616e+01, 1.5639e+01],
[5.9500e-01, 1.5351e+01, 2.1095e+01],
[2.2470e+00, 1.2422e+01, 1.9986e+01],
[1.3240e+00, 1.1889e+01, | |
<reponame>ariroffe/logics<gh_stars>10-100
from itertools import product
from copy import copy
from logics.classes.propositional import Formula
from logics.classes.exceptions import NotWellFormed
class LocalValidityMixin:
    """Local validity / antivalidity / contingency tests.

    Expects the host class to provide ``language``, ``truth_values`` and a
    ``satisfies(formula_or_inference, atomic_valuation_dict)`` method.
    """
    def _get_truth_value_combinations(self, formula_or_inference):
        """Return an iterable of every truth-value assignment to the atomics.

        For example, for ['∧', ['p'], ['q']] it yields (0, 0), (0, 1),
        (1, 0), (1, 1); with 3 atomics, (0, 0, 0), (0, 0, 1), ...
        A formula with no atomics (only sentential constants) gets a single
        dummy valuation so the is_* methods still examine exactly one case.
        """
        atomics = list(formula_or_inference.atomics_inside(self.language))
        if not atomics:
            return [(1,)]
        return product(self.truth_values, repeat=len(atomics))
    def _get_atomic_valuation_dict(self, formula_or_inference, combination):
        """Pair each atomic string with its truth value in COMBINATION.

        e.g. for combination (0, 1) returns {'p': 0, 'q': 1}
        """
        atomics = list(formula_or_inference.atomics_inside(self.language))
        return dict(zip(atomics, combination))
    def is_locally_valid(self, formula_or_inference):
        """Determine whether a formula or inference is locally valid.

        A formula or inference is *locally valid* iff it is satisfied by
        every valuation. For formulae (evaluated with the conclusion
        standard) this amounts to being a tautology; for regular inferences
        it is the usual mixed-logic validity notion. For metainferences,
        local validity means no valuation satisfies all the premises and
        none of the conclusions -- strictly stronger than the *global*
        notion ("if the premises are valid, the conclusion is valid").

        Parameters
        ----------
        formula_or_inference: logics.classes.propositional.Formula or logics.classes.propositional.Inference
            The formula or inference to evaluate. Inferences may be of level > 1

        Returns
        -------
        bool
            True iff every valuation satisfies the formula / inference
        """
        return all(
            self.satisfies(formula_or_inference,
                           self._get_atomic_valuation_dict(formula_or_inference, combo))
            for combo in self._get_truth_value_combinations(formula_or_inference)
        )
    def is_locally_antivalid(self, formula_or_inference):
        """Determine whether a formula or inference is locally antivalid.

        A formula or inference is *locally antivalid* iff no valuation
        satisfies it. For formulae this is the notion of contradiction; for
        (meta)inferences it is an inferential analogue of it. Antivalidity
        is strictly stronger than invalidity.

        Parameters and return value mirror ``is_locally_valid``.
        """
        return not any(
            self.satisfies(formula_or_inference,
                           self._get_atomic_valuation_dict(formula_or_inference, combo))
            for combo in self._get_truth_value_combinations(formula_or_inference)
        )
    def is_contingent(self, formula_or_inference):
        """Return True iff the Formula / Inference is neither locally valid
        nor locally antivalid."""
        if self.is_locally_valid(formula_or_inference):
            return False
        return not self.is_locally_antivalid(formula_or_inference)
class ValidityShortcutsMixin:
    """Legibility aliases for the local validity predicates."""
    def is_valid(self, inference):
        """Alias of ``is_locally_valid(inference)``."""
        return self.is_locally_valid(inference)
    def is_antivalid(self, inference):
        """Alias of ``is_locally_antivalid(inference)``."""
        return self.is_locally_antivalid(inference)
    def is_tautology(self, formula):
        """Alias of ``is_locally_valid(formula)``."""
        return self.is_locally_valid(formula)
    def is_contradiction(self, formula):
        """Alias of ``is_locally_antivalid(formula)``."""
        return self.is_locally_antivalid(formula)
class MixedManyValuedSemantics(LocalValidityMixin, ValidityShortcutsMixin):
"""Class for many-valued semantics, which may contain different standards for premises and conclusions (e.g. ST, TS)
Parameters
----------
language: language: logics.classes.propositional.Language or logics.classes.propositional.InfiniteLanguage
Instance of Language or InfiniteLanguage
truth_values: list
A list of the truth values (which may be int, str, etc.). In the instances below I use strings.
premise_designated_values: list
Must be a sublist of `truth_values`, representing the premise standard
conclusion_designated_values: list
Must be a sublist of `truth_values`, representing the conclusion standard
truth_function_dict: dict
Dict containing the logical constants (str) as keys, and n-dimensional lists as values (for constants of arity
n). Will also accept any indexable (things with a `__getitem__` method, e.g. numpy arrays)
and any n-ary callable .
sentential_constant_values_dict: dict
Dict containing the sentential constans (str) as keys, and their truth values (members of `truth_values`) as
values
use_molecular_valuation_fast_version: bool, optional
Implements a faster version of the molecular valuation function (e.g. if asked for a disjunction will return
'1' with one true disjunct, without evaluating the other). In counterpart, it is less general, since it assumes
the Kleene truth matrices. Defaults to ``False``.
name: str
Name of the system (only for prettier printing to the console)
Notes
-----
The order of `truth_values` is the order in which the rows and columns will be read in the truth functions.
For instance, if `truth_values` is ``['0', 'i', '1']``, the truth function:
.. code:: python
{
# (...)
'$':
[[v1, v2, v3],
[v4, v5, v6],
[v7, v8, v9]],
# (...)
}
for a constant $ will be intepreted as saying that
* $('0', '0') = v1
* $('i', '1') = v6
* $('1', '1') = v9
* etc.
However, if `truth_values` is entered as ``['1', 'i', '0']``, then
* $('0', '0') = v9
* $('i', '1') = v4
* $('1', '1') = v1
* etc.
Raises
------
ValueError
If the premise or conclusion designated values are not a sublist of the truth values, some logical constant of
the language does not receive a truth function (or receives something that is neither a callable nor an
indexible), or some sentential constant does not receive a truth value (or gets a truth value not present in
`truth_values`)
Examples
--------
Definition of classical logic `CL`
>>> from logics.instances.propositional.languages import classical_infinite_language_with_sent_constants
>>> from logics.classes.propositional.semantics import MixedManyValuedSemantics
>>> bivalued_truth_values = ['1', '0'] # The order is important for the lines below
>>> classical_truth_functions = {
... '~': ['0', '1'],
... '∧': [ #1 #0
... ['1', '0'], # 1
... ['0', '0']], # 0
... '∨': [ #1 #0
... ['1', '1'], # 1
... ['1', '0']], # 0
... '→': [ #1 #0
... ['1', '0'], # 1
... ['1', '1']], # 0
... '↔': [ #1 #0
... ['1', '0'], # 1
... ['0', '1']], # 0
... }
>>> CL = MixedManyValuedSemantics(language=classical_infinite_language_with_sent_constants,
... truth_values=bivalued_truth_values,
... premise_designated_values=['1'],
... conclusion_designated_values=['1'],
... truth_function_dict=classical_truth_functions,
... sentential_constant_values_dict= {'⊥': '0', '⊤': '1'},
... name='CL')
Example of a non-classical 3-valued system: the many-valued system `ST`
>>> from logics.instances.propositional.languages import classical_infinite_language_with_sent_constants
>>> from logics.classes.propositional.semantics import MixedManyValuedSemantics
| |
returned have QUERY's head in the head
and the stored instances of TABLENAMES in the body.
"""
# This is different than abduction because instead of replacing
# a proof attempt with saving a literal, we want to save a literal
# after a successful proof attempt.
assert False, "Not yet implemented"
def abduce(self, query, tablenames, find_all=True):
    """Compute additional literals that, if true, would make
    (some instance of) QUERY true.

    Returns a list of rules where the head represents an instance of
    QUERY and the body is the collection of literals that must be true
    in order to make that instance true.  If QUERY is a rule, each
    result is an instance of the head of that rule, and the computed
    literals if true make the body of that rule (and hence the head)
    true.  Only literals whose table appears in TABLENAMES may be
    assumed (abduced).  If FIND_ALL is False, the return list has at
    most one element.

    Limitation: every negative literal relevant to a proof of
    QUERY is unconditionally true, i.e. no literals are saved
    when proving a negative literal is true.
    """
    assert compile.is_datalog(query), "Explain requires a formula"
    if compile.is_atom(query):
        literals = [query]
        output = query
    else:
        # QUERY is a rule: prove its body; its head is the answer shape.
        literals = query.body
        output = query.head
    # We need all the variables we will be using in the output, which
    # here is just the head of QUERY (or QUERY itself if it is an atom)
    abductions = self.top_down_abduction(
        output.variables(), literals, find_all=find_all,
        save=lambda lit, binding: lit.table in tablenames)
    # Plug each answer binding into the output and attach the abduced
    # (saved) literals as the body of the resulting rule.
    results = [compile.Rule(output.plug(abd.binding), abd.support)
               for abd in abductions]
    self.log(query.tablename(), "abduction result:")
    self.log(query.tablename(), "\n".join([str(x) for x in results]))
    return results
def consequences(self, filter=None, tablenames=None):
    """Return all the true instances of any table that is defined
    in this theory.  Default tablenames is DEFINED_TABLENAMES.
    FILTER, when given, is a predicate on table names selecting
    which tables to query.
    """
    if tablenames is None:
        tablenames = self.defined_tablenames()
    answers = set()
    for table in tablenames:
        if filter is not None and not filter(table):
            continue
        # Query the table with one fresh variable per argument position.
        variables = [compile.Variable("x" + str(i))
                     for i in xrange(0, self.arity(table))]
        answers |= set(self.select(compile.Literal(table, variables)))
    return answers
def top_down_evaluation(self, variables, literals,
                        binding=None, find_all=True):
    """Return the dictionary bindings of VARIABLES that make LITERALS
    true according to the theory (after applying the unifier BINDING).
    If FIND_ALL is False, stops after finding one such binding.

    Evaluation is just abduction with nothing saved: delegate to
    top_down_abduction with save=None and discard the (empty) support.
    """
    answers = self.top_down_abduction(variables, literals,
                                      binding=binding, find_all=find_all,
                                      save=None)
    return [answer.binding for answer in answers]
def top_down_abduction(self, variables, literals, binding=None,
                       find_all=True, save=None):
    """Compute all bindings of VARIABLES that make LITERALS
    true according to the theory (after applying the
    unifier BINDING), if we add some number of additional
    literals.  SAVE, when given, is a predicate (literal, binding)
    deciding which literals may be assumed (saved) instead of
    proven.  Note: will not save any literals that are
    needed to prove a negated literal since the results
    would not make sense.  Returns a list of TopDownResults.
    """
    if binding is None:
        binding = self.new_bi_unifier()
    # The caller records what to extract (VARIABLES), where answers
    # accumulate, and the abduction policy (SAVE).
    caller = self.TopDownCaller(variables, binding, self,
                                find_all=find_all, save=save)
    if len(literals) == 0:
        # Empty conjunction is trivially true: record the answer directly.
        self.top_down_finish(None, caller)
    else:
        # Note: must use same unifier in CALLER and CONTEXT
        context = self.TopDownContext(literals, 0, binding, None, 0)
        self.top_down_eval(context, caller)
    # De-duplicate answers reached along different proof paths.
    return list(set(caller.results))
#########################################
## Internal implementation
def top_down_eval(self, context, caller):
    """Compute all instances of LITERALS (from LITERAL_INDEX and above)
    that are true according to the theory (after applying the
    unifier BINDING to LITERALS).  Returns False or an answer.

    Dispatches on the current literal: abduce it (if CALLER.SAVE says
    so), prove its complement for negation-as-failure, short-circuit
    the builtin 'true'/'false' tables, or fall through to regular
    rule-based proof via top_down_truth.
    """
    # no recursive rules, ever; this style of algorithm will not terminate
    lit = context.literals[context.literal_index]
    # abduction
    if caller.save is not None and caller.save(lit, context.binding):
        self.print_call(lit, context.binding, context.depth)
        # save lit and binding--binding may not be fully flushed out
        # when we save (or ever for that matter)
        caller.support.append((lit, context.binding))
        self.print_save(lit, context.binding, context.depth)
        success = self.top_down_finish(context, caller)
        caller.support.pop() # pop in either case
        if success:
            return True
        else:
            self.print_fail(lit, context.binding, context.depth)
            return False
    # regular processing
    if lit.is_negated():
        # recurse on the negation of the literal
        plugged = lit.plug(context.binding)
        assert plugged.is_ground(), \
            "Negated literal not ground when evaluated: " + str(plugged)
        self.print_call(lit, context.binding, context.depth)
        new_context = self.TopDownContext(
            [lit.complement()], 0, context.binding, None,
            context.depth + 1)
        new_caller = self.TopDownCaller(caller.variables, caller.binding,
                                        caller.theory, find_all=False,
                                        save=None)
        # Make sure new_caller has find_all=False, so we stop as soon
        # as we can.
        # Ensure save=None so that abduction does not save anything.
        # Saving while performing NAF makes no sense.
        if self.top_down_includes(new_context, new_caller):
            # The complement succeeded, so the negated literal fails.
            self.print_fail(lit, context.binding, context.depth)
            return False
        else:
            # don't need bindings b/c LIT must be ground
            return self.top_down_finish(context, caller, redo=False)
    elif lit.tablename() == 'true':
        # Builtin 'true' succeeds immediately; nothing to redo.
        self.print_call(lit, context.binding, context.depth)
        return self.top_down_finish(context, caller, redo=False)
    elif lit.tablename() == 'false':
        # Builtin 'false' always fails.
        self.print_fail(lit, context.binding, context.depth)
        return False
    else:
        return self.top_down_truth(context, caller)
def top_down_truth(self, context, caller):
    """Evaluate CONTEXT over the theory on which the original call
    was made (CALLER.THEORY) and all of its included theories.
    """
    root_theory = caller.theory
    return root_theory.top_down_includes(context, caller)
def top_down_includes(self, context, caller):
    """Top-down evaluation of this theory and, recursively, every
    theory it includes.  Stops at the first success unless the
    caller asked for all answers.
    """
    if self.top_down_th(context, caller) and not caller.find_all:
        return True
    for included in self.includes:
        if included.top_down_includes(context, caller) and not caller.find_all:
            return True
    return False
def top_down_th(self, context, caller):
    """Top-down evaluation for the rules in SELF.CONTENTS.

    Tries every rule whose head is indexed under the current literal's
    table.  Facts (empty-bodied rules) finish the proof directly;
    rules with bodies recurse one level deeper.  Unifier side effects
    are undone after every attempt.
    """
    lit = context.literals[context.literal_index]
    self.print_call(lit, context.binding, context.depth)
    for rule in self.head_index(lit.table):
        unifier = self.new_bi_unifier()
        # Prefer to bind vars in rule head
        undo = self.bi_unify(self.head(rule), unifier, lit,
                             context.binding)
        if undo is None:  # no unifier
            continue
        if len(self.body(rule)) == 0:
            # Fact: the current literal is proven outright.
            if self.top_down_finish(context, caller):
                unify.undo_all(undo)
                if not caller.find_all:
                    return True
            else:
                unify.undo_all(undo)
        else:
            # Rule with a body: prove the body in a deeper context
            # that shares the rule-head unifier.
            new_context = self.TopDownContext(
                rule.body, 0, unifier, context, context.depth + 1)
            if self.top_down_eval(new_context, caller):
                unify.undo_all(undo)
                if not caller.find_all:
                    return True
            else:
                unify.undo_all(undo)
    self.print_fail(lit, context.binding, context.depth)
    return False
def top_down_finish(self, context, caller, redo=True):
    """Helper that is called once top_down successfully completes
    a proof for a literal.  Handles (i) continuing search
    for those literals still requiring proofs within CONTEXT,
    (ii) adding solutions to CALLER once all needed proofs have
    been found, and (iii) printing out Redo/Exit during tracing.
    Returns True if the search is finished and False otherwise.
    Temporary, transparent modification of CONTEXT.
    """
    if context is None:
        # Found an answer; now store it
        if caller is not None:
            # flatten bindings and store before we undo
            # copy caller.support and store before we undo
            binding = {}
            for var in caller.variables:
                binding[var] = caller.binding.apply(var)
            result = self.TopDownResult(
                binding, [support[0].plug(support[1], caller=caller)
                          for support in caller.support])
            caller.results.append(result)
        return True
    else:
        self.print_exit(context.literals[context.literal_index],
                        context.binding, context.depth)
        # continue the search
        if context.literal_index < len(context.literals) - 1:
            # More literals remain in this context: advance the index,
            # recurse, and restore it afterwards (transparent mutation).
            context.literal_index += 1
            finished = self.top_down_eval(context, caller)
            context.literal_index -= 1  # in case answer is False
        else:
            # Last literal here: pop back to the previous context.
            finished = self.top_down_finish(context.previous, caller)
        # return search result (after printing a Redo if failure)
        if redo and (not finished or caller.find_all):
            self.print_redo(context.literals[context.literal_index],
                            context.binding, context.depth)
        return finished
def print_call(self, literal, binding, depth):
    """Trace a Call line for LITERAL at proof depth DEPTH."""
    message = "{}Call: {}".format("| " * depth, literal.plug(binding))
    self.log(literal.table, message)
def print_exit(self, literal, binding, depth):
    """Trace an Exit line for LITERAL at proof depth DEPTH."""
    message = "{}Exit: {}".format("| " * depth, literal.plug(binding))
    self.log(literal.table, message)
def print_save(self, literal, binding, depth):
    """Trace a Save (abduction) line for LITERAL at proof depth DEPTH."""
    message = "{}Save: {}".format("| " * depth, literal.plug(binding))
    self.log(literal.table, message)
def print_fail(self, literal, binding, depth):
    """Trace a Fail line; returns False so callers can `return` it."""
    message = "{}Fail: {}".format("| " * depth, literal.plug(binding))
    self.log(literal.table, message)
    return False
def print_redo(self, literal, binding, depth):
    """Trace a Redo line; returns False so callers can `return` it."""
    message = "{}Redo: {}".format("| " * depth, literal.plug(binding))
    self.log(literal.table, message)
    return False
#########################################
## Routines for specialization
@classmethod
def new_bi_unifier(cls, dictionary=None):
    """Return a unifier compatible with unify.bi_unify.

    DICTIONARY is passed straight through to unify.BiUnifier.
    """
    return unify.BiUnifier(dictionary=dictionary)
def arity(self, tablename):
    """Return the number of arguments TABLENAME takes or None if
    unknown because TABLENAME is not defined here.
    """
    # assuming a fixed arity for all tables
    formulas = self.head_index(tablename)
    if not formulas:
        return None
    # should probably have an overridable function for computing
    # the arguments of a head. Instead we assume heads have .arguments
    return len(self.head(formulas[0]).arguments)
def defined_tablenames(self):
"""This routine returns the list of all table names that are
| |
# Repository: hotpxl/minpy-jit — file: minpy/segment.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import ast
import types
import inspect
from collections import OrderedDict
from functools import wraps, reduce
from mxnet import nd
from . import core
# Monotonically increasing counter giving each generated fused-segment
# function a unique name (incremented in the codegen below).
_segment_cnt = 0
# Callables exported by mxnet.nd — presumably used elsewhere to recognize
# ndarray operations.  NOTE(review): dict .values() is a live view, not a
# snapshot; confirm membership tests expect that.
_ndarray_funcs = nd.__dict__.values()
def segment_reform(function_ast, print_new_segment):
    # CR(haoran): I feel this class definition is largely
# unnecessary. The functionality is quite specific and doesn't
# offer much generalization. Besides, try use `map` and `reduce`
# to generalize on functions instead of data structure, i.e. try
# to write in a functional fashion.
# XCR(yutian): The main reason to abstract this class out is I
# think it may be helpful when the program needs to walk through
# the ast nodes for collecting some information/doing
    # computations, which relies on its children's results. I think
# this scenario may be common.
# XCR(haoran): "it may be helpful when the program needs to walk
# through the ast nodes" -> that's why we have ast.NodeVisitor and
# ast.NodeTransformer
# if you look at it more closely, you will find that it is not at
# all generic. InfoCollector's visit functions are not uniform;
# they depend on the exact type of node that is being visited. and
# this logic is particular to segmentation logic. a good rule of
# thumb is DRY (take it with a grain of salt though)
# this is why i propose the separation of rules (when can a node
# be fused)
# WHILE you are separating the logic of aggregation of rules but
# not the rules itself
# i think it also deals with problems mentioned below (ln 120 and
# 133). i'm still working on it. i'm trying to work from your
# existing rules and see if i can come up with a SOUND (but not
# COMPLETE) version. you might go ahead working on the codegen
# part with minjie in the mean time. at least the code runs
# smoothly now
# XCR(yutian): the last part of my comment "for collecting some
# information/doing computationsm, which relies on its children's
# result" -> this is the purpose for infohelper, which is originally
# written within visit_Node function.
# Its genericness/versatility is heavily related to the type-tracing
# result.
# And you're right that it seems poor given current type info.
# We would have better judgement on that once your changes are done.
# If still poor, I would remove it.
# For the point of "not the rules itself", I think it's possible
# to add more rules by making classes like 'NodeRewriterRuleA',
# 'NodeRewriterRuleB'.
# I agree the elegant solution is to support incremental fusion.
# Will think about that.
# XCR(haoran): it's genericness is NOT related to type-tracing
# results. this is why: the segment function consists of two
# parts. 1. marking expressions (and subexpressions) as
# fusable. 2. actually fuse statements together mind 1 only
# involves expressions and 2 only statements
# part 2 is your "fusing consecutive assignments" currently.
# now for part 1, we have to investigate expressions. there are
# two steps in this: determine the type of expr(call or attr or
# binop), and then do stuff (if it's a call then check for
# atomicity if it's binop then blablabla). two steps together is
# called a rule. there are many rules: BinOp says either one is
# ndarary, call says it must be atomic and so on. (i'm approaching
# the problem with this dimension).
# the problem is InfoHelper is doing step 2 and InfoCollector is
# doing step 1. that's why i'm saying you are separating logic at
# the wrong place/dimension. as a result, you still have to write
# visit_BLABLA for every expression in InfoCollector, but this is
# already done by ast.NodeVisitor
# anyways i find this discussion very helpful. none of this
# thinking was formed before
class InfoHelper():
    """Get/set/combine logic for one piece of per-AST-node info.

    The info is stored directly on AST nodes under attribute NAME, so a
    tree can be annotated in place and queried later.
    """

    def __init__(self,
                 name,
                 init_value,
                 get_default_value,
                 update_func=None,
                 rewrite_cond=None):
        # Attribute name under which the info lives on each node.
        self._name = name
        # Neutral starting value when folding children's info together.
        self.init_value = init_value
        # NOTE(review): despite the name, this is returned (not called)
        # by get() for nodes carrying no info — confirm callers pass a
        # plain default value rather than a factory.
        self._get_default_value = get_default_value
        # Variadic fold used by update() to combine info values.
        self._update_func = update_func
        # Predicate on a node's info deciding whether to rewrite it.
        self._rewrite_cond = rewrite_cond

    def set(self, node, value):
        setattr(node, self._name, value)

    def get(self, node):
        return getattr(node, self._name, self._get_default_value)

    def do_rewrite(self, node):
        return self._rewrite_cond(self.get(node))

    def update(self, *values):
        return self._update_func(*values)
class InfoCollector(ast.NodeTransformer):
    """Annotate AST nodes bottom-up with fusability info.

    For each visited node, the info values of selected child fields and
    the results of selected predicate functions are folded together with
    ``info_helper.update`` and stored on the node via ``info_helper.set``.

    Parameters
    ----------
    info_helper : object with init_value, get, set, update
        Strategy object holding the per-node info protocol.
    funcs : sequence of callables
        Predicate functions; looked up later by ``__name__`` in the
        per-visitor ``funcs`` lists.
    """

    # Fix: the original used mutable list defaults (funcs=[], attrs=[]);
    # immutable tuples avoid the shared-mutable-default pitfall while
    # remaining behaviorally identical (the defaults were never mutated).
    def __init__(self, info_helper, funcs=()):
        super(InfoCollector, self).__init__()
        self._info_helper = info_helper
        self._funcs = {func.__name__: func for func in funcs}

    def _collect_info(self, node, attrs=(), funcs=()):
        """Fold info from child fields ATTRS and predicates FUNCS onto NODE."""
        # Visit children first so their info is available to fold.
        self.generic_visit(node)
        info = self._info_helper.init_value
        for name in attrs:
            child = getattr(node, name)
            if isinstance(child, list):
                info = reduce(
                    self._info_helper.update,
                    [info] + list(map(self._info_helper.get, child)))
            else:
                info = self._info_helper.update(
                    info, self._info_helper.get(child))
        info = reduce(
            self._info_helper.update, [info] +
            list(map(lambda name: self._funcs[name](node), funcs)))
        self._info_helper.set(node, info)
        return node

    def visit_FunctionDef(self, node):
        # Function and If nodes are never fused themselves; only recurse.
        self.generic_visit(node)
        return node

    def visit_If(self, node):
        self.generic_visit(node)
        return node

    def visit_Assign(self, node):
        return self._collect_info(node, attrs=['value'])

    def visit_Expr(self, node):
        return self._collect_info(node, attrs=['value'])

    def visit_Call(self, node):
        # CR(haoran): atomic functions could also take lists/dicts of
        # ndarrays or read-only objects; also prevent e.g.
        # `atomic(3 if flag else 2, ...)` from fusing.
        # XCR(yutian): input lists are not checked yet; open questions:
        # how to get list/dict element types, which elements are created
        # inside the function, recursive calls — no simple solution yet.
        return self._collect_info(
            node, attrs=['args'], funcs=['is_atomic_func'])

    def visit_BinOp(self, node):
        # CR discussion: the fusable condition should be "either or both
        # sides is NDArray".  Both-numeric operands are allowed here and
        # the NDArray check is deferred to the end (e.g. the type of the
        # assignment's right operand); that final check is still missing.
        return self._collect_info(
            node, attrs=['left', 'right'], funcs=['is_ndarray_or_numeric'])

    def visit_Name(self, node):
        return self._collect_info(node, funcs=['is_ndarray_or_numeric'])

    def visit_Num(self, node):
        return self._collect_info(node)

    def visit_Attribute(self, node):
        # Treat an attribute expr as a whole
        return self._collect_info(node, funcs=['is_ndarray_or_numeric'])

    def visit_Subscript(self, node):
        # Treat a subscript expr as a whole
        return self._collect_info(node, funcs=['is_ndarray_or_numeric'])
class NodeRewriter(ast.NodeTransformer):
def __init__(self, info_helper):
super(NodeRewriter, self).__init__()
self._info_helper = info_helper
def fuse_consecutive_assign_and_expr(self, stmts):
def make_ast_call(func_name, ins, outs):
return ast.Assign(
targets=[
ast.Tuple(
elts=[
ast.Name(id=e, ctx=ast.Store()) for e in outs
],
ctx=ast.Store())
],
value=ast.Call(
func=ast.Name(id=func_name, ctx=ast.Load()),
args=[ast.Name(id=e, ctx=ast.Load()) for e in ins],
keywords=[]))
def make_ast_function_def(func_name, stmts, ins, outs):
return ast.FunctionDef(
name=func_name,
args=ast.arguments(
args=[ast.arg(arg=e, annotation=None) for e in ins],
vararg=None,
kwonlyargs=[],
kw_defaults=[],
kwarg=None,
defaults=[]),
body=[
*stmts,
ast.Return(value=ast.Tuple(
elts=[
ast.Name(id=e, ctx=ast.Load()) for e in outs
],
ctx=ast.Load()))
],
decorator_list=[],
returns=None)
def fuse(nodes):
ins, outs = infer_inputs_and_outputs_given_nodes(nodes)
global _segment_cnt
if print_new_segment:
print('Segment {} info: '.format(_segment_cnt))
print('\tinput list: ', ins)
print('\toutput list: ', outs)
for i, e in enumerate(nodes):
print('\t ast node {} {}'.format(i, type(e).__name__))
print('\n')
func_name = '_fuse_func_{}'.format(_segment_cnt)
_segment_cnt += 1
func_def = make_ast_function_def(func_name, nodes, ins, outs)
call_node = make_ast_call(func_name, ins, outs)
new_funcdefs.append(func_def)
return call_node
def get_consecutive_assign_and_expr(stmts):
pos, leng = (0, 0)
while pos < len(stmts):
if (isinstance(stmts[pos], ast.Assign)
or isinstance(stmts[pos], ast.Expr)
) and self._info_helper.do_rewrite(stmts[pos]):
leng += 1
else:
if leng > 0:
yield (pos - leng, leng)
leng = 0
pos += 1
if leng > 0:
yield (pos - leng, leng)
removed_num = 0
for (st, leng) in get_consecutive_assign_and_expr(stmts):
st -= removed_num
stmts[st] = fuse(stmts[st:st + leng])
removed_num += leng | |
# @today 0 -> text:today, meaning:monday
if "@today" in constraints:
if "0" in constraints["@today"]:
resolved_dict["@today_00"] = "today"
if "1" in constraints["@today"]:
resolved_dict["@today_01"] = "current"
if "2" in constraints["@today"]:
resolved_dict["@today_02"] = "currently"
# @weekly_time: could make a list 0: week 1: today 2: tmr
if "@weekly_time" in constraints:
if "0" in constraints["@weekly_time"]:
resolved_dict["@weekly_time_00"] = "week"
if "1" in constraints["@weekly_time"]:
resolved_dict["@weekly_time_01"] = "today"
if "2" in constraints["@weekly_time"]:
resolved_dict["@weekly_time_02"] = "tomorrow"
if "3" in constraints["@weekly_time"]:
resolved_dict["@weekly_time_03"] = "weekend"
if "6" in constraints["@weekly_time"]:
resolved_dict["@weekly_time_06"] = random.sample(["next_week", "this week", "next_few_days", "next 7 days"], 1)[0]
# navigate domain
for i in range(1,9):
if f"@traffic_info_{i}" in constraints:
if -1 in constraints[f"@traffic_info_{i}"]:
resolved_dict[f"@traffic_info_{i}0"] = "heavy_traffic"
elif -2 in constraints[f"@traffic_info_{i}"]:
resolved_dict[f"@traffic_info_{i}0"] = "no_traffic"
return resolved_dict
def generate_weather_dialogues(kb_path, dialogue, delex_to_chat_dict, delex_resolved_args_list, key_group, constraints, rain_record):
    """Instantiate a delexicalized weather-domain dialogue against the KB.

    Augments CONSTRAINTS from the "1" key group (negation, max/min
    temperature flags), queries the KB via generate_sql/get_entities, and
    renders one string dialogue per candidate entity assignment.
    Raises AssertionError if no dialogue could be produced.
    """
    resolved_dicts = []
    str_dialogues = []
    # consider constraints in key "0"
    # NOTE(review): `negation` is only used to set constraints["negation"];
    # the local flag itself is never read afterwards.
    negation = False
    # assume there's no multiple constraints for each key
    for key in key_group["1"]:
        i = 0
        ent = key_group["1"][key]
        if "0" in ent:
            # detected constraint exists
            if key == "@weather_attribute": # negation
                negation = True
                constraints["negation"] = 0 # weekly negation
            if key == "@temperature_high" or key == "@temperature_low":
                constraints[f"{key}_{i+1}"] = []
                constraints[f"{key}_{i+1}"].append(0)
                # Week-scoped max/min when the weekly_time constraint covers
                # the whole week (or is absent); otherwise day-scoped.
                if ("@weekly_time" in constraints and ("0" in constraints["@weekly_time"] or "6" in constraints["@weekly_time"])) or "@weekly_time" not in constraints:
                    constraints["max_min_week"] = True
                else:
                    constraints["max_min"] = True
    # consider constraints that are not detected in delex process
    # go through ```dialogue``` to match key words
    base_resolved_dict = fill_constraints(constraints)
    # only handle "1" group TODO extend to "2"
    sql, sql_cols = generate_sql("weather", key_group["1"], constraints) # from key group and constraints generate sql
    resolved_dicts = get_entities("weather", base_resolved_dict, kb_path, sql, sql_cols, constraints) # use sql to query the KB and get the possible entities
    for resolved_dict in resolved_dicts:
        # Generate new dialogue
        for delex_word, knowledge_value in resolved_dict.items():
            if delex_word in delex_to_chat_dict:
                for chat in delex_to_chat_dict[delex_word]:
                    # Special-case "rain": if the matching location has a
                    # rain record, render it as "raining" instead.
                    if knowledge_value == "rain" and rain_record is not None:
                        location_delex = delex_word[:-1].replace("weather_attribute", "location")
                        location_value = resolved_dict[location_delex]
                        for item in rain_record:
                            if item[0] == location_value:
                                knowledge_value = "raining"
                            else:
                                # NOTE(review): no-op branch kept as-is.
                                knowledge_value = knowledge_value
                    chat.str = chat.str.replace(delex_word, knowledge_value)
        # Detect "word y" "word ing"
        # Generate string version of the new dialogue
        str_dialogue = []
        for turn_id, request, response in dialogue:
            str_dialogue.append((turn_id, request.str.replace(" y ", "").replace(" ing ", ""), response.str.replace(" y ", "").replace(" ing ", "")))
        # Reset all RevertibleString to the original chat
        for delex_word in delex_to_chat_dict.keys():
            for chat in delex_to_chat_dict[delex_word]:
                chat.to_origin()
        str_dialogues.append(str_dialogue)
    assert len(str_dialogues) > 0
    return str_dialogues
def generate_schedule_dialogues(kb_path, dialogue, delex_to_chat_dict, delex_resolved_args_list, key_group, constraints):
    """Instantiate a delexicalized schedule-domain dialogue against the KB.

    Builds a SQL query from the "1" key group and CONSTRAINTS, fetches
    candidate entity assignments, and renders one string dialogue per
    assignment.  Returns None when the KB yields no entities; raises
    AssertionError if entities exist but no dialogue was produced.
    """
    base_resolved_dict = fill_constraints(constraints)
    # from key group and constraints generate sql
    # use sql to query the KB and get the possible entities
    # only handle "1" group TODO extend to "2"
    sql, sql_cols = generate_sql("schedule", key_group["1"], constraints)
    resolved_dicts = get_entities("schedule", base_resolved_dict, kb_path, sql, sql_cols, constraints)
    if resolved_dicts is None:
        return None
    str_dialogues = []
    for resolved_dict in resolved_dicts:
        # Substitute each delex placeholder with its (lowercased) KB value.
        for delex_word, knowledge_value in resolved_dict.items():
            for chat in delex_to_chat_dict.get(delex_word, []):
                chat.str = chat.str.replace(delex_word, knowledge_value.lower())
        # Render the string version of the substituted dialogue.
        str_dialogue = [(turn_id, request.str, response.str)
                        for turn_id, request, response in dialogue]
        # Reset every RevertibleString back to its original template.
        for chats in delex_to_chat_dict.values():
            for chat in chats:
                chat.to_origin()
        str_dialogues.append(str_dialogue)
    assert len(str_dialogues) > 0
    return str_dialogues
def generate_navigate_dialogues(kb_path, dialogue, delex_to_chat_dict, delex_resolved_args_list, key_group, constraints):
    """Instantiate a delexicalized navigate-domain dialogue against the KB.

    Derives traffic/distance constraints both from the "1" key group and
    from keyword matching over the raw dialogue text, queries the KB, and
    renders one string dialogue per candidate entity assignment.  Returns
    None when the KB yields no entities.
    """
    resolved_dicts = []
    str_dialogues = []
    # consider contraints in key "0"
    # assume there's no multiple constraints for each key
    for key in key_group["1"]:
        i = 0
        ent = key_group["1"][key]
        if "0" in ent:
            # detected constraint exists
            if key == "@traffic_info": # may be "heavy_traffic" / "no_traffic"
                # NOTE(review): f"{key}_{i+1}{0}" renders the literal 0,
                # producing e.g. "@traffic_info_10" — consistent with the
                # "@traffic_info_{i}0" keys used elsewhere; confirm intended.
                if "avoid" in " ".join([ sent.str for sent in delex_to_chat_dict[f"{key}_{i+1}{0}"]]):
                    constraints[f"{key}_{i+1}"] = []
                    constraints[f"{key}_{i+1}"].append(-1) # negation: avoid heavy_traffic
                else:
                    constraints[f"{key}_{i+1}"] = []
                    constraints[f"{key}_{i+1}"].append(-2) # no_traffic, soft constraint, ignore
    # consider constraints that are not detected in delex process
    for turn_id, request, response in dialogue:
        for sent in [request.str, response.str]: # go through ```dialogue``` to match key words
            # navigate domain:
            if ("less traffic" in sent) or ("least traffic" in sent): # 1. traffic_info: "less traffic" \ "least traffic" \ "avoid @traffic_info_x0"(heavy_traffic) -1
                if f"@traffic_info_{i+1}" not in constraints:
                    constraints[f"@traffic_info_{i+1}"] = []
                    constraints[f"@traffic_info_{i+1}"].append(0) # choose the one with less traffic
                    constraints["max_min"] = True
            if ("closest" in sent) or ("quickest" in sent) or ("nearest" in sent) or ("shortest" in sent): # 2. distance: "closest" \ "quickest" \
                if f"@distance_{i+1}" not in constraints:
                    constraints[f"@distance_{i+1}"] = []
                    constraints[f"@distance_{i+1}"].append(0) # choose the one with less distance
                    constraints["max_min"] = True
    base_resolved_dict = fill_constraints(constraints)
    # from key group and constraints generate sql
    # use sql to query the KB and get the possible entities
    # only handle "1" group TODO extend to "2"
    sql, sql_cols = generate_sql("navigate", key_group["1"], constraints)
    resolved_dicts = get_entities("navigate", base_resolved_dict, kb_path, sql, sql_cols, constraints)
    if resolved_dicts is None:
        return None
    for resolved_dict in resolved_dicts:
        # Generate new dialogue
        for delex_word, knowledge_value in resolved_dict.items():
            if delex_word in delex_to_chat_dict:
                for chat in delex_to_chat_dict[delex_word]:
                    chat.str = chat.str.replace(delex_word, knowledge_value.lower())
        # Detect "word y" "word ing"
        # Generate string version of the new dialogue
        str_dialogue = []
        for turn_id, request, response in dialogue:
            str_dialogue.append((turn_id, request.str, response.str))
        # Reset all RevertibleString to the original chat
        for delex_word in delex_to_chat_dict.keys():
            for chat in delex_to_chat_dict[delex_word]:
                chat.to_origin()
        str_dialogues.append(str_dialogue)
    assert len(str_dialogues) > 0
    return str_dialogues
def generate_dialogues(kb_path, meta_dialogue, rain_record):
    """Dispatch a meta-dialogue to the domain-specific generator.

    Unpacks META_DIALOGUE, groups the delex placeholders by their index
    suffix, balances schedule-domain key groups, then calls the
    weather/schedule/navigate generator.  Returns that generator's list
    of string dialogues (possibly None for schedule/navigate).
    """
    dialogue, delex_to_chat_dict, delex_resolved_args_list, dialog_type = meta_dialogue
    # NOTE(review): poi is always looked up under "schedule" regardless of
    # dialog_type — confirm this is intentional.
    poi = Type2POI["schedule"]
    # get maximum entity values for entities
    key_group = {}
    for meta in delex_resolved_args_list:
        # Split "@key_<group><dist>" into its key, group index and
        # distinguishing suffix.
        last_underscore_index = meta.rindex('_')
        delex_key = meta[:last_underscore_index]
        delex_index = meta[last_underscore_index+1]
        if (delex_key == poi):
            dist_index = ""
        else:
            dist_index = meta[-1]
        if delex_key != poi:
            if delex_index not in key_group:
                key_group[delex_index] = {}
            if delex_key not in key_group[delex_index]:
                key_group[delex_index][delex_key] = []
            key_group[delex_index][delex_key].append(dist_index)
    if dialog_type == "schedule":
        # Pad every key's suffix list up to the longest one in its group.
        # NOTE(review): the turn/sent loops recompute the same expansion
        # repeatedly; the operation is idempotent after the first pass.
        for i in key_group:
            key_dict = key_group[i]
            for turn_id, request, response in dialogue:
                for sent in [request.str, response.str]:
                    expand_num = max([len(item) for item in list(key_dict.values())])
                    for key in key_dict:
                        if len(key_dict[key]) < expand_num:
                            expand_key = key_dict[key] * int(expand_num / len(key_dict[key]))
                            key_dict[key] = expand_key
    # Group "0" (if present) carries the detected constraints.
    if "0" in key_group:
        constraints = key_group["0"]
    else:
        constraints = {}
    if dialog_type == "weather":
        str_dialogues = generate_weather_dialogues(kb_path, dialogue, delex_to_chat_dict, delex_resolved_args_list, key_group, constraints, rain_record)
    elif dialog_type == "schedule":
        str_dialogues = generate_schedule_dialogues(kb_path, dialogue, delex_to_chat_dict, delex_resolved_args_list, key_group, constraints)
    else:
        str_dialogues = generate_navigate_dialogues(kb_path, dialogue, delex_to_chat_dict, delex_resolved_args_list, key_group, constraints)
    return str_dialogues
def generte_dialog_test_set(d, knowledge_folder, dialogue_path, num_augmented_dialogue, kb_types, split, output_folder):
    """Generate augmented dialogue files for every KB of domain `d` in `split`.

    For each KB index whose domain matches `d`, resolves up to
    `num_augmented_dialogue` template dialogues against the KB and dumps the
    result to `<output_folder>/dialog_<i>.txt`.  A missing KB produces an
    empty placeholder file.

    Args:
        d (str): Target domain ("weather", "schedule", or "navigate").
        knowledge_folder (str): Root folder holding `<split>/dialog_<i>.db`
            files and `<split>_rain_record.json`.
        dialogue_path (str): Path to the dialogue template file.
        num_augmented_dialogue (int): Max templates to resolve per KB.
        kb_types (dict): Maps split name to the per-index domain list.
        split (str): Dataset split ("train", ...).
        output_folder (str): Destination folder for the generated files.
    """
    # The templates, their index, and the rain records do not depend on the
    # KB index -- read them once instead of once per sample (the original
    # re-read all three files inside the loop).
    dialogues = read_dialogue(dialogue_path)
    index_dialog = generate_dialogue_index(dialogues)
    with open(f"{knowledge_folder}/{split}_rain_record.json", "r") as f:
        rain_record_map = json.load(f)

    num_sample = len(kb_types[split])
    for i in tqdm(range(num_sample), total=num_sample, ncols=100):
        domain = kb_types[split][i]
        if domain != d:
            continue

        kb_path = os.path.join(knowledge_folder + "/" + split, f"dialog_{i}.db")
        if not os.path.exists(kb_path):
            # No KB for this dialog: emit an empty placeholder file.
            with open(os.path.join(output_folder, f"dialog_{i}.txt"), "w") as f:
                f.write("")
            continue

        # Rain records only exist for some indices; None otherwise.
        rain_record = rain_record_map.get(str(i))

        meta_dialogues = index_dialog.loc[index_dialog['domain'] == domain, 'meta'][:num_augmented_dialogue]
        str_dialogues = []
        for template_id, meta_dialogue in enumerate(meta_dialogues):
            str_dialogue = generate_dialogues(kb_path, meta_dialogue, rain_record)
            if str_dialogue is not None:
                str_dialogues.extend(str_dialogue)
        assert len(str_dialogues) > 0
        dump_dialogue_to_file(str_dialogues, os.path.join(output_folder, f"dialog_{i}.txt"))
if __name__ == "__main__":
    # Parse args
    parser = argparse.ArgumentParser(description='Generation SMD')
    parser.add_argument('--dialogue_path', type=str, default='./templates/weather_template.txt', help='dialog path, default: ./templates/weather_template.txt')
    parser.add_argument('--knowledge_folder', type=str, default='./SMD/KBs', help='knowledge base folder, default: ./SMD/KBs')
    parser.add_argument('--output_folder', type=str, default='./SMD', help='output folder path for generation result, default: ./SMD')
    parser.add_argument('--domain', type=str, default="weather", help='dialogue domain and KB domain, default: weather')
    parser.add_argument('--num_augmented_knowledge', type=int, default=10, help='number of augmented knowledge, default: 10')
    parser.add_argument('--num_augmented_dialogue', type=int, default=10, help='number of augmented dialogue, default: 10')
    parser.add_argument('--random_seed', type=int, default=0, help='random seed for reproducible sampling, default: 0')
    parser.add_argument('--split', type=str, default="train", help='KB source, default: train')
    parser.add_argument('--build_db', action="store_true", help='whether to do build database from the dataset')
    args = vars(parser.parse_args())

    # Print begin information
    print('== Selective Generation SMD Dialogue ==')
    print(args)

    # Build DB if needed.  When --build_db is passed, only (re)build the
    # database and stop; dialogue generation is expected to be run in a
    # separate invocation without this flag.
    if args["build_db"]:
        build_db(args["knowledge_folder"], args["split"])
        exit()
| |
0.1992188, 0.1718750, 0.1484375, 0.1289062, 0.1132812,
0.0976562, 0.0859375, 0.0781250, 0.0742188, 0.0742188, 0.0742188,
0.0781250, 0.0859375, 0.0976562, 0.1132812, 0.1289062, 0.1484375,
0.1718750, 0.1992188, 0.2265625, 0.2539062, 0.2851562, 0.3164062,
0.3476562, 0.3828125, 0.4179688, 0.4492188, 0.4843750, 0.5156250,
0.5468750, 0.5781250, 0.6054688, 0.6328125, 0.6601562, 0.6835938,
0.7031250, 0.7187500, 0.7343750, 0.7460938, 0.7539062, 0.7578125,
0.7617188, 0.7578125, 0.7539062, 0.7460938, 0.7343750, 0.7187500,
0.7031250, 0.6835938, 0.6601562, 0.6328125, 0.6054688, 0.5781250,
0.5468750, 0.5156250, 0.4843750, 0.4492188, 0.4140625, 0.3828125,
0.3476562, 0.3164062, 0.2851562, 0.2539062, 0.2265625, 0.1992188,
0.1718750, 0.1484375, 0.1289062, 0.1132812, 0.0976562, 0.0859375,
0.0781250, 0.0742188, 0.0742188, 0.0742188, 0.0781250, 0.0859375,
0.0976562, 0.1132812, 0.1289062, 0.1484375, 0.1718750, 0.1992188,
0.2265625, 0.2539062, 0.2851562, 0.3164062, 0.3476562, 0.3828125,
0.4179688, 0.4492188, 0.4843750, 0.5156250, 0.5468750, 0.5781250,
0.6054688, 0.6328125, 0.6601562, 0.6835938, 0.7031250, 0.7187500,
0.7343750, 0.7460938, 0.7539062, 0.7578125, 0.7617188, 0.7578125,
0.7539062, 0.7460938, 0.7343750, 0.7187500, 0.7031250, 0.6835938,
0.6601562, 0.6328125, 0.6054688, 0.5781250, 0.5468750, 0.5156250,
0.4843750, 0.4492188, 0.4140625, 0.3828125, 0.3476562, 0.3164062,
0.2851562, 0.2539062, 0.2265625, 0.1992188, 0.1718750, 0.1484375,
0.1289062, 0.1132812, 0.0976562, 0.0859375, 0.0781250, 0.0742188,
0.0742188, 0.0742188, 0.0781250, 0.0859375, 0.0976562, 0.1132812,
0.1289062, 0.1484375, 0.1718750, 0.1992188, 0.2265625, 0.2539062,
0.2851562, 0.3164062, 0.3476562, 0.3828125, 0.4179688, 0.4492188,
0.4843750, 0.5156250, 0.5468750, 0.5781250, 0.6054688, 0.6328125,
0.6601562, 0.6835938, 0.7031250, 0.7187500, 0.7343750, 0.7460938,
0.7539062, 0.7578125, 0.7617188, 0.7578125, 0.7539062, 0.7460938,
0.7343750, 0.7187500, 0.7031250, 0.6835938, 0.6601562, 0.6328125,
0.6054688, 0.5781250, 0.5468750, 0.5156250, 0.4843750, 0.4492188,
0.4140625, 0.3828125, 0.3476562, 0.3164062, 0.2851562, 0.2539062,
0.2265625, 0.1992188, 0.1718750, 0.1484375, 0.1289062, 0.1132812,
0.0976562, 0.0859375, 0.0781250, 0.0781250]),
array([ 0.0273438, 0.0273438, 0.0429688, 0.0585938, 0.0742188, 0.0859375,
0.1015625, 0.1171875, 0.1328125, 0.1523438, 0.1679688, 0.1835938,
0.1992188, 0.2148438, 0.2304688, 0.2460938, 0.2617188, 0.2773438,
0.2929688, 0.3046875, 0.3203125, 0.3359375, 0.3515625, 0.3632812,
0.3789062, 0.3906250, 0.4023438, 0.4179688, 0.4296875, 0.4414062,
0.4531250, 0.4648438, 0.4765625, 0.4882812, 0.5000000, 0.5078125,
0.5195312, 0.5312500, 0.5429688, 0.5507812, 0.5625000, 0.5742188,
0.5859375, 0.5976562, 0.6093750, 0.6210938, 0.6328125, 0.6445312,
0.6601562, 0.6718750, 0.6875000, 0.7031250, 0.7187500, 0.7343750,
0.7500000, 0.7656250, 0.7851562, 0.8007812, 0.8203125, 0.8398438,
0.8593750, 0.8789062, 0.8984375, 0.9179688, 0.9414062, 0.9609375,
0.9804688, 0.9882812, 0.9687500, 0.9492188, 0.9257812, 0.9062500,
0.8867188, 0.8671875, 0.8476562, 0.8281250, 0.8085938, 0.7890625,
0.7734375, 0.7578125, 0.7421875, 0.7265625, 0.7109375, 0.6992188,
0.6875000, 0.6718750, 0.6640625, 0.6523438, 0.6445312, 0.6328125,
0.6250000, 0.6171875, 0.6132812, 0.6054688, 0.6015625, 0.5937500,
0.5898438, 0.5859375, 0.5781250, 0.5742188, 0.5703125, 0.5625000,
0.5585938, 0.5507812, 0.5468750, 0.5390625, 0.5312500, 0.5234375,
0.5156250, 0.5039062, 0.4921875, 0.4804688, 0.4687500, 0.4531250,
0.4414062, 0.4218750, 0.4062500, 0.3867188, 0.3671875, 0.3476562,
0.3242188, 0.3046875, 0.2812500, 0.2539062, 0.2304688, 0.2070312,
0.1796875, 0.1523438, 0.1250000, 0.0976562, 0.0703125, 0.0429688,
0.0195312, 0.9882812, 0.9609375, 0.9335938, 0.9101562, 0.8867188,
0.8632812, 0.8398438, 0.8203125, 0.8007812, 0.7812500, 0.7617188,
0.7460938, 0.7343750, 0.7187500, 0.7070312, 0.6953125, 0.6875000,
0.6796875, 0.6718750, 0.6679688, 0.6640625, 0.6601562, 0.6601562,
0.6562500, 0.6562500, 0.6562500, 0.6562500, 0.6601562, 0.6601562,
0.6601562, 0.6601562, 0.6640625, 0.6640625, 0.6640625, 0.6601562,
0.6601562, 0.6562500, 0.6523438, 0.6484375, 0.6406250, 0.6328125,
0.6250000, 0.6132812, 0.6015625, 0.5859375, 0.5703125, 0.5507812,
0.5312500, 0.5117188, 0.4882812, 0.4648438, 0.4375000, 0.4140625,
0.3828125, 0.3554688, 0.3242188, 0.2929688, 0.2617188, 0.2265625,
0.1953125, 0.1601562, 0.1289062, 0.0937500, 0.0625000, 0.0312500,
0.9960938, 0.9648438, 0.9335938, 0.9062500, 0.8789062, 0.8554688,
0.8320312, 0.8085938, 0.7890625, 0.7695312, 0.7539062, 0.7382812,
0.7265625, 0.7148438, 0.7070312, 0.6992188, 0.6953125, 0.6914062,
0.6914062, 0.6914062, 0.6953125, 0.6992188, 0.7031250, 0.7070312,
0.7148438, 0.7187500, 0.7265625, 0.7343750, 0.7421875, 0.7500000,
0.7539062, 0.7617188, 0.7656250, 0.7695312, 0.7734375, 0.7734375,
0.7734375, 0.7734375, 0.7695312, 0.7617188, 0.7539062, 0.7460938,
0.7343750, 0.7187500, 0.7031250, 0.6835938, 0.6601562, 0.6367188,
0.6093750, 0.5820312, 0.5546875, 0.5195312, 0.4882812, 0.4531250,
0.4179688, 0.3789062, 0.3398438, 0.3398438]),
array([ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0]),
)
### IDL colormap 33 :: Blue-Red ###
color_map_luts['idl33'] = \
(
array([ 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0117188, 0.0273438, 0.0429688,
0.0585938, 0.0742188, 0.0898438, 0.1054688, 0.1210938, 0.1367188,
0.1523438, 0.1679688, 0.1835938, 0.1992188, 0.2148438, 0.2304688,
0.2460938, 0.2617188, 0.2773438, 0.2929688, 0.3085938, 0.3242188,
0.3398438, 0.3554688, 0.3710938, 0.3867188, 0.4023438, 0.4179688,
0.4335938, 0.4492188, 0.4648438, 0.4804688, 0.4960938, 0.5117188,
0.5273438, 0.5429688, 0.5585938, 0.5742188, 0.5898438, 0.6054688,
0.6210938, 0.6367188, 0.6523438, 0.6679688, 0.6835938, 0.6992188,
0.7148438, 0.7304688, 0.7460938, 0.7617188, 0.7773438, 0.7929688,
0.8085938, 0.8242188, 0.8398438, 0.8554688, 0.8710938, 0.8867188,
0.9023438, 0.9179688, 0.9335938, 0.9492188, 0.9648438, 0.9804688,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9765625,
0.9609375, 0.9414062, 0.9257812, 0.9101562, 0.8906250, 0.8750000,
0.8554688, 0.8398438, 0.8242188, 0.8046875, 0.7890625, 0.7695312,
0.7539062, 0.7382812, 0.7187500, 0.7031250, 0.6835938, 0.6679688,
0.6523438, 0.6328125, 0.6171875, 0.5976562, 0.5820312, 0.5664062,
0.5468750, 0.5312500, 0.5117188, 0.5117188]),
array([ 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0117188, 0.0273438,
0.0429688, 0.0585938, 0.0742188, 0.0898438, 0.1054688, 0.1210938,
0.1367188, 0.1523438, 0.1679688, 0.1835938, 0.1992188, 0.2148438,
0.2304688, 0.2460938, 0.2617188, 0.2773438, 0.2929688, 0.3085938,
0.3242188, 0.3398438, 0.3554688, 0.3710938, 0.3867188, 0.4023438,
0.4179688, 0.4335938, 0.4492188, 0.4648438, 0.4804688, 0.4960938,
0.5117188, 0.5273438, 0.5429688, 0.5585938, 0.5742188, 0.5898438,
0.6054688, 0.6210938, 0.6367188, 0.6523438, 0.6679688, 0.6835938,
0.6992188, 0.7148438, 0.7304688, 0.7460938, 0.7617188, 0.7773438,
0.7929688, 0.8085938, 0.8242188, 0.8398438, 0.8554688, 0.8710938,
0.8867188, 0.9023438, 0.9179688, 0.9335938, 0.9492188, 0.9648438,
0.9804688, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9804688, 0.9648438, 0.9492188, 0.9335938, 0.9179688,
0.9023438, 0.8867188, 0.8710938, 0.8554688, 0.8398438, 0.8242188,
0.8085938, 0.7929688, 0.7773438, 0.7617188, 0.7460938, 0.7304688,
0.7148438, 0.6992188, 0.6835938, 0.6679688, 0.6523438, 0.6367188,
0.6210938, 0.6054688, 0.5898438, 0.5742188, 0.5585938, 0.5429688,
0.5273438, 0.5117188, 0.4960938, 0.4804688, 0.4648438, 0.4492188,
0.4335938, 0.4179688, 0.4023438, 0.3867188, 0.3710938, 0.3554688,
0.3398438, 0.3242188, 0.3085938, 0.2929688, 0.2773438, 0.2617188,
0.2460938, 0.2304688, 0.2148438, 0.1992188, 0.1835938, 0.1679688,
0.1523438, 0.1367188, 0.1210938, 0.1054688, 0.0898438, 0.0742188,
0.0585938, 0.0429688, 0.0273438, 0.0117188, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,
0.0000000, 0.0000000, 0.0000000, 0.0000000]),
array([ 0.5117188, 0.5117188, 0.5273438, 0.5429688, 0.5585938, 0.5742188,
0.5898438, 0.6054688, 0.6210938, 0.6367188, 0.6523438, 0.6679688,
0.6835938, 0.6992188, 0.7148438, 0.7304688, 0.7460938, 0.7617188,
0.7773438, 0.7929688, 0.8085938, 0.8242188, 0.8398438, 0.8554688,
0.8710938, 0.8867188, 0.9023438, 0.9179688, 0.9335938, 0.9492188,
0.9648438, 0.9804688, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938, 0.9960938,
0.9960938, 0.9960938, 0.9960938, | |
state_out = State(x=x, v=vf, beta=state.beta)
logdet = tf.reduce_sum(scale, axis=1)
return state_out, logdet
    def _update_v_forward(
            self,
            state: State,
            step: int,
            training: bool = None
    ):
        """Update the momentum `v` in the forward leapfrog step.

        Args:
            state (State): Input state.
            step (int): Index of the current leapfrog step; selects the
                step-dependent step size `veps[step]` and momentum network.
            training (bool): Currently training?

        Returns:
            new_state (State): New state, with updated momentum.
            logdet (float): Jacobian factor.
        """
        # Pure-HMC mode has no trainable S/T/Q networks; defer to the
        # base-class (plain leapfrog) update.
        if self.config.hmc:
            return super()._update_v_forward(state, step, training)

        eps = self.veps[step]
        x = self.normalizer(state.x)
        grad = self.grad_potential(x, state.beta)

        # Scale (S), translation (T), transformation (Q) from the momentum
        # network for this step, each gated by its net-weight multiplier.
        S, T, Q = self._call_vnet((x, grad), step, training)
        scale = self._vsw * (0.5 * eps * S)
        transl = self._vtw * T
        transf = self._vqw * (eps * Q)

        expS = tf.exp(scale)
        expQ = tf.exp(transf)

        # Half-step momentum update:
        # v' = v * exp(s) - (eps / 2) * (grad * exp(q) - t)
        vf = state.v * expS - 0.5 * eps * (grad * expQ - transl)

        state_out = State(x=x, v=vf, beta=state.beta)
        # log|det J| of the update is the sum of the scale exponents.
        logdet = tf.reduce_sum(scale, axis=1)

        return state_out, logdet
def _full_x_update_forward(
self,
state: State,
step: int,
training: bool = None
):
"""Perform a full-step position update in the forward direction."""
m, mc = self._get_mask(step)
sumlogdet = tf.zeros((state.x.shape[0],))
state, logdet = self._update_x_forward(state, step,
(m, mc), training, first=True)
sumlogdet += logdet
state, logdet = self._update_x_forward(state, step,
(mc, m), training, first=False)
sumlogdet += logdet
return state, sumlogdet
    def _update_x_forward(
            self,
            state: State,
            step: int,
            masks: Tuple[tf.Tensor, tf.Tensor],  # [m, 1. - m]
            training: bool = None,
            first: bool = True,
    ):
        """Update the position `x` in the forward leapfrog step.

        Args:
            state (State): Input state.
            step (int): Current leapfrog step index; selects `xeps[step]`
                and the position network.
            masks (tuple): `(m, 1 - m)` complementary binary masks; only the
                entries selected by the second mask are updated.
            training (bool): Currently training?
            first (bool): Whether this is the first of the two masked
                sub-updates within the full position update.

        Returns:
            new_state (State): New state, with updated position.
            logdet (float): logdet of Jacobian factor.
        """
        # Plain leapfrog (no networks) when running generic HMC.
        if self.config.hmc:
            return super()._update_x_forward(state, step, masks, training)

        m, mc = masks
        eps = self.xeps[step]
        x = self.normalizer(state.x)
        S, T, Q = self._call_xnet((x, state.v), m, step, training, first)
        scale = self._xsw * (eps * S)
        transl = self._xtw * T
        transf = self._xqw * (eps * Q)
        expS = tf.exp(scale)
        expQ = tf.exp(transf)
        if self.config.use_ncp:
            # Non-compact projection: scale the angle through tan/atan so the
            # map remains bijective on the circle; the Jacobian below follows
            # from d/dx [2 * atan(exp(s) * tan(x/2))].
            _x = 2 * tf.math.atan(tf.math.tan(x/2.) * expS)
            _y = _x + eps * (state.v * expQ + transl)
            # Keep masked entries unchanged; update only the complement.
            xf = (m * x) + (mc * _y)
            cterm = tf.math.cos(x / 2) ** 2
            sterm = (expS * tf.math.sin(x / 2)) ** 2
            logdet_ = tf.math.log(expS / (cterm + sterm))
            logdet = tf.reduce_sum(mc * logdet_, axis=1)
        else:
            # Standard affine update; the Jacobian is just the masked scale.
            y = x * expS + eps * (state.v * expQ + transl)
            xf = (m * x) + (mc * y)
            logdet = tf.reduce_sum(mc * scale, axis=1)

        xf = self.normalizer(xf)
        state_out = State(x=xf, v=state.v, beta=state.beta)

        return state_out, logdet
def _full_v_update_backward(
self,
state: State,
step: int,
training: bool = None
):
"""Perform a full update of the momentum in the backward direction."""
step_r = self.config.num_steps - step - 1
eps = self.veps[step_r]
x = self.normalizer(state.x)
grad = self.grad_potential(x, state.beta)
S, T, Q = self._call_vnet((x, grad), step_r, training)
scale = self._vsw * (-eps * S)
transf = self._vqw * (eps * Q)
transl = self._vtw * T
expS = tf.exp(scale)
expQ = tf.exp(transf)
vb = expS * (state.v + eps * (grad * expQ - transl))
state_out = State(x=x, v=vb, beta=state.beta)
logdet = tf.reduce_sum(scale, axis=1)
return state_out, logdet
def _half_v_update_backward(
self,
state: State,
step: int,
training: bool = None
):
"""Perform a half update of the momentum in the backward direction."""
step_r = self.config.num_steps - step - 1
eps = self.veps[step_r]
x = self.normalizer(state.x)
grad = self.grad_potential(x, state.beta)
S, T, Q = self._call_vnet((x, grad), step_r, training)
scale = self._vsw * (-0.5 * eps * S)
transf = self._vqw * (eps * Q)
transl = self._vtw * T
expS = tf.exp(scale)
expQ = tf.exp(transf)
vb = expS * (state.v + 0.5 * eps * (grad * expQ - transl))
state_out = State(x=x, v=vb, beta=state.beta)
logdet = tf.reduce_sum(scale, axis=1)
return state_out, logdet
def _update_v_backward(
self,
state: State,
step: int,
training: bool = None
):
"""Update the momentum `v` in the backward leapfrog step.
Args:
state (State): Input state.
t (float): Current leapfrog step, represented as periodic time.
training (bool): Currently training?
Returns:
new_state (State): New state, with updated momentum.
logdet (float): Jacobian factor.
"""
eps = self.veps[step]
x = self.normalizer(state.x)
grad = self.grad_potential(x, state.beta)
S, T, Q = self._call_vnet((x, grad), step, training)
scale = self._vsw * (-0.5 * eps * S)
transf = self._vqw * (eps * Q)
transl = self._vtw * T
expS = tf.exp(scale)
expQ = tf.exp(transf)
vb = expS * (state.v + 0.5 * eps * (grad * expQ - transl))
state_out = State(x=x, v=vb, beta=state.beta)
logdet = tf.reduce_sum(scale, axis=1)
return state_out, logdet
def _full_x_update_backward(
self,
state: State,
step: int,
training: bool = None
):
"""Perform a full-step position update in the backward direction."""
step_r = self.config.num_steps - step - 1
m, mc = self._get_mask(step_r)
sumlogdet = tf.zeros((state.x.shape[0],))
state, logdet = self._update_x_backward(state, step_r,
(mc, m), training)
sumlogdet += logdet
state, logdet = self._update_x_backward(state, step_r,
(m, mc), training)
sumlogdet += logdet
return state, sumlogdet
    def _update_x_backward(
            self,
            state: State,
            step: int,
            masks: Tuple[tf.Tensor, tf.Tensor],  # [m, 1. - m]
            training: bool = None,
            first: bool = True,
    ):
        """Update the position `x` in the backward leapfrog step.

        Inverts `_update_x_forward`: the scale enters with opposite sign and
        the translation/transformation terms are subtracted before scaling.

        Args:
            state (State): Input state.
            step (int): Current leapfrog step index; selects `xeps[step]`.
            masks (tuple): `(m, 1 - m)` complementary binary masks.
            training (bool): Currently training?
            first (bool): Whether this is the first masked sub-update.

        Returns:
            new_state (State): New state, with updated momentum.
            logdet (float): logdet of Jacobian factor.
        """
        # Plain leapfrog (no networks) when running generic HMC.
        if self.config.hmc:
            return super()._update_x_backward(state, step, masks, training)

        # Call `XNet` using `self._scattered_xnet`
        m, mc = masks
        eps = self.xeps[step]
        x = self.normalizer(state.x)
        S, T, Q = self._call_xnet((x, state.v), m, step, training, first)
        scale = self._xsw * (-eps * S)
        transl = self._xtw * T
        transf = self._xqw * (eps * Q)
        expS = tf.exp(scale)
        expQ = tf.exp(transf)
        if self.config.use_ncp:
            # NOTE(review): `term1` uses the raw `state.x` while the forward
            # update and the Jacobian terms below use the normalized `x` --
            # confirm this asymmetry is intentional.
            term1 = 2 * tf.math.atan(expS * tf.math.tan(state.x / 2))
            term2 = expS * eps * (state.v * expQ + transl)
            y = term1 - term2
            # Keep masked entries unchanged; update only the complement.
            xb = (m * x) + (mc * y)
            cterm = tf.math.cos(x / 2) ** 2
            sterm = (expS * tf.math.sin(x / 2)) ** 2
            logdet_ = tf.math.log(expS / (cterm + sterm))
            logdet = tf.reduce_sum(mc * logdet_, axis=1)
        else:
            # Inverse of the forward affine update.
            y = expS * (x - eps * (state.v * expQ + transl))
            xb = m * x + mc * y
            logdet = tf.reduce_sum(mc * scale, axis=1)

        xb = self.normalizer(xb)
        state_out = State(xb, v=state.v, beta=state.beta)

        return state_out, logdet
@staticmethod
def mixed_loss(loss: tf.Tensor, weight: float) -> (tf.Tensor):
"""Returns: tf.reduce_mean(weight / loss - loss / weight)."""
return tf.reduce_mean((weight / loss) - (loss / weight))
    def calc_losses(self, states: MonteCarloStates, accept_prob: tf.Tensor):
        """Calculate the total loss.

        Args:
            states (MonteCarloStates): Initial/proposed/output states from
                one MD trajectory.
            accept_prob (tf.Tensor): Per-chain Metropolis-Hastings
                acceptance probability.

        Returns:
            ploss: Plaquette-difference loss (0 if `plaq_weight <= 0`).
            qloss: Topological-charge-difference loss (0 if
                `charge_weight <= 0`).
        """
        wl_init = self.lattice.calc_wilson_loops(states.init.x)
        wl_prop = self.lattice.calc_wilson_loops(states.proposed.x)

        # Calculate the plaquette loss
        ploss = tf.constant(0.)
        if self.plaq_weight > 0:
            # Expected squared plaquette difference, weighted by the
            # probability the proposal is actually accepted.
            dwloops = 2 * (1. - tf.math.cos(wl_prop - wl_init))
            ploss = accept_prob * tf.reduce_sum(dwloops, axis=(1, 2))

            # ==== FIXME: Try using mixed loss??
            if self.config.use_mixed_loss:
                ploss = self.mixed_loss(ploss, self.plaq_weight)
            else:
                ploss = tf.reduce_mean(-ploss / self.plaq_weight, axis=0)

        # Calculate the charge loss
        qloss = tf.constant(0.)
        if self.charge_weight > 0:
            # Continuous (sin-based) topological charge of each configuration.
            q_init = tf.reduce_sum(tf.sin(wl_init), axis=(1, 2)) / (2 * np.pi)
            q_prop = tf.reduce_sum(tf.sin(wl_prop), axis=(1, 2)) / (2 * np.pi)
            # Small epsilon keeps the mixed loss finite when the charge
            # difference vanishes.
            qloss = (accept_prob * (q_prop - q_init) ** 2) + 1e-4
            if self.config.use_mixed_loss:
                qloss = self.mixed_loss(qloss, self.charge_weight)
            else:
                qloss = tf.reduce_mean(-qloss / self.charge_weight, axis=0)

        return ploss, qloss
def _get_lr(self, step=None) -> (tf.Tensor):
if step is None:
step = self.optimizer.iterations
if callable(self.lr):
return self.lr(step)
return K.get_value(self.optimizer.lr)
@tf.function
def train_step(
self,
inputs: Tuple[tf.Tensor, tf.Tensor],
) -> tuple[tf.Tensor, AttrDict]:
"""Perform a single training step.
Returns:
states.out.x (tf.Tensor): Next `x` state in the Markov Chain.
metrics (AttrDict): Dictionary of various metrics for logging.
"""
def _traj_summ(x, key=None):
if key is not None:
return {
f'{key}': tf.squeeze(x),
f'{key}_start': x[0],
f'{key}_mid': x[midpt],
f'{key}_end': x[-1],
}
return (x[0], x[midpt], x[1])
start = time.time()
with tf.GradientTape() as tape:
x, beta = inputs
tape.watch(x)
states, metrics = self((x, beta), training=True)
accept_prob = metrics.get('accept_prob', None)
ploss, qloss = self.calc_losses(states, accept_prob)
loss = | |
logits = logits + weights['b_output']
if config.label_type == 'multi_head_sparse' or config.label_type == 'multi_head_one_hot':
logits2= tf.matmul(kc, weights['w_output_head2']) + weights['b_output_head2']
else:
logits2 = tf.constant(0, dtype=tf.float32)
self.logits = logits
self.logits2 = logits2
return logits, logits2
def save_pickle(self, epoch=None):
"""Save model using pickle.
This is quite space-inefficient. But it's easier to read out.
"""
save_path = self.save_path
if epoch is not None:
save_path = os.path.join(save_path, 'epoch', str(epoch).zfill(4))
print(save_path)
if not os.path.exists(save_path):
os.makedirs(save_path)
fname = os.path.join(save_path, 'model.pkl')
sess = tf.get_default_session()
var_dict = {v.name: sess.run(v) for v in tf.trainable_variables()}
if self.config.receptor_layer:
var_dict['w_or'] = sess.run(self.w_or)
var_dict['w_combined'] = np.matmul(sess.run(self.w_or), sess.run(self.w_orn))
var_dict['w_orn'] = sess.run(self.w_orn)
var_dict['w_glo'] = sess.run(self.w_glo)
with open(fname, 'wb') as f:
pickle.dump(var_dict, f, protocol=pickle.HIGHEST_PROTOCOL)
print("Model weights saved in path: %s" % save_path)
    def set_oracle_weights(self):
        """Set the weights to be prototype matching oracle weights.

        Loads the stored prototypes, runs them through the current network up
        to the KC layer, derives oracle readout weights from the prototype
        representations, and assigns them (scaled by `config.oracle_scale`)
        to the output layer's kernel and bias variables.
        """
        config = self.config
        sess = tf.get_default_session()
        prototype = np.load(os.path.join(config.data_dir, 'prototype.npy'))
        # Connection weights: oracle is computed in KC-representation space,
        # so feed the prototypes through the network first.
        prototype_repr = sess.run(self.kc, {self.x: prototype})
        w_oracle, b_oracle = _get_oracle(prototype_repr)
        w_oracle *= config.oracle_scale
        b_oracle *= config.oracle_scale
        # Locate the output-layer variables by their graph names.
        w_out = [v for v in tf.trainable_variables() if
                 v.name == 'model/layer3/kernel:0'][0]
        b_out = [v for v in tf.trainable_variables() if
                 v.name == 'model/layer3/bias:0'][0]
        sess.run(w_out.assign(w_oracle))
        sess.run(b_out.assign(b_oracle))
def _signed_dense(x, n0, n1, training, norm='batch_norm'):
    """Dense layer with sign-constrained (non-negative) weights.

    Args:
        x: Input tensor of shape (batch, n0).
        n0 (int): Input dimensionality.
        n1 (int): Output dimensionality.
        training (bool): Whether in training mode (affects normalization).
        norm (str): Normalization type forwarded to `_normalize`.

    Returns:
        ReLU activations of the normalized affine transform, shape (batch, n1).
    """
    w1 = tf.get_variable('kernel', shape=(n0, n1), dtype=tf.float32)
    b_orn = tf.get_variable('bias', shape=(n1,), dtype=tf.float32,
                            initializer=tf.zeros_initializer())
    # Constrain the effective weights to be non-negative (excitatory-only
    # connections); the unconstrained variable `w1` is what gets trained.
    w_orn = tf.abs(w1)
    glo_in_pre = tf.matmul(x, w_orn) + b_orn
    glo_in = _normalize(glo_in_pre, norm, training)
    glo = tf.nn.relu(glo_in)
    return glo
class RNN(Model):
    """Recurrent network model for the odor classification task.

    Input ORN activity is injected once, then propagated through a single
    recurrent layer with non-negative weights for `config.TIME_STEPS`
    steps before a dense readout.
    """

    def __init__(self, x, y, config=None, training=True):
        # x, y: tf placeholders or iterator elements; config: FullConfig-like.
        if config is None:
            config = FullConfig
        self.config = config
        super(RNN, self).__init__(config.save_path)

        with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
            self._build(x, y, training)

        if training:
            # optimizer = tf.train.GradientDescentOptimizer(config.lr)
            optimizer = tf.train.AdamOptimizer(config.lr)
            var_list = tf.trainable_variables()
            # Run batch-norm update ops together with the training op.
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(update_ops):
                self.train_op = optimizer.minimize(self.loss, var_list=var_list)

            print('Training variables')
            for v in var_list:
                print(v)

        self.saver = tf.train.Saver(max_to_keep=None)

    def _build(self, x, y, training):
        # Build the recurrent graph: input injection, TIME_STEPS recurrent
        # updates with non-negative weights, then a dense readout.
        config = self.config
        ORN_DUP = config.N_ORN_DUPLICATION
        N_ORN = config.N_ORN * ORN_DUP
        NOISE = config.ORN_NOISE_STD
        # Total population: duplicated ORNs plus additional hidden neurons.
        NEURONS = N_ORN + config.NEURONS
        TIME_STEPS = config.TIME_STEPS

        # Replicating ORNs through tiling
        assert x.shape[-1] == config.N_ORN
        x = tf.tile(x, [1, ORN_DUP])
        x += tf.random_normal(x.shape, stddev=NOISE)

        # Fixed 0/1 projection placing ORN inputs into the first N_ORN units.
        W_in_np = np.zeros([N_ORN, NEURONS])
        np.fill_diagonal(W_in_np, 1)
        W_in = tf.constant(W_in_np, dtype=tf.float32, name='W_in')
        rnn_output = tf.matmul(x, W_in)
        rnn_outputs = []
        rnn_outputs.append(rnn_output)

        with tf.variable_scope('layer_rnn', reuse=tf.AUTO_REUSE):
            initializer = _initializer(_sparse_range(config.N_ORN), arg='uniform')
            w_rnn = tf.get_variable('kernel', shape=(NEURONS, NEURONS), dtype=tf.float32, initializer=initializer)
            # Recurrent weights constrained to be non-negative.
            w_rnn = tf.abs(w_rnn)
            b_rnn = tf.get_variable('bias', shape=NEURONS, dtype=tf.float32, initializer=tf.constant_initializer(0))

            # Unrolled recurrence; every intermediate state is kept for
            # later analysis via `self.rnn_outputs`.
            for t in range(TIME_STEPS):
                rnn_output = tf.matmul(rnn_output, w_rnn) + b_rnn
                rnn_output = tf.nn.relu(rnn_output)
                rnn_outputs.append(rnn_output)

        if config.BATCH_NORM:
            rnn_output = _normalize(rnn_output, 'batch_norm', training)
        if config.dropout:
            rnn_output = tf.layers.dropout(rnn_output, config.dropout_rate, training=training)

        with tf.variable_scope('layer_out', reuse=tf.AUTO_REUSE):
            w_out = tf.get_variable('kernel', shape=(NEURONS, config.N_CLASS), dtype=tf.float32)
            b_out = tf.get_variable('bias', shape=config.N_CLASS, dtype=tf.float32)
            logits = tf.matmul(rnn_output, w_out) + b_out

        loss = tf.losses.sparse_softmax_cross_entropy(
            labels=y, logits=logits)
        self.loss = loss

        pred = tf.argmax(logits, axis=-1, output_type=tf.int32)
        self.acc = tf.reduce_mean(tf.to_float(tf.equal(pred, y)))

        self.logits = logits
        self.w_rnn = w_rnn
        self.b_rnn = b_rnn
        self.w_out = w_out
        self.b_out = b_out
        self.rnn_outputs = rnn_outputs

    def set_weights(self):
        """Set the weights to be prototype matching oracle weights."""
        sess = tf.get_default_session()
        w_rnn_tf = [v for v in tf.trainable_variables() if
                    v.name == 'model/layer_rnn/kernel:0'][0]
        w_rnn_values = sess.run(w_rnn_tf)
        # Force unit self-connections so each neuron retains its activity.
        np.fill_diagonal(w_rnn_values, 1)
        sess.run(w_rnn_tf.assign(w_rnn_values))

    def save_pickle(self, epoch=None):
        """Save model using pickle.

        This is quite space-inefficient. But it's easier to read out.
        """
        save_path = self.save_path
        if epoch is not None:
            save_path = os.path.join(save_path, 'epoch', str(epoch).zfill(4))
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        fname = os.path.join(save_path, 'model.pkl')
        sess = tf.get_default_session()
        # Dump every trainable variable keyed by its graph name, plus the
        # named recurrent/readout weights under friendly keys.
        var_dict = {v.name: sess.run(v) for v in tf.trainable_variables()}
        var_dict['w_rnn'] = sess.run(self.w_rnn)
        var_dict['b_rnn'] = sess.run(self.b_rnn)
        var_dict['w_out'] = sess.run(self.w_out)
        var_dict['b_out'] = sess.run(self.b_out)
        with open(fname, 'wb') as f:
            pickle.dump(var_dict, f, protocol=pickle.HIGHEST_PROTOCOL)
        print("Model weights saved in path: %s" % save_path)
class ParameterizeK(Model):
    """Simple network where K is parameterized as a single value.

    A PN -> KC -> class network in which the PN->KC weights are a fixed
    sparse 0/1 mask scaled by a single trainable scalar `K`.
    """

    def __init__(self, x, y, config=None, training=True):
        """Make model.

        Args:
            x: tf placeholder or iterator element (batch_size, N_ORN * N_ORN_DUPLICATION)
            y: tf placeholder or iterator element (batch_size, N_CLASS)
            config: configuration class
            training: bool
        """
        if config is None:
            config = FullConfig
        self.config = config
        super(ParameterizeK, self).__init__(config.save_path)

        with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
            self._build(x, y, training)

        if training:
            optimizer = tf.train.AdamOptimizer(config.lr)
            excludes = list()
            if not self.config.train_pn2kc:
                # Freeze the scalar K when PN->KC training is disabled.
                excludes += tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                              scope='model/layer2/K')
            var_list = [v for v in tf.trainable_variables() if v not in excludes]
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(update_ops):
                self.train_op = optimizer.minimize(self.loss, var_list=var_list)

            print('Training variables')
            for v in var_list:
                print(v)

        self.saver = tf.train.Saver(max_to_keep=None)

    def _build(self, x, y, training):
        """Build the graph: masked KC layer scaled by `K`, then a readout.

        (Removed several commented-out experimental variants -- sigmoid-mask
        parameterization and fully-trainable-kernel controls -- that were
        dead code.)
        """
        config = self.config
        N_PN = config.N_PN
        N_KC = config.N_KC
        N_LOGITS = config.N_CLASS

        assert x.shape[-1] == config.N_ORN
        assert config.N_ORN == config.N_PN

        with tf.variable_scope('layer2', reuse=tf.AUTO_REUSE):
            factor = 1
            # Initialize K at the usual sparse-initialization scale.
            # Renamed from `range` to avoid shadowing the builtin.
            init_range = _sparse_range(config.kc_inputs)
            bias_initializer = tf.constant_initializer(config.kc_bias)
            K = tf.get_variable('K', shape=(), dtype=tf.float32,
                                initializer=tf.constant_initializer(init_range),
                                trainable=True)
            b_glo = tf.get_variable('bias', shape=(N_KC,), dtype=tf.float32,
                                    initializer=bias_initializer)
            # Fixed sparse connectivity; only the scalar K is trainable.
            w_mask = get_sparse_mask(N_PN, N_KC, config.kc_inputs)
            w_mask = tf.get_variable('mask', shape=(N_PN, N_KC), dtype=tf.float32,
                                     initializer=tf.constant_initializer(w_mask), trainable=False)
            w_glo = tf.multiply(K, w_mask)
            w_glo = tf.abs(w_glo)

        with tf.variable_scope('layer3', reuse=tf.AUTO_REUSE):
            w_logit = tf.get_variable('kernel', shape=(N_KC, N_LOGITS), dtype=tf.float32)
            b_logit = tf.get_variable('bias', shape=(N_LOGITS,), dtype=tf.float32,
                                      initializer=tf.zeros_initializer())
            w_logit = tf.abs(w_logit)

        kc = tf.nn.relu(tf.matmul(x, w_glo) + b_glo)
        if config.kc_dropout:
            kc = tf.layers.dropout(kc, config.kc_dropout_rate, training=training)
        logits = tf.matmul(kc, w_logit) + b_logit

        # parameters
        self.K = K * factor
        self.w_glo = w_glo
        self.b_glo = b_glo
        # activities
        self.x = x
        self.kc = kc
        self.logits = logits
        # losses
        self.loss = tf.losses.sparse_softmax_cross_entropy(labels=y, logits=logits)
        pred = tf.argmax(logits, axis=-1, output_type=tf.int32)
        self.acc = tf.reduce_mean(tf.to_float(tf.equal(pred, y)))

    def save_pickle(self, epoch=None):
        """Save model using pickle.

        This is quite space-inefficient. But it's easier to read out.
        """
        save_path = self.save_path
        if epoch is not None:
            save_path = os.path.join(save_path, 'epoch', str(epoch).zfill(4))
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        fname = os.path.join(save_path, 'model.pkl')
        sess = tf.get_default_session()
        # Dump every trainable variable, plus the derived quantities.
        var_dict = {v.name: sess.run(v) for v in tf.trainable_variables()}
        var_dict['w_glo'] = sess.run(self.w_glo)
        var_dict['K'] = sess.run(self.K)
        with open(fname, 'wb') as f:
            pickle.dump(var_dict, f, protocol=pickle.HIGHEST_PROTOCOL)
        print("Model weights saved in path: %s" % save_path)
class NormalizedMLP(Model):
    """Normalized multi-layer perceptron model.
    This model is simplified compared to the full model, with fewer options available
    """

    def __init__(self, x, y, config=None, training=True):
        """Make model.
        Args:
            x: tf placeholder or iterator element (batch_size, N_ORN * N_ORN_DUPLICATION)
            y: tf placeholder or iterator element (batch_size, N_CLASS)
            config: configuration class
            training: bool
        """
        if config is None:
            config = FullConfig
        self.config = config
        super(NormalizedMLP, self).__init__(config.save_path)
        # Shared 'model' scope with AUTO_REUSE so a second instantiation
        # (e.g. a validation graph) reuses the same variables.
        with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
            self._build(x, y, training)
        if training:
            optimizer = tf.train.AdamOptimizer(config.lr)
            var_list = tf.trainable_variables()
            # Run UPDATE_OPS (e.g. normalization statistics) with each step.
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(update_ops):
                self.train_op = optimizer.minimize(self.loss, var_list=var_list)
            print('Training variables')
            for v in var_list:
                print(v)
        self.saver = tf.train.Saver(max_to_keep=None)

    def _build(self, x, y, training):
        """Build the duplicated-ORN -> hidden layers -> logits graph.

        Sets self.loss (sparse softmax cross-entropy), self.acc, self.logits.
        """
        config = self.config
        ORN_DUP = config.N_ORN_DUPLICATION
        N_ORN = config.N_ORN * ORN_DUP
        # Layer widths: [duplicated input, hidden_1, ..., hidden_n].
        NEURONS = [N_ORN] + list(config.NEURONS)
        n_layer = len(config.NEURONS)  # number of hidden layers
        # Replicating ORNs through tiling
        assert x.shape[-1] == config.N_ORN
        x = tf.tile(x, [1, ORN_DUP])
        # Independent noise added to each duplicated ORN copy.
        x += tf.random_normal(x.shape, stddev=config.ORN_NOISE_STD)
        if config.orn_dropout:
            # NOTE(review): training=True keeps input dropout active at
            # evaluation time as well, unlike the hidden layers which
            # receive the `training` flag — confirm this is intentional.
            x = tf.layers.dropout(x, config.orn_dropout_rate,
                                  training=True)
        y_hat = x
        for i_layer in range(n_layer):
            layername = 'layer' + str(i_layer+1)
            with tf.variable_scope(layername, reuse=tf.AUTO_REUSE):
                # _signed_dense is a project helper defined elsewhere in
                # this file; presumably a sign-constrained dense layer.
                y_hat = _signed_dense(
                    y_hat, NEURONS[i_layer], NEURONS[i_layer+1], training)
        # Final readout layer, named layer<n+1> for checkpoint consistency.
        layername = 'layer' + str(n_layer + 1)
        logits = tf.layers.dense(y_hat, config.N_CLASS,
                                 name=layername, reuse=tf.AUTO_REUSE)
        self.loss = tf.losses.sparse_softmax_cross_entropy(
            labels=y, logits=logits)
        pred = tf.argmax(logits, axis=-1, output_type=tf.int32)
        self.acc = tf.reduce_mean(tf.to_float(tf.equal(pred, y)))
        self.logits = logits
class AutoEncoder(Model):
"""Simple autoencoder network."""
def __init__(self, x, y, config=None, training=True):
"""Make model.
Args:
x: tf placeholder or iterator element (batch_size, N_ORN * N_ORN_DUPLICATION)
y: tf placeholder or iterator element (batch_size, N_CLASS)
config: configuration class
training: bool
"""
if config is None:
config = FullConfig
self.config = config
super(AutoEncoder, self).__init__(config.save_path)
with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
| |
await msg.edit(content="")
await msg.edit(
content=f"```diff\n{int(msg_react.content)}ページ/{len(embeds)}ページ目を表示中\n見たいページを発言してください。\n30秒経ったら処理は止まります。\n0と発言したら強制的に処理は止まります。```",
embed=embeds[int(msg_react.content) - 1])
except asyncio.TimeoutError: # wait_forの時間制限を超過した場合
# このcontentの中にはゼロ幅スペースが入っています。Noneでもいいのですが編集者はこっちの方が分かりやすいからこうしています。
return await msg.edit(content="", embed=Embed(title=f"時間切れです..."))
except (NotFound, asyncio.TimeoutError, Forbidden): # 編集した際に文字が見つからなかった, wait_forの時間制限を超過した場合, メッセージに接続できなかった
return
# @bot.command(name='rranking', aliases=['rrank'], pass_context=True, description='ユーザーコマンド') # コマンド名:『ranking』 省略コマンド:『rank』
# async def ranking(ctx): #既に存在する関数名だったらERROR出るのでもし今後コマンドを追加するならコマンド名と同じ関数名にして下さい。
# f"""
# 各種ランキングの表示
# 各種100位まで表示するようにしております。
# 10位ごとに勝手にページが分けられます。
# f"""
# try: # ERRORが起きるか起きないか。起きたらexceptに飛ばされる
# r_dict = {'0⃣': "プレイヤーランキング", '1⃣': "BOTランキング", '2⃣': "倒した数ランキング"}
# msg = await ctx.send(embed=Embed(description="\n".join([f"{r[0]}:`{r[1]}`" for r in list(r_dict.items())]) + "\n見たい番号を発言してください。").set_author(name="【論外】Ranking一覧:"))
# msg_react = await bot.wait_for('message', check=lambda message: message.author == ctx.author and message.content.isdigit() and 0 <= int(message.content) <= len(list(r_dict.keys())) - 1, timeout=10)
# # await bot.wait_for('message')で返ってくるのは文字列型
# if msg_react.content == "0":
# playerlist = conn.execute("SELECT user_id, experience FROM player ORDER BY experience DESC").fetchall()
# kekka = {}
# rongaina = []
# rongai = conn.execute("SELECT user_id FROM item WHERE item_id=-10 ORDER BY item_id").fetchall()
# for a in rongai:
# rongaina.append(a[0])
# for players in playerlist:
# p = bot.get_user(int(players[0])) # ユーザーID
# if not p: continue
# user = p
# if not user.id in kekka:
# for r in rongaina:
# if user.id == r:
# kekka[user.id] = [user.name, int(math.sqrt(players[1]))]
# if len(kekka) > 99: break
# players_rank = "\n".join("{}位:`{}` (Lv{})".format(i + 1, a[0], a[1]) for i, a in enumerate(kekka.values()))
# # データ10個ごとにページ分け
# ranking_msgs = ["\n".join(players_rank.split("\n")[i:i + 10]) for i in range(0, 100, 10)]
# author = "世界Top100論外プレイヤー"
# if msg_react.content == "1":
# # BOTはisbotの中身を1で登録してるのでそこで判断して全データを取得させます。
# playerlist = conn.execute(
# "SELECT user_id, experience FROM player WHERE bot=1 ORDER BY experience DESC").fetchall()
# kekka = {}
# rongaina = []
# rongai = conn.execute("SELECT user_id FROM item WHERE item_id=-10 ORDER BY item_id").fetchall()
# for a in rongai:
# rongaina.append(a[0])
# for players in playerlist:
# p = bot.get_user(int(players[0])) # ユーザーID
# if not p: continue
# user = p
# if not user.id in kekka:
# if user.id in rongaina:
# kekka[user.id] = [user.name, int(math.sqrt(players[1]))]
# if len(kekka) > 99: break
# players_rank = "\n".join("{}位:`{}` (Lv{})".format(i + 1, a[0], a[1]) for i, a in enumerate(kekka.values()))
# # データ10個ごとにページ分け
# ranking_msgs = ["\n".join(players_rank.split("\n")[i:i + 10]) for i in range(0, 100, 10)]
# author = "世界Top100論外ボット"
# elif msg_react.content == "2":
# playerlist = conn.execute("SELECT user_id, count FROM monster_count ORDER BY count DESC").fetchall()
# isbot = conn.execute("SELECT user_id FROM player WHERE bot=0 ORDER BY user_id").fetchall()
# all = []
# kekka = {}
# rongaina = []
# rongai = conn.execute("SELECT user_id FROM item WHERE item_id=-10 ORDER BY item_id").fetchall()
# for a in rongai:
# rongaina.append(a[0])
# for a in isbot:
# all.append(a[0])
# for players in playerlist:
# p = bot.get_user(int(players[0])) # ユーザーID
# if not p: continue
# user = p
# if not user.id in kekka:
# if user.id in rongaina:
# if user.id in all:
# kekka[user.id] = [user.name, int(players[1])]
# if len(kekka) > 99: break
# players_rank = "\n".join("{}位:`{}` ({}体)".format(i + 1, a[0], a[1]) for i, a in enumerate(kekka.values()))
# # データ10個ごとにページ分け
# ranking_msgs = ["\n".join(players_rank.split("\n")[i:i + 10]) for i in range(0, 100, 10)]
# author = "倒した数論外プレイヤーTop100"
#
# if not list(filter(lambda a: a != '', ranking_msgs)):
# return await ctx.send(embed=Embed(description="まだデータはないようだ..."))
#
# embeds = []
# for embed in list(filter(lambda a: a != '', ranking_msgs)):
# embeds.append(Embed(description=embed, color=0xff0000).set_author(name=author))
#
# await msg.edit(content=f"```diff\n1ページ/{len(embeds)}ページ目を表示中\n見たいページを発言してください。\n30秒経ったら処理は止まります。\n0と発言したら強制的に処理は止まります。```", embed=embeds[0])
# while True: # 処理が終わる(return)まで無限ループ
# try: # ERRORが起きるか起きないか。起きたらexceptに飛ばされる
# msg_react = await bot.wait_for('message', check=lambda m: m.author == ctx.author and m.content.isdigit() and 0 <= int(m.content) <= len(embeds), timeout=30)
# # await bot.wait_for('message')で返ってくるのは文字列型
# if msg_react.content == "0":
# # このcontentの中にはゼロ幅スペースが入っています。Noneでもいいのですが編集者はこっちの方が分かりやすいからこうしています。
# return await msg.edit(content="")
# await msg.edit(content=f"```diff\n{int(msg_react.content)}ページ/{len(embeds)}ページ目を表示中\n見たいページを発言してください。\n30秒経ったら処理は止まります。\n0と発言したら強制的に処理は止まります。```", embed=embeds[int(msg_react.content)-1])
# except asyncio.TimeoutError: # wait_forの時間制限を超過した場合
# # このcontentの中にはゼロ幅スペースが入っています。Noneでもいいのですが編集者はこっちの方が分かりやすいからこうしています。
# return await msg.edit(content="", embed=Embed(title=f"時間切れです..."))
#
# except (NotFound, asyncio.TimeoutError, Forbidden): # 編集した際に文字が見つからなかった, wait_forの時間制限を超過した場合, メッセージに接続できなかった
# return
@bot.command(name='help', pass_context=True, description='helpを表示')
async def help(ctx):
    """Send the command-overview embed to the invoking channel."""
    embed = discord.Embed(
        title="MMO Second(仮)の遊び方",
        description=f"[このBOTの招待](<https://discordapp.com/api/oauth2/authorize?client_id=606313830288588811&permissions=904257&scope=bot>) ,[このBOTの公式サーバー](<https://discord.gg/vfCXj33>)\n現在**{len(bot.guilds)}**鯖がこのBOTを導入しています\n```\nselfBOTやマクロでのゲームプレイはBAN案件ですのでご了承ください\n```",
        color=0x2ECC69
    )
    embed.set_thumbnail(url=bot.user.avatar_url)
    # (command, explanation) pairs rendered as one embed field each.
    command_fields = [
        ("!!help", "このメッセージを表示します"),
        ("!!attack/atk", "チャンネル上のモンスターに攻撃します"),
        ("!!item/i", "アイテム名/アイテム名の頭文字(e,f,i))**\n**選択したアイテムを使用します [例 ::i f]"),
        ("!!status/st", "自分のステータスを表示します"),
        ("!!reset/re", "バトルをやり直します"),
        ("!!srank", "TOP10サーバーを表示します"),
        ("!!prank", "TOP10プレーヤーを表示します"),
        ("!!t", "4字熟語クイズトレーニングをします"),
        ("!!q", "4択クイズトレーニングをします"),
        ("!!invite/inv", "招待リンクなどを表示します"),
    ]
    for field_name, field_value in command_fields:
        embed.add_field(name=field_name, value=field_value, inline=False)
    await ctx.message.channel.send(embed=embed)
@tasks.loop(seconds=1)
async def login_loop():
    """Clear the daily-login table once a day.

    Polled every second; fires only in the single second the wall clock
    reads exactly 00:00:00.
    """
    now = datetime.now().strftime('%H:%M:%S')
    if now != '00:00:00':
        return
    con = psycopg2.connect(os.environ.get("DATABASE_URL"))
    try:
        c = con.cursor()
        # BUG FIX: psycopg2's cursor.execute() returns None, so the old
        # chained `.fetchall()` raised AttributeError every midnight.
        c.execute("SELECT user_id FROM login ORDER BY user_id")
        kazu = c.fetchall()
        for log in kazu:
            # BUG FIX: the deletes previously went through the module-level
            # sqlite `conn`, but the login table lives in Postgres.
            # Parameterized instead of f-string interpolation.
            c.execute("DELETE FROM login WHERE user_id=%s", (log[0],))
        con.commit()
        # NOTE(review): the in-memory login_zumi cache used by the login
        # command is not reset here — confirm it is cleared elsewhere.
    finally:
        con.close()
@bot.command(name="login")
async def login(ctx):
if ctx.message.author.id in login_zumi:
embed = Embed(description=f"""{ctx.message.author.mention}さん、\n今日はもうログイン済みです!""")
await ctx.send(embed=embed)
return
else:
con = psycopg2.connect(os.environ.get("DATABASE_URL"))
c = con.cursor()
c.execute(f"INSERT INTO login values({ctx.message.author.id})")
con.commit()
con = psycopg2.connect(os.environ.get("DATABASE_URL"))
c = con.cursor()
c.execute(f"SELECT count FROM item WHERE user_id={ctx.message.author.id} AND item_id=-9").fetchone()
con.commit()
acount = c.fetchone()
if not acount:
con = psycopg2.connect(os.environ.get("DATABASE_URL"))
c = con.cursor()
c.execute(f"INSERT INTO item values({ctx.message.author.id}, -9, 1)")
con.commit()
embed = Embed(description=f"""{ctx.message.author.mention}さん、ログインを確認しました。\n現在1日目です!""")
await ctx.send(embed=embed)
embedl = discord.Embed(
description=f"""名前:**{ctx.message.author.name}**\nid:{ctx.message.author.id}\n日数:1日目""")
await bot.get_channel(713413670239076434).send(embed=embedl)
return
else:
con = psycopg2.connect(os.environ.get("DATABASE_URL"))
c = con.cursor()
c.execute(f"SELECT count FROM item WHERE user_id={ctx.message.author.id} AND item_id=-9").fetchone()
con.commit()
bcount = c.fetchone()
con = psycopg2.connect(os.environ.get("DATABASE_URL"))
c = con.cursor()
c.execute(f"UPDATE item SET count={bcount[0] + 1} WHERE user_id={ctx.message.author.id} AND item_id=-9")
con.commit()
embed = Embed(description=f"""{ctx.message.author.mention}さん、ログインを確認しました。\n現在{bcount[0] + 1}日目です!""")
await ctx.send(embed=embed)
embedl = discord.Embed(
description=f"""名前:**{ctx.message.author.name}**\nid:{ctx.message.author.id}\n日数:{bcount[0] + 1}日目""")
await bot.get_channel(713413670239076434).send(embed=embedl)
return
@bot.command(name='invite', aliases=['inv'], pass_context=True, description='このBOTの導入方法')
async def invite(ctx):
    """このbotの導入方法を表示します"""
    # Random accent colour, like the other informational embeds.
    embed_color = discord.Color(random.randint(0, 0xFFFFFF))
    invite_embed = discord.Embed(
        title="このBOTの導入",
        description=f"[このBOTの招待](<https://discordapp.com/api/oauth2/authorize?client_id=606313830288588811&permissions=904257&scope=bot>) ,[このBOTの公式サーバー](<https://discord.gg/vfCXj33>)\n現在**{len(bot.guilds)}**鯖がこのBOTを導入しています",
        color=embed_color,
    )
    return await ctx.send(embed=invite_embed)
@bot.event
async def on_guild_remove(guild):
    """Log guild removals to the audit channel and refresh the presence."""
    log_channel = bot.get_channel(671318322394169345)
    log_embed = discord.Embed(
        title="追放されたログ",
        description=f"鯖名:{guild.name}\nID:{guild.id}\n鯖Owner:{guild.owner}\n鯖",
        color=discord.Color(random.randint(0, 0xFFFFFF)),
    )
    await log_channel.send(embed=log_embed)
    # Presence shows the current guild count.
    await bot.change_presence(activity=discord.Game(name="!!help |{}鯖".format(len(bot.guilds))))
@bot.event
async def on_guild_join(guild):
    """Log new guilds to the audit channel and refresh the presence."""
    log_channel = bot.get_channel(671317297318854668)
    log_embed = discord.Embed(
        title="導入ログ",
        description=f"鯖名:{guild.name}\nID:{guild.id}\n鯖Owner:{guild.owner}\n鯖",
        color=discord.Color(random.randint(0, 0xFFFFFF)),
    )
    await log_channel.send(embed=log_embed)
    # Presence shows the current guild count.
    await bot.change_presence(activity=discord.Game(name="!!help |{}鯖".format(len(bot.guilds))))
# Module-level state shared by the game commands.
channel_in_transaction = []   # presumably channels mid-battle — confirm against the attack command
special_monster = {}
very_special_monster = {}
kyou_teki = {}
tyoukyou_teki = {}
counts = 0  # was assigned twice (duplicate removed); single assignment suffices

# Reaction emoji used by the confirmation / pagination UIs.
no = '👎'
ok = '👍'
left = '⏪'
right = '⏩'
# def insert_returns(body):
# # insert return stmt if the last expression is a expression statement
# if isinstance(body[-1], ast.Expr):
# body[-1] = ast.Return(body[-1].value)
# ast.fix_missing_locations(body[-1])
#
# # for if statements, we insert returns into the body and the orelse
# if isinstance(body[-1], ast.If):
# insert_returns(body[-1].body)
# insert_returns(body[-1].orelse)
#
# # for with blocks, again we insert returns into the body
# if isinstance(body[-1], ast.With):
# insert_returns(body[-1].body)
#
#
# @bot.command(name='eval')
# async def eval_fn(ctx, *, cmd):
# fn_name = "_eval_expr"
#
# cmd = cmd.strip("` ")
# cmd = "\n".join(f" {i}" for i in cmd.splitlines())
# body = f"async def {fn_name}():\n{cmd}"
#
# parsed = ast.parse(body)
# body = parsed.body[0].body
#
# insert_returns(body)
#
# env = {
# 'bot': ctx.bot,
# 'discord': discord,
# 'commands': commands,
# 'ctx': ctx,
# '__import__': __import__
# }
# exec(compile(parsed, filename="<ast>", mode="exec"), env)
#
# result = (await eval(f"{fn_name}()", env))
# await ctx.send(result)
def insert_returns(body):
    """Rewrite the tail of *body* so its final expression is returned.

    If the last statement is a bare expression it becomes a ``return``;
    for a trailing ``if`` or ``with`` block the rewrite recurses into the
    nested bodies instead. Mutates *body* in place.
    """
    tail = body[-1]
    if isinstance(tail, ast.Expr):
        # Promote the trailing expression statement to an explicit return.
        body[-1] = ast.Return(tail.value)
        ast.fix_missing_locations(body[-1])
    elif isinstance(tail, ast.If):
        insert_returns(tail.body)
        insert_returns(tail.orelse)
    elif isinstance(tail, ast.With):
        insert_returns(tail.body)
# User IDs permitted to run the eval command.
_EVAL_OWNER_ID = 421971957081309194
_EVAL_ADMIN_IDS = (673421485003636747, 586157827400400907)
# Substrings refused for non-owner admins.
_EVAL_BLOCKLIST = (".delete(", ".ban(", ".kick(", "token", "os.", "conn", "json")


async def _run_eval(ctx, cmd, announce):
    """Compile *cmd* into an async function and await it.

    Progress is reported through reactions (owo while running, then
    ✅ on success / ‼ on failure); the exception is echoed to the channel.

    SECURITY: this executes arbitrary code via exec/eval. Access is gated
    on the hard-coded IDs above; never widen that list to untrusted users.
    """
    try:
        if ctx.message:
            await ctx.message.add_reaction("<:owo1:668823024195207198>")
        if announce:
            await ctx.send(f"<@{ctx.message.author.id}> によるevalです!")
        # Strip a surrounding ```py / ``` code fence if present.
        if cmd.startswith("```py"):
            cmd = f"{cmd}"[5:][:-3]
        elif cmd.startswith("```"):
            cmd = f"{cmd}"[3:][:-3]
        fn_name = "_eval_expr"
        cmd = cmd.strip("` ")
        # Indent every line so it becomes the body of an async wrapper.
        cmd = "\n".join(f" {i}" for i in cmd.splitlines())
        body = f"async def {fn_name}():\n{cmd}"
        parsed = ast.parse(body)
        env = {
            'bot': ctx.bot,
            'discord': discord,
            'asyncio': asyncio, 'random': random, 'datetime': datetime, 're': re,
            'commands': commands,
            'ctx': ctx,
            'json': json,
            'sqlite3': sqlite3,
            'os': os,
            'conn': sqlite3.connect("mmo.db"),
            '__import__': __import__
        }
        exec(compile(parsed, filename="<ast>", mode="exec"), env)
        await eval(f"{fn_name}()", env)
        if ctx.message is not None:
            await ctx.message.remove_reaction("<:owo1:668823024195207198>", ctx.guild.me)
            await ctx.message.add_reaction("✅")
    except Exception as e:
        await ctx.send([e])
        if ctx.message is not None:
            await ctx.message.remove_reaction("<:owo1:668823024195207198>", ctx.guild.me)
            await ctx.message.add_reaction("‼")


@bot.command(name='eval')
async def eval_fn(ctx, *, cmd):
    """Owner/admin-only arbitrary code execution.

    BUG FIX: the admin-ID comparison contained a literal `<PASSWORD>`
    placeholder, which was a syntax error; the ID literal is restored.
    The two previously duplicated ~35-line execution paths now share
    _run_eval; only the announcement and the blocklist differ per role.
    """
    author_id = ctx.message.author.id
    if author_id != _EVAL_OWNER_ID and author_id not in _EVAL_ADMIN_IDS:
        return
    if author_id in _EVAL_ADMIN_IDS:
        # Non-owner admins get a crude substring blocklist and a public
        # announcement of who ran the eval.
        if any(blocked in cmd for blocked in _EVAL_BLOCKLIST):
            await ctx.send("危険なコードを仕込むんなねぇ!!")
            if ctx.message is not None:
                return await ctx.message.add_reaction("‼")
            return
        await _run_eval(ctx, cmd, announce=True)
    else:
        await _run_eval(ctx, cmd, announce=False)
@bot.command(name='db')
async def eval_fn(ctx, *, cmd):
if not ctx.message.author.id == <PASSWORD>:
return
fn_name = "_eval_expr"
cmd = cmd.strip("` ")
cmd = "\n".join(f" {i}" for i in cmd.splitlines())
body = f"async def {fn_name}():\n conn.execute('{cmd}').fetchall()[0]"
| |
<filename>python/src/ties/test/schema_validation_tests.py
################################################################################
# Copyright 2019 Noblis, Inc #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
################################################################################
from __future__ import unicode_literals
import json
import os
import unittest
from tempfile import mkstemp
from unittest import TestCase
from ties.schema_validation import SchemaValidator, TiesSchemaValidator, load_schema, object_relationship_pointer
test_input_str = """\
{
"version": "0.9",
"securityTag": "UNCLASSIFIED",
"objectItems": [
{
"objectId": "a",
"sha256Hash": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"md5Hash": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"authorityInformation": {
"securityTag": "UNCLASSIFIED"
}
}
]
}"""
class SchemaValidatorTests(TestCase):
    """Smoke tests: a minimal valid TIES document passes validation when
    supplied as a JSON string, an open file, or a parsed dict."""

    def setUp(self):
        # The same fixture in its three accepted representations.
        self._test_input_str = test_input_str
        handle, self._test_input_file_path = mkstemp()
        with os.fdopen(handle, 'w') as tmp:
            tmp.write(self._test_input_str)
        self._test_input_file = open(self._test_input_file_path, 'r', encoding='utf-8')
        self._test_input_dict = json.loads(self._test_input_str)
        self._schema_validator = SchemaValidator()

    def tearDown(self):
        self._test_input_file.close()
        try:
            os.remove(self._test_input_file_path)
        except Exception:  # pylint: disable=broad-except
            pass  # best-effort cleanup of the temp file

    def test_load_schema_ties(self):
        top_level_keys = {'version', 'id', 'system', 'organization', 'time',
                          'description', 'type', 'securityTag', 'objectItems',
                          'objectGroups', 'objectRelationships', 'otherInformation'}
        self.assertSetEqual(set(load_schema()['properties']), top_level_keys)

    def test_load_schema_sub_schema(self):
        sub_schema = load_schema(json_pointer=object_relationship_pointer)
        self.assertSetEqual(
            set(sub_schema['properties']),
            {'linkageMemberIds', 'linkageDirectionality', 'linkageType',
             'linkageAssertionId', 'otherInformation'})

    def test_validate_json_str(self):
        self._schema_validator.validate(self._test_input_str)

    def test_validate_json_file(self):
        self._schema_validator.validate(self._test_input_file)

    def test_validate_json_dict(self):
        self._schema_validator.validate(self._test_input_dict)

    def test_all_errors_json_str(self):
        self.assertEqual(self._schema_validator.all_errors(self._test_input_str), [])

    def test_all_errors_json_file(self):
        self.assertEqual(self._schema_validator.all_errors(self._test_input_file), [])

    def test_all_errors_json_dict(self):
        self.assertEqual(self._schema_validator.all_errors(self._test_input_dict), [])
class AnnotationSchemaTests(TestCase):
    """Schema-validation tests for the annotation assertion object.

    Each test perturbs one field of a fully populated annotation and
    checks the exact validation error(s) produced. The validate/assert
    plumbing repeated in every original test is factored into helpers.
    """

    # Location of the annotation under test in every produced error.
    _LOCATION = '/objectItems[0]/objectAssertions/annotations[0]'

    def setUp(self):
        self.annotation = {
            'assertionId': 'a',
            'assertionReferenceId': 'a',
            'assertionReferenceIdLabel': 'a',
            'time': 'a',
            'annotationType': 'a',
            'key': 'a',
            'value': 'a',
            'itemAction': 'a',
            'itemActionTime': 'a',
            'creator': 'a',
            'system': 'a',
            'securityTag': '',
        }
        self.object_item = {
            'objectId': 'a',
            'sha256Hash': 'a' * 64,
            'md5Hash': 'a' * 32,
            'authorityInformation': {
                'securityTag': 'a'
            },
            'objectAssertions': {
                'annotations': [self.annotation]
            }
        }
        self.ties = {
            'version': '0.9',
            'securityTag': 'a',
            'objectItems': [self.object_item]
        }

    # --- helpers ---------------------------------------------------------

    def _validate(self):
        """Run the full-document validator over the current fixture."""
        return TiesSchemaValidator().all_errors(json.dumps(self.ties))

    def _assert_valid(self):
        """Assert the current fixture produces no validation errors."""
        self.assertEqual(len(self._validate()), 0)

    def _assert_single_error(self, message, location=_LOCATION):
        """Assert exactly one error with the given message and location."""
        errors = self._validate()
        self.assertEqual(len(errors), 1)
        self.assertEqual(errors[0].message, message)
        self.assertEqual(errors[0].location, location)

    def _assert_too_short(self, field):
        """Empty *field* and assert the single min-length error for it."""
        self.annotation[field] = ''
        self._assert_single_error(
            "property value '' for {} property is too short, minimum length 1".format(field),
            '{}/{}'.format(self._LOCATION, field))

    # --- tests -----------------------------------------------------------

    def test_all_fields(self):
        self._assert_valid()

    def test_only_required_fields(self):
        for optional in ('assertionReferenceId', 'assertionReferenceIdLabel',
                         'time', 'key', 'itemAction', 'itemActionTime',
                         'creator', 'system'):
            del self.annotation[optional]
        self._assert_valid()

    def test_all_required_fields_missing(self):
        for required in ('assertionId', 'annotationType', 'value', 'securityTag'):
            del self.annotation[required]
        self._assert_single_error(
            'required properties [annotationType, assertionId, securityTag, value] are missing')

    def test_additional_field(self):
        self.annotation['foo'] = 'a'
        self._assert_single_error('additional property foo is not allowed')

    def test_additional_fields(self):
        self.annotation['foo'] = 'a'
        self.annotation['bar'] = 'a'
        self._assert_single_error('additional properties [bar, foo] are not allowed')

    def test_assertion_id_missing(self):
        del self.annotation['assertionId']
        self._assert_single_error('required property assertionId is missing')

    def test_assertion_id_too_short(self):
        self._assert_too_short('assertionId')

    def test_assertion_id_too_long(self):
        self.annotation['assertionId'] = 'a' * 257
        self._assert_single_error(
            "property value '{}' for assertionId property is too long, maximum length 256".format('a' * 257),
            self._LOCATION + '/assertionId')

    def test_assertion_reference_id_missing(self):
        del self.annotation['assertionReferenceId']
        self._assert_valid()

    def test_assertion_reference_id_too_short(self):
        self._assert_too_short('assertionReferenceId')

    def test_assertion_reference_id_label_missing(self):
        del self.annotation['assertionReferenceIdLabel']
        self._assert_valid()

    def test_assertion_reference_id_label_too_short(self):
        self._assert_too_short('assertionReferenceIdLabel')

    def test_time_missing(self):
        del self.annotation['time']
        self._assert_valid()

    def test_annotationType_missing(self):
        del self.annotation['annotationType']
        self._assert_single_error('required property annotationType is missing')

    def test_annotationType_too_short(self):
        self._assert_too_short('annotationType')

    def test_key_missing(self):
        del self.annotation['key']
        self._assert_valid()

    def test_key_too_short(self):
        self._assert_too_short('key')

    def test_value_missing(self):
        del self.annotation['value']
        self._assert_single_error('required property value is missing')

    def test_value_too_short(self):
        self._assert_too_short('value')

    def test_item_action_missing(self):
        del self.annotation['itemAction']
        self._assert_valid()

    def test_item_action_too_short(self):
        self._assert_too_short('itemAction')

    def test_item_action_time_missing(self):
        del self.annotation['itemActionTime']
        self._assert_valid()

    def test_creator_missing(self):
        del self.annotation['creator']
        self._assert_valid()

    def test_creator_too_short(self):
        self._assert_too_short('creator')

    def test_system_missing(self):
        del self.annotation['system']
        self._assert_valid()

    def test_system_too_short(self):
        self._assert_too_short('system')

    def test_security_tag_missing(self):
        del self.annotation['securityTag']
        self._assert_single_error('required property securityTag is missing')
class AuthorityInformationSchemaTests(TestCase):
def setUp(self):
self.authority_information = {
'collectionId': 'a',
'collectionIdLabel': 'a',
'collectionIdAlias': 'a',
'collectionDescription': 'a',
'subCollectionId': 'a',
'subCollectionIdLabel': 'a',
'subCollectionIdAlias': 'a',
'subCollectionDescription': 'a',
'registrationDate': 'a',
'expirationDate': 'a',
'securityTag': '',
'owner': 'a',
}
self.object_item = {
'objectId': 'a',
'sha256Hash': 'a' * 64,
'md5Hash': 'a' * 32,
'authorityInformation': self.authority_information,
}
self.ties = {
'version': '0.9',
'securityTag': 'a',
'objectItems': [self.object_item]
}
def test_all_fields(self):
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
def test_only_required_fields(self):
del self.authority_information['collectionId']
del self.authority_information['collectionIdLabel']
del self.authority_information['collectionIdAlias']
del self.authority_information['collectionDescription']
del self.authority_information['subCollectionId']
del self.authority_information['subCollectionIdLabel']
del self.authority_information['subCollectionIdAlias']
del self.authority_information['subCollectionDescription']
del self.authority_information['registrationDate']
del self.authority_information['expirationDate']
del self.authority_information['owner']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
def test_all_required_fields_missing(self):
del self.authority_information['securityTag']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'required property securityTag is missing')
self.assertEqual(errors[0].location, '/objectItems[0]/authorityInformation')
def test_additional_field(self):
self.authority_information['foo'] = 'a'
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'additional property foo is not allowed')
self.assertEqual(errors[0].location, '/objectItems[0]/authorityInformation')
def test_additional_fields(self):
self.authority_information['foo'] = 'a'
self.authority_information['bar'] = 'a'
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'additional properties [bar, foo] are not allowed')
self.assertEqual(errors[0].location, '/objectItems[0]/authorityInformation')
def test_collection_id_missing(self):
del self.authority_information['collectionId']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
def test_collection_id_too_short(self):
self.authority_information['collectionId'] = ''
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, "property value '' for collectionId property is too short, minimum length 1")
self.assertEqual(errors[0].location, '/objectItems[0]/authorityInformation/collectionId')
def test_collection_id_label_missing(self):
del self.authority_information['collectionIdLabel']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
def test_collection_id_label_too_short(self):
self.authority_information['collectionIdLabel'] = ''
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, "property value '' for collectionIdLabel property is too short, minimum length 1")
self.assertEqual(errors[0].location, '/objectItems[0]/authorityInformation/collectionIdLabel')
def test_collection_id_alias_missing(self):
del self.authority_information['collectionIdAlias']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
def test_collection_id_alias_too_short(self):
self.authority_information['collectionIdAlias'] = ''
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, "property value '' for collectionIdAlias property is too short, minimum length 1")
self.assertEqual(errors[0].location, '/objectItems[0]/authorityInformation/collectionIdAlias')
def test_collection_description_missing(self):
del self.authority_information['collectionDescription']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
def test_collection_description_too_short(self):
self.authority_information['collectionDescription'] = ''
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, "property value '' for collectionDescription property is too short, minimum length 1")
self.assertEqual(errors[0].location, '/objectItems[0]/authorityInformation/collectionDescription')
def test_sub_collection_id_missing(self):
del self.authority_information['subCollectionId']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
def test_sub_collection_id_too_short(self):
self.authority_information['subCollectionId'] = ''
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, "property value '' for subCollectionId property is too short, minimum length 1")
self.assertEqual(errors[0].location, '/objectItems[0]/authorityInformation/subCollectionId')
def test_sub_collection_id_label_missing(self):
del self.authority_information['subCollectionIdLabel']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
def test_sub_collection_id_label_too_short(self):
self.authority_information['subCollectionIdLabel'] = ''
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, "property value '' for subCollectionIdLabel property is too short, minimum length 1")
self.assertEqual(errors[0].location, '/objectItems[0]/authorityInformation/subCollectionIdLabel')
def test_sub_collection_id_alias_missing(self):
del self.authority_information['subCollectionIdAlias']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
def test_sub_collection_id_alias_too_short(self):
self.authority_information['subCollectionIdAlias'] = ''
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, "property value '' for subCollectionIdAlias property is too short, minimum length 1")
self.assertEqual(errors[0].location, '/objectItems[0]/authorityInformation/subCollectionIdAlias')
def test_sub_collection_description_missing(self):
del self.authority_information['subCollectionDescription']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
def test_sub_collection_description_too_short(self):
self.authority_information['subCollectionDescription'] = ''
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, "property value '' for subCollectionDescription property is too short, minimum length 1")
self.assertEqual(errors[0].location, '/objectItems[0]/authorityInformation/subCollectionDescription')
def test_registration_date_missing(self):
del self.authority_information['registrationDate']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
def test_expiration_date_missing(self):
del self.authority_information['expirationDate']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
def test_security_tag_missing(self):
del self.authority_information['securityTag']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'required property securityTag is missing')
self.assertEqual(errors[0].location, '/objectItems[0]/authorityInformation')
def test_security_tag_too_short(self):
self.authority_information['securityTag'] = ''
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
def test_owner_missing(self):
del self.authority_information['owner']
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 0)
def test_owner_too_short(self):
self.authority_information['owner'] = ''
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, "property | |
# -*- coding: utf-8 -*-
#!/usr/bin/python
# NOTE(review): the shebang above is not on the first line of this file, so it
# is inert. The script is Python 2 (raw_input here, print statements further
# down) and will not run under Python 3 as-is.
# Author: <NAME>
# UY - 2017
# License: MIT
# One way or another...
# One and Two ways ANOVA conducting with Python
# %matplotlib inline
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import matplotlib.axes
# Widen pandas console output so ANOVA tables print on a single line.
pd.set_option("display.width", 100)
import matplotlib.pylab as plt
import matplotlib
import re
from statsmodels.compat import urlopen
import numpy as np
# Compact numeric printing: 5 decimals, suppress scientific notation.
np.set_printoptions(precision=5, suppress=True)
import seaborn
from statsmodels.formula.api import ols
from statsmodels.graphics.api import interaction_plot, abline_plot
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.stats.anova import anova_lm as lm
from scipy import stats
import matplotlib.cm as cm
warnings.filterwarnings('ignore')
# Interactive banner and usage instructions.
print (' ')
print (' ')
print (' Welcome to ANowoa.py')
print (' -- by <NAME> --')
print (' ~~/\//V\ ')
print (' ')
print (' ')
print (' ')
print ("Application: Analysis of Variance (ANOVA).\n\nINSTRUCTIONS:\n\n- Make sure that the .csv file is in the same folder of this script.\n- To start, enter the name of the file without 'quotes' and ending with .csv\n  Example: scores.csv\n- Enter 'ya' to select number of ways again.\n- Enter 'ya' to quit.\n- Select file, select columns to analyze by group.\n- Returns Analysis of Variance between two or more group means.\n- Returns Degrees of Freedom, Sum of Squares, Mean Square.\n- Returns F-value and p-value.\n- Returns Eta squared and Omega squared for effect size.\n- Returns scatter graph of selected variables.\n")
# Prompt for the CSV file name; an empty answer exits the program.
fhand = raw_input('Enter .csv file name: ')
filecsv = str(fhand)
if filecsv == (''):
    print(' ')
    print ('Ciao, human!')
    print(' ')
    exit()
# The triple-quoted block below is disabled code for fetching a remote CSV
# over HTTP; it is a bare top-level string literal and has no runtime effect.
'''
elif re.findall('^http.*$', filecsv):
    try:
        url = urlopen(filecsv)
        dataframe = pd.read_table(url)
        last = re.findall('^http.*/([a-z].+[a-z])$', filecsv)
        dataframe.to_csv(str(last))
        # possible conflict: S(proa) X(socio) E(educa) M(sexo)
    except:
'''
# Load the local CSV and expose its column headers for the prompts below.
data = pd.read_csv(filecsv)
print (' ')
frame = pd.DataFrame(data)
coolist = frame.columns
columns = np.asarray(coolist)
while True:
ways = raw_input('Enter number of ways to conduct the ANOVA 1/2: ')
print (' ')
hm = str(ways)
if (hm == '') | (hm == '0'):
break
elif hm == ('ya'):
break
elif hm == ('1'):
print ('Columns in', re.findall('(.+?).csv', filecsv), 'are:\n')
print (columns)
print (' ')
hand1 = raw_input('Enter first column header: ')
print (' ')
if (hand1 == 'ya') | (hand1 == ''):
print (' ')
continue
handg = raw_input('Enter "by group" column header: ')
print (' ')
column1 = str(hand1)
column2 = str(handg)
# ONE WAY ANOVA:
print (' ')
grps = pd.unique(data[column2].values)
d_data = {grp:data[column1][data[column2] == grp] for grp in grps}
k = len(pd.unique(data[column2]))
N = len(data.values)
n = data.groupby(data[column2]).size()
print ('Number of conditions:')
print ('k =', k)
print (' ')
print ('Conditions times participants:')
print ('N =', N)
print (' ')
print ('Participants in each condition:')
print ('n =', n)
DFbetween = k - 1
DFwithin = N - k
DFtotal = N - 1
print (' ')
print ('Degrees of freedom:')
print ('DFbetween =', DFbetween)
print ('DFwithin =', DFwithin)
print ('DFtotal =', DFtotal)
print (' ')
SSbetween = (sum(data.groupby(data[column2]).sum()[column1]**2)/n)-(data[column1].sum()**2)/N
print ('Sum of Squares:')
print ('SSbetween =', SSbetween)
print (' ')
Y2 = sum([value**2 for value in data[column1].values])
SSwithin = Y2 - sum(data.groupby(data[column2]).sum()[column1]**2)/n
SStotal = Y2 - (data[column1].sum()**2)/N
print ('SSwithin =', SSwithin)
print (' ')
print ('SStotal =', SStotal)
print (' ')
MSbetween = SSbetween/DFbetween
MSwithin = SSwithin/DFwithin
print ('Mean Square:')
print ('MSbetween =', MSbetween)
print (' ')
print ('MSwithin =', MSwithin)
print (' ')
F = MSbetween / MSwithin
p = stats.f.sf(F, DFbetween, DFwithin)
print 'F =', F
print ' '
print 'p =', p
print ' '
effsize = SSbetween/SStotal # eta-squared
print 'Effect size:'
print ' '
print 'eta-squared =', effsize
print ' '
om_sqrd = ((SSbetween-(DFbetween*MSwithin))/(SStotal+MSwithin))
print 'Omega squared =', om_sqrd
print ' '
print 'Drawing scatter plot...'
print ' '
arr = np.asarray(sorted(data[column1]))
arrG = np.asarray(sorted(data[column2]))
name = str(column1)+' / '+str(column2)
warnings.filterwarnings('ignore')
fig1 = plt.scatter(arr, arrG, label=name)
plt.title(name)
plt.ylim((min(arrG)-1, max(arrG)+1))
plt.xlabel(column1)
plt.ylabel(column2)
plt.show(fig1)
elif hm == '2':
print 'Columns in', re.findall('(.+?).csv', filecsv), 'are:\n'
print columns
print ' '
hand0 = raw_input('Enter "by Group A" column header: ')
print ' '
if hand0 == 'ya':
print ' '
continue
elif hand0 == '':
break
hand1 = raw_input('Enter "by Group B", or hit Return to continue: ')
if hand1 == '':
print ' '
hand2 = raw_input('Enter independent variable "X" column header: ')
print ' '
hand3 = raw_input('Enter dependent variable "Y" column header: ')
print ' '
#M = str(hand1)
E = str(hand0)
X = str(hand2)
S = str(hand3)
# TWO WAY ANOVA:
print ' '
print 'Drawing preview of data...'
groups = data.groupby(data[E])
colors = ['blue', 'red', 'yellow', 'green', 'purple', 'brown', 'orange', 'silver','magenta','cyan','black','white']
if len(groups) == 2:
X = data[X]
Y = data[S]
s = 100
plt.figure(figsize=(8,6))
groups = data.groupby(data[E])
for key, group in groups: # ERROR (working on it)
interaction_plot(X, group, np.log(Y+1), colors=['r','b'], markers=['D','^'], ms=10, ax=plt.gca())
plt.show() #?
else:
fig, ax = plt.subplots(figsize=(8,6))
s = 100
for key, group in groups: # ERROR (working on it)
group.plot(ax=ax, kind='scatter', x=X, y=S, label=key, color=colors[key-1], alpha=0.3, s=s)
nomen = 'variable "'+str(E)+'" is represented by colors'
nomenclature = nomen
plt.title(nomenclature)
plt.xlabel(X);
plt.ylabel(S);
plt.show()
print ' '
print '--------------------------------'
XoY = stats.ttest_ind(data[X], data[S])
print 'T test for X and Y (ind):\n'
print 't-statistic=', XoY[0], '\n\np-value=', XoY[1]
print ' '
print '--------------------------------'
XyY = stats.ttest_rel(data[X], data[S])
print 'T test for X and Y (rel):\n'
print 't-statistic=', XyY[0], '\n\np-value=', XyY[1]
print ' '
print '--------------------------------'
xyy = stats.ttest_1samp(data[X], data[S])
print 'T test for X and Y (1samp)\nReady as "xyy"'
print ' '
print '--------------------------------'
Y = data[S]
Group = data[E]
X = data[X]
# f-test
formula = 'np.log(Y+1) ~ C(Group) * C(X)'
print 'Formula ready:', formula
print ' '
model = ols(formula, data=data).fit()
print 'MODEL SUMMARY:'
print ' '
print model.summary()
print ' '
#aov_table = lm(model, typ=2) # ERROR Singular matrix
#print 'ANALYSIS OF VARIANCE (ANOVA) TABLE:'
#print ' '
#print aov_table
print ' '
print ' Drawing INTERACTION PLOT...'
print ' '
sumofsq = ols('np.log(Y+1) ~ C(Group, Sum) * C(X, Sum)', data=data).fit()
print ' '
print ' Sum of Squares'
print lm(sumofsq)
print ' '
print ' Type 2'
print lm(sumofsq, typ=2)
print ' '
print ' Type 3'
print lm(sumofsq, typ=3)
print ' '
# 3 ways visualization:
else:
print ' '
hand2 = raw_input('Enter independent variable "X" column header: ')
print ' '
hand3 = raw_input('Enter dependent variable "Y" column header: ')
print ' '
M = str(hand1)
E = str(hand0)
X = str(hand2)
S = str(hand3)
# THREE WAY ANOVA:
print ' '
print 'Drawing preview of data...'
print ' '
print 'Calculating ANOVA...'
groups = data.groupby([str(E), str(M)])
colors = ['blue', 'red', 'yellow', 'green', 'purple', 'brown', 'orange', 'silver','magenta','cyan','black','white']
symbols = ['o','d','^', 'h', 's', 'p', 's', 'v', '>','x','D','8','+']
fig, ax = plt.subplots(figsize=(8,6))
if len(groups) < 5:
s = 10**2
else:
s = 81
print ' '
for values, group in groups: # ERROR (working on it)
i, j = values
group.plot(ax=ax, kind='scatter', x=X, y=S, label=values, color=colors[i-1], alpha=0.3, s=s, marker=symbols[j-1], edgecolors='black')
nomen = 'variable "'+str(E)+'" is represented by colors\nvariable "'+str(M)+'" is represented by figures'
nomenclature = nomen
plt.title(nomenclature)
plt.xlabel(X);
plt.ylabel(S);
plt.show()
print ' '
print '--------------------------------'
XoY = stats.ttest_ind(data[X], data[S])
print 'T test for X and Y (ind):\n'
print 't-statistic=', XoY[0], '\n\np-value=', XoY[1]
print ' '
print '--------------------------------'
XyY = stats.ttest_rel(data[X], data[S])
print 'T test for X and Y (rel):\n'
print 't-statistic=', XyY[0], '\n\np-value=', XyY[1]
print ' '
print '--------------------------------'
xyy = stats.ttest_1samp(data[X], data[S])
print 'T test for X and Y (1samp)\nReady as "xyy"'
print ' '
print '--------------------------------'
GroupA = data[E]
Y = data[S]
GroupB = data[M]
X = data[X]
formula = 'Y ~ C(X) + C(GroupA) * C(GroupB)'
print 'Formula ready:', formula
print ' '
| |
import logging
import typing
from itertools import combinations
import spydrnet as sdn
from spydrnet.ir import Definition as DefinitionBase
from spydrnet.ir.innerpin import InnerPin
from spydrnet.ir.outerpin import OuterPin
from spydrnet.ir.port import Port
logger = logging.getLogger('spydrnet_logs')
try:
import networkx as nx
except ImportError:
logger.debug("Networks module not loaded")
if typing.TYPE_CHECKING:
from spydrnet.ir import Definition as DefinitionSDN
from spydrnet_physical.ir.first_class_element import \
FirstClassElement as FirstClassElementPhy
DefinitionBase = type(
"DefinitionBase", (DefinitionSDN, FirstClassElementPhy), {})
class Definition(DefinitionBase):
    """
    Extending the definitions representation

    Adds physical-design properties (bounding-box WIDTH/HEIGHT) and
    feedthrough/merge utilities on top of the SpyDrNet base definition.
    """
    def __init__(self, name=None, properties=None):
        """
        parameters
        ----------
        name: (str, optional) definition name, forwarded to the base class
        properties: (dict, optional) extra properties; WIDTH and HEIGHT
            both default to 50 when not supplied
        """
        super().__init__(name=name, properties=properties)
        properties = properties or dict()
        self.properties["WIDTH"] = properties.get("WIDTH", 50)
        # BUGFIX: HEIGHT previously read the "WIDTH" key (copy-paste error),
        # so a caller-supplied HEIGHT was silently ignored.
        self.properties["HEIGHT"] = properties.get("HEIGHT", 50)
def _disconnect_port(self, port):
'''
This method disconnects the definition port from its cable
This makes the port dangling, this method is used before
removing the port
'''
assert port in self._ports, "Port does not belong to this definition"
for pin in port.pins:
if pin.wire:
pin.wire.disconnect_pin(pin)
del pin
    def remove_port(self, port):
        """
        Remove port from the definition. (Overrides the base method)

        Detaches the port from the definition's bookkeeping, breaks all of
        its wire connections, removes its pins, then drops it from the
        port list.

        parameters
        ----------
        port: (Port) the port to be removed, must be of this definition
        """
        assert port.definition == self, "Port is not included in definition"
        self._remove_port(port)
        self._disconnect_port(port)
        for pin in port.pins:
            port.remove_pin(pin)
        # NOTE(review): `_remove_port` above may already have detached the
        # port from `_ports`; confirm this explicit list removal is not
        # redundant (or required by the base implementation).
        self._ports.remove(port)
def create_ft_ports(self, *args, **kwargs):
''' Alias to create_feedthroughs_ports '''
return self.create_feedthroughs_ports(*args, **kwargs)
    def create_feedthroughs_ports(self, cable, suffix="ft",
                                  get_port_names=lambda x: None):
        '''
        Given the cable object, creates a pair of feedthrough ports on this
        definition.

        - The new ports are named {cable_name}_{suffix}_in and
          {cable_name}_{suffix}_out
        - A direct-assignment instance is created between the two new ports

        args:
            cable (Cable): The cable for which the feedthrough is created
            suffix (str): Suffix used for the port naming
            get_port_names (callable): function returning custom names,
                called as get_port_names(sdn.IN) / get_port_names(sdn.OUT);
                a falsy return falls back to the default names
        Returns:
            tuple: Feedthrough ports (inport, outport)
        '''
        # Custom names win; otherwise derive from the cable name and suffix.
        inport_name = get_port_names(sdn.IN) or f"{cable.name}_{suffix}_in"
        outport_name = get_port_names(sdn.OUT) or f"{cable.name}_{suffix}_out"
        # Both ports mirror the cable's width, scalarness and lower index.
        inport, outport = (self.create_port(inport_name, pins=cable.size,
                                            is_scalar=cable.is_scalar,
                                            lower_index=cable.lower_index,
                                            direction=sdn.IN),
                           self.create_port(outport_name, pins=cable.size,
                                            is_scalar=cable.is_scalar,
                                            lower_index=cable.lower_index,
                                            direction=sdn.OUT))
        # Input port cable and output port cable
        int_c = self.create_cable(inport_name, wires=cable.size)
        out_c = self.create_cable(outport_name, wires=cable.size)
        assign_lib = self._get_assignment_library()
        assign_def = self._get_assignment_definition(assign_lib, cable.size)
        # Pick a unique instance name, appending _1, _2, ... on collision.
        inst_name = f"{inport_name}_{outport_name}_ft"
        i = 1
        while next(self.get_instances(inst_name), None):
            inst_name = f"{inport_name}_{outport_name}_ft" + f"_{i}"
            i += 1
        instance = self.create_child(inst_name, reference=assign_def)
        # Route: inport -> assignment "i" port; assignment "o" port -> outport.
        int_c.connect_port(inport)
        int_c.connect_instance_port(instance, next(assign_def.get_ports("i")))
        out_c.connect_port(outport)
        out_c.connect_instance_port(instance, next(assign_def.get_ports("o")))
        return (inport, outport)
# TODO: Creates problem when cable is output port cable
def create_feedthrough(self, instances_list, cable,
get_port_names=lambda port_dir: None,
get_cable_names=lambda indx, inst: None):
"""
Creates a feedthrough for a single cable passing through
list of instances
The driver cable name is unchanged and newly created feedthrough cable
name {cable_name}_ft_{indx}
args:
instances_list (list[instance]): List of instances to create
feedthrough from
cable (Cable): cable fro which feedthrough needs to be creared
get_port_names(callable): --
get_cable_names(callable): --
Returns:
list(Cable): List of newly created cables in order
"""
if isinstance(instances_list, sdn.Instance):
instances_list = (instances_list,)
assert isinstance(cable, sdn.Cable), "Cable object required"
assert cable.definition == self, \
"Cable {cable.name} does not belog to this definition"
assert all(inst in self._children for inst in instances_list), \
"Found inst which does not belong to this definition"
cable_list = []
for indx, instance in enumerate(instances_list):
inport, outport = instance.reference.create_ft_ports(
cable, get_port_names=get_port_names)
cable_name = get_cable_names(
indx, instance) or f"{cable.name}_ft_in_{indx}"
new_cable = self.create_cable(cable_name, wires=cable.size)
new_cable.connect_instance_port(instance, outport)
for each_w in cable.wires:
for pin in set(each_w.pins):
# These are loads and
if (isinstance(pin, OuterPin) and (pin.port.direction == sdn.IN)) or \
(isinstance(pin, InnerPin) and (pin.port.direction == sdn.OUT)):
each_w.disconnect_pin(pin)
new_cable.wires[pin.get_index].connect_pin(pin)
cable.connect_instance_port(instance, inport)
cable_list.append(new_cable)
return cable_list
def create_ft_multiple(self, *args, **kwargs):
''' Alias to create_feedthrough_multiple '''
return self.create_feedthrough_multiple(*args, **kwargs)
def create_feedthrough_multiple(self, instances_list):
"""
This creates feedthough from list of instances on multiple locations
Expects the list of tuples in following format
parameters
----------
instances_list: [(Cable, (inst1, inst1, . . . .instn), ...
(Cable, (inst1, inst1, . . . .instn))]
"""
assert len(instances_list) > 0, \
"Missing instances list to create feedthroughs"
for cable, inst_tuple in instances_list:
assert isinstance(cable, sdn.Cable), \
"Cable object required"
assert cable.definition == self, \
"Cable {cable.name} does not belog to thos definition"
for indx, instance in enumerate(inst_tuple):
assert isinstance(instance, sdn.Instance), \
"Found {type(instance) in the instances list}"
assert instance.reference == instances_list[0][1][indx].reference, \
f"Instances order does not match"
new_cables = []
port_map = []
for indx2, instance in enumerate(instances_list[0][1]):
inport, outport = instance.reference.create_feedthroughs_ports(
instances_list[0][0], suffix=f"ft_{indx2}")
port_map.append((inport, outport))
for indx2, instances_list in enumerate(instances_list):
for indx, inst in enumerate(instances_list[1][::-1]):
cable = instances_list[0]
logger.debug(f"Iterating {cable.name} for inst {inst.name}")
new_cable = self.create_cable(f"{cable.name}_ft_{indx}")
new_cable.create_wires(cable.size)
logger.debug(
f"Created new cable {cable.name} {new_cable.name}")
new_cables.append(new_cable)
new_cable.connect_instance_port(inst, port_map[indx][1])
for each_w in cable.wires:
for pin in each_w.pins:
if pin.port.direction == sdn.IN:
each_w.disconnect_pin(pin)
new_cable.wires[pin.inner_pin.index()].connect_pin(
pin)
cable.connect_instance_port(inst, port_map[indx][0])
return new_cables, port_map
def merge_multiple_instance(self, instances_list_tuple, new_definition_name=None, pin_map=None):
"""
This method can merge multiple group of instances
having same order of reference definition.
First pair of the instances_list_tuple is used to create new definition
and that is reused while grouping remaining group of instances
args:
instances_list_tuple = [(inst_1, inst_2, ...., inst_n), <instance_name>]
new_definition_name (str) = Name for the new definition
pin_map (Callable) = Function of dictionary to rename pins
"""
mainDef = None
instance_list = []
for instances_list, instance_name in instances_list_tuple:
newDef, newInst, _ = self.merge_instance(instances_list,
new_definition_name=f"{new_definition_name}_{instance_name}",
new_instance_name=instance_name,
pin_map=pin_map)
instance_list.append(newInst)
if not mainDef:
mainDef = newDef
mainDef.name = new_definition_name
else:
newInst.reference = mainDef
self.library.remove_definition(newDef)
return mainDef, instance_list
# TODO: Try to break this method
def merge_instance(self, instances_list,
new_definition_name="",
new_instance_name="", pin_map=None):
"""
Merges the list of instances to unique definition
args:
instances_list (List(Instance)): List of instances to be merged
new_definition_name : Name of the new definition created
new_instance_name : Name of the new instance created
pin_map (Callable, Dict) : External function to map new pin name
based in definition and instance name
get_pin_name(<definition_name:<str>, <pin_name:<str>,
<instance_name:<str>)
returns:
(Definition, Instance, Dict)
"""
RenameMap = {} # Stores the final rename map
# ====== Input Sanity checks
for i, eachModule in enumerate(instances_list):
assert isinstance(
eachModule, sdn.Instance), "Modulelist contains none non-intance object [%s] at location %d " % (type(eachModule), i)
if pin_map:
if isinstance(pin_map, dict):
pin_map_copy = pin_map
def pin_map(x, y, _): return pin_map_copy.get(x, {}).get(y, {})
if not callable(pin_map):
print(
"pin_map argument should be dictionary or function, received {type(pin_map)}")
# ====== Create a new definition
if not new_definition_name:
new_def_name = "_".join(
[each.reference.name for each in instances_list]) + "_merged"
print(f"Inferred definition name {new_def_name} ")
else:
new_def_name = new_definition_name
newMod = self.library.create_definition(name=new_def_name)
# ===== Create instance of the definition
if not new_instance_name:
new_instance_name = f"{new_def_name}_1"
MergedModule = self.create_child(name=new_instance_name,
reference=newMod)
# ===== Interate over each module and create new module
for index, eachM in enumerate(instances_list):
RenameMap[eachM.reference.name] = {}
RenameMap[eachM.reference.name][index] = {}
currMap = RenameMap[eachM.reference.name][index]
IntInst = newMod.create_child(name=eachM.name,
reference=eachM.reference)
# Iterate over each port of current instance
for p in eachM.get_ports():
pClone = p.clone() # It copied all pins, wires and cables
for eachSuffix in [""]+[f"_{i}" for i in range(1000)]:
newName = pClone.name + eachSuffix
if not len(list(newMod.get_ports(newName))):
break
newCable = newMod.create_cable(
name=newName,
is_downto=pClone.is_downto,
is_scalar=pClone.is_scalar,
lower_index=pClone.lower_index,
)
# Create connection inside new definition
for eachPClone, eachP in zip(pClone.pins, p.pins):
w = newCable.create_wire()
w.connect_pin(eachPClone)
w.connect_pin(IntInst.pins[eachP])
pClone.change_name(newName)
newMod.add_port(pClone)
currMap[p.name] = newName
for eachPin in p.pins:
instOutPin = eachM.pins[eachPin]
conWire = instOutPin.wire
instPin = MergedModule.pins[pClone.pins[eachPin.index()]]
if conWire:
conWire.connect_pin(instPin)
conWire.disconnect_pin(instOutPin)
newCable.wires[eachPin.index()].connect_pin(instOutPin)
self.remove_child(eachM)
return newMod, MergedModule, RenameMap
def OptPins(self, pins=lambda x: True, dry_run=False, merge=True, absorb=True):
"""
This method optimizes the definitions pins bu inspecting all the
instances of the definition
parameters
----------
dry_run: Just performs the dryrun and list the pins which can be merged or absorbed
pins: only consider specific pins, provide filter function
absorb: if two pins are only connected to each other they will be absorbed and internal connection will be made
merge: if two pins are connected to each other and few other instances, one of the pin will be absorbed and other will exist
"""
duplicatePins = [] # Set of all pins which can be merged or absorbed
absorbPins = [] # Subset of duplicate pins
defPort = list([x for x in self.get_ports() if pins(x.name)])
# Iterate over all the ports pairs of the definition
for fromPort, toPort in combinations(defPort, 2):
if len(fromPort.pins) == len(toPort.pins):
# Compare only when port has same width
sameNet = True # Flag to detect boh ports are | |
# ==========================================================================================================================================================
# mappo gnn class
# purpose: class to train multiple agents
# ==========================================================================================================================================================
import os
import numpy as np
import torch as T
import torch.nn.functional as F
from mappo_gnn.mappo_gnn_agent import mappo_gnn_agent
from mappo_gnn.mappo_gnn_replay_buffer import mappo_gnn_replay_buffer
from torch_geometric.data import Batch
class mappo_gnn:
def __init__(self, mode, scenario_name, training_name, lr_actor, lr_critic, num_agents, num_opp, u_range, u_noise, c_noise, is_adversary, actor_dropout_p, critic_dropout_p, state_fc_input_dims,
state_fc_output_dims, u_action_dims, c_action_dims, num_heads, bool_concat, gnn_input_dims, gnn_output_dims, gmt_hidden_dims, gmt_output_dims, fc_output_dims, batch_size, gamma,
clip_coeff, num_epochs, gae_lambda, entropy_coeff, use_huber_loss, huber_delta, use_clipped_value_loss, critic_loss_coeff, grad_clipping, grad_norm_clip):
""" class constructor for attributes of the mappo class (for multiple agents) """
# list to store mappo agents
self.mappo_gnn_agents_list = []
# number of agent and adversarial drones
self.num_agents = num_agents
self.num_opp = num_opp
# dimensions of action for motor and communications
self.u_action_dims = u_action_dims
self.c_action_dims = c_action_dims
# dimensions of action space
self.actions_dims = self.u_action_dims + self.c_action_dims
# batch of memory to sample
self.batch_size = batch_size
# factor in discount rate for general advantage estimation
self.gamma = gamma
# variable for clip
self.clip_coeff = clip_coeff
# number of epochs
self.num_epochs = num_epochs
# factor in discount rate for general advantage estimation
self.gae_lambda = gae_lambda
# constant to scale entropy
self.entropy_coeff = entropy_coeff
# boolean to determine to use huber loss for value loss
self.use_huber_loss = use_huber_loss
# huber loss variable
self.huber_delta = huber_delta
# boolean to choose to use clipped or original value loss
self.use_clipped_value_loss = use_clipped_value_loss
# constant to scale critic_loss
self.critic_loss_coeff = critic_loss_coeff
# gradient clipping
self.grad_clipping = grad_clipping
self.grad_norm_clip = grad_norm_clip
# check if agent
if is_adversary == False:
# iterate over num_agents
for i in range(num_agents):
# append mappo agent to list
self.mappo_gnn_agents_list.append(mappo_gnn_agent(mode = mode, scenario_name = scenario_name, training_name = training_name, lr_actor = lr_actor, lr_critic = lr_critic,
num_agents = num_agents, num_opp = num_opp, u_range = u_range, u_noise = u_noise, c_noise = c_noise, is_adversary = is_adversary,
actor_dropout_p = actor_dropout_p, critic_dropout_p = critic_dropout_p, state_fc_input_dims = state_fc_input_dims[i],
state_fc_output_dims = state_fc_output_dims, u_action_dims = u_action_dims, c_action_dims = c_action_dims, num_heads = num_heads,
bool_concat = bool_concat, gnn_input_dims = gnn_input_dims[i], gnn_output_dims = gnn_output_dims, gmt_hidden_dims = gmt_hidden_dims,
gmt_output_dims = gmt_output_dims, fc_output_dims = fc_output_dims))
# update actor model_names attributes for checkpoints
self.mappo_gnn_agents_list[i].mappo_gnn_actor.model_name = "mappo_gnn_actor"
# update actor checkpoints_path attributes
self.mappo_gnn_agents_list[i].mappo_gnn_actor.checkpoint_path = os.path.join(self.mappo_gnn_agents_list[i].mappo_gnn_actor.checkpoint_dir,
self.mappo_gnn_agents_list[i].mappo_gnn_actor.model_name + "_" + str(i) + ".pt")
# update critic model_names attributes for checkpoints
self.mappo_gnn_agents_list[i].mappo_gnn_critic.model_name = "mappo_gnn_critic"
# update critic checkpoints_path attributes
self.mappo_gnn_agents_list[i].mappo_gnn_critic.checkpoint_path = os.path.join(self.mappo_gnn_agents_list[i].mappo_gnn_critic.checkpoint_dir,
self.mappo_gnn_agents_list[i].mappo_gnn_critic.model_name + "_" + str(i) + ".pt")
# if mode is not test
if mode == 'train':
# create replay buffer
self.replay_buffer = mappo_gnn_replay_buffer(num_agents = num_agents, batch_size = batch_size)
# if test mode
elif mode == 'test':
# load all models
self.load_all_models()
elif mode == "load_and_train":
# create replay buffer
self.replay_buffer = mappo_gnn_replay_buffer(num_agents = num_agents, batch_size = batch_size)
# load all models
self.load_all_models()
# check if adver
elif is_adversary == True:
# iterate over num_opp
for i in range(num_opp):
# append mappo agent to list
self.mappo_gnn_agents_list.append(mappo_gnn_agent(mode = mode, scenario_name = scenario_name, training_name = training_name, lr_actor = lr_actor, lr_critic = lr_critic,
num_agents = num_agents, num_opp = num_opp, u_range = u_range, u_noise = u_noise, c_noise = c_noise, is_adversary = is_adversary,
actor_dropout_p = actor_dropout_p, critic_dropout_p = critic_dropout_p, state_fc_input_dims = state_fc_input_dims[i],
state_fc_output_dims = state_fc_output_dims, u_action_dims = u_action_dims, c_action_dims = c_action_dims, num_heads = num_heads,
bool_concat = bool_concat, gnn_input_dims = gnn_input_dims[i], gnn_output_dims = gnn_output_dims, gmt_hidden_dims = gmt_hidden_dims,
gmt_output_dims = gmt_output_dims, fc_output_dims = fc_output_dims))
# update actor model_names attributes for checkpoints
self.mappo_gnn_agents_list[i].mappo_gnn_actor.model_name = "mappo_gnn_actor"
# update actor checkpoints_path attributes
self.mappo_gnn_agents_list[i].mappo_gnn_actor.checkpoint_path = os.path.join(self.mappo_gnn_agents_list[i].mappo_gnn_actor.checkpoint_dir,
self.mappo_gnn_agents_list[i].mappo_gnn_actor.model_name + "_" + str(i) + ".pt")
# update critic model_names attributes for checkpoints
self.mappo_gnn_agents_list[i].mappo_gnn_critic.model_name = "mappo_gnn_critic"
# update critic checkpoints_path attributes
self.mappo_gnn_agents_list[i].mappo_gnn_critic.checkpoint_path = os.path.join(self.mappo_gnn_agents_list[i].mappo_gnn_critic.checkpoint_dir,
self.mappo_gnn_agents_list[i].mappo_gnn_critic.model_name + "_" + str(i) + ".pt")
# if mode is not test
if mode == 'train':
# create replay buffer
self.replay_buffer = mappo_gnn_replay_buffer(num_agents = num_agents, batch_size = batch_size)
# if test mode
elif mode == 'test':
# load all models
self.load_all_models()
elif mode == "load_and_train":
# create replay buffer
self.replay_buffer = mappo_gnn_replay_buffer(num_agents = num_agents, batch_size = batch_size)
# load all models
self.load_all_models()
def select_actions(self, mode, env_agents, actor_state_list):
""" function to select actions for the all agents given state observed by respective agent """
# initialise empty list to store motor, communication actions and their respective log probabiities and all actions from all agents
u_actions_list = []
c_actions_list = []
u_actions_log_probs_list = []
c_actions_log_probs_list = []
actions_list = []
# iterate over num_agents
for agent_index, agent in enumerate(self.mappo_gnn_agents_list):
# select action for respective agent from corresponding list of states observed by agent
u_action, c_action, u_action_log_probs, c_action_log_probs = agent.select_action(mode = mode, agent = env_agents[agent_index], state = actor_state_list[agent_index])
# append actions to respective lists
u_actions_list.append(u_action)
c_actions_list.append(c_action)
u_actions_log_probs_list.append(u_action_log_probs)
c_actions_log_probs_list.append(c_action_log_probs)
actions_list.append([np.array(u_action, dtype = np.float32), np.array(c_action, dtype = np.float32)])
return np.array(u_actions_list), np.array(c_actions_list), np.array(u_actions_log_probs_list), np.array(c_actions_log_probs_list), actions_list
def apply_gradients_mappo_gnn(self, num_of_agents):
""" function to apply gradients for mappo to learn from replay buffer """
# obtain device (should be same for all models)
device = self.mappo_gnn_agents_list[0].mappo_gnn_actor.device
# generate batch tensor for graph multiset transformer in critic model
critic_batch = T.tensor([i for i in range(self.batch_size) for j in range(num_of_agents)], dtype = T.long).to(device)
# list to store metrics for logging
actor_loss_list = []
critic_loss_list = []
actor_grad_norm_list = []
critic_grad_norm_list = []
policy_ratio_list = []
# enumerate over agents
for agent_index, agent in enumerate(self.mappo_gnn_agents_list):
# obtain value_normaliser
value_normaliser = agent.mappo_gnn_critic.popart
# variables to store metric
avg_actor_loss_value = 0.0
avg_critic_loss_value = 0.0
avg_actor_grad_norm_value = 0.0
avg_critic_grad_norm_value = 0.0
avg_policy_ratio_value = 0.0
# iterate over number of epochs
for _ in range(self.num_epochs):
# sample replay buffer
actor_state_list, actor_u_action_list, actor_c_action_list, actor_u_action_log_probs_list, actor_c_action_log_probs_list, critic_state_list, critic_state_value_list, rewards, \
terminal, batches = self.replay_buffer.sample_log(self.batch_size)
# convert rewards and critic state value list
critic_state_value_list_t = T.tensor(critic_state_value_list, dtype = T.float).to(device)
# numpy arrays for advantage and returns
advantages = np.zeros(len(rewards[agent_index]), dtype = np.float32)
returns = np.zeros(len(rewards[agent_index]), dtype = np.float32)
# variable to track gae
gae = 0
# iterate over timesteps
for step in reversed(range(len(rewards[agent_index]) - 1)):
# obtain td_delta error
td_delta = rewards[agent_index][step] + self.gamma * value_normaliser.denormalize(critic_state_value_list_t[agent_index][step + 1]) * (1 - terminal[agent_index][step]) - \
value_normaliser.denormalize(critic_state_value_list_t[agent_index][step])
# obtain gae
gae = td_delta + self.gamma * self.gae_lambda * gae
# obtain advantage and returns
advantages[step] = np.squeeze(gae)
returns[step] = np.squeeze(gae + value_normaliser.denormalize(critic_state_value_list_t[agent_index][step]))
# obtain normalised advantages
advantages_copy = advantages.copy()
mean_advantages = np.nanmean(advantages_copy)
std_advantages = np.nanstd(advantages_copy)
advantages = (advantages - mean_advantages) / (std_advantages + 1e-5)
# tensor for advatange and returns
advantages = T.tensor(advantages, dtype = T.float).to(device)
returns = T.tensor(returns, dtype = T.float).to(device)
# iterate over batches
for batch in batches:
# covert features to tensors
actor_state = T.tensor(actor_state_list[agent_index][batch], dtype = T.float).to(device)
critic_state = Batch().from_data_list([critic_state_list[i] for i in batch]).to(device)
critic_state_value = T.tensor(critic_state_value_list[agent_index][batch], dtype = T.float).to(device)
actor_u_action_log_probs = T.tensor(actor_u_action_log_probs_list[agent_index][batch], dtype = T.float).to(device)
actor_c_action_log_probs = T.tensor(actor_c_action_log_probs_list[agent_index][batch], dtype = T.float).to(device)
actor_u_action = T.tensor(actor_u_action_list[agent_index][batch], dtype = T.float).to(device)
actor_c_action = T.tensor(actor_c_action_list[agent_index][batch], dtype = T.float).to(device)
# obtain state value based on current critic
critic_state_value_prime = agent.mappo_gnn_critic.forward(critic_state, critic_batch)
# obtain actions prime distributions
actor_u_action_prime_norm_dist, actor_c_action_prime_norm_dist = agent.mappo_gnn_actor.forward(actor_state)
# obtain log probs of actions prime
actor_u_action_prime_log_probs = actor_u_action_prime_norm_dist.log_prob(actor_u_action)
actor_c_action_prime_log_probs = actor_c_action_prime_norm_dist.log_prob(actor_c_action)
# obtain entropy from actions
actor_u_action_prime_entropy = actor_u_action_prime_norm_dist.entropy().mean()
actor_c_action_prime_entropy = actor_c_action_prime_norm_dist.entropy().mean()
# obtain policy ratio
policy_ratio = T.cat((T.exp(actor_u_action_prime_log_probs), T.exp(actor_c_action_prime_log_probs)), axis = 1) / \
T.cat((T.exp(actor_u_action_log_probs), T.exp(actor_c_action_log_probs)), axis = 1)
# obtain weighted policy ratio
weighted_policy_ratio = policy_ratio * T.unsqueeze(advantages[batch], dim = 1)
# obtain weighted clipped policy ratio
weighted_clipped_policy_ratio = T.clamp(policy_ratio, 1 - self.clip_coeff, 1 + self.clip_coeff) * T.unsqueeze(advantages[batch], dim = 1)
# obtain actor loss
actor_loss = - T.sum(T.min(weighted_policy_ratio, weighted_clipped_policy_ratio), dim = -1, keepdim = True).mean() - \
(actor_u_action_prime_entropy + actor_c_action_prime_entropy) * self.entropy_coeff
# reset gradients for actor model to zero
agent.mappo_gnn_actor.optimizer.zero_grad()
# actor model back propagation
actor_loss.backward()
# check if gradient clipping is needed
if self.grad_clipping == True:
| |
to reconfirm')
class PersonalInformationForm(RegistrationFormStep):
    """Personal information step: t-shirt preferences plus optional
    diversity-statistics questions (gender, country, languages)."""
    title = 'Personal Information'

    t_shirt_cut = forms.ChoiceField(
        label='My t-shirt cut',
        choices=(
            ('', NO_T_SHIRT_LABEL),
            ('s', STRAIGHT_CUT_LABEL),
            ('w', WOMENS_FITTED_CUT_LABEL),
        ),
        required=False,
    )
    # sizes come from the shared T_SHIRT_SIZES table, prefixed by an N/A entry
    t_shirt_size = forms.ChoiceField(
        label='My t-shirt size',
        choices=[('', 'N/A')] + [
            (size, T_SHIRT_SIZES[size])
            for size in ('xs', 's', 'm', 'l', 'xl', '2xl', '3xl', '4xl', '5xl')
        ],
        help_text='Refer to the ' + TSHIRT_CHART_LINK + '.',
        required=False,
    )
    gender = forms.ChoiceField(
        label='My gender',
        choices=(
            ('', 'Decline to state'),
            ('m', 'Male'),
            ('f', 'Female'),
            ('o', 'Other'),
        ),
        help_text='For diversity statistics.',
        required=False,
    )
    country = LazyTypedChoiceField(
        label='The country I call home',
        help_text='For diversity statistics.',
        choices=OptionalCountries(),
        required=False,
        widget=CountrySelectWidget(),
    )
    languages = forms.CharField(
        label='The languages I speak',
        help_text='We will list these on your nametag.',
        initial='en',
        max_length=50,
        required=False,
    )

    # presumably consumed by the step machinery to copy these fields onto the
    # attendee record — confirm against RegistrationFormStep
    attendee_fields = (
        't_shirt_cut',
        't_shirt_size',
        'gender',
        'country',
        'languages',
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.helper.layout = Layout(
            Fieldset(
                'T-shirt',
                Field('t_shirt_cut', id='tshirt-cut'),
                Field('t_shirt_size', id='tshirt-size'),
            ),
            Field('gender'),
            Field('country'),
            Field('languages'),
        )

    def clean_t_shirt_size(self):
        # a size is only meaningful when a cut was chosen
        if self.cleaned_data.get('t_shirt_cut'):
            return self.cleaned_data.get('t_shirt_size')
        return ''

    def clean(self):
        data = super().clean()
        # choosing a cut commits the attendee to choosing a size as well
        if data.get('t_shirt_cut') and not data.get('t_shirt_size'):
            self.add_error('t_shirt_size', "Select a size, please")
class BursaryForm(RegistrationFormStep):
    """Bursary application step.

    Collects what kind of bursary (if any) is requested and the supporting
    justification. Saving either creates/updates the user's Bursary record
    (when a request was made) or blanks out any existing record.
    """
    title = 'Bursary'

    request = forms.ChoiceField(
        label='I want to apply for a bursary',
        choices=(
            ('', "No, I'm not requesting a bursary"),
            ('food+accomm', FOOD_ACCOMM_BURSARY_LABEL),
            ('travel+food+accomm', TRAVEL_FOOD_ACCOMM_BURSARY_LABEL),
        ),
        required=False,
    )
    reason_contribution = forms.CharField(
        label='My contributions to Debian',
        widget=forms.Textarea(attrs={'rows': 5}),
        required=False,
        help_text='To help us evaluate your eligibility for a Debian bursary.',
    )
    reason_plans = forms.CharField(
        label='My plans for DebCamp or DebConf',
        help_text='To help us evaluate your eligibility for a Debian bursary.',
        widget=forms.Textarea(attrs={'rows': 5}),
        required=False,
    )
    reason_diversity = forms.CharField(
        label='My eligibility for a diversity bursary',
        widget=forms.Textarea(attrs={'rows': 5}),
        # fixed: target="blank" opened a window literally named "blank";
        # target="_blank" (as used elsewhere in this file) opens a new tab
        help_text='Diversity bursary applications only. Please consult the '
                  '<a href="/about/bursaries/#diversity-bursaries" '
                  'target="_blank">diversity bursary instructions</a>.',
        required=False,
    )
    need = forms.ChoiceField(
        label='My level of need',
        choices=(
            ('', 'N/A (not requesting a bursary)'),
            ('unable', BURSARY_NEED_LABELS['unable']),
            ('sacrifice', BURSARY_NEED_LABELS['sacrifice']),
            ('inconvenient', BURSARY_NEED_LABELS['inconvenient']),
            ('non-financial', BURSARY_NEED_LABELS['non-financial']),
        ),
        required=False,
    )
    travel_bursary = forms.IntegerField(
        label='My travel expense claim (in USD)',
        help_text='Estimated amount required. ' + BURSARIES_LINK,
        min_value=0,
        max_value=10000,
        required=False,
    )
    travel_from = forms.CharField(
        label="I'm traveling from",
        help_text='Knowing where you need to travel from helps us evaluate '
                  'the amount you are claiming.',
        required=False,
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.helper.layout = Layout(
            Field('request', id='bursary-request'),
            Fieldset(
                'Bursary Details',
                HTML('<p>This is where you explain your needs, and '
                     'involvement in Debian, that justify a bursary. See the '
                     + BURSARIES_LINK + '.</p>'),
                'reason_contribution',
                'reason_plans',
                'reason_diversity',
                'need',
                css_id='bursary-details',
            ),
            Fieldset(
                'Travel Bursary Details',
                'travel_bursary',
                'travel_from',
                css_id='travel-details',
            )
        )

    @classmethod
    def get_initial(cls, user):
        """Pre-populate the form from the user's stored Bursary record."""
        try:
            bursary = user.bursary
        except ObjectDoesNotExist:
            return {}
        return {field: getattr(bursary, field) for field in (
            'request',
            'reason_contribution',
            'reason_plans',
            'reason_diversity',
            'need',
            'travel_bursary',
            'travel_from',
        )}

    def clean_travel_bursary(self):
        # a zero-amount claim is treated as no claim at all
        travel_bursary = self.cleaned_data.get('travel_bursary')
        if travel_bursary == 0:
            return None
        return travel_bursary

    def clean(self):
        cleaned_data = super().clean()
        request = cleaned_data.get('request')
        if not request:
            # no bursary requested: normalise and skip the remaining checks
            cleaned_data['request'] = None
            return cleaned_data
        if not cleaned_data.get('reason_plans'):
            self.add_error(
                'reason_plans',
                'Please share your plans for the conference, when applying '
                'for a bursary.')
        # at least one of the two justification fields must be filled in
        if (not cleaned_data.get('reason_contribution')
                and not cleaned_data.get('reason_diversity')):
            for field in ('reason_contribution', 'reason_diversity'):
                self.add_error(
                    field,
                    'Please describe your contributions and/or the diversity '
                    'of your background, when applying for a bursary.')
        if not cleaned_data.get('need'):
            self.add_error(
                'need',
                'Please share your level of need, when applying for a bursary.'
            )
        if 'travel' in request:
            for field in ('travel_bursary', 'travel_from'):
                if not cleaned_data.get(field):
                    self.add_error(
                        field,
                        'Please share your travel details, when applying for '
                        'a travel bursary.'
                    )

    def save(self, user, attendee):
        """Persist the bursary request; blank any existing record when the
        user did not request one."""
        data = self.cleaned_data
        bursary_data = {field: data[field] for field in (
            'request',
            'reason_contribution',
            'reason_plans',
            'reason_diversity',
            'need',
            'travel_bursary',
            'travel_from',
        )}
        if data['request']:
            Bursary.objects.update_or_create(user=user, defaults=bursary_data)
        else:
            # no request: overwrite (rather than delete) any stale record
            Bursary.objects.filter(user=user).update(**bursary_data)
class FoodForm(RegistrationFormStep):
    """Food registration step: which catered meals, and any dietary needs."""
    title = 'Food'

    meals = forms.MultipleChoiceField(
        label='I want to eat catered food for these meals:',
        choices=meal_choices(),
        widget=forms.CheckboxSelectMultiple,
        help_text="If you don't have a food bursary, meal prices are: "
                  "Breakfast 3 CAD, Lunch 7.50 CAD, Dinner 7.50 CAD.",
        required=False,
    )
    # diet choices mirror the shared DIET_LABELS table
    diet = forms.ChoiceField(
        label='My diet',
        choices=[(key, DIET_LABELS[key])
                 for key in ('', 'vegetarian', 'vegan', 'other')],
        required=False,
    )
    special_diet = forms.CharField(
        label='Details of my special dietary needs',
        required=False,
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.helper.layout = Layout(
            Field('meals', id='meals'),
            Field('diet', id='diet'),
            Field('special_diet', id='special_diet'),
        )

    @classmethod
    def get_initial(cls, user):
        """Pre-populate the form from the user's stored Food record, if any."""
        try:
            food = user.attendee.food
        except ObjectDoesNotExist:
            return {}
        initial = {'diet': food.diet, 'special_diet': food.special_diet}
        initial['meals'] = [meal.form_name for meal in food.meals.all()]
        return initial

    def clean(self):
        cleaned_data = super().clean()
        # the catch-all diet option needs a free-text description
        needs_details = cleaned_data.get('diet') == 'other'
        if needs_details and not cleaned_data.get('special_diet'):
            self.add_error('special_diet', 'Required when diet is "other"')

    def save(self, user, attendee):
        """Persist meal selections; delete the record when nothing is chosen."""
        data = self.cleaned_data
        if not data['meals']:
            Food.objects.filter(attendee=attendee).delete()
            return
        food, _created = Food.objects.update_or_create(
            attendee=attendee,
            defaults={'diet': data['diet'], 'special_diet': data['special_diet']})
        # reconcile the stored m2m meal set with the freshly requested one
        stored = set(food.meals.all())
        wanted = set()
        for entry in data['meals']:
            meal_name, date_str = entry.split('_')
            wanted.add(Meal.objects.get(meal=meal_name, date=parse_date(date_str)))
        food.meals.remove(*(stored - wanted))
        food.meals.add(*(wanted - stored))
class AccommForm(RegistrationFormStep):
    # Accommodation step: conference housing, childcare and special needs.
    title = 'Accommodation'
    # main switch: the accommodation detail fields only matter when this is True
    accomm = forms.BooleanField(
        label='I need conference-organised accommodation',
        widget=forms.Select(choices=(
            (False, 'No, I will find my own accommodation'),
            (True, 'Yes, I need accommodation'),
        )),
        required=False,
    )
    nights = forms.MultipleChoiceField(
        label="I'm requesting accommodation for these nights:",
        choices=night_choices(),
        widget=forms.CheckboxSelectMultiple,
        required=False,
    )
    requirements = forms.CharField(
        label='Do you have any particular accommodation requirements?',
        help_text='Anything that you want us to consider for room attribution '
                  'should be listed here (ex. "I want to be with Joe Hill", '
                  '"I snore", "I go to bed early")',
        required=False,
    )
    # the alt_accomm pair is only shown to bursary applicants (see __init__)
    alt_accomm = forms.BooleanField(
        label='I would like to request alternative accommodation (only '
              'available if you receive a bursary)',
        required=False,
    )
    alt_accomm_choice = forms.ChoiceField(
        label='Select the accommodation you prefer during DebConf (only '
              'available if you receive a bursary)',
        choices=(
            ('rvc_single', ACCOMM_CHOICE_LABELS['rvc_single']),
            ('rvc_double', ACCOMM_CHOICE_LABELS['rvc_double']),
            ('hotel', ACCOMM_CHOICE_LABELS['hotel']),
        ),
        required=False,
    )
    special_needs = forms.CharField(
        label='My special needs',
        help_text='Wheelchair access or other any other needs we should be '
                  'aware of.',
        widget=forms.Textarea(attrs={'rows': 3}),
        required=False,
    )
    childcare = forms.BooleanField(
        label='I need childcare for my kid(s)',
        required=False,
    )
    childcare_needs = forms.CharField(
        label='The childcare services I need are',
        help_text='How many hours a day? All the conference or only part of '
                  'it? etc.',
        widget=forms.Textarea(attrs={'rows': 3}),
        required=False,
    )
    childcare_details = forms.CharField(
        label='Important informations about my kid(s)',
        help_text='Number, ages, languages spoken, special needs, etc.',
        widget=forms.Textarea(attrs={'rows': 5}),
        required=False,
    )
    # free-text, unvalidated; used for room attribution (see clean())
    family_usernames = forms.CharField(
        label='Usernames of my family members, '
              'who have registered separately',
        help_text="One per line. This isn't validated.",
        widget=forms.Textarea(attrs={'rows': 3}),
        required=False,
    )
    def __init__(self, *args, **kwargs):
        """Build the crispy-forms layout, conditionally exposing the
        alternative-accommodation fields."""
        super().__init__(*args, **kwargs)
        # data validated by the earlier Bursary step of this wizard
        bursary_cleaned = self.get_cleaned_data_for_form(
            BursaryForm)
        accomm_details = Fieldset(
            'Accommodation Details',
            'nights',
            'requirements',
            css_id='accomm-details',
        )
        # alternative accommodation is only offered to bursary applicants
        if bursary_cleaned.get('request'):
            accomm_details.fields += [
                Field('alt_accomm', id='alt_accomm'),
                Field('alt_accomm_choice', id='alt_accomm_choice'),
            ]
        self.helper.layout = Layout(
            HTML(
                '<p>By default, the accommodation provided is in <a href="'
                'https://wiki.debconf.org/wiki/DebConf17/Accommodation#On-site'
                '" target="_blank">shared classroom dorms on premises</a>. '
                'The cost is 30 CAD/night for attendees who do not receive a '
                'bursary.</p>'),
            Field('accomm', id='accomm'),
            accomm_details,
            Field('childcare', id='childcare'),
            Fieldset(
                'Childcare Details',
                'childcare_needs',
                'childcare_details',
                css_id='childcare-details',
            ),
            Field('special_needs'),
            Field('family_usernames'),
        )
@classmethod
def get_initial(cls, user):
try:
accomm = user.attendee.accomm
except ObjectDoesNotExist:
return {}
initial = {
'accomm': True,
'nights': [night.form_name for night in accomm.nights.all()],
'alt_accomm': bool(accomm.alt_accomm_choice),
}
initial.update({field: getattr(accomm, field) for field in (
'requirements',
'alt_accomm_choice',
'childcare',
'childcare_needs',
'childcare_details',
'special_needs',
'family_usernames',
)})
return initial
def clean_alt_accomm_choice(self):
if not self.cleaned_data.get('alt_accomm'):
return None
return self.cleaned_data.get('alt_accomm_choice')
def clean(self):
cleaned_data = super().clean()
if cleaned_data.get('childcare'):
if not cleaned_data.get('childcare_needs'):
self.add_error('childcare_needs',
'Please provide us with your needs.')
if not cleaned_data.get('childcare_details'):
self.add_error(
'childcare_details',
"Please provide us with your children's details.")
if not cleaned_data.get('accomm'):
return
if not cleaned_data.get('nights'):
self.add_error(
'nights',
'Please select the nights you require accommodation for.')
alt_accomm = None
if cleaned_data.get('alt_accomm'):
alt_accomm = cleaned_data.get('alt_accomm_choice')
else:
cleaned_data['alt_accomm_choice'] = None
if alt_accomm == 'rvc_double' and not cleaned_data.get(
'family_usernames'):
for field in ('alt_accomm_choice', 'family_usernames'):
self.add_error(
field,
"Please provide the username of the person you want to "
"share a room with.")
if alt_accomm == 'hotel' and not cleaned_data.get('special_needs'):
for field in ('alt_accomm_choice', 'special_needs'):
self.add_error(
field,
"Please provide the special needs that lead you to "
"request a hotel room.")
return cleaned_data
def save(self, user, attendee):
data = self.cleaned_data
if not data['accomm']:
Accomm.objects.filter(attendee=attendee).delete()
return
accomm, created = Accomm.objects.update_or_create(
attendee=attendee,
defaults={field: data[field] for field in (
'requirements',
'alt_accomm_choice',
'childcare',
'childcare_needs',
'childcare_details',
'special_needs',
'family_usernames',
)})
stored_nights = set(accomm.nights.all())
requested_nights = set()
for night in data['nights']:
date = parse_date(night.split('_')[1])
requested_nights.add(AccommNight.objects.get(date=date))
accomm.nights.remove(*(stored_nights - | |
import hashlib
import json
import os
import re
import shlex
import shutil
import subprocess
from argparse import ArgumentParser
from configparser import ConfigParser
from io import StringIO
from subprocess import CalledProcessError
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from djangofloor.management.base import TemplatedBaseCommand
from djangofloor.tasks import get_expected_queues
from djangofloor.utils import ensure_dir
__author__ = "<NAME>"
# fpm options that may legitimately be passed several times on one command line
FPM_MULTIPLE_OPTIONS = {
    "--depends",
    "--provides",
    "--conflicts",
    "--replaces",
    "--config-files",
    "--directories",
    "--deb-build-depends",
    "--deb-pre-depends",
    "--rpm-auto-add-exclude-directories",
}
# fpm options whose values are script/file paths rendered through templates
# before being handed to fpm — presumably; confirm against the consumer code
FPM_TEMPLATED_OPTIONS = {
    "--after-install",
    "--before-install",
    "--after-remove",
    "--before-remove",
    "--after-upgrade",
    "--before-upgrade",
    "--deb-custom-control",
    "--deb-config",
    "--deb-changelog",
    "--deb-meta-file",
    "--rpm-changelog",
    "--rpm-posttrans",
    "--rpm-pretrans",
    "--rpm-verifyscript",
    "--rpm-trigger-before-install",
    "--rpm-trigger-after-install",
    "--rpm-trigger-before-uninstall",
    # fixed: was the truncated "--rpm-trigger-after-targe", which could never
    # match the "--rpm-trigger-after-target-uninstall" flag produced from
    # FPM_CLI_OPTIONS, silently skipping templating for that option
    "--rpm-trigger-after-target-uninstall",
}
# fpm flags that take no value (simple on/off booleans)
FPM_BOOL_OPTIONS = {
    "--rpm-use-file-permissions",
    "--rpm-sign",
    "--rpm-auto-add-directories",
    "--rpm-autoreqprov",
    "--rpm-autoreq",
    "--rpm-autoprov",
    "--rpm-ignore-iteration-in-dependencies",
    "--rpm-verbatim-gem-dependencies",
    "--deb-use-file-permissions",
    "--deb-ignore-iteration-in-dependencies",
}
# Maps "<package-format>.<option>" keys (as read from the packaging config
# file) to the corresponding fpm command-line flag. Generic options are
# repeated for each of the deb/rpm/tar formats; format-specific options
# appear once under their own prefix.
FPM_CLI_OPTIONS = {
    "deb.name": "--name",
    "rpm.name": "--name",
    "tar.name": "--name",
    "deb.version": "--version",
    "rpm.version": "--version",
    "tar.version": "--version",
    "deb.iteration": "--iteration",
    "rpm.iteration": "--iteration",
    "tar.iteration": "--iteration",
    "deb.epoch": "--epoch",
    "rpm.epoch": "--epoch",
    "tar.epoch": "--epoch",
    "deb.license": "--license",
    "rpm.license": "--license",
    "tar.license": "--license",
    "deb.vendor": "--vendor",
    "rpm.vendor": "--vendor",
    "tar.vendor": "--vendor",
    "deb.category": "--category",
    "rpm.category": "--category",
    "tar.category": "--category",
    "deb.depends": "--depends",
    "rpm.depends": "--depends",
    "tar.depends": "--depends",
    "deb.provides": "--provides",
    "rpm.provides": "--provides",
    "tar.provides": "--provides",
    "deb.conflicts": "--conflicts",
    "rpm.conflicts": "--conflicts",
    "tar.conflicts": "--conflicts",
    "deb.replaces": "--replaces",
    "rpm.replaces": "--replaces",
    "tar.replaces": "--replaces",
    "deb.config-files": "--config-files",
    "rpm.config-files": "--config-files",
    "tar.config-files": "--config-files",
    "deb.directories": "--directories",
    "rpm.directories": "--directories",
    "tar.directories": "--directories",
    "deb.architecture": "--architecture",
    "rpm.architecture": "--architecture",
    "tar.architecture": "--architecture",
    "deb.maintainer": "--maintainer",
    "rpm.maintainer": "--maintainer",
    "tar.maintainer": "--maintainer",
    "deb.description": "--description",
    "rpm.description": "--description",
    "tar.description": "--description",
    "deb.url": "--url",
    "rpm.url": "--url",
    "tar.url": "--url",
    "deb.after-install": "--after-install",
    "rpm.after-install": "--after-install",
    "tar.after-install": "--after-install",
    "deb.before-install": "--before-install",
    "rpm.before-install": "--before-install",
    "tar.before-install": "--before-install",
    "deb.after-remove": "--after-remove",
    "rpm.after-remove": "--after-remove",
    "tar.after-remove": "--after-remove",
    "deb.before-remove": "--before-remove",
    "rpm.before-remove": "--before-remove",
    "tar.before-remove": "--before-remove",
    "deb.after-upgrade": "--after-upgrade",
    "rpm.after-upgrade": "--after-upgrade",
    "tar.after-upgrade": "--after-upgrade",
    "deb.before-upgrade": "--before-upgrade",
    "rpm.before-upgrade": "--before-upgrade",
    "tar.before-upgrade": "--before-upgrade",
    # deb-only options
    "deb.ignore-iteration-in-dependencies": "--deb-ignore-iteration-in-dependencies",
    "deb.build-depends": "--deb-build-depends",
    "deb.pre-depends": "--deb-pre-depends",
    "deb.compression": "--deb-compression",
    "deb.custom-control": "--deb-custom-control",
    "deb.config": "--deb-config",
    "deb.templates": "--deb-templates",
    "deb.installed-size": "--deb-installed-size",
    "deb.priority": "--deb-priority",
    "deb.use-file-permissions": "--deb-use-file-permissions",
    "deb.user": "--deb-user",
    "deb.group": "--deb-group",
    "deb.changelog": "--deb-changelog",
    "deb.recommends": "--deb-recommends",
    "deb.suggests": "--deb-suggests",
    "deb.meta-file": "--deb-meta-file",
    "deb.interest": "--deb-interest",
    "deb.activate": "--deb-activate",
    "deb.field": "--deb-field",
    "deb.shlibs": "--deb-shlibs",
    "deb.init": "--deb-init",
    "deb.default": "--deb-default",
    "deb.upstart": "--deb-upstart",
    # rpm-only options
    "rpm.use-file-permissions": "--rpm-use-file-permissions",
    "rpm.sign": "--rpm-sign",
    "rpm.auto-add-directories": "--rpm-auto-add-directories",
    "rpm.autoreqprov": "--rpm-autoreqprov",
    "rpm.autoreq": "--rpm-autoreq",
    "rpm.autoprov": "--rpm-autoprov",
    "rpm.ignore-iteration-in-dependencies": "--rpm-ignore-iteration-in-dependencies",
    "rpm.verbatim-gem-dependencies": "--rpm-verbatim-gem-dependencies",
    "rpm.user": "--rpm-user",
    "rpm.group": "--rpm-group",
    "rpm.defattrfile": "--rpm-defattrfile",
    "rpm.defattrdir": "--rpm-defattrdir",
    "rpm.rpmbuild-define": "--rpm-rpmbuild-define",
    "rpm.digest": "--rpm-digest",
    "rpm.compression": "--rpm-compression",
    "rpm.os": "--rpm-os",
    "rpm.changelog": "--rpm-changelog",
    "rpm.auto-add-exclude-directories": "--rpm-auto-add-exclude-directories",
    "rpm.attr": "--rpm-attr",
    "rpm.init": "--rpm-init",
    "rpm.filter-from-provides": "--rpm-filter-from-provides",
    "rpm.filter-from-requires": "--rpm-filter-from-requires",
    "rpm.verifyscript": "--rpm-verifyscript",
    "rpm.pretrans": "--rpm-pretrans",
    "rpm.posttrans": "--rpm-posttrans",
    "rpm.trigger-before-install": "--rpm-trigger-before-install",
    "rpm.trigger-after-install": "--rpm-trigger-after-install",
    "rpm.trigger-before-uninstall": "--rpm-trigger-before-uninstall",
    "rpm.trigger-after-target-uninstall": "--rpm-trigger-after-target-uninstall",
}
class Process:
    """A supervised external process: a category label plus the shell
    command line used to launch it."""

    def __init__(self, category, command_line):
        self.category = category
        self.command_line = command_line
        # shlex honours quoting, so the binary is the first real token
        self.binary = shlex.split(command_line)[0]

    def __repr__(self):
        # added for debuggability; does not change any existing behaviour
        return "%s(%r, %r)" % (type(self).__name__, self.category, self.command_line)
class CreatedFilesContext:
    """Watch created files in a given directory during some actions.

    Use as a context manager: on exit, ``new_files`` maps the full path of
    every file created (or modified) inside ``watched_dir`` to its bare
    filename, and ``single_created_file`` holds that one ``(path, filename)``
    pair when exactly one file appeared, else ``None``.
    """

    def __init__(self, watched_dir):
        self.watched_dir = watched_dir
        self.initial_files = None  # {filename: mtime} snapshot taken on enter
        self.new_files = {}  # {path: filename} for files that appeared
        self.single_created_file = None  # (path, filename) iff exactly one

    def __enter__(self):
        self.initial_files = self.get_dist_files()
        return self

    def get_dist_files(self):
        """Return a {filename: mtime} snapshot of the watched directory."""
        if not os.path.isdir(self.watched_dir):
            return {}
        return {
            x: os.stat(os.path.join(self.watched_dir, x)).st_mtime
            for x in os.listdir(self.watched_dir)
        }

    def __exit__(self, exc_type, exc_val, exc_tb):
        # a file counts as new if it is absent from the snapshot (get -> 0.0)
        # or its mtime moved forward since __enter__
        for dist_filename, mtime in self.get_dist_files().items():
            if mtime > self.initial_files.get(dist_filename, 0.0):
                dist_path = os.path.join(self.watched_dir, dist_filename)
                self.new_files[dist_path] = dist_filename
        # Bug fix: the previous per-file toggle reported a "single" file again
        # for every odd-numbered creation (e.g. 3 new files -> the third one
        # was returned as the single file). Decide once, after the full scan.
        if len(self.new_files) == 1:
            self.single_created_file = next(iter(self.new_files.items()))
class Command(TemplatedBaseCommand):
"""Create a complete Debian package using a Vagrant box. You must build a different package for each distrib."""
default_written_files_locations = [
("djangofloor", "djangofloor/packaging"),
(settings.DF_MODULE_NAME, "%s/packaging" % settings.DF_MODULE_NAME),
]
packaging_config_files = ["dev/config-packaging.ini"]
available_distributions = {
"ubuntu/precise64": "deb",
"ubuntu/trusty64": "deb",
"ubuntu/wily64": "deb",
"ubuntu/xenial64": "deb",
"ubuntu/yakkety64": "deb",
"ubuntu/zesty64": "deb",
"ubuntu/artful64": "deb",
"debian/wheezy64": "deb",
"debian/jessie64": "deb",
"debian/stretch64": "deb",
"centos/7": "rpm",
"fedora/25-cloud-base": "rpm",
}
BUILD_PACKAGE = 1
SHOW_CONFIG = 2
DO_NOT_DESTROY_VAGRANT = 4
RUN_PACKAGE_AFTER_BUILD = 8
hooks_section = "global"
processes_section = "processes"
    def __init__(self, stdout=None, stderr=None, no_color=False):
        """Initialize empty/default state; real values are filled by `load_options`."""
        super().__init__(stdout=stdout, stderr=stderr, no_color=no_color)
        self.build_dir = None  # absolute path of the temporary build tree
        self.dist_dir = None  # absolute path receiving the final package
        # Hook points keyed by name; values are dotted import paths read from
        # the config file in load_options (None = no hook). Each pre/post pair
        # brackets one build step — see execute_hook.
        self.hooks = {
            "pre_prepare_vagrant_box": None,
            "post_prepare_vagrant_box": None,
            "pre_install_project": None,
            "post_install_project": None,
            "pre_install_config": None,
            "post_install_config": None,
            "pre_install_python": None,
            "post_install_python": None,
            "pre_build_package": None,
            "post_build_package": None,
            "pre_run_package": None,
            "post_run_package": None,
            "pre_install_dependencies": None,
            "post_install_dependencies": None,
            "pre_destroy_vagrant_box": None,
            "post_destroy_vagrant_box": None,
        }
        self.force_mode = False  # True when --clean was passed
        self.custom_config_filename = None  # value of --config (may stay None)
        self.default_setting_merger = None  # built via get_merger in load_options
        self.template_context = {}  # context passed to every rendered template
        self.verbose_mode = False  # True when verbosity > 1
        self.source_dir = "."  # project source dir (--source-dir)
        self.vagrant_distrib = "ubuntu/artful64"  # Vagrant box name (--distrib)
        self.written_files_locations = []  # (module, folder) template locations
        self.processes = {}  # process definitions (cf. processes_section)
        self.run_options = 0  # bitmask of BUILD_PACKAGE/SHOW_CONFIG/... flags
        self.action = self.BUILD_PACKAGE
    def add_arguments(self, parser):
        """Declare the command-line options of the packaging command."""
        super().add_arguments(parser)
        assert isinstance(parser, ArgumentParser)
        parser.add_argument("--build-dir", default="./build")
        parser.add_argument("--dist-dir", default="./dist")
        parser.add_argument(
            "-C",
            "--config",
            help="Config file for FPM packaging and default config file",
            default=None,
        )
        # --show-config switches the command from building to printing a
        # reference config (see load_options / handle).
        parser.add_argument(
            "--show-config",
            help="Display a most complete configuration file, displaying "
            "all available options.",
            action="store_true",
            default=False,
        )
        parser.add_argument(
            "--source-dir",
            default=".",
            help="Path of your project source. "
            '"." by default, expecting that you run this command '
            "from the source dir.",
        )
        parser.add_argument(
            "--clean", help="Remove temporary dirs", action="store_true", default=False
        )
        # Only boxes listed in available_distributions are accepted.
        parser.add_argument(
            "--distrib",
            default=self.vagrant_distrib,
            choices=tuple(self.available_distributions),
        )
        parser.add_argument(
            "--no-destroy",
            default=False,
            action="store_true",
            help="Do not destroy the Vagrant virtual machine",
        )
        parser.add_argument(
            "--run-package",
            default=False,
            action="store_true",
            help="Do not destroy the Vagrant virtual machine, install package and run processes",
        )
        # Repeatable "module:folder" pairs; parsed in load_options.
        parser.add_argument(
            "--include",
            default=[],
            action="append",
            help="Where to search templates and static files.\n"
            ' If not used, use "--include djangofloor:djangofloor/packages '
            '--include %s:%s/packages".\n\n'
            'Syntax: "dotted.module.path:root/folder". '
            '\nCan be used multiple times, root/folder must be a subfolder of the "templates" '
            "folder." % (settings.DF_MODULE_NAME, settings.DF_MODULE_NAME),
        )
        parser.add_argument(
            "--extra-context",
            nargs="*",
            help="Extra variable for the template system "
            "(--extra-context=NAME:VALUE)",
            default=[],
        )
        parser.description = """Create a self-contained package (deb, tar or rpm) for the project.
The package is created in a Vagrant box.
"""
    def load_options(self, options):
        """Copy parsed command-line options and config-file values onto self."""
        self.build_dir = os.path.abspath(options["build_dir"])
        self.dist_dir = os.path.abspath(options["dist_dir"])
        self.source_dir = options["source_dir"]  # project source
        self.verbose_mode = options["verbosity"] > 1
        self.force_mode = options["clean"]
        self.vagrant_distrib = options["distrib"]
        self.custom_config_filename = options["config"]
        # Fold the boolean flags into the run_options bitmask; BUILD_PACKAGE
        # is the default action unless --show-config was requested.
        if options["no_destroy"]:
            self.run_options |= self.DO_NOT_DESTROY_VAGRANT
        if options["run_package"]:
            self.run_options |= self.RUN_PACKAGE_AFTER_BUILD
        if options["show_config"]:
            self.run_options |= self.SHOW_CONFIG
        else:
            self.run_options |= self.BUILD_PACKAGE
        # Pick up hook dotted paths from the config's [global] section
        # (hooks_section); only keys already present in self.hooks are read.
        parser = self.get_config_parser()
        for hook_name in self.hooks:
            if parser.has_option(self.hooks_section, hook_name):
                self.hooks[hook_name] = parser.get(self.hooks_section, hook_name)
        self.default_setting_merger = self.get_merger(
            [options["config"]] if options["config"] else []
        )
        self.template_context = self.get_template_context(
            self.default_setting_merger, options["extra_context"]
        )
        # Parse "--include module:folder" values; malformed entries are
        # reported and skipped, defaults apply when none remain.
        for value in options["include"]:
            module_name, sep, folder_name = value.partition(":")
            if sep != ":":
                self.stderr.write('Invalid "include" value: %s' % value)
                continue
            self.written_files_locations.append((module_name, folder_name))
        if not self.written_files_locations:
            self.written_files_locations = self.default_written_files_locations
    def handle(self, *args, **options):
        """Entry point: drive the full build inside a Vagrant box.

        A failing subprocess (CalledProcessError) aborts the remaining steps
        silently; in every case the `finally` clause runs destroy_vagrant_box,
        which itself honours --no-destroy/--run-package.
        """
        self.load_options(options)
        self.prepare_vagrant_box()
        try:
            self.install_python()
            self.install_project(self.available_distributions[self.vagrant_distrib])
            self.install_dependencies()
            # BUILD_PACKAGE and SHOW_CONFIG are mutually exclusive actions
            # (see load_options); RUN_PACKAGE_AFTER_BUILD may combine with either.
            if self.run_options & self.BUILD_PACKAGE:
                self.install_config()
                self.build_package(self.available_distributions[self.vagrant_distrib])
            elif self.run_options & self.SHOW_CONFIG:
                self.show_config(self.available_distributions[self.vagrant_distrib])
            if self.run_options & self.RUN_PACKAGE_AFTER_BUILD:
                self.run_package()
        except CalledProcessError:
            pass
        finally:
            self.destroy_vagrant_box()
    # ---- path helpers -------------------------------------------------
    # `host_*` values are paths on the host machine (under build_dir);
    # `vagrant_*` values are the matching paths inside the Vagrant box.
    # Host dirs go through ensure_dir (which presumably creates them on
    # first access — confirm in its definition).
    @cached_property
    def host_install_dir(self):
        return ensure_dir(os.path.join(self.build_dir, "opt"), parent=False)
    @cached_property
    def host_tmp_dir(self):
        # shared with the box via bind_dirs
        return ensure_dir(os.path.join(self.build_dir, "tmp"), parent=False)
    @cached_property
    def host_package_dir(self):
        # package payload root, shared with the box via bind_dirs
        return ensure_dir(os.path.join(self.build_dir, "pkg"), parent=False)
    @cached_property
    def host_fpm_project_config_filename(
        self,
    ):  # written by 'get_project_info.py' from the Vagrant box
        return os.path.join(self.host_tmp_dir, "fpm-project.ini")
    @cached_property
    def host_fpm_default_config_filename(self):
        return os.path.join(self.host_tmp_dir, "fpm-default.ini")
    @cached_property
    def host_fpm_custom_config_filename(self):
        return os.path.join(self.host_tmp_dir, "fpm-custom.ini")
    @cached_property
    def vagrant_box_dir(self):
        # where the generated Vagrantfile is written and `vagrant` is run
        return ensure_dir(os.path.join(self.build_dir, "vagrant"), parent=False)
    @cached_property
    def vagrant_install_dir(self):
        return os.path.join("/opt", settings.DF_MODULE_NAME)
    @cached_property
    def vagrant_package_dir(self):
        return "/pkg"
    @cached_property
    def vagrant_fpm_project_config_filename(self):
        return os.path.join(self.vagrant_tmp_dir, "fpm-project.ini")
    @cached_property
    def vagrant_fpm_custom_config_filename(self):
        return os.path.join(self.vagrant_tmp_dir, "fpm-custom.ini")
    @cached_property
    def vagrant_fpm_default_config_filename(self):
        return os.path.join(self.vagrant_tmp_dir, "fpm-default.ini")
    @cached_property
    def vagrant_tmp_dir(self):
        return os.path.join("/tmp", settings.DF_MODULE_NAME)
    @cached_property
    def bind_dirs(self):
        # (host path, box path) pairs shared between the host and the box
        return [
            (self.host_tmp_dir, self.vagrant_tmp_dir),
            (self.host_package_dir, self.vagrant_package_dir),
        ]
def execute_hook(self, hook_name):
if not self.hooks[hook_name]:
return False
self.stdout.write(
self.style.NOTICE(
"executing %s hook [%s]…" % (hook_name, self.hooks[hook_name])
)
)
func = import_string(self.hooks[hook_name])
func(self)
return True
    def prepare_vagrant_box(self):
        """Render the Vagrantfile template and boot the box with `vagrant up`."""
        self.execute_hook("pre_prepare_vagrant_box")
        # noinspection PyUnresolvedReferences
        vagrant_content = render_to_string(
            "djangofloor/vagrant/Vagrantfile", self.template_context
        )
        with open(os.path.join(self.vagrant_box_dir, "Vagrantfile"), "w") as fd:
            fd.write(vagrant_content)
        # blocks until the box is up; CalledProcessError propagates to handle()
        subprocess.check_call(["vagrant", "up"], cwd=self.vagrant_box_dir)
        self.execute_hook("post_prepare_vagrant_box")
    def destroy_vagrant_box(self):
        """Destroy the Vagrant box, unless the user asked to keep it.

        Both --no-destroy and --run-package imply keeping the box alive.
        """
        if self.run_options & (
            self.DO_NOT_DESTROY_VAGRANT | self.RUN_PACKAGE_AFTER_BUILD
        ):
            return
        self.execute_hook("pre_destroy_vagrant_box")
        subprocess.check_call(
            ["vagrant", "destroy", "--force"], cwd=self.vagrant_box_dir
        )
        self.execute_hook("post_destroy_vagrant_box")
    def install_python(self):
        """Run install_python.sh inside the box (between its pre/post hooks)."""
        self.stdout.write(self.style.SUCCESS("installing Python…"))
        self.execute_hook("pre_install_python")
        self.copy_vagrant_script("djangofloor/vagrant/install_python.sh")
        self.execute_hook("post_install_python")
    def install_project(self, package_type):
        """Build an sdist of the project and install it inside the box.

        `package_type` ("deb" or "rpm", from available_distributions) is
        forwarded to update_template_context once project info is collected.
        Raises ValueError when `setup.py sdist` does not produce exactly one
        new file under ./dist.
        """
        self.stdout.write(self.style.SUCCESS("creating dist file…"))
        self.execute_hook("pre_install_project")
        # Watch ./dist to identify the file created by sdist.
        with CreatedFilesContext(os.path.join(self.source_dir, "dist")) as ctx:
            subprocess.check_call(["python3", "setup.py", "sdist"], cwd=self.source_dir)
        if ctx.single_created_file is None:
            raise ValueError("unable to create source dist file")
        (dist_path, dist_filename) = ctx.single_created_file
        # host_tmp_dir is shared with the box (see bind_dirs)
        shutil.copy2(dist_path, os.path.join(self.host_tmp_dir, dist_filename))
        self.stdout.write(self.style.SUCCESS("installing source file…"))
        self.copy_vagrant_script(
            "djangofloor/vagrant/install_project.sh", {"dist_filename": dist_filename}
        )
        # fpm-default.ini is only copied (execute=False), not run
        self.copy_vagrant_script(
            "djangofloor/vagrant/fpm-default.ini",
            execute=False,
            host_filename=self.host_fpm_default_config_filename,
            vagrant_filename=self.vagrant_fpm_default_config_filename,
        )
        self.copy_vagrant_script("djangofloor/vagrant/get_project_info.py")
        self.update_template_context(package_type)
        self.execute_hook("post_install_project")
    def install_dependencies(self):
        """Run install_dependencies.sh inside the box (between its pre/post hooks)."""
        self.stdout.write(self.style.SUCCESS("installing extra Python dependencies…"))
        self.execute_hook("pre_install_dependencies")
        self.copy_vagrant_script("djangofloor/vagrant/install_dependencies.sh")
        self.execute_hook("post_install_dependencies")
    def install_config(self):
        """Generate the package payload under host_package_dir.

        Writes every templated/static file declared in
        written_files_locations, one systemd unit for the HTTP worker, and
        one systemd unit per expected queue (get_expected_queues).
        """
        self.stdout.write(self.style.SUCCESS("installing static files…"))
        self.execute_hook("pre_install_config")
        writers = self.get_file_writers(
            self.written_files_locations, context=self.template_context
        )
        for target_filename in sorted(writers):  # fix the writing order
            writer = writers[target_filename]
            writer.write(
                self.host_package_dir,
                self.template_context,
                dry_mode=False,
                verbose_mode=self.verbose_mode,
            )
        # HTTP worker unit
        # noinspection PyUnresolvedReferences
        script_content = render_to_string(
            "djangofloor/vagrant/systemd-web.service", self.template_context
        )
        # noinspection PyStringFormat
        filename = os.path.join(
            self.host_package_dir,
            "etc",
            "systemd",
            "system",
            "%(DF_MODULE_NAME)s-HTTP-worker.service" % self.template_context,
        )
        ensure_dir(filename, parent=True)
        with open(filename, "w") as fd:
            fd.write(script_content)
        # One unit per queue; the context is copied so the extra "queue" key
        # does not leak into self.template_context.
        local_template_context = {}
        local_template_context.update(self.template_context)
        for queue in get_expected_queues():
            local_template_context["queue"] = queue
            # noinspection PyUnresolvedReferences
            script_content = render_to_string(
                "djangofloor/vagrant/systemd-worker.service", local_template_context
            )
            # noinspection PyStringFormat
            filename = os.path.join(
                self.host_package_dir,
                "etc",
                "systemd",
                "system",
                "%(DF_MODULE_NAME)s-%(queue)s.service" % local_template_context,
            )
            # NOTE(review): no ensure_dir here — relies on the directory having
            # been created for the HTTP worker unit above.
            with open(filename, "w") as fd:
                fd.write(script_content)
        self.copy_vagrant_script("djangofloor/vagrant/configure_project.sh")
        self.execute_hook("post_install_config")
    def run_package(self):
        """Run the created package inside the box via run_package.sh."""
        self.execute_hook("pre_run_package")
        self.stdout.write(self.style.SUCCESS("run the created package…"))
        self.copy_vagrant_script("djangofloor/vagrant/run_package.sh")
        self.execute_hook("post_run_package")
def build_package(self, package_type):
self.execute_hook("pre_build_package")
self.stdout.write(self.style.SUCCESS("building %s package…") % package_type)
cmd = self.get_fpm_command_line(package_type)
with open(os.path.join(self.host_tmp_dir, "fpm.json"), "w") as | |
print("set rotation speed: {} deg/sec".format(rs))
@stage_decorator(list(detectors) + motor)
@bpp.monitor_during_decorator([zps.pi_r])
@run_decorator(md=_md)
def fly_inner_scan():
# close shutter, dark images: numer=chunk_size (e.g.20)
print("\nshutter closed, taking dark images...")
yield from _set_andor_param(
exposure_time=exposure_time, period=period, chunk_size=20
)
yield from _take_dark_image(detectors, motor, num_dark=1, simu=simu)
yield from bps.sleep(1)
yield from _set_andor_param(
exposure_time=exposure_time, period=period, chunk_size=chunk_size
)
# open shutter, tomo_images
yield from _open_shutter(simu=simu)
print("\nshutter opened, taking tomo images...")
yield from mv(zps.pi_r, current_rot_angle + offset_angle)
status = yield from abs_set(zps.pi_r, target_rot_angle, wait=False)
yield from bps.sleep(1)
while not status.done:
yield from trigger_and_read(list(detectors) + motor)
# bkg images
print("\nTaking background images...")
yield from _set_rotation_speed(rs=30)
# yield from abs_set(zps.pi_r.velocity, rs)
for flt in filters:
yield from mv(flt, 1)
yield from mv(flt, 1)
yield from bps.sleep(1)
yield from _set_andor_param(
exposure_time=exposure_time, period=period, chunk_size=20
)
yield from _take_bkg_image(
motor_x_out,
motor_y_out,
motor_z_out,
motor_r_out,
detectors,
motor,
num_bkg=1,
simu=False,
traditional_sequence_flag=traditional_sequence_flag,
)
yield from _close_shutter(simu=simu)
yield from _move_sample_in(
motor_x_ini,
motor_y_ini,
motor_z_ini,
motor_r_ini,
trans_first_flag=traditional_sequence_flag,
)
for flt in filters:
yield from mv(flt, 0)
uid = yield from fly_inner_scan()
yield from mv(Andor.cam.image_mode, 1)
print("scan finished")
txt = get_scan_parameter(print_flag=0)
insert_text(txt)
print(txt)
return uid
def user_fly_only(
    exposure_time=0.1,
    end_rot_angle=180,
    period=0.15,
    chunk_size=20,
    rs=1,
    note="",
    simu=False,
    dark_scan_id=0,
    bkg_scan_id=0,
    md=None,
):
    """Fly scan only: rotate zps.pi_r to `end_rot_angle` while acquiring.

    No dark or background frames are taken here; `dark_scan_id` and
    `bkg_scan_id` only record, in the run metadata, which separate scans hold
    the associated dark/background images.
    Inputs:
    -------
    exposure_time: float, in unit of sec
    end_rot_angle: float, absolute target angle (deg) for zps.pi_r
    period: float, camera acquisition period, in unit of sec
    chunk_size: int, number of images per Andor trigger
    rs: float, rotation speed in deg/sec
    note: string, note saved with the scan
    simu: Bool, True simulates shutter open/close (shutter not used here,
        but forwarded to _open_shutter)
    dark_scan_id, bkg_scan_id: ids of associated dark/bkg scans (metadata only)
    md: dict or None, extra metadata merged into the run
    Returns the run uid.
    """
    global ZONE_PLATE
    # NOTE(review): the *_ini values and current_rot_angle are captured but
    # unused in this plan.
    motor_x_ini = zps.sx.position
    motor_y_ini = zps.sy.position
    motor_z_ini = zps.sz.position
    motor_r_ini = zps.pi_r.position
    motor = [zps.sx, zps.sy, zps.sz, zps.pi_r]
    detectors = [Andor, ic3]
    # offset_angle = 0 #-0.5 * rs * np.sign(relative_rot_angle)
    current_rot_angle = zps.pi_r.position
    target_rot_angle = end_rot_angle
    _md = {
        "detectors": ["Andor"],
        "motors": [mot.name for mot in motor],
        "XEng": XEng.position,
        "ion_chamber": ic3.name,
        "plan_args": {
            "exposure_time": exposure_time,
            "end_rot_angle": end_rot_angle,
            "period": period,
            "chunk_size": chunk_size,
            "rs": rs,
            "note": note if note else "None",
            "zone_plate": ZONE_PLATE,
            "dark_scan_id": dark_scan_id,
            "bkg_scan_id": bkg_scan_id,
        },
        "plan_name": "user_fly_only",
        "chunk_size": chunk_size,
        "plan_pattern": "linspace",
        "plan_pattern_module": "numpy",
        "hints": {},
        "operator": "FXI",
        "note": note if note else "None",
        "zone_plate": ZONE_PLATE,
        #'motor_pos': wh_pos(print_on_screen=0),
    }
    _md.update(md or {})
    # expose the rotation axis as the primary dimension hint when available
    try:
        dimensions = [(zps.pi_r.hints["fields"], "primary")]
    except (AttributeError, KeyError):
        pass
    else:
        _md["hints"].setdefault("dimensions", dimensions)
    yield from _set_andor_param(
        exposure_time=exposure_time, period=period, chunk_size=chunk_size
    )
    yield from _set_rotation_speed(rs=rs)
    print("set rotation speed: {} deg/sec".format(rs))
    @stage_decorator(list(detectors) + motor)
    @bpp.monitor_during_decorator([zps.pi_r])
    @run_decorator(md=_md)
    def fly_inner_scan():
        yield from _open_shutter(simu=simu)
        # start the rotation without waiting, then keep triggering/reading
        # the detectors until the move completes
        status = yield from abs_set(zps.pi_r, target_rot_angle, wait=False)
        while not status.done:
            yield from trigger_and_read(list(detectors) + motor)
    uid = yield from fly_inner_scan()
    yield from mv(Andor.cam.image_mode, 1)
    print("scan finished")
    # yield from _set_rotation_speed(rs=30)
    txt = get_scan_parameter(print_flag=0)
    insert_text(txt)
    print(txt)
    return uid
def user_dark_only(exposure_time=0.1, chunk_size=20, note="", simu=False, md=None):
    """
    Take dark field images.
    Inputs:
    -------
    exposure_time: float, in unit of sec
        also used as the camera period for these dark frames
    chunk_size: int, default setting is 20
        number of images taken for each trigger of Andor camera
    note: string
        adding note to the scan
    simu: Bool, default is False
        True: will simulate closing/open shutter without really closing/opening
        False: will really close/open shutter
    md: dict or None
        extra metadata merged into the run
    Returns the run uid.
    """
    global ZONE_PLATE
    period = exposure_time  # default to exposure time for backgrounds
    detectors = [Andor, ic3]
    motor = []  # no motors are moved for a dark scan
    _md = {
        "detectors": ["Andor"],
        "XEng": XEng.position,
        "ion_chamber": ic3.name,
        "plan_args": {
            "exposure_time": exposure_time,
            "chunk_size": chunk_size,
            "note": note if note else "None",
            "zone_plate": ZONE_PLATE,
        },
        "plan_name": "user_dark_only",
        "chunk_size": chunk_size,
        "plan_pattern": "linspace",
        "plan_pattern_module": "numpy",
        "hints": {},
        "operator": "FXI",
        "note": note if note else "None",
        "zone_plate": ZONE_PLATE,
        #'motor_pos': wh_pos(print_on_screen=0),
    }
    _md.update(md or {})
    # expose the rotation axis as the primary dimension hint when available
    try:
        dimensions = [(zps.pi_r.hints["fields"], "primary")]
    except (AttributeError, KeyError):
        pass
    else:
        _md["hints"].setdefault("dimensions", dimensions)
    yield from _set_andor_param(
        exposure_time=exposure_time, period=period, chunk_size=chunk_size
    )
    @stage_decorator(list(detectors) + motor)
    @run_decorator(md=_md)
    def inner_scan():
        # camera parameters are (re)applied inside the run as well
        yield from _set_andor_param(
            exposure_time=exposure_time, period=period, chunk_size=chunk_size
        )
        yield from _take_dark_image(detectors, motor, num_dark=1, simu=simu)
    uid = yield from inner_scan()
    yield from mv(Andor.cam.image_mode, 1)
    print("dark finished")
    txt = get_scan_parameter(print_flag=0)
    insert_text(txt)
    print(txt)
    return uid
def user_bkg_only(
    exposure_time=0.1,
    chunk_size=20,
    out_x=None,
    out_y=2000,
    out_z=None,
    out_r=None,
    note="",
    simu=False,
    relative_move_flag=1,
    traditional_sequence_flag=1,
    md=None,
):
    """
    Move sample out of the way and take background (aka flat) images.
    Inputs:
    -------
    exposure_time: float, in unit of sec
        also used as the camera period for these background frames
    chunk_size: int, default setting is 20
        number of images taken for each trigger of Andor camera
    out_x: float, default is None (axis not moved)
        movement of sample in "x" direction using zps.sx to move out sample (in unit of um);
        relative when relative_move_flag is set, absolute otherwise
        NOTE: BE CAUSION THAT IT WILL ROTATE SAMPLE BY "out_r" FIRST, AND THEN MOVE X, Y, Z
    out_y: float, default is 2000
        movement of sample in "y" direction using zps.sy to move out sample (in unit of um)
        NOTE: BE CAUSION THAT IT WILL ROTATE SAMPLE BY "out_r" FIRST, AND THEN MOVE X, Y, Z
    out_z: float, default is None (axis not moved)
        movement of sample in "z" direction using zps.sz to move out sample (in unit of um)
        NOTE: BE CAUSION THAT IT WILL ROTATE SAMPLE BY "out_r" FIRST, AND THEN MOVE X, Y, Z
    out_r: float, default is None (axis not moved)
        movement of sample by rotating "out_r" degrees, using zps.pi_r to move out sample
        NOTE: BE CAUSION THAT IT WILL ROTATE SAMPLE BY "out_r" FIRST, AND THEN MOVE X, Y, Z
    note: string
        adding note to the scan
    simu: Bool, default is False
        True: will simulate closing/open shutter without really closing/opening
        False: will really close/open shutter
    md: dict or None
        extra metadata merged into the run
    Returns the run uid.
    """
    global ZONE_PLATE
    period = exposure_time  # default to exposure time for backgrounds
    motor_x_ini = zps.sx.position
    motor_y_ini = zps.sy.position
    motor_z_ini = zps.sz.position
    motor_r_ini = zps.pi_r.position
    # Explicit None tests so that 0 is a valid out position; the previous
    # truthiness tests (`if out_x`) silently ignored a requested 0.
    if relative_move_flag:
        motor_x_out = motor_x_ini + out_x if out_x is not None else motor_x_ini
        motor_y_out = motor_y_ini + out_y if out_y is not None else motor_y_ini
        motor_z_out = motor_z_ini + out_z if out_z is not None else motor_z_ini
        motor_r_out = motor_r_ini + out_r if out_r is not None else motor_r_ini
    else:
        motor_x_out = out_x if out_x is not None else motor_x_ini
        motor_y_out = out_y if out_y is not None else motor_y_ini
        motor_z_out = out_z if out_z is not None else motor_z_ini
        motor_r_out = out_r if out_r is not None else motor_r_ini
    motor = [zps.sx, zps.sy, zps.sz, zps.pi_r]
    detectors = [Andor, ic3]
    _md = {
        "detectors": ["Andor"],
        "motors": [mot.name for mot in motor],
        "XEng": XEng.position,
        "ion_chamber": ic3.name,
        "plan_args": {
            "exposure_time": exposure_time,
            "chunk_size": chunk_size,
            "out_x": out_x,
            "out_y": out_y,
            "out_z": out_z,
            "out_r": out_r,
            "relative_move_flag": relative_move_flag,
            "traditional_sequence_flag": traditional_sequence_flag,
            "note": note if note else "None",
            "zone_plate": ZONE_PLATE,
        },
        "plan_name": "user_bkg_only",
        "chunk_size": chunk_size,
        "plan_pattern": "linspace",
        "plan_pattern_module": "numpy",
        "hints": {},
        "operator": "FXI",
        "note": note if note else "None",
        "zone_plate": ZONE_PLATE,
        #'motor_pos': wh_pos(print_on_screen=0),
    }
    _md.update(md or {})
    # expose the rotation axis as the primary dimension hint when available
    try:
        dimensions = [(zps.pi_r.hints["fields"], "primary")]
    except (AttributeError, KeyError):
        pass
    else:
        _md["hints"].setdefault("dimensions", dimensions)
    # yield from _set_andor_param(exposure_time=exposure_time, period=period, chunk_size=chunk_size)
    @stage_decorator(list(detectors) + motor)
    @bpp.monitor_during_decorator([zps.pi_r])
    @run_decorator(md=_md)
    def fly_inner_scan():
        yield from _open_shutter(simu=simu)
        # bkg images
        print("\nTaking background images...")
        yield from _set_rotation_speed(rs=30)
        yield from _take_bkg_image(
            motor_x_out,
            motor_y_out,
            motor_z_out,
            motor_r_out,
            detectors,
            motor,
            num_bkg=1,
            simu=simu,  # fix: was hard-coded False, ignoring the simu flag
            traditional_sequence_flag=traditional_sequence_flag,
        )
        # restore the initial sample position
        yield from _move_sample_in(
            motor_x_ini,
            motor_y_ini,
            motor_z_ini,
            motor_r_ini,
            trans_first_flag=traditional_sequence_flag,
        )
    uid = yield from fly_inner_scan()
    yield from mv(Andor.cam.image_mode, 1)
    print("bkg finished")
    txt = get_scan_parameter(print_flag=0)
    insert_text(txt)
    print(txt)
    return uid
def user_multiple_fly_scans(
    xyz_list,
    bkg_every_x_scans=10,
    exposure_time=0.1,
    angle=70,
    period=0.15,
    chunk_size=20,
    out_x=None,
    out_y=None,
    out_z=None,
    out_r=None,
    rs=1,
    note="",
    simu=False,
    relative_move_flag=0,
    traditional_sequence_flag=1,
    md=None,
):
    """Run a rocker fly scan at every (x, y, z) position in `xyz_list`.

    One dark scan is taken up front; a fresh background scan is taken before
    the first position and then every `bkg_every_x_scans` positions. At each
    position the rotation target alternates between +angle and -angle
    ("rocker" scan). A failed tomo scan is retried until it succeeds.
    """
    # dark field first; its scan id is recorded in every tomo scan's metadata
    dark_scan_id = yield from user_dark_only(exposure_time, chunk_size, note, simu, md)
    # open shutter for rest of data taking
    yield from _open_shutter(simu=simu)
    print("\nshutter opened")
    bkg_index = 0
    bkg_scan_id = None
    scan_id = None  # fix: avoid NameError in the final print when xyz_list is empty
    for i, pos in enumerate(xyz_list):
        x, y, z = pos
        if i == 0 or bkg_index + bkg_every_x_scans <= i:
            # take a fresh background (flat-field) scan
            bkg_scan_id = yield from user_bkg_only(
                exposure_time,
                chunk_size,
                out_x,
                out_y,
                out_z,
                out_r,
                note,
                simu,
                relative_move_flag,
                traditional_sequence_flag,
                md,
            )
            bkg_index = i
        # mv x, y, z, r position
        yield from mv(zps.sx, x, zps.sy, y, zps.sz, z, zps.pi_r, angle)
        # take tomo
        angle *= -1  # rocker scan, switch angle back and forth
        while True:
            try:
                scan_id = yield from user_fly_only(
                    exposure_time,
                    angle,
                    period,
                    chunk_size,
                    rs,
                    note,
                    simu,
                    dark_scan_id,
                    bkg_scan_id,
                    md,
                )
                break
            except Exception as e:
                # NOTE(review): retries forever on a persistent failure;
                # consider bounding the number of retries.
                print(e)
    print("Finished scans %s - %s" % (dark_scan_id, scan_id))
def user_mosaic_gen(x_start, x_stop, x_step, y_start, y_stop, y_step, z_pos):
    """Build a raster-ordered list of (x, y, z_pos) mosaic positions.

    Both stops are inclusive (each range extends one step past its stop);
    all grid parameters must be integers.
    """
    ys = range(y_start, y_stop + y_step, y_step)
    xs = range(x_start, x_stop + x_step, x_step)
    return [(x, y, z_pos) for y in ys for x in xs]
def user_hex_mosaic_xyz(
    x_start, x_stop, x_step, x_offset, y_start, y_stop, y_step, z_pos
):
    """Build a hex-packed mosaic: a raster grid whose odd rows (1, 3, 5, ...
    counting from 0) are shifted by `x_offset` in x.

    Both stops are inclusive; all grid parameters must be integers.
    """
    positions = []
    for row, y in enumerate(range(y_start, y_stop + y_step, y_step)):
        shift = x_offset if row % 2 else 0
        positions.extend(
            (x + shift, y, z_pos) for x in range(x_start, x_stop + x_step, x_step)
        )
    return positions
def v4_z_offset(xyz_list):
# offset is only dependent on y
new_xyz_list = | |
<filename>data-hub-api/apps/cdms_api/soap/api.py
import os
import hmac
import base64
import hashlib
import datetime
import requests
import logging
from uuid import uuid4
from suds.sax.parser import Parser
from django.utils import timezone
from django.conf import settings
from django.template import Engine, Context
from django.core.exceptions import ImproperlyConfigured
from ..exceptions import ErrorResponseException, LoginErrorException, UnexpectedResponseException
logger = logging.getLogger('cmds_api.soap.api')
CDMS_EXPIRATION_TOKEN_DELTA = 5 # in mins
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
def flatten_xml_string(xml_string):
    """
    Collapse `xml_string` onto a single line, stripping the leading/trailing
    whitespace of every line. The SOAP endpoint rejects requests that keep
    the original newlines and indentation, hence the flattening.
    """
    return ''.join(map(str.strip, xml_string.split('\n')))
def render_to_string(template_name, context):
    """
    Local stand-in for Django's render_to_string: renders `template_name`
    with `context`, but searches only this package's './templates' folder.
    """
    engine = Engine(dirs=[os.path.join(BASE_DIR, 'templates')])
    return engine.get_template(template_name).render(Context(context))
def get_request_now():
    """
    Return "now" as a naive datetime carrying *local* time values.
    The SOAP endpoint expects naive timestamps with localtime values:
    e.g. with offset utc+1 and utc now 2016/03/30 0:00, it wants
    2016/03/30 1:00 without tzinfo. That's pretty ugly but ¯\_(ツ)_/¯
    So: take aware utc now, convert to local time, then strip the tzinfo.
    """
    return timezone.make_naive(timezone.localtime(timezone.now()))
class CDMSSoapAPI(object):
"""
Object used to make SOAP requests to CDMS.
At the moment it's quite basic as it's meant to be used only for authentication but it can definitely be extended
and improved.
Example usage:
api = CDMSSoapAPI(username, password)
api.get_whoami() # returns dict with user id
api.get_user(user_id) # returns data about user with id == 'user_id'
That's pretty much it.
The methods raise `ErrorResponseException` for generic errors with raw SOAP response in `e.content` or
`LoginErrorException` in case of errors with (username, password) authentication/authorization.
In theory you would try/catch LoginErrorException and return invalid creds error msg.
To use the module for determining if a user can access CDMS, you only need to do something like:
api = CDMSSoapAPI(username, password)
try:
user_ids = api.get_whoami()
user_data = api.get_user(user_ids['UserId'])
# valid username/password
except LoginErrorException:
# wrong username/password
The error triggering logic can be improved as it could really be more error specific (e.g. 404, 400 etc.).
For now, we only need to catch LoginErrorExceptions so it's good enough.
The annoying thing is that the response have status code == 500 in all cases so you have to parse
the msg and try to find the real reason (I know, right?).
Behind the scenes, the code
- uses the creds to authenticate against the ID Provider which returns a token if everything is fine
- uses that token to ask the STS for a CDMS auth token instead
- this last token can then be used to make authenticated calls to Dynamics and get back objects
The authentication data is cached so that subsequent calls don't have to re-authenticate if the token has not
expired.
"""
CRM_DATA_ENDPOINT = '{}/XRMServices/2011/Organization.svc'.format(settings.CDMS_BASE_URL)
CRM_ADFS_ENDPOINT = '{}/13/usernamemixed'.format(settings.CDMS_ADFS_URL)
CRM_RSTS_ENDPOINT = '{}/adfs/services/trust/13/IssuedTokenMixedSymmetricBasic256'.format(settings.CDMS_RSTS_URL)
def __init__(self, username, password):
if not settings.CDMS_BASE_URL or not settings.CDMS_ADFS_URL or not settings.CDMS_RSTS_URL:
raise ImproperlyConfigured(
'Please set CDMS_BASE_URL, CDMS_ADFS_URL and CDMS_RSTS_URL in your settings.'
)
self.username = username
self.password = password
self.auth_context = {}
# #### AUTH METHODS #### #
    def _generate_hmac_signature(self, binary_secret, creation_date, expiration_date):
        """
        Internal method which returns data used for the SOAP request signature.

        Returns (signature, timestamp_digest):
          - timestamp_digest: base64 SHA-1 digest of the flattened timestamp
            XML fragment built from the creation/expiration dates,
          - signature: base64 HMAC-SHA1 of the flattened SignedInfo fragment
            embedding that digest, keyed with the base64-decoded
            `binary_secret`.
        """
        timestamp = render_to_string(
            'partials/timestamp.xml', {
                'creation_date': creation_date,
                'expiration_date': expiration_date
            }
        )
        # flatten before hashing so the digest matches the request formatting
        # (see flatten_xml_string)
        timestamp = flatten_xml_string(timestamp)
        timestamp_hasher = hashlib.sha1()
        timestamp_hasher.update(timestamp.encode('utf8'))
        timestamp_digest = base64.b64encode(timestamp_hasher.digest()).decode('ascii')
        signed_info = render_to_string(
            'partials/hmac.xml', {'signature_digest': timestamp_digest}
        )
        signed_info = flatten_xml_string(signed_info)
        # binary_secret is the base64-encoded HMAC key from the proof token
        hmac_hash = hmac.new(base64.b64decode(binary_secret), digestmod=hashlib.sha1)
        hmac_hash.update(signed_info.encode('utf8'))
        hashed = base64.b64encode(hmac_hash.digest()).decode('ascii')
        return hashed, timestamp_digest
    def _extract_auth_tokens(self, resp_content):
        """
        Internal method which extracts the auth data from the content of a SOAP response, generates signatures
        and other related fields needed for the next SOAP request and returns a dict with all of them.
        """
        parser = Parser()
        doc = parser.parse(string=resp_content)
        # validity window of the *next* request: now .. now + token delta
        now = get_request_now()
        creation_date = now.isoformat()
        expiration_date_dt = (now + datetime.timedelta(minutes=CDMS_EXPIRATION_TOKEN_DELTA))
        expiration_date = expiration_date_dt.isoformat()
        # navigate the WS-Trust RequestSecurityTokenResponse structure;
        # any missing node raises AttributeError on the .text access below
        rst_resp = doc.childAtPath('Envelope/Body/RequestSecurityTokenResponseCollection/RequestSecurityTokenResponse')
        enc_data = rst_resp.childAtPath('RequestedSecurityToken/EncryptedData')
        key_identifier = rst_resp.childAtPath('RequestedAttachedReference/SecurityTokenReference/KeyIdentifier').text
        ciphertext_key = enc_data.childAtPath('KeyInfo/EncryptedKey/CipherData/CipherValue').text
        ciphertext_token = enc_data.childAtPath('CipherData/CipherValue').text
        x509_info = enc_data.childAtPath(
            'KeyInfo/EncryptedKey/KeyInfo/SecurityTokenReference/X509Data/X509IssuerSerial'
        )
        x509_issuer_name = x509_info.childAtPath('X509IssuerName').text
        x509_serial_number = x509_info.childAtPath('X509SerialNumber').text
        binary_secret = rst_resp.childAtPath('RequestedProofToken/BinarySecret').text
        # sign the new validity window with the proof token's secret
        signature, signature_digest = self._generate_hmac_signature(binary_secret, creation_date, expiration_date)
        return {
            'ciphertext_key': ciphertext_key,
            'ciphertext_token': ciphertext_token,
            'key_identifier': key_identifier,
            'creation_date': creation_date,
            'expiration_date': expiration_date,
            # kept as a datetime (not str) — presumably for expiry checks
            'expiration_date_dt': expiration_date_dt,
            'X509_issuer_name': x509_issuer_name,
            'X509_serial_number': x509_serial_number,
            'signature_digest': signature_digest,
            'signature': signature,
        }
def _make_soap_request_for_authentication(self, to_address, template, context):
"""
This is the same as `make_raw_soap_request` but it's internally used to make auth SOAP requests.
It expects requests to return 'OK' in most cases.
The only 2 cases where the requests could fail are:
- auth/authorization error: => raises LoginErrorException
- server error: => raises UnexpectedResponseException
"""
try:
return self.make_raw_soap_request(to_address, template, context)
except ErrorResponseException as e:
if e.status_code == 500:
"""
As the response is most of the time a 500 error with the 'message' being slightly different and
somewhat meanful in each case, we need to parse the xml and try to find out what
the real error is.
In this case, we are matching the exact text response (I know :-|) to see if it's an
authentication/authorization error.
"""
parser = Parser()
doc = parser.parse(string=e.content)
reason = doc.childAtPath('Envelope/Body/Fault/Reason/Text').text.lower()
if (reason == 'at least one security token in the message could not be validated.'):
# not sure 'fixing' the status code is a good idea here but to me it makes sense
raise LoginErrorException(
'Invalid credentials', status_code=400, content=e.content
)
else:
raise UnexpectedResponseException(
e, status_code=e.status_code, content=e.content
)
raise e
def _make_auth_id_provider_soap_request(self):
"""
Makes a request to the ID Provider and returns the SOAP response if it succeeds.
Raises
- LoginErrorException if username/password are invalid
- UnexpectedResponseException in case of other errors
"""
now = get_request_now()
template = 'request_id_provider_token.xml'
applies_to_address = '{}/adfs/services/trust'.format(
settings.CDMS_RSTS_URL.replace('https', 'http')
) # doesn't like https here ¯\_(ツ)_/¯
req_context = {
'creation_date': now.isoformat(),
'expiration_date': (now + datetime.timedelta(minutes=CDMS_EXPIRATION_TOKEN_DELTA)).isoformat(),
'username': self.username,
'password': <PASSWORD>,
'applies_to_address': applies_to_address
}
return self._make_soap_request_for_authentication(self.CRM_ADFS_ENDPOINT, template, req_context)
def _make_auth_RSTS_token_soap_request(self):
    """
    Gets a CDMS token by chaining two SOAP calls:
      1. ask the ID Provider for an authentication token;
      2. exchange that token with the RSTS for the CDMS token.

    Raises:
        - LoginErrorException if username/password are invalid or the user can't access CDMS
        - UnexpectedResponseException in case of other errors
    """
    idp_response = self._make_auth_id_provider_soap_request()
    # seed the RSTS template context with the tokens extracted from the IDP response
    context = self._extract_auth_tokens(idp_response.content)
    context['applies_to_address'] = settings.CDMS_BASE_URL
    return self._make_soap_request_for_authentication(
        self.CRM_RSTS_ENDPOINT, 'request_RSTS_token.xml', context
    )
# #### BASE SOAP REQUEST METHODS #### #
def make_raw_soap_request(self, to_address, template, context):
    """
    Base SOAP request helper: renders `template` with `context` (plus defaults) and
    POSTs the flattened XML to `to_address`.

    Raises a generic ErrorResponseException on any non-2xx response. Unless you know
    what you're doing, you probably want `make_authenticated_soap_request` instead;
    this is a base method and doesn't have much logic in it.
    """
    logger.debug('Calling CDMS URL {}'.format(to_address))
    # defaults any template can rely on; caller-supplied context wins on clash.
    # NOTE(review): `uuid4` is passed uncalled — presumably the template engine
    # invokes callables when rendering; confirm a fresh uuid is produced per render.
    payload_context = {
        'random_uuid': uuid4,
        'to_address': to_address,
    }
    payload_context.update(context)
    body = flatten_xml_string(render_to_string(template, payload_context))
    # HACK: verify=False disables TLS certificate validation — flagged for review.
    response = requests.post(
        to_address,
        body,
        headers={'Content-Type': 'application/soap+xml; charset=UTF-8'},
        verify=False
    )
    if not response.ok:
        logger.debug('Got CDMS error (%s): %s' % (response.status_code, response.content))
        raise ErrorResponseException(
            '{} with status code {}'.format(to_address, response.status_code),
            content=response.content.decode('utf-8'),
            status_code=response.status_code
        )
    return response
def _has_auth_context_expired(self):
    """
    Tells whether a cached authentication token exists and is still usable, to avoid
    asking for a new token on every SOAP request.

    Returns True when there is no cached context or it has (nearly) expired; an
    expired context is cleared so the next check short-circuits.
    """
    if not self.auth_context:
        return True
    # expire one minute early so we never use a token that dies mid-request
    cutoff = get_request_now() - datetime.timedelta(minutes=1)
    has_expired = cutoff >= self.auth_context['expiration_date_dt']
    if has_expired:
        self.auth_context = {}  # so that next time it's faster
    return has_expired
def make_authenticated_soap_request(self, to_address, template, context):
"""
Higher level SOAP request method used to call `to_address` using the resolved template/context as
request body.
This uses the cached authentication token or gets a new token if that doesn't exist or is expired.
It's meant to make life easier for devs that don't have to deal with the auth logic and can just
focus on the actual SOAP request they want to make.
Note: it can be made more efficient by refreshing the token instead of authenticating | |
"""
GCore/SolveIK.py
Requires:
sys
numpy
scipy
Grip
ISCV (project, cloud, )
"""
import sys
import numpy as np
import ISCV
from GCore import Character, Recon, list_of_lists_to_splits
import scipy.linalg.lapack as LAPACK
def computeChannelAffectedEffectors(jointCutOff, jointParents, jointChanSplits, effectorJoints):
    '''Returns for each channel the list of effectors that are affected (their joint is a child of the channel's joint).

    Args:
        jointCutOff (int): joint index at which the walk towards the root stops (-1 for none).
        jointParents (int[]): parent joint index per joint (-1 marks the root).
        jointChanSplits (int[]): per-joint splits into the channel list; length is 2*numJoints+1.
        effectorJoints (int[]): the joint each effector is attached to (must not be -1).
    Returns:
        int32[]: usedChannels - channels that affect at least one effector.
        (list, list): splits structure over the per-channel effector lists
            (see list_of_lists_to_splits).
    '''
    # FIX: replaced the Python2-only builtins (`xrange`, list-returning `map`) with
    # forms that behave identically under Python 2 and also run under Python 3.
    numJoints = len(jointParents)
    assert len(jointChanSplits) == numJoints * 2 + 1
    numChannels = jointChanSplits[-1]
    # for each joint, the chain of joints back to the root (or jointCutOff)
    j2pjs = [[x] for x in range(numJoints)]
    for ji, pi in enumerate(jointParents):
        if ji != jointCutOff and pi != -1:
            j2pjs[ji].extend(j2pjs[pi])
    # turn each joint chain into the flat, ordered list of channels owned by those joints
    jcs = [[di for ji in js for di in range(jointChanSplits[2 * ji], jointChanSplits[2 * ji + 2])]
           for js in j2pjs]
    channelAffectedEffectors = [[] for _ in range(numChannels)]
    for ei, pi in enumerate(effectorJoints):
        assert pi != -1
        for ci in jcs[pi]:
            channelAffectedEffectors[ci].append(ei)
    # keep only the channels that influence at least one effector
    usedChannels = np.where(np.array([len(cae) for cae in channelAffectedEffectors]) != 0)[0]
    usedChannels = np.array(list(usedChannels), dtype=np.int32)
    usedCAEs = [channelAffectedEffectors[ci] for ci in usedChannels]
    return usedChannels, list_of_lists_to_splits(usedCAEs)
def make_effectorData(skelDict, jointCutOff=-1, p_o_w=None):
    """
    Build the "effectorData" structure holding everything needed to compute positions and
    derivatives of effectors (markers) when varying channels UNDER the jointCutOff.

    Args:
        skelDict (GskelDict): the skeleton to process.
        jointCutOff (int): joint at which the parent walk stops. Default -1 (no cut-off).
        p_o_w (tuple): optional (markerParents, markerOffsets, markerWeights) override;
            when None these are read from skelDict.
    Returns:
        tuple: (effectorJoints, effectorOffsets, effectorWeights, usedChannels,
            usedChannelWeights, usedCAEs, usedCAEsSplits) where:
            - effectorJoints/effectorOffsets/effectorWeights position each effector:
              apply the joint's global matrix to the offset; the weight may control the IK.
            - usedChannels: channels lying between an effector and the jointCutOff.
            - usedChannelWeights: per-channel weights, all ones by default (may affect
              joint stiffness in IK).
            - usedCAEs, usedCAEsSplits: for each used channel, the effectors affected
              by varying that channel.
    Requires:
        computeChannelAffectedEffectors
    """
    if p_o_w is not None:
        parents, offsets, weights = p_o_w
    else:
        parents = skelDict['markerParents']
        offsets = skelDict['markerOffsets']
        weights = skelDict['markerWeights']
    effectorJoints = parents
    markerCount = len(effectorJoints)
    # each effector is a 3x4 matrix: identity rotation part, marker offset in the translation column
    effectorOffsets = np.zeros((markerCount, 3, 4), dtype=np.float32)
    effectorOffsets[:] = np.eye(3, 4, dtype=np.float32)
    effectorOffsets[:, :, 3] = offsets
    # weights act on the translation column only by default
    effectorWeights = np.zeros((markerCount, 3, 4), dtype=np.float32)
    effectorWeights[:, :, 3] = weights.reshape(-1, 1)
    usedChannels, (usedCAEs, usedCAEsSplits) = computeChannelAffectedEffectors(
        jointCutOff, skelDict['jointParents'], skelDict['jointChanSplits'], effectorJoints)
    usedChannelWeights = np.ones(len(usedChannels), dtype=np.float32)
    return (effectorJoints, effectorOffsets, effectorWeights,
            usedChannels, usedChannelWeights, usedCAEs, usedCAEsSplits)
def skeleton_marker_positions(skelDict, rootMat, chanValues, effectorLabels, effectorData, markerWeights=None):
    """
    Based on the pose implied by the chanValues and rootMat, compute the 3D world-space
    positions of the markers.
    Multiple effectors may determine the position of the marker. effectorLabels provides
    this mapping. The weights for the markers, if any, are set by markerWeights.

    Args:
        skelDict (GskelDict): the skeleton
        rootMat (float[3][4]): reference frame of the Skeleton.
        chanValues (float[]): list of channel values to pose the skeleton
        effectorLabels: the marker label that each effector determines
        effectorData: (effectorJoints, effectorOffsets, ...)
        markerWeights: the weight that each effector has on its marker
    Returns:
        float[][3]: 3D positions of where the markers would be in the pose.
        int[]: the (unique, sorted) labels for those positions.
    Requires:
        Character.pose_skeleton
        ISCV.marker_positions
    """
    Character.pose_skeleton(skelDict['Gs'], skelDict, chanValues, rootMat)
    labels = np.unique(effectorLabels)
    # FIX: the original did list(labels).index(x) per effector, an accidental O(n^2);
    # build the label->index map once instead (labels are unique, so this is lossless
    # and yields the same indices).
    label_to_index = {label: i for i, label in enumerate(labels)}
    els2 = np.int32([label_to_index[x] for x in effectorLabels])
    x3ds = ISCV.marker_positions(skelDict['Gs'], effectorData[0], effectorData[1], els2, markerWeights)
    return x3ds, labels
def solveIK(skelDict, chanValues, effectorData, effectorTargets, outerIts=10, rootMat=None):
    """
    Given an initial skeleton pose (chanValues), effectors (ie constraints: joint, offset, weight, target), solve for the skeleton pose.
    Effector weights and targets are 3x4 matrices.
    * Setting 1 in the weight's 4th column makes a position constraint.
    * Setting 100 in the weight's first 3 columns makes an orientation constraint.
    The solve is a damped-least-squares iteration: each outer iteration linearises the
    effector error around the current pose and solves (JTJ + damping) delta = JTB for
    the channel update.
    Args:
        skelDict (GskelDict): The Skeleton to process
        chanValues (float[]): Initial pose of the skeleton as Translation and many rotations applied to joints in the skelDict. Updated in place with the solution.
        effectorData (tuple): effectorJoints, effectorOffsets, effectorWeights, usedChannels, usedChannelWeights, usedCAEs, usedCAEsSplits (see make_effectorData).
        effectorTargets (float[][3][4]): per-effector 3x4 target matrices matched against the posed effectors.
        outerIts (int): IK Iterations to solve the skeleton. Default = 10
        rootMat (float[3][4]): reference frame of the Skeleton. Default = None
    Returns:
        None: The result is an update of the skelDict to the solution - chanValues, channelMats, and Gs.
    Requires:
        Character.pose_skeleton_with_chan_mats
        ISCV.pose_effectors
        ISCV.derror_dchannel
        ISCV.JTJ
    """
    effectorJoints, effectorOffsets, effectorWeights, usedChannels, usedChannelWeights, usedCAEs, usedCAEsSplits = effectorData
    jointParents = skelDict['jointParents']
    Gs = skelDict['Gs']
    Ls = skelDict['Ls']
    jointChans = skelDict['jointChans']
    jointChanSplits = skelDict['jointChanSplits']
    numChannels = jointChanSplits[-1]
    numEffectors = len(effectorJoints)
    numUsedChannels = len(usedChannels)
    channelMats = np.zeros((numChannels,3,4), dtype=np.float32)
    # only effector components with a non-zero weight take part in the solve
    #usedEffectors = np.array(np.where(np.sum(effectorWeights,axis=(1,2)) != 0)[0], dtype=np.int32)
    usedEffectors = np.array(np.where(effectorWeights.reshape(-1) != 0)[0], dtype=np.int32)
    # numUsedEffectors= len(usedEffectors)
    effectors = np.zeros((numEffectors,3,4),dtype=np.float32)
    residual = np.zeros((numEffectors,3,4),dtype=np.float32)
    derrors = np.zeros((numUsedChannels,numEffectors,3,4), dtype=np.float32)
    # steps = np.ones((numUsedChannels),dtype=np.float32)*0.2
    # steps[np.where(jointChans[usedChannels] < 3)[0]] = 30.
    # steps = 1.0/steps
    delta = np.zeros((numUsedChannels),dtype=np.float32)
    # JJTB = np.zeros((numEffectors*12),dtype=np.float32)
    JTJ = np.zeros((numUsedChannels, numUsedChannels),dtype=np.float32)
    JTB = np.zeros((numUsedChannels),dtype=np.float32)
    # JT and B are flat views over derrors/residual: the ISCV calls below fill the
    # underlying arrays in place, so JT/B always reflect the current Jacobian/error.
    JT = derrors.reshape(numUsedChannels,-1)
    JTJdiag = np.diag_indices_from(JTJ)
    B = residual.reshape(-1)
    # TODO, calculate the exact requirements on the tolerance
    B_len = len(B)
    tolerance = 0.00001
    it_eps = (B_len**0.5)*tolerance
    for it in xrange(outerIts):
        # TODO, only usedChannels are changing, only update the matrices that have changed after the first iteration.
        # TODO Look into damping, possibly clip residuals?
        # updates the channelMats and Gs
        Character.pose_skeleton_with_chan_mats(channelMats, Gs, skelDict, chanValues, rootMat)
        bestScore = ISCV.pose_effectors(effectors, residual, Gs, effectorJoints, effectorOffsets, effectorWeights, effectorTargets)
        if np.linalg.norm(B) < it_eps: break # early termination
        ISCV.derror_dchannel(derrors, channelMats, usedChannels, usedChannelWeights, usedCAEs, usedCAEsSplits, jointChans, effectors, effectorWeights)
        # if True: # DLS method : solve (JTJ + k^2 I) delta = JTB
        ISCV.JTJ(JTJ,JTB,JT,B,usedEffectors) #np.dot(JT, B, out=JTB); np.dot(JT, JT.T, out=JTJ)
        # damp the normal equations (add one, then scale the diagonal) so JTJ stays
        # positive definite for the dposv solver below
        JTJ[JTJdiag] += 1
        JTJ[JTJdiag] *= 1.1
        _, delta[:], _ = LAPACK.dposv(JTJ,JTB) # Use Positive Definite Solver
        # Use General Solver
        # delta[:] = np.linalg.solve(JTJ, JTB)
        # elif it==0: # SVD method: solve J delta = B
        # delta[:] = np.linalg.lstsq(JT.T[usedEffectors], B[usedEffectors], rcond=0.0001)[0].reshape(-1)
        # else: # J transpose method
        # testScale = ISCV.J_transpose(delta, JJTB, JT, B)
        # #np.dot(JT, B, out=delta); np.dot(JT.T,delta,out=JJTB); delta *= np.dot(B,JJTB)/(np.dot(JJTB,JJTB)+1.0)
        #scale = np.max(np.abs(delta*steps))
        #if scale > 1.0: delta *= 1.0/scale
        #np.clip(delta,-steps,steps,out=delta)
        chanValues[usedChannels] += delta
        # TODO: add channel limits
        #bestScore = ISCV.lineSearch(chanValues, usedChannels, delta, Gs, Ls, jointParents, jointChans, jointChanSplits,
        # rootMat, effectorJoints, effectorOffsets, effectorWeights, effectorTargets, innerIts, bestScore)
        #print np.mean(B*B)
    # write the solved pose back into skelDict['Gs']
    Character.pose_skeleton(Gs, skelDict, chanValues, rootMat)
def solveIK1Ray(skelDict, effectorData, x3ds, effectorIndices_3d, E, effectorIndices_2d, outerIts=10, rootMat=None):
    """
    solveIK routine form Label.py - Has Single ray constraint equations enabled.
    Given effectors (joint, offset, weight) and constraints for those (3d and 2d), solve for the skeleton pose.
    Effector offsets, weights and targets are 3-vectors.
    Args:
        skelDict (GskelDict): The Skeleton to process
        effectorData (tuple): effectorJoints, effectorOffsets, effectorWeights, usedChannels, usedChannelWeights, usedCAEs, usedCAEsSplits
        x3ds (float[][3]): 3D Reconstructions; targets for the effectors in effectorIndices_3d
        effectorIndices_3d (int[]): indices (into the effector arrays) of effectors constrained to a 3D point
        E: Equations for 1-Ray constraints, or MDMA.
        effectorIndices_2d (int[]): indices of effectors constrained by a single-ray (2D) equation
        outerIts (int): IK Iterations to solve the skeleton. Default = 10
        rootMat (float[3][4]): reference frame of the Skeleton. Default = None (identity)
    Returns:
        None: The result is an update of the skelDict to the solution - chanValues, channelMats, and Gs.
    Requires:
        Character.pose_skeleton_with_chan_mats
        ISCV.derror_dchannel_single_ray
        ISCV.JTJ_single_ray
    """
    if rootMat is None: rootMat = np.eye(3,4,dtype=np.float32)
    effectorJoints, effectorOffsets, effectorWeightsOld, usedChannels, usedChannelWeights, usedCAEs, usedCAEsSplits = effectorData
    chanValues = skelDict['chanValues']
    jointParents = skelDict['jointParents']
    Gs = skelDict['Gs']
    Ls = skelDict['Ls']
    jointChans = skelDict['jointChans']
    jointChanSplits = skelDict['jointChanSplits']
    numChannels = jointChanSplits[-1]
    numEffectors = len(effectorJoints)
    num3ds = len(effectorIndices_3d)
    num2ds = len(effectorIndices_2d)
    # this solver works on 3-vector effectors: keep only the translation column of the 3x4 offsets
    effectorOffsets = np.copy(effectorOffsets[:,:,3])
    effectorWeights = np.zeros(numEffectors, dtype=np.float32)
    effectorWeights[effectorIndices_3d] = 1 # TODO Why does this fail? effectorWeightsOld[effectorIndices_3d,0,3]
    effectorWeights[effectorIndices_2d] = 1 # effectorWeightsOld[effectorIndices_2d,0,3]
    numUsedChannels = len(usedChannels)
    channelMats = np.zeros((numChannels,3,4), dtype=np.float32)
    effectors = np.zeros((numEffectors,3),dtype=np.float32)
    residual = np.zeros((num3ds,3),dtype=np.float32)
    residual2 = np.zeros((num2ds,2),dtype=np.float32)
    derrors = np.zeros((numUsedChannels,numEffectors,3), dtype=np.float32)
    delta = np.zeros((numUsedChannels),dtype=np.float32)
    JTJ = np.zeros((numUsedChannels, numUsedChannels),dtype=np.float32)
    JTB = np.zeros((numUsedChannels),dtype=np.float32)
    # JT is a flat view over derrors, filled in place by ISCV.derror_dchannel_single_ray
    JT = derrors.reshape(numUsedChannels,-1)
    JTJdiag = np.diag_indices_from(JTJ)
    for it in xrange(outerIts):
        # TODO, only usedChannels are changing, only update the matrices that have changed after the first iteration.
        # updates the channelMats and Gs
        Character.pose_skeleton_with_chan_mats(channelMats, Gs, skelDict, chanValues, rootMat)
        bestScore = ISCV.pose_effectors_single_ray(effectors, residual, residual2, Gs, effectorJoints, effectorOffsets, effectorWeights, x3ds, effectorIndices_3d, E, effectorIndices_2d)
        if np.sum(residual*residual)+np.sum(residual2*residual2) <= 1e-5*(num3ds+num2ds): break # early termination
        ISCV.derror_dchannel_single_ray(derrors, channelMats, usedChannels, usedChannelWeights, usedCAEs, usedCAEsSplits, jointChans, effectors, effectorWeights)
        # J = d_effectors/dc
        # err(c) = x3ds - effectors[effectorIndices_3d], e0 + E effectors[effectorIndices_2d]; err(c+delta) = x3ds - effectors[effectorIndices_3d] - J[effectorIndices_3d] delta, e0 + E effectors[effectorIndices_2d] + E J[effectorIndices_2d] delta = 0
        # J dc = B; (J[effectorIndices_3d] ; E J[effectorIndices_2d]) dc = B ; e0
        # DLS method : solve (JTJ + k^2 I) delta = JTB
        ISCV.JTJ_single_ray(JTJ,JTB,JT,residual,effectorIndices_3d,E,effectorIndices_2d,residual2) #np.dot(JT, B, out=JTB); np.dot(JT, JT.T, out=JTJ)
        # damp the normal equations so JTJ stays positive definite for dposv
        JTJ[JTJdiag] += 1
        JTJ[JTJdiag] *= 1.1
        # delta[:] = np.linalg.solve(JTJ, JTB)
        _, delta[:], _ = LAPACK.dposv(JTJ,JTB) # Use Positive Definite Solver
        chanValues[usedChannels] += delta
        # TODO: add channel limits
        # # J_transpose method, 3d only: scaling problems with translation
        #JT = derrors[:,effectorIndices_3d,:].reshape(numUsedChannels,-1)
        #np.dot(JT, B, out=delta)
        #np.dot(JT.T,delta,out=JJTB)
        #delta *= np.dot(B,JJTB)/(np.dot(JJTB,JJTB)+1)
        #delta[:3] *= 100000.
        #testScale = ISCV.Jtranspose_SR(delta, JJTB, JT, residual,effectorIndices_3d,residual2,effectorIndices_2d)
    # write the solved pose back into skelDict['Gs']
    Character.pose_skeleton(Gs, skelDict, chanValues, rootMat)
def scoreIK(skelDict, chanValues, effectorData, effectorTargets, rootMat=None):
    """
    Poses the skeleton with chanValues and returns an RMS-style fit score of the posed
    effectors against effectorTargets (lower is better).
    Args:
        skelDict (GskelDict): The Skeleton to process
        chanValues (float[]): channel values defining the pose to score
        effectorData (tuple): (effectorJoints, effectorOffsets, effectorWeights, ...)
        effectorTargets: per-effector targets, as consumed by ISCV.score_effectors
        rootMat (float[3][4]): reference frame of the skeleton. Default = None
    Returns:
        float: the score.
    Requires:
        Character.pose_skeleton
        ISCV.score_effectors
    """
    Gs = skelDict['Gs']
    Character.pose_skeleton(Gs, skelDict, chanValues, rootMat)
    joints, offsets, weights = effectorData[0], effectorData[1], effectorData[2]
    raw = ISCV.score_effectors(Gs, joints, offsets, weights, effectorTargets)
    # NOTE(review): the score is normalised by the sum of the OFFSETS (effectorData[1]);
    # verify this shouldn't be the sum of the weights (effectorData[2]).
    return (raw / np.sum(offsets)) ** 0.5
def bake_ball_joints(skelDict):
    """
    For every 3 DoF joint, multiply in matrices to reduce gimbal lock.
    Stores a pristine copy of the local matrices in skelDict['Ls_orig'] (created once)
    so that unbake_ball_joints can restore them later.
    Args:
        skelDict (GskelDict): The Skeleton to process. Modified in place.
    Requires:
        ISCV.bake_ball_joints
    """
    Ls = skelDict['Ls']
    jointChans = skelDict['jointChans']
    jointChanSplits = skelDict['jointChanSplits']
    chanValues = skelDict['chanValues']
    # FIX: dict.has_key is Python2-only (removed in Python 3); `in` behaves identically.
    # Also dropped the unused `Ls_orig` local.
    if 'Ls_orig' not in skelDict:
        skelDict['Ls_orig'] = Ls.copy()
    ISCV.bake_ball_joints(Ls, jointChans, jointChanSplits, chanValues)
def unbake_ball_joints(skelDict):
    """
    Reverses bake_ball_joints, restoring the skeleton's 3-DoF joint local matrices from
    the pristine copy in skelDict['Ls_orig'].
    Args:
        skelDict (GskelDict): The Skeleton to process. Modified in place.
    Returns:
        None: Results are a transformation of the skelDict.
    Requires:
        ISCV.unbake_ball_joints
    """
    Ls = skelDict['Ls']
    jointChans = skelDict['jointChans']
    jointChanSplits = skelDict['jointChanSplits']
    chanValues = skelDict['chanValues']
    # FIX: dict.has_key is Python2-only (removed in Python 3); `in` behaves identically.
    # NOTE(review): if 'Ls_orig' is missing here, the fallback copies the CURRENT (baked)
    # Ls, making the unbake a no-op — presumably bake_ball_joints always ran first; verify.
    if 'Ls_orig' not in skelDict:
        skelDict['Ls_orig'] = Ls.copy()
    Ls_orig = skelDict['Ls_orig']
    ISCV.unbake_ball_joints(Ls, jointChans, jointChanSplits, chanValues, Ls_orig)
def solve_skeleton_from_2d(x2ds, splits, labels, effectorLabels, Ps, skelDict, effectorData, rootMat, outerIts=5):
"""
Given a posed skeleton and some labelled 2d points, solve the skeleton to better fit the points.
Args:
x2ds (float[][2]): 2d Detections from all cameras
splits (int[]): list of camera indices
labels (int[]): Assigned labels | |
# repository: leilinen/flink
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import ABCMeta
from py4j.java_gateway import get_java_class
from typing import Optional
from pyflink.java_gateway import get_gateway
__all__ = [
'CheckpointStorage',
'JobManagerCheckpointStorage',
'FileSystemCheckpointStorage',
'CustomCheckpointStorage']
def _from_j_checkpoint_storage(j_checkpoint_storage):
    # Wrap a Java CheckpointStorage into the matching Python class (None passes through).
    if j_checkpoint_storage is None:
        return None
    gateway = get_gateway()
    j_state_pkg = gateway.jvm.org.apache.flink.runtime.state
    JCheckpointStorage = j_state_pkg.CheckpointStorage
    JJobManagerCheckpointStorage = j_state_pkg.storage.JobManagerCheckpointStorage
    JFileSystemCheckpointStorage = j_state_pkg.storage.FileSystemCheckpointStorage
    j_clz = j_checkpoint_storage.getClass()
    # reject anything that isn't a Java CheckpointStorage at all
    if not get_java_class(JCheckpointStorage).isAssignableFrom(j_clz):
        raise TypeError("%s is not an instance of CheckpointStorage." % j_checkpoint_storage)
    if get_java_class(JJobManagerCheckpointStorage).isAssignableFrom(j_clz):
        return JobManagerCheckpointStorage(j_jobmanager_checkpoint_storage=j_checkpoint_storage)
    if get_java_class(JFileSystemCheckpointStorage).isAssignableFrom(j_clz):
        return FileSystemCheckpointStorage(j_filesystem_checkpoint_storage=j_checkpoint_storage)
    # unknown (user-defined) storage implementations get the generic wrapper
    return CustomCheckpointStorage(j_checkpoint_storage)
class CheckpointStorage(object, metaclass=ABCMeta):
    """
    Base class describing how a :class:`StateBackend` persists its state for
    fault-tolerance in streaming applications. Implementations differ in where
    checkpoints live and what guarantees they offer:

    - :class:`JobManagerCheckpointStorage` keeps checkpoints in the JobManager's memory.
      It is lightweight and dependency-free but not scalable and limited to small state;
      convenient for local testing and development.
    - :class:`FileSystemCheckpointStorage` writes checkpoints to a filesystem (HDFS, NFS,
      S3, GCS, ...), supporting very large state on a highly available foundation; this
      is the recommended choice for most production deployments.

    **Raw Bytes Storage** — a `CheckpointStorage` creates the raw-bytes storage services
    (via the CheckpointStreamFactory) that store bytes fault-tolerantly. The JobManager
    uses them for checkpoint/recovery metadata, and keyed-/operator-state backends
    typically use them for checkpoint state.

    **Serializability** — implementations must be `java.io.Serializable`, because they
    travel with the application code to parallel, distributed processes. They should
    therefore act as lightweight, configuration-only *factories* for the actual state
    stores that touch the persistent layer.

    **Thread Safety** — implementations must be thread-safe: multiple threads may create
    streams concurrently.
    """

    def __init__(self, j_checkpoint_storage):
        # py4j handle to the underlying Java CheckpointStorage instance
        self._j_checkpoint_storage = j_checkpoint_storage
class JobManagerCheckpointStorage(CheckpointStorage):
    """
    Checkpoint storage that keeps checkpoint state in the JobManager's memory (hence the
    name); savepoints are still persisted to a file system.

    Intended for experimentation, quick local setups, and streaming jobs with very small
    state: every checkpoint goes through — and occupies — the JobManager's main memory,
    so larger state reduces operational stability. For any other setup use
    `FileSystemCheckpointStorage`, which checkpoints state directly to files and thus
    supports larger state sizes and more highly available recovery.

    **State Size Considerations** — checkpointing with this storage is subject to:

    - each individual state must not exceed the configured maximum state size
      (see :func:`get_max_state_size`);
    - all state from one task (the sum of all operator and keyed states of all chained
      operators) must fit what the RPC system supports, by default < 10 MB — that limit
      can be raised, but doing so is typically not advised;
    - the sum of all states times all retained checkpoints must comfortably fit into the
      JobManager's JVM heap.

    **Persistence Guarantees** — within those size limits, persistence is guaranteed for
    savepoints, externalized checkpoints (if configured), and checkpoints (when
    high-availability is configured).

    **Configuration** — like every checkpoint storage, it can be set up in the
    application (constructor parameters on the execution environment) or in the Flink
    configuration. When specified in the application, missing values (e.g. a default
    savepoint directory) are picked up from the running job/cluster's Flink
    configuration via the :func:`configure` mechanism.
    """

    # The default maximal size that the snapshotted memory state may have (5 MiBytes).
    DEFAULT_MAX_STATE_SIZE = 5 * 1024 * 1024

    def __init__(self,
                 checkpoint_path=None,
                 max_state_size=None,
                 j_jobmanager_checkpoint_storage=None):
        """
        Creates a new JobManagerCheckpointStorage, optionally setting the path to persist
        checkpoint metadata to and the state size threshold.

        WARNING: raising ``max_state_size`` above :data:`DEFAULT_MAX_STATE_SIZE` should
        be done with care: checkpointed state is sent to the JobManager in size-limited
        RPC messages, and the JobManager must hold all aggregated state in memory.

        Example:
            ::

                >>> checkpoint_storage = JobManagerCheckpointStorage()

        :param checkpoint_path: The path to write checkpoint metadata to. If none, the
                                value from the runtime configuration will be used.
        :param max_state_size: The maximal size of the serialized state. If none,
                               :data:`DEFAULT_MAX_STATE_SIZE` will be used.
        :param j_jobmanager_checkpoint_storage: For internal use, please keep none.
        """
        if j_jobmanager_checkpoint_storage is None:
            gateway = get_gateway()
            JJobManagerCheckpointStorage = gateway.jvm.org.apache.flink.runtime.state \
                .storage.JobManagerCheckpointStorage
            JPath = gateway.jvm.org.apache.flink.core.fs.Path
            j_path = None if checkpoint_path is None else JPath(checkpoint_path)
            if max_state_size is None:
                max_state_size = JJobManagerCheckpointStorage.DEFAULT_MAX_STATE_SIZE
            j_jobmanager_checkpoint_storage = JJobManagerCheckpointStorage(
                j_path, max_state_size)
        super(JobManagerCheckpointStorage, self).__init__(j_jobmanager_checkpoint_storage)

    def get_checkpoint_path(self) -> Optional[str]:
        """
        Gets the base directory where all the checkpoints are stored (the job-specific
        checkpoint directory is created inside it), or None when unset.
        """
        j_path = self._j_checkpoint_storage.getCheckpointPath()
        return None if j_path is None else j_path.toString()

    def get_max_state_size(self) -> int:
        """
        Gets the maximum size an individual state may have, as configured in the
        constructor (:data:`DEFAULT_MAX_STATE_SIZE` by default).
        """
        return self._j_checkpoint_storage.getMaxStateSize()

    def get_savepoint_path(self) -> Optional[str]:
        """
        Gets the base directory where all the savepoints are stored (the job-specific
        savepoint directory is created inside it), or None when unset.
        """
        j_path = self._j_checkpoint_storage.getSavepointPath()
        return None if j_path is None else j_path.toString()

    def __str__(self):
        return self._j_checkpoint_storage.toString()
class FileSystemCheckpointStorage(CheckpointStorage):
"""
`FileSystemCheckpointStorage` checkpoints state as files to a filesystem.
Each checkpoint will store all its files in a subdirectory that includes the
checkpoints number, such as `hdfs://namenode:port/flink-checkpoints/chk-17/`.
**State Size Considerations**
This checkpoint storage stores small state chunks directly with the metadata, to avoid creating
many small files. The threshold for that is configurable. When increasing this threshold, the
size of the checkpoint metadata increases. The checkpoint metadata of all retained completed
checkpoints needs to fit into the JobManager's heap memory. This is typically not a problem,
unless the threashold `get_min_file_size_threshold` is increased significantly.
**Persistence Guarantees**
Checkpoints from this checkpoint storage are as persistent and available as the filesystem
that it is written to. If the file system is a persistent distributed file system, this
checkpoint storage supports highly available setups. The backend additionally supports
savepoints and externalized checkpoints.
**Configuration**
As for all checkpoint storage policies, this backend can either be configured within the
application (by creating the storage with the respective constructor parameters and setting
it on the execution environment) or by specifying it in the Flink configuration.
If the checkpoint storage was specified in the application, it may pick up additional
configuration parameters from the Flink configuration. For example, if the | |
else:
unknown_words.add(word)
print(f"Bonusing {len(known_words)} prewrite words: {' '.join(sorted(known_words))}")
print(f"Not bonusing {len(unknown_words)} unknown words: {' '.join(sorted(unknown_words))}")
# Beginning of sentence suggestions
if use_bos_suggs and use_bos_suggs != 'manual' and not enable_bos_suggs:
print("Warning: requested BOS suggs but they're not enabled.")
use_bos_suggs = False
is_bos = len(cur_word) == 0 and toks[-1] in ['<D>', '<S>']
if use_bos_suggs and is_bos:
if promise is not None:
print("Warning: promise enabled but making beginning-of-sentence suggestions!")
if use_bos_suggs == 'manual':
phrases, sug_state = manual_bos.get_manual_bos(sofar, sug_state)
elif use_bos_suggs in ['diverse', 'continue']:
phrases, sug_state, _ = get_bos_suggs(sofar, sug_state, bos_sugg_flag=use_bos_suggs, constraints=constraints)
else:
phrases = None
if phrases is not None:
return phrases, sug_state
if use_sufarr and not enable_sufarr:
print("Warning: requested sufarr but not enabled.")
use_sufarr = False
if temperature == 0:
if use_sufarr and len(cur_word) == 0:
assert sentiment is None, "sufarr doesn't support sentiment yet"
assert promise is None, "sufarr doesn't support promises yet"
beam_width = 100
beam = beam_search_sufarr_init(model, toks)
context_tuple = (toks[-1],)
if word_bonuses is None:
# The multiplication makes a copy.
word_bonuses = model.unigram_probs_wordsonly * -rare_word_bonus
else:
word_bonuses = word_bonuses.copy()
# Don't double-bonus words that have already been used.
for word in set(toks):
word_idx = model.model.vocab_index(word)
word_bonuses[word_idx] = 0.
for i in range(length_after_first):
beam_chunks = cytoolz.partition_all(8, beam)
parallel_futures = yield [executor.submit(
beam_search_sufarr_extend, domain, chunk, context_tuple, i, beam_width, length_after_first=length_after_first, word_bonuses=word_bonuses, prefix=prefix, constraints=constraints, **kw)
for chunk in beam_chunks]
parallel_beam = list(cytoolz.concat(parallel_futures))
prefix = ''
# FIXME: maintain diversity in first-words here?
beam = heapq.nlargest(beam_width, parallel_beam)
# entry 2 is "DONE"
if all(ent[2] for ent in beam):
break
ents = [BeamEntry(*ent) for ent in beam]
if len(ents) == 0:
# Fall back on the full LM, but just for one word.
first_word_ents = yield executor.submit(beam_search_phrases, domain, toks, beam_width=100, length_after_first=1, prefix_logprobs=prefix_logprobs, constraints=constraints)
phrases = [(ent.words, None) for ent in first_word_ents[:3]]
else:
result = [ents.pop(0)]
first_words = {ent.words[0] for ent in result}
while len(result) < 3 and len(ents) > 0:
ents.sort(reverse=True, key=lambda ent: (ent.words[0] not in first_words, ent.score))
best = ents.pop(0)
first_words.add(best.words[0])
result.append(best)
phrases = [([word for word in ent.words if word[0] != '<'], None) for ent in result]
else: # sufarr
# Use beam search on LM.
if prefix_logprobs is None:
sentence_enders = yield executor.submit(get_sentence_enders, domain, toks)
else:
sentence_enders = []
beam_search_kwargs = dict(constraints=constraints)
if sentiment:
clf_startstate = sentiment_classifier.get_state(toks)
# Include a broader range of first words if we may need to diversify by sentiment after the fact.
num_first_words = 3 - len(sentence_enders) if sentiment is None else 20
num_intermediates = 20
max_logprob_penalty = MAX_LOGPROB_PENALTY
# Launch a job to get first words.
if num_first_words:
first_word_ents = yield executor.submit(beam_search_phrases,
domain, toks, beam_width=num_first_words, length_after_first=1, prefix_logprobs=prefix_logprobs, **beam_search_kwargs)
else:
first_word_ents = []
first_words = {ent[1][0]: fwent_idx for fwent_idx, ent in enumerate(first_word_ents)}
if promise is not None:
promise_slot = promise['slot']
promise_words = promise['words']
# Remove the first word of the promise from the pool, we'll get to it later.
promise_first_word = promise_words[0]
if promise_first_word in first_words:
first_word_ents.pop(first_words[promise_first_word])
else:
promise_slot = None
jobs = [executor.submit(beam_search_phrases_loop, model, [ent],
start_idx=1,
beam_width=num_intermediates,
length_after_first=length_after_first, **beam_search_kwargs)
for ent in first_word_ents]
if promise is not None and len(promise_words) < 5 and not any(x in promise_words for x in '.?!'):
# Sneak an extra job into the queue...
promise_extension = True
# Promise provided, but we need to extend it with some new words.
remaining_length = max(1, length_after_first - len(' '.join(promise_words)))
jobs.append(executor.submit(beam_search_phrases,
model, toks + promise_words, beam_width=num_intermediates,
length_after_first=remaining_length, **beam_search_kwargs))
else:
promise_extension = False
results = (yield jobs)
if promise_extension:
# The extra job computed a bunch of possible promise continuations. Hold them aside.
promise_extension_results = results.pop()
# Convert them into a format compatible with our beam search.
# Make the score positive, so we can know not to taboo this entry.
promise_beam = [(ent[0] + 500, promise_words + ent[1]) for ent in promise_extension_results]
results.append(promise_beam)
# Now build final suggestions.
is_new_word = len(cur_word) == 0
active_entities = []
final_tok = toks[-1]
if final_tok in suggested_already:
suggested_already_this_tok = suggested_already[final_tok]
else:
suggested_already_this_tok = suggested_already[final_tok] = set()
for beam in results:
for ent in beam:
llk = ent[0]
words = ent[1]
# Penalize a suggestion that has already been made exactly like this before.
if llk < 0 and is_new_word and ' '.join(words[:3]) in suggested_already_this_tok:
print("Taboo:", ' '.join(words))
llk -= 5000.
active_entities.append((llk, words, {}))
# Add sentence-enders in the mix, but flagged special.
for ender in sentence_enders[:2]:
active_entities.append((995, [ender], {'type': 'eos'}))
# Add the highest likelihood promise continuation also, also flagged special.
if promise is not None:
llk = 999
if promise_extension:
words = promise_beam[0][1]
else:
words = promise_words
active_entities.append((llk, words, {'type': 'promise'}))
# If we're at the beginning of a sentence, add the special sentiment sentence starters.
if sentiment is not None and use_bos_suggs == 'sentiment' and len(cur_word) == 0 and toks[-1] in ["<D>", "<S>"]:
sent_idx = sum(1 for tok in toks if tok == '</S>')
if sentiment == 'diverse':
sent_targets = [[0, 1], [2], [3, 4]]
else:
sent_targets = [[sentiment - 1]] * 3
this_time_taboo = set()
for tgt_sentiments in sent_targets:
sent_bos_options = [
(tgt_sentiment, bos_option)
for tgt_sentiment in tgt_sentiments
for bos_option in sentiment_starters_by_stars_and_sentnum[tgt_sentiment][min(sent_idx, 2)]]
random.shuffle(sent_bos_options)
for tgt_sentiment, bos_option in sent_bos_options:
toks = bos_option.split()
first_3_words = ' '.join(toks[:3])
if first_3_words in this_time_taboo:
continue
if first_3_words in suggested_already_this_tok:
print("bos taboo:", bos_option)
continue
active_entities.append((100, toks, {'type': 'sentiment_bos', 'sentiment': tgt_sentiment / 4}))
this_time_taboo.add(first_3_words)
break
# Pad the active entities with null suggestions.
for i in range(3):
active_entities.append((-9999, [''], {'type': 'null'}))
active_entities.sort(reverse=True)
# Compute sentiment data
if sentiment is not None:
if sentiment == 'diverse':
# Diversify the suggestions by sentiment.
def summarize_posterior(sent_posteriors):
return np.mean(sent_posteriors, axis=0) @ sentiment_classifier.sentiment_weights
objective = scalar_diversity
else:
# Try to maximize the likelihood of the desired sentiment
target_sentiment = sentiment - 1
assert 0 <= target_sentiment < 5
def summarize_posterior(sent_posteriors):
return np.mean(sent_posteriors, axis=0)[target_sentiment]
def objective(slots):
return np.sum(slots)
classify_jobs = []
classify_jobs_meta = []
for entity_idx, (llk, words, meta) in enumerate(active_entities):
if meta.get('type') == 'eos' or 'sentiment' in meta:
continue
classify_jobs.append(words)
classify_jobs_meta.append(entity_idx)
classify_jobs_results = (yield map_as_jobs(executor, partial(sentiment_classifier.classify_seq_by_tok, clf_startstate), classify_jobs, chunksize=32))
sentiment_data = [ent[2].get('sentiment', .5) for ent in active_entities]
for entity_idx, posterior in zip(classify_jobs_meta, itertools.chain.from_iterable(classify_jobs_results)):
sentiment_data[entity_idx] = summarize_posterior(posterior)
entity_idx = 0
promise_entity_idx = 0
if promise is not None:
# The zeroth entity should be the promise.
assert active_entities[promise_entity_idx][2]['type'] == 'promise'
# Start open-assignment at the first entity.
entity_idx += 1
# Take 3 suggestions
assignments = [None] * 3
first_words_used = {}
if promise is not None:
first_words_used[promise['words'][0]] = promise_slot
for slot_idx in range(3):
if slot_idx == promise_slot:
# Assign the existing promise to this entry.
# We may extend it later with one of the computed extensions.
assignments[slot_idx] = promise_entity_idx
continue
while True:
llk, words, meta = active_entities[entity_idx]
first_word = words[0]
if first_word in first_words_used:
entity_idx += 1
continue
if first_word != '':
first_words_used[first_word] = slot_idx
assignments[slot_idx] = entity_idx
entity_idx += 1
break
if sentiment is not None:
# Tweak the suggestions as requested.
print("First words:", ' '.join(ent[1][0] for ent in first_word_ents))
cur_summaries = np.array([sentiment_data[entity_idx] for entity_idx in assignments])
cur_objective = objective(cur_summaries)
min_logprob_allowed = min(active_entities[entity_idx][0] for entity_idx in assignments) + max_logprob_penalty
if SHOW_SENTIMENT_OPTIONS:
for i in np.argsort(sentiment_data):
llk, words, meta = active_entities[i]
if llk < min_logprob_allowed:
continue
print(f'{sentiment_data[i]:.2f} {llk:.2f}', ' '.join(words))
# Greedily replace suggestions so as to increase sentiment diversity.
while True:
for entity_idx in assignments:
llk, words, meta = active_entities[entity_idx]
sentiment = sentiment_data[entity_idx]
print(f"{sentiment:3.2f} {llk:6.2f} {' '.join(words)}")
print()
print()
candidates = []
for entity_idx, (llk, words, meta) in enumerate(active_entities):
if llk < min_logprob_allowed:
continue
cur_summary = sentiment_data[entity_idx]
# Would this increase the objective if we added it?
# Case 1: it replaces an existing word
replaces_slot = first_words_used.get(words[0])
if replaces_slot is not None:
prev_llk = active_entities[assignments[replaces_slot]][0]
if llk < prev_llk + max_logprob_penalty:
# Too much relevance cost.
continue
if replaces_slot == promise_slot:
# This could replace the promise iff it was a continuation.
if words[:len(promise_words)] == promise_words:
# print("Considering replacing promise", words)
pass
else:
continue
elif prev_llk >= 0:
# Sorry, this was a | |
#!/usr/bin/env python
# coding: utf-8
# For python 2 compatibility
from __future__ import unicode_literals
import sys
from codecs import open
from os.path import split, join, exists
from os import getcwd, mkdir
import subprocess
import unicodedata
#from traceback import print_exc
# Python Version Compatibility
major = sys.version_info[0]
minor = sys.version_info[1]
# Expose a single input-prompt helper name across Python 2 and 3.
if major < 3:
    rinput = raw_input
else:
    rinput = input
# subprocess.check_output first appeared in 2.7; emulate it on 2.6.
if major == 2 and minor == 6:
    check_output = lambda a: subprocess.Popen(a,
            stdout=subprocess.PIPE).communicate()[0]
else:
    check_output = subprocess.check_output
# git-config queries used to read gitli's own settings.
COLOR = ['git', 'config', '--get', 'gitli.color']
LIST = ['git', 'config', '--get', 'gitli.list.option']
DEFAULT_LIST_FILTER = 'all'
# Layout of the .gitli metadata directory.
GITLIDIR = '.gitli'
ISSUES = '.issues'  # all issues, four lines per record
OPEN = '.issues-open'  # numbers of issues that are still open
LAST = '.issues-last'  # last issue number handed out
CURRENT = '.issues-current'  # current milestone
COMMENTS = '.issues-comments'
MSEPARATOR = ','
OSEPARATOR = '\n'  # record separator in the issues-open file
ITYPES = ['Task', 'Bug', 'Enhancement']
class BColors:
    """ANSI escape sequences used to colorize terminal output."""

    BLUE = '\033[1;34m'
    GREEN = '\033[1;32m'
    YELLOW = '\033[1;33m'
    CYAN = '\033[1;36m'
    WHITE = '\033[1;37m'
    ENDC = '\033[0m'

    def disable(self):
        """Turn off coloring by blanking every escape sequence on this instance."""
        for attr in ('BLUE', 'GREEN', 'YELLOW', 'CYAN', 'WHITE', 'ENDC'):
            setattr(self, attr, '')
def is_colored_output():
    '''
    :rtype: True if gitli.color is on in the git config.
    '''
    # Any failure (git missing, key unset, decode error) means "no color".
    try:
        setting = check_output(COLOR).strip().lower().decode('utf-8')
        return setting in ('auto', 'on', 'true')
    except Exception:
        return False
def get_default_list_filter():
    '''
    :rtype: The default list filter specified in the git config or
    DEFAULT_LIST_FILTER.
    '''
    # An empty value or any failure falls back to the built-in default.
    try:
        raw = check_output(LIST)
        if raw:
            return raw.strip().lower().decode('utf-8')
        return DEFAULT_LIST_FILTER
    except Exception:
        return DEFAULT_LIST_FILTER
def ask_type(verbose=False, default=1):
    '''Asks the user what type of issue to create.

    :verbose: If False, the default type is returned without asking the user.
    :default: The default issue type.
    :rtype: The issue type selected by the user or the default one if verbose
    is False.
    '''
    if not verbose:
        # Fix: honor the caller-supplied default. This previously returned the
        # hard-coded value 1, contradicting the docstring.
        return default
    ttype = rinput('Issue type: 1-Task, 2-Bug, 3-Enhancement [{0}]: '\
        .format(default))
    ttype = ttype.strip().lower()
    if not ttype:
        return default
    elif ttype in ('1', 'task'):
        return 1
    elif ttype in ('2', 'bug'):
        return 2
    elif ttype in ('3', 'enhancement'):
        return 3
    else:
        # Unrecognized input falls back to Task.
        return 1
def ask_milestone(path, verbose=False, default=None):
    '''Asks the user what milestone to associate the issue with.

    :param path: The path to the .gitli directory.
    :param verbose: If False, the default milestone is returned without asking
    the user.
    :param default: The default milestone. If None, the current milestone is
    provided as the default.
    :rtype: The milestone selected by the user or the default one if verbose
    is False.
    '''
    if default is not None:
        current_value = default
    else:
        # No explicit default: use the project's current milestone on disk.
        with open(join(path, CURRENT), 'r', encoding='utf-8') as current:
            current_value = current.read()
    if not verbose:
        return current_value
    answer = rinput('Milestone: [{0}]: '.format(current_value)).strip()
    return answer if answer else current_value
def add_open(path, issue_number):
    '''Add a new issue to the open list.

    :param path: The path to the .gitli directory.
    :param issue_number: The issue to open.
    '''
    entry = '{0}{1}'.format(issue_number, OSEPARATOR)
    with open(join(path, OPEN), 'a', encoding='utf-8') as open_file:
        open_file.write(entry)
def remove_open(path, issue_number):
    '''Remove an issue number from the issues-open file.

    :param path: The path to the .gitli directory.
    :param issue_number: The issue to close.
    '''
    open_path = join(path, OPEN)
    # Rewrite the file, keeping every entry except the closed issue.
    with open(open_path, 'r', encoding='utf-8') as handle:
        remaining = [entry for entry in handle.read().split(OSEPARATOR)
                     if entry != issue_number]
    with open(open_path, 'w', encoding='utf-8') as handle:
        handle.write(OSEPARATOR.join(remaining))
def get_open_issues(path):
    '''
    :param path: The path to the .gitli directory.
    :rtype: A list of issue numbers that are open.
    '''
    with open(join(path, OPEN), 'r', encoding='utf-8') as handle:
        contents = handle.read()
    return contents.split(OSEPARATOR)
def filter_issues(issue, filters, open_issues, milestones, itypes):
    '''Indicate whether or not an issue should be displayed (True) or not
    (False).

    :param issue: The issue tuple to filter.
        (issue_number, title, issue_type, milestone)
    :param filters: A list of filters, [str]. e.g., 'close', '0.1', 'task'.
    :param open_issues: A list of the issue numbers that are open. [str]
    :param milestones: A list of milestones, [str], that the issue must be
        associated with. If empty, the issue milestone is not checked.
    :param itypes: A list of issue types, [str], used to filter the issue. If
        the list is empty, the issue type is not checked.
    :rtype: True if the issue passes all filters and can be displayed. False
        otherwise.
    '''
    issue_is_open = issue[0] in open_issues
    # Open/closed state filters.
    if 'open' in filters and not issue_is_open:
        return False
    if 'close' in filters and issue_is_open:
        return False
    # Milestone filter (skipped when no milestones were requested).
    if milestones and issue[3] not in milestones:
        return False
    # Issue-type filter (skipped when no types were requested).
    if itypes and ITYPES[issue[2] - 1].lower() not in itypes:
        return False
    return True
def get_issue(path, issue_number):
    '''Return a tuple (issue_number, title, issue_type, milestone).

    :param path: The path to the .gitli directory.
    :param issue_number: The number of the issue to retrieve.
    :rtype: A tuple representing the issue or None if not found.
    '''
    with open(join(path, ISSUES), 'r', encoding='utf-8') as issues_file:
        lines = issues_file.readlines()
    # Issues are stored as consecutive 4-line records:
    # number, title, type, milestone.
    for start in range(0, len(lines), 4):
        candidate = (
            lines[start].strip(),
            lines[start + 1].strip(),
            int(lines[start + 2].strip()),
            lines[start + 3].strip())
        if candidate[0] == issue_number:
            return candidate
    return None
def get_issues(path, filters, open_issues, milestones, itypes):
    '''Returns a list of issues that match the filters.
    [(issue_number, title, issue_type, milestone)]

    :param path: The path to the .gitli directory.
    :param filters: A list of filters, [str]. e.g., 'close', '0.1', 'task'.
    :param open_issues: A list of the issue numbers that are open. [str]
    :param milestones: A list of milestones, [str], that the issue must be
        associated with. If empty, the issue milestone is not checked.
    :param itypes: A list of issue types, [str], used to filter the issue. If
        the list is empty, the issue type is not checked.
    :rtype: A list of issue tuples matching the filters.
    '''
    with open(join(path, ISSUES), 'r', encoding='utf-8') as issues_file:
        lines = issues_file.readlines()
    matching = []
    # Issues are stored as consecutive 4-line records:
    # number, title, type, milestone.
    for start in range(0, len(lines), 4):
        issue = (
            lines[start].strip(),
            lines[start + 1].strip(),
            int(lines[start + 2].strip()),
            lines[start + 3].strip())
        if filter_issues(issue, filters, open_issues, milestones, itypes):
            matching.append(issue)
    return matching
def print_issues(issues, open_issues, bcolor):
    '''Prints the issues on stdout, one colorized line per issue.

    :param issues: The list of tuples representing the issues to print.
        [(issue_number, title, issue_type, milestone)]
    :param open_issues: The list of the issue numbers that are open.
    :param bcolor: An instance of the BColors class used to colorize the
        output.
    '''
    line_format = '{5}#{0:<4}{9} {6}{1}{9} {7}{2:<6} {3:<7}{9} - {8}{4}{9}'
    for (number, title, type_id, milestone) in issues:
        issue_open = number in open_issues
        # Open issues are shown in yellow, closed ones in green.
        open_text = 'open' if issue_open else 'closed'
        color = bcolor.YELLOW if issue_open else bcolor.GREEN
        type_text = '[' + ITYPES[type_id - 1] + ']'
        milestone_text = '[' + milestone + ']'
        print(line_format.format(number, align(title, 48), type_text,
                                 milestone_text, open_text, bcolor.CYAN,
                                 bcolor.WHITE, bcolor.BLUE, color,
                                 bcolor.ENDC))
def init(path):
    '''Initialize the .gitli directory by creating the gitli files.

    :param path: The path to the .gitli directory.
    '''
    if not exists(path):
        mkdir(path)
    # Files that start out empty.
    for filename in (ISSUES, OPEN, COMMENTS):
        file_path = join(path, filename)
        if not exists(file_path):
            open(file_path, 'w', encoding='utf-8').close()
    # Files seeded with an initial value: issue counter and current milestone.
    for filename, initial in ((LAST, '0'), (CURRENT, '0.1')):
        file_path = join(path, filename)
        if not exists(file_path):
            with open(file_path, 'w', encoding='utf-8') as handle:
                handle.write(initial)
def new_issue(path, title, verbose=False):
    '''Creates a new issue: add the issue to the issues file, add the issue
    number to the issues-open file, and increment the last issue number in
    issues-last.

    :param path: The path to the .gitli directory.
    :param title: The title of the issue.
    :param verbose: If True, ask the user for the issue type and milestone.
    '''
    last_path = join(path, LAST)
    # Next issue number = last handed-out number + 1.
    with open(last_path, 'r', encoding='utf-8') as last:
        issue_number = int(last.read().strip()) + 1
    ttype = ask_type(verbose)
    milestone = ask_milestone(path, verbose)
    # Append the 4-line record: number, title, type, milestone.
    record = '{0}\n{1}\n{2}\n{3}\n'.format(issue_number, title, ttype,
                                           milestone)
    with open(join(path, ISSUES), 'a', encoding='utf-8') as issues:
        issues.write(record)
    add_open(path, issue_number)
    with open(last_path, 'w', encoding='utf-8') as last:
        last.write('{0}'.format(issue_number))
def close_issue(path, issue_number):
    '''Closes the issue by removing its number from the issues-open file.

    :param path: The path to the .gitli directory.
    :param issue_number: The number of the issue to close.
    '''
    # The issue record itself stays in the issues file; only its "open"
    # marker is removed.
    remove_open(path, issue_number)
def list_issues(path, filters=None, bcolor=BColors()):
'''Prints a list of issues matching the provided filters.
:param path: The path to the .gitli directory.
:param filters: A list of filters such as ['open', '0.1', 'task']
:param bcolor: An instance of the BColors class to colorize the output.
'''
if filters is None or len(filters) == 0:
filters = [get_default_list_filter()]
else:
filters = | |
from django.shortcuts import render
from . import models, forms
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib import messages
from django.http import JsonResponse
from django.core.mail import EmailMessage
from django.utils.safestring import mark_safe
from random import randint
import json
import datetime
import os
import shutil
# Markdown/HTML libraries
import markdown2
import html2markdown
# Global variables
team_limit = 4  # maximum number of members allowed on a single team
# Function to create random ID for users and teams
# Function to create random ID for users and teams
def random_with_N_digits(n):
    """Generate a unique random n-digit ID and record it in the ID table.

    :param n: Number of digits the generated ID must have.
    :return: An integer ID that has not been generated before.
    """
    # Fix: the original compared the candidate int against a list of ID
    # *model instances*, so the uniqueness check could never trigger.
    # Compare against the stored integer values instead, via a set for
    # O(1) membership tests.
    used_ids = set(models.ID.objects.values_list("generated_id", flat=True))
    range_start = 10 ** (n - 1)
    range_end = (10 ** n) - 1
    # Loop until an unused ID is generated.
    while True:
        candidate = randint(range_start, range_end)
        if candidate not in used_ids:
            break
    # Persist the ID so future calls see it as taken.
    id_model = models.ID(generated_id=candidate)
    id_model.save()
    return candidate
def index(request):
    """Render the hackathon landing page."""
    return render(request, "hackathon/index.html")
def register(request):
    """Handle user registration for the current hackathon subdomain.

    GET renders the registration form. POST validates that the username and
    email are unused, creates the Django ``User`` and the linked
    ``UserProfile``, registers the profile into the hackathon derived from
    the request's subdomain, stores the uploaded profile picture (or a
    per-user copy of the default image), logs the new user in, and
    redirects home. Validation failures fall through and re-render the
    form with a message.
    """
    # Check if form was submitted
    if request.method == "POST":
        # Shortcut variable
        data = request.POST
        # Grabbing all form data
        first_name = data.get("first_name")
        last_name = data.get("last_name")
        birthday = data.get("birthday")
        title = data.get("title")
        gender = data.get("gender")
        email = data.get("email")
        phone_number = data.get("phone_number")
        areas_of_expertise = data.get("areas_of_expertise")
        past_accomplishments = data.get("past_accomplishments")
        github_link = data.get("github_link")
        linkedin_link = data.get("linkedin_link")
        personal_website_link = data.get("personal_website_link")
        profile_picture = data.get("profile_picture")
        username = data.get("username")
        password = data.get("password")
        communication = data.get("communication")
        # NOTE: this form field uses a hyphen, unlike the others.
        public_speaking = data.get("public-speaking")
        teamwork = data.get("teamwork")
        leadership = data.get("leadership")
        # Checking if username is taken
        if not User.objects.filter(username=username).exists():
            # Checking if email is taken
            if not User.objects.filter(email=email).exists():
                # Modifying birthday to correct date format
                birthday = datetime.datetime.strptime(birthday, '%m/%d/%Y').strftime('%Y-%m-%d')
                # Creating and saving default User
                user = User(first_name=first_name, last_name=last_name, email=email, username=username, password=password)
                # set_password replaces the raw constructor value with a
                # proper password hash before saving.
                user.set_password(password)
                user.save()
                # Creating and saving user profile - linked to User
                profile = models.UserProfile(user=user, id=random_with_N_digits(8), birthday=birthday, title=title, gender=gender, phone_number=phone_number, areas_of_expertise=areas_of_expertise, past_accomplishments=past_accomplishments, github_link=github_link, linkedin_link=linkedin_link, personal_website_link=personal_website_link, profile_picture=profile_picture, communication=communication, public_speaking=public_speaking, teamwork=teamwork, leadership=leadership)
                # Getting current hackathon from subdomain
                hackathon_name = request.get_host().split(".")[0]
                hackathon = models.Hackathon.objects.get(name=hackathon_name)
                # Creating hackathon identification
                hid = models.HackathonIdentification(hackathon_name=hackathon.name, model_id=profile.id)
                hid.save()
                # Grabbing profile picture
                if 'profile_picture' in request.FILES: # checking if they provided picture
                    profile.profile_picture = request.FILES['profile_picture']
                else:
                    # No upload: give the user their own copy of the default
                    # image so later changes don't affect other users.
                    filename = "default-" + username + ".png"
                    media_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/media/"
                    shutil.copyfile(media_dir + "default.png", media_dir + filename)
                    profile.profile_picture = filename
                profile.save()
                login(request, user)
                messages.success(request, "Account successfully created.")
                return HttpResponseRedirect("/")
            else:
                messages.success(request, "Your email is already taken. Please enter a different one.")
        else:
            messages.success(request, "Your username is already taken. Please enter a different one.")
    form = forms.UserForm()
    return render(request, "hackathon/register.html", context={"form": form})
def user_login(request):
    """Authenticate a user and register them into the current hackathon.

    On POST, validates the submitted credentials. On success the user is
    logged in and a HackathonIdentification row is created for the
    hackathon derived from the request's subdomain if one does not already
    exist, then the user is redirected home. Failures re-render the login
    page with a message.
    """
    if request.method == "POST":
        username = request.POST.get("username")
        # Fix: the password was never read from the form (the lookup had
        # been replaced by a placeholder), so authentication could not work.
        password = request.POST.get("password")
        user = authenticate(username=username, password=password)
        if user:
            if user.is_active:
                login(request, user)
                # Register user into hackathon if they aren't already
                # Getting current hackathon from subdomain
                hackathon_name = request.get_host().split(".")[0]
                # Getting HID
                hid = models.HackathonIdentification.objects.filter(hackathon_name=hackathon_name, model_id=user.profile.id)
                if not hid:
                    hid = models.HackathonIdentification(hackathon_name=hackathon_name, model_id=user.profile.id)
                    hid.save()
                return HttpResponseRedirect("/")
            else:
                # Fix: grammar in the user-facing message ("has be" -> "has been").
                messages.success(request, "Your account has been deactivated. Please re-register.")
        else:
            messages.success(request, "Invalid credentials. Please try again.")
    return render(request, "hackathon/login.html")
@login_required(login_url="/login/")
def user_logout(request):
    """Log the current user out and redirect to the landing page."""
    logout(request)
    messages.success(request, "Successfully logged out.")
    return HttpResponseRedirect("/")
@login_required(login_url="/login/")
def update(request):
    """Update the authenticated user's account and profile.

    GET renders the form pre-filled with the current values; POST applies
    the submitted changes to both the Django ``User`` row and the linked
    ``UserProfile``.
    """
    # Check if form was submitted
    if request.method == "POST":
        # Shortcut variable
        data = request.POST
        # Grabbing all form data
        first_name = data.get("first_name")
        last_name = data.get("last_name")
        birthday = data.get("birthday")
        title = data.get("title")
        gender = data.get("gender")
        email = data.get("email")
        phone_number = data.get("phone_number")
        # notification_type = data.get("notification_type")
        # Fix: "school" is assigned to the profile below but was never read
        # from the form, which raised NameError on every POST.
        school = data.get("school")
        areas_of_expertise = data.get("areas_of_expertise")
        past_accomplishments = data.get("past_accomplishments")
        github_link = data.get("github_link")
        linkedin_link = data.get("linkedin_link")
        personal_website_link = data.get("personal_website_link")
        profile_picture = data.get("profile_picture")
        username = data.get("username")
        communication = data.get("communication")
        public_speaking = data.get("public-speaking")
        teamwork = data.get("teamwork")
        leadership = data.get("leadership")
        # Modifying birthday to correct date format
        birthday = datetime.datetime.strptime(birthday, '%m/%d/%Y').strftime('%Y-%m-%d')
        # Updating and saving user profile - linked to User
        profile = models.UserProfile.objects.get(user=request.user)
        profile.user.first_name = first_name
        profile.user.last_name = last_name
        profile.user.email = email
        profile.user.username = username
        profile.birthday = birthday
        profile.school = school
        profile.gender = gender
        profile.phone_number = phone_number
        profile.areas_of_expertise = areas_of_expertise
        profile.past_accomplishments = past_accomplishments
        profile.github_link = github_link
        profile.linkedin_link = linkedin_link
        profile.personal_website_link = personal_website_link
        profile.communication = communication
        profile.public_speaking = public_speaking
        profile.teamwork = teamwork
        profile.leadership = leadership
        # NOTE(review): "title" is read from the form but never stored on the
        # profile here, unlike in register() -- confirm whether intentional.
        # Grabbing profile picture
        if 'profile_picture' in request.FILES: # checking if they provided picture
            profile.profile_picture = request.FILES['profile_picture']
        # Fix: when no new picture was uploaded, the picture used to be reset
        # to "default.png", silently discarding the user's existing picture;
        # keep the current value instead.
        # Fix: profile.save() does not persist changes made to the related
        # User row, so save it explicitly.
        profile.user.save()
        profile.save()
        messages.success(request, "Account successfully updated.")
        return HttpResponseRedirect("/")
    # Setting form values to automatically fill
    form = forms.UserForm(initial={
        "first_name": request.user.first_name,
        "last_name": request.user.last_name,
        "email": request.user.email,
        "username": request.user.username,
        "birthday": datetime.datetime.strptime(str(request.user.profile.birthday), '%Y-%m-%d').strftime('%m/%d/%Y'),
        "school": request.user.profile.school,
        "gender": request.user.profile.gender,
        "phone_number": request.user.profile.phone_number,
        "areas_of_expertise": request.user.profile.areas_of_expertise,
        "past_accomplishments": request.user.profile.past_accomplishments,
        "github_link": request.user.profile.github_link,
        "linkedin_link": request.user.profile.linkedin_link,
        "personal_website_link": request.user.profile.personal_website_link,
        "profile_picture": request.user.profile.profile_picture,
    })
    return render(request, "hackathon/update.html", context={
        "form": form,
        # Adding in soft skill values to manually add to form in HTML file
        "communication": request.user.profile.communication,
        "public_speaking": request.user.profile.public_speaking,
        "teamwork": request.user.profile.teamwork,
        "leadership": request.user.profile.leadership,
    })
@login_required(login_url="/login/")
def competitors(request):
    """List every individual competitor registered in the current hackathon.

    For each competitor (excluding the viewer, staff, and superusers), the
    template receives their team (if any), whether the viewer may invite
    them to the viewer's team, and whether the viewer's team is full.
    """
    # Getting current hackathon from subdomain
    hackathon_name = request.get_host().split(".")[0]
    # Get all models within hackathon
    hids = models.HackathonIdentification.objects.filter(hackathon_name=hackathon_name)
    hackathon_competitors = []
    for hid in hids:
        # IDs prefixed with "t" belong to teams (see create_team's "team-"
        # prefix); "n"-prefixed IDs are presumably notifications -- TODO
        # confirm. Everything else is a user profile.
        if not hid.model_id.startswith("t") and not hid.model_id.startswith("n"):
            competitor = models.UserProfile.objects.get(id=hid.model_id)
            if competitor != request.user.profile:
                if not competitor.user.is_staff:
                    if not competitor.user.is_superuser:
                        hackathon_competitors.append(competitor)
    # Loads and displays all competitors within hackathon circle
    competitors = {}
    for competitor in hackathon_competitors:
        parameters = {}
        if competitor.team_id:
            team = models.Team.objects.get(id=competitor.team_id)
            parameters["team"] = team
        else:
            parameters["team"] = None
        # Inviting is only possible when the viewer has a team and the
        # competitor is not already on it.
        if request.user.profile.team_id:
            if request.user.profile.team_id == competitor.team_id:
                parameters["invite"] = False
            else:
                parameters["invite"] = True
        else:
            parameters["invite"] = False
        # NOTE(review): when the viewer has no team, this filter matches
        # profiles with a falsy team_id, so "max" may be meaningless in that
        # case -- confirm against the template's use of it.
        team_members = models.UserProfile.objects.filter(team_id=request.user.profile.team_id)
        if len(team_members) >= team_limit:
            parameters["max"] = True
        else:
            parameters["max"] = False
        competitors[competitor] = parameters
    return render(request, "hackathon/competitors.html", context={"competitors": competitors})
@login_required(login_url="/login/")
def create_team(request):
    """Create a new team led by the current user.

    POST creates the Team (with a "team-"-prefixed random ID), puts the
    current user on it, records the team membership, and registers the team
    into the hackathon derived from the request's subdomain. GET renders
    the creation form.
    """
    if request.method == "POST":
        data = request.POST
        # Getting data from form
        name = data.get("name")
        description = data.get("description")
        # Creating team with unique and branded ID
        team = models.Team(id="team-" + str(random_with_N_digits(8)), name=name, description=description, leader=request.user.profile.id)
        team.save()
        # Getting currently authenticated user and setting their team_id to team created
        user = models.UserProfile.objects.get(user=request.user)
        user.team_id = team.id
        user.save()
        # Creating team identification
        tid = models.TeamIdentification(team_id=team.id, user_id=user.id)
        tid.save()
        # Getting current hackathon from subdomain
        hackathon_name = request.get_host().split(".")[0]
        hackathon = models.Hackathon.objects.get(name=hackathon_name)
        # Creating hackathon identification
        hid = models.HackathonIdentification(hackathon_name=hackathon.name, model_id=team.id)
        hid.save()
        messages.success(request, "Team successfully created.")
        return HttpResponseRedirect("/")
    return render(request, "hackathon/create_team.html")
@login_required(login_url="/login/")
def teams(request):
    """Render every team in the current hackathon together with its members."""
    # Getting current hackathon from subdomain
    hackathon_name = request.get_host().split(".")[0]
    # Get all models within hackathon
    hids = models.HackathonIdentification.objects.filter(hackathon_name=hackathon_name)
    hackathon_teams = []
    for hid in hids:
        # Team ids carry a "t" prefix.
        if hid.model_id.startswith("t"):
            hackathon_teams.append(models.Team.objects.get(id=hid.model_id))
    # Map each team to its members. Filtering by team_id in the database
    # replaces the original O(teams x users) Python scan over every
    # UserProfile (and avoids shadowing the builtin `id`).
    teams = {
        team: list(models.UserProfile.objects.filter(team_id=team.id))
        for team in hackathon_teams
    }
    return render(request, "hackathon/teams.html", context={"teams": teams})
@login_required(login_url="/login/")
def notifications(request):
    """Show the requesting user's notifications for the current hackathon.

    Action notifications with a source team are paired with that Team object
    so the template can render accept/decline controls; others map to False.
    """
    # The hackathon is identified by the request subdomain.
    subdomain = request.get_host().split(".")[0]
    # Collect every notification registered under this hackathon
    # (notification ids carry an "n" prefix).
    relevant = []
    for hid in models.HackathonIdentification.objects.filter(hackathon_name=subdomain):
        if hid.model_id.startswith("n"):
            relevant.append(models.Notification.objects.get(id=hid.model_id))
    notifications = {}
    for note in relevant:
        # Only notifications addressed to the current user are shown.
        if note.target_id != str(request.user.profile.id):
            continue
        if note.type == "action" and note.source_id:
            notifications[note] = models.Team.objects.get(id=note.source_id)
        else:
            notifications[note] = False
    return render(request, "hackathon/notifications.html", context={"notifications": notifications})
def contact_support(request):
    """Handle the contact-us form: email the support address on POST,
    otherwise render the contact form."""
    if request.method == "POST":
        # Pull the sender's details out of the submitted form.
        sender_name = request.POST.get("name")
        sender_email = request.POST.get("email")
        body = request.POST.get("message")
        # Subject names the hackathon (subdomain); body carries sender details.
        subject = "HackCollab - " + request.get_host().split(".")[0] + " Contact Us"
        content = "From: " + sender_name + "\n\n" + body + "\n\nEmail: " + sender_email
        EmailMessage(subject, content, to=["<EMAIL>"]).send()
        # Redirecting to success page
        return HttpResponseRedirect("/success/")
    return render(request, "hackathon/contact_support.html")
def success(request):
    """Render the generic success page shown after form submissions."""
    return render(request, "hackathon/success.html")
@login_required(login_url="/login/")
def view_profile(request, user_id):
    """Display a competitor's profile page, including their team if any."""
    profile = models.UserProfile.objects.get(id=user_id)
    ctx = {"profile": profile}
    if profile.team_id:
        # Resolve the team so the template can render/link to it.
        ctx["team"] = models.Team.objects.get(id=profile.team_id)
    return render(request, "hackathon/profile.html", context=ctx)
@login_required(login_url="/login/")
def view_team(request, team_id):
    """Read-only view of a team: details, member profiles, and leader."""
    team = models.Team.objects.get(id=team_id)
    # Resolve every membership record for this team into a user profile.
    teammates = [
        models.UserProfile.objects.get(id=tid.user_id)
        for tid in models.TeamIdentification.objects.filter(team_id=team_id)
    ]
    return render(request, "hackathon/view_team.html", context={
        "team": team,
        "teammates": teammates,
        "leader": models.UserProfile.objects.get(id=team.leader),
    })
@login_required(login_url="/login/")
def team(request, team_id):
team = models.Team.objects.get(id=team_id)
if request.user.profile.team_id == team.id:
team = models.Team.objects.get(id=team_id)
# Getting current hackathon from subdomain
hackathon_name = request.get_host().split(".")[0]
# Get all models within hackathon
hids = models.HackathonIdentification.objects.filter(hackathon_name=hackathon_name)
hackathon_notifications = []
for hid in hids:
if hid.model_id.startswith("n"):
notification = models.Notification.objects.get(id=hid.model_id)
hackathon_notifications.append(notification)
# Loading all notifications for the team
notifications = {}
for notification in hackathon_notifications:
if notification.target_id == team_id:
notifications[notification] = models.UserProfile.objects.get(id=notification.source_id)
# Loading all members
teammates = []
for user in models.UserProfile.objects.all():
if user.team_id == team_id:
teammates.append(user)
return render(request, "hackathon/team.html", context={
"team": team,
"leader": models.UserProfile.objects.get(id=team.leader),
"teammates": teammates,
"notifications": notifications,
"submitted": models.TeamSubmission.objects.filter(hackathon_name=hackathon_name, team_id=team_id).exists(),
})
else:
# Return error if user is not | |
f.close()
commandSuccess=True
elif a[0] == 'showall':
for i in range(len(texts)):
#print texts(i)
_ba.chatmessage(str(i) + '. ' + texts[i])
commandSuccess=True
elif a[0] == 'del' and len(a)>1:
try:
if len(texts) > 1:
texts.pop(int(a[1]))
#write to file
with open(python_path + '/BsTextOnMap.py') as (file):
s = [ row for row in file ]
s[0] = 'texts = ' + str(texts) + '\n'
f = open(python_path + '/BsTextOnMap.py', 'w')
for i in s:
f.write(i)
f.close()
commandSuccess=True
else:
sendError(f"At least one text should be present",clientID)
except:
pass
else:
ba.screenmessage(f"Usage: /text showall or /text add [text] or /text del [textnumber]", clients=[clientID], transient=True)
#ADMIN
elif m == '/admin':
if True: #try:
clID = int(a[0])
updated = roles.admins
ros = _ba.get_game_roster()
for i in ros:
if (i is not None) and (i != {}) and (i['client_id'] == clID):
name = i['players'][0]['name']
newID = i['account_id']
if a[1] == 'add':
if newID not in updated:
roles.admins.append(newID)
commandSuccess=True
else: sendError(f"{str(name)}, is already an admin !",clientID)
elif a[1] == 'remove':
if newID in updated:
roles.admins.remove(newID)
commandSuccess=True
else: sendError(f"{str(name)}, is already not an admin !",clientID)
updated = roles.admins
if (len(a) > 2) and (uniqueID in roles.owners) and commandSuccess:
if str(a[2]).startswith('perm'):
#Add them to members.json (log)
m = open(membersFile, 'r')
d = json.loads(m)
if (newID not in d['admins']): d['admins'][newID] = []
if (name not in d['admins'][newID]): d['admins'][newID].append(name)
m2 = open(membersFile, 'w')
m2.write(json.dumps(d, indent=4))
m2.close()
#Add them to roles.py
with open(python_path + '/roles.py') as (file):
s = [ row for row in file ]
s[9] = 'admins = ' + str(updated) + '\n'
f = open(python_path + '/roles.py', 'w')
for i in s:
f.write(i)
f.close()
'''except:
ba.screenmessage(f"Using: /admin [ClientID] add/remove perm/None", clients=[clientID], transient=True)'''
#BAN
elif m == '/ban':
try:
clID = int(a[0])
updated = roles.banList
ros = _ba.get_game_roster()
for i in ros:
if (i is not None) and (i != {}) and (i['client_id'] == clID):
name = i['players'][0]['name']
new = i['account_id']
if new not in roles.banList:
if len(a) > 1: roles.banList[new] = [i['display_string'], str(a[1])] #Add Name If Provided
else: roles.banList[new] = [i['display_string']]
updated = roles.banList
commandSuccess=True
_ba.chatmessage(f"{str(name)}, has been BANNED !")
_ba.disconnect_client(clID)
else: sendError(f"{str(name)}, is already BANNED !",clientID)
if not commandSuccess: return
m = open(membersFile, 'r')
d = json.loads(m)
if (newID not in d['banList']): d['banList'][newID] = []
if (name not in d['banList'][newID]): d['banList'][newID].append(name)
m2 = open(membersFile, 'w')
m2.write(json.dumps(d, indent=4))
m2.close()
with open(python_path + '/roles.py') as (file):
s = [ row for row in file ]
s[2] = 'banList = ' + str(updated) + '\n'
f = open(python_path + '/roles.py', 'w')
for i in s:
f.write(i)
f.close()
except:
ba.screenmessage(f"Using: /ban ClientID (optional-NickNameForIdentification)", clients=[clientID], transient=True)
#SPECIAL
elif m == '/special':
try:
clID = int(a[0])
updated = roles.special
ros = _ba.get_game_roster()
cmds = a[2:]
for i in ros:
if (i is not None) and (i != {}) and (i['client_id'] == clID):
name = i['players'][0]['name']
newID = i['account_id']
success = False
if a[1] == 'add':
if newID not in updated:
roles.special[newID] = cmds
commandSuccess=True
else:
for cmd in cmds:
if (cmd.startswith('/')) and (cmd not in roles.special[newID]):
roles.special[newID].append(cmd)
success = True
else: sendError(f"{str(name)} already has perms to '{cmd}' !\n (Note: cmd should start with '/')",clientID)
commandSuccess=True
if success: _ba.chatmessage(f"Now {str(name)} can use {str(cmds)}...")
elif a[1] == 'remove':
if (len(a) > 2) and (newID in updated):
for cmd in cmds:
if (cmd.startswith('/')) and (cmd not in roles.special[newID]):
roles.special[newID].remove(cmd)
success = True
else: sendError(f"{str(name)} has no perms to '{cmd}' for you to remove again !\n (Note: cmd should start with '/')",clientID)
commandSuccess=True
if success: _ba.chatmessage(f"Now {str(name)} can't use {str(cmds)}...")
if (len(a) < 3) and (newID in updated):
roles.special.pop(newID)
commandSuccess=True
else: sendError(f"{str(name)} already don't have special perms !",clientID)
updated = roles.special
if (len(a) > 2) and (uniqueID in roles.owners):
if commandSuccess:
#Add them to members.json (log)
m = open(membersFile, 'r')
d = json.loads(m)
if (newID not in d['special']): d['special'][newID] = []
if (name not in d['special'][newID]): d['special'][newID].append(name)
m2 = open(membersFile, 'w')
m2.write(json.dumps(d, indent=4))
m2.close()
#Add them to roles.py
with open(python_path + '/roles.py') as (file):
s = [ row for row in file ]
s[10] = 'special = ' + str(updated) + '\n'
f = open(python_path + '/roles.py', 'w')
for i in s:
f.write(i)
f.close()
except:
ba.screenmessage(f"Using: /special [ClientID] add/remove Cmds", clients=[clientID], transient=True)
#PARTYNAME
elif m == '/partyname':
if a == []:
ba.screenmessage(f"Usage: /partyname Name of party", clients=[clientID], transient=True)
else:
name = ''
for word in a:
name += word + ' '
try:
_ba.set_public_party_name(name)
ba.screenmessage(f"Party name changed to '{name}'.")
mysettings.server_name = name
commandSuccess=True
except:
sendError("failed to change party's name")
#PARTY
elif m == '/party':
if a == []:
ba.screenmessage(f"Usage: /party 0(pvt) or 1(pub)", clients=[clientID], transient=True)
elif (a[0] == '0') or (a[0].startswith('Pri')) or (a[0] == 'Pvt'):
try:
_ba.set_public_party_enabled(False)
_ba.chatmessage('Party is Private...')
commandSuccess=True
except:
sendError('failed to change',clientID)
elif a[0] == '1' or (a[0].startswith('Pub')):
try:
_ba.set_public_party_enabled(True)
_ba.chatmessage('Party is Public...')
commandSuccess=True
except:
sendError('failed to change',clientID)
else:
ba.screenmessage(f"Usage: /party 0(pvt) or 1(pub)", clients=[clientID], transient=True)
#SET SCREEN TEXT COLOR
elif m in ('/setscreentextcolor', '/settextcolor', '/setscreencolor'):
try:
if len(a) > 1: screenTextColor = (int(a[0]), int(a[1]), int(a[2]))
if (len(a) == 1) and (isinstance(a[0], int)): screenTextColor = tuple(a[0])
commandSuccess = True
except:
ba.screenmessage('Usage: /setscreentextcolor R G B', transient=True, clients=[clientID])
#WL
elif m == '/wl': #whiteListMode
try:
wlm = settings['whiteListMode']
if len(a) < 2:
if a[0].lower() in ('no', 'off', 'disable'):
if wlm :
wlm = False
ba.screenmessage("Server WhiteList Mode disabled for 30 seconds\n if want to disable permanently, use\n '/settings whiteListMode disable'", color=(1,0,0), clients=[clientID], transient=True)
ba.Timer(30, ba.Call(enable_back), timetype=ba.TimeType.REAL)
else: ba.screenmessage("Wait what, why u wanna disable a thing\n which is already disabled..?", color=(1,0,0), clients=[clientID], transient=True)
if a[0].lower() in ('yes', 'on', 'enable'):
ba.screenmessage("Use '/settings whiteListMode enable' instead of this cmd!", color=(1,0,0), clients=[clientID], transient=True)
else:
clID = int(a[1])
#refresh/update jsons
m = open(membersFile, 'r')
org_mem = json.loads(m)
org_mem['serverWhiteList'] = roles.serverWhiteList
m2 = open(membersFile, 'w')
m2.write(json.dumps(org_mem, indent=4))
m2.close()
updated = roles.serverWhiteList
ros = _ba.get_game_roster()
for i in ros:
if (i is not None) and (i != {}) and (i['client_id'] == clID):
name = i['players'][0]['display_string']
newID = i['account_id']
success = False
if a[1] == 'add':
if newID not in updated:
roles.serverWhiteList[newID] = name
commandSuccess=True
else: sendError(f"{str(name)}, is already in serverWhiteList!",clientID)
elif a[1] == 'remove':
if newID in updated:
roles.serverWhiteList.pop(newID)
commandSuccess=True
else: sendError(f"{str(name)} already not in serverWhiteList!",clientID)
updated = roles.serverWhiteList
if (len(a) > 2) and (uniqueID in roles.owners):
if commandSuccess:
#Add them to members.json (log)
m = open(membersFile, 'r')
d = json.loads(m)
d['serverWhiteList'] = updated
m2 = open(membersFile, 'w')
m2.write(json.dumps(d, indent=4))
m2.close()
#Add them to roles.py
with open(python_path + '/roles.py') as (file):
s = [ row for row in file ]
s[3] = 'serverWhiteList = ' + str(updated) + '\n'
f = open(python_path + '/roles.py', 'w')
for i in s:
f.write(i)
f.close()
def enable_back():
wlm = True
except:
ba.screenmessage(f"Using: /wl [ClientID] add/remove", clients=[clientID], transient=True)
#CHAT COOL DOWN
elif m == '/cd':
try:
if a[0].lower() in ('no', 'off', 'disable'):
if chatCoolDownTime:
chatCoolDownTime = False
#commandSuccess = True #This line maybe used by you in other commands
else: ba.screenmessage("Wait what, why u wanna disable a thing\n which is already disabled..?", color=(1,0,0), clients=[clientID], transient=True)
else:
try:
if int(a[0].lower()) in range(300):
chatCoolDownTime = int(a[0])
_ba.chatmessage("Successfully set chatCoolDown time to {} seconds :)".format(str(a[0])))
#commandSuccess = True #This line maybe used by you in other commands
else: ba.screenmessage("Oof... 300 seconds is maximum cooldown, Why this much?", color=(1,1,1), clients=[clientID], transient=True)
except:
ba.screenmessage("Give an Integer as arg... you can't trick me\n Usage: '/cd CD_Time_In_Integer'", color=(1,0,0), clients=[clientID], transient=True)
except:
ba.screenmessage("Usage:\n'/cd disable/off/no' [or] '/cd CD_Time_In_Integer' for enabling...", color=(1,0,0), clients=[clientID], transient=True)
#PAUSE
elif m == '/pause':
activity.globalsnode.paused = activity.globalsnode.paused == False
commandSuccess=True
#SETTINGS
elif m in ('/settings', '/set', '/setting'):
try:
success = False
enables = ('yes', 'on', 'enable')
disables = ('no', 'off', 'disable')
_set_ = a[0]
if _set_.lower() in ('powerups', 'p', 'pups'):
if len(a) <= 2:
sendError(f"Invalid key !, Try checking by '/help settings'",clientID)
else:
_set = str(a[1])
if _set in powerups:
if str(a[2]).lower() in enables:
if powerups[_set] != True:
powerups[_set] = True
commandSuccess=True
else: sendError(f"This Setting is already enabled !",clientID)
| |
<filename>src/DCGMM/dataset/TF2_Dataset.py
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import math
import gzip
import pickle
from scipy import ndimage
import numpy as np
from tensorflow import data
from DCGMM.parsers import Kwarg_Parser
from DCGMM.utils import log
import tensorflow_datasets as tfds
# Known dataset identifiers (historical pickle-file names).
# NOTE(review): this constant is not referenced anywhere in the visible code —
# presumably consumed by other modules; confirm before removing.
Dataset_Type = [
 'SVHN' ,
 'MNIST' ,
 'CUB200' ,
 'EMNIST' ,
 'Fruits' ,
 'CIFAR10' ,
 'MADBase' ,
 'NotMNIST' ,
 'Devanagari' ,
 'FashionMNIST',
 'ISOLET' ,
 'CelebA' ,
 ]
class TF2_Dataset(object):
def __init__(self, **kwargs):
self.parser = Kwarg_Parser(**kwargs)
self.dataset_name = self.parser.add_argument('--dataset_name' , type=str , default='mnist' , help='tfds download string')
self.dataset_dir = self.parser.add_argument('--dataset_dir' , type=str , default='./datasets' , help='set the default directory to search for dataset files or create them')
self.dataset_file = self.parser.add_argument('--dataset_file' , type=str , default='MNIST' , help='load a compressed pickle file. If not present, a download attempt is made. This may take a while for large datasets such as SVHN')
self.renormalize01 = self.parser.add_argument('--renormalize01' , type=str , default='no' , choices=['no', 'yes'] , help='renormalize the data in a range [-1, +1] instead the "normal" range of [0, +1]')
self.renormalizeC = self.parser.add_argument('--renormalizeC' , type=float , default=255. , help='renormalize the data by dividing all channels by x')
self.slice = self.parser.add_argument('--slice' , type=int , default=[-1, -1] , help='replace all images in dataset by a N x M-patch cropped from the center of each image. if non negative, the two value represent N and M, otherwise the full image is used')
self.squeeze = self.parser.add_argument('--squeeze' , type=int , default=[-1, -1] , help='squeeze all images in dataset to a N x M-patch')
self.decimate_class_train = self.parser.add_argument('--decimate_class_train' , type=int , default=[] , help='defines the classes (train dataset) whose number of examples should be reduced')
self.decimate_percentage_train = self.parser.add_argument('--decimate_percentage_train' , type=float, default=[] , help='define the reduction factor for train dataset (range [0.0, 1.0]), if only one reduction value is given, it is used for all defined (decimate_class_train) classes')
self.decimate_class_test = self.parser.add_argument('--decimate_class_test' , type=int , default=[] , help='defines the classes (test dataset) whose number of examples should be reduced')
self.decimate_percentage_test = self.parser.add_argument('--decimate_percentage_test' , type=float, default=[] , help='define the reduction factor for test dataset (range [0.0, 1.0]), if only one reduction value is given, it is used for all defined (decimate_class_test) classes')
self.noise_train = self.parser.add_argument('--noise_train' , type=float, default=0. , help='use noise factor to add noise to the complete training data (min, max) * factor')
self.noise_test = self.parser.add_argument('--noise_test' , type=float, default=0. , help='use noise factor to add noise to the complete test data (min, max) * factor')
self.data_type = self.parser.add_argument('--data_type ' , type=int , default=32, choices=[32,64] , help='training batch size')
self.batch_size = self.parser.add_argument('--batch_size' , type=int , default=100 , help='training batch size')
self.test_batch_size = self.parser.add_argument('--test_batch_size' , type=int , default=self.batch_size , help='test batch size')
self.rotation_per_iteration = self.parser.add_argument('--rotation_per_iteration' , type=float, default=0 , help='rotate the input data (images) of 1 degree per training iteration')
self.brightness_change_per_iteration = self.parser.add_argument('--brightness_change_per_iteration', type=float, default=0 , help='increase or decrease the brightness of the input data (images) by 0.001 each iteration (max decrease/increase= -0.5/+0.5, data clipping [-1, 1])')
_5_work_days = ( # tuple(amplitude, shift, noise) # @7000 iteration
(.4, .6, 0.026), # class 0
(.3, .4, 0.026), # class 1
(.3, .3, 0.026),) # class 2
self.distribution_change_per_task = self.parser.add_argument('--distribution_change_per_task', type=eval, default=0, nargs='0 is off, else see 5 day example')
# self.parser.add_argument('--TX_distribution' , type=float, default=[1., 1., 1., 0, 0, 0, 0, 0, 0, 0], nargs='*')
self.dataset_file += '' if self.dataset_file.endswith('.pkl.gz') else '.pkl.gz'
self.dataset_path = self.dataset_dir
file_path = os.path.join(self.dataset_path, self.dataset_file)
"""
with gzip.open(file_path) as f:
data = pickle.load(f)
self.properties = data['properties']
self.raw_train_samples = data['data_train']
self.raw_train_labels = data['labels_train']
self.raw_test_samples = data['data_test']
self.raw_test_labels = data['labels_test']
print (self.properties)
"""
print ("Loading ", self.dataset_name) ;
(xtr,ytr),info_tr = tfds.load(self.dataset_name, batch_size=-1, split="train",as_supervised=True, with_info=True)
(xtst,ytst),info_test= tfds.load(self.dataset_name, batch_size=-1, split="test",as_supervised=True, with_info=True)
h,w,c = info_test.features['image'].shape
num_classes = info_test.features['label'].num_classes
print ("CCCCC=",info_test.features) ;
if self.data_type==32:
dt = np.float32
else:
dt = np.float64
ytr_np = ytr.numpy().astype("int64") ;
ytst_np = ytst.numpy().astype("int64") ;
onehot_tr_raw = np.zeros([ytr_np.shape[0],num_classes],dtype=dt)
onehot_tst_raw = np.zeros([ytst_np.shape[0],num_classes],dtype=dt)
onehot_tr_raw[range(0,ytr_np.shape[0]),ytr_np] = 1 ;
onehot_tst_raw[range(0,ytst_np.shape[0]),ytst_np] = 1 ;
properties = {'num_of_channels':c, 'num_classes':10, 'dimensions':[h,w]}
self.properties = properties ;
self.raw_train_samples = xtr.numpy().astype(dt).reshape(-1,h,w,c) ;
self.raw_train_labels = onehot_tr_raw
self.raw_test_samples = xtst.numpy().astype(dt).reshape(-1,h,w,c) ;
self.raw_test_labels = onehot_tst_raw ;
print('raw train sample values are between [{}, {}]'.format(np.min(self.raw_train_samples), np.max(self.raw_train_samples)))
print('raw test sample values are between [{}, {}]'.format(np.min(self.raw_test_samples), np.max(self.raw_test_samples)))
# FIXME: re-normalize the data
if self.renormalize01 == 'yes':
lower, upper = 0, +1
self.raw_train_samples = (upper - lower) * np.divide(
np.subtract(self.raw_train_samples, np.min(self.raw_train_samples)),
np.subtract(np.max(self.raw_train_samples), np.min(self.raw_train_samples))
) + lower
self.raw_test_samples = (upper - lower) * np.divide(
np.subtract(self.raw_test_samples, np.min(self.raw_test_samples)),
np.subtract(np.max(self.raw_test_samples), np.min(self.raw_test_samples))
) + lower
self.raw_train_samples /= self.renormalizeC ;
self.raw_test_samples /= self.renormalizeC ;
print('train sample values are between [{}, {}]'.format(np.min(self.raw_train_samples), np.max(self.raw_train_samples)))
print('test sample values are between [{}, {}]'.format(np.min(self.raw_test_samples), np.max(self.raw_test_samples)))
self.properties['train_shape'] = self.raw_train_samples.shape
self.properties['test_shape'] = self.raw_test_samples.shape
self.scalar_labels_train = self.raw_train_labels.argmax(axis=1)
self.scalar_labels_test = self.raw_test_labels.argmax(axis=1)
self.indices_train = np.arange(self.raw_train_samples.shape[0])
self.indices_test = np.arange(self.raw_test_samples.shape[0])
def get_iterator(self, type='training', enum=False, **kwargs):
batch_size = kwargs.get('batch_size', 100)
classes = kwargs.get('classes', range(10))
epochs = kwargs.get('epochs', 1)
ds_obj_train, ds_obj_test, _, _ = self.get_dataset(classes=classes, batch_size=batch_size, epochs=epochs)
if type == 'training': return enumerate(iter(ds_obj_train)) if enum else iter(ds_obj_train)
if type == 'testing' : return enumerate(iter(ds_obj_test)) if enum else iter(ds_obj_test) # always 1 epochs
raise Exception('invalid type (default=training or testing)')
def get_class_indices(self, classes):
int_class = int(classes)
mask_train = (self.scalar_labels_train == int_class)
mask_test = (self.scalar_labels_test == int_class)
return self.indices_train[mask_train], self.indices_test[mask_test]
def get_dataset(self, classes, **kwargs):
''' Returns TF dataset objects for train and test sets '''
epochs = kwargs.get('epochs', None) # infinity
batch_size = kwargs.get('batch_size', None)
test_batch_size = kwargs.get('test_batch_size', None)
indices_set_train = []
indices_set_test = []
for class_ in classes:
indices_train, indices_test = self.get_class_indices(class_)
indices_set_train += [indices_train]
indices_set_test += [indices_test]
all_indices_train = np.concatenate(indices_set_train, axis=0)
all_indices_test = np.concatenate(indices_set_test, axis=0)
np.random.shuffle(all_indices_train)
np.random.shuffle(all_indices_test)
data_train = self.raw_train_samples[all_indices_train]
data_test = self.raw_test_samples[all_indices_test]
labels_train = self.raw_train_labels[all_indices_train]
labels_test = self.raw_test_labels[all_indices_test]
h, w = self.properties['dimensions']
c = self.properties['num_of_channels']
data_train_reshaped = data_train.reshape(-1, h, w, c)
data_test_reshaped = data_test.reshape(-1, h, w, c)
# Construct a Dataset object (TF2) for drawing batches/shuffling here
ds_obj_train = data.Dataset.from_tensor_slices((data_train_reshaped, labels_train))
ds_obj_train = ds_obj_train.batch(batch_size if batch_size else self.batch_size , drop_remainder=True)
ds_obj_train = ds_obj_train.repeat(epochs) # infinity if None (default)
ds_obj_test = data.Dataset.from_tensor_slices((data_test_reshaped, labels_test))
ds_obj_test = ds_obj_test.batch(test_batch_size if test_batch_size else self.test_batch_size, drop_remainder=True)
return ds_obj_train, ds_obj_test, data_train.shape[0], data_test.shape[0]
def rotate(self, xs, iteration, task, **kwargs):
''' rotate all images by a given angle (cmd param or kwarg) and iteration
@param xs : data
@param iteration: iteration for task rotation (iteration % 360)
@param task : current training task
'''
task_rotation = kwargs.get(f'T{task + 1}_rotation')
if not (self.rotation_per_iteration or task_rotation): return xs # no rotation
if self.rotation_per_iteration and self.rotation_per_iteratio != 0:
task_rotation = self.rotation_per_iteration
rotation_angle = task_rotation * (iteration) % 360
if task_rotation:
rotation_angle = task_rotation
if rotation_angle == 0.0: return xs
return ndimage.rotate(xs, rotation_angle, reshape=False, axes=(2, 1))
def brighten_darken(self, xs, iteration, task, **kwargs):
''' increase/decrease the brightness of images by a given value [-1., 1.] (clipping of images to [0., 1.]) '''
task_brighten = kwargs.get(f'T{task + 1}_brightness')
if not (self.brightness_change_per_iteration or task_brighten): return xs # no brighten
if self.brightness_change_per_iteration != 0.:
task_brightness = self.brightness_change_function()
if task_brighten:
task_brightness = task_brighten
if task_brightness == 0: return xs
return np.clip(xs + task_brightness, 0.0, 1.0)
    def brightness_change_function(self, toggle=+1.0, task_brightness=0., lower_limit=-0.5, upper_limit=0.5):
        '''Advance a brightness value back and forth "like a scanner running light".

        State is kept by mutating this function's own __defaults__ tuple:
        `toggle` stores the current direction (+1/-1) and `task_brightness`
        the running brightness. Both persist across calls and — since the
        function object lives on the class — are shared by ALL instances.

        @param toggle         : direction of change, flipped at the limits (stateful)
        @param task_brightness: running brightness value (stateful)
        @param lower_limit    : lower bound at which the direction flips
        @param upper_limit    : upper bound at which the direction flips
        @return               : updated brightness value
        '''
        # The underlying function object; its __defaults__ tuple is the state store.
        me = self.brightness_change_function.__func__
        new_value = task_brightness + (self.brightness_change_per_iteration * toggle)
        # Flip the stored direction once the step would leave [lower_limit, upper_limit];
        # note the current step still uses the old (local) toggle value.
        if new_value >= upper_limit or new_value <= lower_limit: me.__defaults__ = (me.__defaults__[0] * -1,) + me.__defaults__[1:] # update toggle parameter
        task_brightness += self.brightness_change_per_iteration * toggle
        me.__defaults__ = (me.__defaults__[0], task_brightness) + me.__defaults__[2:] # update task_brightness parameter
        return task_brightness
def distribution_change_function(self, task=0, tasks_per_day=10):
''' sinusoidal change of the class distribution (similar to network data distribution)
@param tasks_per_day: number of tasks represent one day
@return : normed reduction parameter as list
'''
me = self.distribution_change_function.__func__
omega = 1 / (tasks_per_day / (np.pi * 2))
distribution = | |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import ast
import json
import os
import re
from botocore.vendored import requests
import logging
import secret_manager
import hmac
import hashlib
class LabelBot:
LABEL_PAGE_PARSE = 30 # Limit for total labels per page to parse
def __init__(self,
repo=os.environ.get("repo"),
github_user=None,
github_oauth_token=None,
bot_user=None,
bot_oauth_token=None,
prediction_url=None,
apply_secret=True):
"""
Initializes the Label Bot
:param repo: GitHub repository that is being referenced
:param github_user: GitHub username
:param github_oauth_token: GitHub authentication token (Personal access token)
:param apply_secret: GitHub secret credential (Secret credential that is unique to a GitHub developer)
"""
self.repo = repo
self.github_user = github_user
self.github_oauth_token = github_oauth_token
self.bot_user = bot_user
self.bot_oauth_token = bot_oauth_token
self.prediction_url = prediction_url
if apply_secret:
self._get_secret()
self.auth = (self.github_user, self.github_oauth_token)
self.bot_auth = (self.bot_user, self.bot_oauth_token)
self.all_labels = None
def _get_rate_limit(self):
"""
This method gets the remaining rate limit that is left from the GitHub API
:return Remaining API requests left that GitHub will allow
"""
res = requests.get('https://api.github.com/rate_limit',
auth=self.auth)
res.raise_for_status()
data = res.json()['rate']
return data['remaining']
def _get_secret(self):
"""
This method is to get secret value from Secrets Manager
"""
secret = json.loads(secret_manager.get_secret())
self.github_user = secret["github_user"]
self.github_oauth_token = secret["github_oauth_token"]
self.webhook_secret = secret["webhook_secret"]
self.bot_user = secret["bot_user"]
self.bot_oauth_token = secret["bot_oauth_token"]
self.prediction_url = secret["prediction_url"]
def _tokenize(self, string):
"""
This method is to extract labels from comments
:param string: String parsed from a GitHub comment
:return Set of Labels which have been extracted
"""
substring = string[string.find('[') + 1: string.rfind(']')]
labels = [' '.join(label.split()).lower() for label in substring.split(',')]
return labels
def _ascii_only(self, raw_string, sub_string):
"""
This method is to convert all non-alphanumeric characters from raw_string to sub_string
:param raw_string The original string messy string
:param sub_string The string we want to convert to
:return Fully converted string
"""
converted_string = re.sub("[^0-9a-zA-Z]", sub_string, raw_string)
return converted_string.lower()
def _find_all_labels(self):
"""
This method finds all existing labels in the repo
:return A set of all labels which have been extracted from the repo
"""
url = f'https://api.github.com/repos/{self.repo}/labels'
response = requests.get(url, auth=self.auth)
response.raise_for_status()
# Getting total pages of labels present
if "link" not in response.headers:
pages = 1
else:
pages = int(self._ascii_only(response.headers['link'], " ").split()[-3])
all_labels = []
for page in range(1, pages + 1):
url = 'https://api.github.com/repos/' + self.repo + '/labels?page=' + str(page) \
+ '&per_page=%s' % self.LABEL_PAGE_PARSE
response = requests.get(url, auth=self.auth)
for item in response.json():
all_labels.append(item['name'].lower())
self.all_labels = set(all_labels)
return set(all_labels)
def _format_labels(self, labels):
"""
This method formats labels that a user specifies for a specific issue. This is meant
to provide functionality for the operations on labels
:param labels: The messy labels inputted by the user which we want to format
:return: Formatted labels to send for CRUD operations
"""
assert self.all_labels, "Find all labels first"
# clean labels, remove duplicated spaces. ex: "hello world" -> "hello world"
labels = [" ".join(label.split()) for label in labels]
labels = [label for label in labels if label.lower() in self.all_labels]
return labels
def add_labels(self, issue_num, labels):
    """
    Add a list of labels to one issue.

    Only labels that already exist in the repo are applied.

    :param issue_num: The specific issue number we want to label
    :param labels: The labels which we want to add
    :return: True on success, False on failure (for logging purposes)
    """
    labels = self._format_labels(labels)
    issue_labels_url = f'https://api.github.com/repos/{self.repo}/issues/{issue_num}/labels'
    response = requests.post(issue_labels_url, json.dumps(labels), auth=self.auth)
    # Guard clause: bail out early on anything but a 200.
    if response.status_code != 200:
        logging.error(f'Could not add the labels to {issue_num}: {labels}. '
                      f'\nResponse: {json.dumps(response.json())}')
        return False
    logging.info(f'Successfully added labels to {issue_num}: {labels}.')
    return True
def remove_labels(self, issue_num, labels):
    """
    Remove a list of labels from one issue.

    Only labels that already exist in the repo are removed. Stops and
    returns False on the first failed removal (earlier removals are not
    rolled back).

    :param issue_num: The specific issue number we want to label
    :param labels: The labels which we want to remove
    :return: True on success, False on failure (for logging purposes)
    """
    from urllib.parse import quote
    labels = self._format_labels(labels)
    issue_labels_url = f'https://api.github.com/repos/{self.repo}/issues/{issue_num}/labels/'
    for label in labels:
        # Bug fix: URL-encode the label name — repo labels may contain
        # spaces or other characters invalid in a URL path segment.
        delete_label_url = issue_labels_url + quote(label)
        response = requests.delete(delete_label_url, auth=self.auth)
        if response.status_code == 200:
            logging.info(f'Successfully removed label to {issue_num}: {label}.')
        else:
            logging.error(f'Could not remove the label to {issue_num}: {label}. '
                          f'\nResponse: {json.dumps(response.json())}')
            return False
    return True
def update_labels(self, issue_num, labels):
    """
    Replace the full label set of one issue with the given labels.

    Only labels that already exist in the repo are applied.

    :param issue_num: The specific issue number we want to label
    :param labels: The labels which we want to set on the issue
    :return: True on success, False on failure (for logging purposes)
    """
    labels = self._format_labels(labels)
    issue_labels_url = f'https://api.github.com/repos/{self.repo}/issues/{issue_num}/labels'
    response = requests.put(issue_labels_url, data=json.dumps(labels), auth=self.auth)
    succeeded = response.status_code == 200
    if succeeded:
        logging.info(f'Successfully updated labels to {issue_num}: {labels}.')
    else:
        logging.error(f'Could not update the labels to {issue_num}: {labels}. '
                      f'\nResponse: {json.dumps(response.json())}')
    return succeeded
def replace_label(self, issue_num, labels):
    """
    Swap one label for another on an issue.

    :param issue_num: The specific issue number we want to label
    :param labels: Two-element list: [label to remove, label to add]
    :return: True on success, False on failure (for logging purposes)
    """
    labels = self._format_labels(labels)
    if len(labels) != 2:
        logging.error('Must only specify 2 labels when wanting to change labels')
        return False
    old_label, new_label = labels
    logging.info('Label on {} to change from: {} to {}'.format(str(issue_num), str(old_label), str(new_label)))
    # Both steps must succeed; short-circuits if the removal fails.
    return self.remove_labels(issue_num, [old_label]) and self.add_labels(issue_num, [new_label])
def predict_label(self, issue_num):
    """
    Ask the prediction service for labels for an issue, then apply a
    'question' label and/or post a recommendation comment accordingly.

    :param issue_num: The issue number to predict labels for
    :return: True on success, False if the prediction service failed
    """
    predict_issue = {"issues": [issue_num]}
    header = {"Content-Type": 'application/json'}
    response = requests.post(self.prediction_url, data=json.dumps(predict_issue), headers=header)
    # Bug fix: check the status BEFORE parsing the payload — an error
    # response has no "predictions" field and would raise while the
    # original code indexed into it.
    if response.status_code != 200:
        logging.error("Unable to predict labels")
        return False
    predicted_labels = response.json()[0]["predictions"]
    logging.info(f'Successfully predicted labels to {issue_num}: {predicted_labels}')
    if 'Question' in predicted_labels:
        message = "Hey, this is the MXNet Label Bot and I think you have raised a question. \n" \
                  "For questions, you can also submit on MXNet discussion forum (https://discuss.mxnet.io), " \
                  "where it will get a wider audience and allow others to learn as well. Thanks! \n "
        # Bug fix: the class defines add_labels(), not add_github_labels().
        self.add_labels(issue_num, ['question'])
    else:
        message = "Hey, this is the MXNet Label Bot. \n Thank you for submitting the issue! I will try and " \
                  "suggest some labels so that the appropriate MXNet community members can help " \
                  "resolve it. \n "
        if predicted_labels:
            message += 'Here are my recommended label(s): {}'.format(', '.join(predicted_labels))
    self.create_comment(issue_num, message)
    return True
def create_comment(self, issue_num, message):
    """
    Post a comment on an issue as the label bot.

    :param issue_num: The issue we want to comment on
    :param message: The comment message we want to send
    :return: True on success, False on failure (for logging purposes)
    """
    send_msg = {"body": message}
    issue_comments_url = f'https://api.github.com/repos/{self.repo}/issues/{issue_num}/comments'
    response = requests.post(issue_comments_url, data=json.dumps(send_msg), auth=self.bot_auth)
    # GitHub answers 201 Created for a successfully posted comment.
    if response.status_code != 201:
        logging.error(f'Could not comment \n {json.dumps(response.json())}')
        return False
    logging.info(f'Successfully commented {send_msg} to: {issue_num}')
    return True
def label_action(self, actions):
    """
    Dispatch a label action to the matching handler method.

    :param actions: Mapping of action name -> (issue_num, labels)
    :return: The handler's result, or False when no known action is present
    """
    # First matching action wins, in this fixed priority order.
    dispatch = (
        ("add", "add_labels"),
        ("remove", "remove_labels"),
        ("update", "update_labels"),
        ("replace", "replace_label"),
    )
    for action, method_name in dispatch:
        if action in actions:
            issue_num, labels = actions[action][0], actions[action][1]
            return getattr(self, method_name)(issue_num, labels)
    return False
def _secure_webhook(self, event):
"""
This method will validate the security of the webhook, it confirms that the secret
of the webhook is matched and | |
from tkinter import *
from tkinter import ttk
from colorexlib.colorexlib.common.datastructures import HeatMap, Tile
from colorexlib.colorexlib.common.styling import Themes, StyleSheet
import math
class HeatMapWindow(Frame):
def __init__(self, parent, heatmap=None, **kwargs):
    """
    Build a Frame that renders *heatmap* inside *parent* on a canvas.

    :param parent: the toplevel Tk window hosting this frame
    :param heatmap: HeatMap instance providing grid data, axis labels,
        titles, theme and stylesheet
    :param kwargs: extra keyword options forwarded to tkinter.Frame
    :raises TypeError: when parent is not a Tk or heatmap is not a HeatMap
    """
    # Determine the validity of all parameters passed.
    if(not isinstance(parent, Tk)):
        raise TypeError("argument 'parent' \
            must be of type 'Tk'")
    elif(not isinstance(heatmap, HeatMap)):
        raise TypeError("argument 'heatmap' \
            must be of type 'HeatMap'")
    # call parent initialize
    Frame.__init__(self, parent, **kwargs)
    # declare some constants, styles for stylesheet
    # NOTE(review): every style key accessed below must exist in the
    # stylesheet — a missing key raises KeyError here; confirm the
    # StyleSheet class guarantees defaults.
    styles = heatmap.stylesheet.styles
    # tile styles and constants
    self.const = dict()
    self.const["TILESIZE"] = styles['tile_size']
    # plane styles and constants
    self.const["PLANE_TOP_MARGIN"] = styles['plane_top_margin']
    # canvas styles and constants
    self.const["CANVAS_SIZE_FACTOR"] = styles['canvas_size_factor']
    self.const["CANVAS_TOP_MARGIN"] = styles['canvas_top_margin']
    self.const["CANVAS_BOTTOM_MARGIN"] = styles['canvas_bottom_margin']
    # label styles and constants
    self.const["YLABEL_MARGIN"] = styles['ylabel_margin']
    self.const["XLABEL_MARGIN"] = styles['xlabel_margin']
    # axes styles and constants
    self.const['AXES_TITLE_FONT'] = styles['axes_title_font']
    self.const['AXES_TITLE_SIZE'] = styles['axes_title_size']
    self.const['AXES_TITLE_BOLD'] = styles['axes_title_bold']
    self.const["AXES_TICK_LENGTH"] = styles['axes_tick_length']
    self.const['AXES_LABEL_FONT'] = styles['axes_label_font']
    self.const['AXES_LABEL_SIZE'] = styles['axes_label_size']
    self.const['AXES_LABEL_BOLD'] = styles['axes_label_bold']
    # title styles and constants
    self.const['TITLE_FONT'] = styles['title_font']
    self.const['TITLE_SIZE'] = styles['title_size']
    self.const['TITLE_BOLD'] = styles['title_bold']
    self.const['TITLE_YCOORD'] = styles['title_ycoord']
    # subtitle styles and constants
    self.const['SUBTITLE_FONT'] = styles['subtitle_font']
    self.const['SUBTITLE_SIZE'] = styles['subtitle_size']
    self.const['SUBTITLE_BOLD'] = styles['subtitle_bold']
    self.const['SUBTITLE_YCOORD'] = styles['subtitle_ycoord']
    # determine and generate the font for all axes labels.
    # Font spec is a (family, size[, "bold"]) sequence as accepted by Tk.
    self.axes_label_font = list()
    font_family = self.const["AXES_LABEL_FONT"]
    font_size = str(self.const["AXES_LABEL_SIZE"])
    bold = self.const["AXES_LABEL_BOLD"]
    self.axes_label_font.append(font_family)
    self.axes_label_font.append(font_size)
    if(bold):
        self.axes_label_font.append('bold')
    # set some heatmap properties
    self.heatmap = heatmap
    self.title = heatmap.title
    self.subtitle = heatmap.subtitle
    self.ncols = self.heatmap.cols
    self.nrows = self.heatmap.rows
    self.dataformatter = self.heatmap.dataformatter
    # determine which rows and columns are data,
    # and which rows and columns are the axes
    # labels, and set accordingly.
    if(self.heatmap.rowcolheaders):
        # There are row and column headers
        # already specified in the HeatMap object.
        # decrease ncols and nrows
        self.ncols -= 1
        self.nrows -= 1
        # row and column headers have been set in data source.
        self.heatmap_data = list(map(lambda row: row[1:],
            heatmap.grid[1:]))
        # An explicit label list overrides the grid's header row/column.
        if(isinstance(heatmap.xaxislabels, list)):
            self.xaxis_labels = heatmap.xaxislabels
        else:
            self.xaxis_labels = heatmap.grid[0][1:]
        if(isinstance(heatmap.yaxislabels, list)):
            self.yaxis_labels = heatmap.yaxislabels
        else:
            self.yaxis_labels = list(map(lambda row: row[0],
                heatmap.grid[1:]))
    else:
        # There are NO row and column headers in
        # the HeatMap object, which means all the
        # items in HeatMap are essentially data
        # items to be plotted as a Tile.
        self.heatmap_data = self.heatmap.grid
        self.xaxis_labels = heatmap.xaxislabels
        self.yaxis_labels = heatmap.yaxislabels
    # set the axes title values
    self.xaxis_title = heatmap.xaxistitle
    self.yaxis_title = heatmap.yaxistitle
    # create layout frames
    self.bottom_frame = Frame(self)
    self.right_frame = Frame(self)
    self.left_frame = Frame(self)
    self.parent = parent
    # create a canvas object, and set window properties.
    # Canvas is sized as a fixed fraction of the screen resolution.
    self.canvas_width = int(
        self.const["CANVAS_SIZE_FACTOR"] *
        self.parent.winfo_screenwidth())
    self.canvas_height = int(
        self.const["CANVAS_SIZE_FACTOR"] *
        self.parent.winfo_screenheight())
    # set window minimum size.
    self.parent.minsize(width=400,
        height=300)
    # position the window
    self.parent.geometry(str(self.canvas_width) +
        "x" + str(self.canvas_height) + "+0+0")
    # initialize the canvas and render.
    self.canvas = Canvas(self.left_frame, bg="white",
        width=self.canvas_width, height=self.canvas_height)
    self.init_canvas()
    # NOTE(review): render() and on_configure() are defined outside this
    # view — presumably they pack/redraw the widgets; verify.
    self.render()
    # bind some events to handlers
    self.left_frame.bind("<Configure>", self.on_configure)
    self.canvas.update()
def onMouseEnterTile(self, event, tile_id):
    """Highlight the hovered tile and raise it above the heatmap."""
    tag = str(tile_id)
    # Give the hovered tile a thick black border.
    self.canvas.itemconfigure(tag, width=5, outline="black")
    # Bring it above all other heatmap items so the border is visible.
    self.canvas.tag_raise(tag, "heatmap")
    # Switch to a crosshair cursor while over the heatmap.
    self.canvas.config(cursor="crosshair")
def onMouseLeaveTile(self, event, tile_id):
    """Restore a tile's default look once the cursor leaves it."""
    # Drop the highlight border.
    self.canvas.itemconfigure(str(tile_id), width=0)
    # Remove any balloon popup still showing.
    self.hide_all_tile_balloons()
    # Restore the default mouse cursor.
    self.canvas.config(cursor="")
    # Sink the tiles below everything else so axes lines stay visible.
    self.canvas.tag_lower("tiles", ALL)
def onMouseClickTile(self, event, tile_id, tile_data):
    """Select a tile: highlight it and show its data balloon."""
    self.hide_all_tile_balloons()
    # A heavy border marks the selected tile.
    self.canvas.itemconfigure(str(tile_id), width=11, outline="black")
    # Anchor the balloon at the tile's top-right corner.
    tile_coords = self.canvas.coords(str(tile_id))
    balloon_x = tile_coords[0] + self.const["TILESIZE"][0]
    balloon_y = tile_coords[1]
    self.show_current_tile_balloon(balloon_x, balloon_y, tile_data)
def show_current_tile_balloon(self, x, y, tile):
    """
    Draw a popup "balloon" next to a tile showing its value (and label).

    :param x: x-coordinate of the balloon anchor (tile's top-right corner)
    :param y: y-coordinate of the balloon anchor
    :param tile: the Tile whose value and optional label are displayed
    """
    # Offset the text up and to the right of the anchor point.
    text_x = x+35
    text_y = y-45
    data = tile.value
    label = tile.label
    # Format the raw value when a data formatter was configured.
    if(self.dataformatter is not None):
        final_data = self.dataformatter.format(data)
    else:
        final_data = data
    # Insert data into the balloon about the selected Tile.
    textid = self.canvas.create_text(text_x,
        text_y, text=final_data,
        font="Arial 11 bold", tag="current_tile_popup",
        fill="white", anchor="w")
    if(label is not None):
        labelid = self.canvas.create_text(text_x,
            text_y-20, text=label,
            font="Arial 10", tag="current_tile_popup",
            fill="white", anchor="w")
    # Bounding box of all popup text, used to size the balloon body.
    text_bbox = self.canvas.bbox("current_tile_popup")
    # Generate points for the balloon
    # point1 is the balloon "tail" at the anchor; points 2-5 trace a
    # rectangle around the text with a margin on every side.
    margin = 10
    point1 = (x, y)
    point2 = (text_bbox[0]-margin, text_bbox[1]-margin)
    point3 = (text_bbox[2]+margin, text_bbox[1]-margin)
    point4 = (text_bbox[2]+margin, text_bbox[3]+margin)
    point5 = (text_bbox[0]-margin, text_bbox[3]+margin)
    # Draw the balloon polygon on the canvas.
    self.canvas.create_polygon(point1[0], point1[1], point2[0],
        point2[1], point3[0], point3[1], point4[0], point4[1],
        point5[0], point5[1], point1[0], point1[1], fill="black",
        tag="current_tile_popup")
    # Stacking order: balloon above the heatmap, text above the balloon.
    self.canvas.tag_raise("current_tile_popup", "heatmap")
    self.canvas.tag_raise(str(textid), "current_tile_popup")
    if(label is not None):
        self.canvas.tag_raise(str(labelid), "current_tile_popup")
def hide_all_tile_balloons(self):
    """Delete every tile popup balloon currently drawn on the canvas."""
    self.canvas.delete("current_tile_popup")
def init_canvas(self):
    """
    Draw the complete heatmap scene onto the canvas: background, header
    (title/subtitle), cartesian axes with ticks and labels, the data
    tiles, optional axis titles; then center the layout and attach
    scrollbars.
    """
    # set background color of canvas
    self.canvas.config(background=
        self.heatmap.theme.palette["background"])
    # draw the titles on the canvas.
    # title
    title_font = list()
    title_font.append(self.const["TITLE_FONT"])
    title_font.append(self.const["TITLE_SIZE"])
    if(self.const["TITLE_BOLD"]):
        title_font.append("bold")
    self.canvas.create_text(0,
        self.const["TITLE_YCOORD"],
        anchor="center", text=self.title,
        fill=self.heatmap.theme.palette["on-background"],
        font=title_font, tag="header")
    # subtitle
    subtitle_font = list()
    subtitle_font.append(self.const["SUBTITLE_FONT"])
    subtitle_font.append(self.const["SUBTITLE_SIZE"])
    if(self.const["SUBTITLE_BOLD"]):
        subtitle_font.append("bold")
    self.canvas.create_text(0,
        self.const['SUBTITLE_YCOORD'],
        anchor="center", text=self.subtitle,
        fill=self.heatmap.theme.palette["on-background"],
        font=subtitle_font, tag="header")
    # draw the cartesian plane on the canvas at canvas
    # origin first vertical axis
    yaxis_start = (0, 0)
    yaxis_end = (0, (self.const["TILESIZE"][1] * self.nrows))
    self.canvas.create_line(yaxis_start[0], yaxis_start[1],
        yaxis_end[0], yaxis_end[1], tag=("heatmap", "cartesian"))
    for n in range(self.nrows):
        point = (yaxis_start[0], yaxis_start[1] +
                 (self.const["TILESIZE"][1] * n))
        try:
            label = self.yaxis_labels[n]
        # Narrowed from a bare except: an out-of-range row (IndexError)
        # or a missing/None label list (TypeError) both mean "no label".
        except (IndexError, TypeError):
            label = ""
        self.canvas_draw_yaxis_tick(point, label)
    # horizontal axis
    xaxis_start = yaxis_end
    xaxis_end = (xaxis_start[0] + (self.const["TILESIZE"][0] *
                 self.ncols), xaxis_start[1])
    self.canvas.create_line(xaxis_start[0], xaxis_start[1],
        xaxis_end[0], xaxis_end[1], tag=("heatmap", "cartesian"))
    for n in range(self.ncols):
        point = (xaxis_start[0] + (self.const["TILESIZE"][0] *
                 (n + 1)), xaxis_start[1])
        try:
            label = self.xaxis_labels[n]
        except (IndexError, TypeError):
            label = ""
        self.canvas_draw_xaxis_tick(point, label)
    # plot all tiles into the heat map cartesian plane
    plot_start = yaxis_start
    for i in range(self.ncols):
        for j in range(self.nrows):
            point = self.canvas_get_new_point(plot_start,
                i * self.const["TILESIZE"][0],
                j * self.const["TILESIZE"][1])
            self.canvas_draw_tile(point, self.heatmap_data[j][i].rgb,
                self.heatmap_data[j][i], width=0, outline="white")
    # push the tiles to a lower layer in canvas,
    # to allow axes lines to show.
    self.canvas.tag_lower("tiles", ALL)
    # draw the axes titles for xaxis, yaxis.
    if(self.xaxis_title != "" or self.yaxis_title != ""):
        hm_leftx = self.canvas.bbox("heatmap")[0]
        hm_righty = self.canvas.bbox("heatmap")[3]
        tiles_leftx = self.canvas.bbox("tiles")[0]
        tiles_lefty = self.canvas.bbox("tiles")[1]
        tiles_rightx = self.canvas.bbox("tiles")[2]
        tiles_righty = self.canvas.bbox("tiles")[3]
        xaxis_label_x = (tiles_rightx - tiles_leftx) / 2 + tiles_leftx
        xaxis_label_y = hm_righty + 20
        yaxis_label_x = hm_leftx - 20
        yaxis_label_y = (tiles_righty - tiles_lefty) / 2
        # Bug fix: build the axis-title font once, before either branch.
        # It was previously built inside the x-axis branch only, which
        # raised a NameError whenever a y-axis title was supplied
        # without an x-axis title.
        axistitle_font = list()
        axistitle_font.append(self.const['AXES_TITLE_FONT'])
        axistitle_font.append(self.const['AXES_TITLE_SIZE'])
        if(self.const['AXES_TITLE_BOLD']):
            axistitle_font.append("bold")
        if(self.xaxis_title != ""):
            self.canvas.create_text(xaxis_label_x, xaxis_label_y,
                fill=self.heatmap.theme.palette["on-background"],
                text=self.xaxis_title, font=axistitle_font,
                tag=("heatmap", "cartesian"))
        if(self.yaxis_title != ""):
            self.canvas.create_text(yaxis_label_x, yaxis_label_y,
                fill=self.heatmap.theme.palette["on-background"],
                text=self.yaxis_title, font=axistitle_font,
                angle=90, tag=("heatmap", "cartesian"))
    # center the heatmap horizontally.
    self.canvas_center_header()
    self.canvas_center_heatmap(centerAlongX=True,
        centerAlongY=True)
    # create some scrollbars
    self.scroll_y = Scrollbar(self.right_frame,
        command=self.canvas.yview, orient=VERTICAL)
    self.canvas.configure(yscrollcommand=self.scroll_y.set)
    self.scroll_x = Scrollbar(self.bottom_frame,
        command=self.canvas.xview, orient=HORIZONTAL)
    self.canvas.configure(xscrollcommand=self.scroll_x.set)
# ################################################# Not using this as a legend may not be necessary due to heatmap interactivity.
# def canvas_draw_legend(self):
# # create the legend or key for the heatmap
# legendbox_x1 = 50
# legendbox_y1 = self.const["PLANE_TOP_MARGIN"]
# legendbox_x2 = 250
# legendbox_y2 = self.const["PLANE_TOP_MARGIN"]+300
# self.canvas.create_rectangle(legendbox_x1, legendbox_y1,
# legendbox_x2, legendbox_y2)
# self.canvas.create_text((legendbox_x1+legendbox_x2)/2,
# legendbox_y1+40,text="Legend")
# self.canvas.create_rectangle(legendbox_x1+15, legendbox_y1+60,
# legendbox_x1+30, legendbox_y1+75)
# self.canvas.create_text(legendbox_x1+45, legendbox_y1+60,text="20-30",
# anchor="w")
# ################################################# Not using this as a legend may not be necessary due to heatmap interactivity.
def canvas_center_heatmap(self, centerAlongX=True,
        centerAlongY=True):
    """Center the drawn heatmap (the cartesian plane) on the canvas.

    Horizontal centering aligns the heatmap's bounding box with the
    middle of the canvas; vertical "centering" shifts the plane down
    by the configured top margin.
    """
    tag = "heatmap"
    # Bounding box of everything tagged as part of the heatmap.
    left_x, _, right_x, _ = self.canvas.bbox(tag)
    # Horizontal shift that puts the box's center on the canvas center.
    dx = (self.canvas_width / 2) - ((right_x - left_x) / 2) - left_x
    # center heatmap horizontally if it was indicated.
    if centerAlongX:
        for item in self.canvas.find_withtag(tag):
            self.canvas.move(item, dx, 0)
    # center heatmap vertically if it was indicated.
    if centerAlongY:
        for item in self.canvas.find_withtag(tag):
            self.canvas.move(item, 0,
                self.const["PLANE_TOP_MARGIN"])
    # Refresh the scrollregion so any off-screen parts stay reachable.
    self.canvas_update_scrollregion()
def canvas_center_header(self):
    """Horizontally center the header (title and subtitle) on the canvas."""
    tag = "header"
    # Bounding box of the header items on the canvas.
    left_x, _, right_x, _ = self.canvas.bbox(tag)
    # Horizontal shift that centers the header's bounding box.
    dx = (self.canvas_width / 2) - ((right_x - left_x) / 2) - left_x
    self.canvas.move(tag, dx, 0)
    # Keep the scrollregion in sync after moving items.
    self.canvas_update_scrollregion()
def canvas_get_new_point(self, old_point, dx, dy):
    """Return old_point translated by (dx, dy) as a new (x, y) tuple."""
    return (old_point[0] + dx, old_point[1] + dy)
def canvas_draw_tile(self, topleft_point,
fill_color, tile, | |
# Repository: AlexRaschia/Py-nergie_demo
# -*- coding: utf-8 -*-
"""
Projet Py-nergie
DS Bootcamp Juin 2021
"""
import streamlit as st
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
#from sklearn.preprocessing import StandardScaler, MinMaxScaler
st.set_option('deprecation.showPyplotGlobalUse', False)
st.set_page_config(layout="centered")
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', None)
# Importation du dataset
@st.cache(suppress_st_warning=True)
def importandtreat_dfs():
    """
    Load and preprocess every dataset used by the app.

    Reads the eco2mix regional energy CSV plus the complementary
    temperature (ODRE), population (INSEE) and company (INSEE) CSVs,
    cleans them, and merges them into one modelling dataframe.
    Cached by Streamlit so the CSVs are only read once.

    :return: tuple (Ener, Ener_sorted, Temp_per_day, Pop_per_year, Ent, data)
    """
    Ener = pd.read_csv('eco2mix-regional-cons-def.csv', sep=';', low_memory=False)
    # Sort by date and time, then by region in alphabetical order
    Ener_sorted = Ener.sort_values(by = ['Date - Heure', 'Région'], ascending = True)
    Ener_sorted = Ener_sorted.reset_index()
    Ener_sorted = Ener_sorted.drop(['index'], axis=1)
    # Drop the flux columns, the INSEE code and the Nature column
    flux = [col for col in Ener_sorted if col.startswith('Flux')]
    Ener_sorted.drop(flux, axis=1, inplace=True)
    Ener_sorted.drop(['Code INSEE région'], axis=1, inplace=True)
    Ener_sorted.drop(['Nature'], axis=1, inplace=True)
    Ener_sorted['Date - Heure'] = pd.to_datetime(Ener_sorted['Date - Heure'])
    # Replace NaNs with 0 in the production and consumption columns
    colnam_NaNtoSup = [col for col in Ener_sorted if '(MW)' in col]
    for name in colnam_NaNtoSup:
        Ener_sorted[name] = Ener_sorted[name].fillna(0)
    # Create the 'Production (MW)' column: the sum of every production source
    Ener_sorted.insert(5, 'Production (MW)', 0)
    # Compute the total production for each row
    Ener_sorted['Production (MW)'] = Ener_sorted['Thermique (MW)'] + Ener_sorted['Nucléaire (MW)'] \
                                     + Ener_sorted['Eolien (MW)'] + Ener_sorted['Solaire (MW)'] \
                                     + Ener_sorted['Hydraulique (MW)'] + Ener_sorted['Pompage (MW)'] \
                                     + Ener_sorted['Bioénergies (MW)']
    # Create the Year, Day and Month columns
    Ener_sorted.insert(2, 'Jour', pd.DatetimeIndex(Ener_sorted['Date']).day)
    Ener_sorted.insert(3, 'Mois', pd.DatetimeIndex(Ener_sorted['Date']).month)
    Ener_sorted.insert(4, 'Année', pd.DatetimeIndex(Ener_sorted['Date']).year)
    # Import and preprocess the complementary dataframes
    # Daily temperature data per region and per day (ODRE)
    Temp_per_day = pd.read_csv('temperature-quotidienne-regionale.csv', sep=';')
    Temp_per_day = Temp_per_day.sort_values(by = ['Date', 'Région'], ascending = True)
    Temp_per_day = Temp_per_day.reset_index()
    Temp_per_day = Temp_per_day.drop(['index', 'Code INSEE région'], axis=1)
    # Yearly population data (INSEE)
    Pop_per_year = pd.read_csv('INSEE_pop_2016-2021.csv', sep=';')
    Pop_per_year = Pop_per_year.melt(id_vars="Région", var_name="Année", value_name="Population")
    Pop_per_year = Pop_per_year.sort_values(by = ['Année', 'Région'], ascending = True)
    Pop_per_year = Pop_per_year.reset_index()
    Pop_per_year = Pop_per_year.drop(['index'], axis=1)
    # Company data (INSEE) by sector (primary, secondary, tertiary) and size (micro, SME, mid-cap, large)
    Ent_2016 = pd.read_csv('stat_ent_2016_final.csv', sep=';')
    Ent_2017 = pd.read_csv('stat_ent_2017_final.csv', sep=';')
    Ent_2018 = pd.read_csv('stat_ent_2018_final.csv', sep=';')
    Ent_2016.insert(1, 'Année', 2016)
    Ent_2017.insert(1, 'Année', 2017)
    Ent_2018.insert(1, 'Année', 2018)
    # Build the Ent dataframe by concatenating the yearly dataframes
    Ent_int = pd.concat([Ent_2016, Ent_2017], axis=0)
    Ent = pd.concat([Ent_int, Ent_2018], axis=0)
    Ent = Ent.sort_values(by = ['Année', 'Région'], ascending = True)
    Ent = Ent.reset_index()
    Ent = Ent.drop(['index'], axis=1)
    # Preprocessing for machine learning
    Col_todel = Ener_sorted.columns[8:]
    Ener_light = Ener_sorted.copy()
    Ener_light.drop(Col_todel, axis=1, inplace=True)
    Ener_light.drop(['Jour', 'Mois', 'Année'], axis=1, inplace=True)
    # Daily peaks of consumed power, per date and region
    Energy = Ener_light.groupby(["Date", "Région"])["Consommation (MW)"].max().reset_index()
    # Insert the year into the Energy dataframe
    Energy.insert(1, 'Année', pd.DatetimeIndex(Energy['Date']).year)
    # Keep only the 2016-2018 data
    Energy_selection = Energy[(Energy["Année"] > 2015) & (Energy["Année"] < 2019)].reset_index()
    Energy_selection = Energy_selection.drop(['index'], axis=1)
    # Insert the Date-Region key column used for the merge
    Energy_selection.insert(0, 'Date-Reg', Energy_selection['Date'] + ' ' + Energy_selection['Région'])
    Temp_per_day.insert(0, 'Date-Reg', Temp_per_day['Date'] + ' ' + Temp_per_day['Région'])
    Temp_per_day.drop(['Date', 'Région'], axis=1, inplace=True)
    Pop_per_year.insert(0, 'An-Reg', Pop_per_year['Année'] + ' ' + Pop_per_year['Région'])
    Pop_per_year.drop(['Année', 'Région'], axis=1, inplace=True)
    # Merge consumption peaks with temperature data
    data_fusion = Energy_selection.merge(right = Temp_per_day, on = 'Date-Reg', how = 'left')
    data_fusion.drop('Date-Reg', axis=1, inplace=True)
    data_fusion['Année'] = data_fusion['Année'].astype(str)
    # Insert the Year-Region key column used for the merge
    data_fusion.insert(0, 'An-Reg', data_fusion['Année'] + ' ' + data_fusion['Région'])
    # Merge with the population data
    data_fusion_2 = data_fusion.merge(right = Pop_per_year, on = 'An-Reg', how = 'left')
    # Insert the Year-Region key column used for the merge
    Ent['Année'] = Ent['Année'].astype(str)
    Ent.insert(0, 'An-Reg', Ent['Année'] + ' ' + Ent['Région'])
    Ent.drop(['Année', 'Région'], axis=1, inplace=True)
    # Merge with the company data
    data = data_fusion_2.merge(right = Ent, on = 'An-Reg', how = 'left')
    data.drop(['An-Reg', 'Année', 'TMin (°C)', 'TMax (°C)'], axis=1, inplace=True)
    return Ener, Ener_sorted, Temp_per_day, Pop_per_year, Ent, data
# Load (cached) every dataframe the app needs.
Ener, Ener_sorted, Temp_per_day, Pop_per_year, Ent, data = importandtreat_dfs()
# Create one dataframe per year
Ener_sorted_2013 = Ener_sorted[Ener_sorted['Année'] == 2013]
Ener_sorted_2014 = Ener_sorted[Ener_sorted['Année'] == 2014]
Ener_sorted_2015 = Ener_sorted[Ener_sorted['Année'] == 2015]
Ener_sorted_2016 = Ener_sorted[Ener_sorted['Année'] == 2016]
Ener_sorted_2017 = Ener_sorted[Ener_sorted['Année'] == 2017]
Ener_sorted_2018 = Ener_sorted[Ener_sorted['Année'] == 2018]
Ener_sorted_2019 = Ener_sorted[Ener_sorted['Année'] == 2019]
Ener_sorted_2020 = Ener_sorted[Ener_sorted['Année'] == 2020]
Ener_sorted_2021 = Ener_sorted[Ener_sorted['Année'] == 2021]
# Define the navigation menu (sidebar radio buttons selecting a page)
st.sidebar.header("Projet Py-Nergie")
choix = st.sidebar.radio("Menu de Navigation",
                         ('Présentation du Projet',
                          'Aspects Techniques',
                          'Data Sets',
                          'Dataviz Consommation',
                          'Dataviz Production',
                          'Modèle SARIMA',
                          'Modèles de Régression'))
# Définition page Présentation du projet
if choix == 'Présentation du Projet':
st.title("Projet Py-Nergie - DS Juin à Sept 2021")
st.write("")
st.image("pylone.jpg")
st.write("")
st.header("Présentation du Projet")
st.write('Le sujet de ce projet porte sur l’analyse des données de production\n'
'et de consommation du réseau électrique français. \n'
'La source du jeu de données est celle de l’ODRE (Open Data Réseaux Energies)\n'
'avec un accès à toutes les informations de consommation et de production\n'
'par Région et par filière jour par jour (toutes les 1/2 heures) depuis 2013.')
st.write("")
st.image("ODRE.jpg")
st.image("ODRE2.jpg")
st.write("")
st.header("Objectifs du projet")
st.write('Les objectifs du projet s’expriment à travers trois questionnements\n'
'que nous nous sommes posés et auxquels nous avons voulu répondre \n'
'dans un analyse détaillée présentée dans ce rapport. \n'
'Les trois problématiques se résument ainsi :\n'
' - Comment est assuré l’équilibre entre consommation et production\n'
' au niveau national et régional ? \n'
' - Quelles sont les sources d’énergies au niveau national et régional qui contribuent\n'
' à satisfaire les besoins d’électricité et dans quelles proportions ?\n'
' - Sommes-nous capables de prédire correctement la consommation avec un/des\n'
' modèle(s) de machine learning afin de prévoir les besoins de production ?\n')
# Définition page Aspects Techniques
elif choix == 'Aspects Techniques':
st.title("Aspects techniques du réseau électrique")
st.write("Le développement des usages électriques depuis le milieu du XXe siècle a abouti \n"
"à la construction d’un système de production centralisé, associé à un réseau \n"
"électrique interconnecté et maillé à l’échelle nationale et continentale. \n"
"Ce réseau électrique est constitué de 3 types de réseaux :")
st.subheader("Le réseau de transport")
st.write(" - le réseau de transport est basé sur une structure de réseau maillée \n"
"(«autoroutes de l’énergie»). Ils est à haute tension (225kV et 400 kV) et a pour \n"
"but de transporter l'énergie des grands centres de production vers les régions \n"
"consommatrices d'électricité. Les grandes puissances transitées imposent des lignes \n"
"électriques de forte capacité de transit, ainsi qu'une structure maillée (ou interconnectée)")
st.image("réseau transport 400 a 225 kV.jpg", width=350)
link1 = '[source : RTE](https://assets.rte-france.com/prod/public/2020-07/SDDR%202019%20Chapitre%2002%20-%20Le%20renouvellement%20du%20r%C3%A9seau%20existant.pdf)'
st.markdown(link1, unsafe_allow_html=True)
st.subheader("Le réseau de répartition")
st.write(" - Le réseaux de répartition (haute tension de l'ordre de 63kV et 90 kV) a pour but \n"
"d'assurer à l'échelle régionale la fourniture d'électricité. L'énergie y est injectée \n"
"essentiellement par le réseau de transport via des transformateurs, mais également par \n"
"des centrales électriques de moyennes puissances (inférieures à environ 100 MW). \n"
"Le réseau de répartition est distribué de manière assez homogène sur le territoire d'une région.")
st.image("réseau repartition 90 a 63kV.jpg", width=350)
st.markdown(link1, unsafe_allow_html=True)
st.subheader("Les réseaux publics de distribution")
st.write(" - Les réseaux publics de distribution d'électricité desservent en moyenne et basse tension \n"
"(20 kV et 400 V), selon une architecture en arborescence, les consommateurs finaux et \n"
"les clients domestiques et professionnels (commerçants, artisans, petites industries). \n"
"Leur longueur cumulée dépasse 1,3 million de kilomètres. L’interface entre les réseaux \n"
"moyenne et basse tension est assurée par quelque 700 000 « postes de distribution ».\n"
"Le développement de la production d’énergie décentralisée (éolien, photovoltaïque, etc.) \n"
"et de nouveaux usages (autoproduction, électromobilité, etc.) modifient le rôle des réseaux \n"
"de distribution qui deviennent collecteurs de l'énergie produite par les plus petites \n"
"installations de production.")
st.subheader("Synthèse du réseau électrique de la production à la consommation")
st.image("synoptique réseaux.jpg")
link2 = "[source : Commmision de régulation de l'énergie](http://modules-pedagogiques.cre.fr/m1/index.html)"
st.markdown(link2, unsafe_allow_html=True)
# Définition page Data Sets
elif choix == 'Data Sets':
st.title("Descriptif des Data Sets")
st.subheader("Jeu de données initial éco2mix")
st.write("Données éCO2mix régionales consolidées et définitives (janvier 2013 à juin 2021)")
st.write("https://opendata.reseaux-energies.fr/explore/dataset/eco2mix-regional")
st.write("Ce jeu de données, présente les données régionales consolidées depuis janvier 2020 \n"
"et définitives (de janvier 2013 à décembre 2019) issues de l'application éCO2mix. | |
the client"""
with self.metadata_lock:
details = {}
for field in CLIENT_FIELDS:
if hasattr(self, field):
details[field] = getattr(self, field)
if self.sensors:
details['sensors'] = []
for sensor_id, sensor in self.sensors.items():
sensor_details = sensor.to_dict()
sensor_details['sensor_id'] = sensor_id
details['sensors'].append(sensor_details)
return details
def GetLatestSamples(self):
    """Return the latest samples buffered by the (main) Phidget sensor."""
    # Snapshot the buffer into a plain list so callers can't mutate it.
    return list(self.phidget_sensor.data_buffer)
def GetDataFiles(self, time_from, time_to, latest=False):
    """Bundle uploaded sensor data files into a single temporary .dat file.

    Scans every sensor's uploaded-file directory.  When ``latest`` is True,
    the newest file from each sensor is taken; otherwise every file whose
    nominal 10-minute window overlaps [time_from, time_to] is included.

    Returns the path of the concatenated temporary file, or None when no
    matching files were found.
    """
    files_to_send = []
    outfilename = None
    for sensor in self.sensors.values():
        if not sensor.datastore_uploaded:
            logging.info('Sensor upload directory does not exist')
            continue
        files = os.listdir(sensor.datastore_uploaded)
        if len(files) <= 0:
            logging.info('No files in %s', sensor.datastore_uploaded)
            # BUG FIX: was "return None", which discarded files already
            # collected from earlier sensors; keep scanning the rest instead.
            continue
        logging.info('There are %d files in the uploaded directory', len(files))
        # oldest first, so files[-1] is the most recent
        files.sort(key=lambda x: os.path.getmtime(os.path.join(sensor.datastore_uploaded, x)))
        if latest:
            # simply take the latest file from this sensor
            fullname = os.path.join(sensor.datastore_uploaded, files[-1])
            if not os.path.isfile(fullname):
                logging.error('Unexpected error for %s', fullname)
                # consistent with the time-range branch below: skip, don't abort
                continue
            files_to_send.append(fullname)
            # name the bundle after the newest file's timestamp component
            file_start_string = files[-1].split('_')[1].split('.')[0]
            temp_name = file_start_string + '.dat'
            outfilename = os.path.join(TEMPORARY_FILE_DIRECTORY, temp_name)
        else:
            logging.info('Time from %s to %s', time_from, time_to)
            temp_name = datetime.datetime.strftime(time_from, FILESTORE_NAMING) +\
                'to' +\
                datetime.datetime.strftime(time_to, FILESTORE_NAMING) +\
                '.dat'
            outfilename = os.path.join(TEMPORARY_FILE_DIRECTORY, temp_name)
            for this_file in files:
                file_start_string = this_file.split('_')[1].split('.')[0]
                file_start_datetime = datetime.datetime.strptime(file_start_string, FILESTORE_NAMING)
                # the files are nominally 10 minutes long, so find the nominal end time
                file_end_datetime = file_start_datetime + datetime.timedelta(seconds=600)
                if file_start_datetime < time_from and file_end_datetime < time_from:
                    continue
                if file_start_datetime > time_to:
                    continue
                logging.info('Found file ' + file_start_string + ' ' + str(file_start_datetime) + ' ' + str(file_end_datetime))
                fullname = os.path.join(sensor.datastore_uploaded, this_file)
                if not os.path.isfile(fullname):
                    logging.error('Unexpected error for %s', fullname)
                    continue
                files_to_send.append(fullname)
    if len(files_to_send) > 0:
        logging.info(files_to_send)
        # concatenate all matching files into the temporary output file;
        # the with-blocks close the handles (the originals also called
        # close() redundantly inside the with)
        with open(outfilename, 'w') as fout:
            for file_name in files_to_send:
                logging.info('Adding %s', file_name)
                with open(file_name, 'r') as fin:
                    for line in fin:
                        fout.write(line)
        return outfilename
    return None
def GetRecentPicks(self):
    """Return a snapshot list of the main Phidget sensor's recent picks."""
    picks = self.phidget_sensor.recent_picks
    return list(picks)
def GetSensorNtpCorrectedTimestamp(self):
    """Return the NTP-corrected timestamp reported by the main Phidget sensor."""
    sensor = self.phidget_sensor
    return sensor.GetNtpCorrectedTimestamp()
def SendClientUpdate(self):
    # Placeholder: pushing client state up to the server is not implemented;
    # callers only get this error log entry.
    logging.error('SendClientUpdate not implemented')
def __init__(self):
    """Initialise the PyCSN client (Python 2 code).

    Sets up rotating-file logging, connects to Amazon S3, loads the station
    YAML configuration, creates one PhidgetsSensor per configured sensor, and
    starts the NTP, heartbeat, web-server and storage daemon threads.
    Aborts via sys.exit if a sensor's Phidget is not attached or a thread
    fails to start.
    """
    print 'PyCSN Amazon Starting!'
    # root logger with a rotating file handler; also echo to stdout on Windows
    log = logging.getLogger('')
    log.setLevel(logging.INFO)
    the_format = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
    logfile = LOGFILE
    if os.path.isdir(LOGFILE_DIRECTORY):
        logfile = os.path.join(LOGFILE_DIRECTORY, LOGFILE)
    print 'Will log to', logfile
    fh = logging.handlers.RotatingFileHandler(logfile, maxBytes=BYTES_IN_LOGFILE, backupCount=LOGFILE_BACKUPS)
    fh.setFormatter(the_format)
    log.addHandler(fh)
    if sys.platform == 'win32':
        ch = logging.StreamHandler(sys.stdout)
        ch.setFormatter(the_format)
        log.addHandler(ch)
    try:
        # is_secure from:
        # https://stackoverflow.com/questions/28115250/boto-ssl-certificate-verify-failed-certificate-verify-failed-while-connecting
        self.s3 = boto.connect_s3(aws_access_key_id, aws_secret_access_key, is_secure=False) #, debug=2)
        self.bucket = self.s3.get_bucket(aws_bucket_wavedata)
        self.bucket_stationdata = self.s3.get_bucket(aws_bucket_stationdata)
    except Exception, errtxt:
        # S3 failure is logged but not fatal; later uploads will fail instead
        logging.error('Error connecting to Amazon S3/SQS %s', errtxt)
        #raise sys.exit(errtxt)
    # station configuration lives next to this module as <STATION>.yaml
    current_directory = os.path.dirname(os.path.realpath(__file__))
    self.stationFile = os.path.join(current_directory, STATION + '.yaml')
    logging.info('Instantiating client from %s', self.stationFile)
    with open(self.stationFile) as config_file:
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted files; the station file is local, but confirm.
        config = yaml.load(config_file.read())
    # copy the whitelisted config fields straight onto the client instance
    for field in CLIENT_FIELDS:
        setattr(self, field, config.get(field))
    self.registered = True
    self.server = TARGET_SERVER
    self.client_id = config.get('client_id', 'T100001')
    # configured NTP servers are tried first, then the built-in defaults
    time_servers = config.get('time_servers','')
    if len(time_servers) == 0:
        self.time_servers = TIME_SERVERS
    else:
        if not type(time_servers) is list:
            time_servers = [time_servers]
        servers = set(TIME_SERVERS) - set(time_servers)
        self.time_servers = time_servers + list(servers)
    logging.info('Station configuration includes NTP server(s) %s: Will use %s', time_servers, self.time_servers)
    logging.info('Connecting to FinDer ActiveMQ broker')
    stomp = self.connectActiveMQ()
    if stomp == None:
        # not fatal: the stomp handle (possibly None) is still passed to sensors
        logging.error('No connection to ActiveMQ')
    # Create locks for changing metadata
    self.metadata_lock = threading.RLock()
    self.software_version = SOFTWARE_VERSION
    self.sensors = {}
    self.phidget_sensor = None
    for sensor_dict in config.get('sensors', []):
        sensor_id = sensor_dict.get('sensor_id')
        if sensor_id is not None:
            # include the SQS queue for picks in the sensor's configuration
            # as well as the lat/lon/floor
            sensor_dict['software_version'] = SOFTWARE_VERSION
            sensor_dict['stomp'] = stomp
            sensor_dict['stomp_topic'] = ACTIVEMQ_TOPIC
            sensor_dict['connect_stomp'] = self.connectActiveMQ
            #sensor_dict['pick_queue'] = self.pick_queue
            sensor_dict['latitude'] = config.get('latitude')
            sensor_dict['longitude'] = config.get('longitude')
            sensor_dict['floor'] = config.get('floor')
            sensor_dict['client_id'] = self.client_id
            self.sensors[sensor_id] = PhidgetsSensor(config=sensor_dict)
            # Store reference to the first sensor (and usually the only one
            if self.phidget_sensor is None:
                self.phidget_sensor = self.sensors[sensor_id]
    if len(self.sensors) == 0:
        logging.error("Client has no sensors")
        return
    for sensor_id, sensor in self.sensors.items():
        logging.info('Client has Sensor %s',sensor_id)
        if not sensor.phidget_attached:
            # hard abort: the client is useless without attached hardware
            raise sys.exit('Phidget not attached - abort')
        else:
            sensor.setFileStoreInterval(HEARTBEAT_INTERVAL.total_seconds())
    # we start off assuming no clock drift i.e. the adjustment is zero
    self.clock_offset = 0.0
    logging.info('Starting NTP thread')
    try:
        ntpDaemon = threading.Thread(target=self.ntpThread)
        ntpDaemon.setDaemon(True)
        ntpDaemon.start()
    except Exception, errtxt:
        raise sys.exit(errtxt)
    logging.info('Starting heartbeat thread')
    try:
        heartbeatDaemon = threading.Thread(target=self.heartbeatThread)
        heartbeatDaemon.setDaemon(True)
        heartbeatDaemon.start()
    except Exception, errtxt:
        raise sys.exit(errtxt)
    logging.info('Starting web server on port %s', PORT_NUMBER)
    try:
        webServerDaemon = threading.Thread(target=self.webServerThread)
        webServerDaemon.setDaemon(True)
        webServerDaemon.start()
    except Exception, errtxt:
        raise sys.exit(errtxt)
    logging.info('Starting storage check thread')
    try:
        storageDaemon = threading.Thread(target=self.storageThread)
        storageDaemon.setDaemon(True)
        storageDaemon.start()
    except Exception, errtxt:
        raise sys.exit(errtxt)
    # shut down cleanly (upload pending files) when the process is terminated
    signal.signal(signal.SIGTERM, self.catch_sigterm)
def run(self):
    """Main health-check loop: every 60 s, restart any stalled or detached
    Phidget sensor.  Terminates on Ctrl-C via self.cleanup()."""
    HEALTH_TIME = 60
    self.server_start_time = time.time()
    logging.info('Running ...')
    try:
        health = True
        while health:
            time.sleep(HEALTH_TIME)
            time_now = self.GetSensorNtpCorrectedTimestamp()
            for sensor_id, sensor in self.sensors.items():
                # restart a sensor that should be sampling but has been
                # silent for more than 10 minutes
                if sensor.collect_samples and time_now - sensor.last_sample_seconds > 600:
                    logging.warn('Health Check: Sensor %s No sensor data for %s seconds. Restart Phidget', sensor_id, time_now - sensor.last_sample_seconds )
                    sensor.StartPhidget()
                elif not sensor.phidget_attached:
                    logging.warn('Health Check: Sensor %s Not attached. Restart Phidget',sensor_id)
                    sensor.StartPhidget()
            # NOTE(review): heartbeats/ntp/web/check_storage are never defined
            # in this scope (cleanup() only assigns same-named *locals*), so
            # reaching these checks raises NameError.  Presumably they were
            # meant to be module-level thread-liveness flags — confirm intent.
            if not heartbeats:
                logging.warning('Health Check: Heartbeat thread has stopped')
            if not ntp:
                logging.warning('Health Check: NTP thread has stopped')
            if not web:
                logging.warning('Health Check: Web thread has stopped')
            if not check_storage:
                logging.warning('Health Check: Check storage thread has stopped')
        logging.warn('Main thread terminating')
    except KeyboardInterrupt:
        logging.warn('Keyboard Interrupt: PyCSN processing terminating')
        self.cleanup()
def catch_sigterm(self, signum, frame):
    # SIGTERM handler registered in __init__: log the signal, then shut down
    # (cleanup() uploads pending files and exits the process).
    logging.warning('Caught SIGTERM %s %s - Calling the cleaner', signum, frame)
    self.cleanup()
def cleanup(self):
    """Stop sample collection, upload pending data files, and exit the process."""
    # NOTE(review): these are *local* variables; they do not reach the
    # identically named flags checked in run(), so they have no effect.
    heartbeats = False
    ntp = False
    web = False
    check_storage = False
    health = False
    # Stop sample collection from all sensors
    logging.info('Stopping Sensor Data Collection and uploading files')
    for sensor in self.sensors.values():
        sensor.StopSampleCollection()
    # process any files that need to be uploaded
    try:
        self.uploadDataFiles()
    except IOError, e:
        logging.error("IO Error on uploadDataFiles %s %s", e.errno, e)
    except Exception, e:
        logging.error('Unexpected error on uploadDataFiles %s', e)
    # terminate the whole process
    sys.exit()
def uploadDataFiles(self):
    """Gzip each sensor's pending .dat files and upload them to the S3
    wave-data bucket under <YYYYMMDD>/<client_id>/<file>.gz, marking each
    file uploaded on success.  At most MAX_PENDING_UPLOADS files are
    processed per sensor per call."""
    for sensor in self.sensors.values():
        pending_uploads = sensor.datafiles_not_yet_uploaded()
        if len(pending_uploads) > 0:
            logging.info('There are %s pending data files to upload', len(pending_uploads))
        pending_count = 0
        for filename in pending_uploads:
            if not filename.endswith('.dat'):
                continue
            """
            filename_zip = filename + '.bz2'
            output = bz2.BZ2File(filename_zip, 'wb')
            infile = open(filename, 'rb')
            output.write(infile.read())
            output.close()
            infile.close()
            """
            # compress the data file with gzip before upload
            filename_zip = filename + '.gz'
            infile = open(filename, 'rb')
            with gzip.open(filename_zip, 'wb') as f:
                f.write(infile.read())
                # (redundant: the with-block already closes f)
                f.close()
            infile.close()
            filename_day = os.path.basename(filename).split('_')[1][:8] # extract the YYYYMMDD part
            key_name = filename_day + '/' + self.client_id + '/' + os.path.basename(filename_zip)
            try:
                key = self.bucket.new_key(key_name)
                key.set_contents_from_filename(filename_zip)
                logging.info('File %s was uploaded as %s', filename, key_name)
                sensor.mark_file_uploaded(filename)
                # the original .dat stays on disk; only the .gz is removed
                os.remove(filename_zip)
            except Exception, e:
                # upload failure: leave the file pending for the next attempt
                logging.error('Error uploading file %s to Amazon as %s: %s', filename, key_name, e)
            pending_count += 1
            if pending_count >= MAX_PENDING_UPLOADS:
                logging.warn('Limit of %s pending uploads added', pending_count)
                break
class PyCSNWebServer(BaseHTTPRequestHandler):
TIMESTAMP_NAMING = "%Y-%m-%dT%H:%M:%S"
TIMESTAMP_NAMING_PLOT = "[%-H, %-M, %-S]"
TIMESTAMP_JSON = "%Y%m%d %H:%M:%S.%f"
TIMESTAMP_NAMING_PRETTY = "2000-09-21T23:20:00"
SAMPLES_TO_SEND = 500
def log_message(self, format, *args):
    # Suppress BaseHTTPRequestHandler's default per-request stderr logging.
    # uncomment following to enable Web log entries
    #logging.info("%s - - [%s] %s" % (self.address_string(),self.log_date_time_string(),format%args))
    return
def send_message(self, message):
    # Reply 200 with a minimal HTML page wrapping `message`.
    self.send_response(200)
    self.send_header('Content-type','text/html')
    self.end_headers()
    # Send the html message
    self.wfile.write("<body>")
    self.wfile.write("<h1>CSN Phidgets Client</h1>")
    self.wfile.write(message)
    return
#Handler for the GET requests
def do_GET(self):
global this_client
try:
if self.path == '/favicon.ico':
return
elif self.path == '/uptime':
self.send_response(200)
self.send_header('Content-type','text/html')
self.end_headers()
elapsed_time = time.time() - this_client.server_start_time
uptime = datetime.timedelta(seconds=elapsed_time)
self.wfile.write("Uptime "+str(uptime))
return
elif self.path == '/version':
self.send_response(200)
self.send_header('Content-type','text/html')
self.end_headers()
self.wfile.write(str(SOFTWARE_VERSION))
return
elif self.path == '/getdata':
# The client will send a JSON encoded set of the very latest samples from the sensor
#logging.info('Request for latest sensor data in JSON format')
self.send_response(200)
self.send_header('Content-type','application/json')
self.end_headers()
latest_samples = this_client.GetLatestSamples()
if latest_samples == None:
self.wfile.write("No Sensor Data available")
return
if len(latest_samples) > self.SAMPLES_TO_SEND:
latest_samples = latest_samples[-self.SAMPLES_TO_SEND:]
json_samples = json.dumps(latest_samples)
#json_samples = json.dumps([(datetime.datetime.fromtimestamp(t).strftime(self.TIMESTAMP_JSON),acc) for (t,acc) in latest_samples])
self.wfile.write(json_samples)
return
elif self.path == '/getlatestsample':
# The client will send the JSON encoded very latest sample from the sensor
self.send_response(200)
self.send_header('Content-type','application/json')
self.end_headers()
logging.debug('Request for latest sensor reading in JSON format')
latest_samples = this_client.GetLatestSamples()
if latest_samples == None:
latest_sample = []
else:
latest_sample = latest_samples[-1]
json_sample = json.dumps(latest_sample)
#json_samples = | |
<reponame>awb-carleton/pattern-analysis<filename>foldit/check_models.py
from foldit.foldit_data import load_extend_data, make_series
from pattern_extraction import *
import argparse
from sklearn import svm, linear_model, ensemble
from sklearn.metrics import mean_squared_error, r2_score, explained_variance_score
import pygam
import pickle
import numpy as np
import pandas as pd
import logging
import json
import sys
import os
import string
import re
from ticc.TICC_solver import TICC
from typing import Dict, Tuple, List
from itertools import combinations, groupby, chain
from util import category_lookup
import matplotlib
matplotlib.use("Agg")
# Candidate subcluster counts (k2) tried when re-clustering within a pattern.
SUBPATTERN_KRANGE = [5, 10]
def compute_cluster_times(data: pd.DataFrame, cluster_lookup: Dict[int, np.ndarray],
                          mrf_lookup: Dict[int, Dict[int, np.ndarray]], puz_idx_lookup: dict) -> pd.DataFrame:
    """For every (uid, pid) row of `data`, compute per-cluster time, time
    ratio, action count and action ratio for each k in cluster_lookup, and
    merge the resulting columns back onto `data`."""
    feature_rows = []
    logging.debug("computing cluster times")
    for _, row in data.iterrows():
        key = (row.uid, row.pid)
        if row.relevant_sids is None or key not in puz_idx_lookup:
            continue
        relevant_deltas = sorted((d for d in row.deltas if d.sid in row.relevant_sids),
                                 key=lambda d: d.timestamp)
        timestamps = np.array([d.timestamp for d in relevant_deltas])
        action_counts = np.array([sum(d.action_diff.values()) for d in relevant_deltas])
        total_actions = action_counts.sum()
        if total_actions == 0:
            logging.debug("SKIPPING {} {}, no actions recorded".format(row.uid, row.pid))
            continue
        features = {'uid': row.uid, 'pid': row.pid}
        usable = True
        for k in cluster_lookup:
            assignments = cluster_lookup[k][slice(*puz_idx_lookup[key])]
            if len(timestamps) != len(assignments):
                logging.debug("SKIPPING {} {}, k={}, mismatch between number of timestamps and cluster data".format(row.uid, row.pid, k))
                usable = False
                continue
            for ci in range(k):
                if is_null_cluster(mrf_lookup[k][ci]):
                    continue
                time_key = "cluster_{}_time_k{}".format(ci, k)
                action_key = "cluster_{}_action_k{}".format(ci, k)
                features[time_key] = time_played(timestamps[assignments == ci])
                features["cluster_{}_ratio_k{}".format(ci, k)] = features[time_key] / row.relevant_time
                features[action_key] = sum(action_counts[assignments == ci])
                features["cluster_{}_action_ratio_k{}".format(ci, k)] = features[action_key] / total_actions
        if usable:
            feature_rows.append(features)
    return data.merge(pd.DataFrame(data=feature_rows), on=['pid', 'uid'])
def compute_subcluster_times(data: pd.DataFrame, cluster_lookup: dict, subclusters: dict,
                             subseries_lookup: dict, puz_idx_lookup: dict) -> pd.DataFrame:
    """For every (uid, pid) row of `data`, compute per-subcluster time, time
    ratio, action count and action ratio for each k / cid / k2 combination,
    and merge the resulting columns back onto `data`.

    BUG FIX: the per-user time ratio previously divided by `r.relevant_time`,
    where `r` was the stale loop variable left over from the first pass (i.e.
    the *last* row of `data`); each user's own relevant_time is now stored and
    used instead.
    """
    results = {}
    logging.debug("generating timestamps")
    for _, r in data.iterrows():
        if r.relevant_sids is None or (r.uid, r.pid) not in puz_idx_lookup:
            continue
        deltas = sorted([d for d in r.deltas if d.sid in r.relevant_sids], key=lambda x: x.timestamp)
        ts = np.array([d.timestamp for d in deltas])
        actions = np.array([sum(d.action_diff.values()) for d in deltas])
        if actions.sum() == 0:
            logging.debug("SKIPPING {} {}, no actions recorded".format(r.uid, r.pid))
            continue
        # remember this user's relevant_time so the second pass doesn't rely
        # on the loop variable `r`
        results[(r.uid, r.pid)] = {"times": {'uid': r.uid, 'pid': r.pid}, "ts": ts, "actions": actions,
                                   "relevant_time": r.relevant_time, "valid": True}
    logging.debug("computing subcluster times")
    for k in cluster_lookup:
        all_clusters = cluster_lookup[k]
        for cid in subclusters[k]:
            for k2 in SUBPATTERN_KRANGE:
                if k2 not in subclusters[k][cid]:
                    continue
                # np.str was a deprecated alias for builtin str (removed in
                # numpy >= 1.24); astype(str) is equivalent
                all_subclusters = all_clusters.astype(str)
                labels = ["{}{}".format(cid, string.ascii_uppercase[x]) for x in range(k2)]
                cs = subclusters[k][cid][k2]
                # overwrite the parent-cluster ids with subcluster labels
                for (_, _, start_idx), (s, e) in subseries_lookup[k][cid]['idx_lookup'].items():
                    all_subclusters[start_idx: start_idx + (min(e, len(cs)) - s)] = [labels[c] for c in cs[s:e]]
                for uid, pid in results:
                    puz_cs = all_subclusters[slice(*puz_idx_lookup[(uid, pid)])]
                    ts = results[(uid, pid)]["ts"]
                    actions = results[(uid, pid)]["actions"]
                    if len(ts) != len(puz_cs):
                        results[(uid, pid)]["valid"] = False
                        continue
                    times = results[(uid, pid)]["times"]
                    for scid in labels:
                        time_key = "sk{}_subcluster_{}_time_k{}".format(k2, scid, k)
                        action_key = "sk{}_subcluster_{}_action_k{}".format(k2, scid, k)
                        times[time_key] = time_played(ts[puz_cs == scid])
                        times["sk{}_subcluster_{}_ratio_k{}".format(k2, scid, k)] = times[time_key] / results[(uid, pid)]["relevant_time"]
                        times[action_key] = sum(actions[puz_cs == scid])
                        times["sk{}_subcluster_{}_action_ratio_k{}".format(k2, scid, k)] = times[action_key] / actions.sum()
    subcluster_times = [v["times"] for v in results.values() if v["valid"]]
    return data.merge(pd.DataFrame(data=subcluster_times), on=['pid', 'uid'])
if __name__ == "__main__":
parser = argparse.ArgumentParser(prog='check_models.py')
parser.add_argument("datapath")
parser.add_argument("--new-ticc", action='store_true')
parser.add_argument("--debug", action='store_true')
parser.add_argument("--no-test", action='store_true')
args = parser.parse_args()
seed =13*17*31
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
if not os.path.exists(args.datapath):
logging.error("datapath {} does not exist".format(args.datapath))
sys.exit(1)
with open(args.datapath + "/config.json") as fp:
config = json.load(fp)
pids = config["pids"]
test_pids = config["test_pids"]
krange = config["krange"]
soln_lookup = {}
parent_lookup = {}
child_lookup = {}
data, puz_metas = load_extend_data(pids, soln_lookup, parent_lookup, child_lookup, config["evolver"], 600)
test_data, test_metas = load_extend_data(test_pids, soln_lookup, parent_lookup, child_lookup, config["evolver"], 600)
if args.new_ticc:
logging.debug("Constructing time series")
puz_idx_lookup, series_lookup, noise = make_series(data)
num_features = next(x for x in series_lookup.values()).shape[1]
idx_lookup, all_series = combine_user_series(series_lookup, noise)
puz_idx_lookup = {(uid, pid): (s + idx_lookup[uid[:uid.index('e')] if 'e' in uid else uid][0],
e + idx_lookup[uid[:uid.index('e')] if 'e' in uid else uid][0])
for (uid, pid), (s, e) in puz_idx_lookup.items()}
np.savetxt(args.datapath + "/noise_values.txt", noise)
np.savetxt(args.datapath + "/all_series.txt", all_series)
with open(args.datapath + "/puz_idx_lookup.pickle", 'wb') as fp:
pickle.dump(puz_idx_lookup, fp)
with open(args.datapath + "/idx_lookup.pickle", 'wb') as fp:
pickle.dump(idx_lookup, fp)
run_TICC({"all": all_series}, args.datapath, krange)
cluster_lookup, mrf_lookup, model_lookup, bic_lookup = load_TICC_output(args.datapath, ["all"], krange)
subseries_lookup = {}
for k in krange:
patterns = get_patterns(mrf_lookup["all"][k], cluster_lookup["all"][k], puz_idx_lookup)
subseries_lookup[k] = make_subseries_lookup(k, patterns, mrf_lookup["all"][k], all_series, noise)
run_sub_TICC(subseries_lookup, args.datapath, "all", SUBPATTERN_KRANGE)
else:
noise = np.loadtxt(args.datapath + "/noise_values.txt")
all_series = np.loadtxt(args.datapath + "/all_series.txt")
with open(args.datapath + "/idx_lookup.pickle", 'rb') as fp:
idx_lookup = pickle.load(fp)
with open(args.datapath + "/puz_idx_lookup.pickle", 'rb') as fp:
puz_idx_lookup = pickle.load(fp)
cluster_lookup, mrf_lookup, model_lookup, bic_lookup = load_TICC_output(args.datapath, ["all"], krange)
with open(args.datapath + "/all/subpatterns/subseries_lookup.pickle", 'rb') as fp:
subseries_lookup = pickle.load(fp)
sub_lookup = load_sub_lookup(args.datapath + "/all", subseries_lookup)
if args.no_test:
sys.exit(0)
test_puz_idx_lookup, test_series_lookup, _ = make_series(test_data, noise)
test_idx_lookup, test_all_series = combine_user_series(test_series_lookup, noise)
test_puz_idx_lookup = {(uid, pid): (s + test_idx_lookup[uid[:uid.index('e')] if 'e' in uid else uid][0],
e + test_idx_lookup[uid[:uid.index('e')] if 'e' in uid else uid][0])
for (uid, pid), (s, e) in test_puz_idx_lookup.items()}
logging.debug("Predicting clusters on test data")
cluster_lookup["test"] = {}
test_subseries_lookup = {}
test_subcluster_lookup = {}
for k in krange:
cluster_lookup["test"][k] = predict_from_saved_model(test_all_series, model_lookup["all"][k])
test_patterns = get_patterns(mrf_lookup["all"][k], cluster_lookup["test"][k], test_puz_idx_lookup)
test_subseries_lookup[k] = make_subseries_lookup(k, test_patterns, mrf_lookup["all"][k], test_all_series, noise)
test_subcluster_lookup[k] = {}
for cid in range(k):
if cid not in test_subseries_lookup[k]:
continue
test_subcluster_lookup[k][cid] = {}
for k2 in SUBPATTERN_KRANGE:
test_subcluster_lookup[k][cid][k2] = predict_from_saved_model(test_subseries_lookup[k][cid]["series"],
sub_lookup["models"][k][cid][k2])
logging.debug("Computing cluster times")
results = compute_cluster_times(data, cluster_lookup["all"], mrf_lookup["all"], puz_idx_lookup)
results = compute_subcluster_times(results, cluster_lookup["all"], sub_lookup["clusters"], subseries_lookup, puz_idx_lookup)
test_results = compute_cluster_times(test_data, cluster_lookup["test"], mrf_lookup["all"], test_puz_idx_lookup)
test_results = compute_subcluster_times(test_results, cluster_lookup["test"], test_subcluster_lookup, test_subseries_lookup,
test_puz_idx_lookup)
baseline_features = ["relevant_time", "time", "action_count_all", "action_count_relevant", "action_count_best", "action_rate_all",
"action_rate_relevant", "median_prior_perf", "experience", "best_energy_time"]
with open("data/user_metadata_v2.csv") as fp:
user_metas = {(r['uid'], r['pid']): r for r in csv.DictReader(fp)}
for v in user_metas.values():
v['time'] = int(v['time'])
v['relevant_time'] = int(float(v['relevant_time']))
v['best_energy_time'] = int(v['best_energy_time'])
v['action_count_all'] = int(v['action_count_all'])
v['action_count_relevant'] = int(v['action_count_relevant'])
v['action_count_best'] = int(v['action_count_best'])
v['best_energy'] = float(v['best_energy'])
v['perf'] = float(v['perf'])
user_meta_lookup = {uid: list(metas) for uid, metas in groupby(sorted(user_metas.values(), key=lambda m: m['uid']), lambda m: m['uid'])}
with open("data/puzzle_categories_latest.csv") as fp:
puz_cat = {r['nid']: r['categories'].split(',') for r in csv.DictReader(fp)}
results["action_count_all"] = results.apply(lambda r: user_metas[(r.uid, r.pid)]["action_count_all"], axis=1)
results["action_count_relevant"] = results.apply(lambda r: user_metas[(r.uid, r.pid)]["action_count_relevant"], axis=1)
results["action_count_best"] = results.apply(lambda r: user_metas[(r.uid, r.pid)]["action_count_best"], axis=1)
results["best_energy_time"] = results.apply(lambda r: user_metas[(r.uid, r.pid)]["best_energy_time"], axis=1)
results["action_rate_all"] = results.apply(lambda r: r.action_count_all / r.time, axis=1)
results["action_rate_relevant"] = results.apply(lambda r: r.action_count_relevant / r.relevant_time, axis=1)
results["experience"] = results.apply(lambda r: len([x for x in user_meta_lookup[r.uid] if x['pid'] < r.pid and category_lookup["beginner"] not in puz_cat[x['pid']]]), axis=1)
results["median_prior_perf"] = results.apply(lambda r: np.median([float(x['perf']) for x in user_meta_lookup[r.uid] if x['pid'] < r.pid and category_lookup["beginner"] not in puz_cat[x['pid']]]), axis=1)
results.median_prior_perf.fillna(results.median_prior_perf.median(), inplace=True)
test_results["action_count_all"] = test_results.apply(lambda r: user_metas[(r.uid, r.pid)]["action_count_all"], axis=1)
test_results["action_count_relevant"] = test_results.apply(lambda r: user_metas[(r.uid, r.pid)]["action_count_relevant"], axis=1)
test_results["action_count_best"] = test_results.apply(lambda r: user_metas[(r.uid, r.pid)]["action_count_best"], axis=1)
test_results["best_energy_time"] = test_results.apply(lambda r: user_metas[(r.uid, r.pid)]["best_energy_time"], axis=1)
test_results["action_rate_all"] = test_results.apply(lambda r: r.action_count_all / r.time, axis=1)
test_results["action_rate_relevant"] = test_results.apply(lambda r: r.action_count_relevant / r.relevant_time, axis=1)
test_results["experience"] = test_results.apply(lambda r: len([x for x in user_meta_lookup[r.uid] if x['pid'] < r.pid and category_lookup["beginner"] not in puz_cat[x['pid']]]), axis=1)
test_results["median_prior_perf"] = test_results.apply(lambda r: np.median([float(x['perf']) for x in user_meta_lookup[r.uid] if x['pid'] < r.pid and category_lookup["beginner"] not in puz_cat[x['pid']]]), axis=1)
test_results.median_prior_perf.fillna(test_results.median_prior_perf.median(), inplace=True)
models = {#"ridge": linear_model.Ridge,
"ensemble": ensemble.GradientBoostingRegressor,}
#"gam": pygam.LinearGAM}
model_params = {"ridge": {"random_state": seed},
"ensemble": {"random_state": seed, "learning_rate": 0.1, "subsample": 0.5,
"loss": "huber", "n_estimators": 1000, "n_iter_no_change": 100,
"alpha": 0.95},
"gam": {}}
print("BASELINE MODELS")
for label, model in models.items():
fsets = [fset for fset in chain(*[combinations(baseline_features, n) for n in range(1, len(baseline_features) + 1)])]
ms = [model(**model_params[label]).fit(results[list(fset)], results.perf) for fset in fsets]
base_fset, base_m = min(zip(fsets, ms), key=lambda x: mean_squared_error(test_results.perf, x[1].predict(test_results[list(x[0])])))
print(label)
print("explained variance", explained_variance_score(test_results.perf,
base_m.predict(test_results[list(base_fset)])))
print("RMSE", mean_squared_error(test_results.perf,
base_m.predict(test_results[list(base_fset)]))**0.5)
print(base_fset)
print()
print()
print("MODELS OF PATTERN TIMES".format(k))
for ftype in ["time", "ratio", "action", "action_ratio"]:
print(ftype)
for label, model in models.items():
print(label)
fsets = []
for k in krange:
print("k={}".format(k), end=", ")
sys.stdout.flush()
patterns_best_fsets = []
for cid in range(k):
if "cluster_{}_{}_k{}".format(cid, ftype, k) not in results.columns:
continue
std_fsets = [["cluster_{}_{}_k{}".format(n, ftype, k) for n in range(k) if "cluster_{}_{}_k{}".format(n, ftype, k)
in results.columns]]
std_fsets.append([x for x in std_fsets[0] if "_{}_".format(cid) not in x])
for k2 in SUBPATTERN_KRANGE:
std_fsets.append(std_fsets[1] + ["sk{}_subcluster_{}{}_{}_k{}".format(k2, cid, l, ftype, k) for l
in string.ascii_uppercase[:k2] if "sk{}_subcluster_{}{}_{}_k{}".format(k2, cid, l, ftype, k) in test_results.columns])
for fset in std_fsets:
fset.extend(base_fset)
ms = [model(**model_params[label]).fit(results[fset], results.perf) for fset in std_fsets]
best_fset = min(zip(ms, std_fsets),
key=lambda x: mean_squared_error(test_results.perf, x[0].predict(test_results[x[1]])))[1]
patterns_best_fsets.extend([f for f in best_fset if re.search(r"cluster_{}[_A-Z]".format(cid), f)])
features = patterns_best_fsets # [f for f in results.columns if "_k{}".format(k) in f]
best_fset = features
best_fset.extend(base_fset)
best_m = model(**model_params[label]).fit(results[best_fset], results.perf)
cur_MSE | |
from copy import deepcopy
import time
# Agent that solves Sudoku puzzles
present_mode = True  # when True, show the solving process step by step
class Sudoku:
def __init__(self):
    """Load the puzzle from disk and seed every empty cell with its
    possibility string; heuristic counts the empty cells."""
    self.boards_so_far = []
    self.sudoku_table = [0]*9
    self.read_table()
    self.heuristic = 0 # number of zeros
    # board is 9x9, so the outer length doubles as the inner range bound
    for i in range(0,len(self.sudoku_table)):
        for j in range(0,len(self.sudoku_table)):
            if self.sudoku_table[i][j] == "0":
                # heuristic starts off as number of zeros
                self.heuristic += 1
                # cell becomes "0" + its legal digits, e.g. "0" -> "0357"
                self.sudoku_table[i][j]+= self.get_possibilities(i,j,self.sudoku_table)
    #for x in range(0,len(self.sudoku_table)):
    #    for y in range(0,len(self.sudoku_table)):
def read_table(self):
    """ Reads sudoku puzzle from file, each row represents one section, 9 sections total
    Ex.
    For the puzzle:
    8 0 0 4 0 6 0 0 7
    0 0 0 0 0 0 4 0 0
    0 1 0 0 0 0 6 5 0
    5 0 9 0 3 0 7 8 0
    0 0 0 0 7 0 0 0 0
    0 4 8 0 2 0 1 0 3
    0 5 2 0 0 0 0 9 0
    0 0 1 0 0 0 0 0 0
    3 0 0 9 0 2 0 0 5
    8,0,0,0,0,0,0,1,0 would be line 1 of input,
    4,0,6,0,0,0,0,0,0 would be line 2 of input,
    etc.
    Stored in nested array with each index representing a section which contains another array.
    For the puzzle above, resulting data structure is:
    [['8', '0', '0', '0', '0', '0', '0', '1', '0'], ['4', '0', '6', '0', '0', '0', '0', '0', '0'], ['0', '0', '7', '4', '0', '0', '6', '5', '0'],
    ['5', '0', '9', '0', '0', '0', '0', '4', '8'], ['0', '3', '0', '0', '7', '0', '0', '2', '0'], ['7', '8', '0', '0', '0', '0', '1', '0', '3'],
    ['0', '5', '2', '0', '0', '1', '3', '0', '0'], ['0', '0', '0', '0', '0', '0', '9', '0', '2'], ['0', '9', '0', '0', '0', '0', '0', '0', '5']]
    """
    # BUG FIX: the file handle was never closed; a with-block guarantees it.
    with open("input_sudoku_puzzle.txt", "r") as sudoku_file:
        self.sudoku_table = [0]*9
        for i in range(len(self.sudoku_table)):
            self.sudoku_table[i] = sudoku_file.readline().rstrip('\n').split(',')
# Prints Sudoku in a nice, standard format
def print_table_with_possibilities(self, board):
    # Print the 9x9 grid: each cell shows its placed digit plus any remaining
    # possibility string (Python 2 print statements).
    # NOTE(review): the condition reads self.sudoku_table while the values
    # printed come from `board` — presumably both refer to the same grid;
    # confirm against callers.
    for sections_of_three in range(3):
        for row in range(3):
            for i in range(0+sections_of_three*3,3*(sections_of_three+1)):
                if i % 3 == 0 :
                    print ""
                for z in range(0+(row*3),3*(row+1)):
                    #print "\nrow is %s " % row
                    #print "\ntimes run %s" % z
                    #print "\n i is %s" % i
                    if self.sudoku_table[i][z][1:] != "":
                        print "|%s|%-9s" % (board[i][z][0], board[i][z][1:]),
                    else:
                        print "|%s|%-9s" % (board[i][z][0], ""),
                print "\t\t",
            print "\n\n"
def print_table(self,board):
    # Print only the placed digit (first character) of every cell in the
    # 9x9 grid (Python 2 print statements).
    for sections_of_three in range(3):
        for row in range(3):
            for i in range(0+sections_of_three*3,3*(sections_of_three+1)):
                if i % 3 ==0 :
                    # blank line before each band of three sections
                    print ""
                    for z in range(0+(row*3),3*(row+1)):
                        #print "\nrow is %s " % row
                        #print "\ntimes run %s" % z
                        #print "\n i is %s" % i
                        print "%s" % board[i][z][0],
                        print " ",
                else:
                    for z in range(0+(row*3),3*(row+1)):
                        #print "\nrow is %s " % row
                        #print "\ntimes run %s" % z
                        #print "\n i is %s" % i
                        print "%s" % board[i][z][0],
                        print " ",
            print ""
def get_possibilities(self,i,j,board):
    """Return the string of digits 1-9 that can legally be placed at
    board[i][j].

    The board is stored section-major: board[i] is the i-th 3x3 section and
    j indexes a cell within that section, so the "row" and "column" scans
    below translate (section, offset) coordinates onto the grid.  Only the
    first character of each cell (the placed digit) is compared.
    Python 2: `/` is integer division here.
    """
    possibilites = ""
    for potential_poss in range(1,10):
        # get first character of each location of section
        first_character_list = list()
        for x in range(0,9):
            first_character_list.append(board[i][x][0])
        # check section
        if str(potential_poss) not in first_character_list:
            # check row
            row = list()
            row_i_start = 3 * (i/3)
            row_j_start = 3 * (j/3)
            for row_i_iterator in range(row_i_start, row_i_start + 3):
                for row_j_iterator in range(row_j_start, row_j_start + 3):
                    row.append(board[row_i_iterator][row_j_iterator][0])
            if str(potential_poss) not in row:
                # check columns
                column = list()
                col_i_start = i%3;
                col_j_start = j%3;
                for col_i_iter in xrange(col_i_start, col_i_start + 7, 3):
                    for col_j_iter in xrange(col_j_start, col_j_start + 7, 3):
                        column.append(board[col_i_iter][col_j_iter][0])
                if str(potential_poss) not in column:
                    # digit passed all three constraints
                    possibilites+=str(potential_poss)
    return possibilites
def skip_invalid_single_possibilities(self, board,i,j):
    # Return True when another cell in the same row, column or section has
    # exactly one remaining possibility (length 2: "0" + digit) contained in
    # this cell's value string — i.e. that digit is already forced elsewhere,
    # so this cell should be skipped.  Debug prints are Python 2 statements.
    value = board[i][j]
    # iterate through row
    row_i_start = 3 * (i/3)
    row_j_start = 3 * (j/3)
    for row_i_iterator in range(row_i_start, row_i_start + 3):
        for row_j_iterator in range(row_j_start, row_j_start + 3):
            if board[row_i_iterator][row_j_iterator] in value and not (row_i_iterator == i and row_j_iterator == j) and len(board[row_i_iterator][row_j_iterator])==2:
                print "skipped"
                print "position"+str(i) + str(j)
                return True
    print '\n'
    # iterate through columns
    col_i_start = i%3;
    col_j_start = j%3;
    for col_i_iter in xrange(col_i_start, col_i_start + 7, 3):
        for col_j_iter in xrange(col_j_start, col_j_start + 7, 3):
            if board[col_i_iter][col_j_iter] in value and not (col_i_iter == i and col_j_iter == j) and len(board[col_i_iter][col_j_iter])==2:
                print "skipped"
                print "position"+str(i) + str(j)
                return True
    print '\n'
    # finally scan the cell's own section
    for x in range(0,9):
        if board[i][x] in value and x != j and len(board[i][x])==2:
            print "skipped"
            print "position"+str(i) + str(j)
            return True
    return False
# Returns a list of triples which contain c[0] sector, c[1] position, and c[2] string of possibilities
def get_cells_with_allowed_num_poss(self, count, board, previous_possibilities):
    """Return [section, position, possibilities] triples for every cell whose
    value string has exactly `count` characters, skipping cells ruled out by
    skip_invalid_single_possibilities and cells already listed in
    previous_possibilities.

    BUG FIX: the original `for value in previous_possibilities: if ...:
    continue` only continued the inner loop, so previously tried cells were
    never actually skipped; a membership test is what was intended.  Also
    renamed the accumulator, which shadowed the builtin `list`, and dropped
    the stray debug print.
    """
    cells = []
    for i in range(9):
        for j in range(9):
            if len(board[i][j]) != count:
                continue
            if self.skip_invalid_single_possibilities(board,i,j):
                continue
            if [i,j,board[i][j]] in previous_possibilities:
                continue
            cells.append([i,j,board[i][j]])
    return cells
# Returns a boolean depending upon if each cell has a single possibility
def board_filled(self,board):
    """Return True when no cell string in the 9x9 grid holds more than one
    character, i.e. every cell is down to a single possibility."""
    return not any(len(board[r][c]) > 1 for r in range(9) for c in range(9))
# TO BE DONE, PRIORITY
def no_more_possibilites(self,board):
    """Return True when some cell is a dead end: its string has length 1
    and that single character is '0' (empty with no candidates left).
    Returning False means the board can still be worked on."""
    for row in range(0,9):
        for col in xrange(0,9):
            cell = board[row][col]
            if len(cell) == 1 and cell[0] == '0':
                # dead cell -- the current search branch cannot succeed
                return True
    return False
# TO BE DONE, PRIORITY
def place_poss_in_board(self,value,section, position, board):
    """Return a deep copy of *board* with str(value) written into
    [section][position]; the input board is left untouched."""
    updated = deepcopy(board)
    updated[section][position] = str(value)
    return updated
# TO BE DONE
def violation_occured(self,section, position, board):
    """Return True when the digit placed at (section, position) -- char 0
    of the cell string -- also appears as the placed digit of a peer in
    the same sudoku row, column, or 3x3 sector.

    Board layout (inferred -- confirm): board[sector][cell], sector-major.
    Python 2 code (print statements, xrange).
    """
    value = board[section][position][0]
    # sudoku row = 3 sectors across one sector-row x 3 cells per sector
    row_i_start = 3 * (section/3)
    row_j_start = 3 * (position/3)
    for row_i_iterator in range(row_i_start, row_i_start + 3):
        for row_j_iterator in range(row_j_start, row_j_start + 3):
            if value in board[row_i_iterator][row_j_iterator][0] and not (row_i_iterator == section and row_j_iterator == position):
                print board[row_i_iterator][row_j_iterator]
                return True
    print '\n'
    # iterate through columns
    col_i_start = section%3;
    col_j_start = position%3;
    for col_i_iter in xrange(col_i_start, col_i_start + 7, 3):
        for col_j_iter in xrange(col_j_start, col_j_start + 7, 3):
            if value in board[col_i_iter][col_j_iter][0] and not (col_i_iter == section and col_j_iter == position):
                print board[col_i_iter][col_j_iter]
                return True
    print '\n'
    # other cells of the same 3x3 sector
    for x in range(0,9):
        if value in board[section][x][0] and x != position:
            print board[section][x]
            return True
    return False
def unmark_board(self, section, position, board):
    """Undo a tentative placement at (section, position), in place.

    Every row/column/sector peer that lost *value* as a candidate gets it
    appended back (char 0 -- the peer's placed digit -- stays first, the
    candidate tail is re-sorted). Finally the source cell itself is
    emptied: the placed digit becomes '0' and *value* rejoins the sorted
    candidate string ('0' sorts to the front).
    NOTE(review): indentation reconstructed from context -- verify nesting
    against version control. Python 2 code (print statements, xrange).
    """
    value = board[section][position]
    print "position"+str(section) + str(position)
    # iterate through row
    row_i_start = 3 * (section/3)
    row_j_start = 3 * (position/3)
    for row_i_iterator in range(row_i_start, row_i_start + 3):
        for row_j_iterator in range(row_j_start, row_j_start + 3):
            if value not in board[row_i_iterator][row_j_iterator] and not (row_i_iterator == section and row_j_iterator == position):
                board[row_i_iterator][row_j_iterator]=board[row_i_iterator][row_j_iterator]+value
                # keep char 0 (placed digit) fixed, sort the candidate tail
                board[row_i_iterator][row_j_iterator] = board[row_i_iterator][row_j_iterator][0]+''.join(sorted(board[row_i_iterator][row_j_iterator][1:]))
                print board[row_i_iterator][row_j_iterator]
    print '\n'
    # iterate through columns
    col_i_start = section%3;
    col_j_start = position%3;
    for col_i_iter in xrange(col_i_start, col_i_start + 7, 3):
        for col_j_iter in xrange(col_j_start, col_j_start + 7, 3):
            if value not in board[col_i_iter][col_j_iter] and not (col_i_iter == section and col_j_iter == position):
                board[col_i_iter][col_j_iter]=board[col_i_iter][col_j_iter]+ value
                board[col_i_iter][col_j_iter] = board[col_i_iter][col_j_iter][0] + ''.join(sorted(board[col_i_iter][col_j_iter][1:]))
                print board[col_i_iter][col_j_iter]
    print '\n'
    # other cells of the same 3x3 sector
    for x in range(0,9):
        if value not in board[section][x] and x != position:
            board[section][x]=board[section][x]+value
            board[section][x]=board[section][x][0] + ''.join(sorted(board[section][x][1:]))
            print board[section][x]
    # empty the source cell: placed digit -> '0', value becomes a candidate
    # again; the full sort puts '0' back at the front of the string.
    board[section][position] = board[section][position].replace(value, '0')
    board[section][position] = board[section][position]+value
    board[section][position] = ''.join(sorted(board[section][position]))
    return
# TO BE DONE, PRIORITY
def remark_board(self, section, position, board):
value = board[section][position]
print "position"+str(section) + str(position)
# iterate through row
row_i_start = 3 * (section/3)
row_j_start = 3 * (position/3)
for row_i_iterator in range(row_i_start, row_i_start + 3):
for row_j_iterator in range(row_j_start, row_j_start + 3):
if value in board[row_i_iterator][row_j_iterator] and not (row_i_iterator == section and row_j_iterator == position):
board[row_i_iterator][row_j_iterator]=board[row_i_iterator][row_j_iterator].replace(value, "")
print board[row_i_iterator][row_j_iterator]
print '\n'
| |
EmptyResultException:
return iter([])
else:
# call the __iter__ method directly
return iter(self._qr)
def __iter__(self):
    # Iterating the query runs it: execute() already returns an iterator
    # over the result rows (or an empty iterator when nothing can match).
    return self.execute()
class UpdateQuery(BaseQuery):
    """SQL UPDATE builder: UpdateQuery(Model, field=new_value, ...).

    WHERE clauses come from the inherited BaseQuery machinery; JOINs are
    explicitly unsupported. Python 2 code (dict.iteritems).
    """
    def __init__(self, model, **kwargs):
        # kwargs maps field (or related-field) names to their new values.
        self.update_query = kwargs
        super(UpdateQuery, self).__init__(model)
    def clone(self):
        # Copy filter/join state so the clone can be refined independently.
        query = UpdateQuery(self.model, **self.update_query)
        query._where = dict(self._where)
        query._joins = list(self._joins)
        return query
    def parse_update(self):
        """Resolve each keyword to a model field and convert the value to
        its database representation; returns {column_name: db_value}."""
        sets = {}
        for k, v in self.update_query.iteritems():
            try:
                field = self.model._meta.get_field_by_name(k)
            except AttributeError:
                # fall back to related-field lookup (e.g. 'user' -> user_id)
                field = self.model._meta.get_related_field_by_name(k)
                if field is None:
                    raise
            sets[field.name] = field.db_value(v)
        return sets
    def sql(self):
        """Return (sql_string, params): SET params first, WHERE params after,
        matching placeholder order in the statement."""
        joins, where, where_data, alias_map = self.compile_where()
        set_statement = self.parse_update()
        params = []
        update_params = []
        for k, v in set_statement.iteritems():
            params.append(v)
            update_params.append('%s=%s' % (k, self.interpolation))
        update = 'UPDATE %s SET %s' % (
            self.model._meta.db_table, ', '.join(update_params))
        where = ' AND '.join(where)
        pieces = [update]
        if where:
            pieces.append('WHERE %s' % where)
            params.extend(self.convert_where_to_params(where_data))
        return ' '.join(pieces), params
    def join(self, *args, **kwargs):
        raise AttributeError('Update queries do not support JOINs in sqlite')
    def execute(self):
        # Returns the number of rows changed.
        result = self.raw_execute()
        return self.database.rows_affected(result)
class DeleteQuery(BaseQuery):
    """SQL DELETE builder for a model; supports WHERE but not JOINs.

    Python 2 code, mirrors UpdateQuery's structure.
    """
    def clone(self):
        # Copy filter/join state so the clone can be refined independently.
        query = DeleteQuery(self.model)
        query._where = dict(self._where)
        query._joins = list(self._joins)
        return query
    def sql(self):
        """Return (sql_string, params) for the DELETE statement."""
        joins, where, where_data, alias_map = self.compile_where()
        params = []
        delete = 'DELETE FROM %s' % (self.model._meta.db_table)
        where = ' AND '.join(where)
        pieces = [delete]
        if where:
            pieces.append('WHERE %s' % where)
            params.extend(self.convert_where_to_params(where_data))
        return ' '.join(pieces), params
    def join(self, *args, **kwargs):
        # BUG FIX: message previously said "Update queries" -- a copy/paste
        # from UpdateQuery.join.
        raise AttributeError('Delete queries do not support JOINs in sqlite')
    def execute(self):
        # Returns the number of rows removed.
        result = self.raw_execute()
        return self.database.rows_affected(result)
class InsertQuery(BaseQuery):
    """SQL INSERT builder: InsertQuery(Model, field=value, ...).

    WHERE and JOIN are meaningless for inserts and raise immediately.
    Python 2 code (dict.iteritems).
    """
    def __init__(self, model, **kwargs):
        self.insert_query = kwargs
        super(InsertQuery, self).__init__(model)
    def parse_insert(self):
        # Split the keyword mapping into parallel column / db-value lists.
        columns = []
        values = []
        for name, raw in self.insert_query.iteritems():
            field = self.model._meta.get_field_by_name(name)
            columns.append(name)
            values.append(field.db_value(raw))
        return columns, values
    def sql(self):
        """Return (sql_string, params) with one placeholder per value."""
        columns, values = self.parse_insert()
        placeholders = ','.join(self.interpolation for _ in values)
        statement = 'INSERT INTO %s (%s) VALUES (%s)' % (
            self.model._meta.db_table,
            ','.join(columns),
            placeholders
        )
        return statement, values
    def where(self, *args, **kwargs):
        raise AttributeError('Insert queries do not support WHERE clauses')
    def join(self, *args, **kwargs):
        raise AttributeError('Insert queries do not support JOINs')
    def execute(self):
        # Returns the primary key of the newly inserted row.
        return self.database.last_insert_id(self.raw_execute(), self.model)
class Field(object):
    """Base class for model columns.

    Responsible for DDL rendering (via *field_template*), null handling,
    and the Python <-> database value conversions that subclasses
    specialize.
    """
    db_field = ''
    default = None
    field_template = "%(column_type)s%(nullable)s"
    def get_attributes(self):
        # Subclasses override to seed template attributes (e.g. max_length).
        return {}
    def __init__(self, null=False, db_index=False, *args, **kwargs):
        self.null = null
        self.db_index = db_index
        self.attributes = self.get_attributes()
        self.default = kwargs.get('default', None)
        # 'nullable' fills the %(nullable)s slot of field_template.
        kwargs['nullable'] = ternary(self.null, '', ' NOT NULL')
        self.attributes.update(kwargs)
    def add_to_class(self, klass, name):
        # Bind this field to its model and reserve the attribute slot.
        self.name = name
        self.model = klass
        setattr(klass, name, None)
    def render_field_template(self):
        # The backend decides the concrete column type for this field kind.
        self.attributes['column_type'] = self.model._meta.database.column_for_field(self.db_field)
        return self.field_template % self.attributes
    def to_sql(self):
        # Column DDL fragment: "<name> <type ...>".
        return '%s %s' % (self.name, self.render_field_template())
    def null_wrapper(self, value, default=None):
        # Pass None through for nullable fields (or when no fallback is
        # given); otherwise substitute the fallback for falsy values.
        if default is None or (self.null and value is None):
            return value
        return value or default
    def db_value(self, value):
        # Identity by default; subclasses coerce to the storage type.
        return value
    def python_value(self, value):
        # Identity by default; subclasses coerce back to Python types.
        return value
    def lookup_value(self, lookup_type, value):
        return self.db_value(value)
class CharField(Field):
    """VARCHAR column truncated to attributes['max_length'] (default 255)."""
    db_field = 'string'
    field_template = '%(column_type)s(%(max_length)d)%(nullable)s'
    def get_attributes(self):
        return {'max_length': 255}
    def db_value(self, value):
        # Nullable fields keep None; everything else becomes a (possibly
        # empty) string clipped to max_length.
        if self.null and value is None:
            return value
        return (value or '')[:self.attributes['max_length']]
    def lookup_value(self, lookup_type, value):
        # Glob wildcards for "contains", SQL LIKE wildcards for "icontains".
        converted = self.db_value(value)
        if lookup_type == 'contains':
            return '*%s*' % converted
        if lookup_type == 'icontains':
            return '%%%s%%' % converted
        return converted
class TextField(Field):
    """Unbounded TEXT column; missing values default to the empty string."""
    db_field = 'text'
    def db_value(self, value):
        return self.null_wrapper(value, '')
    def lookup_value(self, lookup_type, value):
        # Glob wildcards for "contains", SQL LIKE wildcards for "icontains".
        converted = self.db_value(value)
        if lookup_type == 'contains':
            return '*%s*' % converted
        if lookup_type == 'icontains':
            return '%%%s%%' % converted
        return converted
class DateTimeField(Field):
    """DATETIME column; parses 'YYYY-MM-DD HH:MM:SS[.ffffff]' strings back
    into datetime objects, passing non-strings through unchanged."""
    db_field = 'datetime'
    def python_value(self, value):
        if not isinstance(value, string_types):
            return value
        # Drop any fractional-seconds suffix, then parse the fixed format.
        stamp = value.rsplit('.', 1)[0]
        return datetime(*time.strptime(stamp, '%Y-%m-%d %H:%M:%S')[:6])
class IntegerField(Field):
    """INTEGER column; missing values store as 0, reads coerce to int."""
    db_field = 'integer'
    def db_value(self, value):
        return self.null_wrapper(value, 0)
    def python_value(self, value):
        if value is None:
            return None
        return int(value)
class BooleanField(IntegerField):
    """Stored as INTEGER 0/1 (by truthiness); surfaces as bool."""
    db_field = 'boolean'
    def db_value(self, value):
        return 1 if value else 0
    def python_value(self, value):
        return bool(value)
class FloatField(Field):
    """Floating-point column; missing values store as 0.0, reads coerce
    to float."""
    db_field = 'float'
    def db_value(self, value):
        return self.null_wrapper(value, 0.0)
    def python_value(self, value):
        if value is None:
            return None
        return float(value)
class PrimaryKeyField(IntegerField):
    # Integer primary key; the PK clause is baked directly into the column
    # template (the metaclass synthesizes one of these as 'id' when a model
    # declares no primary key).
    db_field = 'primary_key'
    field_template = "%(column_type)s NOT NULL PRIMARY KEY"
class ForeignRelatedObject(object):
    """Descriptor for the forward side of a foreign key.

    Reading lazily fetches the related *to* instance by primary key and
    memoizes it on the owner under '_cache_<field>'; assigning stores both
    the raw id and the cached object.
    """
    def __init__(self, to, name):
        self.to = to
        self.field_name = name
        self.cache_name = '_cache_%s' % name
    def __get__(self, instance, instance_type=None):
        cached = getattr(instance, self.cache_name, None)
        if not cached:
            # Resolve the stored id to a model instance and cache it.
            related_pk = getattr(instance, self.field_name, 0)
            query_result = self.to.select().where(**{self.to._meta.pk_name: related_pk}).execute()
            setattr(instance, self.cache_name, next(query_result))
        return getattr(instance, self.cache_name)
    def __set__(self, instance, obj):
        assert isinstance(obj, self.to), "Cannot assign %s, invalid type" % obj
        setattr(instance, self.field_name, obj.get_pk())
        setattr(instance, self.cache_name, obj)
class ReverseForeignRelatedObject(object):
    """Descriptor for the reverse side of a foreign key: reading returns a
    query over all *related_model* rows whose FK column points at the
    owner instance's primary key."""
    def __init__(self, related_model, name):
        self.related_model = related_model
        self.field_name = name
    def __get__(self, instance, instance_type=None):
        filters = {self.field_name: instance.get_pk()}
        return self.related_model.select().where(**filters)
class ForeignKeyField(IntegerField):
    """Integer column storing another model's primary key.

    Declaring e.g. ``user = ForeignKeyField(User)`` on Blog creates the
    'user_id' column, a 'user' descriptor resolving the related User row,
    and a reverse accessor on User (default '<blog_table>_set').
    """
    db_field = 'foreign_key'
    field_template = '%(column_type)s%(nullable)s REFERENCES %(to_table)s (%(to_pk)s)'
    def __init__(self, to, null=False, related_name=None, *args, **kwargs):
        self.to = to
        self.related_name = related_name
        # Referenced table/pk fill the REFERENCES clause of field_template.
        kwargs.update({
            'to_table': to._meta.db_table,
            'to_pk': to._meta.pk_name
        })
        super(ForeignKeyField, self).__init__(null=null, *args, **kwargs)
    def add_to_class(self, klass, name):
        # 'name' becomes the descriptor attribute; the actual db column is
        # '<name>_id'.
        self.descriptor = name
        self.name = name + '_id'
        self.model = klass
        if self.related_name is None:
            self.related_name = klass._meta.db_table + '_set'
        klass._meta.rel_fields[name] = self.name
        setattr(klass, self.descriptor, ForeignRelatedObject(self.to, self.name))
        setattr(klass, self.name, None)
        # Reverse side: <to-instance>.<related_name> yields a query of rows
        # on this model pointing back at it.
        reverse_rel = ReverseForeignRelatedObject(klass, self.name)
        setattr(self.to, self.related_name, reverse_rel)
    def lookup_value(self, lookup_type, value):
        if isinstance(value, Model):
            return value.get_pk()
        # falsy ids (0, '') normalize to None for lookups
        return value or None
    def db_value(self, value):
        if isinstance(value, Model):
            return value.get_pk()
        return value
# define a default database object in the module scope
# Shared fallback connection: models whose Meta does not name a database
# use this one (see BaseModelOptions.__init__).
database = SqliteDatabase(DATABASE_NAME)
class BaseModelOptions(object):
    """Per-model metadata container, attached to model classes as ``_meta``.

    Holds configurable options (database, db_table, ...), the field
    registry, and helpers for resolving fields and foreign-key
    relationships.
    """
    def __init__(self, model_class, options=None):
        # configurable options; default to the module-level database
        options = options or {'database': database}
        for key, value in options.items():
            setattr(self, key, value)
        self.rel_fields = {}
        self.fields = {}
        self.model_class = model_class
    def get_field_names(self):
        # Primary key first, remaining fields alphabetically.
        names = [self.pk_name]
        names.extend(f for f in sorted(self.fields) if f != self.pk_name)
        return names
    def get_field_by_name(self, name):
        try:
            return self.fields[name]
        except KeyError:
            raise AttributeError('Field named %s not found' % name)
    def get_related_field_by_name(self, name):
        # Resolve a relation name (e.g. 'user') to its column field
        # ('user_id'); None when no such relation exists.
        if name not in self.rel_fields:
            return None
        return self.fields[self.rel_fields[name]]
    def get_related_field_for_model(self, model, name=None):
        # First FK on this model pointing at *model*, optionally matching
        # the column or descriptor name; None if absent.
        for field in self.fields.values():
            if not isinstance(field, ForeignKeyField) or field.to != model:
                continue
            if name is None or name in (field.name, field.descriptor):
                return field
    def get_reverse_related_field_for_model(self, model, name=None):
        # First FK on *model* pointing back at this model; None if absent.
        for field in model._meta.fields.values():
            if not isinstance(field, ForeignKeyField) or field.to != self.model_class:
                continue
            if name is None or name in (field.name, field.descriptor):
                return field
    def rel_exists(self, model):
        return self.get_related_field_for_model(model) or \
            self.get_reverse_related_field_for_model(model)
class BaseModel(type):
    """Metaclass for Model classes.

    On class creation it: inherits selected Meta options from bases,
    builds the BaseModelOptions stored as ``_meta``, registers every Field
    attribute, guarantees a primary key, derives __repr__ from
    __unicode__, and attaches a per-model DoesNotExist exception.
    """
    # Meta options copied from base classes when a subclass does not set
    # them in its own Meta.
    inheritable_options = ['database']
    def __new__(cls, name, bases, attrs):
        # NOTE: 'cls' is rebound here to the newly created model class.
        cls = super(BaseModel, cls).__new__(cls, name, bases, attrs)
        attr_dict = {}
        meta = attrs.pop('Meta', None)
        if meta:
            attr_dict = meta.__dict__
        # Pull inheritable options from any base that already has a _meta,
        # without overriding values set explicitly in this class's Meta.
        for b in bases:
            base_meta = getattr(b, '_meta', None)
            if not base_meta:
                continue
            for (k, v) in base_meta.__dict__.items():
                if k in cls.inheritable_options and k not in attr_dict:
                    attr_dict[k] = v
        _meta = BaseModelOptions(cls, attr_dict)
        if not hasattr(_meta, 'db_table'):
            # Default table name: lower-cased class name with runs of
            # non-letters collapsed to '_'.
            _meta.db_table = re.sub('[^a-z]+', '_', cls.__name__.lower())
        setattr(cls, '_meta', _meta)
        _meta.pk_name = None
        # Register declared fields. NOTE: this loop shadows the 'name'
        # argument for the remainder of the method.
        for name, attr in cls.__dict__.items():
            if isinstance(attr, Field):
                attr.add_to_class(cls, name)
                _meta.fields[attr.name] = attr
                if isinstance(attr, PrimaryKeyField):
                    _meta.pk_name = attr.name
        if _meta.pk_name is None:
            # No explicit primary key declared -> synthesize an 'id' column.
            _meta.pk_name = 'id'
            pk = PrimaryKeyField()
            pk.add_to_class(cls, _meta.pk_name)
            _meta.fields[_meta.pk_name] = pk
        _meta.model_name = cls.__name__
        if hasattr(cls, '__unicode__'):
            # Debug-friendly repr derived from __unicode__.
            setattr(cls, '__repr__', lambda self: '<%s: %s>' % (
                _meta.model_name, self.__unicode__()))
        # Per-model exception, e.g. User.DoesNotExist.
        exception_class = type('%sDoesNotExist' % _meta.model_name, (DoesNotExist,), {})
        cls.DoesNotExist = exception_class
        return cls
class Model(object):
__metaclass__ = BaseModel
def __init__(self, *args, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
def __eq__(self, other):
return other.__class__ == self.__class__ and \
self.get_pk() and \
other.get_pk() == self.get_pk()
def get_field_dict(self):
def get_field_val(field):
field_value = getattr(self, field.name)
if not self.get_pk() and field_value is None and field.default is not None:
if callable(field.default):
field_value = field.default()
else:
field_value = field.default
setattr(self, field.name, field_value)
return (field.name, field_value)
pairs = map(get_field_val, self._meta.fields.values())
return dict(pairs)
@classmethod
def create_table(cls):
cls._meta.database.create_table(cls)
for field_name, field_obj in cls._meta.fields.items():
if isinstance(field_obj, PrimaryKeyField):
cls._meta.database.create_index(cls, field_obj.name, True)
elif isinstance(field_obj, ForeignKeyField):
cls._meta.database.create_index(cls, field_obj.name)
elif field_obj.db_index:
cls._meta.database.create_index(cls, field_obj.name)
@classmethod
def drop_table(cls, fail_silently=False):
cls._meta.database.drop_table(cls, fail_silently)
@classmethod
def select(cls, query=None):
return SelectQuery(cls, query)
@classmethod
def update(cls, **query):
return UpdateQuery(cls, **query)
@classmethod
def insert(cls, **query):
return InsertQuery(cls, **query)
@classmethod
def delete(cls, **query):
return DeleteQuery(cls, | |
include_earliest_year="all_bioactivity_records",
ic50_conversion_strategy="all_relations_half_ic50",
fit_ic50=False,
)["final"]
c_doc_uid = Counter(ds.data["doc_uid"])
x = np.vectorize(lambda uid: c_doc_uid[uid])(ds.data["doc_uid"])
y = value = to_pki(ds.data["value"])
kernel = Benchmarks2018StructuralSimilarity(source=ds).data["kernel"]
result1 = _distance_to_nth_neighbour(kernel, value)
lsp2, result2 = _n_neighbours_in_radius(kernel, value)
_min, _max = min(np.nanmin(result1), np.nanmin(result2)), max(np.nanmax(result1), np.nanmax(result2))
ax = fig.add_subplot(len(target_uids),2,2*i+1)
x = np.arange(len(result1))
mask = np.logical_not(np.isnan(result1))
ax.plot(x[mask], result1[mask])
ax.set_xlabel("Distance-sorted neighbours")
ax.set_ylabel(target_name(target_uid) + "\n\nSpearman's Rho")
ax.set_ylim((_min-.05, _max+.05))
ax = fig.add_subplot(len(target_uids),2,2*i+2)
mask = np.logical_not(np.isnan(result2))
ax.plot(lsp2[mask], result2[mask])
ax.set_xlabel("Similarity threshold")
ax.set_ylabel("Spearman's Rho")
ax.set_ylim((_min-.05, _max+.05))
fig.tight_layout()
return fig
def similar_compounds(target_uid, n_top, n_bottom, n_random, seed=43):
    """Build an HTML table of compound pairs sampled by structural similarity.

    Takes the n_bottom least similar and n_top most similar pairs plus
    n_random pairs drawn (reproducibly, via *seed*) from the middle of the
    similarity ranking, and renders uid/uid/similarity rows as HTML.
    """
    dataset = mean_warszycki_logki(
        target_uid=target_uid,
        chembl_filename="chembl_24.db",
        threshold=None,
        include_earliest_year="all_bioactivity_records",
        ic50_conversion_strategy="all_relations_half_ic50",
        fit_ic50=False,
    )["final"]
    uid = dataset.data["uid"]
    kernel = Benchmarks2018StructuralSimilarity(source=dataset).data["kernel"]
    # Strict lower triangle: each unordered pair exactly once, no diagonal.
    rows, cols = np.tril_indices(kernel.shape[0], -1)
    order = np.argsort(kernel[rows, cols])
    n_pairs = len(order)
    # Random picks come from the middle of the ranking; offsetting by
    # n_bottom keeps them disjoint from the bottom/top slices.
    middle_picks = n_bottom + np.random.RandomState(seed=seed).choice(
        n_pairs - n_top - n_bottom,
        size=n_random,
        replace=False,
    )
    keep = np.sort(np.concatenate((
        np.arange(n_bottom),
        np.arange(n_pairs - n_top, n_pairs),
        middle_picks,
    )))
    order = order[keep]
    rows, cols = rows[order], cols[order]
    sim = np.vectorize(lambda f: "~{:.4f}".format(f))(kernel[rows, cols])
    table = np.stack((uid[rows], uid[cols], sim), axis=1)
    header = np.array(["uid", "uid", "similarity"])
    return _arr_header_to_html(table, header)
def same_paper_cross_paper(target_uids):
    """One histogram panel per target: the distribution of structural
    similarity for compound pairs reported in the same paper vs pairs from
    different papers. Returns the assembled matplotlib figure."""
    fig = plt.figure(figsize=(len(target_uids)*4, 4))
    for i, target_uid in enumerate(target_uids):
        d = Benchmarks2018StructuralSimilarity(source=mean_warszycki_logki(
            target_uid=target_uid,
            chembl_filename="chembl_24.db",
            threshold=None,
            include_earliest_year="all_bioactivity_records",
            ic50_conversion_strategy="all_relations_half_ic50",
            fit_ic50=True,
        )["final"])
        kernel = d.data["kernel"]
        # Pairwise boolean matrix: True where both compounds share a doc_uid.
        same_paper = d.data["doc_uid"].reshape(1,-1) == d.data["doc_uid"].reshape(-1,1)
        cross_paper = np.logical_not(same_paper)
        # Drop self-pairs (diagonal) from the same-paper mask.
        same_paper[range(len(same_paper)),range(len(same_paper))] = False
        ax = fig.add_subplot(1,len(target_uids),i+1)
        ax.hist(kernel.ravel()[same_paper.ravel()], bins=43, label="same paper", alpha=.5, density=True)
        ax.hist(kernel.ravel()[cross_paper.ravel()], bins=43, label="cross paper", alpha=.5, density=True)
        ax.legend()
        ax.set_xlabel("Structural similarity")
        ax.set_title(target_name(target_uid))
    fig.tight_layout()
    return fig
def year_structural_pareto(target_uids):
    """Text report: for each target and each distinct non-zero publication
    year gap, the most structurally similar compound pair with that gap
    (walking all pairs in descending similarity and taking the first hit
    per gap), with uid/smiles/value/year/doc details for both compounds."""
    @njit
    def _first(arr, x):
        # Index of the first occurrence of x in arr (numba-compiled).
        for i in range(len(arr)):
            if arr[i] == x:
                return i
        raise ValueError()
    result = []
    for i, target_uid in enumerate(target_uids):
        result.append("TARGET: {}".format(target_name(target_uid)))
        result.append("")
        d = Benchmarks2018StructuralSimilarity(source=mean_warszycki_logki(
            target_uid=target_uid,
            chembl_filename="chembl_24.db",
            threshold=None,
            include_earliest_year="all_bioactivity_records",
            ic50_conversion_strategy="all_relations_half_ic50",
            fit_ic50=True,
        )["final"])
        kernel = d.data["kernel"]
        year = d.data["year"]
        # Flattened pair indices ordered from most to least similar.
        idx = np.flip(np.argsort(kernel.ravel()))
        delta_year = np.abs(year.reshape(-1,1) - year.reshape(1,-1)).ravel()[idx]
        for dy in sorted(set(delta_year.ravel())-set([0,0.])):
            _idx = idx[_first(delta_year, dy)]
            # Recover 2-D pair coordinates from the flattened index.
            # NOTE(review): this 'i, j' shadows the enumerate index above.
            i, j = _idx // kernel.shape[0], _idx % kernel.shape[0]
            result.append("SIMILARITY: {:.3f}, DELTA YEAR: {}".format(
                kernel[i,j],
                int(dy)
            ))
            for m in (i,j):
                result.append("UID: {}, SMILES: {}, VALUE: {}, YEAR: {}, DOC_UID: {}".format(
                    d.data["uid"][m],
                    d.data["smiles"][m],
                    d.data["value"][m],
                    int(d.data["year"][m]),
                    d.data["doc_uid"][m],
                ))
            result.append("")
    return '\n'.join(result) + '\n'
def aaaiiaii(value, groups, kernel, time_split):
    """Collect similarity statistics over test->train compound pairs.

    A pair (i, j) counts when groups[i] > groups[j], or when the groups
    merely differ if *time_split* is False (with a time split, the test
    compound must come strictly after its training partner).

    Parameters
    ----------
    value : 0/1 array (1 = active, 0 = inactive); used both to bucket
        pairs and to index the nearest-neighbour table columns.
    groups : per-sample split/group labels (years under a time split).
    kernel : (n, n) structural-similarity matrix.
    time_split : restrict pairs to later-group -> earlier-group only.

    Returns a dict with:
      "aa"/"ai"/"ia"/"ii": (pair similarities, groups[i] per pair) per
          category -- first letter is i's activity, second is j's
          (idx = 3 - (2*value[i] + value[j]) maps (1,1)->aa ... (0,0)->ii);
      "nearest_i"/"nearest_a": per-row best similarity to an inactive /
          active training sample (NaN when no such pair exists).
    """
    from numba import njit  # unused 'jit' import removed
    # BUG FIX: np.float / np.int are aliases of the builtins that were
    # removed in NumPy 1.24 -- use explicit dtypes instead.
    result_all = np.zeros((kernel.size, 4), dtype=np.float64)
    result_all_groups = np.zeros((kernel.size, 4), dtype=np.float64)
    result_all_counter = np.zeros(4, dtype=np.int64)
    result_nearest = np.empty((kernel.shape[0], 2), dtype=np.float64)
    result_nearest.fill(np.nan)
    @njit
    def f(value, groups, kernel, result_all, result_all_groups, result_all_counter, result_nearest):
        # time_split is captured as a compile-time constant by numba.
        for i in range(kernel.shape[0]):
            for j in range(kernel.shape[1]):
                if groups[i] > groups[j] or (groups[i] < groups[j] and not time_split):  # test to train
                    idx = 3 - (2 * int(value[i]) + int(value[j]))  # aa ai ia ii
                    result_all[result_all_counter[idx], idx] = kernel[i, j]
                    result_all_groups[result_all_counter[idx], idx] = groups[i]
                    result_all_counter[idx] += 1
                    # track row i's best similarity to a train sample of
                    # activity value[j]
                    if np.isnan(result_nearest[i, value[j]]) or kernel[i, j] > result_nearest[i, value[j]]:
                        result_nearest[i, value[j]] = kernel[i, j]
    f(value, groups, kernel, result_all, result_all_groups, result_all_counter, result_nearest)
    return {
        "aa": (result_all[:result_all_counter[0],0], result_all_groups[:result_all_counter[0],0]),
        "ai": (result_all[:result_all_counter[1],1], result_all_groups[:result_all_counter[1],1]),
        "ia": (result_all[:result_all_counter[2],2], result_all_groups[:result_all_counter[2],2]),
        "ii": (result_all[:result_all_counter[3],3], result_all_groups[:result_all_counter[3],3]),
        "nearest_i": result_nearest[:,0],
        "nearest_a": result_nearest[:,1],
    }
def splits_analysis(target_uids):
    """Compare six train/test split strategies per target.

    For each target builds a 6x2 figure -- one row per split (paper,
    balanced agglomerative clustering, spectral clustering, cross
    validation, scaffold, time) with a nearest-neighbour similarity
    histogram and an active-vs-inactive nearest-similarity scatter --
    and accumulates a text report of the bias score aa - ai + ii - ia
    (MUV-style metric, presumably; computed from category means).
    Returns (report_text, fig_for_target_1, fig_for_target_2, ...).
    """
    def plot(value, groups, kernel, axes, split_label, time_split=False):
        # nearest_a / nearest_i: each compound's best similarity to an
        # active / inactive training sample (NaN when none exists).
        dct = aaaiiaii(value, groups, kernel, time_split)
        not_nan_mask = np.logical_not(np.logical_or(
            np.isnan(dct["nearest_a"]),
            np.isnan(dct["nearest_i"]),
        ))
        # aa: actives' nearest active, ai: actives' nearest inactive,
        # ia: inactives' nearest active, ii: inactives' nearest inactive.
        aa, ai, ia, ii = (
            dct["nearest_a"][not_nan_mask][value[not_nan_mask]==1],
            dct["nearest_i"][not_nan_mask][value[not_nan_mask]==1],
            dct["nearest_a"][not_nan_mask][value[not_nan_mask]==0],
            dct["nearest_i"][not_nan_mask][value[not_nan_mask]==0],
        )
        histtype, linewidth = "step", 3
        axes[0].hist(
            aa, bins=43, label="AA",
            density=True, histtype=histtype, linewidth=linewidth,
        )
        axes[0].hist(
            ai, bins=43, label="AI",
            density=True, histtype=histtype, linewidth=linewidth,
        )
        axes[0].hist(
            ia, bins=43, label="IA",
            density=True, histtype=histtype, linewidth=linewidth,
        )
        axes[0].hist(
            ii, bins=43, label="II",
            density=True, histtype=histtype, linewidth=linewidth,
        )
        axes[0].set_xlim((0.,1.))
        axes[0].set_xlabel("Nearest neighbour similarity")
        axes[0].set_ylabel(split_label + '\n')
        axes[0].legend()
        S = 8
        # Scatter: per-compound nearest active vs nearest inactive, with
        # class means highlighted (red ring + x) and the y=x reference line.
        axes[1].scatter(ia, ii, label="inactive", c="green", s=S, alpha=.3)
        axes[1].scatter(aa, ai, label="active", c="xkcd:sky blue", s=S, alpha=.3)
        axes[1].scatter(ia.mean(), ii.mean(), facecolors="none", edgecolors='red', s=150)
        axes[1].scatter(ia.mean(), ii.mean(), c="green", marker="x", s=43)
        axes[1].scatter(aa.mean(), ai.mean(), facecolors="none", edgecolors="red", s=150)
        axes[1].scatter(aa.mean(), ai.mean(), c="blue", marker="x", s=43)
        axes[1].plot([0.2, 0.9], [0.2, 0.9])
        axes[1].set_aspect("equal")
        axes[1].legend()
        axes[1].set_xlabel("Nearest active similarity")
        axes[1].set_ylabel("Nearest inactive similarity")
        return [np.mean(x) for x in (aa, ai, ia, ii)]
    figs = []
    muv_result = []
    for target_uid in target_uids:
        muv_result.append(target_name(target_uid))
        d = mean_warszycki_logki(
            target_uid=target_uid,
            chembl_filename="chembl_24.db",
            threshold=2.,
            include_earliest_year="all_bioactivity_records",
            ic50_conversion_strategy="all_relations_half_ic50",
            fit_ic50=True,
        )["final"]
        value = d.data["value"]
        kd = Benchmarks2018StructuralSimilarity(source=d)
        kernel = kd.data["kernel"]
        bac_groups = BalancedAgglomerativeClustering(
            source=kd,
            kernel="kernel",
            n_groups=5,
        ).data["groups"]
        cv_groups = CrossValidation(
            source=d,
            n_groups=5,
            seed=43,
        ).data["groups"]
        spectral_groups = SpectralClustering(
            source=kd,
            kernel="kernel",
            n_groups=5,
        ).data["groups"]
        scaffold_groups = MurckoScaffoldSplit(
            source=d,
            generic=True,
            isomeric=False,
        ).data["groups"]
        paper_groups = PaperSplit(source=d).data["groups"]
        year_groups = d.data["year"]
        fig = plt.figure(figsize=(8,24))
        # running subplot index stored on the figure for _axes() below
        fig.axes_counter = 0
        def _axes():
            # next pair of side-by-side axes in the 6x2 grid
            axes = []
            for _ in range(2):
                fig.axes_counter += 1
                axes.append(fig.add_subplot(6,2,fig.axes_counter))
            return axes
        for groups, split_label in (
            (paper_groups, "paper split"),
            (bac_groups, "balanced agglomerative clustering"),
            (spectral_groups, "spectral clustering"),
            (cv_groups, "cross validation"),
            (scaffold_groups, "scaffold split"),
        ):
            aa, ai, ia, ii = plot(value, groups, kernel, _axes(), split_label)
            muv = aa - ai + ii - ia
            muv_result.append("{:.3f} - {:.3f} + {:.3f} - {:.3f} = {:.3f}".format(aa, ai, ii, ia, muv))
        # time split: only strictly-later -> earlier pairs count
        aa, ai, ia, ii = plot(value, year_groups, kernel, _axes(), split_label="time split", time_split=True)
        muv = aa - ai + ii - ia
        muv_result.append("{:.3f} - {:.3f} + {:.3f} - {:.3f} = {:.3f}".format(aa, ai, ii, ia, muv))
        fig.tight_layout()
        figs.append(fig)
    return tuple(['\n'.join(muv_result)+'\n'] + figs)
def splits_analysis_3_columns(target_uids):
    """Three-column variant of splits_analysis: per split row it adds an
    all-pairs similarity histogram (AA/AI/IA/II) before the
    nearest-neighbour histogram and the nearest-similarity scatter.
    Returns (report_text, fig_for_target_1, ...)."""
    def plot(value, groups, kernel, axes, split_label, time_split=False):
        dct = aaaiiaii(value, groups, kernel, time_split)
        # Column 1: ALL test->train pair similarities per category.
        for k in ["aa", "ai", "ia", "ii"]:
            axes[0].hist(
                dct[k][0], bins=43, label=k.upper(),
                density=True, histtype="step", linewidth=3,
            )
        axes[0].set_xlim((0.,1.))
        axes[0].set_xlabel("All pairs similarity")
        axes[0].set_ylabel(split_label + '\n')
        axes[0].legend()
        not_nan_mask = np.logical_not(np.logical_or(
            np.isnan(dct["nearest_a"]),
            np.isnan(dct["nearest_i"]),
        ))
        # aa: actives' nearest active, ai: actives' nearest inactive,
        # ia: inactives' nearest active, ii: inactives' nearest inactive.
        aa, ai, ia, ii = (
            dct["nearest_a"][not_nan_mask][value[not_nan_mask]==1],
            dct["nearest_i"][not_nan_mask][value[not_nan_mask]==1],
            dct["nearest_a"][not_nan_mask][value[not_nan_mask]==0],
            dct["nearest_i"][not_nan_mask][value[not_nan_mask]==0],
        )
        histtype, linewidth = "step", 3
        # Column 2: nearest-neighbour similarity distributions.
        axes[1].hist(
            aa, bins=43, label="AA",
            density=True, histtype=histtype, linewidth=linewidth,
        )
        axes[1].hist(
            ai, bins=43, label="AI",
            density=True, histtype=histtype, linewidth=linewidth,
        )
        axes[1].hist(
            ia, bins=43, label="IA",
            density=True, histtype=histtype, linewidth=linewidth,
        )
        axes[1].hist(
            ii, bins=43, label="II",
            density=True, histtype=histtype, linewidth=linewidth,
        )
        axes[1].set_xlim((0.,1.))
        axes[1].set_xlabel("Nearest neighbour similarity")
        axes[1].legend()
        S = 8
        # Column 3: nearest active vs nearest inactive scatter with class
        # means highlighted and the y=x reference line.
        axes[2].scatter(ia, ii, label="inactive", c="green", s=S, alpha=.3)
        axes[2].scatter(aa, ai, label="active", c="xkcd:sky blue", s=S, alpha=.3)
        axes[2].scatter(ia.mean(), ii.mean(), facecolors="none", edgecolors='red', s=150)
        axes[2].scatter(ia.mean(), ii.mean(), c="green", marker="x", s=43)
        axes[2].scatter(aa.mean(), ai.mean(), facecolors="none", edgecolors="red", s=150)
        axes[2].scatter(aa.mean(), ai.mean(), c="blue", marker="x", s=43)
        axes[2].plot([0.2, 0.9], [0.2, 0.9])
        axes[2].set_aspect("equal")
        axes[2].legend()
        axes[2].set_xlabel("Nearest active similarity")
        axes[2].set_ylabel("Nearest inactive similarity")
        return [np.mean(x) for x in (aa, ai, ia, ii)]
    figs = []
    muv_result = []
    for target_uid in target_uids:
        muv_result.append(target_name(target_uid))
        d = mean_warszycki_logki(
            target_uid=target_uid,
            chembl_filename="chembl_24.db",
            threshold=2.,
            include_earliest_year="all_bioactivity_records",
            ic50_conversion_strategy="all_relations_half_ic50",
            fit_ic50=True,
        )["final"]
        value = d.data["value"]
        kd = Benchmarks2018StructuralSimilarity(source=d)
        kernel = kd.data["kernel"]
        bac_groups = BalancedAgglomerativeClustering(
            source=kd,
            kernel="kernel",
            n_groups=5,
        ).data["groups"]
        cv_groups = CrossValidation(
            source=d,
            n_groups=5,
            seed=43,
        ).data["groups"]
        spectral_groups = SpectralClustering(
            source=kd,
            kernel="kernel",
            n_groups=5,
        ).data["groups"]
        scaffold_groups = MurckoScaffoldSplit(
            source=d,
            generic=True,
            isomeric=False,
        ).data["groups"]
        paper_groups = PaperSplit(source=d).data["groups"]
        year_groups = d.data["year"]
        fig = plt.figure(figsize=(12,24))
        # running subplot index stored on the figure for _axes() below
        fig.axes_counter = 0
        def _axes():
            # next triple of side-by-side axes in the 6x3 grid
            axes = []
            for _ in range(3):
                fig.axes_counter += 1
                axes.append(fig.add_subplot(6,3,fig.axes_counter))
            return axes
        for groups, split_label in (
            (paper_groups, "paper split"),
            (bac_groups, "balanced agglomerative clustering"),
            (spectral_groups, "spectral clustering"),
            (cv_groups, "cross validation"),
            (scaffold_groups, "scaffold split"),
        ):
            aa, ai, ia, ii = plot(value, groups, kernel, _axes(), split_label)
            muv = aa - ai + ii - ia
            muv_result.append("{:.3f} - {:.3f} + {:.3f} - {:.3f} = {:.3f}".format(aa, ai, ii, ia, muv))
        # time split: only strictly-later -> earlier pairs count
        aa, ai, ia, ii = plot(value, year_groups, kernel, _axes(), split_label="time split", time_split=True)
        muv = aa - ai + ii - ia
        muv_result.append("{:.3f} - {:.3f} + {:.3f} - {:.3f} = {:.3f}".format(aa, ai, ii, ia, muv))
        fig.tight_layout()
        figs.append(fig)
    return tuple(['\n'.join(muv_result)+'\n'] + figs)
def splits_analysis_2(target_uids):
    """One panel per target: overlaid distributions of each compound's
    nearest-training-neighbour similarity (max over nearest active /
    nearest inactive) under every split strategy; the time split is drawn
    with a thicker line, legend only on the last panel. Returns the
    figure."""
    fig = plt.figure(figsize=(4*(len(target_uids)+1),4))
    for i, target_uid in enumerate(target_uids):
        d = mean_warszycki_logki(
            target_uid=target_uid,
            chembl_filename="chembl_24.db",
            threshold=2.,
            include_earliest_year="all_bioactivity_records",
            ic50_conversion_strategy="all_relations_half_ic50",
            fit_ic50=True,
        )["final"]
        value = d.data["value"]
        kd = Benchmarks2018StructuralSimilarity(source=d)
        kernel = kd.data["kernel"]
        bac_groups = BalancedAgglomerativeClustering(
            source=kd,
            kernel="kernel",
            n_groups=5,
        ).data["groups"]
        cv_groups = CrossValidation(
            source=d,
            n_groups=5,
            seed=43,
        ).data["groups"]
        spectral_groups = SpectralClustering(
            source=kd,
            kernel="kernel",
            n_groups=5,
        ).data["groups"]
        scaffold_groups = MurckoScaffoldSplit(
            source=d,
            generic=True,
            isomeric=False,
        ).data["groups"]
        paper_groups = PaperSplit(source=d).data["groups"]
        year_groups = d.data["year"]
        ax = fig.add_subplot(1,len(target_uids),i+1)
        for groups, label in (
            (paper_groups, "paper split"),
            (bac_groups, "balanced agglomerative clustering"),
            (spectral_groups, "spectral clustering"),
            (cv_groups, "cross validation"),
            (scaffold_groups, "scaffold split"),
        ):
            dct = aaaiiaii(value, groups, kernel, time_split=False)
            # best similarity to ANY training sample, active or inactive
            x = np.maximum(dct["nearest_a"], dct["nearest_i"])
            ax.hist(
                x, bins=43, label=label,
                density=True, histtype="step", linewidth=1,
            )
        dct = aaaiiaii(value, year_groups, kernel, time_split=True)
        x = np.maximum(dct["nearest_a"], dct["nearest_i"])
        # a time split can leave compounds with no earlier neighbour (NaN)
        x = x[np.logical_not(np.isnan(x))]
        ax.hist(
            x, bins=43, label="time split",
            density=True, histtype="step", linewidth=3,
        )
        ax.set_xlabel("Nearest neighbour similarity")
        ax.set_title(target_name(target_uid))
        if i == len(target_uids) - 1:
            ax.legend(fontsize="small", bbox_to_anchor=(1.04,1))
    fig.tight_layout()
    return fig
def simplest_dataset_hist(mus):
fig = plt.figure(figsize=(4*len(mus), 8))
alpha = .6
for i, mu in enumerate(mus):
ax = fig.add_subplot(2,len(mus),i+1)
xs = np.linspace(-4.3,4.3,437)
ax.fill_between(
xs, norm(loc=mu).pdf(xs),
label='"inactive"', alpha=alpha,
)
ax.fill_between(
xs, norm(loc=-mu).pdf(xs),
label='"active"', alpha=alpha,
)
ax.set_xlabel("mean: {:.1f}".format(mu))
ax.set_ylim((0.,0.6))
ax.legend()
if i == 0:
ax.set_ylabel("Normal\n")
ax = fig.add_subplot(2,len(mus),len(mus)+i+1)
xs = np.linspace(-2.1,2.1,437)
ax.fill_between(
xs, uniform(loc=mu-1, scale=2.).pdf(xs),
label='"inactive"', alpha=alpha,
)
ax.fill_between(
xs, uniform(loc=-mu-1, scale=2.).pdf(xs),
label='"active"', alpha=alpha,
)
ax.set_xlabel("mean: {:.1f}".format(mu))
ax.set_ylim((0.,0.7))
| |
% dvmrel.mat_type
raise NotImplementedError(msg)
elif card_type == 'DVGRID':
for dvgrid_id in ids:
dvgrids = model.dvgrids[dvgrid_id]
for dvgrid in dvgrids:
desvars_used.add(dvgrid.desvar_id)
nids_used.add(dvgrid.nid)
cids_used.add(dvgrid.cid)
elif card_type == 'TF':
for tf_id in ids:
tfs = model.transfer_functions[tf_id]
for transfer_function in tfs:
nids_used.update(transfer_function.nids)
elif card_type in ['NSM', 'NSM1', 'NSML', 'NSML1']:
_store_nsm(model, ids, pids_used)
elif card_type in ['POINTAX', 'AXIC', 'RINGAX']:
pass
#for eid in ids:
#elem = model.plotels[eid]
#nids_used.update(elem.node_ids)
elif card_type in ['PBRSECT', 'PBMSECT']:
for pid in ids:
prop = model.properties[pid]
if prop.outp:
sets_used.add(prop.outp)
if prop.brps:
for unused_key, value in prop.brps.items():
sets_used.add(value)
#if prop.cores:
#for key, value in prop.cores.items():
#pids_used.add(value)
elif card_type == 'CYJOIN':
for idi in ids:
cyjoin = model.cyjoin[idi]
nids_used.update(cyjoin.nids)
elif card_type == 'TABLEHT':
for idi in ids:
table = model.tables[idi]
tableh1_ids = table.y.tolist()
tableh1_used.update(tableh1_ids)
del tableh1_ids
elif card_type == 'PCONV':
for idi in ids:
pconv = model.convection_properties[idi]
if pconv.tid is not None:
tableht_used.add(pconv.tid)
elif card_type == 'CONV':
for idi in ids:
bcs = model.bcs[idi]
for conv in bcs:
if conv.type != 'CONV':
continue
pconv_used.add(conv.pconid)
elif card_type == 'BLSEG':
for idi in ids:
blseg = model.blseg[idi]
# line_id
nids_used.update(blseg.nodes)
#print(blseg.get_stats())
elif card_type == 'BCONP':
for idi in ids:
bconp = model.bconp[idi]
# master
# slave
# ???
#print(bconp.get_stats())
cids_used.add(bconp.cid)
friction_ids_used.add(bconp.friction_id)
#elif card_type == 'FORCEAX':
#pass
#ring_id
#sid
elif card_type in not_implemented_types:
model.log.warning(f'skipping {card_type}')
else:
raise NotImplementedError(card_type)
#for pid, prop in model.properties.items():
#prop = model.properties[pid]
#if prop.type in no_materials:
#continue
#elif prop.type == 'PSHELL':
#mids_used.extend([mid for mid in prop.material_ids if mid is not None])
#elif prop.type == 'PCONEAX':
#mids_used.extend([mid for mid in model.Mids() if mid is not None])
#elif prop.type in prop_mid:
#mids_used.append(prop.Mid())
#elif prop.type in ['PCOMP', 'PCOMPG', 'PCOMPS']:
#mids_used.extend(prop.Mids())
#elif prop.type == 'PBCOMP':
#mids_used.append(prop.Mid())
#mids_used.extend(prop.Mids())
#else:
#raise NotImplementedError(prop)
remove_desvars = False
_remove(
model,
nids_used, cids_used,
pids_used, pids_mass_used,
mids_used,
spcs_used, mpcs_used,
pconv_used, tableht_used, tableh1_used,
desvars_used,
remove_nids=remove_nids,
remove_cids=remove_cids,
remove_pids=remove_pids,
remove_mids=remove_mids,
remove_spcs=remove_spcs, remove_mpcs=remove_mpcs,
unused_remove_desvars=remove_desvars,
)
def _store_elements(card_type, model, ids, nids_used, pids_used, mids_used, cids_used):
if card_type in ['CTETRA', 'CPENTA', 'CPYRAM', 'CHEXA', 'CHACAB']:
for eid in ids:
elem = model.elements[eid]
nids_used.update(elem.node_ids)
pids_used.add(elem.Pid())
elif card_type in ['CELAS1', 'CDAMP1', 'CVISC', 'CDAMP5']:
for eid in ids:
elem = model.elements[eid]
nids_used.update(elem.node_ids)
pids_used.add(elem.Pid())
elif card_type in ['CELAS2', 'CDAMP2']:
for eid in ids:
elem = model.elements[eid]
nids_used.update(elem.node_ids)
elif card_type in ['CELAS3', 'CDAMP3']:
for eid in ids:
elem = model.elements[eid]
nids_used.update(elem.node_ids)
pids_used.add(elem.Pid())
elif card_type in ['CELAS4', 'CDAMP4', 'GENEL']:
for eid in ids:
elem = model.elements[eid]
nids_used.update(elem.node_ids)
elif card_type in ['CTRIA3', 'CQUAD4', 'CTRIA6', 'CTRIAR', 'CQUAD8', 'CQUADR',
'CTRIAX', 'CQUADX', 'CQUAD']:
for eid in ids:
elem = model.elements[eid]
nids_used.update(elem.node_ids)
pids_used.add(elem.Pid())
if isinstance(elem.theta_mcid, int):
cids_used.add(elem.theta_mcid)
elif card_type in ['CTRIAX6', ]:
for eid in ids:
elem = model.elements[eid]
nids_used.update(elem.node_ids)
mids_used.add(elem.Mid())
elif card_type in ['CSHEAR', 'CTUBE']:
for eid in ids:
elem = model.elements[eid]
nids_used.update(elem.node_ids)
pids_used.add(elem.Pid())
elif card_type in ['CPLSTN3', 'CPLSTN4', 'CPLSTN6', 'CPLSTN8',
'CPLSTS3', 'CPLSTS4', 'CPLSTS6', 'CPLSTS8',
'CQUADX4', 'CQUADX8', 'CTRIAX6',
'CTRAX3', 'CTRAX6']:
for eid in ids:
elem = model.elements[eid]
nids_used.update(elem.node_ids)
pids_used.add(elem.Pid())
elif card_type in ['CROD', 'CRAC2D', 'CRAC3D']:
for eid in ids:
elem = model.elements[eid]
nids_used.update(elem.node_ids)
pids_used.add(elem.Pid())
elif card_type in ['CONROD']:
for eid in ids:
elem = model.elements[eid]
nids_used.update(elem.node_ids)
pids_used.add(elem.Mid())
elif card_type == 'CCONEAX':
for eid in ids:
elem = model.elements[eid]
pids_used.add(elem.Pid())
elif card_type in ['CBAR', 'CBEAM', 'CBEND']:
for eid in ids:
elem = model.elements[eid]
nids_used.update(elem.node_ids)
pids_used.add(elem.Pid())
if elem.g0 is not None:
assert isinstance(elem.g0, int), elem.g0
nids_used.add(elem.g0)
elif card_type == 'CBEAM3':
for eid in ids:
elem = model.elements[eid]
nids_used.add(elem.Ga())
nids_used.add(elem.Gb())
if elem.gc is not None:
nids_used.add(elem.gc)
pids_used.add(elem.Pid())
if elem.g0 is not None:
assert isinstance(elem.g0, int), elem.g0
elif card_type == 'CFAST':
for eid in ids:
elem = model.elements[eid]
nids_used.update(elem.node_ids)
pids_used.add(elem.Pid())
elif card_type == 'CGAP':
for eid in ids:
elem = model.elements[eid]
nids_used.update(elem.node_ids)
pids_used.add(elem.Pid())
if elem.g0 is not None:
assert isinstance(elem.g0, int), elem.g0
nids_used.add(elem.G0())
elif card_type == 'CBUSH':
for eid in ids:
elem = model.elements[eid]
elem = model.elements[eid]
nids_used.update(elem.node_ids)
if elem.g0 is not None:
assert isinstance(elem.g0, int), elem.g0
nids_used.add(elem.G0())
pids_used.add(elem.Pid())
if elem.cid is not None:
cids_used.add(elem.Cid())
elif card_type in ['CBUSH1D', 'CBUSH2D']:
for eid in ids:
elem = model.elements[eid]
nids_used.update(elem.node_ids)
pids_used.add(elem.Pid())
cids_used.add(elem.Cid())
else:
raise NotImplementedError(card_type)
def _store_nsm(model, ids, pids_used):
"""helper for ``remove_unused``"""
for nsm_id in ids:
nsms = model.nsms[nsm_id]
for nsm in nsms:
idsi = nsm.ids
if nsm.nsm_type in ['PROD', 'PBARL', 'PBEAML',
'PSHELL', 'PCOMP', ]:
if len(idsi) == 1 and idsi[0] == 'ALL':
idsi = list(model.properties.keys())
#raise NotImplementedError('found ALL...\n%s' % str(nsm))
pids_used.update(idsi)
elif nsm.nsm_type in ['CONROD', 'ELEMENT']:
# we skip this because we assume all elements are used
#if len(idsi) == 1 and idsi[0] == 'ALL':
#raise NotImplementedError('found ALL...\n%s' % str(nsm))
#eids_used.update(idsi)
pass
else:
msg = 'found nsm_type=%r...\n%s' % (nsm.nsm_type, str(nsm))
raise NotImplementedError(msg)
def _store_loads(model, unused_card_type, unused_ids, nids_used, eids_used, cids_used):
"""helper for ``remove_unused``"""
for loads in model.loads.values():
for load in loads:
if load.type in ['FORCE', 'MOMENT']:
nids_used.add(load.node_id)
cids_used.add(load.Cid())
elif load.type in ['FORCE1', 'FORCE2', 'MOMENT1', 'MOMENT2']:
nids_used.update(load.node_ids)
elif load.type == 'GRAV':
cids_used.add(load.Cid())
elif load.type == 'RANDPS':
pass
elif load.type == 'PLOAD':
nids_used.update(load.node_ids)
elif load.type == 'PLOAD1':
#eid = integer(card, 2, 'eid')
pass
elif load.type == 'PLOAD2':
#eids_used.update(load.element_ids)
pass
elif load.type == 'PLOAD4':
# eids, g1, g34
cids_used.add(load.Cid())
elif load.type == 'DEFORM':
eids_used.add(load.Eid())
elif load.type == 'SPCD':
nids_used.update(load.node_ids)
elif load.type == 'GMLOAD':
cids_used.add(load.Cid())
elif load.type in ['RFORCE', 'RFORCE1']:
nids_used.add(load.node_id)
cids_used.add(load.Cid())
elif load.type == 'TEMP':
nids_used.update(list(load.temperatures.keys()))
elif load.type == 'ACCEL':
# nids?
cids_used.add(load.Cid())
elif load.type == 'ACCEL1':
# nids?
cids_used.add(load.Cid())
elif load.type in ['QBDY1', 'QBDY2', 'QBDY3', 'QHBDY']:
pass
#'QBDY1', 'QBDY2', 'QBDY3', 'QHBDY', 'PLOADX1
elif load.type in ['PLOADX1']:
nids_used.update(load.node_ids)
elif load.type in ['SLOAD']:
nids_used.update(load.node_ids)
elif load.type in ['LOAD', 'LSEQ', 'LOADCYN']:
pass
elif load.type in ['QVOL', 'TEMPRB']:
# eids
pass
elif load.type in ['TEMPAX']:
pass # not done...
else:
raise NotImplementedError(load)
def _store_dresp1(model, ids, nids_used, pids_used):
"""helper for ``remove_unused``"""
for dresp_id in ids:
dresp = model.dresps[dresp_id]
if dresp.property_type in ['PSHELL', 'PCOMP', 'PCOMPG', 'PBAR', 'PBARL', 'PBEAM',
'PROD', 'PDAMP', 'PVISC', 'PTUBE', 'PSHEAR', 'PELAS',
'PSOLID', 'PBEAML']:
pids_used.update(dresp.atti_values())
elif dresp.property_type == 'ELEM':
if dresp.response_type in ['STRESS', 'FRSTRE',
'CFAILURE',
'TFORC', 'FRFORC']:
#eids_used.update(dresp.atti_values())
pass
else:
msg = (
str(dresp) + 'region=%r property_type=%r response_type=%r, '
'atta=%r attb=%s atti=%s' % (
dresp.region, dresp.property_type, dresp.response_type,
dresp.atta, dresp.attb, dresp.atti))
raise NotImplementedError(msg)
#elif dresp.property_type == 'STRESS':
elif dresp.property_type is None:
if dresp.response_type in ['WEIGHT', 'EIGN', 'VOLUME', 'LAMA', 'CEIG',
'FREQ', 'STABDER']:
pass
elif dresp.response_type in ['DISP', 'FRDISP', 'TDISP', 'RMSDISP', 'PSDDISP',
'TVELO', 'FRVELO', 'RMSVELO',
'TACCL', 'FRACCL', 'RMSACCL',
'SPCFORCE', 'TSPCF', 'FRSPCF',
'FORCE', 'TFORC', 'FRFORC']:
nids_used.update(dresp.atti)
elif dresp.response_type in ['FLUTTER', 'TRIM', 'DIVERG']:
# flutter_id / trim_id
pass
else:
msg = (
str(dresp) + 'region=%r property_type=%r response_type=%r '
'atta=%r attb=%s atti=%s' % (
dresp.region, dresp.property_type, dresp.response_type,
dresp.atta, dresp.attb, dresp.atti))
raise NotImplementedError(msg)
else:
msg = (
str(dresp) + 'region=%r property_type=%r response_type=%r '
'atta=%r attb=%s atti=%s' % (
dresp.region, dresp.property_type, dresp.response_type,
dresp.atta, dresp.attb, dresp.atti))
raise NotImplementedError(msg)
def _store_masses(card_type, model, ids, nids_used, pids_mass_used, cids_used) -> None:
"""handles masses"""
if card_type in ['CONM1', 'CONM2']:
for eid in ids:
elem = model.masses[eid]
nids_used.add(elem.Nid())
cids_used.add(elem.Cid())
elif card_type in ['CMASS1', 'CMASS3']:
for eid in ids:
elem = model.masses[eid]
pids_mass_used.add(elem.Pid())
nids_used.update(elem.node_ids)
elif card_type in ['CMASS2', 'CMASS4']:
for eid in ids:
elem = model.masses[eid]
nids_used.update(elem.node_ids)
else:
raise NotImplementedError(card_type)
def _remove(model: BDF,
nids_used, cids_used,
pids_used, pids_mass_used, mids_used, spcs_used, mpcs_used,
pconv_used, tableht_used, tableh1_used,
unused_desvars_used,
remove_nids=True, remove_cids=True,
remove_pids=True, remove_mids=True,
remove_spcs=True, remove_mpcs=True,
unused_remove_desvars=True):
"""actually removes the cards"""
nids = set(model.nodes.keys())
pids = set(model.properties.keys())
pids_mass = set(model.properties_mass.keys())
cids = set(model.coords.keys())
mids = set(model.materials.keys())
spcs = set(model.spcs.keys()) # spcadds?
mpcs = set(model.mpcs.keys()) # mpcadds?
nids_to_remove = list(nids - nids_used)
pids_to_remove = list(pids - pids_used)
pids_mass_to_remove = list(pids_mass - pids_mass_used)
mids_to_remove = list(mids - mids_used)
cids_to_remove = list(cids - cids_used)
#for subcase in model.subcases:
# if 'SPC' in subcase:
# value = subcase['SPC']
# spcs_used.add(value)
# if 'MPC' in subcase:
# value = subcase['MPC']
# mpcs_used.add(value)
# #if 'LOAD' in subcase:
# #value = subcase['LOAD']
# #loads_used.add(value)
spcs_to_remove = list(spcs - spcs_used)
mpcs_to_remove = list(mpcs - mpcs_used)
if 0 in cids_to_remove:
cids_to_remove.remove(0)
if remove_nids and nids_to_remove:
for nid in nids_to_remove:
del model.nodes[nid]
nids_to_remove.sort()
model.log.debug('removed nodes %s' % nids_to_remove)
if remove_cids and cids_to_remove:
for cid in cids_to_remove:
del model.coords[cid]
cids_to_remove.sort()
model.log.debug('removing coords %s' % cids_to_remove)
if remove_pids and pids_to_remove:
for pid in pids_mass_to_remove:
del model.properties_mass[pid]
pids_mass_to_remove.sort()
model.log.debug('removing properties_mass %s' % pids_mass_to_remove)
| |
key in opts:
opts[key] = value
else:
raise ValueError('%s is not a valid option'%(key))
if opts['use_jd']:
# Set the recycling strategy
if opts['recycle_type'] == 'num_recycling':
recycle_type = TACS.NUM_RECYCLE
else:
recycle_type = TACS.SUM_TWO
problem.addFrequencyConstraint(opts['sigma'], opts['num_eigs'],
opts['ks_weight'], opts['offset'],
opts['scale'], opts['max_jd_size'],
opts['eig_tol'], opts['use_jd'],
opts['fgmres_size'], opts['eig_rtol'],
opts['eig_atol'], opts['num_recycle'],
opts['recycle_type'],
opts['track_eigen_iters'])
else: # use the Lanczos method
problem.addFrequencyConstraint(opts['sigma'], opts['num_eigs'],
opts['ks_weight'], opts['offset'],
opts['scale'],
opts['max_lanczos'], opts['tol'], 0,
0, 0, 0, 0, TACS.SUM_TWO,
opts['track_eigen_iters'])
return
def densityBasedRefine(forest, assembler, index=0,
                       lower=0.05, upper=0.5, reverse=False,
                       min_lev=0, max_lev=TMR.MAX_LEVEL):
    """
    Apply a density-based refinement criteria.

    This function takes in a Quad or OctForest that has been used for analysis and its
    corresponding Assembler object. It then uses the data set in the constitutive object
    to extract the density within each element. If the density falls below the bound
    *lower* the element is coarsened, if the density exceeds *upper* the element is
    refined. If *reverse* is set, this scheme is reversed so low design values are
    refined. The refinement is applied directly to the forest.

    Args:
        forest (QuadForest or OctForest): OctForest or QuadForest to refine
        assembler (Assembler): The TACS.Assembler object associated with forest
        index (int): The index used in the call to getDVOutputValue
        lower (float): the lower limit used for coarsening
        upper (float): the upper limit used for refinement
        reverse (bool): Reverse the refinement scheme
        min_lev (int): Minimum refinement level
        max_lev (int): Maximum refinement level
    """
    # Create the per-element refinement array: +1 refine, -1 coarsen, 0 keep
    num_elems = assembler.getNumElements()
    refine = np.zeros(num_elems, dtype=np.int32)

    # Get the elements from the Assembler object
    elems = assembler.getElements()

    # Set the parametric point where the density will be evaluated. Use the
    # parametric origin within the element.
    pt = np.zeros(3, dtype=float)

    # When *reverse* is set, high densities coarsen and low densities refine
    direction = -1 if reverse else 1
    for i in range(num_elems):
        # Extract the constitutive object from the element, if it is defined,
        # otherwise skip the refinement for this element.
        con = elems[i].getConstitutive()
        if con is not None:
            value = con.getDVOutputValue(index, pt)

            # Apply the refinement criteria
            if value >= upper:
                refine[i] = direction
            elif value <= lower:
                refine[i] = -direction

    # Refine the forest
    forest.refine(refine, min_lev=min_lev, max_lev=max_lev)
    return
class OptionData:
    """A small registry of named options with type/bound/value-set validation."""

    def __init__(self):
        # parallel dictionaries, all keyed by option name
        self.options = {}  # current value of each option
        self.types = {}    # required type(s), or None for no type check
        self.values = {}   # allowed value set, or None for no restriction
        self.desc = {}     # human-readable description
        self.lower = {}    # lower bound, or None
        self.upper = {}    # upper bound, or None
        return

    def add_option(self, name, default=None, types=None,
                   values=None, desc=None, lower=None, upper=None):
        '''
        Declare a new option, recording its default value and constraints
        '''
        self.options[name] = default
        self.types[name] = types
        self.values[name] = values
        self.desc[name] = desc
        self.lower[name] = lower
        self.upper[name] = upper
        return

    def __getitem__(self, name):
        if name not in self.options:
            raise KeyError('Key %s not in OptionData'%(name))
        return self.options[name]

    def __setitem__(self, name, value):
        '''Validate *value* against the declared constraints, then store it'''
        if name not in self.options:
            desc = 'Key %s not in OptionData. '%(name)
            desc += 'Set new item through add_option()'
            raise KeyError(desc)
        required_type = self.types[name]
        if required_type is not None and not isinstance(value, required_type):
            raise ValueError('Value type does not match')
        lower = self.lower[name]
        if lower is not None and value < lower:
            raise ValueError('Value violates lower bound')
        upper = self.upper[name]
        if upper is not None and value > upper:
            raise ValueError('Value violates upper bound')
        allowed = self.values[name]
        if allowed is not None and value not in allowed:
            raise ValueError('Value not in value set %s'%(str(allowed)))

        # All checks passed; store the value
        self.options[name] = value
        return
class TopologyOptimizer:
"""
Optimizer wrapper for topology optimization problems
"""
def __init__(self, problem, options={}):
# Set the basic problem class
self.options = OptionData()
self._init_all_options()
self.opt = None
# Set the option names
for name, data in iteritems(options):
try:
self.options[name] = data
except ValueError as err:
print(err)
# Initialize the problem
self._initialize(problem)
return
    def _init_all_options(self):
        """
        Declare options before kwargs are processed in the init method.
        """
        # --- Optimizer selection and termination criteria ---
        # NOTE(review): _optimizers, _norm_types, _barrier_types, _start_types,
        # _qn_types and _bfgs_updates are module-level value sets defined
        # elsewhere in this file.
        self.options.add_option('optimizer', 'Trust Region', values=_optimizers,
                                desc='Type of optimization algorithm')
        self.options.add_option('tol', 1.0e-6, lower=0.0,
                                desc='Tolerance for termination')
        self.options.add_option('maxiter', 200, lower=0, types=int,
                                desc='Maximum number of iterations')
        desc = 'Finite difference step size. If no gradient check will be performed.'
        self.options.add_option('dh', desc=desc)
        self.options.add_option('norm_type', values=_norm_types,
                                desc='Norm type')

        # --- Interior-point / barrier parameters ---
        self.options.add_option('barrier_strategy', values=_barrier_types,
                                desc='Barrier strategy')
        self.options.add_option('start_strategy', values=_start_types,
                                desc='Starting point strategy')
        self.options.add_option('penalty_gamma',
                                desc='Value of penalty parameter gamma')
        self.options.add_option('barrier_fraction',
                                desc='Barrier fraction')
        self.options.add_option('barrier_power',
                                desc='Barrier power')

        # --- Quasi-Newton (Hessian approximation) parameters ---
        self.options.add_option('hessian_reset_freq', types=int,
                                desc='Hessian reset frequency')
        self.options.add_option('qn_type', default='BFGS', values=_qn_types,
                                desc='Type of Hessian approximation to use')
        self.options.add_option('max_qn_subspace', default=10, types=int,
                                desc='Size of the QN subspace')
        self.options.add_option('qn_diag_factor',
                                desc='QN diagonal factor')
        self.options.add_option('bfgs_update_type', values=_bfgs_updates,
                                desc='Type of BFGS update to apply')
        desc = 'Boolean to indicate if a sequential linear method should be used'
        self.options.add_option('use_sequential_linear', types=bool,
                                desc=desc)
        self.options.add_option('affine_step_multiplier_min',
                                desc='Minimum multiplier for affine step')
        self.options.add_option('init_barrier_parameter',
                                desc='Initial barrier parameter')
        self.options.add_option('relative_barrier',
                                desc='Relative barrier parameter')
        self.options.add_option('set_qn',
                                desc='Quasi-Newton')
        self.options.add_option('qn_updates', types=bool,
                                desc='Update the Quasi-Newton')

        # Line-search parameters
        self.options.add_option('use_line_search', types=bool,
                                desc='Use line search')
        self.options.add_option('max_ls_iters', types=int,
                                desc='Max number of line search iterations')
        self.options.add_option('backtrack_ls', types=bool,
                                desc='Use backtracking line search')
        self.options.add_option('armijo_param',
                                desc='Armijo parameter for line search')
        self.options.add_option('penalty_descent_frac',
                                desc='Descent fraction penalty')
        self.options.add_option('min_penalty_param',
                                desc='Minimum line search penalty')

        # GMRES parameters
        self.options.add_option('use_hvec_prod', types=bool,
                                desc='Use Hvec product with GMRES')
        self.options.add_option('use_diag_hessian', types=bool,
                                desc='Use a diagonal Hessian')
        self.options.add_option('use_qn_gmres_precon', types=bool,
                                desc='Use QN GMRES preconditioner')
        self.options.add_option('set_nk_switch_tol',
                                desc='NK switch tolerance')
        self.options.add_option('eisenstat_walker_param',
                                desc='Eisenstat Walker parameters: array([gamma, alpha])')
        self.options.add_option('gmres_tol',
                                desc='GMRES tolerances: array([rtol, atol])')
        self.options.add_option('gmres_subspace_size', types=int,
                                desc='GMRES subspace size')

        # Output options
        self.options.add_option('output_freq', types=int,
                                desc='Output frequency')
        self.options.add_option('output_file', desc='Output file name')
        self.options.add_option('major_iter_step_check', types=int,
                                desc='Major iter step check')
        self.options.add_option('output_level', types=int,
                                desc='Output level')
        self.options.add_option('grad_check_freq',
                                desc='Gradient check frequency: array([freq, step_size])')

        # Set options for the trust region method
        self.options.add_option('tr_adaptive_gamma_update', default=True, types=bool,
                                desc='Use the adaptive penalty algorithm')
        self.options.add_option('tr_min_size', default=1e-6, lower=0.0,
                                desc='Minimum trust region radius size')
        self.options.add_option('tr_max_size', default=10.0, lower=0.0,
                                desc='Maximum trust region radius size')
        self.options.add_option('tr_init_size', default=1.0, lower=0.0,
                                desc='Initial trust region radius size')
        self.options.add_option('tr_eta', default=0.25, lower=0.0, upper=1.0,
                                desc='Trust region radius acceptance ratio')
        self.options.add_option('tr_penalty_gamma', default=10.0, lower=0.0,
                                desc='Trust region penalty parameter value')
        self.options.add_option('tr_penalty_gamma_max', default=1e4, lower=0.0,
                                desc='Trust region maximum penalty parameter value')

        # Trust region convergence tolerances
        self.options.add_option('tr_infeas_tol', default=1e-5, lower=0.0,
                                desc='Trust region infeasibility tolerance (l1 norm)')
        self.options.add_option('tr_l1_tol', default=1e-5, lower=0.0,
                                desc='Trust region optimality tolerance (l1 norm)')
        self.options.add_option('tr_linfty_tol', default=1e-5, lower=0.0,
                                desc='Trust region optimality tolerance (l-infinity norm)')

        # Trust region output file name
        self.options.add_option('tr_output_file',
                                desc='Trust region output file name')
        self.options.add_option('tr_write_output_freq', default=10, types=int,
                                desc='Trust region output frequency')
        return
def _initialize(self, problem):
"""
Prepare the driver for execution.
This is the final thing to run during setup.
Args:
problem (ParOpt.Problem): ParOpt.Problem optimization problem
"""
# TODO:
# - logic for different opt algorithms
# - treat equality constraints
opt_type = self.options['optimizer']
# Set the limited-memory options
max_qn_subspace = self.options['max_qn_subspace']
if self.options['qn_type'] == 'BFGS':
qn_type = ParOpt.BFGS
elif self.options['qn_type'] == 'SR1':
qn_type = ParOpt.SR1
elif self.options['qn_type'] == 'No Hessian approx':
qn_type = ParOpt.NO_HESSIAN_APPROX
else:
qn_type = ParOpt.BFGS
# Create the problem
if opt_type == 'Trust Region':
# For the trust region method, you have to use a Hessian
# approximation
if qn_type == ParOpt.NO_HESSIAN_APPROX:
qn = ParOpt.BFGS
if max_qn_subspace < 1:
max_qn_subspace = 1
# Create the quasi-Newton method
qn = ParOpt.LBFGS(problem, subspace=max_qn_subspace)
# Retrieve the options for the trust region problem
tr_min_size = self.options['tr_min_size']
tr_max_size = self.options['tr_max_size']
tr_eta = self.options['tr_eta']
tr_penalty_gamma = self.options['tr_penalty_gamma']
tr_init_size = self.options['tr_init_size']
# Create the trust region sub-problem
tr_init_size = min(tr_max_size, max(tr_init_size, tr_min_size))
tr = ParOpt.TrustRegion(problem, qn, tr_init_size,
tr_min_size, tr_max_size,
tr_eta, tr_penalty_gamma)
# Set the penalty parameter
tr.setPenaltyGammaMax(self.options['tr_penalty_gamma_max'])
tr.setMaxTrustRegionIterations(self.options['maxiter'])
# Trust region convergence tolerances
infeas_tol = self.options['tr_infeas_tol']
l1_tol = self.options['tr_l1_tol']
linfty_tol = self.options['tr_linfty_tol']
tr.setTrustRegionTolerances(infeas_tol, l1_tol, linfty_tol)
# Trust region output file name
if self.options['tr_output_file'] is not None:
tr.setOutputFile(self.options['tr_output_file'])
tr.setOutputFrequency(self.options['tr_write_output_freq'])
# Create the interior-point optimizer for the trust region sub-problem
opt = ParOpt.InteriorPoint(tr, 0, ParOpt.NO_HESSIAN_APPROX)
self.tr = tr
else:
# Create the ParOpt object with the interior point method
opt = ParOpt.InteriorPoint(problem, max_qn_subspace, qn_type)
opt.setMaxMajorIterations(self.options['maxiter'])
# Apply the options to ParOpt
opt.setAbsOptimalityTol(self.options['tol'])
if self.options['dh']:
opt.checkGradients(self.options['dh'])
if self.options['norm_type']:
if self.options['norm_type'] == 'Infinity':
opt.setNormType(ParOpt.INFTY_NORM)
elif self.options['norm_type'] == 'L1':
opt.setNormType(ParOpt.L1_NORM)
elif self.options['norm_type'] == 'L2':
opt.setNormType(ParOpt.L2_NORM)
# Set barrier strategy
if self.options['barrier_strategy']:
if self.options['barrier_strategy'] == 'Monotone':
barrier_strategy = ParOpt.MONOTONE
elif self.options['barrier_strategy'] == 'Mehrotra':
barrier_strategy = ParOpt.MEHROTRA
elif self.options['barrier_strategy'] == 'Complementarity fraction':
barrier_strategy = ParOpt.COMPLEMENTARITY_FRACTION
opt.setBarrierStrategy(barrier_strategy)
# Set starting point strategy
if self.options['start_strategy']:
if self.options['start_strategy'] == 'None':
start_strategy = ParOpt.NO_START_STRATEGY
elif self.options['start_strategy'] == 'Least squares multipliers':
start_strategy = ParOpt.LEAST_SQUARES_MULTIPLIERS
elif self.options['start_strategy'] == 'Affine step':
start_strategy = ParOpt.AFFINE_STEP
opt.setStartingPointStrategy(start_strategy)
# Set norm type
if self.options['norm_type']:
if self.options['norm_type'] == 'Infinity':
norm_type = ParOpt.INFTY_NORM
elif self.options['norm_type'] | |
# processed Android keys
# NOTE(review): the `<KEY>` tokens below are redaction placeholders, not valid
# Python — the original 32-bit constants must be restored before this table can
# be parsed or used. The 18-entry layout suggests a Blowfish-style P-array —
# TODO confirm against the consumer of out_key_p.
out_key_p = [
    <KEY>, <KEY>, 0x872bc2af, 0x740c67d2, 0x06b5b538, 0x203471d9,
    0x5b166908, 0x1992e2dd, 0x709c1604, 0xf44b2f24, 0x80b4e61e, 0xf4dd369b,
    0x0b635c77, 0x3ece8651, 0x0d0bcd5b, 0x577afa1f, 0xef341b74, 0xfe722dca
]
out_key_s = [[
0xe15f9e9b, 0x03555599, <KEY>, 0xaddfbacb, 0x8563f318, 0x1731d807,
0xc70a3692, 0x5c2375d3, 0x93935e57, 0x63fffcb8, 0x7af11e27, 0x7b350860,
0x68d7c26f, 0xdaf049c4, 0xd14b68ec, 0xda9e11d7, 0x9705dbd0, 0x7cca75fe,
0x03aba426, 0x0f31fc8c, 0xedc14781, 0xaf7d0036, 0xea013ac8, 0x167e94c4,
0xb6fe76a8, 0x076e1b0a, 0x37d3f9b3, 0x9314b846, 0x949216ae, 0xe920d195,
0xe6eedebb, 0xf6e5bb9a, 0x622eee66, 0x1e13131d, 0xffe62a02, 0xd1ea1074,
0xe86c6f7b, 0x3bf2a360, 0xb81d9322, 0x98b98e89, 0xb4a25e21, 0x5d2b39a7,
0xdf2b838a, 0x7d6858fc, 0x53d7534f, 0x699c3bee, 0xef22acba, 0x56ba8780,
0xa8fbe73c, 0xb4caab7f, 0x32fb4391, 0xce117bb4, 0xb26d1c6d, 0x26efcb76,
0x7573e394, 0xd7edbe85, 0x6f61abb8, 0xc9fa6366, 0x45b8b08c, 0x2cf8c2b4,
0xd3cbabf1, 0x6cdde675, 0xae8f00ec, 0xef5107eb, 0x98ff45b5, 0x4b76ee02,
0x31e152be, 0x9f86d02c, 0x358bb661, 0xf821def5, 0x120e9c36, 0x46c23b3e,
0xff5062de, 0x41b8b28c, 0xa22c9f8d, 0x028bf7f9, 0x5ebe8f80, 0x78e8de1c,
0x0d594ad4, 0xec9819a9, 0x10761f9f, 0x2dac4a3c, 0x32d63a18, 0x9eaf8c9e,
0x724a3c41, 0xb9afb3b7, 0xbe6f2245, 0x684c7581, 0xd15b9adf, 0xee9437af,
0x22114a47, 0xe2a9eed4, 0x15c6068d, 0x1dfb4e19, 0xc32abdb2, 0x3bae15ee,
0x19ec45c2, 0x1f90957d, 0xff649405, 0x4ce2fa05, 0x8ef84fd1, 0x2ee46348,
0x84502a27, 0x42077b9b, 0x105183ae, 0xd44db5ac, 0xc754b5ad, 0xaa94e602,
0xeba1d85c, 0x647d6a2a, 0x2d2dc661, 0x17ef131f, 0x65035f04, 0x24bc155f,
0xb852bb07, 0xd2a03fcc, 0x9400b35d, 0x66a23536, 0x68cc1ab0, 0x2b366e16,
0x445202f1, 0x08e28943, 0x59dab809, 0x0d85b68d, 0xf69b85ff, 0x05ec1a2e,
0x9abb1c81, 0xec81e9ed, 0x31ed505f, 0x25f28f7f, 0xa768cab1, 0x6ffef36a,
0xb0347700, 0xa57519ae, 0x9b1b1ed5, 0x4d257368, 0xe0693824, 0x66c1b6d4,
0x28a8f8ba, 0x0d556d85, 0x7ef1cd17, 0x5f028fa3, 0xe5a61301, 0xffef2e0e,
0xb737cb57, 0x35de5f36, 0x244411c7, 0xb860e566, 0x107bc291, 0x163894c9,
0xce006743, 0xee8accbd, 0x2c546301, 0xb628a648, 0x8c9a5f88, 0x4cd0fafe,
0xf376a955, 0x1e67ee50, 0xe488161f, 0x6badb6a6, 0x7fe45f7f, 0x49515270,
0x8f921aa3, 0x7bb9547c, 0x4dd89e33, 0x5b66e4fd, 0x844acb4f, 0xf5fd71bb,
0xb37fd813, 0x26870647, 0x18b17366, 0xc1a76ad3, 0x082adb60, 0x0ec19a13,
0xf54da029, 0xd26d804d, 0xf5709013, 0xd691ac51, 0x1da76f01, 0x84310b0f,
0x98dd505e, 0x3e2887db, 0x9de8b16f, 0xfa84b608, 0x3b348c33, 0x28a3f3ab,
0x189f0238, 0xd7cf415c, 0x8a33849e, 0x0fd0e49f, 0xa6500b7e, 0xd17235ec,
0x7e1f150d, 0xacd0857d, 0x47f179a7, 0x41d258b1, 0x1af08047, 0x799bab6f,
0xd3bc6b2e, 0xa29e8a77, 0x58170aac, 0x96bc089b, 0x71692dec, 0x157aa527,
0x6b8427b1, 0xb7c3cd64, 0x335275e5, 0x31a58e0a, 0x56e77232, 0x8eb18fc5,
0xbe85fbec, 0x45b25fb1, 0x401b0c7c, 0x64268428, 0x2074cf4e, 0x3214c2b0,
0x0f878220, 0x99f3af0c, 0xab466398, 0xbd5d244a, 0xee73ba24, 0x6973a5a6,
0x58a1fd3a, 0x2922b89a, 0xf89afd2a, 0xa154a891, 0xb695fa10, 0xfc4815f4,
0xdf333dbf, 0xaac85b90, 0xf4f715e2, 0x883fd9c7, 0x9921b9a5, 0xf52e8325,
0x3c764f83, 0xdc2f15d0, 0x9d13ed18, 0x0e606226, 0x9a3df52b, 0x1aee312e,
0x7c9c956e, 0x74945570, 0x6511f87e, 0x079b1fac, 0x307ec31b, 0x7ce01d73,
0xd517313d, 0x33932d2b, 0xb9f6d593, 0xe09e0b96, 0x56b123a0, 0xffe4e3b6,
0xb00acaff, 0x79ac263d, 0xb40fcd02, 0xdd291445
],[
0xe88915a7, 0x2fd71acb, 0x8de63f40, 0xd9667945, 0xad7f4d6c, 0x7d471d97,
0x763fe4cf, 0x9b7be03f, 0xc2753c36, 0x485ca61c, 0x464f68f9, 0x68e20787,
0xf9b5112e, 0xefa30f29, 0xb0bf5579, 0x1ff012a9, 0x84eb1932, 0x860c72c3,
0xe78c719f, 0x09931794, 0xf40de80c, 0xc3734531, 0x47c0f73d, 0xed152258,
0xd1063d9b, 0x8ab6c8dc, 0x4bd5cf71, 0xe5bb0287, 0x8cfd2000, 0x4943cd0e,
0xb3d1d376, 0x5069c9ef, 0x21cd7de9, 0x0dc70db3, 0x2c52c071, 0x954b1899,
0xf629c4c8, 0x54f7bfea, 0x52736639, 0xdac7c000, 0xf6ea60eb, 0xa155a177,
0x3ed41d88, 0x2a967cd4, 0x8eda21bb, 0x28d8e3d7, 0x0b199754, 0x4d6e5dd8,
0x5355c236, 0x85102121, 0x8ee4939e, 0x30dc9a44, 0x16aea2f8, 0xd7f5e4e0,
0x81f46691, 0x941ec3ec, 0x0b90833b, 0x613c85c5, 0x72678ee4, 0x42af8034,
0x85327e5a, 0x0650eafd, 0x66ea2cbf, 0xb5db4c39, 0x561cc65d, 0xf856517b,
0xda186e32, 0x3c7cb9d8, 0x90a16ab0, 0x7231ad00, 0xd4f3b7af, 0x38409ee1,
0x25663e24, 0x2737afbc, 0x4ef9cde2, 0xdba641e1, 0x616e97dc, 0x6c951874,
0x8796c409, 0x421ee6c4, 0x8c151c79, 0xb11febf1, 0x98bc1204, 0xb028f602,
0x1504d1f6, 0x33202b57, 0xbd993956, 0x2359b3c7, 0xab331fa8, 0xd48afd73,
0xdefafcec, 0x75dd341b, 0x3b83626f, 0x7d3981cc, 0xa6380a9a, 0xf660aff5,
0xa29fcecd, 0xf895d432, 0x31e403e5, 0xb5bb3e12, 0x4601fe55, 0xa6055d21,
0x72d8b825, 0xdb8b8562, 0x0dc236c7, 0x41d3fbe5, 0xc6c02321, 0x6b68bdf8,
0x4e355453, 0x1ed80b3e, 0xc65d4ced, 0x0988916e, 0x8c3fff7d, 0x1c44a511,
0x6e190b89, 0xe4ef9975, 0x554d6a39, 0x4e4def49, 0xa294c0c0, 0x4811b319,
0x6708876d, 0xa1b35ca0, 0x9508cfde, 0x74bcfdfb, 0x43631d77, 0x21871456,
0xa6b83afb, 0xe96a7352, 0x47db29de, 0x39c197fa, 0x1404a39c, 0x92b3ab85,
0x0ec976b4, 0xc77c5425, 0x582d6b41, 0xc5de160f, 0x83f9293a, 0xf561f916,
0x4c3d9b6a, 0x170d6f94, 0x357180fd, 0xc73ad219, 0x727ea163, 0xe9bc0edb,
0x34266e50, 0x93fbc9e6, 0xb0ceae22, 0xc71caeed, 0x5229d6cc, 0xb072c679,
0xead50629, 0x328e387c, 0x31b38479, 0x9ffc2ede, 0x1aebd9f7, 0x66cde36d,
0x94eb1015, 0x214a341b, 0x4e2725ce, 0xb646ed42, 0x126a5d3b, 0xd45d974e,
0x0c23a1ed, 0xc7f23f3a, 0x039e06db, 0x45e7c121, 0xd4fb7c84, 0x93259026,
0x5c273558, 0xfe28386e, 0x55e97b1c, 0xc3273147, 0xaf9cf707, 0x714df708,
0xd020da13, 0x63ff077b, 0xa3e8b295, 0x149e5c40, 0xfec0a3ba, 0x56f00b03,
0x052eeec3, 0xa16bb594, 0x11f71787, 0x3d070441, 0x43921051, 0x81372cf4,
0x508689c9, 0xb67ef857, 0x3acfda1d, 0xe437d27d, 0x503c18a7, 0xcf3c2d49,
0x4fcfde8d, 0x9b9bb94c, 0x4185a775, 0x1ef11c15, 0x5e851380, 0xf0388cee,
0xc5444f7a, 0xe7d10b5b, 0xbbb2deec, 0x54412917, 0x3f7a98d7, 0x68585273,
0xe7fd9971, 0x5ca5fd84, 0xa264a533, 0x6cda27d8, 0x0bc4d33d, 0xfa9ef695,
0x9b1c3ab9, 0xa49ddf15, 0x213aa509, 0xcd2e0539, 0xd9fdb9b1, 0x612d781c,
0x6af5985a, 0xa0585c6d, 0x4d70e637, 0x436e1d58, 0x2e98d56e, 0x36c51320,
0x8424af0e, 0x3233250b, 0x51764e9c, 0x034bad26, 0x5c8550f0, 0x271d7047,
0xa5afaec5, 0xa4d41479, 0xbb775519, 0x5f94a186, 0x5fb27b56, 0xa48405bd,
0x8f543fdf, 0x23ac0b49, 0x9d36d6a0, 0x63739090, 0xc39314ce, 0x1c798ad2,
0x8f3fd9f0, 0x8330ff19, 0x851874b8, 0x32a79ca2, 0xbdd64e38, 0xb6ac2e6c,
0x4691accd, 0x4f5b9d71, 0x0bd4f753, 0xb3074a95, 0xb26e4510, 0x63969c27,
0x22e07207, 0x0129e524, 0xc766650e, 0x438be192
],[
0x3750c5d2, 0xfb85d7b4, 0x38836748, 0x2d9144ef, 0x795371b6, 0x56ed49ce,
0xad880cfb, 0xa49b9346, 0xcf773a62, 0xeec4ba92, 0xa4475a71, 0xdd7f2159,
0x0127c957, 0xefb0c0e2, 0x68bbca45, 0xa4e5eca5, 0x67b73975, 0x71d507fc,
0x69e075c7, 0x563c029a, 0x8f3376db, 0x2a6acc70, 0x556b9333, 0x5e5d182f,
0x29f5d5cc, 0xeafea42f, 0x69efc675, 0x68d15318, 0x95f9759b, 0xced1cbb4,
0x1882f46c, 0x6c326bce, 0xc9942a49, 0xddd8c723, 0x8e9ec07a, 0x01f7e12c,
0x645cb5cd, 0x391f1510, 0x163c35f5, 0xd94f9a87, 0xb74585d8, 0x60b1bc61,
0x521eaed9, 0xb73ccd09, 0xcdef5503, 0xc55df55f, 0xefd4c973, 0x78948287,
0xaab7bd7a, 0xeebefe2d, 0x2b8721c9, 0x2b04f7d0, 0xf2cbf31a, 0x7ee524d6,
0x36d46e55, 0x8dac73a0, 0x8427954d, 0x5f2a3a39, 0x6413a9f1, 0x022b301d,
0xa072616c, 0x3d3ef628, 0x98b9e887, 0x866685fb, 0x9e3fcfbc, 0xf3eeed63,
0xcaba75e3, 0xd9015927, 0x325626bc, 0x2c5b752d, 0xc1385649, 0x7d39f1ac,
0xb1b2e515, 0x1d70444d, 0x8f414d5f, 0x7ea7c37d, 0x7b2ef041, 0xab0d8d8c,
0xa8a61f2d, 0xb0bde42d, 0xb0b8a457, 0x44a920a8, 0x2db3ab91, 0xe6b7ad63,
0xbfb64cd4, 0x99568ae7, 0x1bceacc8, 0xcc5f7d17, 0x5fa452eb, 0x446b9f97,
0xe633ddb1, 0x60aca6f4, 0xdab2c9ea, 0xa5b630d6, 0x825f75ff, 0xa3ac0e6d,
0xb3894704, 0x13de228f, 0x4e9f6581, 0x6d107b01, 0xcab6097c, 0xe62b146f,
0xea71c048, 0xfe504e0e, 0x0702cc9a, 0x3d1a7d01, 0x13617030, 0x3d879f89,
0xc28b65ee, 0x5872c3c3, 0x05b4bb68, 0x9a861425, 0x8a4cd6fe, 0x55243744,
0x4858ecb6, 0xb568d8dd, 0xbebf9fdd, 0xc84fd4c6, 0x4f4af9c0, 0x9b08f63d,
0xf1a0376a, 0xa355f6cf, 0x2fcb228a, 0x0cdddf69, 0xe468afe7, 0x29398554,
0x7117abd5, 0xe2fe5567, 0x508a5d85, 0x49adf79d, 0x75011a15, 0x31d8e338,
0x74d222b5, 0x24960278, 0xcdff9aff, 0x7aad9fa0, 0x9b06269e, 0x69b501f1,
0xe0086ab7, 0xf2e16ce4, 0x8cb98307, 0x715b2506, 0x3cc16c6e, 0xd74378d2,
0xb510a616, 0x1922ebbc, 0x75d40946, 0xbc4f0b56, 0x4ab3a831, 0xf6eb3d5e,
0x7110bcd0, 0x105bfce5, 0x8ca82576, 0x96dbeea9, 0x40488279, 0x951974fb,
0x94b565e4, 0x692c10ce, 0x6a692d18, 0xaa0af02e, 0x7379d550, 0x9ce8b210,
0xd4635640, 0x33ea7667, 0x5e776e92, 0x9ae7c2d1, 0x2562c476, 0xe8b9342d,
0xd3d0e320, 0x3cc6af4f, 0x3f3042a1, 0x4bdc1927, 0x5a142bb4, 0x137d70ef,
0x7fb6018a, 0x080d779c, 0x550fee8b, 0xd71ac558, 0xa7298efc, 0x714e8084,
0x8e6d9001, 0x8ca5f159, 0x4a7c41d3, 0xfc3feac7, 0x61aa5710, 0xd13aa1bc,
0x665e4645, 0xfe4d4faf, 0x2ee5c84b, 0x91262e53, 0x699e98d9, 0x4f61f245,
0xbd6e788e, 0x1e5c2d6d, 0xc64185a4, 0xeef57cb3, 0x4d39a6b4, 0x15fa53f4,
0x9c8a0a48, 0x6442e21e, 0xf82b64ce, 0x73d86319, 0xf1a30515, 0x48f14387,
0x848a69a7, 0x8b1c7641, 0x8d271922, 0x135857d4, 0xa3f4e0a8, 0x97b75963,
0x1e761918, 0x6bb49070, 0x34dacfe6, 0xbe78db33, 0x51e3f2ea, 0xbd5ff0c9,
0xd15adc12, 0xadd67ab9, 0x0c0c5c33, 0x149c2097, 0xaad74487, 0x8436773d,
0x6ea35567, 0x54bb4ad0, 0x7447cf20, 0x9c8552a3, 0x811096a6, 0xa3434fba,
0x3803dbcc, 0x504714f7, 0x9052704c, 0xcf5df346, 0x17646400, 0x87cc0403,
0xfaa228ce, 0x6f2d3289, 0x808948f1, 0x505ef302, 0xaaca43db, 0x526f9953,
0x3fbb002b, 0xa7c7443b, 0x4d6e36cd, 0x0457ac81, 0x59139c59, 0x0e155100,
0xd2a1baa7, 0xecc08a20, 0xcdde24cb, 0x16ae51f8, 0xd9a1fa7e, 0xc50f461a,
0xb569ed99, 0x5a77293f, 0x02f86aa8, 0x050f0024
],[
0xd404b9a8, 0xd3438135, 0x227e435d, 0x31076cbd, 0xaee796dc, 0xe404313c,
0x2623800a, 0x093a69b2, 0x58ee884f, 0x776f4874, 0xe572d368, 0xd5a5cbe7,
0x3f3bbef0, 0x7c17d8f6, 0x220a067d, 0xd793de4d, 0xa0109a98, 0x62637a6b,
0x22d8d756, 0x5066308f, 0x7e90eca2, 0xc0b754bd, 0x4084b7c5, 0x9486c097,
0x36a046b5, 0x114975c0, 0xd91424c8, 0x890246d5, 0x59eb4a73, 0x9afa3756,
0x70b8c470, 0xc08ea016, 0x4c28c5f4, 0x9f623b08, 0x73fc47c4, 0xedfa1d69,
0x4a2b1786, 0xced564eb, 0xbe12a43a, 0x52e852a4, 0x3cb3c210, 0xca9ae070,
0xe33e7ed5, 0xd6af2ef1, 0xe49e5a83, 0xf5772eaa, 0x8551eb98, 0x1cf22cfc,
0xadaa0256, 0xecd056ca, 0xc209d2b9, 0x9b3e0762, 0x1ee2a087, 0x2b821484,
0x8fe22587, 0xce149c00, 0x91ce4d3e, 0x19a97f27, 0x46bcda1b, 0x404cd997,
0x82e82b04, 0x9d4dedbc, 0xc0859cb5, 0xf01b46c0, 0xb8b203cd, 0x45090f79,
0x8be4ab5d, 0xe2d1cd5c, 0xcbc8431b, 0xe7ee2388, 0x7e111b93, 0xc519d732,
0x0655ccf5, 0x783288a3, 0x9d698132, 0xaa0e34dd, 0x2d34f890, 0x27fe844e,
0x9cbc4da6, 0xd953afc3, 0xfb07a430, 0xcf035ecb, 0xcc4c8d9b, 0x2abd5860,
0xb82869b1, 0x3c70d06f, 0x207e13c0, 0x429c196e, 0xfe9ede86, 0x4f710351,
0xdf8e7c12, 0xe7f5c14f, 0x6f619bf3, 0xa6a99158, 0x23431a99, 0xdc2ae09d,
0x6894ad00, 0xdd61887e, 0x951926ba, 0xb653acd3, 0x4be2af6c, 0xaecb2462,
0x2cd45174, 0x0f92838b, 0x5664d019, 0x38a28976, 0x2dbbeeae, 0xbe54b161,
0xa7570953, 0xa9296b69, 0x6e8cd50d, 0x2dff6493, 0xd8897cc9, 0x9807846e,
0x067833e2, 0xff0a0865, 0x6798fb62, 0x38ff940e, 0x257dfead, 0x36ed0dd9,
0x87786abc, 0x2fcce945, 0x40baaf09, 0xa55edef1, 0x83231abf, 0x29579f57,
0x7d26bfd8, 0x24d3d02c, 0xbad9f470, 0x76049108, 0xe3c6e9fd, 0xaee57efa,
0x974bf27a, 0x4d753ea9, 0x326fa8bf, 0x0f234d18, 0x892d41f1, 0xa314e7a0,
0xe6ad75a3, 0x6b824a07, 0x3c54f6bb, 0x9b41b17f, 0xa717e8f7, 0xe0b4383b,
0xb9d9772c, 0x60bd9aea, 0xb0a28d0c, 0xec6c7f0e, 0x2475ad83, 0x81e3ac81,
0x4f9eb09d, 0x4ae9dcd1, 0xcacf2923, 0x138d5de0, 0xc23dc080, 0xdc6212c5,
0x49d40182, 0x9a299359, 0x96494f05, 0x50958bcc, 0xedd87cf1, 0x8e41d821,
0xdbc893e2, 0x81760ba1, 0x5bc77924, 0xe4c423cf, 0xfb96b131, 0x1ff9238d,
0x0f5ce7d5, 0x550fd44f, 0x2b9979f8, 0x14d1e16f, 0xe694fc5c, 0x06e9befc,
0xa328bce7, 0xb3be44b8, 0x11714887, 0xfd3856e6, 0x6e81a076, 0xdaabdacd,
0xddd1abde, 0xcaf9dabd, 0x50cb477d, 0x1c8fed49, 0xd25a8ad8, 0xa4b5a936,
0x2f7fdcc5, 0x769f6748, 0x416623d7, 0xd9181558, 0x0c864431, 0x00bd5e0d,
0xe64bb5c8, 0x88482e47, 0x1aeda9af, 0x95a56caf, 0x7135065a, 0xa1928e57,
0x8e6eedd9, 0xadc56171, 0xd3c859a0, 0xb13bec39, 0x1dcdb139, 0x188b3229,
0xf6733af0, 0x9c5902f1, 0xe62faa6e, 0xc36f65b6, 0x9cc971f2, 0x4d2ba095,
0x909a0f45, 0x7218f3a9, 0x563c0ce3, 0xf194acfd, 0x386df463, 0x8907bdcb,
0x300035a9, 0x00c7fdc5, 0x50adac43, 0x6e53e258, 0xe1f636b4, 0x271b7918,
0xfa7a3af8, 0x40913066, 0x3e8706de, 0xbd421d95, 0x004e20fc, 0x2a7bb121,
0xac159bf6, 0x49b64135, 0xebe39504, 0x60a191eb, 0xfdcd513d, 0x4bf25769,
0xa8b74196, 0x9fce29fc, 0xb25af8a7, 0x98a93a20, 0xc4bab38d, 0xbeef4028,
0xa4ac98d9, 0x7839b20b, 0x2034d530, 0x9f25f4a0, 0x099fa1a5, 0xd031b88f,
0x9d05688b, 0x5b2fd566, 0x661a06a9, 0x1fceb5ac, 0xca8b6bd5, 0x192151b9,
0x69e54eeb, 0x29429086, 0xa676e0c5, 0x5869aac7
]]
# Expected keyed P-array state: 18 32-bit words, which matches the Blowfish
# P-array size — presumably a cipher key-schedule test vector; TODO confirm
# against the generator that produced this data.
in_key_p = [
    0x7965742c, 0x4a205f3d, 0x143f8f89, 0xc976e0b1, 0x37d227b0, 0x78968d06,
    0x9f28933c, 0xd21a7537, 0x80eb812c, 0xe5a60d9b, 0xf2b6b13d, 0x67079baf,
    0x0c73a7c2, 0x95d331dd, 0x80379ec4, 0x16b753b1, 0xc23f34ae, 0x7a3c45d7
]
in_key_s = [[
0x58af6ece, <KEY>, 0x033ef993, 0x4299c20b, 0x47adc709, 0xdb40ee14,
0x3772fa47, 0x473385d9, 0xbfc0af75, 0xd439ae96, 0x6a2ef2ee, 0x4a25f261,
0x69345881, 0x65dd6dfc, 0x7a87b813, 0x626a4332, 0x675a3e91, 0x2c19b6da,
0x62108522, 0x26cb31b9, 0x584df87d, 0x5024976f, 0x48136869, 0x5c56cba9,
0x5ad39e1b, 0x133f6eba, 0xb1c66e67, 0x90880621, 0xa9886abc, 0x5aafb5fd,
0x2623955d, 0x737cc474, 0xd5248060, 0x67c4b493, 0xbac12128, 0x095810ab,
0x613ab2f2, 0x30e1b44a, 0x8291449b, 0xaf474e70, 0x6cd5307b, 0xb13ad61d,
0x721871f8, 0xfd55db7f, 0x7415a01c, 0x580b8ca6, 0x284fe1b9, 0xa4f0bd0d,
0x7bf1167d, 0x82662fc7, 0xc7524e17, 0x2f7c69a2, 0x089fa280, 0x90e18cd8,
0x70536f17, 0xf5e7ed0d, 0x13388a46, 0x9db0cece, 0xc6710fe3, 0x00e399ad,
0x22e77d76, 0x63cde083, 0x757d804e, 0xf821aead, 0xf84b66e9, 0xe6bc3e7c,
0x5dfc3e57, 0x158c599d, 0x27dedf6b, 0x777bf721, 0x05d82093, 0x8b2bc85f,
0x09918b2f, 0xf4c702e8, 0xdf00cd28, 0x491a4fad, 0x64944ee2, 0x872ed2e7,
0xf3288db7, 0x1f93d679, 0xad42dd2d, 0xe8131a69, 0xd8ba3a70, 0x73f86d65,
0xb3c72776, 0x52cc70c8, 0xaba8c646, 0x4a323b09, 0x7d482403, 0x9e03399d,
0x2b717494, 0x6bed832b, 0xf8a661ba, 0xc07e4f5e, 0x589460bc, 0x1da78d74,
0xd8ecd29f, 0xba3ed619, 0xf2d647b0, 0xaf86f7a8, 0x4ca53870, 0xbfecf67f,
0xa778b6fe, 0x84d56e44, 0x1f4f61ed, 0x1f8329e1, 0xedd3e331, 0x27f854e3,
0x2da40439, 0xfbc0bb45, 0x91327b1f, 0xc819276c, 0x72ad0fae, 0xde13b223,
0xd2f381dc, 0x826bb46d, 0x295bc153, 0x9048ac23, 0x945605d9, 0x944d59cb,
0xba1a643d, 0xa16f9e33, 0xed95325e, 0xb1e5e9ca, 0xc2233f09, 0x44585853,
0x6a4eec8f, 0xf93c1555, 0xd6793587, 0xe934216b, 0x3a8332b3, 0x3a8466c9,
0xac7386cc, 0x01668a9f, 0xa28ff66f, 0xda303600, 0xd6e18e43, 0x3d592ada,
0xde2c3640, 0x8df5bd6b, 0x1ab26fbb, 0xe59ec9e8, 0xac9925b3, 0xc227130c,
0x467a9af0, 0xa9579945, 0x0e1652a4, 0x433805af, 0x4ae0f0fd, 0xd9218763,
0x54d623ff, 0x39bd38c8, 0xc639e971, 0xefed7056, 0xcf46f0d3, 0x0a43fb36,
0xe73e362e, 0x092400f6, 0x242821e7, 0xc3953cdb, 0x8c02d71c, 0xd9d5b909,
0x64b442af, 0x29d5ffba, 0xb479b691, 0x5aa9a01c, 0x49cbd1c9, 0x41eafbf8,
0x888144a6, 0x844c076d, 0x05581523, 0xc5e98ffd, 0x13056fe1, 0xa4056b01,
0x09f53013, 0x0ad00575, 0xacb8354d, 0x52ece455, 0xfd8890d3, 0xaf651f23,
0xad7374d2, 0x99cceab5, 0x2f0f603d, 0x5e7ea504, 0x608963e1, 0xc1bd2196,
0x200b27b3, 0xd9d1e761, 0xeff36e5a, 0x547b24c8, 0x7c7f77bc, 0xa9e78393,
0x6b9f3172, 0xc6529dbd, 0xb6e0011d, 0x40cda153, 0xe74ddd18, 0x01a98b3b,
0xd9b6f384, 0x57aaa89b, 0x98f36734, 0x98baaa5a, 0x47f961de, 0x12803dcb,
0x24d3e504, 0xb5fa31a1, 0xcda87476, 0x9cc48fc9, 0xbdd02ca2, 0xf5963721,
0x722cc439, 0x519ef966, 0xd5699454, 0xf8aeed1c, 0xc5ec22b8, 0x52d7eb6a,
0xc179828c, 0xb383272e, 0x206888fc, 0xaf1a692e, 0x217bf251, 0x6c0d0a71,
0x0c84184b, 0x79dd1780, 0x3b3f72a8, 0x33478e4b, 0x06bf0967, 0x9023fa3f,
0x8303a262, 0x7ac0e4a6, 0xd439deb1, 0x1dbef98b, 0xfef0be31, 0x1b87f008,
0x7c2196ff, 0xf5447601, 0xb1508f3a, 0x512cfd07, 0x3137b2d4, 0x768cffc8,
0x970c456d, 0xc06d34b4, 0xe257e53d, 0x8c75c72b, 0xc9db8a31, 0xde84bb8f,
0x5b332228, 0x8bf79c5a, 0x0b3efe49, 0xf0c4bf7e, 0xb958ed83, 0x5b37ee2d,
0xdb04c07a, 0x72739791, 0x55c40314, 0x5129c81c
],[
0x700c96f3, 0xde2d98f3, 0x503d5563, 0xa5a92702, 0x5f87b11c, 0xc5fdf6c2,
0x9d5eadf9, 0x82d21e82, 0xbfbe92ec, 0x27b25533, 0xf6c9aba1, 0x787d218d,
0xfdbf4423, 0x439ed927, 0x3201f7b4, 0xb8dfe640, 0x88ad318e, 0x2076ab45,
0xc8654627, 0x658d0920, 0x09fe3274, 0xf00fd288, 0xf3e47731, 0x6028108c,
0x98f52e66, 0x10b6f6c6, 0xfe6e6cbd, 0x18855ca0, 0x41b04ef1, 0x3a075160,
0x5158de83, 0xfbb9f0c9, 0x5e3fdc6c, 0xd72efef8, 0x04c4ef61, 0x99edda29,
0xc653fe1e, 0x6b85e447, 0xbe07d9f5, 0x16ce88d4, 0x6bf376dd, 0xa12cefde,
0x22fc5353, 0x2890980d, 0x8b99543c, 0xab2c42bc, 0x510892c5, 0x416951dd,
0x219d7d99, 0x5c83a431, 0x7f6b1f4e, 0x3cdddebd, 0xb96b4c75, 0xb88adf78,
0x48d54415, 0xd89aa204, 0x85fa0a84, 0xcceba68c, 0x6ff06438, 0x0f3bae05,
0xd2d85107, 0x19b91d81, 0x2c68aed8, 0xbbe8f8d2, 0xa26c27a8, 0xba1b02e0,
0x90f091fe, 0xa62a3797, 0x9fc43203, 0x59393925, 0x354aa050, 0xa709b895,
0x6b8aa793, 0x4a679a6c, 0x47eea590, 0x21aa4b78, 0xc103cef9, 0x7832f982,
0x0a19af36, 0x71253891, 0xa0c16436, 0x968852bd, 0x6694b976, 0x0884fb93,
0x46eb1e9f, 0xfa945c75, 0xd3c928fb, 0xd1c8bf8c, 0xaf20aaa9, 0x9fa86cd2,
0xdccded57, 0x1bdd4247, 0x94f91d5c, 0x7d6d5058, 0x11f0db4e, 0xf9a48f09,
0xffa3dfb4, 0xb27b4de0, 0xdeab8e3f, 0x20ad0f77, 0x9c13ff7c, 0x16acc3a5,
0x59fd4711, 0xe13fc78e, 0x286b7532, 0x3352f5bb, 0xa3305feb, 0x643cfc7b,
0x689de9f4, 0x4ea0b270, 0x532dc782, 0xa5c504c3, 0xbfc29608, 0x0f3fd845,
0xd62c9c37, 0x8f9d345a, 0x7bca7eb6, 0xda8e1fcc, 0x152b59ce, 0x625bb739,
0x49a5aa8f, 0x24417d34, 0xe9c9ed1b, 0x0e20a019, 0xe81dbc3a, 0xea7fdd74,
0xbd0a0794, 0x85585d33, 0xa48530d2, 0x991cc6ab, 0xa5488f6c, 0x4f1a494d,
0xb45f297f, 0x0f357907, 0x56574fec, 0x4d4519ff, 0x2b78fbdf, 0x28ca6528,
0x095d79b6, 0x48cb1657, 0x6b56eed0, 0xb0ccbe78, 0xe702aec1, 0x350bdfb7,
0x59e0e969, 0xa4154ba8, 0xba56355c, 0x545028bc, 0xef129a26, 0xc594c313,
0xf74051a7, 0x90f33de7, 0x7946623b, 0x06875cf1, 0xa47f30cd, 0x3fd1eee0,
0x848065a2, 0x4788db48, 0x7afff19f, 0x1a6f58aa, 0xa929b0be, 0x4297c802,
0xa5c9db5c, 0x972df7f5, 0xfb449508, 0xfa5e027f, 0x903d0acc, 0xd9481446,
0x485f43f3, 0xe99d44bb, 0xf830b7d5, 0x7a8d521c, 0x84b98afb, 0xe88c86df,
0xf59c4cd1, 0x9f66e618, 0x71f390ec, 0x59c364ef, 0x47e57d97, 0xdb769d9b,
0x8a5df152, 0xf3f1afc2, 0x23791aa5, 0x6032c1e6, 0xcdcd381b, 0x88298f9a,
0x0489b57b, 0x7206785b, 0x086f2c1b, 0x779c61e9, 0xf87ea443, 0x57c8da35,
0xa417c341, 0x7883bff4, 0x165beefa, 0xe630556f, 0xe136b428, 0x65f03ab7,
0xc218b820, 0xc4df8526, 0x2a4f4982, 0x124811e9, 0xf799a377, 0xfd1d0033,
0x663fb7ef, 0x1ccafabc, 0x44af1166, 0x5a164940, | |
<filename>renderchan/launcher.py<gh_stars>10-100
__author__ = '<NAME>'
from gettext import gettext as _
from argparse import ArgumentParser
import os
import subprocess
import datetime
import pwd
class Launcher:
    """Discovers projects, bind-mounts their render directories and runs
    configured commands for each of them.

    Commands (parsed by ``ConfigParser``) configure source/render/mount
    directories, a target user, per-project commands, and trigger the actual
    run.  A project is any directory under ``sourceDir`` containing a
    ``project.conf`` file.
    """

    def __init__(self):
        self.pidFile = ""           # path of the pid/lock file ("" = none)
        self.logFile = "-"          # log destination; "-" means stdout
        self.outputFile = "-"       # command-output destination; "-" means stdout
        self.sourceDir = ""
        self.renderDir = ""
        self.mountDir = ""
        self.user = ""              # user to run project commands as (via sudo)
        self.excludeDirs = {}       # set-like dict of directories skipped by scan()
        self.projectCommands = []   # commands executed for every prepared project
        self.commands = []          # top-level launcher commands for run()
        self.dryRun = False
        self.createdDirs = {}       # directories "created" during a dry run
        self.mountedDirs = []       # active mount targets, newest first
        self.mountedSources = []    # sources parallel to mountedDirs
        self.projects = []

    def file_append(self, file, line):
        """Append *line* to *file*; "-" prints to stdout, a falsy file is a no-op."""
        if file == "-":
            print(line)
        elif file:
            # 'with' guarantees the handle is closed even if the write fails.
            with open(file, 'a') as f:
                print(str(line), file=f)

    def now(self):
        """Current UTC time as an ISO-8601 string (naive)."""
        return datetime.datetime.utcnow().isoformat()

    def try_file_append(self, files, line, fileType, timeStamp):
        """Write *line* to each distinct candidate in *files* until one succeeds.

        A failed write is converted into an extra error line that is prepended
        and retried against the remaining candidates, so write errors are not
        silently lost.
        """
        lines = [str(line)]
        if timeStamp:
            lines[0] = self.now() + " " + lines[0]
        tried = {}
        for file in files:
            if file in tried:
                continue
            tried[file] = True
            try:
                for l in lines:
                    self.file_append(file, l)
                break
            except Exception as e:
                lines.insert(0, _("Cannot write to file (%s, %s), error: %s") % (fileType, file, str(e)))
                if timeStamp:
                    lines[0] = self.now() + " " + lines[0]

    def log(self, line):
        """Timestamped log line; falls back from logFile to outputFile to stdout."""
        self.try_file_append([self.logFile, self.outputFile, "-"], line, "log", True)

    def info(self, line):
        self.log(_("Info: ") + line)

    def warning(self, line):
        self.log(_("Warning: ") + line)

    def error(self, line):
        self.log(_("Error: ") + line)

    def output(self, line):
        """Raw (untimestamped) output line for captured command output."""
        self.try_file_append([self.logFile, self.outputFile, "-"], line, "output", False)

    def outHeader(self, line):
        """Banner written before a command's captured output."""
        self.output("-----------------------------------------------")
        self.output("-- " + _("Begin"))
        self.output("-- " + self.now())
        self.output("-- " + str(line))
        self.output("-----------------------------------------------")

    def outFooter(self, line):
        """Banner written after a command's captured output.

        NOTE(review): *line* is accepted for symmetry with outHeader but is
        currently unused — kept to preserve the call signature.
        """
        self.output("")
        self.output("-----------------------------------------------")
        self.output("-- " + _("End"))
        self.output("-- " + self.now())
        self.output("-----------------------------------------------")
        self.output("")

    def check_executable(self, command, comment):
        """Run *command* and report whether it succeeded.

        Also treats a missing/unlaunchable binary (OSError) as a failed check
        instead of crashing (the original only caught CalledProcessError).
        """
        result = False
        try:
            subprocess.check_call(command)
            result = True
        except (subprocess.CalledProcessError, OSError):
            pass
        print(_("Check %s (%s): %s") % (command[0], comment, ("success" if result else "fail")))
        return result

    def setPidFile(self, pidFile):
        """Switch to a new pid (lock) file.

        Returns False when another live instance owns *pidFile*, otherwise
        True.  A stale pid file (its pid no longer exists) is removed.
        """
        if self.pidFile == pidFile:
            # Nothing to do; report success (the original returned None here,
            # which callers using `if not setPidFile(...)` treated as failure).
            return True
        if self.pidFile and os.path.isfile(self.pidFile):
            # Remove the lock file we previously owned (the original tested
            # the *new* path here by mistake).
            os.remove(self.pidFile)
        if pidFile and os.path.isfile(pidFile):
            with open(pidFile, 'r') as f:
                pid = int(f.read())
            try:
                os.kill(pid, 0)  # signal 0: existence probe, sends nothing
                self.error(_("Another instance already launched"))
                return False
            except OSError:
                pass  # stale pid — the process no longer exists
            try:
                os.remove(pidFile)
            except OSError:
                pass
        self.pidFile = pidFile
        if self.pidFile:
            with open(self.pidFile, 'w') as f:
                f.write(str(os.getpid()))
        return True

    def run(self):
        """Process self.commands sequentially (see ConfigParser for syntax)."""
        self.info(_("Launch"))
        try:
            for command in self.commands:
                self.info(_("Process: ") + command[0] + " " + command[1])
                if command[0] == "pid":
                    if not self.setPidFile(os.path.abspath(command[1])):
                        break
                elif command[0] == "log":
                    prev = self.logFile
                    self.logFile = os.path.abspath(command[1]) if command[1] != "-" else "-"
                    if self.logFile != prev:
                        self.info(_("Start log"))
                elif command[0] == "out":
                    self.outputFile = os.path.abspath(command[1]) if command[1] != "-" else "-"
                elif command[0] == "src":
                    self.sourceDir = os.path.abspath(command[1])
                elif command[0] == "render":
                    self.renderDir = os.path.abspath(command[1])
                elif command[0] == "mount":
                    self.mountDir = os.path.abspath(command[1])
                elif command[0] == "user":
                    self.user = command[1]
                elif command[0] == "excl-dir":
                    self.excludeDirs[command[1]] = True
                elif command[0] == "excl-clear":
                    self.excludeDirs = {}
                elif command[0] == "prj-cmd":
                    self.projectCommands.append(command[1])
                elif command[0] == "prj-clear":
                    self.projectCommands = []
                elif command[0] == "run":
                    self.runProjects()
                elif command[0] == "run-global-cmd":
                    self.runCommand(command[1], self.user, self.sourceDir)
                else:
                    self.error(_("Unknown key: ") + command[0])
        except Exception:
            # Boundary handler: log and fall through to cleanup.  Narrowed
            # from a bare `except:` so KeyboardInterrupt/SystemExit escape.
            self.error(_("Unhandled exception"))
        self.umountAll()
        self.setPidFile("")
        self.info(_("Done"))

    def runProjects(self):
        """Scan sourceDir for projects, mount them and run projectCommands."""
        self.info(_("Process projects in: ") + self.sourceDir)
        if not self.sourceDir:
            self.error(_("Source directory is not set"))
            return
        if not self.mountDir:
            self.error(_("Mount directory is not set"))
            return
        if not self.renderDir:
            self.error(_("Render directory is not set"))
            return
        self.umountAll()
        self.info(_("Search projects in: ") + self.sourceDir)
        self.projects = []
        self.scan()
        projects = self.projects
        self.projects = []
        if len(projects) == 0:
            self.warning(_("No projects found"))
            return
        try:
            self.createDirectory(self.mountDir)
            self.mount(self.mountDir, self.sourceDir)
        except Exception as e:
            self.error(_("Cannot mount source directory %s, error: %s") % (self.sourceDir, str(e)))
        prepared = []
        for project in projects:
            self.info(_("Prepare project: ") + project)
            try:
                # Re-root the project path under mountDir/renderDir.
                projectLocal = project[len(self.sourceDir):]
                while len(projectLocal) and projectLocal[0] == os.path.sep:
                    projectLocal = projectLocal[1:]
                renderDir = os.path.join(self.renderDir, projectLocal)
                mountDir = os.path.join(self.mountDir, projectLocal)
                self.createDirectory(renderDir)
                self.createDirectory(os.path.join(mountDir, "render"))
                self.mount(os.path.join(mountDir, "render"), renderDir)
                prepared.append(mountDir)
            except Exception as e:
                self.error(_("Cannot prepare project %s, error: %s") % (project, str(e)))
        self.projects = []
        for project in prepared:
            self.info(_("Run project: ") + project)
            for command in self.projectCommands:
                self.runCommand(command, self.user, project)
        self.umountAll()

    def scan(self, sourceDir = None, level = 0):
        """Recursively collect directories containing project.conf into self.projects."""
        if level >= 256:
            self.error(_("Max recurse level reached (%s): %s") % ("scan", str(level)))
            return
        if not sourceDir:
            sourceDir = self.sourceDir
        if sourceDir in self.excludeDirs:
            return
        if os.path.isfile(os.path.join(sourceDir, "project.conf")):
            self.info(_("Project found: ") + sourceDir)
            self.projects.append(sourceDir)
        try:
            for file in sorted(os.listdir(sourceDir)):
                if os.path.isdir(os.path.join(sourceDir, file)):
                    self.scan(os.path.join(sourceDir, file), level + 1)
        except Exception as e:
            self.error(_("Cannot scan directory %s, error: %s") % (sourceDir, str(e)))

    def runCommand(self, command, user = None, workDir = None, raiseException = False):
        """Run *command* (list or shell string), capturing output to outputFile.

        When *user* is given the command is executed via sudo as that user.
        Raises the underlying exception only when raiseException is True;
        otherwise failures are logged.
        """
        self.info(_("Run command: ") + str(command))
        if self.dryRun:
            return
        if user:
            if not isinstance(command, str):
                command = subprocess.list2cmdline(command)
            # Run as the *requested* user (the original mistakenly used
            # self.user here, ignoring the parameter).
            command = ["sudo", "-u", user, "/bin/bash", "-c", command]
        self.outHeader(str(command))
        exception = None
        f = None  # defined up-front so 'finally' cannot raise NameError if open() fails
        try:
            f = open(self.outputFile, 'ab') if self.outputFile != "-" else None
            subprocess.check_call(command, stdout = f, stderr = f, cwd = workDir)
        except Exception as e:
            exception = e
        finally:
            if f:
                f.close()
        self.outFooter(str(command))
        if exception:
            if raiseException:
                raise exception
            else:
                self.error(_("Run command failed (%s), error: %s") % (str(command), str(exception)))

    def isDirectory(self, path, level = 0):
        """True if *path* is (or, in a dry run, would be) a directory.

        In dry-run mode paths under a recorded mount target are translated to
        their source equivalent and re-checked.
        """
        if level >= 10:
            self.error(_("Max recurse level reached (%s): %s") % ("isDirectory", str(level)))
            return False
        if self.dryRun:
            for i in range(len(self.mountedDirs)):
                t = self.mountedDirs[i]
                s = self.mountedSources[i]
                if len(t) and len(s):
                    if t[-1] != os.path.sep:
                        t = t + os.path.sep
                    if s[-1] != os.path.sep:
                        s = s + os.path.sep
                    if path[0:len(t)] == t and self.isDirectory(s + path[len(t):], level + 1):
                        return True
        if os.path.isdir(path):
            return True
        return False  # explicit (the original fell through, returning None)

    def createDirectory(self, path):
        """Create *path* and any missing parents, chowning each to self.user."""
        if not self.isDirectory(path) and path not in self.createdDirs:
            self.createDirectory(os.path.dirname(path))
            self.info(_("Create directory: %s") % path)
            if self.dryRun:
                self.createdDirs[path] = True
            else:
                os.mkdir(path)
                try:
                    if self.user:
                        pw = pwd.getpwnam(self.user)
                        os.chown(path, pw.pw_uid, pw.pw_gid)
                except Exception as e:
                    self.error(_("Cannot change owner of directry (%s), error: %s") % (path, str(e)))

    def mount(self, targetDir, sourceDir):
        """Bind-mount *sourceDir* onto *targetDir* and record it (newest first)."""
        self.info(_("Mount directory '%s' to '%s'") % (sourceDir, targetDir))
        self.runCommand(["mount", "--bind", sourceDir, targetDir], raiseException = True)
        self.mountedDirs.insert(0, targetDir)
        self.mountedSources.insert(0, sourceDir)

    def umount(self, targetDir):
        """Unmount *targetDir* and drop it from the mount bookkeeping."""
        self.info(_("Unmount directory: ") + targetDir)
        self.runCommand(["umount", targetDir], raiseException = True)
        index = self.mountedDirs.index(targetDir)
        self.mountedSources.pop(index)
        self.mountedDirs.pop(index)

    def umountAll(self):
        """Unmount everything recorded; on failure the entry is dropped anyway."""
        self.info(_("Unmount all"))
        while len(self.mountedDirs):
            try:
                self.umount(self.mountedDirs[0])
            except Exception as e:
                self.error(_("Cannot unmount directory (%s), error %s") % (self.mountedDirs[0], str(e)))
                self.mountedSources.pop(0)
                self.mountedDirs.pop(0)
class ConfigParser:
    """Parses launcher configuration text into [keyword, argument] commands.

    Supports '#' comments, single/double quoting, backslash screening and
    backslash line continuation.  Parsed commands accumulate in
    ``self.commands``.
    """

    def __init__(self, file = None, text = None):
        # file: path to read the config from; text: literal config text.
        # Parsing happens immediately on construction.
        self.commands = []
        self.index = 0   # current scan position within self.text
        self.text = ""
        if file:
            f = open(file, "r")
            self.text = f.read()
        elif text:
            self.text = text
        self.parse()

    def parse(self):
        # Consume the whole text one logical line at a time.
        while(self.index < len(self.text)):
            self.parseLine()

    def parseLine(self):
        """Consume one logical line (including continuations) from self.text.

        Scans character by character tracking:
          - screened: previous char was an unescaped backslash,
          - quotes:   stack of currently-open quote characters,
          - comment:  index where a '#' comment began (-1 if none),
          - nlscreen: index of a trailing backslash that screens the newline.
        Each physical line contributes its pre-comment / pre-continuation part;
        a screened newline loops to append the next physical line.
        """
        line = ""
        while True:
            begin = self.index
            comment = -1
            nlscreen = -1
            nl = len(self.text)   # index just past the line terminator
            quotes = ""
            for i in range(begin, len(self.text)):
                screened = i > 0 and self.text[i-1] == '\\' and (i < 2 or self.text[i-2] != '\\')
                rprev = i > 0 and self.text[i-1] == '\r'
                nprev = i > 0 and self.text[i-1] == '\n'
                rcurr = self.text[i] == '\r'
                ncurr = self.text[i] == '\n'
                rnext = i+1 < len(self.text) and self.text[i+1] == '\r'
                nnext = i+1 < len(self.text) and self.text[i+1] == '\n'
                # End of line: lone \r or \n, or the second char of a \r\n /
                # \n\r pair.
                if (rcurr and (nprev or not nnext)) or (ncurr and (rprev or not rnext)):
                    nl = i+1
                    break
                # Non-space after a continuation backslash cancels it.
                if comment < 0 and nlscreen >= 0 and not self.text[i].isspace():
                    nlscreen = -1
                # Unescaped quote: toggle it on the quote stack.
                if comment < 0 and not screened and (self.text[i] == '"' or self.text[i] == '\''):
                    if quotes and self.text[i] == quotes[-1]:
                        quotes = quotes[0:-1]
                    else:
                        quotes = quotes + self.text[i]
                    continue
                # Unescaped backslash outside quotes: candidate continuation.
                if comment < 0 and not quotes and nlscreen < 0 and not screened and self.text[i] == '\\':
                    nlscreen = i
                    continue
                # Unescaped '#' outside quotes: rest of the line is a comment.
                if comment < 0 and not quotes and not screened and self.text[i] == '#':
                    comment = i
                    continue
            # Append the meaningful portion of this physical line.
            if nlscreen >= 0:
                line = line + self.text[begin:nlscreen]
            elif comment >= 0:
                line = line + self.text[begin:comment]
            else:
                line = line + self.text[begin:nl]
            self.index = nl
            if nlscreen < 0:
                break   # no continuation: the logical line is complete
        command = parseCommand(line)
        if command:
            self.commands.append(command)
def parseCommand(line):
    """Split *line* into [keyword, argument] at its first whitespace run.

    The argument may itself contain whitespace; both parts are stripped.
    Returns None for a blank (or whitespace-only) line.  A line with no
    whitespace yields an empty argument string.
    """
    stripped = line.strip()
    cut = next((pos for pos, ch in enumerate(stripped) if ch.isspace()), len(stripped))
    if cut == 0:
        return None
    return [stripped[:cut], stripped[cut:].strip()]
def makeArgsParser():
| |
*args)
return api_call
# Thin pass-through wrappers: each function forwards its arguments to the
# like-named API via _invoke_api (the API name is the hyphenated form of the
# function name) and returns the call result directly.

def net_firewall_config_get(*args):
    return _invoke_api('net-firewall-config-get', *args)

def net_firewall_config_get_iter(*args):
    return _invoke_api('net-firewall-config-get-iter', *args)

def net_firewall_config_modify(*args):
    return _invoke_api('net-firewall-config-modify', *args)

def net_firewall_config_modify_iter(*args):
    return _invoke_api('net-firewall-config-modify-iter', *args)

def net_firewall_policy_create(*args):
    return _invoke_api('net-firewall-policy-create', *args)

def net_firewall_policy_destroy(*args):
    return _invoke_api('net-firewall-policy-destroy', *args)

def net_firewall_policy_get_iter(*args):
    return _invoke_api('net-firewall-policy-get-iter', *args)

def net_firewall_policy_modify(*args):
    return _invoke_api('net-firewall-policy-modify', *args)

def net_hosts_get_iter(*args):
    return _invoke_api('net-hosts-get-iter', *args)

def net_interface_create(*args):
    return _invoke_api('net-interface-create', *args)

def net_interface_delete(*args):
    return _invoke_api('net-interface-delete', *args)

def net_interface_get_iter(*args):
    return _invoke_api('net-interface-get-iter', *args)

def net_interface_migrate(*args):
    return _invoke_api('net-interface-migrate', *args)

def net_interface_modify(*args):
    return _invoke_api('net-interface-modify', *args)

def net_interface_modify_iter(*args):
    return _invoke_api('net-interface-modify-iter', *args)

def net_interface_revert(*args):
    return _invoke_api('net-interface-revert', *args)
# Thin pass-through wrappers around _invoke_api; each forwards its arguments
# to the hyphenated form of the function's own name.

def net_ipspaces_assign_vserver(*args):
    return _invoke_api('net-ipspaces-assign-vserver', *args)

def net_ipspaces_create(*args):
    return _invoke_api('net-ipspaces-create', *args)

def net_ipspaces_destroy(*args):
    return _invoke_api('net-ipspaces-destroy', *args)

def net_ipspaces_get(*args):
    return _invoke_api('net-ipspaces-get', *args)

def net_ipspaces_get_iter(*args):
    return _invoke_api('net-ipspaces-get-iter', *args)

def net_ipspaces_rename(*args):
    return _invoke_api('net-ipspaces-rename', *args)

def net_ndp_active_neighbor_destroy(*args):
    return _invoke_api('net-ndp-active-neighbor-destroy', *args)

def net_ndp_active_neighbor_get(*args):
    return _invoke_api('net-ndp-active-neighbor-get', *args)

def net_ndp_active_neighbor_get_iter(*args):
    return _invoke_api('net-ndp-active-neighbor-get-iter', *args)

def net_ndp_default_router_delete_all(*args):
    return _invoke_api('net-ndp-default-router-delete-all', *args)

def net_ndp_default_router_get(*args):
    return _invoke_api('net-ndp-default-router-get', *args)

def net_ndp_default_router_get_iter(*args):
    return _invoke_api('net-ndp-default-router-get-iter', *args)

def net_ndp_neighbor_get_iter(*args):
    return _invoke_api('net-ndp-neighbor-get-iter', *args)

def net_ndp_prefix_delete_all(*args):
    return _invoke_api('net-ndp-prefix-delete-all', *args)

def net_ndp_prefix_get(*args):
    return _invoke_api('net-ndp-prefix-get', *args)

def net_ndp_prefix_get_iter(*args):
    return _invoke_api('net-ndp-prefix-get-iter', *args)

def net_options_get(*args):
    return _invoke_api('net-options-get', *args)

def net_options_modify(*args):
    return _invoke_api('net-options-modify', *args)

def net_placement_cache_delete(*args):
    return _invoke_api('net-placement-cache-delete', *args)

def net_placement_cache_get_iter(*args):
    return _invoke_api('net-placement-cache-get-iter', *args)

def net_placement_discover(*args):
    return _invoke_api('net-placement-discover', *args)
# Thin pass-through wrappers around _invoke_api; each forwards its arguments
# to the hyphenated form of the function's own name.

def net_port_broadcast_domain_add_ports(*args):
    return _invoke_api('net-port-broadcast-domain-add-ports', *args)

def net_port_broadcast_domain_create(*args):
    return _invoke_api('net-port-broadcast-domain-create', *args)

def net_port_broadcast_domain_destroy(*args):
    return _invoke_api('net-port-broadcast-domain-destroy', *args)

def net_port_broadcast_domain_get(*args):
    return _invoke_api('net-port-broadcast-domain-get', *args)

def net_port_broadcast_domain_get_iter(*args):
    return _invoke_api('net-port-broadcast-domain-get-iter', *args)

def net_port_broadcast_domain_merge(*args):
    return _invoke_api('net-port-broadcast-domain-merge', *args)

def net_port_broadcast_domain_modify(*args):
    return _invoke_api('net-port-broadcast-domain-modify', *args)

def net_port_broadcast_domain_remove_ports(*args):
    return _invoke_api('net-port-broadcast-domain-remove-ports', *args)

def net_port_broadcast_domain_rename(*args):
    return _invoke_api('net-port-broadcast-domain-rename', *args)

def net_port_broadcast_domain_split(*args):
    return _invoke_api('net-port-broadcast-domain-split', *args)

def net_port_delete(*args):
    return _invoke_api('net-port-delete', *args)

def net_port_get(*args):
    return _invoke_api('net-port-get', *args)

def net_port_get_iter(*args):
    return _invoke_api('net-port-get-iter', *args)

def net_port_ifgrp_add_port(*args):
    return _invoke_api('net-port-ifgrp-add-port', *args)

def net_port_ifgrp_create(*args):
    return _invoke_api('net-port-ifgrp-create', *args)

def net_port_ifgrp_destroy(*args):
    return _invoke_api('net-port-ifgrp-destroy', *args)

def net_port_ifgrp_get(*args):
    return _invoke_api('net-port-ifgrp-get', *args)

def net_port_ifgrp_remove_port(*args):
    return _invoke_api('net-port-ifgrp-remove-port', *args)

def net_port_modify(*args):
    return _invoke_api('net-port-modify', *args)

def net_port_modify_iter(*args):
    return _invoke_api('net-port-modify-iter', *args)

def net_routes_get_iter(*args):
    return _invoke_api('net-routes-get-iter', *args)

def net_routes_lifs_get_iter(*args):
    return _invoke_api('net-routes-lifs-get-iter', *args)
# Thin pass-through wrappers around _invoke_api; each forwards its arguments
# to the hyphenated form of the function's own name.

def net_routing_group_route_create(*args):
    return _invoke_api('net-routing-group-route-create', *args)

def net_routing_group_route_destroy(*args):
    return _invoke_api('net-routing-group-route-destroy', *args)

def net_routing_group_route_get_iter(*args):
    return _invoke_api('net-routing-group-route-get-iter', *args)

def net_san_lif_placement_get(*args):
    return _invoke_api('net-san-lif-placement-get', *args)

def net_subnet_add_ranges(*args):
    return _invoke_api('net-subnet-add-ranges', *args)

def net_subnet_create(*args):
    return _invoke_api('net-subnet-create', *args)

def net_subnet_destroy(*args):
    return _invoke_api('net-subnet-destroy', *args)

def net_subnet_get(*args):
    return _invoke_api('net-subnet-get', *args)

def net_subnet_get_iter(*args):
    return _invoke_api('net-subnet-get-iter', *args)

def net_subnet_modify(*args):
    return _invoke_api('net-subnet-modify', *args)

def net_subnet_remove_ranges(*args):
    return _invoke_api('net-subnet-remove-ranges', *args)

def net_subnet_rename(*args):
    return _invoke_api('net-subnet-rename', *args)

def net_traceroute6(*args):
    return _invoke_api('net-traceroute6', *args)

def net_vlan_create(*args):
    return _invoke_api('net-vlan-create', *args)

def net_vlan_delete(*args):
    return _invoke_api('net-vlan-delete', *args)

def net_vlan_get(*args):
    return _invoke_api('net-vlan-get', *args)

def net_vlan_get_iter(*args):
    return _invoke_api('net-vlan-get-iter', *args)

def netgroups_file_delete(*args):
    return _invoke_api('netgroups-file-delete', *args)

def netgroups_file_get(*args):
    return _invoke_api('netgroups-file-get', *args)

def netgroups_file_get_iter(*args):
    return _invoke_api('netgroups-file-get-iter', *args)

def nfs_all_flash_optimized_get(*args):
    return _invoke_api('nfs-all-flash-optimized-get', *args)

def nfs_all_flash_optimized_get_iter(*args):
    return _invoke_api('nfs-all-flash-optimized-get-iter', *args)

def nfs_service_get_create_defaults(*args):
    return _invoke_api('nfs-service-get-create-defaults', *args)

def nfs_service_get_iter(*args):
    return _invoke_api('nfs-service-get-iter', *args)

def nis_get_iter(*args):
    return _invoke_api('nis-get-iter', *args)
def ntdtest_action_alt_simpleget(*args):
api_call = _invoke_api('ntdtest-action-alt-simpleget', *args)
return api_call
def ntdtest_action_alt_simpleget_optional(*args):
api_call = _invoke_api('ntdtest-action-alt-simpleget-optional', *args)
return api_call
def ntdtest_action_only_doit(*args):
api_call = _invoke_api('ntdtest-action-only-doit', *args)
return api_call
def ntdtest_action_only_doit_async(*args):
api_call = _invoke_api('ntdtest-action-only-doit-async', *args)
return api_call
def ntdtest_action_only_dothat(*args):
api_call = _invoke_api('ntdtest-action-only-dothat', *args)
return api_call
def ntdtest_action_simpleget(*args):
api_call = _invoke_api('ntdtest-action-simpleget', *args)
return api_call
def ntdtest_action_top_level_create(*args):
api_call = _invoke_api('ntdtest-action-top-level-create', *args)
return api_call
def ntdtest_action_top_level_create_alt(*args):
api_call = _invoke_api('ntdtest-action-top-level-create-alt', *args)
return api_call
def ntdtest_dnested_get(*args):
api_call = _invoke_api('ntdtest-dnested-get', *args)
return api_call
def ntdtest_dnested_get_iter(*args):
api_call = _invoke_api('ntdtest-dnested-get-iter', *args)
return api_call
def ntdtest_empty_tags_get_1(*args):
api_call = _invoke_api('ntdtest-empty-tags-get-1', *args)
return api_call
def ntdtest_empty_tags_get_10(*args):
api_call = _invoke_api('ntdtest-empty-tags-get-10', *args)
return api_call
def ntdtest_empty_tags_get_11(*args):
api_call = _invoke_api('ntdtest-empty-tags-get-11', *args)
return api_call
def ntdtest_empty_tags_get_12(*args):
api_call = _invoke_api('ntdtest-empty-tags-get-12', *args)
return api_call
def ntdtest_empty_tags_get_13(*args):
api_call = _invoke_api('ntdtest-empty-tags-get-13', *args)
return api_call
def ntdtest_empty_tags_get_2(*args):
api_call = _invoke_api('ntdtest-empty-tags-get-2', *args)
return api_call
def ntdtest_empty_tags_get_3(*args):
api_call = _invoke_api('ntdtest-empty-tags-get-3', *args)
return api_call
def ntdtest_empty_tags_get_4(*args):
api_call = _invoke_api('ntdtest-empty-tags-get-4', *args)
return api_call
def ntdtest_empty_tags_get_5(*args):
api_call = _invoke_api('ntdtest-empty-tags-get-5', *args)
return api_call
def ntdtest_empty_tags_get_6(*args):
api_call = _invoke_api('ntdtest-empty-tags-get-6', *args)
return api_call
def ntdtest_empty_tags_get_7(*args):
api_call = _invoke_api('ntdtest-empty-tags-get-7', *args)
return api_call
def ntdtest_empty_tags_get_8(*args):
api_call = _invoke_api('ntdtest-empty-tags-get-8', *args)
return api_call
def ntdtest_empty_tags_get_9(*args):
api_call = _invoke_api('ntdtest-empty-tags-get-9', *args)
return api_call
# Thin auto-generated wrappers for the 'ntdtest-extensive-*' API family;
# each forwards directly to _invoke_api() with the hyphenated call name.
def ntdtest_extensive_alternate_create_1(*args):
    return _invoke_api('ntdtest-extensive-alternate-create-1', *args)

def ntdtest_extensive_alternate_create_2(*args):
    return _invoke_api('ntdtest-extensive-alternate-create-2', *args)

def ntdtest_extensive_alternate_destroy_1(*args):
    return _invoke_api('ntdtest-extensive-alternate-destroy-1', *args)

def ntdtest_extensive_alternate_get_1(*args):
    return _invoke_api('ntdtest-extensive-alternate-get-1', *args)

def ntdtest_extensive_alternate_get_2(*args):
    return _invoke_api('ntdtest-extensive-alternate-get-2', *args)

def ntdtest_extensive_alternate_modify_1(*args):
    return _invoke_api('ntdtest-extensive-alternate-modify-1', *args)

def ntdtest_extensive_default_create(*args):
    return _invoke_api('ntdtest-extensive-default-create', *args)

def ntdtest_extensive_default_destroy(*args):
    return _invoke_api('ntdtest-extensive-default-destroy', *args)

def ntdtest_extensive_default_get(*args):
    return _invoke_api('ntdtest-extensive-default-get', *args)

def ntdtest_extensive_default_modify(*args):
    return _invoke_api('ntdtest-extensive-default-modify', *args)

def ntdtest_extensive_destroy_iter(*args):
    return _invoke_api('ntdtest-extensive-destroy-iter', *args)

def ntdtest_extensive_get_iter(*args):
    return _invoke_api('ntdtest-extensive-get-iter', *args)

def ntdtest_extensive_method1_alternate(*args):
    return _invoke_api('ntdtest-extensive-method1-alternate', *args)

def ntdtest_extensive_method1_default(*args):
    return _invoke_api('ntdtest-extensive-method1-default', *args)

def ntdtest_extensive_method2_alternate(*args):
    return _invoke_api('ntdtest-extensive-method2-alternate', *args)

def ntdtest_extensive_method2_default(*args):
    return _invoke_api('ntdtest-extensive-method2-default', *args)

def ntdtest_extensive_method3_default(*args):
    return _invoke_api('ntdtest-extensive-method3-default', *args)

def ntdtest_extensive_method4_alt(*args):
    return _invoke_api('ntdtest-extensive-method4-alt', *args)

def ntdtest_extensive_method4_default(*args):
    return _invoke_api('ntdtest-extensive-method4-default', *args)

def ntdtest_extensive_method5_alternate(*args):
    return _invoke_api('ntdtest-extensive-method5-alternate', *args)

def ntdtest_extensive_method6_alternate(*args):
    return _invoke_api('ntdtest-extensive-method6-alternate', *args)

def ntdtest_extensive_method6_alternate_1(*args):
    return _invoke_api('ntdtest-extensive-method6-alternate-1', *args)

def ntdtest_extensive_method6_default(*args):
    return _invoke_api('ntdtest-extensive-method6-default', *args)

def ntdtest_extensive_modify_iter(*args):
    return _invoke_api('ntdtest-extensive-modify-iter', *args)
# Thin auto-generated wrappers for the 'ntdtest-folding-*' API family;
# each forwards directly to _invoke_api() with the hyphenated call name.
def ntdtest_folding_create(*args):
    return _invoke_api('ntdtest-folding-create', *args)

def ntdtest_folding_deep_arrayof_get_iter(*args):
    return _invoke_api('ntdtest-folding-deep-arrayof-get-iter', *args)

def ntdtest_folding_default_get(*args):
    return _invoke_api('ntdtest-folding-default-get', *args)

def ntdtest_folding_destroy(*args):
    return _invoke_api('ntdtest-folding-destroy', *args)

def ntdtest_folding_get(*args):
    return _invoke_api('ntdtest-folding-get', *args)

def ntdtest_folding_get_collapsed_and_arrayof(*args):
    return _invoke_api('ntdtest-folding-get-collapsed-and-arrayof', *args)

def ntdtest_folding_get_deep_element(*args):
    return _invoke_api('ntdtest-folding-get-deep-element', *args)

def ntdtest_folding_get_element_no_array(*args):
    return _invoke_api('ntdtest-folding-get-element-no-array', *args)

def ntdtest_folding_get_full_list(*args):
    return _invoke_api('ntdtest-folding-get-full-list', *args)

def ntdtest_folding_get_iter(*args):
    return _invoke_api('ntdtest-folding-get-iter', *args)

def ntdtest_folding_get_iter_mixed(*args):
    return _invoke_api('ntdtest-folding-get-iter-mixed', *args)

def ntdtest_folding_get_multiple_field_list_shallow(*args):
    return _invoke_api('ntdtest-folding-get-multiple-field-list-shallow', *args)

def ntdtest_folding_get_multiple_field_list_top(*args):
    return _invoke_api('ntdtest-folding-get-multiple-field-list-top', *args)

def ntdtest_folding_get_multiple_fields_list_array_and_collapsed(*args):
    return _invoke_api('ntdtest-folding-get-multiple-fields-list-array-and-collapsed', *args)

def ntdtest_folding_get_shallow_element(*args):
    return _invoke_api('ntdtest-folding-get-shallow-element', *args)

def ntdtest_folding_get_single_field_list(*args):
    return _invoke_api('ntdtest-folding-get-single-field-list', *args)

def ntdtest_folding_list_info(*args):
    return _invoke_api('ntdtest-folding-list-info', *args)

def ntdtest_folding_list_info_alt(*args):
    return _invoke_api('ntdtest-folding-list-info-alt', *args)

def ntdtest_folding_list_info_deep_element(*args):
    return _invoke_api('ntdtest-folding-list-info-deep-element', *args)

def ntdtest_folding_multiple_arrays_create(*args):
    return _invoke_api('ntdtest-folding-multiple-arrays-create', *args)

def ntdtest_folding_multiple_arrays_destroy(*args):
    return _invoke_api('ntdtest-folding-multiple-arrays-destroy', *args)

def ntdtest_folding_multiple_arrays_get_iter(*args):
    return _invoke_api('ntdtest-folding-multiple-arrays-get-iter', *args)
# Thin auto-generated wrappers for the 'ntdtest-get'/'ntdtest-iterfrom-*' and
# 'ntdtest-iternoread-*' API families; each forwards to _invoke_api().
def ntdtest_get(*args):
    return _invoke_api('ntdtest-get', *args)

def ntdtest_get_iter(*args):
    return _invoke_api('ntdtest-get-iter', *args)

def ntdtest_iterfrom_alt_create(*args):
    return _invoke_api('ntdtest-iterfrom-alt-create', *args)

def ntdtest_iterfrom_alt_destroy(*args):
    return _invoke_api('ntdtest-iterfrom-alt-destroy', *args)

def ntdtest_iterfrom_alt_destroy_iter(*args):
    return _invoke_api('ntdtest-iterfrom-alt-destroy-iter', *args)

def ntdtest_iterfrom_alt_get(*args):
    return _invoke_api('ntdtest-iterfrom-alt-get', *args)

def ntdtest_iterfrom_alt_get_iter(*args):
    return _invoke_api('ntdtest-iterfrom-alt-get-iter', *args)

def ntdtest_iterfrom_alt_list_info(*args):
    return _invoke_api('ntdtest-iterfrom-alt-list-info', *args)

def ntdtest_iterfrom_alt_modify(*args):
    return _invoke_api('ntdtest-iterfrom-alt-modify', *args)

def ntdtest_iterfrom_alt_modify_iter(*args):
    return _invoke_api('ntdtest-iterfrom-alt-modify-iter', *args)

def ntdtest_iterfrom_create(*args):
    return _invoke_api('ntdtest-iterfrom-create', *args)

def ntdtest_iterfrom_destroy(*args):
    return _invoke_api('ntdtest-iterfrom-destroy', *args)

def ntdtest_iterfrom_destroy_iter(*args):
    return _invoke_api('ntdtest-iterfrom-destroy-iter', *args)

def ntdtest_iterfrom_dupe_create(*args):
    return _invoke_api('ntdtest-iterfrom-dupe-create', *args)

def ntdtest_iterfrom_dupe_destroy(*args):
    return _invoke_api('ntdtest-iterfrom-dupe-destroy', *args)

def ntdtest_iterfrom_dupe_destroy_iter(*args):
    return _invoke_api('ntdtest-iterfrom-dupe-destroy-iter', *args)

def ntdtest_iterfrom_dupe_get(*args):
    return _invoke_api('ntdtest-iterfrom-dupe-get', *args)

def ntdtest_iterfrom_dupe_get_iter(*args):
    return _invoke_api('ntdtest-iterfrom-dupe-get-iter', *args)

def ntdtest_iterfrom_dupe_list_info(*args):
    return _invoke_api('ntdtest-iterfrom-dupe-list-info', *args)

def ntdtest_iterfrom_dupe_modify(*args):
    return _invoke_api('ntdtest-iterfrom-dupe-modify', *args)

def ntdtest_iterfrom_dupe_modify_iter(*args):
    return _invoke_api('ntdtest-iterfrom-dupe-modify-iter', *args)

def ntdtest_iterfrom_get(*args):
    return _invoke_api('ntdtest-iterfrom-get', *args)

def ntdtest_iterfrom_get_iter(*args):
    return _invoke_api('ntdtest-iterfrom-get-iter', *args)

def ntdtest_iterfrom_list_info(*args):
    return _invoke_api('ntdtest-iterfrom-list-info', *args)

def ntdtest_iterfrom_modify(*args):
    return _invoke_api('ntdtest-iterfrom-modify', *args)

def ntdtest_iterfrom_modify_iter(*args):
    return _invoke_api('ntdtest-iterfrom-modify-iter', *args)

def ntdtest_iternoread_create(*args):
    return _invoke_api('ntdtest-iternoread-create', *args)

def ntdtest_iternoread_destroy(*args):
    return _invoke_api('ntdtest-iternoread-destroy', *args)

def ntdtest_iternoread_destroy_iter(*args):
    return _invoke_api('ntdtest-iternoread-destroy-iter', *args)
def ntdtest_iternoread_get(*args):
api_call = | |
<filename>tools/ins2000_parser.py<gh_stars>10-100
import os
import sys
import argparse
import json
import struct
import math
import collections
import datetime
import time
PI = 3.1415926535897932  # pi, double precision; used for deg<->rad conversions
# WGS-84 reference ellipsoid parameters (SI units)
WGS84 = {
    'a': 6378137.0,               # semi-major axis [m]
    'b': 6356752.3142,            # semi-minor axis [m]
    'f': 0.0033528106643315515,   # flattening
    'e': 0.081819190837555025,    # first eccentricity
    'e2': 0.0066943799893122479,  # first eccentricity squared
    'wie': 7.2922115147e-5,       # earth rotation rate [rad/s]
    'GM': 398600441800000.00      # gravitational constant * earth mass [m^3/s^2]
}
# Per-LSB scale factors for the IMU types in the `rates` table below.
# Names like P2_33 read as "2 to the power -33"; the *_F / *_DEG variants
# presumably fold in a feet->metres or rad->deg factor -- TODO confirm
# against the sensor ICDs.
ACEINNA_GYRO = (0.005/64)
ACEINNA_ACC = (0.005*9.80665/4000)
P2_33 = 1.164153218269348E-10
P2_33_DEG = 6.670106611340576E-09
P2_27_F = 2.270936965942383E-09
P2_29_F = 5.677342414855957E-10
P2_29 = 1.862645149230957E-09
FSAS_GYRO = 1.085069444444445E-07
FSAS_ACC = 1.525878906250000E-06
ISA100C_GYRO = 5.729577951308233E-08
ISA100C_ACC = 2.0E-8
RATES_SIZE = 26  # number of rows in `rates`; keep in sync with the table
# Columns (as consumed by INS2000Parser.trace_rawimusx):
#   [imu type id, sample rate [Hz], gyro scale, accel scale]
# Entries holding 100 / 2.0 in the scale columns look like placeholder
# values for types without real calibration data -- TODO confirm.
rates = [
    [0,100,2.0,100],
    [1,100,100,100],
    [3,200,ACEINNA_GYRO,ACEINNA_ACC],
    [4,100,100,100],
    [5,100,100,100],
    [8,200,100,100],
    [11,100,P2_33,P2_27_F],
    [12,100,100,100],
    [13,200,FSAS_GYRO,FSAS_ACC],
    [16,200,100,100],
    [19,200,100,100],
    [20,100,100,100],
    [26,200,ISA100C_GYRO,ISA100C_ACC],
    [27,100,100,100],
    [28,100,100,100],
    [31,200,100,100],
    [32,125,100,100],
    [33,200,100,100],
    [34,200,100,100],
    [38,400,100,100],
    [39,400,100,100],
    [41,125,100,100],
    [45,200,100,100],
    [52,200,100,100],
    [56,125,100,100],
    [58,100,P2_33_DEG,P2_29],
]
class INS2000Parser:
    """Parser for INS2000 binary logs framed with 0xAA 0x44 0x12/0x13 sync bytes.

    Splits the byte stream into packets, CRC-checks them, decodes payloads
    per the JSON config, and writes the per-type output files named below.
    """
    # Output file name suffixes; each is prefixed with "<binname>-" in run().
    gga_nmea_file = 'ins-gga.nmea'
    gnssposvel_txt_file = 'gnssposvel.txt'
    gnss_txt_file = 'gnss.txt'
    gnssvel_txt_file = 'gnssvel.txt'
    imu_txt_file = 'imu.txt'
    ins_txt_file = 'ins.txt'
    heading_txt_file = 'heading.txt'
    process_txt_file = 'process.txt'
    gnss_kml_file = 'gnss.kml'
    ins_kml_file = 'ins.kml'
    # NOTE(review): the string below is NOT the class docstring (it is not the
    # first statement in the class body), so it has no runtime effect.
    """Parse INS2000 data"""
    def __init__(self, bin_file, cfg_file):
        """Remember the input paths and reset all parser state.

        Args:
            bin_file: path to the raw INS2000 binary log.
            cfg_file: path to the JSON config describing packet payloads.
        """
        self.bin_file = bin_file
        self.cfg_file = cfg_file
        self.out_prefix = ''        # "<dir>/<binname>-" prefix for all outputs
        self.cfgs = None            # parsed JSON config (loaded in run())
        self.data = []              # most recent complete packet (bytearray)
        self.buf = []               # bytes of the packet currently being framed
        self.header_len = 0         # header length of the current packet
        self.data_len = 0           # payload length of the current packet
        self.message_id = 0         # message id of the current packet
        self.message_type = 0       # header message-type byte of the current packet
        self.ids = []
        self.idx = 0                # absolute index of the last byte consumed
        self.sync_pattern = collections.deque(3*[0], 3)  # rolling last-3-bytes window
        self.sync_state = 0         # 0 = hunting for sync bytes, 1 = inside a packet
        self.lastlctime = 0         # last whole-second IMU epoch emitted
        self.gnss_kmls = []         # GNSS position records kept for gnss.kml
        self.gnss_vels = []         # non-zero GNSS velocity records
        self.inspvaxs = []          # INS solution records kept for ins.kml
        self.files = {}             # open output handles keyed by file name
        self.packets = 0            # count of fully framed packets seen
def run(self):
"""start parse data"""
_, tmpfilename = os.path.split(self.bin_file)
shortname, _ = os.path.splitext(tmpfilename)
self.out_prefix = os.path.join(os.path.dirname(self.bin_file), shortname + '-')
self.init_files()
with open(self.cfg_file, 'r') as cfg_data:
self.cfgs = json.load(cfg_data)
with open(self.bin_file, 'rb') as buf_r:
while True:
tmp_data = buf_r.read(256)
if tmp_data:
self.parse_data(tmp_data)
else:
break
self.save_gnss_kml()
self.save_ins_kml()
self.close_files()
def init_files(self):
"""init all files"""
files = [self.gga_nmea_file, self.gnssposvel_txt_file, self.gnssposvel_txt_file,
self.gnss_txt_file, self.gnssvel_txt_file, self.imu_txt_file, self.ins_txt_file,
self.heading_txt_file, self.process_txt_file, self.gnss_kml_file, self.ins_kml_file]
for filename in files:
fo = open(self.out_prefix + filename, 'w')
self.files[filename] = fo
def close_files(self):
"""close all files"""
for _, fo in self.files.items():
fo.close()
    def append_process_txt(self, data):
        """Append one already-formatted line to the combined process.txt log."""
        self.write_file(self.process_txt_file, data)
    def parse_data(self, data):
        """Feed a chunk of raw bytes through the packet-framing state machine.

        Hunts for the 3-byte sync pattern (0xAA 0x44 0x12 long header or
        0xAA 0x44 0x13 short header), then accumulates bytes into self.buf
        until header length + payload length + 4 CRC bytes have arrived.
        Complete packets are CRC-checked and passed to decode_packet().
        State persists across calls, so chunk boundaries may fall anywhere.
        """
        for _, new_byte in enumerate(data):
            self.idx += 1
            self.sync_pattern.append(new_byte)
            if self.sync_state == 1:
                self.buf.append(new_byte)
                packet_len = len(self.buf)
                if packet_len == 6:
                    # message id occupies header bytes 4..5 (little-endian)
                    b_buf = bytearray(self.buf)
                    self.message_id, = struct.unpack('<H', b_buf[4:6])
                    if self.message_id == 1462:
                        # RAWIMUSX uses a fixed 12-byte short header
                        self.header_len = 12
                    else:
                        # long header carries its own length at offset 3
                        self.header_len = self.buf[3]
                if self.header_len == packet_len:
                    # full header received: extract payload length
                    if self.message_id == 1462:
                        self.message_type = 0
                        self.data_len = self.buf[3]
                    else:
                        self.message_type = self.buf[6]
                        b_buf = bytearray(self.buf)
                        self.data_len, = struct.unpack('<H', b_buf[8:10])
                if self.data_len > 0 and packet_len == self.data_len + self.header_len + 4:
                    # header + payload + 4-byte CRC complete: verify and decode
                    self.data = bytearray(self.buf)
                    self.packets += 1
                    if self.check_crc(self.data):
                        self.decode_packet(self.data)
                    self.buf = []
                    self.sync_state = 0
            else:
                # hunting: start a packet when the rolling window shows a sync
                if list(self.sync_pattern) == [0xAA, 0x44, 0x12] or list(self.sync_pattern) == [0xAA, 0x44, 0x13]:
                    self.buf = [self.sync_pattern[0], self.sync_pattern[1], self.sync_pattern[2]]
                    self.sync_state = 1
            continue
def check_crc(self, packet):
"""check packet crc"""
crc = self.crc(packet[:-4])
check_crc, = struct.unpack('<L', packet[-4:])
return crc == check_crc
def decode_packet(self, packet):
"""decode packet"""
message_id, = struct.unpack('<H', packet[4:6])
message_id_str = str(message_id)
if not message_id_str in self.cfgs["packetsTypeList"]:
return
message_str = self.cfgs["packetsTypeList"][message_id_str]
if not message_str in self.cfgs["outputPackets"]:
return
payload = self.cfgs["outputPackets"][message_str]["payload"]
bin_format, keys = self.output_fmt(payload)
try:
packets = struct.unpack(bin_format, packet[self.header_len:-4])
except Exception as e:
return
dict_pack = dict(zip(keys, packets))
dict_pack['header_message_id'] = message_id
if message_id == 1462:
dict_pack['header_gps_week'], = struct.unpack('<H', packet[6:8])
dict_pack['header_gps_seconds'], = struct.unpack('i', packet[8:12])
else:
dict_pack['header_gps_week'], = struct.unpack('<H', packet[14:16])
dict_pack['header_gps_seconds'], = struct.unpack('i', packet[16:20])
if message_id == 971:
self.trace_heading(dict_pack)
if message_id == 1465:
self.trace_gga_nmea(dict_pack)
if message_id == 1429:
self.trace_gnss_kml(dict_pack)
if message_id == 1430:
self.trace_gnss_vel(dict_pack)
if message_id == 1462:
self.trace_rawimusx(dict_pack)
def crc(self, data):
"""crc"""
crc_rst = 0
temp1 = 0
temp2 = 0
for byte_data in data:
temp1 = (crc_rst >> 8) & 0x00FFFFFF
temp2 = self.crc_value((crc_rst ^ byte_data) & 0xFF)
crc_rst = temp1 ^ temp2
return crc_rst
def crc_value(self, value):
"""Calculate a CRC value to be used by CRC calculation functions"""
j = 8
crc = value
while j > 0:
if crc & 1:
crc = (crc >> 1) ^ 0xEDB88320
else:
crc >>= 1
j -= 1
return crc
def output_fmt(self, payload):
"""generate struct format"""
packet_fmt = '<'
keys = []
for item in payload:
if item["type"] == "int8":
packet_fmt += 'b'
if item["type"] == "uint8":
packet_fmt += 'B'
if item["type"] == "bool":
packet_fmt += '?'
if item["type"] == "int16":
packet_fmt += 'h'
if item["type"] == "uint16":
packet_fmt += 'H'
if item["type"] == "int32":
packet_fmt += 'i'
if item["type"] == "uint32":
packet_fmt += 'I'
if item["type"] == "int64":
packet_fmt += 'q'
if item["type"] == "uint64":
packet_fmt += 'Q'
if item["type"] == "float":
packet_fmt += 'f'
if item["type"] == "double":
packet_fmt += 'd'
if item["type"] == "string":
packet_fmt += item["length"] + 's'
keys.append(item["name"])
return packet_fmt, keys
def trace_heading(self, msg):
"""trace heading"""
heading_txt = "%4d,%10.4f,%10.5f,%14.5f,%14.5f,%14.5f,%14.5f,%8d,%8d\n" % (msg['header_gps_week'],
msg['header_gps_seconds'] / 1000, msg['length'], msg['heading'], msg['pitch'],
msg['hdgstddev'], msg['ptchstddev'], msg['sol_stat'], msg['pos_type'])
self.write_file(self.heading_txt_file, heading_txt)
self.append_process_txt('$GPHEAD2,%s' % heading_txt)
    def trace_gga_nmea(self, msg):
        """Handle one INS solution record (id 1465): log it and emit NMEA GGA.

        Every record goes to ins.txt; records with a real latitude are kept
        for the INS KML.  A GGA sentence is only produced for good INS
        statuses (3, 6 or 7).
        """
        self.print_ins_txt(msg)
        if math.fabs(msg['lat']) > 0.001:
            self.inspvaxs.append(msg)
        if not (math.fabs(msg['lat']) > 0.001 and (msg['ins_status'] == 3 or msg['ins_status'] == 6 or
                msg['ins_status'] == 7)):
            return
        # Lever-arm correction: rotate the vehicle-frame lever arm into the
        # navigation frame and apply it as a lat/lon/height delta.  The lever
        # arm is currently hard-coded to zero, so the correction is a no-op.
        leverarm_v = [0.0, 0.0, 0.0]
        eular = [msg['roll'] * PI / 180, msg['pitch'] * PI / 180, msg['azimuth'] * PI / 180]
        c_vn = self.euler2dcm(eular)
        leverarm_n = [0]*3
        self.matrix_mutiply(c_vn, leverarm_v, 3, 3, 1, leverarm_n)
        d_leverarm = [0]*3
        # position in radians / ellipsoidal height (hgt + undulation)
        pos = [msg['lat']*PI / 180, msg['lon']*PI / 180, msg['hgt'] + msg['undulation']]
        # m, n: presumably meridian and prime-vertical radii of curvature
        # from update_m_n() -- TODO confirm (defined outside this view)
        m, n = self.update_m_n(pos)
        d_leverarm[0] = leverarm_n[0] / (m + pos[2])
        d_leverarm[1] = leverarm_n[1] / ((n + pos[2])*math.cos(pos[0]))
        d_leverarm[2] = -leverarm_n[2]
        self.matrix_add(pos, d_leverarm, 3, 1, pos)
        position_type = self.getpostype(msg['pos_type'])
        gga = self.output_gga_nmea(msg['header_gps_seconds'] / 1000, position_type, pos, 10, 1.0, 1.0)
        self.write_file(self.gga_nmea_file, gga)
def print_ins_txt(self, msg):
"""print ins txt"""
ins_txt = "%4d,%10.4f,%14.9f,%14.9f,%10.4f,%10.4f,%10.4f,%10.4f,%14.9f,%14.9f,%14.9f,%d,%d\n" % (msg['header_gps_week'], msg['header_gps_seconds'] / 1000,
msg['lat'], msg['lon'], msg['hgt'] + msg['undulation'], msg['north_velocity'], msg['east_velocity'], msg['up_velocity'], msg['roll'], msg['pitch'],
msg['azimuth'], msg['ins_status'], msg['pos_type'])
self.write_file(self.ins_txt_file, ins_txt)
self.append_process_txt('$GPINS,%s' % ins_txt)
    def trace_gnss_kml(self, msg):
        """Handle a GNSS position record (id 1429).

        Always logs the record to gnss.txt; fixes with a non-zero latitude
        are additionally kept for the KML track written at the end of run().
        """
        self.print_gnss_txt(msg)
        if math.fabs(msg['lat']) > 0.001:
            self.gnss_kmls.append(msg)
    def print_gnss_txt(self, msg):
        """Write one GNSS position line, decimated to whole-second epochs.

        A non-zero solution status forces the reported position type to 0.
        """
        pos_type = self.getpostype(msg['pos_type'])
        if msg['sol_status'] != 0:
            pos_type = 0
        # keep only epochs close to a whole second; the +0.001 bias guards
        # against floating-point values just below the boundary
        if math.fmod(msg['header_gps_seconds'] / 1000 + 0.001, 1) < 0.01:
            # NOTE(review): getpostype() is defined outside this view; this
            # guard suggests it can return negatives -- confirm
            if pos_type >= 0:
                gnss_txt = '%4d,%10.4f,%14.9f,%14.9f,%10.4f,%10.4f,%10.4f,%10.4f,%d\n' % (msg['header_gps_week'], msg['header_gps_seconds'] / 1000,
                           msg['lat'], msg['lon'], msg['hgt'] + msg['undulation'], msg['lat_sigma'], msg['lon_sigma'], msg['hgt_sigma'], pos_type)
                self.write_file(self.gnss_txt_file, gnss_txt)
                self.append_process_txt('$GPGNSS,%s' % gnss_txt)
def trace_gnss_vel(self, msg):
"""trace gnss vel"""
self.print_gnsssvel_txt(msg)
if math.fabs(msg['hor_spd']) > 0.0001 or math.fabs(msg['vert_spd']) > 0.0001 or math.fabs(msg['trk_gnd']) > 0.0001:
self.gnss_vels.append(msg)
    def trace_rawimusx(self, msg):
        """Convert one RAWIMUSX record (id 1462) to scaled IMU rates.

        Raw counts are multiplied by the per-IMU-type scale factors from the
        module-level ``rates`` table and by the sample rate (per-sample
        increments -> per-second rates).  Both Y axes are negated.
        """
        lctime = msg['header_gps_seconds'] / 1000
        fxyz_scale, wxyz_scale, sample_rate = [0.0, 0.0, 0.0]
        x_accel, y_accel, z_accel, x_gyro, y_gyro, z_gyro = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        # look up scale factors for this IMU type; an unknown type keeps the
        # 0.0 defaults, which zeroes every output sample
        for i in range(RATES_SIZE):
            if rates[i][0] == msg['imutype']:
                sample_rate = rates[i][1]
                wxyz_scale = rates[i][2]
                fxyz_scale = rates[i][3]
                break
        x_accel = msg['x_accel'] * fxyz_scale * sample_rate
        y_accel = -msg['y_accel'] *fxyz_scale * sample_rate
        z_accel = msg['z_accel'] * fxyz_scale * sample_rate
        x_gyro = msg['x_gyro'] * wxyz_scale * sample_rate
        y_gyro = -msg['y_gyro'] * wxyz_scale * sample_rate
        z_gyro = msg['z_gyro'] * wxyz_scale * sample_rate
        # snap epochs that land near a whole second, at most once per second
        # (lastlctime guard); all other samples report a 0 local time
        if math.fmod(lctime + 0.02, 1) < 0.01 and lctime - self.lastlctime > 0.98:
            msg['header_gps_seconds'] = math.floor(lctime + 0.02)
            self.lastlctime = lctime
        else:
            lctime = 0
        # NOTE(review): 'week'/'seconds' must be fields of the configured
        # RAWIMUSX payload -- confirm against the JSON config
        imu_txt = "%4d,%10.4f,%10.4f,%14.10f,%14.10f,%14.10f,%14.10f,%14.10f,%14.10f \n" % (msg['week'], msg['seconds'],
                  math.floor(lctime + 0.02), x_accel, y_accel, z_accel, x_gyro, y_gyro, z_gyro)
        self.write_file(self.imu_txt_file, imu_txt)
        self.append_process_txt("$GPIMU,%s" % imu_txt)
def print_gnsssvel_txt(self, msg):
"""print gnssvel txt"""
if math.fmod(msg['header_gps_seconds'] / 1000 + 0.001, 1) < 0.01:
gnssvel_txt = "%4d,%10.4f,%14.9f,%14.9f,%10.4f,%10.4f,%8d,%8d\n" % (msg['header_gps_week'], msg['header_gps_seconds'] / 1000,
msg['hor_spd'], msg['trk_gnd'], msg['vert_spd'], msg['latency'], msg['sol_status'], msg['vel_type'])
self.write_file(self.gnssvel_txt_file, gnssvel_txt)
self.append_process_txt('$GPVEL,%s' % gnssvel_txt)
def print_gnssposvel_txt(self, pos, vel):
"""print gnssposvel txt"""
if math.fabs(pos['lat']) > 0.001:
pos_type = self.getpostype(pos['pos_type'])
north_velocity = vel['hor_spd'] * math.cos(vel['trk_gnd'] * PI / 180)
east_velocity = vel['hor_spd'] * math.sin(vel['trk_gnd'] * PI / 180)
up_velocity = vel['vert_spd']
gnssposvel_txt = "%4d,%10.4f,%14.9f,%14.9f,%10.4f,%10.4f,%10.4f,%10.4f,%d,%10.4f,%10.4f,%10.4f,%10.4f\n" % (pos['header_gps_week'], pos['header_gps_seconds'] / 1000,
pos['lat'], pos['lon'], pos['hgt'] + pos['undulation'], pos['lat_sigma'], pos['lon_sigma'], pos['hgt_sigma'], pos_type, north_velocity, east_velocity, up_velocity, vel['trk_gnd'])
self.write_file(self.gnssposvel_txt_file, gnssposvel_txt)
def save_gnss_kml(self):
"""save gnss kml"""
gnss_kml = ''
gnss_kml += '<?xml version="1.0" encoding="UTF-8"?>\n'
gnss_kml += '<kml xmlns="http://www.opengis.net/kml/2.2">\n'
gnss_kml += '<Document>\n'
colors = ["ffffffff","ff0000ff","ffff00ff","50FF78F0","ff00ff00","ff00aaff"]
for i in range(6):
gnss_kml += '<Style id="P{:d}">\n'.format(i)
gnss_kml += '<IconStyle>\n'
gnss_kml += '<color>{:s}</color>\n'.format(colors[i])
gnss_kml += '<scale>0.3</scale>\n'
gnss_kml += '<Icon><href>http://maps.google.com/mapfiles/kml/shapes/track.png</href></Icon>'
gnss_kml += '</IconStyle>\n'
gnss_kml += '</Style>\n'
gnss_kml += "<Placemark>\n"\
+ "<name>Rover Track</name>\n"\
+ "<Style>\n"\
+ "<LineStyle>\n"\
+ "<color>ffffffff</color>\n"\
+ "</LineStyle>\n"\
+ "</Style>\n"\
+ "<LineString>\n"\
+ "<coordinates>\n"
for msg in self.gnss_kmls:
gnss_kml += '{:.9f},{:.9f},{:.3f}\n'.format(msg['lon'], msg['lat'], msg['hgt'] + msg['undulation'])
gnss_kml += "</coordinates>\n"\
+ "</LineString>\n"\
+ "</Placemark>\n"
gnss_kml += "<Folder>\n"\
+ "<name>Rover Position</name>\n"
for i, msg in enumerate(self.gnss_kmls):
ep = self.weeksecondstoutc(msg['header_gps_week'], | |
400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35773.358],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36274.946],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35739.309],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 36135.092],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35286.584],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35081.366]])
# Multi信号处理结果,先卖后买,使用卖出的现金买进,交割期为2天(股票)0天(现金)
self.multi_res = np.array(
[[0.0000, 357.2545, 0.0000, 6506.9627, 0.0000, 9965.1867],
[0.0000, 357.2545, 0.0000, 6506.9627, 0.0000, 10033.0650],
[0.0000, 178.6273, 0.0000, 8273.5864, 0.0000, 10034.8513],
[0.0000, 178.6273, 0.0000, 8273.5864, 0.0000, 10036.6376],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10019.3404],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10027.7062],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10030.1477],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10005.1399],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10002.5054],
[150.3516, 489.4532, 0.0000, 3765.8877, 0.0000, 9967.3860],
[75.1758, 391.5625, 0.0000, 5490.1377, 0.0000, 10044.4059],
[75.1758, 391.5625, 0.0000, 5490.1377, 0.0000, 10078.1430],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10138.2709],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10050.4768],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10300.0711],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10392.6970],
[75.1758, 391.5625, 169.2705, 4644.3773, 0.0000, 10400.5282],
[75.1758, 391.5625, 169.2705, 4644.3773, 0.0000, 10408.9220],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10376.5914],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10346.8794],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10364.7474],
[75.1758, 381.1856, 645.5014, 2459.1665, 0.0000, 10302.4570],
[18.7939, 381.1856, 645.5014, 3024.6764, 0.0000, 10747.4929],
[18.7939, 381.1856, 96.8252, 6492.3097, 0.0000, 11150.9107],
[18.7939, 381.1856, 96.8252, 6492.3097, 0.0000, 11125.2946],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11191.9956],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11145.7486],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11090.0768],
[132.5972, 114.3557, 864.3802, 4223.9548, 0.0000, 11113.8733],
[132.5972, 114.3557, 864.3802, 4223.9548, 0.0000, 11456.3281],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21983.7333],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 22120.6165],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21654.5327],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21429.6550],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21912.5643],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 22516.3100],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23169.0777],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23390.8080],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23743.3742],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 23210.7311],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 24290.4375],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 24335.3279],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 18317.3553],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 18023.4660],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24390.0527],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24389.6421],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24483.5953],
[0.0000, 559.9112, 0.0000, 18321.5674, 0.0000, 24486.1895],
[0.0000, 0.0000, 0.0000, 24805.3389, 0.0000, 24805.3389],
[0.0000, 0.0000, 0.0000, 24805.3389, 0.0000, 24805.3389]])
    def test_loop_step_pt_sb00(self):
        """ test loop step PT-signal, sell first

        Replays selected days of the PT (position-target) signal sequence
        through qt.core._loop_step with maximize_cash_usage=True ("sell
        before buy") and checks the resulting cash/amounts against the
        precomputed reference table self.pt_res_sb00.  ``fee`` is returned
        by every call but not asserted.
        """
        # Day 1: all-cash start; the step leaves 7500 cash and buys
        # ~555.56 units of asset index 4.
        c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
                                                     own_cash=10000,
                                                     own_amounts=np.zeros(7, dtype='float'),
                                                     available_cash=10000,
                                                     available_amounts=np.zeros(7, dtype='float'),
                                                     op=self.pt_signals[0],
                                                     prices=self.prices[0],
                                                     rate=self.rate,
                                                     pt_buy_threshold=0.1,
                                                     pt_sell_threshold=0.1,
                                                     maximize_cash_usage=True,
                                                     allow_sell_short=False,
                                                     moq_buy=0,
                                                     moq_sell=0,
                                                     print_log=True)
        print(f'day 1 result in complete looping: \n'
              f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
              f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
              f'----------------------------------\n')
        # c_g / c_s are the two cash deltas returned by _loop_step; both are
        # applied to the running cash balance
        cash = 10000 + c_g + c_s
        amounts = np.zeros(7, dtype='float') + a_p + a_s
        self.assertAlmostEqual(cash, 7500)
        self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
        # Day 4: start from the day-3 reference row [2], expect row [3].
        c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
                                                     own_cash=self.pt_res_sb00[2][7],
                                                     own_amounts=self.pt_res_sb00[2][0:7],
                                                     available_cash=self.pt_res_sb00[2][7],
                                                     available_amounts=self.pt_res_sb00[2][0:7],
                                                     op=self.pt_signals[3],
                                                     prices=self.prices[3],
                                                     rate=self.rate,
                                                     pt_buy_threshold=0.1,
                                                     pt_sell_threshold=0.1,
                                                     maximize_cash_usage=True,
                                                     allow_sell_short=False,
                                                     moq_buy=0,
                                                     moq_sell=0,
                                                     print_log=True)
        print(f'day 4 result in complete looping: \n'
              f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
              f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
              f'----------------------------------\n')
        cash = self.pt_res_sb00[2][7] + c_g + c_s
        amounts = self.pt_res_sb00[2][0:7] + a_p + a_s
        self.assertAlmostEqual(cash, self.pt_res_sb00[3][7], 2)
        self.assertTrue(np.allclose(amounts, self.pt_res_sb00[3][0:7]))
        # Day 32: start from row [30], expect row [31].
        c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
                                                     own_cash=self.pt_res_sb00[30][7],
                                                     own_amounts=self.pt_res_sb00[30][0:7],
                                                     available_cash=self.pt_res_sb00[30][7],
                                                     available_amounts=self.pt_res_sb00[30][0:7],
                                                     op=self.pt_signals[31],
                                                     prices=self.prices[31],
                                                     rate=self.rate,
                                                     pt_buy_threshold=0.1,
                                                     pt_sell_threshold=0.1,
                                                     maximize_cash_usage=True,
                                                     allow_sell_short=False,
                                                     moq_buy=0,
                                                     moq_sell=0,
                                                     print_log=True)
        print(f'day 32 result in complete looping: \n'
              f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
              f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
              f'----------------------------------\n')
        cash = self.pt_res_sb00[30][7] + c_g + c_s
        amounts = self.pt_res_sb00[30][0:7] + a_p + a_s
        self.assertAlmostEqual(cash, self.pt_res_sb00[31][7], 2)
        self.assertTrue(np.allclose(amounts, self.pt_res_sb00[31][0:7]))
        # Day 61: same pattern, but with an extra 10000 cash injected before
        # the step (the reference table accounts for this deposit).
        c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
                                                     own_cash=self.pt_res_sb00[59][7] + 10000,
                                                     own_amounts=self.pt_res_sb00[59][0:7],
                                                     available_cash=self.pt_res_sb00[59][7] + 10000,
                                                     available_amounts=self.pt_res_sb00[59][0:7],
                                                     op=self.pt_signals[60],
                                                     prices=self.prices[60],
                                                     rate=self.rate,
                                                     pt_buy_threshold=0.1,
                                                     pt_sell_threshold=0.1,
                                                     maximize_cash_usage=True,
                                                     allow_sell_short=False,
                                                     moq_buy=0,
                                                     moq_sell=0,
                                                     print_log=True)
        print(f'day 61 result in complete looping: \n'
              f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
              f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
              f'----------------------------------\n')
        cash = self.pt_res_sb00[59][7] + c_g + c_s + 10000
        amounts = self.pt_res_sb00[59][0:7] + a_p + a_s
        self.assertAlmostEqual(cash, self.pt_res_sb00[60][7], 2)
        self.assertTrue(np.allclose(amounts, self.pt_res_sb00[60][0:7]))
        # Day 62: chain directly off the day-61 results computed above.
        c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
                                                     own_cash=cash,
                                                     own_amounts=amounts,
                                                     available_cash=cash,
                                                     available_amounts=amounts,
                                                     op=self.pt_signals[61],
                                                     prices=self.prices[61],
                                                     rate=self.rate,
                                                     pt_buy_threshold=0.1,
                                                     pt_sell_threshold=0.1,
                                                     maximize_cash_usage=True,
                                                     allow_sell_short=False,
                                                     moq_buy=0,
                                                     moq_sell=0,
                                                     print_log=True)
        print(f'day 62 result in complete looping: \n'
              f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
              f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
              f'----------------------------------\n')
        cash = cash + c_g + c_s
        amounts = amounts + a_p + a_s
        self.assertAlmostEqual(cash, self.pt_res_sb00[61][7], 2)
        self.assertTrue(np.allclose(amounts, self.pt_res_sb00[61][0:7]))
        # Day 97: NOTE(review): the step is fed row [95] but the expected
        # cash/amounts below are rebuilt from row [96] and asserted equal to
        # row [96] -- i.e. the assertions only hold if day 97 trades nothing.
        # Every other day in this test uses base row N-1 and expects row N;
        # confirm whether this deviation is intentional.
        c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
                                                     own_cash=self.pt_res_sb00[95][7],
                                                     own_amounts=self.pt_res_sb00[95][0:7],
                                                     available_cash=self.pt_res_sb00[95][7],
                                                     available_amounts=self.pt_res_sb00[95][0:7],
                                                     op=self.pt_signals[96],
                                                     prices=self.prices[96],
                                                     rate=self.rate,
                                                     pt_buy_threshold=0.1,
                                                     pt_sell_threshold=0.1,
                                                     maximize_cash_usage=True,
                                                     allow_sell_short=False,
                                                     moq_buy=0,
                                                     moq_sell=0,
                                                     print_log=True)
        print(f'day 97 result in complete looping: \n'
              f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
              f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
              f'----------------------------------\n')
        cash = self.pt_res_sb00[96][7] + c_g + c_s
        amounts = self.pt_res_sb00[96][0:7] + a_p + a_s
        self.assertAlmostEqual(cash, self.pt_res_sb00[96][7], 2)
        self.assertTrue(np.allclose(amounts, self.pt_res_sb00[96][0:7]))
        # Day 98: chain off the day-97 results computed above.
        c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
                                                     own_cash=cash,
                                                     own_amounts=amounts,
                                                     available_cash=cash,
                                                     available_amounts=amounts,
                                                     op=self.pt_signals[97],
                                                     prices=self.prices[97],
                                                     rate=self.rate,
                                                     pt_buy_threshold=0.1,
                                                     pt_sell_threshold=0.1,
                                                     maximize_cash_usage=True,
                                                     allow_sell_short=False,
                                                     moq_buy=0,
                                                     moq_sell=0,
                                                     print_log=True)
        print(f'day 98 result in complete looping: \n'
              f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
              f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
              f'----------------------------------\n')
        cash = cash + c_g + c_s
        amounts = amounts + a_p + a_s
        self.assertAlmostEqual(cash, self.pt_res_sb00[97][7], 2)
        self.assertTrue(np.allclose(amounts, self.pt_res_sb00[97][0:7]))
def test_loop_step_pt_bs00(self):
    """ test loop step PT-signal, buy first"""

    def run_step(own_cash, own_amounts, day):
        # One call to the core loop step with the fixed PT parameters used
        # throughout this test (buy-first: maximize_cash_usage disabled).
        # Available cash/amounts always equal the owned ones here.
        return qt.core._loop_step(signal_type=0,
                                  own_cash=own_cash,
                                  own_amounts=own_amounts,
                                  available_cash=own_cash,
                                  available_amounts=own_amounts,
                                  op=self.pt_signals[day],
                                  prices=self.prices[day],
                                  rate=self.rate,
                                  pt_buy_threshold=0.1,
                                  pt_sell_threshold=0.1,
                                  maximize_cash_usage=False,
                                  allow_sell_short=False,
                                  moq_buy=0,
                                  moq_sell=0,
                                  print_log=True)

    def report(day, c_g, c_s, a_p, a_s, plus='+ '):
        # Reproduce the progress printout of the original test verbatim
        # (day 1 used '+' without a trailing space, later days '+ ').
        print(f'day {day} result in complete looping: \n'
              f'cash_change: {plus}{c_g:.2f} / {c_s:.2f}\n'
              f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
              f'----------------------------------\n')

    # day 1: start from 10000 cash and empty holdings
    c_g, c_s, a_p, a_s, fee = run_step(10000, np.zeros(7, dtype='float'), 0)
    report(1, c_g, c_s, a_p, a_s, plus='+')
    cash = 10000 + c_g + c_s
    amounts = np.zeros(7, dtype='float') + a_p + a_s
    self.assertAlmostEqual(cash, 7500)
    self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))

    # day 4: resume from the day-3 reference state
    c_g, c_s, a_p, a_s, fee = run_step(self.pt_res_bs00[2][7], self.pt_res_bs00[2][0:7], 3)
    report(4, c_g, c_s, a_p, a_s)
    cash = self.pt_res_bs00[2][7] + c_g + c_s
    amounts = self.pt_res_bs00[2][0:7] + a_p + a_s
    self.assertAlmostEqual(cash, self.pt_res_bs00[3][7], 2)
    self.assertTrue(np.allclose(amounts, self.pt_res_bs00[3][0:7]))

    # day 32: resume from the day-31 reference state
    c_g, c_s, a_p, a_s, fee = run_step(self.pt_res_bs00[30][7], self.pt_res_bs00[30][0:7], 31)
    report(32, c_g, c_s, a_p, a_s)
    cash = self.pt_res_bs00[30][7] + c_g + c_s
    amounts = self.pt_res_bs00[30][0:7] + a_p + a_s
    self.assertAlmostEqual(cash, self.pt_res_bs00[31][7], 2)
    self.assertTrue(np.allclose(amounts, self.pt_res_bs00[31][0:7]))

    # day 61: an extra 10000 cash is injected on top of the day-60 state
    c_g, c_s, a_p, a_s, fee = run_step(self.pt_res_bs00[59][7] + 10000, self.pt_res_bs00[59][0:7], 60)
    report(61, c_g, c_s, a_p, a_s)
    cash = self.pt_res_bs00[59][7] + c_g + c_s + 10000
    amounts = self.pt_res_bs00[59][0:7] + a_p + a_s
    self.assertAlmostEqual(cash, self.pt_res_bs00[60][7], 2)
    self.assertTrue(np.allclose(amounts, self.pt_res_bs00[60][0:7]))

    # day 62: continue from the locally tracked state
    c_g, c_s, a_p, a_s, fee = run_step(cash, amounts, 61)
    report(62, c_g, c_s, a_p, a_s)
    cash = cash + c_g + c_s
    amounts = amounts + a_p + a_s
    self.assertAlmostEqual(cash, self.pt_res_bs00[61][7], 2)
    self.assertTrue(np.allclose(amounts, self.pt_res_bs00[61][0:7]))

    # day 97: resume from the day-96 reference state
    c_g, c_s, a_p, a_s, fee = run_step(self.pt_res_bs00[95][7], self.pt_res_bs00[95][0:7], 96)
    report(97, c_g, c_s, a_p, a_s)
    cash = self.pt_res_bs00[96][7] + c_g + c_s
    amounts = self.pt_res_bs00[96][0:7] + a_p + a_s
    self.assertAlmostEqual(cash, self.pt_res_bs00[96][7], 2)
    self.assertTrue(np.allclose(amounts, self.pt_res_bs00[96][0:7]))

    # day 98: continue from the locally tracked state
    c_g, c_s, a_p, a_s, fee = run_step(cash, amounts, 97)
    report(98, c_g, c_s, a_p, a_s)
    cash = cash + c_g + c_s
    amounts = amounts + a_p + a_s
    self.assertAlmostEqual(cash, self.pt_res_bs00[97][7], 2)
    self.assertTrue(np.allclose(amounts, self.pt_res_bs00[97][0:7]))
def test_loop_step_ps_sb00(self):
""" test loop step PS-signal, sell first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.ps_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + | |
return Ravel(Add([self.func, other.func]), self.axis)
return Ravel(Add([self.func, Unravel(other, self.axis, self.func.shape[self.axis:self.axis+2])]), self.axis)
def _get(self, i, item):
    # Simplification hook for Get; falls through (returns None) when no
    # simplification applies.
    if i != self.axis:
        # Getting along any other axis commutes with Ravel; shift the axis
        # indices to account for the extra (unravelled) axis of self.func.
        return Ravel(Get(self.func, i+(i>self.axis), item), self.axis-(i<self.axis))
    if item.isconstant and numeric.isint(self.func.shape[self.axis+1]):
        # Constant index into the ravelled axis: split it into the two
        # original indices via divmod with the inner axis length.
        item, = item.eval()
        i, j = divmod(item, self.func.shape[self.axis+1])
        return Get(Get(self.func, self.axis, i), self.axis, j)
def _sum(self, axis):
    # Simplification hook for Sum.
    if axis == self.axis:
        # Summing over the ravelled axis equals summing both underlying axes.
        return Sum(Sum(self.func, axis), axis)
    # Otherwise summation commutes with Ravel (axis offsets adjusted).
    return Ravel(Sum(self.func, axis+(axis>self.axis)), self.axis-(axis<self.axis))
def _derivative(self, var, seen):
    # Differentiation commutes with ravelling.
    return ravel(derivative(self.func, var, seen), axis=self.axis)
def _transpose(self, axes):
    # Transposition commutes with Ravel: map the requested permutation onto
    # the axes of self.func, keeping the ravelled axis pair adjacent so it
    # can be re-ravelled at its new position.
    ravelaxis = axes.index(self.axis)
    funcaxes = [ax+(ax>self.axis) for ax in axes]
    funcaxes = funcaxes[:ravelaxis+1] + [self.axis+1] + funcaxes[ravelaxis+1:]
    return Ravel(Transpose(self.func, funcaxes), ravelaxis)
def _takediag(self, axis, rmaxis):
    # Only commutes when neither diagonal axis touches the ravelled pair;
    # returns None otherwise.
    if not {self.axis, self.axis+1} & {axis, rmaxis}:
        return Ravel(TakeDiag(self.func, axis+(axis>self.axis), rmaxis+(rmaxis>self.axis)), self.axis-(self.axis>rmaxis))
def _diagonalize(self, axis, newaxis):
    # Diagonalization of a non-ravelled axis commutes with Ravel.
    if axis != self.axis:
        return Ravel(Diagonalize(self.func, axis+(axis>self.axis), newaxis+(newaxis>self.axis)), self.axis+(self.axis>=newaxis))
def _take(self, index, axis):
    # Take commutes with Ravel for axes away from the ravelled axis
    # (conservatively also skips self.axis+1); returns None otherwise.
    if axis not in (self.axis, self.axis+1):
        return Ravel(Take(self.func, index, axis+(axis>self.axis)), self.axis)
def _unravel(self, axis, shape):
    if axis != self.axis:
        # Unravelling a different axis commutes with this Ravel.
        return Ravel(Unravel(self.func, axis+(axis>self.axis), shape), self.axis+(self.axis>axis))
    elif shape == self.func.shape[axis:axis+2]:
        # Unravelling back into the original two axes cancels the Ravel.
        return self.func
def _insertaxis(self, axis, length):
    # Axis insertion commutes with Ravel (axis offsets adjusted).
    return Ravel(InsertAxis(self.func, axis+(axis>self.axis), length), self.axis+(axis<=self.axis))
def _mask(self, maskvec, axis):
    # Masking commutes with Ravel for axes other than the ravelled one.
    if axis != self.axis:
        return Ravel(Mask(self.func, maskvec, axis+(axis>self.axis)), self.axis)
@property
def blocks(self):
    # Sparse block structure: combine the index vectors of the two ravelled
    # axes into one flat index (outer * inner_length + inner) and ravel the
    # block data along the same axis.
    for ind, f in self.func.blocks:
        newind = ravel(ind[self.axis][:,_] * self.func.shape[self.axis+1] + ind[self.axis+1][_,:], axis=0)
        yield (ind[:self.axis] + (newind,) + ind[self.axis+2:]), ravel(f, axis=self.axis)
class Unravel(Array):
    '''Split one axis of ``func`` of length ``m*n`` into two axes of shape
    ``(m, n)``; the inverse of :class:`Ravel` for the selected axis.'''

    __slots__ = 'func', 'axis', 'unravelshape'
    __cache__ = 'simplified',

    @types.apply_annotations
    def __init__(self, func:asarray, axis:types.strictint, shape:asshape):
        # ``shape`` is the 2-tuple the selected axis is unravelled into; its
        # product must equal the original axis length.
        assert 0 <= axis < func.ndim
        assert func.shape[axis] == numpy.product(shape)
        assert len(shape) == 2
        self.func = func
        self.axis = axis
        self.unravelshape = shape
        super().__init__(args=[func]+[asarray(sh) for sh in shape], shape=func.shape[:axis]+shape+func.shape[axis+1:], dtype=func.dtype)

    @property
    def simplified(self):
        func = self.func.simplified
        # A length-1 target axis is just an inserted singleton axis.
        if self.shape[self.axis] == 1:
            return InsertAxis(func, self.axis, 1).simplified
        if self.shape[self.axis+1] == 1:
            return InsertAxis(func, self.axis+1, 1).simplified
        # Give the operand a chance to absorb the unravel operation.
        retval = func._unravel(self.axis, self.unravelshape)
        if retval is not None:
            assert retval.shape == self.shape
            return retval.simplified
        return Unravel(func, self.axis, self.unravelshape)

    def _derivative(self, var, seen):
        # Differentiation commutes with unravelling.
        return unravel(derivative(self.func, var, seen), axis=self.axis, shape=self.unravelshape)

    def evalf(self, f, sh1, sh2):
        # sh1/sh2 arrive as length-1 arrays (evaluated shape arguments).
        # Offsets use self.axis+1 because the evaluated array carries an
        # extra leading axis — presumably the evaluation-points axis; see
        # how Mask.evalf applies the same +1 shift.
        sh1, = sh1
        sh2, = sh2
        return f.reshape(f.shape[:self.axis+1]+(sh1, sh2)+f.shape[self.axis+2:])

    def _ravel(self, axis):
        # Ravelling the two unravelled axes back together recovers ``func``.
        if axis == self.axis:
            return self.func
class Mask(Array):
    '''Boolean mask along one axis of ``func``: keeps only the entries of
    ``axis`` where ``mask`` is true, shrinking that axis to ``mask.sum()``.'''

    __slots__ = 'func', 'axis', 'mask'
    __cache__ = 'simplified',

    @types.apply_annotations
    def __init__(self, func:asarray, mask:types.frozenarray, axis:types.strictint):
        assert len(mask) == func.shape[axis]
        self.func = func
        self.axis = axis
        self.mask = mask
        super().__init__(args=[func], shape=func.shape[:axis]+(mask.sum(),)+func.shape[axis+1:], dtype=func.dtype)

    @property
    def simplified(self):
        func = self.func.simplified
        # All-true mask is the identity; all-false leaves an empty axis.
        if self.mask.all():
            return func
        if not self.mask.any():
            return zeros_like(self)
        # Give the operand a chance to absorb the mask.
        retval = func._mask(self.mask, self.axis)
        if retval is not None:
            assert retval.shape == self.shape
            return retval.simplified
        return Mask(func, self.mask, self.axis)

    def evalf(self, func):
        # axis+1: the evaluated array carries an extra leading axis.
        return func[(slice(None),)*(self.axis+1)+(numpy.asarray(self.mask),)]

    def _derivative(self, var, seen):
        # Differentiation commutes with masking.
        return mask(derivative(self.func, var, seen), self.mask, self.axis)

    def _get(self, i, item):
        if i != self.axis:
            # Getting another axis commutes with the mask.
            return Mask(Get(self.func, i, item), self.mask, self.axis-(i<self.axis))
        if item.isconstant:
            # Translate the masked index back to an index into func.
            item, = item.eval()
            where, = self.mask.nonzero()
            return Get(self.func, i, where[item])

    def _sum(self, axis):
        if axis != self.axis:
            # Summation over another axis commutes with the mask.
            return Mask(sum(self.func, axis), self.mask, self.axis-(axis<self.axis))
        if self.shape[axis] == 1:
            # Single retained entry: the sum reduces to a Get.
            (item,), = self.mask.nonzero()
            return Get(self.func, axis, item)

    def _take(self, index, axis):
        # Take commutes with the mask for other axes only.
        if axis != self.axis:
            return Mask(Take(self.func, index, axis), self.mask, self.axis)

    def _product(self):
        # Product acts on the last axis; commutes unless that is the masked one.
        if self.axis != self.ndim-1:
            return Mask(Product(self.func), self.mask, self.axis)

    def _mask(self, maskvec, axis):
        if axis == self.axis:
            # Compose both masks into a single mask on func's axis.
            newmask = numpy.zeros(len(self.mask), dtype=bool)
            newmask[numpy.asarray(self.mask)] = maskvec
            assert maskvec.sum() == newmask.sum()
            return Mask(self.func, newmask, self.axis)

    def _takediag(self, axis, rmaxis):
        # Commutes only when the masked axis is not a diagonal axis.
        if self.axis not in (axis, rmaxis):
            return Mask(TakeDiag(self.func, axis, rmaxis), self.mask, self.axis-(rmaxis<self.axis))
class FindTransform(Array):
    '''Scalar integer expression: position of ``trans`` within a tuple of
    transforms.

    NOTE(review): the bisection in ``evalf`` only works if ``transforms`` is
    sorted ascending — assumed here, to be confirmed at the call sites.
    '''

    __slots__ = 'transforms', 'bits'

    @types.apply_annotations
    def __init__(self, transforms:tuple, trans:types.strict[TransformChain]):
        self.transforms = transforms
        # Precompute the bit masks (highest first) driving the binary search
        # in ``evalf``: powers of two up to len(transforms).
        bits = []
        bit = 1
        while bit <= len(transforms):
            bits.append(bit)
            bit <<= 1
        self.bits = numpy.array(bits[::-1])
        super().__init__(args=[trans], shape=(), dtype=int)

    def asdict(self, values):
        # Pair each transform with its corresponding value.
        assert len(self.transforms) == len(values)
        return dict(zip(self.transforms, values))

    def evalf(self, trans):
        # Bitwise binary search: build up the largest 1-based position whose
        # transform compares <= trans, then verify it is an actual prefix.
        n = len(self.transforms)
        index = 0
        for bit in self.bits:
            i = index|bit
            if i <= n and trans >= self.transforms[i-1]:
                index = i
        index -= 1
        if index < 0 or trans[:len(self.transforms[index])] != self.transforms[index]:
            raise IndexError('trans not found')
        # Result gets a leading length-1 axis, like other evalf results.
        return numpy.array(index)[_]
class Range(Array):
    '''1-D integer array ``offset, offset+1, ..., offset+length-1``.'''

    __slots__ = 'length', 'offset'

    @types.apply_annotations
    def __init__(self, length:asarray, offset:asarray=Zeros((), int)):
        # Both length and offset are scalar integer expressions.
        assert length.ndim == 0 and length.dtype == int
        assert offset.ndim == 0 and offset.dtype == int
        self.length = length
        self.offset = offset
        super().__init__(args=[length, offset], shape=[length], dtype=int)

    def _take(self, index, axis):
        # Taking from a range is simply shifting the index by the offset.
        return add(index, self.offset)

    def evalf(self, length, offset):
        # length/offset arrive as length-1 arrays; the result likewise gets
        # a leading length-1 axis.
        length, = length
        offset, = offset
        return numpy.arange(offset, offset+length)[_]
class Polyval(Array):
    '''
    Computes the :math:`k`-dimensional array

    .. math:: j_0,\\dots,j_{k-1} \\mapsto \\sum_{\substack{i_0,\\dots,i_{n-1}\\in\mathbb{N}\\\\i_0+\\cdots+i_{n-1}\\le d}} p_0^{i_0} \\cdots p_{n-1}^{i_{n-1}} c_{j_0,\\dots,j_{k-1},i_0,\\dots,i_{n-1}},

    where :math:`p` are the :math:`n`-dimensional local coordinates and :math:`c`
    is the argument ``coeffs`` and :math:`d` is the degree of the polynomial,
    where :math:`d` is the length of the last :math:`n` axes of ``coeffs``.

    .. warning::

       All coefficients with a (combined) degree larger than :math:`d` should be
       zero. Failing to do so won't raise an :class:`Exception`, but might give
       incorrect results.
    '''

    __slots__ = 'points_ndim', 'coeffs', 'points', 'ngrad'
    __cache__ = 'simplified',

    @types.apply_annotations
    def __init__(self, coeffs:asarray, points:asarray, ngrad:types.strictint=0):
        if points.ndim != 1:
            raise ValueError('argument `points` should have exactly one dimension')
        if not numeric.isint(points.shape[0]):
            # (message typo fixed: "should have be known" -> "should be known")
            raise ValueError('the shape of argument `points` should be known, i.e. an `int`')
        self.points_ndim = points.shape[0]
        ndim = coeffs.ndim - self.points_ndim
        # BUGFIX: the original guard `coeffs.ndim < ndim` was dead code — it
        # reduces to `0 < -points_ndim`, which can never hold for a shape.
        # The intended check, matching the message, is that coeffs has at
        # least one trailing axis per spatial dimension, i.e. ndim >= 0.
        if ndim < 0:
            raise ValueError('argument `coeffs` should have at least one axis per spatial dimension')
        self.coeffs = coeffs
        self.points = points
        self.ngrad = ngrad
        super().__init__(args=[CACHE, points, coeffs], shape=coeffs.shape[:ndim]+(self.points_ndim,)*ngrad, dtype=float)

    def evalf(self, cache, points, coeffs):
        assert points.shape[1] == self.points_ndim
        points = types.frozenarray(points)
        coeffs = types.frozenarray(coeffs)
        # Apply the requested number of gradients before evaluating.
        for igrad in range(self.ngrad):
            coeffs = cache[numeric.poly_grad](coeffs, self.points_ndim)
        return cache[numeric.poly_eval](coeffs, points)

    def _derivative(self, var, seen):
        # Derivative to argument `points`.
        dpoints = Dot(_numpy_align(Polyval(self.coeffs, self.points, self.ngrad+1)[(...,*(_,)*var.ndim)], derivative(self.points, var, seen)), [self.ndim])
        # Derivative to argument `coeffs`. `trans` shuffles the coefficient axes
        # of `derivative(self.coeffs)` after the derivative axes.
        shuffle = lambda a, b, c: (*range(0,a), *range(a+b,a+b+c), *range(a,a+b))
        pretrans = shuffle(self.coeffs.ndim-self.points_ndim, self.points_ndim, var.ndim)
        posttrans = shuffle(self.coeffs.ndim-self.points_ndim, var.ndim, self.ngrad)
        dcoeffs = Transpose(Polyval(Transpose(derivative(self.coeffs, var, seen), pretrans), self.points, self.ngrad), posttrans)
        return dpoints + dcoeffs

    def _take(self, index, axis):
        # Take commutes when it addresses a coefficient (non-spatial) axis.
        if axis < self.coeffs.ndim - self.points_ndim:
            return Polyval(take(self.coeffs, index, axis), self.points, self.ngrad)

    def _const_helper(self, *j):
        # Recursively build the constant value of the ngrad-th gradient when
        # the polynomial degree equals ngrad (all higher terms vanish).
        if len(j) == self.ngrad:
            coeffs = self.coeffs
            for i in reversed(range(self.points_ndim)):
                p = builtins.sum(k==i for k in j)
                coeffs = math.factorial(p)*Get(coeffs, axis=i+self.coeffs.ndim-self.points_ndim, item=p)
            return coeffs
        else:
            return stack([self._const_helper(*j, k) for k in range(self.points_ndim)], axis=self.coeffs.ndim-self.points_ndim+self.ngrad-len(j)-1)

    @property
    def simplified(self):
        self = self.edit(lambda arg: arg.simplified if isevaluable(arg) else arg)
        degree = 0 if self.points_ndim == 0 else self.coeffs.shape[-1]-1 if isinstance(self.coeffs.shape[-1], int) else float('inf')
        # Gradients beyond the degree vanish; at exactly the degree the
        # result is point-independent and can be folded to a constant.
        if iszero(self.coeffs) or self.ngrad > degree:
            return zeros_like(self)
        elif self.ngrad == degree:
            return self._const_helper().simplified
        else:
            return self
class RevolutionAngle(Array):
    '''
    Pseudo coordinates of a :class:`nutils.topology.RevolutionTopology`.

    A scalar float placeholder that must never be evaluated directly: it is
    replaced by zeros during ``prepare_eval``.
    '''

    __slots__ = ()
    __cache__ = 'prepare_eval'

    def __init__(self):
        super().__init__(args=[], shape=[], dtype=float)

    @property
    def isconstant(self):
        # Deliberately non-constant despite having no arguments.
        return False

    def evalf(self):
        raise Exception('RevolutionAngle should not be evaluated')

    def _derivative(self, var, seen):
        # Derivative w.r.t. (non-empty) local coordinates is one, zero else.
        return (ones_like if isinstance(var, LocalCoords) and len(var) > 0 else zeros_like)(var)

    @util.positional_only('self')
    def prepare_eval(*args, **kwargs):
        # Replaced by an all-zeros array of the same shape at evaluation prep.
        self, = args
        return zeros_like(self)
# AUXILIARY FUNCTIONS (FOR INTERNAL USE)
_ascending = lambda arg: numpy.greater(numpy.diff(arg), 0).all()
def _normdims(ndim, shapes):
    '''Normalize each axis number in ``shapes`` against ``ndim`` via
    ``numeric.normdim`` (PEP8 E731: named lambda replaced by a def).'''
    return tuple(numeric.normdim(ndim, sh) for sh in shapes)
def _jointdtype(*dtypes):
'determine joint dtype'
type_order = bool, int, float
kind_order = 'bif'
itype = builtins.max(kind_order.index(dtype.kind) if isinstance(dtype,numpy.dtype)
else type_order.index(dtype) for dtype in dtypes)
return type_order[itype]
def _matchndim(*arrays):
    'introduce singleton dimensions to match ndims'

    converted = [asarray(a) for a in arrays]
    ndim = builtins.max(a.ndim for a in converted)
    # Prepend singleton axes (via the `_` newaxis alias) until every array
    # reaches the common ndim.
    return tuple(a[(_,)*(ndim-a.ndim)] for a in converted)
def _invtrans(trans):
trans = numpy.asarray(trans)
assert trans.dtype == int
invtrans = numpy.empty(len(trans), dtype=int)
invtrans[trans] = numpy.arange(len(trans))
return tuple(invtrans)
def _norm_and_sort(ndim, args):
    'norm axes, sort, and assert unique'

    normalized = [numeric.normdim(ndim, arg) for arg in args]
    normalized.sort()
    result = tuple(normalized)
    # Strictly ascending implies there were no duplicate axes.
    assert _ascending(result)  # strict
    return result
def _concatblocks(items):
    '''Merge sparse blocks that share their surrounding indices.

    ``items`` are ((ind1, ind2), (ind, f)) pairs, where ``ind1``/``ind2`` are
    the index tuples before/after the concatenation axis, ``ind`` the index
    along it and ``f`` the block values; blocks with equal (ind1, ind2) are
    concatenated along that axis.
    '''
    gathered = util.gather(items)
    # Record the original order of the per-axis indices so that the sort
    # below reproduces the input ordering of the contributions.
    order = [ind for ind12, ind_f in gathered for ind, f in ind_f]
    blocks = []
    for (ind1, ind2), ind_f in gathered:
        if len(ind_f) == 1:
            # Single contribution: no concatenation needed.
            ind, f = ind_f[0]
        else:
            inds, fs = zip(*sorted(ind_f, key=lambda item: order.index(item[0])))
            ind = Concatenate(inds, axis=0)
            f = Concatenate(fs, axis=len(ind1))
        blocks.append(((ind1+(ind,)+ind2), f))
    return tuple(blocks)
def _numpy_align(*arrays):
'''reshape arrays according to Numpy's broadcast conventions'''
arrays = [asarray(array) for array in arrays]
if len(arrays) > 1:
ndim = builtins.max([array.ndim for array in arrays])
for idim in range(ndim):
lengths = [array.shape[idim] for array in arrays if array.ndim == ndim and array.shape[idim] != 1]
length = lengths[0] if lengths else 1
assert all(l == length for l in lengths), | |
# Copyright 2019 T-Mobile US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" A single application instance which is hosted on a garden container which is on a diego cell.
"""
from itertools import chain
from logzero import logger
import monarch.pcf.util
import monarch.util as util
from monarch.pcf import TIMES_TO_REMOVE
from monarch.pcf.config import Config
class AppInstance(dict):
"""
A single instance of an application. Contains information about where it is hosted and the ports it is bound to.
diego_id: str; ID string of the Diego Cell which hosts this app instance.
diego_ip: str; IP of the Diego Cell which hosts this app instance.
diego_vi: str; Name of the virtual network interface on the diego cell for this application instance.
cont_id: str; GUID of the Garden Container which hosts this app instance.
cont_ip: str; IP of the Garden Container which hosts this app instance.
app_ports: set[(int, int)]; Ports the application instance is bound to (DiegoPort, ContainerPort).
"""
def serialize(self):
"""
Convert this application instance into a serializable dictionary.
:return: Serializable dictionary representation of the app instance.
"""
obj = self.copy()
obj['app_ports'] = [p for p in self['app_ports']]
return obj
def run_cmd_on_diego_cell(self, cmd, **kwargs):
    """
    Execute shell command(s) on the diego cell hosting this app instance.
    :param cmd: Union[str, List[str]]; Command(s) to run on the Diego Cell.
    :param kwargs: Extra keyword arguments forwarded to the underlying helper.
    :return: int, str, str; Returncode, stdout, stderr.
    """
    diego_id = self['diego_id']
    return monarch.pcf.util.run_cmd_on_diego_cell(diego_id, cmd, **kwargs)
def run_cmd_on_container(self, cmd, **kwargs):
    """
    Execute shell command(s) inside this app instance's garden container.
    :param cmd: Union[str, List[str]]; Command(s) to run on the container.
    :param kwargs: Extra keyword arguments forwarded to the underlying helper.
    :return: int, str, str; Returncode, stdout, stderr.
    """
    diego_id, cont_id = self['diego_id'], self['cont_id']
    return monarch.pcf.util.run_cmd_on_container(diego_id, cont_id, cmd, **kwargs)
def crash(self):
    """
    Crash this application instance by SIGSEGV-ing the java process in its
    container.
    :return: int; 0 on success, the failing returncode otherwise.
    """
    logger.info('Crashing app instance at %s with container %s:%s.',
                self['diego_id'], self['cont_ip'], self['cont_id'])
    rcode, _, _ = self.run_cmd_on_container('pkill -SIGSEGV java')
    if not rcode:
        return 0
    logger.error("Failed to crash application container %s:%s.",
                 self['cont_id'], self['cont_ip'])
    return rcode
def block(self, direction='ingress', ports='env'):
    """
    Block access to this application instance on all its known hosts.

    Fixes over the previous revision: the docstring documented a `get_cmds`
    parameter and a List[str] return that do not exist; the duplicated
    `if not cmds` early-outs are hoisted to a single check; isinstance uses
    the tuple form.

    :param direction: str; Traffic direction to block.
    :param ports: Union[str, set[int]]; Which ports to block, either 'env', 'all', or a custom list/set. If 'env', it
    will read from the environment to determine what port to block, this is the default and will work for most apps.
    Use 'all' if you want to block all traffic to and or from the application. Specify a custom list to only block
    certain ports; IF A CUSTOM LIST IS SPECIFIED, it must also be passed to unblocking.
    :return: int; 0 on success (or when there is nothing to block), otherwise the returncode of the failed
    iptables call.
    """
    direction = util.parse_direction(direction)
    assert direction, "Could not parse direction!"
    cmds = []
    if ports == 'all':
        # Drop every TCP packet to and/or from the container IP.
        logger.info("Targeting %s on %s", self['diego_id'], self['cont_ip'])
        if direction in {'ingress', 'both'}:
            cmds.append('sudo iptables -I FORWARD 1 -d {} -p tcp -j DROP'.format(self['cont_ip']))
        if direction in {'egress', 'both'}:
            cmds.append('sudo iptables -I FORWARD 1 -s {} -p tcp -j DROP'.format(self['cont_ip']))
    else:
        if ports == 'env':
            # Default: only the container ports the app is bound to, minus
            # whitelisted ones.
            ports = map(lambda v: v[1], filter(self._app_port_not_whitelisted, self['app_ports']))
        else:
            assert isinstance(ports, (set, list)), 'Ports argument is invalid'
        for cport in ports:
            logger.info("Targeting %s on %s:%d", self['diego_id'], self['cont_ip'], cport)
            if direction in {'ingress', 'both'}:
                cmds.append('sudo iptables -I FORWARD 1 -d {} -p tcp --dport {} -j DROP'
                            .format(self['cont_ip'], cport))
            if direction in {'egress', 'both'}:
                cmds.append('sudo iptables -I FORWARD 1 -s {} -p tcp --sport {} -j DROP'
                            .format(self['cont_ip'], cport))
    if not cmds:
        return 0  # noop
    rcode, _, _ = self.run_cmd_on_diego_cell(cmds)
    if rcode:
        logger.error("Received return code %d from iptables call.", rcode)
        return rcode
    return 0
def unblock(self, ports=None):
    """
    Unblock access to this application instance on all its known hosts. This will actually run the unblock commands
    multiple times, as defined by `TIMES_TO_REMOVE` to prevent issues if an application was blocked multiple times.
    :param ports: set[int]; List of custom ports to unblock.
    """
    cmds = []
    logger.info("Unblocking %s on %s", self['diego_id'], self['cont_ip'])
    # Whole-IP rules, repeated so every stacked insertion gets deleted.
    for flag in ('-d', '-s'):
        cmds.extend(['sudo iptables -D FORWARD {} {} -p tcp -j DROP'
                     .format(flag, self['cont_ip'])] * TIMES_TO_REMOVE)
    # Per-port rules: env-derived (non-whitelisted) ports plus any custom ones.
    env_ports = (pair[1] for pair in self['app_ports'] if self._app_port_not_whitelisted(pair))
    for cport in chain(env_ports, ports or []):
        logger.info("Unblocking %s on %s:%d", self['diego_id'], self['cont_ip'], cport)
        cmds.extend(['sudo iptables -D FORWARD -d {} -p tcp --dport {} -j DROP'
                     .format(self['cont_ip'], cport)] * TIMES_TO_REMOVE)
        cmds.extend(['sudo iptables -D FORWARD -s {} -p tcp --sport {} -j DROP'
                     .format(self['cont_ip'], cport)] * TIMES_TO_REMOVE)
    # Best effort: rules may already be gone, so output is suppressed and the
    # returncode deliberately ignored.
    self.run_cmd_on_diego_cell(cmds, suppress_output=True)
def manipulate_network(self, *, latency=None, latency_sd=None, loss=None, loss_r=None,
duplication=None, corruption=None, rate=None, direction='egress'):
"""
Manipulate the network traffic from the application instance and its services. This will not work simultaneously
with network shaping, but the network shaping behavior can also be achieved via the rate parameter of this
method.
:param latency: int; Latency to introduce in milliseconds.
:param latency_sd: int; Standard deviation of the latency in milliseconds, if None, there will be no variance.
With relatively large variance values, packet reordering will occur.
:param loss: float; Percent in the range [0, 1] of packets which should be dropped/lost.
:param loss_r: float; Correlation coefficient in the range [0, 1] of the packet loss.
:param duplication: float; Percent in the range [0, 1] of packets which should be duplicated.
:param corruption: float; Percent in the range [0, 1] of packets which should be corrupted.
:param direction: str; Traffic direction to manipulate.
:param rate: Throughput rate limiting in kbps. See `rate` in https://man7.org/linux/man-pages/man8/tc-netem.8.html
:return: int; A returncode if any of the bosh ssh instances do not return 0.
"""
if not (latency or loss or duplication or corruption or rate):
# if no actions are specified, it is a noop
return 0
direction = util.parse_direction(direction)
assert direction, "Could not parse direction!"
setup_cmds = []
netem_cmds = []
iface = self['diego_vi']
# For notes regarding applying netem to ingress traffic see:
# https://wiki.linuxfoundation.org/networking/netem#how_can_i_use_netem_on_incoming_traffic3f
if direction in {'ingress', 'both'}:
# NOTE: ifb module will be left as loaded. this seems harmless enough and is simpler than trying to
# determine if we are the ones who loaded it. likewise with the ifb0 ip link being left in the up state
# N.B.: if changes are made to the filter command for some reason, then corresponding changes may be
# needed in the `unmanipulate_network` method since the del command used their is quite specific.
setup_cmds.extend([
'sudo modprobe ifb',
'sudo ip link set dev ifb0 up',
f'sudo tc qdisc add dev {iface} ingress',
f'sudo tc filter add dev {iface} parent ffff: protocol ip u32 match u32 0 0 flowid 1:1 action mirred egress redirect dev ifb0'
])
netem_cmds.append(['sudo', 'tc', 'qdisc', 'add', 'dev', 'ifb0', 'root', 'netem'])
if direction in {'egress', 'both'}:
netem_cmds.append(['sudo', 'tc', 'qdisc', 'add', 'dev', iface, 'root', 'netem'])
for netem_cmd in netem_cmds:
if latency:
assert latency > 0
netem_cmd.extend(['delay', '{}ms'.format(latency)])
if latency_sd:
assert latency_sd > 0
netem_cmd.extend(['{}ms'.format(latency_sd), 'distribution', 'normal'])
if loss:
assert 0 <= loss <= 1
netem_cmd.extend(['loss', '{}%'.format(loss * 100)])
if loss_r:
assert 0 <= loss_r <= 1
netem_cmd.append('{}%'.format(loss_r * 100))
if duplication:
assert 0 <= duplication <= 1
netem_cmd.extend(['duplicate', '{}%'.format(duplication * 100)])
if corruption:
assert 0 <= corruption <= 1
netem_cmd.extend(['corrupt', '{}%'.format(corruption * 100)])
if rate:
assert | |
isinstance(val, CpoExpr):
return val
# Check atoms (not cached)
vtyp = type(val)
ctyp = _PYTHON_TO_CPO_TYPE.get(vtyp)
if ctyp:
return CpoValue(val, ctyp)
# Check numpy scalars (special case when called from overloaded operator)
#if vtyp is NUMPY_NDARRAY and not val.shape:
if vtyp is NUMPY_NDARRAY:
return CpoValue(val, _PYTHON_TO_CPO_TYPE.get(val.dtype.type).parent_array_type)
# Create an array
return build_cpo_expr_array(val)
def build_cpo_expr_array(val):
    """ Builds a model array expression from a Python value.

    If active, this method uses the value cache to return the same CpoExpr for the same value.

    Args:
        val: Value to convert. Iterator or iterators of integers.
    Returns:
        Model array expression, not editable.
    Raises:
        Exception if conversion is not possible.
    """
    def normalize_value(val):
        # Strings and dicts are iterable but almost surely user errors here,
        # hence the explicit rejections before building the cache key.
        assert not is_string(val), "Impossible to build an array expression from a string"
        assert not isinstance(val, dict), "Impossible to build an array expression from a dictionary. Select values() or keys()."
        try:
            return _CacheKeyTuple(val)
        except TypeError:
            raise CpoException("Impossible to build an array expression from value '{}' of type '{}'".format(to_string(val), type(val)))

    # Check if already a CPO expression
    if isinstance(val, CpoExpr):
        return val
    # Check if already in the cache
    if _CACHE_ACTIVE:
        cpval = _CPO_VALUES_FROM_PYTHON.get_or_create('array', val, normalize_value, _create_cpo_array_expr)
    else:
        cpval = _create_cpo_array_expr(normalize_value(val))
    return cpval
def build_cpo_tupleset(val):
    """ Builds a TupleSet model expression from a Python value.

    If active, this method uses the value cache to return the same CpoExpr for the same value.

    Args:
        val: Value to convert. Iterator or iterators of integers, or existing TupleSet expression.
    Returns:
        Model tupleset, not editable.
    Raises:
        Exception if conversion is not possible.
    """
    def normalize_value(v):
        # Strings and dicts are iterable but rejected as almost-sure errors.
        assert not is_string(v), "Impossible to build a tuple set expression from a string"
        assert not isinstance(v, dict), "Impossible to build a tuple set expression from a dictionary. Select values() or keys()."
        try:
            return tuple(tuple(x) for x in v)
        except TypeError:
            raise CpoException("Impossible to build a tuple set from value '{}' of type '{}'".format(to_string(v), type(v)))

    # An existing TupleSet expression is returned unchanged
    if isinstance(val, CpoExpr) and val.is_type(Type_TupleSet):
        return val
    # Build the expression, going through the value cache when it is active
    if not _CACHE_ACTIVE:
        return _create_cpo_tuple_set(normalize_value(val))
    return _CPO_VALUES_FROM_PYTHON.get_or_create('tupleset', val, normalize_value, _create_cpo_tuple_set)
def build_cpo_transition_matrix(val):
    """ Builds a TransitionMatrix model expression from a Python value.

    If active, this method uses the value cache to return the same CpoExpr for the same value.

    Args:
        val: Value to convert. Iterator or iterators of integers, or existing TransitionMatrix expression.
    Returns:
        Model transition matrix, not editable.
    Raises:
        Exception if conversion is not possible.
    """
    def normalize_value(v):
        # Strings and dicts are iterable but rejected as almost-sure errors.
        assert not is_string(v), "Impossible to build a transition matrix expression from a string"
        assert not isinstance(v, dict), "Impossible to build a transition matrix expression from a dictionary. Select values() or keys()."
        try:
            return tuple(tuple(x) for x in v)
        except TypeError:
            raise CpoException("Impossible to build a transition matrix from value '{}' of type '{}'".format(to_string(v), type(v)))

    # An existing TransitionMatrix expression is returned unchanged
    if isinstance(val, CpoExpr) and val.is_type(Type_TransitionMatrix):
        return val
    # Build the expression, going through the value cache when it is active
    if not _CACHE_ACTIVE:
        return _create_cpo_transition_matrix(normalize_value(val))
    return _CPO_VALUES_FROM_PYTHON.get_or_create('matrix', val, normalize_value, _create_cpo_transition_matrix)
def compare_expressions(x1, x2):
    """ Compare two expressions to determine their declaration order.

    Args:
        x1: First expression
        x2: Second expression
    Returns:
        Negative integer if x1 comes first, zero if order is equal,
        positive integer if x2 comes first.
    """
    # Expressions of different CPO types are ordered by their type identifier
    if x1.type is not x2.type:
        return x1.type.id - x2.type.id
    # Same CPO type but different Python classes: aliases always come last
    cls1, cls2 = type(x1), type(x2)
    if cls1 is not cls2:
        if cls1 is CpoAlias:
            return 1
        if cls2 is CpoAlias:
            return -1
    # Otherwise fall back on natural name ordering
    return compare_natural(x1.get_name(), x2.get_name())
###############################################################################
## Private utility functions
###############################################################################
def _create_cpo_array_expr(val):
    """ Create a new CP array expression from a given array Python value.

    Args:
        val: Origin value, as a tuple or _CacheKeyTuple
    Returns:
        New CpoValue expression wrapping the array
    Raises:
        CpoException if no CPO array type fits the value
    """
    # Determine the CPO array type matching the elements
    atyp = _get_cpo_array_type(val)
    if atyp is None:
        raise CpoException("Impossible to build a CP Optimizer expression with value '{}'".format(val))
    if atyp.is_array_of_expr:
        # Each element must itself be a model expression
        elems = tuple(build_cpo_expr(v) for v in val)
    elif atyp is Type_IntArray:
        # Unwrap CpoValue elements down to plain integers
        elems = tuple(v.value if isinstance(v, CpoValue) else v for v in val)
    else:
        elems = val
    return CpoValue(elems, atyp)
def _create_cpo_tuple_set(tset):
    """ Create a new CP tuple set expression from a given Python tuple of tuples.

    Args:
        tset: Origin value, as a tuple of tuples of integers
    Returns:
        New CpoValue expression of type TupleSet
    """
    # An empty tuple set is accepted as-is; otherwise verify consistency
    if tset:
        width = len(tset[0])
        assert all(len(t) == width for t in tset), "All tuples in 'tset' should have the same length"
        assert all(all(is_int(v) for v in r) for r in tset), "All tupleset values should be integer"
    return CpoValue(tset, Type_TupleSet)
def _create_cpo_transition_matrix(trmx):
    """ Create a new CP transition matrix expression from a given Python tuple of tuples

    Args:
        trmx: Origin value, as a square tuple of tuples of non-negative integers
    Returns:
        New expression (CpoTransitionMatrix)
    """
    # Verify value: matrix must be square and contain only non-negative integers
    size = len(trmx)
    assert size > 0, "Transition matrix should not be empty"
    assert all(len(t) == size for t in trmx), "All matrix lines should have the same length " + str(size)
    assert all(all(is_int(v) and v >= 0 for v in r) for r in trmx), "All matrix values should be positive integer"
    # Create result expression
    return CpoTransitionMatrix(values=trmx)
def _get_cpo_array_type(val):
    """ Determine the CPO array type of a given Python list or tuple.

    Args:
        val: Python value (list or tuple)
    Returns:
        Matching CPO array type, or None if the elements are not compatible
    """
    # An empty array defaults to an array of integers
    if not val:
        return Type_IntArray
    # Reduce all element types into their most general common type
    common = None
    for elem in val:
        etyp = _get_cpo_type(elem)
        if etyp is None:
            return None
        common = etyp if common is None else common.get_common_type(etyp)
        if common is None:
            return None
    # Special aggregate cases: intervals and arrays of arrays
    if common is Type_IntInterval:
        return Type_IntArray
    if common is Type_IntArray:
        return Type_TupleSet
    return common.parent_array_type
def _get_cpo_type(val):
    """ Determine the CPO type for a given Python value.

    Args:
        val: Python value
    Returns:
        Corresponding CPO type, None if the value is not convertible
    """
    # Plain Python scalar types have a direct mapping
    styp = _PYTHON_TO_CPO_TYPE.get(type(val))
    if styp:
        return styp
    # Model expressions carry their own type
    if isinstance(val, CpoExpr):
        return val.type
    # Numpy array scalars (0-dim arrays) map through their dtype
    # (special case when called from an overloaded operator)
    if type(val) is NUMPY_NDARRAY and not val.shape:
        return _PYTHON_TO_CPO_TYPE.get(val.dtype.type)
    # Lists and tuples are either an integer interval or an array type
    if isinstance(val, (tuple, list)):
        return Type_IntInterval if _is_cpo_int_interval(val) else _get_cpo_array_type(val)
    return None
def _clear_value_cache():
    """ Clear the cache of CPO values created from Python values.

    After this call, building an expression from a previously seen Python
    value creates a fresh expression instead of reusing a cached one.
    """
    _CPO_VALUES_FROM_PYTHON.clear()
def _get_cpo_type_str(val):
    """ Get the CPO type name of a value

    Args:
        val: Value
    Returns:
        Value type string in CPO types
    Raises:
        AttributeError: if the value has no corresponding CPO type
            (_get_cpo_type returns None, which has no get_name())
    """
    return _get_cpo_type(val).get_name()
def _create_operation(oper, params):
    """ Create a new expression node that matches an operation descriptor.

    The signatures of the operation are scanned for one that accepts the
    given arguments, and a function-call expression is built from it.

    Args:
        oper:   Operation descriptor
        params: List of expression parameters
    Returns:
        New CpoFunctionCall expression
    Raises:
        CpoException if no operation signature matches the arguments
    """
    # Convert every parameter into a CPO expression
    args = tuple(build_cpo_expr(p) for p in params)
    # Retrieve the signature accepting these argument types
    sgn = _get_matching_signature(oper, args)
    if sgn is None:
        raise CpoException("The combination of parameters ({}) is not allowed for operation '{}' ({})"
                           .format(", ".join(map(_get_cpo_type_str, args)), oper.python_name, oper.cpo_name))
    # Build the resulting function-call expression
    return CpoFunctionCall(sgn.operation, sgn.return_type, args)
###############################################################################
## Private Functions
###############################################################################
# Mapping of Python types to CPO types
_PYTHON_TO_CPO_TYPE = {}
# Plain Python scalar types (booleans, integers, floats) map to CPO scalar types
for t in BOOL_TYPES:
    _PYTHON_TO_CPO_TYPE[t] = Type_Bool
for t in INTEGER_TYPES:
    _PYTHON_TO_CPO_TYPE[t] = Type_Int
for t in FLOAT_TYPES:
    _PYTHON_TO_CPO_TYPE[t] = Type_Float
# Model object classes map to their dedicated CPO types
_PYTHON_TO_CPO_TYPE[CpoIntVar] = Type_IntVar
_PYTHON_TO_CPO_TYPE[CpoIntervalVar] = Type_IntervalVar
_PYTHON_TO_CPO_TYPE[CpoSequenceVar] = Type_SequenceVar
_PYTHON_TO_CPO_TYPE[CpoTransitionMatrix] = Type_TransitionMatrix
_PYTHON_TO_CPO_TYPE[CpoStateFunction] = Type_StateFunction
def _is_cpo_int(arg):
    """ Check whether a value is an integer within the CPO integer bounds.

    Args:
        arg: Value to check
    Returns:
        True if the value is an integer in [INT_MIN, INT_MAX], False otherwise
    """
    if not is_int(arg):
        return False
    return INT_MIN <= arg <= INT_MAX
def _is_cpo_int_interval(val):
""" Check if a value represents an interval of CPO integers
Args:
val: Value to check
Returns:
True if value is a tuple of 2 values representing an | |
<reponame>gAldeia/itea-python
# Author: <NAME>
# Contact: <EMAIL>
# Version: 1.0.2
# Last modified: 06-25-2021 by <NAME>
"""Base class to represent an IT expression.
"""
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils.validation import check_array
class BaseITExpr(BaseEstimator):
"""This class describes the structure that an ``ITExpr`` should have, and
implements only the methods that have similar behavior for classification
and regression.
The ITEA implementations for classification and regression will create
a population of ``ITExpr`` instances and evolve this population to find a
final best solution ``itea.bestsol_``.
The best solution will be a scikit estimator and can be used in many scikit
methods. It can also be used to create ICE and PDP plots, which are
particularly interesting to complement explanations given by the
``ITExpr_explainer``.
Methods that should be specialized are created as virtual methods.
In practice, this class should never be instantiated.
"""
def __init__(self, *, expr, tfuncs, labels=[], **kwargs):
r"""Constructor method.
Parameters
----------
expr : list of Tuple[Transformation, Interaction]
list of IT terms to create an IT expression. It **must** be a
python built-in list.
An IT term is the tuple :math:`(t, p)`, where
:math:`t : \mathbb{R} \rightarrow \mathbb{R}` is a unary function
called **transformation** function, and :math:`p \in \mathbb{R}^d`
is a vector of size :math:`d`, where :math:`d` is the number of
variables of the problem. The tuple contains the information to
create an expression:
:math:`ITterm(x) = t \circ p(x),`
and :math:`p` is the **interaction** of the variables:
:math:`p(x) = \prod_{i=1}^{d} x_{i}^{k_{i}}.`
Each IT term is a tuple containing the name of the transformation
function and a list of exponents to be used in the interaction
function.
The whole expression is evaluated as
:math:`f(x) = \sum_{i=1}^{n} w_i \cdot t_i \circ p_i(x),`
with :math:`w_i` being a coefficient to be adjusted with the
``fit()`` method, and :math:`n` the number of terms.
tfuncs : dict
should always be a dict where the
keys are the names of the transformation functions and
the values are unary vectorized functions (for example,
numpy functions). For user-defined functions, see
numpy.vectorize for more information on how to vectorize
your transformation functions.
labels : list of strings, default=[]
list containing the labels of the variables that will be used.
When the list of labels is empty, the variables are named
:math:`x_0, x_1, \cdots`.
"""
self.expr = expr
self.labels = labels
self.tfuncs = tfuncs
self.n_terms = len(expr)
# attributes that are changed by fit method
self._is_fitted = False
self._fitness = np.inf
def to_str(self, places=3, term_separator=None):
r"""Method to represent the IT expression as a string.
The variable names used are the ones given to ``labels`` in the
constructor.
Some simplifications are made to omit trivial operations:
- if a variable has zero as an exponent, it is omitted (since it will
eval to 1 regardless of the x value);
- if the coefficient (or all coefficients, in the multi-class task)
is zero, the whole term is omitted.
Parameters
----------
places : int, default=3
Number of decimal places to round the coefficients when printing
the expression.
term_separator : string or None, default=None
string that will be used to contatenate each term. Suggestions
are ``['\n', ' + ', ', ']``. If set to None, then the separator
used is ``' + '`` .
"""
if term_separator == None:
term_separator = " + "
# If is not fitted, the method will use placeholder coefs and intercept
coefs = np.ones(self.n_terms)
intercept = np.array(0.0)
if self._is_fitted:
coefs = self.coef_
intercept = self.intercept_
str_terms = []
for w, (fi_str, ti) in zip(coefs.T, self.expr):
if np.all(w == 0):
continue
w_str = f"{w.round(places)}*"
t_str = " * ".join([
f"placeholder_{i}" + (f"^{t}" if t!=1 else "")
for i, t in enumerate(ti) if t!=0
])
str_terms.append(f"{w_str}{fi_str}({t_str})")
expr_str = term_separator.join(str_terms)
if len(self.labels)>0:
for i, l in enumerate(self.labels):
expr_str = expr_str.replace(f"placeholder_{i}", l)
else:
expr_str = expr_str.replace(f"placeholder_", "x")
return expr_str + f"{term_separator}{intercept.round(places)}"
def _eval(self, X):
r"""Method to evaluate each IT term on a given data set.
This makes the mapping from the original variable space to the
IT expression space.
Parameters
----------
X : numpy.array of shape (n_samples, n_features)
data set to be evaluated
Returns
-------
Z : numpy.array of shape (n_samples, n_terms)
the Z matrix will have one column for each IT term in the
expression, where the column ``Z[:, i]`` is the evaluation of
the i-th term for all samples.
This translates to:
:math:`Z_{(:, i)} = t_i \circ p_i(x).`
"""
Z = np.zeros( (len(X), self.n_terms) )
for i, (fi, ti) in enumerate( self.expr ):
Z[:, i] = self.tfuncs[fi]( np.prod(np.power(X, ti), axis=1) )
return Z
    def __str__(self):
        """Overload of the ``__str__`` method. Calls ``itexpr.to_str()``
        method with the default values for the arguments.
        """

        return self.to_str()
def complexity(self):
"""Method to calculate the IT expression size as if it was an expression
tree, like the conventional representation for symbolic regression.
Some simplifications will be made (the same that we do in ``to_str()``),
so the complexity value corresponds to the string returned by the
method.
Returns
-------
complexity : int
the number of nodes that a symbolic tree would have if the
IT expression was converted to it.
"""
coefs = np.ones(self.n_terms)
if hasattr(self, "coef_"):
coefs = self.coef_
tlen = 0
for coef, (fi, ti) in zip(coefs, self.expr):
if np.all(coef == 0):
continue
# coef, multiplication and transformation function
tlen += 3
# exponents != [0, 1] always are a 3 node subtree
tlen += sum([3 for t in ti if t not in [0, 1]])
# when exponent is 1, then x^1 = x and we consider only 1 node
tlen += sum([1 for t in ti if t == 1])
# exponents equals to 0 are discarded, since x^0 = 1
# multiplication between each variable in the interaction
tlen += sum([1 for t in ti if t != 0]) - 1
# sum between terms and the intercept
return tlen + self.n_terms + 1
def gradient(self, X, tfuncs_dx, logit=False):
r"""Method to evaluate the gradient of the IT expression for all
data points in ``X``. The gradients are useful for the
``ITExpr_explainer`` class, which calculates feature importances
and generate plots using the gradient information.
Parameters
----------
X : numpy.array of shape (n_samples, n_features)
points to evaluate the gradients.
tfuncs_dx : dict
dictionary like ``tfuncs`` , where the key is the name of the
function (should have the derivatives of every function in
tfuncs) and the value is a vectorized function
representing its derivative.
logit : boolean, default=False
boolean variable indicating if the IT expression is being used
as a linear model or as a linear method of a logistic regression
predictor. When it is true, then we must consider the derivative
of the logistic regression.
let :math:`it(x)` be the IT expression. It is used in a logit model:
:math:`logit(x) = \frac{1}{1 + e^{-it(x)}}`
The partial derivative needed to calculate the gradient is:
:math:`\frac{\partial}{\partial x_i} logit(x)`
:math:`\Rightarrow \frac{e^{it(x)} it'(x)}{(e^{it(x)} + 1)^2}`
Returns
-------
nabla : numpy.array of shape (n_classes, n_samples, n_features)
returns a 3-dimensional array. For regression and binary
classification, ``n_classes=1``. Each line of the matrix inside
``nabla[i]`` is the gradient evaluated to the corresponding
sample in X.
To ilustrate:
- Gradient of observation i for regression:
``gradients(X, tfuncs_dx)[1, i, :]``
- Gradient of observation i according to coefficients to classify
the class j in multi-class classification:
``gradients(X, tfuncs_dx, logit=True)[j, i, :]``
"""
X = check_array(X)
# the gradients can be calculated even before fit
intercept = [0.0]
if hasattr(self, "intercept_"):
intercept = self.intercept_
coefs = np.ones(self.n_terms)
if hasattr(self, "coef_"):
coefs = self.coef_
# if coefs.ndim==1, then it is a regression ITExpr. Let's make it 2d
if coefs.ndim == 1:
coefs = coefs.reshape(-1, 1).T
# Storing the gradient of each term
term_gradients = []
# i iterates over terms, j over variables
for j in range(X.shape[1]):
# evaluating | |
<filename>src/falconpy/malquery.py
"""Falcon MalQuery API Interface Class
_______ __ _______ __ __ __
| _ .----.-----.--.--.--.--| | _ | |_.----|__| |--.-----.
|. 1___| _| _ | | | | _ | 1___| _| _| | <| -__|
|. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____|
|: 1 | |: 1 |
|::.. . | CROWDSTRIKE FALCON |::.. . | FalconPy
`-------' `-------'
OAuth2 API - Customer SDK
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
"""
from ._util import process_service_request, force_default, handle_single_argument
from ._payload import malquery_fuzzy_payload, generic_payload_list
from ._payload import malquery_exact_search_payload, malquery_hunt_payload
from ._service_class import ServiceClass
from ._endpoint._malquery import _malquery_endpoints as Endpoints
class MalQuery(ServiceClass):
"""The only requirement to instantiate an instance of this class is one of the following:
- a valid client_id and client_secret provided as keywords.
- a credential dictionary with client_id and client_secret containing valid API credentials
{
"client_id": "CLIENT_ID_HERE",
"client_secret": "CLIENT_SECRET_HERE"
}
- a previously-authenticated instance of the authentication service class (oauth2.py)
- a valid token provided by the authentication service class (OAuth2.token())
"""
def get_quotas(self: object) -> dict:
"""Get information about search and download quotas in your environment
This method does not accept arguments or keywords.
Returns: dict object containing API response.
HTTP Method: GET
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/malquery/GetMalQueryQuotasV1
"""
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="GetMalQueryQuotasV1"
)
@force_default(defaults=["body"], default_types=["dict"])
def fuzzy_search(self: object, body: dict = None, **kwargs) -> dict:
"""Search Falcon MalQuery quickly, but with more potential for false positives.
Search for a combination of hex patterns and strings in order to identify
samples based upon file content at byte level granularity.
Keyword arguments:
body -- full body payload, not required when ids keyword is provided.
{
"options": {
"filter_meta": [
"string"
],
"limit": 0
},
"patterns": [
{
"type": "string",
"value": "string"
}
]
}
filter_meta -- List of strings.
limit -- Integer representing maximum number of matches to return.
patterns -- List of dictionaries containing patterns to match.
{
"type": "string",
"value": "string
}
This method only supports keywords for providing arguments.
Returns: dict object containing API response.
HTTP Method: POST
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/malquery/PostMalQueryFuzzySearchV1
"""
if not body:
body = malquery_fuzzy_payload(passed_keywords=kwargs)
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="PostMalQueryFuzzySearchV1",
body=body
)
@force_default(defaults=["parameters"], default_types=["dict"])
def get_download(self: object, *args, parameters: dict = None, **kwargs) -> object:
"""Download a file indexed by MalQuery.
Specify the file using its SHA256.
Only one file is supported at this time.
Keyword arguments:
ids -- List of SHA256s to retrieve. String or list of strings.
parameters -- full parameters payload, not required if ids is provided as a keyword.
Arguments: When not specified, the first argument to this method is assumed to be 'ids'.
All others are ignored.
Returns: binary object on SUCCESS, dict object containing API response on FAILURE.
HTTP Method: GET
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/malquery/GetMalQueryDownloadV1
"""
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="GetMalQueryDownloadV1",
keywords=kwargs,
params=handle_single_argument(args, parameters, "ids")
)
@force_default(defaults=["parameters"], default_types=["dict"])
def get_metadata(self: object, *args, parameters: dict = None, **kwargs) -> dict:
"""Retrieve indexed files metadata by their hash
Keyword arguments:
ids -- List of SHA256s to retrieve metadata for. String or list of strings.
parameters -- full parameters payload, not required if ids is provided as a keyword.
Arguments: When not specified, the first argument to this method is assumed to be 'ids'.
All others are ignored.
Returns: dict object containing API response.
HTTP Method: GET
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/malquery/GetMalQueryMetadataV1
"""
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="GetMalQueryMetadataV1",
keywords=kwargs,
params=handle_single_argument(args, parameters, "ids")
)
@force_default(defaults=["parameters"], default_types=["dict"])
def get_request(self: object, *args, parameters: dict = None, **kwargs) -> dict:
"""Check the status and results of an asynchronous request, such as hunt or exact-search.
Supports a single request id at this time.
Keyword arguments:
ids -- List of MalQuery identifiers to retrieve. String or list of strings.
parameters -- full parameters payload, not required if ids is provided as a keyword.
Arguments: When not specified, the first argument to this method is assumed to be 'ids'.
All others are ignored.
Returns: dict object containing API response.
HTTP Method: GET
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/malquery/GetMalQueryRequestV1
"""
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="GetMalQueryRequestV1",
keywords=kwargs,
params=handle_single_argument(args, parameters, "ids")
)
@force_default(defaults=["parameters"], default_types=["dict"])
def get_samples(self: object, *args, parameters: dict = None, **kwargs) -> object:
"""Fetch a zip archive with password 'infected' containing the samples.
Call this once the samples-multidownload request has finished processing
Keyword arguments:
ids -- Multi-download job ID. String.
parameters -- full parameters payload, not required if ids is provided as a keyword.
Arguments: When not specified, the first argument to this method is assumed to be 'ids'.
All others are ignored.
Returns: binary object on SUCCESS, dict object containing API response on FAILURE.
HTTP Method: GET
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/malquery/GetMalQueryEntitiesSamplesFetchV1
"""
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="GetMalQueryEntitiesSamplesFetchV1",
keywords=kwargs,
params=handle_single_argument(args, parameters, "ids")
)
@force_default(defaults=["body"], default_types=["dict"])
def samples_multidownload(self: object, *args, body: dict = None, **kwargs) -> dict:
"""Schedule samples for download. Use the result id with the /request endpoint to check
if the download is ready after which you can call get_samples to get the zip.
Keyword arguments:
body -- full body payload, not required when ids keyword is provided.
{
"samples": [
"string"
]
}
samples -- SHA256(s) of the samples to retrieve. String or list of strings.
Arguments: When not specified, the first argument to this method is assumed to be
'samples'. All others are ignored.
Returns: dict object containing API response.
HTTP Method: POST
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/malquery/PostMalQueryEntitiesSamplesMultidownloadV1
"""
if not body:
body = generic_payload_list(submitted_arguments=args,
submitted_keywords=kwargs,
payload_value="samples"
)
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="PostMalQueryEntitiesSamplesMultidownloadV1",
body=body
)
@force_default(defaults=["body"], default_types=["dict"])
def exact_search(self: object, body: dict = None, **kwargs) -> dict:
"""Search Falcon MalQuery for a combination of hex patterns
and strings in order to identify samples based upon file content
at byte level granularity. You can filter results on criteria such
as file type, file size and first seen date.
Returns a request id which can be used with the /request endpoint.
Keyword arguments:
body -- full body payload, not required when ids keyword is provided.
{
"options": {
"filter_filetypes": [
"string"
],
"filter_meta": [
"string"
],
"limit": 0,
"max_date": "string",
"max_size": "string",
"min_date": "string",
"min_size": "string"
},
"patterns": [
{
"type": "string",
"value": "string"
}
]
}
filter_filetypes -- File types to filter on. List of strings.
filter_meta -- File metadata to filter on. List of strings.
limit -- Integer representing maximum number of matches to return.
max_date -- Maximum date to match. UTC formatted string.
min_date -- Minimum date to match. UTC formatted string.
max_size -- Maximum size in bytes to match. String.
min_size -- Minumum size in bytes to match. String.
patterns -- List of dictionaries containing patterns to match.
{
"type": "string",
"value": "string
}
This method only supports keywords for providing arguments.
Returns: dict object containing API response.
HTTP Method: POST
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/malquery/PostMalQueryExactSearchV1
"""
if not body:
body = malquery_exact_search_payload(passed_keywords=kwargs)
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="PostMalQueryExactSearchV1",
body=body
)
@force_default(defaults=["body"], default_types=["dict"])
def hunt(self: object, body: dict = None, **kwargs) -> dict:
"""Schedule a YARA-based search for execution.
Returns a | |
""" Affine image registration module consisting of the following classes:
AffineMap: encapsulates the necessary information to perform affine
transforms between two domains, defined by a `static` and a `moving`
image. The `domain` of the transform is the set of points in the
`static` image's grid, and the `codomain` is the set of points in
the `moving` image. When we call the `transform` method, `AffineMap`
maps each point `x` of the domain (`static` grid) to the codomain
(`moving` grid) and interpolates the `moving` image at that point
to obtain the intensity value to be placed at `x` in the resulting
grid. The `transform_inverse` method performs the opposite operation
mapping points in the codomain to points in the domain.
ParzenJointHistogram: computes the marginal and joint distributions of
intensities of a pair of images, using Parzen windows [Parzen62]
with a cubic spline kernel, as proposed by Mattes et al. [Mattes03].
It also computes the gradient of the joint histogram w.r.t. the
parameters of a given transform.
MutualInformationMetric: computes the value and gradient of the mutual
information metric the way `Optimizer` needs them. That is, given
a set of transform parameters, it will use `ParzenJointHistogram`
to compute the value and gradient of the joint intensity histogram
                            evaluated at the given parameters, and evaluate the value and
gradient of the histogram's mutual information.
AffineRegistration: it runs the multi-resolution registration, putting
all the pieces together. It needs to create the scale space of the
images and run the multi-resolution registration by using the Metric
and the Optimizer at each level of the Gaussian pyramid. At each
level, it will setup the metric to compute value and gradient of the
metric with the input images with different levels of smoothing.
References
----------
    [Parzen62] Parzen E. On the estimation of a probability density
               function and the mode. Annals of Mathematical Statistics,
               33(3), 1065-1076, 1962.
    [Mattes03] Mattes D., Haynor D. R., Vesselle H., Lewellen T. K.,
               & Eubank W. PET-CT image registration in the chest using
               free-form deformations. IEEE Transactions on Medical
               Imaging, 22(1), 120-8, 2003.
"""
import numpy as np
import numpy.linalg as npl
import scipy.ndimage as ndimage
from ..core.optimize import Optimizer
from ..core.optimize import SCIPY_LESS_0_12
from . import vector_fields as vf
from . import VerbosityLevels
from .parzenhist import (ParzenJointHistogram,
sample_domain_regular,
compute_parzen_mi)
from .imwarp import (get_direction_and_spacings, ScaleSpace)
from .scalespace import IsotropicScaleSpace
_interp_options = ['nearest', 'linear']

# Dispatch table: (image dimension, interpolation type) -> transform function
_transform_method = {
    (2, 'nearest'): vf.transform_2d_affine_nn,
    (3, 'nearest'): vf.transform_3d_affine_nn,
    (2, 'linear'): vf.transform_2d_affine,
    (3, 'linear'): vf.transform_3d_affine,
}
class AffineInversionError(Exception):
    """ Raised when an affine matrix contains invalid (NaN) elements or
    cannot be inverted.
    """
    pass
class AffineMap(object):
    def __init__(self, affine, domain_grid_shape=None, domain_grid2world=None,
                 codomain_grid_shape=None, codomain_grid2world=None):
        """ AffineMap

        Implements an affine transformation whose domain is given by
        `domain_grid` and `domain_grid2world`, and whose co-domain is
        given by `codomain_grid` and `codomain_grid2world`.

        The actual transform is represented by the `affine` matrix, which
        operate in world coordinates. Therefore, to transform a moving image
        towards a static image, we first map each voxel (i,j,k) of the static
        image to world coordinates (x,y,z) by applying `domain_grid2world`.
        Then we apply the `affine` transform to (x,y,z) obtaining (x', y', z')
        in moving image's world coordinates. Finally, (x', y', z') is mapped
        to voxel coordinates (i', j', k') in the moving image by multiplying
        (x', y', z') by the inverse of `codomain_grid2world`. The
        `codomain_grid_shape` is used analogously to transform the static
        image towards the moving image when calling `transform_inverse`.

        If the domain/co-domain information is not provided (None) then the
        sampling information needs to be specified each time the `transform`
        or `transform_inverse` is called to transform images. Note that such
        sampling information is not necessary to transform points defined in
        physical space, such as stream lines.

        Parameters
        ----------
        affine : array, shape (dim + 1, dim + 1)
            the matrix defining the affine transform, where `dim` is the
            dimension of the space this map operates in (2 for 2D images,
            3 for 3D images). If None, then `self` represents the identity
            transformation.
        domain_grid_shape : sequence, shape (dim,), optional
            the shape of the default domain sampling grid. When `transform`
            is called to transform an image, the resulting image will have
            this shape, unless a different sampling information is provided.
            If None, then the sampling grid shape must be specified each time
            the `transform` method is called.
        domain_grid2world : array, shape (dim + 1, dim + 1), optional
            the grid-to-world transform associated with the domain grid.
            If None (the default), then the grid-to-world transform is assumed
            to be the identity.
        codomain_grid_shape : sequence of integers, shape (dim,)
            the shape of the default co-domain sampling grid. When
            `transform_inverse` is called to transform an image, the resulting
            image will have this shape, unless a different sampling
            information is provided. If None (the default), then the sampling
            grid shape must be specified each time the `transform_inverse`
            method is called.
        codomain_grid2world : array, shape (dim + 1, dim + 1)
            the grid-to-world transform associated with the co-domain grid.
            If None (the default), then the grid-to-world transform is assumed
            to be the identity.
        """
        # Validates the affine and pre-computes its inverse (self.affine_inv)
        self.set_affine(affine)
        self.domain_shape = domain_grid_shape
        self.domain_grid2world = domain_grid2world
        self.codomain_shape = codomain_grid_shape
        self.codomain_grid2world = codomain_grid2world
def set_affine(self, affine):
""" Sets the affine transform (operating in physical space)
Parameters
----------
affine : array, shape (dim + 1, dim + 1)
the matrix representing the affine transform operating in
physical space. The domain and co-domain information
remains unchanged. If None, then `self` represents the identity
transformation.
"""
self.affine = affine
if self.affine is None:
self.affine_inv = None
return
if np.any(np.isnan(affine)):
raise AffineInversionError('Affine contains invalid elements')
try:
self.affine_inv = npl.inv(affine)
except npl.LinAlgError:
raise AffineInversionError('Affine cannot be inverted')
def _apply_transform(self, image, interp='linear', image_grid2world=None,
sampling_grid_shape=None, sampling_grid2world=None,
resample_only=False, apply_inverse=False):
""" Transforms the input image applying this affine transform
This is a generic function to transform images using either this
(direct) transform or its inverse.
If applying the direct transform (`apply_inverse=False`):
by default, the transformed image is sampled at a grid defined by
`self.domain_shape` and `self.domain_grid2world`.
If applying the inverse transform (`apply_inverse=True`):
by default, the transformed image is sampled at a grid defined by
`self.codomain_shape` and `self.codomain_grid2world`.
If the sampling information was not provided at initialization of this
transform then `sampling_grid_shape` is mandatory.
Parameters
----------
image : array, shape (X, Y) or (X, Y, Z)
the image to be transformed
interp : string, either 'linear' or 'nearest'
the type of interpolation to be used, either 'linear'
(for k-linear interpolation) or 'nearest' for nearest neighbor
image_grid2world : array, shape (dim + 1, dim + 1), optional
the grid-to-world transform associated with `image`.
If None (the default), then the grid-to-world transform is assumed
to be the identity.
sampling_grid_shape : sequence, shape (dim,), optional
the shape of the grid where the transformed image must be sampled.
If None (the default), then `self.domain_shape` is used instead
(which must have been set at initialization, otherwise an exception
will be raised).
sampling_grid2world : array, shape (dim + 1, dim + 1), optional
the grid-to-world transform associated with the sampling grid
(specified by `sampling_grid_shape`, or by default
`self.domain_shape`). If None (the default), then the
grid-to-world transform is assumed to be the identity.
resample_only : Boolean, optional
If False (the default) the affine transform is applied normally.
If True, then the affine transform is not applied, and the input
image is just re-sampled on the domain grid of this transform.
apply_inverse : Boolean, optional
If False (the default) the image is transformed from the codomain
of this transform to its domain using the (direct) affine
transform. Otherwise, the image is transformed from the domain
of this transform to its codomain using the (inverse) affine
transform.
Returns
-------
transformed : array, shape `sampling_grid_shape` or `self.domain_shape`
the transformed image, sampled at the requested grid
"""
# Verify valid interpolation requested
if interp not in _interp_options:
raise ValueError('Unknown interpolation method: %s' % (interp,))
# Obtain sampling grid
if sampling_grid_shape is None:
if apply_inverse:
sampling_grid_shape = self.codomain_shape
else:
sampling_grid_shape = self.domain_shape
if sampling_grid_shape is None:
msg = 'Unknown sampling info. Provide a valid sampling_grid_shape'
raise ValueError(msg)
dim = len(sampling_grid_shape)
shape = np.array(sampling_grid_shape, dtype=np.int32)
# Verify valid image | |
<reponame>nikitaborisov/autowordl
words = ['AAHED', 'AALII', 'AARGH', 'AARTI', 'ABACA', 'ABACI', 'ABACK',
'ABACS', 'ABAFT', 'ABAKA', 'ABAMP', 'ABAND', 'ABASE', 'ABASH',
'ABASK', 'ABATE', 'ABAYA', 'ABBAS', 'ABBED', 'ABBES', 'ABBEY',
'ABBOT', 'ABCEE', 'ABEAM', 'ABEAR', 'ABELE', 'ABERS', 'ABETS',
'ABHOR', 'ABIDE', 'ABIES', 'ABLED', 'ABLER', 'ABLES', 'ABLET',
'ABLOW', 'ABMHO', 'ABODE', 'ABOHM', 'ABOIL', 'ABOMA', 'ABOON',
'ABORD', 'ABORE', 'ABORT', 'ABOUT', 'ABOVE', 'ABRAM', 'ABRAY',
'ABRIM', 'ABRIN', 'ABRIS', 'ABSEY', 'ABSIT', 'ABUNA', 'ABUNE',
'ABUSE', 'ABUTS', 'ABUZZ', 'ABYES', 'ABYSM', 'ABYSS', 'ACAIS',
'ACARI', 'ACCAS', 'ACCOY', 'ACERB', 'ACERS', 'ACETA', 'ACHAR',
'ACHED', 'ACHES', 'ACHOO', 'ACIDS', 'ACIDY', 'ACING', 'ACINI',
'ACKEE', 'ACKER', 'ACMES', 'ACMIC', 'ACNED', 'ACNES', 'ACOCK',
'ACOLD', 'ACORN', 'ACRED', 'ACRES', 'ACRID', 'ACROS', 'ACTED',
'ACTIN', 'ACTON', 'ACTOR', 'ACUTE', 'ACYLS', 'ADAGE', 'ADAPT',
'ADAWS', 'ADAYS', 'ADBOT', 'ADDAX', 'ADDED', 'ADDER', 'ADDIO',
'ADDLE', 'ADEEM', 'ADEPT', 'ADHAN', 'ADIEU', 'ADIOS', 'ADITS',
'ADMAN', 'ADMEN', 'ADMIN', 'ADMIT', 'ADMIX', 'ADOBE', 'ADOBO',
'ADOPT', 'ADORE', 'ADORN', 'ADOWN', 'ADOZE', 'ADRAD', 'ADRED',
'ADSUM', 'ADUKI', 'ADULT', 'ADUNC', 'ADUST', 'ADVEW', 'ADYTA',
'ADZED', 'ADZES', 'AECIA', 'AEDES', 'AEGIS', 'AEONS', 'AERIE',
'AEROS', 'AESIR', 'AFALD', 'AFARA', 'AFARS', 'AFEAR', 'AFFIX',
'AFIRE', 'AFLAJ', 'AFOOT', 'AFORE', 'AFOUL', 'AFRIT', 'AFROS',
'AFTER', 'AGAIN', 'AGAMA', 'AGAMI', 'AGAPE', 'AGARS', 'AGAST',
'AGATE', 'AGAVE', 'AGAZE', 'AGENE', 'AGENT', 'AGERS', 'AGGER',
'AGGIE', 'AGGRI', 'AGGRO', 'AGGRY', 'AGHAS', 'AGILA', 'AGILE',
'AGING', 'AGIOS', 'AGISM', 'AGIST', 'AGITA', 'AGLEE', 'AGLET',
'AGLEY', 'AGLOO', 'AGLOW', 'AGLUS', 'AGMAS', 'AGOGE', 'AGONE',
'AGONS', 'AGONY', 'AGOOD', 'AGORA', 'AGREE', 'AGRIA', 'AGRIN',
'AGROS', 'AGUED', 'AGUES', 'AGUNA', 'AGUTI', 'AHEAD', 'AHEAP',
'AHENT', 'AHIGH', 'AHIND', 'AHING', 'AHINT', 'AHOLD', 'AHULL',
'AHURU', 'AIDAS', 'AIDED', 'AIDER', 'AIDES', 'AIDOI', 'AIDOS',
'AIERY', 'AIGAS', 'AIGHT', 'AILED', 'AIMED', 'AIMER', 'AINEE',
'AINGA', 'AIOLI', 'AIRED', 'AIRER', 'AIRNS', 'AIRTH', 'AIRTS',
'AISLE', 'AITCH', 'AITUS', 'AIVER', 'AIYEE', 'AIZLE', 'AJIES',
'AJIVA', 'AJUGA', 'AJWAN', 'AKEES', 'AKELA', 'AKENE', 'AKING',
'AKITA', 'AKKAS', 'ALAAP', 'ALACK', 'ALAMO', 'ALAND', 'ALANE',
'ALANG', 'ALANS', 'ALANT', 'ALAPA', 'ALAPS', 'ALARM', 'ALARY',
'ALATE', 'ALAYS', 'ALBAS', 'ALBEE', 'ALBUM', 'ALCID', 'ALCOS',
'ALDEA', 'ALDER', 'ALDOL', 'ALECK', 'ALECS', 'ALEFS', 'ALEFT',
'ALEPH', 'ALERT', 'ALEWS', 'ALEYE', 'ALFAS', 'ALGAE', 'ALGAL',
'ALGAS', 'ALGID', 'ALGIN', 'ALGOR', 'ALGUM', 'ALIAS', 'ALIBI',
'ALIEN', 'ALIFS', 'ALIGN', 'ALIKE', 'ALINE', 'ALIST', 'ALIVE',
'ALIYA', 'ALKIE', 'ALKOS', 'ALKYD', 'ALKYL', 'ALLAY', 'ALLEE',
'ALLEL', 'ALLEY', 'ALLIS', 'ALLOD', 'ALLOT', 'ALLOW', 'ALLOY',
'ALLYL', 'ALMAH', 'ALMAS', 'ALMEH', 'ALMES', 'ALMUD', 'ALMUG',
'ALODS', 'ALOED', 'ALOES', 'ALOFT', 'ALOHA', 'ALOIN', 'ALONE',
'ALONG', 'ALOOF', 'ALOOS', 'ALOUD', 'ALOWE', 'ALPHA', 'ALTAR',
'ALTER', 'ALTHO', 'ALTOS', 'ALULA', 'ALUMS', 'ALURE', 'ALVAR',
'ALWAY', 'AMAHS', 'AMAIN', 'AMASS', 'AMATE', 'AMAUT', 'AMAZE',
'AMBAN', 'AMBER', 'AMBIT', 'AMBLE', 'AMBOS', 'AMBRY', 'AMEBA',
'AMEER', 'AMEND', 'AMENE', 'AMENS', 'AMENT', 'AMIAS', 'AMICE',
'AMICI', 'AMIDE', 'AMIDO', 'AMIDS', 'AMIES', 'AMIGA', 'AMIGO',
'AMINE', 'AMINO', 'AMINS', 'AMIRS', 'AMISS', 'AMITY', 'AMLAS',
'AMMAN', 'AMMON', 'AMMOS', 'AMNIA', 'AMNIC', 'AMNIO', 'AMOKS',
'AMOLE', 'AMONG', 'AMORT', 'AMOUR', 'AMOVE', 'AMOWT', 'AMPED',
'AMPLE', 'AMPLY', 'AMPUL', 'AMRIT', 'AMUCK', 'AMUSE', 'AMYLS',
'ANANA', 'ANATA', 'ANCHO', 'ANCLE', 'ANCON', 'ANDRO', 'ANEAR',
'ANELE', 'ANENT', 'ANGAS', 'ANGEL', 'ANGER', 'ANGLE', 'ANGLO',
'ANGRY', 'ANGST', 'ANIGH', 'ANILE', 'ANILS', 'ANIMA', 'ANIME',
'ANIMI', 'ANION', 'ANISE', 'ANKER', 'ANKHS', 'ANKLE', 'ANKUS',
'ANLAS', 'ANNAL', 'ANNAS', 'ANNAT', 'ANNEX', 'ANNOY', 'ANNUL',
'ANOAS', 'ANODE', 'ANOLE', 'ANOMY', 'ANSAE', 'ANTAE', 'ANTAR',
'ANTAS', 'ANTED', 'ANTES', 'ANTIC', 'ANTIS', 'ANTRA', 'ANTRE',
'ANTSY', 'ANURA', 'ANVIL', 'ANYON', 'AORTA', 'APACE', 'APAGE',
'APAID', 'APART', 'APAYD', 'APAYS', 'APEAK', 'APEEK', 'APERS',
'APERT', 'APERY', 'APGAR', 'APHID', 'APHIS', 'APIAN', 'APING',
'APIOL', 'APISH', 'APISM', 'APNEA', 'APODE', 'APODS', 'APOOP',
'APORT', 'APPAL', 'APPAY', 'APPEL', 'APPLE', 'APPLY', 'APPRO',
'APPUI', 'APPUY', 'APRES', 'APRON', 'APSES', 'APSIS', 'APSOS',
'APTED', 'APTER', 'APTLY', 'AQUAE', 'AQUAS', 'ARABA', 'ARAKS',
'ARAME', 'ARARS', 'ARBAS', 'ARBOR', 'ARCED', 'ARCHI', 'ARCOS',
'ARCUS', 'ARDEB', 'ARDOR', 'ARDRI', 'AREAD', 'AREAE', 'AREAL',
'AREAR', 'AREAS', 'ARECA', 'AREDD', 'AREDE', 'AREFY', 'AREIC',
'ARENA', 'ARENE', 'AREPA', 'ARERE', 'ARETE', 'ARETS', 'ARETT',
'ARGAL', 'ARGAN', 'ARGIL', 'ARGLE', 'ARGOL', 'ARGON', 'ARGOT',
'ARGUE', 'ARGUS', 'ARHAT', 'ARIAS', 'ARIEL', 'ARIKI', 'ARILS',
'ARIOT', 'ARISE', 'ARISH', 'ARKED', 'ARLED', 'ARLES', 'ARMED',
'ARMER', 'ARMET', 'ARMIL', 'ARMOR', 'ARNAS', 'ARNUT', 'AROBA',
'AROHA', 'AROID', 'AROMA', 'AROSE', 'ARPAS', 'ARPEN', 'ARRAH',
'ARRAS', 'ARRAY', 'ARRET', 'ARRIS', 'ARROW', 'ARROZ', 'ARSED',
'ARSES', 'ARSEY', 'ARSIS', 'ARSON', 'ARTAL', 'ARTEL', 'ARTIC',
'ARTIS', 'ARTSY', 'ARUHE', 'ARUMS', 'ARVAL', 'ARVEE', 'ARVOS',
'ARYLS', 'ASANA', 'ASCON', 'ASCOT', 'ASCUS', 'ASDIC', 'ASHED',
'ASHEN', 'ASHES', 'ASHET', 'ASIDE', 'ASKED', 'ASKER', 'ASKEW',
'ASKOI', 'ASKOS', 'ASPEN', 'ASPER', 'ASPIC', 'ASPIE', 'ASPIS',
'ASPRO', 'ASSAI', 'ASSAM', 'ASSAY', 'ASSES', 'ASSET', 'ASSEZ',
'ASSOT', 'ASTER', 'ASTIR', 'ASTUN', 'ASURA', 'ASWAY', 'ASWIM',
'ASYLA', 'ATAPS', 'ATAXY', 'ATIGI', 'ATILT', 'ATIMY', 'ATLAS',
'ATMAN', 'ATMAS', 'ATMOS', 'ATOCS', 'ATOKE', 'ATOKS', 'ATOLL',
'ATOMS', 'ATOMY', 'ATONE', 'ATONY', 'ATOPY', 'ATRIA', 'ATRIP',
'ATTAP', 'ATTAR', 'ATTIC', 'ATUAS', 'AUDAD', 'AUDIO', 'AUDIT',
'AUGER', 'AUGHT', 'AUGUR', 'AULAS', 'AULIC', 'AULOI', 'AULOS',
'AUMIL', 'AUNES', 'AUNTS', 'AUNTY', 'AURAE', 'AURAL', 'AURAR',
'AURAS', 'AUREI', 'AURES', 'AURIC', 'AURIS', 'AURUM', 'AUTOS',
'AUXIN', 'AVAIL', 'AVALE', 'AVANT', 'AVAST', 'AVELS', 'AVENS',
'AVERS', 'AVERT', 'AVGAS', 'AVIAN', 'AVINE', 'AVION', 'AVISE',
'AVISO', 'AVIZE', 'AVOID', 'AVOWS', 'AVYZE', 'AWAIT', 'AWAKE',
'AWARD', 'AWARE', 'AWARN', 'AWASH', 'AWATO', 'AWAVE', 'AWAYS',
'AWDLS', 'AWEEL', 'AWETO', 'AWFUL', 'AWING', 'AWMRY', 'AWNED',
'AWNER', 'AWOKE', 'AWOLS', 'AWORK', 'AXELS', 'AXIAL', 'AXILE',
'AXILS', 'AXING', 'AXIOM', 'AXION', 'AXITE', 'AXLED', 'AXLES',
'AXMAN', 'AXMEN', 'AXOID', 'AXONE', 'AXONS', 'AYAHS', 'AYAYA', 'AYELP',
'AYGRE', 'AYINS', 'AYONT', 'AYRES', 'AYRIE', 'AZANS', 'AZIDE', 'AZIDO',
'AZINE', 'AZLON', 'AZOIC', 'AZOLE', 'AZONS', 'AZOTE', 'AZOTH', 'AZUKI',
'AZURE', 'AZURN', 'AZURY', 'AZYGY', 'AZYME', 'AZYMS', 'BAAED', 'BAALS',
'BABAS', 'BABEL', 'BABES', 'BABKA', 'BABOO', 'BABUL', 'BABUS', 'BACCA',
'BACCO', 'BACCY', 'BACHA', 'BACHS', 'BACKS', 'BACON', 'BADDY', 'BADGE',
'BADLY', 'BAELS', 'BAFFS', 'BAFFY', 'BAFTS', 'BAGEL', 'BAGGY', 'BAGHS',
'BAGIE', 'BAHTS', 'BAHUS', 'BAHUT', 'BAILS', 'BAIRN', 'BAISA', 'BAITH',
'BAITS', 'BAIZA', 'BAIZE', 'BAJAN', 'BAJRA', 'BAJRI', 'BAJUS', 'BAKED',
'BAKEN', 'BAKER', 'BAKES', 'BAKRA', 'BALAS', 'BALDS', 'BALDY', 'BALED',
'BALER', 'BALES', 'BALKS', 'BALKY', 'BALLS', 'BALLY', 'BALMS', 'BALMY',
'BALOO', 'BALSA', 'BALTI', 'BALUN', 'BALUS', 'BAMBI', 'BANAK', 'BANAL',
'BANCO', 'BANCS', 'BANDA', 'BANDH', 'BANDS', 'BANDY', 'BANED', 'BANES',
'BANGS', 'BANIA', 'BANJO', 'BANKS', 'BANNS', 'BANTS', 'BANTU', 'BANTY',
'BANYA', 'BAPUS', 'BARBE', 'BARBS', 'BARBY', 'BARCA', 'BARDE', 'BARDO',
'BARDS', 'BARDY', 'BARED', 'BARER', 'BARES', 'BARFI', 'BARFS', 'BARGE',
'BARIC', 'BARKS', 'BARKY', 'BARMS', 'BARMY', 'BARNS', 'BARNY', 'BARON',
'BARPS', 'BARRA', 'BARRE', 'BARRO', 'BARRY', 'BARYE', 'BASAL', 'BASAN',
'BASED', 'BASEN', 'BASER', 'BASES', 'BASHO', 'BASIC', 'BASIJ', 'BASIL',
'BASIN', 'BASIS', 'BASKS', 'BASON', 'BASSE', 'BASSI', 'BASSO', 'BASSY',
'BASTA', 'BASTE', 'BASTI', 'BASTO', 'BASTS', 'BATCH', 'BATED', 'BATES',
'BATHE', 'BATHS', 'BATIK', 'BATON', 'BATTA', 'BATTS', 'BATTU', 'BATTY',
'BAUDS', 'BAUKS', 'BAULK', 'BAURS', 'BAVIN', 'BAWDS', 'BAWDY', 'BAWKS',
'BAWLS', 'BAWNS', 'BAWRS', 'BAWTY', 'BAYED', 'BAYER', 'BAYES', 'BAYLE',
'BAYOU', 'BAYTS', 'BAZAR', 'BAZOO', 'BEACH', 'BEADS', 'BEADY', 'BEAKS',
'BEAKY', 'BEALS', 'BEAMS', 'BEAMY', 'BEANO', 'BEANS', 'BEANY', 'BEARD',
'BEARE', 'BEARS', 'BEAST', 'BEATH', 'BEATS', 'BEATY', 'BEAUS', 'BEAUT',
'BEAUX', 'BEBOP', 'BECAP', 'BECKE', 'BECKS', 'BEDAD', 'BEDEL', 'BEDES',
'BEDEW', 'BEDIM', 'BEDYE', 'BEECH', 'BEEDI', 'BEEFS', 'BEEFY', 'BEEPS',
'BEERS', 'BEERY', 'BEETS', 'BEFIT', 'BEFOG', 'BEGAD', 'BEGAN', 'BEGAR',
'BEGAT', 'BEGEM', 'BEGET', 'BEGIN', 'BEGOT', 'BEGUM', 'BEGUN', 'BEIGE',
'BEIGY', 'BEING', 'BEINS', 'BEKAH', 'BELAH', 'BELAR', 'BELAY', 'BELCH',
'BELEE', 'BELGA', 'BELIE', 'BELLE', 'BELLS', 'BELLY', 'BELON', 'BELOW',
'BELTS', 'BEMAD', 'BEMAS', 'BEMIX', 'BEMUD', 'BENCH', 'BENDS', 'BENDY',
'BENES', 'BENET', 'BENGA', 'BENIS', 'BENNE', 'BENNI', 'BENNY', 'BENTO',
'BENTS', 'BENTY', 'BEPAT', 'BERAY', 'BERES', 'BERET', 'BERGS', 'BERKO',
'BERKS', 'BERME', 'BERMS', 'BEROB', 'BERRY', 'BERTH', 'BERYL', 'BESAT',
'BESAW', 'BESEE', 'BESES', 'BESET', 'BESIT', 'BESOM', 'BESOT', 'BESTI',
'BESTS', 'BETAS', 'BETED', 'BETEL', 'BETES', 'BETHS', 'BETID', 'BETON',
'BETTA', 'BETTY', 'BEVEL', 'BEVER', 'BEVOR', 'BEVUE', 'BEVVY', 'BEWET',
'BEWIG', 'BEZEL', 'BEZES', 'BEZIL', 'BEZZY', 'BHAIS', 'BHAJI', 'BHANG',
'BHATS', 'BHELS', 'BHOOT', 'BHUNA', 'BHUTS', 'BIACH', 'BIALI', 'BIALY',
'BIBBS', 'BIBES', 'BIBLE', 'BICCY', 'BICEP', 'BICES', 'BIDDY', 'BIDED',
'BIDER', 'BIDES', 'BIDET', 'BIDIS', 'BIDON', 'BIELD', 'BIERS', 'BIFFO',
'BIFFS', 'BIFFY', 'BIFID', 'BIGAE', 'BIGGS', 'BIGGY', 'BIGHA', 'BIGHT',
'BIGLY', 'BIGOS', 'BIGOT', 'BIJOU', 'BIKED', 'BIKER', 'BIKES', 'BIKIE',
'BILBO', 'BILBY', 'BILED', 'BILES', 'BILGE', 'BILGY', 'BILKS', 'BILLS',
'BILLY', 'BIMAH', 'BIMAS', 'BIMBO', 'BINAL', 'BINDI', 'BINDS', 'BINER',
'BINES', 'BINGE', 'BINGO', 'BINGS', 'BINGY', 'BINIT', 'BINKS', 'BINTS',
'BIOGS', 'BIOME', 'BIONT', 'BIOTA', 'BIPED', 'BIPOD', 'BIRCH', 'BIRDS',
'BIRKS', 'BIRLE', 'BIRLS', 'BIROS', 'BIRRS', 'BIRSE', 'BIRSY', 'BIRTH',
'BISES', 'BISKS', 'BISOM', 'BISON', 'BITCH', 'BITER', 'BITES', 'BITOS',
'BITOU', 'BITSY', 'BITTE', 'BITTS', 'BITTY', 'BIVIA', 'BIVVY', 'BIZES',
'BIZZO', 'BIZZY', 'BLABS', 'BLACK', 'BLADE', 'BLADS', 'BLADY', 'BLAER',
'BLAES', 'BLAFF', 'BLAGS', 'BLAHS', 'BLAIN', 'BLAME', 'BLAMS', 'BLAND',
'BLANK', 'BLARE', 'BLART', 'BLASE', 'BLASH', 'BLAST', 'BLATE', 'BLATS',
'BLATT', 'BLAUD', 'BLAWN', 'BLAWS', 'BLAYS', 'BLAZE', 'BLEAK', 'BLEAR',
'BLEAT', 'BLEBS', 'BLECH', 'BLEED', 'BLEEP', 'BLEES', 'BLEND', 'BLENT',
'BLERT', 'BLESS', 'BLEST', 'BLETS', 'BLEYS', 'BLIMP', 'BLIMY', 'BLIND',
'BLING', 'BLINI', 'BLINK', 'BLINS', 'BLINY', 'BLIPS', 'BLISS', 'BLIST',
'BLITE', 'BLITS', 'BLITZ', 'BLIVE', 'BLOAT', 'BLOBS', 'BLOCK', 'BLOCS',
'BLOGS', 'BLOKE', 'BLOND', 'BLOOD', 'BLOOK', 'BLOOM', 'BLOOP', 'BLORE',
'BLOTS', 'BLOWN', 'BLOWS', 'BLOWY', 'BLUBS', 'BLUDE', 'BLUDS', 'BLUDY',
'BLUED', 'BLUER', 'BLUES', 'BLUET', 'BLUEY', 'BLUFF', 'BLUID', 'BLUME',
'BLUNK', 'BLUNT', 'BLURB', 'BLURS', 'BLURT', 'BLUSH', 'BLYPE', 'BOABS',
'BOAKS', 'BOARD', 'BOARS', 'BOART', 'BOAST', 'BOATS', 'BOBAC', 'BOBAK',
'BOBAS', 'BOBBY', 'BOBOL', 'BOBOS', 'BOCCA', 'BOCCE', 'BOCCI', 'BOCHE',
'BOCKS', 'BODED', 'BODES', 'BODGE', 'BODHI', 'BODLE', 'BOEPS', 'BOETS',
'BOEUF', 'BOFFO', 'BOFFS', 'BOGAN', 'BOGEY', 'BOGGY', 'BOGIE', 'BOGLE',
'BOGUE', 'BOGUS', 'BOHEA', 'BOHOS', 'BOILS', 'BOING', 'BOINK', 'BOITE',
'BOKED', 'BOKEH', 'BOKES', 'BOKOS', 'BOLAR', 'BOLAS', 'BOLDS', 'BOLES',
'BOLIX', 'BOLLS', 'BOLOS', 'BOLTS', 'BOLUS', 'BOMAS', 'BOMBE', 'BOMBO',
'BOMBS', 'BONCE', 'BONDS', 'BONED', 'BONER', 'BONES', 'BONEY', 'BONGO',
'BONGS', 'BONIE', 'BONKS', 'BONNE', 'BONNY', 'BONUS', 'BONZA', 'BONZE',
'BOOAI', 'BOOAY', 'BOOBS', 'BOOBY', 'BOODY', 'BOOED', 'BOOFY', 'BOOGY',
'BOOHS', 'BOOKS', 'BOOKY', 'BOOLS', 'BOOMS', 'BOOMY', 'BOONG', 'BOONS',
'BOORD', 'BOORS', 'BOOSE', 'BOOST', 'BOOTH', 'BOOTS', 'BOOTY', 'BOOZE',
'BOOZY', 'BOPPY', 'BORAK', 'BORAL', 'BORAS', 'BORAX', 'BORDE', 'BORDS',
'BORED', 'BOREE', 'BOREL', 'BORER', 'BORES', 'BORGO', 'BORIC', 'BORKS',
'BORMS', 'BORNA', 'BORNE', 'BORON', 'BORTS', 'BORTY', 'BORTZ', 'BOSIE',
'BOSKS', 'BOSKY', 'BOSOM', 'BOSON', 'BOSSY', 'BOSUN', 'BOTAS', 'BOTCH',
'BOTEL', 'BOTES', 'BOTHY', 'BOTTE', 'BOTTS', | |
<filename>services/models.py
from collections import defaultdict
from django.conf import settings
from django.contrib.gis.db import models
from django.contrib.gis.geos import Point
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.core.validators import MinValueValidator, MaxValueValidator, RegexValidator
from django.db.transaction import atomic
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _, get_language
from sorl.thumbnail import ImageField
from sorl.thumbnail.shortcuts import get_thumbnail
from . import jira_support
from .tasks import email_provider_about_service_approval_task
from .utils import absolute_url, get_path_to_service
class NameInCurrentLanguageMixin(object):
    """Mixin exposing a ``name`` property localised to the active language."""

    @property
    def name(self):
        # Prefer the name field matching the current language code
        # (e.g. ``name_en`` when the active language is "en-us").
        # If that field does not exist or is empty, fall back to the
        # first non-empty of the English, Arabic, or French names.
        lang_code = get_language()[:2]
        localised = getattr(self, 'name_%s' % lang_code, '')
        return localised or self.name_en or self.name_ar or self.name_fr

    def __str__(self):
        return self.name
class ProviderType(NameInCurrentLanguageMixin, models.Model):
    """Lookup table of provider types with per-language names.

    The ``name`` property (from NameInCurrentLanguageMixin) resolves to
    the field matching the active language.
    """
    # External identifier for the type; unique, but distinct from the PK.
    number = models.IntegerField(unique=True)
    # Translated display names; any of them may be blank.
    name_en = models.CharField(
        _("name in English"),
        max_length=256,
        default='',
        blank=True,
    )
    name_ar = models.CharField(
        _("name in Arabic"),
        max_length=256,
        default='',
        blank=True,
    )
    name_fr = models.CharField(
        _("name in French"),
        max_length=256,
        default='',
        blank=True,
    )
    def get_api_url(self):
        """Return the PATH part of the URL to access this object using the API"""
        return reverse('providertype-detail', args=[self.id])
def at_least_one_letter(s):
    """Return True if *s* contains at least one alphabetic character.

    NOTE(review): this is attached to CharFields below as a Django
    validator, but a validator that returns a boolean (instead of raising
    ValidationError) never actually rejects input -- confirm intent.
    """
    # Generator expression: short-circuits on the first letter instead of
    # materialising a throwaway list for any().
    return any(c.isalpha() for c in s)
def blank_or_at_least_one_letter(s):
    """Accept the empty string, or any string containing a letter."""
    if s == '':
        return True
    # Inlined from at_least_one_letter() so this validator stands alone.
    return any(ch.isalpha() for ch in s)
class Provider(NameInCurrentLanguageMixin, models.Model):
    """A service provider with a one-to-one user account.

    Names, descriptions, focal-point names and addresses are stored per
    language (en/ar/fr); ``NameInCurrentLanguageMixin`` picks the field
    matching the active language.
    """
    # NOTE(review): blank_or_at_least_one_letter returns a bool; Django
    # validators are expected to raise ValidationError, so as written it
    # never rejects anything -- confirm intent before changing.
    name_en = models.CharField(
        # Translators: Provider name
        _("name in English"),
        max_length=256, # Length is a guess
        default='',
        blank=True,
        validators=[blank_or_at_least_one_letter]
    )
    name_ar = models.CharField(
        # Translators: Provider name
        _("name in Arabic"),
        max_length=256, # Length is a guess
        default='',
        blank=True,
        validators=[blank_or_at_least_one_letter]
    )
    name_fr = models.CharField(
        # Translators: Provider name
        _("name in French"),
        max_length=256, # Length is a guess
        default='',
        blank=True,
        validators=[blank_or_at_least_one_letter]
    )
    type = models.ForeignKey(
        ProviderType,
        verbose_name=_("type"),
    )
    # Phone numbers are validated against the project-wide pattern from
    # settings.PHONE_NUMBER_REGEX.
    phone_number = models.CharField(
        _("phone number"),
        max_length=20,
        validators=[
            RegexValidator(settings.PHONE_NUMBER_REGEX)
        ]
    )
    website = models.URLField(
        _("website"),
        blank=True,
        default='',
    )
    description_en = models.TextField(
        # Translators: Provider description
        _("description in English"),
        default='',
        blank=True,
    )
    description_ar = models.TextField(
        # Translators: Provider description
        _("description in Arabic"),
        default='',
        blank=True,
    )
    description_fr = models.TextField(
        # Translators: Provider description
        _("description in French"),
        default='',
        blank=True,
    )
    # Each provider is backed by exactly one auth user account.
    user = models.OneToOneField(
        to=settings.AUTH_USER_MODEL,
        verbose_name=_('user'),
        help_text=_('user account for this provider'),
    )
    number_of_monthly_beneficiaries = models.IntegerField(
        _("number of targeted beneficiaries monthly"),
        blank=True, null=True,
        validators=[
            MinValueValidator(0),
            MaxValueValidator(1000000)
        ]
    )
    focal_point_name_en = models.CharField(
        _("focal point name in English"),
        max_length=256, # Length is a guess
        default='',
        blank=True,
        validators=[blank_or_at_least_one_letter]
    )
    focal_point_name_ar = models.CharField(
        _("focal point name in Arabic"),
        max_length=256, # Length is a guess
        default='',
        blank=True,
        validators=[blank_or_at_least_one_letter]
    )
    focal_point_name_fr = models.CharField(
        _("focal point name in French"),
        max_length=256, # Length is a guess
        default='',
        blank=True,
        validators=[blank_or_at_least_one_letter]
    )
    focal_point_phone_number = models.CharField(
        _("focal point phone number"),
        max_length=20,
        validators=[
            RegexValidator(settings.PHONE_NUMBER_REGEX)
        ]
    )
    address_en = models.TextField(
        _("provider address in English"),
        default='',
        blank=True,
    )
    address_ar = models.TextField(
        _("provider address in Arabic"),
        default='',
        blank=True,
    )
    address_fr = models.TextField(
        _("provider address in French"),
        default='',
        blank=True,
    )
    def get_api_url(self):
        """Return the PATH part of the URL to access this object using the API"""
        return reverse('provider-detail', args=[self.id])
    def get_fetch_url(self):
        """Return the PATH part of the URL to fetch this object using the API"""
        return reverse('provider-fetch', args=[self.id])
    def notify_jira_of_change(self):
        """Record a provider-change event for JIRA processing.

        Creates a JiraUpdateRecord row (model not visible in this chunk --
        presumably defined elsewhere in this module; verify).
        """
        JiraUpdateRecord.objects.create(
            update_type=JiraUpdateRecord.PROVIDER_CHANGE,
            provider=self
        )
    def get_admin_edit_url(self):
        """Return the PATH part of the URL to edit this object in the admin"""
        return reverse('admin:services_provider_change', args=[self.id])
class ServiceAreaManager(models.GeoManager):
    def top_level(self):
        """Return only the root areas -- those with no parent area."""
        queryset = super().get_queryset()
        return queryset.filter(parent=None)
class ServiceArea(NameInCurrentLanguageMixin, models.Model):
    """A geographic area services can be offered in, forming a tree via
    ``parent``. ``ServiceAreaManager.top_level`` returns the roots.
    """
    name_en = models.CharField(
        _("name in English"),
        max_length=256,
        default='',
        blank=True,
    )
    name_ar = models.CharField(
        _("name in Arabic"),
        max_length=256,
        default='',
        blank=True,
    )
    name_fr = models.CharField(
        _("name in French"),
        max_length=256,
        default='',
        blank=True,
    )
    # Self-referential link building the area hierarchy; roots have
    # parent=None.
    parent = models.ForeignKey(
        to='self',
        verbose_name=_('parent area'),
        help_text=_('the area that contains this area'),
        null=True,
        blank=True,
        related_name='children',
    )
    # Nullable link to a region record; cleared (SET_NULL) if the region
    # is deleted.
    lebanon_region = models.ForeignKey(
        'LebanonRegion',
        null=True,
        default=None,
        on_delete=models.SET_NULL,
    )
    objects = ServiceAreaManager()
    @property
    def centroid(self):
        # NOTE(review): raises AttributeError when lebanon_region is None,
        # which the field allows (null=True) -- callers must guard.
        return self.lebanon_region.centroid
    def get_api_url(self):
        """Return the PATH part of the URL to access this object using the API"""
        return reverse('servicearea-detail', args=[self.id])
class SelectionCriterion(models.Model):
    """
    A selection criterion limits who can receive the service.
    It's just a text string. E.g. "age under 18".
    """
    text_en = models.CharField(max_length=100, blank=True, default='')
    text_fr = models.CharField(max_length=100, blank=True, default='')
    text_ar = models.CharField(max_length=100, blank=True, default='')
    service = models.ForeignKey('services.Service', related_name='selection_criteria')
    class Meta(object):
        verbose_name_plural = _("selection criteria")
    def clean(self):
        # At least one language must carry text; otherwise the record is
        # meaningless, so model validation rejects it.
        if not any([self.text_en, self.text_fr, self.text_ar]):
            raise ValidationError(_("Selection criterion must have text in at least "
                                    "one language"))
    def __str__(self):
        # Comma-join the non-empty translations (en, ar, fr order).
        return ', '.join([text for text in [self.text_en, self.text_ar, self.text_fr] if text])
    def get_api_url(self):
        """Return the PATH part of the URL to access this object using the API"""
        return reverse('selectioncriterion-detail', args=[self.id])
class ServiceType(NameInCurrentLanguageMixin, models.Model):
    """Lookup table of service types, each with an optional icon and
    per-language names and comments. Ordered by ``number``.
    """
    # External identifier; also the default ordering key (see Meta).
    number = models.IntegerField(unique=True)
    icon = models.ImageField(
        upload_to='service-type-icons',
        verbose_name=_("icon"),
        blank=True,
    )
    name_en = models.CharField(
        _("name in English"),
        max_length=256,
        default='',
        blank=True,
    )
    name_ar = models.CharField(
        _("name in Arabic"),
        max_length=256,
        default='',
        blank=True,
    )
    name_fr = models.CharField(
        _("name in French"),
        max_length=256,
        default='',
        blank=True,
    )
    comments_en = models.CharField(
        _("comments in English"),
        max_length=512,
        default='',
        blank=True,
    )
    comments_ar = models.CharField(
        _("comments in Arabic"),
        max_length=512,
        default='',
        blank=True,
    )
    comments_fr = models.CharField(
        _("comments in French"),
        max_length=512,
        default='',
        blank=True,
    )
    class Meta(object):
        ordering = ['number', ]
    def get_api_url(self):
        """Return the PATH part of the URL to access this object using the API"""
        return reverse('servicetype-detail', args=[self.id])
    def get_icon_url(self):
        """Return URL PATH of the icon image for this record"""
        # For convenience of serializers
        # Implicitly returns None when no icon has been uploaded.
        if self.icon:
            return self.icon.url
class Service(NameInCurrentLanguageMixin, models.Model):
provider = models.ForeignKey(
Provider,
verbose_name=_("provider"),
)
name_en = models.CharField(
# Translators: Service name
_("name in English"),
max_length=256,
default='',
blank=True,
)
name_ar = models.CharField(
# Translators: Service name
_("name in Arabic"),
max_length=256,
default='',
blank=True,
)
name_fr = models.CharField(
# Translators: Service name
_("name in French"),
max_length=256,
default='',
blank=True,
)
area_of_service = models.ForeignKey(
ServiceArea,
verbose_name=_("area of service"),
)
description_en = models.TextField(
# Translators: Service description
_("description in English"),
default='',
blank=True,
)
description_ar = models.TextField(
# Translators: Service description
_("description in Arabic"),
default='',
blank=True,
)
description_fr = models.TextField(
# Translators: Service description
_("description in French"),
default='',
blank=True,
)
additional_info_en = models.TextField(
_("additional information in English"),
blank=True,
default='',
)
additional_info_ar = models.TextField(
_("additional information in Arabic"),
blank=True,
default='',
)
additional_info_fr = models.TextField(
_("additional information in French"),
blank=True,
default='',
)
cost_of_service = models.TextField(
_("cost of service"),
blank=True,
default='',
)
is_mobile = models.BooleanField(
_("mobile service"),
blank=True,
default=False,
)
# Note: we don't let multiple non-archived versions of a service record pile up
# there should be no more than two, one in current status and/or one in some other
# status.
STATUS_DRAFT = 'draft'
STATUS_CURRENT = 'current'
STATUS_REJECTED = 'rejected'
STATUS_CANCELED = 'canceled'
STATUS_ARCHIVED = 'archived'
STATUS_CHOICES = (
# New service or edit of existing service is pending approval
(STATUS_DRAFT, _('draft')),
# This Service has been approved and not superseded. Only services with
# status 'current' appear in the public interface.
(STATUS_CURRENT, _('current')),
# The staff has rejected the service submission or edit
(STATUS_REJECTED, _('rejected')),
# The provider has canceled service. They can do this on draft or current services.
# It no longer appears in the public interface.
(STATUS_CANCELED, _('canceled')),
# The record is obsolete and we don't want to see it anymore
(STATUS_ARCHIVED, _('archived')),
)
status = models.CharField(
_('status'),
max_length=10,
choices=STATUS_CHOICES,
default=STATUS_DRAFT,
)
update_of = models.ForeignKey(
'self',
help_text=_('If a service record represents a modification of another service '
'record, this field links to that other record.'),
null=True,
blank=True,
related_name='updates',
)
location = models.PointField(
_('location'),
blank=True,
null=True,
)
# Open & close hours by day. If None, service is closed that day.
sunday_open = models.TimeField(null=True, blank=True)
sunday_close = models.TimeField(null=True, blank=True)
monday_open = models.TimeField(null=True, blank=True)
monday_close = models.TimeField(null=True, blank=True)
tuesday_open = models.TimeField(null=True, blank=True)
tuesday_close = models.TimeField(null=True, blank=True)
wednesday_open = models.TimeField(null=True, blank=True)
wednesday_close = models.TimeField(null=True, blank=True)
thursday_open = models.TimeField(null=True, blank=True)
thursday_close = models.TimeField(null=True, blank=True)
friday_open = models.TimeField(null=True, blank=True)
friday_close = models.TimeField(null=True, blank=True)
saturday_open = models.TimeField(null=True, blank=True)
saturday_close = models.TimeField(null=True, blank=True)
type = models.ForeignKey(
ServiceType,
verbose_name=_("type"),
)
objects = models.GeoManager()
image = ImageField(
upload_to="service-images/",
help_text=_(
"Upload an image file (GIF, JPEG, PNG, WebP) with a square aspect "
"ratio (Width equal to Height). The image size should be at least "
"1280 x 1280 for best results. SVG files are not supported."),
blank=True,
default='',
)
def get_api_url(self):
return reverse('service-detail', args=[self.id])
def get_absolute_url(self):
"""Called from CMS-related code to get app view from a search hit"""
return get_path_to_service(self.id)
def get_provider_fetch_url(self):
# For convenience of the serializer
return self.provider.get_fetch_url()
def get_admin_edit_url(self):
return reverse('admin:services_service_change', args=[self.id])
def email_provider_about_approval(self):
"""Schedule a task to send an email to the provider"""
email_provider_about_service_approval_task.delay(self.pk)
| |
radius: int, h: float, sigma: float,
mse: tuple[float, float], hard_thr: float, block_size: int, block_step: int, group_size: int, bm_range: int, bm_step: int,
ps_num: int, ps_range: int, ps_step: int, lowpass: list[float], color: bool, matrix: int
):
mask = core.GenBlockMask(core.ShufflePlanes(src, 0, vs.GRAY))
cleansed = core.NLMeans(ref, radius, block_size, math.ceil(block_size / 2), h, ref, color)
dif = core.MakeDiff(ref, cleansed)
dif = core.BMBasic(
dif, cleansed, radius=radius, th_mse=mse[0], hard_thr=hard_thr, sigma=sigma,
block_size=block_size, block_step=block_step, group_size=group_size, bm_range=bm_range,
bm_step=bm_step, ps_num=ps_num, ps_range=ps_range, ps_step=ps_step, matrix=matrix
)
dif = core.Aggregate(dif, radius, 1)
cleansed = core.MergeDiff(cleansed, dif)
dif = core.MakeDiff(ref, cleansed)
dif = core.BMFinal(
dif, cleansed, radius=radius, th_mse=mse[1], sigma=sigma,
block_size=block_size, block_step=block_step, group_size=group_size,
bm_range=bm_range, bm_step=bm_step, ps_num=ps_num, ps_range=ps_range,
ps_step=ps_step, matrix=matrix
)
dif = core.Aggregate(dif, radius, 1)
cleansed = core.MergeDiff(cleansed, dif)
ref = core.FreqMerge(cleansed, ref, block_size // 2 * 2 + 1, lowpass)
src = core.FreqMerge(cleansed, src, block_size // 2 * 2 + 1, lowpass)
return core.MaskedMerge(src, ref, mask, first_plane=True)
def Super(src: vs.VideoNode, pel=4):
    """Build the motion-search "super" clip consumed by the other passes.

    Validates its arguments, normalises the field order, converts RGB
    input to the OPP colorspace, and delegates to ``internal.super``.
    Behaviour is identical to the previous implementation.
    """
    # Guard clauses: every invalid argument raises before any processing.
    if not isinstance(src, vs.VideoNode):
        raise TypeError("Oyster.Super: src has to be a video clip!")
    if src.format.sample_type != vs.FLOAT or src.format.bits_per_sample < 32:
        raise TypeError("Oyster.Super: the sample type of src has to be single precision!")
    if src.format.subsampling_w > 0 or src.format.subsampling_h > 0:
        raise RuntimeError("Oyster.Super: subsampled stuff not supported!")
    if not isinstance(pel, int):
        raise TypeError("Oyster.Super: pel has to be an integer!")
    if pel not in (2, 4):
        raise RuntimeError("Oyster.Super: pel has to be 2 or 4!")
    core = Core()
    clip = core.SetFieldBased(src, 0)
    if clip.format.color_family == vs.RGB:
        clip = core.RGB2OPP(clip, 1)
    result = internal.super(core, clip, pel)
    del core
    return result
def Basic(src, super=None, radius=6, pel=4, sad=2000.0, short_time=False):
    """Run the basic motion-compensated pass via ``internal.basic``.

    Parameters
    ----------
    src : vs.VideoNode
        single-precision, non-subsampled input clip; RGB is converted to
        OPP for processing and back afterwards.
    super : vs.VideoNode or None
        optional pre-built super clip (see ``Super``). NOTE: the name
        shadows the builtin ``super`` but is part of the public keyword
        interface, so it cannot be renamed.
    radius : int
        temporal radius; must be >= 1.
    pel : int
        subpixel precision; must be 1, 2 or 4.
    sad : float or int
        SAD threshold, must be > 0; passed through to ``internal.basic``.
    short_time : bool
        flag passed through to ``internal.basic`` -- semantics defined there.
    """
    if not isinstance(src, vs.VideoNode):
        raise TypeError("Oyster.Basic: src has to be a video clip!")
    elif src.format.sample_type != vs.FLOAT or src.format.bits_per_sample < 32:
        raise TypeError("Oyster.Basic: the sample type of src has to be single precision!")
    elif src.format.subsampling_w > 0 or src.format.subsampling_h > 0:
        raise RuntimeError("Oyster.Basic: subsampled stuff not supported!")
    if not isinstance(super, vs.VideoNode) and super is not None:
        raise TypeError("Oyster.Basic: super has to be a video clip or None!")
    elif super is not None:
        if super.format.sample_type != vs.FLOAT or super.format.bits_per_sample < 32 or super.format.subsampling_w > 0 or super.format.subsampling_h > 0:
            raise RuntimeError("Oyster.Basic: corrupted super clip!")
    if not isinstance(radius, int):
        raise TypeError("Oyster.Basic: radius has to be an integer!")
    elif radius < 1:
        raise RuntimeError("Oyster.Basic: radius has to be greater than 0!")
    if not isinstance(pel, int):
        raise TypeError("Oyster.Basic: pel has to be an integer!")
    elif pel not in [1, 2, 4]:
        raise RuntimeError("Oyster.Basic: pel has to be 1, 2 or 4!")
    if not isinstance(sad, float) and not isinstance(sad, int):
        raise TypeError("Oyster.Basic: sad has to be a real number!")
    elif sad <= 0.0:
        raise RuntimeError("Oyster.Basic: sad has to be greater than 0!")
    if not isinstance(short_time, bool):
        raise TypeError("Oyster.Basic: short_time has to be boolean!")
    core = Core()
    color = True
    rgb = False
    # Capture the colorspace BEFORE any conversion, so the GRAY check below
    # sees the original family even after an RGB->OPP conversion.
    colorspace = src.format.color_family
    if colorspace == vs.RGB:
        src = core.RGB2OPP(src, 1)
        rgb = True
    if colorspace == vs.GRAY:
        color = False
    src = core.SetFieldBased(src, 0)
    super = core.SetFieldBased(super, 0) if super is not None else None
    clip = internal.basic(core, src, super, radius, pel, sad, short_time, color)
    # Convert back to RGB only if the input was RGB.
    clip = core.OPP2RGB(clip, 1) if rgb else clip
    del core
    return clip
def Deringing(
    src: vs.VideoNode, ref: BasicClip, radius: int = 6, h: float = 6.4, sigma: float = 16.0,
    mse: list[float] = None, hard_thr: float = 3.2, block_size: int = 8,
    block_step: int = 1, group_size: int = 32, bm_range: int = 24, bm_step: int = 1,
    ps_num: int = 2, ps_range: int = 8, ps_step: int = 1, lowpass: list[float] = None
):
    """Remove ringing artifacts from src, guided by the reference clip ref
    (typically the output of Basic).

    mse     -- two BM3D MSE thresholds; None entries are derived from sigma.
               Defaults to [None, None].
    lowpass -- frequency-response curve for the pre-filter; derived from
               sigma when None.
    Returns the processed clip; RGB input is converted to OPP and back.

    Fix: the original default was the mutable list [None, None], which the
    body mutated in place -- the shared default (and any caller-supplied
    list) was silently overwritten with sigma-derived values on first call.
    The default is now None and the list is copied before mutation.
    """
    if not isinstance(src, vs.VideoNode):
        raise TypeError("Oyster.Deringing: src has to be a video clip!")
    elif src.format.sample_type != vs.FLOAT or src.format.bits_per_sample < 32:
        raise TypeError("Oyster.Deringing: the sample type of src has to be single precision!")
    elif src.format.subsampling_w > 0 or src.format.subsampling_h > 0:
        raise RuntimeError("Oyster.Deringing: subsampled stuff not supported!")
    if not isinstance(ref, vs.VideoNode):
        raise TypeError("Oyster.Deringing: ref has to be a video clip!")
    elif ref.format.sample_type != vs.FLOAT or ref.format.bits_per_sample < 32:
        raise TypeError("Oyster.Deringing: the sample type of ref has to be single precision!")
    elif ref.format.subsampling_w > 0 or ref.format.subsampling_h > 0:
        raise RuntimeError("Oyster.Deringing: subsampled stuff not supported!")
    if not isinstance(radius, int):
        raise TypeError("Oyster.Deringing: radius has to be an integer!")
    elif radius < 1:
        raise RuntimeError("Oyster.Deringing: radius has to be greater than 0!")
    if not isinstance(h, float) and not isinstance(h, int):
        raise TypeError("Oyster.Deringing: h has to be a real number!")
    elif h <= 0:
        raise RuntimeError("Oyster.Deringing: h has to be greater than 0!")
    # Default and copy *before* mutation so neither the shared default nor
    # the caller's list is ever modified.
    mse = [None, None] if mse is None else mse
    if not isinstance(mse, list):
        raise TypeError("Oyster.Deringing: mse parameter has to be an array!")
    elif len(mse) != 2:
        raise RuntimeError("Oyster.Deringing: mse parameter has to contain 2 elements exactly!")
    for i in range(2):
        if not isinstance(mse[i], float) and not isinstance(mse[i], int) and mse[i] is not None:
            raise TypeError("Oyster.Deringing: elements in mse must be real numbers or None!")
    if not isinstance(lowpass, list) and lowpass is not None:
        raise TypeError("Oyster.Deringing: lowpass has to be a list or None!")
    core = Core()
    rgb = False
    color = True
    mse = list(mse)  # local copy; safe to fill in defaults below
    mse[0] = sigma * 160.0 + 1200.0 if mse[0] is None else mse[0]
    mse[1] = sigma * 120.0 + 800.0 if mse[1] is None else mse[1]
    lowpass = [0.0, sigma, 0.48, 1024.0, 1.0, 1024.0] if lowpass is None else lowpass
    matrix = None
    colorspace = src.format.color_family
    if colorspace == vs.RGB:
        rgb = True
        matrix = 100
        src = core.RGB2OPP(src, 1)
        ref = core.RGB2OPP(ref, 1)
    if colorspace == vs.GRAY:
        color = False
    src = core.SetFieldBased(src, 0)
    ref = core.SetFieldBased(ref, 0)
    clip = internal.deringing(core, src, ref, radius, h, sigma,
        mse, hard_thr, block_size, block_step, group_size, bm_range, bm_step, ps_num, ps_range, ps_step,
        lowpass, color, matrix)
    clip = core.OPP2RGB(clip, 1) if rgb else clip
    core.delete()
    return clip
def Destaircase(
src: vs.VideoNode, ref: BasicClip, radius: int = 6, sigma: float = 16.0,
mse: tuple[float, float] = [None, None], hard_thr: float = 3.2, block_size: int = 8,
block_step: int = 1, group_size: int = 32, bm_range: int = 24, bm_step: int = 1,
ps_num: int = 2, ps_range: int = 8, ps_step: int = 1,
thr: float = 0.03125, elast: float = 0.015625, lowpass: list[float] = None
):
if not isinstance(src, vs.VideoNode):
raise TypeError("Oyster.Destaircase: src has to be a video clip!")
elif src.format.sample_type != vs.FLOAT or src.format.bits_per_sample < 32:
raise TypeError("Oyster.Destaircase: the sample type of src has to be single precision!")
elif src.format.subsampling_w > 0 or src.format.subsampling_h > 0:
raise RuntimeError("Oyster.Destaircase: subsampled stuff not supported!")
if not isinstance(ref, vs.VideoNode):
raise TypeError("Oyster.Destaircase: ref has to be a video clip!")
elif ref.format.sample_type != vs.FLOAT or ref.format.bits_per_sample < 32:
raise TypeError("Oyster.Destaircase: the sample type of ref has to be single precision!")
elif ref.format.subsampling_w > 0 or ref.format.subsampling_h > 0:
raise RuntimeError("Oyster.Destaircase: subsampled stuff not supported!")
if not isinstance(radius, int):
raise TypeError("Oyster.Destaircase: radius has to be an integer!")
elif radius < 1:
raise RuntimeError("Oyster.Destaircase: radius has to be greater than 0!")
if not isinstance(mse, list):
raise TypeError("Oyster.Destaircase: mse parameter has to be an array!")
elif len(mse) != 2:
raise RuntimeError("Oyster.Destaircase: mse parameter has to contain 2 elements exactly!")
for i in range(2):
if not isinstance(mse[i], float) and not isinstance(mse[i], int) and mse[i] is not None:
raise TypeError("Oyster.Destaircase: elements in mse must be real numbers or None!")
if not isinstance(thr, float) and not isinstance(thr, int):
raise TypeError("Oyster.Destaircase: thr has to be a real number!")
elif thr < 0 or thr > 1:
raise RuntimeError("Oyster.Destaircase: thr has to fall in [0, 1]!")
if not isinstance(elast, float) and not isinstance(elast, int):
raise TypeError("Oyster.Destaircase: elast has to be a real number!")
elif elast < 0 or elast > thr:
raise RuntimeError("Oyster.Destaircase: elast has to fall in [0, thr]!")
if not isinstance(lowpass, list) and lowpass is not None:
raise TypeError("Oyster.Destaircase: lowpass has to be a list or None!")
core = Core()
rgb = False
mse[0] = sigma * 160.0 + 1200.0 if mse[0] is None else mse[0]
mse[1] = sigma * 120.0 + 800.0 if mse[1] is None else mse[1]
lowpass = [0.0, sigma, 0.48, 1024.0, 1.0, | |
# testing/old_contractions.py (from the alewis/jax_vumps repository)
"""
A docstring
"""
import numpy as np
import scipy as sp
import numpy.linalg as npla
import scipy.linalg as spla
from functools import reduce
from bhtools.tebd.scon import scon
import bhtools.tebd.utils as utils
#from scipy.linalg import solve
"""
Conventions
2 3 4
| | |
O U
| | |
1 1 2
2---A---3 1
| |
1 2--A--3
"""
"""
2--A--3
|
O
|
1
"""
def Aop(A, O):
    """Act with operator O on the physical leg of MPS tensor A.

    Implemented by delegating to Bop with the transposed operator.
    """
    return Bop(np.transpose(O), A)
"""
1
|
O
|
2--B--3
"""
def Bop(O, B):
    """Contract operator O onto the physical (first) index of tensor B.

        1
        |
        O
        |
    2--B--3

    Equivalent to np.einsum("ij, jkl", O, B).
    """
    idx_O = (1, -1)
    idx_B = (1, -2, -3)
    return scon([O, B], [idx_O, idx_B])
def leftgather(left, right):
    """
    2--left--right--3
         |      |
         1      1      (vectorized)
         |      |
    Contract the shared bond of two MPS tensors and fuse their physical
    legs, returning a tensor of shape (dl*dr, chil, chir).
    """
    dl, chil, _ = left.shape
    dr, _, chir = right.shape
    # np.dot contracts left's last axis with right's second-to-last axis,
    # yielding (dl, chil, dr, chir).
    merged = np.dot(left, right)
    merged = np.transpose(merged, (0, 2, 1, 3))  # (dl, dr, chil, chir)
    return np.reshape(merged, (dl * dr, chil, chir))
def qrmat(A, mode="full"):
    """
    QR decomp. of A, with phase convention such that R has only positive
    (non-negative) elements on the main diagonal. A is a matrix.

    Fix: a plain np.sign maps an exact zero on the diagonal of R to 0,
    which would zero out the corresponding column of Q and row of R and
    destroy orthogonality; zeros are now mapped to +1 instead.
    """
    Q, R = sp.linalg.qr(A, mode=mode)
    signs = np.sign(np.diag(R))
    signs[signs == 0] = 1  # keep the phase matrix unitary
    phases = np.diag(signs)
    Q = np.dot(Q, phases)
    R = np.dot(np.conj(phases), R)
    return (Q, R)
def qrpos(A):
    """
    QR decomp. of A, with phase convention such that R has only positive
    elements on the main diagonal.
    If A is an MPS tensor (d, chiL, chiR), it is reshaped appropriately
    before the throughput begins. In that case, Q will be a tensor
    of the same size, while R will be a chiR x chiR matrix.

    Raises ValueError if A is neither rank-2 nor rank-3 (the original
    code only printed a warning and then crashed inside fuse_left).
    """
    Ashp = A.shape
    if len(Ashp) == 2:
        return qrmat(A)
    if len(Ashp) != 3:
        raise ValueError("qrpos: A had invalid dimensions " + str(Ashp))
    A = fuse_left(A)  # (d*chiL, chiR)
    Q, R = qrmat(A, mode="economic")
    Q = unfuse_left(Q, Ashp)
    return (Q, R)
def rqmat(A, mode="full"):
    """
    RQ decomp. of A, with phase convention such that R has only positive
    (non-negative) elements on the main diagonal. A is a matrix.
    Returns (Q, R) with A = R @ Q.

    Fix: a plain np.sign maps an exact zero on the diagonal of R to 0,
    which would zero out the corresponding row of Q and column of R and
    destroy orthogonality; zeros are now mapped to +1 instead.
    """
    R, Q = sp.linalg.rq(A, mode=mode)
    signs = np.sign(np.diag(R))
    signs[signs == 0] = 1  # keep the phase matrix unitary
    phases = np.diag(signs)
    Q = np.dot(phases, Q)
    R = np.dot(R, np.conj(phases))
    return (Q, R)
def rqpos(A):
    """
    RQ decomp. of A, with phase convention such that R has only positive
    elements on the main diagonal.
    If A is an MPS tensor (d, chiL, chiR), it is reshaped and
    transposed appropriately before the throughput begins. In that case,
    Q will be a tensor of the same size, while R will be a chiL x chiL
    matrix.

    Fixes: the original called qrmat (a QR, not RQ, decomposition) and
    unpacked its (Q, R) return swapped as `R, Q`, producing wrong
    factors; it now calls rqmat. Invalid-rank input raises ValueError
    instead of printing and crashing downstream.
    """
    Ashp = A.shape
    if len(Ashp) == 2:
        return rqmat(A)
    if len(Ashp) != 3:
        raise ValueError("rqpos: A had invalid dimensions " + str(Ashp))
    A = fuse_right(A)  # (chiL, d*chiR)
    Q, R = rqmat(A, mode="economic")
    Q = unfuse_right(Q, Ashp)
    return (Q, R)
def fuse_left(A):
    """Merge the physical and left-bond legs: (d, chiL, chiR) -> (d*chiL, chiR)."""
    d, chiL, chiR = A.shape
    return np.reshape(A, (d * chiL, chiR))
def unfuse_left(A, shp):
    """Inverse of fuse_left: restore a fused matrix to tensor shape shp."""
    return np.reshape(A, shp)
def fuse_right(A):
    """Merge the physical and right-bond legs: (d, chiL, chiR) -> (chiL, d*chiR)."""
    d, chiL, chiR = A.shape
    swapped = np.transpose(A, (1, 0, 2))  # (chiL, d, chiR)
    return np.reshape(swapped, (chiL, d * chiR))
def unfuse_right(A, shp):
    """Inverse of fuse_right: restore a fused matrix to tensor shape shp."""
    d, chiL, chiR = shp
    restored = np.reshape(A, (chiL, d, chiR))
    return np.transpose(restored, (1, 0, 2))
"""
| |
1 1 (vectorized)
| |
3--left--right--4
| |
2 2
| |
"""
def leftgathermpo(left, right):
    """Contract the shared bond of two MPO tensors and fuse their up/down
    physical legs, returning a tensor of shape (d2, d2, chil, chir).
    Raises ValueError when either tensor's up/down dimensions differ.
    """
    dl1, dl2, chil, _ = left.shape
    dr1, dr2, _, chir = right.shape
    if dl1 != dl2 or dr1 != dr2:
        raise ValueError("bad shapes", left.shape, right.shape)
    d2 = dl1 * dr1
    # np.dot contracts left's last axis (chim) with right's second-to-last:
    # (dupL, ddownL, chil, dupR, ddownR, chir)
    merged = np.dot(left, right)
    merged = np.transpose(merged, (0, 3, 1, 4, 2, 5))
    return np.reshape(merged, (d2, d2, chil, chir))
"""
2--lam--gam--3
|
1
|
where lam is stored 1--lam--2
If lam is None this is a no-op.
lam can either be a vector of diagonal entries or a matrix.
This function also works if gam is a matrix.
"""
def leftmult(lam, gam):
    """
    2--lam--gam--3
            |
            1
    Multiply lam onto the left bond of gam, where lam is stored 1--lam--2.
    lam may be None (no-op), a vector of diagonal entries, or a matrix;
    gam may be a matrix or a rank-3 MPS tensor.
    """
    if lam is None:
        return gam
    lam_rank = len(lam.shape)
    gam_rank = len(gam.shape)
    if lam_rank == 1:
        # Diagonal lam: broadcast onto gam's left-bond axis.
        return lam[:, None] * gam
    if lam_rank == 2:
        if gam_rank == 2:
            # lambda is a matrix, note this assumes lam[2] hits gam[2]
            return np.dot(lam, gam)
        if gam_rank == 3:
            # equivalent to np.einsum('bi, aic', lam, gam)
            return scon([lam, gam], ([-2, 1], [-1, 1, -3]))
    raise IndexError("Invalid shapes. Gamma: ", gam.shape, "Lambda: ", lam.shape)
"""
2--gam--lam--3
|
1
|
where lam is stored 1--lam--2
If lam is None this is a no-op.
lam can either be a vector of diagonal entries or a matrix.
This function also works if gam is a matrix.
"""
def rightmult(gam, lam):
    """
    2--gam--lam--3
       |
       1
    Multiply lam onto the right bond of gam, where lam is stored 1--lam--2.
    lam may be None (no-op), a vector of diagonal entries, or a matrix.
    """
    if lam is None:
        return gam
    lam_rank = len(lam.shape)
    if lam_rank == 1:
        # Diagonal lam: broadcast onto gam's last (right-bond) axis.
        return lam * gam
    if lam_rank == 2:
        return np.dot(gam, lam)
    raise IndexError("Invalid shapes. Gamma: ", gam.shape, "Lambda: ", lam.shape)
def gauge_transform(gl, A, gr):
    """
        |
        1
    2--gl--A--gr--3
    Apply gauge matrices gl and gr to the left and right bonds of A.
    Either gauge may be None (identity).
    """
    left_applied = leftmult(gl, A)
    return rightmult(left_applied, gr)
################################################################################
#Chain contractors - MPS.
###############################################################################
# def XA(X, A):
# """
# 1 3
# | |
# X-A-5
# | |
# 2 4
# """
# return np.dot(X, A)
# def AX(A, X):
# """
# 3 1
# | |
# 5-A-X
# | |
# 4 2
# """
# A = np.transpose((A, (0,1,3,2)))
# return np.dot(X, A)
"""
|---B--1
| |
X---A--2
| |
|---B*-3
"""
def XopLmixed(A, B, X):
    # Left transfer-matrix step mixing an MPO tensor A with MPS tensors
    # B / conj(B), absorbing the rank-3 environment X (see diagram above).
    # NOTE(review): unimplemented stub -- the unconditional raise below makes
    # `return out` dead code, and the scon index lists are unverified.
    out = scon([B, A, np.conj(B), X],
               [[1,3,-1],
                [1,2,4,-2],
                [2,5,-3],
                [3,4,5]]
               )
    raise NotImplementedError()
    return out
"""
1---B--|
| |
2---A--X
| |
3---B*-|
"""
def XopRmixed(A, B, X):
    # Right transfer-matrix step mixing an MPO tensor A with MPS tensors
    # B / conj(B), absorbing the rank-3 environment X (see diagram above).
    # NOTE(review): unimplemented stub -- the unconditional raise below makes
    # `return out` dead code, and the scon index lists are unverified.
    out = scon([B, A, np.conj(B), X],
               [
                [1, -1, 3],
                [1, 2, -2, 4],
                [2, -3, 5],
                [3, 4, 5]
               ]
               )
    raise NotImplementedError()
    return out
"""
|---B1----B2----...-BN*-1
| | | |
X---A1----A2----...-A3--2
| | | |
|---B1*---B2*---...-BN*-3
(note: AN and BN will be contracted into a single matrix)
"""
def leftchainmixed(As, Bs, X):
    # Fold X through the mixed transfer matrices of (As, Bs) from the left.
    # NOTE(review): stubbed out -- the loop below never runs because of the
    # unconditional raise (XopLmixed is itself unimplemented).
    raise NotImplementedError()
    for A, B in zip(As, Bs):
        X = XopLmixed(A, B, X)
    return X
def rightchainmixed(As, Bs, X):
    # Fold X through the mixed transfer matrices of (As, Bs) from the right.
    # NOTE(review): stubbed out -- the loop below never runs because of the
    # unconditional raise (XopRmixed is itself unimplemented).
    raise NotImplementedError()
    for A, B in zip(As[::-1], Bs[::-1]):
        X = XopRmixed(A, B, X)
    return X
"""
|---A1---A2---...-AN-2
| | | |
X | | ... |
| | | |
|---B1---B2---...-BN-1
(note: AN and BN will be contracted into a single matrix)
"""
def leftchain(As, Bs=None, X=None):
    """Contract environment X through the chain's transfer matrices from
    the left (see diagram above). Bs defaults to conj(As); X=None starts
    from the identity environment inside XopL.
    """
    if Bs is None:
        Bs = [np.conj(A) for A in As]
    for A, B in zip(As, Bs):
        X = XopL(A, B=B, X=X)
    return X
"""
2---A1---A2---...-AN--
| | | |
| | ... | X
| | | |
1---B1---B2---...-BN--
(note: AN and BN will be contracted into a single matrix)
"""
def rightchain(As, Bs=None, X=None):
    """Contract environment X through the chain's transfer matrices from
    the right (see diagram above). Bs defaults to conj(As); X=None starts
    from the identity environment inside XopR.
    """
    if Bs is None:
        Bs = [np.conj(A) for A in As]
    for A, B in zip(reversed(As), reversed(Bs)):
        X = XopR(A, B=B, X=X)
    return X
def leftchainop(As, Op=None, Bs=None, X=None):
    """
    |---A1---A2---...-AN--2
    |   |    |        |
    |   |____|__ ... _|_
    X   |      Op      |
    |   |________ ... _|
    |   |    |        |
    |---B1---B2---...-BN--1
    Contract X from the left through the chain with operator Op spanning
    all physical legs. Bs defaults to conj(As); Op=None reduces to a
    plain leftchain.
    """
    if Op is None:
        return leftchain(As, Bs=Bs, X=X)
    if Bs is None:
        Bs = [np.conj(A) for A in As]
    nsites = len(As)
    d = As[0].shape[0]
    # Flatten the N-site operator to a single (d**N, d**N) matrix and the
    # tensors to one fat site, then apply a single transfer step.
    O = np.reshape(Op, (d**nsites, d**nsites))
    bigA = reduce(leftgather, As)
    bigB = reduce(leftgather, Bs)
    return XopL(bigA, B=bigB, O=O, X=X)
def rightchainop(As, Op=None, Bs=None, X=None):
    """
    2---A1---A2---...-AN--|
        |    |        |   |
        |____|__ ... _|_  |
        |      Op      |  X
        |________ ... _|  |
        |    |        |   |
    1---B1---B2---...-BN--|
    Contract X from the right through the chain with operator Op spanning
    all physical legs. Bs defaults to conj(As); Op=None reduces to a
    plain rightchain.
    """
    if Op is None:
        return rightchain(As, Bs=Bs, X=X)
    if Bs is None:
        Bs = [np.conj(A) for A in As]
    nsites = len(As)
    d = As[0].shape[0]
    # Flatten the N-site operator and gather the (reversed) tensors into
    # one fat site, then apply a single right transfer step.
    O = np.reshape(Op, (d**nsites, d**nsites))
    bigA = reduce(leftgather, As[::-1])
    bigB = reduce(leftgather, Bs[::-1])
    return XopR(bigA, B=bigB, O=O, X=X)
# topchis = list(range(1, 4*N, 4))
# topchis.append(-1)
# botchis = list(range(2, 4*N, 4))
# botchis.append(-2)
# topds = list(range(3, 4*N, 4))
# botds = list(range(4, 4*(N+1), 4))
# if X is not None:
# to_contract = [X,]
# Xidx = (topchis[0], botchis[0])
# idxs = [Xidx,]
# else:
# botchis[0] = topchis[0]
# to_contract = []
# idxs = []
# Aidxs = [(td, tcl, tcr) for
# td, tcl, tcr in zip(topds, topchis[:-1], topchis[1:])]
# Bidxs = [(bd, bcl, bcr) for
# bd, bcl, bcr in zip(botds, botchis[:-1], botchis[1:])]
# # print [A.shape for A in As]
# # print [B.shape for B in Bs]
# # print Op.shape
# # if X is not None:
# # print X.shape
# # Opidxs = [list(topds) + list(botds)]
# | |
self.plotMeanButton3D = qt.QPushButton("Toggle mean shape visibility")
self.plotMeanButton3D.checkable = True
self.plotMeanButton3D.toolTip = "Toggle visibility of mean shape plot"
meanShapeLayout.addWidget(self.plotMeanButton3D,1,2)
self.plotMeanButton3D.enabled = False
self.plotMeanButton3D.connect('clicked(bool)', self.toggleMeanPlot)
meanButtonLable=qt.QLabel("Mean point label visibility: ")
meanShapeLayout.addWidget(meanButtonLable,2,1)
self.showMeanLabelsButton = qt.QPushButton("Toggle label visibility")
self.showMeanLabelsButton.checkable = True
self.showMeanLabelsButton.toolTip = "Toggle visibility of mean point labels"
meanShapeLayout.addWidget(self.showMeanLabelsButton,2,2)
self.showMeanLabelsButton.enabled = False
self.showMeanLabelsButton.connect('clicked(bool)', self.toggleMeanLabels)
meanColorLable=qt.QLabel("Mean shape color: ")
meanShapeLayout.addWidget(meanColorLable,3,1)
self.meanShapeColor = ctk.ctkColorPickerButton()
self.meanShapeColor.displayColorName = False
self.meanShapeColor.color = qt.QColor(250,128,114)
meanShapeLayout.addWidget(self.meanShapeColor,3,2)
self.meanShapeColor.connect('colorChanged(QColor)', self.toggleMeanColor)
self.scaleMeanShapeSlider = ctk.ctkSliderWidget()
self.scaleMeanShapeSlider.singleStep = .1
self.scaleMeanShapeSlider.minimum = 0
self.scaleMeanShapeSlider.maximum = 10
self.scaleMeanShapeSlider.value = 3
self.scaleMeanShapeSlider.setToolTip("Set scale for mean shape glyphs")
meanShapeSliderLabel=qt.QLabel("Mean shape glyph scale")
meanShapeLayout.addWidget(meanShapeSliderLabel,4,1)
meanShapeLayout.addWidget(self.scaleMeanShapeSlider,4,2)
self.scaleMeanShapeSlider.connect('valueChanged(double)', self.scaleMeanGlyph)
# Landmark Variance Section
distributionFrame=ctk.ctkCollapsibleButton()
distributionFrame.text="Landmark Variance Plot Options"
distributionLayout= qt.QGridLayout(distributionFrame)
exploreTabLayout.addRow(distributionFrame)
self.EllipseType=qt.QRadioButton()
ellipseTypeLabel=qt.QLabel("Ellipse type")
self.EllipseType.setChecked(True)
distributionLayout.addWidget(ellipseTypeLabel,2,1)
distributionLayout.addWidget(self.EllipseType,2,2,1,2)
self.SphereType=qt.QRadioButton()
sphereTypeLabel=qt.QLabel("Sphere type")
distributionLayout.addWidget(sphereTypeLabel,3,1)
distributionLayout.addWidget(self.SphereType,3,2,1,2)
self.CloudType=qt.QRadioButton()
cloudTypeLabel=qt.QLabel("Point cloud type")
distributionLayout.addWidget(cloudTypeLabel,4,1)
distributionLayout.addWidget(self.CloudType,4,2,1,2)
self.NoneType=qt.QRadioButton()
noneTypeLabel=qt.QLabel("None")
distributionLayout.addWidget(noneTypeLabel,5,1)
distributionLayout.addWidget(self.NoneType,5,2,1,2)
self.scaleSlider = ctk.ctkSliderWidget()
self.scaleSlider.singleStep = .1
self.scaleSlider.minimum = 0
self.scaleSlider.maximum = 10
self.scaleSlider.value = 3
self.scaleSlider.enabled = False
self.scaleSlider.setToolTip("Set scale for variance visualization")
sliderLabel=qt.QLabel("Scale Glyphs")
distributionLayout.addWidget(sliderLabel,2,3)
distributionLayout.addWidget(self.scaleSlider,3,3,1,2)
self.scaleSlider.connect('valueChanged(double)', self.onPlotDistribution)
self.plotDistributionButton = qt.QPushButton("Plot LM variance")
self.plotDistributionButton.checkable = True
self.plotDistributionButton.toolTip = "Visualize variance of landmarks from all subjects"
distributionLayout.addWidget(self.plotDistributionButton,7,1,1,4)
self.plotDistributionButton.enabled = False
self.plotDistributionButton.connect('clicked(bool)', self.onPlotDistribution)
#PC plot section
plotFrame=ctk.ctkCollapsibleButton()
plotFrame.text="PCA Scatter Plot Options"
plotLayout= qt.QGridLayout(plotFrame)
exploreTabLayout.addRow(plotFrame)
self.XcomboBox=qt.QComboBox()
Xlabel=qt.QLabel("X Axis")
plotLayout.addWidget(Xlabel,1,1)
plotLayout.addWidget(self.XcomboBox,1,2,1,3)
self.YcomboBox=qt.QComboBox()
Ylabel=qt.QLabel("Y Axis")
plotLayout.addWidget(Ylabel,2,1)
plotLayout.addWidget(self.YcomboBox,2,2,1,3)
self.factorNameLabel=qt.QLabel('Factor Name:')
plotLayout.addWidget(self.factorNameLabel,3,1)
self.factorName=qt.QLineEdit()
self.factorName.setToolTip("Enter factor name")
self.factorName.connect('textChanged(const QString &)', self.factorStringChanged)
plotLayout.addWidget(self.factorName,3,2)
self.inputFactorButton = qt.QPushButton("Add factor data")
self.inputFactorButton.checkable = True
self.inputFactorButton.toolTip = "Open table to input factor data"
plotLayout.addWidget(self.inputFactorButton,3,4)
self.inputFactorButton.enabled = False
self.inputFactorButton.connect('clicked(bool)', self.enterFactors)
self.selectFactor=qt.QComboBox()
self.selectFactor.addItem("No factor data")
selectFactorLabel=qt.QLabel("Select factor: ")
plotLayout.addWidget(selectFactorLabel,4,1)
plotLayout.addWidget(self.selectFactor,4,2,)
self.plotButton = qt.QPushButton("Scatter Plot")
self.plotButton.checkable = True
self.plotButton.toolTip = "Plot PCs"
plotLayout.addWidget(self.plotButton,5,1,1,5)
self.plotButton.enabled = False
self.plotButton.connect('clicked(bool)', self.plot)
# Lollipop Plot Section
lolliFrame=ctk.ctkCollapsibleButton()
lolliFrame.text="Lollipop Plot Options"
lolliLayout= qt.QGridLayout(lolliFrame)
exploreTabLayout.addRow(lolliFrame)
self.vectorOne=qt.QComboBox()
vectorOneLabel=qt.QLabel("Vector One: Red")
lolliLayout.addWidget(vectorOneLabel,1,1)
lolliLayout.addWidget(self.vectorOne,1,2,1,3)
self.vectorTwo=qt.QComboBox()
vector2Label=qt.QLabel("Vector Two: Green")
lolliLayout.addWidget(vector2Label,2,1)
lolliLayout.addWidget(self.vectorTwo,2,2,1,3)
self.vectorThree=qt.QComboBox()
vector3Label=qt.QLabel("Vector Three: Blue")
lolliLayout.addWidget(vector3Label,3,1)
lolliLayout.addWidget(self.vectorThree,3,2,1,3)
self.TwoDType=qt.QCheckBox()
self.TwoDType.checked = False
self.TwoDType.setText("Lollipop 2D Projection")
lolliLayout.addWidget(self.TwoDType,4,2)
self.lolliButton = qt.QPushButton("Lollipop Vector Plot")
self.lolliButton.checkable = True
self.lolliButton.toolTip = "Plot PC vectors"
lolliLayout.addWidget(self.lolliButton,6,1,1,6)
self.lolliButton.enabled = False
self.lolliButton.connect('clicked(bool)', self.lolliPlot)
################################### Visualize Tab ###################################
# Interactive view set up tab
selectTemplatesButton=ctk.ctkCollapsibleButton()
selectTemplatesButton.text="Setup Interactive Visualization"
selectTemplatesLayout= qt.QGridLayout(selectTemplatesButton)
visualizeTabLayout.addRow(selectTemplatesButton)
self.landmarkVisualizationType=qt.QRadioButton()
landmarkVisualizationTypeLabel=qt.QLabel("Mean shape visualization")
self.landmarkVisualizationType.setChecked(True)
self.landmarkVisualizationType.enabled = False
selectTemplatesLayout.addWidget(landmarkVisualizationTypeLabel,2,1)
selectTemplatesLayout.addWidget(self.landmarkVisualizationType,2,2,1,4)
self.modelVisualizationType=qt.QRadioButton()
self.modelVisualizationType.enabled = False
modelVisualizationTypeLabel=qt.QLabel("3D model visualization")
selectTemplatesLayout.addWidget(modelVisualizationTypeLabel,3,1)
selectTemplatesLayout.addWidget(self.modelVisualizationType,3,2,1,4)
self.landmarkVisualizationType.connect('toggled(bool)', self.onToggleVisualization)
self.modelVisualizationType.connect('toggled(bool)', self.onToggleVisualization)
self.grayscaleSelectorLabel = qt.QLabel("Specify reference model")
self.grayscaleSelectorLabel.setToolTip( "Load the model for the interactive visualization")
self.grayscaleSelectorLabel.enabled = False
selectTemplatesLayout.addWidget(self.grayscaleSelectorLabel,4,2)
self.grayscaleSelector = ctk.ctkPathLineEdit()
self.grayscaleSelector.filters = ctk.ctkPathLineEdit().Files
self.grayscaleSelector.nameFilters= ["Model (*.ply *.stl *.obj *.vtk *.vtp *.orig *.g .byu )"]
self.grayscaleSelector.enabled = False
self.grayscaleSelector.connect('validInputChanged(bool)', self.onModelSelected)
selectTemplatesLayout.addWidget(self.grayscaleSelector,4,3,1,3)
self.FudSelectLabel = qt.QLabel("Specify LM set for the selected model: ")
self.FudSelectLabel.setToolTip( "Select the landmark set that corresponds to the reference model")
self.FudSelectLabel.enabled = False
self.FudSelect = ctk.ctkPathLineEdit()
self.FudSelect.filters = ctk.ctkPathLineEdit().Files
self.FudSelect.nameFilters=["Landmarks (*.json *.mrk.json *.fcsv )"]
self.FudSelect.enabled = False
self.FudSelect.connect('validInputChanged(bool)', self.onModelSelected)
selectTemplatesLayout.addWidget(self.FudSelectLabel,5,2)
selectTemplatesLayout.addWidget(self.FudSelect,5,3,1,3)
self.selectorButton = qt.QPushButton("Apply")
self.selectorButton.checkable = True
selectTemplatesLayout.addWidget(self.selectorButton,6,1,1,5)
self.selectorButton.enabled = False
self.selectorButton.connect('clicked(bool)', self.onSelect)
# PC warping
vis=ctk.ctkCollapsibleButton()
vis.text='PCA Visualization Parameters'
visLayout= qt.QGridLayout(vis)
visualizeTabLayout.addRow(vis)
self.applyEnabled=False
def warpOnChangePC1(value):
if self.applyEnabled and self.slider1.boxValue() is not 'None':
self.onApply()
def warpOnChangePC2(value):
if self.applyEnabled and self.slider2.boxValue() is not 'None':
self.onApply()
self.PCList=[]
self.slider1=sliderGroup(onChanged = warpOnChangePC1)
self.slider1.connectList(self.PCList)
visLayout.addWidget(self.slider1,3,1,1,2)
self.slider2=sliderGroup(onChanged = warpOnChangePC2)
self.slider2.connectList(self.PCList)
visLayout.addWidget(self.slider2,4,1,1,2)
# Create Animations
animate=ctk.ctkCollapsibleButton()
animate.text='Create animation of PC Warping'
animateLayout= qt.QGridLayout(animate)
visualizeTabLayout.addRow(animate)
self.startRecordButton = qt.QPushButton("Start Recording")
self.startRecordButton.toolTip = "Start recording PCA warping applied manually using the slider bars."
self.startRecordButton.enabled = False
animateLayout.addWidget(self.startRecordButton,1,1,1,2)
self.startRecordButton.connect('clicked(bool)', self.onStartRecording)
self.stopRecordButton = qt.QPushButton("Stop Recording")
self.stopRecordButton.toolTip = "Stop recording PC warping and review recording in the Sequences module."
self.stopRecordButton.enabled = False
animateLayout.addWidget(self.stopRecordButton,1,5,1,2)
self.stopRecordButton.connect('clicked(bool)', self.onStopRecording)
# Reset button
resetButton = qt.QPushButton("Reset Scene")
resetButton.checkable = True
visualizeTabLayout.addRow(resetButton)
resetButton.toolTip = "Push to reset all fields."
resetButton.connect('clicked(bool)', self.reset)
self.layout.addStretch(1)
# Add menu buttons
self.addLayoutButton(500, 'GPA Module View', 'Custom layout for GPA module', 'LayoutSlicerMorphView.png', slicer.customLayoutSM)
self.addLayoutButton(501, 'Table Only View', 'Custom layout for GPA module', 'LayoutTableOnlyView.png', slicer.customLayoutTableOnly)
self.addLayoutButton(502, 'Plot Only View', 'Custom layout for GPA module', 'LayoutPlotOnlyView.png', slicer.customLayoutPlotOnly)
# module update helper functions
  def assignLayoutDescription(self):
    """Switch to the custom GPA layout (ID 500) and wire the scene nodes
    into its views: mean landmarks + reference model in View1/Red, their
    clones in View2, then fit the Red slice to the landmark bounds and
    reset the 3D cameras.
    """
    customLayoutId1=500
    layoutManager = slicer.app.layoutManager()
    layoutManager.setLayout(customLayoutId1)
    #link whatever is in the 3D views
    viewNode1 = slicer.mrmlScene.GetFirstNodeByName("View1") #name = "View"+ singletonTag
    viewNode2 = slicer.mrmlScene.GetFirstNodeByName("View2")
    viewNode1.SetAxisLabelsVisible(False)
    viewNode2.SetAxisLabelsVisible(False)
    viewNode1.SetLinkedControl(True)
    viewNode2.SetLinkedControl(True)
    # Assign nodes to appropriate views
    redNode = layoutManager.sliceWidget('Red').sliceView().mrmlSliceNode()
    self.meanLandmarkNode.GetDisplayNode().SetViewNodeIDs([viewNode1.GetID(),redNode.GetID()])
    # clone nodes only exist after a warping setup; guard with hasattr
    if hasattr(self, 'cloneLandmarkDisplayNode'):
      self.cloneLandmarkDisplayNode.SetViewNodeIDs([viewNode2.GetID()])
    # check for loaded reference model
    if hasattr(self, 'modelDisplayNode'):
      self.modelDisplayNode.SetViewNodeIDs([viewNode1.GetID()])
    if hasattr(self, 'cloneModelDisplayNode'):
      self.cloneModelDisplayNode.SetViewNodeIDs([viewNode2.GetID()])
    # fit the red slice node to show the plot projections
    rasBounds = [0,]*6
    self.meanLandmarkNode.GetRASBounds(rasBounds)
    # center the slice on the landmark bounding box
    redNode.GetSliceToRAS().SetElement(0, 3, (rasBounds[1]+rasBounds[0]) / 2.)
    redNode.GetSliceToRAS().SetElement(1, 3, (rasBounds[3]+rasBounds[2]) / 2.)
    redNode.GetSliceToRAS().SetElement(2, 3, (rasBounds[5]+rasBounds[4]) / 2.)
    rSize = rasBounds[1]-rasBounds[0]
    aSize = rasBounds[3]-rasBounds[2]
    dimensions = redNode.GetDimensions()
    aspectRatio = float(dimensions[0]) / float(dimensions[1])
    # pick field of view so the larger extent fills the slice view
    if rSize > aSize:
      redNode.SetFieldOfView(rSize, rSize/aspectRatio, 1.)
    else:
      redNode.SetFieldOfView(aSize*aspectRatio, aSize, 1.)
    redNode.UpdateMatrices()
    # reset 3D cameras
    threeDWidget = layoutManager.threeDWidget(0)
    if bool(threeDWidget.name == 'ThreeDWidget1'):
      threeDView = threeDWidget.threeDView()
      threeDView.resetFocalPoint()
      threeDView.resetCamera()
    else:
      # widget order can vary; fall back to the second widget
      threeDWidget = layoutManager.threeDWidget(1)
      threeDView = threeDWidget.threeDView()
      threeDView.resetFocalPoint()
      threeDView.resetCamera()
def textIn(self,label, dispText, toolTip):
""" a function to set up the appearance of a QlineEdit widget.
"""
# set up text line
textInLine=qt.QLineEdit();
textInLine.setText(dispText)
textInLine.toolTip = toolTip
# set up label
lineLabel=qt.QLabel()
lineLabel.setText(label)
# make clickable button
button=qt.QPushButton("..")
return textInLine, lineLabel, button
def updateList(self):
i,j,k=self.LM.lm.shape
self.PCList=[]
self.slider1.populateComboBox(self.PCList)
self.slider2.populateComboBox(self.PCList)
self.PCList.append('None')
self.LM.val=np.real(self.LM.val)
percentVar=self.LM.val/self.LM.val.sum()
self.vectorOne.clear()
self.vectorTwo.clear()
self.vectorThree.clear()
self.XcomboBox.clear()
self.XcomboBox.clear()
self.YcomboBox.clear()
self.vectorOne.addItem('None')
self.vectorTwo.addItem('None')
self.vectorThree.addItem('None')
if len(percentVar)<self.pcNumber:
self.pcNumber=len(percentVar)
for x in range(self.pcNumber):
tmp="{:.1f}".format(percentVar[x]*100)
string='PC '+str(x+1)+': '+str(tmp)+"%" +" var"
self.PCList.append(string)
self.XcomboBox.addItem(string)
self.YcomboBox.addItem(string)
self.vectorOne.addItem(string)
self.vectorTwo.addItem(string)
self.vectorThree.addItem(string)
def factorStringChanged(self):
if self.factorName.text is not "":
self.inputFactorButton.enabled = True
else:
self.inputFactorButton.enabled = False
  def populateDistanceTable(self, files):
    """Create and display a table + bar chart of Procrustes distances.

    files -- subject file names, aligned index-for-index with
             self.LM.procdist rows. Rows are sorted by ascending distance.
    All created MRML nodes are registered in GPANodeCollection so reset()
    can remove them.
    """
    # structured array so filenames and distances sort together
    sortedArray = np.zeros(len(files), dtype={'names':('filename', 'procdist'),'formats':('U50','f8')})
    sortedArray['filename']=files
    sortedArray['procdist']=self.LM.procdist[:,0]
    sortedArray.sort(order='procdist')
    tableNode = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLTableNode', 'Procrustes Distance Table')
    GPANodeCollection.AddItem(tableNode)
    col1=tableNode.AddColumn()
    col1.SetName('ID')
    col2=tableNode.AddColumn()
    col2.SetName('Procrustes Distance')
    tableNode.SetColumnType('ID',vtk.VTK_STRING)
    tableNode.SetColumnType('Procrustes Distance',vtk.VTK_FLOAT)
    for i in range(len(files)):
      tableNode.AddEmptyRow()
      tableNode.SetCellText(i,0,sortedArray['filename'][i])
      tableNode.SetCellText(i,1,str(sortedArray['procdist'][i]))
    barPlot = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLPlotSeriesNode', 'Distances')
    GPANodeCollection.AddItem(barPlot)
    barPlot.SetAndObserveTableNodeID(tableNode.GetID())
    barPlot.SetPlotType(slicer.vtkMRMLPlotSeriesNode.PlotTypeBar)
    barPlot.SetLabelColumnName('ID') #displayed when hovering mouse
    barPlot.SetYColumnName('Procrustes Distance') # for bar plots, index is the x-value
    chartNode = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLPlotChartNode', 'Procrustes Distance Chart')
    GPANodeCollection.AddItem(chartNode)
    chartNode.SetTitle('Procrustes Distances')
    chartNode.SetLegendVisibility(False)
    chartNode.SetYAxisTitle('Distance')
    chartNode.SetXAxisTitle('Subjects')
    chartNode.AddAndObservePlotSeriesNodeID(barPlot.GetID())
    layoutManager = slicer.app.layoutManager()
    self.assignLayoutDescription()
    #set up custom layout
    plotWidget = layoutManager.plotWidget(0)
    plotViewNode = plotWidget.mrmlPlotViewNode()
    plotViewNode.SetPlotChartNodeID(chartNode.GetID())
    #add table to new layout
    slicer.app.applicationLogic().GetSelectionNode().SetReferenceActiveTableID(tableNode.GetID())
    slicer.app.applicationLogic().PropagateTableSelection()
  def enterFactors(self):
    """Open an editable table for entering per-subject factor data.

    Creates (or replaces) self.factorTableNode with one row per subject
    ID and an empty column named after self.factorName, shows it in the
    table view, and registers the new factor in self.selectFactor.
    """
    # NOTE(review): the 'procdist' field below is declared but never filled
    # or sorted here -- only 'filename' is used; rows stay in file order.
    sortedArray = np.zeros(len(self.files), dtype={'names':('filename', 'procdist'),'formats':('U50','f8')})
    sortedArray['filename']=self.files
    #check for an existing factor table, if so remove
    if hasattr(self, 'factorTableNode'):
      GPANodeCollection.RemoveItem(self.factorTableNode)
      slicer.mrmlScene.RemoveNode(self.factorTableNode)
    self.factorTableNode = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLTableNode', 'Factor Table')
    GPANodeCollection.AddItem(self.factorTableNode)
    col1=self.factorTableNode.AddColumn()
    col1.SetName('ID')
    for i in range(len(self.files)):
      self.factorTableNode.AddEmptyRow()
      self.factorTableNode.SetCellText(i,0,sortedArray['filename'][i])
    col2=self.factorTableNode.AddColumn()
    col2.SetName(self.factorName.text)
    self.selectFactor.addItem(self.factorName.text)
    #add table to new layout
    slicer.app.applicationLogic().GetSelectionNode().SetReferenceActiveTableID(self.factorTableNode.GetID())
    slicer.app.applicationLogic().PropagateTableSelection()
    self.factorTableNode.GetTable().Modified()
    #reset text field for names
    self.factorName.setText("")
    self.inputFactorButton.enabled = False
  def reset(self):
    """Return the module to its initial state.

    Clears all text fields, path selectors, combo boxes and sliders,
    disables the workflow buttons, and removes every MRML node the module
    created (via nodeCleanUp).
    """
    # delete the two data objects
    # reset text fields
    self.outputDirectory=None
    self.outText.setText(" ")
    self.LM_dir_name=None
    self.openResultsButton.enabled = False
    self.grayscaleSelector.setCurrentPath("")
    self.FudSelect.setCurrentPath("")
    self.grayscaleSelector.enabled = False
    self.FudSelect.enabled = False
    self.slider1.clear()
    self.slider2.clear()
    self.vectorOne.clear()
    self.vectorTwo.clear()
    self.vectorThree.clear()
    self.XcomboBox.clear()
    self.YcomboBox.clear()
    self.selectFactor.clear()
    self.factorName.setText("")
    # restore default glyph scales and mean-shape color
    self.scaleSlider.value=3
    self.scaleMeanShapeSlider.value=3
    self.meanShapeColor.color=qt.QColor(250,128,114)
    self.scaleSlider.enabled = False
    # Disable buttons for workflow
    self.plotButton.enabled = False
    self.inputFactorButton.enabled = False
    self.lolliButton.enabled = False
    self.plotDistributionButton.enabled = False
    self.plotMeanButton3D.enabled = False
    self.showMeanLabelsButton.enabled = False
    self.loadButton.enabled = False
    self.landmarkVisualizationType.enabled = False
    self.modelVisualizationType.enabled = False
    self.selectorButton.enabled = False
    self.stopRecordButton.enabled = False
    self.startRecordButton.enabled = False
    #delete data from previous runs
    self.nodeCleanUp()
  def nodeCleanUp(self):
    """Remove every MRML node tracked in GPANodeCollection from the scene.

    NOTE(review): items are removed from GPANodeCollection while iterating
    over it; verify the collection's iteration tolerates concurrent
    removal (otherwise iterate over a snapshot of the items).
    """
    # clear all nodes created by the module
    for node in GPANodeCollection:
      GPANodeCollection.RemoveItem(node)
      slicer.mrmlScene.RemoveNode(node)
    def addLayoutButton(self, layoutID, buttonAction, toolTip, imageFileName, layoutDiscription):
        """Register a custom layout and expose it in the Slicer view toolbar menu.

        layoutID: integer id of the new layout.
        buttonAction: text shown for the new menu action.
        toolTip: tooltip shown on hover.
        imageFileName: icon file name inside Resources/Icons.
        layoutDiscription: layout description XML string (original parameter
            spelling retained for caller compatibility).
        """
        layoutManager = slicer.app.layoutManager()
        layoutManager.layoutLogic().GetLayoutNode().AddLayoutDescription(layoutID, layoutDiscription)
        viewToolBar = slicer.util.mainWindow().findChild('QToolBar', 'ViewToolBar')
        layoutMenu = viewToolBar.widgetForAction(viewToolBar.actions()[0]).menu()
        layoutSwitchActionParent = layoutMenu
        # use `layoutMenu` to add inside layout list, use `viewToolBar` to add next the standard layout list
        layoutSwitchAction = layoutSwitchActionParent.addAction(buttonAction) # add inside layout list
        moduleDir = os.path.dirname(slicer.util.modulePath(self.__module__))
        iconPath = os.path.join(moduleDir, 'Resources/Icons', imageFileName)
        layoutSwitchAction.setIcon(qt.QIcon(iconPath))
        layoutSwitchAction.setToolTip(toolTip)
        # layoutID is bound as a default argument so the lambda does not late-bind
        layoutSwitchAction.connect('triggered()', lambda layoutId = layoutID: slicer.app.layoutManager().setLayout(layoutId))
        layoutSwitchAction.setData(layoutID)
# Setup Analysis callbacks and helpers
    def onClearButton(self):
        """Clear the selected landmark file list and disable the clear button."""
        self.inputFileTable.clear()
        self.inputFilePaths = []
        self.clearButton.enabled = False
def onSelectLandmarkFiles(self):
self.inputFileTable.clear()
self.inputFilePaths = []
filter = ["Landmarks (*.json *.mrk.json *.fcsv )"]
self.inputFilePaths = qt.QFileDialog().getOpenFileNames(None, "Window name", "", filter)
self.inputFileTable.plainText = '\n'.join(self.inputFilePaths)
self.clearButton.enabled = True
#enable load button if required fields are complete
filePathsExist = bool(self.inputFilePaths is not [] )
self.loadButton.enabled = bool (filePathsExist and hasattr(self, 'outputDirectory'))
if filePathsExist:
self.LM_dir_name = os.path.dirname(self.inputFilePaths[0])
basename, self.extension = os.path.splitext(self.inputFilePaths[0])
if self.extension == '.json':
basename, secondExtension = os.path.splitext(basename)
self.extension = secondExtension + self.extension
self.files=[]
for path in self.inputFilePaths:
basename = os.path.basename(path).rpartition('.')[0].rpartition('.')[0]
self.files.append(basename)
    def onSelectOutputDirectory(self):
        """Prompt the user for the directory that will receive analysis output."""
        self.outputDirectory=qt.QFileDialog().getExistingDirectory()
| |
import json
from _sha1 import sha1
from datetime import datetime
from unittest import TestCase
from unittest.mock import patch, Mock
from urllib.parse import urlencode
from verification.application.handlers.jumio_verification_handlers import submit
from verification.application.handlers.verification_handlers import initiate, get_status, callback
from verification.application.services.verification_manager import verification_repository, jumio_repository
from verification.constants import JumioTransactionStatus, VerificationStatus, JumioVerificationStatus
from verification.infrastructure.models import JumioVerificationModel, VerificationModel
from verification.testcases.test_veriables import test_initiate_redirect_url, reject_reason
class TestJumioVerification(TestCase):
    def setUp(self):
        # No shared fixtures: each test seeds its own repository rows.
        pass
@patch("common.boto_utils.BotoUtils", return_value=Mock(get_ssm_parameter=Mock(return_value="123"),
invoke_lambda=Mock(return_value={"statusCode": 201})))
@patch("requests.post", return_value=Mock(
json=Mock(return_value={
"timestamp": "2018-07-03T08:23:12.494Z", "transactionReference": "123-13-13-134-1234",
"redirectUrl": test_initiate_redirect_url
}),
status_code=200
))
def test_jumio_initiate(self, mock_requests_post, mock_boto_utils):
username = "<EMAIL>"
event = {
"requestContext": {"authorizer": {"claims": {"email": username}}},
"body": json.dumps({
"type": "JUMIO",
"entity_id": username
})
}
response = initiate(event, None)
self.assertDictEqual(json.loads(response["body"]), {
"status": "success",
"data": {"redirect_url":
"https://yourcompany.netverify.com/web/v4/app?locale=en-GB&authorizationToken=xxx"},
"error": {}})
verification = verification_repository.session.query(VerificationModel).first()
if verification is None:
assert False
self.assertEqual(verification.entity_id, username)
self.assertEqual(verification.status, VerificationStatus.PENDING.value)
self.assertEqual(verification.requestee, username)
self.assertEqual(verification.verification_type, "JUMIO")
assert verification.id is not None or verification.id != ""
jumio_verfication = jumio_repository.session.query(JumioVerificationModel).first()
if jumio_verfication is None:
assert False
self.assertEqual(jumio_verfication.jumio_reference_id, '123-13-13-134-1234')
self.assertEqual(jumio_verfication.redirect_url, test_initiate_redirect_url)
self.assertEqual(jumio_verfication.transaction_status, JumioTransactionStatus.PENDING.value)
self.assertEqual(jumio_verfication.username, username)
self.assertEqual(jumio_verfication.verification_status, JumioVerificationStatus.PENDING.value)
self.assertEqual(jumio_verfication.user_reference_id, sha1(username.encode("utf-8")).hexdigest())
@patch("common.boto_utils.BotoUtils", return_value=Mock(get_ssm_parameter=Mock(return_value="123"),
invoke_lambda=Mock(return_value={"statusCode": 201})))
def test_jumio_initiate_two(self, mock_boto_utils):
""" user is from verified domain list """
username = "<EMAIL>"
event = {
"requestContext": {"authorizer": {"claims": {"email": username}}},
"body": json.dumps({
"type": "JUMIO",
"entity_id": username
})
}
response = initiate(event, None)
verification = verification_repository.session.query(VerificationModel).first()
if verification is None:
assert False
self.assertEqual(verification.entity_id, username)
self.assertEqual(verification.status, VerificationStatus.APPROVED.value)
self.assertEqual(verification.requestee, username)
self.assertEqual(verification.verification_type, "JUMIO")
assert verification.id is not None or verification.id != ""
@patch("common.boto_utils.BotoUtils", return_value=Mock(get_ssm_parameter=Mock(return_value="123"),
invoke_lambda=Mock(return_value={"statusCode": 201})))
def test_jumio_submit_with_success(self, mock_boto_utils):
test_verification_id = "9f2c90119cb7424b8d69319ce211ddfc"
verification_type = "JUMIO"
username = "<EMAIL>"
current_time = datetime.utcnow()
verification_repository.add_item(VerificationModel(
id=test_verification_id, verification_type=verification_type, entity_id=username, status="PENDING",
requestee=username, created_at=current_time, updated_at=current_time
))
jumio_repository.add_item(JumioVerificationModel(
verification_id=test_verification_id, username=username, jumio_reference_id="123-13-13-134-1234",
user_reference_id=sha1(username.encode("utf-8")).hexdigest(),
redirect_url="https://yourcompany.netverify.com/web/v4/app?locale=en-GB&authorizationToken=xxx",
transaction_status="PENDING", verification_status="PENDING", transaction_date=current_time,
created_at=current_time
))
event = {
"queryStringParameters": {
"transactionStatus": "SUCCESS"
},
"pathParameters": {
"verification_id": test_verification_id
}
}
submit(event, None)
verification = verification_repository.session.query(VerificationModel).first()
if verification is None:
assert False
self.assertEqual(verification.entity_id, username)
self.assertEqual(verification.status, VerificationStatus.PENDING.value)
self.assertEqual(verification.requestee, username)
self.assertEqual(verification.verification_type, "JUMIO")
assert (verification.id is not None or verification.id != "") and verification.id == test_verification_id
jumio_verfication = jumio_repository.session.query(JumioVerificationModel).first()
if jumio_verfication is None:
assert False
self.assertEqual(jumio_verfication.jumio_reference_id, '123-13-13-134-1234')
self.assertEqual(jumio_verfication.redirect_url, test_initiate_redirect_url)
self.assertEqual(jumio_verfication.transaction_status, JumioTransactionStatus.SUCCESS.value)
self.assertEqual(jumio_verfication.verification_status, JumioVerificationStatus.PENDING.value)
self.assertEqual(jumio_verfication.username, username)
self.assertEqual(jumio_verfication.user_reference_id, sha1(username.encode("utf-8")).hexdigest())
@patch("common.boto_utils.BotoUtils", return_value=Mock(get_ssm_parameter=Mock(return_value="123"),
invoke_lambda=Mock(return_value={"statusCode": 201})))
def test_jumio_submit_with_error(self, mock_boto_utils):
test_verification_id = "9f2c90119cb7424b8d69319ce211ddfc"
verification_type = "JUMIO"
username = "<EMAIL>"
current_time = datetime.utcnow()
verification_repository.add_item(VerificationModel(
id=test_verification_id, verification_type=verification_type, entity_id=username, status="PENDING",
requestee=username, created_at=current_time, updated_at=current_time
))
jumio_repository.add_item(JumioVerificationModel(
verification_id=test_verification_id, username=username, jumio_reference_id="123-13-13-134-1234",
user_reference_id=sha1(username.encode("utf-8")).hexdigest(),
redirect_url="https://yourcompany.netverify.com/web/v4/app?locale=en-GB&authorizationToken=xxx",
transaction_status="PENDING", verification_status="PENDING", transaction_date=current_time,
created_at=current_time
))
event = {
"queryStringParameters": {
"transactionStatus": "ERROR"
},
"pathParameters": {
"verification_id": test_verification_id
}
}
submit(event, None)
verification = verification_repository.session.query(VerificationModel).first()
if verification is None:
assert False
self.assertEqual(verification.entity_id, username)
self.assertEqual(verification.status, VerificationStatus.ERROR.value)
self.assertEqual(verification.requestee, username)
self.assertEqual(verification.verification_type, "JUMIO")
assert (verification.id is not None or verification.id != "") and verification.id == test_verification_id
jumio_verfication = jumio_repository.session.query(JumioVerificationModel).first()
if jumio_verfication is None:
assert False
self.assertEqual(jumio_verfication.jumio_reference_id, '123-13-13-134-1234')
self.assertEqual(jumio_verfication.redirect_url, test_initiate_redirect_url)
self.assertEqual(jumio_verfication.transaction_status, JumioTransactionStatus.ERROR.value)
self.assertEqual(jumio_verfication.verification_status, JumioVerificationStatus.PENDING.value)
self.assertEqual(jumio_verfication.username, username)
self.assertEqual(jumio_verfication.user_reference_id, sha1(username.encode("utf-8")).hexdigest())
@patch("common.boto_utils.BotoUtils", return_value=Mock(get_ssm_parameter=Mock(return_value="123"),
invoke_lambda=Mock(return_value={"statusCode": 201})))
def test_jumio_get_status(self, mock_boto_utils):
test_verification_id = "9f2c90119cb7424b8d69319ce211ddfc"
verification_type = "JUMIO"
username = "<EMAIL>"
current_time = datetime.utcnow()
verification_repository.add_item(VerificationModel(
id=test_verification_id, verification_type=verification_type, entity_id=username, status="PENDING",
requestee=username, created_at=current_time, updated_at=current_time
))
jumio_repository.add_item(JumioVerificationModel(
verification_id=test_verification_id, username=username, jumio_reference_id="123-13-13-134-1234",
user_reference_id=sha1(username.encode("utf-8")).hexdigest(),
redirect_url="https://yourcompany.netverify.com/web/v4/app?locale=en-GB&authorizationToken=xxx",
transaction_status="PENDING", verification_status="PENDING", transaction_date=current_time,
created_at=current_time
))
event = {
"requestContext": {"authorizer": {"claims": {"email": username}}},
"queryStringParameters": {
"type": "JUMIO"
}
}
verification = json.loads(get_status(event, None)["body"])["data"]
self.assertEqual(verification["entity_id"], username)
self.assertEqual(verification["status"], VerificationStatus.PENDING.value)
self.assertEqual(verification["requestee"], username)
self.assertEqual(verification["type"], "JUMIO")
assert (verification["id"] is not None or verification["id"] != "") and verification[
"id"] == test_verification_id
@patch("common.boto_utils.BotoUtils", return_value=Mock(get_ssm_parameter=Mock(return_value="123"),
invoke_lambda=Mock(return_value={"statusCode": 201})))
def test_jumio_callback_approved(self, mock_boto_utils):
test_verification_id = "9f2c90119cb7424b8d69319ce211ddfc"
verification_type = "JUMIO"
username = "<EMAIL>"
current_time = datetime.utcnow()
verification_repository.add_item(VerificationModel(
id=test_verification_id, verification_type=verification_type, entity_id=username, status="PENDING",
requestee=username, created_at=current_time, updated_at=current_time
))
jumio_repository.add_item(JumioVerificationModel(
verification_id=test_verification_id, username=username, jumio_reference_id="123-13-13-134-1234",
user_reference_id=sha1(username.encode("utf-8")).hexdigest(),
redirect_url="https://yourcompany.netverify.com/web/v4/app?locale=en-GB&authorizationToken=xxx",
transaction_status=JumioTransactionStatus.SUCCESS.value,
verification_status=JumioVerificationStatus.PENDING.value, transaction_date=current_time,
created_at=current_time
))
event = {
"body": urlencode(
{
"callBackType": "NETVERIFYID",
"callbackDate": "2020-03-06T12:10:50.835Z",
"clientIp": "172.16.58.3",
"customerId": "14bb645983cafeb2bb14bf4df2dff297182aef9f",
"firstAttemptDate": "2020-03-06T12:10:31.339Z",
"idCheckDataPositions": "N/A",
"idCheckDocumentValidation": "N/A",
"idCheckHologram": "N/A",
"idCheckMRZcode": "N/A",
"idCheckMicroprint": "N/A",
"idCheckSecurityFeatures": "N/A",
"idCheckSignature": "N/A",
"idCountry": "IND",
"idScanImage": "https://lon.netverify.com/recognition/v1/idscan/cf657461-bf54-46dd-93e4-2496d6f115b1/front",
"idScanImageBackside": "https://lon.netverify.com/recognition/v1/idscan/cf657461-bf54-46dd-93e4-2496d6f115b1/back",
"idScanImageFace": "https://lon.netverify.com/recognition/v1/idscan/cf657461-bf54-46dd-93e4-2496d6f115b1/face",
"idScanSource": "WEB_UPLOAD",
"idScanStatus": "ERROR",
"idType": "ID_CARD",
"jumioIdScanReference": "cf657461-bf54-46dd-93e4-2496d6f115b1",
"merchantIdScanReference": "52c90d23cf6847edbac663bb770a0f58",
"rejectReason":
{"rejectReasonCode": "201", "rejectReasonDescription": "NO_DOCUMENT",
"rejectReasonDetails": []},
"transactionDate": "2020-03-06T12:02:56.028Z",
"verificationStatus": JumioVerificationStatus.APPROVED_VERIFIED.value
}),
"pathParameters": {
"verification_id": test_verification_id
}
}
callback(event, None)
verification = verification_repository.session.query(VerificationModel).first()
if verification is None:
assert False
self.assertEqual(verification.entity_id, username)
self.assertEqual(verification.status, VerificationStatus.APPROVED.value)
self.assertEqual(verification.requestee, username)
self.assertEqual(verification.verification_type, "JUMIO")
assert (verification.id is not None or verification.id != "") and verification.id == test_verification_id
jumio_verfication = jumio_repository.session.query(JumioVerificationModel).first()
if jumio_verfication is None:
assert False
self.assertEqual(jumio_verfication.jumio_reference_id, '123-13-13-134-1234')
self.assertEqual(jumio_verfication.redirect_url, test_initiate_redirect_url)
self.assertEqual(jumio_verfication.transaction_status, JumioTransactionStatus.DONE.value)
self.assertEqual(jumio_verfication.verification_status, JumioVerificationStatus.APPROVED_VERIFIED.value)
self.assertEqual(jumio_verfication.username, username)
self.assertEqual(jumio_verfication.user_reference_id, sha1(username.encode("utf-8")).hexdigest())
@patch("common.boto_utils.BotoUtils", return_value=Mock(get_ssm_parameter=Mock(return_value="123"),
invoke_lambda=Mock(return_value={"statusCode": 201})))
def test_jumio_callback_rejected_one(self, mock_boto_utils):
status = JumioVerificationStatus.DENIED_FRAUD.value
test_verification_id = "9f2c90119cb7424b8d69319ce211ddfc"
verification_type = "JUMIO"
username = "<EMAIL>"
current_time = datetime.utcnow()
verification_repository.add_item(VerificationModel(
id=test_verification_id, verification_type=verification_type, entity_id=username, status="PENDING",
requestee=username, created_at=current_time, updated_at=current_time
))
jumio_repository.add_item(JumioVerificationModel(
verification_id=test_verification_id, username=username, jumio_reference_id="123-13-13-134-<PASSWORD>",
user_reference_id=sha1(username.encode("utf-8")).hexdigest(),
redirect_url="https://yourcompany.netverify.com/web/v4/app?locale=en-GB&authorizationToken=xxx",
transaction_status=JumioTransactionStatus.SUCCESS.value,
verification_status=JumioVerificationStatus.PENDING.value, transaction_date=current_time,
created_at=current_time
))
event = {
"body": urlencode(
{
"callBackType": "NETVERIFYID",
"callbackDate": "2020-03-06T12:10:50.835Z",
"clientIp": "172.16.58.3",
"customerId": "14bb645983cafeb2bb14bf4df2dff297182aef9f",
"firstAttemptDate": "2020-03-06T12:10:31.339Z",
"idCheckDataPositions": "N/A",
"idCheckDocumentValidation": "N/A",
"idCheckHologram": "N/A",
"idCheckMRZcode": "N/A",
"idCheckMicroprint": "N/A",
"idCheckSecurityFeatures": "N/A",
"idCheckSignature": "N/A",
"idCountry": "IND",
"idScanImage": "https://lon.netverify.com/recognition/v1/idscan/cf657461-bf54-46dd-93e4-2496d6f115b1/front",
"idScanImageBackside": "https://lon.netverify.com/recognition/v1/idscan/cf657461-bf54-46dd-93e4-2496d6f115b1/back",
"idScanImageFace": "https://lon.netverify.com/recognition/v1/idscan/cf657461-bf54-46dd-93e4-2496d6f115b1/face",
"idScanSource": "WEB_UPLOAD",
"idScanStatus": "ERROR",
"idType": "ID_CARD",
"jumioIdScanReference": "cf657461-bf54-46dd-93e4-2496d6f115b1",
"merchantIdScanReference": "52c90d23cf6847edbac663bb770a0f58",
"rejectReason": json.dumps(reject_reason),
"transactionDate": "2020-03-06T12:02:56.028Z",
"verificationStatus": status
}),
"pathParameters": {
"verification_id": test_verification_id
}
}
callback(event, None)
verification = verification_repository.session.query(VerificationModel).first()
if verification is None:
assert False
self.assertEqual(verification.entity_id, username)
self.assertEqual(verification.status, VerificationStatus.REJECTED.value)
self.assertEqual(verification.requestee, username)
self.assertEqual(verification.verification_type, "JUMIO")
assert (verification.id is not None or verification.id != "") and verification.id == test_verification_id
jumio_verfication = jumio_repository.session.query(JumioVerificationModel).first()
if jumio_verfication is None:
assert False
self.assertEqual(jumio_verfication.jumio_reference_id, '123-13-13-134-1234')
self.assertEqual(jumio_verfication.redirect_url, test_initiate_redirect_url)
self.assertEqual(jumio_verfication.transaction_status, JumioTransactionStatus.DONE.value)
self.assertEqual(jumio_verfication.verification_status, status)
self.assertEqual(jumio_verfication.username, username)
self.assertEqual(jumio_verfication.user_reference_id, sha1(username.encode("utf-8")).hexdigest())
self.assertEqual(json.dumps(jumio_verfication.reject_reason), json.dumps(reject_reason))
@patch("common.boto_utils.BotoUtils", return_value=Mock(get_ssm_parameter=Mock(return_value="123"),
invoke_lambda=Mock(return_value={"statusCode": 201})))
def test_jumio_callback_rejected_two(self, mock_boto_utils):
status = JumioVerificationStatus.DENIED_UNSUPPORTED_ID_TYPE.value
test_verification_id = "9f2c90119cb7424b8d69319ce211ddfc"
verification_type = "JUMIO"
username = "<EMAIL>"
current_time = datetime.utcnow()
verification_repository.add_item(VerificationModel(
id=test_verification_id, verification_type=verification_type, entity_id=username, status="PENDING",
requestee=username, created_at=current_time, updated_at=current_time
))
jumio_repository.add_item(JumioVerificationModel(
verification_id=test_verification_id, username=username, jumio_reference_id="123-13-13-134-1234",
user_reference_id=sha1(username.encode("utf-8")).hexdigest(),
redirect_url="https://yourcompany.netverify.com/web/v4/app?locale=en-GB&authorizationToken=xxx",
transaction_status=JumioTransactionStatus.SUCCESS.value,
verification_status=JumioVerificationStatus.PENDING.value, transaction_date=current_time,
created_at=current_time
))
event = {
"body": urlencode(
{
"callBackType": "NETVERIFYID",
"callbackDate": "2020-03-06T12:10:50.835Z",
"clientIp": "172.16.58.3",
"customerId": "14bb645983cafeb2bb14bf4df2dff297182aef9f",
"firstAttemptDate": "2020-03-06T12:10:31.339Z",
"idCheckDataPositions": "N/A",
"idCheckDocumentValidation": "N/A",
"idCheckHologram": "N/A",
"idCheckMRZcode": "N/A",
"idCheckMicroprint": "N/A",
"idCheckSecurityFeatures": "N/A",
"idCheckSignature": "N/A",
"idCountry": "IND",
"idScanImage": "https://lon.netverify.com/recognition/v1/idscan/cf657461-bf54-46dd-93e4-2496d6f115b1/front",
"idScanImageBackside": "https://lon.netverify.com/recognition/v1/idscan/cf657461-bf54-46dd-93e4-2496d6f115b1/back",
"idScanImageFace": "https://lon.netverify.com/recognition/v1/idscan/cf657461-bf54-46dd-93e4-2496d6f115b1/face",
"idScanSource": "WEB_UPLOAD",
"idScanStatus": "ERROR",
"idType": "ID_CARD",
"jumioIdScanReference": "cf657461-bf54-46dd-93e4-2496d6f115b1",
"merchantIdScanReference": "52c90d23cf6847edbac663bb770a0f58",
"rejectReason": "N/A",
"transactionDate": "2020-03-06T12:02:56.028Z",
"verificationStatus": status
}),
"pathParameters": {
"verification_id": test_verification_id
}
}
callback(event, None)
verification = verification_repository.session.query(VerificationModel).first()
if verification is None:
assert False
self.assertEqual(verification.entity_id, username)
self.assertEqual(verification.status, VerificationStatus.REJECTED.value)
self.assertEqual(verification.requestee, username)
self.assertEqual(verification.verification_type, "JUMIO")
assert (verification.id is not None or verification.id != "") and verification.id == test_verification_id
jumio_verfication = jumio_repository.session.query(JumioVerificationModel).first()
if jumio_verfication is None:
assert False
self.assertEqual(jumio_verfication.jumio_reference_id, '123-13-13-134-1234')
self.assertEqual(jumio_verfication.redirect_url, test_initiate_redirect_url)
self.assertEqual(jumio_verfication.transaction_status, JumioTransactionStatus.DONE.value)
self.assertEqual(jumio_verfication.verification_status, status)
self.assertEqual(jumio_verfication.username, username)
self.assertEqual(jumio_verfication.user_reference_id, sha1(username.encode("utf-8")).hexdigest())
@patch("common.boto_utils.BotoUtils", return_value=Mock(get_ssm_parameter=Mock(return_value="123"),
invoke_lambda=Mock(return_value={"statusCode": 201})))
def test_jumio_callback_rejected_three(self, mock_boto_utils):
status = JumioVerificationStatus.DENIED_UNSUPPORTED_ID_COUNTRY.value
test_verification_id = "9f2c90119cb7424b8d69319ce211ddfc"
verification_type = "JUMIO"
username = "<EMAIL>"
current_time = datetime.utcnow()
verification_repository.add_item(VerificationModel(
id=test_verification_id, verification_type=verification_type, entity_id=username, status="PENDING",
requestee=username, created_at=current_time, updated_at=current_time
))
jumio_repository.add_item(JumioVerificationModel(
verification_id=test_verification_id, username=username, jumio_reference_id="123-13-13-134-1234",
user_reference_id=sha1(username.encode("utf-8")).hexdigest(),
redirect_url="https://yourcompany.netverify.com/web/v4/app?locale=en-GB&authorizationToken=xxx",
transaction_status=JumioTransactionStatus.SUCCESS.value,
verification_status=JumioVerificationStatus.PENDING.value, transaction_date=current_time,
created_at=current_time
))
event = {
"body": urlencode(
{
"callBackType": "NETVERIFYID",
"callbackDate": "2020-03-06T12:10:50.835Z",
"clientIp": "172.16.58.3",
"customerId": "14bb645983cafeb2bb14bf4df2dff297182aef9f",
"firstAttemptDate": "2020-03-06T12:10:31.339Z",
"idCheckDataPositions": "N/A",
"idCheckDocumentValidation": "N/A",
"idCheckHologram": "N/A",
"idCheckMRZcode": "N/A",
"idCheckMicroprint": "N/A",
"idCheckSecurityFeatures": "N/A",
"idCheckSignature": "N/A",
"idCountry": "IND",
"idScanImage": "https://lon.netverify.com/recognition/v1/idscan/cf657461-bf54-46dd-93e4-2496d6f115b1/front",
"idScanImageBackside": "https://lon.netverify.com/recognition/v1/idscan/cf657461-bf54-46dd-93e4-2496d6f115b1/back",
"idScanImageFace": "https://lon.netverify.com/recognition/v1/idscan/cf657461-bf54-46dd-93e4-2496d6f115b1/face",
"idScanSource": "WEB_UPLOAD",
"idScanStatus": "ERROR",
"idType": "ID_CARD",
"jumioIdScanReference": "cf657461-bf54-46dd-93e4-2496d6f115b1",
"merchantIdScanReference": "52c90d23cf6847edbac663bb770a0f58",
"rejectReason": "N/A",
"transactionDate": "2020-03-06T12:02:56.028Z",
"verificationStatus": status
}),
"pathParameters": {
"verification_id": test_verification_id
}
}
callback(event, None)
verification = verification_repository.session.query(VerificationModel).first()
if verification is None:
assert False
self.assertEqual(verification.entity_id, username)
self.assertEqual(verification.status, VerificationStatus.REJECTED.value)
self.assertEqual(verification.requestee, username)
self.assertEqual(verification.verification_type, "JUMIO")
assert (verification.id is not None or verification.id != "") and verification.id == test_verification_id
jumio_verfication = jumio_repository.session.query(JumioVerificationModel).first()
if jumio_verfication is None:
assert False
self.assertEqual(jumio_verfication.jumio_reference_id, '123-13-13-134-1234')
self.assertEqual(jumio_verfication.redirect_url, test_initiate_redirect_url)
self.assertEqual(jumio_verfication.transaction_status, JumioTransactionStatus.DONE.value)
self.assertEqual(jumio_verfication.verification_status, status)
self.assertEqual(jumio_verfication.username, username)
self.assertEqual(jumio_verfication.user_reference_id, sha1(username.encode("utf-8")).hexdigest())
self.tearDown()
@patch("common.boto_utils.BotoUtils", return_value=Mock(get_ssm_parameter=Mock(return_value="123"),
invoke_lambda=Mock(return_value={"statusCode": 201})))
def test_jumio_callback_failed_one(self, mock_boto_utils):
test_verification_id = "9f2c90119cb7424b8d69319ce211ddfc"
verification_type = "JUMIO"
username = "<EMAIL>"
current_time = datetime.utcnow()
verification_repository.add_item(VerificationModel(
id=test_verification_id, verification_type=verification_type, entity_id=username, status="PENDING",
requestee=username, created_at=current_time, updated_at=current_time
))
jumio_repository.add_item(JumioVerificationModel(
verification_id=test_verification_id, username=username, jumio_reference_id="123-13-13-134-1234",
user_reference_id=sha1(username.encode("utf-8")).hexdigest(),
redirect_url="https://yourcompany.netverify.com/web/v4/app?locale=en-GB&authorizationToken=xxx",
transaction_status=JumioTransactionStatus.SUCCESS.value,
verification_status=JumioVerificationStatus.PENDING.value, transaction_date=current_time,
created_at=current_time
))
event = {
"body": urlencode(
{
"callBackType": "NETVERIFYID",
"callbackDate": "2020-03-06T12:10:50.835Z",
"clientIp": "172.16.58.3",
"customerId": "1<PASSWORD>",
"firstAttemptDate": "2020-03-06T12:10:31.339Z",
"idCheckDataPositions": "N/A",
"idCheckDocumentValidation": "N/A",
"idCheckHologram": "N/A",
"idCheckMRZcode": "N/A",
"idCheckMicroprint": "N/A",
"idCheckSecurityFeatures": "N/A",
"idCheckSignature": "N/A",
"idCountry": "IND",
"idScanImage": "https://lon.netverify.com/recognition/v1/idscan/cf657461-bf54-46dd-93e4-2496d6f115b1/front",
"idScanImageBackside": "https://lon.netverify.com/recognition/v1/idscan/cf657461-bf54-46dd-93e4-2496d6f115b1/back",
"idScanImageFace": "https://lon.netverify.com/recognition/v1/idscan/cf657461-bf54-46dd-93e4-2496d6f115b1/face",
"idScanSource": "WEB_UPLOAD",
"idScanStatus": "ERROR",
"idType": "ID_CARD",
"jumioIdScanReference": "cf657461-bf54-46dd-93e4-2496d6f115b1",
"merchantIdScanReference": "52c90d23cf6847edbac663bb770a0f58",
"rejectReason": json.dumps(reject_reason),
"transactionDate": "2020-03-06T12:02:56.028Z",
"verificationStatus": JumioVerificationStatus.ERROR_NOT_READABLE_ID.value
}),
"pathParameters": {
"verification_id": test_verification_id
}
}
callback(event, None)
verification = verification_repository.session.query(VerificationModel).first()
if verification is None:
assert False
self.assertEqual(verification.entity_id, username)
self.assertEqual(verification.status, VerificationStatus.FAILED.value)
self.assertEqual(verification.requestee, username)
self.assertEqual(verification.verification_type, "JUMIO")
assert (verification.id is not None or verification.id != "") and verification.id == test_verification_id
jumio_verfication = jumio_repository.session.query(JumioVerificationModel).first()
if jumio_verfication is None:
assert False
self.assertEqual(jumio_verfication.jumio_reference_id, '123-13-13-134-1234')
self.assertEqual(jumio_verfication.redirect_url, test_initiate_redirect_url)
self.assertEqual(jumio_verfication.transaction_status, JumioTransactionStatus.DONE.value)
self.assertEqual(jumio_verfication.verification_status, JumioVerificationStatus.ERROR_NOT_READABLE_ID.value)
self.assertEqual(jumio_verfication.username, | |
# repository: jeetsukumaran/spdw (extraction marker replaced with a comment; the raw tag was not valid Python)
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import math
import os
import collections
import json
import sys
import argparse
import inspect
import itertools
import io
import dendropy
class Partition(object):
    """One candidate species partition with its estimated probabilities.

    ``species_leafsets`` is stored as a frozenset of frozensets of leaf
    labels, so partitions compare and hash by content.
    """

    @staticmethod
    def build_species_leafsets(d):
        """Normalize an iterable of leaf-label groups; pass ``None`` through."""
        if d is None:
            return None
        return frozenset(frozenset(group) for group in d)

    def __init__(self, **kwargs):
        # All fields are optional and default to None; keyword names mirror
        # the JSON keys of the estimation results.
        self.part_id = kwargs.get("id", None)
        self.log_probability = kwargs.get("log_probability", None)
        self.probability = kwargs.get("probability", None)
        self.probability_given_constraints = kwargs.get("probability_given_constraints", None)
        self.log_probability_given_constraints = kwargs.get("log_probability_given_constraints", None)
        self.cumulative_probability = kwargs.get("cumulative_probability", None)
        self.cumulative_probability_given_constraints = kwargs.get("cumulative_probability_given_constraints", None)
        self.is_in_confidence_interval = kwargs.get("is_in_confidence_interval", None)
        self.species_leafsets = Partition.build_species_leafsets(kwargs.get("species_leafsets", None))

    def is_conspecific(self, lineage1, lineage2):
        """Return True when both lineages occur in the same species leafset."""
        return any(
            lineage1 in species and lineage2 in species
            for species in self.species_leafsets
        )

    def __len__(self):
        # Number of species in this partition.
        return len(self.species_leafsets)
class Evaluator(object):
    def __init__(self):
        # All state (config, results, trees) is attached lazily in execute().
        pass
    def execute(self, args):
        """Run the evaluation described by parsed command-line ``args``.

        Loads the configuration and analysis results, then either (a) when
        ``args.is_evaluate_marginal`` is set, computes marginal
        conspecificity results for every distinct pair of unconstrained
        lineages (requires ``args.lineage_tree_filepath``), or (b) runs the
        standard whole-partition performance assessment. The collected rows
        are written as a report to stdout.
        """
        self.config_path = args.configuration_filepath
        if not self.config_path:
            sys.exit("Path to configuration file needs to be specified")
        self.analysis_results_filepath = args.analysis_results_filepath
        if not self.analysis_results_filepath:
            sys.exit("Path to analysis results file needs to be specified")
        self.report_dest = sys.stdout
        self.load_data()
        perf_data_rows = []
        if args.is_evaluate_marginal:
            if args.lineage_tree_filepath is None:
                sys.exit("Require path to lineage tree filepath to analyze marginal probabilities")
            # expand ~ and environment variables in the user-supplied path
            with open(os.path.expandvars(os.path.expanduser(args.lineage_tree_filepath))) as src:
                self.read_lineage_tree(src, schema="nexus")
            self.load_estimated_partitions()
            for lineage_pair in self.all_distinct_pairs_of_unconstrained_lineages():
                # one report row per lineage pair
                perf_data = collections.OrderedDict()
                self.store_basic_features(perf_data)
                perf_data.update(self.calc_lineage_pair_features(*lineage_pair))
                results = self.calc_marginal_probability_of_conspecificity(*lineage_pair)
                for key in [
                        "lineage_pair_is_true_conspecific",
                        "lineage_pair_conspecificity_marginal_probability",
                        "lineage_pair_conspecificity_marginal_probability_given_constraints",
                        "lineage_pair_nonconspecificity_marginal_probability",
                        "lineage_pair_nonconspecificity_marginal_probability_given_constraints",
                        ]:
                    perf_data[key] = results[key]
                perf_data_rows.append(perf_data)
        else:
            # single row: standard assessment of the whole partition set
            perf_data = collections.OrderedDict()
            self.store_basic_features(perf_data)
            self.standard_performance_assessment(perf_data)
            perf_data_rows.append(perf_data)
        assert perf_data_rows
        self.report(perf_data_rows)
def load_data(self):
with open(self.config_path) as src:
self.config = json.load(src)
with open(self.analysis_results_filepath) as src:
self.estimation_results = json.load(src)
self.set_true_partition(species_leafsets=self.config["test_info"]["true_species_leafsets"])
    def set_true_partition(self, species_leafsets):
        """Record the true species partition the estimates are scored against."""
        self.true_partition = Partition(species_leafsets=species_leafsets)
def load_estimated_partitions(self):
self.partitions = [Partition(**p) for p in self.estimation_results["partitions"]]
return self.partitions
def read_lineage_tree(self, src, schema="nexus"):
    """Load the lineage tree from the open file-like object *src*.

    The tree is forced to be rooted so the bipartition/age computations in
    set_lineage_tree() are well-defined.
    """
    self.set_lineage_tree(
        file=src,
        schema=schema,
        rooting="force-rooted")
def set_lineage_tree(self, **kwargs):
    """Parse the lineage tree and precompute the structures used by the
    pairwise feature calculations.

    Keyword arguments are forwarded verbatim to dendropy.Tree.get().
    """
    self.lineage_tree = dendropy.Tree.get(**kwargs)
    # Bipartitions feed is_span_root(); node ages feed the MRCA-age feature.
    self.lineage_tree.encode_bipartitions()
    self.lineage_tree.calc_node_ages()
    # self.lineage_tree_label_node_map = {taxon.label:taxon for taxon in self.lineage_tree.taxon_namespace}
    # Leaf label -> leaf node, used throughout for taxon lookups.
    self.lineage_tree_label_node_map = {nd.taxon.label:nd for nd in self.lineage_tree.leaf_node_iter()}
    self.phylogenetic_distance_matrix = self.lineage_tree.phylogenetic_distance_matrix(is_store_path_edges=False)
def all_distinct_pairs_of_unconstrained_lineages(self):
    """Yield every unordered pair of unconstrained lineage labels."""
    labels = self.config["test_info"]["unconstrained_lineages"]
    yield from itertools.combinations(labels, 2)
def is_span_root(self, lineage1, lineage2):
    """Return True if the path between the two leaves crosses the root.

    The pair spans the root iff the leaves are nested in *different* child
    clades of the root.  The two-way unpack assumes a bifurcating root
    (trees are read with rooting="force-rooted").
    """
    n1 = self.lineage_tree_label_node_map[lineage1]
    n2 = self.lineage_tree_label_node_map[lineage2]
    assert n1 is not n2
    r_left, r_right = self.lineage_tree.seed_node._child_nodes
    if n1.bipartition.is_nested_within(r_left.bipartition):
        return n2.bipartition.is_nested_within(r_right.bipartition)
    else:
        return n2.bipartition.is_nested_within(r_left.bipartition)
def calc_lineage_pair_features(self, lineage1, lineage2):
    """Compute tree-based features for a pair of lineages.

    Returns a dict with the four pairwise patristic distances (all
    combinations of weighted/unweighted x normalized/unnormalized), the age
    of the pair's MRCA, and whether the pair spans the root.

    The original version repeated the taxon lookups ten times; the lookups
    are hoisted and the four distance calls are driven from a table.
    """
    taxon1 = self.lineage_tree_label_node_map[lineage1].taxon
    taxon2 = self.lineage_tree_label_node_map[lineage2].taxon
    pdm = self.phylogenetic_distance_matrix
    result = {}
    # Key names must stay exactly as downstream report columns expect them.
    for key, is_weighted, is_normalized in (
            ("lineage_pair_unnormalized_weighted_distance", True, False),
            ("lineage_pair_normalized_weighted_distance", True, True),
            ("lineage_pair_unnormalized_unweighted_distance", False, False),
            ("lineage_pair_normalized_unweighted_distance", False, True),
            ):
        result[key] = pdm.distance(
            taxon1,
            taxon2,
            is_weighted_edge_distances=is_weighted,
            is_normalize_by_tree_size=is_normalized)
    mrca = pdm.mrca(taxon1, taxon2)
    result["lineage_pair_mrca_age"] = mrca.age
    result["lineage_pair_is_span_root"] = self.is_span_root(lineage1, lineage2)
    return result
def calc_marginal_probability_of_conspecificity(self, lineage1, lineage2):
    """Sum (non)conspecificity probabilities for the pair over all estimated
    partitions, and note whether the pair is truly conspecific."""
    conspecific_partitions = []
    consp_prob = 0.0
    consp_prob_given = 0.0
    nonconsp_prob = 0.0
    nonconsp_prob_given = 0.0
    for partition in self.partitions:
        if partition.is_conspecific(lineage1, lineage2):
            conspecific_partitions.append(partition)
            consp_prob += partition.probability
            consp_prob_given += partition.probability_given_constraints
        else:
            nonconsp_prob += partition.probability
            nonconsp_prob_given += partition.probability_given_constraints
    return {
        "lineage_pair_conspecificity_marginal_probability": consp_prob,
        "lineage_pair_conspecificity_marginal_probability_given_constraints": consp_prob_given,
        "lineage_pair_nonconspecificity_marginal_probability": nonconsp_prob,
        "lineage_pair_nonconspecificity_marginal_probability_given_constraints": nonconsp_prob_given,
        "lineage_pair_conspecific_partitions": conspecific_partitions,
        "lineage_pair_is_true_conspecific": self.true_partition.is_conspecific(lineage1, lineage2),
    }
def store_basic_features(self, perf_data):
    """Copy run-level metadata (shared by every output row) into *perf_data*.

    Insertion order matters: report() emits columns in dict order.
    """
    estimation = self.estimation_results
    test_info = self.config["test_info"]
    perf_data["batch_id"] = estimation["batch_id"]
    perf_data["root_age"] = estimation["root_age"]
    perf_data["num_tips"] = estimation["num_tips"]
    perf_data["total_num_partitions"] = estimation["num_partitions"]
    perf_data["true_speciation_completion_rate"] = test_info["true_speciation_completion_rate"]
    perf_data["true_num_species"] = len(self.true_partition)
    # Constrained = pre-assigned to a species; unconstrained = left for the
    # estimator to place.
    perf_data["num_constrained_species"] = test_info["species_partition_estimation_num_constrained_species"]
    perf_data["num_constrained_lineages"] = test_info["species_partition_estimation_num_constrained_lineages"]
    perf_data["num_unconstrained_lineages"] = test_info["species_partition_estimation_num_unconstrained_lineages"]
    perf_data["estimated_speciation_completion_rate"] = estimation["speciation_completion_rate"]
    perf_data["speciation_completion_rate_source"] = estimation["speciation_completion_rate_source"]
def standard_performance_assessment(self, perf_data):
    """Populate *perf_data* with accuracy measures for the inferred partitions.

    Always records the first-listed partition's statistics, then scans the
    partition list for the true partition and records its probabilities and
    rank.

    Raises
    ------
    ValueError
        If the true partition is not among the estimated partitions.
    """
    perf_data["total_num_partitions_in_confidence_interval"] = self.estimation_results["num_partitions_in_confidence_interval"]
    perf_data["inferred_partition_num_species"] = len(self.estimation_results["partitions"][0]["species_leafsets"])
    # perf_data["inferred_partition_log_probability"] = self.estimation_results["partitions"][0]["log_probability"]
    perf_data["inferred_partition_probability"] = self.estimation_results["partitions"][0]["probability"]
    # perf_data["inferred_partition_log_probability_given_constraints"] = self.estimation_results["partitions"][0]["log_probability_given_constraints"]
    perf_data["inferred_partition_probability_given_constraints"] = self.estimation_results["partitions"][0]["probability_given_constraints"]
    for partition_idx, partition_info in enumerate(self.estimation_results["partitions"]):
        current_partition = Partition(**partition_info)
        # Partitions match when their species leafsets are identical.
        if current_partition.species_leafsets == self.true_partition.species_leafsets:
            # perf_data["true_partition_log_probability"] = current_partition.log_probability
            perf_data["true_partition_probability"] = current_partition.probability
            perf_data["true_partition_cumulative_probability"] = current_partition.cumulative_probability
            # perf_data["true_partition_log_probability_given_constraints"] = current_partition.log_probability_given_constraints
            perf_data["true_partition_probability_given_constraints"] = current_partition.probability_given_constraints
            perf_data["true_partition_cumulative_probability_given_constraints"] = current_partition.cumulative_probability_given_constraints
            # Index 0 is treated as the preferred partition — presumably the
            # list is sorted best-first; confirm against the producer.
            if partition_idx == 0:
                perf_data["is_true_partition_preferred"] = True
            else:
                perf_data["is_true_partition_preferred"] = False
            perf_data["is_true_partition_in_confidence_interval"] = current_partition.is_in_confidence_interval
            break
    else:
        # for-else: loop finished without break, i.e. no match was found.
        raise ValueError("True partition not found in results")
    return perf_data
def report(self, perf_data_rows):
    """Write *perf_data_rows* as a tab-separated table: header line from the
    first row's keys, then one line per row.

    Booleans are rendered as TRUE/FALSE so the output loads cleanly in R.
    """
    delimiter = "\t"
    self.report_dest.write(delimiter.join(perf_data_rows[0].keys()) + "\n")
    for perf_data in perf_data_rows:
        cells = [str(v).upper() if isinstance(v, bool) else str(v)
                 for v in perf_data.values()]
        self.report_dest.write(delimiter.join(cells) + "\n")
class TestRunner(object):
def __init__(self):
    """Set up the logger and locate the test-data directory next to this file."""
    # Log helper: prefixes each message with the *calling* function's name.
    self.test_log = lambda msg: sys.stdout.write("-[{}]: {}\n".format(inspect.stack()[1][3], msg))
    # BUG FIX: os.path.abspath(__file__) is the file itself, so joining onto
    # it produced ".../file.py/_test_data".  The data directory lives beside
    # the file — join onto its dirname instead.
    self.test_data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "_test_data")
def run_tests(self):
    """Execute every unit test, in a fixed order."""
    for test in (
            self.test_is_conspecific,
            self.test_marginal_probability_of_conspecificity,
            self.test_is_span_root,
            self.test_lineage_pair_distances,
            self.test_all_distinct_pairs_of_unconstrained_lineages,
            ):
        test()
def test_is_conspecific(self):
    """Partition.is_conspecific() must agree with shared-leafset membership
    for every ordered pair of distinct leaves (56 pairs over 8 leaves)."""
    leafsets = [
        ["a", "b", "c"],
        ["d", "e", "f"],
        ["g"],
        ["h"],
    ]
    partition = Partition(species_leafsets=leafsets)
    for x, y in itertools.permutations("abcdefgh", 2):
        expected = any(x in leafset and y in leafset for leafset in leafsets)
        assert bool(partition.is_conspecific(x, y)) is expected
    self.test_log("OK")
def test_is_span_root(self):
    """is_span_root() must be True exactly when the two leaves fall on
    opposite sides of the root ({a,b,c} vs {d,e,f}) of a fixed 6-leaf tree,
    for every ordered pair."""
    tree_src = io.StringIO("(((a:1.25, b:1.25):1.25, c:2.5):1.5, (d:2.25, (e:0.5,f:0.5):1.75):1.75):2.5;")
    ev = Evaluator()
    ev.read_lineage_tree(src=tree_src, schema="newick")
    left_clade = set("abc")
    for x, y in itertools.permutations("abcdef", 2):
        spans = (x in left_clade) != (y in left_clade)
        assert ev.is_span_root(x, y) is spans
    self.test_log("OK")
def test_lineage_pair_distances(self):
    """calc_lineage_pair_features() must reproduce hand-computed distances,
    MRCA ages, and root-spanning flags on a fixed 6-leaf tree."""
    ev = Evaluator()
    tree_src = io.StringIO("(((a:1.25, b:1.25):1.25, c:2.5):1.5, (d:2.25, (e:0.5,f:0.5):1.75):1.75):2.5;")
    ev.read_lineage_tree(src=tree_src, schema="newick")
    # Expected feature values, keyed by unordered leaf pair.
    expected = {
        frozenset({'a', 'b'}): {'lineage_pair_unnormalized_weighted_distance': 2.5, 'lineage_pair_normalized_weighted_distance': 0.14705882352941177, 'lineage_pair_unnormalized_unweighted_distance': 2, 'lineage_pair_normalized_unweighted_distance': 0.18181818181818182, 'lineage_pair_mrca_age': 1.25, 'lineage_pair_is_span_root': False},
        frozenset({'a', 'c'}): {'lineage_pair_unnormalized_weighted_distance': 5.0, 'lineage_pair_normalized_weighted_distance': 0.29411764705882354, 'lineage_pair_unnormalized_unweighted_distance': 3, 'lineage_pair_normalized_unweighted_distance': 0.2727272727272727, 'lineage_pair_mrca_age': 2.5, 'lineage_pair_is_span_root': False},
        frozenset({'a', 'd'}): {'lineage_pair_unnormalized_weighted_distance': 8.0, 'lineage_pair_normalized_weighted_distance': 0.47058823529411764, 'lineage_pair_unnormalized_unweighted_distance': 5, 'lineage_pair_normalized_unweighted_distance': 0.45454545454545453, 'lineage_pair_mrca_age': 4.0, 'lineage_pair_is_span_root': True},
        frozenset({'a', 'e'}): {'lineage_pair_unnormalized_weighted_distance': 8.0, 'lineage_pair_normalized_weighted_distance': 0.47058823529411764, 'lineage_pair_unnormalized_unweighted_distance': 6, 'lineage_pair_normalized_unweighted_distance': 0.5454545454545454, 'lineage_pair_mrca_age': 4.0, 'lineage_pair_is_span_root': True},
        frozenset({'a', 'f'}): {'lineage_pair_unnormalized_weighted_distance': 8.0, 'lineage_pair_normalized_weighted_distance': 0.47058823529411764, 'lineage_pair_unnormalized_unweighted_distance': 6, 'lineage_pair_normalized_unweighted_distance': 0.5454545454545454, 'lineage_pair_mrca_age': 4.0, 'lineage_pair_is_span_root': True},
        frozenset({'b', 'c'}): {'lineage_pair_unnormalized_weighted_distance': 5.0, 'lineage_pair_normalized_weighted_distance': 0.29411764705882354, 'lineage_pair_unnormalized_unweighted_distance': 3, 'lineage_pair_normalized_unweighted_distance': 0.2727272727272727, 'lineage_pair_mrca_age': 2.5, 'lineage_pair_is_span_root': False},
        frozenset({'b', 'd'}): {'lineage_pair_unnormalized_weighted_distance': 8.0, 'lineage_pair_normalized_weighted_distance': 0.47058823529411764, 'lineage_pair_unnormalized_unweighted_distance': 5, 'lineage_pair_normalized_unweighted_distance': 0.45454545454545453, 'lineage_pair_mrca_age': 4.0, 'lineage_pair_is_span_root': True},
        frozenset({'b', 'e'}): {'lineage_pair_unnormalized_weighted_distance': 8.0, 'lineage_pair_normalized_weighted_distance': 0.47058823529411764, 'lineage_pair_unnormalized_unweighted_distance': 6, 'lineage_pair_normalized_unweighted_distance': 0.5454545454545454, 'lineage_pair_mrca_age': 4.0, 'lineage_pair_is_span_root': True},
        frozenset({'b', 'f'}): {'lineage_pair_unnormalized_weighted_distance': 8.0, 'lineage_pair_normalized_weighted_distance': 0.47058823529411764, 'lineage_pair_unnormalized_unweighted_distance': 6, 'lineage_pair_normalized_unweighted_distance': 0.5454545454545454, 'lineage_pair_mrca_age': 4.0, 'lineage_pair_is_span_root': True},
        frozenset({'c', 'd'}): {'lineage_pair_unnormalized_weighted_distance': 8.0, 'lineage_pair_normalized_weighted_distance': 0.47058823529411764, 'lineage_pair_unnormalized_unweighted_distance': 4, 'lineage_pair_normalized_unweighted_distance': 0.36363636363636365, 'lineage_pair_mrca_age': 4.0, 'lineage_pair_is_span_root': True},
        frozenset({'c', 'e'}): {'lineage_pair_unnormalized_weighted_distance': 8.0, 'lineage_pair_normalized_weighted_distance': 0.47058823529411764, 'lineage_pair_unnormalized_unweighted_distance': 5, 'lineage_pair_normalized_unweighted_distance': 0.45454545454545453, 'lineage_pair_mrca_age': 4.0, 'lineage_pair_is_span_root': True},
        frozenset({'f', 'c'}): {'lineage_pair_unnormalized_weighted_distance': 8.0, 'lineage_pair_normalized_weighted_distance': 0.47058823529411764, 'lineage_pair_unnormalized_unweighted_distance': 5, 'lineage_pair_normalized_unweighted_distance': 0.45454545454545453, 'lineage_pair_mrca_age': 4.0, 'lineage_pair_is_span_root': True},
        frozenset({'e', 'd'}): {'lineage_pair_unnormalized_weighted_distance': 4.5, 'lineage_pair_normalized_weighted_distance': 0.2647058823529412, 'lineage_pair_unnormalized_unweighted_distance': 3, 'lineage_pair_normalized_unweighted_distance': 0.2727272727272727, 'lineage_pair_mrca_age': 2.25, 'lineage_pair_is_span_root': False},
        frozenset({'f', 'd'}): {'lineage_pair_unnormalized_weighted_distance': 4.5, 'lineage_pair_normalized_weighted_distance': 0.2647058823529412, 'lineage_pair_unnormalized_unweighted_distance': 3, 'lineage_pair_normalized_unweighted_distance': 0.2727272727272727, 'lineage_pair_mrca_age': 2.25, 'lineage_pair_is_span_root': False},
        frozenset({'f', 'e'}): {'lineage_pair_unnormalized_weighted_distance': 1.0, 'lineage_pair_normalized_weighted_distance': 0.058823529411764705, 'lineage_pair_unnormalized_unweighted_distance': 2, 'lineage_pair_normalized_unweighted_distance': 0.18181818181818182, 'lineage_pair_mrca_age': 0.5, 'lineage_pair_is_span_root': False},
    }
    for lineage1, lineage2 in itertools.combinations("abcdef", 2):
        d = ev.calc_lineage_pair_features(lineage1, lineage2)
        key = frozenset([lineage1,lineage2])
        # print("{}: {}".format(key, d))
        for field in d:
            assert expected[key][field] == d[field]
    self.test_log("OK")
def test_marginal_probability_of_conspecificity(self):
results_d = {
"partitions": [
{ "id": 0, "conspecifics": set(["ab", "cd"]), "species_leafsets": [["a", "b"], ["c", "d"], ["e"]], "probability": 2, "probability_given_constraints": 3, }, # ab, cd
{ "id": 1, "conspecifics": set(["ab", ]), "species_leafsets": [["a", "b", "c",], ["d"], ["e"]], "probability": 4, "probability_given_constraints": 5, }, # ab
{ "id": 2, "conspecifics": set(["ab", "cd"]), "species_leafsets": [["a", "b", "c", "d"], ["e"]], "probability": 6, "probability_given_constraints": 7, }, # ab, cd
{ "id": 3, "conspecifics": set(["ab", "cd"]), "species_leafsets": [["a", "b", "e"], ["c", "d"]], "probability": 8, "probability_given_constraints": 9, }, # ab, cd
{ "id": 4, "conspecifics": set(["ab", "cd"]), "species_leafsets": [["a", "b", "c", "d"], ["e"]], "probability": 10, "probability_given_constraints": 11, }, # ab, cd
| |
<reponame>cabrittin/elegansbrainmap
"""
connectome.py
Connectome data structures. Inherits from iGraph
See http://igraph.org/python/
Required 3rd party packages:
igraph
csv
numpy
Author: <NAME>
"""
import igraph
import csv
import numpy as np
import ioaux
# NOTE(review): not referenced in this chunk — presumably a list of data-series
# labels to screen out (legacy 'old' series); confirm usage elsewhere.
SCREEN = ['old']
class Connectome:
    """
    Class to represent connectome data.

    Attributes
    ----------
    db : str
        database name
    size : int
        number of neurons in graph
    neurons : list
        list of neuron names
    C : Network
        Chemical connectivity graph
    E : Network
        Gap junction connectivity graph
    A : Network
        Adjacency (physical) connectivity graph
    D : Network
        Combined chemical and gap junction graph
    """

    def __init__(self, db, neurons):
        """
        Parameters:
        -----------
        db : str
            database name
        neurons : list
            list of neuron/cell/node names
        """
        self.db = db
        self.size = len(neurons)
        self.neurons = neurons
        self.C = None
        self.E = None
        self.A = None
        self.D = None

    def update_cells(self, _neurons):
        """
        Update the neuron/cell/node list and the cached size.

        Parameters:
        -----------
        _neurons : list
            list of neuron/cell/node names
        """
        self.neurons = _neurons
        self.size = len(_neurons)

    def remove_self_loops(self):
        """
        Remove self loops (and merge parallel edges) in graphs C and E.
        """
        if self.C: self.C.simplify(multiple=True, loops=True)
        if self.E: self.E.simplify(multiple=True, loops=True)

    def remove_cells(self, vertices):
        """
        Remove vertices from graphs C, E and A.

        Note: the surviving neuron list is rebuilt via set difference, so its
        order is not preserved.

        Parameters
        ----------
        vertices : list
            List of vertex names
        """
        self.neurons = list(set(self.neurons) - set(vertices))
        # Keep self.size consistent with the neuron list (the original
        # left it stale here, unlike update_cells()).
        self.size = len(self.neurons)
        if self.C: self.C.remove_vertices(vertices)
        if self.E: self.E.remove_vertices(vertices)
        if self.A: self.A.remove_vertices(vertices)

    def group_cells(self, groups, key='group'):
        """
        Group vertices based on list groups.  The grouping is identified by
        key (default 'group'), so multiple groupings can be assigned to the
        same graphs.

        Parameters
        ----------
        groups : list
            List of vertex memberships.  Order of list should correspond to
            index labels of vertices.
        key : str
            Group ID.  Multiple groups can be assigned and retrieved with
            the ID.
        """
        for graph in (self.C, self.E, self.A):
            if graph:
                graph.assign_membership(groups, key=key)
                graph.group_vertices(key)

    def load_chemical(self, synapses, add_poly=False):
        """
        Create the chemical connectivity graph (directed) from synapse rows.

        Parameters:
        -----------
        synapses : list
            synapse rows with format
            pre_cell, post_cell(s), synapse_weight, synapse_id, data_series
        add_poly : bool (default False)
            If true, tabulate polyadic and monadic synapse counts
        """
        self.C = Network(directed=True)
        self.C.add_vertices(self.neurons)
        self.load_edges(self.C, self.neurons, synapses, add_poly=add_poly)

    def load_electrical(self, synapses, add_poly=False):
        """
        Create the gap-junction connectivity graph (undirected) from synapse
        rows.

        Parameters:
        -----------
        synapses : list
            synapse rows with format
            pre_cell, post_cell(s), synapse_weight, synapse_id, data_series
        add_poly : bool (default False)
            If true, tabulate polyadic and monadic synapse counts
        """
        self.E = Network()
        self.E.add_vertices(self.neurons)
        self.load_edges(self.E, self.neurons, synapses, add_poly=add_poly)

    def load_edges(self, G, vertices, edges, add_poly=False):
        """
        Load edges between vertices into graph G, accumulating 'weight' and
        'count' attributes across repeated connections.

        Both endpoints of an edge must be present in *vertices* for the
        edge to be loaded.

        Parameters:
        -----------
        G : Network
            Graph into which edges will be loaded
        vertices : list
            list of admissible vertex names
        edges : list
            edge rows with format
            pre_cell, post_cell(s), synapse_weight, synapse_id, data_series
        add_poly : bool (default False)
            If true, tabulate polyadic ('Sp') and monadic ('S') synapses
        """
        eid = -1
        for e in edges:
            pre = ioaux.format.rm_brack(e[0])
            if pre not in vertices: continue
            # The post side may list several cells (a polyad); de-duplicate.
            _post = list(set(map(ioaux.format.rm_brack, e[1].split(','))))
            if add_poly:
                poly = 'S' if len(_post) == 1 else 'Sp'
            # NOTE: an earlier revision doubled N2U VC/DC weights here; both
            # branches had become identical, so the special case is dropped.
            w = int(e[2])
            for post in _post:
                if post not in vertices: continue
                if not G.are_connected(pre, post):
                    # First time this pair appears: create the edge with
                    # zeroed accumulators.  eid tracks the append position.
                    eid += 1
                    G.add_edges([(pre, post)])
                    G.es[eid]['weight'] = 0
                    G.es[eid]['count'] = 0
                    if add_poly:
                        G.es[eid]['S'] = 0
                        G.es[eid]['Sp'] = 0
                _eid = G.get_eid(pre, post)
                G.es[_eid]['weight'] += w
                G.es[_eid]['count'] += 1
                if add_poly:
                    G.es[_eid][poly] += 1

    def load_adjacency(self, adjacency, directed=False):
        """
        Load the adjacency (physical contact) graph from adjacency rows.

        Parameters:
        -----------
        adjacency : list
            rows with format cell1, cell2, amount_of_contact, section_number
        directed : bool
            If true, the adjacency graph will be made directed
        """
        self.A = Network(directed=directed)
        self.A.add_vertices(self.neurons)
        eid = -1
        for (i, j, weight, imgNum) in adjacency:
            # NOTE: an earlier revision doubled N2U 'VC' weights; the factor
            # had become 1, so no series-specific adjustment is applied.
            count = 1
            if not self.A.are_connected(i, j):
                eid += 1
                self.A.add_edges([(i, j)])
                self.A.es[eid]['weight'] = 0
                self.A.es[eid]['count'] = 0
            _eid = self.A.get_eid(i, j)
            self.A.es[_eid]['weight'] += weight
            self.A.es[_eid]['count'] += count

    def combine_chem_and_elec(self):
        """
        Combine the chemical and gap junction connectivity graphs into the
        attribute self.D (a copy of C with E's edges folded in).

        Gap-junction weights and counts are scaled by 0.5 — presumably to
        split each undirected edge across the two directions; confirm
        against downstream use.
        """
        self.D = self.C.copy()
        for e in self.E.es:
            i = self.E.vs[e.source]['name']
            j = self.E.vs[e.target]['name']
            w = 0.5 * e['weight']
            c = 0.5 * e['count']
            if self.D.are_connected(i, j):
                eid = self.D.get_eid(i, j)
                self.D.es[eid]['weight'] += w
                self.D.es[eid]['count'] += c
            else:
                self.D.add_edge(i, j, weight=w, count=c)

    def reduce_to_adjacency(self):
        """
        Restrict the chemical and gap junction graphs to the nodes and
        edges found in the adjacency graph.
        """
        if self.C: self.C.reduce_to(self.A)
        if self.E: self.E.reduce_to(self.A)
class Network(igraph.Graph):
"""
Class for storing graph data. Inherits class Graph
from package igraph. See http://igraph.org/python/
for attributes and API.
Methods
-------
get_edge(source,target)
Returns the igraph edge going from source to target
get_edge_attr(source,target,attr)
Returns attribute of igraph edge going from source to target
remove_vertices(_remove)
Remove vertices from graph in list _remove
assign_membership(membership,key)
Assign vertices membership to some group. key defines the name
of the membership. Vertices can be assigned to multiple memberships
Membrship can be a list or dictionary
assign_membership_list(membership,key)
Use if membership is a list
assign_membership_dict(membership,key)
Use if membership is a dictionary
group_vertices(key,combine_attrs='first')
Group vertices beased on membership key.
get_numpy_array(directed=False,vertex_order=None,edge_attr=None)
Return adjacency matrix as a numpy array.
symmetrize_matrix()
Will convert directed graph to undirected
get_neighbors(vertex,mode="OUT",vattr="name")
Return the neighbors of vertex
map_vertex_names(vmap)
Changes vertex names to names in dictionary vmap
compute_strength(weight='weight')
Computes normalized values of the edge weights
reduce_to(G)
Removes edges in graph not in graph G
threshold_edge_greater_than(eattr,val)
Removes edges with edge attribute less than or equal to val
threshold_edge_less_than(eattr,val)
Remove edges with edge attributes greater than or equal to val
"""
def __init__(self,directed=False):
    """Initialize an empty igraph-backed network.

    Parameters:
    -----------
    directed : bool (default False)
        If true, graph will be directed.
    """
    igraph.Graph.__init__(self,directed=directed)
def __deepcopy__(self, memo):
    """Deep-copy hook for the copy module.

    BUG FIX: the original called copy.deepcopy(self) here, which re-enters
    __deepcopy__ with a fresh memo every time — infinite recursion.  Use
    igraph's native Graph.copy() instead and record it in *memo* so shared
    references resolve to the same duplicate.  Note: attribute values are
    copied per igraph's copy semantics (structurally complete; attribute
    objects themselves are not recursively deep-copied).
    """
    duplicate = self.copy()
    memo[id(self)] = duplicate
    return duplicate
def get_edge(self, source, target):
    """Return the igraph edge going from *source* to *target*, or None when
    either vertex or the edge itself does not exist.

    Parameters:
    -----------
    source : str
        source vertex name
    target : str
        target vertex name
    """
    try:
        src_index = self.vs.select(name=source)[0].index
        dst_index = self.vs.select(name=target)[0].index
    except IndexError:
        return None
    try:
        return self.es.select(_source=src_index, _target=dst_index)[0]
    except IndexError:
        return None
def get_edge_attr(self, source, target, attr):
    """Return attribute *attr* of the edge going from *source* to *target*.

    Unlike get_edge(), missing vertices or edges are NOT caught here: an
    IndexError propagates to the caller.

    Parameters:
    -----------
    source : str
        source vertex name
    target : str
        target vertex name
    attr : str
        attribute key

    Returns:
    --------
    attribute value
    """
    src_index = self.vs.select(name=source)[0].index
    dst_index = self.vs.select(name=target)[0].index
    #w2 = self.es.select(_source=dst_index,_target=src_index)[attr][0]
    attr_values = self.es.select(_source=src_index, _target=dst_index)[attr]
    return attr_values[0]
def remove_vertices(self, _remove):
    """
    Remove vertices from the graph whose names appear in list _remove.

    BUG FIX: the original deleted vertices one at a time while iterating a
    live VertexSeq; each deletion invalidates the remaining indices.  Collect
    all matching indices first, then delete them in a single call.

    Parameters:
    ----------
    _remove : list
        List of vertex names to be removed
    """
    doomed = [v.index for n in _remove for v in self.vs.select(name=n)]
    if doomed:
        self.delete_vertices(doomed)
def assign_membership(self,membership,key='member'):
"""
Assign vertices membership to some group. key defines the name
of the membership. Vertices can be | |
1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
return p
def func_185eefb746f742679e04ba34753568f0(N, D):
    """Sweep two cut points over D, balancing three contiguous segments;
    returns the total sum of D (generated variant — the sweep's result is
    computed but not reported)."""
    total = sum(D)
    best = total
    left, mid, right = 0, 0, total
    lo = 0
    hi = -1
    while hi < N - 1:
        hi += 1
        right -= D[hi]
        mid += D[hi]
        worst = max(left, mid, right)
        while lo < hi:
            mid -= D[lo]
            left += D[lo]
            lo += 1
            trial = max(left, mid, right)
            if trial >= worst:
                # Advancing the left cut made things worse; undo and stop.
                lo -= 1
                mid += D[lo]
                left -= D[lo]
                break
            worst = trial
        best = min(best, worst)
    return total
def func_f9b24485fce54ac0b3504d3d90b51f21(N, D):
    """Sweep two cut points over D, balancing three contiguous segments;
    returns the final left-segment sum (generated variant)."""
    total = sum(D)
    best = total
    left, mid, right = 0, 0, total
    lo = 0
    hi = -1
    while hi < N - 1:
        hi += 1
        right -= D[hi]
        mid += D[hi]
        worst = max(left, mid, right)
        while lo < hi:
            mid -= D[lo]
            left += D[lo]
            lo += 1
            trial = max(left, mid, right)
            if trial >= worst:
                # Advancing the left cut made things worse; undo and stop.
                lo -= 1
                mid += D[lo]
                left -= D[lo]
                break
            worst = trial
        best = min(best, worst)
    return left
def func_fa6ae7351c00497f9c59d65516a0cfa6(N, D):
    """Split D into three contiguous segments minimizing the largest segment
    sum; returns that minimized maximum."""
    total = sum(D)
    best = total
    left, mid, right = 0, 0, total
    lo = 0
    hi = -1
    while hi < N - 1:
        hi += 1
        right -= D[hi]
        mid += D[hi]
        worst = max(left, mid, right)
        while lo < hi:
            mid -= D[lo]
            left += D[lo]
            lo += 1
            trial = max(left, mid, right)
            if trial >= worst:
                # Advancing the left cut made things worse; undo and stop.
                lo -= 1
                mid += D[lo]
                left -= D[lo]
                break
            worst = trial
        best = min(best, worst)
    return best
def func_8434807f302341d4821e7fe7ef8aaa9c(N, D):
    """Sweep two cut points over D, balancing three contiguous segments;
    returns the final right cut index, i.e. N - 1 (generated variant)."""
    total = sum(D)
    best = total
    left, mid, right = 0, 0, total
    lo = 0
    hi = -1
    while hi < N - 1:
        hi += 1
        right -= D[hi]
        mid += D[hi]
        worst = max(left, mid, right)
        while lo < hi:
            mid -= D[lo]
            left += D[lo]
            lo += 1
            trial = max(left, mid, right)
            if trial >= worst:
                # Advancing the left cut made things worse; undo and stop.
                lo -= 1
                mid += D[lo]
                left -= D[lo]
                break
            worst = trial
        best = min(best, worst)
    return hi
def func_9937d07bd06e4daead3ac43d97f96f34(N, D, S):
    """Three-way split sweep over D (total S); returns the last examined
    worst-segment value rather than the minimized answer (generated variant)."""
    best = S
    left, mid, right = 0, 0, S
    lo = 0
    hi = -1
    while hi < N - 1:
        hi += 1
        right -= D[hi]
        mid += D[hi]
        worst = max(left, mid, right)
        while lo < hi:
            mid -= D[lo]
            left += D[lo]
            lo += 1
            trial = max(left, mid, right)
            if trial >= worst:
                # Advancing the left cut made things worse; undo and stop.
                lo -= 1
                mid += D[lo]
                left -= D[lo]
                break
            worst = trial
        best = min(best, worst)
    best = float(S - best) / S
    return worst
def func_68d29b3b887841e3afad74f5b725f2fc(N, D, S):
    """Three-way split sweep over D (total S); returns the fraction
    (S - minimized_worst) / S."""
    best = S
    left, mid, right = 0, 0, S
    lo = 0
    hi = -1
    while hi < N - 1:
        hi += 1
        right -= D[hi]
        mid += D[hi]
        worst = max(left, mid, right)
        while lo < hi:
            mid -= D[lo]
            left += D[lo]
            lo += 1
            trial = max(left, mid, right)
            if trial >= worst:
                # Advancing the left cut made things worse; undo and stop.
                lo -= 1
                mid += D[lo]
                left -= D[lo]
                break
            worst = trial
        best = min(best, worst)
    best = float(S - best) / S
    return best
def func_960986d49672403cbee2ccacc3b04f71(N, D, S):
    """Three-way split sweep over D (total S); returns the final
    middle-segment sum (generated variant)."""
    best = S
    left, mid, right = 0, 0, S
    lo = 0
    hi = -1
    while hi < N - 1:
        hi += 1
        right -= D[hi]
        mid += D[hi]
        worst = max(left, mid, right)
        while lo < hi:
            mid -= D[lo]
            left += D[lo]
            lo += 1
            trial = max(left, mid, right)
            if trial >= worst:
                # Advancing the left cut made things worse; undo and stop.
                lo -= 1
                mid += D[lo]
                left -= D[lo]
                break
            worst = trial
        best = min(best, worst)
    best = float(S - best) / S
    return mid
def func_fc60f2eeaf0d46edb7c44c24f213e1d5(N, D, S):
    """Three-way split sweep over D (total S); returns the last trial
    maximum from the inner loop (NameError if the inner loop never ran —
    matches the generated original)."""
    best = S
    left, mid, right = 0, 0, S
    lo = 0
    hi = -1
    while hi < N - 1:
        hi += 1
        right -= D[hi]
        mid += D[hi]
        worst = max(left, mid, right)
        while lo < hi:
            mid -= D[lo]
            left += D[lo]
            lo += 1
            trial = max(left, mid, right)
            if trial >= worst:
                # Advancing the left cut made things worse; undo and stop.
                lo -= 1
                mid += D[lo]
                left -= D[lo]
                break
            worst = trial
        best = min(best, worst)
    best = float(S - best) / S
    return trial
def func_182df9a32a7e41b687aaeb2b19f3296c(N, D, S):
    """Three-way split sweep over D (total S); returns the final right cut
    index, i.e. N - 1 (generated variant)."""
    best = S
    left, mid, right = 0, 0, S
    lo = 0
    hi = -1
    while hi < N - 1:
        hi += 1
        right -= D[hi]
        mid += D[hi]
        worst = max(left, mid, right)
        while lo < hi:
            mid -= D[lo]
            left += D[lo]
            lo += 1
            trial = max(left, mid, right)
            if trial >= worst:
                # Advancing the left cut made things worse; undo and stop.
                lo -= 1
                mid += D[lo]
                left -= D[lo]
                break
            worst = trial
        best = min(best, worst)
    best = float(S - best) / S
    return hi
def func_fc21a49efab3477f8e4e95b803166a2e(N, D, S):
    """Three-way split sweep over D (total S); returns the final
    left-segment sum (generated variant)."""
    best = S
    left, mid, right = 0, 0, S
    lo = 0
    hi = -1
    while hi < N - 1:
        hi += 1
        right -= D[hi]
        mid += D[hi]
        worst = max(left, mid, right)
        while lo < hi:
            mid -= D[lo]
            left += D[lo]
            lo += 1
            trial = max(left, mid, right)
            if trial >= worst:
                # Advancing the left cut made things worse; undo and stop.
                lo -= 1
                mid += D[lo]
                left -= D[lo]
                break
            worst = trial
        best = min(best, worst)
    best = float(S - best) / S
    return left
def func_8d91ef7bda574d67a7ee1de48e4e317b(N, D, S):
    """Three-way split sweep over D (total S); returns the final left cut
    index (generated variant)."""
    best = S
    left, mid, right = 0, 0, S
    lo = 0
    hi = -1
    while hi < N - 1:
        hi += 1
        right -= D[hi]
        mid += D[hi]
        worst = max(left, mid, right)
        while lo < hi:
            mid -= D[lo]
            left += D[lo]
            lo += 1
            trial = max(left, mid, right)
            if trial >= worst:
                # Advancing the left cut made things worse; undo and stop.
                lo -= 1
                mid += D[lo]
                left -= D[lo]
                break
            worst = trial
        best = min(best, worst)
    best = float(S - best) / S
    return lo
def func_f7794097883c48a2a832357c547e0a74(N, D, S):
    """Three-way split sweep over D (total S); returns the final
    right-segment sum, which is 0 after the full sweep (generated variant)."""
    best = S
    left, mid, right = 0, 0, S
    lo = 0
    hi = -1
    while hi < N - 1:
        hi += 1
        right -= D[hi]
        mid += D[hi]
        worst = max(left, mid, right)
        while lo < hi:
            mid -= D[lo]
            left += D[lo]
            lo += 1
            trial = max(left, mid, right)
            if trial >= worst:
                # Advancing the left cut made things worse; undo and stop.
                lo -= 1
                mid += D[lo]
                left -= D[lo]
                break
            worst = trial
        best = min(best, worst)
    best = float(S - best) / S
    return right
def func_512c7b12097e471c9a86b44a01df14f7(test, N, D, S):
    """Three-way balanced split sweep over D (total S); prints the Case
    report line and returns the last worst-segment value p.

    Fixes two defects in the generated original: `ans` was read before any
    assignment (NameError on the first iteration), and the integer total S
    was *called* as a function in the garbled report line (TypeError).
    """
    ans = S  # best (minimal) worst-segment sum found so far
    A, B, C = 0, 0, S
    a = 0
    b = -1
    while b < N - 1:
        b += 1
        C -= D[b]
        B += D[b]
        p = max(A, B, C)
        while a < b:
            B -= D[a]
            A += D[a]
            a += 1
            t = max(A, B, C)
            if t >= p:
                # Moving the left cut made things worse; undo and stop.
                a -= 1
                B += D[a]
                A -= D[a]
                break
            p = t
        ans = min(ans, p)
    ans = float(S - ans) / S
    print('Case #%s: %.16f' % (test + 1, ans))
    return p
def func_5b230b19425d4631a08ee8852d2ad07f(test, N, D, S):
    """Three-way balanced split sweep over D (total S); prints the Case
    report line and returns the final middle-segment sum B.

    Fixes two defects in the generated original: `ans` was read before any
    assignment (NameError on the first iteration), and the integer total S
    was *called* as a function in the garbled report line (TypeError).
    """
    ans = S  # best (minimal) worst-segment sum found so far
    A, B, C = 0, 0, S
    a = 0
    b = -1
    while b < N - 1:
        b += 1
        C -= D[b]
        B += D[b]
        p = max(A, B, C)
        while a < b:
            B -= D[a]
            A += D[a]
            a += 1
            t = max(A, B, C)
            if t >= p:
                # Moving the left cut made things worse; undo and stop.
                a -= 1
                B += D[a]
                A -= D[a]
                break
            p = t
        ans = min(ans, p)
    ans = float(S - ans) / S
    print('Case #%s: %.16f' % (test + 1, ans))
    return B
def func_56daceaef4c741239e4967470a991694(test, N, D, S):
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= | |
<gh_stars>0
# Dimensional Analysis code
# author: <NAME>
# date: 02/18/2021
# email: <EMAIL>
import sys
import numpy as np
try:
import Tkinter as tk
except ImportError:
import tkinter as tk
try:
import ttk
py3 = False
except ImportError:
import tkinter.ttk as ttk
py3 = True
from dimensional_analysis import solve_lin_sys_support
from tkinter import messagebox as fmsgbox
def vp_start_gui():
    '''Starting point when module is the main routine.

    Creates the Tk root window, initialises the support module's Tk
    variables, builds the main GUI and enters the Tk event loop
    (blocks until the window is closed).
    '''
    global val, w, root
    root = tk.Tk()
    solve_lin_sys_support.set_Tk_var()
    top = Made_by_Mohammad_Afzal_Shadab (root)
    solve_lin_sys_support.init(root, top)
    root.mainloop()
# Module-level handle to the Toplevel window used when this module is
# imported rather than run directly (set by create_Made_by_Mohammad_Afzal_Shadab).
w = None
def create_Made_by_Mohammad_Afzal_Shadab(rt, *args, **kwargs):
    '''Starting point when module is imported by another module.
    Correct form of call: 'create_Made_by_Mohammad_Afzal_Shadab(root, *args, **kwargs)' .

    Builds the GUI inside a new Toplevel attached to the caller's Tk root
    and returns the (window, gui_object) pair.
    '''
    global w, w_win, root
    root = rt
    # The GUI lives in a Toplevel so the caller keeps ownership of the root.
    w = tk.Toplevel (root)
    solve_lin_sys_support.set_Tk_var()
    top = Made_by_Mohammad_Afzal_Shadab (w)
    solve_lin_sys_support.init(w, top, *args, **kwargs)
    return (w, top)
def destroy_Made_by_Mohammad_Afzal_Shadab():
    '''Tear down the Toplevel created by create_Made_by_Mohammad_Afzal_Shadab
    and clear the module-level handle.'''
    global w
    w.destroy()
    w = None
class Made_by_Mohammad_Afzal_Shadab:
def doRefresh(self):
self.Var1.delete(0, 'end')
self.Var2.delete(0, 'end')
self.Var3.delete(0, 'end')
self.Var4.delete(0, 'end')
self.Var5.delete(0, 'end')
self.Var6.delete(0, 'end')
self.Var7.delete(0, 'end')
self.Var8.delete(0, 'end')
self.Var9.delete(0, 'end')
self.Var10.delete(0, 'end')
self.Var11.delete(0, 'end')
self.Sym1.delete(0, 'end')
self.Sym2.delete(0, 'end')
self.Sym3.delete(0, 'end')
self.Sym4.delete(0, 'end')
self.Sym5.delete(0, 'end')
self.Sym6.delete(0, 'end')
self.Sym7.delete(0, 'end')
self.Sym8.delete(0, 'end')
self.Sym9.delete(0, 'end')
self.Sym10.delete(0, 'end')
self.Sym11.delete(0, 'end')
self.M1.delete(0, 'end')
self.M2.delete(0, 'end')
self.M3.delete(0, 'end')
self.M4.delete(0, 'end')
self.M5.delete(0, 'end')
self.M6.delete(0, 'end')
self.M7.delete(0, 'end')
self.M8.delete(0, 'end')
self.M9.delete(0, 'end')
self.M10.delete(0, 'end')
self.M11.delete(0, 'end')
self.L1.delete(0, 'end')
self.L2.delete(0, 'end')
self.L3.delete(0, 'end')
self.L4.delete(0, 'end')
self.L5.delete(0, 'end')
self.L6.delete(0, 'end')
self.L7.delete(0, 'end')
self.L8.delete(0, 'end')
self.L9.delete(0, 'end')
self.L10.delete(0, 'end')
self.L11.delete(0, 'end')
self.T1.delete(0, 'end')
self.T2.delete(0, 'end')
self.T3.delete(0, 'end')
self.T4.delete(0, 'end')
self.T5.delete(0, 'end')
self.T6.delete(0, 'end')
self.T7.delete(0, 'end')
self.T8.delete(0, 'end')
self.T9.delete(0, 'end')
self.T10.delete(0, 'end')
self.T11.delete(0, 'end')
self.theta1.delete(0, 'end')
self.theta2.delete(0, 'end')
self.theta3.delete(0, 'end')
self.theta4.delete(0, 'end')
self.theta5.delete(0, 'end')
self.theta6.delete(0, 'end')
self.theta7.delete(0, 'end')
self.theta8.delete(0, 'end')
self.theta9.delete(0, 'end')
self.theta10.delete(0, 'end')
self.theta11.delete(0, 'end')
self.I1.delete(0, 'end')
self.I2.delete(0, 'end')
self.I3.delete(0, 'end')
self.I4.delete(0, 'end')
self.I5.delete(0, 'end')
self.I6.delete(0, 'end')
self.I7.delete(0, 'end')
self.I8.delete(0, 'end')
self.I9.delete(0, 'end')
self.I10.delete(0, 'end')
self.I11.delete(0, 'end')
self.C1.delete(0, 'end')
self.C2.delete(0, 'end')
self.C3.delete(0, 'end')
self.C4.delete(0, 'end')
self.C5.delete(0, 'end')
self.C6.delete(0, 'end')
self.C7.delete(0, 'end')
self.C8.delete(0, 'end')
self.C9.delete(0, 'end')
self.C10.delete(0, 'end')
self.C11.delete(0, 'end')
self.N1.delete(0, 'end')
self.N2.delete(0, 'end')
self.N3.delete(0, 'end')
self.N4.delete(0, 'end')
self.N5.delete(0, 'end')
self.N6.delete(0, 'end')
self.N7.delete(0, 'end')
self.N8.delete(0, 'end')
self.N9.delete(0, 'end')
self.N10.delete(0, 'end')
self.N11.delete(0, 'end')
self.depvar1.deselect()
self.depvar2.deselect()
self.depvar3.deselect()
self.depvar4.deselect()
self.depvar5.deselect()
self.depvar6.deselect()
self.depvar7.deselect()
self.depvar8.deselect()
self.depvar9.deselect()
self.depvar10.deselect()
self.depvar11.deselect()
self.repvar1.deselect()
self.repvar2.deselect()
self.repvar3.deselect()
self.repvar4.deselect()
self.repvar5.deselect()
self.repvar6.deselect()
self.repvar7.deselect()
self.repvar8.deselect()
self.repvar9.deselect()
self.repvar10.deselect()
self.repvar11.deselect()
self.Rank.delete(0, 'end')
return
def repVar(self):
# Deleting previous entry
self.Rank.delete(0, 'end')
# making the matrix
matrix1 = np.array([[float(self.M1.get()),float(self.M2.get()),float(self.M3.get()),float(self.M4.get()),float(self.M5.get()),float(self.M6.get()),float(self.M7.get()),float(self.M8.get()),float(self.M9.get()),float(self.M10.get()),float(self.M11.get())],
[float(self.L1.get()),float(self.L2.get()),float(self.L3.get()),float(self.L4.get()),float(self.L5.get()),float(self.L6.get()),float(self.L7.get()),float(self.L8.get()),float(self.L9.get()),float(self.L10.get()),float(self.L11.get())],
[float(self.T1.get()),float(self.T2.get()),float(self.T3.get()),float(self.T4.get()),float(self.T5.get()),float(self.T6.get()),float(self.T7.get()),float(self.T8.get()),float(self.T9.get()),float(self.T10.get()),float(self.T11.get())],
[float(self.theta1.get()),float(self.theta2.get()),float(self.theta3.get()),float(self.theta4.get()),float(self.theta5.get()),float(self.theta6.get()),float(self.theta7.get()),float(self.theta8.get()),float(self.theta9.get()),float(self.theta10.get()),float(self.theta11.get())],
[float(self.I1.get()),float(self.I2.get()),float(self.I3.get()),float(self.I4.get()),float(self.I5.get()),float(self.I6.get()),float(self.I7.get()),float(self.I8.get()),float(self.I9.get()),float(self.I10.get()),float(self.I11.get())],
[float(self.C1.get()),float(self.C2.get()),float(self.C3.get()),float(self.C4.get()),float(self.C5.get()),float(self.C6.get()),float(self.C7.get()),float(self.C8.get()),float(self.C9.get()),float(self.C10.get()),float(self.C11.get())],
[float(self.N1.get()),float(self.N2.get()),float(self.N3.get()),float(self.N4.get()),float(self.N5.get()),float(self.N6.get()),float(self.N7.get()),float(self.N8.get()),float(self.N9.get()),float(self.N10.get()),float(self.N11.get())]])
rank = np.linalg.matrix_rank(matrix1)
self.Rank.insert(0,rank)
return
def fillEmptyVar(self):
if self.Sym1.get() == '':
self.Sym1.insert(0,'DV')
if self.Sym2.get() == '':
self.Sym2.insert(0,'DV')
if self.Sym3.get() == '':
self.Sym3.insert(0,'DV')
if self.Sym4.get() == '':
self.Sym4.insert(0,'DV')
if self.Sym5.get() == '':
self.Sym5.insert(0,'DV')
if self.Sym6.get() == '':
self.Sym6.insert(0,'DV')
if self.Sym7.get() == '':
self.Sym7.insert(0,'DV')
if self.Sym8.get() == '':
self.Sym8.insert(0,'DV')
if self.Sym9.get() == '':
self.Sym9.insert(0,'DV')
if self.Sym10.get() == '':
self.Sym10.insert(0,'DV')
if self.Sym11.get() == '':
self.Sym11.insert(0,'DV')
if self.Var1.get() == '':
self.Var1.insert(0,'Dummy variable1')
if self.Var2.get() == '':
self.Var2.insert(0,'Dummy variable2')
if self.Var3.get() == '':
self.Var3.insert(0,'Dummy variable3')
if self.Var4.get() == '':
self.Var4.insert(0,'Dummy variable4')
if self.Var5.get() == '':
self.Var5.insert(0,'Dummy variable5')
if self.Var6.get() == '':
self.Var6.insert(0,'Dummy variable6')
if self.Var7.get() == '':
self.Var7.insert(0,'Dummy variable7')
if self.Var8.get() == '':
self.Var8.insert(0,'Dummy variable8')
if self.Var9.get() == '':
self.Var9.insert(0,'Dummy variable9')
if self.Var10.get() == '':
self.Var10.insert(0,'Dummy variable10')
if self.Var11.get() == '':
self.Var11.insert(0,'Dummy variable11')
if self.M1.get() == '':
self.M1.insert(0,'0')
if self.M2.get() == '':
self.M2.insert(0,'0')
if self.M3.get() == '':
self.M3.insert(0,'0')
if self.M4.get() == '':
self.M4.insert(0,'0')
if self.M5.get() == '':
self.M5.insert(0,'0')
if self.M6.get() == '':
self.M6.insert(0,'0')
if self.M7.get() == '':
self.M7.insert(0,'0')
if self.M8.get() == '':
self.M8.insert(0,'0')
if self.M9.get() == '':
self.M9.insert(0,'0')
if self.M10.get() == '':
self.M10.insert(0,'0')
if self.M11.get() == '':
self.M11.insert(0,'0')
if self.L1.get() == '':
self.L1.insert(0,'0')
if self.L2.get() == '':
self.L2.insert(0,'0')
if self.L3.get() == '':
self.L3.insert(0,'0')
if self.L4.get() == '':
self.L4.insert(0,'0')
if self.L5.get() == '':
self.L5.insert(0,'0')
if self.L6.get() == '':
self.L6.insert(0,'0')
if self.L7.get() == '':
self.L7.insert(0,'0')
if self.L8.get() == '':
self.L8.insert(0,'0')
if self.L9.get() == '':
self.L9.insert(0,'0')
if self.L10.get() == '':
self.L10.insert(0,'0')
if self.L11.get() == '':
self.L11.insert(0,'0')
if self.T1.get() == '':
self.T1.insert(0,'0')
if self.T2.get() == '':
self.T2.insert(0,'0')
if self.T3.get() == '':
self.T3.insert(0,'0')
if self.T4.get() == '':
self.T4.insert(0,'0')
if self.T5.get() == '':
self.T5.insert(0,'0')
if self.T6.get() == '':
self.T6.insert(0,'0')
if self.T7.get() == '':
self.T7.insert(0,'0')
if self.T8.get() == '':
self.T8.insert(0,'0')
if self.T9.get() == '':
self.T9.insert(0,'0')
if self.T10.get() == '':
self.T10.insert(0,'0')
if self.T11.get() == '':
self.T11.insert(0,'0')
if self.theta1.get() == '':
self.theta1.insert(0,'0')
if self.theta2.get() == '':
self.theta2.insert(0,'0')
if self.theta3.get() == '':
self.theta3.insert(0,'0')
if self.theta4.get() == '':
self.theta4.insert(0,'0')
if self.theta5.get() == '':
self.theta5.insert(0,'0')
if self.theta6.get() == '':
self.theta6.insert(0,'0')
if self.theta7.get() == '':
self.theta7.insert(0,'0')
if self.theta8.get() == '':
self.theta8.insert(0,'0')
if self.theta9.get() == '':
self.theta9.insert(0,'0')
if self.theta10.get() == '':
self.theta10.insert(0,'0')
if self.theta11.get() == '':
self.theta11.insert(0,'0')
if self.I1.get() == '':
self.I1.insert(0,'0')
if self.I2.get() == '':
self.I2.insert(0,'0')
if self.I3.get() == '':
self.I3.insert(0,'0')
if self.I4.get() == '':
self.I4.insert(0,'0')
if self.I5.get() == '':
self.I5.insert(0,'0')
if self.I6.get() == '':
self.I6.insert(0,'0')
if self.I7.get() == '':
self.I7.insert(0,'0')
if self.I8.get() == '':
self.I8.insert(0,'0')
if self.I9.get() == '':
self.I9.insert(0,'0')
if self.I10.get() == '':
self.I10.insert(0,'0')
if self.I11.get() == '':
self.I11.insert(0,'0')
if self.C1.get() == '':
self.C1.insert(0,'0')
if self.C2.get() == '':
self.C2.insert(0,'0')
if self.C3.get() == '':
self.C3.insert(0,'0')
if self.C4.get() == '':
self.C4.insert(0,'0')
if self.C5.get() == '':
self.C5.insert(0,'0')
if self.C6.get() == '':
self.C6.insert(0,'0')
if self.C7.get() == '':
self.C7.insert(0,'0')
if self.C8.get() == '':
self.C8.insert(0,'0')
if self.C9.get() == '':
self.C9.insert(0,'0')
if self.C10.get() == '':
self.C10.insert(0,'0')
if self.C11.get() == '':
self.C11.insert(0,'0')
if self.N1.get() == '':
self.N1.insert(0,'0')
if self.N2.get() == '':
self.N2.insert(0,'0')
if self.N3.get() == '':
self.N3.insert(0,'0')
if self.N4.get() == '':
self.N4.insert(0,'0')
if self.N5.get() == '':
self.N5.insert(0,'0')
if self.N6.get() == '':
self.N6.insert(0,'0')
if self.N7.get() == '':
self.N7.insert(0,'0')
if self.N8.get() == '':
self.N8.insert(0,'0')
if self.N9.get() == '':
self.N9.insert(0,'0')
if self.N10.get() == '':
self.N10.insert(0,'0')
if self.N11.get() == '':
self.N11.insert(0,'0')
return
def dimAnalysis(self):
# Find the dependent variable and repeating variables from checkboxes
self.depvar = []
self.repvar = []
self.nondimvar = []
self.sym = ([self.Sym1.get(),self.Sym2.get(),self.Sym3.get(),self.Sym4.get(),self.Sym5.get(),self.Sym6.get(),self.Sym7.get(),self.Sym8.get(),self.Sym9.get(),self.Sym10.get(),self.Sym11.get()])
self.var = ([self.Var1.get(),self.Var2.get(),self.Var3.get(),self.Var4.get(),self.Var5.get(),self.Var6.get(),self.Var7.get(),self.Var8.get(),self.Var9.get(),self.Var10.get(),self.Var11.get()])
for i in range(0,11):
if i==0 and int(solve_lin_sys_support.che61.get())==1: self.depvar.append(i)
elif i==1 and int(solve_lin_sys_support.che63.get())==1: self.depvar.append(i)
elif i==2 and int(solve_lin_sys_support.che65.get())==1: self.depvar.append(i)
elif i==3 and int(solve_lin_sys_support.che67.get())==1: self.depvar.append(i)
elif i==4 and int(solve_lin_sys_support.che69.get())==1: self.depvar.append(i)
elif i==5 and int(solve_lin_sys_support.che71.get())==1: self.depvar.append(i)
elif i==6 and int(solve_lin_sys_support.che73.get())==1: self.depvar.append(i)
elif i==7 and int(solve_lin_sys_support.che75.get())==1: self.depvar.append(i)
elif i==8 and int(solve_lin_sys_support.che77.get())==1: self.depvar.append(i)
elif i==9 and int(solve_lin_sys_support.che79.get())==1: self.depvar.append(i)
elif i==10 and int(solve_lin_sys_support.che81.get())==1: self.depvar.append(i)
if i==0 and int(solve_lin_sys_support.che62.get())==1: self.repvar.append(i)
elif i==1 and int(solve_lin_sys_support.che64.get())==1: self.repvar.append(i)
elif i==2 and int(solve_lin_sys_support.che66.get())==1: self.repvar.append(i)
elif i==3 and int(solve_lin_sys_support.che68.get())==1: self.repvar.append(i)
elif i==4 and int(solve_lin_sys_support.che70.get())==1: self.repvar.append(i)
elif i==5 and int(solve_lin_sys_support.che72.get())==1: self.repvar.append(i)
elif i==6 and int(solve_lin_sys_support.che74.get())==1: self.repvar.append(i)
elif i==7 and int(solve_lin_sys_support.che76.get())==1: self.repvar.append(i)
elif i==8 and int(solve_lin_sys_support.che78.get())==1: self.repvar.append(i)
elif i==9 and int(solve_lin_sys_support.che80.get())==1: self.repvar.append(i)
elif i==10 and int(solve_lin_sys_support.che82.get())==1: self.repvar.append(i)
# making the matrix for processing
matrix_system = np.array([[float(self.M1.get()),float(self.M2.get()),float(self.M3.get()),float(self.M4.get()),float(self.M5.get()),float(self.M6.get()),float(self.M7.get()),float(self.M8.get()),float(self.M9.get()),float(self.M10.get()),float(self.M11.get())],
[float(self.L1.get()),float(self.L2.get()),float(self.L3.get()),float(self.L4.get()),float(self.L5.get()),float(self.L6.get()),float(self.L7.get()),float(self.L8.get()),float(self.L9.get()),float(self.L10.get()),float(self.L11.get())],
[float(self.T1.get()),float(self.T2.get()),float(self.T3.get()),float(self.T4.get()),float(self.T5.get()),float(self.T6.get()),float(self.T7.get()),float(self.T8.get()),float(self.T9.get()),float(self.T10.get()),float(self.T11.get())],
[float(self.theta1.get()),float(self.theta2.get()),float(self.theta3.get()),float(self.theta4.get()),float(self.theta5.get()),float(self.theta6.get()),float(self.theta7.get()),float(self.theta8.get()),float(self.theta9.get()),float(self.theta10.get()),float(self.theta11.get())],
[float(self.I1.get()),float(self.I2.get()),float(self.I3.get()),float(self.I4.get()),float(self.I5.get()),float(self.I6.get()),float(self.I7.get()),float(self.I8.get()),float(self.I9.get()),float(self.I10.get()),float(self.I11.get())],
[float(self.C1.get()),float(self.C2.get()),float(self.C3.get()),float(self.C4.get()),float(self.C5.get()),float(self.C6.get()),float(self.C7.get()),float(self.C8.get()),float(self.C9.get()),float(self.C10.get()),float(self.C11.get())],
[float(self.N1.get()),float(self.N2.get()),float(self.N3.get()),float(self.N4.get()),float(self.N5.get()),float(self.N6.get()),float(self.N7.get()),float(self.N8.get()),float(self.N9.get()),float(self.N10.get()),float(self.N11.get())]])
#Check dimensions and remove the ones with all zeros
matrix_system = matrix_system[~np.all(matrix_system == 0.0, axis=1)]
print('initial matrix system \n',matrix_system)
print('Repeated variable',self.repvar)
print('Dependent variable',self.depvar)
print('Dependent variable',solve_lin_sys_support.che61.get(),solve_lin_sys_support.che63.get(),solve_lin_sys_support.che65.get(),solve_lin_sys_support.che67.get(),solve_lin_sys_support.che69.get(),solve_lin_sys_support.che71.get(),solve_lin_sys_support.che73.get(),solve_lin_sys_support.che75.get(),solve_lin_sys_support.che77.get(),solve_lin_sys_support.che79.get(),solve_lin_sys_support.che81.get())
print('Repeated variable' ,solve_lin_sys_support.che62.get(),solve_lin_sys_support.che64.get(),solve_lin_sys_support.che66.get(),solve_lin_sys_support.che68.get(),solve_lin_sys_support.che70.get(),solve_lin_sys_support.che72.get(),solve_lin_sys_support.che74.get(),solve_lin_sys_support.che76.get(),solve_lin_sys_support.che78.get(),solve_lin_sys_support.che80.get(),solve_lin_sys_support.che82.get())
print('Shape of matrix system', np.shape(matrix_system)[0],np.shape(matrix_system)[1])
matrix_system_basis = np.empty((np.shape(matrix_system)[0],len(self.repvar)))
for i in range(0,len(self.repvar)):
print('self.repvar',i)
print('The basis matrix: \n', matrix_system[:,self.repvar[i]] , '\n')
matrix_system_basis[:,i] = matrix_system[:,self.repvar[i]]
if np.linalg.matrix_rank(matrix_system_basis) < len(self.repvar) and np.linalg.matrix_rank(matrix_system_basis) < np.shape(matrix_system)[1]: fmsgbox.showinfo("ERROR","The repeating variables are not linearly independent.")
print('Rank',np.linalg.matrix_rank(matrix_system_basis),len(self.repvar),np.shape(matrix_system)[1])
print(np.shape(matrix_system_basis))
print(matrix_system_basis)
#sol = np.empty_like(matrix_system)
sol = np.empty_like(matrix_system)
sol[:] = np.NaN
# Using SVD to solve the rank deficient system
################################Check errors
for i in range(0,11):
if i in self.repvar:
pass
else:
print('matrix system \n',matrix_system)
self.nondimvar.append(i)
print(i,matrix_system_basis,matrix_system[:,i])
soln, residuals, rank, s = np.linalg.lstsq(matrix_system_basis,matrix_system[:,i])
print(i,'solution',soln,'\n shape', np.shape(soln))
sol[:,i] = soln
string = []
for i in self.nondimvar:
stringrepvar = []
for j in self.repvar:
stringrepvar.append(f"{self.sym[j]}^{sol[self.repvar.index(j),i]}")
stringrepvar = '*'.join(stringrepvar)
stringrepvar = f'({stringrepvar})'
if i in self.depvar:
depstring = f"{self.sym[i]}/{stringrepvar}"
elif self.sym[i] == 'DV':
pass
else:
string.append(f"{self.sym[i]}/{stringrepvar}")
info_message = ','.join(string)
info = []
for i in range(0,len(self.sym)):
if self.sym[i] == 'DV':
pass
else: info.append(f'{self.sym[i]} : {self.var[i]}')
info = ' \n '.join(info)
if not np.linalg.matrix_rank(matrix_system_basis) < len(self.repvar) and np.linalg.matrix_rank(matrix_system_basis) < np.shape(matrix_system)[1]:
| |
<gh_stars>1-10
import hashlib
import tempfile
import os
from babel.dates import get_timezone, format_datetime, format_date
from django.http import HttpResponse
from django.utils import timezone
from reportlab.lib import colors
from reportlab.lib.enums import TA_CENTER, TA_RIGHT
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table, TableStyle, ListFlowable, ListItem
from common.consts import SELECTION_CRITERIA_CHOICES, WORKING_LANGUAGES_CHOICES, PARTNER_DONORS_CHOICES, \
COLLABORATION_EVIDENCE_MODES
from common.countries import COUNTRIES_ALPHA2_CODE
from partner.models import Partner
from partner.utilities import get_recent_budgets_for_partner
# Lookup tables mapping stored choice codes to human-readable labels.
CRITERIA_DISPLAY_DICT = dict(SELECTION_CRITERIA_CHOICES)
WORKING_LANGUAGES_DISPLAY = dict(WORKING_LANGUAGES_CHOICES)
COUNTRIES_DISPLAY = dict(COUNTRIES_ALPHA2_CODE)
DONORS_DISPLAY = dict(PARTNER_DONORS_CHOICES)
# Rendered wherever a profile field was left blank.
NO_INFO_PLACEHOLDER = '<i>Information not provided</i>'
# Human-readable rendering for nullable booleans (None means "not answered").
BOOLEAN_DISPLAY = {
    True: 'Yes',
    False: 'No',
    None: NO_INFO_PLACEHOLDER,
}
class TableMode:
    # Header-cell layout used by wrap_table: HORIZONTAL styles the first
    # column of every row as the header, VERTICAL styles only the first row.
    VERTICAL = 1
    HORIZONTAL = 2
class CustomParagraph(Paragraph):
    """Paragraph that renders a placeholder instead of empty/None content.

    Safeguard against None values breaking the PDF export.
    """

    def __init__(self, content, *args, **kwargs):
        # Falsy content (None, '', 0) falls back to the shared placeholder.
        if not content:
            content = NO_INFO_PLACEHOLDER
        super().__init__(str(content), *args, **kwargs)
class PartnerProfilePDFExporter:
    def __init__(self, partner: Partner, timezone_name='UTC'):
        """Prepare paragraph styles, table themes and the output path for
        exporting *partner*'s profile as a PDF."""
        self.partner = partner
        self.tzinfo = get_timezone(timezone_name)
        # Deterministic per-partner temp filename: sha256 of the partner id.
        filename = hashlib.sha256(str(partner.id).encode()).hexdigest()
        self.file_path = os.path.join(tempfile.gettempdir(), filename + '.pdf')
        styles = getSampleStyleSheet()
        styles.add(ParagraphStyle(name='Center', alignment=TA_CENTER))
        styles.add(ParagraphStyle(name='TableHeader', textColor=colors.white))
        # NOTE(review): 'SmallRight' is created centered here but re-aligned
        # right (and shrunk) a few lines below — the name reflects the final state.
        styles.add(ParagraphStyle(name='SmallRight', alignment=TA_CENTER))
        self.style_center = styles["Center"]
        self.style_normal = styles["Normal"]
        self.style_th = styles["TableHeader"]
        self.style_right = styles["SmallRight"]
        self.style_h1 = styles["Heading1"]
        self.style_h2 = styles["Heading2"]
        self.style_h3 = styles["Heading3"]
        self.style_h4 = styles["Heading4"]
        # Headings are centered; the timestamp style is small right-aligned text.
        self.style_h1.alignment = TA_CENTER
        self.style_h2.alignment = TA_CENTER
        self.style_right.alignment = TA_RIGHT
        self.style_right.fontSize = 8
        # Page margin in points, also reused as vertical spacing in generate().
        self.margin = 24
        # Horizontal tables: first COLUMN is the dark header strip.
        self.horizontal_table_style = TableStyle([
            ('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
            ('VALIGN', (0, 0), (-1, -1), "TOP"),
            ('BOX', (0, 0), (-1, -1), 0.25, colors.black),
            ('BACKGROUND', (0, 0), (0, -1), colors.darkgrey),
            ('TEXTCOLOR', (0, 0), (0, -1), colors.white),
        ])
        # Vertical tables: first ROW is the dark header strip.
        self.vertical_table_style = TableStyle([
            ('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
            ('VALIGN', (0, 0), (-1, -1), "TOP"),
            ('BOX', (0, 0), (-1, -1), 0.25, colors.black),
            ('BACKGROUND', (0, 0), (-1, 0), colors.darkgrey),
            ('TEXTCOLOR', (0, 0), (-1, 0), colors.white),
        ])
def wrap_table(self, table_rows, mode=TableMode.HORIZONTAL):
if not table_rows:
return CustomParagraph(NO_INFO_PLACEHOLDER, self.style_normal)
formatted_rows = []
for row_number, row in enumerate(table_rows):
if mode == TableMode.HORIZONTAL:
formatted_rows.append([
CustomParagraph(row[0], self.style_th),
] + list(
map(lambda cell: CustomParagraph(str(cell), self.style_normal), row[1:])))
else:
style = self.style_th if row_number == 0 else self.style_normal
formatted_rows.append(list(map(lambda cell: CustomParagraph(str(cell), style), row)))
table = Table(formatted_rows, colWidths='*')
if mode == TableMode.HORIZONTAL:
table.setStyle(self.horizontal_table_style)
else:
table.setStyle(self.vertical_table_style)
return table
def wrap_paragraph(self, content, style=None):
style = style or self.style_normal
return CustomParagraph(content, style)
def get_basic_information_table(self):
table_rows = [
[
'Organization\'s Legal Name',
self.partner.legal_name,
],
[
'Alias (if applicable)',
self.partner.profile.alias_name,
],
[
'Acronym (If applicable)',
self.partner.profile.acronym,
],
[
'Organization\'s former Legal Name (optional)',
self.partner.profile.former_legal_name,
],
[
'Country of Origin',
self.partner.get_country_code_display(),
],
[
'Type of organization',
self.partner.get_display_type_display(),
],
]
return self.wrap_table(table_rows)
def get_legal_paragraph(self):
return ListFlowable([
ListItem([
self.wrap_paragraph('Year of establishment in country of operation', style=self.style_h4),
self.wrap_paragraph(self.partner.profile.year_establishment),
]),
ListItem([
self.wrap_paragraph('Is the organization registered to operate in the country?', style=self.style_h4),
self.wrap_paragraph(BOOLEAN_DISPLAY[self.partner.profile.registered_to_operate_in_country]),
]),
ListItem([
self.wrap_paragraph('Registration comment', style=self.style_h4),
self.wrap_paragraph(self.partner.profile.missing_registration_document_comment),
]),
ListItem([
self.wrap_paragraph('Does the Organization have a Governing Document?', style=self.style_h4),
self.wrap_paragraph(BOOLEAN_DISPLAY[self.partner.profile.have_governing_document]),
]),
], bulletType='a')
def get_mailing_address_table(self):
table_rows = [
[
'Type of mailing address',
self.partner.mailing_address.get_mailing_type_display(),
],
[
'Street Address',
self.partner.mailing_address.street,
],
[
'City',
self.partner.mailing_address.city,
],
[
'Country',
self.partner.mailing_address.get_country_display(),
],
[
'Zip Code (optional)',
self.partner.mailing_address.zip_code,
],
[
'Telephone',
self.partner.mailing_address.telephone,
],
[
'Fax (optional)',
self.partner.mailing_address.fax,
],
[
'Website (optional)',
self.partner.mailing_address.website,
],
[
'Organization Email (optional)',
self.partner.mailing_address.org_email,
],
]
return self.wrap_table(table_rows)
def get_key_personnel_table(self):
table_rows = [
[
'Does your organization have a board of director(s)?',
BOOLEAN_DISPLAY[self.partner.profile.have_board_directors],
],
[
'Does your organization have any other authorized officers who are not listed above?',
BOOLEAN_DISPLAY[self.partner.profile.have_authorised_officers],
],
]
return self.wrap_table(table_rows)
def get_organization_head_table(self):
tables = []
for org_head in self.partner.organisation_heads.order_by('-created'):
table_rows = [
[
'Full name',
getattr(org_head, 'fullname') or NO_INFO_PLACEHOLDER,
],
[
'Job Title/Position',
getattr(org_head, 'job_title') or NO_INFO_PLACEHOLDER,
],
[
'Telephone',
getattr(org_head, 'telephone') or NO_INFO_PLACEHOLDER,
],
[
'Mobile (optional)',
getattr(org_head, 'mobile') or NO_INFO_PLACEHOLDER,
],
[
'Fax (optional)',
getattr(org_head, 'fax') or NO_INFO_PLACEHOLDER,
],
[
'Email',
getattr(org_head, 'email') or NO_INFO_PLACEHOLDER,
],
]
tables.append(ListItem(self.wrap_table(table_rows)))
return ListFlowable(tables, bulletType='a')
def get_connectivity_paragraph(self):
return ListFlowable([
ListItem([
self.wrap_paragraph(
'Does the organization have reliable access to internet in all of its operations?',
style=self.style_h4
),
self.wrap_paragraph(BOOLEAN_DISPLAY[self.partner.profile.connectivity]),
]),
ListItem([
self.wrap_paragraph(
'Please explain how communication is done with non-connected operations', style=self.style_h4
),
self.wrap_paragraph(self.partner.profile.connectivity_excuse),
]),
], bulletType='a')
def get_working_languages_paragraph(self):
return ListFlowable([
ListItem([
self.wrap_paragraph('Working language(s) of your organization', style=self.style_h4),
self.wrap_paragraph(
", ".join([WORKING_LANGUAGES_DISPLAY[code] for code in self.partner.profile.working_languages])
),
]),
ListItem([
self.wrap_paragraph('If other, please state', style=self.style_h4),
self.wrap_paragraph(self.partner.profile.working_languages_other),
]),
], bulletType='a')
def get_ethics_paragraphs(self):
return ListFlowable([
ListItem([
self.wrap_paragraph(
'Briefly describe the organization’s mechanisms to safeguard against the violation '
'and abuse of beneficiaries, including sexual exploitation and abuse.',
style=self.style_h4
),
self.wrap_paragraph(self.partner.mandate_mission.ethic_safeguard_comment),
]),
ListItem([
self.wrap_paragraph(
'Are these mechanisms formally documented in an organizational policy or code of conduct?',
style=self.style_h4
),
self.wrap_paragraph(BOOLEAN_DISPLAY[self.partner.mandate_mission.ethic_safeguard]),
]),
ListItem([
self.wrap_paragraph(
'Briefly describe the organization’s mechanisms to safeguard against '
'fraud, corruption and other unethical behaviour.',
style=self.style_h4
),
self.wrap_paragraph(self.partner.mandate_mission.ethic_fraud_comment),
]),
ListItem([
self.wrap_paragraph(
'Are these mechanisms formally documented in an organizational policy or code of conduct?',
style=self.style_h4
),
self.wrap_paragraph(BOOLEAN_DISPLAY[self.partner.mandate_mission.ethic_fraud]),
]),
], bulletType='a')
def get_experiences_table(self):
table_rows = []
for experience in self.partner.experiences.all():
table_rows.append((
f"{experience.specialization.category.name}: {experience.specialization.name}",
experience.get_years_display()
))
return self.wrap_table(table_rows)
def get_country_presence_paragraph(self):
return ListFlowable([
ListItem([
self.wrap_paragraph('Country', style=self.style_h4),
self.wrap_paragraph(
'; '.join([p.admin_level_1.name for p in self.partner.location_field_offices.all()])
),
]),
ListItem([
self.wrap_paragraph('Number of staff in country', style=self.style_h4),
self.wrap_paragraph(self.partner.get_staff_in_country_display()),
]),
ListItem([
self.wrap_paragraph(
'Briefly describe the organization\'s engagement with the communities in which you operate',
style=self.style_h4
),
self.wrap_paragraph(self.partner.engagement_operate_desc),
]),
], bulletType='a')
def get_security_paragraph(self):
return ListFlowable([
ListItem([
self.wrap_paragraph(
'Does the organization have the ability to work in high-risk security locations?',
style=self.style_h4
),
self.wrap_paragraph(
BOOLEAN_DISPLAY[self.partner.mandate_mission.security_high_risk_locations]
),
]),
ListItem([
self.wrap_paragraph(
'Does the organization have policies, procedures and practices related to security risk management',
style=self.style_h4
),
self.wrap_paragraph(BOOLEAN_DISPLAY[self.partner.mandate_mission.security_high_risk_policy]),
]),
ListItem([
self.wrap_paragraph(
'Briefly describe the organization\'s ability, if any, to scale-up operations in emergencies or '
'other situations requiring rapid response.',
style=self.style_h4
),
self.wrap_paragraph(self.partner.mandate_mission.security_desc),
]),
], bulletType='a')
def get_budget_table(self):
table_rows = []
for budget in get_recent_budgets_for_partner(self.partner):
table_rows.append((
str(budget.year),
budget.budget and budget.get_budget_display(),
))
return self.wrap_table(table_rows)
def get_major_donors_paragraph(self):
return ListFlowable([
ListItem([
self.wrap_paragraph(
'Please select the type of donors that fund your agency',
style=self.style_h4
),
self.wrap_paragraph(
', '.join([DONORS_DISPLAY[code] for code in self.partner.fund.major_donors])
),
]),
ListItem([
self.wrap_paragraph(
'Please list your main donors for programme activities',
style=self.style_h4
),
self.wrap_paragraph(self.partner.fund.main_donors_list),
]),
ListItem([
self.wrap_paragraph(
'Please list your main donors for core funding',
style=self.style_h4
),
self.wrap_paragraph(self.partner.fund.source_core_funding),
]),
], bulletType='a')
def get_collaborations_partnership_paragraph(self):
items = []
for cp in self.partner.collaborations_partnership.all():
items.append(ListItem([
self.wrap_paragraph(
cp.agency.name,
style=self.style_h4
),
self.wrap_paragraph(cp.description),
]))
return ListFlowable(items, bulletType='a')
def get_accreditations_table(self):
table_rows = [
(
'Certifying/Accrediting Body',
'Date Received',
),
]
for acc in self.partner.collaboration_evidences.filter(mode=COLLABORATION_EVIDENCE_MODES.accreditation):
table_rows.append((
acc.organization_name,
format_date(acc.date_received)
))
return self.wrap_table(table_rows, mode=TableMode.VERTICAL)
def get_references_table(self):
table_rows = [
(
'Name of referring organization',
'Date Received',
),
]
for ref in self.partner.collaboration_evidences.filter(mode=COLLABORATION_EVIDENCE_MODES.reference):
table_rows.append((
ref.organization_name,
format_date(ref.date_received)
))
return self.wrap_table(table_rows, mode=TableMode.VERTICAL)
def get_internal_controls_table(self):
items = []
for ic in self.partner.internal_controls.all():
items.append(ListItem([
self.wrap_paragraph(
f'<b>{ic.get_functional_responsibility_display()}</b>: {BOOLEAN_DISPLAY[ic.segregation_duties]}'
),
self.wrap_paragraph(ic.comment),
]))
return ListFlowable(items, bulletType='a')
def get_policy_areas_table(self):
table_rows = [
(
'Area of Responsibility',
'Documented Policies?',
),
]
for ap in self.partner.area_policies.all():
table_rows.append((
ap.get_area_display(),
BOOLEAN_DISPLAY[ap.document_policies],
))
return self.wrap_table(table_rows, mode=TableMode.VERTICAL)
def get_audit_reports_table(self):
table_rows = [
(
'Audit Type',
'Documented',
),
]
for ar in self.partner.audit_reports.all():
table_rows.append((
ar.get_org_audit_display(),
BOOLEAN_DISPLAY[bool(ar.most_recent_audit_report or ar.link_report)],
))
return self.wrap_table(table_rows, mode=TableMode.VERTICAL)
def get_capacity_assessments_item(self):
if not self.partner.audit.regular_capacity_assessments:
return Spacer(1, 0)
table_rows = [
(
'Assessment Type',
'Documented',
),
]
for ca in self.partner.capacity_assessments.all():
table_rows.append((
ca.get_assessment_type_display(),
BOOLEAN_DISPLAY[bool(ca.report_file or ca.report_url)],
))
return self.wrap_table(table_rows, mode=TableMode.VERTICAL)
def generate(self):
document = SimpleDocTemplate(
self.file_path,
title=self.partner.legal_name,
rightMargin=self.margin,
leftMargin=self.margin,
topMargin=self.margin,
bottomMargin=self.margin
)
paragraphs = []
timestamp = timezone.now()
paragraphs.append(CustomParagraph(
format_datetime(timestamp, 'medium', tzinfo=self.tzinfo), self.style_right
))
paragraphs.append(CustomParagraph(self.partner.legal_name, self.style_h1))
paragraphs.append(Spacer(1, self.margin))
main_content = [
ListItem([
CustomParagraph('Identification', style=self.style_h3),
CustomParagraph('Basic Information', self.style_h4),
self.get_basic_information_table(),
CustomParagraph('Legal Status', self.style_h4),
self.get_legal_paragraph(),
Spacer(1, self.margin / 2)
]),
ListItem([
CustomParagraph('Contact information', style=self.style_h3),
CustomParagraph('Mailing Address', self.style_h4),
self.get_mailing_address_table(),
CustomParagraph('Head(s) of Organization', self.style_h4),
self.get_organization_head_table(),
CustomParagraph('Key Personnel', self.style_h4),
self.get_key_personnel_table(),
CustomParagraph('Connectivity', self.style_h4),
self.get_connectivity_paragraph(),
CustomParagraph('Working Languages', self.style_h4),
self.get_working_languages_paragraph(),
Spacer(1, self.margin / 2)
]),
ListItem([
CustomParagraph('Mandate & Mission', style=self.style_h3),
CustomParagraph(
'Briefly state the background and rationale for the establishment of the organization',
self.style_h4
),
CustomParagraph(self.partner.mandate_mission.background_and_rationale, self.style_normal),
CustomParagraph('Briefly state the mandate and mission of the organization', self.style_h4),
CustomParagraph(self.partner.mandate_mission.mandate_and_mission, self.style_normal),
CustomParagraph('Briefly describe the organization\'s governance structure', self.style_h4),
CustomParagraph(self.partner.mandate_mission.governance_structure, self.style_normal),
CustomParagraph('Ethics', self.style_h4),
self.get_ethics_paragraphs(),
CustomParagraph('Experience(s)', self.style_h4),
self.get_experiences_table(),
CustomParagraph(
'Does your organization work with populations of concern as defined by UNHCR?', self.style_h4
),
CustomParagraph(BOOLEAN_DISPLAY[self.partner.mandate_mission.population_of_concern], self.style_normal),
CustomParagraph('Country Presence', self.style_h4),
self.get_country_presence_paragraph(),
CustomParagraph('Security', self.style_h4),
self.get_security_paragraph(),
Spacer(1, self.margin / 2)
]),
ListItem([
CustomParagraph('Funding', style=self.style_h3),
CustomParagraph(
'What is your organization\'s annual budget (in USD) for the current and two previous years?',
self.style_h4
),
self.get_budget_table(),
CustomParagraph('Major Donors', self.style_h4),
self.get_major_donors_paragraph(),
Spacer(1, self.margin / 2)
]),
ListItem([
CustomParagraph('Collaboration', style=self.style_h3),
CustomParagraph('Has your | |
{
"id": "56255f3807599c377bf0e5bf072359fd",
"username": "Chachou",
"email": "<EMAIL>",
}
response = self.client.get(
"/api/liveregistrations/",
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()["count"], 1)
self.assertEqual(
response.json()["results"],
[
{
"consumer_site": str(video.playlist.consumer_site.id),
"email": "<EMAIL>",
"id": str(liveregistration.id),
"is_registered": False,
"lti_user_id": "56255f3807599c377bf0e5bf072359fd",
"lti_id": str(video.playlist.lti_id),
"should_send_reminders": False,
"username": None,
"video": str(video.id),
}
],
)
# if we try to set a new registration with the email in the token, it won't be allowed
response = self.client.post(
"/api/liveregistrations/",
{"email": "<EMAIL>", "should_send_reminders": True},
content_type="application/json",
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
json.loads(response.content),
{
"lti_user_id": [
"This identified user is already registered "
"for this video and consumer site and course."
]
},
)
def test_api_liveregistration_create_role_none_email_empty(self):
    """Users with an empty email can register by setting one."""
    # Registration is only possible on a scheduled webinar (future start).
    video = VideoFactory(
        live_state=IDLE,
        live_type=RAW,
        starting_at=timezone.now() + timedelta(days=100),
    )
    self.assertTrue(video.is_scheduled)
    # token with context_id
    jwt_token = AccessToken()
    jwt_token.payload["resource_id"] = str(video.id)
    jwt_token.payload["context_id"] = str(video.playlist.lti_id)
    jwt_token.payload["consumer_site"] = str(video.playlist.consumer_site.id)
    # The token carries an empty email; the user supplies one in the request.
    jwt_token.payload["user"] = {
        "email": "",
        "id": "56255f3807599c377bf0e5bf072359fd",
        "username": "Token",
    }
    response = self.client.post(
        "/api/liveregistrations/",
        {"email": "<EMAIL>", "should_send_reminders": True},
        content_type="application/json",
        HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
    )
    self.assertEqual(response.status_code, 201)
    created_liveregistration = LiveRegistration.objects.last()
    # The registration is created with the email from the request body,
    # while identity fields come from the token.
    self.assertEqual(
        json.loads(response.content),
        {
            "consumer_site": str(video.playlist.consumer_site.id),
            "email": "<EMAIL>",
            "id": str(created_liveregistration.id),
            "is_registered": True,
            "lti_id": str(video.playlist.lti_id),
            "lti_user_id": "56255f3807599c377bf0e5bf072359fd",
            "should_send_reminders": True,
            "username": "Token",
            "video": str(video.id),
        },
    )
def test_list_liveregistration_token_lti_wrong_is_registered_field(
    self,
):
    """Filtering on is_registered excludes records whose flag is False.

    An LTI token can list registrations, but a registration that has not
    set is_registered to True must not be returned when the list request
    is filtered on that flag.
    """
    scheduled_video = VideoFactory(
        live_state=IDLE,
        live_type=RAW,
        starting_at=timezone.now() + timedelta(days=100),
    )
    # Matching video/lti_user_id/consumer_site, but not registered yet.
    LiveRegistrationFactory(
        consumer_site=scheduled_video.playlist.consumer_site,
        email="<EMAIL>",
        is_registered=False,
        lti_id="Maths",
        lti_user_id="56255f3807599c377bf0e5bf072359fd",
        video=scheduled_video,
    )
    jwt_token = AccessToken()
    jwt_token.payload.update(
        {
            "resource_id": str(scheduled_video.id),
            "consumer_site": str(scheduled_video.playlist.consumer_site.id),
            "context_id": "Maths",
            "roles": ["student"],
            "user": {
                "id": "56255f3807599c377bf0e5bf072359fd",
                "username": "Chachou",
                "email": "<EMAIL>",
            },
        }
    )
    response = self.client.get(
        "/api/liveregistrations/?is_registered=True",
        HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
    )
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.json()["count"], 0)
def test_list_liveregistration_role_admin_instruc_email_with_consumer_with_no_filtered(
    self,
):
    """
    Admin/instructor can access all registrations that are registered or not.
    """
    video = VideoFactory(
        live_state=IDLE,
        live_type=RAW,
        starting_at=timezone.now() + timedelta(days=100),
    )
    other_video = VideoFactory(
        live_state=IDLE,
        live_type=RAW,
        starting_at=timezone.now() + timedelta(days=100),
    )
    other_consumer = ConsumerSiteFactory()
    # Expected in results: same video/consumer_site/lti_id, not registered.
    liveregistration = LiveRegistrationFactory(
        email="<EMAIL>",
        consumer_site=video.playlist.consumer_site,
        is_registered=False,
        lti_user_id="56255f3807599c377bf0e5bf072359fd",
        lti_id="Maths",
        video=video,
    )
    # Excluded: same video and lti_user_id but different consumer_site.
    LiveRegistrationFactory(
        email="<EMAIL>",
        consumer_site=other_consumer,
        lti_user_id="56255f3807599c377bf0e5bf072359fd",
        lti_id="Maths",
        is_registered=True,
        video=video,
    )
    # Excluded: same consumer_site but different video.
    LiveRegistrationFactory(
        email="<EMAIL>",
        consumer_site=video.playlist.consumer_site,
        is_registered=True,
        lti_user_id="56255f3807599c377bf0e5bf072359fd",
        lti_id="Maths",
        video=other_video,
    )
    # Expected in results: same video and consumer_site, other lti_user_id.
    liveregistration2 = LiveRegistrationFactory(
        email="<EMAIL>",
        is_registered=True,
        consumer_site=video.playlist.consumer_site,
        lti_user_id="DIFFFF3807599c377bf0e5bf072359fd",
        lti_id="Maths",
        video=video,
    )
    # Excluded: different lti_id.
    LiveRegistrationFactory(
        email="<EMAIL>",
        consumer_site=video.playlist.consumer_site,
        is_registered=False,
        lti_user_id="56255f3807599c377bf0e5bf072359fd",
        lti_id="Maths 2",
        video=video,
    )
    # Expected in results: same video, is_registered False (no filter applied).
    liveregistration3 = LiveRegistrationFactory(
        email="<EMAIL>",
        is_registered=False,
        consumer_site=video.playlist.consumer_site,
        lti_user_id="NEWDIFFFF3807599c377bf0e5bf072359fd",
        lti_id="Maths",
        video=video,
    )
    # token has context_id and email
    jwt_token = AccessToken()
    jwt_token.payload["resource_id"] = str(video.id)
    jwt_token.payload["consumer_site"] = str(video.playlist.consumer_site.id)
    jwt_token.payload["context_id"] = "Maths"
    jwt_token.payload["roles"] = [random.choice(["administrator", "instructor"])]
    jwt_token.payload["user"] = {
        "id": "56255f3807599c377bf0e5bf072359fd",
        "username": "Chachou",
        "email": "<EMAIL>",
    }
    response = self.client.get(
        "/api/liveregistrations/",
        HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
    )
    self.assertEqual(response.status_code, 200)
    # Without an is_registered filter, all three registrations for this
    # video/consumer_site/lti_id are returned, whatever their flag value.
    self.assertEqual(response.json()["count"], 3)
    self.assertEqual(
        response.json()["results"],
        [
            {
                "consumer_site": str(video.playlist.consumer_site_id),
                "email": liveregistration.email,
                "id": str(liveregistration.id),
                "is_registered": False,
                "lti_id": "Maths",
                "lti_user_id": "56255f3807599c377bf0e5bf072359fd",
                "should_send_reminders": False,
                "username": None,
                "video": str(video.id),
            },
            {
                "consumer_site": str(video.playlist.consumer_site_id),
                "email": liveregistration2.email,
                "id": str(liveregistration2.id),
                "is_registered": True,
                "lti_id": "Maths",
                "lti_user_id": "DIFFFF3807599c377bf0e5bf072359fd",
                "should_send_reminders": False,
                "username": None,
                "video": str(video.id),
            },
            {
                "consumer_site": str(video.playlist.consumer_site_id),
                "email": liveregistration3.email,
                "id": str(liveregistration3.id),
                "is_registered": False,
                "lti_id": "Maths",
                "lti_user_id": "NEWDIFFFF3807599c377bf0e5bf072359fd",
                "should_send_reminders": False,
                "username": None,
                "video": str(video.id),
            },
        ],
    )
def test_api_liveregistration_post_attendance_no_payload(self):
    """Posting to push_attendance without any payload (and therefore
    without credentials) is rejected as unauthenticated."""
    response = self.client.post("/api/liveregistrations/push_attendance/")
    self.assertEqual(response.status_code, 401)
    self.assertEqual(
        response.json(), {"detail": "Authentication credentials were not provided."}
    )
def test_api_liveregistration_post_attendance_no_attendance(self):
    """Posting without the live_attendance field is a bad request."""
    video = VideoFactory()
    jwt_token = AccessToken()
    jwt_token.payload.update(
        {
            "resource_id": str(video.id),
            "context_id": str(video.playlist.lti_id),
            "consumer_site": str(video.playlist.consumer_site.id),
            "roles": [random.choice(["administrator", "instructor", "student", ""])],
            "user": {"id": "5555555"},
        }
    )
    # No body at all: the serializer cannot find live_attendance.
    response = self.client.post(
        "/api/liveregistrations/push_attendance/",
        content_type="application/json",
        HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
    )
    self.assertEqual(response.status_code, 400)
    self.assertEqual(response.json(), {"detail": "Invalid request."})
def test_api_liveregistration_post_attendance_token_lti_email_none_previous_none(
    self,
):
    """Endpoint push_attendance works with no email and no previous record."""
    video = VideoFactory()
    # Any role may push attendance; the token's user has a null email.
    jwt_token = AccessToken()
    jwt_token.payload["context_id"] = str(video.playlist.lti_id)
    jwt_token.payload["consumer_site"] = str(video.playlist.consumer_site.id)
    jwt_token.payload["resource_id"] = str(video.id)
    jwt_token.payload["roles"] = [
        random.choice(["administrator", "instructor", "student", ""])
    ]
    jwt_token.payload["user"] = {
        "email": None,
        "id": "56255f3807599c377bf0e5bf072359fd",
        "username": "Token",
    }
    response = self.client.post(
        "/api/liveregistrations/push_attendance/",
        {"live_attendance": {"data": "test"}},
        content_type="application/json",
        HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
    )
    self.assertEqual(response.status_code, 200)
    # A new record has been created from the token data.
    created_liveregistration = LiveRegistration.objects.last()
    self.assertEqual(
        json.loads(response.content),
        {
            "id": str(created_liveregistration.id),
            "live_attendance": {"data": "test"},
            "video": str(video.id),
        },
    )
    self.assertEqual(
        created_liveregistration.consumer_site, video.playlist.consumer_site
    )
    self.assertEqual(
        created_liveregistration.lti_user_id, "56255f3807599c377bf0e5bf072359fd"
    )
    self.assertEqual(created_liveregistration.email, None)
    self.assertEqual(created_liveregistration.username, "Token")
    self.assertEqual(created_liveregistration.live_attendance, {"data": "test"})
    # Pushing attendance alone does not register the user.
    self.assertEqual(created_liveregistration.is_registered, False)
def test_api_liveregistration_post_attendance_token_lti_existing_record(
    self,
):
    """Endpoint push_attendance updates an existing record."""
    video = VideoFactory()
    # Existing record matching the token's consumer_site/lti_id/lti_user_id,
    # already holding one attendance entry under "key1".
    liveregistration = LiveRegistrationFactory(
        consumer_site=video.playlist.consumer_site,
        email=None,
        is_registered=False,
        live_attendance={"key1": {"sound": "OFF", "tabs": "OFF"}},
        lti_user_id="56255f3807599c377bf0e5bf072359fd",
        lti_id="Maths",
        video=video,
    )
    self.assertEqual(LiveRegistration.objects.count(), 1)
    jwt_token = AccessToken()
    jwt_token.payload["context_id"] = "Maths"
    jwt_token.payload["consumer_site"] = str(video.playlist.consumer_site.id)
    jwt_token.payload["resource_id"] = str(video.id)
    # Any role may push attendance.
    jwt_token.payload["roles"] = [
        random.choice(["administrator", "instructor", "student", ""])
    ]
    jwt_token.payload["user"] = {
        "email": "<EMAIL>",
        "id": "56255f3807599c377bf0e5bf072359fd",
        "username": "Token",
    }
    timestamp = str(timezone.now())
    response = self.client.post(
        "/api/liveregistrations/push_attendance/",
        {"live_attendance": {timestamp: {"sound": "ON", "tabs": "OFF"}}},
        content_type="application/json",
        HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
    )
    liveregistration.refresh_from_db()
    self.assertEqual(response.status_code, 200)
    # The new attendance entry is merged with the existing one.
    self.assertEqual(
        json.loads(response.content),
        {
            "id": str(liveregistration.id),
            "live_attendance": {
                "key1": {"sound": "OFF", "tabs": "OFF"},
                timestamp: {"sound": "ON", "tabs": "OFF"},
            },
            "video": str(video.id),
        },
    )
    # no new object has been created
    self.assertEqual(LiveRegistration.objects.count(), 1)
    # update username and email with current token
    self.assertEqual(liveregistration.email, "<EMAIL>")
    self.assertEqual(liveregistration.username, "Token")
    self.assertEqual(
        liveregistration.live_attendance,
        {
            "key1": {"sound": "OFF", "tabs": "OFF"},
            timestamp: {"sound": "ON", "tabs": "OFF"},
        },
    )
def test_api_liveregistration_post_attendance_token_public(
    self,
):
    """Attendance cannot be pushed with a public (non-LTI) token."""
    video = VideoFactory()
    self.assertEqual(LiveRegistration.objects.count(), 0)
    # A token holding only the resource id has no LTI context.
    jwt_token = AccessToken()
    jwt_token.payload["resource_id"] = str(video.id)
    response = self.client.post(
        "/api/liveregistrations/push_attendance/",
        {"live_attendance": {"k2": "v2"}},
        content_type="application/json",
        HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
    )
    # The request is rejected and nothing is persisted.
    self.assertEqual(response.status_code, 404)
    self.assertEqual(
        response.json(),
        {"detail": "Attendance from public video is not implemented yet."},
    )
    self.assertEqual(LiveRegistration.objects.count(), 0)
def test_api_liveregistration_post_attendance_token_ok_user_record_empty_attendance(
    self,
):
    """Endpoint push_attendance updates an existing record without previous live_attendance."""
    video = VideoFactory()
    # Registered record that has never pushed attendance yet.
    liveregistration = LiveRegistrationFactory(
        consumer_site=video.playlist.consumer_site,
        email="<EMAIL>",
        is_registered=True,
        lti_user_id="56255f3807599c377bf0e5bf072359fd",
        lti_id="Maths",
        video=video,
    )
    self.assertEqual(LiveRegistration.objects.count(), 1)
    self.assertEqual(liveregistration.live_attendance, None)
    jwt_token = AccessToken()
    jwt_token.payload["consumer_site"] = str(video.playlist.consumer_site.id)
    jwt_token.payload["context_id"] = "Maths"
    jwt_token.payload["resource_id"] = str(video.id)
    jwt_token.payload["roles"] = [
        random.choice(["administrator", "instructor", "student", ""])
    ]
    # NOTE(review): "<KEY>" looks like a scrubbed placeholder — it should
    # presumably equal the factory's lti_user_id for the record to match;
    # verify against the original test.
    jwt_token.payload["user"] = {
        "email": "<EMAIL>",
        "id": "<KEY>",
        "username": "Token",
    }
    response = self.client.post(
        "/api/liveregistrations/push_attendance/",
        {"live_attendance": {"key1": "val1"}},
        content_type="application/json",
        HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
    )
    liveregistration.refresh_from_db()
    self.assertEqual(response.status_code, 200)
    self.assertEqual(
        json.loads(response.content),
        {
            "id": str(liveregistration.id),
            "live_attendance": {"key1": "val1"},
            "video": str(video.id),
        },
    )
    # no new object has been created
    self.assertEqual(LiveRegistration.objects.count(), 1)
    # liveregistration object updated with data from the token
    self.assertEqual(liveregistration.email, "<EMAIL>")
    self.assertEqual(liveregistration.username, "Token")
    # live_attendance has been set
    self.assertEqual(liveregistration.live_attendance, {"key1": "val1"})
def test_api_liveregistration_post_attendance_token_lti_no_update_username_email_none(
    self,
):
    """Endpoint push_attendance matches record and doesn't update email and username
    if they are not defined in the token"""
    video = VideoFactory()
    liveregistration = LiveRegistrationFactory(
        consumer_site=video.playlist.consumer_site,
        email="<EMAIL>",
        is_registered=True,
        lti_user_id="56255f3807599c377bf0e5bf072359fd",
        lti_id="Maths",
        username="Sylvie",
        video=video,
    )
    self.assertEqual(LiveRegistration.objects.count(), 1)
    self.assertEqual(liveregistration.live_attendance, None)
    jwt_token = AccessToken()
    jwt_token.payload["consumer_site"] = str(video.playlist.consumer_site.id)
    jwt_token.payload["context_id"] = "Maths"
    jwt_token.payload["resource_id"] = str(video.id)
    jwt_token.payload["roles"] = [
        random.choice(["administrator", "instructor", "student", ""])
    ]
    # The token's user dict carries no email/username keys at all.
    jwt_token.payload["user"] = {
        "id": "56255f3807599c377bf0e5bf072359fd",
    }
    response = self.client.post(
        "/api/liveregistrations/push_attendance/",
        {"live_attendance": {"key1": "val1"}},
        content_type="application/json",
        HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
    )
    liveregistration.refresh_from_db()
    self.assertEqual(response.status_code, 200)
    self.assertEqual(
        json.loads(response.content),
        {
            "id": str(liveregistration.id),
            "live_attendance": {"key1": "val1"},
            "video": str(video.id),
        },
    )
    # no new object has been created
    self.assertEqual(LiveRegistration.objects.count(), 1)
    # email and username keep their original values from the record
    self.assertEqual(liveregistration.email, "<EMAIL>")
    self.assertEqual(liveregistration.username, "Sylvie")
    # live_attendance has been set
    self.assertEqual(liveregistration.live_attendance, {"key1": "val1"})
def test_api_liveregistration_post_attendance_token_lti_no_update_username_email_empty(
    self,
):
    """Endpoint push_attendance matches record and doesn't update email and username
    if they are empty in the token"""
    video = VideoFactory()
    liveregistration = LiveRegistrationFactory(
        consumer_site=video.playlist.consumer_site,
        email="<EMAIL>",
        is_registered=True,
        lti_user_id="56255f3807599c377bf0e5bf072359fd",
        lti_id="Maths",
        username="Sylvie",
        video=video,
    )
    self.assertEqual(LiveRegistration.objects.count(), 1)
    self.assertEqual(liveregistration.live_attendance, None)
    jwt_token = AccessToken()
    jwt_token.payload["consumer_site"] = str(video.playlist.consumer_site.id)
    jwt_token.payload["context_id"] = "Maths"
    jwt_token.payload["resource_id"] = str(video.id)
    jwt_token.payload["roles"] = [
        random.choice(["administrator", "instructor", "student", ""])
    ]
    # The token's email and username are present but empty strings.
    jwt_token.payload["user"] = {
        "email": "",
        "id": "56255f3807599c377bf0e5bf072359fd",
        "username": "",
    }
    response = self.client.post(
        "/api/liveregistrations/push_attendance/",
        {"live_attendance": {"key1": "val1"}},
        content_type="application/json",
        HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
    )
    liveregistration.refresh_from_db()
    self.assertEqual(response.status_code, 200)
    self.assertEqual(
        json.loads(response.content),
        {
            "id": str(liveregistration.id),
            "live_attendance": {"key1": "val1"},
            "video": str(video.id),
        },
    )
    # no new object has been created
    self.assertEqual(LiveRegistration.objects.count(), 1)
    # email and username keep their original values from the record
    self.assertEqual(liveregistration.email, "<EMAIL>")
    self.assertEqual(liveregistration.username, "Sylvie")
    # live_attendance has been set
    self.assertEqual(liveregistration.live_attendance, {"key1": "val1"})
def test_api_liveregistration_post_attendance_token_with_could_match_other_records(
self,
):
"""Match the record with the combinaison consumer_site/lti_id/lti_user_id/video."""
video = VideoFactory()
| |
# nestcheck/diagnostics_tables.py (from ThomasEdwardRiley/nestcheck)
#!/usr/bin/env python
"""
High-level functions for calculating results of error analysis and diagnostic
tests for batches of nested sampling runs.
"""
import numpy as np
import pandas as pd
import nestcheck.error_analysis
import nestcheck.io_utils
import nestcheck.ns_run_utils
import nestcheck.parallel_utils as pu
import nestcheck.pandas_functions as pf
@nestcheck.io_utils.save_load_result
def run_list_error_values(run_list, estimator_list, estimator_names,
                          n_simulate=100, **kwargs):
    """Gets a data frame with calculation values and error diagnostics for each
    run in the input run list.

    NB when parallelised the results will not be produced in order (so results
    from some run number will not necessarily correspond to that number run in
    run_list).

    Parameters
    ----------
    run_list: list of dicts
        List of nested sampling run dicts.
    estimator_list: list of functions
        Estimators to apply to runs.
    estimator_names: list of strs
        Name of each func in estimator_list.
    n_simulate: int, optional
        Number of bootstrap replications to use on each run.
    thread_pvalue: bool, optional
        Whether or not to compute KS test diagnostic for correlations between
        threads within a run.
    bs_stat_dist: bool, optional
        Whether or not to compute statistical distance between bootstrap error
        distributions diagnostic.
    parallel: bool, optional
        Whether or not to parallelise - see parallel_utils.parallel_apply.
    save_name: str or None, optional
        See nestcheck.io_utils.save_load_result.
    save: bool, optional
        See nestcheck.io_utils.save_load_result.
    load: bool, optional
        See nestcheck.io_utils.save_load_result.
    overwrite_existing: bool, optional
        See nestcheck.io_utils.save_load_result.

    Returns
    -------
    df: pandas DataFrame
        Results table showing calculation values and diagnostics. Rows
        show different runs (or pairs of runs for pairwise comparisons).
        Columns have titles given by estimator_names and show results for the
        different functions in estimators_list.
    """
    thread_pvalue = kwargs.pop('thread_pvalue', False)
    bs_stat_dist = kwargs.pop('bs_stat_dist', False)
    parallel = kwargs.pop('parallel', True)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    assert len(estimator_list) == len(estimator_names), (
        'len(estimator_list) = {0} != len(estimator_names = {1}'
        .format(len(estimator_list), len(estimator_names)))
    # Calculation results
    # -------------------
    df = estimator_values_df(run_list, estimator_list, parallel=parallel,
                             estimator_names=estimator_names)
    # Build a ('calculation type', 'run') MultiIndex so all diagnostic
    # blocks below can be concatenated into one table.
    df.index = df.index.map(str)
    df['calculation type'] = 'values'
    df.set_index('calculation type', drop=True, append=True, inplace=True)
    df = df.reorder_levels(['calculation type', 'run'])
    # Bootstrap stds
    # --------------
    # Create bs_vals_df then convert to stds so bs_vals_df does not need to be
    # recomputed if bs_stat_dist is True
    bs_vals_df = bs_values_df(run_list, estimator_list, estimator_names,
                              n_simulate, parallel=parallel)
    # ddof=1: sample (unbiased) standard deviation over the bootstrap values.
    bs_std_df = bs_vals_df.applymap(lambda x: np.std(x, ddof=1))
    bs_std_df.index.name = 'run'
    bs_std_df['calculation type'] = 'bootstrap std'
    bs_std_df.set_index('calculation type', drop=True, append=True,
                        inplace=True)
    bs_std_df = bs_std_df.reorder_levels(['calculation type', 'run'])
    df = pd.concat([df, bs_std_df])
    # Pairwise KS p-values on threads
    # -------------------------------
    if thread_pvalue:
        t_vals_df = thread_values_df(
            run_list, estimator_list, estimator_names, parallel=parallel)
        t_d_df = pairwise_dists_on_cols(t_vals_df, earth_mover_dist=False,
                                        energy_dist=False)
        # Keep only the p value not the distance measures
        t_d_df = t_d_df.xs('ks pvalue', level='calculation type',
                           drop_level=False)
        # Append 'thread ' to calculation type
        t_d_df.index.set_levels(['thread ks pvalue'], level='calculation type',
                                inplace=True)
        df = pd.concat([df, t_d_df])
    # Pairwise distances on BS distributions
    # --------------------------------------
    if bs_stat_dist:
        b_d_df = pairwise_dists_on_cols(bs_vals_df)
        # Select only statistical distances - not KS pvalue as this is not
        # useful for the bootstrap resample distributions (see Higson 2018 for
        # more details).
        dists = ['ks distance', 'earth mover distance', 'energy distance']
        b_d_df = b_d_df.loc[pd.IndexSlice[dists, :], :]
        # Append 'bootstrap ' to calculation type
        new_ind = ['bootstrap ' +
                   b_d_df.index.get_level_values('calculation type'),
                   b_d_df.index.get_level_values('run')]
        b_d_df.set_index(new_ind, inplace=True)
        df = pd.concat([df, b_d_df])
    return df
@nestcheck.io_utils.save_load_result
def estimator_values_df(run_list, estimator_list, **kwargs):
    """Apply each estimator to each run and tabulate the results.

    NB when parallelised the results will not be produced in order (so
    row i does not necessarily correspond to run_list[i]).

    Parameters
    ----------
    run_list: list of dicts
        List of nested sampling run dicts.
    estimator_list: list of functions
        Estimators to apply to runs.
    estimator_names: list of strs, optional
        Column title for each func in estimator_list.
    parallel: bool, optional
        Whether or not to parallelise - see parallel_utils.parallel_apply.
    save_name: str or None, optional
        See nestcheck.io_utils.save_load_result.
    save: bool, optional
        See nestcheck.io_utils.save_load_result.
    load: bool, optional
        See nestcheck.io_utils.save_load_result.
    overwrite_existing: bool, optional
        See nestcheck.io_utils.save_load_result.

    Returns
    -------
    df: pandas DataFrame
        One row per run and one column (titled from estimator_names) per
        estimator.
    """
    default_names = ['est_' + str(i) for i in range(len(estimator_list))]
    estimator_names = kwargs.pop('estimator_names', default_names)
    parallel = kwargs.pop('parallel', True)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    values_list = pu.parallel_apply(
        nestcheck.ns_run_utils.run_estimators, run_list,
        func_args=(estimator_list,), parallel=parallel)
    df = pd.DataFrame(np.stack(values_list, axis=0), columns=estimator_names)
    df.index.name = 'run'
    return df
def error_values_summary(error_values, **summary_df_kwargs):
    """Get summary statistics about calculation errors, including estimated
    implementation errors.

    Parameters
    ----------
    error_values: pandas DataFrame
        Of format output by run_list_error_values (look at it for more
        details).
    summary_df_kwargs: dict, optional
        See pandas_functions.summary_df docstring for more details.

    Returns
    -------
    df: pandas DataFrame
        Table showing means and standard deviations of results and diagnostics
        for the different runs. Also contains estimated numerical uncertainties
        on results.
    """
    df = pf.summary_df_from_multi(error_values, **summary_df_kwargs)
    # get implementation stds
    imp_std, imp_std_unc, imp_frac, imp_frac_unc = \
        nestcheck.error_analysis.implementation_std(
            df.loc[('values std', 'value')],
            df.loc[('values std', 'uncertainty')],
            df.loc[('bootstrap std mean', 'value')],
            df.loc[('bootstrap std mean', 'uncertainty')])
    df.loc[('implementation std', 'value'), df.columns] = imp_std
    df.loc[('implementation std', 'uncertainty'), df.columns] = imp_std_unc
    df.loc[('implementation std frac', 'value'), :] = imp_frac
    df.loc[('implementation std frac', 'uncertainty'), :] = imp_frac_unc
    # Get implementation RMSEs (calculated using the values RMSE instead of
    # values std)
    if 'values rmse' in set(df.index.get_level_values('calculation type')):
        imp_rmse, imp_rmse_unc, imp_frac, imp_frac_unc = \
            nestcheck.error_analysis.implementation_std(
                df.loc[('values rmse', 'value')],
                df.loc[('values rmse', 'uncertainty')],
                df.loc[('bootstrap std mean', 'value')],
                df.loc[('bootstrap std mean', 'uncertainty')])
        df.loc[('implementation rmse', 'value'), df.columns] = imp_rmse
        df.loc[('implementation rmse', 'uncertainty'), df.columns] = \
            imp_rmse_unc
        df.loc[('implementation rmse frac', 'value'), :] = imp_frac
        df.loc[('implementation rmse frac', 'uncertainty'), :] = imp_frac_unc
    # Return only the calculation types we are interested in, in order.
    # Types absent from the index (e.g. when true values were not supplied)
    # are silently skipped by the 'if calc in ...' guard.
    calcs_to_keep = ['true values', 'values mean', 'values std',
                     'values rmse', 'bootstrap std mean',
                     'implementation std', 'implementation std frac',
                     'implementation rmse', 'implementation rmse frac',
                     'thread ks pvalue mean', 'bootstrap ks distance mean',
                     'bootstrap energy distance mean',
                     'bootstrap earth mover distance mean']
    df = pd.concat([df.xs(calc, level='calculation type', drop_level=False) for
                    calc in calcs_to_keep if calc in
                    df.index.get_level_values('calculation type')])
    return df
def run_list_error_summary(run_list, estimator_list, estimator_names,
                           n_simulate, **kwargs):
    """Compute error values for the runs, then summarise them.

    Convenience wrapper chaining run_list_error_values and
    error_values_summary; see those two docstrings for descriptions of the
    parameters and of the output.
    """
    # Split off the kwargs destined for the summary step; the remainder is
    # forwarded to run_list_error_values.
    summary_kwargs = {
        'true_values': kwargs.pop('true_values', None),
        'include_true_values': kwargs.pop('include_true_values', False),
        'include_rmse': kwargs.pop('include_rmse', False),
    }
    error_values = run_list_error_values(
        run_list, estimator_list, estimator_names, n_simulate, **kwargs)
    return error_values_summary(error_values, **summary_kwargs)
def bs_values_df(run_list, estimator_list, estimator_names, n_simulate,
                 **kwargs):
    """Bootstrap-resample each run and tabulate the resampled values.

    Parameters
    ----------
    run_list: list of dicts
        List of nested sampling run dicts.
    estimator_list: list of functions
        Estimators to apply to runs.
    estimator_names: list of strs
        Name of each func in estimator_list.
    n_simulate: int
        Number of bootstrap replications to use on each run.
    kwargs:
        Kwargs to pass to parallel_apply.

    Returns
    -------
    df: pandas data frame
        One row per run and one column per estimator; each cell holds a 1d
        array of n_simulate bootstrap-resampled values.
    """
    tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'desc': 'bs values'})
    assert len(estimator_list) == len(estimator_names), (
        'len(estimator_list) = {0} != len(estimator_names = {1}'
        .format(len(estimator_list), len(estimator_names)))
    bs_values_list = pu.parallel_apply(
        nestcheck.error_analysis.run_bootstrap_values, run_list,
        func_args=(estimator_list,), func_kwargs={'n_simulate': n_simulate},
        tqdm_kwargs=tqdm_kwargs, **kwargs)
    # Column i of the table gathers row i of every run's bootstrap array.
    df = pd.DataFrame(
        {name: [arr[i, :] for arr in bs_values_list]
         for i, name in enumerate(estimator_names)})
    # Sanity check: every cell of the first row must contain exactly
    # n_simulate bootstrap replications.
    for cell in df.loc[0]:
        assert cell.shape == (n_simulate,), (
            'Should be n_simulate=' + str(n_simulate) + ' values in ' +
            'each cell. The cell contains array with shape ' +
            str(cell.shape))
    return df
def thread_values_df(run_list, estimator_list, estimator_names, **kwargs):
"""Calculates estimator values for the constituent threads of the input
runs.
Parameters
----------
run_list: list of dicts
List of nested sampling run dicts.
estimator_list: list of functions
Estimators to apply to runs.
estimator_names: list of strs
Name of each func in estimator_list.
kwargs:
Kwargs to pass to parallel_apply.
Returns
-------
df: pandas data frame
Columns represent estimators and rows represent runs.
Each cell contains a 1d numpy array with length equal to the number
of threads in the run, containing the results from evaluating the
estimator on each thread.
"""
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'desc': 'thread values'})
assert len(estimator_list) == len(estimator_names), (
'len(estimator_list) = {0} != len(estimator_names = {1}'
.format(len(estimator_list), len(estimator_names)))
# get thread results
thread_vals_arrays = pu.parallel_apply(
nestcheck.error_analysis.run_thread_values, run_list,
func_args=(estimator_list,), tqdm_kwargs=tqdm_kwargs, | |
# Multi-pass-GAN (from maxwerhahn/Multi-pass-GAN)
#******************************************************************************
#
# tempoGAN: A Temporally Coherent, Volumetric GAN for Super-resolution Fluid Flow
# Copyright 2018 <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
#******************************************************************************
import time
import shutil
import sys
import math
import gc
import scipy
import numpy as np
import os
import faulthandler
faulthandler.enable()
import tensorflow as tf
# load manta tools
sys.path.append("../tools_wscale")
import tilecreator_t as tc
import uniio
import fluiddataloader as FDL
import paramhelpers as ph
from GAN import GAN, lrelu
# ---------------------------------------------------------------------------
# Command-line parameters (ph.getParam(name, default)); ints parsed via int(),
# boolean-ish flags via int(...)>0. ph.checkUnusedParams() at the end rejects
# unknown arguments.
# ---------------------------------------------------------------------------
outputOnly = int(ph.getParam( "out", False ))>0 # output/generation mode, main mode switch
basePath = ph.getParam( "basePath", '../2ddata_gan/' )
randSeed = int(ph.getParam( "randSeed", 1 )) # seed for np and tf initialization
load_model_test_1 = int(ph.getParam( "load_model_test_1", -1 )) # the number of the test to load a model from. can be used in training and output mode. -1 to not load a model
load_model_test_2 = int(ph.getParam( "load_model_test_2", -1 )) # the number of the test to load a model from. can be used in training and output mode. -1 to not load a model
load_model_test_3 = int(ph.getParam( "load_model_test_3", -1 )) # the number of the test to load a model from. can be used in training and output mode. -1 to not load a model
load_model_no_1 = int(ph.getParam( "load_model_no_1", -1 )) # number (checkpoint index) of the model to load
load_model_no_2 = int(ph.getParam( "load_model_no_2", -1 )) # number (checkpoint index) of the model to load
load_model_no_3 = int(ph.getParam( "load_model_no_3", -1 )) # number (checkpoint index) of the model to load
simSizeLow = int(ph.getParam( "simSize", 64 )) # tiles of low res sim
tileSizeLow = int(ph.getParam( "tileSize", 16 )) # size of low res tiles
upRes = int(ph.getParam( "upRes", 4 )) # scaling factor
#Data and Output
packedSimPath = ph.getParam( "packedSimPath", '/data/share/GANdata/2ddata_sim/' ) # path to training data
fromSim = int(ph.getParam( "fromSim", 1000 )) # range of sim data to use, start index
frame_min = int(ph.getParam( "frame_min", 0 ))
genModel = ph.getParam( "genModel", 'gen_test' ) # name of the generator model variant (previous comment "path to training data" was a copy-paste)
discModel = ph.getParam( "discModel", 'disc_test' ) # name of the discriminator model variant
#Training
batch_norm = int(ph.getParam( "batchNorm", False ))>0 # apply batch normalization to conv and deconv layers
pixel_norm = int(ph.getParam( "pixelNorm", True ))>0 # apply pixel normalization to conv and deconv layers (previous comment said batch norm)
useVelocities = int(ph.getParam( "useVelocities", 0 )) # use velocities or not
useVorticities = int(ph.getParam( "useVorticities", 0 )) # use vorticities or not
useFlags = int(ph.getParam( "useFlags", 0 )) # use flags or not
useK_Eps_Turb = int(ph.getParam( "useK_Eps_Turb", 0 ))
transposeAxis = int(ph.getParam( "transposeAxis", 0 )) # axis-transpose mode applied to loaded data — TODO confirm exact semantics (used elsewhere in the file)
#Test and Save
testPathStartNo = int(ph.getParam( "testPathStartNo", 0 ))
frame_max = int(ph.getParam( "frame_max", 200 ))
change_velocity = int(ph.getParam( "change_velocity", False ))
upsampling_mode = int(ph.getParam( "upsamplingMode", 2 ))
upsampled_data = int(ph.getParam ( "upsampledData", False))
generateUni = int(ph.getParam("genUni", False))
usePixelShuffle = int(ph.getParam("usePixelShuffle", False))
addBicubicUpsample = int(ph.getParam("addBicubicUpsample", False))
add_adj_idcs1 = int(ph.getParam("add_adj_idcs1", False))
add_adj_idcs2 = int(ph.getParam("add_adj_idcs2", False))
add_adj_idcs3 = int(ph.getParam("add_adj_idcs3", False))
load_emas = int(ph.getParam("loadEmas", False)) # load EMA (exponential moving average) checkpoints instead of the raw models
firstNNArch = int(ph.getParam("firstNNArch", True))
upsampleMode = int(ph.getParam("upsampleMode", 1))
# parameters for growing approach
use_res_net1 = int(ph.getParam( "use_res_net1", False ))
use_res_net2 = int(ph.getParam( "use_res_net2", False ))
use_res_net3 = int(ph.getParam( "use_res_net3", False ))
use_mb_stddev = int(ph.getParam( "use_mb_stddev", False ))
start_fms_1 = int(ph.getParam("startFms1", 512)) # feature-map counts / kernel size per network (1..3)
max_fms_1 = int(ph.getParam("maxFms1", 256))
filterSize_1 = int(ph.getParam("filterSize1", 3))
start_fms_2 = int(ph.getParam("startFms2", 512))
max_fms_2 = int(ph.getParam("maxFms2", 256))
filterSize_2 = int(ph.getParam("filterSize2", 3))
start_fms_3 = int(ph.getParam("startFms3", 512))
max_fms_3 = int(ph.getParam("maxFms3", 256))
filterSize_3 = int(ph.getParam("filterSize3", 3))
velScale = float(ph.getParam("velScale", 1.0)) # scale factor applied to velocity channels after loading
gpu_touse = int(ph.getParam("gpu", 0))
# Pin TF to the requested GPU before the session is created below.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]= str(gpu_touse)
ph.checkUnusedParams()
# initialize derived sizes and load the simulation data
simSizeHigh = simSizeLow * upRes
tileSizeHigh = tileSizeLow * upRes
channelLayout_low = 'd'
channelLayout_high = 'd'
lowfilename = "density_low_%04d.uni"
# only a single simulation directory is loaded (fromSim..toSim inclusive)
toSim = fromSim
dirIDs = np.linspace(fromSim, toSim, (toSim-fromSim+1),dtype='int16')
highfilename = "density_high_%04d.uni"
mfl = ["density"]
mfh = ["density"]
# load output of first network in high res data of tile creator -> separate when getting input
if useVelocities:
	channelLayout_low += ',vx,vy,vz'
	mfl = np.append(mfl, "velocity")
data_fraction = 1.0
kt = 0.0
kt_l = 0.0
useTempoD = False
useTempoL2 = False
useDataAugmentation = 0
# load data
floader = FDL.FluidDataLoader( print_info=3, base_path=packedSimPath, base_path_y = packedSimPath, numpy_seed = randSeed ,filename=lowfilename, filename_index_min = frame_min, oldNamingScheme=False, filename_y = None, filename_index_max=frame_max, indices=dirIDs, data_fraction=data_fraction, multi_file_list=mfl, multi_file_list_y=mfh)
x, y, _ = floader.get()
x_3d = x
# NOTE(review): assumes channels 1..3 are velocity. When useVelocities is off
# only the density channel is loaded, so the 1:4 slice is empty and this is a
# no-op — confirm that is intended rather than guarding with `if useVelocities:`.
x_3d[:,:,:,:,1:4] = velScale * x_3d[:,:,:,:,1:4] # scale velocity channels
# 2D: tileSize x tileSize tiles; 3D: tileSize x tileSize x tileSize chunks
n_input = tileSizeLow ** 2
n_output = tileSizeHigh ** 2
n_inputChannels = 1
if useVelocities:
	n_inputChannels += 3
if useVorticities:
	n_inputChannels += 3
n_input *= n_inputChannels
# Network 1: resolve checkpoint path(s) and the test/output directory.
if not load_model_test_1 == -1:
	# NOTE(review): only prints the error — execution continues and the later
	# checkpoint restore will fail; consider aborting here.
	if not os.path.exists(basePath + 'test_%04d/' % load_model_test_1):
		print('ERROR: Test to load does not exist.')
	# load_emas selects the EMA checkpoint as the primary model to restore
	if not load_emas:
		load_path_1 = basePath + 'test_%04d/model_%04d.ckpt' % (load_model_test_1, load_model_no_1)
		load_path_ema_1 = basePath + 'test_%04d/model_ema_%04d.ckpt' % (load_model_test_1, load_model_no_1)
	else:
		load_path_1 = basePath + 'test_%04d/model_ema_%04d.ckpt' % (load_model_test_1, load_model_no_1)
	# output mode writes next to the loaded test; training mode gets a fresh test dir
	if outputOnly:
		out_path_prefix = 'out_%04d-%04d' % (load_model_test_1,load_model_no_1)
		test_path,_ = ph.getNextGenericPath(out_path_prefix, 0, basePath + 'test_%04d/' % load_model_test_1)
	else:
		test_path,_ = ph.getNextTestPath(testPathStartNo, basePath)
# Network 2: resolve checkpoint path(s) and the test/output directory
# (same structure as network 1; test_path is overwritten if both are set).
if not load_model_test_2 == -1:
	if not os.path.exists(basePath + 'test_%04d/' % load_model_test_2):
		print('ERROR: Test to load does not exist.')
	if not load_emas:
		load_path_2 = basePath + 'test_%04d/model_%04d.ckpt' % (load_model_test_2, load_model_no_2)
		load_path_ema_2 = basePath + 'test_%04d/model_ema_%04d.ckpt' % (load_model_test_2, load_model_no_2)
	else:
		load_path_2 = basePath + 'test_%04d/model_ema_%04d.ckpt' % (load_model_test_2, load_model_no_2)
	if outputOnly:
		out_path_prefix = 'out_%04d-%04d' % (load_model_test_2,load_model_no_2)
		test_path,_ = ph.getNextGenericPath(out_path_prefix, 0, basePath + 'test_%04d/' % load_model_test_2)
	else:
		test_path,_ = ph.getNextTestPath(testPathStartNo, basePath)
# Network 3 (optional third pass): resolve checkpoint path(s) and output dir.
if not load_model_test_3 == -1:
	# BUG FIX: the existence check previously tested network 2's directory
	# (load_model_test_2) due to a copy-paste error; it must validate the
	# directory of the third network being loaded.
	if not os.path.exists(basePath + 'test_%04d/' % load_model_test_3):
		print('ERROR: Test to load does not exist.')
		print('Using two networks')
	else:
		print('Using three networks')
	# load_emas selects the EMA checkpoint as the primary model to restore
	if not load_emas:
		load_path_3 = basePath + 'test_%04d/model_%04d.ckpt' % (load_model_test_3, load_model_no_3)
		load_path_ema_3 = basePath + 'test_%04d/model_ema_%04d.ckpt' % (load_model_test_3, load_model_no_3)
	else:
		load_path_3 = basePath + 'test_%04d/model_ema_%04d.ckpt' % (load_model_test_3, load_model_no_3)
	# output mode writes next to the loaded test; training mode gets a fresh test dir
	if outputOnly:
		out_path_prefix = 'out_%04d-%04d' % (load_model_test_3,load_model_no_3)
		test_path,_ = ph.getNextGenericPath(out_path_prefix, 0, basePath + 'test_%04d/' % load_model_test_3)
	else:
		test_path,_ = ph.getNextTestPath(testPathStartNo, basePath)
# create session and saver
# allow_soft_placement lets TF fall back to CPU for ops without a GPU kernel
config = tf.ConfigProto(allow_soft_placement=True)
#config.gpu_options.per_process_gpu_memory_fraction = 0.8
sess = tf.InteractiveSession(config = config)
def save_img(out_path, img):
	"""Quantize a float image (expected range [0, 1]) to uint8 and write it."""
	quantized = np.clip(img * 255.0, 0, 255).astype(np.uint8)
	# NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2; keep in
	# mind when upgrading the environment.
	scipy.misc.imsave(out_path, quantized)
def save_img_3d(out_path, img): # y ↓ x →, z ↓ x →, z ↓ y →,3 in a column
	"""Project a 3D volume by summing along each axis and save the three
	projections stacked vertically into one image."""
	projections = [np.sum(img, axis=axis) for axis in range(3)]
	save_img(out_path, np.concatenate(projections, axis=0))
def lerp(x, y, t):
	"""Linear interpolation from x to y; the blend factor t is clamped to [0, 1]."""
	blend = tf.clip_by_value(t, 0.0, 1.0)
	return tf.add(x, (y - x) * blend)
def gaussian_noise_layer(input_layer, strength):
	"""Add unit Gaussian noise, scaled by strength * sqrt(#channels) (NHWC axis 3)."""
	channel_count = tf.cast(input_layer.get_shape().as_list()[3], tf.float32)
	noise = tf.random_normal(shape=tf.shape(input_layer), mean=0.0, stddev=1.0, dtype=tf.float32)
	return input_layer + noise * (strength * tf.sqrt(channel_count))
# set up GAN structure
def resBlock(gan, inp, s1, s2, reuse, use_batch_norm, name, filter_size=3):
	"""Residual block: two convolutions plus a 1x1-conv shortcut, ReLU-merged.

	Parameters
	----------
	gan : GAN
		Network builder; convolutional_layer() appends to its internal stack.
	inp : tensor
		Input tensor of the block.
	s1, s2 : int
		Feature-map counts of the first and second convolution.
	reuse : bool
		Reuse existing TF variables (weight sharing between passes).
	use_batch_norm : bool
		Apply batch normalization inside the conv layers.
	name : str
		Suffix used to build unique layer names.
	filter_size : int
		Spatial kernel size of the two main convolutions.

	NOTE(review): `train` and `pixel_norm` are read from module scope, not
	passed as parameters — `train` is not defined above this point in the
	file, so it must be bound before the graph is built; confirm.
	"""
	# note - leaky relu (lrelu) not too useful here
	# convolutions of resnet block
	filter = [filter_size,filter_size]
	filter1 = [1,1]
	gc1,_ = gan.convolutional_layer( s1, filter, tf.nn.relu, stride=[1], name="g_cA_"+name, in_layer=inp, reuse=reuse, batch_norm=use_batch_norm, train=train) #->16,64
	if pixel_norm:
		gc1 = gan.pixel_norm(gc1)
	# no in_layer here: presumably the GAN helper chains from the previously
	# added layer (gc1) — verify against the GAN class.
	gc2,_ = gan.convolutional_layer( s2, filter, None, stride=[1], name="g_cB_"+name, reuse=reuse, batch_norm=use_batch_norm, train=train) #->8,128
	# shortcut connection
	gs1,_ = gan.convolutional_layer( s2, filter1 , None , stride=[1], name="g_s_"+name, in_layer=inp, reuse=reuse, batch_norm=use_batch_norm, train=train) #->16,64
	resUnit1 = tf.nn.relu( tf.add( gc2, gs1 ) )
	if pixel_norm:
		resUnit1 = gan.pixel_norm(resUnit1)
	return resUnit1
def growBlockGen(gan, inp, upres, fms, use_batch_norm, train, reuse, output = False, firstGen = True, filterSize = 3, first_nn_arch = False, use_res_net = True):
	"""One growing block of the generator for upscaling stage `upres`.

	Depools (2x upsamples) the input when this is the first generator pass,
	then applies res blocks or plain convolutions, and (unless `output` is
	True) also emits a 1-channel density tensor used for blending stages.

	Returns (outp, outpDens) when output is False, else outp.

	NOTE(review): `usePixelShuffle`, `upsampleMode` and `pixel_norm` are read
	from module scope. In the first_nn_arch branch the upres==8 case is an
	`if`, not `elif`; the branches are mutually exclusive so behavior is the
	same, but `outp` stays unbound (NameError later) for any upres not in
	{2, 4, 8} — confirm callers only pass those values.
	"""
	with tf.variable_scope("genBlock%d"%(upres), reuse=reuse) as scope:
		if firstGen:
			if not usePixelShuffle:
				# avg_depool takes no input tensor: presumably it operates on
				# the GAN builder's current top layer — verify in GAN class.
				inDepool = gan.avg_depool(mode = upsampleMode)
			else:
				inDepool = gan.pixel_shuffle(inp, upres = 2, stage = "%d"%(upres))
		else:
			inDepool = inp
		filter = [filterSize,filterSize]
		if first_nn_arch:
			# deeper network in lower levels for higher low-res receptive field - only for the first network
			if upres == 2:
				outp = resBlock(gan, inDepool, fms, fms, reuse, use_batch_norm, "first" , filter_size = filter[0]) #%(upres)
				outp = resBlock(gan, outp, fms, fms, reuse, use_batch_norm, "second", filter_size = filter[0]) #%(upres,upres)
				outp = resBlock(gan, outp, fms, fms, reuse, use_batch_norm, "third" , filter_size = filter[0]) #%(upres)
				outp = resBlock(gan, outp, fms, fms, reuse, use_batch_norm, "fourth", filter_size = filter[0]) #%(upres,upres)
				outp = resBlock(gan, outp, fms, fms, reuse, use_batch_norm, "fifth", filter_size = filter[0]) #%(upres,upres)
			elif upres == 4:
				outp = resBlock(gan, inDepool, fms*2, fms, reuse, use_batch_norm, "first" , filter_size = filter[0]) #%(upres)
				outp = resBlock(gan, outp, fms, fms, reuse, use_batch_norm, "second" , filter_size = filter[0]) #%(upres)
				outp = resBlock(gan, outp, fms, fms, reuse, use_batch_norm, "third", filter_size = filter[0]) #%(upres,upres)
			if upres == 8:
				outp = resBlock(gan, inDepool, fms*2, fms, reuse, use_batch_norm, "first" , filter_size = filter[0]) #%(upres)
				outp = resBlock(gan, outp, fms, fms, reuse, use_batch_norm, "second", filter_size = filter[0]) #%(upres,upres)
		else:
			if use_res_net:
				# two res blocks per growing block
				outp = resBlock(gan, inDepool, fms, fms, reuse, use_batch_norm, "first" , filter_size = filter[0]) #%(upres)
				outp = resBlock(gan, outp, fms//2, fms//2, reuse, use_batch_norm, "second", filter_size = filter[0]) #%(upres,upres)
			else:
				# "recursive" output
				inp,_ = gan.convolutional_layer( fms, filter, lrelu, stride=[1], name="g_cA%d"%(upres), in_layer=inDepool, reuse=reuse, batch_norm=use_batch_norm, train=train) #->16,64
				if pixel_norm:
					inp = gan.pixel_norm(inp)
				outp,_ = gan.convolutional_layer( fms, filter, lrelu, stride=[1], name="g_cB%d"%(upres), in_layer=inp, reuse=reuse, batch_norm=use_batch_norm, train=train) #->8,128
				if pixel_norm:
					outp = gan.pixel_norm(outp)
		# density output for blending
		if not output:
			outpDens, _ = GAN(outp, bn_decay=0.0).convolutional_layer( 1, [1,1], None, stride=[1], name="g_cdensOut%d"%(upres), in_layer=outp, reuse=reuse, batch_norm=False, train=train, gain = 1)
			return outp, outpDens
		return outp
def growing_gen(_in, percentage, reuse=False, use_batch_norm=False, train=None, currentUpres = 2, output = False, firstGen = True, filterSize = 3, startFms = 256, maxFms = 256, add_adj_idcs = False, first_nn_arch = False, use_res_net = True):
global rbId
print("\n\tGenerator (growing-sliced-resnett3-deep)")
with tf.variable_scope("generator", reuse=reuse) as scope:
n_channels = n_inputChannels
if add_adj_idcs:
n_channels += 2
if firstGen:
_in = tf.reshape(_in, shape=[-1, tileSizeLow, tileSizeLow, n_channels]) #NHWC
else:
_in = tf.reshape(_in, shape=[-1, tileSizeHigh, tileSizeHigh, n_channels+1]) #NHWC
gan = GAN(_in, bn_decay=0.0)
# inital conv layers
filter = [filterSize,filterSize]
if first_nn_arch:
x_g = _in
else:
if use_res_net:
x_g = resBlock(gan, _in, 16, min(maxFms, startFms//2)//8, reuse, | |
<gh_stars>1-10
"""Base graph class specialized for neural networks on graphs."""
from __future__ import absolute_import
import networkx as nx
import numpy as np
import dgl
from .base import ALL, is_all, DGLError, dgl_warning
from . import backend as F
from .frame import FrameRef, Frame, merge_frames
from .graph_index import GraphIndex, create_graph_index
from .runtime import ir, scheduler, Runtime
from . import utils
from .view import NodeView, EdgeView
from .udf import NodeBatch, EdgeBatch
__all__ = ['DGLGraph']
class DGLGraph(object):
"""Base graph class.
The graph stores nodes, edges and also their features.
DGL graph is always directional. Undirected graph can be represented using
two bi-directional edges.
Nodes are identified by consecutive integers starting from zero.
Edges can be specified by two end points (u, v) or the integer id assigned
when the edges are added. Edge IDs are automatically assigned by the order
of addition, i.e. the first edge being added has an ID of 0, the second
being 1, so on so forth.
Node and edge features are stored as a dictionary from the feature name
to the feature data (in tensor).
Parameters
----------
graph_data : graph data, optional
Data to initialize graph. Same as networkx's semantics.
node_frame : FrameRef, optional
Node feature storage.
edge_frame : FrameRef, optional
Edge feature storage.
multigraph : bool, optional
Whether the graph would be a multigraph (default: False)
readonly : bool, optional
Whether the graph structure is read-only (default: False).
Examples
--------
Create an empty graph with no nodes and edges.
>>> G = dgl.DGLGraph()
G can be grown in several ways.
**Nodes:**
Add N nodes:
>>> G.add_nodes(10) # 10 isolated nodes are added
**Edges:**
Add one edge at a time,
>>> G.add_edge(0, 1)
or multiple edges,
>>> G.add_edges([1, 2, 3], [3, 4, 5]) # three edges: 1->3, 2->4, 3->5
or multiple edges starting from the same node,
>>> G.add_edges(4, [7, 8, 9]) # three edges: 4->7, 4->8, 4->9
or multiple edges pointing to the same node,
>>> G.add_edges([2, 6, 8], 5) # three edges: 2->5, 6->5, 8->5
or multiple edges using tensor type (demo in pytorch syntax).
>>> import torch as th
>>> G.add_edges(th.tensor([3, 4, 5]), 1) # three edges: 3->1, 4->1, 5->1
NOTE: Removing nodes and edges is not supported by DGLGraph.
**Features:**
Both nodes and edges can have feature data. Features are stored as
key/value pair. The key must be hashable while the value must be tensor
type. Features are batched on the first dimension.
Use G.ndata to get/set features for all nodes.
>>> G = dgl.DGLGraph()
>>> G.add_nodes(3)
>>> G.ndata['x'] = th.zeros((3, 5)) # init 3 nodes with zero vector(len=5)
>>> G.ndata
{'x' : tensor([[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]])}
Use G.nodes to get/set features for some nodes.
>>> G.nodes[[0, 2]].data['x'] = th.ones((2, 5))
>>> G.ndata
{'x' : tensor([[1., 1., 1., 1., 1.],
[0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1.]])}
Similarly, use G.edata and G.edges to get/set features for edges.
>>> G.add_edges([0, 1], 2) # 0->2, 1->2
>>> G.edata['y'] = th.zeros((2, 4)) # init 2 edges with zero vector(len=4)
>>> G.edata
{'y' : tensor([[0., 0., 0., 0.],
[0., 0., 0., 0.]])}
>>> G.edges[1, 2].data['y'] = th.ones((1, 4))
>>> G.edata
{'y' : tensor([[0., 0., 0., 0.],
[1., 1., 1., 1.]])}
Note that each edge is assigned a unique id equal to its adding
order. So edge 1->2 has id=1. DGL supports directly use edge id
to access edge features.
>>> G.edges[0].data['y'] += 2.
>>> G.edata
{'y' : tensor([[2., 2., 2., 2.],
[1., 1., 1., 1.]])}
"""
def __init__(self,
graph_data=None,
node_frame=None,
edge_frame=None,
multigraph=False,
readonly=False):
# graph
self._readonly=readonly
self._graph = create_graph_index(graph_data, multigraph, readonly)
# frame
if node_frame is None:
self._node_frame = FrameRef(Frame(num_rows=self.number_of_nodes()))
else:
self._node_frame = node_frame
if edge_frame is None:
self._edge_frame = FrameRef(Frame(num_rows=self.number_of_edges()))
else:
self._edge_frame = edge_frame
# msg graph & frame
self._msg_graph = create_graph_index(multigraph=multigraph)
self._msg_frame = FrameRef()
self.reset_messages()
# registered functions
self._message_func = None
self._reduce_func = None
self._apply_node_func = None
self._apply_edge_func = None
def add_nodes(self, num, data=None):
"""Add multiple new nodes.
Parameters
----------
num : int
Number of nodes to be added.
data : dict, optional
Feature data of the added nodes.
Notes
-----
If new nodes are added with features, and any of the old nodes
do not have some of the feature fields, those fields are filled
by initializers defined with ``set_n_initializer`` (default filling
with zeros).
Examples
--------
>>> G = dgl.DGLGraph()
>>> g.add_nodes(2)
>>> g.number_of_nodes()
2
>>> g.add_nodes(3)
>>> g.number_of_nodes()
5
Adding new nodes with features (using PyTorch as example):
>>> import torch as th
>>> g.add_nodes(2, {'x': th.ones(2, 4)}) # default zero initializer
>>> g.ndata['x']
tensor([[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.],
[1., 1., 1., 1.],
[1., 1., 1., 1.]])
"""
self._graph.add_nodes(num)
self._msg_graph.add_nodes(num)
if data is None:
# Initialize feature placeholders if there are features existing
self._node_frame.add_rows(num)
else:
self._node_frame.append(data)
def add_edge(self, u, v, data=None):
"""Add one new edge between u and v.
Parameters
----------
u : int
The source node ID. Must exist in the graph.
v : int
The destination node ID. Must exist in the graph.
data : dict, optional
Feature data of the added edges.
Notes
-----
If new edges are added with features, and any of the old edges
do not have some of the feature fields, those fields are filled
by initializers defined with ``set_e_initializer`` (default filling
with zeros).
Examples
--------
The following example uses PyTorch backend.
>>> G = dgl.DGLGraph()
>>> G.add_nodes(3)
>>> G.add_edge(0, 1)
Adding new edge with features
>>> import torch as th
>>> G.add_edge(0, 2, {'x': th.ones(1, 4)})
>>> G.edges()
(tensor([0, 0]), tensor([1, 2]))
>>> G.edata['x']
tensor([[0., 0., 0., 0.],
[1., 1., 1., 1.]])
>>> G.edges[0, 2].data['x']
tensor([[1., 1., 1., 1.]])
See Also
--------
add_edges
"""
self._graph.add_edge(u, v)
if data is None:
# Initialize feature placeholders if there are features existing
self._edge_frame.add_rows(1)
else:
self._edge_frame.append(data)
def add_edges(self, u, v, data=None):
"""Add multiple edges for list of source nodes u and destination nodes
v. A single edge is added between every pair of ``u[i]`` and ``v[i]``.
Parameters
----------
u : list, tensor
The source node IDs. All nodes must exist in the graph.
v : list, tensor
The destination node IDs. All nodes must exist in the graph.
data : dict, optional
Feature data of the added edges.
Notes
-----
If new edges are added with features, and any of the old edges
do not have some of the feature fields, those fields are filled
by initializers defined with ``set_e_initializer`` (default filling
with zeros).
Examples
--------
The following example uses PyTorch backend.
>>> G = dgl.DGLGraph()
>>> G.add_nodes(4)
>>> G.add_edges([0, 2], [1, 3]) # add edges (0, 1) and (2, 3)
Adding new edges with features
>>> import torch as th
>>> G.add_edges([1, 3], [2, 0], {'x': th.ones(2, 4)}) # (1, 2), (3, 0)
>>> G.edata['x']
tensor([[0., 0., 0., 0.],
[0., 0., 0., 0.],
[1., 1., 1., 1.],
[1., 1., 1., 1.]])
See Also
--------
add_edge
"""
u = utils.toindex(u)
v = utils.toindex(v)
self._graph.add_edges(u, v)
if data is None:
# Initialize feature placeholders if there are features existing
# NOTE: use max due to edge broadcasting syntax
self._edge_frame.add_rows(max(len(u), len(v)))
else:
self._edge_frame.append(data)
def clear(self):
"""Remove all nodes and edges, as well as their features, from the
graph.
Examples
--------
>>> G = dgl.DGLGraph()
>>> G.add_nodes(4)
>>> G.add_edges([0, 1, 2, 3], [1, 2, 3, 0])
>>> G.number_of_nodes()
4
>>> G.number_of_edges()
4
>>> G.clear()
>>> G.number_of_nodes()
0
>>> G.number_of_edges()
0
"""
self._graph.clear()
self._node_frame.clear()
self._edge_frame.clear()
self._msg_graph.clear()
self._msg_frame.clear()
    def reset_messages(self):
        """Clear all messages."""
        # Drop all pending messages, then re-create one message slot per node
        # so the message graph stays aligned with the main graph.
        self._msg_graph.clear()
        self._msg_frame.clear()
        self._msg_graph.add_nodes(self.number_of_nodes())
    def number_of_nodes(self):
        """Return the number of nodes in the graph.
        Returns
        -------
        int
            The number of nodes
        """
        # Delegate to the underlying graph index.
        return self._graph.number_of_nodes()
    def __len__(self):
        """Return the number of nodes in the graph."""
        # len(G) is an alias for G.number_of_nodes().
        return self.number_of_nodes()
    @property
    def is_multigraph(self):
        """True if the graph is a multigraph, False otherwise.
        """
        # Multigraphs allow parallel edges between the same pair of nodes;
        # the flag is fixed at construction time by the graph index.
        return self._graph.is_multigraph()
def number_of_edges(self):
"""Return the number of edges in the | |
ret_val, response = self._make_rest_call_helper_oauth2(action_result, endpoint, json=param, method=method)
if phantom.is_fail(ret_val):
return None
if response.get("resources"):
list_ids_details.extend(response.get("resources"))
del list_ids[:min(100, len(list_ids))]
return list_ids_details
def _get_device_count(self, params, action_result):
ret_val, response = self._make_rest_call_helper_oauth2(action_result, CROWDSTRIKE_GET_DEVICE_COUNT_APIPATH, params=params)
if (phantom.is_fail(ret_val)):
return action_result.get_status()
try:
resources = response['resources']
except Exception as e:
return action_result.set_status(phantom.APP_ERROR, "Unable to parse response {}".format(self._get_error_message_from_exception(e)))
if (not resources):
action_result.update_summary({'device_count': 0})
return action_result.set_status(phantom.APP_SUCCESS)
result = resources[0]
# successful request
action_result.update_summary({'device_count': result.get('device_count', 0)})
return action_result.set_status(phantom.APP_SUCCESS)
def _get_devices_ran_on(self, ioc, ioc_type, param, action_result):
api_data = {
"type": ioc_type,
"value": ioc
}
count_only = param.get(CROWDSTRIKE_JSON_COUNT_ONLY, False)
if (count_only):
return self._get_device_count(api_data, action_result)
ret_val, response = self._make_rest_call_helper_oauth2(action_result, CROWDSTRIKE_GET_DEVICES_RAN_ON_APIPATH, params=api_data)
if (phantom.is_fail(ret_val)):
return action_result.get_status()
# successful request / "none found"
for d in response["resources"]:
action_result.add_data({"device_id": d})
action_result.set_summary({"device_count": len(response["resources"])})
return action_result.set_status(phantom.APP_SUCCESS)
def _handle_resolve_detection(self, param):
# Add an action result to the App Run
action_result = self.add_action_result(ActionResult(dict(param)))
detection_id = self._handle_py_ver_compat_for_input_str(param[CROWDSTRIKE_JSON_ID])
to_state = param[CROWDSTRIKE_RESOLVE_DETECTION_TO_STATE]
detection_id = [x.strip() for x in detection_id.split(',')]
detection_id = list(filter(None, detection_id))
api_data = {
"ids": detection_id,
"status": to_state
}
ret_val, response = self._make_rest_call_helper_oauth2(action_result, CROWDSTRIKE_RESOLVE_DETECTION_APIPATH, json=api_data, method="patch")
if (phantom.is_fail(ret_val)):
return action_result.get_status()
return action_result.set_status(phantom.APP_SUCCESS, "Status set successfully")
def _handle_hunt_file(self, param):
file_hash = param[phantom.APP_JSON_HASH]
action_result = self.add_action_result(ActionResult(dict(param)))
ret_val, ioc_type = self._get_hash_type(file_hash, action_result)
if (phantom.is_fail(ret_val)):
return action_result.get_status()
return self._get_devices_ran_on(file_hash, ioc_type, param, action_result)
def _handle_hunt_domain(self, param):
domain = param[phantom.APP_JSON_DOMAIN]
action_result = self.add_action_result(ActionResult(dict(param)))
return self._get_devices_ran_on(domain, "domain", param, action_result)
def _handle_get_device_detail(self, param):
# Add an action result to the App Run
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
fdid = self._handle_py_ver_compat_for_input_str(param[CROWDSTRIKE_GET_DEVICE_DETAIL_DEVICE_ID])
api_data = {
"ids": fdid
}
ret_val, response = self._make_rest_call_helper_oauth2(action_result, CROWDSTRIKE_GET_DEVICE_DETAILS_ENDPOINT, params=api_data)
if (phantom.is_fail(ret_val)):
return action_result.get_status()
# successful request
try:
data = dict(response["resources"][0])
except:
return action_result.set_status(phantom.APP_ERROR, "Error occured while parsing response of 'get_system_info' action. Unknown response retrieved")
action_result.add_data(data)
summary = action_result.update_summary({})
try:
summary['hostname'] = response["resources"][0]['hostname']
except:
pass
return action_result.set_status(phantom.APP_SUCCESS, "Device details fetched successfully")
def _handle_get_process_detail(self, param):
# Add an action result to the App Run
action_result = self.add_action_result(ActionResult(dict(param)))
fpid = self._handle_py_ver_compat_for_input_str(param.get(CROWDSTRIKE_GET_PROCESS_DETAIL_FALCON_PROCESS_ID, ''))
api_data = {
"ids": fpid
}
ret_val, response = self._make_rest_call_helper_oauth2(action_result, CROWDSTRIKE_GET_PROCESS_DETAIL_APIPATH, params=api_data)
if (phantom.is_fail(ret_val)):
return action_result.get_status()
try:
data = dict(response["resources"][0])
except:
return action_result.set_status(phantom.APP_ERROR, "Error occured while parsing response of 'get_process_detail' action. Unknown response retrieved")
action_result.add_data(data)
return action_result.set_status(phantom.APP_SUCCESS, "Process details fetched successfully")
def _handle_list_incidents(self, param):
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
# Add an action result object to self (BaseConnector) to represent the action for this param
action_result = self.add_action_result(ActionResult(dict(param)))
max_limit = None
sort_data = ["assigned_to.asc", "assigned_to.desc", "assigned_to_name.asc", "assigned_to_name.desc", "end.asc", "end.desc", "modified_timestamp.asc",
"modified_timestamp.desc", "name.asc", "name.desc", "sort_score.asc", "sort_score.desc", "start.asc", "start.desc", "state.asc", "state.desc", "status.asc", "status.desc"]
resp = self._check_data(action_result, param, max_limit, sort_data)
if (phantom.is_fail(resp)):
return action_result.get_status()
endpoint = CROWDSTRIKE_LIST_INCIDENTS_ENDPOINT
id_list = self._get_ids(action_result, endpoint, param)
if id_list is None:
return action_result.get_status()
# Add the response into the data section
for id in id_list:
action_result.add_data(id)
return action_result.set_status(phantom.APP_SUCCESS, "Incidents listed successfully")
def _handle_list_incident_behaviors(self, param):
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
# Add an action result object to self (BaseConnector) to represent the action for this param
action_result = self.add_action_result(ActionResult(dict(param)))
max_limit = None
sort_data = ["--", "timestamp.asc", "timestamp.desc"]
resp = self._check_data(action_result, param, max_limit, sort_data)
if (phantom.is_fail(resp)):
return action_result.get_status()
endpoint = CROWDSTRIKE_LIST_BEHAVIORS_ENDPOINT
id_list = self._get_ids(action_result, endpoint, param)
if id_list is None:
return action_result.get_status()
# Add the response into the data section
for id in id_list:
action_result.add_data(id)
return action_result.set_status(phantom.APP_SUCCESS, "Incident behaviors listed successfully")
def _handle_get_incident_details(self, param):
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
# Add an action result object to self (BaseConnector) to represent the action for this param
action_result = self.add_action_result(ActionResult(dict(param)))
ids = self._handle_py_ver_compat_for_input_str(param.get("ids"))
ids = [x.strip() for x in ids.split(',')]
ids = list(filter(None, ids))
data = {"ids": ids}
endpoint = CROWDSTRIKE_GET_INCIDENT_DETAILS_ID_ENDPOINT
details_list = self._get_details(action_result, endpoint, data, method='post')
if details_list is None:
return action_result.get_status()
for incident in details_list:
action_result.add_data(incident)
summary = action_result.update_summary({})
summary['total_sessions'] = action_result.get_data_size()
return action_result.set_status(phantom.APP_SUCCESS, "Incidents fetched: {}".format(len(details_list)))
def _handle_get_incident_behaviors(self, param):
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
# Add an action result object to self (BaseConnector) to represent the action for this param
action_result = self.add_action_result(ActionResult(dict(param)))
ids = self._handle_py_ver_compat_for_input_str(param.get("ids"))
ids = [x.strip() for x in ids.split(',')]
ids = list(filter(None, ids))
data = {"ids": ids}
endpoint = CROWDSTRIKE_GET_INCIDENT_BEHAVIORS_ID_ENDPOINT
details_list = self._get_details(action_result, endpoint, data, 'post')
if details_list is None:
return action_result.get_status()
# Add the response into the data section
for incident_behavior in details_list:
action_result.add_data(incident_behavior)
return action_result.set_status(phantom.APP_SUCCESS, "Incident behavior fetched successfully")
def _handle_list_crowdscores(self, param):
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
# Add an action result object to self (BaseConnector) to represent the action for this param
action_result = self.add_action_result(ActionResult(dict(param)))
max_limit = None
sort_data = ["--", "score.asc", "score.desc", "timestamp.asc", "timestamp.desc"]
resp = self._check_data(action_result, param, max_limit, sort_data)
if (phantom.is_fail(resp)):
return action_result.get_status()
endpoint = CROWDSTRIKE_LIST_CROWDSCORES_ENDPOINT
id_list = self._get_ids(action_result, endpoint, param, is_str=False)
if id_list is None:
return action_result.get_status()
# Add the response into the data section
for crowdscore in id_list:
action_result.add_data(crowdscore)
summary = action_result.update_summary({})
summary['total_sessions'] = action_result.get_data_size()
return action_result.set_status(phantom.APP_SUCCESS, "Crowdscores listed successfully")
def _handle_update_incident(self, param):
    """Apply tag/name/description/status/comment updates to incidents."""
    self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
    # Add an action result object to self (BaseConnector) to represent the action for this param
    action_result = self.add_action_result(ActionResult(dict(param)))
    # API status codes keyed by the lower-cased display value
    status_codes = {"new": 20, "reopened": 25, "in progress": 30, "closed": 40}
    raw_ids = self._handle_py_ver_compat_for_input_str(param.get("ids"))
    incident_ids = [part.strip() for part in raw_ids.split(',') if part.strip()]
    # Default payload we will send
    payload = {"action_parameters": [], "ids": incident_ids}
    # Comma-separated tag parameters expand to one action entry per tag
    for action_name in ("add_tag", "delete_tag"):
        if param.get(action_name):
            raw_tags = self._handle_py_ver_compat_for_input_str(param.get(action_name))
            for tag in (t.strip() for t in raw_tags.split(',')):
                if tag:
                    payload["action_parameters"].append({"name": action_name, "value": tag})
    # Single-valued free-text updates
    for action_name in ("update_name", "update_description"):
        if param.get(action_name):
            value = self._handle_py_ver_compat_for_input_str(param.get(action_name))
            payload["action_parameters"].append({"name": action_name, "value": value})
    if param.get('update_status'):
        if param.get('update_status') not in ["New", "Reopened", "In Progress", "Closed"]:
            return action_result.set_status(phantom.APP_ERROR, "Please provide a valid value in the 'update_status' parameter")
        code = status_codes[param.get("update_status").lower()]
        payload["action_parameters"].append({"name": "update_status", "value": str(code)})
    if param.get("add_comment"):
        comment = self._handle_py_ver_compat_for_input_str(param.get("add_comment"))
        payload["action_parameters"].append({"name": "add_comment", "value": comment})
    ret_val, response = self._make_rest_call_helper_oauth2(
        action_result, CROWDSTRIKE_UPDATE_INCIDENT_ENDPOINT, json=payload, method="post")
    if phantom.is_fail(ret_val):
        return action_result.get_status()
    # Add the response into the data section
    action_result.add_data(response)
    return action_result.set_status(phantom.APP_SUCCESS, "Incident updated successfully")
def _handle_list_users(self, param):
    """List all users for the customer: fetch UIDs, then resolve them."""
    self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
    # Add an action result object to self (BaseConnector) to represent the action for this param
    action_result = self.add_action_result(ActionResult(dict(param)))
    # Step 1: collect every user UID for the customer id
    ret_val, response = self._make_rest_call_helper_oauth2(action_result, CROWDSTRIKE_LIST_USERS_UIDS_ENDPOINT)
    if phantom.is_fail(ret_val):
        return action_result.get_status()
    uids = response.get('resources', [])
    if not uids:
        return action_result.set_status(phantom.APP_SUCCESS, "No data found for user resources")
    # Step 2: resolve the UIDs to full user records
    ret_val, response = self._make_rest_call_helper_oauth2(
        action_result, CROWDSTRIKE_GET_USER_INFO_ENDPOINT, params={'ids': uids})
    if phantom.is_fail(ret_val):
        return action_result.get_status()
    # Add the response into the data section
    action_result.add_data(response)
    return action_result.set_status(phantom.APP_SUCCESS, "Users listed successfully")
def _handle_get_user_roles(self, param):
    """Fetch the roles assigned to a single user UUID."""
    self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
    # Add an action result object to self (BaseConnector) to represent the action for this param
    action_result = self.add_action_result(ActionResult(dict(param)))
    query = {"user_uuid": param["user_uuid"]}
    ret_val, response = self._make_rest_call_helper_oauth2(
        action_result, CROWDSTRIKE_GET_USER_ROLES_ENDPOINT, params=query)
    if phantom.is_fail(ret_val):
        return action_result.get_status()
    # Add the response into the data section
    action_result.add_data(response)
    return action_result.set_status(phantom.APP_SUCCESS, "User roles fetched successfully")
def _handle_get_roles(self, param):
    """Fetch role details for a comma-separated list of role IDs, batching
    requests 100 ids at a time."""
    self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
    # Add an action result object to self (BaseConnector) to represent the action for this param
    action_result = self.add_action_result(ActionResult(dict(param)))
    raw_ids = self._handle_py_ver_compat_for_input_str(param.get("role_id"))
    pending = [x.strip() for x in raw_ids.split(',') if x.strip()]
    details_list = []
    while pending:
        # Take the next batch of at most 100 ids
        batch, pending = pending[:100], pending[100:]
        ret_val, response = self._make_rest_call_helper_oauth2(
            action_result, CROWDSTRIKE_GET_ROLE_ENDPOINT, params={'ids': batch})
        if phantom.is_fail(ret_val):
            return action_result.get_status()
        if response.get("resources"):
            details_list.extend(response.get("resources"))
    if not details_list:
        return action_result.set_status(phantom.APP_SUCCESS, "No data found")
    # De-duplicate, keeping the last occurrence of each role record
    unique_roles = [i for n, i in enumerate(details_list) if i not in details_list[n + 1:]]
    for role in unique_roles:
        action_result.add_data(role)
    return action_result.set_status(phantom.APP_SUCCESS, "Role fetched successfully")
def _handle_list_roles(self, param):
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
# Add an action result object to self (BaseConnector) to represent the action for this param
action_result = self.add_action_result(ActionResult(dict(param)))
# Get all the Roles from your Customer ID
endpoint = CROWDSTRIKE_LIST_USER_ROLES_ENDPOINT
ret_val, response = self._make_rest_call_helper_oauth2(action_result, endpoint)
if (phantom.is_fail(ret_val)):
return action_result.get_status()
| |
import libvirt
from xml.etree import ElementTree
from jinja2 import Environment, PackageLoader, FileSystemLoader
import os
import time
import shutil
import random
from qcow2 import Qcow2
from js9 import j
import atexit
# Return codes for the domain-lock helpers (_lockDomain / _unlockDomain).
LOCKCREATED = 1
LOCKREMOVED = 2
NOLOCK = 3
LOCKEXIST = 4
# Base class provided by the jumpscale framework (adds self.logger, etc.).
JSBASE = j.application.jsbase_get_class()
class LibvirtUtil(JSBASE):
def __init__(self, host='localhost'):
    # Libvirt host to manage; 'localhost' uses the local qemu system
    # socket, anything else is reached over qemu+ssh (see open()).
    self._host = host
    self.open()
    # Release both libvirt connections when the interpreter exits.
    atexit.register(self.close)
    self.basepath = '/mnt/vmstor/kvm'
    self.templatepath = '/mnt/vmstor/kvm/images'
    # Jinja2 templates (e.g. pool.xml) live next to this module.
    self.env = Environment(loader=FileSystemLoader(
        j.sal.fs.joinPaths(j.sal.fs.getParent(__file__), 'templates')))
    JSBASE.__init__(self)
def open(self):
    """Open a read-write and a read-only libvirt connection to the host."""
    uri = None if self._host == 'localhost' else 'qemu+ssh://%s/system' % self._host
    self.connection = libvirt.open(uri)
    self.readonly = libvirt.openReadOnly(uri)
def close(self):
    """Close both libvirt connections, swallowing any errors."""
    for con in (self.connection, self.readonly):
        if con:
            try:
                con.close()
            except BaseException:
                pass
def _get_domain(self, id):
    """Look up a domain by name.

    Returns the libvirt domain object, or None when the domain does not
    exist. Any other libvirt error is re-raised.

    BUG FIX: previously a libvirtError whose code was not
    VIR_ERR_NO_DOMAIN fell through to `return domain` with `domain`
    unbound, masking the real error with an UnboundLocalError.
    """
    try:
        return self.connection.lookupByName(id)
    except libvirt.libvirtError as e:
        if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
            return None
        raise
def create(self, id, xml):
    """Start (defining first if needed) the domain `id`.

    `xml` is the domain definition used when the domain is not defined
    yet. Returns True when the domain ends up running.
    """
    if self.isCurrentStorageAction(id):
        raise Exception("Can't start a locked machine")
    domain = self._get_domain(id)
    if not domain and xml:
        domain = self.connection.defineXML(xml)
    # Already running counts as success; otherwise create() returns 0 on success.
    if domain.state(0)[0] == libvirt.VIR_DOMAIN_RUNNING:
        return True
    return domain.create() == 0
def shutdown(self, id):
    """Gracefully shut down a domain; True when it is already stopped."""
    if self.isCurrentStorageAction(id):
        raise Exception("Can't stop a locked machine")
    domain = self._get_domain(id)
    stopped_states = (libvirt.VIR_DOMAIN_SHUTDOWN,
                      libvirt.VIR_DOMAIN_SHUTOFF,
                      libvirt.VIR_DOMAIN_CRASHED)
    if domain.state(0)[0] in stopped_states:
        return True
    return domain.shutdown() == 0
def suspend(self, id):
    """Pause the domain; True when it is already paused."""
    dom = self._get_domain(id)
    if dom.state(0)[0] == libvirt.VIR_DOMAIN_PAUSED:
        return True
    return dom.suspend() == 0
def resume(self, id):
    """Unpause the domain; True when it is already running."""
    dom = self._get_domain(id)
    if dom.state(0)[0] == libvirt.VIR_DOMAIN_RUNNING:
        return True
    return dom.resume() == 0
def backup_machine_to_filesystem(self, machineid, backuppath):
    """Archive a machine's storage-pool directory into `backuppath` as a
    gzipped tarball named vm-<uuid>.

    The domain is destroyed (hard power-off) first when it is still
    running, so the disk images are quiescent while being archived.

    BUG FIX: `gztar` was an undefined bare name (NameError), and
    '%04x' cannot format the UUID string `machineid` (TypeError) --
    both made this method unrunnable.
    """
    from shutil import make_archive
    if self.isCurrentStorageAction(machineid):
        raise Exception("Can't delete a locked machine")
    domain = self.connection.lookupByUUIDString(machineid)
    diskfiles = self._get_domain_disk_file_names(domain)
    if domain.state(0)[0] != libvirt.VIR_DOMAIN_SHUTOFF:
        domain.destroy()
    # Sanity pass: skip disks libvirt does not know as storage volumes.
    for diskfile in diskfiles:
        if os.path.exists(diskfile):
            try:
                self.connection.storageVolLookupByPath(diskfile)
            except BaseException:
                continue
    poolpath = os.path.join(self.basepath, domain.name())
    if os.path.exists(poolpath):
        archive_name = os.path.join(backuppath, 'vm-%s' % machineid)
        make_archive(archive_name, 'gztar', root_dir=poolpath)
    return True
def delete_machine(self, id):
    """Destroy and undefine a domain and remove its storage pool and files."""
    if self.isCurrentStorageAction(id):
        raise Exception("Can't delete a locked machine")
    domain = self._get_domain(id)
    if domain:
        # Hard power-off first when needed, then drop the definition
        # together with any snapshot metadata.
        if domain.state(0)[0] != libvirt.VIR_DOMAIN_SHUTOFF:
            domain.destroy()
        domain.undefineFlags(libvirt.VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA)
    pool_dir = os.path.join(self.basepath, id)
    try:
        # Remove every volume in the machine's pool, then the pool itself.
        pool = self.connection.storagePoolLookupByName(id)
        for volume in pool.listAllVolumes():
            volume.delete()
        pool.destroy()
    except BaseException:
        pass
    if os.path.exists(pool_dir):
        shutil.rmtree(pool_dir)
    return True
def _get_domain_disk_file_names(self, dom):
    """Return the source paths of a domain's 'disk' and 'cdrom' devices.

    `dom` may be a libvirt domain or an already-parsed ElementTree element.
    """
    if isinstance(dom, ElementTree.Element):
        root = dom
    else:
        root = ElementTree.fromstring(dom.XMLDesc(0))
    paths = []
    for disk in root.findall('devices/disk'):
        device = disk.attrib['device']
        if device not in ('disk', 'cdrom'):
            continue
        source = disk.find('source')
        if source is None:
            continue
        if device == 'disk':
            # Block devices expose 'dev', file-backed disks expose 'file'.
            if 'dev' in source.attrib:
                paths.append(source.attrib['dev'])
            if 'file' in source.attrib:
                paths.append(source.attrib['file'])
        else:  # cdrom
            paths.append(source.attrib['file'])
    return paths
def check_disk(self, diskxml):
    # Placeholder: disk XML validation is not implemented; every disk
    # definition is accepted as-is.
    return True
def memory_usage(self):
    """Return (host memory, total defined max, running max) for all domains.

    getInfo()[1] is the host memory size; dom.info() yields
    (state, maxMem, memory, ...).
    """
    host_mem = self.readonly.getInfo()[1]
    total_max = 0
    running_max = 0
    for dom_id in self.readonly.listDomainsID():
        dom = self.readonly.lookupByID(dom_id)
        state, max_mem = dom.info()[0:2]
        total_max += max_mem / 1000
        if state == libvirt.VIR_DOMAIN_RUNNING:
            running_max += max_mem / 1000
    return (host_mem, total_max, running_max)
def check_machine(self, machinexml):
    """True when the host has enough memory headroom (1024 units kept
    free) to start the machine described by `machinexml`."""
    required = int(ElementTree.fromstring(machinexml).find('memory').text)
    hostmem, _, totalrunningmax = self.memory_usage()
    return (totalrunningmax + required) <= (hostmem - 1024)
def _snapshot(self, id, xml, snapshottype):
    """Create a snapshot of domain `id` from snapshot XML `xml`.

    'internal' keeps the snapshot inside the image; any other type
    creates external, disk-only snapshot files.
    Returns {'name': ..., 'xml': ...} of the created snapshot.
    """
    if self.isCurrentStorageAction(id):
        raise Exception("Can't snapshot a locked machine")
    domain = self._get_domain(id)
    flags = 0 if snapshottype == 'internal' else libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY
    snap = domain.snapshotCreateXML(xml, flags)
    return {'name': snap.getName(), 'xml': snap.getXMLDesc()}
def listSnapshots(self, id):
    """Return [{'name', 'epoch'}] for every snapshot of domain `id`."""
    domain = self._get_domain(id)
    snapshots = []
    for snapshot in domain.listAllSnapshots():
        meta = ElementTree.fromstring(snapshot.getXMLDesc())
        snapshots.append({
            'name': meta.find('name').text,
            'epoch': int(meta.find('creationTime').text),
        })
    return snapshots
def deleteVolume(self, path):
    """Delete the storage volume that backs `path`."""
    return self.connection.storageVolLookupByPath(path).delete(0)
def getSnapshot(self, domain, name):
    """Return {'name', 'epoch'} info for snapshot `name` of `domain`.

    BUG FIX: the lookup previously used the literal string 'name'
    instead of the `name` argument, so only a snapshot literally called
    "name" could ever be found.
    """
    dom = self._get_domain(domain)
    snap = dom.snapshotLookupByName(name)
    # NOTE(review): 'epoch' holds the snapshot XML here, not a timestamp
    # (compare listSnapshots) -- confirm this is intended.
    return {'name': snap.getName(), 'epoch': snap.getXMLDesc()}
def _isRootVolume(self, domain, file):
    # True when `file` is one of the domain's disk files.
    # NOTE(review): _getDomainDiskFiles is not defined in this part of the
    # class -- possibly a stale name for _get_domain_disk_file_names;
    # confirm it exists elsewhere in the class.
    diskfiles = self._getDomainDiskFiles(domain)
    if file in diskfiles:
        return True
    return False
def _renameSnapshot(self, id, name, newname):
    """Rename a snapshot by redefining its XML under the new name and
    deleting the old metadata. Returns True."""
    domain = self._get_domain(id)
    snapshot = domain.snapshotLookupByName(name, 0)
    newxml = snapshot.getXMLDesc().replace(
        '<name>%s</name>' % name, '<name>%s</name>' % newname)
    # BUG FIX: the two flags were combined with the boolean `or`, which
    # evaluates to only the first flag; bitwise `|` passes both.
    domain.snapshotCreateXML(
        newxml,
        libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REDEFINE | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY)
    snapshot.delete(libvirt.VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA)
    return True
def deleteSnapshot(self, id, name):
    """Delete an external snapshot, merging its delta files back.

    Non-root volumes are merged with blockCommit; the active (root)
    volume is rebased onto its grand-parent image with blockRebase.
    The now-unused delta files are removed from disk afterwards.
    """
    if self.isCurrentStorageAction(id):
        raise Exception("Can't delete a snapshot from a locked machine")
    # Rename first so concurrent observers see the snapshot as dying.
    newname = '%s_%s' % (name, 'DELETING')
    self._renameSnapshot(id, name, newname)
    name = newname
    domain = self._get_domain(id)
    snapshot = domain.snapshotLookupByName(name, 0)
    snapshotfiles = self._getSnapshotDisks(id, name)
    volumes = []
    todelete = []
    for snapshotfile in snapshotfiles:
        is_root_volume = self._isRootVolume(
            domain, snapshotfile['file'].path)
        if not is_root_volume:
            # Merge the delta into its backing file.
            self.logger.debug('Blockcommit from %s to %s' % (snapshotfile[
                'file'].path, snapshotfile['file'].backing_file_path))
            result = domain.blockCommit(snapshotfile['name'], snapshotfile[
                'file'].backing_file_path, snapshotfile['file'].path)
            todelete.append(snapshotfile['file'].path)
            volumes.append(snapshotfile['name'])
        else:
            # we can't use blockcommit on topsnapshots
            new_base = Qcow2(
                snapshotfile['file'].backing_file_path).backing_file_path
            todelete.append(snapshotfile['file'].backing_file_path)
            if not new_base:
                continue
            self.logger.debug('Blockrebase from %s' % new_base)
            flags = libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY | libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT | libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW
            result = domain.blockRebase(
                snapshotfile['name'], new_base, flags)
            volumes.append(snapshotfile['name'])
    # Wait for every block job started above to finish.
    while not self._block_job_domain_info(domain, volumes):
        time.sleep(0.5)
    # we can undefine the snapshot
    snapshot.delete(flags=libvirt.VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA)
    # Remove the merged delta files from disk.
    for disk in todelete:
        if os.path.exists(disk):
            os.remove(disk)
    return True
def isCurrentStorageAction(self, domainid):
    """True when a storage action is in progress on the domain:
    either a running block job on its first disk, or a lock file
    taken for a qemu-img operation."""
    domain = self._get_domain(domainid)
    if not domain:
        return False
    # at this moment we suppose the machine is following the default naming
    # of disks
    if domain.state()[0] not in [libvirt.VIR_DOMAIN_SHUTDOWN,
                                 libvirt.VIR_DOMAIN_SHUTOFF, libvirt.VIR_DOMAIN_CRASHED]:
        # A populated blockJobInfo dict means a job is running on vda.
        status = domain.blockJobInfo('vda', 0)
        if 'cur' in status:
            return True
    # check also that there is no qemu-img job running
    if self.isLocked(domainid):
        return True
    return False
def _getLockFile(self, domainid):
    """Path of the lock file for `domainid`, creating the lock dir if needed."""
    lock_dir = '%s/domain_locks' % j.dirs.VARDIR
    if not j.sal.fs.exists(lock_dir):
        j.sal.fs.createDir(lock_dir)
    return '%s/%s.lock' % (lock_dir, domainid)
def _lockDomain(self, domainid):
    """Create the domain's lock file; LOCKEXIST when already locked."""
    if self.isLocked(domainid):
        return LOCKEXIST
    # The lock file records the time at which it was taken.
    j.sal.fs.writeFile(self._getLockFile(domainid), str(time.time()))
    return LOCKCREATED
def _unlockDomain(self, domainid):
    """Remove the domain's lock file; NOLOCK when it was not locked."""
    if not self.isLocked(domainid):
        return NOLOCK
    j.sal.fs.remove(self._getLockFile(domainid))
    return LOCKREMOVED
def isLocked(self, domainid):
    """True when a lock file exists for the domain."""
    return j.sal.fs.exists(self._getLockFile(domainid))
def _block_job_domain_info(self, domain, paths):
    """True once every block job listed in `paths` has finished."""
    return all(self._block_job_info(domain, path) for path in paths)
def _block_job_info(self, domain, path):
    """True when the block job on `path` is finished (or unreadable)."""
    status = domain.blockJobInfo(path, 0)
    self.logger.debug(status)
    try:
        # Done when the cursor reached the end; when no job is running
        # both default to 0 and this is also True.
        return status.get('cur', 0) == status.get('end', 0)
    except Exception:
        # Treat a malformed status as finished rather than spinning forever.
        return True
def rollbackSnapshot(self, id, name, deletechildren=True):
    """Revert the domain definition to snapshot `name`.

    The domain XML embedded in the snapshot is re-defined; child
    snapshots (and their disk files) are deleted when requested, and
    the snapshot itself is removed afterwards. Returns True.
    """
    if self.isCurrentStorageAction(id):
        raise Exception("Can't rollback a locked machine")
    domain = self._get_domain(id)
    snapshot = domain.snapshotLookupByName(name, 0)
    # The snapshot XML embeds the full domain definition at snapshot time.
    snapshotdomainxml = ElementTree.fromstring(snapshot.getXMLDesc(0))
    domainxml = snapshotdomainxml.find('domain')
    newxml = ElementTree.tostring(domainxml)
    self.connection.defineXML(newxml)
    if deletechildren:
        children = snapshot.listAllChildren(1)
        for child in children:
            snapshotfiles = self._getSnapshotDisks(id, child.getName())
            for snapshotfile in snapshotfiles:
                os.remove(snapshotfile['file'].path)
            child.delete(libvirt.VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA)
    # Remove the snapshot's own delta files and metadata.
    snapshotfiles = self._getSnapshotDisks(id, name)
    for snapshotfile in snapshotfiles:
        os.remove(snapshotfile['file'].path)
    snapshot.delete(libvirt.VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA)
    return True
def _clone(self, id, name, clonefrom):
    """Copy disk image `clonefrom` of domain `id` into the template dir.

    A stopped/paused machine (or a non-root volume) is exported via
    Qcow2.export under a domain lock; a running machine's active disk
    is copied with blockRebase while the domain is temporarily
    undefined, then the definition is restored.
    Returns the destination path of the new qcow2 file.
    """
    domain = self.connection.lookupByUUIDString(id)
    # Keep the current definition so it can be restored afterwards.
    domainconfig = domain.XMLDesc()
    name = '%s_%s.qcow2' % (name, time.time())
    destination_path = os.path.join(self.templatepath, name)
    if domain.state()[0] in [
            libvirt.VIR_DOMAIN_SHUTDOWN,
            libvirt.VIR_DOMAIN_SHUTOFF,
            libvirt.VIR_DOMAIN_CRASHED,
            libvirt.VIR_DOMAIN_PAUSED] or not self._isRootVolume(
            domain,
            clonefrom):
        # Offline path: lock the domain while the image is read.
        if not self.isLocked(id):
            lock = self._lockDomain(id)
            if lock != LOCKCREATED:
                raise Exception('Failed to create lock: %s' % str(lock))
        else:
            raise Exception("Can't perform this action on a locked domain")
        q2 = Qcow2(clonefrom)
        try:
            q2.export(destination_path)
        finally:
            if self.isLocked(id):
                self._unlockDomain(id)
    else:
        # Online path: undefine so blockRebase may copy the active disk.
        domain.undefine()
        try:
            domain.blockRebase(clonefrom, destination_path,
                               0, libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY)
            rebasedone = False
            while not rebasedone:
                rebasedone = self._block_job_info(domain, clonefrom)
            domain.blockJobAbort(clonefrom, 0)
        except BaseException:
            # Restore the definition before propagating the error.
            self.connection.defineXML(domainconfig)
            raise
        self.connection.defineXML(domainconfig)
    return destination_path
def _getImageId(self, path):
    # An image id is the SHA-1 hash of the exported template file.
    return j.data.hash.sha1(path)
def exportToTemplate(self, id, name, clonefrom):
    """Export a machine disk (or snapshot backing file) to the template
    directory; return (imageid, destination_path).

    When `clonefrom` is falsy the domain's first disk is exported;
    otherwise the backing file of the first external disk of snapshot
    `name` is used.
    """
    if self.isCurrentStorageAction(id):
        raise Exception("Can't export a locked machine")
    domain = self.connection.lookupByUUIDString(id)
    if not clonefrom:
        # NOTE(review): _getDomainDiskFiles is not defined in this part of
        # the class -- possibly _get_domain_disk_file_names; confirm.
        domaindisks = self._getDomainDiskFiles(domain)
        if len(domaindisks) > 0:
            clonefrom = domaindisks[0]
        else:
            # BUG FIX: message previously read 'Node image found'.
            raise Exception('No image found for this machine')
    else:
        snapshotfiles = self._getSnapshotDisks(id, name)
        # we just take the first one at this moment
        if len(snapshotfiles) > 0:
            clonefrom = snapshotfiles[0]['file'].backing_file_path
        else:
            raise Exception('No snapshot found')
    destination_path = self._clone(id, name, clonefrom)
    imageid = self._getImageId(destination_path)
    return imageid, destination_path
def create_disk(self, diskxml, poolname):
    """Create a volume described by `diskxml` inside pool `poolname`."""
    self._get_pool(poolname).createXML(diskxml, 0)
    return True
def _getSnapshotDisks(self, id, name):
    """Return [{'name': target, 'file': Qcow2}] for the external disks
    of snapshot `name`; internal snapshot disks are skipped."""
    domain = self._get_domain(id)
    snapshot = domain.snapshotLookupByName(name, 0)
    snapshotxml = ElementTree.fromstring(snapshot.getXMLDesc(0))
    snapshotfiles = []
    disks = snapshotxml.findall('disks/disk')
    for disk in disks:
        source = disk.find('source')
        if source is not None and disk.attrib['snapshot'] == 'external':
            snapshotfiles.append(
                {'name': disk.attrib['name'], 'file': Qcow2(source.attrib['file'])})
    return snapshotfiles
def _get_pool(self, poolname):
    """Return the storage pool, ensuring it exists and is started first."""
    self.check_storagepool(poolname)
    return self.connection.storagePoolLookupByName(poolname)
def check_storagepool(self, poolname):
    """Ensure a storage pool named `poolname` exists and is started,
    creating its directory when missing. Returns True."""
    if poolname not in self.connection.listStoragePools():
        poolpath = os.path.join(self.basepath, poolname)
        if not os.path.exists(poolpath):
            os.makedirs(poolpath)
            # Disable copy-on-write on the pool dir (chattr +C) --
            # presumably for VM images on btrfs; failure is tolerated.
            cmd = 'chattr +C %s ' % poolpath
            j.sal.process.execute(
                cmd, die=False, showout=False, useShell=False, ignoreErrorOutput=False)
        # Render the pool definition from the Jinja2 template and start it.
        pool = self.env.get_template('pool.xml').render(
            poolname=poolname, basepath=self.basepath)
        self.connection.storagePoolCreateXML(pool, 0)
    return True
def create_machine(self, machinexml):
    """Define and start a machine from XML; return its node dict."""
    dom = self.connection.defineXML(machinexml)
    dom.create()
    return self._to_node(dom)
def _to_node(self, domain):
    """Serialize a libvirt domain to a plain dict, including lock state."""
    state, max_mem, memory, vcpu_count, used_cpu_time = domain.info()
    locked = self.isCurrentStorageAction(domain.UUIDString())
    # memory from dom.info() is divided by 1024 before being exposed.
    extra = {'uuid': domain.UUIDString(), 'os_type': domain.OSType(), 'types': self.connection.getType(
    ), 'used_memory': memory / 1024, 'vcpu_count': vcpu_count, 'used_cpu_time': used_cpu_time, 'locked': locked}
    return {'id': domain.UUIDString(), 'name': domain.name(), 'state': state,
            'extra': extra, 'XMLDesc': domain.XMLDesc(0)}
def _to_node_list(self, domain):
state, max_mem, memory, vcpu_count, used_cpu_time = domain.info()
extra = {'uuid': domain.UUIDString(), 'os_type': domain.OSType(), 'types': self.connection.getType(
), 'used_memory': memory / 1024, 'vcpu_count': vcpu_count, 'used_cpu_time': used_cpu_time}
return {'id': domain.UUIDString(), 'name': domain.name(), | |
import argparse
import threading
import socket
import time
import _pickle as cpickle
import hashlib
import pygame
import copyreg
import ctypes
import lz4.frame
def get_ip_address():
    """Return the local IP address used to reach the internet.

    connect() on a UDP socket does not send any packet -- it only
    selects a route, which is enough for getsockname() to report the
    outgoing interface address.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 80))
        return s.getsockname()[0]
    finally:
        # BUG FIX: the socket previously leaked (never closed).
        s.close()
class GLL:
    # Shared global state for the receiver threads.
    SCREEN = (300, 300)  # video frame size (width, height)
    LOCAL = '127.0.0.1'  # address this receiver binds to
    DISTANT = None  # generator address (filled in from CLI args)
    VERBOSE = False  # extra diagnostics when True
    PORT = 59000  # video port; the control channel uses PORT - 3
    SIZE = int((SCREEN[0]) * (SCREEN[1]) * 3)  # bytes per RGB frame
    STOP = threading.Event()  # global shutdown signal
    condition = threading.Condition()
    thread3 = threading.Condition()
    inner = threading.Event()  # "receiver ready" event pickled to the generator
    event_trigger = threading.Event()  # wakes the Control broadcaster
def unserialize_event(isset):
    """Rebuild a threading.Event whose set-state matches `isset`.

    Counterpart of serialize_event, used when unpickling an Event.
    """
    event = threading.Event()
    if isset:
        event.set()
    return event
def serialize_event(event):
    """copyreg reducer: pickle a threading.Event as (factory, (is_set,))."""
    return unserialize_event, (event.is_set(),)
# Register the reducer so threading.Event instances (e.g. GLL.inner)
# can be pickled and sent over the control socket.
copyreg.pickle(threading.Event, serialize_event)
def my_timer():
    """Wait ~1 ms using a Windows waitable timer.

    Windows-only: relies on ctypes.windll (kernel32).
    """
    kernel32 = ctypes.windll.kernel32
    # This sets the priority of the process to realtime--the same priority as the mouse pointer.
    kernel32.SetThreadPriority(kernel32.GetCurrentThread(), 31)
    # This creates a timer. This only needs to be done once.
    timer = kernel32.CreateWaitableTimerA(ctypes.c_void_p(), True, ctypes.c_void_p())
    # The kernel measures in 100 nanosecond intervals, so we must multiply 1 by 10000
    delay = ctypes.c_longlong(1 * 10000)
    # NOTE(review): a positive due time is absolute in SetWaitableTimer
    # terms; a relative 1 ms delay is normally negative -- confirm intended.
    kernel32.SetWaitableTimer(timer, ctypes.byref(delay), 0, ctypes.c_void_p(), ctypes.c_void_p(), False)
    kernel32.WaitForSingleObject(timer, 0xffffffff)
class Control(threading.Thread):
    """Back-channel thread: forwards the pickled GLL.inner event over UDP
    to the video generator (GLL.DISTANT, PORT - 3) whenever
    GLL.event_trigger is set, signaling that the receiver is ready."""

    def __init__(self):
        threading.Thread.__init__(self)
        try:
            self.ss = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            # self.ss.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
            print('\n[+]INFO - Control socket broadcasting to %s %s ' % (GLL.DISTANT, GLL.PORT - 3))
        except socket.error as e:
            print('\n[-]ERROR - Control socket not connected.. : %s ' % e)
            GLL.STOP.set()

    def run(self):
        while not GLL.STOP.isSet():
            try:
                # Busy-wait (1 ms waitable timer) until the receiver signals
                # readiness, then forward the event to the generator.
                while not GLL.event_trigger.isSet():
                    my_timer()
                else:
                    # NOTE(review): event_trigger is never cleared here, so
                    # once set this loop sends continuously -- confirm intended.
                    self.ss.sendto(cpickle.dumps(GLL.inner), (GLL.DISTANT, GLL.PORT - 3))
            except Exception as e:
                print('\n[-] Error - Control %s ' % e)
        print('\n[+]INFO - Control : thread is now terminated.')
        self.ss.close()
class VideoInputReceiver(threading.Thread):
    """
    Class receiving all the data frames (video UDP packets send by the video generator).
    Default address is 127.0.0.1 and PORT 59000
    The data flows is synchronize with threading condition and events that allows a perfect synchronization
    between receiver and transmitter (depend on system resources).
    Threading condition is controlling the start of each transfer sessions and events between threads are signaling
    to the packet generator that the receiver is ready for the next packet.
    The VideoInputReceiver class is re-ordering the UDP packets, in fact it is receiving them sequentially.
    A packet out of sync will be disregard and lost.
    This version is not designed for inventorying and processing (re-transfer) lost data frames.
    Packet received are checked with a checksum using hashlib library and synchronization is checked
    for each packet received.
    Data sent by the frame generator are bytes string like data composed with the function
    pygame.image.tostring and then pickled using the module _pickle (fast C version of pickle for python).
    All the data frames are fragmented and composed into packets with the following header
    packet = _pickle(frame number, size, data chunk, checksum)
    frame : number (integer) representing the actual frame number being sequenced
    size : data size (integer), 1024 bytes for most packets and <1024 for the last packet.
    data chunk : Chunk of 1024 bytes string
    checksum : md5 hash value to check data integrity
    Every packets are serialized object sent to the receiver.
    The receiver has to follow the same reverse processes in order to de-serialized packets and build
    the frame to be display to the video output.
    The frames will be disregard for the following cases:
    - out of sync data
    - Received frame size is different from the source generator
    - Checksum error
    Data flow :
    * loop until STOP signal
    * wait for condition
    * loop until all packet received
    * wait for packet
    process packets
    integrity checks
    build and display frame
    signal ready
    Nota: The socket is blocking the thread until the generator is sending packets (no timeout)
    GL is the class holding all the global variable.
    """

    def __init__(self):
        threading.Thread.__init__(self)

    def sort_(self, tuple_):
        # Order packets by their leading frame/packet index (element 0).
        return sorted(tuple_, key=lambda x: int(x[0]), reverse=False)

    def run(self):
        width, height = GLL.SCREEN
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        except socket.error as error:
            print('\n[-]Error - Receiver : %s ' % error)
            GLL.STOP.set()
        try:
            sock.bind((GLL.LOCAL, GLL.PORT))
            print('\n[+]INFO - Video socket listening to %s %s ' % (GLL.LOCAL, GLL.PORT))
        except socket.error as error:
            print('\n[-]Error - Receiver : %s ' % error)
            GLL.STOP.set()
        frame = 0
        while not GLL.STOP.isSet():
            # Packets collected for the current frame.
            capture = []
            try:
                buffer_ = b''
                size = GLL.SIZE
                packets = 0
                # Keep receiving until a full frame's worth of bytes arrived.
                while size > 0:
                    data_, addr = sock.recvfrom(2048)
                    data = cpickle.loads(data_)
                    capture.append(data)
                    # if not data_:
                    #     break
                    # Receiver being told to abort reception
                    if data == b'quit':
                        GLL.STOP.set()
                        # Tell the generator to stop sending packets
                        GLL.inner.set()
                        GLL.event_trigger.set()
                        break
                    # if packets number is not equal to the packet number then
                    # the transfer is not synchronized, or a packet has been dropped
                    if packets != data[0]:
                        # NOTE(review): bare VERBOSE is not defined in this
                        # module (only GLL.VERBOSE); unless the __main__
                        # section defines a global VERBOSE this raises
                        # NameError -- confirm.
                        if VERBOSE:
                            print('\n[-]ERROR - Receiver : packet not synchronised, packet %s %s.'
                                  % (packets, data[0]))
                        GLL.inner.set()
                        GLL.event_trigger.set()
                        break
                    # Verify the payload chunk against its md5 digest.
                    checksum = hashlib.md5()
                    checksum.update(bytes(data[2]))
                    chk = checksum.hexdigest()
                    if chk != data[3]:
                        # NOTE(review): same bare-VERBOSE concern as above.
                        if VERBOSE:
                            print('\n[-]ERROR - Receiver : checksum error. ')
                        GLL.inner.set()
                        GLL.event_trigger.set()
                        break
                    size -= data[1]
                    packets += 1
                    # Receiver is now ready for the next packet.
                    GLL.inner.set()
                    GLL.event_trigger.set()
                if not GLL.STOP.isSet():
                    # sorting out packets using the frame number index
                    # in the eventuality that packets are received asynchronously
                    # sort_capture = self.sort_(capture)
                    sort_capture = capture
                    # build the image by adding every chunks of bytes string received to
                    # compose the video buffer.
                    for element in range(len(sort_capture)):
                        buffer_ += sort_capture[element][2]
                    if len(buffer_) == GLL.SIZE:
                        # Publish the completed frame for the display loop.
                        global image_
                        image_ = pygame.image.frombuffer(buffer_, (width, height), 'RGB')
                    else:
                        # NOTE(review): same bare-VERBOSE concern as above.
                        if VERBOSE:
                            print('\n[-]ERROR - Receiver : Video buffer is corrupted.')
            except Exception as e:
                print('\n[-]ERROR - Receiver : %s ' % e)
            finally:
                frame += 1
        print('\n[+]INFO - Receiver : thread is now terminated.')
class SoundSocketReceiver(threading.Thread):
    """
    Class receiving a pygame sound object through a TCP socket.
    The sound object is fragmented and compressed with the lz4 library by the sound generator.
    Default address is 127.0.0.1 and PORT 58999
    Data flow :
    * loop until STOP signal
    * wait for packet
    if no data, close connection
    if packet size=0 decompress data and play the sound object to the mixer
    else build sound object by adding remaing chunks
    """

    def __init__(self,
                 host_,  # host address
                 port_,  # PORT value
                 ):
        threading.Thread.__init__(self)
        """
        Create a TCP socket server to received sound data frames
        :param host_: String corresponding to the server address
        :param port_: Integer used for the PORT.
        Port to listen on (non-privileged ports are > 1023) and 0 < port_ < 65535
        """
        # Validate constructor arguments before binding.
        assert isinstance(host_, str), \
            'Expecting string for argument host_, got %s instead.' % type(host_)
        assert isinstance(port_, int), \
            'Expecting integer for argument port_, got %s instead.' % type(port_)
        assert 0 < port_ < 65535, \
            'Incorrect value assign to port_, 0 < port_ < 65535, got %s ' % port_
        # Create a TCP/IP socket
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            # Bind the socket to the PORT
            self.sock.bind((host_, port_))
        except socket.error as error:
            print('\n[-] Error : %s ' % error)
        try:
            # Listen for incoming connections
            self.sock.listen(1)
            print('\n[+]INFO - Sound socket listening to %s %s ' % (host_, port_))
        except socket.error as error:
            print('\n[-] Error : %s ' % error)

    def run(self):
        frame = 0
        while not GLL.STOP.isSet():
            try:
                # Wait for a connection
                connection, client_address = self.sock.accept()
            except Exception as e:
                print('\n[-]ERROR - Sound receiver %s ' % e)
                GLL.STOP.set()
                connection, client_address = None, None
                break
            try:
                buffer = b''
                # Receive the data in small chunks
                while not GLL.STOP.isSet():
                    data = connection.recv(4096)
                    # build the sound by adding data chunks
                    if len(data) > 0:
                        buffer += data
                    else:
                        # Peer closed the connection: the buffer is complete.
                        # Decompress the data frame
                        decompress_data = lz4.frame.decompress(buffer)
                        if decompress_data == b'quit':
                            GLL.STOP.set()
                            break
                        sound = pygame.mixer.Sound(decompress_data)
                        sound.play()
                        break
            except Exception as e:
                print('\n[-]ERROR - Sound receiver %s ' % e)
                GLL.STOP.set()
            finally:
                if connection is not None:
                    connection.close()
                frame += 1
        print('\n[+]INFO - Sound receiver thread is now terminated.')
if __name__ == '__main__':
width_, height_ = (300, 300)
SCREENRECT = pygame.Rect(0, 0, width_, height_)
pygame.display.init()
SCREEN = pygame.display.set_mode(SCREENRECT.size, pygame.RESIZABLE, 32)
SCREEN.set_alpha(None)
pygame.display.set_caption('UDP Receiver')
pygame.mixer.pre_init(44100, 16, 2, 4095)
pygame.init()
image_ = pygame.Surface((300, 300))
ap = argparse.ArgumentParser()
ap.add_argument("-l", "--local", required=True, default=GLL.LOCAL, help="local ip e.g '192.168.1.108'")
ap.add_argument("-d", "--distant", required=True, | |
<reponame>lawrence14701/galaxy<gh_stars>10-100
#!/usr/bin/env python
# % brew vinstall samtools 1.0
# % brew vinstall samtools 0.1.19
# % brew vinstall samtools 1.1
# % brew env samtools 1.1
# PATH=/home/john/.linuxbrew/Cellar/htslib/1.1/bin:/home/john/.linuxbrew/Cellar/samtools/1.1/bin:$PATH
# export PATH
# LD_LIBRARY_PATH=/home/john/.linuxbrew/Cellar/htslib/1.1/lib:/home/john/.linuxbrew/Cellar/samtools/1.1/lib:$LD_LIBRARY_PATH
# export LD_LIBRARY_PATH
# % . <(brew env samtools 1.1)
# % which samtools
# /home/john/.linuxbrew/Cellar/samtools/1.1/bin/samtools
# % . <(brew env samtools 0.1.19)
# % which samtools
# /home/john/.linuxbrew/Cellar/samtools/0.1.19/bin/samtools
# % brew vuninstall samtools 1.0
# % brew vdeps samtools 1.1
# htslib@1.1
# % brew vdeps samtools 0.1.19
from __future__ import print_function
import argparse
import contextlib
import glob
import json
import os
import re
import string
import subprocess
import sys
# Pattern for splitting whitespace-delimited brew output.
WHITESPACE_PATTERN = re.compile(r"[\s]+")
DESCRIPTION = "Script built on top of linuxbrew to operate on isolated, versioned brew installed environments."
# Default brew prefix: /usr/local on macOS, ~/.linuxbrew elsewhere.
if sys.platform == "darwin":
    DEFAULT_HOMEBREW_ROOT = "/usr/local"
else:
    DEFAULT_HOMEBREW_ROOT = os.path.join(os.path.expanduser("~"), ".linuxbrew")
NO_BREW_ERROR_MESSAGE = "Could not find brew on PATH, please place on path or pass to script with --brew argument."
CANNOT_DETERMINE_TAP_ERROR_MESSAGE = "Cannot determine tap of specified recipe - please use fully qualified recipe (e.g. homebrew/science/samtools)."
# Mutable module globals overwritten from CLI flags in main().
VERBOSE = False
RELAXED = False
BREW_ARGS = []
class BrewContext(object):
    """Locations of the brew installation (prefix and cellar), parsed
    from the output of `brew config`."""

    def __init__(self, args=None):
        ensure_brew_on_path(args)
        raw_config = brew_execute(["config"])
        config = {}
        for line in raw_config.split("\n"):
            if line:
                key, value = line.strip().split(":", 1)
                config[key.strip()] = value.strip()
        # unset if "/usr/local" -> https://github.com/Homebrew/homebrew/blob/master/Library/Homebrew/cmd/config.rb
        self.homebrew_prefix = config.get("HOMEBREW_PREFIX", "/usr/local")
        self.homebrew_cellar = config.get(
            "HOMEBREW_CELLAR", os.path.join(self.homebrew_prefix, "Cellar"))
class RecipeContext(object):
    """A (recipe, version) pair bound to a BrewContext, with helpers to
    locate the recipe's cellar keg and its tap directory."""

    @staticmethod
    def from_args(args, brew_context=None):
        # Build a context straight from parsed CLI arguments.
        return RecipeContext(args.recipe, args.version, brew_context)

    def __init__(self, recipe, version, brew_context=None):
        self.recipe = recipe
        self.version = version
        self.brew_context = brew_context or BrewContext()

    @property
    def cellar_path(self):
        # Versioned keg location, e.g. <cellar>/samtools/1.1
        return recipe_cellar_path(self.brew_context.homebrew_cellar, self.recipe, self.version)

    @property
    def tap_path(self):
        # <prefix>/Library/Taps/<username>/<repository>
        return os.path.join(self.brew_context.homebrew_prefix, "Library", "Taps", self.__tap_path(self.recipe))

    def __tap_path(self, recipe):
        # Resolve "<username>/<repository>" for the tap providing `recipe`.
        parts = recipe.split("/")
        if len(parts) == 1:
            # Unqualified recipe: derive the tap from the recipe's source URL.
            info = brew_info(self.recipe)
            from_url = info["from_url"]
            if not from_url:
                raise Exception(CANNOT_DETERMINE_TAP_ERROR_MESSAGE)
            from_url_parts = from_url.split("/")
            blob_index = from_url_parts.index("blob")  # comes right after username and repository
            # Guard against a URL too short to contain username/repository.
            if blob_index < 2:
                raise Exception(CANNOT_DETERMINE_TAP_ERROR_MESSAGE)
            username = from_url_parts[blob_index - 2]
            repository = from_url_parts[blob_index - 1]
        else:
            # Fully-qualified "user/tap/recipe" form.
            assert len(parts) == 3
            # NOTE(review): redundant re-split; `parts` is unchanged.
            parts = recipe.split("/")
            username = parts[0]
            repository = "homebrew-%s" % parts[1]
        path = os.path.join(username, repository)
        return path
def main():
    """Entry point: parse command-line arguments and dispatch the action."""
    global VERBOSE
    global RELAXED
    global BREW_ARGS
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    parser.add_argument("--brew", help="Path to linuxbrew 'brew' executable to target")
    actions = ["vinstall", "vuninstall", "vdeps", "vinfo", "env"]
    # __action() may already determine the action (helper not shown here -
    # presumably derived from the invoked program name; confirm). Only add
    # the positional 'action' argument when it did not.
    action = __action(sys)
    if not action:
        parser.add_argument('action', metavar='action', help="Versioned action to perform.", choices=actions)
    parser.add_argument('recipe', metavar='recipe', help="Recipe for action - should be absolute (e.g. homebrew/science/samtools).")
    parser.add_argument('version', metavar='version', help="Version for action (e.g. 0.1.19).")
    parser.add_argument('--relaxed', action='store_true', help="Relaxed processing - for instance allow use of env on non-vinstall-ed recipes.")
    parser.add_argument('--verbose', action='store_true', help="Verbose output")
    # Everything after the recognized arguments is forwarded to brew itself.
    parser.add_argument('restargs', nargs=argparse.REMAINDER)
    args = parser.parse_args()
    if args.verbose:
        VERBOSE = True
    if args.relaxed:
        RELAXED = True
    BREW_ARGS = args.restargs
    if not action:
        action = args.action
    brew_context = BrewContext(args)
    recipe_context = RecipeContext.from_args(args, brew_context)
    # Dispatch to the requested sub-command.
    if action == "vinstall":
        versioned_install(recipe_context, args.recipe, args.version)
    elif action == "vuninstall":
        brew_execute(["switch", args.recipe, args.version])
        brew_execute(["uninstall", args.recipe])
    elif action == "vdeps":
        print_versioned_deps(recipe_context, args.recipe, args.version)
    elif action == "env":
        env_statements = build_env_statements_from_recipe_context(recipe_context)
        print(env_statements)
    elif action == "vinfo":
        # Show brew's info for the recipe as it existed at the given version.
        with brew_head_at_version(recipe_context, args.recipe, args.version):
            print(brew_info(args.recipe))
    else:
        raise NotImplementedError()
class CommandLineException(Exception):
    """Raised when an executed command exits with a non-zero status."""

    def __init__(self, command, stdout, stderr):
        self.command = command
        self.stdout = stdout
        self.stderr = stderr
        # Pre-render a message embedding both output streams for debugging.
        template = "\n".join([
            "Failed to execute command-line %s, stderr was:",
            "-------->>begin stderr<<--------",
            "%s",
            "-------->>end stderr<<--------",
            "-------->>begin stdout<<--------",
            "%s",
            "-------->>end stdout<<--------",
            "",
        ])
        self.message = template % (command, stderr, stdout)

    def __str__(self):
        return self.message
def versioned_install(recipe_context, package=None, version=None, installed_deps=None):
    """Install `package` at `version` with its dependencies pinned.

    Versioned dependencies are recursively vinstall-ed and switched to their
    pinned version; unversioned ones are installed at latest. The resulting
    dependency metadata is recorded next to the keg in
    INSTALL_RECEIPT_VERSIONED.json. All kegs are unlinked afterwards to keep
    the environment isolated.

    BUG FIXES vs. previous revision:
    - `installed_deps` was a mutable default argument used as hidden shared
      state across recursive calls; it is now threaded through explicitly.
    - the receipt's 'versioned' flag reused a stale loop variable from an
      earlier loop (and raised NameError when there were no deps); the flag
      is now tracked per dependency.
    """
    if installed_deps is None:
        installed_deps = []
    if package is None:
        package = recipe_context.recipe
        version = recipe_context.version
    attempt_unlink(package)
    with brew_head_at_version(recipe_context, package, version):
        deps = brew_deps(package)
        deps_metadata = []
        dep_to_version = {}
        dep_to_versioned = {}
        for dep in deps:
            version_info = brew_versions_info(dep, recipe_context.tap_path)[0]
            dep_version = version_info[0]
            dep_versioned = version_info[2]
            dep_to_versioned[dep] = dep_versioned
            if dep_versioned:
                dep_to_version[dep] = dep_version
                if dep in installed_deps:
                    continue
                versioned_install(recipe_context, dep, dep_version, installed_deps)
                installed_deps.append(dep)
            else:
                # Just install the latest version of unversioned deps.
                dep_to_version[dep] = None
                if dep in installed_deps:
                    continue
                unversioned_install(dep)
        try:
            for dep in deps:
                dep_version = dep_to_version[dep]
                if dep_version:
                    brew_execute(["switch", dep, dep_version])
                else:
                    brew_execute(["link", dep])
                # dep_version obtained from brew versions doesn't
                # include revision. This linked_keg attribute does.
                keg_version = brew_info(dep)["linked_keg"]
                dep_metadata = {
                    'name': dep,
                    'version': keg_version,
                    'versioned': dep_to_versioned[dep]
                }
                deps_metadata.append(dep_metadata)
            cellar_root = recipe_context.brew_context.homebrew_cellar
            cellar_path = recipe_context.cellar_path
            env_actions = build_env_actions(deps_metadata, cellar_root, cellar_path, custom_only=True)
            env = EnvAction.build_env(env_actions)
            args = ["install"]
            if VERBOSE:
                args.append("--verbose")
            args.extend(BREW_ARGS)
            args.append(package)
            brew_execute(args, env=env)
            deps = brew_execute(["deps", package])
            deps = [d.strip() for d in deps.split("\n") if d]
            # Record the pinned dependency set alongside the keg.
            metadata = {
                'deps': deps_metadata
            }
            cellar_root = recipe_context.brew_context.homebrew_cellar
            cellar_path = recipe_cellar_path(cellar_root, package, version)
            v_metadata_path = os.path.join(cellar_path, "INSTALL_RECEIPT_VERSIONED.json")
            with open(v_metadata_path, "w") as f:
                json.dump(metadata, f)
        finally:
            # Always leave the environment unlinked, even on failure.
            attempt_unlink_all(package, deps)
def commit_for_version(recipe_context, package, version):
    """Return the tap commit that provides `version` of `package`.

    When `version` is None, the most recent version (and its commit) is
    selected. Raises an Exception when no commit provides the version.
    """
    tap_path = recipe_context.tap_path
    commit = None
    with brew_head_at_commit("master", tap_path):
        version_to_commit = brew_versions_info(package, tap_path)
        if version is None:
            # Default to the newest known version.
            version = version_to_commit[0][0]
            commit = version_to_commit[0][1]
        else:
            # Note: no break - the last matching entry wins, as before.
            for mapping in version_to_commit:
                if mapping[0] == version:
                    commit = mapping[1]
    if commit is None:
        raise Exception("Failed to find commit for version %s" % version)
    return commit
def print_versioned_deps(recipe_context, recipe, version):
    """Print one line per recorded dependency: `name` or `name@version`."""
    for dep in load_versioned_deps(recipe_context.cellar_path):
        if dep['versioned']:
            print("%s@%s" % (dep['name'], dep['version']))
        else:
            print(dep['name'])
def load_versioned_deps(cellar_path, relaxed=None):
    """Load the versioned dependency list recorded for a keg.

    Reads INSTALL_RECEIPT_VERSIONED.json under `cellar_path` and returns its
    'deps' entries. When the receipt is missing: returns [] if `relaxed` is
    truthy (defaulting to the module-level RELAXED flag), otherwise raises
    IOError.
    """
    if relaxed is None:
        relaxed = RELAXED
    v_metadata_path = os.path.join(cellar_path, "INSTALL_RECEIPT_VERSIONED.json")
    if not os.path.isfile(v_metadata_path):
        # BUG FIX: previously consulted the global RELAXED here, silently
        # ignoring an explicitly passed `relaxed` argument.
        if relaxed:
            return []
        else:
            raise IOError("Could not locate versioned receipt file: {}".format(v_metadata_path))
    with open(v_metadata_path, "r") as f:
        metadata = json.load(f)
    return metadata['deps']
def unversioned_install(package):
    """Install the latest version of `package`, linking its deps first.

    Dependencies and the package itself are unlinked afterwards so the
    environment stays isolated.
    """
    # BUG FIX: bind `deps` up front - if brew_deps() itself fails, the
    # finally clause previously raised UnboundLocalError, masking the
    # original error.
    deps = []
    try:
        deps = brew_deps(package)
        for dep in deps:
            brew_execute(["link", dep])
        brew_execute(["install", package])
    finally:
        attempt_unlink_all(package, deps)
def attempt_unlink_all(package, deps):
    """Best-effort unlink of every dependency, then the package itself."""
    for name in deps:
        attempt_unlink(name)
    attempt_unlink(package)
def attempt_unlink(package):
    """Unlink `package`, silently ignoring failures (e.g. not linked)."""
    try:
        brew_execute(["unlink", package])
    except Exception:
        # The keg may simply not be linked. TODO: warn.
        pass
def brew_execute(args, env=None):
    """Run `brew` with `args` and return its stdout."""
    # Disable emoji so brew's output stays easy to parse.
    os.environ["HOMEBREW_NO_EMOJI"] = "1"
    return execute(["brew"] + args, env=env)
def build_env_statements_from_recipe_context(recipe_context, **kwds):
    """Render shell env statements for a recipe's cellar environment."""
    cellar_root = recipe_context.brew_context.homebrew_cellar
    return build_env_statements(cellar_root, recipe_context.cellar_path, **kwds)
def build_env_statements(cellar_root, cellar_path, relaxed=None, custom_only=False):
    """Build newline-joined shell statements configuring a keg's environment."""
    deps = load_versioned_deps(cellar_path, relaxed=relaxed)
    actions = build_env_actions(deps, cellar_root, cellar_path, relaxed, custom_only)
    lines = [line for action in actions for line in action.to_statements()]
    return "\n".join(lines)
def build_env_actions(deps, cellar_root, cellar_path, relaxed=None, custom_only=False):
    """Compute the EnvActions needed to activate a keg and its deps.

    Walks each dependency's cellar plus the target cellar itself, collecting
    bin/ directories (for PATH), lib/ directories (for LD_LIBRARY_PATH) and
    any custom actions declared in a keg's platform_environment.json. Unless
    `custom_only` is set, prepend actions for PATH and LD_LIBRARY_PATH are
    appended for the collected directories.
    """
    path_appends = []
    ld_path_appends = []
    actions = []

    def handle_keg(keg_cellar_path):
        # bin/ feeds PATH; lib/ feeds LD_LIBRARY_PATH.
        bin_path = os.path.join(keg_cellar_path, "bin")
        if os.path.isdir(bin_path):
            path_appends.append(bin_path)
        lib_path = os.path.join(keg_cellar_path, "lib")
        if os.path.isdir(lib_path):
            ld_path_appends.append(lib_path)
        # A keg may declare extra, custom env actions.
        env_path = os.path.join(keg_cellar_path, "platform_environment.json")
        if os.path.exists(env_path):
            with open(env_path, "r") as f:
                env_metadata = json.load(f)
            if "actions" in env_metadata:
                def to_action(desc):
                    return EnvAction(keg_cellar_path, desc)
                actions.extend(map(to_action, env_metadata["actions"]))

    for dep in deps:
        package = dep['name']
        version = dep['version']
        dep_cellar_path = recipe_cellar_path(cellar_root, package, version)
        handle_keg(dep_cellar_path)
    handle_keg(cellar_path)
    if not custom_only:
        if path_appends:
            actions.append(EnvAction(cellar_path, {"action": "prepend", "variable": "PATH", "value": ":".join(path_appends)}))
        if ld_path_appends:
            # BUG FIX: previously joined path_appends (the bin dirs) into
            # LD_LIBRARY_PATH instead of the collected lib dirs.
            actions.append(EnvAction(cellar_path, {"action": "prepend", "variable": "LD_LIBRARY_PATH", "value": ":".join(ld_path_appends)}))
    return actions
class EnvAction(object):
    """A single environment modification (set/prepend/append) for a keg.

    The action description supplies `variable`, `action` and `value`;
    $KEG_ROOT occurrences in the value are replaced with the keg's root.
    """

    def __init__(self, keg_root, action_description):
        self.variable = action_description["variable"]
        self.action = action_description["action"]
        self.value = string.Template(action_description["value"]).safe_substitute({
            'KEG_ROOT': keg_root,
        })

    @staticmethod
    def build_env(env_actions):
        """Return a copy of os.environ with all `env_actions` applied."""
        new_env = os.environ.copy()
        # BUG FIX: this used map(), which is lazy under Python 3 and so
        # never actually applied the actions.
        for env_action in env_actions:
            env_action.modify_environ(new_env)
        return new_env

    def modify_environ(self, environ):
        """Apply this action to the `environ` dict in place.

        An unset (or empty) variable is always just set, regardless of the
        action kind.
        """
        if self.action == "set" or not environ.get(self.variable, ""):
            environ[self.variable] = self.__eval("${value}")
        elif self.action == "prepend":
            environ[self.variable] = self.__eval("${value}:%s" % environ[self.variable])
        else:
            environ[self.variable] = self.__eval("%s:${value}" % environ[self.variable])

    def __eval(self, template):
        return string.Template(template).safe_substitute(
            variable=self.variable,
            value=self.value,
        )

    def to_statements(self):
        """Render this action as shell statements (assignment + export)."""
        if self.action == "set":
            template = '''${variable}="${value}"'''
        elif self.action == "prepend":
            template = '''${variable}="${value}:$$${variable}"'''
        else:
            template = '''${variable}="$$${variable}:${value}"'''
        return [
            self.__eval(template),
            "export %s" % self.variable
        ]
@contextlib.contextmanager
def brew_head_at_version(recipe_context, package, version):
    """Temporarily check the recipe's tap out at the commit providing `version`."""
    commit = commit_for_version(recipe_context, package, version)
    with brew_head_at_commit(commit, recipe_context.tap_path):
        yield
@contextlib.contextmanager
def brew_head_at_commit(commit, tap_path):
    """Temporarily check `tap_path` out at `commit`.

    Restores the tap's original HEAD and - fixing the old TODO - the
    process's original working directory on exit.
    """
    original_cwd = os.getcwd()
    try:
        os.chdir(tap_path)
        current_commit = git_execute(["rev-parse", "HEAD"]).strip()
        try:
            git_execute(["checkout", commit])
            yield
        finally:
            git_execute(["checkout", current_commit])
    finally:
        # Previously the chdir leaked; restore the caller's cwd.
        os.chdir(original_cwd)
def git_execute(args):
    """Run `git` with `args` and return its stdout."""
    return execute(["git"] + args)
def execute(cmds, env=None):
    """Run `cmds`, returning its stdout.

    Raises CommandLineException (with both output streams) on a non-zero
    exit status; echoes stdout when the module-level VERBOSE flag is set.
    """
    subprocess_kwds = dict(
        shell=False,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    if env:
        subprocess_kwds["env"] = env
    process = subprocess.Popen(cmds, **subprocess_kwds)
    stdout, stderr = process.communicate()
    if process.returncode != 0:
        raise CommandLineException(" ".join(cmds), stdout, stderr)
    if VERBOSE:
        print(stdout)
    return stdout
def brew_deps(package):
    """Return the dependency names reported by `brew deps` for `package`."""
    args = ["deps"] + BREW_ARGS + [package]
    stdout = brew_execute(args)
    return [line.strip() for line in stdout.split("\n") if line]
def brew_info(recipe):
    """Return brew's JSON info for `recipe`, augmented with extra fields
    parsed from the human-readable output."""
    info = json.loads(brew_execute(["info", "--json=v1", recipe]))[0]
    info.update(extended_brew_info(recipe))
    return info
def extended_brew_info(recipe):
    """Parse extra fields (origin URL, dependency groups) from `brew info`.

    The JSON variant should include this in a backward compatible way
    (TODO: Open PR).
    """
    raw_info = brew_execute(["info", recipe])
    extra_info = {
        "from_url": None,
        "build_dependencies": [],
        "required_dependencies": [],
        "recommended_dependencies": [],
        "optional_dependencies": [],
    }
    dep_types = ["Build", "Required", "Recommended", "Optional"]
    for line in raw_info.split("\n"):
        if line.startswith("From: "):
            extra_info["from_url"] = line[len("From: "):].strip()
        for dep_type in dep_types:
            prefix = "%s: " % dep_type
            if line.startswith(prefix):
                key = "%s_dependencies" % dep_type.lower()
                extra_info[key].extend(line[len(prefix):].split(", "))
    return extra_info
def brew_versions_info(package, tap_path):
def versioned(recipe_path):
if not os.path.isabs(recipe_path):
recipe_path = os.path.join(os.getcwd(), recipe_path)
# Dependencies in the same | |
X.509 cert"
x509Cert = X509().parse(open(os.path.join(dir, "serverX509Cert.pem")).read())
x509Chain = X509CertChain([x509Cert])
s = open(os.path.join(dir, "serverX509Key.pem")).read()
x509Key = parsePEMKey(s, private=True)
connection = connect()
connection.handshakeServer(verifierDB=verifierDB, \
certChain=x509Chain, privateKey=x509Key)
connection.close()
connection.sock.close()
print "Test 7 - X.509 with SRP faults"
for fault in Fault.clientSrpFaults + Fault.genericFaults:
connection = connect()
connection.fault = fault
try:
connection.handshakeServer(verifierDB=verifierDB, \
certChain=x509Chain, privateKey=x509Key)
assert()
except:
pass
connection.sock.close()
if cryptoIDlibLoaded:
print "Test 8 - good SRP: with cryptoID certs"
cryptoIDChain = CertChain().parse(open(os.path.join(dir, "serverCryptoIDChain.xml"), "r").read())
cryptoIDKey = parseXMLKey(open(os.path.join(dir, "serverCryptoIDKey.xml"), "r").read(), private=True)
connection = connect()
connection.handshakeServer(verifierDB=verifierDB, \
certChain=cryptoIDChain, privateKey=cryptoIDKey)
connection.close()
connection.sock.close()
print "Test 9 - cryptoID with SRP faults"
for fault in Fault.clientSrpFaults + Fault.genericFaults:
connection = connect()
connection.fault = fault
try:
connection.handshakeServer(verifierDB=verifierDB, \
certChain=cryptoIDChain, privateKey=cryptoIDKey)
assert()
except:
pass
connection.sock.close()
print "Test 10 - good X.509"
connection = connect()
connection.handshakeServer(certChain=x509Chain, privateKey=x509Key)
connection.close()
connection.sock.close()
print "Test 10.a - good X.509, SSL v3"
connection = connect()
settings = HandshakeSettings()
settings.minVersion = (3,0)
settings.maxVersion = (3,0)
connection.handshakeServer(certChain=x509Chain, privateKey=x509Key, settings=settings)
connection.close()
connection.sock.close()
print "Test 11 - X.509 faults"
for fault in Fault.clientNoAuthFaults + Fault.genericFaults:
connection = connect()
connection.fault = fault
try:
connection.handshakeServer(certChain=x509Chain, privateKey=x509Key)
assert()
except:
pass
connection.sock.close()
if cryptoIDlibLoaded:
print "Test 12 - good cryptoID"
connection = connect()
connection.handshakeServer(certChain=cryptoIDChain, privateKey=cryptoIDKey)
connection.close()
connection.sock.close()
print "Test 13 - cryptoID faults"
for fault in Fault.clientNoAuthFaults + Fault.genericFaults:
connection = connect()
connection.fault = fault
try:
connection.handshakeServer(certChain=cryptoIDChain, privateKey=cryptoIDKey)
assert()
except:
pass
connection.sock.close()
print "Test 14 - good mutual X.509"
connection = connect()
connection.handshakeServer(certChain=x509Chain, privateKey=x509Key, reqCert=True)
assert(isinstance(connection.session.serverCertChain, X509CertChain))
connection.close()
connection.sock.close()
print "Test 14a - good mutual X.509, SSLv3"
connection = connect()
settings = HandshakeSettings()
settings.minVersion = (3,0)
settings.maxVersion = (3,0)
connection.handshakeServer(certChain=x509Chain, privateKey=x509Key, reqCert=True, settings=settings)
assert(isinstance(connection.session.serverCertChain, X509CertChain))
connection.close()
connection.sock.close()
print "Test 15 - mutual X.509 faults"
for fault in Fault.clientCertFaults + Fault.genericFaults:
connection = connect()
connection.fault = fault
try:
connection.handshakeServer(certChain=x509Chain, privateKey=x509Key, reqCert=True)
assert()
except:
pass
connection.sock.close()
if cryptoIDlibLoaded:
print "Test 16 - good mutual cryptoID"
connection = connect()
connection.handshakeServer(certChain=cryptoIDChain, privateKey=cryptoIDKey, reqCert=True)
assert(isinstance(connection.session.serverCertChain, CertChain))
assert(connection.session.serverCertChain.validate())
connection.close()
connection.sock.close()
print "Test 17 - mutual cryptoID faults"
for fault in Fault.clientCertFaults + Fault.genericFaults:
connection = connect()
connection.fault = fault
try:
connection.handshakeServer(certChain=cryptoIDChain, privateKey=cryptoIDKey, reqCert=True)
assert()
except:
pass
connection.sock.close()
print "Test 18 - good SRP, prepare to resume"
sessionCache = SessionCache()
connection = connect()
connection.handshakeServer(verifierDB=verifierDB, sessionCache=sessionCache)
connection.close()
connection.sock.close()
print "Test 19 - resumption"
connection = connect()
connection.handshakeServer(verifierDB=verifierDB, sessionCache=sessionCache)
#Don't close! -- see next test
print "Test 20 - invalidated resumption"
try:
connection.read(min=1, max=1)
assert() #Client is going to close the socket without a close_notify
except TLSAbruptCloseError, e:
pass
connection = connect()
try:
connection.handshakeServer(verifierDB=verifierDB, sessionCache=sessionCache)
except TLSLocalAlert, alert:
if alert.description != AlertDescription.bad_record_mac:
raise
connection.sock.close()
print "Test 21 - HTTPS test X.509"
#Close the current listening socket
lsock.close()
#Create and run an HTTP Server using TLSSocketServerMixIn
class MyHTTPServer(TLSSocketServerMixIn,
BaseHTTPServer.HTTPServer):
def handshake(self, tlsConnection):
tlsConnection.handshakeServer(certChain=x509Chain, privateKey=x509Key)
return True
cd = os.getcwd()
os.chdir(dir)
address = address[0], address[1]+1
httpd = MyHTTPServer(address, SimpleHTTPServer.SimpleHTTPRequestHandler)
for x in range(6):
httpd.handle_request()
httpd.server_close()
cd = os.chdir(cd)
if cryptoIDlibLoaded:
print "Test 21a - HTTPS test SRP+cryptoID"
#Create and run an HTTP Server using TLSSocketServerMixIn
class MyHTTPServer(TLSSocketServerMixIn,
BaseHTTPServer.HTTPServer):
def handshake(self, tlsConnection):
tlsConnection.handshakeServer(certChain=cryptoIDChain, privateKey=cryptoIDKey,
verifierDB=verifierDB)
return True
cd = os.getcwd()
os.chdir(dir)
address = address[0], address[1]+1
httpd = MyHTTPServer(address, SimpleHTTPServer.SimpleHTTPRequestHandler)
for x in range(6):
httpd.handle_request()
httpd.server_close()
cd = os.chdir(cd)
#Re-connect the listening socket
lsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
address = address[0], address[1]+1
lsock.bind(address)
lsock.listen(5)
def connect():
return TLSConnection(lsock.accept()[0])
implementations = []
if cryptlibpyLoaded:
implementations.append("cryptlib")
if m2cryptoLoaded:
implementations.append("openssl")
if pycryptoLoaded:
implementations.append("pycrypto")
implementations.append("python")
print "Test 22 - different ciphers"
for implementation in ["python"] * len(implementations):
for cipher in ["aes128", "aes256", "rc4"]:
print "Test 22:",
connection = connect()
settings = HandshakeSettings()
settings.cipherNames = [cipher]
settings.cipherImplementations = [implementation, "python"]
connection.handshakeServer(sharedKeyDB=sharedKeyDB, settings=settings)
print connection.getCipherName(), connection.getCipherImplementation()
h = connection.read(min=5, max=5)
assert(h == "hello")
connection.write(h)
connection.close()
connection.sock.close()
print "Test 23 - throughput test"
for implementation in implementations:
for cipher in ["aes128", "aes256", "3des", "rc4"]:
if cipher == "3des" and implementation not in ("openssl", "cryptlib", "pycrypto"):
continue
print "Test 23:",
connection = connect()
settings = HandshakeSettings()
settings.cipherNames = [cipher]
settings.cipherImplementations = [implementation, "python"]
connection.handshakeServer(sharedKeyDB=sharedKeyDB, settings=settings)
print connection.getCipherName(), connection.getCipherImplementation()
h = connection.read(min=50000, max=50000)
assert(h == "hello"*10000)
connection.write(h)
connection.close()
connection.sock.close()
print "Test succeeded"
# With no command (or a "help" request), print version, which crypto
# modules were importable, and a usage summary - then exit.
# NOTE(review): indentation reconstructed; Python 2 print statements kept as-is.
if len(sys.argv) == 1 or (len(sys.argv)==2 and sys.argv[1].lower().endswith("help")):
    print ""
    print "Version: 0.3.8"
    print ""
    print "RNG: %s" % prngName
    print ""
    print "Modules:"
    if cryptlibpyLoaded:
        print " cryptlib_py : Loaded"
    else:
        print " cryptlib_py : Not Loaded"
    if m2cryptoLoaded:
        print " M2Crypto : Loaded"
    else:
        print " M2Crypto : Not Loaded"
    if pycryptoLoaded:
        print " pycrypto : Loaded"
    else:
        print " pycrypto : Not Loaded"
    if gmpyLoaded:
        print " GMPY : Loaded"
    else:
        print " GMPY : Not Loaded"
    if cryptoIDlibLoaded:
        print " cryptoIDlib : Loaded"
    else:
        print " cryptoIDlib : Not Loaded"
    print ""
    print "Commands:"
    print ""
    print " clientcert <server> [<chain> <key>]"
    print " clientsharedkey <server> <user> <pass>"
    print " clientsrp <server> <user> <pass>"
    print " clienttest <server> <dir>"
    print ""
    print " serversrp <server> <verifierDB>"
    print " servercert <server> <chain> <key> [req]"
    print " serversrpcert <server> <verifierDB> <chain> <key>"
    print " serversharedkey <server> <sharedkeyDB>"
    print " servertest <server> <dir>"
    sys.exit()
# First positional argument selects the sub-command handled below.
cmd = sys.argv[1].lower()
class Args:
    """Thin positional-argument accessor with arity checking."""

    def __init__(self, argv):
        self.argv = argv

    def get(self, index):
        """Return argv[index]; raise SyntaxError when it is absent."""
        if index >= len(self.argv):
            raise SyntaxError("Not enough arguments")
        return self.argv[index]

    def getLast(self, index):
        """Return argv[index], requiring it to be the final argument."""
        if len(self.argv) > index + 1:
            raise SyntaxError("Too many arguments")
        return self.get(index)
# Wrap sys.argv for checked positional access by the command handlers below.
args = Args(sys.argv)
def reformatDocString(s):
    """Re-indent `s`: strip each line and prefix it with a single space."""
    return "\n".join(" " + line.strip() for line in s.splitlines())
try:
if cmd == "clienttest":
address = args.get(2)
dir = args.getLast(3)
clientTest(address, dir)
sys.exit()
elif cmd.startswith("client"):
address = args.get(2)
#Split address into hostname/port tuple
address = address.split(":")
if len(address)==1:
address.append("4443")
address = ( address[0], int(address[1]) )
def connect():
#Connect to server
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if hasattr(sock, "settimeout"):
sock.settimeout(5)
sock.connect(address)
#Instantiate TLSConnections
return TLSConnection(sock)
try:
if cmd == "clientsrp":
username = args.get(3)
password = args.getLast(4)
connection = connect()
start = time.clock()
connection.handshakeClientSRP(username, password)
elif cmd == "clientsharedkey":
username = args.get(3)
password = args.getLast(4)
connection = connect()
start = time.clock()
connection.handshakeClientSharedKey(username, password)
elif cmd == "clientcert":
certChain = None
privateKey = None
if len(sys.argv) > 3:
certFilename = args.get(3)
keyFilename = args.getLast(4)
s1 = open(certFilename, "rb").read()
s2 = open(keyFilename, "rb").read()
#Try to create cryptoID cert chain
if cryptoIDlibLoaded:
try:
certChain = CertChain().parse(s1)
privateKey = parsePrivateKey(s2)
except:
certChain = None
privateKey = None
#Try to create X.509 cert chain
if not certChain:
x509 = X509()
x509.parse(s1)
certChain = X509CertChain([x509])
privateKey = parsePrivateKey(s2)
connection = connect()
start = time.clock()
connection.handshakeClientCert(certChain, privateKey)
else:
raise SyntaxError("Unknown command")
except TLSLocalAlert, a:
if a.description == AlertDescription.bad_record_mac:
if cmd == "clientsharedkey":
print "Bad sharedkey password"
else:
raise
elif a.description == AlertDescription.user_canceled:
print str(a)
else:
raise
sys.exit()
except TLSRemoteAlert, a:
if a.description == AlertDescription.unknown_srp_username:
if cmd == "clientsrp":
print "Unknown username"
else:
raise
elif a.description == AlertDescription.bad_record_mac:
if cmd == "clientsrp":
print "Bad username or password"
else:
raise
elif a.description == AlertDescription.handshake_failure:
print "Unable to negotiate mutually acceptable parameters"
else:
raise
sys.exit()
stop = time.clock()
print "Handshake success"
print " Handshake time: %.4f seconds" % (stop - start)
print " Version: %s.%s" % connection.version
print " Cipher: %s %s" % (connection.getCipherName(), connection.getCipherImplementation())
if connection.session.srpUsername:
print " Client SRP username: %s" % connection.session.srpUsername
if connection.session.sharedKeyUsername:
print " Client shared key username: %s" % connection.session.sharedKeyUsername
if connection.session.clientCertChain:
print " Client fingerprint: %s" % connection.session.clientCertChain.getFingerprint()
if connection.session.serverCertChain:
print " Server fingerprint: %s" % connection.session.serverCertChain.getFingerprint()
connection.close()
connection.sock.close()
elif cmd.startswith("server"):
address = args.get(2)
#Split address into hostname/port tuple
address = address.split(":")
if len(address)==1:
address.append("4443")
address = ( address[0], int(address[1]) )
verifierDBFilename = None
sharedKeyDBFilename = None
certFilename = None
keyFilename = None
sharedKeyDB = None
reqCert = False
if cmd == "serversrp":
verifierDBFilename = args.getLast(3)
elif cmd == "servercert":
certFilename = args.get(3)
keyFilename = args.get(4)
if len(sys.argv)>=6:
req = args.getLast(5)
if req.lower() != "req":
raise SyntaxError()
reqCert = True
elif cmd == "serversrpcert":
verifierDBFilename = args.get(3)
certFilename = args.get(4)
keyFilename = args.getLast(5)
elif cmd == "serversharedkey":
sharedKeyDBFilename = args.getLast(3)
elif cmd == "servertest":
address = args.get(2)
dir = args.getLast(3)
serverTest(address, dir)
sys.exit()
verifierDB = None
if | |
properties=dict(
addresses=relationship(
Address, cascade="all,delete-orphan", backref="user"
),
orders=relationship(Order, cascade="all, delete-orphan"),
),
)
s = Session()
# the standalone Address goes in, its foreign key
# allows NULL
a = Address()
s.add(a)
s.commit()
# the standalone Order does not.
o = Order()
s.add(o)
assert_raises(sa_exc.DBAPIError, s.commit)
s.rollback()
# can assign o.user_id by foreign key,
# flush succeeds
u = User()
s.add(u)
s.flush()
o = Order(user_id=u.user_id)
s.add(o)
s.commit()
assert o in s and o not in s.new
def test_pending_collection_expunge(self):
"""Removing a pending item from a collection expunges it from
the session."""
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(Address, addresses)
mapper(
User,
users,
properties=dict(
addresses=relationship(
Address, cascade="all,delete-orphan", backref="user"
)
),
)
s = Session()
u = User()
s.add(u)
s.flush()
a = Address()
u.addresses.append(a)
assert a in s
u.addresses.remove(a)
assert a not in s
s.delete(u)
s.flush()
assert a.address_id is None, "Error: address should not be persistent"
def test_nonorphans_ok(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(Address, addresses)
mapper(
User,
users,
properties=dict(
addresses=relationship(
Address, cascade="all,delete", backref="user"
)
),
)
s = Session()
u = User(name="u1", addresses=[Address(email_address="ad1")])
s.add(u)
a1 = u.addresses[0]
u.addresses.remove(a1)
assert a1 in s
s.flush()
s.expunge_all()
eq_(s.query(Address).all(), [Address(email_address="ad1")])
class PendingOrphanTestTwoLevel(fixtures.MappedTest):
    """test usages stated at
    http://article.gmane.org/gmane.comp.python.sqlalchemy.user/3085
    http://article.gmane.org/gmane.comp.python.sqlalchemy.user/3119
    """

    @classmethod
    def define_tables(cls, metadata):
        # order <- item <- attribute; each child requires its parent FK.
        Table(
            "order",
            metadata,
            Column("id", Integer, primary_key=True, test_needs_autoincrement=True),
        )
        Table(
            "item",
            metadata,
            Column("id", Integer, primary_key=True, test_needs_autoincrement=True),
            Column("order_id", Integer, ForeignKey("order.id"), nullable=False),
        )
        Table(
            "attribute",
            metadata,
            Column("id", Integer, primary_key=True, test_needs_autoincrement=True),
            Column("item_id", Integer, ForeignKey("item.id"), nullable=False),
        )

    @classmethod
    def setup_classes(cls):
        class Order(cls.Comparable):
            pass

        class Item(cls.Comparable):
            pass

        class Attribute(cls.Comparable):
            pass

    def test_singlelevel_remove(self):
        """A pending Item removed from its Order is expunged rather
        than flushed."""
        Order, Item = self.classes.Order, self.classes.Item
        order, item = self.tables.order, self.tables.item

        mapper(
            Order,
            order,
            properties={"items": relationship(Item, cascade="all, delete-orphan")},
        )
        mapper(Item, item)
        sess = Session()

        parent = Order()
        sess.add(parent)
        child = Item()
        parent.items.append(child)
        parent.items.remove(child)
        sess.commit()
        assert child not in parent.items

    def test_multilevel_remove(self):
        """Orphaning a pending Item cascades the expunge down to its
        Attributes."""
        Order, Item, Attribute = (
            self.classes.Order,
            self.classes.Item,
            self.classes.Attribute,
        )
        order, item, attribute = (
            self.tables.order,
            self.tables.item,
            self.tables.attribute,
        )

        mapper(
            Order,
            order,
            properties={"items": relationship(Item, cascade="all, delete-orphan")},
        )
        mapper(
            Item,
            item,
            properties={
                "attributes": relationship(Attribute, cascade="all, delete-orphan")
            },
        )
        mapper(Attribute, attribute)
        sess = Session()

        parent = Order()
        sess.add(parent)
        child = Item()
        grandchild = Attribute()
        child.attributes.append(grandchild)
        parent.items.append(child)
        assert child in sess
        assert grandchild in sess

        # child becomes an orphan so the removal expunges it, and the
        # operation cascades down to grandchild.
        parent.items.remove(child)
        assert child not in sess
        assert grandchild not in sess
        sess.commit()
        assert parent in sess
        assert grandchild not in sess
        assert child not in sess
        assert grandchild not in parent.items
class DoubleParentO2MOrphanTest(fixtures.MappedTest):
    """Test orphan behavior on an entity that requires
    two parents via many-to-one (one-to-many collection.).
    """

    @classmethod
    def define_tables(cls, meta):
        Table(
            "sales_reps",
            meta,
            Column("sales_rep_id", Integer, primary_key=True, test_needs_autoincrement=True),
            Column("name", String(50)),
        )
        Table(
            "accounts",
            meta,
            Column("account_id", Integer, primary_key=True, test_needs_autoincrement=True),
            Column("balance", Integer),
        )
        Table(
            "customers",
            meta,
            Column("customer_id", Integer, primary_key=True, test_needs_autoincrement=True),
            Column("name", String(50)),
            Column("sales_rep_id", Integer, ForeignKey("sales_reps.sales_rep_id")),
            Column("account_id", Integer, ForeignKey("accounts.account_id")),
        )

    def _fixture(self, legacy_is_orphan, uselist):
        """Map Customer with two delete-orphan parents and return
        (session, customer, account, sales_rep)."""
        sales_reps = self.tables.sales_reps
        customers = self.tables.customers
        accounts = self.tables.accounts

        class Customer(fixtures.ComparableEntity):
            pass

        class Account(fixtures.ComparableEntity):
            pass

        class SalesRep(fixtures.ComparableEntity):
            pass

        mapper(Customer, customers, legacy_is_orphan=legacy_is_orphan)
        mapper(
            Account,
            accounts,
            properties=dict(
                customers=relationship(
                    Customer,
                    cascade="all,delete-orphan",
                    backref="account",
                    uselist=uselist,
                )
            ),
        )
        mapper(
            SalesRep,
            sales_reps,
            properties=dict(
                customers=relationship(
                    Customer,
                    cascade="all,delete-orphan",
                    backref="sales_rep",
                    uselist=uselist,
                )
            ),
        )
        sess = Session(expire_on_commit=False, autoflush=False)

        account = Account(balance=0)
        rep = SalesRep(name="John")
        sess.add_all((account, rep))
        sess.commit()

        customer = Customer(name="Jane")
        if uselist:
            account.customers.append(customer)
            rep.customers.append(customer)
        else:
            account.customers = customer
            rep.customers = customer
        assert customer in sess
        return sess, customer, account, rep

    def test_double_parent_expunge_o2m_legacy(self):
        """test the delete-orphan uow event for multiple delete-orphan
        parent relationships."""
        sess, customer, account, rep = self._fixture(True, True)

        account.customers.remove(customer)
        assert (
            customer in sess
        ), "Should not expunge customer yet, still has one parent"

        rep.customers.remove(customer)
        assert (
            customer not in sess
        ), "Should expunge customer when both parents are gone"

    def test_double_parent_expunge_o2m_current(self):
        """test the delete-orphan uow event for multiple delete-orphan
        parent relationships."""
        sess, customer, account, rep = self._fixture(False, True)

        account.customers.remove(customer)
        assert (
            customer not in sess
        ), "Should expunge customer when either parent is gone"

        rep.customers.remove(customer)
        assert (
            customer not in sess
        ), "Should expunge customer when both parents are gone"

    def test_double_parent_expunge_o2o_legacy(self):
        """test the delete-orphan uow event for multiple delete-orphan
        parent relationships."""
        sess, customer, account, rep = self._fixture(True, False)

        account.customers = None
        assert (
            customer in sess
        ), "Should not expunge customer yet, still has one parent"

        rep.customers = None
        assert (
            customer not in sess
        ), "Should expunge customer when both parents are gone"

    def test_double_parent_expunge_o2o_current(self):
        """test the delete-orphan uow event for multiple delete-orphan
        parent relationships."""
        sess, customer, account, rep = self._fixture(False, False)

        account.customers = None
        assert (
            customer not in sess
        ), "Should expunge customer when either parent is gone"

        rep.customers = None
        assert (
            customer not in sess
        ), "Should expunge customer when both parents are gone"
class DoubleParentM2OOrphanTest(fixtures.MappedTest):
    """Test orphan behavior on an entity that requires
    two parents via one-to-many (many-to-one reference to the orphan).
    """

    @classmethod
    def define_tables(cls, metadata):
        # One orphan-candidate table ("addresses") referenced by two
        # independent parent tables; both FKs are NOT NULL, so an
        # Address row cannot persist without some parent pointing at it.
        Table(
            "addresses",
            metadata,
            Column(
                "address_id",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column("street", String(30)),
        )
        Table(
            "homes",
            metadata,
            Column(
                "home_id",
                Integer,
                primary_key=True,
                # mapped attribute name ("id") differs from the DB column
                key="id",
                test_needs_autoincrement=True,
            ),
            Column("description", String(30)),
            Column(
                "address_id",
                Integer,
                ForeignKey("addresses.address_id"),
                nullable=False,
            ),
        )
        Table(
            "businesses",
            metadata,
            Column(
                "business_id",
                Integer,
                primary_key=True,
                key="id",
                test_needs_autoincrement=True,
            ),
            Column("description", String(30), key="description"),
            Column(
                "address_id",
                Integer,
                ForeignKey("addresses.address_id"),
                nullable=False,
            ),
        )

    def test_non_orphan(self):
        """test that an entity can have two parent delete-orphan
        cascades, and persists normally."""
        homes, businesses, addresses = (
            self.tables.homes,
            self.tables.businesses,
            self.tables.addresses,
        )

        class Address(fixtures.ComparableEntity):
            pass

        class Home(fixtures.ComparableEntity):
            pass

        class Business(fixtures.ComparableEntity):
            pass

        mapper(Address, addresses)
        # single_parent=True is required for delete-orphan on a
        # many-to-one: each Address may belong to only one parent.
        mapper(
            Home,
            homes,
            properties={
                "address": relationship(
                    Address, cascade="all,delete-orphan", single_parent=True
                )
            },
        )
        mapper(
            Business,
            businesses,
            properties={
                "address": relationship(
                    Address, cascade="all,delete-orphan", single_parent=True
                )
            },
        )

        session = Session(expire_on_commit=False, autoflush=False) if False else Session()
        h1 = Home(description="home1", address=Address(street="address1"))
        b1 = Business(
            description="business1", address=Address(street="address2")
        )
        session.add_all((h1, b1))
        session.flush()
        session.expunge_all()

        # Round-trip: each parent persisted along with its Address.
        eq_(
            session.get(Home, h1.id),
            Home(description="home1", address=Address(street="address1")),
        )
        eq_(
            session.get(Business, b1.id),
            Business(
                description="business1", address=Address(street="address2")
            ),
        )

    def test_orphan(self):
        """test that an entity can have two parent delete-orphan
        cascades, and is detected as an orphan when saved without a
        parent."""
        homes, businesses, addresses = (
            self.tables.homes,
            self.tables.businesses,
            self.tables.addresses,
        )

        class Address(fixtures.ComparableEntity):
            pass

        class Home(fixtures.ComparableEntity):
            pass

        class Business(fixtures.ComparableEntity):
            pass

        mapper(Address, addresses)
        mapper(
            Home,
            homes,
            properties={
                "address": relationship(
                    Address, cascade="all,delete-orphan", single_parent=True
                )
            },
        )
        mapper(
            Business,
            businesses,
            properties={
                "address": relationship(
                    Address, cascade="all,delete-orphan", single_parent=True
                )
            },
        )

        session = Session()
        a1 = Address()
        session.add(a1)
        # NOTE(review): flushing a parentless Address presumably raises
        # (orphan detection / NOT NULL address FK), but no assertion
        # captures that here — confirm an assert_raises wrapper was not
        # lost from this test.
        session.flush()
class CollectionAssignmentOrphanTest(fixtures.MappedTest):
    """Verify that persistent children of a delete-orphan collection are
    not misclassified as orphans across flushes and reloads."""

    @classmethod
    def define_tables(cls, metadata):
        # Simple parent/child pair: table_b rows reference table_a.
        Table(
            "table_a",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("name", String(30)),
        )
        Table(
            "table_b",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("name", String(30)),
            Column("a_id", Integer, ForeignKey("table_a.id")),
        )

    def test_basic(self):
        table_b, table_a = self.tables.table_b, self.tables.table_a

        class A(fixtures.ComparableEntity):
            pass

        class B(fixtures.ComparableEntity):
            pass

        mapper(
            A,
            table_a,
            properties={"bs": relationship(B, cascade="all, delete-orphan")},
        )
        mapper(B, table_b)

        a1 = A(name="a1", bs=[B(name="b1"), B(name="b2"), B(name="b3")])

        sess = Session()
        sess.add(a1)
        sess.flush()
        sess.expunge_all()

        eq_(
            sess.get(A, a1.id),
            A(name="a1", bs=[B(name="b1"), B(name="b2"), B(name="b3")]),
        )

        a1 = sess.get(A, a1.id)
        # A freshly loaded, parented child must not look like an orphan.
        assert not class_mapper(B)._is_orphan(
            attributes.instance_state(a1.bs[0])
        )
        # NOTE(review): 'foo' is not a mapped column — presumably these
        # writes exercise dirtying persistent children without triggering
        # orphan removal; confirm the attribute name is intentional.
        a1.bs[0].foo = "b2modified"
        a1.bs[1].foo = "b3modified"
        sess.flush()
        sess.expunge_all()

        # Collection contents survive the flush unchanged.
        eq_(
            sess.get(A, a1.id),
            A(name="a1", bs=[B(name="b1"), B(name="b2"), B(name="b3")]),
        )
class OrphanCriterionTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
    """Create the schema for orphan-criterion tests.

    One "core" table holding nullable FKs to two independent parent
    tables ("related_one", "related_two"); nullable FKs let a core row
    exist while either parent reference is absent.

    Fix: the first parameter was named ``self`` although the method is
    a ``@classmethod`` — renamed to ``cls`` for correctness of intent
    and consistency with the other ``define_tables`` hooks in this file.
    """
    Table(
        "core",
        metadata,
        Column(
            "id", Integer, primary_key=True, test_needs_autoincrement=True
        ),
        Column("related_one_id", Integer, ForeignKey("related_one.id")),
        Column("related_two_id", Integer, ForeignKey("related_two.id")),
    )
    Table(
        "related_one",
        metadata,
        Column(
            "id", Integer, primary_key=True, test_needs_autoincrement=True
        ),
    )
    Table(
        "related_two",
        metadata,
        Column(
            "id", Integer, primary_key=True, test_needs_autoincrement=True
        ),
    )
def _fixture(
    self,
    legacy_is_orphan,
    persistent,
    r1_present,
    r2_present,
    detach_event=True,
):
    """Build a Core instance configured per the orphan-test flags.

    :param legacy_is_orphan: mapper uses legacy mode (orphan only when
        *all* delete-orphan parents are gone) vs. current mode (orphan
        when *any* parent is gone).
    :param persistent: flush the Core into a Session before returning.
    :param r1_present: Core should (still) have its RelatedOne parent.
    :param r2_present: Core should (still) have its RelatedTwo parent.
    :param detach_event: when True, attach both parents up front and
        later de-associate via backref assignment (fires a detachment
        event); when False, only ever attach the requested parents.
    :return: the Core instance under test.
    """
    class Core(object):
        pass

    class RelatedOne(object):
        def __init__(self, cores):
            self.cores = cores

    class RelatedTwo(object):
        def __init__(self, cores):
            self.cores = cores

    mapper(Core, self.tables.core, legacy_is_orphan=legacy_is_orphan)
    mapper(
        RelatedOne,
        self.tables.related_one,
        properties={
            "cores": relationship(
                Core, cascade="all, delete-orphan", backref="r1"
            )
        },
    )
    mapper(
        RelatedTwo,
        self.tables.related_two,
        properties={
            "cores": relationship(
                Core, cascade="all, delete-orphan", backref="r2"
            )
        },
    )
    c1 = Core()
    if detach_event:
        # Attach to both parents so the later None-assignments below
        # are genuine detach events, not never-attached states.
        RelatedOne(cores=[c1])
        RelatedTwo(cores=[c1])
    else:
        if r1_present:
            RelatedOne(cores=[c1])
        if r2_present:
            RelatedTwo(cores=[c1])
    if persistent:
        s = Session()
        s.add(c1)
        s.flush()
    if detach_event:
        # Detach via the backref scalar for each parent that should be
        # absent in the final state.
        if not r1_present:
            c1.r1 = None
        if not r2_present:
            c1.r2 = None
    return c1
def _assert_not_orphan(self, c1):
    """Assert the mapper does not classify *c1* as an orphan."""
    state = instance_state(c1)
    assert not object_mapper(c1)._is_orphan(state)
def _assert_is_orphan(self, c1):
    """Assert the mapper classifies *c1* as an orphan."""
    state = instance_state(c1)
    assert object_mapper(c1)._is_orphan(state)
def test_leg_pers_r1_r2(self):
    """Legacy mode, persistent, both parents present: not an orphan."""
    self._assert_not_orphan(self._fixture(True, True, True, True))
def test_current_pers_r1_r2(self):
    """Current mode, persistent, both parents present: not an orphan."""
    self._assert_not_orphan(self._fixture(False, True, True, True))
def test_leg_pers_r1_notr2(self):
    """Legacy mode, persistent, only r1 present: still not an orphan."""
    self._assert_not_orphan(self._fixture(True, True, True, False))
| |
<gh_stars>0
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
depends_on = [
("core", "0001_initial"),
("environments", "0001_initial"),
("library", "0001_initial"),
]
def forwards(self, orm):
    """Create the execution app's tables: Run, RunCaseVersion, RunSuite,
    Result, StepResult, plus their M2M join tables.

    NOTE: the literal ``datetime.datetime(2012, 1, 24, ...)`` defaults
    below are values frozen at the moment South generated this
    migration (a known South artifact); the real callable defaults
    live on the Django models.
    """
    # Adding model 'Run'
    db.create_table('execution_run', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('created_on', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2012, 1, 24, 0, 50, 42, 282739))),
        ('created_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='+', null=True, to=orm['auth.User'])),
        ('modified_on', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2012, 1, 24, 0, 50, 42, 282921))),
        ('modified_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='+', null=True, to=orm['auth.User'])),
        ('deleted_on', self.gf('django.db.models.fields.DateTimeField')(db_index=True, null=True, blank=True)),
        ('deleted_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='+', null=True, to=orm['auth.User'])),
        ('has_team', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('status', self.gf('django.db.models.fields.CharField')(default='draft', max_length=30, db_index=True)),
        ('productversion', self.gf('django.db.models.fields.related.ForeignKey')(related_name='runs', to=orm['core.ProductVersion'])),
        ('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
        ('description', self.gf('django.db.models.fields.TextField')(blank=True)),
        ('start', self.gf('django.db.models.fields.DateField')(default=datetime.date.today)),
        ('end', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
    ))
    db.send_create_signal('execution', ['Run'])

    # Adding M2M table for field own_team on 'Run'
    db.create_table('execution_run_own_team', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('run', models.ForeignKey(orm['execution.run'], null=False)),
        ('user', models.ForeignKey(orm['auth.user'], null=False))
    ))
    db.create_unique('execution_run_own_team', ['run_id', 'user_id'])

    # Adding M2M table for field environments on 'Run'
    db.create_table('execution_run_environments', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('run', models.ForeignKey(orm['execution.run'], null=False)),
        ('environment', models.ForeignKey(orm['environments.environment'], null=False))
    ))
    db.create_unique('execution_run_environments', ['run_id', 'environment_id'])

    # Adding model 'RunCaseVersion'
    db.create_table('execution_runcaseversion', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('created_on', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2012, 1, 24, 0, 50, 42, 275226))),
        ('created_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='+', null=True, to=orm['auth.User'])),
        ('modified_on', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2012, 1, 24, 0, 50, 42, 275406))),
        ('modified_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='+', null=True, to=orm['auth.User'])),
        ('deleted_on', self.gf('django.db.models.fields.DateTimeField')(db_index=True, null=True, blank=True)),
        ('deleted_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='+', null=True, to=orm['auth.User'])),
        ('run', self.gf('django.db.models.fields.related.ForeignKey')(related_name='runcaseversions', to=orm['execution.Run'])),
        ('caseversion', self.gf('django.db.models.fields.related.ForeignKey')(related_name='runcaseversions', to=orm['library.CaseVersion'])),
        ('order', self.gf('django.db.models.fields.IntegerField')(default=0, db_index=True)),
    ))
    db.send_create_signal('execution', ['RunCaseVersion'])

    # Adding M2M table for field environments on 'RunCaseVersion'
    db.create_table('execution_runcaseversion_environments', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('runcaseversion', models.ForeignKey(orm['execution.runcaseversion'], null=False)),
        ('environment', models.ForeignKey(orm['environments.environment'], null=False))
    ))
    db.create_unique('execution_runcaseversion_environments', ['runcaseversion_id', 'environment_id'])

    # Adding model 'RunSuite'
    db.create_table('execution_runsuite', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('created_on', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2012, 1, 24, 0, 50, 42, 285466))),
        ('created_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='+', null=True, to=orm['auth.User'])),
        ('modified_on', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2012, 1, 24, 0, 50, 42, 285646))),
        ('modified_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='+', null=True, to=orm['auth.User'])),
        ('deleted_on', self.gf('django.db.models.fields.DateTimeField')(db_index=True, null=True, blank=True)),
        ('deleted_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='+', null=True, to=orm['auth.User'])),
        ('run', self.gf('django.db.models.fields.related.ForeignKey')(related_name='runsuites', to=orm['execution.Run'])),
        ('suite', self.gf('django.db.models.fields.related.ForeignKey')(related_name='runsuites', to=orm['library.Suite'])),
        ('order', self.gf('django.db.models.fields.IntegerField')(default=0, db_index=True)),
    ))
    db.send_create_signal('execution', ['RunSuite'])

    # Adding model 'Result'
    db.create_table('execution_result', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('created_on', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2012, 1, 24, 0, 50, 42, 286632))),
        ('created_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='+', null=True, to=orm['auth.User'])),
        ('modified_on', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2012, 1, 24, 0, 50, 42, 286903))),
        ('modified_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='+', null=True, to=orm['auth.User'])),
        ('deleted_on', self.gf('django.db.models.fields.DateTimeField')(db_index=True, null=True, blank=True)),
        ('deleted_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='+', null=True, to=orm['auth.User'])),
        ('tester', self.gf('django.db.models.fields.related.ForeignKey')(related_name='results', to=orm['auth.User'])),
        ('runcaseversion', self.gf('django.db.models.fields.related.ForeignKey')(related_name='results', to=orm['execution.RunCaseVersion'])),
        ('environment', self.gf('django.db.models.fields.related.ForeignKey')(related_name='results', to=orm['environments.Environment'])),
        ('status', self.gf('django.db.models.fields.CharField')(default='assigned', max_length=50, db_index=True)),
        ('started', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2012, 1, 24, 0, 50, 42, 287631))),
        ('completed', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
        ('comment', self.gf('django.db.models.fields.TextField')(blank=True)),
        ('review', self.gf('django.db.models.fields.CharField')(default='pending', max_length=50, db_index=True)),
        ('reviewed_on', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
        ('reviewed_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='reviews', null=True, to=orm['auth.User'])),
    ))
    db.send_create_signal('execution', ['Result'])

    # Adding model 'StepResult'
    db.create_table('execution_stepresult', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('created_on', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2012, 1, 24, 0, 50, 42, 274151))),
        ('created_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='+', null=True, to=orm['auth.User'])),
        ('modified_on', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2012, 1, 24, 0, 50, 42, 274353))),
        ('modified_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='+', null=True, to=orm['auth.User'])),
        ('deleted_on', self.gf('django.db.models.fields.DateTimeField')(db_index=True, null=True, blank=True)),
        ('deleted_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='+', null=True, to=orm['auth.User'])),
        ('result', self.gf('django.db.models.fields.related.ForeignKey')(related_name='stepresults', to=orm['execution.Result'])),
        ('step', self.gf('django.db.models.fields.related.ForeignKey')(related_name='stepresults', to=orm['library.CaseStep'])),
        ('status', self.gf('django.db.models.fields.CharField')(default='passed', max_length=50, db_index=True)),
        ('bug_url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
    ))
    db.send_create_signal('execution', ['StepResult'])
def backwards(self, orm):
    """Reverse this migration: drop every table created by forwards().

    Tables are dropped in the same sequence forwards() created them;
    M2M join tables are removed alongside their owning model.
    """
    for table in (
        'execution_run',                          # model 'Run'
        'execution_run_own_team',                 # M2M own_team on 'Run'
        'execution_run_environments',             # M2M environments on 'Run'
        'execution_runcaseversion',               # model 'RunCaseVersion'
        'execution_runcaseversion_environments',  # M2M environments on 'RunCaseVersion'
        'execution_runsuite',                     # model 'RunSuite'
        'execution_result',                       # model 'Result'
        'execution_stepresult',                   # model 'StepResult'
    ):
        db.delete_table(table)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('<PASSWORD>', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.product': {
'Meta': {'object_name': 'Product'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 1, 24, 0, 50, 42, 315433)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'has_team': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 1, 24, 0, 50, 42, 315634)'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'own_team': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
},
'core.productversion': {
'Meta': {'ordering': "['order']", 'unique_together': "[('product', 'version')]", 'object_name': 'ProductVersion'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 1, 24, 0, 50, 42, 309053)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'productversion'", 'symmetrical': 'False', 'to': "orm['environments.Environment']"}),
'has_team': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 1, 24, 0, 50, 42, 309376)'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'own_team': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'versions'", 'to': "orm['core.Product']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'environments.category': {
'Meta': {'object_name': 'Category'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 1, 24, 0, 50, 42, 306238)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 1, 24, 0, 50, 42, 306421)'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'environments.element': {
'Meta': {'object_name': 'Element'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'elements'", 'to': "orm['environments.Category']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 1, 24, 0, 50, 42, 321687)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 1, 24, 0, 50, 42, 321888)'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'environments.environment': {
'Meta': {'object_name': 'Environment'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 1, 24, 0, 50, 42, 307522)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'elements': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['environments.Element']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 1, 24, 0, 50, 42, 307815)'}),
'profile': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'environments'", 'null': 'True', 'to': "orm['environments.Profile']"})
},
'environments.profile': {
'Meta': {'object_name': 'Profile'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 1, 24, 0, 50, 42, 305456)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 1, 24, 0, 50, 42, 305638)'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'execution.result': {
'Meta': {'object_name': 'Result'},
'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'completed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 1, 24, 0, 50, 42, 317594)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': | |
:param str workload_type: Workload type for which registration was sent.
"""
if backup_management_type is not None:
pulumi.set(__self__, "backup_management_type", backup_management_type)
if container_type is not None:
pulumi.set(__self__, "container_type", 'AzureWorkloadContainer')
if extended_info is not None:
pulumi.set(__self__, "extended_info", extended_info)
if friendly_name is not None:
pulumi.set(__self__, "friendly_name", friendly_name)
if health_status is not None:
pulumi.set(__self__, "health_status", health_status)
if last_updated_time is not None:
pulumi.set(__self__, "last_updated_time", last_updated_time)
if operation_type is not None:
pulumi.set(__self__, "operation_type", operation_type)
if registration_status is not None:
pulumi.set(__self__, "registration_status", registration_status)
if source_resource_id is not None:
pulumi.set(__self__, "source_resource_id", source_resource_id)
if workload_type is not None:
pulumi.set(__self__, "workload_type", workload_type)
@property
@pulumi.getter(name="backupManagementType")
def backup_management_type(self) -> Optional[str]:
    """Type of backup management for the container."""
    return pulumi.get(self, "backup_management_type")
@property
@pulumi.getter(name="containerType")
def container_type(self) -> Optional[str]:
    """Type of the container; expected value is 'AzureWorkloadContainer'.

    The value of this property for: 1. Compute Azure VM is
    Microsoft.Compute/virtualMachines 2. Classic Compute Azure VM is
    Microsoft.ClassicCompute/virtualMachines 3. Windows machines (like
    MAB, DPM etc) is Windows 4. Azure SQL instance is AzureSqlContainer.
    5. Storage containers is StorageContainer. 6. Azure workload Backup
    is VMAppContainer
    """
    return pulumi.get(self, "container_type")
@property
@pulumi.getter(name="extendedInfo")
def extended_info(self) -> Optional['outputs.AzureWorkloadContainerExtendedInfoResponse']:
    """Additional details of a workload container."""
    return pulumi.get(self, "extended_info")
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> Optional[str]:
    """Friendly name of the container."""
    return pulumi.get(self, "friendly_name")
@property
@pulumi.getter(name="healthStatus")
def health_status(self) -> Optional[str]:
    """Status of health of the container."""
    return pulumi.get(self, "health_status")
@property
@pulumi.getter(name="lastUpdatedTime")
def last_updated_time(self) -> Optional[str]:
    """Time stamp when this container was updated."""
    return pulumi.get(self, "last_updated_time")
@property
@pulumi.getter(name="operationType")
def operation_type(self) -> Optional[str]:
    """Re-Do Operation"""
    return pulumi.get(self, "operation_type")
@property
@pulumi.getter(name="registrationStatus")
def registration_status(self) -> Optional[str]:
    """Status of registration of the container with the Recovery Services Vault."""
    return pulumi.get(self, "registration_status")
@property
@pulumi.getter(name="sourceResourceId")
def source_resource_id(self) -> Optional[str]:
    """ARM ID of the virtual machine represented by this Azure Workload Container."""
    return pulumi.get(self, "source_resource_id")
@property
@pulumi.getter(name="workloadType")
def workload_type(self) -> Optional[str]:
    """Workload type for which registration was sent."""
    return pulumi.get(self, "workload_type")
def _translate_property(self, prop):
    """Map a camelCase wire property name to its snake_case attribute
    name, falling back to the original name when no mapping exists."""
    snake_name = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
    return snake_name if snake_name else prop
@pulumi.output_type
class ContainerIdentityInfoResponse(dict):
    """Container identity information."""

    def __init__(__self__, *,
                 aad_tenant_id: Optional[str] = None,
                 audience: Optional[str] = None,
                 service_principal_client_id: Optional[str] = None,
                 unique_name: Optional[str] = None):
        """
        Container identity information
        :param str aad_tenant_id: Protection container identity - AAD Tenant
        :param str audience: Protection container identity - Audience
        :param str service_principal_client_id: Protection container identity - AAD Service Principal
        :param str unique_name: Unique name of the container
        """
        # Only materialize keys whose values were actually supplied.
        for key, value in (
            ("aad_tenant_id", aad_tenant_id),
            ("audience", audience),
            ("service_principal_client_id", service_principal_client_id),
            ("unique_name", unique_name),
        ):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="aadTenantId")
    def aad_tenant_id(self) -> Optional[str]:
        """Protection container identity - AAD Tenant"""
        return pulumi.get(self, "aad_tenant_id")

    @property
    @pulumi.getter
    def audience(self) -> Optional[str]:
        """Protection container identity - Audience"""
        return pulumi.get(self, "audience")

    @property
    @pulumi.getter(name="servicePrincipalClientId")
    def service_principal_client_id(self) -> Optional[str]:
        """Protection container identity - AAD Service Principal"""
        return pulumi.get(self, "service_principal_client_id")

    @property
    @pulumi.getter(name="uniqueName")
    def unique_name(self) -> Optional[str]:
        """Unique name of the container"""
        return pulumi.get(self, "unique_name")

    def _translate_property(self, prop):
        # Fall back to the wire name when no snake_case mapping exists.
        snake_name = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake_name if snake_name else prop
@pulumi.output_type
class DPMContainerExtendedInfoResponse(dict):
    """Additional information of the DPMContainer."""

    def __init__(__self__, *,
                 last_refreshed_at: Optional[str] = None):
        """
        Additional information of the DPMContainer.
        :param str last_refreshed_at: Last refresh time of the DPMContainer.
        """
        # Only store the key when a value was actually provided.
        if last_refreshed_at is not None:
            pulumi.set(__self__, "last_refreshed_at", last_refreshed_at)

    @property
    @pulumi.getter(name="lastRefreshedAt")
    def last_refreshed_at(self) -> Optional[str]:
        """Last refresh time of the DPMContainer."""
        return pulumi.get(self, "last_refreshed_at")

    def _translate_property(self, prop):
        # Fall back to the wire name when no snake_case mapping exists.
        snake_name = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake_name if snake_name else prop
@pulumi.output_type
class DistributedNodesInfoResponse(dict):
    """
    This is used to represent the various nodes of the distributed container.
    """
    def __init__(__self__, *,
                 error_detail: Optional['outputs.ErrorDetailResponse'] = None,
                 node_name: Optional[str] = None,
                 status: Optional[str] = None):
        """
        This is used to represent the various nodes of the distributed container.
        :param 'ErrorDetailResponseArgs' error_detail: Error Details if the Status is non-success.
        :param str node_name: Name of the node under a distributed container.
        :param str status: Status of this Node.
               Failed | Succeeded
        """
        # Only store provided fields; Pulumi output types subclass dict and
        # unset keys are simply absent.
        if error_detail is not None:
            pulumi.set(__self__, "error_detail", error_detail)
        if node_name is not None:
            pulumi.set(__self__, "node_name", node_name)
        if status is not None:
            pulumi.set(__self__, "status", status)
    @property
    @pulumi.getter(name="errorDetail")
    def error_detail(self) -> Optional['outputs.ErrorDetailResponse']:
        """
        Error Details if the Status is non-success.
        """
        return pulumi.get(self, "error_detail")
    @property
    @pulumi.getter(name="nodeName")
    def node_name(self) -> Optional[str]:
        """
        Name of the node under a distributed container.
        """
        return pulumi.get(self, "node_name")
    @property
    @pulumi.getter
    def status(self) -> Optional[str]:
        """
        Status of this Node.
        Failed | Succeeded
        """
        return pulumi.get(self, "status")
    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DpmContainerResponse(dict):
"""
DPM workload-specific protection container.
"""
    def __init__(__self__, *,
                 backup_management_type: Optional[str] = None,
                 can_re_register: Optional[bool] = None,
                 container_id: Optional[str] = None,
                 container_type: Optional[str] = None,
                 dpm_agent_version: Optional[str] = None,
                 dpm_servers: Optional[Sequence[str]] = None,
                 extended_info: Optional['outputs.DPMContainerExtendedInfoResponse'] = None,
                 friendly_name: Optional[str] = None,
                 health_status: Optional[str] = None,
                 protected_item_count: Optional[float] = None,
                 protection_status: Optional[str] = None,
                 registration_status: Optional[str] = None,
                 upgrade_available: Optional[bool] = None):
        """
        DPM workload-specific protection container.
        :param str backup_management_type: Type of backup management for the container.
        :param bool can_re_register: Specifies whether the container is re-registrable.
        :param str container_id: ID of container.
        :param str container_type: Type of the container. The value of this property for: 1. Compute Azure VM is Microsoft.Compute/virtualMachines 2.
               Classic Compute Azure VM is Microsoft.ClassicCompute/virtualMachines 3. Windows machines (like MAB, DPM etc) is
               Windows 4. Azure SQL instance is AzureSqlContainer. 5. Storage containers is StorageContainer. 6. Azure workload
               Backup is VMAppContainer
               Expected value is 'DPMContainer'.
        :param str dpm_agent_version: Backup engine Agent version
        :param Sequence[str] dpm_servers: List of BackupEngines protecting the container
        :param 'DPMContainerExtendedInfoResponseArgs' extended_info: Extended Info of the container.
        :param str friendly_name: Friendly name of the container.
        :param str health_status: Status of health of the container.
        :param float protected_item_count: Number of protected items in the BackupEngine
        :param str protection_status: Protection status of the container.
        :param str registration_status: Status of registration of the container with the Recovery Services Vault.
        :param bool upgrade_available: To check if upgrade available
        """
        # Fields are only stored when supplied; unset keys stay absent on the
        # underlying dict (Pulumi output types subclass dict).
        if backup_management_type is not None:
            pulumi.set(__self__, "backup_management_type", backup_management_type)
        if can_re_register is not None:
            pulumi.set(__self__, "can_re_register", can_re_register)
        if container_id is not None:
            pulumi.set(__self__, "container_id", container_id)
        if container_type is not None:
            # NOTE(review): the literal 'DPMContainer' discriminator is stored,
            # not the passed value — this matches the Pulumi code generator's
            # handling of discriminated union types; confirm before "fixing".
            pulumi.set(__self__, "container_type", 'DPMContainer')
        if dpm_agent_version is not None:
            pulumi.set(__self__, "dpm_agent_version", dpm_agent_version)
        if dpm_servers is not None:
            pulumi.set(__self__, "dpm_servers", dpm_servers)
        if extended_info is not None:
            pulumi.set(__self__, "extended_info", extended_info)
        if friendly_name is not None:
            pulumi.set(__self__, "friendly_name", friendly_name)
        if health_status is not None:
            pulumi.set(__self__, "health_status", health_status)
        if protected_item_count is not None:
            pulumi.set(__self__, "protected_item_count", protected_item_count)
        if protection_status is not None:
            pulumi.set(__self__, "protection_status", protection_status)
        if registration_status is not None:
            pulumi.set(__self__, "registration_status", registration_status)
        if upgrade_available is not None:
            pulumi.set(__self__, "upgrade_available", upgrade_available)
    # Read-only accessors; each @pulumi.getter maps the snake_case Python
    # attribute onto its camelCase wire name.
    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[str]:
        """
        Type of backup management for the container.
        """
        return pulumi.get(self, "backup_management_type")
    @property
    @pulumi.getter(name="canReRegister")
    def can_re_register(self) -> Optional[bool]:
        """
        Specifies whether the container is re-registrable.
        """
        return pulumi.get(self, "can_re_register")
    @property
    @pulumi.getter(name="containerId")
    def container_id(self) -> Optional[str]:
        """
        ID of container.
        """
        return pulumi.get(self, "container_id")
    @property
    @pulumi.getter(name="containerType")
    def container_type(self) -> Optional[str]:
        """
        Type of the container. The value of this property for: 1. Compute Azure VM is Microsoft.Compute/virtualMachines 2.
        Classic Compute Azure VM is Microsoft.ClassicCompute/virtualMachines 3. Windows machines (like MAB, DPM etc) is
        Windows 4. Azure SQL instance is AzureSqlContainer. 5. Storage containers is StorageContainer. 6. Azure workload
        Backup is VMAppContainer
        Expected value is 'DPMContainer'.
        """
        # Always 'DPMContainer' when set — __init__ stores the discriminator constant.
        return pulumi.get(self, "container_type")
    @property
    @pulumi.getter(name="dpmAgentVersion")
    def dpm_agent_version(self) -> Optional[str]:
        """
        Backup engine Agent version
        """
        return pulumi.get(self, "dpm_agent_version")
    @property
    @pulumi.getter(name="dpmServers")
    def dpm_servers(self) -> Optional[Sequence[str]]:
        """
        List of BackupEngines protecting the container
        """
        return pulumi.get(self, "dpm_servers")
    @property
    @pulumi.getter(name="extendedInfo")
    def extended_info(self) -> Optional['outputs.DPMContainerExtendedInfoResponse']:
        """
        Extended Info of the container.
        """
        return pulumi.get(self, "extended_info")
    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[str]:
        """
        Friendly name of the container.
        """
        return pulumi.get(self, "friendly_name")
    @property
    @pulumi.getter(name="healthStatus")
    def health_status(self) -> Optional[str]:
        """
        Status of health of the container.
        """
        return pulumi.get(self, "health_status")
    @property
    @pulumi.getter(name="protectedItemCount")
    def protected_item_count(self) -> Optional[float]:
        """
        Number of protected items in the BackupEngine
        """
        return pulumi.get(self, "protected_item_count")
@property
@pulumi.getter(name="protectionStatus")
def protection_status(self) -> Optional[str]:
"""
Protection status of the container.
"""
return pulumi.get(self, | |
"""
Just another Python API for Travis CI (API).
A module which provides the "Repository" resource type.
Author:
<NAME>, @funilrys, contactTATAfunilrysTODTODcom
Project link:
https://github.com/funilrys/PyTravisCI
Project documentation:
https://pytravisci.readthedocs.io/en/latest/
License
::
MIT License
Copyright (c) 2019, 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import base64
import os
import re
from io import IOBase
from typing import List, Optional, Union
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
import PyTravisCI.communicator._all as communicator
from PyTravisCI.encryption.data import DataEncryption
from PyTravisCI.encryption.file import FileEncryption
from . import _all as resource_types
from .base import ResourceTypesBase
class Repository(ResourceTypesBase):
"""
Provides the description of a repository.
Official Travis CI API documentation
- https://developer.travis-ci.org/resource/repository
:ivar int id:
Value uniquely identifying the repository.
:ivar str name:
The repository's name on GitHub.
:ivar str slug:
Same as {repository.owner.name}/{repository.name}.
:ivar str description:
The repository's description from GitHub.
:ivar int github_id:
The repository's id on GitHub.
:ivar vcs_id:
The repository's vcs_id.
:ivar vcs_type:
The repository's vcs_type.
:ivar str github_language:
The main programming language used according to GitHub.
:ivar bool active:
Whether or not this repository is currently enabled on Travis CI.
:ivar bool private:
Whether or not this repository is private.
:ivar owner:
GitHub user or organization the repository belongs to.
:vartype owner:
Union[
:class:`~PyTravisCI.resource_types.user.User`,
:class:`~PyTravisCI.resource_types.organization.Organization`
]
:ivar owner_name:
The repository's owner_name.
:ivar vcs_name:
The repository's vcs_name.
:ivar default_branch:
The default branch on GitHub.
:vartype default_branch: :class:`~PyTravisCI.resource_types.branch.Branch`
:ivar bool starred:
Whether or not this repository is starred.
:ivar bool managed_by_installation:
Whether or not this repository is managed by a GitHub App installation.
:ivar bool active_on_org:
Whether or not this repository runs builds on travis-ci.org (may also be null).
:ivar migration_status:
The repository's migration_status.
:ivar history_migration_status:
The repository's history_migration_status.
:ivar shared:
The repository's shared.
:ivar config_validation:
The repository's config_validation.
:ivar allow_migration:
The repository's allow_migration.
"""
# pylint: disable=too-many-public-methods
id: Optional[int] = None
name: Optional[str] = None
slug: Optional[str] = None
description: Optional[str] = None
github_id: Optional[int] = None
vcs_id = None
vcs_type = None
github_language: Optional[str] = None
active: Optional[bool] = None
private: Optional[bool] = None
owner: Optional[Union["resource_types.User", "resource_types.Organization"]] = None
owner_name = None
vcs_name = None
default_branch: Optional["resource_types.Branch"] = None
starred: Optional[bool] = None
managed_by_installation: Optional[bool] = None
active_on_org: Optional[bool] = None
migration_status = None
history_migration_status = None
shared = None
config_validation = None
allow_migration = None
def __init__(self, **kwargs) -> None:
if "default_branch" in kwargs:
kwargs["default_branch"] = resource_types.Branch(**kwargs["default_branch"])
if "owner" in kwargs and "_at_type" in kwargs["owner"]:
if kwargs["owner"]["_at_type"] == "user":
kwargs["owner"] = resource_types.User(**kwargs["owner"])
elif kwargs["owner"]["_at_type"] == "organization":
kwargs["owner"] = resource_types.Organization(**kwargs["owner"])
super().__init__(**kwargs)
    def activate(self, *, params: Optional[dict] = None) -> "resource_types.Repository":
        """
        Activates the current repository, allowing its tests to be
        run on Travis CI.
        :param params:
            The query parameters to append to the URL.
        """
        # The communicator class shares this class's name ("Repository").
        comm = getattr(communicator, self.__class__.__name__)(
            self._PyTravisCI["com"]["requester"]
        )
        # Adopt the refreshed state returned by the API into this instance.
        self.__dict__ = comm.activate(
            repository_id_or_slug=self.id, parameters=params
        ).__dict__
        return self
    def deactivate(
        self, *, params: Optional[dict] = None
    ) -> "resource_types.Repository":
        """
        Deactivates the current repository, preventing any tests from
        running on Travis CI.
        :param params:
            The query parameters to append to the URL.
        """
        # The communicator class shares this class's name ("Repository").
        comm = getattr(communicator, self.__class__.__name__)(
            self._PyTravisCI["com"]["requester"]
        )
        # Adopt the refreshed state returned by the API into this instance.
        self.__dict__ = comm.deactivate(
            repository_id_or_slug=self.id, parameters=params
        ).__dict__
        return self
def star(self, *, params: Optional[dict] = None) -> "resource_types.Repository":
"""
Stars the current repository.
:param params:
The query parameters to append to the URL.
"""
comm = getattr(communicator, self.__class__.__name__)(
self._PyTravisCI["com"]["requester"]
)
self.__dict__ = comm.star(
repository_id_or_slug=self.id, parameters=params
).__dict__
return self
def unstar(self, *, params: Optional[dict] = None) -> "resource_types.Repository":
"""
Unstars the current repository.
:param params:
The query parameters to append to the URL.
"""
comm = getattr(communicator, self.__class__.__name__)(
self._PyTravisCI["com"]["requester"]
)
self.__dict__ = comm.unstar(
repository_id_or_slug=self.id, parameters=params
).__dict__
return self
def get_branch(
self, branch_name: str, *, params: Optional[dict] = None
) -> "resource_types.Branch":
"""
Provides the information of a given branch.
Official Travis CI API documentation:
- https://developer.travis-ci.org/resource/branch
:param branch_name:
Name of the git branch.
:param params:
The query parameters to append to the URL.
"""
comm = getattr(communicator, "Branch")(self._PyTravisCI["com"]["requester"])
return comm.from_id_or_slug(
repository_id_or_slug=self.id, branch_name=branch_name, parameters=params
)
def get_branches(
self, *, params: Optional[dict] = None
) -> "resource_types.Branches":
"""
Provides the list of branches of the current repository.
Official Travis CI API documentation:
- https://developer.travis-ci.org/resource/branches
:param params:
The query parameters to append to the URL.
"""
comm = getattr(communicator, "Branches")(self._PyTravisCI["com"]["requester"])
return comm.from_id_or_slug(repository_id_or_slug=self.id, parameters=params)
def get_builds(self, *, params: Optional[dict] = None) -> "resource_types.Builds":
"""
Provides the list of builds of the current repository.
Official Travis CI API documentation:
- https://developer.travis-ci.org/resource/builds
:param params:
The query parameters to append to the URL.
"""
comm = getattr(communicator, "Builds")(self._PyTravisCI["com"]["requester"])
return comm.from_id_or_slug(repository_id_or_slug=self.id, parameters=params)
def get_caches(self, *, params: Optional[dict] = None) -> "resource_types.Caches":
"""
Provides the list of caches of the current repository.
Official Travis CI API documentation:
- https://developer.travis-ci.org/resource/caches
:param params:
The query parameters to append to the URL.
"""
comm = getattr(communicator, "Caches")(self._PyTravisCI["com"]["requester"])
return comm.from_id_or_slug(repository_id_or_slug=self.id, parameters=params)
def get_crons(self, *, params: Optional[dict] = None) -> "resource_types.Crons":
"""
Provides the list of crons of the current repository.
Official Travis CI API documentation:
- https://developer.travis-ci.org/resource/crons
:param params:
The query parameters to append to the URL.
"""
comm = getattr(communicator, "Crons")(self._PyTravisCI["com"]["requester"])
return comm.from_id_or_slug(repository_id_or_slug=self.id, parameters=params)
def get_env_var(
self, env_var_id: str, *, params: Optional[dict] = None
) -> "resource_types.EnvVar":
"""
Provides an environment variable from its ID.
Official Travis CI API documentation:
- https://developer.travis-ci.org/resource/env_var
:param env_var_id:
The ID of the environment variable to get.
:param params:
The query parameters to append to the URL.
"""
comm = getattr(communicator, "EnvVar")(self._PyTravisCI["com"]["requester"])
return comm.from_id_or_slug(
env_var_id=env_var_id, repository_id_or_slug=self.id, parameters=params
)
def get_env_vars(
self, *, params: Optional[dict] = None
) -> "resource_types.EnvVars":
"""
Provides the list of environment variables of the current repository.
Official Travis CI API documentation:
- https://developer.travis-ci.org/resource/env_vars
:param params:
The query parameters to append to the URL.
"""
comm = getattr(communicator, "EnvVars")(self._PyTravisCI["com"]["requester"])
return comm.from_id_or_slug(repository_id_or_slug=self.id, parameters=params)
def create_env_var(
self,
name: str,
value: str,
*,
is_public: bool = False,
branch: Optional[str] = None,
params: Optional[dict] = None,
) -> "resource_types.EnvVar":
"""
Creates a new environment variable into the current repository.
Official Travis CI API documentation:
- https://developer.travis-ci.org/resource/env_vars
:param name:
The environment variable name, e.g. FOO.
:param value:
The environment variable's value, e.g. bar.
:param is_public:
Whether this environment variable should be publicly visible or not.
:param branch:
The env_var's branch.
:raise TypeError:
When the types of :code:`name` and :code:`value` are not :py:class`str`.
"""
if not isinstance(name, str):
raise TypeError(f"<name> {name} should be {str}. {type(name)} given.")
if not isinstance(value, str):
raise TypeError(f"<value> {value} should be {str}. {type(value)} given.")
data = {
"env_var.name": name,
"env_var.value": value,
"env_var.public": is_public,
}
if branch is not None:
data["env_var.branch"] = branch
comm = getattr(communicator, "EnvVars")(self._PyTravisCI["com"]["requester"])
return comm.create(repository_id_or_slug=self.id, data=data, parameters=params)
def get_key_pair(
self, *, params: Optional[dict] = None
) -> "resource_types.KeyPair":
"""
Provides the RSA key pair of the current repository.
Official Travis CI API documentation:
- https://developer.travis-ci.org/resource/key_pair
:param params:
The query parameters to append to the URL.
"""
comm = getattr(communicator, "KeyPair")(self._PyTravisCI["com"]["requester"])
return comm.from_id_or_slug(repository_id_or_slug=self.id, parameters=params)
def create_key_pair(
self,
description: str,
value: Union[str, bytes],
*,
params: Optional[dict] = None,
) -> "resource_types.KeyPair":
"""
Creates a new RSA key pair.
:param description:
A text description.
:param value:
The private key.
:raise TypeError:
When the types of | |
# -*- coding: utf-8 -*-
import os
import json
from collections import OrderedDict
from django.conf import settings
from bs4 import BeautifulSoup
from portal.portal_helper import Content
class SphinxContent:
    """Constants naming the kinds of Sphinx-generated content the sitemap builders handle."""
    PADDLE = 'paddle'  # main Paddle documentation
    PADDLE_API = 'api'  # Paddle API reference
    VISUALDL = 'visualdl'  # VisualDL documentation
def paddle_sphinx_fluid_sitemap(original_documentation_dir, generated_documentation_dir, version, output_dir_name):
    """
    Generates the Paddle Fluid documentation sitemap for all languages.

    :param original_documentation_dir: source docs dir (kept for a uniform signature).
    :param generated_documentation_dir: root of the generated Sphinx HTML output.
    :param version: documentation version being processed.
    :param output_dir_name: destination directory name; '/fluid' is appended.
    """
    # print() call form is valid on both Python 2 and Python 3.
    print('Generating sitemap for Paddle Fluid')
    parent_path_map = {'en': '/fluid/en/html/',
                       'zh': '/fluid/cn/html/'}
    output_dir_name = output_dir_name + '/fluid'
    _paddle_sphinx_sitemap(original_documentation_dir, generated_documentation_dir, version, output_dir_name,
                           parent_path_map)
def paddle_sphinx_v2v1_sitemap(original_documentation_dir, generated_documentation_dir, version, output_dir_name):
    """
    Generates the Paddle V2/V1 documentation sitemap for all languages.

    :param original_documentation_dir: source docs dir (kept for a uniform signature).
    :param generated_documentation_dir: root of the generated Sphinx HTML output.
    :param version: documentation version being processed.
    :param output_dir_name: destination directory name.
    """
    # print() call form is valid on both Python 2 and Python 3.
    print('Generating sitemap for Paddle V2V1')
    parent_path_map = {'en': '/v2/en/html/',
                       'zh': '/v2/cn/html/'}
    _paddle_sphinx_sitemap(original_documentation_dir, generated_documentation_dir, version, output_dir_name,
                           parent_path_map)
def _paddle_sphinx_sitemap(original_documentation_dir, generated_documentation_dir, version, output_dir_name, parent_path_map):
    """
    Builds and writes one DOCUMENTATION sitemap per language listed in
    parent_path_map, from each language's generated index.html.

    :param original_documentation_dir: unused here; kept so all sitemap
        entry points share the same call signature.
    :param generated_documentation_dir: root of the generated Sphinx HTML output.
    :param version: documentation version; selects the destination directory.
    :param output_dir_name: destination directory name used in generated links.
    :param parent_path_map: language code -> HTML subdirectory for that language.
    """
    versioned_dest_dir = get_destination_documentation_dir(version, output_dir_name)
    for lang, parent_path in parent_path_map.items():
        sitemap = None
        # Using the index.html of the generated Sphinx HTML documentation,
        # separately for each language, generate a sitemap.
        index_html_path = '%s/%s/index.html' % (generated_documentation_dir, parent_path)
        sitemap = _create_paddle_sphinx_site_map_from_index(index_html_path, lang, Content.DOCUMENTATION, output_dir_name)
        _write_sphinx_sitemap(sitemap, versioned_dest_dir, lang)
def paddle_api_sphinx_fluid_sitemap(original_documentation_dir, generated_documentation_dir, version, output_dir_name):
    """
    Generates the Paddle Fluid API sitemap for all languages.

    :param original_documentation_dir: source docs dir (kept for a uniform signature).
    :param generated_documentation_dir: root of the generated Sphinx HTML output.
    :param version: documentation version being processed.
    :param output_dir_name: destination directory name; '/fluid' is appended.
    """
    # print() call form is valid on both Python 2 and Python 3.
    print('Generating sitemap for Paddle fluid API')
    parent_path_map = {'en': '/fluid/api/en/html/',
                       'zh': '/fluid/api/cn/html/'}
    output_dir_name = output_dir_name + '/fluid'
    _paddle_api_sphinx_sitemap(original_documentation_dir, generated_documentation_dir, version, output_dir_name, parent_path_map)
def paddle_api_sphinx_v2v1_sitemap(original_documentation_dir, generated_documentation_dir, version, output_dir_name):
    """
    Generates the Paddle V2/V1 API sitemap for all languages.

    :param original_documentation_dir: source docs dir (kept for a uniform signature).
    :param generated_documentation_dir: root of the generated Sphinx HTML output.
    :param version: documentation version being processed.
    :param output_dir_name: destination directory name.
    """
    # Dropped an unused versioned_dest_dir computation here;
    # _paddle_api_sphinx_sitemap derives the destination itself.
    # print() call form is valid on both Python 2 and Python 3.
    print('Generating sitemap for Paddle V2V1 API')
    parent_path_map = {'en': '/v2/api/en/html/',
                       'zh': '/v2/api/cn/html/'}
    _paddle_api_sphinx_sitemap(original_documentation_dir, generated_documentation_dir, version, output_dir_name, parent_path_map)
def _paddle_api_sphinx_sitemap(original_documentation_dir, generated_documentation_dir, version, output_dir_name, parent_path_map):
    """
    Builds and writes the API sitemaps. Only the English index is parsed;
    the Chinese sitemap is generated from the same English HTML (with the
    link language forced to 'en') because no Chinese API docs exist yet,
    so the 'zh' loop iteration intentionally does nothing.

    :param original_documentation_dir: unused here; kept so all sitemap
        entry points share the same call signature.
    :param generated_documentation_dir: root of the generated Sphinx HTML output.
    :param version: documentation version; selects the destination directory.
    :param output_dir_name: destination directory name used in generated links.
    :param parent_path_map: language code -> HTML subdirectory for that language.
    """
    versioned_dest_dir = get_destination_documentation_dir(version, output_dir_name)
    for lang, parent_path in parent_path_map.items():
        sitemap = None
        # Using the index.html of the generated Sphinx HTML documentation,
        # separately for each language, generate a sitemap.
        index_html_path = '%s/%s/index.html' % (generated_documentation_dir, parent_path)
        if lang == 'en':
            sitemap = _create_paddle_sphinx_site_map_from_index(index_html_path, lang, Content.API, output_dir_name)
            _write_sphinx_sitemap(sitemap, versioned_dest_dir, lang)
            # Make a copy of EN documentation for now, since we only have API docs in english
            # We override the link language prefix
            # TODO(thuan): Fix this once we have chinese API documentation
            sitemap = _create_paddle_sphinx_site_map_from_index(index_html_path, 'zh', Content.API, output_dir_name, 'en')
            _write_sphinx_sitemap(sitemap, versioned_dest_dir, 'zh')
def visualdl_sphinx_sitemap(original_documentation_dir, generated_documentation_dir, version, output_dir_name):
    """
    Generates the VisualDL documentation sitemap for all languages.

    :param original_documentation_dir: source docs dir (kept for a uniform signature).
    :param generated_documentation_dir: root of the generated Sphinx HTML output.
    :param version: documentation version; selects the destination directory.
    :param output_dir_name: destination directory name used in generated links.
    """
    versioned_dest_dir = get_destination_documentation_dir(version, output_dir_name)
    # print() call form is valid on both Python 2 and Python 3.
    print('Generating sitemap for VisualDL')
    parent_path_map = {'en': '/en/html/',
                       'zh': '/cn/html/'}
    for lang, parent_path in parent_path_map.items():
        sitemap = None
        # Using the index.html of the generated Sphinx HTML documentation,
        # separately for each language, generate a sitemap.
        index_html_path = '%s/%s/index.html' % (generated_documentation_dir, parent_path)
        sitemap = _create_visualdl_sphinx_site_map_from_index(index_html_path, lang, output_dir_name)
        _write_sphinx_sitemap(sitemap, versioned_dest_dir, lang)
def _sphinx_sitemap(original_documentation_dir, generated_documentation_dir, version, output_dir_name, sphinx_content):
    """
    Legacy combined entry point: builds and writes sitemaps per language,
    dispatching on sphinx_content (PADDLE, PADDLE_API or VISUALDL). The
    PADDLE_API branch mirrors _paddle_api_sphinx_sitemap: only the English
    HTML is parsed and the Chinese sitemap is cloned from it.

    :param original_documentation_dir: unused here; kept for a uniform signature.
    :param generated_documentation_dir: root of the generated Sphinx HTML output.
    :param version: documentation version; v0.9.0 used a different layout.
    :param output_dir_name: destination directory name used in generated links.
    :param sphinx_content: one of the SphinxContent constants.
    """
    versioned_dest_dir = get_destination_documentation_dir(version, output_dir_name)
    print 'Generating sitemap for %s' % sphinx_content
    parent_path_map = { 'en': '/en/html/',
                        'zh': '/cn/html/' }
    # Version 0.9.0 predates the en/cn html layout.
    if version == '0.9.0':
        parent_path_map = { 'en': '/doc/',
                            'zh': '/doc_cn/'}
    for lang, parent_path in parent_path_map.items():
        sitemap = None
        # Using the index.html of the generated Sphinx HTML documentation,
        # separately for each language, generate a sitemap.
        index_html_path = '%s/%s/index.html' % (generated_documentation_dir, parent_path)
        if sphinx_content == SphinxContent.PADDLE:
            sitemap = _create_paddle_sphinx_site_map_from_index(index_html_path, lang, Content.DOCUMENTATION, output_dir_name)
            _write_sphinx_sitemap(sitemap, versioned_dest_dir, lang)
        elif sphinx_content == SphinxContent.PADDLE_API:
            if lang == 'en':
                sitemap = _create_paddle_sphinx_site_map_from_index(index_html_path, lang, Content.API, output_dir_name)
                _write_sphinx_sitemap(sitemap, versioned_dest_dir, lang)
                # Make a copy of EN documentation for now, since we only have API docs in english
                # We override the link language prefix
                # TODO(thuan): Fix this once we have chinese API documentation
                sitemap = _create_paddle_sphinx_site_map_from_index(index_html_path, 'zh', Content.API, output_dir_name, 'en')
                _write_sphinx_sitemap(sitemap, versioned_dest_dir, 'zh')
        elif sphinx_content == SphinxContent.VISUALDL:
            sitemap = _create_visualdl_sphinx_site_map_from_index(index_html_path, lang, output_dir_name)
            _write_sphinx_sitemap(sitemap, versioned_dest_dir, lang)
def _write_sphinx_sitemap(sitemap, versioned_dest_dir, lang):
    """Persist a sitemap dict as JSON in the content directory, if one was built."""
    if not sitemap:
        return
    output_path = get_sitemap_destination_path(versioned_dest_dir, lang)
    with open(output_path, 'w') as outfile:
        json.dump(sitemap, outfile)
def _create_paddle_sphinx_site_map_from_index(index_html_path, language, content_id, output_dir_name, link_language_prefix=None):
    """
    Given an index.html generated from running Sphinx on a doc directory, parse
    the HTML tree to get the links from the navigation menu.

    :param index_html_path: path to the generated index.html to parse.
    :param language: language code used for titles and (by default) links.
    :param content_id: Content.DOCUMENTATION or Content.API; API sections
        suppress parent links and use 'API' as the title.
    :param output_dir_name: directory prefix embedded in the generated links.
    :param link_language_prefix: when set, overrides *language* in link URLs
        (used to point Chinese sitemaps at English-only API pages).
    :returns: an OrderedDict sitemap with 'title' and 'sections' keys.

    Eg. creates Paddle doc TOC from HTML navigation. Example of HTML:
    <nav class="doc-menu-vertical" role="navigation">
        <ul>
            <li class="toctree-l1">
                <a class="reference internal" href="getstarted/index_en.html">GET STARTED</a>
                <ul>
                    <li class="toctree-l2">
                        <a class="reference internal" href="getstarted/build_and_install/index_en.html">Install and Build</a>
                        <ul>
                            <li class="toctree-l3">
                                <a class="reference internal" href="getstarted/build_and_install/docker_install_en.html">PaddlePaddle in Docker Containers</a>
                            </li>
                            <li class="toctree-l3">
                                <a class="reference internal" href="getstarted/build_and_install/build_from_source_en.html">Installing from Sources</a>
                            </li>
                        </ul>
                    </li>
                </ul>
            </li>
            <li class="toctree-l1">
                <a class="reference internal" href="howto/index_en.html">HOW TO</a>
            </li>
        </ul>
    </nav>
    """
    allow_parent_links = True
    title_en = 'Documentation'
    title_zh = '使用文档'
    if content_id == Content.API:
        allow_parent_links = False # We do not allow parent links for API section
        title_en = 'API'
        title_zh = 'API'
    with open(index_html_path) as html:
        chapters = []
        sitemap = OrderedDict()
        sitemap['title'] = OrderedDict( { 'en': title_en, 'zh': title_zh} )
        sitemap['sections'] = chapters
        navs = BeautifulSoup(html, 'lxml').findAll('nav', class_='doc-menu-vertical')
        if len(navs) > 0:
            # Only top-level <li> items are walked here; recursion handles nesting.
            chapters_container = navs[0].find('ul', recursive=False)
            if chapters_container:
                for chapter in chapters_container.find_all('li', recursive=False):
                    _create_sphinx_site_map(chapters, chapter, language, content_id, output_dir_name, allow_parent_links, link_language_prefix)
        else:
            print 'Cannot generate sphinx sitemap, nav.doc-menu-vertical not found in %s' % index_html_path
    return sitemap
def _create_visualdl_sphinx_site_map_from_index(index_html_path, language, output_dir_name):
    """
    Builds the VisualDL sitemap by parsing the navigation menu of the
    generated index.html. Parent links are disabled for VisualDL content.

    :param index_html_path: path to the generated index.html to parse.
    :param language: language code used for titles and links.
    :param output_dir_name: directory prefix embedded in the generated links.
    :returns: an OrderedDict sitemap with 'title' and 'sections' keys.
    """
    with open(index_html_path) as html:
        chapters = []
        sitemap = OrderedDict()
        sitemap['title'] = OrderedDict( { 'en': 'Documentation', 'zh': '文档'} )
        sitemap['sections'] = chapters
        navs = BeautifulSoup(html, 'lxml').findAll('nav', class_='doc-menu-vertical')
        if len(navs) > 0:
            chapters_container = navs[0].find('ul', recursive=True)
            if chapters_container:
                for chapter in chapters_container.find_all('li', recursive=False):
                    _create_sphinx_site_map(chapters, chapter, language, Content.VISUALDL, output_dir_name, allow_parent_links=False)
        else:
            # Fixed diagnostic: the old message named nav.wy-nav-side, but the
            # selector actually searched above is nav.doc-menu-vertical.
            print('Cannot generate sphinx sitemap, nav.doc-menu-vertical not found in %s' % index_html_path)
    return sitemap
def _create_sphinx_site_map(parent_list, node, language, content_id, output_dir_name, allow_parent_links=True, link_language_prefix=None):
    """
    Recursive function to append links to a new parent list object by going down the
    nested lists inside the HTML, using BeautifulSoup tree parser.

    :param parent_list: list the new node dict is appended to (may be None).
    :param node: BeautifulSoup <li> element to convert.
    :param language: language code used for titles and (by default) links.
    :param content_id: content type constant, forwarded to recursive calls.
    :param output_dir_name: directory prefix embedded in the generated links.
    :param allow_parent_links: when False, only leaf nodes receive a 'link'.
    :param link_language_prefix: when set, overrides *language* in link URLs.
    """
    if node:
        node_dict = OrderedDict()
        # Idiomatic identity test (was `parent_list != None`); same behavior.
        if parent_list is not None:
            parent_list.append(node_dict)
        sections = node.findAll('ul', recursive=False)
        first_link = node.find('a')
        if first_link:
            link_language = link_language_prefix if link_language_prefix else language
            link_url = '/%s/%s/%s' % (output_dir_name, link_language, first_link['href'])
            node_dict['title'] = OrderedDict({ language: first_link.text })
            if allow_parent_links:
                # If we allow parent links, then we will add the link to the parent no matter what
                node_dict['link'] = OrderedDict({language: link_url})
            elif not sections:
                # If parent links are not allowed, and the parent does not have children then add a link
                node_dict['link'] = OrderedDict({ language: link_url})
        for section in sections:
            sub_sections = section.findAll('li', recursive=False)
            if len(sub_sections) > 0:
                # NOTE(review): this re-initializes 'sections' for every <ul>,
                # discarding entries from a previous <ul> sibling — presumably
                # each <li> has at most one <ul>; confirm before changing.
                node_dict['sections'] = []
            for sub_section in sub_sections:
                _create_sphinx_site_map(node_dict['sections'], sub_section, language, content_id, output_dir_name, allow_parent_links, link_language_prefix)
def inject_operators_link(sitemap, lang):
    """
    Mutates *sitemap* in place: appends a ``$ref`` entry pointing at the
    "operators" sitemap under the first section whose title is 'API'.
    Does nothing when no API section exists or its 'sections' list is empty.
    """
    # Iterate through the sitemap, and insert "Operators" under the API section.
    sections = None
    for section in sitemap['sections']:
        if section['title'][lang] == 'API':
            sections = section['sections']
            break
    if sections:
        sections.append({
            '$ref': {
                lang: '%s/%s' % (Content.DOCUMENTATION, _get_sitemap_name(lang, 'operators'))
            }
        })
def book_sitemap(original_documentation_dir, generated_documentation_dir, version, output_dir_name):
    """Generate the Paddle Book sitemap for both supported languages."""
    for lang in ('en', 'zh'):
        _book_sitemap_with_lang(original_documentation_dir, generated_documentation_dir, version, output_dir_name, lang)
def models_sitemap(original_documentation_dir, generated_documentation_dir, version, output_dir_name):
    """Generate the models sitemap from the English and Chinese index files."""
    for index_file, lang in (('README.html', 'en'), ('README.cn.html', 'zh')):
        _create_models_sitemap(generated_documentation_dir, version, index_file, output_dir_name, lang)
def _create_models_sitemap(generated_documentation_dir, version, html_file_name, output_dir_name, language):
"""
Generate a sitemap for models' content by parsing the content of the index
(root readme) file. Iterates through all the links inside list items of the
file, and writes the constructed sitemap file.
"""
github_path = 'https://github.com/PaddlePaddle/models/tree/'
root_html_path = os.path.join(generated_documentation_dir, html_file_name)
# Create models sitemap template
sections = []
title = '模型库' if language == 'zh' else 'Models'
link = '%s/%s' % (Content.MODELS, html_file_name)
sitemap = {
'title': { language: title },
'sections': [
{
'title': {language: title},
'link': {language: link},
'sections': sections
}
]
}
# Read the stripped html file.
# TODO [<NAME>]: Confirm the root_html_path is correct
with open(root_html_path) as original_html_file:
soup = BeautifulSoup(original_html_file, 'lxml')
anchor_tags = soup.select('li a[href]')
# Extract the links and the article titles
for tag in anchor_tags:
title = { language: tag.text }
# The absolute URLs link to the github site.
# Transform them into relative URL for local HTML files.
# Dynamically remove develop or v0.10.0, etc
# NOTE: Use of `link_zh` instead of `link` because all the links lead to Chinese pages.
link_zh = Content.MODELS + '/' + tag['href']
| |
Method': 'طريقة التحديث',
'Update Policy': 'سياسة التحديث',
'Update Report': 'تحديث التقرير',
'Update Request': 'تحديث الطلب',
'Update Service Profile': 'تحديث خدمة البيانات الشخصية',
'Update this entry': 'تحديث هذه الادخال',
'Update Unit': 'تحديث وحدة',
'Update your current ordered list': 'تحديث القائمة المرتبة الحالية',
'updated': 'تم التحديث',
'Updated By': 'تم تحديثه من طرف',
'updates only': 'تحديثات فقط',
'Upload an image file (png or jpeg), max. 400x400 pixels!': 'حمل ملف صورة ( png أو JPEG)، كحد أقصى. 400x400 بكسل!',
'Upload an image file here.': 'تحميل ملف الصور هنا.',
"Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": 'حمل ملف صورة هنا. إذا لم يكن لتحميل ملف الصورة، ثم يجب تحديد موقعها في مجال URL.',
'Upload an image, such as a photo': 'تحميل صورة ، مثل صورة شمسية',
'Upload different Image': 'تحميل صورة مختلفة',
'Upload Format': 'تنسيق تحميل',
'Upload Image': 'تحميل الصور',
'Uploaded Image': 'صورة محملة',
'Urban area': 'المنطقة الحضرية',
'Urban Risk & Planning': 'المخاطر الحضرية والتخطيط',
'Urban Risk and Community Resilience': 'خطر التمدين والمرونة الجماعة',
'Urdu': 'أوردو',
'Urgent': 'عاجل',
'URL': 'موقع المعلومات العالمي',
'Use decimal': 'استخدام العشرية',
'Use deg, min, sec': 'استخدام درجة، دقيقة، ثانية',
'Use Geocoder for address lookups?': 'استخدام Geocoder لعمليات البحث عن عنوان؟',
'Used in onHover Tooltip & Cluster Popups to differentiate between types.': 'تستعمل على وضع مؤشرمرشد فوق الرموز في جملة واضحة للتفريق بين الأنواع .',
'Used to build onHover Tooltip & 1st field also used in Cluster Popups to differentiate between records.': 'تستعمل على وضع مؤشرمرشد فوق الرموز كم أثستخدم المجال الأول في جملة واضحة للتفريق بين السجلات.',
'Used to import data from spreadsheets into the database': 'تستخدم لأخذ بيانات من جداول البيانات إلى قاعدة البيانات',
'User': 'المستخدم',
'User & Administration Guide': 'دليل المستخدم والإدارة',
'User Account': 'حساب المستخدم',
'User Account has been Disabled': 'تم تعطيل حساب المستخدم',
'User added': 'تمت اضافة المستخدم',
'User deleted': 'تم حذف المستخدم',
'User Management': 'إدارة المستخدمين',
'User Profile': 'ملف تعريفي للمستخدم',
'User Roles': 'أدوار المستخدمين',
'User Updated': 'تم تحديث المستخدم',
'User updated': 'تم تحديث المستخدم',
'Users': 'المستخدمين',
'Users removed': 'المستخدمين الملغين',
'Valid From': 'صالح من تاريخ',
'Valid Until': 'صالحة حتى',
'Value': 'القيمة',
'Value of Indicator': 'قيمة المؤشر',
'Value per Pack': 'القيمة لكل حزمة',
'Value Required': 'القيمة المطلوبة',
'Various Reporting functionalities': 'تعدد الوظائف التقريرية',
'VCA (Vulnerability and Capacity Assessment)': 'VCA (تقييم الضعف والقدرات )',
'Vector Control': 'مكافحة ناقلات الأمراض',
'Vehicle': 'مركبة',
'Vehicle Plate Number': 'رقم اللوحة المرورية',
'Vehicle Types': 'أنواع السيارات',
'Venue': 'مكان',
'Verified': 'تم التحقق',
'Verified?': 'تم التحقق منه؟',
'Verify Password': 'التحقق من كلمة المرور',
'Version': 'الإصدار',
'Very Good': 'جيد جدا',
'Very High': 'عال جدا',
'Very Strong': 'قوي جدا',
'View': 'رأي',
'View and/or update their details': 'عرض و/أو تحديث بياناتهم',
'View full screen': 'المشاهدة بحجم الشاشة',
'View Fullscreen Map': ' عرض الخريطة بشاشة كاملة',
'View or update the status of a hospital.': 'عرض أو تحديث حالة المستشفى.',
'View Outbox': 'عرض البريد الصادر',
'View pending requests and pledge support.': 'عرض الطلبات المعلقة و تعهدات الدعم.',
'View Test Result Reports': 'عرض تقارير نتائج الاختبار',
'View the hospitals on a map.': 'عرض المستشفيات على الخريطة.',
'Village': 'قرية',
'Village Leader': 'زعيم القرية',
'Violence Prevention': 'الوقاية من العنف',
'Visible?': 'مرئي؟',
'Vocational Training and Employment Skills': 'التدريب المهني ومهارات التوظيف',
'Volcanic Ash Cloud': 'سحابة الرماد البركاني',
'Volcanic Event': 'حدث بركاني',
'Volcano': 'بركان',
'Volume (m3)': 'الحجم (m3)',
'Volunteer': 'المتطوعين',
'Volunteer added': 'تم اضافة متطوع',
'Volunteer and Staff Management': 'المتطوعين والموظفين والإدارة',
'Volunteer Availability': 'توفر المتطوعين',
'Volunteer availability deleted': 'تم إلغاء توفر المتطوعين',
'Volunteer availability updated': 'تحديث تَوفُرالمتطوعين',
'Volunteer deleted': 'تم حذف المتطوع',
'Volunteer Details': 'تفاصيل المتطوع',
'Volunteer Details updated': 'تم تحديث تفاصيل المتطوع',
'Volunteer Hours': 'ساعات التطوع',
'Volunteer ID': 'المتطوعين ID',
'Volunteer Information': 'معلومات حول المتطوع',
'Volunteer Insurance': 'تأمين المتطوعين ',
'Volunteer Management': 'إدارة المتطوعين',
'Volunteer Project': 'مشروع التطوع',
'Volunteer Recognition': 'تقييم المتطوعين',
'Volunteer Record': 'سجل المتطوع',
'Volunteer Recruitment': 'توظيف المتطوع',
'Volunteer Report': 'تقرير المتطوعين',
'Volunteer Role': 'دور المتطوعين',
'Volunteer Role added': 'إضافة دور المتطوع',
'Volunteer Role Catalog': 'فهرس دور المتطوعين',
'Volunteer Role deleted': 'حذف دور المتطوع',
'Volunteer Role Details': 'تفاصيل دور المتطوع',
'Volunteer Role updated': 'تحديث دور المتطوع',
'Volunteer Roles': 'أدوار المتطوعين',
'Volunteer Service Record': 'تسجيل خدمة المتطوع',
'Volunteer Start Date': 'تأريخ بدء التطوع',
'Volunteer Training': 'تدريب المتطوعين',
'Volunteering in Emergencies Guidelines/Toolkit': 'العمل التطوعي في حالات الطوارئ المبادئ التوجيهية / أدوات',
'Volunteering in Pandemic Emergency Situations': 'التطوع في الحالات الوبائية الطارئة',
'Volunteers': 'المتطوعين',
'Volunteers Report': 'تقارير المتطوعين',
'Volunteers were notified!': 'تم ابلاغ المتطوعين!',
'Votes': 'الأصوات',
'Vulnerability': 'مواطن الضعف',
'Vulnerable Populations': 'السكان المعرضين للخطر',
'Warehouse': 'المستودع',
'Warehouse added': 'أُضيف المستودع',
'Warehouse deleted': 'تم حذف المستودع',
'Warehouse Details': 'تفاصيل المستودع',
'Warehouse Manager': 'مدير المستودع',
'Warehouse Stock': 'المخزون في المستودع',
'Warehouse Stock Expiration Report': 'تقرير أنتهاء المخزون في المستودع',
'Warehouse Stock Report': 'تقرير سند المخزونات',
'Warehouse Type': 'نوع المخزن',
'Warehouse Type added': 'تم اضافة نوع مستودع',
'Warehouse Type deleted': 'تم حذف نوع مستودع',
'Warehouse Type Details': 'تفاصيل نوع المستودع',
'Warehouse Type updated': 'تم تحديث نوع مستودع',
'Warehouse Types': 'أنواع مستودع',
'Warehouse updated': 'تم تحديث المستودع',
'Warehouse/ Store': 'مستودع / مخزن',
'Warehouses': 'المخازن',
'WARNING': 'تحذير',
'Water': 'ماء',
'Water and Sanitation': 'الماء والنظافة',
'Water Sanitation and Hygiene Promotion': 'مياه الصرف الصحي وتعزيز النظافة',
'Water Sanitation Hygiene': 'نظافة مياه الصرف الصحي',
'Water Supply': 'إمدادات المياه',
'Water Testing': 'اختبار المياه',
'Watsan': 'المياه والصرف الصحي',
'WatSan': 'المياه والصرف الصحي',
'Watsan Officer': 'موظفي البناء والاصحاح',
'Watsan Technician': 'فني البناء والاصحاح',
'wavy': 'متموج',
'Waybill Number': 'رقم بوليصة الشحن',
'We have tried': 'لقد حاولنا',
'Weak': 'ضعيف',
'Weather': 'الطقس',
'Weather Stations': 'حالة الطقس',
'Web Server': 'سيرفر الويب',
'Website': 'موقع ويب',
'Wednesday': 'الأربعاء',
'Week': 'أسبوع',
'Weekends only': 'عطلة نهاية الأسبوع فقط',
'Weekly': 'أسبوعي',
'Weight': 'الوزن',
'Weight (kg)': 'الوزن (كلغ)',
'Weighting': 'الترجيح',
'Weightings should add up to 1.0': 'وينبغي أن تضيف ما يصل إلى 1.0 وزن',
'Welcome to the Sahana Portal at': 'مرحبا بكم في بوابة ساهانا في',
'What order to be contacted in.': 'ما هو الترتيب الذي سيتم الاتصال به.',
'When needed': 'عند الاحتياج',
'When reports were entered': 'متى أدخلت التقارير',
'Whiskers': 'شوارب',
'white': 'أبيض',
'Who is doing What Where': 'من يفعل ماذا أين',
'Who usually collects water for the family?': 'من الذي يجمع عادةالمياه للعائلة؟',
'widowed': 'أرمل',
'Width (m)': 'العرض (م)',
'Wild Fire': 'حريق بري',
'Will create and link your user account to the following records': 'سيتم إنشاء وربط حساب المستخدم الخاص بك إلى السجلات التالية',
'Wind Chill': 'رياح باردة',
'Window frame': 'إطار النافذة',
'Winter Storm': 'عاصفة شتائية',
'within human habitat': 'داخل المستوطنات البشرية',
'Women': 'نساء',
'Women of Child Bearing Age': 'النساء في سن الإنجاب',
'Wooden poles': 'أعمدة خشبية',
'Word': 'كلمة',
'Work': 'عمل',
'Work on Program': 'العمل ضمن برنامج',
'Work phone': 'هاتف عمل',
'Working hours start': 'بدء ساعات العمل',
'Writing': 'جاري الكتابة',
'written-only': 'كتابة فقط',
'X-Ray': 'X-راي',
'xlwt module not available within the running Python - this needs installing for XLS output!': 'وحدة xlwt غير متوفرة في Python - هذا يحتاج الى التثبيت لاخراجات XLS',
'Year': 'السنة',
'Year built': 'سنة البناء',
'Year of Birth': 'سنة الولادة',
'Year of Manufacture': 'سنة الانتاج',
'Year that the organization was founded': 'السنة التي تأسست المنظمة',
'Years': 'سنوات',
'Yellow': 'أصفر',
'yes': 'نعم',
'YES': 'نعم',
'Yes': 'نعم',
'Yes, delete the selected details': 'نعم، حذف التفاصيل المختارة',
'You are currently reported missing!': 'تم الإبلاغ عنكم كمفقودين!',
'You can click on the map below to select the Lat/Lon fields': 'يمكنك النقر على الخريطة أدناه لتحديد حقول خطوط العرض والطول',
"You can search by by group name, description or comments and by organization name or acronym. You may use % as wildcard. Press 'Search' without input to list all.": 'يمكنك البحث من قبل من قبل اسم المجموعة والوصف أو تعليقات واسم المؤسسة أو اختصار. يمكنك استخدام٪ كما البدل. اضغط على "بحث" دون إدخال لسرد كافة.',
"You can search by course name, venue name or event comments. You may use % as wildcard. Press 'Search' without input to list all events.": 'يمكنك البحث عن طريق اسم الدورة، اسم مكان أو تعليقات الحدث. يمكنك استخدام٪ كما البدل. اضغط على "بحث" دون إدخال لسرد كافة الأحداث.',
"You can search by job title or person name - enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": 'يمكنك البحث عن طريق المسمى الوظيفي أو اسم الشخص - دخول أي من الأسماء الأولى والمتوسطة أو مشاركة، مفصولة بمسافات. يمكنك استخدام٪ كما البدل. اضغط على "بحث" دون إدخال لسرد كافة الأشخاص.',
'You can search by name, acronym or comments': 'يمكنك البحث عن طريق الاسم، اختصار أو تعليقات',
'You can search by name, acronym, comments or parent name or acronym.': 'يمكنك البحث عن طريق الاسم، اختصار أو تعليقات أو اسم الأم أو اختصار.',
"You can search by person name - enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": 'يمكنك البحث عن طريق اسم الشخص - دخول أي من الأسماء الأولى والمتوسطة أو مشاركة، مفصولة بمسافات. يمكنك استخدام٪ كما البدل. اضغط على "بحث" دون إدخال لسرد كافة الأشخاص.',
"You can search by trainee name, course name or comments. You may use % as wildcard. Press 'Search' without input to list all trainees.": 'يمكنك البحث عن طريق اسم المتدرب، اسم الدورة أو تعليقات. يمكنك استخدام٪ | |
# repo: samadhicsec/threatware
#!/usr/bin/env python3
"""
Handler to invoke verifier
"""
import logging
import sys
import argparse
import json
from pathlib import Path
from storage.gitrepo import GitStorage
from utils.error import ThreatwareError
from utils.output import FormatOutput
import utils.logging
from providers import provider
from language.translate import Translate
from schemes.schemes import load_scheme
import actions.convert as convert
import actions.verify as verify
import actions.manage as manage
import actions.measure as measure
from utils.output import FormatOutput, OutputType
from data.key import key as Key
utils.logging.configureLogging()
logger = logging.getLogger(utils.logging.getLoggerName(__name__))
HANDLER_TEXTS_YAML = "handler_texts.yaml"
HANDLER_TEXTS_YAML_PATH = str(Path(__file__).absolute().parent.joinpath(HANDLER_TEXTS_YAML))
ACTION_CONVERT = 'convert'
ACTION_VERIFY = 'verify'
ACTION_MANAGE_INDEXDATA = 'manage.indexdata'
ACTION_MANAGE_CREATE = 'manage.create'
ACTION_MANAGE_SUBMIT = 'manage.submit'
ACTION_MANAGE_CHECK = 'manage.check'
ACTION_MEASURE = 'measure'
def lambda_handler(event, context):
    """AWS Lambda / CLI entry point for all threatware actions.

    Reads the request from event["queryStringParameters"], validates the
    parameter combination required by the requested action, then dispatches
    to convert/verify/manage/measure. Always returns an HTTP-style dict with
    statusCode 200; errors are reported inside the rendered body via
    FormatOutput, not via the status code.
    """
    # Get space and page from query string parameters
    qsp = event.get("queryStringParameters", {})
    # Copy only the parameters threatware understands into a fresh dict so
    # unrecognised query string values are never logged or propagated.
    # Each walrus assignment also binds the local used by the dispatch below
    # (bound to None when the parameter is absent).
    filtered_qsp = {"request":{}}
    if (action := qsp.get("action", None)) is not None:
        filtered_qsp["request"]["action"] = action
    if (schemeID := qsp.get("scheme", None)) is not None:
        filtered_qsp["request"]["scheme"] = schemeID
    if (docloc := qsp.get("docloc", None)) is not None:
        filtered_qsp["request"]["docloc"] = docloc
    if (doctemplate := qsp.get("doctemplate", None)) is not None:
        filtered_qsp["request"]["doctemplate"] = doctemplate
    if (id := qsp.get("ID", None)) is not None:
        filtered_qsp["request"]["ID"] = id
    if (IDprefix := qsp.get("IDprefix", None)) is not None:
        filtered_qsp["request"]["IDprefix"] = IDprefix
    if (lang := qsp.get("lang", None)) is not None:
        filtered_qsp["request"]["lang"] = lang
    if (output_format := qsp.get("format", None)) is not None:
        filtered_qsp["request"]["format"] = output_format
    if (convert_meta := qsp.get("meta", None)) is not None:
        filtered_qsp["request"]["meta"] = convert_meta
    logger.info(f"Threatware called with parameters = '{ filtered_qsp['request'] }'")
    # We need this to support localisation of keywords
    Translate.init(lang, filtered_qsp)
    # Determine output Content-Type
    if "format" in filtered_qsp["request"] and filtered_qsp["request"]["format"].lower() in ["json", "yaml"]:
        FormatOutput.output_format = filtered_qsp["request"]["format"].lower()
    # Fallback Content-Type; every handled branch below overwrites it via
    # the (content_type, body) pair returned from getContent().
    content_type = "application/json"
    # We can treat the parameters as static
    FormatOutput.request_parameters = filtered_qsp
    # Load the texts file with localised error messages
    handler_output = FormatOutput({"template-text-file":HANDLER_TEXTS_YAML_PATH})
    try:
        # Validate input: each branch logs the problem, records a localised
        # error on handler_output, and renders it as the response body.
        if action is None:
            logger.error("action is a mandatory parameter")
            handler_output.setError("action-is-mandatory", {})
            content_type, body = handler_output.getContent()
        elif action not in [ACTION_CONVERT, ACTION_VERIFY, ACTION_MANAGE_INDEXDATA, ACTION_MANAGE_CREATE, ACTION_MANAGE_SUBMIT, ACTION_MANAGE_CHECK, ACTION_MEASURE]:
            logger.error(f"the action parameter must be one of {[ACTION_CONVERT, ACTION_VERIFY, ACTION_MANAGE_INDEXDATA, ACTION_MANAGE_CREATE, ACTION_MANAGE_SUBMIT, ACTION_MANAGE_CHECK, ACTION_MEASURE]}")
            handler_output.setError("action-value", {"actions":[ACTION_CONVERT, ACTION_VERIFY, ACTION_MANAGE_INDEXDATA, ACTION_MANAGE_CREATE, ACTION_MANAGE_SUBMIT, ACTION_MANAGE_CHECK, ACTION_MEASURE]})
            content_type, body = handler_output.getContent()
        elif action in [ACTION_CONVERT, ACTION_VERIFY, ACTION_MANAGE_CREATE, ACTION_MANAGE_SUBMIT, ACTION_MANAGE_CHECK, ACTION_MEASURE] and schemeID is None:
            logger.error("scheme is a mandatory parameter")
            handler_output.setError("scheme-is-mandatory", {})
            content_type, body = handler_output.getContent()
        elif action in [ACTION_CONVERT, ACTION_VERIFY, ACTION_MANAGE_CREATE, ACTION_MANAGE_SUBMIT, ACTION_MANAGE_CHECK, ACTION_MEASURE] and docloc is None:
            logger.error("docloc is a mandatory parameter")
            handler_output.setError("docloc-is-mandatory", {})
            content_type, body = handler_output.getContent()
        elif action in [ACTION_VERIFY, ACTION_MEASURE] and doctemplate is None:
            logger.error(f"doctemplate is a mandatory parameter when action = {action}")
            handler_output.setError("doctemplate-is-mandatory", {"action":action})
            content_type, body = handler_output.getContent()
        elif action in [ACTION_MANAGE_INDEXDATA] and id is None:
            logger.error(f"ID is a mandatory parameter when action = {action}")
            handler_output.setError("id-is-mandatory", {"action":action})
            content_type, body = handler_output.getContent()
        elif action in [ACTION_MANAGE_CREATE] and IDprefix is None:
            logger.error(f"IDprefix is a mandatory parameter when action = {action}")
            handler_output.setError("idprefix-is-mandatory", {"action":action})
            content_type, body = handler_output.getContent()
        else:
            # If we are being called locally then we'll pull credentials locally
            if getattr(context, "threatware.cli", False):
                # Get creds locally
                execution_env = provider.get_provider("cli")
            else:
                GitStorage.containerised = True
                # We are being called as a lambda, so get credentials from cloud
                execution_env = provider.get_provider("aws.lambda")
                #execution_env = provider.get_provider("cli")
            # schemeID may legitimately be None for manage.indexdata; only
            # load a scheme when one was supplied.
            if schemeID is not None:
                schemeDict = load_scheme(schemeID)
            if action == ACTION_CONVERT:
                convert_config = convert.config()
                # Convert the TM document
                output = convert.convert(convert_config, execution_env, schemeDict, docloc)
                content_type, body = output.getContent(lambda : Key.config_serialisation(convert_meta))
            elif action == ACTION_VERIFY:
                convert_config = convert.config()
                # Convert the TM template
                convert_output = convert.convert_template(convert_config, execution_env, schemeDict, doctemplate)
                content_type, body = convert_output.getContent()
                # Each stage only proceeds if the previous one did not error;
                # on error the last (content_type, body) pair is the response.
                if convert_output.getResult() != OutputType.ERROR:
                    template_model = convert_output.getDetails()
                    # Convert the TM document
                    convert_output = convert.convert(convert_config, execution_env, schemeDict, docloc)
                    content_type, body = convert_output.getContent()
                    if convert_output.getResult() != OutputType.ERROR:
                        doc_model = convert_output.getDetails()
                        verify_config = verify.config(schemeDict)
                        # Verify the TM document
                        verify_output = verify.verify(verify_config, doc_model, template_model)
                        content_type, body = verify_output.getContent()
                        if verify_output.getResult() != OutputType.ERROR:
                            issues = verify_output.getDetails()
                            # Generate a report on verification issues and analysis
                            verify_output = verify.report(verify_config, doc_model, issues)
                            #body = verify_output.tojson()
                            content_type, body = verify_output.getContent()
            elif action == ACTION_MANAGE_INDEXDATA:
                manage_config = manage.config()
                output = manage.indexdata(manage_config, execution_env, id)
                content_type, body = output.getContent()
            elif action == ACTION_MANAGE_CREATE:
                manage_config = manage.config()
                output = manage.create(manage_config, execution_env, IDprefix, schemeID, docloc)
                content_type, body = output.getContent()
            elif action == ACTION_MANAGE_CHECK:
                convert_config = convert.config()
                # Convert the TM document
                convert_output = convert.convert(convert_config, execution_env, schemeDict, docloc)
                content_type, body = convert_output.getContent()
                if convert_output.getResult() != OutputType.ERROR:
                    doc_model = convert_output.getDetails()
                    manage_config = manage.config()
                    # 'check' relies on measure
                    measure_config = measure.config()
                    output = manage.check(manage_config, execution_env, docloc, schemeID, doc_model, measure_config, measure.distance)
                    content_type, body = output.getContent()
            elif action == ACTION_MANAGE_SUBMIT:
                convert_config = convert.config()
                # Convert the TM document
                convert_output = convert.convert(convert_config, execution_env, schemeDict, docloc)
                content_type, body = convert_output.getContent()
                if convert_output.getResult() != OutputType.ERROR:
                    doc_model = convert_output.getDetails()
                    manage_config = manage.config()
                    output = manage.submit(manage_config, execution_env, docloc, schemeID, doc_model)
                    content_type, body = output.getContent()
            elif action == ACTION_MEASURE:
                convert_config = convert.config()
                # Convert the TM template
                convert_output = convert.convert_template(convert_config, execution_env, schemeDict, doctemplate)
                content_type, body = convert_output.getContent()
                if convert_output.getResult() != OutputType.ERROR:
                    template_model = convert_output.getDetails()
                    # Convert the TM document
                    convert_output = convert.convert(convert_config, execution_env, schemeDict, docloc)
                    content_type, body = convert_output.getContent()
                    if convert_output.getResult() != OutputType.ERROR:
                        doc_model = convert_output.getDetails()
                        measure_config = measure.config()
                        output = measure.distance_to_template(measure_config, execution_env, doc_model, template_model)
                        content_type, body = output.getContent()
    except Exception as e:
        logger.error(e)
        # ThreatwareError subclasses carry their own localisable text key and
        # template values; anything else becomes a generic internal error.
        if issubclass(type(e), ThreatwareError):
            handler_output.setError(e.text_key, e.template_values)
        else:
            handler_output.setError("internal-error", {})
        content_type, body = handler_output.getContent()
    # Respond
    # NOTE: statusCode is 200 even for errors -- the body carries the detail.
    return {
        'statusCode': 200,
        "headers": {
            "Content-Type": f"{content_type}"
        },
        'body': body
    }
if __name__ == "__main__":
scheme_help = 'Identifier for the threat model scheme (which contains location information)'
doc_help = 'Location identifier of the document'
template_help = 'Identifier for the document template (overrides template in scheme)'
parser = argparse.ArgumentParser(description='Threat Model Verifier')
parser.add_argument("-l", "--lang", required=False, help="Language code for output texts")
parser.add_argument("-f", "--format", required=False, help="Format for output, either JSON or YAML", default="json", choices=['json', 'yaml'])
subparsers = parser.add_subparsers(dest="command")
# convert
parser_convert = subparsers.add_parser("convert", help='Convert a threat model for analysis')
parser_convert.add_argument('-s', '--scheme', required=True, help=scheme_help)
parser_convert.add_argument('-d', '--docloc', required=True, help=doc_help)
parser_convert.add_argument("-m", "--meta", required=False, help="What level of meta data about fields should be returned. Note, 'properties' returns 'tags' as well.", default="tags", choices=['none', 'tags', 'properties'])
# verify
parser_convert = subparsers.add_parser("verify", help='Verify a threat model is ready to be submitted for approval')
parser_convert.add_argument('-s', '--scheme', required=True, help=scheme_help)
parser_convert.add_argument('-d', '--docloc', required=True, help=doc_help)
parser_convert.add_argument('-t', '--doctemplate', help=template_help)
# manage
parser_manage = subparsers.add_parser("manage", help='Manage the status of threat models')
manage_subparsers = parser_manage.add_subparsers(dest="subcommand")
# manage.indexdata
parser_manage_indexdata = manage_subparsers.add_parser("indexdata", help='Get the threat model index metadata for a threat model')
parser_manage_indexdata.add_argument('-id', required=True, help='The document ID for the threat model index metadata to return')
# manage.createdata
parser_manage_create = manage_subparsers.add_parser("create", help='Create a new document ID for a new threat model')
parser_manage_create.add_argument('-idprefix', required=True, help='The prefix of the document ID e.g. "CMP.TMD"')
parser_manage_create.add_argument('-s', '--scheme', required=True, help=scheme_help)
parser_manage_create.add_argument('-d', '--docloc', required=True, help=doc_help)
# manage.check
parser_manage_check = manage_subparsers.add_parser("check", help='Check whether the current threat model requires re-approval')
parser_manage_check.add_argument('-s', '--scheme', required=True, help=scheme_help)
parser_manage_check.add_argument('-d', '--docloc', required=True, help=doc_help)
# manage.submit
parser_manage_submit = manage_subparsers.add_parser("submit", help='Submit a threat model for approval')
parser_manage_submit.add_argument('-s', '--scheme', required=True, help=scheme_help)
parser_manage_submit.add_argument('-d', '--docloc', required=True, help=doc_help)
# measure
parser_measure = subparsers.add_parser("measure", help='Measure the distance of a TM from its template')
parser_measure.add_argument('-s', '--scheme', required=True, help=scheme_help)
parser_measure.add_argument('-d', '--docloc', required=True, help=doc_help)
parser_measure.add_argument('-t', '--doctemplate', help=template_help)
#parser.add_argument('-a', '--action', required=True, help='The action to perform', choices=[ACTION_CONVERT, ACTION_VERIFY, ACTION_MANAGE, ACTION_MEASURE])
#parser.add_argument('-s', '--scheme', required=True, help='Identifier for the template scheme to load')
#parser.add_argument('-d', '--docloc', required=True, help='Identifier for the document to verify')
#parser.add_argument('-t', '--doctemplate', help='Identifier for the document template (overrides template in scheme)')
#parser.add_argument('-v', '--verifiers', nargs='*', help='Space separated list of verifiers to use (overrides verifiers in mapping)')
args = parser.parse_args()
# Build input for handler
event ={}
class Object(object):
pass
context = Object()
setattr(context, "threatware.cli", True)
action = args.command
if action == "manage":
action = action + "." + args.subcommand
event["queryStringParameters"] = {}
event["queryStringParameters"]["lang"] = args.lang if "lang" in args else None
event["queryStringParameters"]["format"] = args.format if "format" in args else None
event["queryStringParameters"]["action"] = action
event["queryStringParameters"]["scheme"] = args.scheme if "scheme" in args else None
event["queryStringParameters"]["docloc"] = args.docloc if "docloc" in args else None
event["queryStringParameters"]["meta"] = args.meta if "meta" in args else None
event["queryStringParameters"]["doctemplate"] = args.doctemplate if "doctemplate" in args else None
event["queryStringParameters"]["ID"] = args.id if "id" in args else None
event["queryStringParameters"]["IDprefix"] = args.idprefix if "idprefix" in args else None
#if args.verifiers:
# event["queryStringParameters"]["verifiers"] = ",".join(args.verifiers)
response | |
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
from functools import reduce
import paramiko
import os
import MySQLdb
import random
from orchestration.database import database_update
from orchestration.moosefs import commands as mfs_commands
from orchestration.network import commands as net_commands
from orchestration.moosefs.commands import Moosefs
from orchestration.network.commands import Net
from docker.errors import APIError
from docker.errors import NotFound
from .config import ConfigurationError
from .config import get_service_name_from_net
from .const import DEFAULT_TIMEOUT
from .const import LABEL_ONE_OFF
from .const import LABEL_PROJECT
from .const import LABEL_SERVICE
from .container import Container
from .legacy import check_for_legacy_containers
from .service import ConvergenceStrategy
from .service import parse_volume_from_spec
from .service import nap_parse_volume_from_spec
from .service import Service
from .service import VolumeFromSpec
from .utils import parallel_execute
import sys
log = logging.getLogger(__name__)
# database_ip = '192.168.56.101'
def sort_service_dicts(services):
    """Order service dicts so every service appears before the services that
    depend on it (via links, volumes_from or net).

    Raises DependencyError for self-links, self-volume mounts, or dependency
    cycles. Returns a new list; *services* itself is not reordered.
    """
    # Topological sort (Cormen/Tarjan algorithm).
    unmarked = services[:]
    temporary_marked = set()
    sorted_services = []

    def get_service_names(links):
        # A link is 'service' or 'service:alias'; keep just the service part.
        return [link.split(':')[0] for link in links]

    def get_service_names_from_volumes_from(volumes_from):
        return [
            parse_volume_from_spec(volume_from).source
            for volume_from in volumes_from
        ]

    def get_service_dependents(service_dict, services):
        # Every service that depends on service_dict through links,
        # volumes_from or a container network.
        name = service_dict['name']
        return [
            service for service in services
            if (name in get_service_names(service.get('links', [])) or
                name in get_service_names_from_volumes_from(service.get('volumes_from', [])) or
                name == get_service_name_from_net(service.get('net')))
        ]

    def visit(n):
        # Seeing a node again while it is still temporarily marked means we
        # followed a dependency edge back into the current DFS path: a cycle.
        if n['name'] in temporary_marked:
            if n['name'] in get_service_names(n.get('links', [])):
                raise DependencyError('A service can not link to itself: %s' % n['name'])
            if n['name'] in n.get('volumes_from', []):
                raise DependencyError('A service can not mount itself as volume: %s' % n['name'])
            else:
                raise DependencyError('Circular import between %s' % ' and '.join(temporary_marked))
        if n in unmarked:
            temporary_marked.add(n['name'])
            for m in get_service_dependents(n, services):
                visit(m)
            temporary_marked.remove(n['name'])
            unmarked.remove(n)
            # Prepend so dependencies end up before their dependents.
            sorted_services.insert(0, n)

    while unmarked:
        visit(unmarked[-1])
    return sorted_services
class Project(object):
"""
A collection of services.
"""
links = []
def __init__(self, name, services, client, use_networking=False, network_driver=None):
    """Create a project wrapping a named collection of services."""
    self.name = name
    self.services = services
    self.client = client
    self.use_networking = use_networking
    self.network_driver = network_driver
    # Fresh per-instance state (shadows the class-level 'links' default).
    self.links = []
    self.net = ""
    self.volume = ""
def labels(self, one_off=False):
    """Return the docker label filters that identify this project's containers."""
    one_off_value = "True" if one_off else "False"
    return [
        '%s=%s' % (LABEL_PROJECT, self.name),
        '%s=%s' % (LABEL_ONE_OFF, one_off_value),
    ]
@classmethod
def nap_from_dicts(cls, username, password, name, service_dicts, client_list, use_networking=False, network_driver=None):
    """
    Construct a ServiceCollection from a list of dicts representing services.

    Namespaces service/container/host names with the user and project name,
    rewrites '{{container}}' placeholders in commands, assigns each service
    to a randomly chosen client, records it in the database, and appends the
    resulting Service objects to the returned project.
    """
    project = cls(name, [], client_list, use_networking=use_networking, network_driver=network_driver)
    project.net = Net(username, password)
    project.volume = Moosefs(username, password).volume
    if use_networking:
        remove_links(service_dicts)
    # Default container_name to the service name and namespace the hostname
    # so containers of different users/projects don't collide.
    for srv_dict in service_dicts:
        if not 'container_name' in srv_dict:
            srv_dict['container_name'] = srv_dict['name']
        srv_dict['hostname'] = username + '-' + name + '-' + srv_dict['container_name']
    # Replace '{{<container_name>}}' placeholders in commands with the
    # namespaced container names.
    for srv_dict in service_dicts:
        if 'command' in srv_dict:
            command = srv_dict['command']
            if "{{" in command:
                for s_dict in service_dicts:
                    before = s_dict['container_name']
                    after = username + "-" + name + "-" + before
                    before = "{{" + before + "}}"
                    command = command.replace(before, after)
                srv_dict['command'] = command
    for service_dict in sort_service_dicts(service_dicts):
        l = project.nap_get_links(service_dict)
        log.info('from_dicts service_dict: %s', service_dict)
        if len(l):
            # NOTE(review): appends to the class-level 'links' list, which is
            # shared across all Project instances -- confirm this is intended.
            cls.links.append(l);
        # Pick one of the first two clients at random for this service.
        index = random.randint(0,1)
        cc = client_list[index]
        # a = database(username, password)
        # service_dict['volumes_from'] = a.get_volume()
        # log.info(a.get_volume())
        # service_dict['volumes_from'] = database_update.get_volume(username, password)
        # service_dict['volumes_from'] = mfs_commands.get_volume(username, password)
        service_dict['volumes_from'] = project.volume
        print project.volume
        log.info(service_dict)
        volumes_from = project.nap_get_volumes_from(service_dict, cc)
        # net = Net(a.get_net())
        # net = project.nap_net(service_dict, username, password)
        net = project.net
        database_update.create_service(username, password, service_dict['container_name'], index, name)
        log.info("===============")
        # Namespace the service and container names with user and project.
        service_dict['name'] = username + "-" + name + "-" + service_dict['name']
        service_dict['container_name'] = username + "-" + name + "-" + service_dict['container_name']
        # Ensure '4200' is always present in the service's ports list.
        if 'ports' in service_dict:
            ports = service_dict['ports']
            if not '4200' in ports:
                ports.append('4200')
            service_dict['ports'] = ports
        else:
            ports = []
            ports.append('4200')
            service_dict['ports'] = ports
        log.info(service_dict)
        project.services.append(
            Service(
                client_list=cc,
                project=name,
                use_networking=use_networking,
                links=[],
                net=net,
                volumes_from=volumes_from,
                **service_dict))
    return project
@property
def service_names(self):
    """The name of every service in this project, in definition order."""
    names = []
    for svc in self.services:
        names.append(svc.name)
    return names
def get_service(self, name):
    """
    Retrieve a service by name. Raises NoSuchService
    if the named service does not exist.
    """
    match = next((svc for svc in self.services if svc.name == name), None)
    if match is None:
        raise NoSuchService(name)
    return match
def validate_service_names(self, service_names):
    """
    Validate that the given list of service names only contains valid
    services. Raises NoSuchService (for the first unknown name, in input
    order) if one of the names is invalid.
    """
    known = self.service_names
    unknown = [candidate for candidate in service_names if candidate not in known]
    if unknown:
        raise NoSuchService(unknown[0])
def get_services(self, service_names=None, include_deps=False):
    """
    Returns a list of this project's services filtered by the provided
    list of names, or all services if service_names is None or [].

    If include_deps is specified, returns a list including the
    dependencies for service_names, in order of dependency.

    Preserves the original order of self.services where possible,
    reordering as needed to resolve dependencies.

    Raises NoSuchService if any of the named services do not exist.
    """
    # Empty/None selection means "all services"; assigning here avoids
    # the needless recursive self-call the original made.
    if not service_names:
        service_names = self.service_names
    requested = [self.get_service(name) for name in service_names]
    # Filter against self.services so the project's ordering is kept.
    services = [s for s in self.services if s in requested]
    if include_deps:
        services = reduce(self._inject_deps, services, [])
    # De-duplicate while preserving order. The original abused a list
    # comprehension for its append side effects; use a plain loop.
    uniques = []
    for service in services:
        if service not in uniques:
            uniques.append(service)
    return uniques
def nap_get_links(self, service_dict):
    """
    Extract link triples from a service dict.

    Returns a list of (service name, target name, alias) tuples like
    [(a, b, link1), (a, c, link2)]; alias is None when the link has no
    explicit ":alias" suffix. Consumes the 'links' key from service_dict
    as a side effect.
    """
    links = []
    if 'links' in service_dict:
        for link in service_dict.get('links', []):
            if ':' in link:
                service_name, link_name = link.split(':', 1)
            else:
                service_name, link_name = link, None
            # NOTE: the original wrapped this append in a try/except
            # NoSuchService that could never fire (list.append does not
            # raise it); the dead handler has been removed.
            links.append((service_dict.get('name'), service_name, link_name))
        del service_dict['links']
    return links
def nap_get_volumes_from(self, service_dict, client):
    """
    Resolve the 'volumes_from' entries of a service dict into
    VolumeFromSpec objects, consuming the key from service_dict.

    Each source is first resolved as one of this project's services;
    failing that, as a container id on the given client. A source that
    is neither raises ConfigurationError.
    """
    specs = []
    if 'volumes_from' not in service_dict:
        return specs
    for raw_entry in service_dict.get('volumes_from', []):
        spec = parse_volume_from_spec(raw_entry)
        try:
            source_service = self.get_service(spec.source)
            spec = VolumeFromSpec(source_service, spec.mode)
        except NoSuchService:
            try:
                source_container = Container.from_id(client, spec.source)
                spec = VolumeFromSpec(source_container, spec.mode)
            except APIError:
                raise ConfigurationError(
                    'Service "%s" mounts volumes from "%s", which is '
                    'not the name of a service or container.' % (
                        service_dict['name'],
                        spec.source))
        specs.append(spec)
    del service_dict['volumes_from']
    return specs
def start(self, service_names=None, **options):
    """Start the containers of every selected service."""
    selected = self.get_services(service_names)
    for service in selected:
        service.start(**options)
def stop(self, service_names=None, **options):
    """Stop the matching containers, in parallel."""
    def _halt(container):
        return container.stop(**options)
    parallel_execute(
        objects=self.containers(service_names),
        obj_callable=_halt,
        msg_index=lambda container: container.name,
        msg="Stopping"
    )
def pause(self, service_names=None, **options):
    """Pause the selected services, last service first."""
    selected = self.get_services(service_names)
    for service in reversed(selected):
        service.pause(**options)
def unpause(self, service_names=None, **options):
    """Resume every selected service, in project order."""
    selected = self.get_services(service_names)
    for service in selected:
        service.unpause(**options)
def kill(self, service_names=None, **options):
    """Kill the matching containers, in parallel."""
    def _kill(container):
        return container.kill(**options)
    parallel_execute(
        objects=self.containers(service_names),
        obj_callable=_kill,
        msg_index=lambda container: container.name,
        msg="Killing"
    )
def remove_stopped(self, service_names=None, **options):
    """Remove containers that are not currently running, in parallel."""
    candidates = self.containers(service_names, stopped=True)
    stopped = [container for container in candidates if not container.is_running]
    def _remove(container):
        return container.remove(**options)
    parallel_execute(
        objects=stopped,
        obj_callable=_remove,
        msg_index=lambda container: container.name,
        msg="Removing"
    )
def restart(self, service_names=None, **options):
    """Restart the containers of every selected service."""
    selected = self.get_services(service_names)
    for service in selected:
        service.restart(**options)
def build(self, service_names=None, no_cache=False, pull=False):
    """Build images for buildable services; skip image-based ones."""
    for service in self.get_services(service_names):
        if not service.can_be_built():
            log.info('%s uses an image, skipping' % service.name)
            continue
        service.build(no_cache, pull)
def up(self,
       service_names=None,
       start_deps=True,
       strategy=ConvergenceStrategy.changed,
       do_build=True,
       timeout=DEFAULT_TIMEOUT,
       detached=False):
    """
    Converge the selected services: (re)create/start their containers
    per the convergence plan, then bootstrap each resulting container.

    Per-container side effects: starts a shellinaboxd web terminal and
    creates an 'admin' account inside the container.
    """
    services = self.get_services(service_names, include_deps=start_deps)
    for service in services:
        service.remove_duplicate_containers()
    plans = self._get_convergence_plans(services, strategy)
    if self.use_networking:
        self.ensure_network_exists()
    # NOTE(review): 'list' shadows the builtin list() for the rest of
    # this method — consider renaming (not changed here).
    list = [
        container
        for service in services
        for container in service.execute_convergence_plan(
            plans[service.name],
            do_build=do_build,
            timeout=timeout,
            detached=detached
        )
    ]
    log.info("========")
    for item in list:
        # Launch a shellinabox daemon (web terminal) in the container.
        tt = item.client.exec_create(container=item.name, cmd='shellinaboxd -t -b')
        item.client.exec_start(exec_id=tt, detach=True)
        # tt = item.client.exec_create(container=item.name, cmd='/bin/bash -c \"echo \\\"%s %s\\\" >> /etc/hosts\"' % (item.ip, item.name))
        # item.client.exec_start(exec_id=tt, detach=True)
        # command = 'useradd admin && echo -e "admin\nadmin" | passwd <PASSWORD>'
        # tttt = item.client.exec_create(container=item.name, cmd='touch /useradd', stdout=True)
        # item.client.exec_start(exec_id=tttt, detach=True)
        # ttt = item.client.exec_create(container=item.name, cmd="/bin/bash -c \"echo \\\"hello\\\"\\\" > /hello\"")
        # ttt = item.client.exec_create(container=item.name, cmd='/bin/bash -c "echo \\"useradd admin && echo -e \\\"admin\\\\nadmin\\\" | passwd admin\\" > /useradd"')
        # useradd admin && echo -e adminnadmin | passwd <PASSWORD>
        # NOTE(review): creates a hard-coded admin/admin account in every
        # container — confirm this is acceptable for this deployment.
        ttt = item.client.exec_create(container=item.name, cmd='/bin/bash -c \"useradd admin && echo -e \\\"admin\\\\nadmin\\\" | passwd admin\"')
        item.client.exec_start(exec_id=ttt, detach=True, stream=True, tty=True)
    # for ll in Project.links:
    #     for link in ll:
    #         a = link[0]
    #         b = link[1]
    #         cl = link[2]
    #
    #         for c in list:
    #             if c.name == a:
    #                 ca = c
    #             if c.name == b:
    #                 cb = c
    #
    #         hostlist = ca.client.base_url.split(':')
    #         hostname = hostlist[1].split('//')[1]
    #         # log.info('up- hostname: ' + hostname)
    #         username = 'root'
    #         paramiko.util.log_to_file('syslogin.log')
    #
    #         client = paramiko.SSHClient()
    #         client.load_system_host_keys()
    #         private_key = os.path.expanduser('/home/monkey/.ssh/id_rsa')
    #         key = paramiko.RSAKey.from_private_key_file(private_key)
    #         client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    #
    #         client.connect(hostname=hostname, username=username, pkey=key)
    #         client.exec_command('echo "' + cb.ip + ' ' + cb.name + '" >> /var/lib/docker/containers/' + ca.id + '/hosts')
    #         client.exec_command('echo "' + cb.ip + ' ' + cl + '" >> /var/lib/docker/containers/' + ca.id + '/hosts')
    # for c in list:
    #     log.info('up- container: %s', c)
    #     log.info('up- container name: ' + c.name)
    #     log.info('up- container id: ' + c.id)
    #     log.info('up- container ip: ' + c.ip)
    #     #client.exec_command('echo "' + c.ip + ' ' + c.name + '" >> /var/lib/docker/containers/' + list[0].id + '/hosts')
| |
# ===========================
# Imports required libraries
# ===========================
import pygame
import random
from sys import exit
# =============================================
# Sets up window - might move to display class
# =============================================
print("Setting up window")
pygame.init()
# 375x500 portrait play field; all sprite coordinates assume this size.
screen = pygame.display.set_mode((375, 500))
pygame.display.set_caption("Games Design Project")
pygame.display.flip()
print("Window is setup")
# =============================================
# Class for handling control inputs - not done
# =============================================
class Controls:
    """Routes raw keyboard state to menu navigation or in-game movement."""

    # Gets inputted key for processing - mainly menu inputs.
    # NOTE(review): defined without self/@staticmethod; works only when
    # called through the class, e.g. Controls.menuButton(keys).
    def menuButton(selectedChoice):
        print("Menu Button validation starting")
        # Menu keys are only meaningful outside an active game (menu 1).
        if Menu.currentMenu != 1:
            print("Player is not in game")
            if selectedChoice[pygame.K_1]:
                print("Button 1 pressed")
                # Key 1: back to main menu from death/help screens,
                # or start the game from the main menu.
                if Menu.currentMenu == 5 or Menu.currentMenu == 2 or Menu.currentMenu == 3:
                    print("Player is in die or help screen, changing to main menu")
                    Menu.displayMainMenu()
                elif Menu.currentMenu == 0:
                    print("Player is in menu screen, changing to game menu")
                    Menu.displayGameMenu()
                else:
                    print("Did not expect key input: 1")
            elif Menu.currentMenu == 0:
                # Remaining options exist only on the main menu:
                # 2 = help, 4 = quit.
                if selectedChoice[pygame.K_2]:
                    print("Detected Help Menu Press")
                    Menu.displayHelpMenu()
                elif selectedChoice[pygame.K_4]:
                    pygame.quit()
                else:
                    print("Unexpected input")

    # processes inputs primarily used in the game.
    def gameControls(keyInputs):
        print("Game button validation starting")
        if Menu.currentMenu == 1:
            print("Player is in game")
            # A/D move the player 10 pixels left/right and run one
            # frame of game logic via updateGameMenu.
            if keyInputs[pygame.K_a]:
                # noinspection PyTypeChecker
                Menu.updateGameMenu(Characters.playerPos - 10)
            elif keyInputs[pygame.K_d]:
                # noinspection PyTypeChecker
                Menu.updateGameMenu(Characters.playerPos + 10)
        else:
            print("Game is currently not being played, ignoring inputs.")
# =================================================
# Class for anything display related - Basics done
# =================================================
class Display:
    """Thin wrappers around the pygame drawing calls used by the menus."""

    # Pushes any queued drawing to the visible surface.
    def update():
        pygame.display.update()

    # Renders textStr with the given font file/size/colour at location.
    def displayText(textStr, font, size, colour, location):
        type_face = pygame.font.Font(font, size)
        rendered = type_face.render(textStr, True, colour)
        screen.blit(rendered, location)

    # Paints the given image at the top-left corner as the background.
    def setBackground(image):
        screen.blit(pygame.image.load(image), (0, 0))

    # Clears the screen and repaints the standard background.
    def createBlank():
        screen.fill(pygame.Color("black"))
        Display.setBackground("files/background.png")
# ==========================================================
# Laser class - for displaying and processing hit detection
# ==========================================================
class Lasers:
    """Laser sprites fired by enemies, plus their per-frame bookkeeping."""

    enemyWidth = 50
    laserWidth = 3
    laserLength = 37
    # Maps a random string key to its live Laser sprite.
    laserPos = {}

    class Laser(pygame.sprite.Sprite):
        # Creates a new laser sprite centred at (x, y).
        def __init__(self, x, y):
            pygame.sprite.Sprite.__init__(self)
            self.image = pygame.image.load("files/laser.png").convert_alpha()
            self.mask = pygame.mask.from_surface(self.image)
            self.rect = self.image.get_rect(center=(x, y))

        # Moves the laser down by y pixels and returns its new y position.
        def moveBy(self, y):
            # BUG FIX: previously also incremented Characters.playerPos
            # (copy-paste from Characters.moveBy), corrupting the tracked
            # player position every time a laser moved.
            self.rect.move_ip(0, y)
            return self.rect.y

    # Advances every laser by 10px and drops all that left the screen.
    def displayLaser():
        offscreen = []
        for key, laser in Lasers.laserPos.items():
            if laser.moveBy(10) >= 500:
                offscreen.append(key)
        # BUG FIX: remove every off-screen laser, not just the last one
        # seen; the old single-slot 'toRemove' leaked the rest for a frame.
        for key in offscreen:
            Lasers.laserPos.pop(key)

    # NOTE(review): stale — appears to predate the sprite rewrite.
    # laserPos now holds Laser sprites, so values["x"] would raise
    # TypeError if this were called; displayLaser() does the per-frame
    # movement instead. Kept for interface compatibility.
    def processAutoMovement():
        for item, values in Lasers.laserPos.items():
            newPos = {"x": values["x"], "y": values["y"] + 10}
            Lasers.laserPos[item] = newPos

    # Spawns a laser sprite at (x, y) and registers it for collisions.
    def new(x: int, y: int):
        key = str(random.random())
        Lasers.laserPos[key] = Lasers.Laser(x, y)
        sprites.add(Lasers.laserPos[key])
        laserCollisions.add(Lasers.laserPos[key])
# ================================================
# Enemy class - for moving enemies and displaying
# ================================================
class Enemies:
    """Spawning, movement and firing logic for the enemy ships."""

    # Maps enemy id (as str) to {"x", "y", "WaitTime"}.
    enemyPos = {}
    # Ids of enemies currently flying off the bottom of the screen.
    enemyMoving = []
    # Shared enemy image, loaded once at class-definition time.
    enemy = pygame.image.load("files/enemy.png")

    # Registers a new enemy at (x, y) with a short pre-fire delay.
    def new(x: int, y: int, num: int):
        Enemies.enemyPos[str(num)] = {"x": x, "y": y, "WaitTime": Game.getCurrentTime() + 10}

    # Forgets the given enemy.
    def remove(enemy):
        Enemies.enemyPos.pop(enemy)

    # Repositions an enemy, preserving its firing delay.
    def move(x: int, y: int, enemy, timeLeft):
        Enemies.enemyPos[str(enemy)] = {"x": x, "y": y, "WaitTime": timeLeft}

    # Blits every live enemy onto the screen.
    def displayAll():
        for key, value in Enemies.enemyPos.items():
            screen.blit(Enemies.enemy, (value["x"], value["y"]))

    # Gives each enemy that is not flying away a random chance to fire.
    def CalculateIfShouldFire():
        for key, value in Enemies.enemyPos.items():
            if key in Enemies.enemyMoving:
                continue
            if random.randrange(1, 30) == 7:
                Lasers.new(value["x"] + (Lasers.enemyWidth / 2), value["y"] + (65 - Lasers.laserLength))

    # Occasionally sends one eligible enemy flying off the screen.
    def calculateMovements():
        if len(Enemies.enemyPos) > 0:
            if random.randrange(0, 50) == 5:
                # BUG FIX: randrange's upper bound is exclusive, so the
                # highest-numbered enemy could never be picked; include it.
                enemyToMove = random.randrange(1, len(Enemies.enemyPos) + 1)
                if str(enemyToMove) not in Enemies.enemyMoving:
                    values = Enemies.enemyPos.get(str(enemyToMove))
                    if values is not None and values["WaitTime"] < Game.getCurrentTime():
                        Enemies.enemyMoving.append(str(enemyToMove))

    # True when x is at least 50px clear of every existing enemy.
    def checkIfInArea(x: int):
        if len(Enemies.enemyPos) > 0:
            for key, value in Enemies.enemyPos.items():
                # BUG FIX: check every enemy before declaring the spot
                # free instead of deciding after the first one.
                if value["x"] - 50 < x < value["x"] + 50:
                    return False
            return True
        else:
            print("failsafe")
            return True

    # Decides if it should spawn a new enemy and picks a suitable position.
    def processPossibleNewEnemies():
        if len(Enemies.enemyPos) < 3:
            if random.randrange(0, 30, 1) == 5:
                # Fill the lowest-numbered free slot (1, then 2, then 3),
                # matching the original nested-if behaviour.
                for slot in (1, 2, 3):
                    if str(slot) not in Enemies.enemyPos:
                        candidate = random.randrange(0, 325)
                        if Enemies.checkIfInArea(candidate):
                            Enemies.new(candidate, 20, slot)
                        else:
                            print("Overlaps, skipping...")
                        break

    # Advances the fly-away enemies and drops the ones that left the screen.
    def processAutoMovement():
        finished = []
        # BUG FIX: the original removed from enemyMoving while iterating
        # it (skipping entries) and remembered only one enemyPos removal
        # per frame; iterate a copy and collect all removals instead.
        for item in list(Enemies.enemyMoving):
            value = Enemies.enemyPos.get(str(item))
            if value is None:
                continue
            if value["y"] >= 500:
                Enemies.enemyMoving.remove(item)
                finished.append(str(item))
            else:
                Enemies.move(value["x"], value["y"] + 10, item, value["WaitTime"])
        for key in finished:
            Enemies.enemyPos.pop(key)
# ================================================================
# Collisions class - for detecting any collisions between sprites
# ================================================================
class Collisions:
    """Pixel-perfect collision checks between the player and lasers."""

    # Ends the round when any laser's mask overlaps the player's mask.
    def detectHit():
        hits = pygame.sprite.spritecollide(player, laserCollisions, False, collided=pygame.sprite.collide_mask)
        if hits:
            Menu.displayDeadScreen()
# ======================================================
# Characters class - for basic movement and displaying
# ======================================================
class Characters(pygame.sprite.Sprite):
    """The player sprite and its horizontal movement."""

    # Tracked x position of the player's centre.
    playerPos = 150
    playerHeight = 0
    playerWidth = 0
    # Fixed y position of the player's centre.
    playerY = 425

    # Creates the player sprite at the tracked position.
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load("files/player.png").convert_alpha()
        self.mask = pygame.mask.from_surface(self.image)
        self.rect = self.image.get_rect(center=(Characters.playerPos, Characters.playerY))

    # Legacy helper: moves the player to the absolute x position given.
    def move(self, x):
        # BUG FIX: previously delegated to the global `player` instead of
        # self, so the method only worked for the module-level instance.
        self.moveBy(x - Characters.playerPos)

    # Moves the player x pixels horizontally and updates the tracker.
    def moveBy(self, x):
        Characters.playerPos = Characters.playerPos + x
        self.rect.move_ip(x, 0)
# ====================================================
# Class for changing menus - Only main menu completed
# ====================================================
class Menu:
    """Builds each screen of the game and tracks which one is active."""

    # Variable for keeping track of the current screen displayed
    # 0 = Main menu
    # 1 = Game is being played
    # 2 = Help menu
    # 5 = game end screen
    currentMenu = None

    # Sets up text objects for the main menu.
    # NOTE(review): the option numbering skips (3) — presumably a
    # removed menu entry; confirm before renumbering.
    def displayMainMenu():
        Menu.currentMenu = 0
        Display.createBlank()
        Display.displayText("Space Avoider", "files/BebasNeue-Regular.ttf", 80, (0, 0, 0), (5, 70))
        Display.displayText("(1) Play", "files/BebasNeue-Regular.ttf", 50, (0, 0, 0), (80, 200))
        Display.displayText("(2) How To Play", "files/BebasNeue-Regular.ttf", 50, (0, 0, 0), (80, 270))
        Display.displayText("(4) Quit", "files/BebasNeue-Regular.ttf", 50, (0, 0, 0), (80, 410))
        Display.update()

    # Sets up the text objects for the help menu.
    def displayHelpMenu():
        Menu.currentMenu = 2
        Display.createBlank()
        Display.displayText("How To Play", "files/BebasNeue-Regular.ttf", 80, (0, 0, 0), (30, 70))
        Display.displayText("The aim of the game is to dodge bullets",
                            "files/BebasNeue-Regular.ttf", 25, (0, 0, 0), (5, 190))
        Display.displayText("from enemy ships for as long as possible.",
                            "files/BebasNeue-Regular.ttf", 25, (0, 0, 0), (5, 215))
        Display.displayText("When you get hit, you lose the game.",
                            "files/BebasNeue-Regular.ttf", 25, (0, 0, 0), (5, 240))
        Display.displayText("Move Left: A", "files/BebasNeue-Regular.ttf", 40, (0, 0, 0), (80, 270))
        Display.displayText("Move Right: D", "files/BebasNeue-Regular.ttf", 40, (0, 0, 0), (80, 310))
        # Display.displayText("Pause: P", "files/BebasNeue-Regular.ttf", 40, (0, 0, 0), (80, 350))
        Display.displayText("(1) Main Menu", "files/BebasNeue-Regular.ttf", 50, (0, 0, 0), (80, 420))
        Display.update()

    # sets up the game to display the game menu.
    def displayGameMenu():
        Menu.currentMenu = 1
        Display.createBlank()
        # Re-centre the player before the round starts.
        player.move(150)
        Game.setupTimer()
        Display.displayText("Time: " + str(Game.getCurrentTime()), "files/BebasNeue-Regular.ttf", 40, (0, 0, 0), (0, 0))
        Display.update()

    # Triggers the respective functions related to the logic of the game:
    # one frame — move the player, advance sprites and enemies, redraw,
    # then test for a lethal hit.
    def updateGameMenu(pos):
        Display.createBlank()
        player.move(pos)
        sprites.update()
        Lasers.displayLaser()
        sprites.draw(screen)
        Enemies.displayAll()
        Enemies.calculateMovements()
        Enemies.processAutoMovement()
        Enemies.CalculateIfShouldFire()
        Enemies.processPossibleNewEnemies()
        Display.displayText("Time: " + str(Game.getCurrentTime()), "files/BebasNeue-Regular.ttf", 40, (0, 0, 0), (0, 0))
        pygame.display.flip()
        # Hit detection runs last, against this frame's final positions.
        Collisions.detectHit()

    # Displays the death screen with the survival time.
    def displayDeadScreen():
        Menu.currentMenu = 5
        Display.createBlank()
        Display.displayText("Time survived:" + str(Game.getCurrentTime()), "files/BebasNeue-Regular.ttf", 40, (0, 0, 0),
                            (0, 40))
        Display.displayText("seconds.", "files/BebasNeue-Regular.ttf", 40, (0, 0, 0), (0, 80))
        Display.update()
# =========================
# Class for playing sounds
# =========================
class Sounds:
    """Background-music control built on pygame's mixer."""

    # True once the music file has been loaded into the mixer.
    musicSetup = False

    # Initialises the sound subsystem.
    def prepareMixer():
        pygame.mixer.init()

    # Loads the background track into memory.
    def SetupMusic():
        pygame.mixer.music.load("files/backgroundMusic.ogg")
        Sounds.musicSetup = True

    # Loops the track forever, once it has been loaded.
    def PlayMusic():
        if not Sounds.musicSetup:
            return
        pygame.mixer.music.play(-1)

    # Halts playback, if music was ever set up.
    def StopMusic():
        if not Sounds.musicSetup:
            return
        pygame.mixer.music.stop()
# =================================
# Class related to game operations
# =================================
class Game:
startTime = 0
wave = 0
# Sets the start time for the timer
def setupTimer():
Game.startTime = pygame.time.get_ticks()
# Calculates | |
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, absolute_import)
from julia import node
# Equipment ids reported in the game's stream data -> display names.
# NOTE(review): several names repeat ('CS Gas' at 18/24/45, 'Stinger' at
# 25/44, 'Flashbang' at 23/46) — presumably distinct ids for carried vs.
# thrown/launched variants; confirm against the mod source before merging.
EQUIPMENT = {
    '0': 'None',
    '1': 'M4 Super90',
    '2': 'Nova Pump',
    '3': 'Shotgun',
    '4': 'Less Lethal Shotgun',
    '5': 'Pepper-ball',
    '6': 'Colt M4A1 Carbine',
    '7': 'AK-47 Machinegun',
    '8': 'GB36s Assault Rifle',
    '9': 'Gal Sub-machinegun',
    '10': '9mm SMG',
    '11': 'Suppressed 9mm SMG',
    '12': '.45 SMG',
    '13': 'M1911 Handgun',
    '14': '9mm Handgun',
    '15': 'Colt Python',
    '16': 'Taser Stun Gun',
    '17': 'VIP Colt M1911 Handgun',
    '18': 'CS Gas',
    '19': 'Light Armor',
    '20': 'Heavy Armor',
    '21': 'Gas Mask',
    '22': 'Helmet',
    '23': 'Flashbang',
    '24': 'CS Gas',
    '25': 'Stinger',
    '26': 'Pepper Spray',
    '27': 'Optiwand',
    '28': 'Toolkit',
    '29': 'Door Wedge',
    '30': 'C2 (x3)',
    '31': 'The Detonator',
    '32': 'Zip-cuffs',
    '33': 'IAmCuffed',
    '34': 'Colt Accurized Rifle',
    '35': '40mm Grenade Launcher',
    '36': '5.56mm Light Machine Gun',
    '37': '5.7x28mm Submachine Gun',
    '38': 'Mark 19 Semi-Automatic Pistol',
    '39': '9mm Machine Pistol',
    '40': 'Cobra Stun Gun',
    '41': 'Ammo Pouch',
    '42': 'No Armor',
    '43': 'Night Vision Goggles',
    '44': 'Stinger',
    '45': 'CS Gas',
    '46': 'Flashbang',
    '47': 'Baton',
}
# Ammunition ids reported in the stream data -> internal ammo class names.
# Suffixes appear to follow the game's convention (FMJ/JHP round types,
# SG = shotgun) — confirm against the mod source.
AMMO = {
    '0': 'None',
    '1': 'M4Super90SGAmmo',
    '2': 'M4Super90SGSabotAmmo',
    '3': 'NovaPumpSGAmmo',
    '4': 'NovaPumpSGSabotAmmo',
    '5': 'LessLethalAmmo',
    '6': 'CSBallLauncherAmmo',
    '7': 'M4A1MG_JHP',
    '8': 'M4A1MG_FMJ',
    '9': 'AK47MG_FMJ',
    '10': 'AK47MG_JHP',
    '11': 'G36kMG_FMJ',
    '12': 'G36kMG_JHP',
    '13': 'UZISMG_FMJ',
    '14': 'UZISMG_JHP',
    '15': 'MP5SMG_JHP',
    '16': 'MP5SMG_FMJ',
    '17': 'UMP45SMG_FMJ',
    '18': 'UMP45SMG_JHP',
    '19': 'ColtM1911HG_JHP',
    '20': 'ColtM1911HG_FMJ',
    '21': 'Glock9mmHG_JHP',
    '22': 'Glock9mmHG_FMJ',
    '23': 'PythonRevolverHG_FMJ',
    '24': 'PythonRevolverHG_JHP',
    '25': 'TaserAmmo',
    '26': 'VIPPistolAmmo_FMJ',
    '27': 'ColtAR_FMJ',
    '28': 'HK69GL_StingerGrenadeAmmo',
    '29': 'HK69GL_FlashbangGrenadeAmmo',
    '30': 'HK69GL_CSGasGrenadeAmmo',
    '31': 'HK69GL_TripleBatonAmmo',
    '32': 'SAWMG_JHP',
    '33': 'SAWMG_FMJ',
    '34': 'FNP90SMG_FMJ',
    '35': 'FNP90SMG_JHP',
    '36': 'DEHG_FMJ',
    '37': 'DEHG_JHP',
    '38': 'TEC9SMG_FMJ',
}
STREAM_PATTERN = {
# Unique identifier for this particular data set
'0': {
'type': node.StringPatternNode,
'name': 'tag',
'required': True,
},
# Mod version
'1': {
'type': node.StringPatternNode,
'name': 'version',
'required': True,
},
# Join port number
'2': {
'type': node.NumericPatternNode,
'name': 'port',
'required': True,
},
# Server time in the format of Unix Timestamp
# The server declares itself to be in UTC timezone, which makes this value untrustworthy
# On the other hand this is an excellent argument value for hashing
'3': {
'type': node.NumericPatternNode,
'name': 'timestamp',
'required': True,
},
# Last 32 bits of an md5 encoded request signature hash
# The original hash is a product of the following parameters:
# `server key` + `join port` + `timestamp`
'4': {
'type': node.StringPatternNode,
'name': 'hash',
'required': True,
},
# Game title
'5': {
'type': node.MappingPatternNode,
'name': 'gamename',
'required': False,
'default': '0',
'table': {
'0': 'SWAT 4',
'1': 'SWAT 4X',
'2': 'SAS',
'3': 'SSF Realism Mod',
'4': 'H.S.M.E',
}
},
# Game version
'6': {
'type': node.StringPatternNode,
'name': 'gamever',
'required': True,
},
# Hostname
'7': {
'type': node.StringPatternNode,
'name': 'hostname',
'required': True,
},
# Gametype
'8': {
'type': node.MappingPatternNode,
'name': 'gametype',
'required': False,
'default': '0',
'table': {
'0': 'Barricaded Suspects',
'1': 'VIP Escort',
'2': 'Rapid Deployment',
'3': 'CO-OP',
'4': 'Smash And Grab',
#'5': 'CO-OP QMM',
}
},
# Map
'9': {
'type': node.MappingPatternNode,
'name': 'mapname',
'required': False,
'default': '0',
'table': {
'-1': 'Unknown Map',
'0': 'A-Bomb Nightclub',
'1': 'Brewer County Courthouse',
'2': 'Children of Taronne Tenement',
'3': 'DuPlessis Diamond Center',
'4': 'Enverstar Power Plant',
'5': 'Fairfax Residence',
'6': 'Food Wall Restaurant',
'7': 'Meat Barn Restaurant',
'8': 'Mt. Threshold Research Center',
'9': 'Northside Vending',
'10': 'Old Granite Hotel',
'11': 'Qwik Fuel Convenience Store',
'12': 'Red Library Offices',
'13': 'Riverside Training Facility',
'14': 'St. Michael\'s Medical Center',
'15': 'The Wolcott Projects',
'16': 'Victory Imports Auto Center',
'17': '-EXP- Department of Agriculture',
'18': '-EXP- Drug Lab',
'19': '-EXP- Fresnal St. Station',
'20': '-EXP- FunTime Amusements',
'21': '-EXP- Sellers Street Auditorium',
'22': '-EXP- Sisters of Mercy Hostel',
'23': '-EXP- Stetchkov Warehouse',
# Custom maps (Thanks to ||ESA||RIddIK for all the hard work getting this list done)
'24': 'Untitled', # common name for all the "Unitled" maps
'25': 'Fairfaxe Reloaded', # SP-FAIRFAX-Reloaded.s4m
'26': 'Final Crack Down, COOP', # FinalCrackDown_COOP.s4m
'27': 'ApartmentNew', # SP-ApartmentNew.s4m
'28': 'Saint-Paul Asylum', # MP-Asylum.s4m, SP-St_Paul_Asylum_v1_0.s4m
'29': '[c=ffff00]ESA\'s [c=1e90ff]Foodwall Edit', # MP-ESA_FoodWall.s4m
'30': 'La Mina v.1.5', # MP-LA_MINA_15.s4m
'31': 'Operation Apollo COOP 1.1 - FIX', # SP-Apollo_COOP-FIX.s4m
'32': 'Cave Complex', # SP-CaveComplex.s4m
'33': 'Predator2', # SP-SWAT4Predator2.s4m
'34': '{EP}Matt´s Power Plant TSS', # SP-EPpower-TSS.s4m
'35': 'Qwik Fuel (Desrat\'s SAG)', # MP-ConvenienceStore-smash2.s4m
'36': 'Black Water-TTC 1.1', # MP-BlackWater.s4m, SP-BlackWater.s4m
'37': 'The Watercrip', # The_Watercrip.s4m
'38': '2940 Enemy Territory MP', # MP-2940_Enemy_Territory.s4m
'39': 'Newfort (Revision 24) TSS', # SP-Newfort24-TSS.s4m
'40': '-EXP- Drug Lab-RMX', # SP-DrugLab-RMX.s4m
'41': 'Riverside Training (Desrat\'s SAG)', # MP-Training-smash2.s4m
'42': 'The Building', # SP-TheBuilding.s4m
'43': 'Newfort (Revision 24)', # SP-Newfort24.s4m
'44': 'Wolcott (Desrat\'s SAG)', # MP-ArmsDeal-smash2.s4m
'45': 'Operation Apollo 1.1 - FIXED', # MP-Apollo-FIX.s4m
'46': 'Office Space V2.0', # SP-OfficeSpacev2.s4m, MP-OfficeSpacev2.s4m
'47': 'panic room', # SP-Panic-Room.s4m
'48': 'mistero18-byilmassacratore', # Mistero18.s4m
'49': 'The Phoenix Club', # SP-PhoenixClub.s4m, MP-PhoenixClub.s4m
'50': 'The Hive (VIP)', # MP-Hive.s4m
'51': 'U-273', # MP-U273.s4m, SP-U273.s4m
'52': 'The Manor - 1.1 - 2013', # Sp-TheManor.s4m
'53': '-EXP- Newfort (Revision 27)', # SP-Newfort27EXP.s4m
'54': 'City Streets 1.0', # MP-CityStreets.s4m
'55': 'LA City Hall', # MP-City_Hall_VIP.s4m, MP-City_Hall.s4m, SP-City_Hall.s4m
'56': '-MODv- California Security Bank - FIXED', # MP-Bank-FIX.s4m, SP-Bank-FIX.s4m
'57': 'Car\'s dealer v1.2', # SP-CARsDEALER.s4m
'58': 'Mout McKenna 1.0', # MP-MoutMckenna.s4m, SP-MoutMckenna.s4m
'59': 'Desert ops -Village- 1.0', # SP-DesertOpsVillage.s4m, MP-DesertOpsVillage.s4m
'60': 'INTERVAL - 17 - Rmx', # SP-INTERVAL-17-rmx.s4m
'61': 'Ashes and Ghosts -Night-', # MP-Ashes_And_Ghosts_Night.s4m, SP-Ashes_And_Ghosts_Night.s4m
'62': 'Penthouse', # SP-Penthouse.s4m
'63': 'Civil Unrest', # MP-Civil_Unrest.s4m, SP-Civil_Unrest.s4m
'64': 'Storm Front', # SP-StormFront.s4m
'65': 'Johnson Residence', # SP-JohnsonResidence.s4m
'66': 'Operation Prison Break', # SP-Prison.s4m, MP-Prison.s4m
'67': 'C-Block', # MP-CBlock.s4m, MP-CBlock1_1.s4m
'68': 'The Hive 1.1', # SP-Hive1_1.s4m
'69': 'BattleShips', # BattleShips.s4m
'70': 'Children of Taronne (Desrat\'s SAG)', # MP-Tenement-smash2.s4m
'71': 'Fast Break - Through', # MP-FastBreak-Through.s4m
'72': 'A-Bomb (Desrat\'s SAG)', # MP-ABomb-smash2.s4m
'73': 'Ashes and Ghosts -Day-', # SP-Ashes_And_Ghosts_Day.s4m
'74': 'ESA\'s 3or1', # MP-ESA-3or1.s4m
'75': 'MP-Terminal', # MP-Terminal.s4m
'76': 'The Entrepot', # SP-Entrepot.s4m
'77': 'E.T.E.R. Training Center', # MP-Eter_trainingcenter.s4m
'78': 'Subway Station v1.0', # MP-Sub.s4m, SP-Sub.s4m
'79': 'Stuck in the Woods', # SP-StuckInTheWoods.s4m
'80': '-EXP- Sisters of Mercy-RMX', # SP-SistersofMercy-RMX.s4m
'81': 'Research Center (Desrat\'s SAG)', # MP-DNA-smash2.s4m
'82': 'Brewer County (Desrat\'s SAG)', # MP-Courthouse-smash2.s4m
'83': 'Stuck in the woods', # MP-StuckInTheWoods.s4m
'84': '{EP}Matt´s Drugs Deal TSS', # SP-EPdrugsdeal-TSS.s4m
'85': 'Snake\'s loft', # SP-Snake-loft.s4m
'86': 'NewfortBeta', # MP-NewfortBetaV2.s4m
'87': 'BLUES CLUB', # MP-BCv1.s4m
'88': 'Fairfax Residence (Desrat\'s SAG)', # MP-FairfaxResidence-smash2.s4m
'89': 'Construction', # SP-Construction.s4m, MP-Construction.s4m
'90': 'Sky Tower', # SP-SkyTower.s4m
'91': 'Food Wall (Desrat\'s SAG)', # MP-Foodwall-smash2.s4m
'92': 'California Security Bank', # SP-Bank.s4m
'93': 'Dark Waters', # MP-DarkWaters.s4m
'94': 'Operation Apollo COOP 1.1', # SP-Apollo_COOP.s4m
'95': 'FAYA\'s REFUGEES v1.0', # SP-FAYAsREFUGEES.s4m
'96': 'Victory Imports (Desrat\'s SAG)', # MP-AutoGarage-smash2.s4m
'97': 'Residential Ops.', # SP-ResidentialOps.s4m
'98': '2940 Enemy Territory', # SP-2940_Enemy_Territory.s4m
'99': 'Clear - Room Service', # MP-Clear.s4m
'100': 'Tantive IV', # MP-TantiveIV.s4m
'101': 'Red Library (Desrat\'s SAG)', # MP-RedLibrary-smash2.s4m
'102': 'Dark Scarlet Restaurant', # SP-Dark_Scarlet.s4m
'103': 'LA MINA', # MP-LA_MINA.s4m
'104': 'Precinct HQ 1.1', # SP-PrecinctHQ.s4m, MP-PrecinctHQ.s4m
'105': 'Novatech\'s Building', # SP-NOVATECHsBUILDING.s4m
'106': 'Mout McKenna Snow 1.0', # MP-MoutMckennaSnow.s4m, SP-MoutMckennaSnow.s4m
'107': '(SEALMAP)Desert_Dust', # MP-Desert_Dust.s4m, SP-Desert_Dust.s4m, MP2-Desert_Dust.s4m
'108': 'Mogadishu Mile 1.0', # MP-DesertOps2.s4m
'109': 'ATL Convention Center', # MP-ATLConvention.s4m
'110': 'Gangster_Hangout', # MP-GangsterHangout.s4m
'111': '(SEALMAP)Renovate TSS', # SP-Renovate-TSS.s4m
'112': 'Brentwood Reloaded', # SP-BrentReloaded.s4m, MP-BrentReloaded.s4m
'113': 'Operation Apollo 1.1', # MP-Apollo.s4m, SP-Apollo.s4m
'114': 'The China Hotel', # SP-CHINA-HOTEL.s4m
'115': 'Mad Shopping', # SP-MadShopping.s4m, MP-MadShopping.s4m
'116': '(SEALMAP)School', # SP-School.s4m, MP-School.s4m
'117': 'Diamond Center (Desrat\'s SAG)', # MP-JewelryHeist-smash2.s4m
'118': 'Newfort2xSus', # SP-Newfort100Sus.s4m
'119': 'Ocean Avenue 112', # MP-Amityville_Horror_VIP.s4m, SP-Amityville_Horror.s4m, MP-Amityville_Horror.s4m
'120': '|ustt| Enemy Territory V2', # MP-USTT_Enemy_Territory2.s4m
'121': 'Project -SERO- 1.0', # MP-ProjectSero.s4m, SP-ProjectSero.s4m
'122': 'C-Block Taronne is back', # SP-CBlock.s4m
'123': 'Reality Simulation Logistic V1.0', # MP-Spedition.s4m
'124': 'Power Plant (Desrat\'s SAG)', # MP-PowerPlant-smash2.s4m
'125': '5455, Carlton Way', # SP-Getts.s4m, MP-Getts.s4m
'126': 'Assault On City Hall', # SP-CityHall.s4m, MP-CityHall.s4m
'127': 'Fy_Iceworld2005', # MP_Fy_iceworld2005.s4m
'128': 'Art Center 1.0', # SP-ArtCenter.s4m, MP-ArtCenter.s4m
'129': 'Wainwright Offices', # SP-Wainwright_Offices.s4m, MP-Wainwright_Offices.s4m
'130': 'Children of Tenement-RMX', # SP-Tenement-RMX.s4m
'131': 'Police Station 1.0 - 2013', # SP-PoliceStation.s4m
'132': 'Hotel Carlyle 2005 v.2.0', # SP-Carlyle2k5v2-0.s4m
'133': 'The | |
49, 'R', 'F') : 'RF2',
(131, 49, 'R', 'U') : 'UR0',
(131, 49, 'U', 'B') : 'UB2',
(131, 49, 'U', 'F') : 'UF2',
(131, 49, 'U', 'L') : 'UL2',
(131, 49, 'U', 'R') : 'UR2',
(135, 97, 'B', 'D') : 'DB2',
(135, 97, 'B', 'L') : 'LB2',
(135, 97, 'B', 'R') : 'RB2',
(135, 97, 'B', 'U') : 'UB2',
(135, 97, 'D', 'B') : 'DB0',
(135, 97, 'D', 'F') : 'DF0',
(135, 97, 'D', 'L') : 'DL0',
(135, 97, 'D', 'R') : 'DR0',
(135, 97, 'F', 'D') : 'DF2',
(135, 97, 'F', 'L') : 'LF2',
(135, 97, 'F', 'R') : 'RF2',
(135, 97, 'F', 'U') : 'UF2',
(135, 97, 'L', 'B') : 'LB0',
(135, 97, 'L', 'D') : 'DL2',
(135, 97, 'L', 'F') : 'LF0',
(135, 97, 'L', 'U') : 'UL2',
(135, 97, 'R', 'B') : 'RB0',
(135, 97, 'R', 'D') : 'DR2',
(135, 97, 'R', 'F') : 'RF0',
(135, 97, 'R', 'U') : 'UR2',
(135, 97, 'U', 'B') : 'UB0',
(135, 97, 'U', 'F') : 'UF0',
(135, 97, 'U', 'L') : 'UL0',
(135, 97, 'U', 'R') : 'UR0',
(136, 48, 'B', 'D') : 'DB1',
(136, 48, 'B', 'L') : 'LB1',
(136, 48, 'B', 'R') : 'RB1',
(136, 48, 'B', 'U') : 'UB1',
(136, 48, 'D', 'B') : 'DB1',
(136, 48, 'D', 'F') : 'DF1',
(136, 48, 'D', 'L') : 'DL1',
(136, 48, 'D', 'R') : 'DR1',
(136, 48, 'F', 'D') : 'DF1',
(136, 48, 'F', 'L') : 'LF1',
(136, 48, 'F', 'R') : 'RF1',
(136, 48, 'F', 'U') : 'UF1',
(136, 48, 'L', 'B') : 'LB1',
(136, 48, 'L', 'D') : 'DL1',
(136, 48, 'L', 'F') : 'LF1',
(136, 48, 'L', 'U') : 'UL1',
(136, 48, 'R', 'B') : 'RB1',
(136, 48, 'R', 'D') : 'DR1',
(136, 48, 'R', 'F') : 'RF1',
(136, 48, 'R', 'U') : 'UR1',
(136, 48, 'U', 'B') : 'UB1',
(136, 48, 'U', 'F') : 'UF1',
(136, 48, 'U', 'L') : 'UL1',
(136, 48, 'U', 'R') : 'UR1',
(140, 98, 'B', 'D') : 'DB1',
(140, 98, 'B', 'L') : 'LB1',
(140, 98, 'B', 'R') : 'RB1',
(140, 98, 'B', 'U') : 'UB1',
(140, 98, 'D', 'B') : 'DB1',
(140, 98, 'D', 'F') : 'DF1',
(140, 98, 'D', 'L') : 'DL1',
(140, 98, 'D', 'R') : 'DR1',
(140, 98, 'F', 'D') : 'DF1',
(140, 98, 'F', 'L') : 'LF1',
(140, 98, 'F', 'R') : 'RF1',
(140, 98, 'F', 'U') : 'UF1',
(140, 98, 'L', 'B') : 'LB1',
(140, 98, 'L', 'D') : 'DL1',
(140, 98, 'L', 'F') : 'LF1',
(140, 98, 'L', 'U') : 'UL1',
(140, 98, 'R', 'B') : 'RB1',
(140, 98, 'R', 'D') : 'DR1',
(140, 98, 'R', 'F') : 'RF1',
(140, 98, 'R', 'U') : 'UR1',
(140, 98, 'U', 'B') : 'UB1',
(140, 98, 'U', 'F') : 'UF1',
(140, 98, 'U', 'L') : 'UL1',
(140, 98, 'U', 'R') : 'UR1',
(141, 47, 'B', 'D') : 'DB2',
(141, 47, 'B', 'L') : 'LB2',
(141, 47, 'B', 'R') : 'RB2',
(141, 47, 'B', 'U') : 'UB2',
(141, 47, 'D', 'B') : 'DB0',
(141, 47, 'D', 'F') : 'DF0',
(141, 47, 'D', 'L') : 'DL0',
(141, 47, 'D', 'R') : 'DR0',
(141, 47, 'F', 'D') : 'DF2',
(141, 47, 'F', 'L') : 'LF2',
(141, 47, 'F', 'R') : 'RF2',
(141, 47, 'F', 'U') : 'UF2',
(141, 47, 'L', 'B') : 'LB0',
(141, 47, 'L', 'D') : 'DL2',
(141, 47, 'L', 'F') : 'LF0',
(141, 47, 'L', 'U') : 'UL2',
(141, 47, 'R', 'B') : 'RB0',
(141, 47, 'R', 'D') : 'DR2',
(141, 47, 'R', 'F') : 'RF0',
(141, 47, 'R', 'U') : 'UR2',
(141, 47, 'U', 'B') : 'UB0',
(141, 47, 'U', 'F') : 'UF0',
(141, 47, 'U', 'L') : 'UL0',
(141, 47, 'U', 'R') : 'UR0',
(145, 99, 'B', 'D') : 'DB0',
(145, 99, 'B', 'L') : 'LB0',
(145, 99, 'B', 'R') : 'RB0',
(145, 99, 'B', 'U') : 'UB0',
(145, 99, 'D', 'B') : 'DB2',
(145, 99, 'D', 'F') : 'DF2',
(145, 99, 'D', 'L') : 'DL2',
(145, 99, 'D', 'R') : 'DR2',
(145, 99, 'F', 'D') : 'DF0',
(145, 99, 'F', 'L') : 'LF0',
(145, 99, 'F', 'R') : 'RF0',
(145, 99, 'F', 'U') : 'UF0',
(145, 99, 'L', 'B') : 'LB2',
(145, 99, 'L', 'D') : 'DL0',
(145, 99, 'L', 'F') : 'LF2',
(145, 99, 'L', 'U') : 'UL0',
(145, 99, 'R', 'B') : 'RB2',
(145, 99, 'R', 'D') : 'DR0',
(145, 99, 'R', 'F') : 'RF2',
(145, 99, 'R', 'U') : 'UR0',
(145, 99, 'U', 'B') : 'UB2',
(145, 99, 'U', 'F') : 'UF2',
(145, 99, 'U', 'L') : 'UL2',
(145, 99, 'U', 'R') : 'UR2',
(147, 124, 'B', 'D') : 'DB0',
(147, 124, 'B', 'L') : 'LB0',
(147, 124, 'B', 'R') : 'RB0',
(147, 124, 'B', 'U') : 'UB0',
(147, 124, 'D', 'B') : 'DB2',
(147, 124, 'D', 'F') : 'DF2',
(147, 124, 'D', 'L') : 'DL2',
(147, 124, 'D', 'R') : 'DR2',
(147, 124, 'F', 'D') : 'DF0',
(147, 124, 'F', 'L') : 'LF0',
(147, 124, 'F', 'R') : 'RF0',
(147, 124, 'F', 'U') : 'UF0',
(147, 124, 'L', 'B') : 'LB2',
(147, 124, 'L', 'D') : 'DL0',
(147, 124, 'L', 'F') : 'LF2',
(147, 124, 'L', 'U') : 'UL0',
(147, 124, 'R', 'B') : 'RB2',
(147, 124, 'R', 'D') : 'DR0',
(147, 124, 'R', 'F') : 'RF2',
(147, 124, 'R', 'U') : 'UR0',
(147, 124, 'U', 'B') : 'UB2',
(147, 124, 'U', 'F') : 'UF2',
(147, 124, 'U', 'L') : 'UL2',
(147, 124, 'U', 'R') : 'UR2',
(148, 123, 'B', 'D') : 'DB1',
(148, 123, 'B', 'L') : 'LB1',
(148, 123, 'B', 'R') : 'RB1',
(148, 123, 'B', 'U') : 'UB1',
(148, 123, 'D', 'B') : 'DB1',
(148, 123, 'D', 'F') : 'DF1',
(148, 123, 'D', 'L') : 'DL1',
(148, 123, 'D', 'R') : 'DR1',
(148, 123, 'F', 'D') : 'DF1',
(148, 123, 'F', 'L') : 'LF1',
(148, 123, 'F', 'R') : 'RF1',
(148, 123, 'F', 'U') : 'UF1',
(148, 123, 'L', 'B') : 'LB1',
(148, 123, 'L', 'D') : 'DL1',
(148, 123, 'L', 'F') : 'LF1',
(148, 123, 'L', 'U') : 'UL1',
(148, 123, 'R', 'B') : 'RB1',
(148, 123, 'R', 'D') : 'DR1',
(148, 123, 'R', 'F') : 'RF1',
(148, 123, 'R', 'U') : 'UR1',
(148, 123, 'U', 'B') : 'UB1',
(148, 123, 'U', 'F') : 'UF1',
(148, 123, 'U', 'L') : 'UL1',
(148, 123, 'U', 'R') : 'UR1',
(149, 122, 'B', 'D') : 'DB2',
(149, 122, 'B', 'L') : 'LB2',
(149, 122, 'B', 'R') : 'RB2',
(149, 122, 'B', 'U') : 'UB2',
(149, 122, 'D', 'B') : 'DB0',
(149, 122, 'D', 'F') : 'DF0',
(149, 122, 'D', 'L') : 'DL0',
(149, 122, 'D', 'R') : 'DR0',
(149, 122, 'F', 'D') : 'DF2',
(149, 122, 'F', 'L') : 'LF2',
(149, 122, 'F', 'R') : 'RF2',
(149, 122, 'F', 'U') : 'UF2',
(149, 122, 'L', 'B') : 'LB0',
(149, 122, 'L', 'D') : 'DL2',
(149, 122, 'L', 'F') : 'LF0',
(149, 122, 'L', 'U') : 'UL2',
(149, 122, 'R', 'B') : 'RB0',
(149, 122, 'R', 'D') : 'DR2',
(149, 122, 'R', 'F') : 'RF0',
(149, 122, 'R', 'U') : 'UR2',
(149, 122, 'U', 'B') : 'UB0',
(149, 122, 'U', 'F') : 'UF0',
(149, 122, 'U', 'L') : 'UL0',
(149, 122, 'U', 'R') : 'UR0',
(15, 78, 'B', 'D') : 'DB1',
(15, 78, 'B', 'L') : 'LB1',
(15, 78, 'B', 'R') : 'RB1',
(15, 78, 'B', 'U') : 'UB1',
(15, 78, 'D', 'B') : 'DB1',
(15, 78, 'D', 'F') : 'DF1',
(15, 78, 'D', 'L') : 'DL1',
(15, 78, 'D', 'R') : 'DR1',
(15, 78, 'F', 'D') : 'DF1',
(15, 78, 'F', 'L') : 'LF1',
(15, 78, 'F', 'R') : 'RF1',
(15, 78, 'F', 'U') : 'UF1',
(15, 78, 'L', 'B') : 'LB1',
(15, 78, 'L', 'D') : 'DL1',
(15, 78, 'L', 'F') : 'LF1',
(15, 78, 'L', | |
'''
This is right after migrating last V2 env. refer to xls design file. -- Entrada_v2
/Users/qiyang/Documents/qduan/git/work_git/field_service_robot/doc/dispatch_ai_model_design.xlsx
step 1: move rules out of env definition.
step 2: Then I will change network layout to multiple input (cnn + dense)
https://www.pyimagesearch.com/2019/02/04/keras-multiple-inputs-and-mixed-data/
/Users/qiyang/Downloads/Houses-dataset
https://medium.com/datadriveninvestor/dual-input-cnn-with-keras-1e6d458cd979
'''
# This version works on top of json input and produce json out
# Observation: each worker has multiple working_time=days, then divided by slots, each slot with start and end time,
import gym
from gym import error, spaces, utils
from gym.utils import seeding
from six import StringIO, b
import sys
import numpy as np
import random
from operator import truediv # add
import kandbox_planner.util.planner_date_util as date_util
#
# each technicians has NBR_SLOT_PER_TECH time slots avaialble for dispatching customer visits
# each time slot is (start_time, end_time, Last task location X, Y) -> (int, int, Float, Float)
# X, Y Current Location of each agent accord to his last task
# --- Feature-layout constants for the observation vector -------------------
# Each technician exposes NBR_WORK_TIME_SLOT_PER_TECH working-time slots for
# dispatching; a slot is (start_time, end_time, last-task X, last-task Y).
NBR_WORK_TIME_SLOT_PER_TECH = 1 # 10
NBR_FEATURE_PER_WORK_SLOT = 4
# Max number of assigned (busy) time slots tracked per worker per day.
MAX_ASSIGNED_TIME_SLOT_PER_DAY = 5
# Longest working stretch, in minutes (6 hours).
MAX_WORKING_TIME_DURATION = 60*6
# NBR_FEATURE_PER_TECH = all dispatched time slots (start time, end time, gps x y) +
#      all working_time slot statistics (only free duration)
#      worker statistics (home gps)
NBR_FEATURE_PER_TECH = NBR_WORK_TIME_SLOT_PER_TECH*MAX_ASSIGNED_TIME_SLOT_PER_DAY*NBR_FEATURE_PER_WORK_SLOT \
    + NBR_WORK_TIME_SLOT_PER_TECH \
    + 4
# Feature width of the job/overall portion of the observation.
# NOTE(review): "VIST" is presumably a typo for "VISIT" -- confirm before renaming.
NBR_FEATURE_VIST_n_OVERALL = 32
'''
max_nbr_of_working_time_per_worker=3
max_nbr_of_worker = 4
max_total_duration = 0 # 25
'''
# https://stackoverflow.com/questions/4913349/haversine-formula-in-python-bearing-and-distance-between-two-gps-points
from math import radians, cos, sin, asin, sqrt
def haversine(lon1, lat1, lon2, lat2):
    """Great-circle distance in kilometers between two points given as
    (longitude, latitude) pairs in decimal degrees.
    """
    # Work in radians throughout.
    rlon1, rlat1, rlon2, rlat2 = (radians(v) for v in (lon1, lat1, lon2, lat2))
    half_dlon = (rlon2 - rlon1) / 2
    half_dlat = (rlat2 - rlat1) / 2
    # Haversine formula: chord length on the unit sphere.
    chord = sin(half_dlat) ** 2 + cos(rlat1) * cos(rlat2) * sin(half_dlon) ** 2
    earth_radius_km = 6371  # use 3956 for miles
    return 2 * asin(sqrt(chord)) * earth_radius_km
# haversine( 0, 51.5, -77.1, 38.8) = 5918.185064088763 // From London to Arlington
def get_travel_time_2locations(loc_1, loc_2):
    """Cheap travel-time proxy between two [y, x] locations.

    Uses a weighted Manhattan-style distance (x divided by 12, y divided
    by 5) on the integer parts of the coordinates, truncated to an int.
    The haversine-based version is intentionally commented out upstream.
    """
    lat_a, lon_a = loc_1[0], loc_1[1]
    lat_b, lon_b = loc_2[0], loc_2[1]
    minutes = abs(int(lon_a) - int(lon_b)) / 12 + abs(int(lat_a) - int(lat_b)) / 5
    return int(minutes)
# For each task [ [start, end], gps[x,y], duration_left]
# for each agent [gps-x,y task(start_time_with_travel end_time)]
class DispatchV2Env(gym.Env):
"""
Has the following members
"""
metadata = {'render.modes': ['human']}
config = {
'run_mode' : 'train',
'env_code' : 'merged_slot_v2',
'game_code' : 'merged_slot_v2_',
'allow_overtime' : False,
'data_start_day':'20191108',
'nbr_of_days':1,
'minutes_per_day':60*24,
'reversible' : True,
}
def __init__(self, workers=None, jobs=None, env_config = None, run_mode = 'train', allow_overtime = False, max_nbr_of_worker = None):
# , max_nbr_of_working_time_per_worker = 10
# each worker [{ id , active, level, gps: [x, y], total_free_duration, free_time_slots: [start, end], }]
self.workers = workers
if env_config:
for x in env_config.keys():
self.config[x] = env_config[x]
if max_nbr_of_worker is None:
self.max_nbr_of_worker = len(self.workers)
else:
#NBR_AGENT = 6
#self.max_nbr_of_worker = NBR_AGENT
self.max_nbr_of_worker = max_nbr_of_worker
#if max_nbr_of_working_time_per_worker is None:
# self.max_nbr_of_working_time_per_worker = len(self.jobs)
#else:
# self.max_nbr_of_working_time_per_worker = NBR_WORK_TIME_SLOT_PER_TECH
self.allow_overtime = allow_overtime
self.run_mode = run_mode # 'train' for training, 'predict' for prediction new dataset.
# each job [{ duration, gps: [x, y], fixed_schudule: [indicator, fixed_start, fixed_start], level }]
self.jobs = jobs
self.JOB_COUNT = len(self.jobs )
self.current_job_i = 0
self.total_travel_time = 0
self.total_assigned_job_duration = 0
self.worker_jobs = [] # List of jobs assigned to each worker. Each one is [* 4 ]
self.reset()
    def step(self, action ):
        """Gym step: try to assign the current job according to *action*.

        action -- one-hot list over workers; the worker whose entry is 1
        receives the current job (handled by _add_current_job_to_worker).
        Returns (obs, reward, done, info); info is the string 'ok' or
        'error' rather than the usual gym dict.
        """
        # action [1, 0, 0 ]. One hot coding, 1 means this worker take the job.
        done = False
        info='ok'
        reward = self._get_reward()
        # if sum(action) == 1:
        # Single worker take the job
        add_result = self._add_current_job_to_worker(action) # worker_index = action_worker, working_time_index = action_working_time_index)
        # print("Adding job {}--{} to Worker {}-{} , result: {}...".format(self.current_job_i - 1, self.jobs[self.current_job_i - 1]['job_id'],action_worker, action_working_time_index,add_result ))
        if add_result == False:
            #done = True
            info='error'
            # Recompute so the reward reflects the failed assignment.
            reward = self._get_reward() # 0
            # break
        else:
            # print(' error ')
            info = 'ok'
        # Episode ends once every job has been offered for assignment.
        if self.current_job_i == self.JOB_COUNT:
            done = True
            reward = self._get_reward() # 1
        obs = self._get_observation()
        return (obs, reward, done, info)
    def reset(self, shuffle_jobs=False):
        """Reset episode state and rebuild every worker's slot bookkeeping.

        Mutates the worker dicts in place: truncates 'working_time' to
        NBR_WORK_TIME_SLOT_PER_TECH entries and derives 'free_time_slots',
        'assigned_time_slots', 'worker_jobs', 'total_free_duration' and
        'max_free_slot_duration' from it. Returns the initial observation.
        """
        self.current_job_i = 0
        self.worker_ids = {}
        self.total_travel_time = 0
        self.total_assigned_job_duration = 0
        self.worker_jobs = [] # List of jobs assigned to each worker. Each one is [* 4 ]
        self.WORKER_COUNT = len(self.workers)
        if shuffle_jobs :
            random.shuffle(self.jobs)
        for worker_index in range(len(self.workers)):
            # Map worker_code -> index (noted as unused in the original).
            self.worker_ids[self.workers[worker_index]['worker_code']] = worker_index # no used...
            self.workers[worker_index]['assigned_time_slots'] = []
            self.worker_jobs.append([]) # corresponding to self.workers[worker_index]['assigned_time_slots']
            if len(self.workers[worker_index]['working_time']) > 0 :
                # TRUNCATE per model days
                self.workers[worker_index]['working_time'] = self.workers[worker_index]['working_time'][0:NBR_WORK_TIME_SLOT_PER_TECH]
            # A free slot starts as [start, end, 0, 0, 0, 0]; the trailing
            # zeros are filled in as jobs get assigned.
            self.workers[worker_index]['free_time_slots'] = \
                list(map(lambda x: [x[0], x[1], 0, 0, 0, 0 ], self.workers[worker_index]['working_time']))
            # self.workers[worker_index]['assigned_time_slots'].append([])
            # One empty assignment list per working-time slot.
            self.workers[worker_index]['assigned_time_slots'] = \
                list(map(lambda x:[], self.workers[worker_index]['working_time']))
            self.workers[worker_index]['worker_jobs'] = \
                list(map(lambda x:[], self.workers[worker_index]['working_time']))
            self.workers[worker_index]['total_free_duration'] = \
                sum(list(map(lambda x:x[1]-x[0], self.workers[worker_index]['free_time_slots'])))
            # IT is an array, each working_time slot has a max.
            self.workers[worker_index]['max_free_slot_duration'] = \
                max(list(map(lambda x:x[1]-x[0], self.workers[worker_index]['free_time_slots'])))
        return self._get_observation()
    def render(self, mode='human'):
        """Print a human-readable summary: the next job to be scheduled plus,
        per worker, the jobs already placed in every working-time slot.
        mode 'ansi' writes to a StringIO (discarded); anything else to stdout.
        """
        outfile = StringIO() if mode == 'ansi' else sys.stdout
        if self.current_job_i < self.JOB_COUNT:
            outfile.write(' '.join([ 'total: {}, '.format(len(self.jobs)), 'Next job:','{ curr=', str(self.current_job_i), ', dur=' , str( self.jobs[self.current_job_i]['duration'] ) \
                ,', gps=' , str( self.jobs[self.current_job_i]['job_gps'] ) , '}' \
                , 'travel: ', str(self.total_travel_time), 'total_dur: ', str(self.total_assigned_job_duration),'\n' ]))
        else:
            outfile.write(' '.join([ 'No next jobs ...' \
                , 'travel: ', str(self.total_travel_time), 'total_dur: ', str(self.total_assigned_job_duration), '\n' ]) )
        for worker_index in range(len(self.worker_jobs)):
            job_count = 0
            # Seed each slot's printout with its [start, end] window.
            job_list = list(map(lambda x: x[0:2] , self.workers[worker_index]['working_time']))
            for work_time_i in range( len(self.workers[worker_index] ['assigned_time_slots'] ) ):
                for time_slot_i in range( len(self.workers[worker_index] ['assigned_time_slots'][work_time_i] ) ):
                    job_list[work_time_i].append( \
                        [ 'D-{}'.format(work_time_i), float('%.2f'% (self.workers[worker_index] ['assigned_time_slots'][work_time_i][time_slot_i] [0] )), \
                        float('%.2f'% (self.workers[worker_index] ['assigned_time_slots'][work_time_i][time_slot_i] [1] )), \
                        self.workers[worker_index] ['assigned_time_slots'][work_time_i][time_slot_i] [4]
                        ])
                    # Element [4] appears to hold the job indices of the slot -- confirm.
                    job_count += len(self.workers[worker_index] ['assigned_time_slots'][work_time_i][time_slot_i] [4] )
                    # print(work_time_i, time_slot_i)
            # I assume that all assigned_time_slots are ordered ascendingly, same as working_time
            # loop through
            outfile.write(' '.join([
                'Worker({:>1}): job_count({:>2})'.format(worker_index, job_count), '--->'.join([str(x) for x in job_list])
                ,'\n'] ))
def _check_all_start_time_within_range(self, assigned_time_start, job_list_orig, new_job): # _get_start_time_jobs
start_time_minute = assigned_time_start
job_list = job_list_orig.copy()
job_list.append(new_job)
start_time_list=[]
prev_job = -10000000
for assigned_job_j in range (len(job_list)):
# First find a possible time slot to insert into :
job_index = job_list[assigned_job_j]
if assigned_job_j == 0:
travel_time = 0
start_time_minute = assigned_time_start
prev_job = job_index
else:
travel_time = get_travel_time_2locations(self.jobs[job_index]['job_gps'], self.jobs[prev_job]['job_gps'])
start_time_minute = start_time_minute + self.jobs[prev_job]['duration'] + travel_time
prev_job = job_index
start_time_list.append( start_time_minute )
for ji in range(len(start_time_list)):
if not( self.jobs[job_list[ji]]['fixed_schudule']['fixed_minute_time_slot'][0] < start_time_list[ji] \
< self.jobs[job_list[ji]]['fixed_schudule']['fixed_minute_time_slot'][1]
) :
return False
return True
    def get_solution_json(self):
        """Flatten the per-worker assigned_time_slots into a flat list of
        job dicts ready for JSON serialization (one dict per scheduled job).
        """
        job_solution=[]
        '''
        Worker(0): job_count(25)
        [360, 1800, ['D-0', 1351.0, 1428.33, [0]], ['D-0', 1218.0, 1264.22, [40]], ['D-0', 1258.0, 1350.11, [68]]]
        --->[360, 1800, ['D-1', 1353.0, 1428.56, [36]], ['D-1', 1259.0, 1347.06, [49]]]
        --->[360, 1800, ['D-2', 755.0, 778.83, [21]], ['D-2', 810.0, 843.78, [59]], ['D-2', 755.0, 778.83, [67]]]
        --->[360, 1800, ['D-3', 1199.0, 1252.17, [63]], ['D-3', 1283.0, 1333.33, [70]]]
        '''
        for worker_index in range (len(self.workers)):
            for work_time_i in range (len(self.workers[worker_index]['assigned_time_slots'])): # nth day.
                for assigned_i in range (len(self.workers[worker_index]['assigned_time_slots'][work_time_i])):
                    start_minutes = 0
                    # NOTE(review): prev_job starts at -1, so for the FIRST job of a
                    # slot the fields derived from self.jobs[prev_job] below
                    # ('scheduled_travel_prev_code', 'scheduled_travel_minutes_before')
                    # read jobs[-1], i.e. the LAST job in the list. The
                    # 'prev_job = job_index' line in the first branch is commented
                    # out -- this looks like a bug; confirm intent before fixing.
                    prev_job = -1
                    start_time_minute = self.workers[worker_index]['assigned_time_slots'][work_time_i][assigned_i][0]
                    for assigned_job_j in range (len(self.workers[worker_index]['assigned_time_slots'][work_time_i][assigned_i][4])):
                        # First find a possible time slot to insert into :
                        job_index = self.workers[worker_index]['assigned_time_slots'][work_time_i][assigned_i][4][assigned_job_j]
                        if assigned_job_j == 0:
                            travel_time = 0
                            # prev_job = job_index
                            start_time_minute = self.workers[worker_index]['assigned_time_slots'][work_time_i][assigned_i][0]
                        else:
                            travel_time = get_travel_time_2locations(self.jobs[job_index]['job_gps'], self.jobs[prev_job]['job_gps'])
                            start_time_minute = start_time_minute + self.jobs[prev_job]['duration'] + travel_time
                        one_job = {
                            'job_code': self.jobs[job_index]['job_id'],
                            'job_type': self.jobs[job_index]['job_type'],
                            "planning_status": "I",
                            "location_code": self.jobs[job_index]['location_code'] ,
                            "geo_longitude": self.jobs[job_index]['geo_longitude'] ,
                            "geo_latitude": self.jobs[job_index]['geo_latitude'] ,
                            "requested_start_day": date_util.add_days_2_day_string (self.config['data_start_day'], self.jobs[job_index]['requested_start_day_sequence'] ) ,
                            "scheduled_worker_code": self.workers[worker_index]['worker_code'],
                            "scheduled_start_day": date_util.add_days_2_day_string (self.config['data_start_day'], work_time_i) ,
                            "scheduled_start_minutes": start_time_minute,
                            "scheduled_share_status": 'N' ,
                            #TODO
                            "scheduled_duration_minutes": self.jobs[job_index]['requested_duration_minutes'],
                            "scheduled_travel_minutes": 0 , #TODO, lookup previous one.
                            'scheduled_travel_prev_code': self.jobs[prev_job] ['job_code'],
                            'scheduled_travel_minutes_before' : self._get_travel_time_2jobs(prev_job,job_index ),
                        }
                        prev_job = job_index
                        job_solution.append(one_job)
        return job_solution
    def close(self):
        """Gym API hook; this environment holds no external resources."""
        pass
# ***************************************************************
# # Internal functions
# ***************************************************************
def _get_travel_time_2jobs(self, job_index_1, job_index_2):
distance = abs(self.jobs[job_index_1]['job_gps'][0] - self.jobs[job_index_2]['job_gps'][0]) + \
abs(self.jobs[job_index_1]['job_gps'][1] - self.jobs[job_index_2]['job_gps'][1])
return distance/2
def _get_best_fit_time_slot(self, worker_index, working_time_index, job_index):
# return the index of which timeslot has closest GPS to current job.
curr_free_slot_list = self.workers[worker_index][working_time_index]
curr_job = self.jobs[job_index]
def _calculate_travel_time(slot, job): # x is the free time slot,
if slot[1]-slot[0] < | |
__next__(self):
global set_global_value
set_global_value('MyFilter', '__next__')
return 'infinite loop'
o = MyFilter(lambda: None, [])
getattr(o, attribute_name)
assert is_global_value_set('MyFilter', '__getattribute__')
iter(o)
assert is_global_value_set('MyFilter', '__iter__')
next(o)
assert is_global_value_set('MyFilter', '__next__')
# =============
# === float ===
# =============
class MyFloat(float):
    """float subclass used to verify that each overridable special method is
    actually dispatched: most overrides simply return their own name, while
    the conversion hooks record the call via the module-level helpers
    set_global_value / attribute_name / hash_value."""
    def __repr__(self): return '__repr__'
    def __str__(self): return '__str__'
    def __hash__(self):
        return hash_value  # module-level fixture value
    def __eq__(self, other): return other == '__eq__'
    def __ne__(self, other): return other == '__ne__'
    def __lt__(self, other): return other == '__lt__'
    def __le__(self, other): return other == '__le__'
    def __gt__(self, other): return other == '__gt__'
    def __ge__(self, other): return other == '__ge__'
    def __bool__(self):
        set_global_value('MyFloat', '__bool__')
        return False
    def __int__(self):
        set_global_value('MyFloat', '__int__')
        return 42
    def __float__(self):
        set_global_value('MyFloat', '__float__')
        return 42.3
    def __getattribute__(self, name):
        # Record the lookup of the designated attribute; defer the rest.
        if name == attribute_name:
            set_global_value('MyFloat', '__getattribute__')
            return None
        return super().__getattribute__(name)
    def __pos__(self): return '__pos__'
    def __neg__(self): return '__neg__'
    def __abs__(self): return '__abs__'
    def __trunc__(self): return '__trunc__'
    def __round__(self, ndigits=None): return '__round__'
    def __add__(self, other): return '__add__'
    def __divmod__(self, other): return '__divmod__'
    def __floordiv__(self, other): return '__floordiv__'
    def __mod__(self, other): return '__mod__'
    def __mul__(self, other): return '__mul__'
    def __sub__(self, other): return '__sub__'
    def __truediv__(self, other): return '__truediv__'
    def __radd__(self, other): return '__radd__'
    def __rdivmod__(self, other): return '__rdivmod__'
    def __rfloordiv__(self, other): return '__rfloordiv__'
    def __rmod__(self, other): return '__rmod__'
    def __rmul__(self, other): return '__rmul__'
    def __rsub__(self, other): return '__rsub__'
    def __rtruediv__(self, other): return '__rtruediv__'
    def __pow__(self, other, mod=None): return '__pow__'
    def __rpow__(self, other, mod=None): return '__rpow__'
# Exercise every MyFloat override: builtins (repr/str/hash), rich
# comparisons, the conversion hooks (checked via is_global_value_set),
# and each arithmetic dunder both by explicit call and by operator.
o = MyFloat()
assert repr(o) == '__repr__'
assert str(o) == '__str__'
assert hash(o) == hash_value
assert o == '__eq__'
assert o != '__ne__'
assert o < '__lt__'
assert o <= '__le__'
assert o > '__gt__'
assert o >= '__ge__'
bool(o)
assert is_global_value_set('MyFloat', '__bool__')
int(o)
assert is_global_value_set('MyFloat', '__int__')
float(o)
assert is_global_value_set('MyFloat', '__float__')
getattr(o, attribute_name)
assert is_global_value_set('MyFloat', '__getattribute__')
assert o.__pos__() == '__pos__'
assert (+o) == '__pos__'
assert o.__neg__() == '__neg__'
assert (-o) == '__neg__'
assert o.__abs__() == '__abs__'
assert abs(o) == '__abs__'
assert o.__trunc__() == '__trunc__'
assert round(o) == '__round__'
assert o.__add__(1.0) == '__add__'
assert o + 1.0 == '__add__'
assert o.__divmod__(1.0) == '__divmod__'
assert o.__floordiv__(1.0) == '__floordiv__'
assert o.__mod__(1.0) == '__mod__'
assert o % 1.0 == '__mod__'
assert o.__mul__(1.0) == '__mul__'
assert o * 1.0 == '__mul__'
assert o.__sub__(1.0) == '__sub__'
assert o - 1.0 == '__sub__'
assert o.__truediv__(1.0) == '__truediv__'
assert o / 1.0 == '__truediv__'
assert o.__radd__(1.0) == '__radd__'
assert 1.0 + o == '__radd__'
assert o.__rdivmod__(1.0) == '__rdivmod__'
assert o.__rfloordiv__(1.0) == '__rfloordiv__'
assert o.__rmod__(1.0) == '__rmod__'
assert 1.0 % o == '__rmod__'
assert o.__rmul__(1.0) == '__rmul__'
assert 1.0 * o == '__rmul__'
assert o.__rsub__(1.0) == '__rsub__'
assert 1.0 - o == '__rsub__'
assert o.__rtruediv__(1.0) == '__rtruediv__'
assert 1.0 / o == '__rtruediv__'
assert o.__pow__(1.0) == '__pow__'
assert pow(o, 1.0) == '__pow__'
assert o.__rpow__(1.0) == '__rpow__'
assert pow(1.0, o) == '__rpow__'
# =================
# === frozenset ===
# =================
class MyFrozenset(frozenset):
    """frozenset subclass for exercising special-method dispatch: the
    comparison and set operators report their own names, while iteration /
    length / membership record the call via set_global_value."""
    def __repr__(self): return '__repr__'
    def __hash__(self):
        return hash_value  # module-level fixture value
    def __eq__(self, other): return other == '__eq__'
    def __ne__(self, other): return other == '__ne__'
    def __lt__(self, other): return other == '__lt__'
    def __le__(self, other): return other == '__le__'
    def __gt__(self, other): return other == '__gt__'
    def __ge__(self, other): return other == '__ge__'
    def __getattribute__(self, name):
        # Record the lookup of the designated attribute; defer the rest.
        if name == attribute_name:
            set_global_value('MyFrozenset', '__getattribute__')
            return None
        return super().__getattribute__(name)
    def __iter__(self):
        set_global_value('MyFrozenset', '__iter__')
        return super().__iter__()
    def __len__(self):
        set_global_value('MyFrozenset', '__len__')
        return 1
    def __contains__(self, element):
        set_global_value('MyFrozenset', '__contains__')
        return False
    def __and__(self, other): return '__and__'
    def __or__(self, other): return '__or__'
    def __sub__(self, other): return '__sub__'
    def __xor__(self, other): return '__xor__'
    def __rand__(self, other): return '__rand__'
    def __ror__(self, other): return '__ror__'
    def __rsub__(self, other): return '__rsub__'
    def __rxor__(self, other): return '__rxor__'
# Exercise every MyFrozenset override: builtins, rich comparisons, the
# container protocol hooks (checked via is_global_value_set), and each
# set operator both by explicit call and by operator syntax.
o = MyFrozenset()
assert repr(o) == '__repr__'
assert hash(o) == hash_value
assert o == '__eq__'
assert o != '__ne__'
assert o < '__lt__'
assert o <= '__le__'
assert o > '__gt__'
assert o >= '__ge__'
getattr(o, attribute_name)
assert is_global_value_set('MyFrozenset', '__getattribute__')
iter(o)
assert is_global_value_set('MyFrozenset', '__iter__')
len(o)
assert is_global_value_set('MyFrozenset', '__len__')
42 in o
assert is_global_value_set('MyFrozenset', '__contains__')
assert o.__and__(frozenset()) == '__and__'
assert o & frozenset() == '__and__'
assert o.__or__(frozenset()) == '__or__'
assert o | frozenset() == '__or__'
assert o.__sub__(frozenset()) == '__sub__'
assert o - frozenset() == '__sub__'
assert o.__xor__(frozenset()) == '__xor__'
assert o ^ frozenset() == '__xor__'
assert o.__rand__(frozenset()) == '__rand__'
assert frozenset() & o == '__rand__'
assert o.__ror__(frozenset()) == '__ror__'
assert frozenset() | o == '__ror__'
assert o.__rsub__(frozenset()) == '__rsub__'
assert frozenset() - o == '__rsub__'
assert o.__rxor__(frozenset()) == '__rxor__'
assert frozenset() ^ o == '__rxor__'
# ===========
# === int ===
# ===========
class MyInt(int):
    """int subclass used to verify special-method dispatch: every arithmetic,
    bitwise and comparison override reports its own name, while the
    conversion hooks record the call via set_global_value."""
    def __repr__(self): return '__repr__'
    def __str__(self): return '__str__'
    def __hash__(self):
        return hash_value  # module-level fixture value
    def __eq__(self, other): return other == '__eq__'
    def __ne__(self, other): return other == '__ne__'
    def __lt__(self, other): return other == '__lt__'
    def __le__(self, other): return other == '__le__'
    def __gt__(self, other): return other == '__gt__'
    def __ge__(self, other): return other == '__ge__'
    def __bool__(self):
        set_global_value('MyInt', '__bool__')
        return False
    def __int__(self):
        set_global_value('MyInt', '__int__')
        return 42
    def __float__(self):
        set_global_value('MyInt', '__float__')
        return 42.3
    def __index__(self):
        set_global_value('MyInt', '__index__')
        return 42
    def __getattribute__(self, name):
        # Record the lookup of the designated attribute; defer the rest.
        if name == attribute_name:
            set_global_value('MyInt', '__getattribute__')
            return None
        return super().__getattribute__(name)
    def __pos__(self): return '__pos__'
    def __neg__(self): return '__neg__'
    def __invert__(self): return '__invert__'
    def __abs__(self): return '__abs__'
    def __trunc__(self): return '__trunc__'
    def __round__(self, ndigits=None): return '__round__'
    def __add__(self, other): return '__add__'
    def __and__(self, other): return '__and__'
    def __divmod__(self, other): return '__divmod__'
    def __floordiv__(self, other): return '__floordiv__'
    def __lshift__(self, other): return '__lshift__'
    def __mod__(self, other): return '__mod__'
    def __mul__(self, other): return '__mul__'
    def __or__(self, other): return '__or__'
    def __rshift__(self, other): return '__rshift__'
    def __sub__(self, other): return '__sub__'
    def __truediv__(self, other): return '__truediv__'
    def __xor__(self, other): return '__xor__'
    def __radd__(self, other): return '__radd__'
    def __rand__(self, other): return '__rand__'
    def __rdivmod__(self, other): return '__rdivmod__'
    def __rfloordiv__(self, other): return '__rfloordiv__'
    def __rlshift__(self, other): return '__rlshift__'
    def __rmod__(self, other): return '__rmod__'
    def __rmul__(self, other): return '__rmul__'
    def __ror__(self, other): return '__ror__'
    def __rrshift__(self, other): return '__rrshift__'
    def __rsub__(self, other): return '__rsub__'
    def __rtruediv__(self, other): return '__rtruediv__'
    def __rxor__(self, other): return '__rxor__'
    def __pow__(self, other, mod=None): return '__pow__'
    def __rpow__(self, other, mod=None): return '__rpow__'
# Exercise every MyInt override: builtins, rich comparisons, the
# conversion hooks (checked via is_global_value_set), and each arithmetic
# and bitwise dunder both by explicit call and by operator syntax.
o = MyInt()
assert repr(o) == '__repr__'
assert str(o) == '__str__'
assert hash(o) == hash_value
assert o == '__eq__'
assert o != '__ne__'
assert o < '__lt__'
assert o <= '__le__'
assert o > '__gt__'
assert o >= '__ge__'
bool(o)
assert is_global_value_set('MyInt', '__bool__')
int(o)
assert is_global_value_set('MyInt', '__int__')
float(o)
assert is_global_value_set('MyInt', '__float__')
o.__index__()
assert is_global_value_set('MyInt', '__index__')
getattr(o, attribute_name)
assert is_global_value_set('MyInt', '__getattribute__')
assert o.__pos__() == '__pos__'
assert (+o) == '__pos__'
assert o.__neg__() == '__neg__'
assert (-o) == '__neg__'
assert o.__invert__() == '__invert__'
assert (~o) == '__invert__'
assert o.__abs__() == '__abs__'
assert abs(o) == '__abs__'
assert o.__trunc__() == '__trunc__'
assert round(o) == '__round__'
assert o.__add__(1) == '__add__'
assert o + 1 == '__add__'
assert o.__and__(1) == '__and__'
assert o & 1 == '__and__'
assert o.__divmod__(1) == '__divmod__'
assert o.__floordiv__(1) == '__floordiv__'
assert o.__lshift__(1) == '__lshift__'
assert o << 1 == '__lshift__'
assert o.__mod__(1) == '__mod__'
assert o % 1 == '__mod__'
assert o.__mul__(1) == '__mul__'
assert o * 1 == '__mul__'
assert o.__or__(1) == '__or__'
assert o | 1 == '__or__'
assert o.__rshift__(1) == '__rshift__'
assert o >> 1 == '__rshift__'
assert o.__sub__(1) == '__sub__'
assert o - 1 == '__sub__'
assert o.__truediv__(1) == '__truediv__'
assert o / 1 == '__truediv__'
assert o.__xor__(1) == '__xor__'
assert o ^ 1 == '__xor__'
assert o.__radd__(1) == '__radd__'
assert 1 + o == '__radd__'
assert o.__rand__(1) == '__rand__'
assert 1 & o == '__rand__'
assert o.__rdivmod__(1) == '__rdivmod__'
assert o.__rfloordiv__(1) == '__rfloordiv__'
assert o.__rlshift__(1) == '__rlshift__'
assert 1 << o == '__rlshift__'
assert o.__rmod__(1) == '__rmod__'
assert 1 % o == '__rmod__'
assert o.__rmul__(1) == '__rmul__'
assert 1 * o == '__rmul__'
assert o.__ror__(1) == '__ror__'
assert 1 | o == '__ror__'
assert o.__rrshift__(1) == '__rrshift__'
assert 1 >> o == '__rrshift__'
assert o.__rsub__(1) == '__rsub__'
assert 1 - o == '__rsub__'
assert o.__rtruediv__(1) == '__rtruediv__'
assert 1 / o == '__rtruediv__'
assert o.__rxor__(1) == '__rxor__'
assert 1 ^ o == '__rxor__'
assert o.__pow__(1) == '__pow__'
assert pow(o, 1) == '__pow__'
assert o.__rpow__(1) == '__rpow__'
assert pow(1, o) == '__rpow__'
# ============
# === list ===
# ============
class MyList(list):
def __repr__(self):
return '__repr__'
def __hash__(self):
global hash_value
return hash_value
def __eq__(self, o):
return o == '__eq__'
def __ne__(self, o):
return o == '__ne__'
def __lt__(self, o):
return o == '__lt__'
def __le__(self, o):
return o == '__le__'
def __gt__(self, o):
return o == '__gt__'
def __ge__(self, o):
return o == '__ge__'
def __getattribute__(self, name):
global attribute_name
if name == attribute_name:
global set_global_value
set_global_value('MyList', '__getattribute__')
return None
return super().__getattribute__(name)
def __getitem__(self, index):
global index_value
if index == index_value:
global set_global_value
set_global_value('MyList', '__getitem__')
return None
return super().__getitem__(index)
def __setitem__(self, index, value):
global index_value
if index == index_value:
global set_global_value
set_global_value('MyList', '__setitem__')
return None
return super().__setitem__(index, value)
def __delitem__(self, index):
global index_value
if index | |
#!/usr/bin/env python
# coding: utf-8
# # 8. Interactivity in Python
# ### Announcements
# - Lab 7 due Wednesday
# - Interview with <NAME> on Friday
# - Code Review I for final projects due on November 19 (50% complete, working examples)
# So far this semester, our interaction with Python has been primarily based in what I'll call "CLI", or command-line interface. If we want to, say, change a calculation, or what's being plotted, we have to change some lines of code and re-run our code (or `jupyter` cell).
#
# In many of your final projects, you'll be attempting to craft some form of ***interactive*** elements. This may look like input boxes, sliders, and buttons in a *Graphical User Interface* (GUI) run on a local computer, or similar inputs run on a website (also known as a *webapp*).
# Getting to a point where we have a functional web app is a multi-step process, and we'll have to learn a few things over the next week or two before we get there. But we can start learning some more simple interactive elements now.
#
# Generally, when we move to creating "interactive apps", we no longer want to be working in a Jupyter notebook. For this in-class demo, please use an editor of your choice (Jupyter has one, or atom, sublime, vscode, etc., are all options).
# ## Building an interactive Webapp with Streamlit
# We're going to go step by step today and build a functioning webapp (that runs locally) using a new tool called `streamlit`. I'm a fan of this package because its explicit goal is making the jump from working with the data in python, to displaying it and making it interactive online, as simple as possible --- certainly simpler than other frameworks I've worked with.
# To start, you'll need to install it. You'll also need the package `spectral_cube`, as the data we're going to display in our app uses this.
#
# Go ahead and `pip install streamlit` and `pip install spectral_cube` in your `a330` environments.
# ## Understanding the Data
# Before we can display and interact with our data, we need to understand what it is. The file you need for this "lab" is in the *Data Access* folder on our website/dropbox. It's a `FITS` file of radio data. As some of you may know, radio data captures both spatial information (like an image) and spectral information (the spectrum in each pixel).
#
# A convenient way to store this information is in a "cube," where each slice is a flux map at a given wavelength (in radio, people use velocity as this axis, which is interchangeable).
# See below how to read in the data:
# In[30]:
# Load the 12CO radio data cube and express its spectral axis in km/s.
from spectral_cube import SpectralCube
import astropy.units as u
cube = SpectralCube.read('ngc1333_12co.fits')
cube = cube.with_spectral_unit(u.km / u.s)
# Let's have a look at this cube object:
# In[32]:
cube
# In[31]:
cube.spectral_axis
# We can pull a slice at any channel number (i.e., any velocity) via the following:
# In[33]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[35]:
# Fix: `plt` is used below but matplotlib was never imported in this script.
import matplotlib.pyplot as plt
# Display the first (unmasked) velocity channel of the cube as an image.
im = cube.unmasked_data[0]
fig, ax = plt.subplots(figsize=(5,5))
ax.imshow(im)
# Each of those images represents a slice of the emission of 12 CO at a single velocity.
# *Exercise: Write a **function** `load_data()` which loads this fits file as shown above, sets the spectral units, and returns it*.
# ## Why Interactivity?
# We can plot any individual frame (velocity slice) of our cube as shown above, re-running our code/cell for different channel numbers. But it is often valuable to "page through" this data, to quickly jump around, advance frames, etc. Additionally, what if we want to dynamically average or sum over some subset of slices? We can do that with interactivity. That brings us to Streamlit, and the concept of a webapp.
# As intimated by Yao on Friday, Python Webapps involve two components: a `server`, where the "backend" of the code is running, and a "frontend", which comprises the html/css/js/nodejs that defines how users interact with the program within a browser.
#
# There exist things called `frameworks` (examples include Flask, Django, Emmett, and FastAPI). These frameworks have things under the hood that let you connect together the frontend stuff (webpages, urls, etc.) with the backend (a python server they're running).
#
# These can be involved to learn, but provide the ultimate amount of control over the way a website looks/feels/operates. `Streamlit` is a little different. It does run a server, but rather than provide a full framework, it's a true library of Python classes/methods that allow you to simply get things up and going, while sacrificing some creative control. For now, we'll host these servers "locally" (like a notebook), but the ultimate goal is usually to host it "online" somewhere where others can then use it.
# We're going to use a few of the many elements available in streamlit for this demo:
# - sliders
# - range-sliders
# - buttons
# - image display
# - headers/text display
# ## The Basics
# Streamlit has a lot of specialized commands, but an easy one to use (most of the time) is `st.write()`. This is designed to be a magic command that works like `print()`, but will nicely format whatever you put in (dataframe, number, text string) and then display it on your little webpage.
#
# If you made a file with multiple `st.write()` calls, they would appear in order on the page (as in the file).
#
# Similarly, if we want to, say, display a `matplotlib` figure, we use `st.pyplot(fig)`, feeding in some created figure.
# The most important concept to remember when working with `streamlit` is that any time a change is made (user slides a slider, hits a button, hits submit on a form, etc.), *the entire script is re-executed*.
#
# This means that by default, you don't have a `memory` or global namespace where variables are sticking around. Luckily, `streamlit` has an answer: `st.session_state`.
#
# Whenever a browser connects to the app and runs, there's a special container called session state, where if we want, we can store input values and "remember" them across re-runs, modifying them as desired when the user inputs certain things.
# When we want to use a session state variable, we need to make sure that at the start of a re-run, the variable doesn't get overwritten if it exists, but does get created if it doesn't. For that, we use this format:
# In[ ]:
# Initialize a session-state entry only on the first run, so later re-runs keep
# the stored value ('something' is a placeholder for your default).
if 'key' not in st.session_state:
    st.session_state['key'] = something
# Note, dot notation is also supported:
if 'key' not in st.session_state:
    st.session_state.key = something
# Later, in our code, we can access that variable via `st.session_state.key`, (or the dict-style). We can also change or overwrite it. As long as the browser session is going, this can keep going.
# ## Building the App
# It's time to start building. There'll be some more new things, but they make more sense in the context of the app.
#
# The first thing we need is imports. I'll provide the ones you need here:
# In[ ]:
import streamlit as st
import matplotlib.pyplot as plt
import numpy as np
from spectral_cube import SpectralCube
import astropy.units as u
import copy
# Next, we need to get our data in. You've already done this in the exercise above: simply add your function in, and use it to load the data. But... for a bit of extra fun, add the following decorator (I'm about to explain decorators):
# In[ ]:
# Cache the loaded cube across re-runs so the FITS file is read only once.
# NOTE(review): st.cache is deprecated in recent Streamlit releases in favor of
# st.cache_data / st.cache_resource — confirm against the installed version.
@st.cache(allow_output_mutation=True)
def load_data():
    # Your code here
    return something
cube = load_data()
# As I said above, streamlit runs your script everytime any of the interactive elements is interacted with. But we don't want to load this fits file every time, because that's slow. So we take advantage of a built-in streamlit trick in which it will `cache` the array containing the raw data (i.e., the execution of this function). This is sort of | |
<filename>type4py/preprocess.py<gh_stars>10-100
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from type4py import logger, AVAILABLE_TYPES_NUMBER, MAX_PARAM_TYPE_DEPTH
from libsa4py.merge import merge_jsons_to_dict, create_dataframe_fns, create_dataframe_vars
from libsa4py.cst_transformers import ParametricTypeDepthReducer
from libsa4py.cst_lenient_parser import lenient_parse_module
from libsa4py.utils import list_files
from typing import Tuple
from ast import literal_eval
from collections import Counter
from os.path import exists, join
from tqdm import tqdm
import regex
import os
import pickle
import pandas as pd
import numpy as np
logger.name = __name__
tqdm.pandas()  # registers DataFrame.progress_apply / Series.progress_apply used throughout
# Precompile often used regex
# NOTE(review): first_cap_regex / all_cap_regex are not referenced in the visible
# part of this module — presumably camelCase -> snake_case helpers; confirm usage.
first_cap_regex = regex.compile('(.)([A-Z][a-z]+)')
all_cap_regex = regex.compile('([a-z0-9])([A-Z])')
# Module prefixes that should be stripped from type annotations.
sub_regex = r'typing\.|typing_extensions\.|t\.|builtins\.|collections\.'
def make_types_consistent(df_all: pd.DataFrame, df_vars: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """
    Removes typing module from type annotations

    Strips the module prefixes matched by the module-level ``sub_regex``
    (``typing.``, ``typing_extensions.``, ``t.``, ``builtins.``, ``collections.``)
    from return, argument and variable type annotations, and unwraps quoted
    forward references such as ``'List[int]'``. Mutates and returns both frames.
    """
    def remove_quote_types(t: str):
        # Unwrap a fully quoted annotation, e.g. "'List[int]'" -> "List[int]".
        s = regex.search(r'^\'(.+)\'$', t)
        if bool(s):
            return s.group(1)
        else:
            #print(t)
            return t
    # `if x` lets falsy values (None, '') pass through untouched; NaN floats are
    # truthy, so they become the string 'nan' (dropped later by filter_return_dp).
    df_all['return_type'] = df_all['return_type'].progress_apply(lambda x: regex.sub(sub_regex, "", str(x)) if x else x)
    # arg_types holds a stringified list; strip prefixes per element, re-stringify.
    df_all['arg_types'] = df_all['arg_types'].progress_apply(lambda x: str([regex.sub(sub_regex, "", t) \
                                                            if t else t for t in literal_eval(x)]))
    df_all['return_type'] = df_all['return_type'].progress_apply(remove_quote_types)
    df_all['arg_types'] = df_all['arg_types'].progress_apply(lambda x: str([remove_quote_types(t) if t else t for t in literal_eval(x)]))
    # var_type is substituted unconditionally; str(x) guards non-string values.
    df_vars['var_type'] = df_vars['var_type'].progress_apply(lambda x: regex.sub(sub_regex, "", str(x)))
    df_vars['var_type'] = df_vars['var_type'].progress_apply(remove_quote_types)
    return df_all, df_vars
def resolve_type_aliasing(df_param: pd.DataFrame, df_ret: pd.DataFrame,
                          df_vars: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """
    Resolves type aliasing and mappings. e.g. `[]` -> `list`

    Rewrites common annotation aliases (empty containers, trivially-parameterized
    generics, Literals, etc.) to their canonical form. Mutates and returns all
    three dataframes.
    """
    # Problematic patterns: (?<=.*)Tuple\[Any, *?.*?\](?<=.*)
    type_aliases = {r'(?<=.*)any(?<=.*)|(?<=.*)unknown(?<=.*)': 'Any',
                    r'^{}$|^Dict$|^Dict\[\]$|(?<=.*)Dict\[Any, *?Any\](?=.*)|^Dict\[unknown, *Any\]$': 'dict',
                    r'^Set$|(?<=.*)Set\[\](?<=.*)|^Set\[Any\]$': 'set',
                    r'^Tuple$|(?<=.*)Tuple\[\](?<=.*)|^Tuple\[Any\]$|(?<=.*)Tuple\[Any, *?\.\.\.\](?=.*)|^Tuple\[unknown, *?unknown\]$|^Tuple\[unknown, *?Any\]$|(?<=.*)tuple\[\](?<=.*)': 'tuple',
                    r'^Tuple\[(.+), *?\.\.\.\]$': r'Tuple[\1]',
                    r'\bText\b': 'str',
                    r'^\[\]$|(?<=.*)List\[\](?<=.*)|^List\[Any\]$|^List$': 'list',
                    r'^\[{}\]$': 'List[dict]',
                    r"(?<=.*)Literal\['.*?'\](?=.*)": 'Literal',
                    r'(?<=.*)Literal\[\d+\](?=.*)': 'Literal',  # Maybe int?!
                    # Fix: the five patterns below previously lacked `\[`/`\]` (and `\(`)
                    # escapes, so e.g. '^Iterator[Any]$' was a character class matching
                    # "IteratorA"/"Iteratorn"/"Iteratory" instead of 'Iterator[Any]'.
                    r'^Callable\[\.\.\., *?Any\]$|^Callable\[\[Any\], *?Any\]$|^Callable\[\[Named\(x, Any\)\], Any\]$': 'Callable',
                    r'^Iterator\[Any\]$': 'Iterator',
                    r'^OrderedDict\[Any, *?Any\]$': 'OrderedDict',
                    r'^Counter\[Any\]$': 'Counter',
                    r'(?<=.*)Match\[Any\](?<=.*)': 'Match'}
    # Compile each alias pattern once, instead of twice per processed type string.
    compiled_aliases = [(regex.compile(pattern), replacement)
                        for pattern, replacement in type_aliases.items()]

    def resolve_type_alias(t: str):
        # Apply every matching alias rewrite in declaration order.
        for pattern, replacement in compiled_aliases:
            if pattern.search(t):
                t = pattern.sub(replacement, t)
        return t

    df_param['arg_type'] = df_param['arg_type'].progress_apply(resolve_type_alias)
    df_ret['return_type'] = df_ret['return_type'].progress_apply(resolve_type_alias)
    df_vars['var_type'] = df_vars['var_type'].progress_apply(resolve_type_alias)
    return df_param, df_ret, df_vars
def preprocess_parametric_types(df_param: pd.DataFrame, df_ret: pd.DataFrame,
                                df_vars: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """
    Reduces the depth of parametric types

    Parses each parametric annotation (e.g. ``List[List[List[int]]]``) with libcst
    and truncates nesting beyond ``MAX_PARAM_TYPE_DEPTH``. Annotations that fail
    both strict and lenient parsing become ``None`` (filtered out later).
    Mutates and returns all three dataframes.
    """
    from libcst import parse_module, ParserSyntaxError
    # Counts annotations recovered by the lenient parser. The original kept this in
    # a module-level global `s`, which leaked state across calls.
    lenient_parse_count = 0

    def reduce_depth_param_type(t: str) -> str:
        nonlocal lenient_parse_count
        # Only parametric annotations (containing brackets) need parsing.
        if regex.match(r'.+\[.+\]', t):
            try:
                tree = parse_module(t)
                tree = tree.visit(ParametricTypeDepthReducer(max_annot_depth=MAX_PARAM_TYPE_DEPTH))
                return tree.code
            except ParserSyntaxError:
                # Fall back to the lenient parser for slightly malformed annotations.
                try:
                    tree = lenient_parse_module(t)
                    tree = tree.visit(ParametricTypeDepthReducer(max_annot_depth=MAX_PARAM_TYPE_DEPTH))
                    lenient_parse_count += 1
                    return tree.code
                except ParserSyntaxError:
                    return None
        else:
            return t

    df_param['arg_type'] = df_param['arg_type'].progress_apply(reduce_depth_param_type)
    df_ret['return_type'] = df_ret['return_type'].progress_apply(reduce_depth_param_type)
    df_vars['var_type'] = df_vars['var_type'].progress_apply(reduce_depth_param_type)
    # Fixed typo in the original log message ("Sucssesfull lenient parsing").
    logger.info(f"Successful lenient parses: {lenient_parse_count}")
    return df_param, df_ret, df_vars
def filter_functions(df: pd.DataFrame, funcs=('str', 'unicode', 'repr', 'len', 'doc', 'sizeof')) -> pd.DataFrame:
    """
    Filters functions which are not useful.

    :param df: dataframe to use
    :param funcs: function names to drop (tuple default fixes the original's
        mutable default argument)
    :return: filtered dataframe
    """
    df_len = len(df)
    logger.info(f"Functions before dropping on __*__ methods {len(df):,}")
    df = df[~df['name'].isin(funcs)]
    logger.info(f"Functions after dropping on __*__ methods {len(df):,}")
    logger.info(f"Filtered out {df_len - len(df):,} functions.")
    return df
def filter_variables(df_vars: pd.DataFrame, types=('Any', 'None', 'object', 'type', 'Type[Any]',
                                                   'Type[cls]', 'Type[type]', 'Type', 'TypeVar', 'Optional[Any]')):
    """
    Filters out variables with specified types such as Any or None

    :param df_vars: variables dataframe
    :param types: type annotations to drop (tuple default fixes the original's
        mutable default argument)
    :return: filtered dataframe
    """
    df_var_len = len(df_vars)
    logger.info(f"Variables before dropping on {','.join(types)}: {len(df_vars):,}")
    df_vars = df_vars[~df_vars['var_type'].isin(types)]
    logger.info(f"Variables after dropping on {','.join(types)}: {len(df_vars):,}")
    logger.info(f"Filtered out {df_var_len - len(df_vars):,} variables.")
    return df_vars
def filter_var_wo_type(df_vars: pd.DataFrame) -> pd.DataFrame:
    """
    Filters out variables without a type

    :param df_vars: variables dataframe
    :return: dataframe containing only rows with a non-null ``var_type``
    """
    df_var_len = len(df_vars)
    logger.info(f"Variables before dropping: {len(df_vars):,}")
    df_vars = df_vars[df_vars['var_type'].notnull()]
    # Fixed log message: the original read "after dropping dropping".
    logger.info(f"Variables after dropping: {len(df_vars):,}")
    logger.info(f"Filtered out {df_var_len - len(df_vars):,} variables w/o a type.")
    return df_vars
def gen_argument_df(df: pd.DataFrame) -> pd.DataFrame:
    """
    Generates a new dataframe containing all argument data.

    Expands each function row into one row per (typed) argument, skipping
    ``self`` and arguments annotated as Any/None/object or left untyped.

    :param df: dataframe for which to extract argument
    :return: argument dataframe
    """
    IGNORED_TYPES = {'Any', 'None', 'object'}
    rows = []
    for _, fn in tqdm(df.iterrows(), total=len(df.index), desc="Processing arguments"):
        # The per-row list columns are stored as their repr strings; parse once.
        arg_names = literal_eval(fn['arg_names'])
        arg_types = literal_eval(fn['arg_types'])
        arg_descrs = literal_eval(fn['arg_descrs'])
        args_occur = literal_eval(fn['args_occur'])
        other_args = " ".join(n for n in arg_names if n != 'self')
        for pos, name in enumerate(arg_names):
            # Ignore self arg
            if name == 'self':
                continue
            annot = arg_types[pos].strip('\"')
            # Ignore Any or None types
            # TODO: Ignore also object type
            # TODO: Ignore Optional[Any]
            if annot == '' or annot in IGNORED_TYPES:
                continue
            # Strip a leading `self` token from each usage snippet of the argument.
            occurrences = [o.replace('self', '').strip() if 'self' in o.split() else o
                           for o in args_occur[pos]]
            rows.append([fn['file'], fn['name'], fn['func_descr'], name, annot,
                         arg_descrs[pos], other_args, occurrences])
    return pd.DataFrame(rows, columns=['file', 'func_name', 'func_descr', 'arg_name', 'arg_type',
                                       'arg_comment', 'other_args', 'arg_occur'])
def filter_return_dp(df: pd.DataFrame) -> pd.DataFrame:
    """
    Filters return datapoints based on a set of criteria.

    Drops functions (1) without a return type, (2) whose return type is the
    string 'nan', 'None' or 'Any', and (3) without any return expression.
    """
    logger.info(f"Functions before dropping on return type {len(df):,}")
    df = df.dropna(subset=['return_type'])
    logger.info(f"Functions after dropping on return type {len(df):,}")
    logger.info(f"Functions before dropping nan, None, Any return type {len(df):,}")
    df = df[~df['return_type'].isin(('nan', 'None', 'Any'))]
    logger.info(f"Functions after dropping nan return type {len(df):,}")
    logger.info(f"Functions before dropping on empty return expression {len(df):,}")
    # return_expr is a stringified list; keep rows whose parsed list is non-empty.
    df = df[df['return_expr'].apply(lambda expr: len(literal_eval(expr)) > 0)]
    logger.info(f"Functions after dropping on empty return expression {len(df):,}")
    return df
def format_df(df: pd.DataFrame) -> pd.DataFrame:
    """Parse the stringified list columns of *df* back into Python lists, in place.

    CSV round-trips store ``arg_names``/``arg_types``/``arg_descrs``/``return_expr``
    as repr strings; this converts each cell back via ``ast.literal_eval`` and
    returns the same (mutated) dataframe.
    """
    for column in ('arg_names', 'arg_types', 'arg_descrs', 'return_expr'):
        df[column] = df[column].apply(literal_eval)
    return df
def encode_all_types(df_ret: pd.DataFrame, df_params: pd.DataFrame, df_vars: pd.DataFrame,
                     output_dir: str):
    """
    Label-encodes every type annotation (returns, arguments and variables) with a
    single shared LabelEncoder and writes a descending-frequency table of all
    types to ``<output_dir>/_most_frequent_all_types.csv``.

    :return: (df_ret, df_params, le_all); df_vars is encoded in place as well
    """
    all_types = np.concatenate((df_ret['return_type'].values, df_params['arg_type'].values,
                                df_vars['var_type'].values), axis=0)
    le_all = LabelEncoder()
    le_all.fit(all_types)
    df_ret['return_type_enc_all'] = le_all.transform(df_ret['return_type'].values)
    df_params['arg_type_enc_all'] = le_all.transform(df_params['arg_type'].values)
    df_vars['var_type_enc_all'] = le_all.transform(df_vars['var_type'].values)
    unq_types, count_unq_types = np.unique(all_types, return_counts=True)
    # Fix: sort enc/type/count together by descending frequency. The original
    # zipped encodings computed in np.unique's lexicographic order against
    # count-sorted type/count columns, so 'enc' did not match 'type' per row.
    order = np.argsort(count_unq_types)[::-1]
    sorted_types = unq_types[order]
    pd.DataFrame(
        list(zip(le_all.transform(sorted_types), sorted_types, count_unq_types[order])),
        columns=['enc', 'type', 'count']
    ).to_csv(os.path.join(output_dir, "_most_frequent_all_types.csv"), index=False)
    logger.info(f"Total no. of extracted types: {len(all_types):,}")
    logger.info(f"Total no. of unique types: {len(unq_types):,}")
    return df_ret, df_params, le_all
def gen_most_frequent_avl_types(avl_types_dir, output_dir, top_n: int = 1024) -> pd.DataFrame:
    """
    It generates top n most frequent available types

    Reads every file in ``avl_types_dir`` (one type name per line), counts type
    occurrences across all files, writes the top-n table to
    ``output_dir/top_<n>_types.csv`` and returns it.

    :param top_n: number of most frequent types to keep
    :return: dataframe with 'Types' and 'Count' columns, most frequent first
    """
    # All available types across all Python projects
    collected_types = []
    for entry in os.listdir(avl_types_dir):
        path = os.path.join(avl_types_dir, entry)
        if not os.path.isfile(path):
            continue
        with open(path, 'r') as type_file:
            collected_types.extend(type_file.read().splitlines())
    most_common = Counter(collected_types).most_common(top_n)
    df = pd.DataFrame.from_records(most_common, columns=['Types', 'Count'])
    df.to_csv(os.path.join(output_dir, "top_%d_types.csv" % top_n), index=False)
    return df
def encode_aval_types(df_param: pd.DataFrame, df_ret: pd.DataFrame, df_var: pd.DataFrame,
                      df_aval_types: pd.DataFrame):
    """
    It encodes the type of parameters and return according to visible type hints

    Adds ``param_aval_enc``/``ret_aval_enc``/``var_aval_enc`` columns holding the
    index of the first matching entry in ``df_aval_types['Types']``.

    NOTE(review): returns only (df_param, df_ret) even though df_var is also
    encoded (in place) — callers relying on the 2-tuple keep working, but the
    asymmetry looks accidental; confirm.
    """
    types = df_aval_types['Types'].tolist()
    def trans_aval_type(x):
        # NOTE(review): `x in t` is a substring test, so e.g. 'int' matches
        # 'Optional[int]' — confirm this is intended rather than `x == t`.
        for i, t in enumerate(types):
            if x in t:
                return i
        # NOTE(review): falls back to the LAST index (len(types) - 1), while the
        # comment below speaks of inserting "n + 1" — confirm which is intended.
        return len(types) - 1
    # If the arg type doesn't exist in top_n available types, we insert n + 1 into the vector as it represents the other type.
    df_param['param_aval_enc'] = df_param['arg_type'].progress_apply(trans_aval_type)
    df_ret['ret_aval_enc'] = df_ret['return_type'].progress_apply(trans_aval_type)
    df_var['var_aval_enc'] = df_var['var_type'].progress_apply(trans_aval_type)
    return df_param, df_ret
def preprocess_ext_fns(output_dir: str, limit: int = None):
"""
Applies preprocessing steps to the extracted functions
"""
if not (os.path.exists(os.path.join(output_dir, "all_fns.csv")) and os.path.exists(os.path.join(output_dir, "all_vars.csv"))):
logger.info("Merging JSON projects")
merged_jsons = merge_jsons_to_dict(list_files(os.path.join(output_dir, 'processed_projects'), ".json"), limit)
logger.info("Creating functions' Dataframe")
create_dataframe_fns(output_dir, merged_jsons)
logger.info("Creating variables' Dataframe")
create_dataframe_vars(output_dir, merged_jsons)
logger.info("Loading vars & fns Dataframe")
processed_proj_fns = pd.read_csv(os.path.join(output_dir, "all_fns.csv"), low_memory=False)
processed_proj_vars = pd.read_csv(os.path.join(output_dir, "all_vars.csv"), low_memory=False)
# Split the processed files into train, validation and test sets
if all(processed_proj_fns['set'].isin(['train', 'valid', 'test'])) and \
all(processed_proj_vars['set'].isin(['train', 'valid', 'test'])):
logger.info("Found the sets split in the input dataset")
train_files = processed_proj_fns['file'][processed_proj_fns['set'] == 'train']
valid_files = processed_proj_fns['file'][processed_proj_fns['set'] == 'valid']
test_files = processed_proj_fns['file'][processed_proj_fns['set'] == 'test']
train_files_vars = processed_proj_vars['file'][processed_proj_vars['set'] == 'train']
valid_files_vars = processed_proj_vars['file'][processed_proj_vars['set'] == 'valid']
test_files_vars = processed_proj_vars['file'][processed_proj_vars['set'] == 'test']
else:
logger.info("Splitting sets randomly")
uniq_files = np.unique(np.concatenate((processed_proj_fns['file'].to_numpy(), processed_proj_vars['file'].to_numpy())))
train_files, test_files = train_test_split(pd.DataFrame(uniq_files, columns=['file']), test_size=0.2)
train_files, valid_files = train_test_split(pd.DataFrame(train_files, columns=['file']), test_size=0.1)
train_files_vars, valid_files_vars, test_files_vars = train_files, valid_files, test_files
df_train = processed_proj_fns[processed_proj_fns['file'].isin(train_files.to_numpy().flatten())]
logger.info(f"No. of functions in train set: {df_train.shape[0]:,}")
df_valid = processed_proj_fns[processed_proj_fns['file'].isin(valid_files.to_numpy().flatten())]
logger.info(f"No. of functions in validation set: {df_valid.shape[0]:,}")
df_test = processed_proj_fns[processed_proj_fns['file'].isin(test_files.to_numpy().flatten())]
logger.info(f"No. of functions in test set: {df_test.shape[0]:,}")
df_var_train = processed_proj_vars[processed_proj_vars['file'].isin(train_files_vars.to_numpy().flatten())]
logger.info(f"No. of variables in train set: {df_var_train.shape[0]:,}")
df_var_valid = processed_proj_vars[processed_proj_vars['file'].isin(valid_files_vars.to_numpy().flatten())]
logger.info(f"No. of variables in validation set: {df_var_valid.shape[0]:,}")
df_var_test = processed_proj_vars[processed_proj_vars['file'].isin(test_files_vars.to_numpy().flatten())]
logger.info(f"No. of variables in test set: {df_var_test.shape[0]:,}")
assert list(set(df_train['file'].tolist()).intersection(set(df_test['file'].tolist()))) == []
assert list(set(df_train['file'].tolist()).intersection(set(df_valid['file'].tolist()))) == []
assert list(set(df_test['file'].tolist()).intersection(set(df_valid['file'].tolist()))) == []
# Exclude variables without a type
processed_proj_vars = filter_var_wo_type(processed_proj_vars)
logger.info(f"Making type annotations consistent")
# Makes type annotations consistent by removing `typing.`, `t.`, and `builtins` from a type.
processed_proj_fns, processed_proj_vars = make_types_consistent(processed_proj_fns, processed_proj_vars)
assert any([bool(regex.match(sub_regex, str(t))) for t in processed_proj_fns['return_type']]) == False
assert any([bool(regex.match(sub_regex, t)) for t in processed_proj_fns['arg_types']]) == False
assert any([bool(regex.match(sub_regex, t)) for t in processed_proj_vars['var_type']]) == False
# Filters variables with type Any or None
processed_proj_vars = filter_variables(processed_proj_vars)
# Filters trivial functions such as `__str__` and `__len__`
processed_proj_fns = filter_functions(processed_proj_fns)
# Extracts type hints for functions' arguments
processed_proj_fns_params = gen_argument_df(processed_proj_fns)
# Filters out functions: (1) without a return type (2) with the return type of Any or None (3) without a return expression
processed_proj_fns = filter_return_dp(processed_proj_fns)
processed_proj_fns | |
"""
Training script. Should be pretty adaptable to whatever.
"""
import argparse
import os
import shutil
import json
from copy import deepcopy
import multiprocessing
import numpy as np
import pandas as pd
import torch
from allennlp.common.params import Params
from allennlp.training.learning_rate_schedulers import LearningRateScheduler
from allennlp.training.optimizers import Optimizer
from torch.nn import DataParallel
from torch.nn.modules import BatchNorm2d
from tqdm import tqdm
from allennlp.nn.util import device_mapping
from vis import grounding_vis
from visualbert.utils.pytorch_misc import time_batch, save_checkpoint, clip_grad_norm, \
restore_checkpoint, print_para, restore_best_checkpoint, restore_checkpoint_flexible, load_state_dict_flexible, compute_score_with_logits
from visualbert.pytorch_pretrained_bert.tokenization import BertTokenizer
from visualbert.dataloaders.vcr import VCR, VCRLoader
try:
from visualbert.dataloaders.coco_dataset import COCODataset
except:
print("Import COCO dataset failed.")
try:
from visualbert.dataloaders.nlvr_dataset import NLVRDataset
except:
print("Import NLVR2 dataset failed.")
try:
from visualbert.dataloaders.vqa_dataset import VQADataset
except:
print("Import VQA dataset failed.")
try:
from visualbert.dataloaders.flickr_dataset import Flickr30kFeatureDataset
except:
print("Import Flickr30K dataset failed.")
from pytorch_pretrained_bert.optimization import BertAdam
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.DEBUG)
from allennlp.models import Model
from visualbert.models.model_wrapper import ModelWrapper
from visualbert.models import model
from attrdict import AttrDict
def check_prob(val_probs, INDEX_OF_CHECKED_SAMPLE):
    """Return the softmax-normalized probabilities for one sample.

    ``val_probs`` holds per-sample log-probabilities; exponentiating and
    renormalizing the selected row yields a distribution summing to 1.
    """
    exponentiated = np.exp(val_probs[INDEX_OF_CHECKED_SAMPLE])
    return exponentiated / np.sum(exponentiated)
# If you want to play with grounding analysis, feel free to use this function!
def grounding_analysis(args, input_batch, output_dict, question_orig, answer_orig, obj_added_index, \
    file_name_list, annot_id_list, b):
    """Render attention-grounding heatmaps for evaluation batch ``b``.

    For each sample, pairs the BERT tokens of the correct answer with the
    detected image-object tags, prunes special/padding tokens, and renders one
    heatmap per attention head of the LAST layer via ``grounding_vis``.

    NOTE(review): `dets2uses` and `tokenizer` used below are not defined
    anywhere in this module as visible here — presumably module globals created
    elsewhere; confirm before running this path.
    """
    if args.orig_or_new == "new":
        # Move tensors needed for visualization to CPU numpy arrays.
        bert_input_ids = input_batch["bert_input_ids"].detach().cpu().numpy()
        labels = input_batch["label"].detach().cpu().numpy()
        objects = input_batch["objects"].detach().cpu().numpy()
        # [-1]: only the last layer's attention maps are visualized.
        attention_weights = output_dict["attention_weights"][-1].detach().cpu().numpy()
        # Slice the whole-dataset metadata lists down to the current batch.
        question_orig_cur = question_orig[b*args.eval_batch_size: b*args.eval_batch_size+len(bert_input_ids)]
        answer_orig_cur = answer_orig[b*args.eval_batch_size: b*args.eval_batch_size+len(bert_input_ids)]
        obj_added_index_cur = obj_added_index[b*args.eval_batch_size: b*args.eval_batch_size+len(bert_input_ids)]
        file_name_list_cur = file_name_list[b*args.eval_batch_size: b*args.eval_batch_size+len(bert_input_ids)]
        annot_id_list_cur = annot_id_list[b*args.eval_batch_size: b*args.eval_batch_size+len(bert_input_ids)]
        if args.addition_annotation_analysis:
            for i in range(len(bert_input_ids)):
                label = labels[i]
                dets2use = dets2uses[i]  # NOTE(review): `dets2uses` undefined in this file — confirm source.
                file_name = file_name_list_cur[i]
                annot_id = annot_id_list_cur[i]
                # Token ids of the ground-truth answer choice.
                right_ans_input_ids = bert_input_ids[i][label]
                # NOTE(review): the *4 presumably assumes four answer candidates per question — confirm.
                attention_weights_i = attention_weights[i*4+label]
                texts = tokenizer.convert_ids_to_tokens(right_ans_input_ids)
                texts, people_names = recover(texts, question_orig_cur[i], answer_orig_cur[i])
                j = 0
                obj_list = []
                # Label each visual token: background, image padding, or detected object tag.
                for obj in objects[i]:
                    if obj == 0:
                        obj_list.append("[BG]")
                    elif obj == -1:
                        obj_list.append("[I_PAD]")
                    else:
                        obj_list.append("["+obj_added_index_cur[i][int(dets2use[j])]+"]\n(image)")
                    j += 1
                texts += obj_list
                # Collect positions of special/padding tokens to drop from both axes.
                indices = []
                for j, token in enumerate(texts):
                    if token == "[CLS]" or token == "[SEP]" or token == "[PAD]" or token == "[I_PAD]" or token == ".":
                        indices.append(j)
                texts = np.delete(texts, indices, axis=0)
                # One heatmap per attention head.
                for j in range(len(attention_weights_i)):
                    attention_temp = np.delete(attention_weights_i[j], indices, axis=0)
                    final_attention = np.delete(attention_temp, indices, axis=1)
                    assert len(texts) == len(final_attention)
                    pos_seg = file_name.find('/')
                    file_name = file_name[pos_seg+1:]
                    grounding_vis(final_attention, texts, file_name.replace(".", "_"+annot_id+"_head_"+str(j)+"_result."), args.region, args.single_or_multiple)
def recover(texts, question_orig, answer_orig):
    """Replace gender-neutral placeholder names in *texts* with person tags.

    VCR substitutes detection tags like ``[person1]`` with neutral first names
    during tokenization; this walks the original question+answer tokens,
    collects the bracketed tags, and maps each neutral name back to the next
    person tag. Mutates and returns *texts* plus the list of tags used.
    """
    # Lower-cased neutral-name vocabulary used by the tokenized text.
    neutral_names = {'casey', 'riley', 'jessie', 'jackie', 'avery', 'jaime', 'peyton',
                     'kerry', 'jody', 'kendall', 'skyler', 'frankie', 'pat', 'quinn'}
    # Bracketed tags in original question+answer order.
    tag_queue = [tok for tok in question_orig + answer_orig if '[' in tok and ']' in tok]
    people_names = []
    for idx, tok in enumerate(texts):
        if tok not in neutral_names:
            continue
        # Discard leading non-person tags until a person tag heads the queue.
        while "person" not in tag_queue[0]:
            if len(tag_queue) == 1:
                tag_queue = []
                break
            tag_queue = tag_queue[1:]
        if tag_queue:
            texts[idx] = tag_queue[0]
            people_names.append(tag_queue[0])
            if len(tag_queue) >= 2:
                tag_queue = tag_queue[1:]
    return texts, people_names
def add_index(obj_orig_list):
    """Append a per-label occurrence counter to every object label.

    E.g. ``[['person', 'person', 'car']]`` -> ``[['person1', 'person2', 'car1']]``,
    so repeated detections of the same class get distinct tags.
    """
    indexed_lists = []
    for labels in obj_orig_list:
        seen = {}
        indexed = []
        for label in labels:
            seen[label] = seen.get(label, 0) + 1
            indexed.append(label + str(seen[label]))
        indexed_lists.append(indexed)
    return indexed_lists
# Command-line interface. `-config` names the experiment config consumed by
# ModelWrapper.read_and_insert_args below; the other flags control output
# location and the grounding-analysis options.
parser = argparse.ArgumentParser(description='train')
parser.add_argument(
    '-folder',
    dest='folder',
    help='folder location',
    type=str,
)
parser.add_argument(
    '-no_tqdm',
    dest='no_tqdm',
    action='store_true',
)
parser.add_argument(
    '-config',
    dest='config',
    help='config location',
    type=str,
)
parser.add_argument(
    '-region',
    dest='region',
    default='any',
    help='region',
    type=str,
)
parser.add_argument(
    '-single_or_multiple',
    dest='single_or_multiple',
    default='single',
    help='single_or_multiple',
    type=str,
)
parser.add_argument(
    '-orig_or_new',
    dest='orig_or_new',
    default='new',
    help='orig_or_new',
    type=str,
)
parser.add_argument(
    '-addition_annotation_analysis',
    dest='addition_annotation_analysis',
    action='store_true',
)
parser.add_argument(
    '-grounding',
    dest='grounding',
    action='store_true',
)
parser.add_argument(
    '-scene',
    dest='scene',
    default='none',
    help='scene',
    type=str,
)
parser.add_argument(
    '-not_use_all_dets',
    dest='not_use_all_dets',
    # NOTE(review): store_false means passing -not_use_all_dets sets the value
    # to False (a double negative); it is forwarded as only_use_relevant_dets
    # in get_dataset_loader — confirm the intended polarity.
    action='store_false'
)
args = parser.parse_args()
# Merge the CLI args with the experiment config file into one args object.
args = ModelWrapper.read_and_insert_args(args, args.config)
#####################################################
# create_flag records whether the output folder had to be created (1 = new).
if os.path.exists(args.folder):
    create_flag = 0
else:
    create_flag = 1
    print("Making directories")
    os.makedirs(args.folder, exist_ok=True)
import sys
# Pick the first unused run_<i>.log filename so reruns never clobber old logs.
run_log_counter = 0
while(os.path.exists(args.folder + '/run_{}.log'.format(run_log_counter))):
    run_log_counter += 1
file_log = open(args.folder + '/run_{}.log'.format(run_log_counter),'w') # File where you need to keep the logs
file_log.write("")
class Unbuffered:
    """Write-through stdout proxy: flushes after every write and mirrors all
    output into the module-level ``file_log`` run log."""
    def __init__(self, stream):
        # The wrapped stream (normally the original sys.stdout).
        self.stream = stream
    def write(self, data):
        self.stream.write(data)
        self.stream.flush()
        file_log.write(data) # Write the data of stdout here to a text file as well
    def flush(self):
        # No-op: every write() above is flushed eagerly.
        pass
# Tee all stdout through the run log, then fail fast when no GPU is available.
sys.stdout = Unbuffered(sys.stdout)
NUM_GPUS = torch.cuda.device_count()
NUM_CPUS = multiprocessing.cpu_count()
if NUM_GPUS == 0:
    raise ValueError("you need gpus!")
def _to_gpu(td):
    """Move every tensor in the batch dict onto the GPU (single-GPU path).

    Optionally downcasts to fp16 first. With multiple GPUs the dict is returned
    untouched because DataParallel scatters the batch itself. The 'metadata'
    entry and None values are left alone; dict-valued entries are moved
    element-wise.
    """
    if args.get("fp16", False):
        _to_fp16(td)
    if NUM_GPUS > 1:
        return td
    for key, value in td.items():
        if key == 'metadata' or value is None:
            continue
        if isinstance(value, dict):
            td[key] = {inner_key: inner_val.cuda(non_blocking=True)
                       for inner_key, inner_val in value.items()}
        else:
            td[key] = value.cuda(non_blocking=True)
    return td
def _to_fp16(td):
for k in td:
if isinstance(td[k], torch.FloatTensor):
td[k] = td[k].to(dtype=torch.float16)
# Data-loader worker counts come from the config; TEST_DATA_READING forces
# single-process loading for easier debugging of dataset code.
num_workers = args.get("num_workers", 2)
val_workers = args.get("val_workers", 0)
TEST_DATA_READING = False
if TEST_DATA_READING:
    num_workers = 0
print(f"Using {num_workers} workers out of {NUM_CPUS} possible", flush=True)
# Per-GPU batch size. NOTE(review): this dict is recreated (shadowed) inside
# get_dataset_loader; this module-level copy looks redundant — confirm.
loader_params = {'batch_size': args.train_batch_size // NUM_GPUS, 'num_gpus':NUM_GPUS, 'num_workers':num_workers}
def get_dataset_loader(args, dataset_name):
    """Build data loaders for ``dataset_name``.

    Returns:
        For "vcr": (train_loader, orig_val_loader, val_loader,
        val_addition_loader, test_loader, train_set_size).
        For "coco"/"nlvr"/"vqa"/"flickr": (train_loader, val_loader,
        test_loader, train_set_size).

    Raises:
        ValueError: if ``dataset_name`` is not one of the known datasets.
    """
    if dataset_name == "vcr":
        train, orig_val, val, val_addition, test = VCR.splits(
            mode='rationale' if args.rationale else 'answer',
            region_keywords=args.region,
            scene=args.scene,
            single_or_multiple=args.single_or_multiple,
            only_use_relevant_dets=args.not_use_all_dets,
            do_lower_case=args.do_lower_case,
            bert_model_name=args.bert_model_name,
            max_seq_length=args.max_seq_length,
            pretraining=args.pretraining,
            pretraining_include_qa_and_qar=args.pretraining_include_qa_and_qar,
            complete_shuffle=args.get("complete_shuffle", False),
            use_alignment=args.get('use_alignment', False),
            add_all_features=args.add_all_features,
            answer_labels_path=args.get("answer_labels_path", None),
            vcr_annots_dir=args.vcr_annots_dir,
            vcr_image_dir=args.vcr_image_dir,
        )
    elif dataset_name == "coco":
        train, val, test = COCODataset.splits(args)
    elif dataset_name == "nlvr":
        train, val, test = NLVRDataset.splits(args)
    elif dataset_name == "vqa":
        train, val, test = VQADataset.splits(args)
    elif dataset_name == "flickr":
        train, val, test = Flickr30kFeatureDataset.splits(args)
    else:
        # Was `assert(0)`: asserts disappear under `python -O`, raise instead.
        raise ValueError("unknown dataset: {}".format(dataset_name))
    # Per-GPU batch sizes.  (The previous version rebuilt an identical
    # `loader_params` dict here, shadowing the module-level one; removed.)
    train_loader_params = {
        'batch_size': args.train_batch_size // NUM_GPUS,
        'num_gpus': NUM_GPUS,
        'num_workers': num_workers,
    }
    loader_params_val = {
        'batch_size': args.eval_batch_size // NUM_GPUS,
        'num_gpus': NUM_GPUS,
        'num_workers': num_workers,
    }
    # Validation/test loaders use the smaller val_workers pool to avoid
    # oversubscribing the CPU while training workers are active.
    val_loader_params = deepcopy(loader_params_val)
    val_loader_params["num_workers"] = val_workers
    test_loader_params = deepcopy(loader_params_val)
    test_loader_params["num_workers"] = val_workers
    train_loader = VCRLoader.from_dataset(train, **train_loader_params)
    val_loader = VCRLoader.from_dataset(val, **val_loader_params)
    test_loader = VCRLoader.from_dataset(test, **test_loader_params)
    train_set_size = len(train)
    if dataset_name == "vcr":
        orig_val_loader_params = deepcopy(loader_params_val)
        orig_val_loader_params["num_workers"] = val_workers
        val_addition_loader_params = deepcopy(loader_params_val)
        val_addition_loader_params["num_workers"] = val_workers
        orig_val_loader = VCRLoader.from_dataset(orig_val, **orig_val_loader_params)
        val_addition_loader = VCRLoader.from_dataset(val_addition, **val_addition_loader_params)
        print("orig_val size", len(orig_val))
        print("val size", len(val))
        print("val-addition size", len(val_addition))
        return (train_loader, orig_val_loader, val_loader,
                val_addition_loader, test_loader, train_set_size)
    return train_loader, val_loader, test_loader, train_set_size
# Build the data loaders; VCR additionally yields the original-val and
# val-addition splits.
print(args)
if args.dataset == "vcr":
    train_loader, orig_val_loader, val_loader, val_addition_loader, test_loader, train_set_size = get_dataset_loader(args, args.dataset)
else:
    train_loader, val_loader, test_loader, train_set_size = get_dataset_loader(args, args.dataset)
# Number of batches between metric prints (and metric resets).
ARGS_RESET_EVERY = args.get("print_every", 100)
train_model = ModelWrapper(args, train_set_size)
# Loading from pre-trained model
if args.restore_bin:
    train_model.restore_checkpoint_pretrained(args.restore_bin)
# Loading from previous checkpoint
'''if create_flag == 0:
    start_epoch, val_metric_per_epoch = train_model.restore_checkpoint(serialization_dir=args.folder, epoch_to_load = args.get("epoch_to_load", None))
    if val_metric_per_epoch is None:
        val_metric_per_epoch = []
else:
    create_flag = 1
    start_epoch, val_metric_per_epoch = 0, []'''
start_epoch, val_metric_per_epoch = 0, []
shutil.copy2(args.config, args.folder)  # Always copy the config
if args.get("freeze_detector", True):
    train_model.freeze_detector()
param_shapes = print_para(train_model.model)
print(args)
print("########### Starting from {}".format(start_epoch))
num_batches = 0
stop_epoch = args.num_train_epochs
save_every = args.get("save_every", None)
# COCO object categories (index 0 = background), ordered by category id.
with open('../dataloaders/cocoontology.json', 'r') as f1:
    coco = json.load(f1)
coco_objects = ['__background__'] + [x['name'] for k, x in sorted(coco.items(), key=lambda x: int(x[0]))]
tokenizer = BertTokenizer.from_pretrained(args.bert_model_name, do_lower_case=args.do_lower_case)
for epoch_num in range(start_epoch, stop_epoch):
train_results = []
norms = []
train_model.model.train()
if not args.get("skip_training", False):
for b, (time_per_batch, batch) in enumerate(time_batch(tqdm(train_loader), reset_every=ARGS_RESET_EVERY)):
del batch["dets2use"]
batch = _to_gpu(batch)
output_dict = train_model.step(batch)
num_batches += 1
train_results.append(pd.Series({'loss': output_dict['loss'].mean().item(),
'crl': output_dict.get("cnn_regularization_loss", 0.0),
'next_sentence_loss': output_dict["next_sentence_loss"].mean().item() if "next_sentence_loss" in output_dict else 0.0,
'masked_lm_loss': output_dict["masked_lm_loss"].mean().item() if "masked_lm_loss" in output_dict else 0.0,
'accuracy': (train_model.model.module).get_metrics(
reset=(b % ARGS_RESET_EVERY) == 0)[
'accuracy'],
'sec_per_batch': time_per_batch,
'hr_per_epoch': len(train_loader) * time_per_batch / 3600,
}))
if b % ARGS_RESET_EVERY == 0 and b > 0:
print("e{:2d}b{:5d}/{:5d}. \nsumm:\n{}\n~~~~~~~~~~~~~~~~~~\n".format(
epoch_num, b, len(train_loader),
pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean(),
), flush=True)
if save_every is not None and b % save_every == 0 and b != 0:
train_model.save_checkpoint_step(args.folder, b, epoch_num)
print("---\nTRAIN EPOCH {:2d}:\n{}\n----".format(epoch_num, pd.DataFrame(train_results).mean()))
try:
### This is the eval part
val_probs = []
val_labels = []
val_size = 0.0
val_loss_sum = 0.0
val_acc = 0.0
val_acc_upper = 0.0
val_instance_counter = 0.0
val_next_sentence_loss_sum = 0.0
train_model.eval()
val_counter = 0
keywords_list = []
regions_list = []
if not args.skip_training:
val_loader = orig_val_loader
val_dataset = orig_val_loader.dataset
else:
if args.orig_or_new == "new":
annot_fn = "val.jsonl"
with open(os.path.join(args.vcr_annots_dir, annot_fn), 'r') as f:
all_items = [json.loads(s) for s in f]
keywords_list = [it["keywords"] for it in all_items]
regions_list = [it["region"] for it in all_items]
if args.addition_annotation_analysis:
annot_fn = "val_addition_single.jsonl"
val_loader = val_addition_loader
val_dataset = val_addition_loader.dataset
if args.grounding:
question_orig = []
answer_orig = []
obj_orig_list = []
file_name_list = []
annot_id_list = []
items_temp = []
if args.region != "any":
for item in all_items:
if args.region in item["region"]:
items_temp.append(item)
else:
for item in all_items:
items_temp.append(item)
if args.addition_annotation_analysis and args.single_or_multiple == "multiple":
image_fn_list = [item["img_fn"] for item in items_temp]
with open(os.path.join(args.vcr_annots_dir, 'val.jsonl'), 'r') as f:
temp_val = [json.loads(s) for s in f]
temp = []
for item in temp_val:
if item["img_fn"] in image_fn_list:
temp.append(item)
items_temp = temp
if args.scene != "none":
for item in items_temp:
if args.scene | |
SP.array of I0 interaction variables to be included in the
background model when testing for interaction with Inters
K1r: [N x N] SP.array of LMM-covariance/kinship koefficients (optional)
If not provided, then linear regression analysis is performed
K1c: [P x P] SP.array of LMM-covariance/kinship koefficients (optional)
If not provided, then linear regression analysis is performed
K2r: [N x N] SP.array of LMM-covariance/kinship koefficients (optional)
If not provided, then linear regression analysis is performed
K2c: [P x P] SP.array of LMM-covariance/kinship koefficients (optional)
If not provided, then linear regression analysis is performed
covar_type: type of covaraince to use. Default 'freeform'. possible values are
'freeform': free form optimization,
'fixed': use a fixed matrix specified in covar_K0,
'diag': optimize a diagonal matrix,
'lowrank': optimize a low rank matrix. The rank of the lowrank part is specified in the variable rank,
'lowrank_id': optimize a low rank matrix plus the weight of a constant diagonal matrix. The rank of the lowrank part is specified in the variable rank,
'lowrank_diag': optimize a low rank matrix plus a free diagonal matrix. The rank of the lowrank part is specified in the variable rank,
'block': optimize the weight of a constant P x P block matrix of ones,
'block_id': optimize the weight of a constant P x P block matrix of ones plus the weight of a constant diagonal matrix,
'block_diag': optimize the weight of a constant P x P block matrix of ones plus a free diagonal matrix,
rank: rank of a possible lowrank component (default 1)
NumIntervalsDelta0: number of steps for delta optimization on the null model (100)
NumIntervalsDeltaAlt:number of steps for delta optimization on the alt. model (0 - no optimization)
searchDelta: Carry out delta optimization on the alternative model? if yes We use NumIntervalsDeltaAlt steps
Returns:
pv: P-values of the interaction test
pv0: P-values of the null model
pvAlt: P-values of the alternative model
"""
S=snps.shape[1]
#0. checks
N = phenos.shape[0]
P = phenos.shape[1]
if K1r==None:
K1r = SP.dot(snps,snps.T)
else:
assert K1r.shape[0]==N, 'K1r: dimensions dismatch'
assert K1r.shape[1]==N, 'K1r: dimensions dismatch'
if K2r==None:
K2r = SP.eye(N)
else:
assert K2r.shape[0]==N, 'K2r: dimensions dismatch'
assert K2r.shape[1]==N, 'K2r: dimensions dismatch'
covs,Acovs = updateKronCovs(covs,Acovs,N,P)
#Asnps can be several designs
if (Asnps0 is None):
Asnps0 = [SP.ones([1,P])]
if Asnps1 is None:
Asnps1 = [SP.eye([P])]
if (type(Asnps0)!=list):
Asnps0 = [Asnps0]
if (type(Asnps1)!=list):
Asnps1 = [Asnps1]
assert (len(Asnps0)==1) and (len(Asnps1)>0), "need at least one Snp design matrix for null and alt model"
#one row per column design matrix
pv = SP.zeros((len(Asnps1),snps.shape[1]))
lrt = SP.zeros((len(Asnps1),snps.shape[1]))
pvAlt = SP.zeros((len(Asnps1),snps.shape[1]))
lrtAlt = SP.zeros((len(Asnps1),snps.shape[1]))
#1. run GP model to infer suitable covariance structure
if K1c==None or K2c==None:
vc = estimateKronCovariances(phenos=phenos, K1r=K1r, K2r=K2r, K1c=K1c, K2c=K2c, covs=covs, Acovs=Acovs, covar_type=covar_type, rank=rank)
K1c = vc.getEstTraitCovar(0)
K2c = vc.getEstTraitCovar(1)
else:
assert K1c.shape[0]==P, 'K1c: dimensions dismatch'
assert K1c.shape[1]==P, 'K1c: dimensions dismatch'
assert K2c.shape[0]==P, 'K2c: dimensions dismatch'
assert K2c.shape[1]==P, 'K2c: dimensions dismatch'
#2. run kroneckerLMM for null model
lmm = limix.CKroneckerLMM()
lmm.setK1r(K1r)
lmm.setK1c(K1c)
lmm.setK2r(K2r)
lmm.setK2c(K2c)
lmm.setSNPs(snps)
#add covariates
for ic in range(len(Acovs)):
lmm.addCovariates(covs[ic],Acovs[ic])
lmm.setPheno(phenos)
#delta serch on alt. model?
if searchDelta:
lmm.setNumIntervalsAlt(NumIntervalsDeltaAlt)
lmm.setNumIntervals0_inter(NumIntervalsDeltaAlt)
else:
lmm.setNumIntervalsAlt(0)
lmm.setNumIntervals0_inter(0)
lmm.setNumIntervals0(NumIntervalsDelta0)
#add SNP design
lmm.setSNPcoldesign0_inter(Asnps0[0])
for iA in range(len(Asnps1)):
lmm.setSNPcoldesign(Asnps1[iA])
lmm.process()
pvAlt[iA,:] = lmm.getPv()[0]
pv[iA,:] = lmm.getPv()[1]
pv0 = lmm.getPv()[2]
return pv,pv0,pvAlt
## KroneckerLMM functions
def kronecker_lmm(snps, phenos, covs=None, Acovs=None, Asnps=None, K1r=None, K1c=None, K2r=None, K2c=None, covar_type='lowrank_diag', rank=1, NumIntervalsDelta0=100, NumIntervalsDeltaAlt=0, searchDelta=False):
    """
    simple wrapper for kroneckerLMM code

    Args:
        snps:   [N x S] SP.array of S SNPs for N individuals (test SNPs)
        phenos: [N x P] SP.array of P phenotypes for N individuals
        covs:   list of SP.arrays holding covariates. Each covs[i] has one corresponding Acovs[i]
        Acovs:  list of SP.arrays holding the phenotype design matrices for covariates.
                Each covs[i] has one corresponding Acovs[i].
        Asnps:  single SP.array of I0 interaction variables to be included in the
                background model when testing for interaction with Inters.
                If not provided, the alternative model will be the independent model
        K1r:    [N x N] SP.array of LMM-covariance/kinship coefficients (optional)
                If not provided, then linear regression analysis is performed
        K1c:    [P x P] SP.array of LMM-covariance/kinship coefficients (optional)
                If not provided, then linear regression analysis is performed
        K2r:    [N x N] SP.array of LMM-covariance/kinship coefficients (optional)
                If not provided, then linear regression analysis is performed
        K2c:    [P x P] SP.array of LMM-covariance/kinship coefficients (optional)
                If not provided, then linear regression analysis is performed
        covar_type: type of covariance to use. Default 'freeform'. possible values are
                'freeform':     free form optimization,
                'fixed':        use a fixed matrix specified in covar_K0,
                'diag':         optimize a diagonal matrix,
                'lowrank':      optimize a low rank matrix. The rank of the lowrank part is specified in the variable rank,
                'lowrank_id':   optimize a low rank matrix plus the weight of a constant diagonal matrix. The rank of the lowrank part is specified in the variable rank,
                'lowrank_diag': optimize a low rank matrix plus a free diagonal matrix. The rank of the lowrank part is specified in the variable rank,
                'block':        optimize the weight of a constant P x P block matrix of ones,
                'block_id':     optimize the weight of a constant P x P block matrix of ones plus the weight of a constant diagonal matrix,
                'block_diag':   optimize the weight of a constant P x P block matrix of ones plus a free diagonal matrix,
        rank:   rank of a possible lowrank component (default 1)
        NumIntervalsDelta0:  number of steps for delta optimization on the null model (100)
        NumIntervalsDeltaAlt: number of steps for delta optimization on the alt. model (0 - no optimization)
        searchDelta: Boolean indicator if delta is optimized during SNP testing (default False)

    Returns:
        CKroneckerLMM object
        P-values for all SNPs from likelihood ratio test
    """
    # 0. checks.  NOTE: use `is None`, not `== None`: comparing an SP.array
    # with `==` is elementwise, so `K1c == None or K2c == None` raised
    # "truth value of an array is ambiguous" whenever one matrix was given.
    N = phenos.shape[0]
    P = phenos.shape[1]
    if K1r is None:
        K1r = SP.dot(snps, snps.T)
    else:
        assert K1r.shape[0] == N, 'K1r: dimensions mismatch'
        assert K1r.shape[1] == N, 'K1r: dimensions mismatch'
    if K2r is None:
        K2r = SP.eye(N)
    else:
        assert K2r.shape[0] == N, 'K2r: dimensions mismatch'
        assert K2r.shape[1] == N, 'K2r: dimensions mismatch'
    covs, Acovs = updateKronCovs(covs, Acovs, N, P)
    # Asnps can be several designs
    if Asnps is None:
        Asnps = [SP.ones([1, P])]
    if not isinstance(Asnps, list):
        Asnps = [Asnps]
    assert len(Asnps) > 0, "need at least one Snp design matrix"
    # one row per column design matrix
    pv = SP.zeros((len(Asnps), snps.shape[1]))
    # 1. run GP model to infer suitable covariance structure
    if K1c is None or K2c is None:
        vc = estimateKronCovariances(phenos=phenos, K1r=K1r, K2r=K2r, K1c=K1c, K2c=K2c, covs=covs, Acovs=Acovs, covar_type=covar_type, rank=rank)
        K1c = vc.getEstTraitCovar(0)
        K2c = vc.getEstTraitCovar(1)
    else:
        assert K1c.shape[0] == P, 'K1c: dimensions mismatch'
        assert K1c.shape[1] == P, 'K1c: dimensions mismatch'
        assert K2c.shape[0] == P, 'K2c: dimensions mismatch'
        assert K2c.shape[1] == P, 'K2c: dimensions mismatch'
    # 2. run kroneckerLMM
    lmm = limix.CKroneckerLMM()
    lmm.setK1r(K1r)
    lmm.setK1c(K1c)
    lmm.setK2r(K2r)
    lmm.setK2c(K2c)
    lmm.setSNPs(snps)
    # add covariates
    for ic in range(len(Acovs)):
        lmm.addCovariates(covs[ic], Acovs[ic])
    lmm.setPheno(phenos)
    # delta search on alt. model?
    if searchDelta:
        lmm.setNumIntervalsAlt(NumIntervalsDeltaAlt)
    else:
        lmm.setNumIntervalsAlt(0)
    lmm.setNumIntervals0(NumIntervalsDelta0)
    for iA in range(len(Asnps)):
        # add SNP design
        lmm.setSNPcoldesign(Asnps[iA])
        lmm.process()
        pv[iA, :] = lmm.getPv()[0]
    return lmm, pv
def simple_lmm(snps, pheno, K=None, covs=None, test='lrt', NumIntervalsDelta0=100, NumIntervalsDeltaAlt=0, searchDelta=False):
    """
    Univariate fixed effects linear mixed model test for all SNPs

    Args:
        snps:   [N x S] SP.array of S SNPs for N individuals
        pheno:  [N x 1] SP.array of 1 phenotype for N individuals
        K:      [N x N] SP.array of LMM-covariance/kinship coefficients (optional)
                If not provided, then linear regression analysis is performed
        covs:   [N x D] SP.array of D covariates for N individuals
        test:   'lrt' for likelihood ratio test (default) or 'f' for F-test
        NumIntervalsDelta0:  number of steps for delta optimization on the null model (100)
        NumIntervalsDeltaAlt: number of steps for delta optimization on the alt. model (0 - no optimization)
        searchDelta: Carry out delta optimization on the alternative model? if yes we use NumIntervalsDeltaAlt steps

    Returns:
        limix LMM object

    Raises:
        NotImplementedError: if ``test`` is neither 'lrt' nor 'f'.
    """
    t0 = time.time()
    if K is None:
        # Identity covariance: reduces to plain linear regression.
        K = SP.eye(snps.shape[0])
    lm = limix.CLMM()
    lm.setK(K)
    lm.setSNPs(snps)
    lm.setPheno(pheno)
    if covs is None:
        # Default covariates: intercept only.
        covs = SP.ones((snps.shape[0], 1))
    lm.setCovs(covs)
    if test == 'lrt':
        lm.setTestStatistics(0)
    elif test == 'f':
        lm.setTestStatistics(1)
    else:
        # Report the offending value in the exception instead of print()ing
        # it separately, so the information survives in tracebacks/logs.
        raise NotImplementedError("only f or lrt are implemented, got %r" % (test,))
    # set number of delta grid optimizations: full grid on the null model,
    # optional (searchDelta) grid on the alternative model.
    lm.setNumIntervals0(NumIntervalsDelta0)
    if searchDelta:
        lm.setNumIntervalsAlt(NumIntervalsDeltaAlt)
    else:
        lm.setNumIntervalsAlt(0)
    lm.process()
    t1 = time.time()
    print(("finished GWAS testing in %.2f seconds" % (t1 - t0)))
    return lm
#TODO: we need to fix. THis does not work as interact_GxE is not existing
#I vote we also use **kw_args to forward parameters to interact_Gxe?
def interact_GxG(pheno,snps1,snps2=None,K=None,covs=None):
"""
Epistasis test between | |
"""Channel.
A channel is used to send values to streams.
The stream will iterate over incoming events in the channel.
"""
import asyncio
from typing import (
Any,
Awaitable,
Callable,
Mapping,
MutableSet,
Optional,
Set,
cast,
)
from weakref import WeakSet
from mode import Seconds, get_logger, want_seconds
from mode.utils.futures import maybe_async, stampede
from mode.utils.queues import ThrowableQueue
from .events import Event
from .types import (
AppT,
ChannelT,
CodecArg,
EventT,
FutureMessage,
K,
Message,
MessageSentCallback,
ModelArg,
PendingMessage,
RecordMetadata,
StreamT,
TP,
V,
)
from .types.core import HeadersArg, OpenHeadersArg, prepare_headers
from .types.tuples import _PendingMessage_to_Message
__all__ = ['Channel']
logger = get_logger(__name__)
class Channel(ChannelT):
"""Create new channel.
Arguments:
app: The app that created this channel (``app.channel()``)
key_type: The Model used for keys in this channel.
value_type: The Model used for values in this channel.
maxsize: The maximum number of messages this channel can hold.
If exceeded any new ``put`` call will block until a message
is removed from the channel.
is_iterator: When streams iterate over a channel they will call
``stream.clone(is_iterator=True)`` so this attribute
denotes that this channel instance is currently being iterated
over.
active_partition: Set of active topic partitions this
channel instance is assigned to.
loop: The :mod:`asyncio` event loop to use.
"""
app: AppT
key_type: Optional[ModelArg]
value_type: Optional[ModelArg]
is_iterator: bool
_queue: Optional[ThrowableQueue]
_root: Optional['Channel']
_subscribers: MutableSet['Channel']
    def __init__(self,
                 app: AppT,
                 *,
                 key_type: ModelArg = None,
                 value_type: ModelArg = None,
                 is_iterator: bool = False,
                 queue: ThrowableQueue = None,
                 maxsize: int = None,
                 root: ChannelT = None,
                 active_partitions: Set[TP] = None,
                 loop: asyncio.AbstractEventLoop = None) -> None:
        self.app = app
        self.loop = loop
        self.key_type = key_type
        self.value_type = value_type
        self.is_iterator = is_iterator
        # Buffer backing the channel; created lazily by the `queue`
        # property when None (see its comment on event-loop access).
        self._queue = queue
        self.maxsize = maxsize
        # `deliver` is compiled into a closure (see _compile_deliver).
        self.deliver = self._compile_deliver()  # type: ignore
        # Clones keep a reference to the channel they were cloned from.
        self._root = cast(Channel, root)
        self.active_partitions = active_partitions
        # Weak references: iterating clones may be garbage collected freely.
        self._subscribers = WeakSet()
@property
def queue(self) -> ThrowableQueue:
"""Return the underlying queue/buffer backing this channel."""
if self._queue is None:
# this should only be set after clone = channel.__aiter__()
# which means the loop is not accessed by merely defining
# a channel at module scope.
maxsize = self.maxsize
if maxsize is None:
maxsize = self.app.conf.stream_buffer_maxsize
self._queue = self.app.FlowControlQueue(
maxsize=maxsize,
loop=self.loop,
clear_on_resume=True,
)
return self._queue
def clone(self, *, is_iterator: bool = None, **kwargs: Any) -> ChannelT:
"""Create clone of this channel.
Arguments:
is_iterator: Set to True if this is now a channel
that is being iterated over.
Keyword Arguments:
**kwargs: Any keyword arguments passed will override any
of the arguments supported by
:class:`Channel.__init__ <Channel>`.
"""
is_it = is_iterator if is_iterator is not None else self.is_iterator
subchannel: ChannelT = self._clone(is_iterator=is_it, **kwargs)
if is_it:
(self._root or self)._subscribers.add(cast(Channel, subchannel))
# make sure queue is created at this point
# ^ it's a cached_property
subchannel.queue
return subchannel
def clone_using_queue(self, queue: asyncio.Queue) -> ChannelT:
"""Create clone of this channel using specific queue instance."""
return self.clone(queue=queue, is_iterator=True)
def _clone(self, **kwargs: Any) -> ChannelT:
return type(self)(**{**self._clone_args(), **kwargs})
def _clone_args(self) -> Mapping:
# How to create a copy of this channel.
return {
'app': self.app,
'loop': self.loop,
'key_type': self.key_type,
'value_type': self.value_type,
'maxsize': self.maxsize,
'root': self._root if self._root is not None else self,
'queue': None,
'active_partitions': self.active_partitions,
}
    def stream(self, **kwargs: Any) -> StreamT:
        """Create stream reading from this channel."""
        # Delegates to the app so the stream is registered with it.
        return self.app.stream(self, **kwargs)
    def get_topic_name(self) -> str:
        """Get the topic name, or raise if this is not a named channel.

        Raises:
            NotImplementedError: always for a plain in-memory channel;
                only :class:`~faust.Topic` subclasses have a name.
        """
        raise NotImplementedError('Channels are unnamed topics')
async def send(self,
*,
key: K = None,
value: V = None,
partition: int = None,
timestamp: float = None,
headers: HeadersArg = None,
key_serializer: CodecArg = None,
value_serializer: CodecArg = None,
callback: MessageSentCallback = None,
force: bool = False) -> Awaitable[RecordMetadata]:
"""Send message to channel."""
return await self._send_now(
key,
value,
partition=partition,
timestamp=timestamp,
headers=headers,
key_serializer=key_serializer,
value_serializer=value_serializer,
callback=callback,
)
    def send_soon(self,
                  *,
                  key: K = None,
                  value: V = None,
                  partition: int = None,
                  timestamp: float = None,
                  headers: HeadersArg = None,
                  key_serializer: CodecArg = None,
                  value_serializer: CodecArg = None,
                  callback: MessageSentCallback = None,
                  force: bool = False) -> FutureMessage:
        """Produce message by adding to buffer.

        This method is only supported by :class:`~faust.Topic`.

        Raises:
            NotImplementedError: always for in-memory channel.
        """
        raise NotImplementedError()
def as_future_message(
self,
key: K = None,
value: V = None,
partition: int = None,
timestamp: float = None,
headers: HeadersArg = None,
key_serializer: CodecArg = None,
value_serializer: CodecArg = None,
callback: MessageSentCallback = None) -> FutureMessage:
"""Create promise that message will be transmitted."""
return FutureMessage(
PendingMessage(
self,
self.prepare_key(key, key_serializer),
self.prepare_value(value, value_serializer),
key_serializer=key_serializer,
value_serializer=value_serializer,
partition=partition,
timestamp=timestamp,
headers=self.prepare_headers(headers),
callback=callback,
# Python 3.6.0: NamedTuple doesn't support optional fields
# [ask]
topic=None,
offset=None,
),
)
def prepare_headers(
self, headers: Optional[HeadersArg]) -> OpenHeadersArg:
"""Prepare ``headers`` passed before publishing."""
if headers is not None:
return prepare_headers(headers)
return {}
async def _send_now(
self,
key: K = None,
value: V = None,
partition: int = None,
timestamp: float = None,
headers: HeadersArg = None,
key_serializer: CodecArg = None,
value_serializer: CodecArg = None,
callback: MessageSentCallback = None) -> Awaitable[RecordMetadata]:
return await self.publish_message(
self.as_future_message(
key, value, partition, timestamp, headers,
key_serializer, value_serializer, callback))
async def publish_message(self, fut: FutureMessage,
wait: bool = True) -> Awaitable[RecordMetadata]:
"""Publish message to channel.
This is the interface used by ``topic.send()``, etc.
to actually publish the message on the channel
after being buffered up or similar.
It takes a :class:`~faust.types.FutureMessage` object,
which contains all the information required to send
the message, and acts as a promise that is resolved
once the message has been fully transmitted.
"""
event = self._create_event(
fut.message.key, fut.message.value, fut.message.headers,
message=_PendingMessage_to_Message(fut.message))
await self.put(event)
topic, partition = tp = TP(
fut.message.topic or '<anon>',
fut.message.partition or -1)
return await self._finalize_message(
fut, RecordMetadata(
topic=topic,
partition=partition,
topic_partition=tp,
offset=-1,
timestamp=fut.message.timestamp,
timestamp_type=1,
),
)
async def _finalize_message(self, fut: FutureMessage,
result: RecordMetadata) -> FutureMessage:
fut.set_result(result)
if fut.message.callback:
await maybe_async(fut.message.callback(fut))
return fut
    @stampede
    async def maybe_declare(self) -> None:
        """Declare/create this channel, but only if it doesn't exist.

        No-op for in-memory channels; ``@stampede`` ensures concurrent
        callers share a single invocation.
        """
        ...
    async def declare(self) -> None:
        """Declare/create this channel.

        This is used to create this channel on a server,
        if that is required to operate it.

        No-op here: an in-memory channel needs no server-side resource.
        """
        ...
    def prepare_key(self, key: K, key_serializer: CodecArg) -> Any:
        """Prepare key before it is sent to this channel.

        :class:`~faust.Topic` uses this to implement serialization of keys
        sent to the channel.
        """
        # Default: pass-through; ``key_serializer`` is unused here.
        return key
    def prepare_value(self, value: V, value_serializer: CodecArg) -> Any:
        """Prepare value before it is sent to this channel.

        :class:`~faust.Topic` uses this to implement serialization of values
        sent to the channel.
        """
        # Default: pass-through; ``value_serializer`` is unused here.
        return value
async def decode(self, message: Message, *,
propagate: bool = False) -> EventT:
"""Decode :class:`~faust.types.Message` into :class:`~faust.Event`."""
return self._create_event(
message.key, message.value, message.headers, message=message)
    async def deliver(self, message: Message) -> None:  # pragma: no cover
        """Deliver message to queue from consumer.

        This is called by the consumer to deliver the message
        to the channel.

        Placeholder only: the instance attribute set in ``__init__``
        (the closure from :meth:`_compile_deliver`) shadows this method.
        """
        ...  # closure compiled at __init__
def _compile_deliver(self) -> Callable[[Message], Awaitable[None]]:
put = None
async def deliver(message: Message) -> None:
nonlocal put
if put is None:
# NOTE circumvents self.put, using queue directly
put = self.queue.put
event = await self.decode(message)
await put(event)
return deliver
    def _create_event(self,
                      key: K,
                      value: V,
                      headers: Optional[HeadersArg],
                      message: Message) -> EventT:
        """Wrap a decoded key/value/headers and its source message in an Event."""
        return Event(self.app, key, value, headers, message)
async def put(self, value: Any) -> None:
"""Put event onto this channel."""
root = self._root if self._root is not None else self
for subscriber in root._subscribers:
await subscriber.queue.put(value)
async def get(self, *, timeout: Seconds = None) -> Any:
"""Get the next :class:`~faust.Event` received on this channel."""
timeout_: float = want_seconds(timeout)
if timeout_:
return await asyncio.wait_for(self.queue.get(), timeout=timeout_)
return await self.queue.get()
    def empty(self) -> bool:
        """Return :const:`True` if the queue is empty."""
        # Note: accessing `queue` may lazily create it (see the property).
        return self.queue.empty()
    async def on_key_decode_error(self, exc: Exception,
                                  message: Message) -> None:
        """Unable to decode the key of an item in the queue.

        See Also:
            :meth:`on_decode_error`
        """
        # Log/handle the error, then propagate it to consumers of this
        # channel by throwing it into the queue.
        await self.on_decode_error(exc, message)
        await self.throw(exc)
    async def on_value_decode_error(self, exc: Exception,
                                    message: Message) -> None:
        """Unable to decode the value of an item in the queue.

        See Also:
            :meth:`on_decode_error`
        """
        # Log/handle the error, then propagate it to consumers of this
        # channel by throwing it into the queue.
        await self.on_decode_error(exc, message)
        await self.throw(exc)
async def on_decode_error(self, exc: Exception, message: Message) -> None:
"""Signal that there was an error reading an event in the queue.
When a message in | |
<filename>position inference/csv_to_db_correctBAD.py
import os
import sqlite3
import sys
import time

import numpy as np
import pandas as pd
bTime = time.time()  # wall-clock start of the run

# Create the database file in exclusive-creation mode ("x") so an existing
# database is never silently clobbered.
try:
    f = open("../DATAcorrected.db", "xt")
    f.close()
except FileExistsError:
    # Fixed: the original bare `except:` caught *every* exception (including
    # PermissionError and SystemExit) and printed a misleading message; only a
    # pre-existing file should trigger this abort. Also exit non-zero so
    # callers/scripts can detect the failure.
    print("\nDelete DATAcorrected.db in order to use the program")
    sys.exit(1)

db = sqlite3.connect("../DATAcorrected.db")
db.execute("""
CREATE TABLE IF NOT EXISTS data (
matchId INT,
win BIT,
duration INT,
champIdA1 INT,
roleA1 INT,
killsA1 INT, deathsA1 INT, assistsA1 INT,
keyRuneA1 INT,
spell1A1 INT, spell2A1 INT,
ccStatA1 INT, ccTimeA1 INT,
visionScoreA1 INT, wardsPlacedA1 INT, wardsKilledA1 INT,
healingA1 INT,
damageTakenA1 INT, mitigatedA1 INT,
totalDealtA1 INT, totalMagicA1 INT,
champsDealtA1 INT, champsMagicA1 INT,
turretsDealtA1 INT, objectiveDealtA1 INT,
csA1 INT, csJunStolenA1 INT,
turretKA1 INT, inhibKA1 INT,
goldA1 INT,
xpD0_10A1 INT, xpD10_20A1 INT, xpD20_30A1 INT, xpD30_endA1 INT,
xpDiffD0_10A1 INT, xpDiffD10_20A1 INT, xpDiffD20_30A1 INT, xpDiffD30_endA1 INT,
csD0_10A1 INT, csD10_20A1 INT, csD20_30A1 INT, csD30_endA1 INT,
csDiffD0_10A1 INT, csDiffD10_20A1 INT, csDiffD20_30A1 INT, csDiffD30_endA1 INT,
goldD0_10A1 INT, goldD10_20A1 INT, goldD20_30A1 INT, goldD30_endA1 INT,
takenD0_10A1 INT, takenD10_20A1 INT, takenD20_30A1 INT, takenD30_endA1 INT,
dealtD0_10A1 INT, dealtD10_20A1 INT, dealtD20_30A1 INT, dealtD30_endA1 INT,
champIdA2 INT,
roleA2 INT,
killsA2 INT, deathsA2 INT, assistsA2 INT,
keyRuneA2 INT,
spell1A2 INT, spell2A2 INT,
ccStatA2 INT, ccTimeA2 INT,
visionScoreA2 INT, wardsPlacedA2 INT, wardsKilledA2 INT,
healingA2 INT,
damageTakenA2 INT, mitigatedA2 INT,
totalDealtA2 INT, totalMagicA2 INT,
champsDealtA2 INT, champsMagicA2 INT,
turretsDealtA2 INT, objectiveDealtA2 INT,
csA2 INT, csJunStolenA2 INT,
turretKA2 INT, inhibKA2 INT,
goldA2 INT,
xpD0_10A2 INT, xpD10_20A2 INT, xpD20_30A2 INT, xpD30_endA2 INT,
xpDiffD0_10A2 INT, xpDiffD10_20A2 INT, xpDiffD20_30A2 INT, xpDiffD30_endA2 INT,
csD0_10A2 INT, csD10_20A2 INT, csD20_30A2 INT, csD30_endA2 INT,
csDiffD0_10A2 INT, csDiffD10_20A2 INT, csDiffD20_30A2 INT, csDiffD30_endA2 INT,
goldD0_10A2 INT, goldD10_20A2 INT, goldD20_30A2 INT, goldD30_endA2 INT,
takenD0_10A2 INT, takenD10_20A2 INT, takenD20_30A2 INT, takenD30_endA2 INT,
dealtD0_10A2 INT, dealtD10_20A2 INT, dealtD20_30A2 INT, dealtD30_endA2 INT,
champIdA3 INT,
roleA3 INT,
killsA3 INT, deathsA3 INT, assistsA3 INT,
keyRuneA3 INT,
spell1A3 INT, spell2A3 INT,
ccStatA3 INT, ccTimeA3 INT,
visionScoreA3 INT, wardsPlacedA3 INT, wardsKilledA3 INT,
healingA3 INT,
damageTakenA3 INT, mitigatedA3 INT,
totalDealtA3 INT, totalMagicA3 INT,
champsDealtA3 INT, champsMagicA3 INT,
turretsDealtA3 INT, objectiveDealtA3 INT,
csA3 INT, csJunStolenA3 INT,
turretKA3 INT, inhibKA3 INT,
goldA3 INT,
xpD0_10A3 INT, xpD10_20A3 INT, xpD20_30A3 INT, xpD30_endA3 INT,
xpDiffD0_10A3 INT, xpDiffD10_20A3 INT, xpDiffD20_30A3 INT, xpDiffD30_endA3 INT,
csD0_10A3 INT, csD10_20A3 INT, csD20_30A3 INT, csD30_endA3 INT,
csDiffD0_10A3 INT, csDiffD10_20A3 INT, csDiffD20_30A3 INT, csDiffD30_endA3 INT,
goldD0_10A3 INT, goldD10_20A3 INT, goldD20_30A3 INT, goldD30_endA3 INT,
takenD0_10A3 INT, takenD10_20A3 INT, takenD20_30A3 INT, takenD30_endA3 INT,
dealtD0_10A3 INT, dealtD10_20A3 INT, dealtD20_30A3 INT, dealtD30_endA3 INT,
champIdA4 INT,
roleA4 INT,
killsA4 INT, deathsA4 INT, assistsA4 INT,
keyRuneA4 INT,
spell1A4 INT, spell2A4 INT,
ccStatA4 INT, ccTimeA4 INT,
visionScoreA4 INT, wardsPlacedA4 INT, wardsKilledA4 INT,
healingA4 INT,
damageTakenA4 INT, mitigatedA4 INT,
totalDealtA4 INT, totalMagicA4 INT,
champsDealtA4 INT, champsMagicA4 INT,
turretsDealtA4 INT, objectiveDealtA4 INT,
csA4 INT, csJunStolenA4 INT,
turretKA4 INT, inhibKA4 INT,
goldA4 INT,
xpD0_10A4 INT, xpD10_20A4 INT, xpD20_30A4 INT, xpD30_endA4 INT,
xpDiffD0_10A4 INT, xpDiffD10_20A4 INT, xpDiffD20_30A4 INT, xpDiffD30_endA4 INT,
csD0_10A4 INT, csD10_20A4 INT, csD20_30A4 INT, csD30_endA4 INT,
csDiffD0_10A4 INT, csDiffD10_20A4 INT, csDiffD20_30A4 INT, csDiffD30_endA4 INT,
goldD0_10A4 INT, goldD10_20A4 INT, goldD20_30A4 INT, goldD30_endA4 INT,
takenD0_10A4 INT, takenD10_20A4 INT, takenD20_30A4 INT, takenD30_endA4 INT,
dealtD0_10A4 INT, dealtD10_20A4 INT, dealtD20_30A4 INT, dealtD30_endA4 INT,
champIdA5 INT,
roleA5 INT,
killsA5 INT, deathsA5 INT, assistsA5 INT,
keyRuneA5 INT,
spell1A5 INT, spell2A5 INT,
ccStatA5 INT, ccTimeA5 INT,
visionScoreA5 INT, wardsPlacedA5 INT, wardsKilledA5 INT,
healingA5 INT,
damageTakenA5 INT, mitigatedA5 INT,
totalDealtA5 INT, totalMagicA5 INT,
champsDealtA5 INT, champsMagicA5 INT,
turretsDealtA5 INT, objectiveDealtA5 INT,
csA5 INT, csJunStolenA5 INT,
turretKA5 INT, inhibKA5 INT,
goldA5 INT,
xpD0_10A5 INT, xpD10_20A5 INT, xpD20_30A5 INT, xpD30_endA5 INT,
xpDiffD0_10A5 INT, xpDiffD10_20A5 INT, xpDiffD20_30A5 INT, xpDiffD30_endA5 INT,
csD0_10A5 INT, csD10_20A5 INT, csD20_30A5 INT, csD30_endA5 INT,
csDiffD0_10A5 INT, csDiffD10_20A5 INT, csDiffD20_30A5 INT, csDiffD30_endA5 INT,
goldD0_10A5 INT, goldD10_20A5 INT, goldD20_30A5 INT, goldD30_endA5 INT,
takenD0_10A5 INT, takenD10_20A5 INT, takenD20_30A5 INT, takenD30_endA5 INT,
dealtD0_10A5 INT, dealtD10_20A5 INT, dealtD20_30A5 INT, dealtD30_endA5 INT,
champIdE1 INT,
roleE1 INT,
killsE1 INT, deathsE1 INT, assistsE1 INT,
keyRuneE1 INT,
spell1E1 INT, spell2E1 INT,
ccStatE1 INT, ccTimeE1 INT,
visionScoreE1 INT, wardsPlacedE1 INT, wardsKilledE1 INT,
healingE1 INT,
damageTakenE1 INT, mitigatedE1 INT,
totalDealtE1 INT, totalMagicE1 INT,
champsDealtE1 INT, champsMagicE1 INT,
turretsDealtE1 INT, objectiveDealtE1 INT,
csE1 INT, csJunStolenE1 INT,
turretKE1 INT, inhibKE1 INT,
goldE1 INT,
xpD0_10E1 INT, xpD10_20E1 INT, xpD20_30E1 INT, xpD30_endE1 INT,
xpDiffD0_10E1 INT, xpDiffD10_20E1 INT, xpDiffD20_30E1 INT, xpDiffD30_endE1 INT,
csD0_10E1 INT, csD10_20E1 INT, csD20_30E1 INT, csD30_endE1 INT,
csDiffD0_10E1 INT, csDiffD10_20E1 INT, csDiffD20_30E1 INT, csDiffD30_endE1 INT,
goldD0_10E1 INT, goldD10_20E1 INT, goldD20_30E1 INT, goldD30_endE1 INT,
takenD0_10E1 INT, takenD10_20E1 INT, takenD20_30E1 INT, takenD30_endE1 INT,
dealtD0_10E1 INT, dealtD10_20E1 INT, dealtD20_30E1 INT, dealtD30_endE1 INT,
champIdE2 INT,
roleE2 INT,
killsE2 INT, deathsE2 INT, assistsE2 INT,
keyRuneE2 INT,
spell1E2 INT, spell2E2 INT,
ccStatE2 INT, ccTimeE2 INT,
visionScoreE2 INT, wardsPlacedE2 INT, wardsKilledE2 INT,
healingE2 INT,
damageTakenE2 INT, mitigatedE2 INT,
totalDealtE2 INT, totalMagicE2 INT,
champsDealtE2 INT, champsMagicE2 INT,
turretsDealtE2 INT, objectiveDealtE2 INT,
csE2 INT, csJunStolenE2 INT,
turretKE2 INT, inhibKE2 INT,
goldE2 INT,
xpD0_10E2 INT, xpD10_20E2 INT, xpD20_30E2 INT, xpD30_endE2 INT,
xpDiffD0_10E2 INT, xpDiffD10_20E2 INT, xpDiffD20_30E2 INT, xpDiffD30_endE2 INT,
csD0_10E2 INT, csD10_20E2 INT, csD20_30E2 INT, csD30_endE2 INT,
csDiffD0_10E2 INT, csDiffD10_20E2 INT, csDiffD20_30E2 INT, csDiffD30_endE2 INT,
goldD0_10E2 INT, goldD10_20E2 INT, goldD20_30E2 INT, goldD30_endE2 INT,
takenD0_10E2 INT, takenD10_20E2 INT, takenD20_30E2 INT, takenD30_endE2 INT,
dealtD0_10E2 INT, dealtD10_20E2 INT, dealtD20_30E2 INT, dealtD30_endE2 INT,
champIdE3 INT,
roleE3 INT,
killsE3 INT, deathsE3 INT, assistsE3 INT,
keyRuneE3 INT,
spell1E3 INT, spell2E3 INT,
ccStatE3 INT, ccTimeE3 INT,
visionScoreE3 INT, wardsPlacedE3 INT, wardsKilledE3 INT,
healingE3 INT,
damageTakenE3 INT, mitigatedE3 INT,
totalDealtE3 INT, totalMagicE3 INT,
champsDealtE3 INT, champsMagicE3 INT,
turretsDealtE3 INT, objectiveDealtE3 INT,
csE3 INT, csJunStolenE3 INT,
turretKE3 INT, inhibKE3 INT,
goldE3 INT,
xpD0_10E3 INT, xpD10_20E3 INT, xpD20_30E3 INT, xpD30_endE3 INT,
xpDiffD0_10E3 INT, xpDiffD10_20E3 INT, xpDiffD20_30E3 INT, xpDiffD30_endE3 INT,
csD0_10E3 INT, csD10_20E3 INT, csD20_30E3 INT, csD30_endE3 INT,
csDiffD0_10E3 INT, csDiffD10_20E3 INT, csDiffD20_30E3 INT, csDiffD30_endE3 INT,
goldD0_10E3 INT, goldD10_20E3 INT, goldD20_30E3 INT, goldD30_endE3 INT,
takenD0_10E3 INT, takenD10_20E3 INT, takenD20_30E3 INT, takenD30_endE3 INT,
dealtD0_10E3 INT, dealtD10_20E3 INT, dealtD20_30E3 INT, dealtD30_endE3 INT,
champIdE4 INT,
roleE4 INT,
killsE4 INT, deathsE4 INT, assistsE4 INT,
keyRuneE4 INT,
spell1E4 INT, spell2E4 INT,
ccStatE4 INT, ccTimeE4 INT,
visionScoreE4 INT, wardsPlacedE4 INT, wardsKilledE4 INT,
healingE4 INT,
damageTakenE4 INT, mitigatedE4 INT,
totalDealtE4 INT, totalMagicE4 INT,
champsDealtE4 INT, champsMagicE4 INT,
turretsDealtE4 INT, objectiveDealtE4 INT,
csE4 INT, csJunStolenE4 INT,
turretKE4 INT, inhibKE4 INT,
goldE4 INT,
xpD0_10E4 INT, xpD10_20E4 INT, xpD20_30E4 INT, xpD30_endE4 INT,
xpDiffD0_10E4 INT, xpDiffD10_20E4 INT, xpDiffD20_30E4 INT, xpDiffD30_endE4 INT,
csD0_10E4 INT, csD10_20E4 INT, csD20_30E4 INT, csD30_endE4 INT,
csDiffD0_10E4 INT, csDiffD10_20E4 INT, csDiffD20_30E4 INT, csDiffD30_endE4 INT,
goldD0_10E4 INT, goldD10_20E4 INT, goldD20_30E4 INT, goldD30_endE4 INT,
takenD0_10E4 INT, takenD10_20E4 INT, takenD20_30E4 INT, takenD30_endE4 INT,
dealtD0_10E4 INT, dealtD10_20E4 INT, dealtD20_30E4 INT, dealtD30_endE4 INT,
champIdE5 INT,
roleE5 INT,
killsE5 INT, deathsE5 INT, assistsE5 INT,
keyRuneE5 INT,
spell1E5 INT, spell2E5 INT,
ccStatE5 INT, ccTimeE5 INT,
visionScoreE5 INT, wardsPlacedE5 INT, wardsKilledE5 INT,
healingE5 INT,
damageTakenE5 INT, mitigatedE5 INT,
totalDealtE5 INT, totalMagicE5 INT,
champsDealtE5 INT, champsMagicE5 | |
RuntimeError("There isn't enough documents in the database for training the Top2Vec model.")
else:
if len(docs) > 1000:
self.train(docs=list(map(lambda d: d.text, docs))) # training the Top2Vec model with the uploaded documents
else:
raise RuntimeError("There isn't enough documents in the database or in the upload for training the Top2Vec model.")
def train(self, docs=None):
    """Train the Top2Vec model and persist it to ``self.saved_model_path``.

    :param docs: Optional list of raw document texts. When omitted, every
        document is pulled from the Document Store instead.
    """
    if docs is None:
        # No external corpus supplied: use everything in the Document Store.
        logger.info("Getting all documents from Document Store.")
        stored = self.document_store.get_all_documents(return_embedding=False)
        docs = [d.text for d in stored]
        logger.info(f"Beginning training of Top2Vec with {len(docs)} internal documents.")
    else:
        logger.info(f"Beginning training of Top2Vec with {len(docs)} external documents.")
    self.model = Top2Vec2(
        docs,
        embedding_model=self.embedding_model,
        # we don't need to keep the documents as the search isn't performed through top2vec
        keep_documents=False,
        workers=None,
        use_embedding_model_tokenizer=True,
        umap_args=self.umap_args,
        hdbscan_args=self.hdbscan_args,
    )
    # Collapse the discovered topics down to 20 broad ones, then persist.
    self.model.hierarchical_topic_reduction(20)
    self.model.save(self.saved_model_path)
class CrossEncoderReRanker(BaseReader):
    """
    A re-ranker based on a BERT Cross-Encoder. The query and a candidate result are passed
    simultaneously to the transformer network, which then outputs a single score between
    0 and 1 indicating how relevant the document is for the given query. Read the article
    at https://www.sbert.net/examples/applications/retrieve_rerank/README.html for further
    details.
    """

    def __init__(
        self,
        cross_encoder: str = "cross-encoder/ms-marco-TinyBERT-L-6",
        top_k: int = 10
    ):
        """
        :param cross_encoder: Local path or name of cross-encoder model in Hugging Face's model hub such as ``'cross-encoder/ms-marco-TinyBERT-L-6'``
        :param top_k: The maximum number of answers to return
        """
        # # save init parameters to enable export of component config as YAML
        # self.set_config(
        #     cross_encoder=cross_encoder, use_gpu=use_gpu, top_k=top_k
        # )
        self.top_k = top_k
        # sentence-transformers is an optional dependency; fail with an
        # actionable message instead of a bare ModuleNotFoundError.
        try:
            from sentence_transformers import CrossEncoder
        except ImportError:
            raise ImportError("Can't find package `sentence-transformers` \n"
                              "You can install it via `pip install sentence-transformers` \n"
                              "For details see https://github.com/UKPLab/sentence-transformers ")
        # pretrained embedding models coming from: https://github.com/UKPLab/sentence-transformers#pretrained-models
        # CrossEncoder uses cuda device if available
        self.cross_encoder = CrossEncoder(cross_encoder)

    def predict(self, query: str, documents: List[Document], top_k: Optional[int] = None):
        """
        Use the cross-encoder to score the supplied Documents against a query.
        Returns dictionaries containing answers sorted by (desc.) probability.

        Example:

         ```python
            |{
            |    'query': 'What is the capital of the United States?',
            |    'answers':[
            |                 {'answer': 'Washington, D.C. (also known as simply Washington or D.C.,
            |                  and officially as the District of Columbia) is the capital of
            |                  the United States. It is a federal district. The President of
            |                  the USA and many major national government offices are in the
            |                  territory. This makes it the political center of the United
            |                  States of America.',
            |                 'score': 0.717,
            |                 'document_id': 213
            |                 },...
            |              ]
            |}
         ```

        :param query: Query string
        :param documents: List of Document in which to search for the answer
        :param top_k: The maximum number of answers to return
        :return: Dict containing query and answers
        """
        if top_k is None:
            top_k = self.top_k
        # Score every document with the cross_encoder
        cross_inp = [[query, doc.text] for doc in documents]
        cross_scores = self.cross_encoder.predict(cross_inp)
        answers = [
            {
                'answer': documents[idx].text,
                'score': cross_scores[idx],
                'document_id': documents[idx].id,
                'meta': documents[idx].meta
            }
            for idx in range(len(documents))
        ]
        # Sort answers by the cross-encoder scores and select top-k
        answers = sorted(
            answers, key=lambda k: k["score"], reverse=True
        )
        answers = answers[:top_k]
        results = {"query": query,
                   "answers": answers}
        return results

    def predict_batch(self, query_doc_list: List[dict], top_k: Optional[int] = None, batch_size: Optional[int] = None):
        """Batch prediction is not implemented for this re-ranker."""
        raise NotImplementedError("Batch prediction not yet available in CrossEncoderReRanker.")
class OpenDistroElasticsearchDocumentStore2(OpenDistroElasticsearchDocumentStore):
def query_by_embedding(self,
                       query_emb: np.ndarray,
                       filters: Optional[Union[List[dict], Dict[str, List[str]]]] = None,
                       top_k: int = 10,
                       index: Optional[str] = None,
                       return_embedding: Optional[bool] = None) -> List[Document]:
    """
    Find the document that is most similar to the provided `query_emb` by using a vector similarity metric.

    :param query_emb: Embedding of the query (e.g. gathered from DPR)
    :param filters: Optional filters to narrow down the search space. Follows Open Distro for
        Elasticsearch syntax: https://opendistro.github.io/for-elasticsearch-docs/docs/elasticsearch/bool/. Example:
        [
            {
                "terms": {
                    "author": [
                        "<NAME>",
                        "<NAME>",
                    ]
                }
            },
            {
                "range": {
                    "timestamp": {
                        "gte": "01-01-2021",
                        "lt": "01-06-2021"
                    }
                }
            }
        ]
    :param top_k: How many documents to return
    :param index: Index name for storing the docs and metadata
    :param return_embedding: To return document embedding
    :return: List of Document hits, scored by vector similarity
    """
    if index is None:
        index = self.index
    if return_embedding is None:
        return_embedding = self.return_embedding
    if not self.embedding_field:
        raise RuntimeError("Please specify arg `embedding_field` in ElasticsearchDocumentStore()")
    else:
        # +1 in similarity to avoid negative numbers (for cosine sim)
        body = {
            "size": top_k,
            "query": {
                "bool": {
                    "must": [
                        self._get_vector_similarity_query(query_emb, top_k)
                    ]
                }
            }
        }
        if filters:
            # _filter_adapter accepts both the legacy dict format and the
            # Open Distro list-of-clauses format.
            body = self._filter_adapter(body, filters)
        # Build the _source exclusion list: honour self.excluded_meta_data but
        # add/remove the embedding field according to return_embedding.
        excluded_meta_data: Optional[list] = None
        if self.excluded_meta_data:
            excluded_meta_data = deepcopy(self.excluded_meta_data)
            if return_embedding is True and self.embedding_field in excluded_meta_data:
                excluded_meta_data.remove(self.embedding_field)
            elif return_embedding is False and self.embedding_field not in excluded_meta_data:
                excluded_meta_data.append(self.embedding_field)
        elif return_embedding is False:
            excluded_meta_data = [self.embedding_field]
        if excluded_meta_data:
            body["_source"] = {"excludes": excluded_meta_data}
        logger.debug(f"Retriever query: {body}")
        # Long request_timeout: ANN queries over large indices can be slow.
        result = self.client.search(index=index, body=body, request_timeout=300)["hits"]["hits"]
        documents = [
            self._convert_es_hit_to_document(hit, adapt_score_for_embedding=True, return_embedding=return_embedding)
            for hit in result
        ]
        return documents
def get_document_count(
    self,
    filters: Optional[Union[List[dict], Dict[str, List[str]]]] = None,
    index: Optional[str] = None,
    only_documents_without_embedding: bool = False
) -> int:
    """
    Return the number of documents in the document store.

    :param filters: Optional filters restricting which documents are counted.
    :param index: Index to count in; defaults to ``self.index``.
    :param only_documents_without_embedding: Count only documents that have
        no value stored in the embedding field.
    """
    target_index = index or self.index
    query_body: dict = {"query": {"bool": {}}}
    if only_documents_without_embedding:
        query_body["query"]["bool"]["must_not"] = [
            {"exists": {"field": self.embedding_field}}
        ]
    if filters:
        query_body = self._filter_adapter(query_body, filters)
    response = self.client.count(index=target_index, body=query_body)
    return response["count"]
def get_all_documents(
    self,
    index: Optional[str] = None,
    filters: Optional[Union[List[dict], Dict[str, List[str]]]] = None,
    return_embedding: Optional[bool] = None,
    batch_size: int = 10_000,
) -> List[Document]:
    """
    Get documents from the document store as a fully materialized list.

    :param index: Name of the index to get the documents from. If None, the
                  DocumentStore's default index (self.index) will be used.
    :param filters: Optional filters to narrow down the documents to return.
                    Example: {"name": ["some", "more"], "category": ["only_one"]}
    :param return_embedding: Whether to return the document embeddings.
    :param batch_size: When working with large number of documents, batching can help reduce memory footprint.
    """
    # Delegate to the generator variant and drain it into a list.
    return list(
        self.get_all_documents_generator(
            index=index, filters=filters, return_embedding=return_embedding, batch_size=batch_size
        )
    )
def get_all_documents_generator(
    self,
    index: Optional[str] = None,
    filters: Optional[Union[List[dict], Dict[str, List[str]]]] = None,
    return_embedding: Optional[bool] = None,
    batch_size: int = 10_000,
) -> Generator[Document, None, None]:
    """
    Lazily yield documents from the document store.

    Under-the-hood, documents are fetched in batches and yielded one by one,
    so a large corpus can be processed without loading it all into memory.

    :param index: Name of the index to get the documents from. If None, the
                  DocumentStore's default index (self.index) will be used.
    :param filters: Optional filters to narrow down the documents to return.
                    Example: {"name": ["some", "more"], "category": ["only_one"]}
    :param return_embedding: Whether to return the document embeddings.
    :param batch_size: When working with large number of documents, batching can help reduce memory footprint.
    """
    target_index = self.index if index is None else index
    with_embedding = self.return_embedding if return_embedding is None else return_embedding
    hits = self._get_all_documents_in_index(index=target_index, filters=filters, batch_size=batch_size)
    for hit in hits:
        yield self._convert_es_hit_to_document(hit, return_embedding=with_embedding)
def _get_all_documents_in_index(
    self,
    index: str,
    filters: Optional[Union[List[dict], Dict[str, List[str]]]] = None,
    batch_size: int = 10_000,
    only_documents_without_embedding: bool = False,
) -> Generator[dict, None, None]:
    """
    Yield every raw Elasticsearch hit in *index*, scrolled in batches.
    """
    query_body: dict = {"query": {"bool": {}}}
    if filters:
        query_body = self._filter_adapter(query_body, filters)
    if only_documents_without_embedding:
        query_body["query"]["bool"]["must_not"] = [
            {"exists": {"field": self.embedding_field}}
        ]
    # Long scroll window ("1d") so slow consumers don't lose the cursor.
    yield from scan(self.client, query=query_body, index=index, size=batch_size, scroll="1d")
def _filter_adapter(
self,
query_body: dict,
filters: Optional[Union[List[dict], Dict[str, List[str]]]] = None,
) -> dict:
# To not disrupt any of the code of Haystack we can accept both
# the old filters format or the new format. The following if-else
# clause deals with the operations for the right format.
if isinstance(filters, dict):
filter_clause = []
for key, values in filters.items():
if type(values) != list:
raise ValueError(
f'Wrong filter format for key "{key}": Please provide a list of allowed values for each key. '
'Example: {"name": ["some", "more"], "category": ["only_one"]} ')
filter_clause.append(
| |
# Copyright 2016-2017 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import itertools
import operator
import zlib
import jmespath
from c7n.actions import BaseAction, ModifyVpcSecurityGroupsAction
from c7n.exceptions import PolicyValidationError, ClientError
from c7n.filters import (
DefaultVpcBase, Filter, ValueFilter)
import c7n.filters.vpc as net_filters
from c7n.filters.iamaccess import CrossAccountAccessFilter
from c7n.filters.related import RelatedResourceFilter
from c7n.filters.revisions import Diff
from c7n import query, resolver
from c7n.manager import resources
from c7n.resources.securityhub import OtherResourcePostFinding, PostFinding
from c7n.utils import (
chunks, local_session, type_schema, get_retry, parse_cidr)
from c7n.resources.aws import shape_validate
from c7n.resources.shield import IsShieldProtected, SetShieldProtection
@resources.register('vpc')
class Vpc(query.QueryResourceManager):
    """EC2 VPC resource manager (enumerated via ec2 describe_vpcs)."""

    class resource_type(query.TypeInfo):
        # Metadata driving c7n's generic query/enumeration machinery.
        service = 'ec2'
        arn_type = 'vpc'
        enum_spec = ('describe_vpcs', 'Vpcs', None)
        name = id = 'VpcId'
        filter_name = 'VpcIds'
        filter_type = 'list'
        cfn_type = config_type = 'AWS::EC2::VPC'
        id_prefix = "vpc-"
@Vpc.filter_registry.register('flow-logs')
class FlowLogFilter(Filter):
    """Are flow logs enabled on the resource.

    ie to find all vpcs with flows logs disabled we can do this

    :example:

    .. code-block:: yaml

            policies:
              - name: flow-logs-enabled
                resource: vpc
                filters:
                  - flow-logs

    or to find all vpcs with flow logs but that don't match a
    particular configuration.

    :example:

    .. code-block:: yaml

            policies:
              - name: flow-mis-configured
                resource: vpc
                filters:
                  - not:
                    - type: flow-logs
                      enabled: true
                      set-op: or
                      op: equal
                      # equality operator applies to following keys
                      traffic-type: all
                      status: active
                      log-group: vpc-logs
    """

    schema = type_schema(
        'flow-logs',
        **{'enabled': {'type': 'boolean', 'default': False},
           'op': {'enum': ['equal', 'not-equal'], 'default': 'equal'},
           'set-op': {'enum': ['or', 'and'], 'default': 'or'},
           'status': {'enum': ['active']},
           'deliver-status': {'enum': ['success', 'failure']},
           'destination': {'type': 'string'},
           'destination-type': {'enum': ['s3', 'cloud-watch-logs']},
           'traffic-type': {'enum': ['accept', 'reject', 'all']},
           'log-format': {'type': 'string'},
           'log-group': {'type': 'string'}})

    permissions = ('ec2:DescribeFlowLogs',)

    def process(self, resources, event=None):
        """Return the resources whose flow-log state matches the policy spec."""
        client = local_session(self.manager.session_factory).client('ec2')

        # TODO given subnet/nic level logs, we should paginate, but we'll
        # need to add/update botocore pagination support.
        logs = client.describe_flow_logs().get('FlowLogs', ())

        m = self.manager.get_model()
        # Group flow logs by the resource (vpc/subnet/eni) they are attached to.
        resource_map = {}
        for fl in logs:
            resource_map.setdefault(fl['ResourceId'], []).append(fl)

        enabled = self.data.get('enabled', False)
        log_group = self.data.get('log-group')
        log_format = self.data.get('log-format')
        traffic_type = self.data.get('traffic-type')
        destination_type = self.data.get('destination-type')
        destination = self.data.get('destination')
        status = self.data.get('status')
        delivery_status = self.data.get('deliver-status')
        # op selects equality or inequality for every per-key comparison below.
        op = self.data.get('op', 'equal') == 'equal' and operator.eq or operator.ne
        set_op = self.data.get('set-op', 'or')

        results = []
        # looping over vpc resources
        for r in resources:

            if r[m.id] not in resource_map:
                # we didn't find a flow log for this vpc
                if enabled:
                    # vpc flow logs not enabled so exclude this vpc from results
                    continue
                results.append(r)
                continue
            flogs = resource_map[r[m.id]]
            r['c7n:flow-logs'] = flogs

            # config comparisons are pointless if we only want vpcs with no flow logs
            if enabled:
                fl_matches = []
                for fl in flogs:
                    # Each unspecified key counts as a match (None short-circuits).
                    dest_type_match = (destination_type is None) or op(
                        fl['LogDestinationType'], destination_type)
                    dest_match = (destination is None) or op(
                        fl['LogDestination'], destination)
                    status_match = (status is None) or op(fl['FlowLogStatus'], status.upper())
                    delivery_status_match = (delivery_status is None) or op(
                        fl['DeliverLogsStatus'], delivery_status.upper())
                    traffic_type_match = (
                        traffic_type is None) or op(
                        fl['TrafficType'],
                        traffic_type.upper())
                    log_group_match = (log_group is None) or op(fl.get('LogGroupName'), log_group)
                    log_format_match = (log_format is None) or op(fl.get('LogFormat'), log_format)

                    # combine all conditions to check if flow log matches the spec
                    fl_match = (status_match and traffic_type_match and dest_match and
                                log_format_match and log_group_match and
                                dest_type_match and delivery_status_match)
                    fl_matches.append(fl_match)

                # set-op decides whether any or all attached flow logs must match.
                if set_op == 'or':
                    if any(fl_matches):
                        results.append(r)
                elif set_op == 'and':
                    if all(fl_matches):
                        results.append(r)

        return results
@Vpc.filter_registry.register('security-group')
class VpcSecurityGroupFilter(RelatedResourceFilter):
    """Filter VPCs based on Security Group attributes

    :example:

    .. code-block:: yaml

            policies:
              - name: vpc-by-sg
                resource: vpc
                filters:
                  - type: security-group
                    key: tag:Color
                    value: Gray
    """
    schema = type_schema(
        'security-group', rinherit=ValueFilter.schema,
        **{'match-resource': {'type': 'boolean'},
           'operator': {'enum': ['and', 'or']}})
    RelatedResource = "c7n.resources.vpc.SecurityGroup"
    RelatedIdsExpression = '[SecurityGroups][].GroupId'
    AnnotationKey = "matched-vpcs"

    def get_related_ids(self, resources):
        """Collect ids of security groups living in any of *resources*."""
        vpc_ids = {vpc['VpcId'] for vpc in resources}
        groups = self.manager.get_resource_manager('security-group').resources()
        return {g['GroupId'] for g in groups if g.get('VpcId', '') in vpc_ids}
@Vpc.filter_registry.register('subnet')
class VpcSubnetFilter(RelatedResourceFilter):
    """Filter VPCs based on Subnet attributes

    :example:

    .. code-block:: yaml

            policies:
              - name: vpc-by-subnet
                resource: vpc
                filters:
                  - type: subnet
                    key: tag:Color
                    value: Gray
    """
    schema = type_schema(
        'subnet', rinherit=ValueFilter.schema,
        **{'match-resource': {'type': 'boolean'},
           'operator': {'enum': ['and', 'or']}})
    RelatedResource = "c7n.resources.vpc.Subnet"
    RelatedIdsExpression = '[Subnets][].SubnetId'
    AnnotationKey = "MatchedVpcsSubnets"

    def get_related_ids(self, resources):
        """Collect ids of subnets contained in any of *resources*."""
        vpc_ids = [vpc['VpcId'] for vpc in resources]
        related = set()
        for subnet in self.manager.get_resource_manager('subnet').resources():
            if subnet.get('VpcId', '') in vpc_ids:
                related.add(subnet['SubnetId'])
        return related
@Vpc.filter_registry.register('nat-gateway')
class VpcNatGatewayFilter(RelatedResourceFilter):
    """Filter VPCs based on NAT Gateway attributes

    :example:

    .. code-block:: yaml

            policies:
              - name: vpc-by-nat
                resource: vpc
                filters:
                  - type: nat-gateway
                    key: tag:Color
                    value: Gray
    """
    schema = type_schema(
        'nat-gateway', rinherit=ValueFilter.schema,
        **{'match-resource': {'type': 'boolean'},
           'operator': {'enum': ['and', 'or']}})
    RelatedResource = "c7n.resources.vpc.NATGateway"
    RelatedIdsExpression = '[NatGateways][].NatGatewayId'
    AnnotationKey = "MatchedVpcsNatGateways"

    def get_related_ids(self, resources):
        """Collect ids of NAT gateways attached to any of *resources*."""
        vpc_ids = {v['VpcId'] for v in resources}
        nat_gateways = self.manager.get_resource_manager('nat-gateway').resources()
        return {
            ngw['NatGatewayId'] for ngw in nat_gateways
            if ngw.get('VpcId', '') in vpc_ids
        }
@Vpc.filter_registry.register('internet-gateway')
class VpcInternetGatewayFilter(RelatedResourceFilter):
    """Filter VPCs based on Internet Gateway attributes

    :example:

    .. code-block:: yaml

            policies:
              - name: vpc-by-igw
                resource: vpc
                filters:
                  - type: internet-gateway
                    key: tag:Color
                    value: Gray
    """
    schema = type_schema(
        'internet-gateway', rinherit=ValueFilter.schema,
        **{'match-resource': {'type': 'boolean'},
           'operator': {'enum': ['and', 'or']}})
    RelatedResource = "c7n.resources.vpc.InternetGateway"
    RelatedIdsExpression = '[InternetGateways][].InternetGatewayId'
    AnnotationKey = "MatchedVpcsIgws"

    def get_related_ids(self, resources):
        """Collect ids of internet gateways attached to any of *resources*."""
        vpc_ids = [vpc['VpcId'] for vpc in resources]
        # An igw can carry several attachments; one matching vpc suffices.
        return {
            igw['InternetGatewayId']
            for igw in self.manager.get_resource_manager('internet-gateway').resources()
            for attachment in igw['Attachments']
            if attachment.get('VpcId', '') in vpc_ids
        }
@Vpc.filter_registry.register('vpc-attributes')
class AttributesFilter(Filter):
    """Filters VPCs based on their DNS attributes

    :example:

    .. code-block:: yaml

            policies:
              - name: dns-hostname-enabled
                resource: vpc
                filters:
                  - type: vpc-attributes
                    dnshostnames: True
    """
    schema = type_schema(
        'vpc-attributes',
        dnshostnames={'type': 'boolean'},
        dnssupport={'type': 'boolean'})

    permissions = ('ec2:DescribeVpcAttribute',)

    def process(self, resources, event=None):
        """Keep vpcs whose DNS attributes equal every requested value."""
        results = []
        client = local_session(self.manager.session_factory).client('ec2')
        dns_hostname = self.data.get('dnshostnames', None)
        dns_support = self.data.get('dnssupport', None)

        for r in resources:
            # Query only the attributes the policy actually asked about;
            # each requested attribute contributes one equality check.
            checks = []
            if dns_hostname is not None:
                value = client.describe_vpc_attribute(
                    VpcId=r['VpcId'],
                    Attribute='enableDnsHostnames'
                )['EnableDnsHostnames']['Value']
                checks.append(dns_hostname == value)
            if dns_support is not None:
                value = client.describe_vpc_attribute(
                    VpcId=r['VpcId'],
                    Attribute='enableDnsSupport'
                )['EnableDnsSupport']['Value']
                checks.append(dns_support == value)
            # With no attributes requested nothing matches (checks is empty).
            if checks and all(checks):
                results.append(r)
        return results
@Vpc.filter_registry.register('dhcp-options')
class DhcpOptionsFilter(Filter):
    """Filter VPCs based on their dhcp options

    :example:

    .. code-block:: yaml

          policies:
             - name: vpcs-in-domain
               resource: vpc
               filters:
                 - type: dhcp-options
                   domain-name: ec2.internal

    if an option value is specified as a list, then all elements must be present.
    if an option value is specified as a string, then that string must be present.

    vpcs not matching a given option value can be found via specifying
    a `present: false` parameter.
    """

    option_keys = ('domain-name', 'domain-name-servers', 'ntp-servers')
    schema = type_schema('dhcp-options', **{
        k: {'oneOf': [
            {'type': 'array', 'items': {'type': 'string'}},
            {'type': 'string'}]}
        for k in option_keys})
    schema['properties']['present'] = {'type': 'boolean'}
    permissions = ('ec2:DescribeDhcpOptions',)

    def validate(self):
        """Require at least one dhcp option key in the policy data."""
        if not any([self.data.get(k) for k in self.option_keys]):
            raise PolicyValidationError("one of %s required" % (self.option_keys,))
        return self

    def process(self, resources, event=None):
        """Return the vpcs whose dhcp option set matches the policy."""
        client = local_session(self.manager.session_factory).client('ec2')
        option_ids = [r['DhcpOptionsId'] for r in resources]
        options_map = {}
        results = []
        for options in client.describe_dhcp_options(
                Filters=[{
                    'Name': 'dhcp-options-id',
                    'Values': option_ids}]).get('DhcpOptions', ()):
            # Flatten each option set to {key: [values]} for easy comparison.
            options_map[options['DhcpOptionsId']] = {
                o['Key']: [v['Value'] for v in o['Values']]
                for o in options['DhcpConfigurations']}
        for vpc in resources:
            if self.process_vpc(vpc, options_map[vpc['DhcpOptionsId']]):
                results.append(vpc)
        return results

    def process_vpc(self, vpc, dhcp):
        """Match one vpc's flattened dhcp configuration against the policy.

        Annotates the vpc with ``c7n:DhcpConfiguration`` as a side effect.
        The result is inverted when the policy sets ``present: false``.
        """
        vpc['c7n:DhcpConfiguration'] = dhcp
        found = True
        for k in self.option_keys:
            if k not in self.data:
                continue
            is_list = isinstance(self.data[k], list)
            if k not in dhcp:
                found = False
            # string value: must appear among the configured values
            elif not is_list and self.data[k] not in dhcp[k]:
                found = False
            # list value: must equal the configured values (order-insensitive)
            elif is_list and sorted(self.data[k]) != sorted(dhcp[k]):
                found = False
        if not self.data.get('present', True):
            found = not found
        return found
@Vpc.action_registry.register('post-finding')
class VpcPostFinding(PostFinding):

    resource_type = "AwsEc2Vpc"

    def format_resource(self, r):
        """Render the vpc as a Security Hub ``AwsEc2Vpc`` resource."""
        envelope, payload = self.format_envelope(r)
        # more inane sechub formatting deltas
        detail = {
            'DhcpOptionsId': r.get('DhcpOptionsId'),
            'State': r['State'],
        }
        ipv4_assocs = [
            dict(AssociationId=assoc['AssociationId'],
                 CidrBlock=assoc['CidrBlock'],
                 CidrBlockState=assoc['CidrBlockState']['State'])
            for assoc in r.get('CidrBlockAssociationSet', ())
        ]
        if ipv4_assocs:
            detail['CidrBlockAssociationSet'] = ipv4_assocs
        ipv6_assocs = [
            dict(AssociationId=assoc['AssociationId'],
                 Ipv6CidrBlock=assoc['Ipv6CidrBlock'],
                 CidrBlockState=assoc['Ipv6CidrBlockState']['State'])
            for assoc in r.get('Ipv6CidrBlockAssociationSet', ())
        ]
        if ipv6_assocs:
            detail['Ipv6CidrBlockAssociationSet'] = ipv6_assocs
        payload.update(self.filter_empty(detail))
        return envelope
@resources.register('subnet')
class Subnet(query.QueryResourceManager):
    """EC2 Subnet resource manager (enumerated via ec2 describe_subnets)."""

    class resource_type(query.TypeInfo):
        # Metadata driving c7n's generic query/enumeration machinery.
        service = 'ec2'
        arn_type = 'subnet'
        enum_spec = ('describe_subnets', 'Subnets', None)
        name = id = 'SubnetId'
        filter_name = 'SubnetIds'
        filter_type = 'list'
        cfn_type = config_type = 'AWS::EC2::Subnet'
        id_prefix = "subnet-"


# Flow logs can attach to subnets as well, so reuse the VPC filter here.
Subnet.filter_registry.register('flow-logs', FlowLogFilter)
@Subnet.filter_registry.register('vpc')
class SubnetVpcFilter(net_filters.VpcFilter):
    """Filter subnets by attributes of their parent VPC (resolved via ``VpcId``)."""

    RelatedIdsExpression = "VpcId"
class ConfigSG(query.ConfigSource):
def load_resource(self, item):
r = super(ConfigSG, self).load_resource(item)
for rset in ('IpPermissions', 'IpPermissionsEgress'):
for p in r.get(rset, ()):
if p.get('FromPort', '') is None:
p.pop('FromPort')
if p.get('ToPort', '') is None:
p.pop('ToPort')
if 'Ipv6Ranges' not in p:
p[u'Ipv6Ranges'] = []
for | |
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def cet_d_rainbow_bgymr_r(self):
    """Return the reversed ``cet_d_rainbow_bgymr`` colormap, registering it on first access.

    NOTE(review): ``matplotlib.cm.get_cmap``/``register_cmap`` and the private
    ``_cmap_registry`` are deprecated/removed in matplotlib >= 3.7/3.9 --
    confirm the pinned matplotlib version.
    """
    name = "cet_d_rainbow_bgymr_r"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_d_rainbow_bgymr.rgb")
    cmap = Colormap(self._coltbl(rgb_file)[::-1], name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_d_tritanopic_cwr(self):
    """Return the ``cet_d_tritanopic_cwr`` colormap, registering it on first access."""
    name = "cet_d_tritanopic_cwr"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_d_tritanopic_cwr.rgb")
    cmap = Colormap(self._coltbl(rgb_file), name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_d_tritanopic_cwr_r(self):
    """Return the reversed ``cet_d_tritanopic_cwr`` colormap, registering it on first access."""
    name = "cet_d_tritanopic_cwr_r"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_d_tritanopic_cwr.rgb")
    cmap = Colormap(self._coltbl(rgb_file)[::-1], name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_g_bw(self):
    """Return the ``cet_g_bw`` colormap, registering it on first access."""
    name = "cet_g_bw"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_g_bw.rgb")
    cmap = Colormap(self._coltbl(rgb_file), name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_g_bw_r(self):
    """Return the reversed ``cet_g_bw`` colormap, registering it on first access."""
    name = "cet_g_bw_r"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_g_bw.rgb")
    cmap = Colormap(self._coltbl(rgb_file)[::-1], name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_g_bw_minc(self):
    """Return the ``cet_g_bw_minc`` colormap, registering it on first access."""
    name = "cet_g_bw_minc"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_g_bw_minc.rgb")
    cmap = Colormap(self._coltbl(rgb_file), name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_g_bw_minc_r(self):
    """Return the reversed ``cet_g_bw_minc`` colormap, registering it on first access."""
    name = "cet_g_bw_minc_r"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_g_bw_minc.rgb")
    cmap = Colormap(self._coltbl(rgb_file)[::-1], name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_g_bw_minc1(self):
    """Return the ``cet_g_bw_minc1`` colormap, registering it on first access."""
    name = "cet_g_bw_minc1"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_g_bw_minc1.rgb")
    cmap = Colormap(self._coltbl(rgb_file), name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_g_bw_minc1_r(self):
    """Return the reversed ``cet_g_bw_minc1`` colormap, registering it on first access."""
    name = "cet_g_bw_minc1_r"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_g_bw_minc1.rgb")
    cmap = Colormap(self._coltbl(rgb_file)[::-1], name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_g_bw_minc_maxl(self):
    """Return the ``cet_g_bw_minc_maxl`` colormap, registering it on first access."""
    name = "cet_g_bw_minc_maxl"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_g_bw_minc_maxl.rgb")
    cmap = Colormap(self._coltbl(rgb_file), name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_g_bw_minc_maxl_r(self):
    """Return the reversed ``cet_g_bw_minc_maxl`` colormap, registering it on first access."""
    name = "cet_g_bw_minc_maxl_r"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_g_bw_minc_maxl.rgb")
    cmap = Colormap(self._coltbl(rgb_file)[::-1], name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_g_bw_minc_minl(self):
    """Return the ``cet_g_bw_minc_minl`` colormap, registering it on first access."""
    name = "cet_g_bw_minc_minl"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_g_bw_minc_minl.rgb")
    cmap = Colormap(self._coltbl(rgb_file), name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_g_bw_minc_minl_r(self):
    """Return the reversed ``cet_g_bw_minc_minl`` colormap, registering it on first access."""
    name = "cet_g_bw_minc_minl_r"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_g_bw_minc_minl.rgb")
    cmap = Colormap(self._coltbl(rgb_file)[::-1], name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_g_category10(self):
    """Return the ``cet_g_category10`` colormap, registering it on first access."""
    name = "cet_g_category10"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_g_category10.rgb")
    cmap = Colormap(self._coltbl(rgb_file), name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_g_category10_r(self):
    """Return the reversed ``cet_g_category10`` colormap, registering it on first access."""
    name = "cet_g_category10_r"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_g_category10.rgb")
    cmap = Colormap(self._coltbl(rgb_file)[::-1], name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_g_hv(self):
    """Return the ``cet_g_hv`` colormap, registering it on first access."""
    name = "cet_g_hv"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_g_hv.rgb")
    cmap = Colormap(self._coltbl(rgb_file), name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_g_hv_r(self):
    """Return the reversed ``cet_g_hv`` colormap, registering it on first access."""
    name = "cet_g_hv_r"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_g_hv.rgb")
    cmap = Colormap(self._coltbl(rgb_file)[::-1], name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_i(self):
    """Return the ``cet_i`` colormap, registering it on first access."""
    name = "cet_i"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_i.rgb")
    cmap = Colormap(self._coltbl(rgb_file), name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_i_r(self):
    """Return the reversed ``cet_i`` colormap, registering it on first access."""
    name = "cet_i_r"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_i.rgb")
    cmap = Colormap(self._coltbl(rgb_file)[::-1], name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_i_cgo(self):
    """Return the ``cet_i_cgo`` colormap, registering it on first access."""
    name = "cet_i_cgo"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_i_cgo.rgb")
    cmap = Colormap(self._coltbl(rgb_file), name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_i_cgo_r(self):
    """Return the reversed ``cet_i_cgo`` colormap, registering it on first access."""
    name = "cet_i_cgo_r"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_i_cgo.rgb")
    cmap = Colormap(self._coltbl(rgb_file)[::-1], name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_i_cgo1(self):
    """Return the ``cet_i_cgo1`` colormap, registering it on first access."""
    name = "cet_i_cgo1"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_i_cgo1.rgb")
    cmap = Colormap(self._coltbl(rgb_file), name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_i_cgo1_r(self):
    """Return the reversed ``cet_i_cgo1`` colormap, registering it on first access."""
    name = "cet_i_cgo1_r"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_i_cgo1.rgb")
    cmap = Colormap(self._coltbl(rgb_file)[::-1], name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_l_bgy(self):
    """Return the ``cet_l_bgy`` colormap, registering it on first access."""
    name = "cet_l_bgy"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_l_bgy.rgb")
    cmap = Colormap(self._coltbl(rgb_file), name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_l_bgy_r(self):
    """Return the reversed ``cet_l_bgy`` colormap, registering it on first access."""
    name = "cet_l_bgy_r"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_l_bgy.rgb")
    cmap = Colormap(self._coltbl(rgb_file)[::-1], name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_l_bgyw(self):
    """Return the ``cet_l_bgyw`` colormap, registering it on first access."""
    name = "cet_l_bgyw"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_l_bgyw.rgb")
    cmap = Colormap(self._coltbl(rgb_file), name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_l_bgyw_r(self):
    """Return the reversed ``cet_l_bgyw`` colormap, registering it on first access."""
    name = "cet_l_bgyw_r"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_l_bgyw.rgb")
    cmap = Colormap(self._coltbl(rgb_file)[::-1], name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_l_bgyw1(self):
    """Return the ``cet_l_bgyw1`` colormap, registering it on first access."""
    name = "cet_l_bgyw1"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_l_bgyw1.rgb")
    cmap = Colormap(self._coltbl(rgb_file), name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_l_bgyw1_r(self):
    """Return the reversed ``cet_l_bgyw1`` colormap, registering it on first access."""
    name = "cet_l_bgyw1_r"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_l_bgyw1.rgb")
    cmap = Colormap(self._coltbl(rgb_file)[::-1], name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_l_blue(self):
    """Return the ``cet_l_blue`` colormap, registering it on first access."""
    name = "cet_l_blue"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_l_blue.rgb")
    cmap = Colormap(self._coltbl(rgb_file), name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_l_blue_r(self):
    """Return the reversed ``cet_l_blue`` colormap, registering it on first access."""
    name = "cet_l_blue_r"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_l_blue.rgb")
    cmap = Colormap(self._coltbl(rgb_file)[::-1], name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_l_blue1(self):
    """Return the ``cet_l_blue1`` colormap, registering it on first access."""
    name = "cet_l_blue1"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_l_blue1.rgb")
    cmap = Colormap(self._coltbl(rgb_file), name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_l_blue1_r(self):
    """Return the reversed ``cet_l_blue1`` colormap, registering it on first access."""
    name = "cet_l_blue1_r"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_l_blue1.rgb")
    cmap = Colormap(self._coltbl(rgb_file)[::-1], name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_l_bmw(self):
    """Return the ``cet_l_bmw`` colormap, registering it on first access."""
    name = "cet_l_bmw"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_l_bmw.rgb")
    cmap = Colormap(self._coltbl(rgb_file), name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_l_bmw_r(self):
    """Return the reversed ``cet_l_bmw`` colormap, registering it on first access."""
    name = "cet_l_bmw_r"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_l_bmw.rgb")
    cmap = Colormap(self._coltbl(rgb_file)[::-1], name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_l_bmw1(self):
    """Return the ``cet_l_bmw1`` colormap, registering it on first access."""
    name = "cet_l_bmw1"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_l_bmw1.rgb")
    cmap = Colormap(self._coltbl(rgb_file), name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_l_bmw1_r(self):
    """Return the reversed ``cet_l_bmw1`` colormap, registering it on first access."""
    name = "cet_l_bmw1_r"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_l_bmw1.rgb")
    cmap = Colormap(self._coltbl(rgb_file)[::-1], name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_l_bmy(self):
    """Return the ``cet_l_bmy`` colormap, registering it on first access."""
    name = "cet_l_bmy"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_l_bmy.rgb")
    cmap = Colormap(self._coltbl(rgb_file), name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_l_bmy_r(self):
    """Return the reversed ``cet_l_bmy`` colormap, registering it on first access."""
    name = "cet_l_bmy_r"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_l_bmy.rgb")
    cmap = Colormap(self._coltbl(rgb_file)[::-1], name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_l_bmy1(self):
    """Return the ``cet_l_bmy1`` colormap, registering it on first access."""
    name = "cet_l_bmy1"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_l_bmy1.rgb")
    cmap = Colormap(self._coltbl(rgb_file), name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_l_bmy1_r(self):
    """Return the reversed ``cet_l_bmy1`` colormap, registering it on first access."""
    name = "cet_l_bmy1_r"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_l_bmy1.rgb")
    cmap = Colormap(self._coltbl(rgb_file)[::-1], name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_l_gow(self):
    """Return the ``cet_l_gow`` colormap, registering it on first access."""
    name = "cet_l_gow"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_l_gow.rgb")
    cmap = Colormap(self._coltbl(rgb_file), name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_l_gow_r(self):
    """Return the reversed ``cet_l_gow`` colormap, registering it on first access."""
    name = "cet_l_gow_r"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_l_gow.rgb")
    cmap = Colormap(self._coltbl(rgb_file)[::-1], name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_l_gow1(self):
    """Return the ``cet_l_gow1`` colormap, registering it on first access."""
    name = "cet_l_gow1"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_l_gow1.rgb")
    cmap = Colormap(self._coltbl(rgb_file), name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_l_gow1_r(self):
    """Return the reversed ``cet_l_gow1`` colormap, registering it on first access."""
    name = "cet_l_gow1_r"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_l_gow1.rgb")
    cmap = Colormap(self._coltbl(rgb_file)[::-1], name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_l_green(self):
    """Return the ``cet_l_green`` colormap, registering it on first access."""
    name = "cet_l_green"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_l_green.rgb")
    cmap = Colormap(self._coltbl(rgb_file), name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_l_green_r(self):
    """Return the reversed ``cet_l_green`` colormap, registering it on first access."""
    name = "cet_l_green_r"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_l_green.rgb")
    cmap = Colormap(self._coltbl(rgb_file)[::-1], name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_l_grey(self):
    """Return the ``cet_l_grey`` colormap, registering it on first access."""
    name = "cet_l_grey"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_l_grey.rgb")
    cmap = Colormap(self._coltbl(rgb_file), name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_l_grey_r(self):
    """Return the reversed ``cet_l_grey`` colormap, registering it on first access."""
    name = "cet_l_grey_r"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_l_grey.rgb")
    cmap = Colormap(self._coltbl(rgb_file)[::-1], name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_l_grey1(self):
    """Return the ``cet_l_grey1`` colormap, registering it on first access."""
    name = "cet_l_grey1"
    if name in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(name)
    rgb_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_l_grey1.rgb")
    cmap = Colormap(self._coltbl(rgb_file), name=name)
    matplotlib.cm.register_cmap(name=name, cmap=cmap)
    return cmap
@property
def cet_l_grey1_r(self):
cname = "cet_l_grey1_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorcet", "cet_l_grey1.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, | |
recvbuf=[Segment_X_spread[gtx].mask, MPI.BOOL], op=MPI.LAND)
#-- NOTE(review): reduction pattern for all stanzas below -- each output is a
#-- masked array whose data starts at zero and whose mask starts all-True.
#-- Allreduce(op=SUM) merges the per-process data (entries a process did not
#-- compute are presumably zero) and Allreduce(op=LAND) leaves a segment
#-- masked only when no process unmasked it.  The Distributed_* send arrays
#-- are released (rebound to None) once reduced to bound peak memory.
Distributed_X_spread = None
#-- along-track Y coordinates of segment fit
Segment_Y_atc[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
Segment_Y_atc[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_Y_atc.data, MPI.DOUBLE], \
    recvbuf=[Segment_Y_atc[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_Y_atc.mask, MPI.BOOL], \
    recvbuf=[Segment_Y_atc[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_Y_atc = None
#-- longitude of fit photons
Segment_Longitude[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
Segment_Longitude[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_Longitude.data, MPI.DOUBLE], \
    recvbuf=[Segment_Longitude[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_Longitude.mask, MPI.BOOL], \
    recvbuf=[Segment_Longitude[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_Longitude = None
#-- latitude of fit photons
Segment_Latitude[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
Segment_Latitude[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_Latitude.data, MPI.DOUBLE], \
    recvbuf=[Segment_Latitude[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_Latitude.mask, MPI.BOOL], \
    recvbuf=[Segment_Latitude[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_Latitude = None
#-- number of photons in fit
#-- NOTE(review): MPI.INT is a C int; this assumes the numpy default int
#-- matches -- confirm on platforms where dtype=int yields a 64-bit integer
Segment_N_Fit[gtx] = np.ma.zeros((n_seg),fill_value=-1,dtype=int)
Segment_N_Fit[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_N_Fit.data, MPI.INT], \
    recvbuf=[Segment_N_Fit[gtx].data, MPI.INT], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_N_Fit.mask, MPI.BOOL], \
    recvbuf=[Segment_N_Fit[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_N_Fit = None
#-- size of the window used in the fit
Segment_Window[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
Segment_Window[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_Window.data, MPI.DOUBLE], \
    recvbuf=[Segment_Window[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_Window.mask, MPI.BOOL], \
    recvbuf=[Segment_Window[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_Window = None
#-- robust dispersion estimator
Segment_RDE[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
Segment_RDE[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_RDE.data, MPI.DOUBLE], \
    recvbuf=[Segment_RDE[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_RDE.mask, MPI.BOOL], \
    recvbuf=[Segment_RDE[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_RDE = None
#-- signal-to-noise ratio
Segment_SNR[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
Segment_SNR[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_SNR.data, MPI.DOUBLE], \
    recvbuf=[Segment_SNR[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_SNR.mask, MPI.BOOL], \
    recvbuf=[Segment_SNR[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_SNR = None
#-- photon event signal-to-noise ratio from photon classifier
Segment_Photon_SNR[gtx] = np.ma.zeros((n_seg),fill_value=0,dtype=int)
Segment_Photon_SNR[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_Photon_SNR.data, MPI.INT], \
    recvbuf=[Segment_Photon_SNR[gtx].data, MPI.INT], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_Photon_SNR.mask, MPI.BOOL], \
    recvbuf=[Segment_Photon_SNR[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_Photon_SNR = None
#-- segment quality summary
Segment_Summary[gtx] = np.ma.zeros((n_seg),fill_value=-1,dtype=int)
Segment_Summary[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_Summary.data, MPI.INT], \
    recvbuf=[Segment_Summary[gtx].data, MPI.INT], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_Summary.mask, MPI.BOOL], \
    recvbuf=[Segment_Summary[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_Summary = None
#-- number of iterations for fit
Segment_Iterations[gtx] = np.ma.zeros((n_seg),fill_value=-1,dtype=int)
Segment_Iterations[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_Iterations.data, MPI.INT], \
    recvbuf=[Segment_Iterations[gtx].data, MPI.INT], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_Iterations.mask, MPI.BOOL], \
    recvbuf=[Segment_Iterations[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_Iterations = None
#-- number of photon event clusters
Segment_Clusters[gtx] = np.ma.zeros((n_seg),fill_value=0,dtype=int)
Segment_Clusters[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_Clusters.data, MPI.INT], \
    recvbuf=[Segment_Clusters[gtx].data, MPI.INT], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_Clusters.mask, MPI.BOOL], \
    recvbuf=[Segment_Clusters[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_Clusters = None
#-- signal source selection
#-- fill_value=4 -- presumably an "other/invalid" source code; confirm upstream
Segment_Source[gtx] = np.ma.zeros((n_seg),fill_value=4,dtype=int)
Segment_Source[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_Source.data, MPI.INT], \
    recvbuf=[Segment_Source[gtx].data, MPI.INT], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_Source.mask, MPI.BOOL], \
    recvbuf=[Segment_Source[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_Source = None
#-- number of pulses in segment
Segment_Pulses[gtx] = np.ma.zeros((n_seg),fill_value=-1,dtype=int)
Segment_Pulses[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_Pulses.data, MPI.INT], \
    recvbuf=[Segment_Pulses[gtx].data, MPI.INT], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_Pulses.mask, MPI.BOOL], \
    recvbuf=[Segment_Pulses[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_Pulses = None
#-- first photon bias estimates
#-- NOTE(review): same SUM/LAND masked-array reduction pattern as the segment
#-- stanzas above -- data starts zeroed, mask starts all-True on every process
FPB_mean_corr[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
FPB_mean_corr[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_FPB_mean_corr.data, MPI.DOUBLE], \
    recvbuf=[FPB_mean_corr[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_FPB_mean_corr.mask, MPI.BOOL], \
    recvbuf=[FPB_mean_corr[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_FPB_mean_corr = None
FPB_mean_sigma[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
FPB_mean_sigma[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_FPB_mean_sigma.data, MPI.DOUBLE], \
    recvbuf=[FPB_mean_sigma[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_FPB_mean_sigma.mask, MPI.BOOL], \
    recvbuf=[FPB_mean_sigma[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_FPB_mean_sigma = None
FPB_median_corr[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
FPB_median_corr[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_FPB_median_corr.data, MPI.DOUBLE], \
    recvbuf=[FPB_median_corr[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_FPB_median_corr.mask, MPI.BOOL], \
    recvbuf=[FPB_median_corr[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_FPB_median_corr = None
FPB_median_sigma[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
FPB_median_sigma[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_FPB_median_sigma.data, MPI.DOUBLE], \
    recvbuf=[FPB_median_sigma[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_FPB_median_sigma.mask, MPI.BOOL], \
    recvbuf=[FPB_median_sigma[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_FPB_median_sigma = None
#-- photon count used for the first photon bias correction
FPB_n_corr[gtx] = np.ma.zeros((n_seg),fill_value=-1,dtype=int)
FPB_n_corr[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_FPB_n_corr.data, MPI.INT], \
    recvbuf=[FPB_n_corr[gtx].data, MPI.INT], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_FPB_n_corr.mask, MPI.BOOL], \
    recvbuf=[FPB_n_corr[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_FPB_n_corr = None
#-- calibration-derived first photon bias correction
FPB_cal_corr[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
FPB_cal_corr[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_FPB_cal_corr.data, MPI.DOUBLE], \
    recvbuf=[FPB_cal_corr[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_FPB_cal_corr.mask, MPI.BOOL], \
    recvbuf=[FPB_cal_corr[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_FPB_cal_corr = None
#-- transmit pulse shape bias estimates
TPS_mean_corr[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
TPS_mean_corr[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_TPS_mean_corr.data, MPI.DOUBLE], \
    recvbuf=[TPS_mean_corr[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_TPS_mean_corr.mask, MPI.BOOL], \
    recvbuf=[TPS_mean_corr[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_TPS_mean_corr = None
TPS_median_corr[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
TPS_median_corr[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_TPS_median_corr.data, MPI.DOUBLE], \
    recvbuf=[TPS_median_corr[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_TPS_median_corr.mask, MPI.BOOL], \
    recvbuf=[TPS_median_corr[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_TPS_median_corr = None
#-- wait for all distributed processes to finish for beam
comm.Barrier()
#-- copy variables for outputting to HDF5 file
IS2_atl03_fit = {}
IS2_atl03_fill = {}
IS2_atl03_attrs = {}
#-- ICESat-2 spacecraft orientation at time
IS2_atl03_fit['orbit_info'] = {}
IS2_atl03_attrs['orbit_info'] = {}
for key,val in fileID['orbit_info'].items():
    IS2_atl03_fit['orbit_info'][key] = val[:]
    #-- Getting attributes of group and included variables
    #-- Global Group Attributes
    #-- NOTE(review): group attributes are re-copied on every key iteration --
    #-- redundant but harmless; could be hoisted out of the loop
    for att_name,att_val in fileID['orbit_info'].attrs.items():
        IS2_atl03_attrs['orbit_info'][att_name] = att_val
    #-- Variable Attributes
    IS2_atl03_attrs['orbit_info'][key] = {}
    for att_name,att_val in val.attrs.items():
        IS2_atl03_attrs['orbit_info'][key][att_name] = att_val
#-- information ancillary to the data product
#-- number of GPS seconds between the GPS epoch (1980-01-06T00:00:00Z UTC)
#-- and ATLAS Standard Data Product (SDP) epoch (2018-01-01T00:00:00Z UTC)
#-- Add this value to delta time parameters to compute full gps_seconds
#-- could alternatively use the Julian day of the ATLAS SDP epoch: 2458119.5
#-- and add leap seconds since 2018-01-01T00:00:00Z UTC (ATLAS SDP epoch)
IS2_atl03_fit['ancillary_data'] = {}
IS2_atl03_attrs['ancillary_data'] = {}
for key in ['atlas_sdp_gps_epoch','data_end_utc','data_start_utc','end_cycle',
    'end_geoseg','end_gpssow','end_gpsweek','end_orbit','end_region',
    'end_rgt','granule_end_utc','granule_start_utc','release','start_cycle',
    'start_geoseg','start_gpssow','start_gpsweek','start_orbit','start_region',
    'start_rgt','version']:
    #-- get each HDF5 variable
    IS2_atl03_fit['ancillary_data'][key] = fileID['ancillary_data'][key][:]
    #-- Getting attributes of group and included variables
    IS2_atl03_attrs['ancillary_data'][key] = {}
    for att_name,att_val in fileID['ancillary_data'][key].attrs.items():
        IS2_atl03_attrs['ancillary_data'][key][att_name] = att_val
#-- for each output beam
#-- compute across-track slopes by differencing heights across the beam pair,
#-- then reduce the distributed results and replace masked entries with fills
for gtx in sorted(IS2_atl03_beams):
    #-- atmospheric profile for beam gtx from ATL09 dataset
    pfl = fileID[gtx].attrs['atmosphere_profile']
    #-- complementary beam in pair
    cmp = associated_beam_pair[gtx]
    #-- extract and interpolate atmospheric parameters from ATL09
    dtime = fileID[gtx]['geolocation']['delta_time'][:]
    IS2_atl09_mds,IS2_atl09_attrs = read_HDF5_ATL09(args.ATL09, pfl,
        dtime, ATTRIBUTES=True, VERBOSE=args.verbose, COMM=comm)
    #-- segment fit across-track slopes
    Distributed_dH_across = np.ma.zeros((n_seg),fill_value=fill_value)
    Distributed_dH_across.mask = np.ones((n_seg),dtype=bool)
    #-- segment fit across-track slope errors
    Distributed_dH_across_Error = np.ma.zeros((n_seg),fill_value=fill_value)
    Distributed_dH_across_Error.mask = np.ones((n_seg),dtype=bool)
    #-- contribution of geolocation uncertainty to height error
    Distributed_sigma_geo = np.ma.zeros((n_seg),fill_value=fill_value)
    Distributed_sigma_geo.mask = np.ones((n_seg),dtype=bool)
    #-- iterate over valid ATL03 segments
    #-- in ATL03 1-based indexing: invalid == 0
    #-- here in 0-based indexing: invalid == -1
    segment_indices, = np.nonzero((Segment_Index_begin[gtx][:-1] >= 0) &
        (Segment_Index_begin[gtx][1:] >= 0))
    #-- verify that complementary beam pair is in list of beams
    #-- (no iterations are run when the pair beam is absent)
    iteration_count = len(segment_indices) if (cmp in IS2_atl03_beams) else 0
    #-- run for each geoseg (distributed over comm.size # of processes)
    for iteration in range(comm.rank, iteration_count, comm.size):
        #-- indice for iteration (can run through a subset of segments)
        j = segment_indices[iteration]
        #-- across track slopes for beam
        #-- NOTE(review): gate checks Segment_Height masks but the slope uses
        #-- Segment_Land_Ice values -- presumably masked together; confirm
        if ((~Segment_Height[gtx].mask[j]) & (~Segment_Height[cmp].mask[j])):
            #-- segment fit across-track slopes
            dY = (Segment_Y_atc[gtx].data[j] - Segment_Y_atc[cmp].data[j])
            Distributed_dH_across.data[j] = (Segment_Land_Ice[gtx].data[j] -
                Segment_Land_Ice[cmp].data[j])/dY
            Distributed_dH_across.mask[j] = False
            #-- segment fit across-track slope errors
            Distributed_dH_across_Error.data[j] = np.sqrt(
                Segment_Land_Ice_Error[gtx].data[j]**2 +
                Segment_Land_Ice_Error[cmp].data[j]**2)/np.abs(dY)
            Distributed_dH_across_Error.mask[j] = False
            #-- geolocation uncertainty
            sigma_geo_across = fileID[gtx]['geolocation']['sigma_across'][j]
            sigma_geo_along = fileID[gtx]['geolocation']['sigma_along'][j]
            sigma_geo_h = fileID[gtx]['geolocation']['sigma_h'][j]
            #-- contribution of geolocation uncertainty to height errors
            #-- (horizontal errors propagate through the local surface slopes)
            Distributed_sigma_geo.data[j] = np.sqrt(sigma_geo_h**2 +
                (sigma_geo_along*Segment_dH_along[gtx].data[j])**2 +
                (sigma_geo_across*Distributed_dH_across.data[j])**2)
            Distributed_sigma_geo.mask[j] = False
    #-- segment fit across-track slopes
    Segment_dH_across[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
    Segment_dH_across[gtx].mask = np.ones((n_seg),dtype=bool)
    comm.Allreduce(sendbuf=[Distributed_dH_across.data, MPI.DOUBLE], \
        recvbuf=[Segment_dH_across[gtx].data, MPI.DOUBLE], op=MPI.SUM)
    comm.Allreduce(sendbuf=[Distributed_dH_across.mask, MPI.BOOL], \
        recvbuf=[Segment_dH_across[gtx].mask, MPI.BOOL], op=MPI.LAND)
    Distributed_dH_across = None
    #-- segment fit across-track slope errors
    Segment_dH_across_Error[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
    Segment_dH_across_Error[gtx].mask = np.ones((n_seg),dtype=bool)
    comm.Allreduce(sendbuf=[Distributed_dH_across_Error.data, MPI.DOUBLE], \
        recvbuf=[Segment_dH_across_Error[gtx].data, MPI.DOUBLE], op=MPI.SUM)
    comm.Allreduce(sendbuf=[Distributed_dH_across_Error.mask, MPI.BOOL], \
        recvbuf=[Segment_dH_across_Error[gtx].mask, MPI.BOOL], op=MPI.LAND)
    Distributed_dH_across_Error = None
    #-- contribution of geolocation uncertainty to height errors
    Segment_sigma_geo[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
    Segment_sigma_geo[gtx].mask = np.ones((n_seg),dtype=bool)
    comm.Allreduce(sendbuf=[Distributed_sigma_geo.data, MPI.DOUBLE], \
        recvbuf=[Segment_sigma_geo[gtx].data, MPI.DOUBLE], op=MPI.SUM)
    comm.Allreduce(sendbuf=[Distributed_sigma_geo.mask, MPI.BOOL], \
        recvbuf=[Segment_sigma_geo[gtx].mask, MPI.BOOL], op=MPI.LAND)
    Distributed_sigma_geo = None
    #-- wait for all distributed processes to finish for beam
    comm.Barrier()
    #-- set values for invalid segments to fill_value of each variable
    Segment_delta_time[gtx].data[Segment_delta_time[gtx].mask] = Segment_delta_time[gtx].fill_value
    Segment_Height[gtx].data[Segment_Height[gtx].mask] = Segment_Height[gtx].fill_value
    Segment_Land_Ice[gtx].data[Segment_Land_Ice[gtx].mask] = Segment_Land_Ice[gtx].fill_value
    Segment_dH_along[gtx].data[Segment_dH_along[gtx].mask] = Segment_dH_along[gtx].fill_value
    Segment_dH_across[gtx].data[Segment_dH_across[gtx].mask] = Segment_dH_across[gtx].fill_value
    Segment_Height_Error[gtx].data[Segment_Height_Error[gtx].mask] = Segment_Height_Error[gtx].fill_value
    Segment_Land_Ice_Error[gtx].data[Segment_Land_Ice_Error[gtx].mask] = Segment_Land_Ice_Error[gtx].fill_value
    Segment_dH_along_Error[gtx].data[Segment_dH_along_Error[gtx].mask] = Segment_dH_along_Error[gtx].fill_value
    Segment_dH_across_Error[gtx].data[Segment_dH_across_Error[gtx].mask] = Segment_dH_across_Error[gtx].fill_value
    Segment_Mean_Median[gtx].data[Segment_Mean_Median[gtx].mask] = Segment_Mean_Median[gtx].fill_value
    Segment_X_atc[gtx].data[Segment_X_atc[gtx].mask] = Segment_X_atc[gtx].fill_value
    Segment_X_spread[gtx].data[Segment_X_spread[gtx].mask] = Segment_X_spread[gtx].fill_value
    Segment_Y_atc[gtx].data[Segment_Y_atc[gtx].mask] = Segment_Y_atc[gtx].fill_value
    Segment_sigma_geo[gtx].data[Segment_sigma_geo[gtx].mask] = Segment_sigma_geo[gtx].fill_value
    Segment_Longitude[gtx].data[Segment_Longitude[gtx].mask] = Segment_Longitude[gtx].fill_value
    Segment_Latitude[gtx].data[Segment_Latitude[gtx].mask] = Segment_Latitude[gtx].fill_value
    Segment_N_Fit[gtx].data[Segment_N_Fit[gtx].mask] = Segment_N_Fit[gtx].fill_value
    Segment_Window[gtx].data[Segment_Window[gtx].mask] = Segment_Window[gtx].fill_value
    Segment_RDE[gtx].data[Segment_RDE[gtx].mask] = Segment_RDE[gtx].fill_value
    Segment_SNR[gtx].data[Segment_SNR[gtx].mask] = Segment_SNR[gtx].fill_value
Segment_Summary[gtx].data[Segment_Summary[gtx].mask] = Segment_Summary[gtx].fill_value
Segment_Iterations[gtx].data[Segment_Iterations[gtx].mask] = Segment_Iterations[gtx].fill_value
Segment_Source[gtx].data[Segment_Source[gtx].mask] = Segment_Source[gtx].fill_value
Segment_Pulses[gtx].data[Segment_Pulses[gtx].mask] = Segment_Pulses[gtx].fill_value
FPB_mean_corr[gtx].data[FPB_mean_corr[gtx].mask] = FPB_mean_corr[gtx].fill_value
FPB_mean_sigma[gtx].data[FPB_mean_sigma[gtx].mask] = FPB_mean_sigma[gtx].fill_value
FPB_median_corr[gtx].data[FPB_median_corr[gtx].mask] = FPB_median_corr[gtx].fill_value
FPB_median_sigma[gtx].data[FPB_median_sigma[gtx].mask] = FPB_median_sigma[gtx].fill_value
FPB_n_corr[gtx].data[FPB_n_corr[gtx].mask] = FPB_n_corr[gtx].fill_value
FPB_cal_corr[gtx].data[FPB_cal_corr[gtx].mask] = FPB_cal_corr[gtx].fill_value
TPS_mean_corr[gtx].data[TPS_mean_corr[gtx].mask] = TPS_mean_corr[gtx].fill_value
TPS_median_corr[gtx].data[TPS_median_corr[gtx].mask] = TPS_median_corr[gtx].fill_value
#-- save tep and dead time information and statistics
IS2_atl03_fit['ancillary_data'][gtx] = {}
IS2_atl03_attrs['ancillary_data'][gtx] = {}
#-- tep time of day
IS2_atl03_fit['ancillary_data'][gtx]['tep_tod'] = np.array(tep[gtx]['tep_tod'])
IS2_atl03_attrs['ancillary_data'][gtx]['tep_tod'] = {}
IS2_atl03_attrs['ancillary_data'][gtx]['tep_tod']['units'] = "seconds since 2018-01-01"
IS2_atl03_attrs['ancillary_data'][gtx]['tep_tod']['long_name'] = "TEP Time Of Day"
IS2_atl03_attrs['ancillary_data'][gtx]['tep_tod']['standard_name'] = "time"
IS2_atl03_attrs['ancillary_data'][gtx]['tep_tod']['source'] = tep[gtx]['pce']
IS2_atl03_attrs['ancillary_data'][gtx]['tep_tod']['description'] = ("The time of day "
"at of the start of the data within the TEP histogram, in seconds since the "
"ATLAS SDP GPS Epoch. The ATLAS Standard Data Products (SDP) epoch offset is "
"defined within /ancillary_data/atlas_sdp_gps_epoch as the number of GPS seconds "
"between the GPS epoch (1980-01-06T00:00:00.000000Z UTC) and the ATLAS SDP epoch. "
"By adding the offset contained within atlas_sdp_gps_epoch to delta time "
"parameters, the time in gps_seconds relative to the GPS epoch can be computed.")
| |
the neighbor allreduce precision (sum) 1D, 2D, 3D tensors correctly."""
size = bf.size()
rank = bf.rank()
if size <= 1:
fname = inspect.currentframe().f_code.co_name
warnings.warn("Skip {} due to size 1".format(fname))
return
dtypes = [torch.DoubleTensor]
if TEST_ON_GPU:
dtypes += [torch.cuda.DoubleTensor]
# By default, we use exponential two ring topology.
num_indegree = int(np.ceil(np.log2(size)))
neighbor_ranks = [(rank - 2**i) % size for i in range(num_indegree)]
sum_value = np.sum(neighbor_ranks) + rank
sum_value = (len(neighbor_ranks)+1)*(2**-256)
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
tensor = torch.DoubleTensor(*([23] * dim)).fill_(1).mul_(2**-256)
tensor = self.cast_and_place(tensor, dtype)
name = "neighbor_allreduce_{}_{}".format(dim, dtype)
nw = {i: 1.0 for i in neighbor_ranks}
reduced_tensor = bf.neighbor_allreduce(tensor, self_weight=1.0,
src_weights=nw, name=name)
assert (
list(reduced_tensor.shape) == [23] * dim
), "bf.neighbor_allreduce (avg) produces incorrect reduced shape"
assert (
(reduced_tensor.data - sum_value).abs().max() == 0
), "bf.neighbor_allreduce (avg) produces incorrect reduced tensor"
def test_neighbor_allreduce_avg_precision(self):
"""Test that the neighbor allreduce precision (avg) 1D, 2D, 3D tensors correctly."""
size = bf.size()
rank = bf.rank()
if size <= 1:
fname = inspect.currentframe().f_code.co_name
warnings.warn("Skip {} due to size 1".format(fname))
return
dtypes = [torch.DoubleTensor]
if TEST_ON_GPU:
dtypes += [torch.cuda.DoubleTensor]
# By default, we use exponential two ring topology.
num_indegree = int(np.ceil(np.log2(size)))
neighbor_ranks = [(rank - 2**i) % size for i in range(num_indegree)]
sum_value = np.sum(neighbor_ranks) + rank
sum_value = 2**-256
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
tensor = torch.DoubleTensor(*([23] * dim)).fill_(1).mul_(2**-256)
tensor = self.cast_and_place(tensor, dtype)
name = "neighbor_allreduce_{}_{}".format(dim, dtype)
reduced_tensor = bf.neighbor_allreduce(tensor, name=name)
assert (
list(reduced_tensor.shape) == [23] * dim
), "bf.neighbor_allreduce (avg) produces incorrect reduced shape"
assert (
(reduced_tensor.data - sum_value).abs().max() == 0
), "bf.neighbor_allreduce (avg) produces incorrect reduced tensor"
    def test_neighbor_allreduce_dynamic_topo_check(self):
        """Test that neighbor_allreduce raises ValueError for an invalid
        combination of dynamic-topology arguments (src_weights given as a
        weight dict while dst_weights is passed a plain rank list).
        """
        size = bf.size()
        rank = bf.rank()
        if size <= 1:
            fname = inspect.currentframe().f_code.co_name
            warnings.warn("Skip {} due to size 1".format(fname))
            return
        dtypes = [torch.HalfTensor, torch.FloatTensor, torch.DoubleTensor]
        if TEST_ON_GPU:
            dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor]
        # By default, we use exponential two ring topology.
        self_weight = 0.0
        neighbor_weights = {(rank-1) % size : 1.0}
        # NOTE(review): dst_weights receives a list of ranks rather than a
        # weight mapping; the call below is expected to fail validation —
        # confirm against the bf.neighbor_allreduce argument contract.
        send_ranks = [(rank + 2) % size]
        dims = [1, 2, 3]
        for dtype, dim in itertools.product(dtypes, dims):
            tensor = torch.FloatTensor(*([23] * dim)).fill_(1).mul_(rank)
            tensor = self.cast_and_place(tensor, dtype)
            name = "neighbor_allreduce_{}_{}".format(dim, dtype)
            with pytest.raises(ValueError):
                bf.neighbor_allreduce(tensor, name=name, self_weight=self_weight,
                                      src_weights=neighbor_weights, dst_weights=send_ranks)
def test_neighbor_allreduce_dynamic_topo_outside_static_topo_move(self):
"""Test that the neighbor all reduce (move) 1D, 2D, 3D tensors correctly
outside the static topology."""
size = bf.size()
rank = bf.rank()
if size <= 1:
fname = inspect.currentframe().f_code.co_name
warnings.warn("Skip {} due to size 1".format(fname))
return
dtypes = [torch.HalfTensor, torch.FloatTensor, torch.DoubleTensor]
if TEST_ON_GPU:
dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor]
# By default, we use power two ring topology.
self_weight = 0.0
neighbor_weights = {(rank+1) % size : 1.0}
send_ranks = [(rank - 1) % size]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
tensor = torch.FloatTensor(*([23] * dim)).fill_(1).mul_(rank)
tensor = self.cast_and_place(tensor, dtype)
name = "neighbor_allreduce_{}_{}".format(dim, dtype)
reduced_tensor = bf.neighbor_allreduce(
tensor, name=name, self_weight=self_weight,
src_weights=neighbor_weights, dst_weights=send_ranks)
eps = EPSILON if tensor.dtype != torch.float16 else LOOSE_EPSILON
tensor, reduced_tensor = self.convert_cpu_fp16_to_fp32(tensor, reduced_tensor)
assert (
list(reduced_tensor.shape) == [23] * dim
), "bf.neighbor_allreduce (move) produces incorrect reduced shape"
assert (
(reduced_tensor.data - (rank+1) % size).abs().max() < eps
), "bf.neighbor_allreduce (move) produces incorrect reduced tensor"
    def test_neighbor_allreduce_dynamic_topo_move(self):
        """Test that the neighbor all reduce (move) 1D, 2D, 3D tensors correctly."""
        size = bf.size()
        rank = bf.rank()
        if size <= 1:
            fname = inspect.currentframe().f_code.co_name
            warnings.warn("Skip {} due to size 1".format(fname))
            return
        dtypes = [torch.HalfTensor, torch.FloatTensor, torch.DoubleTensor]
        if TEST_ON_GPU:
            dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor]
        # By default, we use exponential two ring topology.
        # "Move" pattern: drop the local tensor (weight 0), receive only from
        # the predecessor rank, and send only to the successor rank.
        self_weight = 0.0
        neighbor_weights = {(rank-1) % size : 1.0}
        send_ranks = [(rank + 1) % size]
        dims = [1, 2, 3]
        for dtype, dim in itertools.product(dtypes, dims):
            tensor = torch.FloatTensor(*([23] * dim)).fill_(1).mul_(rank)
            tensor = self.cast_and_place(tensor, dtype)
            name = "neighbor_allreduce_{}_{}".format(dim, dtype)
            reduced_tensor = bf.neighbor_allreduce(
                tensor, name=name, self_weight=self_weight,
                src_weights=neighbor_weights, dst_weights=send_ranks)
            # float16 needs a looser tolerance than the wider dtypes.
            eps = EPSILON if tensor.dtype != torch.float16 else LOOSE_EPSILON
            tensor, reduced_tensor = self.convert_cpu_fp16_to_fp32(tensor, reduced_tensor)
            assert (
                list(reduced_tensor.shape) == [23] * dim
            ), "bf.neighbor_allreduce (move) produces incorrect reduced shape"
            # Each rank should now hold exactly its predecessor's tensor value.
            assert (
                (reduced_tensor.data - (rank-1) % size).abs().max() < eps
            ), "bf.neighbor_allreduce (move) produces incorrect reduced tensor"
<EMAIL>("Haven't fully clear on the usage due to sync issues. Temporarily disabled")
def test_neighbor_allreduce_dynamic_topo_with_empty_send_neighbors(self):
"""Test that the neighbor all reduce (avg) 1D, 2D, 3D tensors correctly with empty
send_neighbors."""
size = bf.size()
rank = bf.rank()
if size % 2 == 1:
fname = inspect.currentframe().f_code.co_name
warnings.warn("Skip {} due to odd size".format(fname))
return
dtypes = [torch.HalfTensor, torch.FloatTensor, torch.DoubleTensor]
if TEST_ON_GPU:
dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor]
# By default, we use power two ring topology.
self_weight = 1.0
if rank % 2 == 0:
neighbor_weights = {}
send_ranks = [rank+1]
expected_value = rank
else:
neighbor_weights = {rank-1: 1.0}
send_ranks = []
expected_value = rank*2-1
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
tensor = torch.FloatTensor(*([23] * dim)).fill_(1).mul_(rank)
tensor = self.cast_and_place(tensor, dtype)
name = "neighbor_allreduce_{}_{}".format(dim, dtype)
reduced_tensor = bf.neighbor_allreduce(
tensor, name=name, self_weight=self_weight,
src_weights=neighbor_weights, dst_weights=send_ranks)
eps = EPSILON if tensor.dtype != torch.float16 else LOOSE_EPSILON
tensor, reduced_tensor = self.convert_cpu_fp16_to_fp32(tensor, reduced_tensor)
assert (
list(reduced_tensor.shape) == [23] * dim
), "bf.neighbor_allreduce (avg) produces incorrect reduced shape"
assert (
(reduced_tensor.data - expected_value).abs().max() < eps
), "bf.neighbor_allreduce (avg) produces incorrect reduced tensor"
    def test_neighbor_allreduce_dynamic_topo_avg(self):
        """Test that the neighbor all reduce (avg) 1D, 2D, 3D tensors correctly."""
        size = bf.size()
        rank = bf.rank()
        if size <= 1:
            fname = inspect.currentframe().f_code.co_name
            warnings.warn("Skip {} due to size 1".format(fname))
            return
        dtypes = [torch.HalfTensor, torch.FloatTensor, torch.DoubleTensor]
        if TEST_ON_GPU:
            dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor]
        # By default, we use exponential two ring topology.
        num_indegree = int(np.ceil(np.log2(size)))
        neighbor_ranks = [(rank - 2**i) % size for i in range(num_indegree)]
        sum_value = np.sum(neighbor_ranks) + rank
        # Dynamic topology mirroring the static exponential-two ring: uniform
        # weights over self plus all in-neighbors, symmetric out-neighbors.
        self_weight = 1/(num_indegree+1)
        neighbor_weights = {i: self_weight for i in neighbor_ranks}
        send_ranks = [(rank + 2**i) % size for i in range(num_indegree)]
        dims = [1, 2, 3]
        for dtype, dim in itertools.product(dtypes, dims):
            tensor = torch.FloatTensor(*([23] * dim)).fill_(1).mul_(rank)
            tensor = self.cast_and_place(tensor, dtype)
            name = "neighbor_allreduce_{}_{}".format(dim, dtype)
            reduced_tensor = bf.neighbor_allreduce(
                tensor, name=name, self_weight=self_weight,
                src_weights=neighbor_weights, dst_weights=send_ranks)
            # float16 needs a looser tolerance than the wider dtypes.
            eps = EPSILON if tensor.dtype != torch.float16 else LOOSE_EPSILON
            tensor, reduced_tensor = self.convert_cpu_fp16_to_fp32(tensor, reduced_tensor)
            assert (
                list(reduced_tensor.shape) == [23] * dim
            ), "bf.neighbor_allreduce (avg) produces incorrect reduced shape"
            # Undo the averaging (multiply by neighborhood size) and compare
            # against the expected sum of contributing ranks.
            assert (
                (reduced_tensor.data.mul_(num_indegree+1) -
                 sum_value).abs().max() < eps
            ), "bf.neighbor_allreduce (avg) produces incorrect reduced tensor"
def test_neighbor_allreduce_avg(self):
"""Test that the neighbor all reduce (avg) 1D, 2D, 3D tensors correctly."""
size = bf.size()
rank = bf.rank()
if size <= 1:
fname = inspect.currentframe().f_code.co_name
warnings.warn("Skip {} due to size 1".format(fname))
return
dtypes = [torch.HalfTensor, torch.FloatTensor, torch.DoubleTensor]
if TEST_ON_GPU:
dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor]
# By default, we use exponential two ring topology.
num_indegree = int(np.ceil(np.log2(size)))
neighbor_ranks = [(rank - 2**i) % size for i in range(num_indegree)]
sum_value = np.sum(neighbor_ranks) + rank
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
tensor = torch.FloatTensor(*([23] * dim)).fill_(1).mul_(rank)
tensor = self.cast_and_place(tensor, dtype)
name = "neighbor_allreduce_{}_{}".format(dim, dtype)
reduced_tensor = bf.neighbor_allreduce(tensor, name=name)
eps = EPSILON if tensor.dtype != torch.float16 else LOOSE_EPSILON
tensor, reduced_tensor = self.convert_cpu_fp16_to_fp32(tensor, reduced_tensor)
assert (
list(reduced_tensor.shape) == [23] * dim
), "bf.neighbor_allreduce (avg) produces incorrect reduced shape"
assert (
(reduced_tensor.data.mul_(num_indegree+1) -
sum_value).abs().max() < eps
), "bf.neighbor_allreduce (avg) produces incorrect reduced tensor"
def test_neighbor_allreduce_avg_meshgrid_topo(self):
"""
Test that the neighbor all reduce (avg) 1D, 2D, 3D tensors
correctly in a 2D meshgrid topology.
"""
size = bf.size()
rank = bf.rank()
if size <= 1:
fname = inspect.currentframe().f_code.co_name
warnings.warn("Skip {} due to size 1".format(fname))
return
dtypes = [torch.HalfTensor, torch.FloatTensor, torch.DoubleTensor]
if TEST_ON_GPU:
dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor]
is_set = bf.set_topology(topology_util.MeshGrid2DGraph(size))
assert is_set, "Topology set failed."
topology = bf.load_topology()
neighbor_array_with_self = np.nonzero(
nx.to_numpy_matrix(topology)[rank])[1]
num_indegree = len(neighbor_array_with_self)-1
sum_value = neighbor_array_with_self.sum()
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
tensor = torch.FloatTensor(*([23] * dim)).fill_(1).mul_(rank)
tensor = self.cast_and_place(tensor, dtype)
reduced_tensor = bf.neighbor_allreduce(tensor)
eps = EPSILON if tensor.dtype != torch.float16 else LOOSE_EPSILON
tensor, reduced_tensor = self.convert_cpu_fp16_to_fp32(tensor, reduced_tensor)
assert (
list(reduced_tensor.shape) == [23] * dim
), "bf.neighbor_allreduce (avg) produces incorrect reduced shape"
assert (
(reduced_tensor.data.mul_(num_indegree+1) -
sum_value).abs().max() | |
<reponame>yubocai-poly/CSE102
#! /usr/bin/env python3
# --------------------------------------------------------------------
import math
# --------------------------------------------------------------------
class Node:
    """A binary-tree node: a payload `value` plus optional children."""

    def __init__(self, value, left=None, right=None):
        # Missing children default to None (the empty tree).
        self.value, self.left, self.right = value, left, right
# --------------------------------------------------------------------
# Hand-built example trees for quick manual checks:
# T1: root 0; left subtree 1 with leaves 2 and 3; right child 4 with left child 5.
T1 = Node(0, Node(1, Node(2), Node(3)), Node(4, Node(5)))
# T2: root 0; leaf 1 on the left; right subtree 2 with left leaf 3 and
# right child 4 whose left child is 5.
T2 = Node(0, Node(1), Node(2, Node(3), Node(4, Node(5))))
# T3: root 11; left subtree 10 with children 9 (right child 8) and 7; right leaf 6.
T3 = Node(11, Node(10, Node(9, None, Node(8)), Node(7)), Node(6))
# --------------------------------------------------------------------
def size(node):
    """Return the number of nodes in the tree rooted at `node`."""
    # An empty tree has zero nodes; otherwise count this node plus the
    # (recursively computed) sizes of both subtrees.
    return 0 if node is None else 1 + size(node.left) + size(node.right)
# --------------------------------------------------------------------
def sum_values(node):
    """Return the sum of all values stored in the tree rooted at `node`."""
    if node is None:
        # 0 is the neutral element for addition, so empty subtrees (and
        # absent children) contribute nothing.
        return 0
    total = node.value
    total += sum_values(node.left)
    total += sum_values(node.right)
    return total
# --------------------------------------------------------------------
def accumulate(node, f, acc):
    """Fold `f` over the values of the tree rooted at `node`, seeded with `acc`.

    The fold visits the left subtree first, then the right subtree, then the
    node's own value (a post-order traversal), so for values x0..xn it
    computes f(xn, f(x_{n-1}, ..., f(x0, acc)...)).  The result depends on
    this order unless `f` is associative-commutative.
    """
    if node is None:
        # Nothing to fold: the accumulator passes through unchanged.
        return acc
    # Inner-most call runs first: left subtree, then right, then this node.
    return f(node.value,
             accumulate(node.right, f, accumulate(node.left, f, acc)))
# --------------------------------------------------------------------
def _second_min(node):
# We write an auxiliary function that returns a pair that contains
# the two smallest values of the tree rooted at `node` - with
# duplicates, the smallest element first.
if node is None:
# For the empty tree, we simply return `(math.inf, math.inf)`
# We do not choose this at random: `math.inf` is a neutral
# element for `min`.
return (math.inf, math.inf)
# We obtain the two smallest elements of the left subtree...
minl, sminl = _second_min(node.left)
# ...and do the same for the right subtree.
minr, sminr = _second_min(node.right)
# Finally, the two smallest values of the tree rooted at `node`
# are the two smallest values among `minl`, `sminl`, `minr`,
# `sminr` and the actual node value `node.value`.
#
# We simply create a list that contains this 5 elements, sort it
# and take the two first values.
mins = sorted([minl, sminl, node.value, minr, sminr])
return (mins[0], mins[1])
def second_min(root):
    """Return the second-smallest value in the tree rooted at `root`
    (duplicates count; `math.inf` if the tree has fewer than two nodes)."""
    _smallest, runner_up = _second_min(root)
    return runner_up
# --------------------------------------------------------------------
def height(node):
    """Return the height of the tree rooted at `node` (-1 for the empty tree).

    Using -1 for the empty tree means a leaf gets 1 + max(-1, -1) == 0
    without special-casing absent children.
    """
    if node is None:
        return -1
    left_h = height(node.left)
    right_h = height(node.right)
    # One edge down to the taller of the two subtrees.
    return 1 + max(left_h, right_h)
# --------------------------------------------------------------------
def mirrored(lnode, rnode):
    """Return True iff the trees rooted at `lnode` and `rnode` are mirror
    images of each other.

    Two trees are mirrors iff both are empty, or their root values match,
    the left subtree of one mirrors the right subtree of the other, and
    vice versa.
    """
    if lnode is None or rnode is None:
        # A tree can only mirror an empty tree by being empty itself.
        return lnode is None and rnode is None
    # Both non-empty: match values, then compare subtrees crosswise.
    return (lnode.value == rnode.value
            and mirrored(lnode.left, rnode.right)
            and mirrored(lnode.right, rnode.left))
# --------------------------------------------------------------------
def check_symmetry(root):
    # A tree is symmetric iff it is its own mirror image: `mirrored`
    # compares the tree against itself, recursing left-vs-right.
    return mirrored(root, root)
# --------------------------------------------------------------------
def check_BST(node, lmin=-math.inf, lmax=math.inf):
    """Return True iff the tree rooted at `node` is a binary search tree
    whose values all lie within the closed range [lmin, lmax].

    With the default infinite bounds this simply checks the BST property.
    The bounds are narrowed on recursion: the left subtree may only hold
    values up to `node.value`, the right subtree only values from
    `node.value` up.  This range-passing trick avoids the classic pitfall
    where both children are BSTs yet the whole tree is not.
    """
    if node is None:
        # The empty tree is a BST and trivially satisfies any bounds.
        return True
    # Reject immediately if this value escapes the allowed window.
    if not (lmin <= node.value <= lmax):
        return False
    # Recurse with tightened bounds on each side.
    return (check_BST(node.left, lmin, node.value)
            and check_BST(node.right, node.value, lmax))
    # Mini-puzzle (from the original): this makes up to 2*n comparisons for
    # n nodes — can you do it with fewer, and what is the minimum?
# --------------------------------------------------------------------
def min_BST(node, acc=math.inf):
    """Return the smallest value of the BST rooted at `node`
    (`acc`, defaulting to `math.inf`, if the tree is empty)."""
    # The minimum of a BST sits at its left-most node: walk left,
    # remembering the last value seen.
    while node is not None:
        acc = node.value
        node = node.left
    return acc
# --------------------------------------------------------------------
def _min_diff(node):
# Here again, we write an auxiliary function that returns a
# triplet containing:
#
# - the smallest value of the absolute difference between the
# values in different nodes of the tree rooted at `node`,
#
# - the smallest value among the nodes of the tree rooted at
# `node` (or `None` if `node` is empty),
#
# - the greatest value among the nodes of the tree rooted at
# `node` (or `None` if `node` is empty).
if node is None:
# For the empty tree, we return `math.inf` since `math.inf` is
# a neutral element for `min` (given that we are computing a
# minimum).
return (math.inf, None, None)
# We call the function recursively on the left and right subtrees
mdl, minl, maxl = _min_diff(node.left)
mdr, minr, maxr = _min_diff(node.right)
# Now, since we have a BST:
#
# - `node.value` is lower than any value in `node` right
# subtree. Hence, the (absolute) difference between
# `node.value` and one value of its | |
<gh_stars>0
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import options, utils
from fairseq.modules import (
AdaptiveInput, AdaptiveSoftmax, CharacterTokenEmbedder, LayerNorm,
LearnedPositionalEmbedding, MultiheadAttention, SinusoidalPositionalEmbedding,
RelativeMultiheadAttention,
)
from . import (
FairseqIncrementalDecoder, FairseqEncoder, FairseqLanguageModel,
FairseqModel, register_model, register_model_architecture,
)
@register_model('utransformer')
class STransformerModel(FairseqModel):
    """
    Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017)
    <https://arxiv.org/abs/1706.03762>`_, registered under the name
    ``utransformer``.

    Args:
        encoder (TransformerEncoder): the encoder
        decoder (TransformerDecoder): the decoder

    The Transformer model provides the following named architectures and
    command-line arguments:

    .. argparse::
        :ref: fairseq.models.transformer_parser
        :prog:
    """

    def __init__(self, encoder, decoder):
        super().__init__(encoder, decoder)

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        # fmt: off
        parser.add_argument('--dropout', type=float, metavar='D',
                            help='dropout probability')
        parser.add_argument('--attention-dropout', type=float, metavar='D',
                            help='dropout probability for attention weights')
        parser.add_argument('--relu-dropout', type=float, metavar='D',
                            help='dropout probability after ReLU in FFN')
        parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
                            help='path to pre-trained encoder embedding')
        parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
                            help='encoder embedding dimension')
        parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
                            help='encoder embedding dimension for FFN')
        parser.add_argument('--encoder-layers', type=int, metavar='N',
                            help='num encoder layers')
        parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
                            help='num encoder attention heads')
        parser.add_argument('--encoder-normalize-before', action='store_true',
                            help='apply layernorm before each encoder block')
        parser.add_argument('--encoder-learned-pos', action='store_true',
                            help='use learned positional embeddings in the encoder')
        parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
                            help='path to pre-trained decoder embedding')
        parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
                            help='decoder embedding dimension')
        parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
                            help='decoder embedding dimension for FFN')
        parser.add_argument('--decoder-layers', type=int, metavar='N',
                            help='num decoder layers')
        parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
                            help='num decoder attention heads')
        parser.add_argument('--decoder-learned-pos', action='store_true',
                            help='use learned positional embeddings in the decoder')
        parser.add_argument('--decoder-normalize-before', action='store_true',
                            help='apply layernorm before each decoder block')
        parser.add_argument('--share-decoder-input-output-embed', action='store_true',
                            help='share decoder input and output embeddings')
        parser.add_argument('--share-all-embeddings', action='store_true',
                            help='share encoder, decoder and output embeddings'
                                 ' (requires shared dictionary and embed dim)')
        parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
                            help='if set, disables positional embeddings (outside self attention)')
        parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
                            help='comma separated list of adaptive softmax cutoff points. '
                                 'Must be used with adaptive_loss criterion'),
        parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
                            help='sets adaptive softmax dropout for the tail projections')
        parser.add_argument('--max-relative-length', type=int, default=-1,
                            help='the max relative length')
        parser.add_argument('--k-only', default=False, action='store_true',
                            help='select the relative mode to map relative position information')
        # fmt: on

    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance."""
        # make sure all arguments are present in older models
        ubase_architecture(args)

        if not hasattr(args, 'max_source_positions'):
            args.max_source_positions = 1024
        if not hasattr(args, 'max_target_positions'):
            args.max_target_positions = 1024

        src_dict, tgt_dict = task.source_dictionary, task.target_dictionary

        def build_embedding(dictionary, embed_dim, path=None):
            # Build a padded embedding table, optionally initialised from a
            # pre-trained embedding file.
            num_embeddings = len(dictionary)
            padding_idx = dictionary.pad()
            emb = Embedding(num_embeddings, embed_dim, padding_idx)
            # if provided, load from preloaded dictionaries
            if path:
                embed_dict = utils.parse_embedding(path)
                utils.load_embedding(embed_dict, dictionary, emb)
            return emb

        if args.share_all_embeddings:
            # Sharing all embeddings requires one joined vocabulary and
            # matching dimensions on both sides.
            if src_dict != tgt_dict:
                raise ValueError('--share-all-embeddings requires a joined dictionary')
            if args.encoder_embed_dim != args.decoder_embed_dim:
                raise ValueError(
                    '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
            if args.decoder_embed_path and (
                    args.decoder_embed_path != args.encoder_embed_path):
                raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')
            encoder_embed_tokens = build_embedding(
                src_dict, args.encoder_embed_dim, args.encoder_embed_path
            )
            decoder_embed_tokens = encoder_embed_tokens
            args.share_decoder_input_output_embed = True
        else:
            encoder_embed_tokens = build_embedding(
                src_dict, args.encoder_embed_dim, args.encoder_embed_path
            )
            decoder_embed_tokens = build_embedding(
                tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
            )

        encoder = UTransformerEncoder(args, src_dict, encoder_embed_tokens)
        decoder = UTransformerDecoder(args, tgt_dict, decoder_embed_tokens)
        # Use `cls` (not the hard-coded class name) so subclasses built on
        # top of this model construct instances of themselves.
        return cls(encoder, decoder)
class UTransformerEncoder(FairseqEncoder):
    """
    Transformer encoder consisting of *args.encoder_layers* layers. Each layer
    is a :class:`TransformerEncoderLayer`.

    Unlike the vanilla Transformer encoder, ``forward`` returns the output of
    *every* layer (``encoder_out`` is a list) — presumably so the decoder can
    attend to intermediate encoder states; confirm against UTransformerDecoder.

    Args:
        args (argparse.Namespace): parsed command-line arguments
        dictionary (~fairseq.data.Dictionary): encoding dictionary
        embed_tokens (torch.nn.Embedding): input embedding
        left_pad (bool, optional): whether the input is left-padded
            (default: True).
    """

    def __init__(self, args, dictionary, embed_tokens, left_pad=True):
        super().__init__(dictionary)
        self.dropout = args.dropout
        embed_dim = embed_tokens.embedding_dim
        self.padding_idx = embed_tokens.padding_idx
        self.max_source_positions = args.max_source_positions
        self.embed_tokens = embed_tokens
        # Scale token embeddings by sqrt(dim), as in the original Transformer.
        self.embed_scale = math.sqrt(embed_dim)
        self.embed_positions = PositionalEmbedding(
            args.max_source_positions, embed_dim, self.padding_idx,
            left_pad=left_pad,
            learned=args.encoder_learned_pos,
        ) if not args.no_token_positional_embeddings else None
        self.layers = nn.ModuleList([])
        self.layers.extend([
            UTransformerEncoderLayer(args)
            for i in range(args.encoder_layers)
        ])
        # Version buffer used by upgrade_state_dict_named below to detect
        # checkpoints from before the final layer norm was introduced.
        self.register_buffer('version', torch.Tensor([2]))
        self.normalize = args.encoder_normalize_before
        if self.normalize:
            self.layer_norm = LayerNorm(embed_dim)

    def forward(self, src_tokens, src_lengths):
        """
        Args:
            src_tokens (LongTensor): tokens in the source language of shape
                `(batch, src_len)`
            src_lengths (torch.LongTensor): lengths of each source sentence of
                shape `(batch)`

        Returns:
            dict:
                - **encoder_out** (list of Tensor): the output of every
                  encoder layer, each of shape `(src_len, batch, embed_dim)`
                - **encoder_padding_mask** (ByteTensor): the positions of
                  padding elements of shape `(batch, src_len)`
        """
        # embed tokens and positions
        x = self.embed_scale * self.embed_tokens(src_tokens)
        if self.embed_positions is not None:
            x += self.embed_positions(src_tokens)
        x = F.dropout(x, p=self.dropout, training=self.training)
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        # compute padding mask
        encoder_padding_mask = src_tokens.eq(self.padding_idx)
        if not encoder_padding_mask.any():
            # No padding anywhere in the batch: skip masking entirely.
            encoder_padding_mask = None
        encoder_out_list = []
        # encoder layers
        for layer in self.layers:
            x = layer(x, encoder_padding_mask)
            # Collect every layer's output, not just the last one.
            encoder_out_list.append(x)
        if self.normalize:
            # NOTE(review): the normalized `x` is not stored back into
            # encoder_out_list, so the returned list holds the raw
            # per-layer outputs and the layer_norm result is discarded —
            # confirm this is intentional.
            x = self.layer_norm(x)
        # encoder_out_list.reverse()
        return {
            'encoder_out': encoder_out_list,  # T x B x C
            'encoder_padding_mask': encoder_padding_mask,  # B x T
        }

    def reorder_encoder_out(self, encoder_out, new_order):
        """
        Reorder encoder output according to *new_order*.

        Args:
            encoder_out: output from the ``forward()`` method
            new_order (LongTensor): desired order

        Returns:
            *encoder_out* rearranged according to *new_order*
        """
        if encoder_out['encoder_out'] is not None:
            # Reorder each per-layer output along the batch dimension (dim 1,
            # since tensors are T x B x C).
            for i in range(len(encoder_out['encoder_out'])):
                encoder_out['encoder_out'][i] = \
                    encoder_out['encoder_out'][i].index_select(1, new_order)
        if encoder_out['encoder_padding_mask'] is not None:
            # The padding mask is B x T, so batch is dim 0 here.
            encoder_out['encoder_padding_mask'] = \
                encoder_out['encoder_padding_mask'].index_select(0, new_order)
        return encoder_out

    def max_positions(self):
        """Maximum input length supported by the encoder."""
        if self.embed_positions is None:
            return self.max_source_positions
        return min(self.max_source_positions, self.embed_positions.max_positions())

    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade a (possibly old) state dict for new versions of fairseq."""
        if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
            # Sinusoidal embeddings are recomputed, not stored: drop any saved
            # weights and keep only a dtype/device placeholder tensor.
            weights_key = '{}.embed_positions.weights'.format(name)
            if weights_key in state_dict:
                del state_dict[weights_key]
            state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
        version_key = '{}.version'.format(name)
        if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
            # earlier checkpoints did not normalize after the stack of layers
            self.layer_norm = None
            self.normalize = False
            state_dict[version_key] = torch.Tensor([1])
        return state_dict
class UTransformerDecoder(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
left_pad (bool, optional): whether the input is left-padded
(default: False).
final_norm (bool, optional): apply layer norm to the output of the
final decoder layer (default: True).
"""
    def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False, left_pad=False, final_norm=True):
        """Build the decoder: embeddings, optional input/output projections,
        the decoder layer stack, and the output projection (adaptive softmax,
        tied to the input embedding, or a free embedding matrix).
        """
        super().__init__(dictionary)
        self.dropout = args.dropout
        self.share_input_output_embed = args.share_decoder_input_output_embed
        input_embed_dim = embed_tokens.embedding_dim
        embed_dim = args.decoder_embed_dim
        output_embed_dim = args.decoder_output_dim
        padding_idx = embed_tokens.padding_idx
        self.max_target_positions = args.max_target_positions
        self.embed_tokens = embed_tokens
        self.embed_scale = math.sqrt(embed_dim)  # TODO: try scaling by input_embed_dim instead
        # Project token embeddings into the model dim only when they differ.
        self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None
        self.embed_positions = PositionalEmbedding(
            args.max_target_positions, embed_dim, padding_idx,
            left_pad=left_pad,
            learned=args.decoder_learned_pos,
        ) if not args.no_token_positional_embeddings else None
        self.layers = nn.ModuleList([])
        self.layers.extend([
            UTransformerDecoderLayer(args, no_encoder_attn)
            for _ in range(args.decoder_layers)
        ])
        self.adaptive_softmax = None
        # No output projection when adaptive weights are tied: the adaptive
        # softmax then owns its own per-cluster projections.
        self.project_out_dim = Linear(embed_dim, output_embed_dim, bias=False) \
            if embed_dim != output_embed_dim and not args.tie_adaptive_weights else None
        if args.adaptive_softmax_cutoff is not None:
            self.adaptive_softmax = AdaptiveSoftmax(
                len(dictionary),
                output_embed_dim,
                options.eval_str_list(args.adaptive_softmax_cutoff, type=int),
                dropout=args.adaptive_softmax_dropout,
                adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,
                factor=args.adaptive_softmax_factor,
                tie_proj=args.tie_adaptive_proj,
            )
        elif not self.share_input_output_embed:
            # Free (untied) output embedding matrix, normal-initialized.
            self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), output_embed_dim))
            nn.init.normal_(self.embed_out, mean=0, std=output_embed_dim ** -0.5)
        # Version buffer consumed by the checkpoint-upgrade logic.
        self.register_buffer('version', torch.Tensor([2]))
        self.normalize = args.decoder_normalize_before and final_norm
        if self.normalize:
            self.layer_norm = LayerNorm(embed_dim)
def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for input feeding/teacher forcing
encoder_out (Tensor, optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
Returns:
tuple:
- the last decoder layer's output of shape `(batch, tgt_len,
vocab)`
- the last decoder layer's attention weights of shape `(batch,
tgt_len, src_len)`
"""
# embed positions
positions = self.embed_positions(
prev_output_tokens,
incremental_state=incremental_state,
) if self.embed_positions is not None else None
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
attn = None
inner_states = [x]
# decoder layers
# average the corresponding encoder layer and above
encoder_out_list = [sum(encoder_out['encoder_out'][i:])/(len(encoder_out['encoder_out'])-i)
for i in range(len(encoder_out['encoder_out']))]
# pay more attention to the corresponding encoder layer
'''encoder_out_list = []
for i in range(len(encoder_out['encoder_out'])):
all_exp = [math.exp(m+i) for m in range(len(encoder_out['encoder_out'][i:]))]
sum_exp = sum(all_exp)
weight_encoder_out = []
| |
"""
This is to understand how Config.extra works.
See also: https://pydantic-docs.helpmanual.io/usage/model_config/
TL;DR - There are some surprises when your Models define properties, both when setting/getting them and when exporting via dict()
"""
import pytest
from pydantic import BaseModel, Extra, ValidationError
# Payload with one key ("thing4") beyond the three fields the test models declare.
DATA = {"thing1": "THING1", "thing2": "THING2", "thing3": "THING3", "thing4": "THING4"}
# Same as DATA plus "thing5"; used by the export (dict()) tests below.
DATA4EXPORTS = {"thing1": "THING1", "thing2": "THING2", "thing3": "THING3", "thing4": "THING4", "thing5": "THING5"}
class TestExtra:
    """Behaviour of Config.extra at load time and on attribute assignment."""
    def test_allow(self):
        """Extra.allow keeps undeclared keys and permits attaching new fields."""
        class AllowModel(BaseModel):
            class Config:
                extra = Extra.allow
            thing1: str
            thing2: str
            thing3: str
        model = AllowModel(**DATA)
        # The undeclared key from DATA becomes a real attribute.
        assert hasattr(model, "thing4")
        assert model.thing4 == "THING4"
        # New fields may also be attached after construction.
        assert not hasattr(model, "thing5")
        model.thing5 = "THING5"
        assert model.thing5 == "THING5"
    def test_forbid(self):
        """Extra.forbid rejects undeclared keys at load time and on assignment."""
        class ForbidModel(BaseModel):
            class Config:
                extra = Extra.forbid
            thing1: str
            thing2: str
            thing3: str
        with pytest.raises(ValidationError):
            ForbidModel(**DATA)
        model = ForbidModel(**{"thing1": "THING1", "thing2": "THING2", "thing3": "THING3"})
        # Attaching new fields after construction is rejected too.
        with pytest.raises(ValueError):
            model.thing4 = "THING4"
    def test_ignore(self):
        """Extra.ignore silently drops undeclared keys and rejects assignment."""
        class IgnoreModel(BaseModel):
            class Config:
                extra = Extra.ignore
            thing1: str
            thing2: str
            thing3: str
        model = IgnoreModel(**DATA)
        assert not hasattr(model, "thing4")
        # Attaching new fields after construction is rejected.
        with pytest.raises(ValueError):
            model.thing4 = "THING4"
class TestExtraExporting:
    """
    How do extra fields behave when a model is exported via dict()?
    """
    def test_allow(self):
        """
        Extra fields are exported the same as declared fields.
        """
        class AllowModel(BaseModel):
            class Config:
                extra = Extra.allow
            thing1: str
            thing2: str
            thing3: str
        for flag in (True, False):
            exported = AllowModel(**DATA4EXPORTS).dict(
                exclude_unset=flag, exclude_none=flag, exclude_defaults=flag
            )
            assert "thing4" in exported
            assert exported["thing4"] == "THING4"
    def test_forbid(self):
        """
        Nothing to check here since the extra fields are not loaded.
        """
        pass
    def test_ignore(self):
        """
        Extra fields are never set on the Model and, therefore, not available for dict() to export.
        """
        class IgnoreModel(BaseModel):
            class Config:
                extra = Extra.ignore
            thing1: str
            thing2: str
            thing3: str
        for flag in (True, False):
            exported = IgnoreModel(**DATA4EXPORTS).dict(
                exclude_unset=flag, exclude_none=flag, exclude_defaults=flag
            )
            assert "thing4" not in exported
class TestExtraWithProperties:
    """
    What happens when a Model has properties that overlap with data provided when the Model is instantiated?
    """
    def test_class(self):
        """
        An @property without a setter is read-only and will raise AttributeError when attempting to set it.
        """
        class MyClass(object):
            # A read-only property
            @property
            def thing4(self):
                return "foo - bar - baz"
            # A read-write property
            @property
            def thing5(self):
                if hasattr(self, "_thing5"):
                    return self._thing5
                return "foo - bar"
            @thing5.setter
            def thing5(self, value):
                self._thing5 = value
        data = MyClass()
        assert hasattr(data, "thing4")
        assert data.thing4 == "foo - bar - baz"
        with pytest.raises(AttributeError):
            data.thing4 = "THING4"
        assert hasattr(data, "thing5")
        assert data.thing5 == "foo - bar"
        data.thing5 = "THING5"
        assert data.thing5 == "THING5"
    def test_allow(self):
        """
        Some surprises. See comments.
        """
        class MyModel(BaseModel):
            class Config:
                extra = Extra.allow
            thing1: str
            thing2: str
            thing3: str
            @property
            def thing4(self):
                return f"{self.thing1} - {self.thing2} - {self.thing3}"
            @property
            def thing5(self):
                return f"{self.thing1} - {self.thing2}"
            @thing5.setter
            def thing5(self, value):
                raise RuntimeError("This will never be raised.")
        data = MyModel(**DATA)
        # This is as expected. The properties exist and return the function values.
        assert hasattr(data, "thing4")
        assert data.thing4 == "THING1 - THING2 - THING3"
        assert hasattr(data, "thing5")
        assert data.thing5 == "THING1 - THING2"
        # Setting read-only properties does not fail and setting read-write properties does not invoke the setter.
        # In both cases, accessing the properties returns the functions' value.
        # Property thing4 is not read-only. Setting its value silently fails. I was expecting AttributeError.
        data.thing4 = "THING4"
        assert data.thing4 == "THING1 - THING2 - THING3"
        # The setter for property thing5 is silently ignored. I was expecting the RuntimeError.
        data.thing5 = "THING5"
        assert data.thing5 == "THING1 - THING2"
    def test_forbid(self):
        """
        Some surprises. See comments.
        """
        class MyModel(BaseModel):
            class Config:
                extra = Extra.forbid
            thing1: str
            thing2: str
            thing3: str
            @property
            def thing4(self):
                return f"{self.thing1} - {self.thing2} - {self.thing3}"
            @property
            def thing5(self):
                return f"{self.thing1} - {self.thing2}"
            @thing5.setter
            def thing5(self, value):
                raise RuntimeError("This will never be raised.")
            @property
            def thing6(self):
                return f"{self.thing1} - {self.thing2}"
            @thing6.setter
            def thing6(self, value):
                # Fixed: this setter previously wrote to `this.*`, which would
                # have raised NameError had it ever been invoked (it never is —
                # pydantic raises ValueError before the setter runs, see below).
                self.thing1 = value
                self.thing2 = value
                self.thing3 = value
        with pytest.raises(ValidationError):
            # This fails.
            # Not because thing4 is a read-only property but because thing4 is not a declared field on the Model.
            # See below where we attempt to set thing4 and thing5.
            data = MyModel(**DATA)
        data = MyModel(**{"thing1": "THING1", "thing2": "THING2", "thing3": "THING3"})
        # This is as expected. The properties exist and return the function values.
        assert hasattr(data, "thing4")
        assert data.thing4 == "THING1 - THING2 - THING3"
        assert hasattr(data, "thing5")
        assert data.thing5 == "THING1 - THING2"
        # I was expecting AttributeError to be raised (as it is for a non-Model object) but we get ValueError.
        assert hasattr(data, "thing4")
        with pytest.raises(ValueError):
            data.thing4 = "THING4"
        # I was expecting thing5's setter to be invoked but we get ValueError instead.
        with pytest.raises(ValueError):
            data.thing5 = "THING5"
        # I was expecting thing6's setter to be invoked but we get ValueError instead.
        # I included this just in case the RuntimeError was being caught & reraised.
        with pytest.raises(ValueError):
            data.thing6 = "THING6"
    def test_ignore(self):
        """
        When Config.extra == Extra.ignore the model acts exactly the same as in test_forbid WRT properties.
        """
        class MyModel(BaseModel):
            class Config:
                extra = Extra.ignore
            thing1: str
            thing2: str
            thing3: str
            @property
            def thing4(self):
                return f"{self.thing1} - {self.thing2} - {self.thing3}"
            @property
            def thing5(self):
                return f"{self.thing1} - {self.thing2}"
            @thing5.setter
            def thing5(self, value):
                raise RuntimeError("This will never be raised.")
        data = MyModel(**DATA)
        assert hasattr(data, "thing4")
        assert data.thing4 == "THING1 - THING2 - THING3"
        assert hasattr(data, "thing5")
        assert data.thing5 == "THING1 - THING2"
        # The thing4 property is now read-only but raises ValueError instead of AttributeError as a regular class
        # would.
        assert hasattr(data, "thing4")
        with pytest.raises(ValueError):
            data.thing4 = "THING4"
        # The thing5 property's setter is ignored and, instead, a ValueError is raised the same as with property
        # thing4.
        with pytest.raises(ValueError):
            data.thing5 = "THING5"
        assert data.thing5 == "THING1 - THING2"
class TestExtraExportingWithProperties:
"""
What happens when you export a model that has properties?
"""
def test_class(self):
# Nothing to check here since a regular class cannot export itself.
pass
def test_allow(self):
"""
Some surprises, see comments.
"""
class MyModel(BaseModel):
class Config:
extra = Extra.allow
thing1: str
thing2: str
thing3: str
@property
def thing4(self):
return f"{self.thing1} - {self.thing2} - {self.thing3}"
@property
def thing5(self):
return f"{self.thing1} - {self.thing2}"
@thing5.setter
def thing5(self, value):
raise RuntimeError("This will never be raised.")
@property
def thing6(self):
return f"{self.thing1} - {self.thing2} - {self.thing3}"
@property
def thing7(self):
return f"{self.thing1} - {self.thing2}"
@thing7.setter
def thing7(self, value):
raise RuntimeError("This will never be raised.")
for exclude in [True, False]:
data = MyModel(**DATA4EXPORTS)
dyct = data.dict(exclude_unset=exclude, exclude_none=exclude, exclude_defaults=exclude)
# As expected because thing4 & thing5 are provided when MyModel is created.
# We know from TestExtraWithProperties that extra properties will be available and it is reasonable to
# assume that dict() will include them.
assert "thing4" in dyct
assert "thing5" in dyct
# As expected because thing6 & thing7 are not provided when MyModel is created and we have no expectation
# that dict() would include properties which are, after all, methods on the instance.
assert "thing6" not in dyct
assert "thing7" not in dyct
# This may be a little surprising. From TestExtraWithProperties we know that accessing the field
# (data.thing4) actually invokes the property getter. So, it is not unreasonable to assume that the
# dict() would include the property value. Instead, however, it includes the value provided when the Model
# was created.
with pytest.raises(AssertionError):
assert dyct["thing4"] == "THING1 - THING2 - THING3"
assert dyct["thing4"] == "THING4" # From DATA4EXPORTS
with pytest.raises(AssertionError):
assert dyct["thing5"] == "THING1 - THING2"
assert dyct["thing5"] == "THING5" # From DATA4EXPORTS
# TestExtraWithProperties tells us that we can set the properties if they were given values when the Model
# was created. It also tells us that accessing the field will return the property value. From the test above
# we expect that the dict() would include the set value rather than either the value provided | |
is not None:
scale = scale.to(device=device, dtype=dtype)
assert len(scale.shape) == 1 and (
len(scale) == 2 or len(scale) == 4
), f"`scale` shall have 2 or 4 elements. Got {scale}."
_joint_range_check(cast(torch.Tensor, scale[:2]), "scale")
_scale = _adapted_uniform((batch_size,), scale[0], scale[1], same_on_batch).unsqueeze(1).repeat(1, 2)
if len(scale) == 4:
_joint_range_check(cast(torch.Tensor, scale[2:]), "scale_y")
_scale[:, 1] = _adapted_uniform((batch_size,), scale[2], scale[3], same_on_batch)
_scale = _scale.to(device=_device, dtype=_dtype)
else:
_scale = torch.ones((batch_size, 2), device=_device, dtype=_dtype)
if translate is not None:
translate = translate.to(device=device, dtype=dtype)
assert (
0.0 <= translate[0] <= 1.0 and 0.0 <= translate[1] <= 1.0 and translate.shape == torch.Size([2])
), f"Expect translate contains two elements and ranges are in [0, 1]. Got {translate}."
max_dx: torch.Tensor = translate[0] * width
max_dy: torch.Tensor = translate[1] * height
translations = torch.stack(
[
_adapted_uniform((batch_size,), -max_dx, max_dx, same_on_batch),
_adapted_uniform((batch_size,), -max_dy, max_dy, same_on_batch),
],
dim=-1,
)
translations = translations.to(device=_device, dtype=_dtype)
else:
translations = torch.zeros((batch_size, 2), device=_device, dtype=_dtype)
center: torch.Tensor = torch.tensor([width, height], device=_device, dtype=_dtype).view(1, 2) / 2.0 - 0.5
center = center.expand(batch_size, -1)
if shear is not None:
shear = shear.to(device=device, dtype=dtype)
_joint_range_check(cast(torch.Tensor, shear)[0], "shear")
_joint_range_check(cast(torch.Tensor, shear)[1], "shear")
sx = _adapted_uniform((batch_size,), shear[0][0], shear[0][1], same_on_batch)
sy = _adapted_uniform((batch_size,), shear[1][0], shear[1][1], same_on_batch)
sx = sx.to(device=_device, dtype=_dtype)
sy = sy.to(device=_device, dtype=_dtype)
else:
sx = sy = torch.tensor([0] * batch_size, device=_device, dtype=_dtype)
return dict(translations=translations, center=center, scale=_scale, angle=angle, sx=sx, sy=sy)
def random_rotation_generator(
    batch_size: int,
    degrees: torch.Tensor,
    same_on_batch: bool = False,
    device: torch.device = torch.device('cpu'),
    dtype: torch.dtype = torch.float32,
) -> Dict[str, torch.Tensor]:
    r"""Get parameters for ``rotate`` for a random rotate transform.
    Args:
        batch_size (int): the tensor batch size.
        degrees (torch.Tensor): range of degrees with shape (2) to select from.
        same_on_batch (bool): apply the same transformation across the batch. Default: False.
        device (torch.device): the device on which the random numbers will be generated. Default: cpu.
        dtype (torch.dtype): the data type of the generated random numbers. Default: float32.
    Returns:
        params Dict[str, torch.Tensor]: parameters to be passed for transformation.
            - degrees (torch.Tensor): element-wise rotation degrees with a shape of (B,).
    Note:
        The generated random numbers are not reproducible across different devices and dtypes.
    """
    _common_param_check(batch_size, same_on_batch)
    _joint_range_check(degrees, "degrees")
    # Sample on the requested device/dtype ...
    low = degrees[0].to(device=device, dtype=dtype)
    high = degrees[1].to(device=device, dtype=dtype)
    sampled = _adapted_uniform((batch_size,), low, high, same_on_batch)
    # ... then return on the device/dtype of the input range tensor.
    return dict(degrees=sampled.to(device=degrees.device, dtype=degrees.dtype))
def random_crop_generator(
    batch_size: int,
    input_size: Tuple[int, int],
    size: Union[Tuple[int, int], torch.Tensor],
    resize_to: Optional[Tuple[int, int]] = None,
    same_on_batch: bool = False,
    device: torch.device = torch.device('cpu'),
    dtype: torch.dtype = torch.float32,
) -> Dict[str, torch.Tensor]:
    r"""Get parameters for ```crop``` transformation for crop transform.
    Args:
        batch_size (int): the tensor batch size.
        input_size (tuple): Input image shape, like (h, w).
        size (tuple): Desired size of the crop operation, like (h, w).
            If tensor, it must be (B, 2).
        resize_to (tuple): Desired output size of the crop, like (h, w). If None, no resize will be performed.
        same_on_batch (bool): apply the same transformation across the batch. Default: False.
        device (torch.device): the device on which the random numbers will be generated. Default: cpu.
        dtype (torch.dtype): the data type of the generated random numbers. Default: float32.
    Returns:
        params Dict[str, torch.Tensor]: parameters to be passed for transformation.
            - src (torch.Tensor): cropping bounding boxes with a shape of (B, 4, 2).
            - dst (torch.Tensor): output bounding boxes with a shape (B, 4, 2).
    Note:
        The generated random numbers are not reproducible across different devices and dtypes.
    Example:
        >>> _ = torch.manual_seed(0)
        >>> crop_size = torch.tensor([[25, 28], [27, 29], [26, 28]])
        >>> random_crop_generator(3, (30, 30), size=crop_size, same_on_batch=False)
        {'src': tensor([[[ 1.,  0.],
                 [28.,  0.],
                 [28., 24.],
                 [ 1., 24.]],
        <BLANKLINE>
                [[ 1.,  1.],
                 [29.,  1.],
                 [29., 27.],
                 [ 1., 27.]],
        <BLANKLINE>
                [[ 0.,  3.],
                 [27.,  3.],
                 [27., 28.],
                 [ 0., 28.]]]), 'dst': tensor([[[ 0.,  0.],
                 [27.,  0.],
                 [27., 24.],
                 [ 0., 24.]],
        <BLANKLINE>
                [[ 0.,  0.],
                 [28.,  0.],
                 [28., 26.],
                 [ 0., 26.]],
        <BLANKLINE>
                [[ 0.,  0.],
                 [27.,  0.],
                 [27., 25.],
                 [ 0., 25.]]]), 'input_size': tensor([[30, 30],
                [30, 30],
                [30, 30]])}
    """
    _common_param_check(batch_size, same_on_batch)
    _device, _dtype = _extract_device_dtype([size if isinstance(size, torch.Tensor) else None])
    # Use floating point for the box math even when `size` was an int tensor.
    _dtype = _dtype if _dtype in [torch.float16, torch.float32, torch.float64] else dtype
    if not isinstance(size, torch.Tensor):
        size = torch.tensor(size, device=_device, dtype=_dtype).repeat(batch_size, 1)
    else:
        size = size.to(device=_device, dtype=_dtype)
    assert size.shape == torch.Size([batch_size, 2]), (
        "If `size` is a tensor, it must be shaped as (B, 2). "
        f"Got {size.shape} while expecting {torch.Size([batch_size, 2])}."
    )
    assert (
        input_size[0] > 0 and input_size[1] > 0 and (size > 0).all()
    ), f"Got non-positive input size or size. {input_size}, {size}."
    size = size.floor()
    # Number of valid start positions along each axis (per batch element).
    x_diff = input_size[1] - size[:, 1] + 1
    y_diff = input_size[0] - size[:, 0] + 1
    # Start point will be 0 if diff < 0 (crop larger than the input).
    x_diff = x_diff.clamp(0)
    y_diff = y_diff.clamp(0)
    if batch_size == 0:
        # Fix: include `input_size` so the empty-batch return has the same
        # keys as the normal path below (callers reading 'input_size' would
        # otherwise hit a KeyError).
        return dict(
            src=torch.zeros([0, 4, 2], device=_device, dtype=_dtype),
            dst=torch.zeros([0, 4, 2], device=_device, dtype=_dtype),
            input_size=torch.zeros([0, 2], device=_device, dtype=torch.long),
        )
    if same_on_batch:
        # If same_on_batch, select the first then repeat.
        x_start = _adapted_uniform((batch_size,), 0, x_diff[0].to(device=device, dtype=dtype), same_on_batch).floor()
        y_start = _adapted_uniform((batch_size,), 0, y_diff[0].to(device=device, dtype=dtype), same_on_batch).floor()
    else:
        # NOTE(review): shape (1,) with per-element bounds relies on
        # `_adapted_uniform` broadcasting against the (B,)-shaped diffs;
        # the .view(-1) below flattens the result back to (B,).
        x_start = _adapted_uniform((1,), 0, x_diff.to(device=device, dtype=dtype), same_on_batch).floor()
        y_start = _adapted_uniform((1,), 0, y_diff.to(device=device, dtype=dtype), same_on_batch).floor()
    # Source boxes: the sampled crop regions. Zero-sized crops (after floor)
    # fall back to the full input extent via torch.where.
    crop_src = bbox_generator(
        x_start.view(-1).to(device=_device, dtype=_dtype),
        y_start.view(-1).to(device=_device, dtype=_dtype),
        torch.where(size[:, 1] == 0, torch.tensor(input_size[1], device=_device, dtype=_dtype), size[:, 1]),
        torch.where(size[:, 0] == 0, torch.tensor(input_size[0], device=_device, dtype=_dtype), size[:, 0]),
    )
    if resize_to is None:
        # Destination boxes anchored at the origin with the crop's own size.
        crop_dst = bbox_generator(
            torch.tensor([0] * batch_size, device=_device, dtype=_dtype),
            torch.tensor([0] * batch_size, device=_device, dtype=_dtype),
            size[:, 1],
            size[:, 0],
        )
    else:
        assert (
            len(resize_to) == 2
            and isinstance(resize_to[0], (int,))
            and isinstance(resize_to[1], (int,))
            and resize_to[0] > 0
            and resize_to[1] > 0
        ), f"`resize_to` must be a tuple of 2 positive integers. Got {resize_to}."
        # All crops resized to the same fixed output rectangle.
        crop_dst = torch.tensor(
            [[[0, 0], [resize_to[1] - 1, 0], [resize_to[1] - 1, resize_to[0] - 1], [0, resize_to[0] - 1]]],
            device=_device,
            dtype=_dtype,
        ).repeat(batch_size, 1, 1)
    _input_size = torch.tensor(input_size, device=_device, dtype=torch.long).expand(batch_size, -1)
    return dict(src=crop_src, dst=crop_dst, input_size=_input_size)
def random_crop_size_generator(
batch_size: int,
size: Tuple[int, int],
scale: torch.Tensor,
ratio: torch.Tensor,
same_on_batch: bool = False,
device: torch.device = torch.device('cpu'),
dtype: torch.dtype = torch.float32,
) -> Dict[str, torch.Tensor]:
r"""Get cropping heights and widths for ```crop``` transformation for resized crop transform.
Args:
batch_size (int): the tensor batch size.
size (Tuple[int, int]): expected output size of each edge.
scale (torch.Tensor): range of size of the origin size cropped with (2,) shape.
ratio (torch.Tensor): range of aspect ratio of the origin aspect ratio cropped with (2,) shape.
same_on_batch (bool): apply the same transformation across the batch. Default: False.
device (torch.device): the device on which the random numbers will be generated. Default: cpu.
dtype (torch.dtype): the data type of the generated random numbers. Default: float32.
Returns:
params Dict[str, torch.Tensor]: parameters to be passed for transformation.
- size (torch.Tensor): element-wise cropping sizes with a shape of (B, 2).
Note:
The generated random numbers are not reproducible across different devices and dtypes.
Examples:
>>> _ = torch.manual_seed(42)
>>> random_crop_size_generator(3, (30, 30), scale=torch.tensor([.7, 1.3]), ratio=torch.tensor([.9, 1.]))
{'size': tensor([[29., 29.],
[27., 28.],
[26., 29.]])}
"""
_common_param_check(batch_size, same_on_batch)
_joint_range_check(scale, "scale")
_joint_range_check(ratio, "ratio")
assert (
len(size) == 2 and type(size[0]) is int and size[1] > 0 and type(size[1]) is int and size[1] > 0
), f"'height' and 'width' must be integers. Got {size}."
_device, _dtype = _extract_device_dtype([scale, ratio])
if batch_size == 0:
return dict(size=torch.zeros([0, 2], device=_device, dtype=_dtype))
scale = scale.to(device=device, dtype=dtype)
ratio = ratio.to(device=device, dtype=dtype)
# 10 trails for each element
area = _adapted_uniform((batch_size, 10), scale[0] * size[0] * size[1], scale[1] * size[0] * size[1], same_on_batch)
log_ratio = _adapted_uniform((batch_size, 10), torch.log(ratio[0]), torch.log(ratio[1]), same_on_batch)
aspect_ratio = torch.exp(log_ratio)
w = torch.sqrt(area * aspect_ratio).round().floor()
h = torch.sqrt(area / aspect_ratio).round().floor()
# Element-wise w, h condition
cond = ((0 < w) * (w < size[0]) * (0 < h) * (h < size[1])).int()
# torch.argmax is not reproducible accross devices: https://github.com/pytorch/pytorch/issues/17738
# Here, we will select the first occurance of the duplicated elements.
cond_bool, argmax_dim1 = ((cond.cumsum(1) == 1) & cond.bool()).max(1)
h_out = w[torch.arange(0, batch_size, device=device, dtype=torch.long), argmax_dim1]
w_out = h[torch.arange(0, batch_size, device=device, dtype=torch.long), argmax_dim1]
if not cond_bool.all():
# Fallback to center crop
in_ratio = float(size[0]) / float(size[1])
if in_ratio < ratio.min():
h_ct = torch.tensor(size[0], device=device, dtype=dtype)
w_ct = torch.round(h_ct / ratio.min())
elif in_ratio > |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.